label (string, 1-61 chars) | code (string, 4k-8k chars) | code_compressed (null)
---|---|---|
test empty resizes min args | import unittest
import importlib
import numpy as np
utils = importlib.import_module('extensions.sd-webui-controlnet.tests.utils', 'utils')
utils.setup_test_env()
from copy import copy
from scripts import external_code
from scripts import controlnet
from modules import scripts, ui, shared
class TestExternalCodeWorking(unittest.TestCase):
max_models = 6
args_offset = 10
def setUp(self):
self.scripts = copy(scripts.scripts_txt2img)
self.scripts.initialize_scripts(False)
ui.create_ui()
self.cn_script = controlnet.Script()
self.cn_script.args_from = self.args_offset
self.cn_script.args_to = self.args_offset + self.max_models
self.scripts.alwayson_scripts = [self.cn_script]
self.script_args = [None] * self.cn_script.args_from
self.initial_max_models = shared.opts.data.get("control_net_unit_count", 3)
shared.opts.data.update(control_net_unit_count=self.max_models)
self.extra_models = 0
def tearDown(self):
shared.opts.data.update(control_net_unit_count=self.initial_max_models)
def get_expected_args_to(self):
args_len = max(self.max_models, len(self.cn_units))
return self.args_offset + args_len
def assert_update_in_place_ok(self):
external_code.update_cn_script_in_place(self.scripts, self.script_args, self.cn_units)
self.assertEqual(self.cn_script.args_to, self.get_expected_args_to())
def METHOD_NAME(self):
self.cn_units = []
self.assert_update_in_place_ok()
def test_empty_resizes_extra_args(self):
extra_models = 1
self.cn_units = [external_code.ControlNetUnit()] * (self.max_models + extra_models)
self.assert_update_in_place_ok()
class TestControlNetUnitConversion(unittest.TestCase):
def setUp(self):
self.dummy_image = 'base64...'
self.input = {}
self.expected = external_code.ControlNetUnit()
def assert_converts_to_expected(self):
self.assertEqual(vars(external_code.to_processing_unit(self.input)), vars(self.expected))
def test_empty_dict_works(self):
self.assert_converts_to_expected()
def test_image_works(self):
self.input = {
'image': self.dummy_image
}
self.expected = external_code.ControlNetUnit(image=self.dummy_image)
self.assert_converts_to_expected()
def test_image_alias_works(self):
self.input = {
'input_image': self.dummy_image
}
self.expected = external_code.ControlNetUnit(image=self.dummy_image)
self.assert_converts_to_expected()
def test_masked_image_works(self):
self.input = {
'image': self.dummy_image,
'mask': self.dummy_image,
}
self.expected = external_code.ControlNetUnit(image={'image': self.dummy_image, 'mask': self.dummy_image})
self.assert_converts_to_expected()
class TestControlNetUnitImageToDict(unittest.TestCase):
def setUp(self):
self.dummy_image = utils.readImage("test/test_files/img2img_basic.png")
self.input = external_code.ControlNetUnit()
self.expected_image = external_code.to_base64_nparray(self.dummy_image)
self.expected_mask = external_code.to_base64_nparray(self.dummy_image)
def assert_dict_is_valid(self):
actual_dict = controlnet.image_dict_from_any(self.input.image)
self.assertEqual(actual_dict['image'].tolist(), self.expected_image.tolist())
self.assertEqual(actual_dict['mask'].tolist(), self.expected_mask.tolist())
def test_none(self):
self.assertEqual(controlnet.image_dict_from_any(self.input.image), None)
def test_image_without_mask(self):
self.input.image = self.dummy_image
self.expected_mask = np.zeros_like(self.expected_image, dtype=np.uint8)
self.assert_dict_is_valid()
def test_masked_image_tuple(self):
self.input.image = (self.dummy_image, self.dummy_image,)
self.assert_dict_is_valid()
def test_masked_image_dict(self):
self.input.image = {'image': self.dummy_image, 'mask': self.dummy_image}
self.assert_dict_is_valid()
class TestPixelPerfectResolution(unittest.TestCase):
def test_outer_fit(self):
image = np.zeros((100, 100, 3))
target_H, target_W = 50, 100
resize_mode = external_code.ResizeMode.OUTER_FIT
result = external_code.pixel_perfect_resolution(image, target_H, target_W, resize_mode)
expected = 50 # manually computed expected result
self.assertEqual(result, expected)
def test_inner_fit(self):
image = np.zeros((100, 100, 3))
target_H, target_W = 50, 100
resize_mode = external_code.ResizeMode.INNER_FIT
result = external_code.pixel_perfect_resolution(image, target_H, target_W, resize_mode)
expected = 100 # manually computed expected result
self.assertEqual(result, expected)
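# The "manually computed" expectations above can be reproduced with the usual
# pixel-perfect estimate (an assumption about pixel_perfect_resolution, not shown here):
# k0 = target_H / H = 50 / 100 = 0.5 and k1 = target_W / W = 100 / 100 = 1.0,
# OUTER_FIT takes min(k0, k1) * min(H, W) = 0.5 * 100 = 50,
# INNER_FIT takes max(k0, k1) * min(H, W) = 1.0 * 100 = 100.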
class TestGetAllUnitsFrom(unittest.TestCase):
def test_none(self):
self.assertListEqual(external_code.get_all_units_from([None]), [])
def test_bool(self):
self.assertListEqual(external_code.get_all_units_from([True]), [])
def test_inheritance(self):
class Foo(external_code.ControlNetUnit):
def __init__(self):
super().__init__(self)
self.bar = 'a'
foo = Foo()
self.assertListEqual(external_code.get_all_units_from([foo]), [foo])
def test_dict(self):
units = external_code.get_all_units_from([{}])
self.assertGreater(len(units), 0)
self.assertIsInstance(units[0], external_code.ControlNetUnit)
def test_unitlike(self):
class Foo(object):
""" bar """
foo = Foo()
for key in vars(external_code.ControlNetUnit()).keys():
setattr(foo, key, True)
setattr(foo, 'bar', False)
self.assertListEqual(external_code.get_all_units_from([foo]), [foo])
if __name__ == '__main__':
unittest.main() | null |
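The `TestGetAllUnitsFrom` assertions above imply a duck-typed collection rule: `None` and `bool` placeholders are dropped, dicts are converted, and any object carrying every `ControlNetUnit` attribute is kept as-is. A minimal sketch of that rule, with hypothetical helper names rather than the extension's real implementation:

```python
# Sketch only: summarises the behaviour the tests assert, not the extension's actual code.
def collect_units_sketch(script_args, unit_fields, dict_to_unit):
    units = []
    for arg in script_args:
        if arg is None or isinstance(arg, bool):
            continue  # placeholder slots in script_args are ignored
        if isinstance(arg, dict):
            units.append(dict_to_unit(arg))  # e.g. {} becomes a default ControlNetUnit
        elif all(hasattr(arg, field) for field in unit_fields):
            units.append(arg)  # "unit-like" objects pass through unchanged
    return units
```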
test 02 delete stats | # -*- coding: utf-8 -*-
from .base import MyApiTestCase
from privacyidea.lib.monitoringstats import write_stats
from privacyidea.lib.tokenclass import AUTH_DATE_FORMAT
from privacyidea.models import db
import datetime
class APIMonitoringTestCase(MyApiTestCase):
def test_01_get_stats(self):
# create some statistics
write_stats("key1", 1)
write_stats("key2", 50)
write_stats("key1", 2)
ts = datetime.datetime.now().isoformat()
write_stats("key2", 60)
write_stats("key1", 3)
write_stats("key2", 50)
write_stats("key1", 4)
# get available stats keys
with self.app.test_request_context('/monitoring/',
method='GET',
headers={'Authorization': self.at}):
res = self.app.full_dispatch_request()
self.assertEqual(200, res.status_code, res)
result = res.json.get("result")
self.assertIn("key1", result.get("value"), result)
self.assertIn("key2", result.get("value"), result)
# check values of key1
with self.app.test_request_context('/monitoring/key1',
method='GET',
headers={'Authorization': self.at}):
res = self.app.full_dispatch_request()
self.assertTrue(res.status_code == 200, res)
result = res.json.get("result")
self.assertEqual(len(result.get("value")), 4)
self.assertEqual(result.get("value")[0][1], 1)
self.assertEqual(result.get("value")[1][1], 2)
self.assertEqual(result.get("value")[2][1], 3)
self.assertEqual(result.get("value")[3][1], 4)
# check values of key1, with a start value in the past
with self.app.test_request_context('/monitoring/key1',
data={"start": "2010-01-01 10:00+0200"},
method='GET',
headers={'Authorization': self.at}):
res = self.app.full_dispatch_request()
self.assertTrue(res.status_code == 200, res)
result = res.json.get("result")
self.assertEqual(len(result.get("value")), 4)
# End value in the past will return no data.
with self.app.test_request_context('/monitoring/key1',
data={"end": "2010-01-01"},
method='GET',
headers={'Authorization': self.at}):
res = self.app.full_dispatch_request()
self.assertTrue(res.status_code == 200, res)
result = res.json.get("result")
self.assertEqual(len(result.get("value")), 0)
# check with start timestamp after the 2nd value.
# This should return the 3rd and 4th.
with self.app.test_request_context('/monitoring/key1',
data={"start": ts},
method='GET',
headers={'Authorization': self.at}):
res = self.app.full_dispatch_request()
self.assertTrue(res.status_code == 200, res)
result = res.json.get("result")
self.assertEqual(len(result.get("value")), 2)
self.assertEqual(result.get("value")[0][1], 3)
self.assertEqual(result.get("value")[1][1], 4)
# check the last value of key1
with self.app.test_request_context('/monitoring/key1/last',
method='GET',
headers={'Authorization': self.at}):
res = self.app.full_dispatch_request()
self.assertTrue(res.status_code == 200, res)
result = res.json.get("result")
self.assertEqual(4, result.get("value"), result)
def METHOD_NAME(self):
ts = datetime.datetime.now()
write_stats("key2", 60)
# Now we delete some keys (the three old ones)
with self.app.test_request_context('/monitoring/key2',
method='DELETE',
data={"start": "2010-01-01",
"end": ts.strftime(AUTH_DATE_FORMAT)},
headers={'Authorization': self.at}):
res = self.app.full_dispatch_request()
self.assertTrue(res.status_code == 200, res)
result = res.json.get("result")
# Number of deleted values
self.assertEqual(result.get("value"), 3)
# ..and check if there is only one key left!
with self.app.test_request_context('/monitoring/key2',
method='GET',
headers={'Authorization': self.at}):
res = self.app.full_dispatch_request()
self.assertTrue(res.status_code == 200, res)
result = res.json.get("result")
# Number of remaining values
self.assertEqual(1, len(result.get("value")), result) | null |
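The assertions above index `result["value"][i][1]`, so each entry is apparently a `(timestamp, value)` pair; a hedged illustration of that shape (timestamps are invented, only the counter values are asserted by the test):

```python
example_monitoring_result = {
    "value": [
        ["2023-01-01T10:00:00", 1],  # [timestamp, value] pairs in insertion order
        ["2023-01-01T10:00:01", 2],
        ["2023-01-01T10:00:02", 3],
        ["2023-01-01T10:00:03", 4],
    ]
}
assert example_monitoring_result["value"][0][1] == 1
assert example_monitoring_result["value"][3][1] == 4
```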
prepare | #! /usr/bin/env python
"""Retrieving DOIs from a papers website/url as a prep operation"""
from __future__ import annotations
import collections
import re
from dataclasses import dataclass
from sqlite3 import OperationalError
from typing import TYPE_CHECKING
import requests
import zope.interface
from dataclasses_jsonschema import JsonSchemaMixin
import colrev.env.package_manager
import colrev.exceptions as colrev_exceptions
import colrev.ops.built_in.search_sources.doi_org as doi_connector
import colrev.ops.search_sources
import colrev.record
if TYPE_CHECKING:
import colrev.ops.prep
# pylint: disable=too-few-public-methods
# pylint: disable=duplicate-code
@zope.interface.implementer(colrev.env.package_manager.PrepPackageEndpointInterface)
@dataclass
class DOIFromURLsPrep(JsonSchemaMixin):
"""Prepares records by retrieving its DOI from the website (URL)"""
settings_class = colrev.env.package_manager.DefaultSettings
ci_supported: bool = True
source_correction_hint = "check with the developer"
always_apply_changes = False
# https://www.crossref.org/blog/dois-and-matching-regular-expressions/
doi_regex = re.compile(r"10\.\d{4,9}/[-._;/:A-Za-z0-9]*")
def __init__(
self,
*,
prep_operation: colrev.ops.prep.Prep, # pylint: disable=unused-argument
settings: dict,
) -> None:
self.settings = self.settings_class.load_settings(data=settings)
self.same_record_type_required = (
prep_operation.review_manager.settings.is_curated_masterdata_repo()
)
try:
self.session = prep_operation.review_manager.get_cached_session()
except OperationalError as exc:
raise colrev_exceptions.ServiceNotAvailableException(
dep="sqlite-requests-cache"
) from exc
_, self.email = prep_operation.review_manager.get_committer()
def METHOD_NAME(
self, prep_operation: colrev.ops.prep.Prep, record: colrev.record.PrepRecord
) -> colrev.record.Record:
"""Prepare the record by retrieving its DOI from the website (url) if available"""
if ("url" not in record.data and "fulltext" not in record.data) or (
"doi" in record.data
):
return record
try:
url = record.data.get("url", record.data.get("fulltext", "NA"))
headers = {"user-agent": f"{__name__} " f"(mailto:{self.email})"}
ret = self.session.request(
"GET", url, headers=headers, timeout=prep_operation.timeout
)
ret.raise_for_status()
res = re.findall(self.doi_regex, ret.text)
if not res:
return record
if len(res) == 1:
ret_dois = [(res[0], 1)]
else:
counter = collections.Counter(res)
ret_dois = counter.most_common()
if not ret_dois:
return record
doi, _ = ret_dois[0]
retrieved_record_dict = {
"doi": doi.upper(),
"ID": record.data["ID"],
}
retrieved_record = colrev.record.PrepRecord(data=retrieved_record_dict)
doi_connector.DOIConnector.retrieve_doi_metadata(
review_manager=prep_operation.review_manager,
record=retrieved_record,
timeout=prep_operation.timeout,
)
similarity = colrev.record.PrepRecord.get_retrieval_similarity(
record_original=record,
retrieved_record_original=retrieved_record,
same_record_type_required=self.same_record_type_required,
)
if similarity < prep_operation.retrieval_similarity:
return record
record.merge(merging_record=retrieved_record, default_source=url)
except (
requests.exceptions.RequestException,
colrev_exceptions.InvalidMerge,
colrev_exceptions.RecordNotParsableException,
):
pass
return record | null |
outputs | """
rotate_fc
=========
Autogenerated DPF operator classes.
"""
from warnings import warn
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.METHOD_NAME import Output, _Outputs
from ansys.dpf.core.operators.specification import PinSpecification, Specification
class rotate_fc(Operator):
"""Apply a transformation (rotation) matrix on all the fields of a fields
container.
Parameters
----------
fields_container : FieldsContainer
coordinate_system : Field
3x3 rotation matrix
Examples
--------
>>> from ansys.dpf import core as dpf
>>> # Instantiate operator
>>> op = dpf.operators.geo.rotate_fc()
>>> # Make input connections
>>> my_fields_container = dpf.FieldsContainer()
>>> op.inputs.fields_container.connect(my_fields_container)
>>> my_coordinate_system = dpf.Field()
>>> op.inputs.coordinate_system.connect(my_coordinate_system)
>>> # Instantiate operator and connect inputs in one line
>>> op = dpf.operators.geo.rotate_fc(
... fields_container=my_fields_container,
... coordinate_system=my_coordinate_system,
... )
>>> # Get output data
>>> result_fields_container = op.outputs.fields_container()
"""
def __init__(
self, fields_container=None, coordinate_system=None, config=None, server=None
):
super().__init__(name="rotate_fc", config=config, server=server)
self._inputs = InputsRotateFc(self)
self._outputs = OutputsRotateFc(self)
if fields_container is not None:
self.inputs.fields_container.connect(fields_container)
if coordinate_system is not None:
self.inputs.coordinate_system.connect(coordinate_system)
@staticmethod
def _spec():
description = """Apply a transformation (rotation) matrix on all the fields of a fields
container."""
spec = Specification(
description=description,
map_input_pin_spec={
0: PinSpecification(
name="fields_container",
type_names=["fields_container"],
optional=False,
document="""""",
),
1: PinSpecification(
name="coordinate_system",
type_names=["field"],
optional=False,
document="""3x3 rotation matrix""",
),
},
map_output_pin_spec={
0: PinSpecification(
name="fields_container",
type_names=["fields_container"],
optional=False,
document="""""",
),
},
)
return spec
@staticmethod
def default_config(server=None):
"""Returns the default config of the operator.
This config can then be changed to the user needs and be used to
instantiate the operator. The Configuration allows to customize
how the operation will be processed by the operator.
Parameters
----------
server : server.DPFServer, optional
Server with channel connected to the remote or local instance. When
``None``, attempts to use the global server.
"""
return Operator.default_config(name="rotate_fc", server=server)
@property
def inputs(self):
"""Enables to connect inputs to the operator
Returns
--------
inputs : InputsRotateFc
"""
return super().inputs
@property
def METHOD_NAME(self):
"""Enables to get outputs of the operator by evaluating it
Returns
--------
outputs : OutputsRotateFc
"""
return super().METHOD_NAME
class InputsRotateFc(_Inputs):
"""Intermediate class used to connect user inputs to
rotate_fc operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.geo.rotate_fc()
>>> my_fields_container = dpf.FieldsContainer()
>>> op.inputs.fields_container.connect(my_fields_container)
>>> my_coordinate_system = dpf.Field()
>>> op.inputs.coordinate_system.connect(my_coordinate_system)
"""
def __init__(self, op: Operator):
super().__init__(rotate_fc._spec().inputs, op)
self._fields_container = Input(rotate_fc._spec().input_pin(0), 0, op, -1)
self._inputs.append(self._fields_container)
self._coordinate_system = Input(rotate_fc._spec().input_pin(1), 1, op, -1)
self._inputs.append(self._coordinate_system)
@property
def fields_container(self):
"""Allows to connect fields_container input to the operator.
Parameters
----------
my_fields_container : FieldsContainer
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.geo.rotate_fc()
>>> op.inputs.fields_container.connect(my_fields_container)
>>> # or
>>> op.inputs.fields_container(my_fields_container)
"""
return self._fields_container
@property
def coordinate_system(self):
"""Allows to connect coordinate_system input to the operator.
3x3 rotation matrix
Parameters
----------
my_coordinate_system : Field
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.geo.rotate_fc()
>>> op.inputs.coordinate_system.connect(my_coordinate_system)
>>> # or
>>> op.inputs.coordinate_system(my_coordinate_system)
"""
return self._coordinate_system
class OutputsRotateFc(_Outputs):
"""Intermediate class used to get outputs from
rotate_fc operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.geo.rotate_fc()
>>> # Connect inputs : op.inputs. ...
>>> result_fields_container = op.outputs.fields_container()
"""
def __init__(self, op: Operator):
super().__init__(rotate_fc._spec().METHOD_NAME, op)
self._fields_container = Output(rotate_fc._spec().output_pin(0), 0, op)
self._outputs.append(self._fields_container)
@property
def fields_container(self):
"""Allows to get fields_container output of the operator
Returns
----------
my_fields_container : FieldsContainer
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.geo.rotate_fc()
>>> # Connect inputs : op.inputs. ...
>>> result_fields_container = op.outputs.fields_container()
""" # noqa: E501
return self._fields_container | null |
cmake args | # Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import spack.build_systems.cmake
import spack.build_systems.generic
from spack.package import *
class Superlu(CMakePackage, Package):
"""SuperLU is a general purpose library for the direct solution of large,
sparse, nonsymmetric systems of linear equations on high performance
machines. SuperLU is designed for sequential machines."""
homepage = "https://portal.nersc.gov/project/sparse/superlu/"
url = "https://github.com/xiaoyeli/superlu/archive/refs/tags/v5.3.0.tar.gz"
tags = ["e4s"]
test_requires_compiler = True
version("6.0.0", sha256="5c199eac2dc57092c337cfea7e422053e8f8229f24e029825b0950edd1d17e8e")
version(
"5.3.0",
sha256="3e464afa77335de200aeb739074a11e96d9bef6d0b519950cfa6684c4be1f350",
preferred=True,
)
version("5.2.2", sha256="470334a72ba637578e34057f46948495e601a5988a602604f5576367e606a28c")
version("5.2.1", sha256="28fb66d6107ee66248d5cf508c79de03d0621852a0ddeba7301801d3d859f463")
version(
"4.3",
sha256="169920322eb9b9c6a334674231479d04df72440257c17870aaa0139d74416781",
deprecated=True,
url="https://crd-legacy.lbl.gov/~xiaoye/SuperLU/superlu_4.3.tar.gz",
)
version(
"4.2",
sha256="5a06e19bf5a597405dfeea39fe92aa8c5dd41da73c72c7187755a75f581efb28",
deprecated=True,
url="https://crd-legacy.lbl.gov/~xiaoye/SuperLU/superlu_4.2.tar.gz",
)
build_system(
conditional("cmake", when="@5:"), conditional("generic", when="@:4"), default="cmake"
)
variant("pic", default=True, description="Build with position independent code")
depends_on("blas")
conflicts(
"@:5.2.1",
when="%apple-clang@12:",
msg="Older SuperLU is incompatible with newer compilers",
)
examples_src_dir = "EXAMPLE"
def test_example(self):
"""build and run test example"""
test_dir = join_path(self.test_suite.current_test_cache_dir, self.examples_src_dir)
test_exe = "superlu"
test_src = f"{test_exe}.c"
if not os.path.isfile(join_path(test_dir, test_src)):
raise SkipTest(f"Cached {test_src} is missing")
with working_dir(test_dir):
args = []
if self.version < Version("5.2.2"):
args.append("HEADER=" + self.prefix.include)
args.append(test_exe)
make = which("make")
make(*args)
superlu = which(test_exe)
superlu()
class BaseBuilder(metaclass=spack.builder.PhaseCallbacksMeta):
@run_after("install")
def setup_standalone_tests(self):
"""Set up and copy example source files after the package is installed
to an install test subdirectory for use during `spack test run`."""
makefile = join_path(self.pkg.examples_src_dir, "Makefile")
if self.spec.satisfies("@5.2.2:"):
# Include dir was hardcoded in 5.2.2
filter_file(
r"INCLUDEDIR = -I\.\./SRC", "INCLUDEDIR = -I" + self.prefix.include, makefile
)
# Create the example makefile's include file and ensure the new file
# is the one use.
filename = "make.inc"
config_args = []
if self.spec.satisfies("@5:"):
lib = "libsuperlu.a"
else:
config_args.append("PLAT = _x86_64")
lib = f"libsuperlu_{self.spec.version}.a"
config_args.extend(self._make_hdr_for_test(lib))
with open(join_path(self.pkg.examples_src_dir, filename), "w") as inc:
for option in config_args:
inc.write(f"{option}\n")
# change the path in the example's Makefile to the file written above
filter_file(r"include \.\./" + filename, "include ./" + filename, makefile)
# Cache the examples directory for use by stand-alone tests
self.pkg.cache_extra_test_sources(self.pkg.examples_src_dir)
def _make_hdr_for_test(self, lib):
"""Standard configure arguments for make.inc"""
ranlib = "ranlib" if which("ranlib") else "echo"
return [
f"SuperLUroot = {self.prefix}",
f"SUPERLULIB = {self.prefix.lib}/{lib}",
f"BLASLIB = {self.spec['blas'].libs.ld_flags}",
"TMGLIB = libtmglib.a",
"LIBS = $(SUPERLULIB) $(BLASLIB)",
"ARCH = ar",
"ARCHFLAGS = cr",
f"RANLIB = {ranlib}",
f"CC = {env['CC']}",
f"FORTRAN = {env['FC']}",
f"LOADER = {env['CC']}",
"CFLAGS = -O3 -DNDEBUG -DUSE_VENDOR_BLAS -DPRNTlevel=0 -DAdd_",
"NOOPTS = -O0",
]
class CMakeBuilder(BaseBuilder, spack.build_systems.cmake.CMakeBuilder):
def METHOD_NAME(self):
if self.pkg.version > Version("5.2.1"):
_blaslib_key = "enable_internal_blaslib"
else:
_blaslib_key = "enable_blaslib"
args = [
self.define(_blaslib_key, False),
self.define("CMAKE_INSTALL_LIBDIR", self.prefix.lib),
self.define_from_variant("CMAKE_POSITION_INDEPENDENT_CODE", "pic"),
self.define("enable_tests", self.pkg.run_tests),
]
return args
class GenericBuilder(BaseBuilder, spack.build_systems.generic.GenericBuilder):
def install(self, pkg, spec, prefix):
"""Use autotools before version 5"""
# Define make.inc file
config = [
"PLAT = _x86_64",
"SuperLUroot = %s" % self.pkg.stage.source_path,
# 'SUPERLULIB = $(SuperLUroot)/lib/libsuperlu$(PLAT).a',
"SUPERLULIB = $(SuperLUroot)/lib/libsuperlu_{0}.a".format(self.pkg.spec.version),
"BLASDEF = -DUSE_VENDOR_BLAS",
"BLASLIB = {0}".format(spec["blas"].libs.ld_flags),
# or BLASLIB = -L/usr/lib64 -lblas
"TMGLIB = libtmglib.a",
"LIBS = $(SUPERLULIB) $(BLASLIB)",
"ARCH = ar",
"ARCHFLAGS = cr",
"RANLIB = {0}".format("ranlib" if which("ranlib") else "echo"),
"CC = {0}".format(env["CC"]),
"FORTRAN = {0}".format(env["FC"]),
"LOADER = {0}".format(env["CC"]),
"CDEFS = -DAdd_",
]
if "+pic" in spec:
config.extend(
[
# Use these lines instead when pic_flag capability arrives
"CFLAGS = -O3 {0}".format(self.pkg.compiler.cc_pic_flag),
"NOOPTS = {0}".format(self.pkg.compiler.cc_pic_flag),
"FFLAGS = -O2 {0}".format(self.pkg.compiler.f77_pic_flag),
"LOADOPTS = {0}".format(self.pkg.compiler.cc_pic_flag),
]
)
else:
config.extend(
["CFLAGS = -O3", "NOOPTS = ", "FFLAGS = -O2", "LOADOPTS = "]
)
with open("make.inc", "w") as inc:
for option in config:
inc.write("{0}\n".format(option))
make(parallel=False)
install_tree("lib", prefix.lib)
mkdir(prefix.include)
install(join_path("SRC", "*.h"), prefix.include) | null |
test poa chain connection | #!/usr/bin/env python3
# encoding: utf-8
import unittest as ut
from SEEDBlockchain import Wallet
from web3 import Web3
from seedemu import *
class MultipleChainsTestCase(ut.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.wallet1 = Wallet(chain_id=1337)
cls.wallet2 = Wallet(chain_id=1338)
for name in ['Alice', 'Bob', 'Charlie', 'David', 'Eve']:
cls.wallet1.createAccount(name)
cls.wallet2.createAccount(name)
url_1 = 'http://10.152.0.71:8540'
url_2 = 'http://10.160.0.71:8545'
cls.wallet1.connectToBlockchain(url_1)
cls.wallet2.connectToBlockchain(url_2, isPOA=True)
def test_pow_chain_connection(self):
self.assertTrue(self.wallet1._web3.isConnected())
def METHOD_NAME(self):
self.assertTrue(self.wallet2._web3.isConnected())
def test_pow_chain_id(self):
self.assertEqual(self.wallet1._web3.eth.chain_id, 1337)
def test_poa_chain_id(self):
self.assertEqual(self.wallet2._web3.eth.chain_id, 1338)
def test_pow_send_transaction(self):
recipient = self.wallet1.getAccountAddressByName('Bob')
txhash = self.wallet1.sendTransaction(recipient, 0.1, sender_name='David', wait=True, verbose=False)
self.assertTrue(self.wallet1.getTransactionReceipt(txhash)["status"], 1)
def test_poa_send_transaction(self):
recipient = self.wallet2.getAccountAddressByName('Alice')
txhash = self.wallet2.sendTransaction(recipient, 0.1, sender_name='Eve', wait=True, verbose=False)
self.assertTrue(self.wallet2.getTransactionReceipt(txhash)["status"], 1)
def test_pow_chain_consensus(self):
config = dict(self.wallet1._web3.geth.admin.nodeInfo().protocols.eth.config)
self.assertTrue("ethash" in config.keys())
def test_poa_chain_consensus(self):
config = dict(self.wallet2._web3.geth.admin.nodeInfo().protocols.eth.config)
self.assertTrue("clique" in config.keys())
def test_pow_peer_counts(self):
peer_counts = len(self.wallet1._web3.geth.admin.peers())
self.assertEqual(peer_counts, 3)
def test_poa_peer_counts(self):
peer_counts = len(self.wallet2._web3.geth.admin.peers())
self.assertEqual(peer_counts, 2)
def test_import_account(self):
self.assertEqual(self.wallet1._web3.eth.getBalance(Web3.toChecksumAddress("9f189536def35811e1a759860672fe49a4f89e94")), 10)
def test_pow_emulator_account(self):
accounts = []
for i in range(1,5):
accounts.extend(EthAccount.createEmulatorAccountsFromMnemonic(i, mnemonic="great awesome fun seed security lab protect system network prevent attack future", balance=32*EthUnit.ETHER.value, total=1, password="admin"))
for account in accounts:
self.assertTrue(self.wallet1._web3.eth.getBalance(account.address) >= 32*EthUnit.ETHER.value)
def test_poa_emulator_account(self):
accounts = []
for i in range(5,9):
accounts.extend(EthAccount.createEmulatorAccountsFromMnemonic(i, mnemonic="great awesome fun seed security lab protect system network prevent attack future", balance=32*EthUnit.ETHER.value, total=1, password="admin"))
for account in accounts:
self.assertTrue(self.wallet2._web3.eth.getBalance(account.address) >= 32*EthUnit.ETHER.value)
def test_pow_create_account(self):
account = EthAccount.createEmulatorAccountFromMnemonic(3, mnemonic="great awesome fun seed security lab protect system network prevent attack future", balance=20*EthUnit.ETHER.value, index=1, password="admin")
self.assertTrue(self.wallet1._web3.eth.getBalance(account.address) >= 20*EthUnit.ETHER.value)
def test_pow_create_accounts(self):
accounts = []
for index in range(1, 4):
accounts.append(EthAccount.createEmulatorAccountFromMnemonic(7, mnemonic="great awesome fun seed security lab protect system network prevent attack future", balance=30*EthUnit.ETHER.value, index=index, password="admin"))
for account in accounts:
self.assertTrue(self.wallet2._web3.eth.getBalance(account.address) >= 30*EthUnit.ETHER.value)
if __name__ == "__main__":
test_suite = ut.TestLoader().loadTestsFromTestCase(MultipleChainsTestCase)
res = ut.TextTestRunner(verbosity=2).run(test_suite)
num, errs, fails = res.testsRun, len(res.errors), len(res.failures)
print("score: %d of %d (%d errors, %d failures)" % (num - (errs+fails), num, errs, fails)) | null |
create archiver | #!/usr/bin/env python3
"""
DESCRIPTION
"""
import sys
import pscheduler
from kafka import KafkaProducer
from kafka.errors import KafkaError
import tempfile
import os
MAX_SCHEMA = 1
SECURITY_PROTOCOL_SSL = "SSL"
log_prefix = "archiver-kafka"
log = pscheduler.Log(prefix=log_prefix, quiet=False)
archiver = None
class psKafkaArchiver(object):
def is_connected(self):
"""
Determine if producer is connected
"""
return self.producer is not None
def close(self):
"""
Close producer connection
"""
if self.producer:
self.producer.close()
self.producer = None
if(self.security_protocol == SECURITY_PROTOCOL_SSL):
log.debug("Cleaning up cert files")
os.unlink(self.ssl_cafile)
os.unlink(self.ssl_keyfile)
os.unlink(self.ssl_certfile)
def publish(self, message):
"""
Send message to producer
"""
if self.is_connected() is True:
self.producer.send(self.topic, message)
def _write_to_tempfile(self, data):
tfile = tempfile.NamedTemporaryFile(delete=False)
tfile.write(bytes(data,'utf-8'))
tfile.flush()
tfile.close()
return tfile
def __init__(
self,
bootstrap_servers,
topic,
archiver_id='ps-kafka-archiver',
retries=3,
security_protocol="",
ssl_cacert=None,
ssl_cert=None,
ssl_key=None,
ssl_password="",
ssl_checkhostname=False,
):
self.bootstrap_servers = bootstrap_servers
self.topic = topic
self.ssl_checkhostname = False
self.client_id = archiver_id
self.retries = retries
log.debug(security_protocol)
if security_protocol == SECURITY_PROTOCOL_SSL:
self.security_protocol = security_protocol
self.ssl_cacert = ssl_cacert
cafile = self._write_to_tempfile(ssl_cacert)
self.ssl_cafile = cafile.name
self.ssl_cert = ssl_cert
certfile = self._write_to_tempfile(ssl_cert)
self.ssl_certfile = certfile.name
self.ssl_key = ssl_key
keyfile = self._write_to_tempfile(ssl_key)
self.ssl_keyfile = keyfile.name
if ssl_password:
self.ssl_password = ssl_password
log.debug("Cert Files: "+self.ssl_cafile+","+self.ssl_certfile+","+self.ssl_keyfile)
self.ssl_checkhostname = ssl_checkhostname
self.producer = KafkaProducer(
bootstrap_servers=self.bootstrap_servers,
client_id=self.client_id,
retries=self.retries,
security_protocol=self.security_protocol,
ssl_cafile=self.ssl_cafile,
ssl_certfile=self.ssl_certfile,
ssl_keyfile=self.ssl_keyfile,
ssl_password=self.ssl_password,
ssl_check_hostname=self.ssl_checkhostname,
)
else:
self.producer = KafkaProducer(
bootstrap_servers=self.bootstrap_servers,
client_id=self.client_id,
retries=self.retries,
)
def get_archiver(archiverconfig):
global archiver
if archiver is not None and archiver.is_connected():
log.debug("Kafka archiver exists. So reusing")
return
else:
if archiver:
archiver.close()
archiver = None
METHOD_NAME(archiverconfig)
def METHOD_NAME(archiverconfig):
bootstrap_servers = archiverconfig["server"]
log.debug("Bootstrap server: " + bootstrap_servers)
topic = archiverconfig["topic"]
log.debug("Topic: " + topic)
archiver_id = archiverconfig["archiver-id"]
retries = archiverconfig["kafka-retries"]
security_protocol = archiverconfig["security-protocol"]
log.debug("Security protocol is"+security_protocol)
global archiver
if (security_protocol == SECURITY_PROTOCOL_SSL):
log.debug("Found security protocol in config")
archiver = psKafkaArchiver(
bootstrap_servers=bootstrap_servers,
archiver_id=archiver_id,
topic=topic,
retries=retries,
security_protocol=security_protocol,
ssl_cert=archiverconfig["_ssl-cert"],
ssl_cacert=archiverconfig["_ssl-cacert"],
ssl_key=archiverconfig["_ssl-key"],
ssl_password=archiverconfig["_ssl-password"],
ssl_checkhostname=archiverconfig["ssl-checkhostname"],
)
else:
log.debug("Creating plain text archiver")
archiver = psKafkaArchiver(
bootstrap_servers=bootstrap_servers,
archiver_id=archiver_id,
topic=topic,
retries=retries,
)
def archive(json):
# Do archiving here
data = json["data"]
message = pscheduler.json_dump(json["result"]).encode("utf-8")
log.debug("Kafka server and topic %s" % data)
try:
get_archiver(data)
archiver.publish(message)
result = {"succeeded": True}
archiver.close()
except Exception as ex:
# Cleanup producer and start a fresh connection
if archiver:
archiver.close()
get_archiver(data)
result = {"succeeded": False, "error": "Failed to send message: %s" % (str(ex))}
if "retry-policy" in data:
log.debug("Retry exists")
policy = pscheduler.RetryPolicy(data["retry-policy"], iso8601=True)
retry_time = policy.retry(json["attempts"])
if retry_time is not None:
result["retry"] = retry_time
return result
PARSER = pscheduler.RFC7464Parser(sys.stdin)
EMITTER = pscheduler.RFC7464Emitter(sys.stdout)
for parsed in PARSER:
EMITTER(archive(parsed))
pscheduler.succeed() | null |
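`archive()` only reads a handful of keys from each parsed input object; a hedged example of that structure (values are invented and only the keys consumed above are shown):

```python
example_input = {
    "data": {                                # archiver spec from the pScheduler task
        "server": "kafka.example.net:9092",
        "topic": "pscheduler-results",
        "archiver-id": "ps-kafka-archiver",
        "kafka-retries": 3,
        "security-protocol": "",             # "SSL" would also require the _ssl-* keys
                                             # and ssl-checkhostname used above
        # "retry-policy": [...],             # optional; only consulted after a failure
    },
    "result": {"succeeded": True},           # the JSON that gets published to the topic
    "attempts": 0,                           # fed to RetryPolicy.retry() on failure
}
```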
physical resource id | from typing import Any, Dict, List, Optional
from moto.core import CloudFormationModel
from moto.core.utils import iso_8601_datetime_with_milliseconds, utcnow
from .core import TaggedEC2Resource
from ..utils import random_nat_gateway_id, random_private_ip
class NatGateway(CloudFormationModel, TaggedEC2Resource):
def __init__(
self,
backend: Any,
subnet_id: str,
allocation_id: str,
tags: Optional[Dict[str, str]] = None,
connectivity_type: str = "public",
):
# public properties
self.id = random_nat_gateway_id()
self.subnet_id = subnet_id
self.address_set: List[Dict[str, Any]] = []
self.state = "available"
self.private_ip = random_private_ip()
self.connectivity_type = connectivity_type
# protected properties
self._created_at = utcnow()
self.ec2_backend = backend
# NOTE: this is the core of NAT Gateways creation
self._eni = self.ec2_backend.create_network_interface(
backend.get_subnet(self.subnet_id), self.private_ip
)
# associate allocation with ENI
if allocation_id and connectivity_type != "private":
self.ec2_backend.associate_address(
eni=self._eni, allocation_id=allocation_id
)
self.add_tags(tags or {})
self.vpc_id = self.ec2_backend.get_subnet(subnet_id).vpc_id
@property
def METHOD_NAME(self) -> str:
return self.id
@property
def create_time(self) -> str:
return iso_8601_datetime_with_milliseconds(self._created_at)
@staticmethod
def cloudformation_name_type() -> str:
return ""
@staticmethod
def cloudformation_type() -> str:
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-natgateway.html
return "AWS::EC2::NatGateway"
@classmethod
def create_from_cloudformation_json( # type: ignore[misc]
cls,
resource_name: str,
cloudformation_json: Any,
account_id: str,
region_name: str,
**kwargs: Any
) -> "NatGateway":
from ..models import ec2_backends
ec2_backend = ec2_backends[account_id][region_name]
nat_gateway = ec2_backend.create_nat_gateway(
cloudformation_json["Properties"]["SubnetId"],
cloudformation_json["Properties"]["AllocationId"],
)
return nat_gateway
class NatGatewayBackend:
def __init__(self) -> None:
self.nat_gateways: Dict[str, NatGateway] = {}
def describe_nat_gateways(
self, filters: Any, nat_gateway_ids: Optional[List[str]]
) -> List[NatGateway]:
nat_gateways = list(self.nat_gateways.values())
if nat_gateway_ids:
nat_gateways = [item for item in nat_gateways if item.id in nat_gateway_ids]
if filters is not None:
if filters.get("nat-gateway-id") is not None:
nat_gateways = [
nat_gateway
for nat_gateway in nat_gateways
if nat_gateway.id in filters["nat-gateway-id"]
]
if filters.get("vpc-id") is not None:
nat_gateways = [
nat_gateway
for nat_gateway in nat_gateways
if nat_gateway.vpc_id in filters["vpc-id"]
]
if filters.get("subnet-id") is not None:
nat_gateways = [
nat_gateway
for nat_gateway in nat_gateways
if nat_gateway.subnet_id in filters["subnet-id"]
]
if filters.get("state") is not None:
nat_gateways = [
nat_gateway
for nat_gateway in nat_gateways
if nat_gateway.state in filters["state"]
]
return nat_gateways
def create_nat_gateway(
self,
subnet_id: str,
allocation_id: str,
tags: Optional[Dict[str, str]] = None,
connectivity_type: str = "public",
) -> NatGateway:
nat_gateway = NatGateway(
self, subnet_id, allocation_id, tags, connectivity_type
)
address_set: Dict[str, Any] = {}
if allocation_id:
eips = self.address_by_allocation([allocation_id]) # type: ignore[attr-defined]
eip = eips[0] if len(eips) > 0 else None
if eip:
address_set["allocationId"] = allocation_id
address_set["publicIp"] = eip.public_ip or None
address_set["associationId"] = eip.association_id or None
address_set["networkInterfaceId"] = nat_gateway._eni.id
address_set["privateIp"] = nat_gateway._eni.private_ip_address
nat_gateway.address_set.append(address_set)
self.nat_gateways[nat_gateway.id] = nat_gateway
return nat_gateway
def delete_nat_gateway(self, nat_gateway_id: str) -> NatGateway:
nat_gw: NatGateway = self.nat_gateways.get(nat_gateway_id) # type: ignore
nat_gw.state = "deleted"
return nat_gw | null |
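A hedged usage sketch of the backend above, driven through boto3 with moto's EC2 mock (the decorator is `mock_ec2` up to moto 4.x and `mock_aws` from moto 5; response shapes are the standard boto3 ones):

```python
import boto3
from moto import mock_ec2  # moto >= 5 exposes mock_aws instead

@mock_ec2
def nat_gateway_roundtrip():
    ec2 = boto3.client("ec2", region_name="us-east-1")
    vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]
    subnet = ec2.create_subnet(VpcId=vpc["VpcId"], CidrBlock="10.0.1.0/24")["Subnet"]
    # connectivity_type="private" avoids the elastic-IP association path above.
    created = ec2.create_nat_gateway(
        SubnetId=subnet["SubnetId"], ConnectivityType="private"
    )["NatGateway"]
    found = ec2.describe_nat_gateways(NatGatewayIds=[created["NatGatewayId"]])
    return found["NatGateways"][0]["State"]  # "available", per NatGateway.__init__

print(nat_gateway_roundtrip())
```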
test decimals include nan | # This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
import decimal
from math import copysign, inf
import pytest
from hypothesis import HealthCheck, assume, given, reject, settings
from hypothesis.errors import InvalidArgument
from hypothesis.internal.floats import next_down, next_up
from hypothesis.strategies import (
booleans,
data,
decimals,
floats,
fractions,
integers,
none,
sampled_from,
tuples,
)
from tests.common.debug import find_any
@settings(suppress_health_check=list(HealthCheck))
@given(data())
def test_fuzz_floats_bounds(data):
width = data.draw(sampled_from([64, 32, 16]))
bound = none() | floats(allow_nan=False, width=width)
low, high = data.draw(tuples(bound, bound), label="low, high")
if low is not None and high is not None and low > high:
low, high = high, low
exmin = low is not None and low != inf and data.draw(booleans(), label="ex_min")
exmax = high is not None and high != -inf and data.draw(booleans(), label="ex_max")
if low is not None and high is not None:
lo = next_up(low, width) if exmin else low
hi = next_down(high, width) if exmax else high
# There must actually be floats between these bounds
assume(lo <= hi)
if lo == hi == 0:
assume(not exmin and not exmax and copysign(1.0, lo) <= copysign(1.0, hi))
s = floats(low, high, exclude_min=exmin, exclude_max=exmax, width=width)
val = data.draw(s, label="value")
assume(val) # positive/negative zero is an issue
if low is not None:
assert low <= val
if high is not None:
assert val <= high
if exmin:
assert low != val
if exmax:
assert high != val
@given(data())
def test_fuzz_fractions_bounds(data):
denom = data.draw(none() | integers(1, 100), label="denominator")
fracs = none() | fractions(max_denominator=denom)
low, high = data.draw(tuples(fracs, fracs), label="low, high")
if low is not None and high is not None and low > high:
low, high = high, low
try:
val = data.draw(fractions(low, high, max_denominator=denom), label="value")
except InvalidArgument:
reject() # fractions too close for given max_denominator
if low is not None:
assert low <= val
if high is not None:
assert val <= high
if denom is not None:
assert 1 <= val.denominator <= denom
@given(data())
def test_fuzz_decimals_bounds(data):
places = data.draw(none() | integers(0, 20), label="places")
finite_decs = (
decimals(allow_nan=False, allow_infinity=False, places=places) | none()
)
low, high = data.draw(tuples(finite_decs, finite_decs), label="low, high")
if low is not None and high is not None and low > high:
low, high = high, low
ctx = decimal.Context(prec=data.draw(integers(1, 100), label="precision"))
try:
with decimal.localcontext(ctx):
strat = decimals(
low, high, allow_nan=False, allow_infinity=False, places=places
)
val = data.draw(strat, label="value")
except InvalidArgument:
reject() # decimals too close for given places
if low is not None:
assert low <= val
if high is not None:
assert val <= high
if places is not None:
assert val.as_tuple().exponent == -places
def test_all_decimals_can_be_exact_floats():
find_any(
decimals(), lambda x: assume(x.is_finite()) and decimal.Decimal(float(x)) == x
)
@given(fractions(), fractions(), fractions())
def test_fraction_addition_is_well_behaved(x, y, z):
assert x + y + z == y + x + z
def METHOD_NAME():
find_any(decimals(), lambda x: x.is_nan())
def test_decimals_include_inf():
find_any(decimals(), lambda x: x.is_infinite(), settings(max_examples=10**6))
@given(decimals(allow_nan=False))
def test_decimals_can_disallow_nan(x):
assert not x.is_nan()
@given(decimals(allow_infinity=False))
def test_decimals_can_disallow_inf(x):
assert not x.is_infinite()
@pytest.mark.parametrize("places", range(10))
def test_decimals_have_correct_places(places):
@given(decimals(0, 10, allow_nan=False, places=places))
def inner_tst(n):
assert n.as_tuple().exponent == -places
inner_tst()
@given(decimals(min_value="0.1", max_value="0.2", allow_nan=False, places=1))
def test_works_with_few_values(dec):
assert dec in (decimal.Decimal("0.1"), decimal.Decimal("0.2"))
@given(decimals(places=3, allow_nan=False, allow_infinity=False))
def test_issue_725_regression(x):
pass
@given(decimals(min_value="0.1", max_value="0.3"))
def test_issue_739_regression(x):
pass
def test_consistent_decimal_error():
bad = "invalid argument to Decimal"
with pytest.raises(InvalidArgument) as excinfo:
decimals(bad).example()
with pytest.raises(InvalidArgument) as excinfo2:
with decimal.localcontext(decimal.Context(traps=[])):
decimals(bad).example()
assert str(excinfo.value) == str(excinfo2.value)
@pytest.mark.parametrize(
"s, msg",
[
(
floats(min_value=inf, allow_infinity=False),
"allow_infinity=False excludes min_value=inf",
),
(
floats(min_value=next_down(inf), exclude_min=True, allow_infinity=False),
"exclude_min=True turns min_value=.+? into inf, but allow_infinity=False",
),
(
floats(max_value=-inf, allow_infinity=False),
"allow_infinity=False excludes max_value=-inf",
),
(
floats(max_value=next_up(-inf), exclude_max=True, allow_infinity=False),
"exclude_max=True turns max_value=.+? into -inf, but allow_infinity=False",
),
],
)
def test_floats_message(s, msg):
# https://github.com/HypothesisWorks/hypothesis/issues/3207
with pytest.raises(InvalidArgument, match=msg):
s.validate() | null |
version | """
Module for managing dnsmasq
"""
import logging
import os
import salt.utils.files
import salt.utils.platform
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)
def __virtual__():
"""
Only work on POSIX-like systems.
"""
if salt.utils.platform.is_windows():
return (
False,
"dnsmasq execution module cannot be loaded: only works on "
"non-Windows systems.",
)
return True
def METHOD_NAME():
"""
Shows installed version of dnsmasq.
CLI Example:
.. code-block:: bash
salt '*' dnsmasq.version
"""
cmd = "dnsmasq -v"
out = __salt__["cmd.run"](cmd).splitlines()
comps = out[0].split()
return comps[2]
def fullversion():
"""
Shows installed version of dnsmasq and compile options.
CLI Example:
.. code-block:: bash
salt '*' dnsmasq.fullversion
"""
cmd = "dnsmasq -v"
out = __salt__["cmd.run"](cmd).splitlines()
comps = out[0].split()
version_num = comps[2]
comps = out[1].split()
return {"version": version_num, "compile options": comps[3:]}
def set_config(config_file="/etc/dnsmasq.conf", follow=True, **kwargs):
"""
Sets a value or a set of values in the specified file. By default, if
conf-dir is configured in this file, salt will attempt to set the option
in any file inside the conf-dir where it has already been enabled. If it
does not find it inside any files, it will append it to the main config
file. Setting follow to False will turn off this behavior.
If a config option currently appears multiple times (such as dhcp-host,
which is specified at least once per host), the new option will be added
to the end of the main config file (and not to any includes). If you need
an option added to a specific include file, specify it as the config_file.
:param string config_file: config file where settings should be updated / added.
:param bool follow: attempt to set the config option inside any file within
the ``conf-dir`` where it has already been enabled.
:param kwargs: key value pairs that contain the configuration settings that you
want set.
CLI Examples:
.. code-block:: bash
salt '*' dnsmasq.set_config domain=mydomain.com
salt '*' dnsmasq.set_config follow=False domain=mydomain.com
salt '*' dnsmasq.set_config config_file=/etc/dnsmasq.conf domain=mydomain.com
"""
dnsopts = get_config(config_file)
includes = [config_file]
if follow is True and "conf-dir" in dnsopts:
for filename in os.listdir(dnsopts["conf-dir"]):
if filename.startswith("."):
continue
if filename.endswith("~"):
continue
if filename.endswith("bak"):
continue
if filename.startswith("#") and filename.endswith("#"):
continue
includes.append("{}/{}".format(dnsopts["conf-dir"], filename))
ret_kwargs = {}
for key in kwargs:
# Filter out __pub keys as they should not be added to the config file
# See Issue #34263 for more information
if key.startswith("__"):
continue
ret_kwargs[key] = kwargs[key]
if key in dnsopts:
if isinstance(dnsopts[key], str):
for config in includes:
__salt__["file.sed"](
path=config,
before="^{}=.*".format(key),
after="{}={}".format(key, kwargs[key]),
)
else:
__salt__["file.append"](config_file, "{}={}".format(key, kwargs[key]))
else:
__salt__["file.append"](config_file, "{}={}".format(key, kwargs[key]))
return ret_kwargs
def get_config(config_file="/etc/dnsmasq.conf"):
"""
Dumps all options from the config file.
config_file
The location of the config file from which to obtain contents.
Defaults to ``/etc/dnsmasq.conf``.
CLI Examples:
.. code-block:: bash
salt '*' dnsmasq.get_config
salt '*' dnsmasq.get_config config_file=/etc/dnsmasq.conf
"""
dnsopts = _parse_dnamasq(config_file)
if "conf-dir" in dnsopts:
for filename in os.listdir(dnsopts["conf-dir"]):
if filename.startswith("."):
continue
if filename.endswith("~"):
continue
if filename.startswith("#") and filename.endswith("#"):
continue
dnsopts.update(
_parse_dnamasq("{}/{}".format(dnsopts["conf-dir"], filename))
)
return dnsopts
def _parse_dnamasq(filename):
"""
Generic function for parsing dnsmasq files including includes.
"""
fileopts = {}
if not os.path.isfile(filename):
raise CommandExecutionError("Error: No such file '{}'".format(filename))
with salt.utils.files.fopen(filename, "r") as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if not line.strip():
continue
if line.startswith("#"):
continue
if "=" in line:
comps = line.split("=")
if comps[0] in fileopts:
if isinstance(fileopts[comps[0]], str):
temp = fileopts[comps[0]]
fileopts[comps[0]] = [temp]
fileopts[comps[0]].append(comps[1].strip())
else:
fileopts[comps[0]] = comps[1].strip()
else:
if "unparsed" not in fileopts:
fileopts["unparsed"] = []
fileopts["unparsed"].append(line)
return fileopts | null |
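An illustration of the parsing rules in `_parse_dnamasq` above, applied to an invented config: `key=value` lines become plain entries, a repeated key is folded into a list, and bare option lines are collected under `unparsed`:

```python
# Given a file containing:
#     domain=mydomain.com
#     dhcp-host=aa:bb:cc:dd:ee:f1,host1
#     dhcp-host=aa:bb:cc:dd:ee:f2,host2
#     domain-needed
# the parser above would return roughly:
expected = {
    "domain": "mydomain.com",
    "dhcp-host": ["aa:bb:cc:dd:ee:f1,host1", "aa:bb:cc:dd:ee:f2,host2"],
    "unparsed": ["domain-needed\n"],  # lines without "=" are kept verbatim
}
```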
resolve possibly secret value | import re
import sentry_sdk
from flask import current_app
from spiffworkflow_backend.exceptions.api_error import ApiError
from spiffworkflow_backend.models.db import db
from spiffworkflow_backend.models.secret_model import SecretModel
class SecretService:
CIPHER_ENCODING = "ascii"
@classmethod
def _encrypt(cls, value: str) -> str:
encrypted_bytes: bytes = b""
if current_app.config.get("SPIFFWORKFLOW_BACKEND_ENCRYPTION_LIB") == "cryptography":
# cryptography needs a bytes object
value_as_bytes = str.encode(value)
encrypted_bytes = current_app.config["CIPHER"].encrypt(value_as_bytes)
else:
encrypted_bytes = current_app.config["CIPHER"].encrypt(value)
return encrypted_bytes.decode(cls.CIPHER_ENCODING)
@classmethod
def _decrypt(cls, value: str) -> str:
bytes_to_decrypt = bytes(value, cls.CIPHER_ENCODING)
decrypted_bytes: bytes = current_app.config["CIPHER"].decrypt(bytes_to_decrypt)
return decrypted_bytes.decode(cls.CIPHER_ENCODING)
@classmethod
def add_secret(
cls,
key: str,
value: str,
user_id: int,
) -> SecretModel:
value = cls._encrypt(value)
secret_model = SecretModel(key=key, value=value, user_id=user_id)
db.session.add(secret_model)
try:
db.session.commit()
except Exception as e:
raise ApiError(
error_code="create_secret_error",
message=(
f"There was an error creating a secret with key: {key} and value"
f" ending with: {value[:-4]}. Original error is {e}"
),
) from e
return secret_model
@staticmethod
def get_secret(key: str) -> SecretModel:
secret = db.session.query(SecretModel).filter(SecretModel.key == key).first()
if isinstance(secret, SecretModel):
return secret
else:
raise ApiError(
error_code="missing_secret_error",
message=f"Unable to locate a secret with the name: {key}. ",
)
@classmethod
def update_secret(
cls,
key: str,
value: str,
user_id: int | None = None,
create_if_not_exists: bool | None = False,
) -> None:
"""Does this pass pre commit?"""
secret_model = SecretModel.query.filter(SecretModel.key == key).first()
if secret_model:
value = cls._encrypt(value)
secret_model.value = value
db.session.add(secret_model)
try:
db.session.commit()
except Exception as e:
db.session.rollback()
raise e
elif create_if_not_exists:
if user_id is None:
raise ApiError(
error_code="update_secret_error_no_user_id",
message=f"Cannot update secret with key: {key}. Missing user id.",
status_code=404,
)
SecretService.add_secret(key=key, value=value, user_id=user_id)
else:
raise ApiError(
error_code="update_secret_error",
message=f"Cannot update secret with key: {key}. Resource does not exist.",
status_code=404,
)
@staticmethod
def delete_secret(key: str, user_id: int) -> None:
"""Delete secret."""
secret_model = SecretModel.query.filter(SecretModel.key == key).first()
if secret_model:
db.session.delete(secret_model)
try:
db.session.commit()
except Exception as e:
raise ApiError(
error_code="delete_secret_error",
message=f"Could not delete secret with key: {key}. Original error is: {e}",
) from e
else:
raise ApiError(
error_code="delete_secret_error",
message=f"Cannot delete secret with key: {key}. Resource does not exist.",
status_code=404,
)
@classmethod
def METHOD_NAME(cls, value: str) -> str:
if "SPIFF_SECRET:" in value:
spiff_secret_match = re.match(r".*SPIFF_SECRET:(?P<variable_name>\w+).*", value)
if spiff_secret_match is not None:
spiff_variable_name = spiff_secret_match.group("variable_name")
secret = cls.get_secret(spiff_variable_name)
with sentry_sdk.start_span(op="task", description="decrypt_secret"):
decrypted_value = cls._decrypt(secret.value)
return re.sub(r"\bSPIFF_SECRET:\w+", decrypted_value, value)
return value | null |
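The token substitution in the method above can be shown in isolation; this sketch replaces the database lookup and decryption with a plain dict (names invented) while keeping the same two regexes:

```python
import re

def resolve_sketch(value, secrets):
    """Replace a SPIFF_SECRET:<name> token with its plaintext value, if present."""
    if "SPIFF_SECRET:" in value:
        match = re.match(r".*SPIFF_SECRET:(?P<variable_name>\w+).*", value)
        if match is not None:
            plaintext = secrets[match.group("variable_name")]
            return re.sub(r"\bSPIFF_SECRET:\w+", plaintext, value)
    return value

print(resolve_sketch("Bearer SPIFF_SECRET:api_key", {"api_key": "s3cret"}))
# -> "Bearer s3cret"
```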
has access | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""access.py contains static methods around access permissions."""
from clusterfuzz._internal.base import errors
from clusterfuzz._internal.base import external_users
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import db_config
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.datastore import data_handler
from libs import auth
from libs import helpers
from libs.issue_management import issue_tracker_utils
def _is_privileged_user(email):
"""Check if an email is in the privileged users list."""
if local_config.AuthConfig().get('all_users_privileged'):
return True
privileged_user_emails = (db_config.get_value('privileged_users') or
'').splitlines()
return any(
utils.emails_equal(email, privileged_user_email)
for privileged_user_email in privileged_user_emails)
def _is_blacklisted_user(email):
"""Check if an email is in the privileged users list."""
blacklisted_user_emails = (db_config.get_value('blacklisted_users') or
'').splitlines()
return any(
utils.emails_equal(email, blacklisted_user_email)
for blacklisted_user_email in blacklisted_user_emails)
def get_user_job_type():
"""Return the job_type that is assigned to the current user. None means one
can access any job type. You might want to invoke get_access(..) with
the job type afterward."""
email = helpers.get_user_email()
privileged_user_emails = (db_config.get_value('privileged_users') or
'').splitlines()
for privileged_user_email in privileged_user_emails:
if ';' in privileged_user_email:
tokens = privileged_user_email.split(';')
privileged_user_real_email = tokens[0]
privileged_user_job_type = tokens[1]
if utils.emails_equal(email, privileged_user_real_email):
return privileged_user_job_type
return None
def _is_domain_allowed(email):
"""Check if the email's domain is allowed."""
domains = local_config.AuthConfig().get('whitelisted_domains', default=[])
for domain in domains:
if utils.normalize_email(email).endswith('@%s' % domain.lower()):
return True
return False
class UserAccess:
Allowed, Denied, Redirected = list(range(3)) # pylint: disable=invalid-name
def METHOD_NAME(need_privileged_access=False, job_type=None, fuzzer_name=None):
"""Check if the user has access."""
result = get_access(
need_privileged_access=need_privileged_access,
job_type=job_type,
fuzzer_name=fuzzer_name)
return result == UserAccess.Allowed
def get_access(need_privileged_access=False, job_type=None, fuzzer_name=None):
"""Return 'allowed', 'redirected', or 'failed'."""
if auth.is_current_user_admin():
return UserAccess.Allowed
user = auth.get_current_user()
if not user:
return UserAccess.Redirected
email = user.email
if _is_blacklisted_user(email):
return UserAccess.Denied
if _is_privileged_user(email):
return UserAccess.Allowed
if job_type and external_users.is_job_allowed_for_user(email, job_type):
return UserAccess.Allowed
if (fuzzer_name and
external_users.is_fuzzer_allowed_for_user(email, fuzzer_name)):
return UserAccess.Allowed
if not need_privileged_access and _is_domain_allowed(email):
return UserAccess.Allowed
return UserAccess.Denied
def can_user_access_testcase(testcase):
"""Checks if the current user can access the testcase."""
config = db_config.get()
need_privileged_access = (
testcase.security_flag and not config.relax_security_bug_restrictions)
if METHOD_NAME(
fuzzer_name=testcase.actual_fuzzer_name(),
job_type=testcase.job_type,
need_privileged_access=need_privileged_access):
return True
user_email = helpers.get_user_email()
if testcase.uploader_email and testcase.uploader_email == user_email:
return True
# Allow owners of bugs to see associated test cases and test case groups.
issue_id = testcase.bug_information or testcase.group_bug_information
if not issue_id:
return False
issue_tracker = issue_tracker_utils.get_issue_tracker_for_testcase(testcase)
associated_issue = issue_tracker.get_issue(issue_id)
if not associated_issue:
return False
# Look at both associated issue and original issue (if the associated one
# is a duplicate of the original issue).
issues_to_check = [associated_issue]
if associated_issue.merged_into:
original_issue = issue_tracker.get_original_issue(issue_id)
if original_issue:
issues_to_check.append(original_issue)
relaxed_restrictions = (
config.relax_testcase_restrictions or _is_domain_allowed(user_email))
for issue in issues_to_check:
if relaxed_restrictions:
if (any(utils.emails_equal(user_email, cc) for cc in issue.ccs) or
utils.emails_equal(user_email, issue.assignee) or
utils.emails_equal(user_email, issue.reporter)):
return True
elif utils.emails_equal(user_email, issue.assignee):
return True
return False
def check_access_and_get_testcase(testcase_id):
"""Check the failed attempt count and get the testcase."""
if not helpers.get_user_email():
raise helpers.UnauthorizedError()
if not testcase_id:
raise helpers.EarlyExitError('No test case specified!', 404)
try:
testcase = data_handler.get_testcase_by_id(testcase_id)
except errors.InvalidTestcaseError:
raise helpers.EarlyExitError('Invalid test case!', 404)
if not can_user_access_testcase(testcase):
raise helpers.AccessDeniedError()
return testcase | null |
list modes | from boxbranding import getBoxType, getMachineName, getHaveRCA, getHaveDVI, getHaveSCART, getHaveAVJACK
from Screens.Wizard import WizardSummary
from Screens.WizardLanguage import WizardLanguage
from Screens.Rc import Rc
from Components.AVSwitch import iAVSwitch as iAV
from Components.Pixmap import Pixmap
from Components.config import config, ConfigBoolean, configfile
from Components.SystemInfo import SystemInfo
from Tools.Directories import resolveFilename, SCOPE_SKIN, SCOPE_CURRENT_SKIN
from Tools.HardwareInfo import HardwareInfo
config.misc.showtestcard = ConfigBoolean(default=False)
class VideoWizardSummary(WizardSummary):
def __init__(self, session, parent):
WizardSummary.__init__(self, session, parent)
def setLCDPicCallback(self):
self.parent.setLCDTextCallback(self.setText)
def setLCDPic(self, file):
self["pic"].instance.setPixmapFromFile(file)
class VideoWizard(WizardLanguage, Rc):
skin = """
<screen position="fill" title="Welcome..." flags="wfNoBorder" >
<panel name="WizardMarginsTemplate"/>
<panel name="WizardPictureLangTemplate"/>
<panel name="RemoteControlTemplate"/>
<panel position="left" size="10,*" />
<panel position="right" size="10,*" />
<panel position="fill">
<widget name="text" position="top" size="*,270" font="Regular;23" valign="center" />
<panel position="fill">
<panel position="left" size="150,*">
<widget name="portpic" position="top" zPosition="10" size="150,150" transparent="1" alphatest="on"/>
</panel>
<panel position="fill" layout="stack">
<widget source="list" render="Listbox" position="fill" scrollbarMode="showOnDemand" >
<convert type="StringList" />
</widget>
<!--<widget name="config" position="fill" zPosition="1" scrollbarMode="showOnDemand" />-->
</panel>
</panel>
</panel>
</screen>"""
def __init__(self, session):
# FIXME anyone knows how to use relative paths from the plugin's directory?
self.xmlfile = resolveFilename(SCOPE_SKIN, "videowizard.xml")
self.hw = iAV # needed by VideoWizard.xml do not change
WizardLanguage.__init__(self, session, showSteps=False, showStepSlider=False)
Rc.__init__(self)
self["wizard"] = Pixmap()
self["portpic"] = Pixmap()
self.port = None
self.mode = None
self.rate = None
def createSummary(self):
print("[VideoWizard] createSummary")
return VideoWizardSummary
def markDone(self):
iAV.saveMode(self.port, self.mode, self.rate)
config.misc.videowizardenabled.value = 0
config.misc.videowizardenabled.save()
configfile.save()
def listInputChannels(self):
# hw_type = HardwareInfo().get_device_name()
# has_hdmi = HardwareInfo().has_hdmi()
list = []
for port in iAV.getPortList():
if iAV.isPortUsed(port):
descr = port
if descr == "Scart" and not SystemInfo["hasScart"]:
continue
if port != "DVI-PC":
list.append((descr, port))
list.sort(key=lambda x: x[0])
print("[VideoWizard] listInputChannels:", list)
return list
def inputSelectionMade(self, index):
print("[VideoWizard] inputSelectionMade:", index)
self.port = index
self.inputSelect(index)
def inputSelectionMoved(self):
# hw_type = HardwareInfo().get_device_name()
# has_hdmi = HardwareInfo().has_hdmi()
print("[VideoWizard] input selection moved:", self.selection)
self.inputSelect(self.selection)
if self["portpic"].instance is not None:
picname = self.selection
if picname == "Jack":
picname = "JACK"
if picname == "Scart-YPbPr":
picname = "Scart"
self["portpic"].instance.setPixmapFromFile(resolveFilename(SCOPE_CURRENT_SKIN, "icons/%s.png" % picname))
def inputSelect(self, port):
print("[VideoWizard] inputSelect:", port)
modeList = iAV.getModeList(self.selection)
print("[VideoWizard] modeList:", modeList)
self.port = port
if len(modeList) > 0:
ratesList = self.listRates(modeList[0][0])
iAV.setMode(port=port, mode=modeList[0][0], rate=ratesList[0][0])
def METHOD_NAME(self):
list = []
print("[VideoWizard] modes for port", self.port)
for mode in iAV.getModeList(self.port):
# if mode[0] != "PC":
list.append((mode[0], mode[0]))
print("[VideoWizard] modeslist:", list)
return list
def modeSelectionMade(self, index):
print("[VideoWizard] modeSelectionMade:", index)
self.mode = index
self.modeSelect(index)
def modeSelectionMoved(self):
print("[VideoWizard] mode selection moved:", self.selection)
self.modeSelect(self.selection)
def modeSelect(self, mode):
ratesList = self.listRates(mode)
print("[VideoWizard] ratesList:", ratesList)
if self.port == "HDMI" and mode in ("720p", "1080i", "1080p", "2160p"):
self.rate = "multi"
iAV.setMode(port=self.port, mode=mode, rate="multi")
else:
iAV.setMode(port=self.port, mode=mode, rate=ratesList[0][0])
def listRates(self, querymode=None):
if querymode is None:
querymode = self.mode
list = []
print("[VideoWizard] modes for port", self.port, "and mode", querymode)
for mode in iAV.getModeList(self.port):
print("[VideoWizard] mode:", mode)
if mode[0] == querymode:
for rate in mode[1]:
if self.port == "DVI-PC":
print("[VideoWizard] rate:", rate)
if rate == "640x480":
list.insert(0, (rate, rate))
continue
list.append((rate, rate))
return list
def rateSelectionMade(self, index):
print("[VideoWizard] rateSelectionMade:", index)
self.rate = index
self.rateSelect(index)
def rateSelectionMoved(self):
print("[VideoWizard] rate selection moved:", self.selection)
self.rateSelect(self.selection)
def rateSelect(self, rate):
iAV.setMode(port=self.port, mode=self.mode, rate=rate)
def showTestCard(self, selection=None):
if selection is None:
selection = self.selection
print("[VideoWizard] set config.misc.showtestcard to", {"yes": True, "no": False}[selection])
if selection == "yes":
config.misc.showtestcard.value = True
else:
config.misc.showtestcard.value = False
def keyNumberGlobal(self, number):
if number in (1, 2, 3):
if number == 1:
iAV.saveMode("HDMI", "720p", "multi")
elif number == 2:
iAV.saveMode("HDMI", "1080i", "multi")
elif number == 3:
iAV.saveMode("Scart", "Multi", "multi")
iAV.setConfiguredMode()
self.close()
WizardLanguage.keyNumberGlobal(self, number) | null |
read | """Simple class to read IFF chunks.
An IFF chunk (used in formats such as AIFF, TIFF, RMFF (RealMedia File
Format)) has the following structure:
+----------------+
| ID (4 bytes) |
+----------------+
| size (4 bytes) |
+----------------+
| data |
| ... |
+----------------+
The ID is a 4-byte string which identifies the type of chunk.
The size field (a 32-bit value, encoded using big-endian byte order)
gives the size of the whole chunk, including the 8-byte header.
Usually an IFF-type file consists of one or more chunks. The proposed
usage of the Chunk class defined here is to instantiate an instance at
the start of each chunk and read from the instance until it reaches
the end, after which a new instance can be instantiated. At the end
of the file, creating a new instance will fail with an EOFError
exception.
Usage:
while True:
try:
chunk = Chunk(file)
except EOFError:
break
chunktype = chunk.getname()
while True:
data = chunk.read(nbytes)
if not data:
            break
# do something with data
The interface is file-like. The implemented methods are:
read, close, seek, tell, isatty.
Extra methods are: skip() (called by close, skips to the end of the chunk),
getname() (returns the name (ID) of the chunk)
The __init__ method has one required argument, a file-like object
(including a chunk instance), and one optional argument, a flag which
specifies whether or not chunks are aligned on 2-byte boundaries. The
default is 1, i.e. aligned.
"""
class Chunk:
def __init__(self, file, align=True, bigendian=True, inclheader=False):
import struct
self.closed = False
self.align = align # whether to align to word (2-byte) boundaries
if bigendian:
strflag = '>'
else:
strflag = '<'
self.file = file
self.chunkname = file.METHOD_NAME(4)
if len(self.chunkname) < 4:
raise EOFError
try:
self.chunksize = struct.unpack(strflag+'L', file.METHOD_NAME(4))[0]
except struct.error:
raise EOFError
if inclheader:
self.chunksize = self.chunksize - 8 # subtract header
self.size_read = 0
try:
self.offset = self.file.tell()
except (AttributeError, IOError):
self.seekable = False
else:
self.seekable = True
def getname(self):
"""Return the name (ID) of the current chunk."""
return self.chunkname
def getsize(self):
"""Return the size of the current chunk."""
return self.chunksize
def close(self):
if not self.closed:
self.skip()
self.closed = True
def isatty(self):
if self.closed:
raise ValueError, "I/O operation on closed file"
return False
def seek(self, pos, whence=0):
"""Seek to specified position into the chunk.
Default position is 0 (start of chunk).
If the file is not seekable, this will result in an error.
"""
if self.closed:
raise ValueError, "I/O operation on closed file"
if not self.seekable:
raise IOError, "cannot seek"
if whence == 1:
pos = pos + self.size_read
elif whence == 2:
pos = pos + self.chunksize
if pos < 0 or pos > self.chunksize:
raise RuntimeError
self.file.seek(self.offset + pos, 0)
self.size_read = pos
def tell(self):
if self.closed:
raise ValueError, "I/O operation on closed file"
return self.size_read
def METHOD_NAME(self, size=-1):
"""Read at most size bytes from the chunk.
If size is omitted or negative, read until the end
of the chunk.
"""
if self.closed:
raise ValueError, "I/O operation on closed file"
if self.size_read >= self.chunksize:
return ''
if size < 0:
size = self.chunksize - self.size_read
if size > self.chunksize - self.size_read:
size = self.chunksize - self.size_read
data = self.file.METHOD_NAME(size)
self.size_read = self.size_read + len(data)
if self.size_read == self.chunksize and \
self.align and \
(self.chunksize & 1):
dummy = self.file.METHOD_NAME(1)
self.size_read = self.size_read + len(dummy)
return data
def skip(self):
"""Skip the rest of the chunk.
If you are not interested in the contents of the chunk,
this method should be called so that the file points to
the start of the next chunk.
"""
if self.closed:
raise ValueError, "I/O operation on closed file"
if self.seekable:
try:
n = self.chunksize - self.size_read
# maybe fix alignment
if self.align and (self.chunksize & 1):
n = n + 1
self.file.seek(n, 1)
self.size_read = self.size_read + n
return
except IOError:
pass
while self.size_read < self.chunksize:
n = min(8192, self.chunksize - self.size_read)
dummy = self.METHOD_NAME(n)
if not dummy:
raise EOFError | null |
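# --- Editor's note: illustrative usage sketch, not part of the original module. ---
# A minimal round trip: build one big-endian chunk in memory and read it back with the
# Chunk class above. METHOD_NAME is the masked method documented above as `read`; the
# class uses Python 2 `raise` syntax, so this sketch assumes a Python 2 interpreter.
def _chunk_round_trip_example():
    import io
    import struct
    payload = b'hello'
    # 4-byte ID, 4-byte big-endian size (data only), then the data itself.
    raw = b'DATA' + struct.pack('>L', len(payload)) + payload
    ck = Chunk(io.BytesIO(raw), align=False)
    assert ck.getname() == b'DATA'
    assert ck.getsize() == len(payload)
    assert ck.METHOD_NAME() == payload  # read the whole chunk body
    ck.close()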
component metadata | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor-like objects that are composed from tf.Tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.util import nest
@six.add_metaclass(abc.ABCMeta)
class CompositeTensor(object):
"""Abstract base class for Tensor-like objects that are composed from Tensors.
Each `CompositeTensor` can be decomposed into a structured collection of
component `tf.Tensor`s, and reconstructed from those components.
The `tensorflow.python.util.nest` module has support for treating composite
tensors as structure, which makes it easy to flatten and reconstruct
composite tensors (or larger structures that contain composite tensors).
E.g.:
```python
ct = ... # Create a composite tensor.
flat_list_of_tensors = nest.flatten(ct, expand_composites=True)
transformed_list_of_tensors = ... # do something with the flat tensors.
result = nest.pack_sequence_as(ct, transformed_list_of_tensors,
expand_composites=True)
```
"""
@abc.abstractproperty
def _type_spec(self):
"""A `TypeSpec` describing the type of this value."""
raise NotImplementedError("%s._type_spec()" % type(self).__name__)
# Deprecated -- use self._type_spec._to_components(self) instead.
# TODO(b/133606651) Remove all callers and then delete this method.
def _to_components(self):
"""Decomposes this composite tensor into its component tensors.
Returns:
A nested structure of `tf.Tensor`s and `CompositeTensor`s that can be
used to reconstruct this composite tensor (along with metadata returned
by `_component_metadata`).
"""
return self._type_spec._to_components(self) # pylint: disable=protected-access
# Deprecated -- use self._type_spec instead.
# TODO(b/133606651) Remove all callers and then delete this method.
def METHOD_NAME(self):
"""Returns any non-tensor metadata needed to reconstruct a composite tensor.
Returns:
A nested structure of metadata that can be used to reconstruct this
composite tensor (along with the tensors returned by `_to_components`).
"""
return self._type_spec
# Deprecated -- use metadata._from_components(components) instead.
# TODO(b/133606651) Remove all callers and then delete this method.
@staticmethod
def _from_components(components, metadata):
"""Creates a composite tensor of type `cls` from components.
Args:
components: A nested structure whose values are `tf.Tensor`s or
`tf.CompositeTensor`s (as returned by `_to_components`).
metadata: A nested structure containing any additional metadata needed to
        reconstruct the composite tensor (as returned by `_component_metadata`).
Returns:
A `CompositeTensor` of type `cls`.
"""
return metadata._from_components(components) # pylint: disable=protected-access
def _shape_invariant_to_type_spec(self, shape):
"""Returns a TypeSpec given a shape invariant (used by `tf.while_loop`).
Args:
shape: A `tf.TensorShape` object. The shape invariant for this
`CompositeTensor`, or `None` if a default shape invariant should be
used (based on the value of this `CompositeTensor`).
Returns:
A nested structure whose values are `tf.TensorShape` objects, specifying
the shape invariants for the tensors that comprise this `CompositeTensor`.
"""
# New TypeSpec subclasses generally do not need to implement this --
# this method is used for backwards compatibility. Users of tf.while_loop
# can specify a type by passing in TypeSpec instead.
raise NotImplementedError("%s._shape_invariant_to_type_spec"
% type(self).__name__)
# TODO(b/133606651) Remove this property, since it's not clear what it should
# return if a CompositeTensor has a mix of graph and non-graph components.
  # Update users to perform an appropriate check themselves.
@property
def _is_graph_tensor(self):
"""Returns True if this tensor's components belong to a TF graph."""
components = self._type_spec._to_components(self) # pylint: disable=protected-access
tensors = nest.flatten(components, expand_composites=True)
return any(hasattr(t, "graph") for t in tensors)
def _consumers(self):
"""Returns a list of `Operation`s that consume this `CompositeTensor`.
Returns:
A list of `Operation`s.
Raises:
RuntimeError: If this method is called while executing eagerly.
"""
consumers = nest.flatten([
component.consumers()
for component in self._to_components()
if getattr(component, "graph", None) is not None
])
return list(set(consumers))
pywrap_tensorflow.RegisterType("CompositeTensor", CompositeTensor)
def replace_composites_with_components(structure):
"""Recursively replaces CompositeTensors with their components.
Args:
structure: A `nest`-compatible structure, possibly containing composite
tensors.
Returns:
A copy of `structure`, where each composite tensor has been replaced by
its components. The result will contain no composite tensors.
Note that `nest.flatten(replace_composites_with_components(structure))`
returns the same value as `nest.flatten(structure)`.
"""
if isinstance(structure, CompositeTensor):
return replace_composites_with_components(structure._to_components()) # pylint: disable=protected-access
elif not nest.is_sequence(structure):
return structure
else:
return nest.map_structure(replace_composites_with_components, structure,
expand_composites=False)
# @TODO(edloper): Can we replace convert_to_tensor_or_xyz with just
# convert_to_tensor_or_composite? Alternatively, should composite tensors
# register a dispatch override for tf.convert_to_tensor? | null |
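# --- Editor's note: illustrative sketch, not part of the original module. ---
# How replace_composites_with_components behaves for a structure holding a composite
# value. This assumes a TensorFlow build where `tf.ragged.constant` is available; the
# exact component layout is whatever RaggedTensor's `_to_components` returns.
def _replace_composites_example():
  import tensorflow as tf
  rt = tf.ragged.constant([[1, 2], [3]])  # RaggedTensor is a CompositeTensor
  plain = replace_composites_with_components({"x": rt, "y": tf.constant(7)})
  # `plain["x"]` is now the nested structure of plain Tensors backing the ragged value,
  # so no CompositeTensor instances remain anywhere in `plain`.
  return plain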
add doc meta data to answer | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2021 deepset GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Sequence, Dict, Tuple
import numpy as np
from scipy.special import expit
from abc import abstractmethod
from copy import deepcopy
from functools import wraps
from time import perf_counter
from pipelines.schema import Document, Answer, Span
from pipelines.nodes.base import BaseComponent
class BaseReader(BaseComponent):
return_no_answers: bool
outgoing_edges = 1
query_count = 0
query_time = 0
@abstractmethod
def predict(self, query: str, documents: List[Document], top_k: Optional[int] = None):
pass
@abstractmethod
def predict_batch(self, query_doc_list: List[dict], top_k: Optional[int] = None, batch_size: Optional[int] = None):
pass
@staticmethod
def _calc_no_answer(
no_ans_gaps: Sequence[float], best_score_answer: float, use_confidence_scores: bool = True
) -> Tuple[Answer, float]:
# "no answer" scores and positive answers scores are difficult to compare, because
# + a positive answer score is related to one specific document
# - a "no answer" score is related to all input documents
# Thus we compute the "no answer" score relative to the best possible answer and adjust it by
# the most significant difference between scores.
# Most significant difference: a model switching from predicting an answer to "no answer" (or vice versa).
# No_ans_gap is a list of this most significant difference per document
no_ans_gap_array = np.array(no_ans_gaps)
max_no_ans_gap = np.max(no_ans_gap_array)
# all passages "no answer" as top score
if np.sum(no_ans_gap_array < 0) == len(no_ans_gap_array):
no_ans_score = (
best_score_answer - max_no_ans_gap
) # max_no_ans_gap is negative, so it increases best pos score
else: # case: at least one passage predicts an answer (positive no_ans_gap)
no_ans_score = best_score_answer - max_no_ans_gap
no_ans_prediction = Answer(
answer="",
type="extractive",
score=float(expit(np.asarray(no_ans_score) / 8))
if use_confidence_scores
else no_ans_score, # just a pseudo prob for now or old score,
context=None,
offsets_in_context=[Span(start=0, end=0)],
offsets_in_document=[Span(start=0, end=0)],
document_id=None,
meta=None,
)
return no_ans_prediction, max_no_ans_gap
@staticmethod
def METHOD_NAME(documents: List[Document], answer):
# Add corresponding document_name and more meta data, if the answer contains the document_id
if answer.meta is None:
answer.meta = {}
# get meta from doc
meta_from_doc = {}
for doc in documents:
if doc.id == answer.document_id:
meta_from_doc = deepcopy(doc.meta)
break
# append to "own" meta
answer.meta.update(meta_from_doc)
return answer
def run(
self, query: str, documents: List[Document], top_k: Optional[int] = None, add_isolated_node_eval: bool = False
): # type: ignore
self.query_count += 1
if documents:
predict = self.timing(self.predict, "query_time")
results = predict(query=query, documents=documents, top_k=top_k)
else:
results = {"answers": []}
# Add corresponding document_name and more meta data, if an answer contains the document_id
results["answers"] = [
BaseReader.METHOD_NAME(documents=documents, answer=answer) for answer in results["answers"]
]
return results, "output_1"
def run_batch(self, query_doc_list: List[Dict], top_k: Optional[int] = None):
"""A unoptimized implementation of running Reader queries in batch"""
self.query_count += len(query_doc_list)
results = []
if query_doc_list:
for qd in query_doc_list:
q = qd["queries"]
docs = qd["docs"]
predict = self.timing(self.predict, "query_time")
result = predict(query=q, documents=docs, top_k=top_k)
results.append(result)
else:
results = [{"answers": [], "query": ""}]
return {"results": results}, "output_1"
def timing(self, fn, attr_name):
"""Wrapper method used to time functions."""
@wraps(fn)
def wrapper(*args, **kwargs):
if attr_name not in self.__dict__:
self.__dict__[attr_name] = 0
tic = perf_counter()
ret = fn(*args, **kwargs)
toc = perf_counter()
self.__dict__[attr_name] += toc - tic
return ret
return wrapper
def print_time(self):
print("Reader (Speed)")
print("---------------")
if not self.query_count:
print("No querying performed via Retriever.run()")
else:
print(f"Queries Performed: {self.query_count}")
print(f"Query time: {self.query_time}s")
print(f"{self.query_time / self.query_count} seconds per query") | null |
set instance | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import typing
from fate_arch.common import Party
class Role:
def __init__(self, parties) -> None:
self._parties = parties
self._size = len(self._parties)
def __getitem__(self, key):
return self._parties[key]
class _PartiesMeta(type):
@property
def Guest(cls) -> Role:
return cls._get_instance()._guest
@property
def Host(cls) -> Role:
return cls._get_instance()._host
@property
def Arbiter(cls) -> Role:
return cls._get_instance()._arbiter
class PartiesInfo(metaclass=_PartiesMeta):
_instance = None
@classmethod
def METHOD_NAME(cls, inst):
cls._instance = inst
@classmethod
def _get_instance(cls) -> "PartiesInfo":
if cls._instance is None:
raise RuntimeError(f"parties not initialized")
return cls._instance
@classmethod
def get_parties(cls, parties) -> typing.List[Party]:
if isinstance(parties, Party):
return [parties]
elif isinstance(parties, Role):
return parties[:]
elif isinstance(parties, list):
plain_parties = []
for p in parties:
plain_parties.extend(cls.get_parties(p))
if len(set(plain_parties)) != len(plain_parties):
raise ValueError(f"duplicated parties exsits: {plain_parties}")
return plain_parties
raise ValueError(f"unsupported type: {type(parties)}")
@staticmethod
def from_conf(conf: typing.MutableMapping[str, dict]):
try:
local = Party(
role=conf["local"]["role"], party_id=conf["local"]["party_id"]
)
role_to_parties = {}
for role, party_id_list in conf.get("role", {}).items():
role_to_parties[role] = [
Party(role=role, party_id=party_id) for party_id in party_id_list
]
except Exception as e:
raise RuntimeError(
"conf parse error, a correct configuration could be:\n"
"{\n"
" 'local': {'role': 'guest', 'party_id': 10000},\n"
" 'role': {'guest': [10000], 'host': [9999, 9998]}, 'arbiter': [9997]}\n"
"}"
) from e
return PartiesInfo(local, role_to_parties)
def __init__(
self,
local: Party,
role_to_parties: typing.MutableMapping[str, typing.List[Party]],
):
self._local = local
self._role_to_parties = role_to_parties
self._guest = Role(role_to_parties.get("guest", []))
self._host = Role(role_to_parties.get("host", []))
self._arbiter = Role(role_to_parties.get("arbiter", []))
self.METHOD_NAME(self)
@property
def local_party(self) -> Party:
return self._local
@property
def all_parties(self):
return [
party for parties in self._role_to_parties.values() for party in parties
]
@property
def role_set(self):
return set(self._role_to_parties)
def roles_to_parties(self, roles: typing.Iterable, strict=True) -> list:
parties = []
for role in roles:
if role not in self._role_to_parties:
if strict:
raise RuntimeError(
f"try to get role {role} "
f"which is not configured in `role` in runtime conf({self._role_to_parties})"
)
else:
continue
parties.extend(self._role_to_parties[role])
return parties
def role_to_party(self, role, idx) -> Party:
return self._role_to_parties[role][idx]
__all__ = ["PartiesInfo", "Role"] | null |
merge | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import json
from dataclasses import dataclass
from typing import Any, Iterable, Mapping
from pants.util.frozendict import FrozenDict
from pants.util.strutil import strip_prefix
@dataclass(frozen=True)
class EmbedConfig:
patterns: FrozenDict[str, tuple[str, ...]]
files: FrozenDict[str, str]
def __init__(self, patterns: Mapping[str, Iterable[str]], files: Mapping[str, str]) -> None:
"""Configuration passed to the Go compiler to configure file embedding.
The compiler relies entirely on the caller to map embed patterns to actual filesystem
paths. All embed patterns
contained in the package must be mapped. Consult
`FirstPartyPkgAnalysis.embed_patterns` for the embed patterns obtained from analysis.
:param patterns: Maps each pattern provided via a //go:embed directive to a list of file
paths relative to the package directory for files to embed for that pattern. When the
embedded variable is an `embed.FS`, those relative file paths define the virtual
directory hierarchy exposed by the embed.FS filesystem abstraction. The relative file
paths are resolved to actual filesystem paths for their content by consulting the
`files` dictionary.
:param files: Maps each virtual, relative file path used as a value in the `patterns`
dictionary to the actual filesystem path with that file's content.
"""
object.__setattr__(self, "patterns", FrozenDict({k: tuple(v) for k, v in patterns.items()}))
object.__setattr__(self, "files", FrozenDict(files))
@classmethod
def from_json_dict(
cls, d: dict[str, Any], prefix_to_strip: str | None = None
) -> EmbedConfig | None:
patterns = d.get("Patterns", {})
files = d.get("Files", {})
if prefix_to_strip:
files = {key: strip_prefix(value, prefix_to_strip) for key, value in files.items()}
result = cls(
patterns=FrozenDict({key: tuple(value) for key, value in patterns.items()}),
files=FrozenDict(files),
)
return result if result else None
def to_embedcfg(self) -> bytes:
data = {
"Patterns": dict(self.patterns),
"Files": dict(self.files),
}
return json.dumps(data).encode("utf-8")
def __bool__(self) -> bool:
return bool(self.patterns) or bool(self.files)
def METHOD_NAME(self, other: EmbedConfig) -> EmbedConfig:
"""Merge two EmbedConfig's into one.
Overlapping keys must have the same values.
"""
overlapping_patterns_keys = set(self.patterns.keys()) & set(other.patterns.keys())
for key in overlapping_patterns_keys:
if self.patterns[key] != other.patterns[key]:
raise AssertionError(
"Unable to merge conflicting golang file embed configurations. This should not have occurred. "
"Please open an issue at https://github.com/pantsbuild/pants/issues/new/choose "
"with the following information: "
f"Patterns Key: {key}; Left: {self.patterns[key]}; Right: {other.patterns[key]} "
)
overlapping_files_keys = set(self.files.keys()) & set(other.files.keys())
for key in overlapping_files_keys:
if self.files[key] != other.files[key]:
raise AssertionError(
"Unable to merge conflicting golang file embed configurations. This should not have occurred. "
"Please open an issue at https://github.com/pantsbuild/pants/issues/new/choose "
"with the following information: "
f"Files Key: {key}; Left: {self.patterns[key]}; Right: {other.patterns[key]} "
)
return EmbedConfig(
patterns={**self.patterns, **other.patterns},
files={**self.files, **other.files},
) | null |
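# --- Editor's note: illustrative sketch, not part of the original module. ---
# Merging two EmbedConfig values; METHOD_NAME is the masked merge operation described
# in its docstring. Running this requires the pants source tree on the import path.
def _embed_config_merge_example() -> EmbedConfig:
    left = EmbedConfig(
        patterns={"*.txt": ["notes.txt"]},
        files={"notes.txt": "src/pkg/notes.txt"},
    )
    right = EmbedConfig(
        patterns={"data/*": ["data/a.json"]},
        files={"data/a.json": "src/pkg/data/a.json"},
    )
    merged = left.METHOD_NAME(right)  # no overlapping keys, so this is a plain union
    assert set(merged.patterns) == {"*.txt", "data/*"}
    assert merged.files["data/a.json"] == "src/pkg/data/a.json"
    return merged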
resolve target str | from abc import ABC, abstractmethod
from numba.core.registry import DelayedRegistry, CPUDispatcher
from numba.core.decorators import jit
from numba.core.errors import InternalTargetMismatchError, NumbaValueError
from threading import local as tls
_active_context = tls()
_active_context_default = 'cpu'
class _TargetRegistry(DelayedRegistry):
def __getitem__(self, item):
try:
return super().__getitem__(item)
except KeyError:
msg = "No target is registered against '{}', known targets:\n{}"
known = '\n'.join([f"{k: <{10}} -> {v}"
for k, v in target_registry.items()])
raise NumbaValueError(msg.format(item, known)) from None
# Registry mapping target name strings to Target classes
target_registry = _TargetRegistry()
# Registry mapping Target classes the @jit decorator for that target
jit_registry = DelayedRegistry()
class target_override(object):
"""Context manager to temporarily override the current target with that
prescribed."""
def __init__(self, name):
self._orig_target = getattr(_active_context, 'target',
_active_context_default)
self.target = name
def __enter__(self):
_active_context.target = self.target
def __exit__(self, ty, val, tb):
_active_context.target = self._orig_target
def current_target():
"""Returns the current target
"""
return getattr(_active_context, 'target', _active_context_default)
def get_local_target(context):
"""
Gets the local target from the call stack if available and the TLS
override if not.
"""
# TODO: Should this logic be reversed to prefer TLS override?
if len(context.callstack._stack) > 0:
target = context.callstack[0].target
else:
target = target_registry.get(current_target(), None)
if target is None:
msg = ("The target found is not registered."
"Given target was {}.")
raise ValueError(msg.format(target))
else:
return target
def METHOD_NAME(target_str):
"""Resolves a target specified as a string to its Target class."""
return target_registry[target_str]
def resolve_dispatcher_from_str(target_str):
"""Returns the dispatcher associated with a target string"""
target_hw = METHOD_NAME(target_str)
return dispatcher_registry[target_hw]
def _get_local_target_checked(tyctx, hwstr, reason):
"""Returns the local target if it is compatible with the given target
name during a type resolution; otherwise, raises an exception.
Parameters
----------
tyctx: typing context
hwstr: str
target name to check against
reason: str
Reason for the resolution. Expects a noun.
Returns
-------
target_hw : Target
Raises
------
InternalTargetMismatchError
"""
# Get the class for the target declared by the function
hw_clazz = METHOD_NAME(hwstr)
# get the local target
target_hw = get_local_target(tyctx)
# make sure the target_hw is in the MRO for hw_clazz else bail
if not target_hw.inherits_from(hw_clazz):
raise InternalTargetMismatchError(reason, target_hw, hw_clazz)
return target_hw
class JitDecorator(ABC):
@abstractmethod
def __call__(self):
return NotImplemented
class Target(ABC):
""" Implements a target """
@classmethod
def inherits_from(cls, other):
"""Returns True if this target inherits from 'other' False otherwise"""
return issubclass(cls, other)
class Generic(Target):
"""Mark the target as generic, i.e. suitable for compilation on
any target. All must inherit from this.
"""
class CPU(Generic):
"""Mark the target as CPU.
"""
class GPU(Generic):
"""Mark the target as GPU, i.e. suitable for compilation on a GPU
target.
"""
class CUDA(GPU):
"""Mark the target as CUDA.
"""
class NPyUfunc(Target):
"""Mark the target as a ufunc
"""
target_registry['generic'] = Generic
target_registry['CPU'] = CPU
target_registry['cpu'] = CPU
target_registry['GPU'] = GPU
target_registry['gpu'] = GPU
target_registry['CUDA'] = CUDA
target_registry['cuda'] = CUDA
target_registry['npyufunc'] = NPyUfunc
dispatcher_registry = DelayedRegistry(key_type=Target)
# Register the cpu target token with its dispatcher and jit
cpu_target = target_registry['cpu']
dispatcher_registry[cpu_target] = CPUDispatcher
jit_registry[cpu_target] = jit | null |
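# --- Editor's note: illustrative sketch, not part of the original module. ---
# How the registries above are meant to be queried, assuming a normal numba install
# and a fresh process (no other code has touched the TLS target). METHOD_NAME is the
# masked string-to-Target resolver.
def _target_registry_example():
    assert METHOD_NAME('cpu') is CPU                      # string -> Target class
    assert resolve_dispatcher_from_str('cpu') is CPUDispatcher
    assert current_target() == 'cpu'                      # module default
    with target_override('cuda'):
        assert current_target() == 'cuda'                 # temporarily overridden
    assert current_target() == 'cpu'                      # restored on exit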
make order by deterministic | from collections import namedtuple
from sqlalchemy import select
from db.columns import utils as col_utils
from db.records.exceptions import BadSortFormat, SortFieldNotFound
def METHOD_NAME(relation, order_by=None):
"""
Makes an order_by deterministic (totally ordering).
Given a relation, and a `order_by` spec, that defines the ordering to be applied to the
relation, returns a new order_by that is the totally ordered (deterministic) version of the
input order_by.
Appending primary key sort guarantees determinism, but if that fails, we revert to ordering by
all columns.
"""
if order_by is None:
order_by = []
order_by = _append_primary_key_sort(relation, order_by)
if not order_by:
order_by = _build_order_by_all_columns_clause(relation)
return order_by
def _append_primary_key_sort(relation, order_by):
"""
Given an order by clause, we can guarantee a deterministic ordering
overall by appending a final ordering by primary key if one exists.
"""
pk_cols = col_utils.get_primary_key_column_collection_from_relation(relation)
order_by = list(order_by)
if pk_cols is not None:
order_by += [
{'field': col, 'direction': 'asc'}
for col
in set(pk_cols).intersection(relation.columns)
]
return order_by
def _build_order_by_all_columns_clause(relation):
"""
To be used when we have failed to find any other ordering criteria,
since ordering by all columns is inherently inefficient.
Note the filtering out some columns, namely internal columns and non-orderable columns. See
their docstrings for details.
"""
return [
{'field': col, 'direction': 'asc'}
for col
in relation.columns
if _is_col_orderable(col) and not _is_internal_column(col)
]
def _is_internal_column(col):
"""
    Columns that Mathesar adds for its own purposes and does not expose to the user. We don't want
to sort by these.
Might not be exhaustive, take care.
"""
return col.name == '__mathesar_group_metadata'
def _is_col_orderable(col):
"""
    Some columns are not orderable (or at least don't have an unambiguous way to define order
    without additional logic). We only want to order by orderable columns.
"""
data_type = col.type
non_orderable_type = ['Binary', 'LargeBinary', 'PickleType', 'ARRAY', 'JSON', 'JSONB', 'NULL']
return str(data_type) not in non_orderable_type
def apply_relation_sorting(relation, sort_spec):
order_by_list = [
_get_sorted_column_obj_from_spec(relation, spec) for spec in sort_spec
]
return select(relation).order_by(*order_by_list)
def _get_sorted_column_obj_from_spec(relation, spec):
try:
sort_spec = _deserialize_sort_spec(spec)
except (KeyError, TypeError, AssertionError):
raise BadSortFormat
try:
column = col_utils.get_column_obj_from_relation(relation, sort_spec.field)
except KeyError as e:
raise SortFieldNotFound(e)
except AttributeError:
raise BadSortFormat
try:
directed_col = _build_directed_column_expr(column, sort_spec)
except AttributeError:
raise BadSortFormat
return directed_col
def _deserialize_sort_spec(spec):
sort_spec = namedtuple(
'_sort_spec',
['field', 'direction', 'nullsfirst', 'nullslast']
)(
field=spec['field'],
direction=spec['direction'],
nullsfirst=spec.get('nullsfirst', False),
nullslast=spec.get('nullslast', False)
)
# Since it's not valid to have both nullsfirst and nullslast.
assert not sort_spec.nullsfirst or not sort_spec.nullslast
return sort_spec
def _build_directed_column_expr(column, sort_spec):
directed_col = getattr(column, sort_spec.direction)()
if sort_spec.nullsfirst:
directed_col = directed_col.nulls_first()
elif sort_spec.nullslast:
directed_col = directed_col.nulls_last()
return directed_col | null |
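# --- Editor's note: illustrative sketch, not part of the original module. ---
# What METHOD_NAME (the deterministic-ordering helper above) produces for a simple
# SQLAlchemy table; this assumes the Mathesar `db` package and SQLAlchemy are importable.
def _deterministic_order_example():
    from sqlalchemy import Column, Integer, MetaData, String, Table
    users = Table(
        "users", MetaData(),
        Column("id", Integer, primary_key=True),
        Column("name", String),
    )
    order_by = METHOD_NAME(users, order_by=[{"field": users.c.name, "direction": "asc"}])
    # The caller's spec is kept and a primary-key sort is appended, so the ordering is
    # total: the last entry sorts on the `id` column ascending.
    assert order_by[-1]["field"].name == "id"
    assert order_by[-1]["direction"] == "asc"
    return order_by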
test state universe get states map | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests google3.corp.bizapps.rews.carson.ontology.validation.state_lib."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from yamlformat.validator import findings_lib
from yamlformat.validator import state_lib
_GOOD_PATH = 'mynamespace/states/anyfolder'
class StateLibTest(absltest.TestCase):
def METHOD_NAME(self):
folder = state_lib.StateFolder(_GOOD_PATH)
namespace = folder.local_namespace
namespace.InsertState(state_lib.State('STATE_ONE', 'one'))
namespace.InsertState(state_lib.State('STATE_TWO', 'two'))
state_universe = state_lib.StateUniverse([folder])
states = state_universe.GetStatesMap('mynamespace')
self.assertIn('STATE_ONE', states)
self.assertIn('STATE_TWO', states)
def testStateUniverseGetFindings(self):
context = findings_lib.FileContext(f'{_GOOD_PATH}/file.yaml')
folder = state_lib.StateFolder(_GOOD_PATH)
folder.AddFinding(findings_lib.InconsistentFileLocationError('', context))
namespace = folder.local_namespace
namespace.AddFinding(
findings_lib.DuplicateStateDefinitionError(namespace,
state_lib.State('STATE'),
context))
state = state_lib.State('STATE', 'description')
state.AddFinding(findings_lib.MissingStateDescriptionWarning(state))
namespace.InsertState(state)
state_universe = state_lib.StateUniverse([folder])
findings = state_universe.GetFindings()
self.assertLen(findings, 3)
self.assertTrue(
state_universe.HasFindingTypes([
findings_lib.InconsistentFileLocationError,
findings_lib.DuplicateStateDefinitionError,
findings_lib.MissingStateDescriptionWarning
]))
self.assertFalse(state_universe.IsValid())
def testStateFolderAddValidState(self):
folder = state_lib.StateFolder(_GOOD_PATH)
folder.AddState(state_lib.State('STATE', 'description'))
self.assertIn('STATE', folder.local_namespace.states)
self.assertEmpty(folder.GetFindings())
def testStateFolderAddInvalidStateFails(self):
folder = state_lib.StateFolder(_GOOD_PATH)
folder.AddState(state_lib.State('bad-state', 'invalid'))
self.assertNotIn('bad-state', folder.local_namespace.states)
self.assertIsInstance(folder.GetFindings()[0],
findings_lib.InvalidStateNameError)
def testStateFolderAddDuplicateStateFails(self):
folder = state_lib.StateFolder(_GOOD_PATH)
folder.AddState(state_lib.State('STATE', 'description'))
self.assertIn('STATE', folder.local_namespace.states)
self.assertEmpty(folder.local_namespace.GetFindings())
folder.AddState(state_lib.State('STATE', 'duplicate'))
self.assertIsInstance(folder.local_namespace.GetFindings()[0],
findings_lib.DuplicateStateDefinitionError)
def testStateFolderAddFromConfig(self):
doc = {
'STATE_ONE': 'one',
'STATE_TWO': 'two',
}
folder = state_lib.StateFolder(_GOOD_PATH)
folder.AddFromConfig([doc], f'{_GOOD_PATH}/file.yaml')
self.assertCountEqual(['STATE_ONE', 'STATE_TWO'],
folder.local_namespace.states)
self.assertEmpty(folder.GetFindings())
def testStateFolderAddFromConfigNotYamlFails(self):
folder = state_lib.StateFolder(_GOOD_PATH)
folder.AddFromConfig([{}], f'{_GOOD_PATH}/file.txt')
self.assertIsInstance(folder.GetFindings()[0],
findings_lib.InconsistentFileLocationError)
def testStateWithIllegalKeyTypeHasFindings(self):
state = state_lib.State(False, 'invalid')
self.assertIsInstance(state.GetFindings()[0],
findings_lib.IllegalKeyTypeError)
def testStateWithIllegalNameHasFindings(self):
state = state_lib.State('bad-state', 'invalid')
self.assertIsInstance(state.GetFindings()[0],
findings_lib.InvalidStateNameError)
def testStateWithNoDescriptionHasFindings(self):
state = state_lib.State('STATE', '')
self.assertIsInstance(state.GetFindings()[0],
findings_lib.MissingStateDescriptionWarning)
def testStateEquals(self):
state_one = state_lib.State('STATE_ONE', 'description')
state_one_dup = state_lib.State('STATE_ONE', 'description')
state_one_no_desc = state_lib.State('STATE_ONE', '')
state_two = state_lib.State('STATE_TWO', 'description')
self.assertEqual(state_one, state_one_dup)
self.assertNotEqual(state_one, state_one_no_desc)
self.assertNotEqual(state_one, state_two)
if __name__ == '__main__':
absltest.main() | null |
test update atc post | from functools import reduce
import operator
from django.contrib.auth.models import Group, Permission
from django.db.models import Q
from django.test import TestCase, override_settings
from django.urls import reverse
from django.utils.crypto import get_random_string
from accounts.models import User
from zentral.contrib.osquery.models import AutomaticTableConstruction
@override_settings(STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage')
class OsquerySetupATCViewsTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = User.objects.create_user("godzilla", "[email protected]", get_random_string(12))
cls.group = Group.objects.create(name=get_random_string(12))
cls.user.groups.set([cls.group])
    # utility methods
def _login_redirect(self, url):
response = self.client.get(url)
self.assertRedirects(response, "{u}?next={n}".format(u=reverse("login"), n=url))
def _login(self, *permissions):
if permissions:
permission_filter = reduce(operator.or_, (
Q(content_type__app_label=app_label, codename=codename)
for app_label, codename in (
permission.split(".")
for permission in permissions
)
))
self.group.permissions.set(list(Permission.objects.filter(permission_filter)))
else:
self.group.permissions.clear()
self.client.force_login(self.user)
def _get_atc_dict(self, **kwargs):
d = {
"name": get_random_string(12),
"description": get_random_string(12),
"table_name": get_random_string(length=12, allowed_chars="abcd_"),
"query": "select 1 from yo;",
"path": "/home/yolo",
"columns": ["un", "deux"],
"platforms": ["darwin", "windows"],
}
d.update(**kwargs)
return d
def _force_atc(self):
atc_dict = self._get_atc_dict()
return AutomaticTableConstruction.objects.create(**atc_dict), atc_dict
# create atc
def test_create_atc_redirect(self):
self._login_redirect(reverse("osquery:create_atc"))
def test_create_atc_permission_denied(self):
self._login()
response = self.client.get(reverse("osquery:create_atc"))
self.assertEqual(response.status_code, 403)
def test_create_atc_get(self):
self._login("osquery.add_automatictableconstruction")
response = self.client.get(reverse("osquery:create_atc"))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "osquery/automatictableconstruction_form.html")
self.assertContains(response, "Create Automatic table construction")
def test_create_atc_post(self):
self._login("osquery.add_automatictableconstruction", "osquery.view_automatictableconstruction")
atc_name = get_random_string(12)
atc_description = get_random_string(12)
atc_dict = self._get_atc_dict(name=atc_name, description=atc_description)
response = self.client.post(reverse("osquery:create_atc"), atc_dict, follow=True)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "osquery/automatictableconstruction_detail.html")
self.assertContains(response, atc_name)
atc = response.context["object"]
self.assertEqual(atc.name, atc_name)
self.assertEqual(atc.description, atc_description)
# update atc
def test_update_atc_redirect(self):
atc, _ = self._force_atc()
self._login_redirect(reverse("osquery:update_atc", args=(atc.pk,)))
def test_update_atc_permission_denied(self):
atc, _ = self._force_atc()
self._login()
response = self.client.get(reverse("osquery:update_atc", args=(atc.pk,)))
self.assertEqual(response.status_code, 403)
def test_update_atc_get(self):
atc, _ = self._force_atc()
self._login("osquery.change_automatictableconstruction")
response = self.client.get(reverse("osquery:update_atc", args=(atc.pk,)))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "osquery/automatictableconstruction_form.html")
def METHOD_NAME(self):
atc, atc_dict = self._force_atc()
self._login("osquery.change_automatictableconstruction", "osquery.view_automatictableconstruction")
atc_dict["name"] = get_random_string(12)
response = self.client.post(reverse("osquery:update_atc", args=(atc.pk,)),
atc_dict, follow=True)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "osquery/automatictableconstruction_detail.html")
self.assertContains(response, atc_dict["name"])
atc = response.context["object"]
self.assertEqual(atc.name, atc_dict["name"])
# delete atc
def test_delete_atc_redirect(self):
atc, _ = self._force_atc()
self._login_redirect(reverse("osquery:delete_atc", args=(atc.pk,)))
def test_delete_atc_permission_denied(self):
atc, _ = self._force_atc()
self._login()
response = self.client.get(reverse("osquery:delete_atc", args=(atc.pk,)))
self.assertEqual(response.status_code, 403)
def test_delete_atc_get(self):
atc, _ = self._force_atc()
self._login("osquery.delete_automatictableconstruction")
response = self.client.get(reverse("osquery:delete_atc", args=(atc.pk,)))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "osquery/automatictableconstruction_confirm_delete.html")
self.assertContains(response, atc.name)
def test_delete_atc_post(self):
atc, _ = self._force_atc()
self._login("osquery.delete_automatictableconstruction", "osquery.view_automatictableconstruction")
response = self.client.post(reverse("osquery:delete_atc", args=(atc.pk,)), follow=True)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "osquery/automatictableconstruction_list.html")
self.assertEqual(AutomaticTableConstruction.objects.filter(pk=atc.pk).count(), 0)
self.assertNotContains(response, atc.name)
# atc list
def test_atc_list_redirect(self):
self._login_redirect(reverse("osquery:atcs"))
def test_atc_list_permission_denied(self):
self._login()
response = self.client.get(reverse("osquery:atcs"))
self.assertEqual(response.status_code, 403)
def test_atc_list(self):
atc, _ = self._force_atc()
self._login("osquery.view_automatictableconstruction")
response = self.client.get(reverse("osquery:atcs"))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "osquery/automatictableconstruction_list.html")
self.assertIn(atc, response.context["object_list"])
self.assertContains(response, atc.name) | null |
write block |
from __future__ import absolute_import
from six.moves import range
__date__ = "02 Aug 2012"
__author__ = '[email protected]'
from .function_library import *
class ParamCardWriter(object):
header = \
"""######################################################################\n""" + \
"""## PARAM_CARD AUTOMATICALY GENERATED BY THE UFO #####################\n""" + \
"""######################################################################\n"""
def __init__(self, filename, list_of_parameters=None, generic=False):
"""write a valid param_card.dat"""
if not list_of_parameters:
from .parameters import all_parameters
list_of_parameters = [param for param in all_parameters if \
param.nature=='external']
self.generic_output = generic
if generic:
self.define_not_dep_param(list_of_parameters)
self.fsock = open(filename, 'w')
self.fsock.write(self.header)
self.write_card(list_of_parameters)
self.fsock.close()
def define_not_dep_param(self, list_of_parameters):
"""define self.dep_mass and self.dep_width in case that they are
requested in the param_card.dat"""
from .particles import all_particles
self.dep_mass = [(part, part.mass) for part in all_particles \
if part.pdg_code > 0 and \
part.mass not in list_of_parameters]
self.dep_width = [(part, part.width) for part in all_particles\
if part.pdg_code > 0 and \
part.width not in list_of_parameters]
@staticmethod
def order_param(obj1, obj2):
""" order parameter of a given block """
maxlen = min([len(obj1.lhacode), len(obj2.lhacode)])
for i in range(maxlen):
if obj1.lhacode[i] < obj2.lhacode[i]:
return -1
elif obj1.lhacode[i] == obj2.lhacode[i]:
return 0
else:
return 1
#identical up to the first finish
if len(obj1.lhacode) > len(obj2.lhacode):
return 1
elif len(obj1.lhacode) == len(obj2.lhacode):
return 0
else:
return -1
def write_card(self, all_ext_param):
""" """
# list all lhablock
all_lhablock = set([param.lhablock for param in all_ext_param])
# ordonate lhablock alphabeticaly
all_lhablock = list(all_lhablock)
all_lhablock.sort()
# put at the beginning SMINPUT + MASS + DECAY
for name in ['DECAY', 'MASS','SMINPUTS']:
if name in all_lhablock:
all_lhablock.remove(name)
all_lhablock.insert(0, name)
for lhablock in all_lhablock:
self.METHOD_NAME(lhablock)
need_writing = [ param for param in all_ext_param if \
param.lhablock == lhablock]
need_writing.sort(self.order_param)
[self.write_param(param, lhablock) for param in need_writing]
if self.generic_output:
if lhablock in ['MASS', 'DECAY']:
self.write_dep_param_block(lhablock)
if self.generic_output:
self.write_qnumber()
def METHOD_NAME(self, name):
""" write a comment for a block"""
self.fsock.writelines(
"""\n###################################""" + \
"""\n## INFORMATION FOR %s""" % name.upper() +\
"""\n###################################\n"""
)
if name!='DECAY':
self.fsock.write("""Block %s \n""" % name)
def write_param(self, param, lhablock):
lhacode=' '.join(['%3s' % key for key in param.lhacode])
if lhablock != 'DECAY':
text = """ %s %e # %s \n""" % (lhacode, complex(param.value).real, param.name )
else:
text = '''DECAY %s %e \n''' % (lhacode, complex(param.value).real)
self.fsock.write(text)
def write_dep_param_block(self, lhablock):
import cmath
from .parameters import all_parameters
from .particles import all_particles
for parameter in all_parameters:
exec("%s = %s" % (parameter.name, parameter.value))
text = "## Not dependent paramater.\n"
text += "## Those values should be edited following analytical the \n"
text += "## analytical expression. Some generator could simply ignore \n"
text += "## those values and use the analytical expression\n"
if lhablock == 'MASS':
data = self.dep_mass
prefix = " "
else:
data = self.dep_width
prefix = "DECAY "
for part, param in data:
if isinstance(param.value, str):
value = complex(eval(param.value)).real
else:
value = param.value
text += """%s %s %f # %s : %s \n""" %(prefix, part.pdg_code,
value, part.name, param.value)
# If more than a particles has the same mass/width we need to write it here
# as well
if lhablock == 'MASS':
arg = 'mass'
done = [part for (part, param) in self.dep_mass]
else:
arg = 'width'
done = [part for (part, param) in self.dep_width]
for particle in all_particles:
if particle.pdg_code <0:
continue
is_define = True
if particle not in done:
if getattr(particle, arg).lhacode[0] != particle.pdg_code:
is_define = False
if not is_define:
value = float(particle.get(arg).value )
name = particle.get(arg).name
text += """%s %s %f # %s : %s \n""" %(prefix, particle.pdg_code,
value, particle.name, name)
self.fsock.write(text)
sm_pdg = [1,2,3,4,5,6,11,12,13,13,14,15,16,21,22,23,24,25]
data="""Block QNUMBERS %(pdg)d # %(name)s
1 %(charge)d # 3 times electric charge
2 %(spin)d # number of spin states (2S+1)
3 %(color)d # colour rep (1: singlet, 3: triplet, 8: octet)
4 %(antipart)d # Particle/Antiparticle distinction (0=own anti)\n"""
def write_qnumber(self):
""" write qnumber """
from .particles import all_particles
from . import particles
print(particles.__file__)
text="""#===========================================================\n"""
text += """# QUANTUM NUMBERS OF NEW STATE(S) (NON SM PDG CODE)\n"""
text += """#===========================================================\n\n"""
for part in all_particles:
if part.pdg_code in self.sm_pdg or part.pdg_code < 0:
continue
text += self.data % {'pdg': part.pdg_code,
'name': part.name,
'charge': 3 * part.charge,
'spin': part.spin,
'color': part.color,
'antipart': part.name != part.antiname and 1 or 0}
self.fsock.write(text)
if '__main__' == __name__:
ParamCardWriter('./param_card.dat', generic=True)
print('write ./param_card.dat')
| null |
uniq edges | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
import paddle.fluid.layers as L
import paddle.fluid.layers as layers
from pgl.utils import paddle_helper
import pgl
def masked_select(input, mask):
"""masked_select
Slice the value from given Mask
Args:
input: Input tensor to be selected
mask: A bool tensor for sliced.
Return:
Part of inputs where mask is True.
"""
index = L.where(mask)
return L.gather(input, index, overwrite=False)
class BigBirdWrapper(pgl.graph_wrapper.BaseGraphWrapper):
"""Implement of Big Bird by PGL graph wrapper """
def __init__(self, input_mask):
super(BigBirdWrapper, self).__init__()
max_seqlen = L.shape(input_mask)[1]
input_mask = L.reshape(input_mask, [-1])
num_nodes = L.shape(input_mask)[0]
src, dst = build_edges(num_nodes, input_mask, max_seqlen)
self._edges_src = src
self._edges_dst = dst
self._edges_src.stop_gradient=True
self._edges_dst.stop_gradient=True
self._num_nodes = num_nodes
self._num_edges = L.shape(self._edges_src)[0]
self._node_ids = L.range(0, self._num_nodes, step=1, dtype="int32")
self._edge_uniq_dst, _, uniq_count = L.unique_with_counts(self._edges_dst, dtype="int32")
self._edge_uniq_dst.stop_gradient=True
last = L.reduce_sum(uniq_count, keep_dim=True)
uniq_count = L.cumsum(uniq_count, exclusive=True)
self._edge_uniq_dst_count = L.concat([uniq_count, last])
self._edge_uniq_dst_count.stop_gradient=True
def select_edges(src, dst, input_mask, num_nodes, max_seqlen):
src = fluid.layers.elementwise_max(src, num_nodes * 0)
dst = fluid.layers.elementwise_max(dst, num_nodes * 0)
src = fluid.layers.elementwise_min(src, num_nodes - 1)
dst = fluid.layers.elementwise_min(dst, num_nodes - 1)
conditions = []
conditions.append(L.gather(input_mask, src) > 0.5)
conditions.append(L.gather(input_mask, dst) > 0.5)
block_src = src / max_seqlen
block_dst = dst / max_seqlen
conditions.append(block_src == block_dst)
mask = None
for cond in conditions:
if mask is None:
mask = cond
else:
mask = L.logical_and(mask, cond)
dst = masked_select(dst, mask)
src = masked_select(src, mask)
return src, dst
def METHOD_NAME(src, dst, num_nodes):
sorted_dst = L.cast(dst, dtype="int64")
sorted_src = L.cast(src, dtype="int64")
num_nodes = L.cast(num_nodes, dtype="int64")
edge_hash = sorted_dst * num_nodes + sorted_src
edge_hash, _ = L.argsort(edge_hash)
edge_hash, _ = L.unique(edge_hash, dtype="int64")
sorted_src = L.elementwise_mod(edge_hash, num_nodes)
sorted_dst = L.elementwise_div(edge_hash, num_nodes)
sorted_src = L.cast(sorted_src, dtype="int32")
sorted_dst = L.cast(sorted_dst, dtype="int32")
return sorted_src, sorted_dst
def build_edges(num_nodes, input_mask, max_seqlen):
edges = L.range(start=0, end=num_nodes, step=1, dtype="int32")
all_edges = []
# Window
filter_func = lambda x, y: select_edges(x, y, input_mask, num_nodes, max_seqlen)
all_edges.append(filter_func(edges - 1, edges)) # win-1
all_edges.append(filter_func(edges + 1, edges)) # win-2
all_edges.append(filter_func(edges, edges)) #self-loop
    # Global: assume [CLS] is the first token.
# vertical cls-window attention
cls_position = edges / max_seqlen * max_seqlen
all_edges.append(filter_func(cls_position, edges))
# horizontal cls attention
all_edges.append(filter_func(edges, cls_position))
# Random
for i in range(2):
rand_edge = L.floor(L.uniform_random(min=0, max=1, shape=[num_nodes]) * L.cast(max_seqlen, dtype="float32"))
rand_edge = L.cast(rand_edge, dtype="int32") + cls_position
all_edges.append(filter_func(rand_edge, edges))
if len(all_edges) > 1:
src = L.concat([ s for s, d in all_edges], 0)
dst = L.concat([ d for s, d in all_edges], 0)
else:
src = all_edges[0][0]
dst = all_edges[0][1]
# sort edges
sorted_src, sorted_dst = METHOD_NAME(src, dst, num_nodes)
return sorted_src, sorted_dst
def sparse_scaled_dot_product_attention(q, k, v, input_mask, dropout_rate, n_head, d_key, d_value):
def send_q_k_spmm(src_feat, dst_feat, edge_feat):
# q [ num_edges, n_head * dim]
# k [ num_edges, n_head * dim]
# v [ num_edges, n_head * dim]
_q = dst_feat["q"]
_k = src_feat["k"]
_v = src_feat["v"]
_q = L.reshape(_q, [-1, n_head, _q.shape[-1] // n_head])
_k = L.reshape(_k, [-1, n_head, _k.shape[-1] // n_head])
score = L.reduce_sum(_q * _k, -1) # [num_edge, n_head]
return { "score": score, "value": _v}
def recv_score_v_spmm(msg):
score = msg["score"]
score = paddle_helper.sequence_softmax(score)
score = layers.dropout(
score,
dropout_prob=dropout_rate,
dropout_implementation="upscale_in_train",
is_test=False)
score = L.reshape(score, [-1, n_head, 1])
_v = msg["value"]
_new_v = L.reshape(_v, [-1, n_head, _v.shape[-1] // n_head])
_new_v = _new_v * score
_new_v = L.reshape(_new_v, [-1, _v.shape[-1]])
_new_v = L.lod_reset(_new_v, _v)
return L.sequence_pool(_new_v, "sum")
graph_wrapper = BigBirdWrapper(input_mask)
old_v = v
q = L.reshape(q, [-1, d_key * n_head])
k = L.reshape(k, [-1, d_key * n_head])
v = L.reshape(v, [-1, d_value * n_head])
q = L.scale(q, scale=d_key ** -0.5)
msg = graph_wrapper.send(send_q_k_spmm, nfeat_list=[("k", k), ("v", v), ("q", q)])
out = graph_wrapper.recv(msg, recv_score_v_spmm)
out = L.reshape(out, [-1, L.shape(old_v)[1], d_value * n_head])
return out, out
| null |
is number | #!/usr/bin/env python
# encoding: utf8
class MainCategories:
Letter = 'Letter'
Mark = 'Mark'
Number = 'Number'
Punctuation = 'Punctuation'
Symbol = 'Symbol'
Separator = 'Separator'
Control = 'Control'
Format = 'Format'
Surrogate = 'Surrogate'
PrivateUse = 'Private_Use'
Unassigned = 'Unassigned'
Other = 'Other'
GeneralCategories = {
'Lu': ('Uppercase_Letter', MainCategories.Letter),
'Ll': ('Lowercase_Letter', MainCategories.Letter),
'Lt': ('Titlecase_Letter', MainCategories.Letter),
'LC': ('Cased_Letter', MainCategories.Letter),
'Lm': ('Modifier_Letter', MainCategories.Letter),
'Lo': ('Other_Letter', MainCategories.Letter),
'L': ('Letter', MainCategories.Letter),
'Mn': ('Nonspacing_Mark', MainCategories.Mark),
'Mc': ('Spacing_Mark', MainCategories.Mark),
'Me': ('Enclosing_Mark', MainCategories.Mark),
'M': ('Mark', MainCategories.Mark),
'Nd': ('Decimal_Number', MainCategories.Number),
'Nl': ('Letter_Number', MainCategories.Number),
'No': ('Other_Number', MainCategories.Number),
'N': ('Number', MainCategories.Number),
'Pc': ('Connector_Punctuation', MainCategories.Punctuation),
'Pd': ('Dash_Punctuation', MainCategories.Punctuation),
'Ps': ('Open_Punctuation', MainCategories.Punctuation),
'Pe': ('Close_Punctuation', MainCategories.Punctuation),
'Pi': ('Initial_Punctuation', MainCategories.Punctuation),
'Pf': ('Final_Punctuation', MainCategories.Punctuation),
'Po': ('Other_Punctuation', MainCategories.Punctuation),
'P': ('Punctuation', MainCategories.Punctuation),
'Sm': ('Math_Symbol', MainCategories.Symbol),
'Sc': ('Currency_Symbol', MainCategories.Symbol),
'Sk': ('Modifier_Symbol', MainCategories.Symbol),
'So': ('Other_Symbol', MainCategories.Symbol),
'S': ('Symbol', MainCategories.Symbol),
'Zs': ('Space_Separator', MainCategories.Separator),
'Zl': ('Line_Separator', MainCategories.Separator),
'Zp': ('Paragraph_Separator', MainCategories.Separator),
'Z': ('Separator', MainCategories.Separator),
'Cc': ('Control', MainCategories.Control),
'Cf': ('Format', MainCategories.Format),
'Cs': ('Surrogate', MainCategories.Surrogate),
'Co': ('Private_Use', MainCategories.PrivateUse),
'Cn': ('Unassigned', MainCategories.Unassigned),
'C': ('Other', MainCategories.Other),
}
class Codepoint:
def __init__(self, v):
self.codePoint = int(v[0], 16)
self.name = v[1]
self.category = v[2]
c = GeneralCategories.get(self.category, ('', MainCategories.Other))
self.categoryName = c[0]
self.mainCategory = c[1]
self.decDigitValue = v[6]
self.numValue = v[8]
def isLetter(self): return self.mainCategory is MainCategories.Letter
def isMark(self): return self.mainCategory is MainCategories.Mark
def METHOD_NAME(self): return self.mainCategory is MainCategories.Number
def isPunctuation(self): return self.mainCategory is MainCategories.Punctuation
def isSymbol(self): return self.mainCategory is MainCategories.Symbol
def isSeparator(self): return self.mainCategory is MainCategories.Separator
def isControl(self): return self.mainCategory is MainCategories.Control
def isFormat(self): return self.mainCategory is MainCategories.Format
def isSurrogate(self): return self.mainCategory is MainCategories.Surrogate
def isPrivateUse(self): return self.mainCategory is MainCategories.PrivateUse
def isUnassigned(self): return self.mainCategory is MainCategories.Unassigned
def isOther(self): return self.mainCategory is MainCategories.Other
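# Illustrative usage (added for clarity, not part of the original module): build a
# Codepoint from a simplified UnicodeData-style field list for U+0041.
def _codepoint_demo():
    fields = ['0041', 'LATIN CAPITAL LETTER A', 'Lu'] + [''] * 12
    cp = Codepoint(fields)
    return cp.isLetter(), cp.categoryName  # (True, 'Uppercase_Letter')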
# http://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt
def parseUnicodeDataFile(ucdFile): # { codepoint:int => Codepoint() }
ucd = {}
with open(ucdFile, 'r') as f:
for line in f:
# See http://unicode.org/reports/tr44/#UnicodeData.txt for fields
# e.g. "001D;<control>;Cc;0;B;;;;;N;INFORMATION SEPARATOR THREE;;;;"
if len(line) == 0 or line.startswith('#'):
continue
v = line.split(';')
if len(v) < 10:
continue
try:
cp = Codepoint(v)
ucd[cp.codePoint] = cp
except (ValueError, IndexError):
# skip lines whose fields cannot be parsed
pass
return ucd | null |
test un camel case | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for utilities.py."""
__author__ = '[email protected] (Tony Aiuto)'
from google.apputils import basetest
import googleapis.codegen.utilities as utilities
class UtilitiesTest(basetest.TestCase):
def testCamelCase(self):
"""Basic CamelCase functionality."""
self.assertEquals('HelloWorld', utilities.CamelCase('hello_world'))
self.assertEquals('HelloWorld', utilities.CamelCase('hello-world'))
self.assertEquals('HelloWorld', utilities.CamelCase('helloWorld'))
self.assertEquals('HelloWorld', utilities.CamelCase('Hello_world'))
self.assertEquals('HelloWorld', utilities.CamelCase('_hello_world'))
self.assertEquals('HelloWorld', utilities.CamelCase('helloWorld'))
self.assertEquals('HelloWorld', utilities.CamelCase('hello.world'))
self.assertEquals('HELLOWORLD', utilities.CamelCase('HELLO_WORLD'))
self.assertEquals('HelloWorld', utilities.CamelCase('hello/world'))
self.assertEquals('HelloWorld', utilities.CamelCase('/hello/world/'))
self.assertEquals('', utilities.CamelCase(''))
self.assertEquals(' ', utilities.CamelCase(' '))
self.assertEquals(' ', utilities.CamelCase('. '))
def METHOD_NAME(self):
"""Basic CamelCase functionality."""
# standard case
self.assertEquals('hello_world', utilities.UnCamelCase('helloWorld'))
self.assertEquals('hello_world', utilities.UnCamelCase('Hello_world'))
self.assertEquals('hello_world', utilities.UnCamelCase('helloWorld'))
self.assertEquals('hello_world', utilities.UnCamelCase('HELLO_WORLD'))
self.assertEquals('hello_world', utilities.UnCamelCase('HELLOworld'))
self.assertEquals('hello_world', utilities.UnCamelCase('helloWORLD'))
self.assertEquals('hello2_world', utilities.UnCamelCase('Hello2World'))
# keep existing separators
self.assertEquals('hello_world', utilities.UnCamelCase('hello_world'))
self.assertEquals('_hello_world', utilities.UnCamelCase('_hello_world'))
self.assertEquals('_hello_world', utilities.UnCamelCase('_HelloWorld'))
self.assertEquals('hello__world', utilities.UnCamelCase('Hello__World'))
# embedded acronym
self.assertEquals('hello_xw_orld', utilities.UnCamelCase('HelloXWorld'))
# minimal input
self.assertEquals('h', utilities.UnCamelCase('H'))
self.assertEquals('', utilities.UnCamelCase(''))
# Other cases involving expanded alphabet.
self.assertEquals('_', utilities.UnCamelCase('_'))
self.assertEquals('hello-world', utilities.UnCamelCase('hello-world'))
self.assertEquals('hello.world', utilities.UnCamelCase('hello.world'))
self.assertEquals('hello/world', utilities.UnCamelCase('hello/world'))
self.assertEquals('hello world', utilities.UnCamelCase('Hello World'))
self.assertEquals(' ', utilities.UnCamelCase(' '))
def testSanitizeDomain(self):
self.assertIsNone(utilities.SanitizeDomain(None))
self.assertEquals('google.com', utilities.SanitizeDomain('google.com'))
self.assertEquals('google.com', utilities.SanitizeDomain('GooglE.com'))
self.assertEquals('google.com', utilities.SanitizeDomain('goo|gle.com'))
self.assertEquals('google.com', utilities.SanitizeDomain('goo gle.com'))
self.assertEquals('googl.com', utilities.SanitizeDomain('googlê.com'))
self.assertEquals('www_test.appspot.com',
utilities.SanitizeDomain('www-test.appspot.com'))
def testReversedDomainComponents(self):
self.assertEquals([],
utilities.ReversedDomainComponents(''))
self.assertEquals(['com', 'google'],
utilities.ReversedDomainComponents('google.com'))
def testNoSpaces(self):
self.assertIsNone(utilities.NoSpaces(None))
self.assertEquals('', utilities.NoSpaces(''))
self.assertEquals('', utilities.NoSpaces(' '))
self.assertEquals('abc', utilities.NoSpaces('a b c '))
if __name__ == '__main__':
basetest.main() | null |
is full pending transaction pool | # coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
import heapq
from pyqrllib.pyqrllib import bin2hstr
from qrl.core import config
from qrl.core.misc import logger
from qrl.core.Block import Block
from qrl.core.txs.Transaction import Transaction
from qrl.core.txs.CoinBase import CoinBase
from qrl.core.TransactionInfo import TransactionInfo
class TransactionPool:
# FIXME: Remove tx pool from all method names
def __init__(self, broadcast_tx):
self.pending_tx_pool = []
self.pending_tx_pool_hash = set()
self.transaction_pool = [] # FIXME: Everyone is touching this
self.broadcast_tx = broadcast_tx
@property
def transactions(self):
return heapq.nlargest(len(self.transaction_pool), self.transaction_pool)
def set_broadcast_tx(self, broadcast_tx):
self.broadcast_tx = broadcast_tx
def get_pending_transaction(self):
if len(self.pending_tx_pool_hash) == 0:
return None
pending_tx_set = heapq.heappop(self.pending_tx_pool)
pending_tx = pending_tx_set[1].transaction
timestamp = pending_tx_set[1].timestamp
self.pending_tx_pool_hash.remove(pending_tx.txhash)
return pending_tx, timestamp
def METHOD_NAME(self, ignore_reserve=True) -> bool:
max_pool_size = config.user.pending_transaction_pool_size
if ignore_reserve:
max_pool_size = max_pool_size - config.user.pending_transaction_pool_reserve
if len(self.pending_tx_pool) >= max_pool_size:
return True
return False
def is_full_transaction_pool(self) -> bool:
if len(self.transaction_pool) >= config.user.transaction_pool_size:
return True
return False
def update_pending_tx_pool(self, tx, ip, ignore_reserve=True) -> bool:
if self.METHOD_NAME(ignore_reserve):
return False
idx = self.get_tx_index_from_pool(tx.txhash)
if idx > -1:
return False
if isinstance(tx, CoinBase):
logger.warning('Rejected CoinBase Transaction as received without block')
return False
if tx.txhash in self.pending_tx_pool_hash:
return False
# pending_tx_pool is a min-heap, so the entry with the smallest key pops first.
# Multiplying the fee by -1 therefore gives higher-fee transactions higher priority.
heapq.heappush(self.pending_tx_pool, [tx.fee * -1, TransactionInfo(tx, -1), ip])
self.pending_tx_pool_hash.add(tx.txhash)
return True
def add_tx_to_pool(self, tx_class_obj, block_number, timestamp: int=None) -> bool:
if self.is_full_transaction_pool():
return False
heapq.heappush(self.transaction_pool, [tx_class_obj.fee, TransactionInfo(tx_class_obj,
block_number,
timestamp)])
return True
def get_tx_index_from_pool(self, txhash):
for i in range(len(self.transaction_pool)):
txn = self.transaction_pool[i][1].transaction
if txhash == txn.txhash:
return i
return -1
def remove_tx_from_pool(self, tx: Transaction):
idx = self.get_tx_index_from_pool(tx.txhash)
if idx > -1:
del self.transaction_pool[idx]
heapq.heapify(self.transaction_pool)
def remove_tx_in_block_from_pool(self, block_obj: Block):
for protobuf_tx in block_obj.transactions[1:]: # Ignore first transaction, as it is a coinbase txn
tx = Transaction.from_pbdata(protobuf_tx)
if tx.ots_key < config.dev.max_ots_tracking_index:
idx = self.get_tx_index_from_pool(tx.txhash)
if idx > -1:
del self.transaction_pool[idx]
else:
i = 0
while i < len(self.transaction_pool):
txn = self.transaction_pool[i][1].transaction
if txn.PK == tx.PK:
if txn.ots_key >= config.dev.max_ots_tracking_index:
if txn.ots_key <= tx.ots_key:
del self.transaction_pool[i]
continue
i += 1
heapq.heapify(self.transaction_pool)
def add_tx_from_block_to_pool(self, block: Block, current_block_number):
"""
Move all transactions from block to transaction pool.
:param block:
:return:
"""
for protobuf_tx in block.transactions[1:]:
if not self.add_tx_to_pool(Transaction.from_pbdata(protobuf_tx), current_block_number):
logger.warning('Failed to Add transaction into transaction pool')
logger.warning('Block #%s %s', block.block_number, bin2hstr(block.headerhash))
return
def check_stale_txn(self, new_state_container, update_state_container, current_block_number):
i = 0
while i < len(self.transaction_pool):
tx_info = self.transaction_pool[i][1]
if tx_info.is_stale(current_block_number):
if not tx_info.validate(new_state_container, update_state_container, current_block_number):
logger.warning('Txn validation failed for tx in tx_pool')
self.remove_tx_from_pool(tx_info.transaction)
continue
tx_info.update_block_number(current_block_number)
self.broadcast_tx(tx_info.transaction)
i += 1 | null |
test getneighbourpositions | import unittest2 as unittest
# import unittest
from math import sqrt
from libagent import playground, error
class TestPlayground(unittest.TestCase):
def setUp(self):
self.pg = playground.Playground()
def test_stringcoordinate(self):
self.assertEqual(self.pg.stringcoordinate("foo", "bar"), [])
self.assertEqual(self.pg.stringcoordinate("inf", "nan"), [])
self.assertEqual(self.pg.stringcoordinate("2", "3"), [3, 2])
def test_setregion(self):
self.assertTrue(self.pg.region["rows"] == 1)
self.pg.setregion(2, 1)
self.pg.createlayer("bar")
self.assertTrue(self.pg.region["rows"] == 2)
self.assertRaises(error.Error, self.pg.setregion, *[2, 1])
def test_getregion(self):
self.assertIsNotNone(self.pg.getregion())
self.assertIs(self.pg.getregion(), self.pg.region)
def gettotalcount(self):
# not tested, as it is just a wrapper
pass
def getdiagonalcount(self):
# not tested, as it is just a wrapper
pass
def test_getbound(self):
n = self.pg.region["n"]
s = self.pg.region["s"]
w = self.pg.region["w"]
e = self.pg.region["e"]
# TODO needed?
# ns = self.pg.region["nsres"]
ns = 1
# ew = self.pg.region["ewres"]
ew = 1
r = self.pg.region["rows"]
c = self.pg.region["cols"]
self.assertIsNotNone(n)
self.assertIsNotNone(s)
self.assertIsNotNone(w)
self.assertIsNotNone(e)
self.assertTrue(n > s)
self.assertTrue(e > w)
self.assertEqual((n - s) / ns, r)
self.assertEqual((e - w) / ew, c)
def test_setlayer(self):
layer = [0]
key = "foo"
self.pg.setlayer(key, layer)
self.assertIs(self.pg.layers[key], layer)
self.assertRaises(error.Error, self.pg.setlayer, *[key, layer])
layer = [0]
self.assertIsNot(self.pg.layers[key], layer)
self.pg.setlayer(key, layer, True)
self.assertIs(self.pg.layers[key], layer)
def test_createlayer(self):
# TODO from file, better test manually?
self.pg.createlayer("foo")
self.assertTrue("foo" in self.pg.layers)
self.assertEqual(len(self.pg.layers["foo"]), self.pg.region["rows"])
self.assertEqual(len(self.pg.layers["foo"][0]), self.pg.region["cols"])
def test_getlayer(self):
self.pg.layers["foo"] = [0]
self.assertIs(self.pg.layers["foo"], self.pg.getlayer("foo"))
def test_removelayer(self):
self.pg.layers["foo"] = [0]
self.assertTrue("foo" in self.pg.layers)
self.pg.removelayer("foo")
self.assertFalse("foo" in self.pg.layers)
def test_writelayer(self):
# TODO better test manually?
pass
def test_getrandomposition(self):
n = self.pg.region["n"]
s = self.pg.region["s"]
w = self.pg.region["w"]
e = self.pg.region["e"]
position = self.pg.getrandomposition()
self.assertTrue(position[0] >= s)
self.assertTrue(position[0] < n)
self.assertTrue(position[1] >= w)
self.assertTrue(position[1] < e)
def test_isvalidposition(self):
self.pg.setregion(3, 3)
self.assertTrue(self.pg.isvalidposition([1, 1]))
self.assertFalse(self.pg.isvalidposition([3, 3]))
def test_addneighbourposition(self):
self.pg.setregion(3, 3)
positions = []
ps = positions[:]
self.assertItemsEqual(ps, self.pg.addneighbourposition(positions, [9, 9]))
ps.append([1, 1])
self.assertItemsEqual(ps, self.pg.addneighbourposition(positions, [1, 1]))
def test_getorderedneighbourpositions(self):
self.pg.setregion(3, 3)
self.assertFalse(self.pg.getorderedneighbourpositions([1, 1], 3))
ps = self.pg.getorderedneighbourpositions([2, 2], 4)
self.assertEqual(2, len(ps))
self.assertEqual(0, ps[1][3])
ps = self.pg.getorderedneighbourpositions([1, 1], 8)
self.assertEqual(8, len(ps))
self.assertEqual(7, ps[7][2])
self.assertEqual(0, ps[3][3])
self.assertEqual(sqrt(2) - 1, ps[6][3])
def METHOD_NAME(self):
self.pg.setregion(3, 3)
ps = self.pg.getneighbourpositions([2, 2], 4)
self.assertEqual(2, len(ps))
self.assertEqual(0, ps[1][3])
def test_getcellvalue(self):
l = "bar"
self.pg.createlayer(l)
self.pg.layers[l][0][0] = 0
self.assertNotEqual(101, self.pg.getcellvalue(l, [0, 0]))
self.pg.layers[l][0][0] = 101
self.assertEqual(101, self.pg.getcellvalue(l, [0, 0]))
def test_setcellvalue(self):
l = "bar"
self.pg.createlayer(l)
self.pg.layers[l][0][0] = 0
self.assertNotEqual(101, self.pg.layers[l][0][0])
self.pg.setcellvalue(l, [0, 0], 101)
self.assertEqual(101, self.pg.getcellvalue(l, [0, 0]))
def test_decaycellvalues(self):
l = "bar"
self.pg.createlayer(l)
self.pg.layers[l][0][0] = 100
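# decaycellvalues(l, 3) appears to apply exponential decay with a half-life of 3 steps,
# i.e. value * 0.5 ** (1 / 3) per call: 100 -> ~79 -> ~63 -> 50, matching the assertions below.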
self.pg.decaycellvalues(l, 3)
self.assertEqual(int(round(self.pg.layers[l][0][0])), 79)
self.pg.decaycellvalues(l, 3)
self.assertEqual(int(round(self.pg.layers[l][0][0])), 63)
self.pg.decaycellvalues(l, 3)
self.assertEqual(int(round(self.pg.layers[l][0][0])), 50)
# def tearDown(self): | null |
setup link | """Service for linking the project to Minecraft."""
__all__ = [
"LinkManager",
]
import logging
import os
import platform
from pathlib import Path
from typing import List, Optional, Union
from beet import Cache, CachePin, Context, ErrorMessage, MultiCache, PackOverwrite
from beet.core.utils import FileSystemPath, log_time, remove_path
logger = logging.getLogger("link")
def link_cache_finalizer(cache: Cache):
"""Link cache finalizer."""
LinkManager(cache).clean()
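# Rough usage sketch (illustrative, not part of the original module): inside a beet
# plugin the manager defined below is typically built from the plugin Context, e.g.
#
#     link = LinkManager(ctx)
#     link.locate_minecraft()
#     print(link.summary())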
class LinkManager:
cache: Cache
dirty = CachePin[List[str]]("dirty", default_factory=list)
world = CachePin[Optional[str]]("world", None)
minecraft = CachePin[Optional[str]]("minecraft", None)
data_pack = CachePin[Optional[str]]("data_pack", None)
resource_pack = CachePin[Optional[str]]("resource_pack", None)
def __init__(self, arg: Union[Context, MultiCache[Cache], Cache]):
if isinstance(arg, Context):
arg = arg.cache
if isinstance(arg, MultiCache):
arg = arg["link"]
self.cache = arg
self.cache.add_finalizer(link_cache_finalizer)
def clean(self):
"""Remove the previously linked files and folders."""
remove_path(*self.dirty)
self.dirty.clear()
def autosave_handler(self, ctx: Context):
"""Plugin for linking the generated resource pack and data pack to Minecraft."""
to_link = [
(Path(directory), pack)
for directory, pack in zip([self.resource_pack, self.data_pack], ctx.packs)
if directory and pack
]
if to_link:
with log_time("Link project."):
for directory, pack in to_link:
try:
self.dirty.append(str(pack.save(directory)))
except PackOverwrite as exc:
logger.warning(
f"Remove the conflicting pack to set up the link. {exc}"
)
def METHOD_NAME(
self,
world: Optional[FileSystemPath] = None,
minecraft: Optional[FileSystemPath] = None,
data_pack: Optional[FileSystemPath] = None,
resource_pack: Optional[FileSystemPath] = None,
):
"""Associate minecraft directories to the project."""
if minecraft:
minecraft = Path(minecraft).resolve()
if not minecraft.is_dir():
raise ErrorMessage(f"The specified Minecraft folder does not exist.")
else:
self.locate_minecraft()
minecraft = Path(self.minecraft) if self.minecraft else None
if world:
world_name = world
world = Path(world).resolve()
if not (world / "level.dat").is_file():
if minecraft and Path(world_name).parts == (world_name,):
world = minecraft / "saves" / world_name
if not world.is_dir():
raise ErrorMessage(
f"Couldn't find {str(world_name)!r} in the Minecraft save folder."
)
else:
raise ErrorMessage(f"The specified world folder is invalid.")
else:
world = None
if data_pack:
data_pack = Path(data_pack).resolve()
if not data_pack.is_dir():
raise ErrorMessage(
f"The specified data packs directory does not exist."
)
elif world:
data_pack = world / "datapacks"
else:
data_pack = None
if data_pack and not world:
world = data_pack.parent
if world and not minecraft:
minecraft = world.parent.parent
if resource_pack:
resource_pack = Path(resource_pack).resolve()
if not resource_pack.is_dir():
raise ErrorMessage(
f"The specified resource packs directory does not exist."
)
elif minecraft:
resource_pack = minecraft / "resourcepacks"
else:
resource_pack = None
if resource_pack and not minecraft:
minecraft = resource_pack.parent
if world:
self.world = str(world)
if minecraft:
self.minecraft = str(minecraft)
if data_pack:
self.data_pack = str(data_pack)
if resource_pack:
self.resource_pack = str(resource_pack)
def clear_link(self):
"""Clear the link."""
self.cache.clear()
def locate_minecraft(self):
"""Try to find the .minecraft folder."""
locations = [
Path(path)
for path in os.environ.get("MINECRAFT_PATH", "").split(":")
if path
]
system = platform.system()
if system == "Linux":
locations.append(Path("~/.minecraft").expanduser())
locations.append(
Path("~/.var/app/com.mojang.Minecraft/data/minecraft").expanduser()
)
elif system == "Darwin":
locations.append(
Path("~/Library/Application Support/minecraft").expanduser()
)
elif system == "Windows":
locations.append(Path(os.path.expandvars(r"%APPDATA%\.minecraft")))
if path := next((path for path in locations if path and path.is_dir()), None):
self.minecraft = str(path.resolve())
def summary(self) -> str:
"""Return a formatted summary."""
return "\n".join(
f"{title}:\n | directory = {directory}\n"
for title, directory in [
("Minecraft installation", self.minecraft),
("World folder", self.world),
("Data packs directory", self.data_pack),
("Resource packs directory", self.resource_pack),
]
) | null |
assert no error | from typing import Any, Callable
import unittest
from uuid import uuid4
import numpy as np
from rastervision.pipeline.file_system import get_tmp_dir
from rastervision.core.data import (
ClassConfig, DatasetConfig, RasterioSourceConfig, MultiRasterSourceConfig,
ReclassTransformerConfig, SceneConfig,
SemanticSegmentationLabelSourceConfig)
from rastervision.core.rv_pipeline import SemanticSegmentationConfig
from rastervision.pytorch_backend import PyTorchSemanticSegmentationConfig
from rastervision.pytorch_learner import (
SemanticSegmentationModelConfig, SolverConfig,
SemanticSegmentationGeoDataConfig, PlotOptions, GeoDataWindowConfig,
GeoDataWindowMethod)
from rastervision.pytorch_learner.utils import (
serialize_albumentation_transform)
from tests.data_files.lambda_transforms import lambda_transforms
from tests import data_file_path
def make_scene(num_channels: int, num_classes: int) -> SceneConfig:
path = data_file_path('multi_raster_source/const_100_600x600.tiff')
rs_cfgs_img = []
for _ in range(num_channels):
rs_cfg = RasterioSourceConfig(
uris=[path],
channel_order=[0],
transformers=[
ReclassTransformerConfig(
mapping={100: np.random.randint(0, 256)})
])
rs_cfgs_img.append(rs_cfg)
rs_cfg_img = MultiRasterSourceConfig(
raster_sources=rs_cfgs_img, channel_order=list(range(num_channels)))
rs_cfg_label = RasterioSourceConfig(
uris=[path],
channel_order=[0],
transformers=[
ReclassTransformerConfig(
mapping={100: np.random.randint(0, num_classes)})
])
scene_cfg = SceneConfig(
id=str(uuid4()),
raster_source=rs_cfg_img,
label_source=SemanticSegmentationLabelSourceConfig(
raster_source=rs_cfg_label))
return scene_cfg
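# make_scene() above builds a synthetic scene from a single constant-valued GeoTIFF:
# the image source stacks num_channels copies of the band, each reclassified to a random
# pixel value, and the label source reclassifies the same band into one of num_classes classes.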
class TestSemanticSegmentationLearner(unittest.TestCase):
def METHOD_NAME(self, fn: Callable, msg: str = ''):
try:
fn()
except Exception:
self.fail(msg)
def test_learner_rgb(self):
args = dict(num_channels=3, channel_display_groups=None)
self.METHOD_NAME(lambda: self._test_learner(**args))
def test_learner_multiband(self):
args = dict(
num_channels=6, channel_display_groups=[(0, 1, 2), (3, 4, 5)])
self.METHOD_NAME(lambda: self._test_learner(**args))
def _test_learner(self,
num_channels: int,
channel_display_groups: Any,
num_classes: int = 5):
"""Tests learner init, plots, bundle, train and pred."""
with get_tmp_dir() as tmp_dir:
class_config = ClassConfig(
names=[f'class_{i}' for i in range(num_classes)])
class_config.update()
class_config.ensure_null_class()
dataset_cfg = DatasetConfig(
class_config=class_config,
train_scenes=[
make_scene(
num_channels=num_channels, num_classes=num_classes)
for _ in range(4)
],
validation_scenes=[
make_scene(
num_channels=num_channels, num_classes=num_classes)
for _ in range(2)
],
test_scenes=[])
if num_channels == 6:
tf = lambda_transforms['swap']
aug_tf = serialize_albumentation_transform(
tf,
lambda_transforms_path=data_file_path(
'lambda_transforms.py'),
dst_dir=tmp_dir)
else:
aug_tf = None
data_cfg = SemanticSegmentationGeoDataConfig(
scene_dataset=dataset_cfg,
window_opts=GeoDataWindowConfig(
method=GeoDataWindowMethod.random, size=20, max_windows=8),
class_names=class_config.names,
class_colors=class_config.colors,
aug_transform=aug_tf,
plot_options=PlotOptions(
channel_display_groups=channel_display_groups),
num_workers=0)
backend_cfg = PyTorchSemanticSegmentationConfig(
data=data_cfg,
model=SemanticSegmentationModelConfig(pretrained=False),
solver=SolverConfig(batch_sz=4, num_epochs=1),
log_tensorboard=False)
pipeline_cfg = SemanticSegmentationConfig(
root_uri=tmp_dir, dataset=dataset_cfg, backend=backend_cfg)
pipeline_cfg.update()
backend = backend_cfg.build(pipeline_cfg, tmp_dir)
learner = backend.learner_cfg.build(tmp_dir, training=True)
learner.plot_dataloaders()
learner.train()
learner.plot_predictions(split='valid')
learner.save_model_bundle()
learner = None
backend.learner = None
backend.load_model()
pred_scene = dataset_cfg.validation_scenes[0].build(
class_config, tmp_dir)
_ = backend.predict_scene(pred_scene, chip_sz=100)
if __name__ == '__main__':
unittest.main() | null |
process result value | # -*- coding: utf-8 -*-
#Copyright (C) Nathan Jones [email protected]
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from pytrainer.util.color import Color, color_from_hex_string
from pytrainer.lib.ddbb import DeclarativeBase, ForcedInteger
from sqlalchemy import Column, Integer, Float, Unicode, CheckConstraint
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.exc import InvalidRequestError, IntegrityError
import sqlalchemy.types as types
import logging
class ColorType(types.TypeDecorator):
"""Sqlalchemy type to convert between CHAR and the Color object"""
impl = types.CHAR
def process_bind_param(self, value, dialect):
return value.to_hex_string()
def METHOD_NAME(self, value, dialect):
return color_from_hex_string(value)
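# Illustrative round trip (added for clarity, not part of the original module): ColorType
# stores a Color as its hex string and rebuilds it when reading. The dialect argument is
# unused by these two methods, so None is enough for a quick check.
def _colortype_demo():
    ct = ColorType(length=6)
    stored = ct.process_bind_param(Color(0x0000ff), None)  # hex string form
    return ct.process_result_value(stored, None)  # back to a Color instance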
class Sport(DeclarativeBase):
"""A type of exercise. For example: "running" or "cycling"."""
__tablename__ = 'sports'
color = Column(ColorType(length=6), nullable=False)
id = Column('id_sports', Integer, primary_key=True, nullable=False)
max_pace = Column(ForcedInteger, CheckConstraint('max_pace>=0'))
met = Column(Float, CheckConstraint('met>=0'))
name = Column(Unicode(length=100), nullable=False, unique=True, index=True)
weight = Column(Float, CheckConstraint('weight>=0'), nullable=False)
def __init__(self, **kwargs):
self.name = u""
self.weight = 0.0
self.met = None
self.max_pace = None
self.color = Color(0x0000ff)
super(Sport, self).__init__(**kwargs)
class SportServiceException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class SportService(object):
"""Provides access to stored sports."""
def __init__(self, ddbb):
self._ddbb = ddbb
def get_sport(self, sport_id):
"""Get the sport with the specified id.
If no sport with the given id exists then None is returned."""
if sport_id is None:
raise ValueError("Sport id cannot be None")
try:
return self._ddbb.session.query(Sport).filter(Sport.id == sport_id).one()
except NoResultFound:
return None
def get_sport_by_name(self, name):
"""Get the sport with the specified name.
If no sport with the given name exists then None is returned."""
if name is None:
raise ValueError("Sport name cannot be None")
try:
return self._ddbb.session.query(Sport).filter(Sport.name == name).one()
except NoResultFound:
return None
def get_all_sports(self):
"""Get all stored sports."""
return self._ddbb.session.query(Sport).all()
def store_sport(self, sport):
"""Store a new or update an existing sport.
The stored object is returned."""
try:
self._ddbb.session.add(sport)
self._ddbb.session.commit()
except IntegrityError:
raise SportServiceException("")
return sport
def remove_sport(self, sport):
"""Delete a stored sport.
All records associated with the sport will also be deleted."""
if not sport.id:
raise SportServiceException("Cannot remove sport which has not been stored: '{0}'.".format(sport.name))
try:
self._ddbb.session.delete(sport)
self._ddbb.session.commit()
except InvalidRequestError:
raise SportServiceException("Sport id %s not found" % sport.id)
logging.debug("Deleted sport: %s", sport.name) | null |
extract data | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import MySQLManagementClientMixinABC, _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(location_name: str, operation_id: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2018-06-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/providers/Microsoft.DBforMySQL/locations/{locationName}/recommendedActionSessionsOperationResults/{operationId}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"locationName": _SERIALIZER.url("location_name", location_name, "str"),
"operationId": _SERIALIZER.url("operation_id", operation_id, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class LocationBasedRecommendedActionSessionsResultOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.rdbms.mysql.MySQLManagementClient`'s
:attr:`location_based_recommended_action_sessions_result` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, location_name: str, operation_id: str, **kwargs: Any) -> Iterable["_models.RecommendationAction"]:
"""Recommendation action session operation result.
:param location_name: The name of the location. Required.
:type location_name: str
:param operation_id: The operation identifier. Required.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RecommendationAction or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.rdbms.mysql.models.RecommendationAction]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2018-06-01"))
cls: ClsType[_models.RecommendationActionsResultList] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
location_name=location_name,
operation_id=operation_id,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def METHOD_NAME(pipeline_response):
deserialized = self._deserialize("RecommendationActionsResultList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, METHOD_NAME)
list.metadata = {
"url": "/subscriptions/{subscriptionId}/providers/Microsoft.DBforMySQL/locations/{locationName}/recommendedActionSessionsOperationResults/{operationId}"
} | null |
test morphsnakes simple shape geodesic active contour | import numpy as np
import pytest
from numpy.testing import assert_array_equal
from skimage.segmentation import (disk_level_set,
inverse_gaussian_gradient,
morphological_chan_vese,
morphological_geodesic_active_contour)
def gaussian_blob():
coords = np.mgrid[-5:6, -5:6]
sqrdistances = (coords ** 2).sum(0)
return np.exp(-sqrdistances / 10)
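# gaussian_blob() above returns an 11x11 synthetic image: a smooth Gaussian bump that
# peaks at 1.0 in the centre and falls off towards the borders.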
def test_morphsnakes_incorrect_image_shape():
img = np.zeros((10, 10, 3))
ls = np.zeros((10, 9))
with pytest.raises(ValueError):
morphological_chan_vese(img, num_iter=1, init_level_set=ls)
with pytest.raises(ValueError):
morphological_geodesic_active_contour(img, num_iter=1,
init_level_set=ls)
def test_morphsnakes_incorrect_ndim():
img = np.zeros((4, 4, 4, 4))
ls = np.zeros((4, 4, 4, 4))
with pytest.raises(ValueError):
morphological_chan_vese(img, num_iter=1, init_level_set=ls)
with pytest.raises(ValueError):
morphological_geodesic_active_contour(img, num_iter=1,
init_level_set=ls)
def test_morphsnakes_black():
img = np.zeros((11, 11))
ls = disk_level_set(img.shape, center=(5, 5), radius=3)
ref_zeros = np.zeros(img.shape, dtype=np.int8)
ref_ones = np.ones(img.shape, dtype=np.int8)
acwe_ls = morphological_chan_vese(img, num_iter=6, init_level_set=ls)
assert_array_equal(acwe_ls, ref_zeros)
gac_ls = morphological_geodesic_active_contour(img, num_iter=6,
init_level_set=ls)
assert_array_equal(gac_ls, ref_zeros)
gac_ls2 = morphological_geodesic_active_contour(img, num_iter=6,
init_level_set=ls,
balloon=1, threshold=-1,
smoothing=0)
assert_array_equal(gac_ls2, ref_ones)
assert acwe_ls.dtype == gac_ls.dtype == gac_ls2.dtype == np.int8
def test_morphsnakes_simple_shape_chan_vese():
img = gaussian_blob()
ls1 = disk_level_set(img.shape, center=(5, 5), radius=3)
ls2 = disk_level_set(img.shape, center=(5, 5), radius=6)
acwe_ls1 = morphological_chan_vese(img, num_iter=10, init_level_set=ls1)
acwe_ls2 = morphological_chan_vese(img, num_iter=10, init_level_set=ls2)
assert_array_equal(acwe_ls1, acwe_ls2)
assert acwe_ls1.dtype == acwe_ls2.dtype == np.int8
def METHOD_NAME():
img = (disk_level_set((11, 11), center=(5, 5), radius=3.5)).astype(float)
gimg = inverse_gaussian_gradient(img, alpha=10.0, sigma=1.0)
ls = disk_level_set(img.shape, center=(5, 5), radius=6)
ref = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
dtype=np.int8)
gac_ls = morphological_geodesic_active_contour(gimg, num_iter=10,
init_level_set=ls,
balloon=-1)
assert_array_equal(gac_ls, ref)
assert gac_ls.dtype == np.int8
def test_init_level_sets():
image = np.zeros((6, 6))
checkerboard_ls = morphological_chan_vese(image, 0, 'checkerboard')
checkerboard_ref = np.array([[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 0]], dtype=np.int8)
disk_ls = morphological_geodesic_active_contour(image, 0, 'disk')
disk_ref = np.array([[0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 0]], dtype=np.int8)
assert_array_equal(checkerboard_ls, checkerboard_ref)
assert_array_equal(disk_ls, disk_ref)
def test_morphsnakes_3d():
image = np.zeros((7, 7, 7))
evolution = []
def callback(x):
evolution.append(x.sum())
ls = morphological_chan_vese(image, 5, 'disk',
iter_callback=callback)
# Check that the initial disk level set is correct
assert evolution[0] == 81
# Check that the final level set is correct
assert ls.sum() == 0
# Check that the contour is shrinking at every iteration
for v1, v2 in zip(evolution[:-1], evolution[1:]):
assert v1 >= v2 | null |
get label | #!/usr/bin/env python
# --!-- coding: utf8 --!--
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QBrush
from PyQt5.QtWidgets import QComboBox
from manuskript.enums import Outline
class cmbOutlineLabelChoser(QComboBox):
def __init__(self, parent=None):
QComboBox.__init__(self, parent)
self.activated[int].connect(self.submit)
self._column = Outline.label
self._index = None
self._indexes = None
self._updating = False
self._various = False
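# _various tracks whether a temporary "Various" entry is currently shown because the
# selected indexes carry different labels (see updateSelectedItem below).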
def setModels(self, mdlLabels, mdlOutline):
self.mdlLabels = mdlLabels
self.mdlLabels.dataChanged.connect(self.updateItems)
self.mdlOutline = mdlOutline
self.mdlOutline.dataChanged.connect(self.update)
self.updateItems()
def updateItems(self):
self.clear()
for i in range(self.mdlLabels.rowCount()):
item = self.mdlLabels.item(i, 0)
if item:
self.addItem(item.icon(),
item.text())
self._various = False
if self._index or self._indexes:
self.updateSelectedItem()
def setCurrentModelIndex(self, index):
self._indexes = None
if index.column() != self._column:
index = index.sibling(index.row(), self._column)
self._index = index
self.updateItems()
self.updateSelectedItem()
def setCurrentModelIndexes(self, indexes):
self._indexes = []
self._index = None
for i in indexes:
if i.isValid():
if i.column() != self._column:
i = i.sibling(i.row(), self._column)
self._indexes.append(i)
self.updateItems()
self.updateSelectedItem()
def update(self, topLeft, bottomRight):
if self._updating:
# We are currently putting data in the model, so no updates
return
if self._index:
if topLeft.row() <= self._index.row() <= bottomRight.row():
self.updateSelectedItem()
elif self._indexes:
update = False
for i in self._indexes:
if topLeft.row() <= i.row() <= bottomRight.row():
update = True
if update:
self.updateSelectedItem()
def METHOD_NAME(self, index):
item = index.internalPointer()
label = item.data(self._column)
if not label:
label = 0
return int(label)
def updateSelectedItem(self):
if self._updating:
return
if self._index:
label = self.METHOD_NAME(self._index)
self.setCurrentIndex(label)
elif self._indexes:
labels = []
same = True
for i in self._indexes:
labels.append(self.METHOD_NAME(i))
for lbl in labels[1:]:
if lbl != labels[0]:
same = False
break
if same:
self._various = False
self.setCurrentIndex(labels[0])
else:
if not self._various:
self.insertItem(0, self.tr("Various"))
f = self.font()
f.setItalic(True)
self.setItemData(0, f, Qt.FontRole)
self.setItemData(0, QBrush(Qt.darkGray), Qt.ForegroundRole)
self._various = True
self.setCurrentIndex(0)
else:
self.setCurrentIndex(0)
def submit(self, idx):
if self._index:
self.mdlOutline.setData(self._index, self.currentIndex())
elif self._indexes:
value = self.currentIndex()
if self._various:
if value == 0:
return
value -= 1
self._updating = True
for i in self._indexes:
self.mdlOutline.setData(i, value)
self._updating = False | null |
dats path | import json
import pytest
from scripts.dats_jsonld_annotator.annotator import annotate_dats_object
from scripts.dats_jsonld_annotator.annotator import CONTEXT_DIR
from scripts.dats_jsonld_annotator.annotator import find_context
from scripts.dats_jsonld_annotator.annotator import find_schema
from scripts.dats_jsonld_annotator.annotator import gen_jsonld_outpath
from scripts.dats_jsonld_annotator.annotator import SCHEMA_DIR
@pytest.fixture()
def basic_dats():
return {"properties": {"my_term": {"bad_key": ""}}, "id": ""}
@pytest.fixture()
def dats_dataset_schema():
return json.load(open(SCHEMA_DIR / "dataset_schema.json"))
@pytest.fixture()
def dats_person_schema():
return json.load(open(SCHEMA_DIR / "person_schema.json"))
@pytest.fixture()
def basic_json():
return {}
@pytest.fixture()
def dats_person_instance():
return {"firstName": "Gustav", "lastName": "Gans"}
@pytest.fixture()
def dats_jsonld_person_instance():
return {
"jsonld": {"firstName": "Gustav", "lastName": "Gans", "@type": "Person"},
"context": {
"firstName": "sdo:givenName",
"lastName": "sdo:familyName",
"Person": "sdo:Person",
},
}
@pytest.fixture()
def dats_dataset_instance(dats_person_instance):
return {
"title": "",
"types": {},
"creators": dats_person_instance,
"licenses": {"name": "license"},
"description": "",
"keywords": ["a", {"value": "word"}],
"version": "",
"distributions": {},
}
@pytest.fixture()
def dats_jsonld_dataset_instance():
return {
"jsonld": {
"title": "",
"types": {"@type": "DataType"},
"creators": {"firstName": "Gustav", "lastName": "Gans", "@type": "Person"},
"licenses": {"name": "license", "@type": "License"},
"description": "",
"keywords": ["a", {"value": "word", "@type": "Annotation"}],
"version": "",
"distributions": {"@type": "DatasetDistribution"},
"@type": "Dataset",
},
"context": {
"title": {"@id": "sdo:name", "@type": "sdo:Text"},
"DataType": "sdo:Thing",
"firstName": "sdo:givenName",
"lastName": "sdo:familyName",
"Person": "sdo:Person",
"creators": {"@id": "sdo:creator", "@type": "sdo:Thing"},
"name": {"@id": "sdo:name", "@type": "sdo:Text"},
"License": "sdo:CreativeWork",
"licenses": "sdo:license",
"description": {"@id": "sdo:description", "@type": "sdo:Text"},
"value": {"@id": "sdo:value", "@type": "sdo:DataType"},
"Annotation": "sdo:Thing",
"DatasetDistribution": "sdo:DataDownload",
"distributions": {"@id": "sdo:distribution", "@type": "sdo:DataDownload"},
"Dataset": "sdo:Dataset",
"keywords": {"@id": "sdo:keywords", "@type": "sdo:Thing"},
"version": {"@id": "sdo:version", "@type": "sdo:Thing"},
"types": {"@id": "sdo:identifier", "@type": "sdo:Thing"},
},
}
@pytest.fixture()
def METHOD_NAME(tmp_path):
return tmp_path / "dats_root" / "DATS.json"
class TestFindSchema:
def test_missing_term(self, basic_dats, basic_json):
term = "missing_term"
assert find_schema(basic_dats, term, basic_json) is None
def test_bad_value(self, basic_dats, basic_json):
term = "my_term"
assert find_schema(basic_dats, term, basic_json) is None
def test_term_with_a_single_possible_schema(self, dats_dataset_schema, basic_json):
term = "identifier"
test_schema = find_schema(dats_dataset_schema, term, basic_json)
assert test_schema["id"].split("/")[-1] == "identifier_info_schema.json"
def test_term_with_multiple_possible_schemata(
self, dats_dataset_schema, dats_person_instance
):
term = "creators"
test_schema = find_schema(dats_dataset_schema, term, dats_person_instance)
assert test_schema["id"].split("/")[-1] == "person_schema.json"
def test_term_with_recursive_schema(
self, dats_dataset_schema, dats_dataset_instance
):
term = "hasPart"
test_schema = find_schema(dats_dataset_schema, term, dats_dataset_instance)
assert test_schema["id"].split("/")[-1] == "dataset_schema.json"
class TestFindContext:
def test_find_dataset_context(self):
schema_id = "/remote/dataset_schema.json"
context = find_context(schema_id, CONTEXT_DIR)
assert context.get("Dataset") == "sdo:Dataset"
class TestWalkSchema:
def test_not_a_json_object(self, dats_dataset_schema):
test_result = annotate_dats_object(
None, dats_dataset_schema, {}, context_dir=CONTEXT_DIR
)
assert test_result == (None, {})
def test_simple_instance(
self, dats_person_schema, dats_person_instance, dats_jsonld_person_instance
):
jsonld, context = annotate_dats_object(
dats_person_instance, dats_person_schema, {}, context_dir=CONTEXT_DIR
)
assert jsonld == dats_jsonld_person_instance["jsonld"]
assert context == dats_jsonld_person_instance["context"]
def test_recursive_instance(
self, dats_dataset_schema, dats_dataset_instance, dats_jsonld_dataset_instance
):
jsonld, context = annotate_dats_object(
dats_dataset_instance, dats_dataset_schema, {}, context_dir=CONTEXT_DIR
)
assert jsonld == dats_jsonld_dataset_instance["jsonld"]
assert context == dats_jsonld_dataset_instance["context"]
class TestGenerateJsonldPath:
def test_output_dir_does_exist(self, tmp_path, METHOD_NAME):
out_path = tmp_path / "jsonld_out"
out_path.mkdir()
result_path = gen_jsonld_outpath(METHOD_NAME, out_path)
assert result_path == out_path / "dats_root_DATS.jsonld"
def test_output_dir_doesnt_exist(self, tmp_path, METHOD_NAME):
# If the output dir does not exist when we call gen_jsonld_outpath
# Then we want this to error out here
# We only create output directories at the start
out_path = tmp_path / "nonexistent"
with pytest.raises(ValueError):
gen_jsonld_outpath(METHOD_NAME, out_path)
def test_output_path_is_none(self, tmp_path, METHOD_NAME):
METHOD_NAME.parent.mkdir()
out_path = None
result_path = gen_jsonld_outpath(METHOD_NAME, out_path)
assert result_path == METHOD_NAME.parent / "DATS.jsonld"
def test_paths_are_string(self, tmp_path, METHOD_NAME):
out_path = tmp_path / "jsonld_out"
out_path.mkdir()
result_path = gen_jsonld_outpath(str(METHOD_NAME), str(out_path))
assert result_path == out_path / "dats_root_DATS.jsonld"
def test_output_is_file(self, tmp_path, METHOD_NAME):
out_path = tmp_path / "output_here_please.jsonld"
result_path = gen_jsonld_outpath(str(METHOD_NAME), str(out_path))
assert result_path == out_path
def test_output_dir_fails(self, tmp_path, METHOD_NAME):
out_path = tmp_path / "some" / "arbitrary" / "path"
with pytest.raises(ValueError):
gen_jsonld_outpath(METHOD_NAME, out_path)
class TestAnnotator:
# TODO: write tests for the annotator function
pass
class TestCLI:
# TODO: write tests for the CLI parser
pass | null |
test pending ops | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import unittest
from copy import deepcopy
import numpy as np
from parameterized import parameterized
from monai.data.meta_tensor import MetaTensor
from monai.transforms import RandCropByPosNegLabeld
from monai.transforms.lazy.functional import apply_pending
from tests.utils import TEST_NDARRAYS_ALL, assert_allclose
TESTS = [
[
{
"keys": ["image", "extra", "label"],
"label_key": "label",
"spatial_size": [-1, 2, 2],
"pos": 1,
"neg": 1,
"num_samples": 2,
"image_key": None,
"image_threshold": 0,
},
{
"image": np.random.randint(0, 2, size=[3, 3, 3, 3]),
"extra": np.random.randint(0, 2, size=[3, 3, 3, 3]),
"label": np.random.randint(0, 2, size=[3, 3, 3, 3]),
},
(3, 3, 2, 2),
],
[
{
"keys": ["image", "extra", "label"],
"label_key": "label",
"spatial_size": [2, 2, 2],
"pos": 1,
"neg": 1,
"num_samples": 2,
"image_key": None,
"image_threshold": 0,
},
{
"image": np.random.randint(0, 2, size=[3, 3, 3, 3]),
"extra": np.random.randint(0, 2, size=[3, 3, 3, 3]),
"label": np.random.randint(0, 2, size=[3, 3, 3, 3]),
},
(3, 2, 2, 2),
],
[
{
"keys": ["image", "extra", "label"],
"label_key": "label",
"spatial_size": [2, 2, 2],
"pos": 1,
"neg": 1,
"num_samples": 2,
"image_key": None,
"image_threshold": 0,
},
{"image": np.zeros([3, 3, 3, 3]) - 1, "extra": np.zeros([3, 3, 3, 3]), "label": np.ones([3, 3, 3, 3])},
(3, 2, 2, 2),
],
[
{
"keys": ["image", "extra", "label"],
"label_key": "label",
"spatial_size": [4, 4, 2],
"pos": 1,
"neg": 1,
"num_samples": 2,
"image_key": None,
"image_threshold": 0,
"allow_smaller": True,
},
{"image": np.zeros([3, 3, 3, 3]) - 1, "extra": np.zeros([3, 3, 3, 3]), "label": np.ones([3, 3, 3, 3])},
(3, 3, 3, 2),
],
[
{
"keys": ["image", "extra", "label"],
"label_key": "label",
"spatial_size": [4, 4, 4],
"pos": 1,
"neg": 1,
"num_samples": 2,
"image_key": None,
"image_threshold": 0,
"allow_smaller": True,
},
{"image": np.zeros([3, 3, 3, 3]) - 1, "extra": np.zeros([3, 3, 3, 3]), "label": np.ones([3, 3, 3, 3])},
(3, 3, 3, 3),
],
]
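# Each TESTS entry is (transform parameters, input data dict, expected per-sample crop shape).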
class TestRandCropByPosNegLabeld(unittest.TestCase):
@staticmethod
def convert_data_type(im_type, d, keys=("img", "image", "label")):
out = deepcopy(d)
for k, v in out.items():
if k in keys and isinstance(v, np.ndarray):
out[k] = im_type(v)
return out
@parameterized.expand(TESTS)
def test_type_shape(self, input_param, input_data, expected_shape):
for p in TEST_NDARRAYS_ALL:
input_param_mod = self.convert_data_type(p, input_param)
input_data_mod = self.convert_data_type(p, input_data)
cropper = RandCropByPosNegLabeld(**input_param_mod)
cropper.set_random_state(0)
result = cropper(input_data_mod)
self.assertListEqual(cropper.cropper.spatial_size, input_param["spatial_size"])
self.assertIsInstance(result, list)
_len = len(tuple(input_data.keys()))
self.assertTupleEqual(tuple(result[0].keys())[:_len], tuple(input_data.keys()))
for k in ("image", "extra", "label"):
self.assertTupleEqual(result[0][k].shape, expected_shape)
for i, item in enumerate(result):
self.assertEqual(item[k].meta["patch_index"], i)
def test_correct_center(self):
cropper = RandCropByPosNegLabeld(keys="label", label_key="label", spatial_size=[3, 3])
cropper.set_random_state(0)
test_image = {"label": np.asarray([[[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]]])}
result = cropper(test_image)
np.testing.assert_allclose(result[0]["label"], np.asarray([[[0, 0, 1], [0, 0, 0], [0, 0, 0]]]))
@parameterized.expand(TESTS)
def METHOD_NAME(self, input_param, input_data, _expected_shape):
for p in TEST_NDARRAYS_ALL:
input_param_mod = self.convert_data_type(p, input_param)
input_data_mod = self.convert_data_type(p, input_data)
cropper = RandCropByPosNegLabeld(**input_param_mod)
# non-lazy
cropper.set_random_state(0)
expected = cropper(input_data_mod)
self.assertIsInstance(expected[0]["image"], MetaTensor)
# lazy
cropper.set_random_state(0)
cropper.lazy = True
pending_result = cropper(input_data_mod)
for i, _pending_result in enumerate(pending_result):
self.assertIsInstance(_pending_result["image"], MetaTensor)
assert_allclose(_pending_result["image"].peek_pending_affine(), expected[i]["image"].affine)
assert_allclose(_pending_result["image"].peek_pending_shape(), expected[i]["image"].shape[1:])
# only support nearest
overrides = {"mode": "nearest", "align_corners": False}
result_image = apply_pending(_pending_result["image"], overrides=overrides)[0]
result_extra = apply_pending(_pending_result["extra"], overrides=overrides)[0]
# compare
assert_allclose(result_image, expected[i]["image"], rtol=1e-5)
assert_allclose(result_extra, expected[i]["extra"], rtol=1e-5)
if __name__ == "__main__":
unittest.main() | null |
test missing signing key | from unittest.mock import Mock, patch
from canonicalwebteam.store_api.exceptions import StoreApiResponseErrorList
from tests.admin.tests_models import TestModelServiceEndpoints
class TestGetPolicies(TestModelServiceEndpoints):
@patch("webapp.admin.views.admin_api.get_store_model_policies")
def test_get_policies(self, mock_get_store_model_policies):
mock_get_store_model_policies.return_value = ["policy1", "policy2"]
response = self.client.get("/admin/store/1/models/Model1/policies")
data = response.json
self.assertEqual(response.status_code, 200)
self.assertTrue(data["success"])
self.assertEqual(data["data"], ["policy1", "policy2"])
@patch("webapp.admin.views.admin_api.get_store_model_policies")
def test_failed_get_policies(self, mock_get_store_model_policies):
mock_get_store_model_policies.side_effect = StoreApiResponseErrorList(
"An error occurred", 500, [{"message": "An error occurred"}]
)
response = self.client.get("/admin/store/1/models/Model1/policies")
data = response.json
self.assertEqual(response.status_code, 500)
self.assertFalse(data["success"])
self.assertEqual(data["message"], "An error occurred")
class TestCreatePolicies(TestModelServiceEndpoints):
@patch("webapp.admin.views.admin_api.get_store_signing_keys")
@patch("webapp.admin.views.admin_api.create_store_model_policy")
def test_create_policy(
self, mock_create_store_model_policy, mock_get_store_signing_keys
):
mock_get_store_signing_keys.return_value = [
{"sha3-384": "valid_signing_key"}
]
mock_create_store_model_policy.return_value = None
payload = {"signing_key": "valid_signing_key"}
response = self.client.post(
"/admin/store/1/models/Model1/policies", data=payload
)
data = response.json
self.assertEqual(response.status_code, 200)
self.assertTrue(data["success"])
@patch("webapp.admin.views.admin_api.get_store_signing_keys")
def METHOD_NAME(self, mock_get_store_signing_keys):
mock_get_store_signing_keys.return_value = [
{"sha3-384": "valid_signing_key"}
]
payload = {}
response = self.client.post(
"/admin/store/1/models/Model1/policies", data=payload
)
data = response.json
self.assertEqual(response.status_code, 500)
self.assertFalse(data["success"])
self.assertEqual(data["message"], "Signing key required")
@patch("webapp.admin.views.admin_api.get_store_signing_keys")
def test_invalid_signing_key(self, mock_get_store_signing_keys):
mock_get_store_signing_keys.return_value = [{"sha3-384": "valid_key"}]
payload = {"signing_key": "invalid_key"}
response = self.client.post(
"/admin/store/1/models/Model1/policies", data=payload
)
data = response.json
self.assertEqual(response.status_code, 500)
self.assertFalse(data["success"])
self.assertEqual(data["message"], "Invalid signing key")
@patch("webapp.admin.views.admin_api.get_store_signing_keys")
@patch("webapp.admin.views.admin_api.create_store_model_policy")
def test_exception_in_create_policy(
self,
mock_create_store_model_policy,
mock_get_store_signing_keys,
):
mock_get_store_signing_keys.return_value = [{"sha3-384": "valid_key"}]
mock_create_store_model_policy.side_effect = StoreApiResponseErrorList(
"Simulated failure", 500, [{"message": "An error occurred"}]
)
payload = {"signing_key": "valid_key"}
response = self.client.post(
"/admin/store/1/models/Model1/policies", data=payload
)
data = response.json
self.assertEqual(response.status_code, 500)
self.assertFalse(data["success"])
self.assertEqual(data["message"], "An error occurred")
class TestDeletePolicies(TestModelServiceEndpoints):
@patch("webapp.admin.views.admin_api.delete_store_model_policy")
def test_successful_delete_policy(self, mock_delete_store_model_policy):
mock_delete_store_model_policy.return_value = Mock(status_code=204)
response = self.client.delete(
"/admin/store/1/models/Model1/policies/1"
)
data = response.json
self.assertEqual(response.status_code, 200)
self.assertTrue(data["success"])
@patch("webapp.admin.views.admin_api.delete_store_model_policy")
def test_policy_not_found(self, mock_delete_store_model_policy):
mock_delete_store_model_policy.return_value = Mock(status_code=404)
response = self.client.delete(
"/admin/store/1/models/Model1/policies/1"
)
data = response.json
self.assertEqual(response.status_code, 500)
self.assertFalse(data["success"])
self.assertEqual(data["message"], "Policy not found")
@patch("webapp.admin.views.admin_api.delete_store_model_policy")
def test_exception_in_delete_policy(self, mock_delete_store_model_policy):
mock_delete_store_model_policy.side_effect = StoreApiResponseErrorList(
"An error occured", 500, [{"message": "An error occurred"}]
)
response = self.client.delete(
"/admin/store/1/models/Model1/policies/1"
)
data = response.json
self.assertEqual(response.status_code, 500)
self.assertFalse(data["success"])
self.assertEqual(data["message"], "An error occurred") | null |
get browser name | import os
import sqlite3
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, timeline, is_platform_windows, get_next_unused_name, open_sqlite_db_readonly
def METHOD_NAME(file_name):
if 'brave' in file_name.lower():
return 'Brave'
elif 'microsoft' in file_name.lower():
return 'Edge'
elif 'opera' in file_name.lower():
return 'Opera'
elif 'android.chrome' in file_name.lower():
return 'Chrome'
else:
return 'Unknown'
def get_chromeMediaHistory(files_found, report_folder, seeker, wrap_text):
for file_found in files_found:
file_found = str(file_found)
if not file_found.endswith('Media History'):
continue # Skip all other files
browser_name = METHOD_NAME(file_found)
if file_found.find('app_sbrowser') >= 0:
browser_name = 'Browser'
elif file_found.find('.magisk') >= 0 and file_found.find('mirror') >= 0:
continue # Skip sbin/.magisk/mirror/data/.., which appears to be duplicate data
db = open_sqlite_db_readonly(file_found)
cursor = db.cursor()
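# Chromium stores Media History timestamps as seconds since the WebKit epoch (1601-01-01);
# subtracting 11644473600 in the queries below converts them to the Unix epoch for datetime().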
cursor.execute('''
select
datetime(last_updated_time_s-11644473600, 'unixepoch') as last_updated_time_s,
origin_id,
url,
strftime('%H:%M:%S',position_ms/1000, 'unixepoch') as position_ms,
strftime('%H:%M:%S',duration_ms/1000, 'unixepoch') as duration_ms,
title,
artist,
album,
source_title
from playbackSession
''')
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
report = ArtifactHtmlReport(f'{browser_name} - Media History - Sessions')
#check for existing and get next name for report file, so report from another file does not get overwritten
report_path = os.path.join(report_folder, f'{browser_name} - Media History - Sessions.temphtml')
report_path = get_next_unused_name(report_path)[:-9] # remove .temphtml
report.start_artifact_report(report_folder, os.path.basename(report_path))
report.add_script()
data_headers = ('Last Updated','Origin ID','URL','Position','Duration','Title','Artist','Album','Source Title') # column headers for the HTML report, TSV and timeline output
data_list = []
for row in all_rows:
data_list.append((row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8]))
report.write_artifact_data_table(data_headers, data_list, file_found)
report.end_artifact_report()
tsvname = f'{browser_name} - Media History - Sessions'
tsv(report_folder, data_headers, data_list, tsvname)
tlactivity = f'{browser_name} - Media History - Sessions'
timeline(report_folder, tlactivity, data_list, data_headers)
else:
logfunc(f'No {browser_name} - Media History - Sessions data available')
cursor.execute('''
select
datetime(last_updated_time_s-11644473600, 'unixepoch') as last_updated_time_s,
id,
origin_id,
url,
strftime('%H:%M:%S',watch_time_s, 'unixepoch') as watch_time_s,
case has_audio
when 0 then ''
when 1 then 'Yes'
end as has_audio,
case has_video
when 0 then ''
when 1 then 'Yes'
end as has_video
from playback
''')
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
report = ArtifactHtmlReport(f'{browser_name} - Media History - Playbacks')
#check for existing and get next name for report file, so report from another file does not get overwritten
report_path = os.path.join(report_folder, f'{browser_name} - Media History - Playbacks.temphtml')
report_path = get_next_unused_name(report_path)[:-9] # remove .temphtml
report.start_artifact_report(report_folder, os.path.basename(report_path))
report.add_script()
data_headers = ('Last Updated','ID','Origin ID','URL','Watch Time','Has Audio','Has Video') # column headers for the HTML report, TSV and timeline output
data_list = []
for row in all_rows:
data_list.append((row[0],row[1],row[2],row[3],row[4],row[5],row[6]))
report.write_artifact_data_table(data_headers, data_list, file_found)
report.end_artifact_report()
tsvname = f'{browser_name} - Media History - Playbacks'
tsv(report_folder, data_headers, data_list, tsvname)
tlactivity = f'{browser_name} - Media History - Playbacks'
timeline(report_folder, tlactivity, data_list, data_headers)
else:
logfunc(f'No {browser_name} - Media History - Playbacks data available')
cursor.execute('''
select
datetime(last_updated_time_s-11644473600, 'unixepoch') as last_updated_time_s,
id,
origin,
cast(aggregate_watchtime_audio_video_s/86400 as integer) || ':' || strftime('%H:%M:%S', aggregate_watchtime_audio_video_s ,'unixepoch') as aggregate_watchtime_audio_video_s
from origin
''')
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
report = ArtifactHtmlReport(f'{browser_name} - Media History - Origins')
#check for existing and get next name for report file, so report from another file does not get overwritten
report_path = os.path.join(report_folder, f'{browser_name} - Media History - Origins.temphtml')
report_path = get_next_unused_name(report_path)[:-9] # remove .temphtml
report.start_artifact_report(report_folder, os.path.basename(report_path))
report.add_script()
data_headers = ('Last Updated','ID','Origin','Aggregate Watchtime') # column headers for the HTML report, TSV and timeline output
data_list = []
for row in all_rows:
data_list.append((row[0],row[1],row[2],row[3]))
report.write_artifact_data_table(data_headers, data_list, file_found)
report.end_artifact_report()
tsvname = f'{browser_name} - Media History - Origins'
tsv(report_folder, data_headers, data_list, tsvname)
tlactivity = f'{browser_name} - Media History - Origins'
timeline(report_folder, tlactivity, data_list, data_headers)
else:
logfunc(f'No {browser_name} - Media History - Origins data available')
db.close()
__artifacts__ = {
"ChromeMediaHistory": (
"Chromium",
('*/app_chrome/Default/Media History*','*/app_sbrowser/Default/Media History*', '*/app_opera/Media History*'),
get_chromeMediaHistory)
| null |
test time | # Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import bz2
import urllib.request
import numpy as np
import datetime
import line_profiler
profile = line_profiler.LineProfiler()
import os
from sklearn.datasets import load_svmlight_file
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import SelectFromModel
from nni.algorithms.feature_engineering.gradient_selector import FeatureGradientSelector
class Benchmark():
def __init__(self, files=None, test_size=0.2):
self.files = files
self.test_size = test_size
def run_all_test(self, pipeline):
for file_name in self.files:
file_path = self.files[file_name]
self.run_test(pipeline, file_name, file_path)
def run_test(self, pipeline, name, path):
print("download " + name)
update_name = self.download(name, path)
X, y = load_svmlight_file(update_name)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=self.test_size, random_state=42)
pipeline.fit(X_train, y_train)
print("[Benchmark "+ name + " Score]: ", pipeline.score(X_test, y_test))
def download(self, name, path):
old_name = name + '_train.bz2'
update_name = name + '_train.svm'
if os.path.exists(old_name) and os.path.exists(update_name):
return update_name
urllib.request.urlretrieve(path, filename=old_name)
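# the downloaded LIBSVM file is bz2-compressed; decompress it to a plain-text .svm file
# so sklearn's load_svmlight_file can read it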
f_svm = open(update_name, 'wt')
with bz2.open(old_name, 'rb') as f_zip:
data = f_zip.read()
f_svm.write(data.decode('utf-8'))
f_svm.close()
return update_name
@profile
def test_memory(pipeline_name, name, path):
if pipeline_name == "LR":
pipeline = make_pipeline(LogisticRegression())
if pipeline_name == "FGS":
pipeline = make_pipeline(FeatureGradientSelector(), LogisticRegression())
if pipeline_name == "Tree":
pipeline = make_pipeline(SelectFromModel(ExtraTreesClassifier(n_estimators=50)), LogisticRegression())
test_benchmark = Benchmark()
print("Dataset:\t", name)
print("Pipeline:\t", pipeline_name)
test_benchmark.run_test(pipeline, name, path)
print("")
def METHOD_NAME(pipeline_name, name, path):
if pipeline_name == "LR":
pipeline = make_pipeline(LogisticRegression())
if pipeline_name == "FGS":
pipeline = make_pipeline(FeatureGradientSelector(), LogisticRegression())
if pipeline_name == "Tree":
pipeline = make_pipeline(SelectFromModel(ExtraTreesClassifier(n_estimators=50)), LogisticRegression())
test_benchmark = Benchmark()
print("Dataset:\t", name)
print("Pipeline:\t", pipeline_name)
starttime = datetime.datetime.now()
test_benchmark.run_test(pipeline, name, path)
endtime = datetime.datetime.now()
print("Used time: ", (endtime - starttime).microseconds/1000)
print("")
if __name__ == "__main__":
LIBSVM_DATA = {
"rcv1" : "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/rcv1_train.binary.bz2",
"colon-cancer" : "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/covtype.libsvm.binary.bz2",
"gisette" : "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/gisette_scale.bz2",
"news20.binary" : "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/news20.binary.bz2",
"real-sim" : "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/real-sim.bz2",
"webspam" : "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/webspam_wc_normalized_trigram.svm.bz2",
"avazu" : "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/avazu-app.bz2"
}
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--pipeline_name', type=str, help='display pipeline_name.')
parser.add_argument('--name', type=str, help='display name.')
parser.add_argument('--object', type=str, help='display test object: time or memory.')
args = parser.parse_args()
pipeline_name = args.pipeline_name
name = args.name
test_object = args.object
path = LIBSVM_DATA[name]
if test_object == 'time':
METHOD_NAME(pipeline_name, name, path)
elif test_object == 'memory':
test_memory(pipeline_name, name, path)
else:
print("Not support test object.\t", test_object)
print("Done.") | null |
test records ui request | from functools import cached_property
from unittest.mock import Mock, patch, sentinel
from django.test import RequestFactory, override_settings
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from sentry.api.base import Endpoint
from sentry.middleware.ratelimit import RatelimitMiddleware
from sentry.middleware.stats import RequestTimingMiddleware, add_request_metric_tags
from sentry.testutils.cases import TestCase
from sentry.types.ratelimit import RateLimit, RateLimitCategory
class RateLimitedEndpoint(Endpoint):
permission_classes = (AllowAny,)
enforce_rate_limit = True
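# a limit of 0 requests per 10-second window: every request to this endpoint is rejected as rate limited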
rate_limits = {"GET": {RateLimitCategory.IP: RateLimit(0, 10)}}
def get(self):
return Response({"ok": True})
class RequestTimingMiddlewareTest(TestCase):
middleware = cached_property(RequestTimingMiddleware)
@cached_property
def factory(self):
return RequestFactory()
@patch("sentry.utils.metrics.incr")
def test_records_default_api_metrics(self, incr):
request = self.factory.get("/")
request._view_path = "/"
response = Mock(status_code=200)
self.middleware.process_response(request, response)
incr.assert_called_with(
"view.response",
instance=request._view_path,
tags={
"method": "GET",
"status_code": 200,
"ui_request": False,
"rate_limit_type": None,
},
skip_internal=False,
)
@patch("sentry.utils.metrics.incr")
@override_settings(SENTRY_SELF_HOSTED=False)
def test_records_default_api_metrics_with_rate_limit_type(self, incr):
rate_limit_middleware = RatelimitMiddleware(sentinel.callback)
test_endpoint = RateLimitedEndpoint.as_view()
request = self.factory.get("/")
request._view_path = "/"
response = Mock(status_code=429)
rate_limit_middleware.process_view(request, test_endpoint, [], {})
self.middleware.process_response(request, response)
incr.assert_called_with(
"view.response",
instance=request._view_path,
tags={
"method": "GET",
"status_code": 429,
"ui_request": False,
"rate_limit_type": "fixed_window",
},
skip_internal=False,
)
@patch("sentry.utils.metrics.incr")
def METHOD_NAME(self, incr):
request = self.factory.get("/")
request._view_path = "/"
response = Mock(status_code=200)
request.COOKIES = {"foo": "bar"}
self.middleware.process_response(request, response)
incr.assert_called_with(
"view.response",
instance=request._view_path,
tags={"method": "GET", "status_code": 200, "ui_request": True, "rate_limit_type": None},
skip_internal=False,
)
@patch("sentry.utils.metrics.incr")
def test_records_endpoint_specific_metrics(self, incr):
request = self.factory.get("/")
request._view_path = "/"
request._metric_tags = {"a": "b"}
response = Mock(status_code=200)
self.middleware.process_response(request, response)
incr.assert_called_with(
"view.response",
instance=request._view_path,
tags={
"method": "GET",
"status_code": 200,
"ui_request": False,
"a": "b",
"rate_limit_type": None,
},
skip_internal=False,
)
@patch("sentry.utils.metrics.incr")
def test_add_request_metric_tags(self, incr):
request = self.factory.get("/")
request._view_path = "/"
add_request_metric_tags(request, foo="bar")
response = Mock(status_code=200)
self.middleware.process_response(request, response)
incr.assert_called_with(
"view.response",
instance=request._view_path,
tags={
"method": "GET",
"status_code": 200,
"ui_request": False,
"foo": "bar",
"rate_limit_type": None,
},
skip_internal=False,
) | null |
test holidays | # python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <[email protected]> (c) 2017-2023
# ryanss <[email protected]> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from holidays.countries.kenya import Kenya, KE, KEN
from tests.common import TestCase
class TestKenya(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass(Kenya, years=range(1963, 2050))
def test_country_aliases(self):
self.assertCountryAliases(Kenya, KE, KEN)
def test_no_holidays(self):
self.assertNoHolidays(Kenya(years=1962))
def test_special_holidays(self):
self.assertHoliday(
"2020-02-11",
"2022-04-29",
"2022-08-09",
"2022-09-10",
"2022-09-11",
"2022-09-12",
"2022-09-13",
)
def METHOD_NAME(self):
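# fixed-date holidays: New Year's Day, Labour Day, Mashujaa Day,
# Jamhuri Day, Christmas Day and Boxing Day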
for year in range(1963, 2050):
self.assertHoliday(
f"{year}-01-01",
f"{year}-05-01",
f"{year}-10-20",
f"{year}-12-12",
f"{year}-12-25",
f"{year}-12-26",
)
def test_madaraka_day(self):
self.assertNoHoliday(f"{year}-06-01" for year in range(1963, 2010))
self.assertNoHolidayName("Madaraka Day", range(1963, 2010))
self.assertHoliday(f"{year}-06-01" for year in range(2010, 2050))
def test_utamaduni_day(self):
name1 = "Moi Day"
name2 = "Utamaduni Day"
self.assertNoHoliday(f"{year}-10-10" for year in range(1963, 2002))
self.assertNoHoliday(f"{year}-10-10" for year in range(2010, 2018))
self.assertNoHolidayName(name1, range(1963, 2002), range(2010, 2018))
self.assertNoHolidayName(name2, range(1963, 2002), range(2010, 2021))
self.assertHoliday(f"{year}-10-10" for year in range(2002, 2010))
self.assertHoliday(f"{year}-10-10" for year in range(2018, 2050))
def test_mashujaa_day(self):
self.assertNoHolidayName("Mashujaa Day", range(1963, 2010))
self.assertNoHolidayName("Kenyatta Day", range(2010, 2050))
def test_easter(self):
self.assertHoliday(
# Good Friday
"2018-03-30",
"2019-04-19",
"2020-04-10",
"2021-04-02",
"2022-04-15",
# Easter Monday
"2018-04-02",
"2019-04-22",
"2020-04-13",
"2021-04-05",
"2022-04-18",
)
def test_observed(self):
dt = (
# New Year's Day
"2012-01-02",
"2017-01-02",
"2023-01-02",
# Labour Day
"2011-05-02",
"2016-05-02",
"2022-05-02",
# Madaraka Day
"2014-06-02",
"2025-06-02",
# Utamaduni Day / Moi Day
"2004-10-11",
"2021-10-11",
# Mashujaa Day / Kenyatta Day
"1996-10-21",
"2002-10-21",
"2013-10-21",
"2019-10-21",
"2024-10-21",
# Jamhuri Day
"2010-12-13",
"2021-12-13",
# Christmas Day
"2011-12-27",
"2016-12-27",
"2022-12-27",
# Boxing Day
"2010-12-27",
"2021-12-27",
)
self.assertHoliday(dt)
self.assertNoNonObservedHoliday(dt)
def test_2019(self):
self.assertHolidayDates(
Kenya(years=2019),
"2019-01-01",
"2019-04-19",
"2019-04-22",
"2019-05-01",
"2019-06-01",
"2019-10-10",
"2019-10-20",
"2019-10-21",
"2019-12-12",
"2019-12-25",
"2019-12-26",
)
def test_2022(self):
self.assertHolidayDates(
Kenya(years=2022),
"2022-01-01",
"2022-04-15",
"2022-04-18",
"2022-04-29",
"2022-05-01",
"2022-05-02",
"2022-06-01",
"2022-08-09",
"2022-09-10",
"2022-09-11",
"2022-09-12",
"2022-09-13",
"2022-10-10",
"2022-10-20",
"2022-12-12",
"2022-12-25",
"2022-12-26",
"2022-12-27",
) | null |
main | #!/usr/bin/env python
# pylint: disable=unused-argument, import-error
# This program is dedicated to the public domain under the CC0 license.
"""
First, a few callback functions are defined. Then, those functions are passed to
the Application and registered at their respective places.
Then, the bot is started and runs until we press Ctrl-C on the command line.
Usage:
Example of a bot-user conversation using ConversationHandler.
Send /start to initiate the conversation.
Press Ctrl-C on the command line or send a signal to the process to stop the
bot.
"""
import logging
from typing import Dict
from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove, Update
from telegram.ext import (
Application,
CommandHandler,
ContextTypes,
ConversationHandler,
MessageHandler,
PicklePersistence,
filters,
)
# Enable logging
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
# set higher logging level for httpx to avoid all GET and POST requests being logged
logging.getLogger("httpx").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
CHOOSING, TYPING_REPLY, TYPING_CHOICE = range(3)
reply_keyboard = [
["Age", "Favourite colour"],
["Number of siblings", "Something else..."],
["Done"],
]
markup = ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True)
def facts_to_str(user_data: Dict[str, str]) -> str:
"""Helper function for formatting the gathered user info."""
facts = [f"{key} - {value}" for key, value in user_data.items()]
return "\n".join(facts).join(["\n", "\n"])
async def start(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
"""Start the conversation, display any stored data and ask user for input."""
reply_text = "Hi! My name is Doctor Botter."
if context.user_data:
reply_text += (
f" You already told me your {', '.join(context.user_data.keys())}. Why don't you "
f"tell me something more about yourself? Or change anything I already know."
)
else:
reply_text += (
" I will hold a more complex conversation with you. Why don't you tell me "
"something about yourself?"
)
await update.message.reply_text(reply_text, reply_markup=markup)
return CHOOSING
async def regular_choice(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
"""Ask the user for info about the selected predefined choice."""
text = update.message.text.lower()
context.user_data["choice"] = text
if context.user_data.get(text):
reply_text = (
f"Your {text}? I already know the following about that: {context.user_data[text]}"
)
else:
reply_text = f"Your {text}? Yes, I would love to hear about that!"
await update.message.reply_text(reply_text)
return TYPING_REPLY
async def custom_choice(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
"""Ask the user for a description of a custom category."""
await update.message.reply_text(
'Alright, please send me the category first, for example "Most impressive skill"'
)
return TYPING_CHOICE
async def received_information(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
"""Store info provided by user and ask for the next category."""
text = update.message.text
category = context.user_data["choice"]
context.user_data[category] = text.lower()
del context.user_data["choice"]
await update.message.reply_text(
"Neat! Just so you know, this is what you already told me:"
f"{facts_to_str(context.user_data)}"
"You can tell me more, or change your opinion on something.",
reply_markup=markup,
)
return CHOOSING
async def show_data(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
"""Display the gathered info."""
await update.message.reply_text(
f"This is what you already told me: {facts_to_str(context.user_data)}"
)
async def done(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
"""Display the gathered info and end the conversation."""
if "choice" in context.user_data:
del context.user_data["choice"]
await update.message.reply_text(
f"I learned these facts about you: {facts_to_str(context.user_data)}Until next time!",
reply_markup=ReplyKeyboardRemove(),
)
return ConversationHandler.END
def METHOD_NAME() -> None:
"""Run the bot."""
# Create the Application and pass it your bot's token.
persistence = PicklePersistence(filepath="conversationbot")
application = Application.builder().token("TOKEN").persistence(persistence).build()
# Add conversation handler with the states CHOOSING, TYPING_CHOICE and TYPING_REPLY
conv_handler = ConversationHandler(
entry_points=[CommandHandler("start", start)],
states={
CHOOSING: [
MessageHandler(
filters.Regex("^(Age|Favourite colour|Number of siblings)$"), regular_choice
),
MessageHandler(filters.Regex("^Something else...$"), custom_choice),
],
TYPING_CHOICE: [
MessageHandler(
filters.TEXT & ~(filters.COMMAND | filters.Regex("^Done$")), regular_choice
)
],
TYPING_REPLY: [
MessageHandler(
filters.TEXT & ~(filters.COMMAND | filters.Regex("^Done$")),
received_information,
)
],
},
fallbacks=[MessageHandler(filters.Regex("^Done$"), done)],
name="my_conversation",
persistent=True,
)
application.add_handler(conv_handler)
show_data_handler = CommandHandler("show_data", show_data)
application.add_handler(show_data_handler)
# Run the bot until the user presses Ctrl-C
application.run_polling(allowed_updates=Update.ALL_TYPES)
if __name__ == "__main__":
METHOD_NAME() | null |
get multi topic offset | from kafka.common import OffsetRequestPayload
from corehq.apps.app_manager.util import app_doc_types
from corehq.apps.change_feed.connection import get_simple_kafka_client
from corehq.apps.change_feed.exceptions import UnavailableKafkaOffset
from corehq.form_processor.models import XFormInstance
DOMAIN = 'domain'
META = 'meta'
APP = 'app'
CASE_SQL = 'case-sql'
FORM_SQL = 'form-sql'
SMS = 'sms'
LEDGER = 'ledger'
COMMCARE_USER = 'commcare-user'
GROUP = 'group'
WEB_USER = 'web-user'
LOCATION = 'location'
SYNCLOG_SQL = 'synclog-sql'
CASE_TOPICS = (CASE_SQL, )
FORM_TOPICS = (FORM_SQL, )
USER_TOPICS = (COMMCARE_USER, WEB_USER)
ALL = (
CASE_SQL,
COMMCARE_USER,
DOMAIN,
FORM_SQL,
GROUP,
LEDGER,
META,
SMS,
WEB_USER,
APP,
LOCATION,
SYNCLOG_SQL,
)
def get_topic_for_doc_type(doc_type, data_source_type=None, default_topic=None):
from corehq.apps.change_feed import document_types
from corehq.apps.locations.document_store import LOCATION_DOC_TYPE
if doc_type in document_types.CASE_DOC_TYPES:
return CASE_SQL
elif doc_type in XFormInstance.ALL_DOC_TYPES:
return FORM_SQL
elif doc_type in document_types.DOMAIN_DOC_TYPES:
return DOMAIN
elif doc_type in document_types.MOBILE_USER_DOC_TYPES:
return COMMCARE_USER
elif doc_type in document_types.WEB_USER_DOC_TYPES:
return WEB_USER
elif doc_type in document_types.GROUP_DOC_TYPES:
return GROUP
elif doc_type in document_types.SYNCLOG_DOC_TYPES:
return SYNCLOG_SQL
elif doc_type in app_doc_types():
return APP
elif doc_type == LOCATION_DOC_TYPE:
return LOCATION
elif doc_type in ALL: # for docs that don't have a doc_type we use the Kafka topic
return doc_type
elif default_topic:
return default_topic
else:
# at some point we may want to make this more granular
return META # note this does not map to the 'meta' Couch database
def get_topic_offset(topic):
"""
:returns: The kafka offset dict for the topic."""
return METHOD_NAME([topic])
def METHOD_NAME(topics):
"""
:returns: A dict of offsets keyed by topic and partition"""
return _get_topic_offsets(topics, latest=True)
def get_multi_topic_first_available_offsets(topics):
"""
:returns: A dict of offsets keyed by topic and partition"""
return _get_topic_offsets(topics, latest=False)
def _get_topic_offsets(topics, latest):
"""
:param topics: list of topics
:param latest: True to fetch latest offsets, False to fetch earliest available
:return: dict: { (topic, partition): offset, ... }
"""
# https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetRequest
# https://cfchou.github.io/blog/2015/04/23/a-closer-look-at-kafka-offsetrequest/
assert set(topics) <= set(ALL)
with get_simple_kafka_client() as client:
partition_meta = client.topic_partitions
# only return the offset of the latest message in the partition
num_offsets = 1
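# Kafka offset request time semantics: -1 asks for the latest offset,
# -2 for the earliest offset still available on the broker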
time_value = -1 if latest else -2
offsets = {}
offset_requests = []
for topic in topics:
partitions = list(partition_meta.get(topic, {}))
for partition in partitions:
offsets[(topic, partition)] = None
offset_requests.append(OffsetRequestPayload(topic, partition, time_value, num_offsets))
responses = client.send_offset_request(offset_requests)
for r in responses:
offsets[(r.topic, r.partition)] = r.offsets[0]
return offsets
def validate_offsets(expected_offsets):
"""
Takes in a dictionary of offsets (topics to checkpoint numbers) and ensures they are all available
in the current kafka feed
"""
if expected_offsets:
topics = {x[0] for x in expected_offsets.keys()}
available_offsets = get_multi_topic_first_available_offsets(topics)
for topic_partition, offset in expected_offsets.items():
topic, partition = topic_partition
if topic_partition not in available_offsets:
raise UnavailableKafkaOffset("Invalid partition '{}' for topic '{}'".format(partition, topic))
if expected_offsets[topic_partition] < available_offsets[topic_partition]:
message = (
'First available topic offset for {}:{} is {} but needed {}.'
).format(topic, partition, available_offsets[topic_partition], expected_offsets[topic_partition])
raise UnavailableKafkaOffset(message) | null |
det loss | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddlex.ppdet.core.workspace import register
__all__ = ['JDEDetectionLoss', 'JDEEmbeddingLoss', 'JDELoss']
@register
class JDEDetectionLoss(nn.Layer):
__shared__ = ['num_classes']
def __init__(self, num_classes=1, for_mot=True):
super(JDEDetectionLoss, self).__init__()
self.num_classes = num_classes
self.for_mot = for_mot
def METHOD_NAME(self, p_det, anchor, t_conf, t_box):
pshape = paddle.shape(p_det)
pshape.stop_gradient = True
nB, nGh, nGw = pshape[0], pshape[-2], pshape[-1]
nA = len(anchor)
p_det = paddle.reshape(
p_det, [nB, nA, self.num_classes + 5, nGh, nGw]).transpose(
(0, 1, 3, 4, 2))
# 1. loss_conf: cross_entropy
p_conf = p_det[:, :, :, :, 4:6]
p_conf_flatten = paddle.reshape(p_conf, [-1, 2])
t_conf_flatten = t_conf.flatten()
t_conf_flatten = paddle.cast(t_conf_flatten, dtype="int64")
t_conf_flatten.stop_gradient = True
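# t_conf labels anchors as 1 (foreground), 0 (background) or -1 (ignored);
# ignore_index=-1 below drops the ignored anchors from the loss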
loss_conf = F.cross_entropy(
p_conf_flatten, t_conf_flatten, ignore_index=-1, reduction='mean')
loss_conf.stop_gradient = False
# 2. loss_box: smooth_l1_loss
p_box = p_det[:, :, :, :, :4]
p_box_flatten = paddle.reshape(p_box, [-1, 4])
t_box_flatten = paddle.reshape(t_box, [-1, 4])
fg_inds = paddle.nonzero(t_conf_flatten > 0).flatten()
if fg_inds.numel() > 0:
reg_delta = paddle.gather(p_box_flatten, fg_inds)
reg_target = paddle.gather(t_box_flatten, fg_inds)
else:
reg_delta = paddle.to_tensor([0, 0, 0, 0], dtype='float32')
reg_delta.stop_gradient = False
reg_target = paddle.to_tensor([0, 0, 0, 0], dtype='float32')
reg_target.stop_gradient = True
loss_box = F.smooth_l1_loss(
reg_delta, reg_target, reduction='mean', delta=1.0)
loss_box.stop_gradient = False
return loss_conf, loss_box
def forward(self, det_outs, targets, anchors):
"""
Args:
det_outs (list[Tensor]): output from detection head, each one
is a 4-D Tensor with shape [N, C, H, W].
targets (dict): contains 'im_id', 'gt_bbox', 'gt_ide', 'image',
'im_shape', 'scale_factor' and 'tbox', 'tconf', 'tide' of
each FPN level.
anchors (list[list]): anchor setting of JDE model, N row M col, N is
the anchor levels(FPN levels), M is the anchor scales each
level.
"""
assert len(det_outs) == len(anchors)
loss_confs = []
loss_boxes = []
for i, (p_det, anchor) in enumerate(zip(det_outs, anchors)):
t_conf = targets['tconf{}'.format(i)]
t_box = targets['tbox{}'.format(i)]
loss_conf, loss_box = self.METHOD_NAME(p_det, anchor, t_conf, t_box)
loss_confs.append(loss_conf)
loss_boxes.append(loss_box)
if self.for_mot:
return {'loss_confs': loss_confs, 'loss_boxes': loss_boxes}
else:
jde_conf_losses = sum(loss_confs)
jde_box_losses = sum(loss_boxes)
jde_det_losses = {
"loss_conf": jde_conf_losses,
"loss_box": jde_box_losses,
"loss": jde_conf_losses + jde_box_losses,
}
return jde_det_losses
@register
class JDEEmbeddingLoss(nn.Layer):
def __init__(self, ):
super(JDEEmbeddingLoss, self).__init__()
self.phony = self.create_parameter(shape=[1], dtype="float32")
def emb_loss(self, p_ide, t_conf, t_ide, emb_scale, classifier):
emb_dim = p_ide.shape[1]
p_ide = p_ide.transpose((0, 2, 3, 1))
p_ide_flatten = paddle.reshape(p_ide, [-1, emb_dim])
mask = t_conf > 0
mask = paddle.cast(mask, dtype="int64")
mask.stop_gradient = True
emb_mask = mask.max(1).flatten()
emb_mask_inds = paddle.nonzero(emb_mask > 0).flatten()
emb_mask_inds.stop_gradient = True
# use max(1) to decide the id, TODO: more reasonable strategy
t_ide_flatten = t_ide.max(1).flatten()
t_ide_flatten = paddle.cast(t_ide_flatten, dtype="int64")
valid_inds = paddle.nonzero(t_ide_flatten != -1).flatten()
if emb_mask_inds.numel() == 0 or valid_inds.numel() == 0:
# loss_ide = paddle.to_tensor([0]) # would raise an error during gradient backward since it is detached from the graph
loss_ide = self.phony * 0 # todo: multiplying the phony parameter by 0 keeps the loss attached to the graph
else:
embedding = paddle.gather(p_ide_flatten, emb_mask_inds)
embedding = emb_scale * F.normalize(embedding)
logits = classifier(embedding)
ide_target = paddle.gather(t_ide_flatten, emb_mask_inds)
loss_ide = F.cross_entropy(
logits, ide_target, ignore_index=-1, reduction='mean')
loss_ide.stop_gradient = False
return loss_ide
def forward(self, ide_outs, targets, emb_scale, classifier):
loss_ides = []
for i, p_ide in enumerate(ide_outs):
t_conf = targets['tconf{}'.format(i)]
t_ide = targets['tide{}'.format(i)]
loss_ide = self.emb_loss(p_ide, t_conf, t_ide, emb_scale,
classifier)
loss_ides.append(loss_ide)
return loss_ides
@register
class JDELoss(nn.Layer):
def __init__(self):
super(JDELoss, self).__init__()
def forward(self, loss_confs, loss_boxes, loss_ides, loss_params_cls,
loss_params_reg, loss_params_ide, targets):
assert len(loss_confs) == len(loss_boxes) == len(loss_ides)
assert len(loss_params_cls) == len(loss_params_reg) == len(
loss_params_ide)
assert len(loss_confs) == len(loss_params_cls)
batchsize = targets['gt_bbox'].shape[0]
nTargets = paddle.nonzero(paddle.sum(targets['gt_bbox'],
axis=2)).shape[0] / batchsize
nTargets = paddle.to_tensor(nTargets, dtype='float32')
nTargets.stop_gradient = True
jde_losses = []
for i, (loss_conf, loss_box, loss_ide, l_conf_p, l_box_p,
l_ide_p) in enumerate(
zip(loss_confs, loss_boxes, loss_ides, loss_params_cls,
loss_params_reg, loss_params_ide)):
jde_loss = l_conf_p(loss_conf) + l_box_p(loss_box) + l_ide_p(
loss_ide)
jde_losses.append(jde_loss)
loss_all = {
"loss_conf": sum(loss_confs),
"loss_box": sum(loss_boxes),
"loss_ide": sum(loss_ides),
"loss": sum(jde_losses),
"nTargets": nTargets,
}
return loss_all | null |
solve via data | """
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cvxpy.interface as intf
import cvxpy.settings as s
from cvxpy.constraints import ExpCone
from cvxpy.reductions.solution import Solution, failure_solution
from cvxpy.reductions.solvers import utilities
from cvxpy.reductions.solvers.conic_solvers.conic_solver import ConicSolver
from cvxpy.reductions.solvers.conic_solvers.ecos_conif import (
ECOS,
dims_to_solver_dict,
)
class ECOS_BB(ECOS):
"""An interface for the ECOS BB solver.
"""
# Solver capabilities.
MIP_CAPABLE = True
MI_SUPPORTED_CONSTRAINTS = ECOS.SUPPORTED_CONSTRAINTS
# Exit flags from ECOS_BB
# ECOS_BB found optimal solution.
# MI_OPTIMAL_SOLN (ECOS_OPTIMAL)
# ECOS_BB proved problem is infeasible.
# MI_INFEASIBLE (ECOS_PINF)
# ECOS_BB proved problem is unbounded.
# MI_UNBOUNDED (ECOS_DINF)
# ECOS_BB hit maximum iterations but a feasible solution was found and
# the best seen feasible solution was returned.
# MI_MAXITER_FEASIBLE_SOLN (ECOS_OPTIMAL + ECOS_INACC_OFFSET)
# ECOS_BB hit maximum iterations without finding a feasible solution.
# MI_MAXITER_NO_SOLN (ECOS_PINF + ECOS_INACC_OFFSET)
# ECOS_BB hit maximum iterations without finding a feasible solution
# that was unbounded.
# MI_MAXITER_UNBOUNDED (ECOS_DINF + ECOS_INACC_OFFSET)
def name(self):
"""The name of the solver.
"""
return s.ECOS_BB
def apply(self, problem):
data, inv_data = super(ECOS_BB, self).apply(problem)
# Because the problem variable is single dimensional, every
# boolean/integer index has length one.
var = problem.x
data[s.BOOL_IDX] = [int(t[0]) for t in var.boolean_idx]
data[s.INT_IDX] = [int(t[0]) for t in var.integer_idx]
inv_data['is_mip'] = data[s.BOOL_IDX] or data[s.INT_IDX]
return data, inv_data
def invert(self, solution, inverse_data):
"""Returns solution to original problem, given inverse_data.
"""
status = self.STATUS_MAP[solution['info']['exitFlag']]
# Timing data
attr = {}
attr[s.SOLVE_TIME] = solution["info"]["timing"]["tsolve"]
attr[s.SETUP_TIME] = solution["info"]["timing"]["tsetup"]
attr[s.NUM_ITERS] = solution["info"]["iter"]
attr[s.EXTRA_STATS] = solution
if status in s.SOLUTION_PRESENT:
primal_val = solution['info']['pcost']
opt_val = primal_val + inverse_data[s.OFFSET]
primal_vars = {
inverse_data[self.VAR_ID]: intf.DEFAULT_INTF.const_to_matrix(solution['x'])
}
dual_vars = None
if not inverse_data['is_mip']:
dual_vars = utilities.get_dual_values(
solution['z'],
utilities.extract_dual_value,
inverse_data[self.NEQ_CONSTR]
)
for con in inverse_data[self.NEQ_CONSTR]:
if isinstance(con, ExpCone):
cid = con.id
n_cones = con.num_cones()
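# ECOS orders the exponential cone variables differently from CVXPY,
# so the dual values must be permuted back into CVXPY's ordering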
perm = utilities.expcone_permutor(n_cones,
ECOS.EXP_CONE_ORDER)
dual_vars[cid] = dual_vars[cid][perm]
eq_duals = utilities.get_dual_values(
solution['y'],
utilities.extract_dual_value,
inverse_data[self.EQ_CONSTR]
)
dual_vars.update(eq_duals)
return Solution(status, opt_val, primal_vars, dual_vars, attr)
else:
return failure_solution(status, attr)
def METHOD_NAME(self, data, warm_start: bool, verbose: bool, solver_opts, solver_cache=None):
import ecos
cones = dims_to_solver_dict(data[ConicSolver.DIMS])
# Default verbose to false for BB wrapper.
if 'mi_verbose' in solver_opts:
mi_verbose = solver_opts['mi_verbose']
del solver_opts['mi_verbose']
else:
mi_verbose = verbose
solution = ecos.solve(data[s.C], data[s.G], data[s.H],
cones, data[s.A], data[s.B],
verbose=verbose,
mi_verbose=mi_verbose,
bool_vars_idx=data[s.BOOL_IDX],
int_vars_idx=data[s.INT_IDX],
**solver_opts)
return solution | null |
check value error | """
Tests for numpy/core/src/multiarray/conversion_utils.c
"""
import re
import sys
import pytest
import numpy as np
import numpy.core._multiarray_tests as mt
from numpy.testing import assert_warns, IS_PYPY
class StringConverterTestCase:
allow_bytes = True
case_insensitive = True
exact_match = False
warn = True
def METHOD_NAME(self, val):
pattern = r'\(got {}\)'.format(re.escape(repr(val)))
with pytest.raises(ValueError, match=pattern) as exc:
self.conv(val)
def _check_conv_assert_warn(self, val, expected):
if self.warn:
with assert_warns(DeprecationWarning) as exc:
assert self.conv(val) == expected
else:
assert self.conv(val) == expected
def _check(self, val, expected):
"""Takes valid non-deprecated inputs for converters,
runs converters on inputs, checks correctness of outputs,
warnings and errors"""
assert self.conv(val) == expected
if self.allow_bytes:
assert self.conv(val.encode('ascii')) == expected
else:
with pytest.raises(TypeError):
self.conv(val.encode('ascii'))
if len(val) != 1:
if self.exact_match:
self.METHOD_NAME(val[:1])
self.METHOD_NAME(val + '\0')
else:
self._check_conv_assert_warn(val[:1], expected)
if self.case_insensitive:
if val != val.lower():
self._check_conv_assert_warn(val.lower(), expected)
if val != val.upper():
self._check_conv_assert_warn(val.upper(), expected)
else:
if val != val.lower():
self.METHOD_NAME(val.lower())
if val != val.upper():
self.METHOD_NAME(val.upper())
def test_wrong_type(self):
# common cases which apply to all the below
with pytest.raises(TypeError):
self.conv({})
with pytest.raises(TypeError):
self.conv([])
def test_wrong_value(self):
# nonsense strings
self.METHOD_NAME('')
self.METHOD_NAME('\N{greek small letter pi}')
if self.allow_bytes:
self.METHOD_NAME(b'')
# bytes which can't be converted to strings via utf8
self.METHOD_NAME(b"\xFF")
if self.exact_match:
self.METHOD_NAME("there's no way this is supported")
class TestByteorderConverter(StringConverterTestCase):
""" Tests of PyArray_ByteorderConverter """
conv = mt.run_byteorder_converter
warn = False
def test_valid(self):
for s in ['big', '>']:
self._check(s, 'NPY_BIG')
for s in ['little', '<']:
self._check(s, 'NPY_LITTLE')
for s in ['native', '=']:
self._check(s, 'NPY_NATIVE')
for s in ['ignore', '|']:
self._check(s, 'NPY_IGNORE')
for s in ['swap']:
self._check(s, 'NPY_SWAP')
class TestSortkindConverter(StringConverterTestCase):
""" Tests of PyArray_SortkindConverter """
conv = mt.run_sortkind_converter
warn = False
def test_valid(self):
self._check('quicksort', 'NPY_QUICKSORT')
self._check('heapsort', 'NPY_HEAPSORT')
self._check('mergesort', 'NPY_STABLESORT') # alias
self._check('stable', 'NPY_STABLESORT')
class TestSelectkindConverter(StringConverterTestCase):
""" Tests of PyArray_SelectkindConverter """
conv = mt.run_selectkind_converter
case_insensitive = False
exact_match = True
def test_valid(self):
self._check('introselect', 'NPY_INTROSELECT')
class TestSearchsideConverter(StringConverterTestCase):
""" Tests of PyArray_SearchsideConverter """
conv = mt.run_searchside_converter
def test_valid(self):
self._check('left', 'NPY_SEARCHLEFT')
self._check('right', 'NPY_SEARCHRIGHT')
class TestOrderConverter(StringConverterTestCase):
""" Tests of PyArray_OrderConverter """
conv = mt.run_order_converter
warn = False
def test_valid(self):
self._check('c', 'NPY_CORDER')
self._check('f', 'NPY_FORTRANORDER')
self._check('a', 'NPY_ANYORDER')
self._check('k', 'NPY_KEEPORDER')
def test_flatten_invalid_order(self):
# invalid after gh-14596
with pytest.raises(ValueError):
self.conv('Z')
for order in [False, True, 0, 8]:
with pytest.raises(TypeError):
self.conv(order)
class TestClipmodeConverter(StringConverterTestCase):
""" Tests of PyArray_ClipmodeConverter """
conv = mt.run_clipmode_converter
def test_valid(self):
self._check('clip', 'NPY_CLIP')
self._check('wrap', 'NPY_WRAP')
self._check('raise', 'NPY_RAISE')
# integer values allowed here
assert self.conv(np.CLIP) == 'NPY_CLIP'
assert self.conv(np.WRAP) == 'NPY_WRAP'
assert self.conv(np.RAISE) == 'NPY_RAISE'
class TestCastingConverter(StringConverterTestCase):
""" Tests of PyArray_CastingConverter """
conv = mt.run_casting_converter
case_insensitive = False
exact_match = True
def test_valid(self):
self._check("no", "NPY_NO_CASTING")
self._check("equiv", "NPY_EQUIV_CASTING")
self._check("safe", "NPY_SAFE_CASTING")
self._check("same_kind", "NPY_SAME_KIND_CASTING")
self._check("unsafe", "NPY_UNSAFE_CASTING")
class TestIntpConverter:
""" Tests of PyArray_IntpConverter """
conv = mt.run_intp_converter
def test_basic(self):
assert self.conv(1) == (1,)
assert self.conv((1, 2)) == (1, 2)
assert self.conv([1, 2]) == (1, 2)
assert self.conv(()) == ()
def test_none(self):
# once the warning expires, this will raise TypeError
with pytest.warns(DeprecationWarning):
assert self.conv(None) == ()
@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
reason="PyPy bug in error formatting")
def test_float(self):
with pytest.raises(TypeError):
self.conv(1.0)
with pytest.raises(TypeError):
self.conv([1, 1.0])
def test_too_large(self):
with pytest.raises(ValueError):
self.conv(2**64)
def test_too_many_dims(self):
assert self.conv([1]*32) == (1,)*32
with pytest.raises(ValueError):
self.conv([1]*33) | null |
ccc | import sys
from _typeshed import SupportsRead, SupportsReadline
from collections.abc import Callable, Iterable, Iterator
from socket import socket
from ssl import SSLContext
from types import TracebackType
from typing import Any, TextIO
from typing_extensions import Literal, Self
__all__ = ["FTP", "error_reply", "error_temp", "error_perm", "error_proto", "all_errors", "FTP_TLS"]
MSG_OOB: Literal[1]
FTP_PORT: Literal[21]
MAXLINE: Literal[8192]
CRLF: Literal["\r\n"]
B_CRLF: Literal[b"\r\n"]
class Error(Exception): ...
class error_reply(Error): ...
class error_temp(Error): ...
class error_perm(Error): ...
class error_proto(Error): ...
all_errors: tuple[type[Exception], ...]
class FTP:
debugging: int
host: str
port: int
maxline: int
sock: socket | None
welcome: str | None
passiveserver: int
timeout: int
af: int
lastresp: str
file: TextIO | None
encoding: str
def __enter__(self) -> Self: ...
def __exit__(
self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None
) -> None: ...
source_address: tuple[str, int] | None
if sys.version_info >= (3, 9):
def __init__(
self,
host: str = "",
user: str = "",
passwd: str = "",
acct: str = "",
timeout: float = ...,
source_address: tuple[str, int] | None = None,
*,
encoding: str = "utf-8",
) -> None: ...
else:
def __init__(
self,
host: str = "",
user: str = "",
passwd: str = "",
acct: str = "",
timeout: float = ...,
source_address: tuple[str, int] | None = None,
) -> None: ...
def connect(
self, host: str = "", port: int = 0, timeout: float = -999, source_address: tuple[str, int] | None = None
) -> str: ...
def getwelcome(self) -> str: ...
def set_debuglevel(self, level: int) -> None: ...
def debug(self, level: int) -> None: ...
def set_pasv(self, val: bool | Literal[0, 1]) -> None: ...
def sanitize(self, s: str) -> str: ...
def putline(self, line: str) -> None: ...
def putcmd(self, line: str) -> None: ...
def getline(self) -> str: ...
def getmultiline(self) -> str: ...
def getresp(self) -> str: ...
def voidresp(self) -> str: ...
def abort(self) -> str: ...
def sendcmd(self, cmd: str) -> str: ...
def voidcmd(self, cmd: str) -> str: ...
def sendport(self, host: str, port: int) -> str: ...
def sendeprt(self, host: str, port: int) -> str: ...
def makeport(self) -> socket: ...
def makepasv(self) -> tuple[str, int]: ...
def login(self, user: str = "", passwd: str = "", acct: str = "") -> str: ...
# In practice, `rest` can actually be anything whose str() is an integer sequence, so to make it simple we allow integers.
def ntransfercmd(self, cmd: str, rest: int | str | None = None) -> tuple[socket, int | None]: ...
def transfercmd(self, cmd: str, rest: int | str | None = None) -> socket: ...
def retrbinary(
self, cmd: str, callback: Callable[[bytes], object], blocksize: int = 8192, rest: int | str | None = None
) -> str: ...
def storbinary(
self,
cmd: str,
fp: SupportsRead[bytes],
blocksize: int = 8192,
callback: Callable[[bytes], object] | None = None,
rest: int | str | None = None,
) -> str: ...
def retrlines(self, cmd: str, callback: Callable[[str], object] | None = None) -> str: ...
def storlines(self, cmd: str, fp: SupportsReadline[bytes], callback: Callable[[bytes], object] | None = None) -> str: ...
def acct(self, password: str) -> str: ...
def nlst(self, *args: str) -> list[str]: ...
# Technically only the last arg can be a Callable but ...
def dir(self, *args: str | Callable[[str], object]) -> None: ...
def mlsd(self, path: str = "", facts: Iterable[str] = []) -> Iterator[tuple[str, dict[str, str]]]: ...
def rename(self, fromname: str, toname: str) -> str: ...
def delete(self, filename: str) -> str: ...
def cwd(self, dirname: str) -> str: ...
def size(self, filename: str) -> int | None: ...
def mkd(self, dirname: str) -> str: ...
def rmd(self, dirname: str) -> str: ...
def pwd(self) -> str: ...
def quit(self) -> str: ...
def close(self) -> None: ...
class FTP_TLS(FTP):
if sys.version_info >= (3, 12):
def __init__(
self,
host: str = "",
user: str = "",
passwd: str = "",
acct: str = "",
*,
context: SSLContext | None = None,
timeout: float = ...,
source_address: tuple[str, int] | None = None,
encoding: str = "utf-8",
) -> None: ...
elif sys.version_info >= (3, 9):
def __init__(
self,
host: str = "",
user: str = "",
passwd: str = "",
acct: str = "",
keyfile: str | None = None,
certfile: str | None = None,
context: SSLContext | None = None,
timeout: float = ...,
source_address: tuple[str, int] | None = None,
*,
encoding: str = "utf-8",
) -> None: ...
else:
def __init__(
self,
host: str = "",
user: str = "",
passwd: str = "",
acct: str = "",
keyfile: str | None = None,
certfile: str | None = None,
context: SSLContext | None = None,
timeout: float = ...,
source_address: tuple[str, int] | None = None,
) -> None: ...
ssl_version: int
keyfile: str | None
certfile: str | None
context: SSLContext
def login(self, user: str = "", passwd: str = "", acct: str = "", secure: bool = True) -> str: ...
def auth(self) -> str: ...
def prot_p(self) -> str: ...
def prot_c(self) -> str: ...
def METHOD_NAME(self) -> str: ...
def parse150(resp: str) -> int | None: ... # undocumented
def parse227(resp: str) -> tuple[str, int]: ... # undocumented
def parse229(resp: str, peer: Any) -> tuple[str, int]: ... # undocumented
def parse257(resp: str) -> str: ... # undocumented
def ftpcp(
source: FTP, sourcename: str, target: FTP, targetname: str = "", type: Literal["A", "I"] = "I"
) -> None: ... # undocumented | null |
test batch issue 56273 | """
tests.pytests.functional.cli.test_batch
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import salt.cli.batch
import salt.config
import salt.utils.jid
from tests.support.mock import Mock, patch
class MockPub:
"""
Mock salt.client.LocalClient.pub method
"""
calls = 0
initial_ping = False
batch1_jid = None
batch1_tgt = None
batch2_jid = None
batch2_tgt = None
batch3_jid = None
batch3_tgt = None
def __call__(self, tgt, fun, *args, **kwargs):
if tgt == "minion*" and fun == "test.ping":
MockPub.calls += 1
MockPub.initial_ping = salt.utils.jid.gen_jid({})
pub_ret = {
"jid": MockPub.initial_ping,
"minions": ["minion0", "minion1", "minion2", "minion3"],
}
elif fun == "state.sls":
if MockPub.calls == 1:
MockPub.calls += 1
MockPub.batch1_tgt = list(tgt)
MockPub.batch1_jid = jid = salt.utils.jid.gen_jid({})
pub_ret = {"jid": jid, "minions": tgt}
elif MockPub.calls == 2:
MockPub.calls += 1
MockPub.batch2_tgt = tgt
MockPub.batch2_jid = jid = salt.utils.jid.gen_jid({})
pub_ret = {"jid": jid, "minions": tgt}
elif MockPub.calls == 3:
MockPub.calls += 1
MockPub.batch3_tgt = tgt
MockPub.batch3_jid = jid = salt.utils.jid.gen_jid({})
pub_ret = {"jid": jid, "minions": tgt}
elif fun == "saltutil.find_job":
jid = salt.utils.jid.gen_jid({})
pub_ret = {"jid": jid, "minions": tgt}
return pub_ret
class MockSubscriber:
"""
Mock salt.transport.ipc IPCMessageSubscriber in order to inject events into
salt.utils.Event
"""
calls = 0
pubret = None
def __init__(self, *args, **kwargs):
return
def recv(self, timeout=None):
"""
Mock IPCMessageSubscriber read method.
- Return events for initial ping
- Returns event for a minion in first batch to cause second batch to get sent.
- Returns 5 null events on first iteration of second batch to go back to first batch.
- On second iteration of first batch, send an event from second batch which will get cached.
- Return events for the rest of the batches.
"""
if MockSubscriber.pubret.initial_ping:
# Send ping responses for 4 minions
jid = MockSubscriber.pubret.initial_ping
if MockSubscriber.calls == 0:
MockSubscriber.calls += 1
return self._ret(jid, minion_id="minion0", fun="test.ping")
elif MockSubscriber.calls == 1:
MockSubscriber.calls += 1
return self._ret(jid, minion_id="minion1", fun="test.ping")
elif MockSubscriber.calls == 2:
MockSubscriber.calls += 1
return self._ret(jid, minion_id="minion2", fun="test.ping")
elif MockSubscriber.calls == 3:
MockSubscriber.calls += 1
return self._ret(jid, minion_id="minion3", fun="test.ping")
if MockSubscriber.pubret.batch1_jid:
jid = MockSubscriber.pubret.batch1_jid
tgt = MockSubscriber.pubret.batch1_tgt
if MockSubscriber.calls == 4:
# Send a return for first minion in first batch. This causes the
# second batch to get sent.
MockSubscriber.calls += 1
return self._ret(jid, minion_id=tgt[0], fun="state.sls")
if MockSubscriber.pubret.batch2_jid:
if MockSubscriber.calls <= 10:
# Skip the first iteration of the second batch; this will cause
# batch logic to go back to iterating over the first batch.
MockSubscriber.calls += 1
return
elif MockSubscriber.calls == 11:
# Send the minion from the second batch, This event will get cached.
jid = MockSubscriber.pubret.batch2_jid
tgt = MockSubscriber.pubret.batch2_tgt
MockSubscriber.calls += 1
return self._ret(jid, minion_id=tgt[0], fun="state.sls")
if MockSubscriber.calls == 12:
jid = MockSubscriber.pubret.batch1_jid
tgt = MockSubscriber.pubret.batch1_tgt
MockSubscriber.calls += 1
return self._ret(jid, minion_id=tgt[1], fun="state.sls")
if MockSubscriber.pubret.batch3_jid:
jid = MockSubscriber.pubret.batch3_jid
tgt = MockSubscriber.pubret.batch3_tgt
if MockSubscriber.calls == 13:
MockSubscriber.calls += 1
return self._ret(jid, minion_id=tgt[0], fun="state.sls")
return
def _ret(self, jid, minion_id, fun, _return=True, _retcode=0):
"""
Create a mock return from a jid, minion, and fun
"""
dumped = salt.payload.dumps(
{
"fun_args": [],
"jid": jid,
"return": _return,
"retcode": 0,
"success": True,
"cmd": "_return",
"fun": fun,
"id": minion_id,
"_stamp": "2021-05-24T01:23:25.373194",
},
use_bin_type=True,
)
tag = f"salt/job/{jid}/ret".encode()
return b"".join([tag, b"\n\n", dumped])
def connect(self, timeout=None):
pass
def METHOD_NAME():
"""
Regression test for race condition in batch logic.
https://github.com/saltstack/salt/issues/56273
"""
mock_pub = MockPub()
MockSubscriber.pubret = mock_pub
def returns_for_job(jid):
return True
opts = {
"conf_file": "",
"tgt": "minion*",
"fun": "state.sls",
"arg": ["foo"],
"timeout": 1,
"gather_job_timeout": 1,
"batch": 2,
"extension_modules": "",
"failhard": True,
}
with patch("salt.transport.tcp.TCPPubClient", MockSubscriber):
batch = salt.cli.batch.Batch(opts, quiet=True)
with patch.object(batch.local, "pub", Mock(side_effect=mock_pub)):
with patch.object(
batch.local, "returns_for_job", Mock(side_effect=returns_for_job)
):
ret = list(batch.run())
assert len(ret) == 4
for val, _ in ret:
values = list(val.values())
assert len(values) == 1
assert values[0] is True | null |
run | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import torch
from monai.bundle import BundleWorkflow
from monai.data import DataLoader, Dataset
from monai.engines import SupervisedEvaluator
from monai.inferers import SlidingWindowInferer
from monai.networks.nets import UNet
from monai.transforms import (
Activationsd,
AsDiscreted,
Compose,
EnsureChannelFirstd,
LoadImaged,
SaveImaged,
ScaleIntensityd,
)
from monai.utils import BundleProperty, set_determinism
class NonConfigWorkflow(BundleWorkflow):
"""
Test class simulates the bundle workflow defined by Python script directly.
"""
def __init__(self, filename, output_dir):
super().__init__(workflow_type="inference")
self.filename = filename
self.output_dir = output_dir
self._bundle_root = "will override"
self._dataset_dir = "."
self._device = torch.device("cpu")
self._data = [{"image": self.filename}]
self._dataset = None
self._network_def = None
self._inferer = None
self._preprocessing = None
self._postprocessing = None
self._evaluator = None
self._version = None
self._monai_version = None
self._pytorch_version = None
self._numpy_version = None
def initialize(self):
set_determinism(0)
if self._version is None:
self._version = "0.1.0"
if self._monai_version is None:
self._monai_version = "1.1.0"
if self._pytorch_version is None:
self._pytorch_version = "1.13.1"
if self._numpy_version is None:
self._numpy_version = "1.22.2"
if self._preprocessing is None:
self._preprocessing = Compose(
[LoadImaged(keys="image"), EnsureChannelFirstd(keys="image"), ScaleIntensityd(keys="image")]
)
self._dataset = Dataset(data=self._data, transform=self._preprocessing)
dataloader = DataLoader(self._dataset, batch_size=1, num_workers=4)
if self._network_def is None:
self._network_def = UNet(
spatial_dims=3,
in_channels=1,
out_channels=2,
channels=[2, 2, 4, 8, 4],
strides=[2, 2, 2, 2],
num_res_units=2,
norm="batch",
)
if self._inferer is None:
self._inferer = SlidingWindowInferer(roi_size=(64, 64, 32), sw_batch_size=4, overlap=0.25)
if self._postprocessing is None:
self._postprocessing = Compose(
[
Activationsd(keys="pred", softmax=True),
AsDiscreted(keys="pred", argmax=True),
SaveImaged(keys="pred", output_dir=self.output_dir, output_postfix="seg"),
]
)
self._evaluator = SupervisedEvaluator(
device=self._device,
val_data_loader=dataloader,
network=self._network_def.to(self._device),
inferer=self._inferer,
postprocessing=self._postprocessing,
amp=False,
)
def METHOD_NAME(self):
self._evaluator.METHOD_NAME()
def finalize(self):
return True
def _get_property(self, name, property):
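        # Map each supported bundle property name onto the corresponding workflow attribute.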
if name == "bundle_root":
return self._bundle_root
if name == "dataset_dir":
return self._dataset_dir
if name == "dataset_data":
return self._data
if name == "dataset":
return self._dataset
if name == "device":
return self._device
if name == "evaluator":
return self._evaluator
if name == "network_def":
return self._network_def
if name == "inferer":
return self._inferer
if name == "preprocessing":
return self._preprocessing
if name == "postprocessing":
return self._postprocessing
if name == "version":
return self._version
if name == "monai_version":
return self._monai_version
if name == "pytorch_version":
return self._pytorch_version
if name == "numpy_version":
return self._numpy_version
if property[BundleProperty.REQUIRED]:
raise ValueError(f"unsupported property '{name}' is required in the bundle properties.")
def _set_property(self, name, property, value):
if name == "bundle_root":
self._bundle_root = value
elif name == "device":
self._device = value
elif name == "dataset_dir":
self._dataset_dir = value
elif name == "dataset_data":
self._data = value
elif name == "dataset":
self._dataset = value
elif name == "evaluator":
self._evaluator = value
elif name == "network_def":
self._network_def = value
elif name == "inferer":
self._inferer = value
elif name == "preprocessing":
self._preprocessing = value
elif name == "postprocessing":
self._postprocessing = value
elif name == "version":
self._version = value
elif name == "monai_version":
self._monai_version = value
elif name == "pytorch_version":
self._pytorch_version = value
elif name == "numpy_version":
self._numpy_version = value
elif property[BundleProperty.REQUIRED]:
raise ValueError(f"unsupported property '{name}' is required in the bundle properties.") | null |
is valid ipv6 | from trex.common.trex_exceptions import TRexError
from trex.common.trex_types import listify
from trex.emu.trex_emu_conversions import Mac, Ipv4, Ipv6
try:
basestring
except NameError:
basestring = str
def is_valid_mac(mac):
return Mac.is_valid(mac)
def is_valid_ipv4(addr):
return Ipv4.is_valid(addr, mc = False)
def is_valid_ipv4_mc(addr):
return Ipv4.is_valid(addr, mc = True)
def METHOD_NAME(addr):
return Ipv6.is_valid(addr, mc = False)
def is_valid_ipv6_mc(addr):
return Ipv6.is_valid(addr, mc = True)
def is_valid_tci_tpid(tci):
return isinstance(tci, list) and 0 <= len(tci) <= 2 and all([0 <= v <= EMUValidator.MAX_16_BITS for v in tci])
def is_valid_tunables(t):
return isinstance(t, list) and all([isinstance(s, basestring)for s in t])
class EMUValidator(object):
MAX_16_BITS = (2 ** 16) - 1
# all emu types with their validations
EMU_VAL_DICT = {
'mac': is_valid_mac,
'ipv4': is_valid_ipv4,
'ipv4_mc': is_valid_ipv4_mc,
'ipv6': METHOD_NAME,
'ipv6_mc': is_valid_ipv6_mc,
'mtu': lambda x: 256 <= x <= 9000,
'vport': lambda x: 0 <= x <= EMUValidator.MAX_16_BITS,
'tci': is_valid_tci_tpid,
'tpid': is_valid_tci_tpid,
'tunables': is_valid_tunables,
}
@staticmethod
def verify(list_of_args):
"""
Check if list_of_args is valid.
:parameters:
list_of_args: list
                List of dictionaries with data about the arguments.
                | list_of_args = [{'name': 'ipv4_mc_arg', 'arg': ipv4_mc_arg, 't': 'ipv4_mc', 'must': False, 'allow_list': True}]
                | the example above will verify: None, '224.0.0.0', ['224.0.0.0'] but raise an exception for: 42, 'FF00::', ['224.0.0.0', 'FF00::']
                | name: string (Mandatory)
                |     Name of the argument (for error messages).
                | arg: Anything (Mandatory)
                |     The actual variable to validate.
                | t: string or class instance (Mandatory)
                |     Might be a string from `EMU_VAL_DICT` ('mac', 'ipv4'..) or just the wanted class instance.
                |     `t` might also be a list of types and `arg` should be one of them.
                | must: bool
                |     True will validate that `arg` is not None, defaults to True.
                | allow_list: bool
                |     True will allow `arg` to be a list of anything from `t`.
        :raises:
            + :exc:`TRexError`: In any case of wrong parameters.
"""
def _check_types_for_val(types, arg_name, arg_val):
for t in types:
if not isinstance(t, str):
# type is a class
if isinstance(arg_val, t):
break
else:
# type is a string, look for it in database
test_func = database.get(t, None)
if test_func is None:
err(arg_name, arg_val, 'Unknown type to EMUValidator "{0}"'.format(t))
else:
if not test_func(arg_val):
err(arg_name, arg_val, 'Argument is not valid for "{0}" type'.format(t))
break
else:
# got here if code did not break
err(arg_name, arg_val, 'Not matching type, got: "{0}"'.format(type(arg_val)))
def err(name, val, reason):
raise TRexError('Validation error, argument "{name}" with value "{val}"\nReason: {reason}'.format(name=name,
val=val,
reason=reason))
for arg in list_of_args:
database = EMUValidator.EMU_VAL_DICT
arg_name = arg.get('name')
arg_val = arg.get('arg')
arg_type = arg.get('t')
is_must = arg.get('must', True)
allow_list = arg.get('allow_list', False)
# check if arg is None
if arg_val is None:
if is_must:
err(arg_name, arg_val, 'Cannot be None')
else:
continue
arg_types = listify(arg_type)
if allow_list and isinstance(arg_val, list):
for val in arg_val:
_check_types_for_val(arg_types, arg_name, val)
else:
_check_types_for_val(arg_types, arg_name, arg_val) | null |
request factory | from __future__ import annotations
import functools
from typing import Callable, Optional, Tuple
import pytest
from django.contrib.auth.models import AnonymousUser, User
from django.core.cache import cache
from django.http import HttpRequest
from django.test import override_settings
from sentry.app import env
from sentry.middleware.auth import AuthenticationMiddleware
from sentry.middleware.placeholder import placeholder_get_response
from sentry.models import AuthIdentity, AuthProvider, Organization
from sentry.silo import SiloMode
from sentry.testutils.factories import Factories
from sentry.testutils.pytest.fixtures import django_db_all
from sentry.utils.auth import login
from sentry.web.client_config import get_client_config
RequestFactory = Callable[[], Optional[Tuple[HttpRequest, User]]]
def METHOD_NAME(f):
@functools.wraps(f)
def wrapper(*args, **kwds) -> Tuple[HttpRequest, User] | None:
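        # Build the request, log the user in (unless anonymous), run the auth middleware,
        # and expose the request through sentry.app.env so get_client_config() can see it.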
result = f(*args, **kwds)
if result is not None:
request, user = result
if not user.is_anonymous:
login(request, user)
AuthenticationMiddleware(placeholder_get_response).process_request(request)
else:
request.user = user
request.auth = None
env.request = request
cache.clear()
else:
env.clear()
return result
return wrapper
@METHOD_NAME
def make_request() -> Tuple[HttpRequest, User]:
request = HttpRequest()
request.method = "GET"
request.META["REMOTE_ADDR"] = "127.0.0.1"
request.META["SERVER_NAME"] = "testserver"
request.META["SERVER_PORT"] = 80
request.session = Factories.create_session()
return request, AnonymousUser()
@METHOD_NAME
def make_user_request(org=None) -> Tuple[HttpRequest, User]:
request, _ = make_request()
user = Factories.create_user()
org = org or Factories.create_organization()
Factories.create_member(organization=org, user=user)
teams = [Factories.create_team(org, members=[user]) for i in range(2)]
[Factories.create_project(org, teams=teams) for i in range(2)]
return request, user
@METHOD_NAME
def make_user_request_from_org(org=None):
org = org or Factories.create_organization()
request, user = make_user_request(org)
request.session["activeorg"] = org.slug
return request, user
@METHOD_NAME
def make_user_request_from_non_existant_org(org=None):
request, user = make_user_request_from_org(org)
    # This is a non-existent value that will fail the lookup.
request.session["activeorg"] = 47381
return request, user
def make_user_request_from_org_with_auth_identities(org=None):
request, user = make_user_request_from_org(org)
org = Organization.objects.get_for_user_ids({user.id})[0]
provider = AuthProvider.objects.create(
organization_id=org.id, provider="google", config={"domain": "olddomain.com"}
)
AuthIdentity.objects.create(user=user, auth_provider=provider, ident="[email protected]", data={})
return request, user
@METHOD_NAME
def none_request() -> None:
return None
@pytest.fixture(autouse=True)
def clear_env_request():
env.clear()
yield
env.clear()
@pytest.mark.parametrize(
"request_factory",
[
none_request,
make_request,
make_user_request,
make_user_request_from_org,
make_user_request_from_non_existant_org,
make_user_request_from_org_with_auth_identities,
],
)
@django_db_all(transaction=True)
def test_client_config_in_silo_modes(METHOD_NAME: RequestFactory):
request = METHOD_NAME()
if request is not None:
request, _ = request
base_line = get_client_config(request)
cache.clear()
with override_settings(SILO_MODE=SiloMode.REGION):
assert get_client_config(request) == base_line
cache.clear()
with override_settings(SILO_MODE=SiloMode.CONTROL):
assert get_client_config(request) == base_line
@django_db_all(transaction=True)
def test_client_config_deleted_user():
request, user = make_user_request_from_org()
request.user = user
user.delete()
result = get_client_config(request)
assert result["isAuthenticated"] is False
assert result["user"] is None | null |
test mandatory files | """Tests for checking the recipes and their files.
Authors
* Mirco Ravanelli 2022
"""
import os
import csv
from speechbrain.utils.data_utils import get_all_files, get_list_from_csv
__skip_list = ["README.md", "setup", "full_inference.csv"]
def test_recipe_list(
search_folder="recipes",
hparam_ext=[".yaml"],
hparam_field="Hparam_file",
recipe_folder="tests/recipes",
flags_field="test_debug_flags",
avoid_list=[
"/models/",
"/results/",
"/pretrained_models/",
"recipes/Voicebank/MTL/CoopNet/hparams/logger.yaml",
"recipes/LibriParty/generate_dataset/dataset.yaml",
"hpopt.yaml",
],
):
"""This test checks if all the all hparam file of all the recipes are listed
in the csv recipe file.
Arguments
---------
search_folder: path
The path where to search the hparam files.
hparam_ext: list
The list containing the extensions of hparam files.
hparam_field: str
Field of the csv file where the path of the hparam file is reported.
recipe_folder: path
Path of the folder containing csv recipe files.
flags_field: str
Field of the csv file where the debug flags are stated (for data flow testing).
avoid_list: list
List of files for which this check must be avoided.
Returns
---------
bool:
True if the test passes, False otherwise.
"""
all_diffs_zero = True
all_with_flags = True
for recipe_csvfile in os.listdir(recipe_folder):
if recipe_csvfile in __skip_list:
continue
dataset = os.path.splitext(recipe_csvfile)[0]
hparam_lst = get_all_files(
os.path.join(search_folder, dataset),
match_and=hparam_ext,
exclude_or=avoid_list,
)
recipe_lst = get_list_from_csv(
os.path.join(recipe_folder, recipe_csvfile), field=hparam_field
)
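        # hparam files present on disk but missing from the recipe CSV are reported as errors below.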
diff_lst = list(set(hparam_lst) - set(recipe_lst))
for file in diff_lst:
print(
"\tERROR: The file %s is not listed in %s. Please add it. \
For more info see tests/consistency/README.md"
% (file, recipe_csvfile)
)
all_diffs_zero &= len(diff_lst) == 0
flags_lst = get_list_from_csv(
os.path.join(recipe_folder, recipe_csvfile), flags_field
)
for flags in flags_lst:
if not flags:
all_with_flags = False
print(f"\tERROR: {flags_field} are missing in {recipe_csvfile}")
assert all_diffs_zero
assert all_with_flags
def test_recipe_files(
recipe_folder="tests/recipes",
fields=["Script_file", "Hparam_file", "Data_prep_file", "Readme_file"],
):
"""This test checks if the files listed in the recipe csv file exist.
Arguments
    ---------
recipe_folder: path
Path of the folder containing csv recipe files.
fields: list
Fields of the csv recipe file to check.
Returns
---------
check: bool
True if the test passes, False otherwise.
"""
check = True
# Loop over all recipe CSVs
for recipe_csvfile in os.listdir(recipe_folder):
if recipe_csvfile in __skip_list:
continue
for field in fields:
lst = get_list_from_csv(
os.path.join(recipe_folder, recipe_csvfile), field=field
)
lst = filter(None, lst)
for files in lst:
files = files.split(" ")
files = filter(None, files)
for file in files:
if not (os.path.exists(file.strip())):
print(
"\tERROR: The file %s listed in %s does not exist!"
% (file, recipe_csvfile)
)
check = False
assert check
def METHOD_NAME(
recipe_folder="tests/recipes",
must_exist=["Script_file", "Hparam_file", "Readme_file"],
):
"""This test checks if all the recipes contain the specified mandatory files.
Arguments
    ---------
recipe_folder: path
Path of the folder containing csv recipe files.
must_exist: list
List of the fields of the csv recipe file that must contain valid paths.
Returns
---------
check: bool
True if the test passes, False otherwise.
"""
check = True
# Loop over all recipe CSVs
for recipe_csvfile in os.listdir(recipe_folder):
if recipe_csvfile in __skip_list:
continue
with open(
os.path.join(recipe_folder, recipe_csvfile), newline=""
) as csvf:
reader = csv.DictReader(csvf, delimiter=",", skipinitialspace=True)
for row_id, row in enumerate(reader):
for field in must_exist:
if not (os.path.exists(row[field].strip())):
print(
"\tERROR: The recipe %s does not contain a %s. Please add it to %s!"
% (row_id, field, recipe_csvfile)
)
check = False
assert check
def test_README_links(
recipe_folder="tests/recipes",
readme_field="Readme_file",
must_link=["Result_url", "HF_repo"],
):
"""This test checks if the README file contains the correct DropBox and HF repositories.
Arguments
    ---------
recipe_folder: path
Path of the folder containing csv recipe files.
readme_field: string
Field of the csv recipe file that contains the path to the readme file.
must_link : list
Fields that contains the paths that must be linked in the readme file.
Returns
---------
check: bool
True if the test passes, False otherwise.
"""
check = True
# Loop over all recipe CSVs
for recipe_csvfile in os.listdir(recipe_folder):
if recipe_csvfile in __skip_list:
continue
with open(
os.path.join(recipe_folder, recipe_csvfile), newline=""
) as csvf:
reader = csv.DictReader(csvf, delimiter=",", skipinitialspace=True)
for row in reader:
with open(row[readme_field].strip()) as readmefile:
content = readmefile.read()
for field in must_link:
links = row[field].strip().split(" ")
for link in links:
if len(link) == 0:
continue
if not (link in content):
print(
"\tERROR: The link to %s does not exist in %s. Please add it to %s!"
% (link, row[readme_field], recipe_csvfile)
)
check = False
assert check | null |
get abbr impl | """Generate and work with PEP 425 Compatibility Tags."""
import distutils.util
import platform
import sys
import sysconfig
import warnings
try:
from importlib.machinery import get_all_suffixes
except ImportError:
from imp import get_suffixes as get_all_suffixes
def get_config_var(var):
try:
return sysconfig.get_config_var(var)
except IOError as e: # pip Issue #1074
warnings.warn("{0}".format(e), RuntimeWarning)
return None
def METHOD_NAME():
"""Return abbreviated implementation name."""
impl = platform.python_implementation()
if impl == 'PyPy':
return 'pp'
elif impl == 'Jython':
return 'jy'
elif impl == 'IronPython':
return 'ip'
elif impl == 'CPython':
return 'cp'
raise LookupError('Unknown Python implementation: ' + impl)
def get_impl_ver():
"""Return implementation version."""
impl_ver = get_config_var("py_version_nodot")
if not impl_ver or METHOD_NAME() == 'pp':
impl_ver = ''.join(map(str, get_impl_version_info()))
return impl_ver
def get_impl_version_info():
"""Return sys.version_info-like tuple for use in decrementing the minor
version."""
if METHOD_NAME() == 'pp':
# as per https://github.com/pypa/pip/issues/2882
return (sys.version_info[0], sys.pypy_version_info.major,
sys.pypy_version_info.minor)
else:
return sys.version_info[0], sys.version_info[1]
def get_flag(var, fallback, expected=True, warn=True):
"""Use a fallback method for determining SOABI flags if the needed config
var is unset or unavailable."""
val = get_config_var(var)
if val is None:
if warn:
warnings.warn("Config variable '{0}' is unset, Python ABI tag may "
"be incorrect".format(var), RuntimeWarning, 2)
return fallback()
return val == expected
def get_abi_tag():
"""Return the ABI tag based on SOABI (if available) or emulate SOABI
(CPython 2, PyPy)."""
soabi = get_config_var('SOABI')
impl = METHOD_NAME()
if not soabi and impl in ('cp', 'pp') and hasattr(sys, 'maxunicode'):
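        # SOABI is unavailable (e.g. CPython 2 or old PyPy): emulate it from the debug,
        # pymalloc and wide-unicode build flags.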
d = ''
m = ''
u = ''
if get_flag('Py_DEBUG',
lambda: hasattr(sys, 'gettotalrefcount'),
warn=(impl == 'cp')):
d = 'd'
if get_flag('WITH_PYMALLOC',
lambda: impl == 'cp',
warn=(impl == 'cp')):
m = 'm'
if get_flag('Py_UNICODE_SIZE',
lambda: sys.maxunicode == 0x10ffff,
expected=4,
warn=(impl == 'cp' and
sys.version_info < (3, 3))) \
and sys.version_info < (3, 3):
u = 'u'
abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u)
elif soabi and soabi.startswith('cpython-'):
abi = 'cp' + soabi.split('-')[1]
elif soabi:
abi = soabi.replace('.', '_').replace('-', '_')
else:
abi = None
return abi
def get_platform():
"""Return our platform name 'win32', 'linux_x86_64'"""
# XXX remove distutils dependency
result = distutils.util.get_platform().replace('.', '_').replace('-', '_')
if result == "linux_x86_64" and sys.maxsize == 2147483647:
# pip pull request #3497
result = "linux_i686"
return result
def get_supported(versions=None, supplied_platform=None):
"""Return a list of supported tags for each version specified in
`versions`.
:param versions: a list of string versions, of the form ["33", "32"],
or None. The first version will be assumed to support our ABI.
"""
supported = []
# Versions must be given with respect to the preference
if versions is None:
versions = []
version_info = get_impl_version_info()
major = version_info[:-1]
# Support all previous minor Python versions.
for minor in range(version_info[-1], -1, -1):
versions.append(''.join(map(str, major + (minor,))))
impl = METHOD_NAME()
abis = []
abi = get_abi_tag()
if abi:
abis[0:0] = [abi]
abi3s = set()
for suffix in get_all_suffixes():
if suffix[0].startswith('.abi'):
abi3s.add(suffix[0].split('.', 2)[1])
abis.extend(sorted(list(abi3s)))
abis.append('none')
platforms = []
if supplied_platform:
platforms.append(supplied_platform)
platforms.append(get_platform())
# Current version, current API (built specifically for our Python):
for abi in abis:
for arch in platforms:
supported.append(('%s%s' % (impl, versions[0]), abi, arch))
# abi3 modules compatible with older version of Python
for version in versions[1:]:
# abi3 was introduced in Python 3.2
if version in ('31', '30'):
break
for abi in abi3s: # empty set if not Python 3
for arch in platforms:
supported.append(("%s%s" % (impl, version), abi, arch))
# No abi / arch, but requires our implementation:
for i, version in enumerate(versions):
supported.append(('%s%s' % (impl, version), 'none', 'any'))
if i == 0:
# Tagged specifically as being cross-version compatible
# (with just the major version specified)
supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))
# Major Python version + platform; e.g. binaries not using the Python API
for arch in platforms:
supported.append(('py%s' % (versions[0][0]), 'none', arch))
# No abi / arch, generic Python
for i, version in enumerate(versions):
supported.append(('py%s' % (version,), 'none', 'any'))
if i == 0:
supported.append(('py%s' % (version[0]), 'none', 'any'))
return supported | null |
test setlangs ro | #!/usr/bin/python
#
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
import os
import sys
import unittest
from spacewalk.common import rhnTranslate
class Tests(unittest.TestCase):
# pylint: disable=R0904
@staticmethod
def _setup(lang):
domain = "unit-test"
localedir = os.path.dirname(os.path.abspath(__file__)) + "/translations"
rhnTranslate.cat.set(domain=domain, localedir=localedir)
rhnTranslate.cat.setlangs(lang)
def _test(self, lang, s, target):
self._setup(lang)
ss = rhnTranslate._(s)
self.assertEqual(ss, target)
def test_setlangs_en(self):
"Tests setting the language to en"
lang = "en"
self._setup(lang)
langs = rhnTranslate.cat.getlangs()
self.assertTrue(langs[0] == lang)
def METHOD_NAME(self):
"Tests setting the language to ro"
lang = "ro"
self._setup(lang)
langs = rhnTranslate.cat.getlangs()
self.assertTrue(langs[0] == lang)
def test_setlangs_go(self):
"""Tests setting the language to go (does not exist)"""
lang = "go"
self._setup(lang)
langs = rhnTranslate.cat.getlangs()
if hasattr(sys, "version_info"):
# On python 1.5.2 we don't really get an idea what the language
# is, so it's ok to check for the first component
self.assertFalse(langs[0] == lang, "Language is %s" % langs[0])
else:
self.assertTrue(langs[0] == lang, "Language is %s" % langs[0])
def test_en_1(self):
"Tests plain English messages"
lang = 'en'
s = "Good day"
target = s
self._test(lang, s, target)
def test_en_2(self):
"Tests plain English messages"
lang = 'en'
s = "How do you do?"
target = s
self._test(lang, s, target)
def test_en_3(self):
"Tests plain English messages"
lang = 'en'
s = "What should I do now?"
target = s
self._test(lang, s, target)
def test_en_missing_1(self):
"Tests plain English messages that are not in the translation files"
lang = 'en'
s = "This string doesn't exist in the translation"
target = s
self._test(lang, s, target)
def test_ro_1(self):
"Tests plain English messages translated to Romanian"
lang = 'ro'
s = "Good day"
target = "Buna ziua"
self._test(lang, s, target)
def test_ro_2(self):
"Tests plain English messages translated to Romanian"
lang = 'ro'
s = "How do you do?"
target = "Ce mai faceti?"
self._test(lang, s, target)
def test_ro_3(self):
"Tests plain English messages translated to Romanian"
lang = 'ro'
s = "What should I do now?"
target = "Ce sa fac acum?"
self._test(lang, s, target)
def test_ro_missing_1(self):
"Tests plain English messages that are not in the translation files (ro)"
lang = 'ro'
s = "This string doesn't exist in the translation"
target = s
self._test(lang, s, target)
def test_go_1(self):
"Tests plain English messages translated in the mythical go language"
lang = 'en'
s = "Good day"
target = s
self._test(lang, s, target)
def test_go_2(self):
"Tests plain English messages translated in the mythical go language"
lang = 'en'
s = "How do you do?"
target = s
self._test(lang, s, target)
def test_go_3(self):
"Tests plain English messages translated in the mythical go language"
lang = 'en'
s = "What should I do now?"
target = s
self._test(lang, s, target)
def test_go_missing_1(self):
"Tests plain English messages that are not in the translation files (go)"
lang = 'en'
s = "This string doesn't exist in the translation"
target = s
self._test(lang, s, target)
if __name__ == '__main__':
sys.exit(unittest.main() or 0) | null |
test starred assignment | """Tests for the unparse.py script in the Tools/parser directory."""
import unittest
import test.support
import io
import os
import random
import tokenize
import ast
from test.test_tools import basepath, toolsdir, skip_if_missing
skip_if_missing()
parser_path = os.path.join(toolsdir, "parser")
with test.support.DirsOnSysPath(parser_path):
import unparse
def read_pyfile(filename):
"""Read and return the contents of a Python source file (as a
string), taking into account the file encoding."""
with open(filename, "rb") as pyfile:
encoding = tokenize.detect_encoding(pyfile.readline)[0]
with open(filename, "r", encoding=encoding) as pyfile:
source = pyfile.read()
return source
for_else = """\
def f():
for x in range(10):
break
else:
y = 2
z = 3
"""
while_else = """\
def g():
while True:
break
else:
y = 2
z = 3
"""
relative_import = """\
from . import fred
from .. import barney
from .australia import shrimp as prawns
"""
nonlocal_ex = """\
def f():
x = 1
def g():
nonlocal x
x = 2
y = 7
def h():
nonlocal x, y
"""
# also acts as test for 'except ... as ...'
raise_from = """\
try:
1 / 0
except ZeroDivisionError as e:
raise ArithmeticError from e
"""
class_decorator = """\
@f1(arg)
@f2
class Foo: pass
"""
elif1 = """\
if cond1:
suite1
elif cond2:
suite2
else:
suite3
"""
elif2 = """\
if cond1:
suite1
elif cond2:
suite2
"""
try_except_finally = """\
try:
suite1
except ex1:
suite2
except ex2:
suite3
else:
suite4
finally:
suite5
"""
with_simple = """\
with f():
suite1
"""
with_as = """\
with f() as x:
suite1
"""
with_two_items = """\
with f() as x, g() as y:
suite1
"""
class ASTTestCase(unittest.TestCase):
def assertASTEqual(self, ast1, ast2):
self.assertEqual(ast.dump(ast1), ast.dump(ast2))
def check_roundtrip(self, code1, filename="internal"):
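        # Compile the source to an AST, unparse it back to source, re-compile, and require
        # that the two ASTs dump identically.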
ast1 = compile(code1, filename, "exec", ast.PyCF_ONLY_AST)
unparse_buffer = io.StringIO()
unparse.Unparser(ast1, unparse_buffer)
code2 = unparse_buffer.getvalue()
ast2 = compile(code2, filename, "exec", ast.PyCF_ONLY_AST)
self.assertASTEqual(ast1, ast2)
class UnparseTestCase(ASTTestCase):
# Tests for specific bugs found in earlier versions of unparse
def test_del_statement(self):
self.check_roundtrip("del x, y, z")
def test_shifts(self):
self.check_roundtrip("45 << 2")
self.check_roundtrip("13 >> 7")
def test_for_else(self):
self.check_roundtrip(for_else)
def test_while_else(self):
self.check_roundtrip(while_else)
def test_unary_parens(self):
self.check_roundtrip("(-1)**7")
self.check_roundtrip("(-1.)**8")
self.check_roundtrip("(-1j)**6")
self.check_roundtrip("not True or False")
self.check_roundtrip("True or not False")
def test_integer_parens(self):
self.check_roundtrip("3 .__abs__()")
def test_huge_float(self):
self.check_roundtrip("1e1000")
self.check_roundtrip("-1e1000")
self.check_roundtrip("1e1000j")
self.check_roundtrip("-1e1000j")
def test_min_int(self):
self.check_roundtrip(str(-2**31))
self.check_roundtrip(str(-2**63))
def test_imaginary_literals(self):
self.check_roundtrip("7j")
self.check_roundtrip("-7j")
self.check_roundtrip("0j")
self.check_roundtrip("-0j")
def test_lambda_parentheses(self):
self.check_roundtrip("(lambda: int)()")
def test_chained_comparisons(self):
self.check_roundtrip("1 < 4 <= 5")
self.check_roundtrip("a is b is c is not d")
def test_function_arguments(self):
self.check_roundtrip("def f(): pass")
self.check_roundtrip("def f(a): pass")
self.check_roundtrip("def f(b = 2): pass")
self.check_roundtrip("def f(a, b): pass")
self.check_roundtrip("def f(a, b = 2): pass")
self.check_roundtrip("def f(a = 5, b = 2): pass")
self.check_roundtrip("def f(*, a = 1, b = 2): pass")
self.check_roundtrip("def f(*, a = 1, b): pass")
self.check_roundtrip("def f(*, a, b = 2): pass")
self.check_roundtrip("def f(a, b = None, *, c, **kwds): pass")
self.check_roundtrip("def f(a=2, *args, c=5, d, **kwds): pass")
self.check_roundtrip("def f(*args, **kwargs): pass")
def test_relative_import(self):
self.check_roundtrip(relative_import)
def test_nonlocal(self):
self.check_roundtrip(nonlocal_ex)
def test_raise_from(self):
self.check_roundtrip(raise_from)
def test_bytes(self):
self.check_roundtrip("b'123'")
def test_annotations(self):
self.check_roundtrip("def f(a : int): pass")
self.check_roundtrip("def f(a: int = 5): pass")
self.check_roundtrip("def f(*args: [int]): pass")
self.check_roundtrip("def f(**kwargs: dict): pass")
self.check_roundtrip("def f() -> None: pass")
def test_set_literal(self):
self.check_roundtrip("{'a', 'b', 'c'}")
def test_set_comprehension(self):
self.check_roundtrip("{x for x in range(5)}")
def test_dict_comprehension(self):
self.check_roundtrip("{x: x*x for x in range(10)}")
def test_class_decorators(self):
self.check_roundtrip(class_decorator)
def test_class_definition(self):
self.check_roundtrip("class A(metaclass=type, *[], **{}): pass")
def test_elifs(self):
self.check_roundtrip(elif1)
self.check_roundtrip(elif2)
def test_try_except_finally(self):
self.check_roundtrip(try_except_finally)
def METHOD_NAME(self):
self.check_roundtrip("a, *b, c = seq")
self.check_roundtrip("a, (*b, c) = seq")
self.check_roundtrip("a, *b[0], c = seq")
self.check_roundtrip("a, *(b, c) = seq")
def test_with_simple(self):
self.check_roundtrip(with_simple)
def test_with_as(self):
self.check_roundtrip(with_as)
def test_with_two_items(self):
self.check_roundtrip(with_two_items)
class DirectoryTestCase(ASTTestCase):
"""Test roundtrip behaviour on all files in Lib and Lib/test."""
# test directories, relative to the root of the distribution
test_directories = 'Lib', os.path.join('Lib', 'test')
def test_files(self):
# get names of files to test
names = []
for d in self.test_directories:
test_dir = os.path.join(basepath, d)
for n in os.listdir(test_dir):
if n.endswith('.py') and not n.startswith('bad'):
names.append(os.path.join(test_dir, n))
# Test limited subset of files unless the 'cpu' resource is specified.
if not test.support.is_resource_enabled("cpu"):
names = random.sample(names, 10)
for filename in names:
if test.support.verbose:
print('Testing %s' % filename)
source = read_pyfile(filename)
self.check_roundtrip(source)
if __name__ == '__main__':
unittest.main() | null |
kill instances | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from mock import patch, MagicMock
from physical.tests.factory import InstanceFactory
from model_mommy import mommy
from physical.models import EngineType
class UsedAndTotalValidator(object):
@staticmethod
def assertEqual(a, b):
assert a == b, "{} NOT EQUAL {}".format(a, b)
@classmethod
def instances_sizes(cls, instances=None, expected_used_size=40,
expected_total_size=90):
for instance in instances:
cls.assertEqual(instance.used_size_in_bytes, expected_used_size)
cls.assertEqual(instance.total_size_in_bytes, expected_total_size)
class InstanceHelper(object):
model = InstanceFactory.FACTORY_FOR
quantity_of_masters = 1
@classmethod
def METHOD_NAME(cls, instances):
for instance in instances:
instance.status = cls.model.DEAD
instance.save()
@staticmethod
def change_instances_type(instances, instance_type):
for instance in instances:
instance.instance_type = instance_type
instance.save()
@staticmethod
def check_instance_is_master(instance, default_timeout=False):
"""
        Method that mocks the real check_instance_is_master.
"""
quantity_of_masters = instance.databaseinfra.instances.count() / 2
return instance.id in (instance.databaseinfra.instances.values_list(
'id', flat=True)[quantity_of_masters:])
@staticmethod
def create_instances_by_quant(infra, port=3306, qt=1,
total_size_in_bytes=50,
used_size_in_bytes=25, instance_type=1,
base_address='127', hostname=None):
"""
        Helper to create instances by quantity.
"""
def _create(n):
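            # Each instance gets a unique address of the form <base_address>.7<infra_id>.<n>.<n>.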
extra_params = dict(**{'hostname': hostname} if hostname else {})
return InstanceFactory(
databaseinfra=infra,
address='{0}.7{1}.{2}.{2}'.format(
base_address, infra.id, n
),
port=port,
instance_type=instance_type,
total_size_in_bytes=total_size_in_bytes,
used_size_in_bytes=used_size_in_bytes,
**extra_params
)
return map(_create, range(1, qt + 1))
class DatabaseHelper(object):
@staticmethod
@patch('logical.models.Database.automatic_create_first_credential',
MagicMock())
def create(**kwargs):
if 'databaseinfra' not in kwargs:
kwargs['databaseinfra'] = InfraHelper.create()
driver = kwargs['databaseinfra'].get_driver()
module_path = "{}.{}.create_database".format(
driver.__class__.__module__,
driver.__class__.__name__
)
with patch(module_path, MagicMock()):
return mommy.make(
'Database', **kwargs
)
class PlanHelper(object):
engine_map = {
'mysql_single': {
'class_path': 'drivers.replication_topologies.mysql.MySQLSingle',
'name': 'MySQL Single 5.7.25'
}
}
@classmethod
    def create(cls, engine_name='mysql_single', **kwargs):
"""
        Engine must be: `NAME`_`TOPOLOGY_TYPE`,
        e.g. mysql_single. The name of the engine will be mysql, and mysql_single
        will be used to get the topology class_path and name. See the `engine_map`
        class variable.
"""
if 'engine' not in kwargs:
if engine_name not in cls.engine_map:
raise Exception(
"Engine not mapped. Mapped engines are: {}".format(
', '.join(cls.engine_map.keys())
)
)
engine_conf = cls.engine_map[engine_name]
try:
engine_type_name = engine_name.split('_')[0]
engine_type = EngineType.objects.get(name=engine_type_name)
except EngineType.DoesNotExist:
engine_type = mommy.make('EngineType', name=engine_type_name)
engine = mommy.make(
'Engine', engine_type=engine_type
)
replication_topology = mommy.make(
'ReplicationTopology',
name=engine_conf['name'],
class_path=engine_conf['class_path']
)
else:
engine = kwargs.get('engine')
replication_topology = mommy.make(
'ReplicationTopology'
)
plan = mommy.make(
'Plan', engine=engine,
replication_topology=replication_topology
)
return engine_type, engine, replication_topology, plan
class InfraHelper(object):
@staticmethod
def create(engine_name='mysql_single', **kwargs):
if 'plan' not in kwargs:
_, _, _, kwargs['plan'] = PlanHelper.create(engine_name)
return mommy.make_recipe('physical.databaseinfra', **kwargs) | null |
create | ##############################################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
from odoo import fields, models, api, _
from odoo.exceptions import ValidationError
from odoo.tools import float_compare
class SaleOrderLine(models.Model):
_inherit = "sale.order.line"
discount1 = fields.Float(
'Discount 1 (%)',
digits='Discount',
)
discount2 = fields.Float(
'Discount 2 (%)',
digits='Discount',
)
discount3 = fields.Float(
'Discount 3 (%)',
digits='Discount',
)
# TODO do like in invoice line? Make normal field with constraint and
# oncahnge?
discount = fields.Float(
compute='_compute_discounts',
store=True,
readonly=True,
# agregamos states vacio porque lo hereda de la definicion anterior
states={},
)
@api.constrains('discount1', 'discount2', 'discount3')
def check_discount_validity(self):
for rec in self:
error = []
if rec.discount1 > 100:
error.append('Discount 1')
if rec.discount2 > 100:
error.append('Discount 2')
if rec.discount3 > 100:
error.append('Discount 3')
if error:
raise ValidationError(_(
",".join(error) + " must be less or equal than 100"
))
@api.model_create_multi
def METHOD_NAME(self, vals_list):
self.inverse_vals(vals_list)
return super().METHOD_NAME(vals_list)
def write(self, vals):
self.inverse_vals([vals])
return super().write(vals)
def inverse_vals(self, vals_list):
""" No usamos metodo inverse porque en el create odoo termina llamando
a inverse y unificando los descuentos en la primer linea.
Además, solo actualizamos con el inverse el primer descuento
principalmente por compatibilidad con listas que discriminen descuento
y consideramos que las columnas 2 y 3 son descuentos adicionales y no
las pisamos
"""
for vals in vals_list:
            # Force-remove discount1/2/3 from vals when called from the create method
            # with all computed values still initialized to zero; those values would
            # otherwise wrongly clear the discount.
if not self:
if 'discount1' in vals and vals.get('discount1') == 0:
vals.pop('discount1')
if 'discount2' in vals and vals.get('discount2') == 0:
vals.pop('discount2')
if 'discount3' in vals and vals.get('discount3') == 0:
vals.pop('discount3')
precision = self.env['decimal.precision'].precision_get('Discount')
if 'discount' in vals \
and not {'discount1', 'discount2', 'discount3'} & set(vals.keys()):
vals.update({
'discount1': vals.get('discount'),
})
@api.depends('discount1', 'discount2', 'discount3', 'discount', 'product_id', 'product_uom', 'product_uom_qty')
def _compute_discounts(self):
super()._compute_discount()
for rec in self:
if rec.discount and not rec.discount1 and not rec.discount2 and not rec.discount3:
rec.discount1 = rec.discount
discount_factor = 1.0
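            # Cascade the three discounts multiplicatively, e.g. 10% + 10% + 10% -> 27.1%.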
for discount in [rec.discount1, rec.discount2, rec.discount3]:
discount_factor = discount_factor * (
(100.0 - discount) / 100.0)
rec.discount = 100.0 - (discount_factor * 100.0)
def _prepare_invoice_line(self, **optional_values):
res = super()._prepare_invoice_line(**optional_values)
res.update({
'discount1': self.discount1,
'discount2': self.discount2,
'discount3': self.discount3
})
return res | null |
create | from datetime import datetime
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
import django_filters
from rest_framework import exceptions, mixins, serializers, viewsets
from resources.api.base import register_view
from .models import Comment, COMMENTABLE_MODELS, get_commentable_content_types
class CommentUserSerializer(serializers.ModelSerializer):
display_name = serializers.ReadOnlyField(source='get_display_name')
class Meta:
model = get_user_model()
fields = ('display_name',)
class CommentSerializer(serializers.ModelSerializer):
target_type = serializers.CharField(required=True, write_only=True) # populated in to_representation()
target_id = serializers.IntegerField(source='object_id')
created_by = CommentUserSerializer(read_only=True)
class Meta:
model = Comment
fields = ('id', 'created_at', 'created_by', 'target_type', 'target_id', 'text')
def METHOD_NAME(self, validated_data):
model = COMMENTABLE_MODELS.get(validated_data.pop('target_type'))
content_type = ContentType.objects.get_for_model(model)
validated_data['content_type'] = content_type
return super().METHOD_NAME(validated_data)
def validate(self, validated_data):
target_type = validated_data.get('target_type')
if target_type not in COMMENTABLE_MODELS.keys():
raise exceptions.ValidationError({'target_type': [_('Illegal type.')]})
target_id = validated_data.get('object_id')
target_model = COMMENTABLE_MODELS.get(target_type)
try:
target_object = target_model.objects.get(id=target_id)
except target_model.DoesNotExist:
error_message = serializers.PrimaryKeyRelatedField.default_error_messages['does_not_exist']
raise exceptions.ValidationError(
{'target_id': [error_message.format(pk_value=target_id)]}
)
if not Comment.can_user_comment_object(self.context['request'].user, target_object):
raise exceptions.ValidationError(_('You cannot comment this object.'))
return validated_data
def to_representation(self, instance):
data = super().to_representation(instance)
target_model = instance.content_type.model_class()
        # when used with the comment viewset it shouldn't be possible to get StopIteration here,
        # because models other than the commentable ones are excluded in the viewset
data['target_type'] = next(api_name for api_name, model in COMMENTABLE_MODELS.items() if model == target_model)
return data
class CommentFilter(django_filters.rest_framework.FilterSet):
class Meta:
model = Comment
fields = ('target_type', 'target_id')
target_type = django_filters.CharFilter(method='filter_target_type')
target_id = django_filters.CharFilter(field_name='object_id')
def filter_target_type(self, queryset, name, value):
try:
model = next(model for api_name, model in COMMENTABLE_MODELS.items() if api_name == value)
except StopIteration:
return queryset.none()
content_type = ContentType.objects.get_for_model(model)
return queryset.filter(content_type=content_type)
class CommentViewSet(mixins.CreateModelMixin, mixins.RetrieveModelMixin, mixins.ListModelMixin,
viewsets.GenericViewSet):
queryset = Comment.objects.select_related('created_by').prefetch_related('content_type')
serializer_class = CommentSerializer
filter_backends = (django_filters.rest_framework.DjangoFilterBackend,)
filterset_class = CommentFilter
def get_queryset(self):
user = self.request.user
queryset = super().get_queryset()
return queryset.filter(content_type__in=get_commentable_content_types()).can_view(user)
def perform_create(self, serializer):
obj = serializer.save(created_by=self.request.user, created_at=timezone.now())
obj.send_created_notification(self.request)
return obj
register_view(CommentViewSet, 'comment') | null |
bgp peer address | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetNetworkVirtualApplianceConnectionResult',
'AwaitableGetNetworkVirtualApplianceConnectionResult',
'get_network_virtual_appliance_connection',
'get_network_virtual_appliance_connection_output',
]
@pulumi.output_type
class GetNetworkVirtualApplianceConnectionResult:
"""
NetworkVirtualApplianceConnection resource.
"""
def __init__(__self__, asn=None, METHOD_NAME=None, enable_internet_security=None, id=None, name=None, provisioning_state=None, routing_configuration=None, tunnel_identifier=None):
if asn and not isinstance(asn, float):
raise TypeError("Expected argument 'asn' to be a float")
pulumi.set(__self__, "asn", asn)
if METHOD_NAME and not isinstance(METHOD_NAME, list):
raise TypeError("Expected argument 'bgp_peer_address' to be a list")
pulumi.set(__self__, "bgp_peer_address", METHOD_NAME)
if enable_internet_security and not isinstance(enable_internet_security, bool):
raise TypeError("Expected argument 'enable_internet_security' to be a bool")
pulumi.set(__self__, "enable_internet_security", enable_internet_security)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if routing_configuration and not isinstance(routing_configuration, dict):
raise TypeError("Expected argument 'routing_configuration' to be a dict")
pulumi.set(__self__, "routing_configuration", routing_configuration)
if tunnel_identifier and not isinstance(tunnel_identifier, float):
raise TypeError("Expected argument 'tunnel_identifier' to be a float")
pulumi.set(__self__, "tunnel_identifier", tunnel_identifier)
@property
@pulumi.getter
def asn(self) -> Optional[float]:
"""
Network Virtual Appliance ASN.
"""
return pulumi.get(self, "asn")
@property
@pulumi.getter(name="bgpPeerAddress")
def METHOD_NAME(self) -> Optional[Sequence[str]]:
"""
List of bgpPeerAddresses for the NVA instances
"""
return pulumi.get(self, "bgp_peer_address")
@property
@pulumi.getter(name="enableInternetSecurity")
def enable_internet_security(self) -> Optional[bool]:
"""
Enable internet security.
"""
return pulumi.get(self, "enable_internet_security")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the NetworkVirtualApplianceConnection resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="routingConfiguration")
def routing_configuration(self) -> Optional['outputs.RoutingConfigurationNfvResponse']:
"""
The Routing Configuration indicating the associated and propagated route tables on this connection.
"""
return pulumi.get(self, "routing_configuration")
@property
@pulumi.getter(name="tunnelIdentifier")
def tunnel_identifier(self) -> Optional[float]:
"""
Unique identifier for the connection.
"""
return pulumi.get(self, "tunnel_identifier")
class AwaitableGetNetworkVirtualApplianceConnectionResult(GetNetworkVirtualApplianceConnectionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNetworkVirtualApplianceConnectionResult(
asn=self.asn,
METHOD_NAME=self.METHOD_NAME,
enable_internet_security=self.enable_internet_security,
id=self.id,
name=self.name,
provisioning_state=self.provisioning_state,
routing_configuration=self.routing_configuration,
tunnel_identifier=self.tunnel_identifier)
def get_network_virtual_appliance_connection(connection_name: Optional[str] = None,
network_virtual_appliance_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkVirtualApplianceConnectionResult:
"""
Retrieves the details of specified NVA connection.
Azure REST API version: 2023-02-01.
:param str connection_name: The name of the NVA connection.
:param str network_virtual_appliance_name: The name of the Network Virtual Appliance.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['connectionName'] = connection_name
__args__['networkVirtualApplianceName'] = network_virtual_appliance_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:network:getNetworkVirtualApplianceConnection', __args__, opts=opts, typ=GetNetworkVirtualApplianceConnectionResult).value
return AwaitableGetNetworkVirtualApplianceConnectionResult(
asn=pulumi.get(__ret__, 'asn'),
METHOD_NAME=pulumi.get(__ret__, 'bgp_peer_address'),
enable_internet_security=pulumi.get(__ret__, 'enable_internet_security'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
routing_configuration=pulumi.get(__ret__, 'routing_configuration'),
tunnel_identifier=pulumi.get(__ret__, 'tunnel_identifier'))
@_utilities.lift_output_func(get_network_virtual_appliance_connection)
def get_network_virtual_appliance_connection_output(connection_name: Optional[pulumi.Input[str]] = None,
network_virtual_appliance_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetNetworkVirtualApplianceConnectionResult]:
"""
Retrieves the details of specified NVA connection.
Azure REST API version: 2023-02-01.
:param str connection_name: The name of the NVA connection.
:param str network_virtual_appliance_name: The name of the Network Virtual Appliance.
:param str resource_group_name: The name of the resource group.
"""
... | null |
to str | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.28
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1PersistentVolumeList(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1PersistentVolume]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1PersistentVolumeList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1PersistentVolumeList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1PersistentVolumeList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1PersistentVolumeList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1PersistentVolumeList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1PersistentVolumeList. # noqa: E501
items is a list of persistent volumes. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes # noqa: E501
:return: The items of this V1PersistentVolumeList. # noqa: E501
:rtype: list[V1PersistentVolume]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1PersistentVolumeList.
items is a list of persistent volumes. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes # noqa: E501
:param items: The items of this V1PersistentVolumeList. # noqa: E501
:type: list[V1PersistentVolume]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1PersistentVolumeList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1PersistentVolumeList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1PersistentVolumeList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1PersistentVolumeList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1PersistentVolumeList. # noqa: E501
:return: The metadata of this V1PersistentVolumeList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1PersistentVolumeList.
:param metadata: The metadata of this V1PersistentVolumeList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def METHOD_NAME(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.METHOD_NAME()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PersistentVolumeList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1PersistentVolumeList):
return True
return self.to_dict() != other.to_dict() | null |
convert | import sys
import json
import argparse
markdown = ""
tab = " "
list_tag = '* '
inline_code = '`'
code_block = '```'
subtitle = '## '
htag = '#'
if sys.version_info < (3, 0):
raise Exception("[ERROR] This program requires Python 3.0 or greater")
def load_json(file):
try:
with open(file, 'r') as f:
data = f.read()
return json.loads(data)
except: # noqa
print("[ERROR] File must be a valid json file")
def parse_json(json_block, depth, options):
if isinstance(json_block, dict):
parse_dict(json_block, depth, options)
if isinstance(json_block, list):
parse_list(json_block, depth, options)
def parse_dict(d, depth, options):
for k in d:
if k in options['ignore']:
continue
if options['keep'] != '':
if k not in options['keep']:
continue
if isinstance(d[k], (dict, list)):
add_header(k, depth)
parse_json(d[k], depth + 1, options)
else:
add_value(k, d[k], depth)
def parse_list(l, depth, options): # noqa
for value in l:
if not isinstance(value, (dict, list)):
index = l.index(value)
add_value(index, value, depth)
else:
parse_dict(value, depth, options)
def build_header_chain(depth):
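    # e.g. depth=1 -> "* ## value ##\n"; the word "value" is replaced with the key name later.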
chain = list_tag * (bool(depth)) + htag * (depth + 1) + \
' value ' + (htag * (depth + 1) + '\n')
return chain
def build_value_chain(key, value, depth):
chain = tab * (bool(depth - 1)) + list_tag + \
str(key) + ": " + inline_code + str(value) + inline_code + "\n"
return chain
def add_header(value, depth):
chain = build_header_chain(depth)
global markdown
markdown += chain.replace('value', value.title())
def add_value(key, value, depth):
chain = build_value_chain(key, value, depth)
global markdown
markdown += chain
def write_out(markdown, output_file):
with open(output_file, 'w+') as f:
f.write(markdown)
def METHOD_NAME(input_file, output_file, options):
json_data = load_json(input_file)
depth = 0
parse_json(json_data, depth, options)
global markdown
markdown = markdown.replace('#######', '######')
write_out(markdown, output_file)
def main():
parser = argparse.ArgumentParser(description="Json to Markdown converter",
usage='%(prog)s -i $INPUTFILENAME [options]', # noqa
epilog="Ca va bien aller!") # noqa
parser.add_argument('-i', '--input', help='Input filename', required=True)
parser.add_argument('-o', '--output', help='Output filename')
parser.add_argument('-x', '--ignore', help='A list of keys to ignore in a json file')
parser.add_argument('-k', '--keep', help='A list of keys to convert exclusively in a json file')
parser.add_argument('-r', '--replace', help='A list of dict to replace keys values. Not implemented')
args = parser.parse_args()
if args.input is None:
        print('[ERROR] User must specify input')
        sys.exit(1)
else:
input_file = args.input
if args.output is None:
output_file = f'{args.input[:-4]}md'
else:
output_file = args.output
print(f'[INFO] output: {output_file}')
if args.ignore is not None:
keys_to_ignore = load_json(args.ignore)
print(keys_to_ignore)
else:
keys_to_ignore = ''
if args.keep is not None:
keys_to_keep = load_json(args.keep)
print(keys_to_keep)
else:
keys_to_keep = ''
options = dict()
options['ignore'] = keys_to_ignore
options['keep'] = keys_to_keep
print(options)
METHOD_NAME(input_file, output_file, options)
"""
if len(sys.argv) > 1:
input_file = sys.argv[1]
output_file = input_file[:-4] + 'md'
if input_file[-4:] == 'json':
convert(input_file, output_file)
else:
print('Input must be a .json file')
else:
print("[ERROR] You must specify an input file.")
print("Usage: \n python json_to_md.py $JSONFILE" + '\n')
"""
if __name__ == "__main__":
main() | null |
ask pass | """
SSH-MITM Askpass
This is a Python implementation of the SSH-ASKPASS utility which provides a simple
Tkinter-based GUI dialog to obtain a password from the user.
This utility is often used in conjunction with the OpenSSH ssh-agent program to
securely store private keys. The ssh-agent program is able to hold private keys in
memory, and automatically provides the passphrases required to use these keys.
When a key is added to the ssh-agent, it is encrypted and stored in memory, and
ssh-askpass is used to prompt the user for the passphrase required to unlock the key.
This module provides a GUI dialog to obtain a password from the user, as well as a
function to confirm a question with a yes/no answer. The module requires Tkinter to
be installed to function. If Tkinter is not installed, an error message will be logged
and the program will exit with exit code 1.
The main() function is the entry point for the application, and takes an argument list of
messages, which are used as the primary and secondary messages to be displayed in the dialog.
If the first message ends with a question mark, the confirm() function is used, otherwise the
ask_pass() function is used. If the user cancels or closes the dialog,
the program will exit with exit code 1.
"""
import argparse
import logging
import sys
from typing import NoReturn, Optional
try:
import tkinter
from tkinter.simpledialog import askstring
from tkinter import ttk
TKINTER_IMPORTED = True
except ImportError:
TKINTER_IMPORTED = False
def METHOD_NAME(primary_message: str, secondary_message: Optional[str] = None) -> Optional[str]:
"""
This function displays a dialog box for the user to enter a password.
The dialog box has a primary message, and an optional secondary message.
:param primary_message: The primary message to be displayed in the dialog box
:type primary_message: str
:param secondary_message: An optional secondary message to be displayed in the dialog box
:type secondary_message: Optional[str]
:return: The password entered by the user
:rtype: Optional[str]
"""
dialog_text = primary_message
if secondary_message:
dialog_text = "\n".join([primary_message, secondary_message])
password = askstring('SSH-MITM - Askpass', dialog_text, show="*")
if password is not None:
return password
return None
def confirm(primary_message: str, secondary_message: Optional[str] = None) -> bool:
"""
Confirms a question with a yes or no answer.
:param primary_message: The main message to be displayed
:type primary_message: str
:param secondary_message: An optional secondary message to be displayed
:type secondary_message: Optional[str]
:return: True if answer is yes, False otherwise.
:rtype: bool
"""
dialog_text = primary_message
if secondary_message:
dialog_text = "\n".join([primary_message, secondary_message])
answer = tkinter.messagebox.askquestion('SSH-MITM - Askpass', dialog_text, icon='question') # type: ignore
if answer == 'yes':
return True
return False
def main() -> NoReturn:
"""
Main function to run the SSH-ASKPASS implementation.
"""
if not TKINTER_IMPORTED:
logging.error("tkinter not installed!")
sys.exit(1)
parser = argparse.ArgumentParser()
parser.add_argument('messages', nargs='*')
args = parser.parse_args()
lines = " ".join(args.messages).split("\n")
primary_message = lines[0].strip()
if primary_message == "":
primary_message = "ssh-askpass"
secondary_message: Optional[str] = "\n".join(lines[1:]).strip()
if secondary_message == "":
secondary_message = None
root = tkinter.Tk()
root.withdraw()
style = ttk.Style()
style.theme_use('clam')
if primary_message.endswith("?"):
rvalue_ok = confirm(primary_message, secondary_message)
if not rvalue_ok:
sys.exit(1)
else:
result = METHOD_NAME(primary_message, secondary_message)
if result is None:
sys.exit(1)
else:
print(result)
sys.exit(0)
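# Illustrative wiring, not part of the original module (paths are placeholders):
# OpenSSH uses this dialog when SSH_ASKPASS points at the script and no
# terminal is available, e.g. when adding a key to the agent:
#   SSH_ASKPASS=/path/to/ssh_mitm_askpass DISPLAY=:0 ssh-add < /dev/null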
if __name__ == '__main__':
main() | null |
get enterprise plan | from django.core.files.uploadedfile import SimpleUploadedFile
from django_prbac.models import Role
from django.contrib.sessions.middleware import SessionMiddleware
from corehq.apps.accounting.models import (
SoftwarePlan,
SoftwarePlanEdition,
SoftwarePlanVisibility,
SoftwareProductRate,
SoftwarePlanVersion,
)
from corehq.apps.sso import certificates
from corehq.apps.accounting.tests import generator as accounting_gen
from corehq.util.test_utils import unit_testing_only
from corehq.apps.sso.models import (
IdentityProvider,
)
@unit_testing_only
def create_idp(slug, account, include_certs=False):
idp = IdentityProvider(
name=f"Azure AD for {account.name}",
slug=slug,
owner=account,
)
idp.save()
if include_certs:
idp.create_service_provider_certificate()
idp.entity_id = "https://testidp.com/saml2/entity_id"
idp.login_url = "https://testidp.com/saml2/login"
idp.logout_url = "https://testidp.com/saml2/logout"
key_pair = certificates.create_key_pair()
cert = certificates.create_self_signed_cert(key_pair)
idp.idp_cert_public = certificates.get_public_key(cert)
idp.date_idp_cert_expiration = certificates.get_expiration_date(cert)
idp.save()
return idp
@unit_testing_only
def get_billing_account_for_idp():
billing_contact = accounting_gen.create_arbitrary_web_user_name()
dimagi_user = accounting_gen.create_arbitrary_web_user_name(is_dimagi=True)
return accounting_gen.billing_account(
dimagi_user, billing_contact, is_customer_account=True
)
@unit_testing_only
def METHOD_NAME():
enterprise_plan = SoftwarePlan.objects.create(
name="Helping Earth INGO Enterprise Plan",
description="Enterprise plan for Helping Earth",
edition=SoftwarePlanEdition.ENTERPRISE,
visibility=SoftwarePlanVisibility.INTERNAL,
is_customer_software_plan=True,
)
first_product_rate = SoftwareProductRate.objects.create(
monthly_fee=3000,
name="HQ Enterprise"
)
return SoftwarePlanVersion.objects.create(
plan=enterprise_plan,
role=Role.objects.first(),
product_rate=first_product_rate
)
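# Sketch of how these helpers are typically combined in a test (the slug is a
# made-up example; both helpers are defined in this module):
#   account = get_billing_account_for_idp()
#   idp = create_idp('azure-test', account, include_certs=True)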
@unit_testing_only
def create_request_session(request, use_saml_sso=False, use_oidc_sso=False):
def get_response(request):
raise AssertionError("should not get here")
SessionMiddleware(get_response).process_request(request)
request.session.save()
if use_saml_sso:
request.session['samlSessionIndex'] = '_7c84c96e-8774-4e64-893c-06f91d285100'
if use_oidc_sso:
request.session["oidc_state"] = '_7c84c96e-8774-4e64-893c-06f91d285100'
@unit_testing_only
def store_full_name_in_saml_user_data(request, first_name, last_name):
request.session['samlUserdata'] = {
'http://schemas.xmlsoap.org/ws/2005/05/identity/claims/givenname': [first_name],
'http://schemas.xmlsoap.org/ws/2005/05/identity/claims/surname': [last_name],
}
@unit_testing_only
def store_display_name_in_saml_user_data(request, display_name):
request.session['samlUserdata'] = {
'http://schemas.microsoft.com/identity/claims/displayname': [display_name],
}
@unit_testing_only
def store_full_name_in_oidc_user_data(request, first_name, last_name):
request.session['oidcUserData'] = {
'given_name': first_name,
'family_name': last_name,
}
@unit_testing_only
def store_display_name_in_oidc_user_data(request, display_name):
request.session['oidcUserData'] = {
'name': display_name,
}
@unit_testing_only
def get_public_cert_file(expiration_in_seconds=certificates.DEFAULT_EXPIRATION):
key_pair = certificates.create_key_pair()
cert = certificates.create_self_signed_cert(
key_pair,
expiration_in_seconds
)
cert_bytes = certificates.crypto.dump_certificate(
certificates.crypto.FILETYPE_PEM,
cert
)
return SimpleUploadedFile(
"certificate.cer",
cert_bytes,
content_type="application/x-x509-ca-cert",
)
@unit_testing_only
def get_bad_cert_file(bad_cert_data):
return SimpleUploadedFile(
"certificate.cer",
bad_cert_data,
content_type="application/x-x509-ca-cert",
) | null |
allowed transition names and token types | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Modifications:
# Copyright David Halter and Contributors
# Modifications are dual-licensed: MIT and PSF.
# 99% of the code is different from pgen2, now.
"""
The ``Parser`` tries to convert the available Python code into an easy-to-read
format, something like an abstract syntax tree. The classes that represent this
tree are sitting in the :mod:`parso.tree` module.
The Python module ``tokenize`` is a very important part of the ``Parser``,
because it splits the code into different words (tokens). Sometimes it looks a
bit messy. Sorry for that! You might ask now: "Why didn't you use the ``ast``
module for this?" Well, ``ast`` does a very good job understanding proper Python
code, but fails to work as soon as there's a single line of broken code.
There's one important optimization that needs to be known: Statements are not
being parsed completely. ``Statement`` is just a representation of the tokens
within the statement. This lowers memory usage and CPU time and reduces the
complexity of the ``Parser`` (there's another parser sitting inside
``Statement``, which produces ``Array`` and ``Call``).
"""
from typing import Dict, Type
from parso import tree
from parso.pgen2.generator import ReservedString
class ParserSyntaxError(Exception):
"""
Contains error information about the parser tree.
May be raised as an exception.
"""
def __init__(self, message, error_leaf):
self.message = message
self.error_leaf = error_leaf
class InternalParseError(Exception):
"""
Exception to signal the parser is stuck and error recovery didn't help.
Basically this shouldn't happen. It's a sign that something is really
wrong.
"""
def __init__(self, msg, type_, value, start_pos):
Exception.__init__(self, "%s: type=%r, value=%r, start_pos=%r" %
(msg, type_.name, value, start_pos))
self.msg = msg
self.type = type_
self.value = value
self.start_pos = start_pos
class Stack(list):
def METHOD_NAME(self):
def iterate():
# An API just for Jedi.
for stack_node in reversed(self):
for transition in stack_node.dfa.transitions:
if isinstance(transition, ReservedString):
yield transition.value
else:
yield transition # A token type
if not stack_node.dfa.is_final:
break
return list(iterate())
class StackNode:
def __init__(self, dfa):
self.dfa = dfa
self.nodes = []
@property
def nonterminal(self):
return self.dfa.from_rule
def __repr__(self):
return '%s(%s, %s)' % (self.__class__.__name__, self.dfa, self.nodes)
def _token_to_transition(grammar, type_, value):
# Map from token to label
if type_.value.contains_syntax:
# Check for reserved words (keywords)
try:
return grammar.reserved_syntax_strings[value]
except KeyError:
pass
return type_
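# Illustrative behaviour of the mapping above: a NAME token whose value is a
# keyword such as "if" resolves to the grammar's ReservedString for "if", while
# a NAME token with value "foo" misses reserved_syntax_strings and the token
# type itself is returned as the transition.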
class BaseParser:
"""Parser engine.
A Parser instance contains state pertaining to the current token
sequence, and should not be used concurrently by different threads
to parse separate token sequences.
See python/tokenize.py for how to get input tokens by a string.
When a syntax error occurs, error_recovery() is called.
"""
node_map: Dict[str, Type[tree.BaseNode]] = {}
default_node = tree.Node
leaf_map: Dict[str, Type[tree.Leaf]] = {}
default_leaf = tree.Leaf
def __init__(self, pgen_grammar, start_nonterminal='file_input', error_recovery=False):
self._pgen_grammar = pgen_grammar
self._start_nonterminal = start_nonterminal
self._error_recovery = error_recovery
def parse(self, tokens):
first_dfa = self._pgen_grammar.nonterminal_to_dfas[self._start_nonterminal][0]
self.stack = Stack([StackNode(first_dfa)])
for token in tokens:
self._add_token(token)
while True:
tos = self.stack[-1]
if not tos.dfa.is_final:
# We never broke out -- EOF is too soon -- Unfinished statement.
# However, the error recovery might have added the token again, if
# the stack is empty, we're fine.
raise InternalParseError(
"incomplete input", token.type, token.string, token.start_pos
)
if len(self.stack) > 1:
self._pop()
else:
return self.convert_node(tos.nonterminal, tos.nodes)
def error_recovery(self, token):
if self._error_recovery:
raise NotImplementedError("Error Recovery is not implemented")
else:
type_, value, start_pos, prefix = token
error_leaf = tree.ErrorLeaf(type_, value, start_pos, prefix)
raise ParserSyntaxError('SyntaxError: invalid syntax', error_leaf)
def convert_node(self, nonterminal, children):
try:
node = self.node_map[nonterminal](children)
except KeyError:
node = self.default_node(nonterminal, children)
return node
def convert_leaf(self, type_, value, prefix, start_pos):
try:
return self.leaf_map[type_](value, start_pos, prefix)
except KeyError:
return self.default_leaf(value, start_pos, prefix)
def _add_token(self, token):
"""
This is the only core function for parsing. Here happens basically
everything. Everything is well prepared by the parser generator and we
only apply the necessary steps here.
"""
grammar = self._pgen_grammar
stack = self.stack
type_, value, start_pos, prefix = token
transition = _token_to_transition(grammar, type_, value)
while True:
try:
plan = stack[-1].dfa.transitions[transition]
break
except KeyError:
if stack[-1].dfa.is_final:
self._pop()
else:
self.error_recovery(token)
return
except IndexError:
raise InternalParseError("too much input", type_, value, start_pos)
stack[-1].dfa = plan.next_dfa
for push in plan.dfa_pushes:
stack.append(StackNode(push))
leaf = self.convert_leaf(type_, value, prefix, start_pos)
stack[-1].nodes.append(leaf)
def _pop(self):
tos = self.stack.pop()
# If there's exactly one child, return that child instead of
# creating a new node. We still create expr_stmt and
# file_input though, because a lot of Jedi depends on its
# logic.
if len(tos.nodes) == 1:
new_node = tos.nodes[0]
else:
new_node = self.convert_node(tos.dfa.from_rule, tos.nodes)
self.stack[-1].nodes.append(new_node) | null |
setup method | from numpy.testing import (assert_, assert_array_equal)
import numpy as np
import pytest
from numpy.random import Generator, MT19937
class TestRegression:
def METHOD_NAME(self):
self.mt19937 = Generator(MT19937(121263137472525314065))
def test_vonmises_range(self):
# Make sure generated random variables are in [-pi, pi].
# Regression test for ticket #986.
for mu in np.linspace(-7., 7., 5):
r = self.mt19937.vonmises(mu, 1, 50)
assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
def test_hypergeometric_range(self):
# Test for ticket #921
assert_(np.all(self.mt19937.hypergeometric(3, 18, 11, size=10) < 4))
assert_(np.all(self.mt19937.hypergeometric(18, 3, 11, size=10) > 0))
# Test for ticket #5623
args = (2**20 - 2, 2**20 - 2, 2**20 - 2) # Check for 32-bit systems
assert_(self.mt19937.hypergeometric(*args) > 0)
def test_logseries_convergence(self):
# Test for ticket #923
N = 1000
rvsn = self.mt19937.logseries(0.8, size=N)
# these two frequency counts should be close to theoretical
# numbers with this large sample
# theoretical large N result is 0.49706795
freq = np.sum(rvsn == 1) / N
msg = f'Frequency was {freq:f}, should be > 0.45'
assert_(freq > 0.45, msg)
# theoretical large N result is 0.19882718
freq = np.sum(rvsn == 2) / N
msg = f'Frequency was {freq:f}, should be < 0.23'
assert_(freq < 0.23, msg)
def test_shuffle_mixed_dimension(self):
# Test for trac ticket #2074
for t in [[1, 2, 3, None],
[(1, 1), (2, 2), (3, 3), None],
[1, (2, 2), (3, 3), None],
[(1, 1), 2, 3, None]]:
mt19937 = Generator(MT19937(12345))
shuffled = np.array(t, dtype=object)
mt19937.shuffle(shuffled)
expected = np.array([t[2], t[0], t[3], t[1]], dtype=object)
assert_array_equal(np.array(shuffled, dtype=object), expected)
def test_call_within_randomstate(self):
# Check that custom BitGenerator does not call into global state
res = np.array([1, 8, 0, 1, 5, 3, 3, 8, 1, 4])
for i in range(3):
mt19937 = Generator(MT19937(i))
m = Generator(MT19937(4321))
# If m.state is not honored, the result will change
assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res)
def test_multivariate_normal_size_types(self):
# Test for multivariate_normal issue with 'size' argument.
# Check that the multivariate_normal size argument can be a
# numpy integer.
self.mt19937.multivariate_normal([0], [[0]], size=1)
self.mt19937.multivariate_normal([0], [[0]], size=np.int_(1))
self.mt19937.multivariate_normal([0], [[0]], size=np.int64(1))
def test_beta_small_parameters(self):
# Test that beta with small a and b parameters does not produce
# NaNs due to roundoff errors causing 0 / 0, gh-5851
x = self.mt19937.beta(0.0001, 0.0001, size=100)
assert_(not np.any(np.isnan(x)), 'Nans in mt19937.beta')
def test_beta_very_small_parameters(self):
# gh-24203: beta would hang with very small parameters.
self.mt19937.beta(1e-49, 1e-40)
def test_beta_ridiculously_small_parameters(self):
# gh-24266: beta would generate nan when the parameters
# were subnormal or a small multiple of the smallest normal.
tiny = np.finfo(1.0).tiny
x = self.mt19937.beta(tiny/32, tiny/40, size=50)
assert not np.any(np.isnan(x))
def test_choice_sum_of_probs_tolerance(self):
# The sum of probs should be 1.0 with some tolerance.
# For low precision dtypes the tolerance was too tight.
# See numpy github issue 6123.
a = [1, 2, 3]
counts = [4, 4, 2]
for dt in np.float16, np.float32, np.float64:
probs = np.array(counts, dtype=dt) / sum(counts)
c = self.mt19937.choice(a, p=probs)
assert_(c in a)
with pytest.raises(ValueError):
self.mt19937.choice(a, p=probs*0.9)
def test_shuffle_of_array_of_different_length_strings(self):
# Test that permuting an array of different length strings
# will not cause a segfault on garbage collection
# Tests gh-7710
a = np.array(['a', 'a' * 1000])
for _ in range(100):
self.mt19937.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
gc.collect()
def test_shuffle_of_array_of_objects(self):
# Test that permuting an array of objects will not cause
# a segfault on garbage collection.
# See gh-7719
a = np.array([np.arange(1), np.arange(4)], dtype=object)
for _ in range(1000):
self.mt19937.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
gc.collect()
def test_permutation_subclass(self):
class N(np.ndarray):
pass
mt19937 = Generator(MT19937(1))
orig = np.arange(3).view(N)
perm = mt19937.permutation(orig)
assert_array_equal(perm, np.array([2, 0, 1]))
assert_array_equal(orig, np.arange(3).view(N))
class M:
a = np.arange(5)
def __array__(self):
return self.a
mt19937 = Generator(MT19937(1))
m = M()
perm = mt19937.permutation(m)
assert_array_equal(perm, np.array([4, 1, 3, 0, 2]))
assert_array_equal(m.__array__(), np.arange(5))
def test_gamma_0(self):
assert self.mt19937.standard_gamma(0.0) == 0.0
assert_array_equal(self.mt19937.standard_gamma([0.0]), 0.0)
actual = self.mt19937.standard_gamma([0.0], dtype='float')
expected = np.array([0.], dtype=np.float32)
assert_array_equal(actual, expected)
def test_geometric_tiny_prob(self):
# Regression test for gh-17007.
# When p = 1e-30, the probability that a sample will exceed 2**63-1
# is 0.9999999999907766, so we expect the result to be all 2**63-1.
assert_array_equal(self.mt19937.geometric(p=1e-30, size=3),
np.iinfo(np.int64).max) | null |
eeg positions | import os
import warnings
import csv
import numpy as np
from .file_finder import SubjectFiles
def read_csv_positions(fn):
''' Reads positions from a .csv file
Parameters
------------
fn: str
Name of csv file
Returns
--------
type: list
Type of position in each row ('Generic', 'Fiducial', 'Electrode', 'ReferenceElectrode' or
'Coil')
coordinates: list
Coordinates in each row
extra: list
extra coordinates in each row (eg: electrode or coil axes)
name: list
Name of position
extra_cols: list
Any extra information stored in the columns
header: str
Any information in the header
'''
with open(os.path.expanduser(fn), 'r') as f:
reader = csv.reader(f)
rows = [row for row in reader]
if len(rows[-1]) < 4:
raise IOError('Each row in the CSV file must have at least 4 columns')
coordinates = []
try:
float(rows[0][1])
header = []
start = 0
except:
header = rows[0]
start = 1
rows = rows[start:]
type_ = [r[0] for r in rows]
extra = []
name = []
extra_cols = []
type_filtered = []
rows_filtered = []
for t, r, i in zip(type_, rows, range(len(type_))):
if t in ['Generic', 'Fiducial'] or len(r) == 4:
name += [r[4] if len(r) >= 5 else None]
extra_cols += [r[5:] if len(r) > 5 else None]
extra += [None]
type_filtered.append(t)
rows_filtered.append(r)
elif t in ['Electrode', 'ReferenceElectrode']:
try:
extra_ = np.array(r[4:7], float)
assert len(extra_) == 3
extra += [extra_]
name += [r[7] if len(r) >= 8 else None]
extra_cols += [r[8:] if len(r) > 8 else None]
except:
extra += [None]
name += [r[4] if len(r) >= 5 else None]
extra_cols += [r[5:] if len(r) > 5 else None]
type_filtered.append(t)
rows_filtered.append(r)
elif t == 'CoilPos':
extra += [np.array([float(d) for d in r[4:11]])]
name += [r[11] if len(r) >= 12 else None]
extra_cols += [r[12:] if len(r) > 12 else None]
type_filtered.append(t)
rows_filtered.append(r)
else:
warnings.warn('Unrecognized column type: {0}'.format(t))
type_ = type_filtered
rows = rows_filtered
try:
coordinates = np.array(
[[float(d) for d in r[1:4]] for r in rows],
dtype=float)
except:
raise IOError('Could not read coordinates from CSV file')
return type_, coordinates, extra, name, extra_cols, header
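# Illustrative rows for the layout parsed above (all numbers are made up):
#   Fiducial,-0.5,85.0,-40.0,Nz -> type, x, y, z, name
#   Electrode,1.2,3.4,5.6,0.0,0.0,1.0,C3 -> type, coordinates, electrode axis, name
#   CoilPos rows carry seven extra floats (columns 5-11) plus an optional name.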
def write_csv_positions(filename, types, coordinates, name, extra=None, extra_cols=None, header=None):
''' Write positions to a .csv file
Parameters
------------
fn: str
Name of csv file
type: list
Type of position in each row ('Generic', 'Fiducial', 'Electrode', 'ReferenceElectrode' or
'Coil')
coordinates: numpy array
Coordinates in each row
name: list
Name of position
extra: list
extra coordinates in each row (eg: electrode or coil axes)
extra_cols: list
Any extra information stored in the columns
header: str
Any information in the header
'''
n = len(types)
coordinates = coordinates.tolist()
name = [[n] if n else [] for n in name]
if extra is None:
extra = [None]*n
extra = [[] if e is None else e.tolist() for e in extra]
if extra_cols is None:
extra_cols = [None]*n
extra_cols = [e_c or [] for e_c in extra_cols]
if header is None:
header = []
with open(filename, 'w', newline='') as f:
writer = csv.writer(f)
if header != []:
writer.writerow(header)
for t, c, e, n, e_c in zip(types, coordinates, extra, name, extra_cols):
writer.writerow([t] + c + e + n + e_c)
def _get_eeg_positions(fn_csv):
if not os.path.isfile(fn_csv):
raise IOError('Could not find EEG cap file: {0}'.format(fn_csv))
type_, coordinates, _, name, _, _ = read_csv_positions(fn_csv)
eeg_pos = {}
for i, t in enumerate(type_):
if t in ['Electrode', 'ReferenceElectrode', 'Fiducial']:
eeg_pos[name[i]] = coordinates[i]
return eeg_pos
def METHOD_NAME(m2m_folder, cap_name='EEG10-10_UI_Jurak_2007.csv'):
''' Returns a dictionary with EEG electrode positions
Parameters
-----------
m2m_folder: str
Path to the m2m_{subject_id} folder, generated during the segmentation
cap_name: str
Name of EEG cap. Default: 'EEG10-10_UI_Jurak_2007.csv'
Returns
--------
eeg_caps: dict
Dictionary with cap position
'''
sub_files = SubjectFiles(subpath=m2m_folder)
if not cap_name.endswith('.csv'):
cap_name += '.csv'
fn_cap = sub_files.get_eeg_cap(cap_name)
return _get_eeg_positions(fn_cap)
| null |
sample predictor configs | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../')
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
from functools import partial
import numpy as np
import hypothesis
from hypothesis import given, settings, seed, example, assume
import hypothesis.strategies as st
import argparse
class TestSequenceConvOp(AutoScanTest):
def __init__(self, *args, **kwargs):
AutoScanTest.__init__(self, *args, **kwargs)
self.enable_testing_on_place(
TargetType.X86,
PrecisionType.FP32,
DataLayoutType.NCHW,
thread=[1, 4])
self.enable_testing_on_place(
TargetType.ARM,
PrecisionType.FP32,
DataLayoutType.NCHW,
thread=[1, 4])
self.enable_testing_on_place(
TargetType.ARM,
PrecisionType.FP16,
DataLayoutType.NCHW,
thread=[1, 4])
def is_program_valid(self,
program_config: ProgramConfig,
predictor_config: CxxConfig) -> bool:
return True
def sample_program_configs(self, draw):
context_start = draw(st.sampled_from([-2, -1, 0]))
context_stride = 1
context_length = 3
kernel_num = draw(st.integers(min_value=2, max_value=8))
in_dims = draw(
st.lists(
st.integers(
min_value=4, max_value=100),
min_size=2,
max_size=2))
filter_dims = [context_length * in_dims[1], kernel_num]
lod_info = draw(st.sampled_from([[[0, 4]], [[0, 2, 4]]]))
padding_trainable = draw(st.booleans())
assume(context_stride == 1)
assume(len(in_dims) == 2 and len(filter_dims) == 2)
if padding_trainable:
print('paddingTrainable == True is not supported by now.')
assume(padding_trainable == False)
def generate_input(*args, **kwargs):
return np.random.random(in_dims).astype(np.float32)
def generate_filter(*args, **kwargs):
return np.random.random(filter_dims).astype(np.float32)
def generate_padding(*args, **kwargs):
begin_pad = np.max([0, -context_start])
end_pad = np.max([0, context_start + context_length - 1])
total_pad = begin_pad + end_pad
return np.random.uniform(0.1, 1,
[total_pad, in_dims[1]]).astype('float32')
inputs_dict = {"X": ["input_data"], "Filter": ["filter_data"]}
inputs_gen_dict = {
"input_data": TensorConfig(
data_gen=partial(generate_input), lod=lod_info)
}
if padding_trainable:
inputs_dict["PaddingData"] = ["padding_data"]
inputs_gen_dict["padding_data"] = TensorConfig(
data_gen=partial(generate_padding))
sequence_conv_op = OpConfig(
type="sequence_conv",
inputs=inputs_dict,
outputs={"Out": ["output_data"]},
attrs={
"contextStart": context_start,
"contextStride": context_stride,
"contextLength": context_length,
"paddingTrainable": padding_trainable
})
program_config = ProgramConfig(
ops=[sequence_conv_op],
weights={
"filter_data": TensorConfig(data_gen=partial(generate_filter))
},
inputs=inputs_gen_dict,
outputs=["output_data"])
return program_config
def METHOD_NAME(self):
return self.get_predictor_configs(), ["sequence_conv"], (1e-5, 1e-5)
def add_ignore_pass_case(self):
pass
def test(self, *args, **kwargs):
self.run_and_statis(quant=False, max_examples=100)
if __name__ == "__main__":
unittest.main(argv=['']) | null |
get byd bat power | #!/usr/bin/env python3
import logging
from typing import Dict, Union, Optional, List
from dataclass_utils import dataclass_from_dict
from helpermodules.cli import run_using_positional_cli_args
from modules.devices.byd.config import BYD, BYDBatSetup, BYDConfiguration
from modules.devices.byd.device import Device as BYDDevice
from modules.common.abstract_device import AbstractDevice, DeviceDescriptor
from modules.common.component_context import SingleComponentUpdateContext
from modules.common.component_state import CounterState
from modules.common.simcount import sim_count
from modules.common.store import get_counter_value_store
from modules.devices.kostal_piko import counter
from modules.devices.kostal_piko import inverter
from modules.devices.kostal_piko.config import (KostalPiko,
KostalPikoConfiguration,
KostalPikoCounterSetup, KostalPikoInverterConfiguration,
KostalPikoInverterSetup)
log = logging.getLogger(__name__)
kostal_piko_component_classes = Union[counter.KostalPikoCounter, inverter.KostalPikoInverter]
class Device(AbstractDevice):
COMPONENT_TYPE_TO_CLASS = {
"counter": counter.KostalPikoCounter,
"inverter": inverter.KostalPikoInverter
}
def __init__(self, device_config: Union[Dict, KostalPiko]) -> None:
self.components = {} # type: Dict[str, kostal_piko_component_classes]
try:
self.device_config = dataclass_from_dict(KostalPiko, device_config)
except Exception:
log.exception("Fehler im Modul "+self.device_config.name)
def add_component(self, component_config: Union[Dict, KostalPikoCounterSetup, KostalPikoInverterSetup]) -> None:
if isinstance(component_config, Dict):
component_type = component_config["type"]
else:
component_type = component_config.type
component_config = dataclass_from_dict(COMPONENT_TYPE_TO_MODULE[
component_type].component_descriptor.configuration_factory, component_config)
if component_type in self.COMPONENT_TYPE_TO_CLASS:
self.components["component"+str(component_config.id)] = (self.COMPONENT_TYPE_TO_CLASS[component_type](
self.device_config.id, component_config, self.device_config.configuration.ip_address))
else:
raise Exception(
"illegal component type " + component_type + ". Allowed values: " +
','.join(self.COMPONENT_TYPE_TO_CLASS.keys())
)
def update(self) -> None:
log.debug("Start device reading " + str(self.components))
if self.components:
for component in self.components:
# Even if an error occurs in one component, all the others should still be read.
with SingleComponentUpdateContext(self.components[component].component_info):
self.components[component].update()
else:
log.warning(
self.device_config.name +
": Es konnten keine Werte gelesen werden, da noch keine Komponenten konfiguriert wurden."
)
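# Minimal usage sketch (the IP address is a documentation placeholder;
# read_legacy() below follows the same pattern):
#   dev = Device(KostalPiko(configuration=KostalPikoConfiguration(ip_address='192.0.2.10')))
#   dev.add_component(KostalPikoInverterSetup(id=1))
#   dev.update()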
COMPONENT_TYPE_TO_MODULE = {
"counter": counter,
"inverter": inverter
}
def read_legacy(component_type: str,
address: str,
bat_module: str,
bat_ip: str,
bat_username: str,
bat_password: str,
num: Optional[int] = None) -> None:
dev = Device(KostalPiko(configuration=KostalPikoConfiguration(ip_address=address)))
if component_type in COMPONENT_TYPE_TO_MODULE:
component_config = COMPONENT_TYPE_TO_MODULE[component_type].component_descriptor.configuration_factory()
else:
raise Exception(
"illegal component type " + component_type + ". Allowed values: " +
','.join(COMPONENT_TYPE_TO_MODULE.keys())
)
component_config.id = num
if isinstance(component_config, KostalPikoInverterSetup) and bat_module != "none":
component_config.configuration.bat_configured = True
dev.add_component(component_config)
log.debug('KostalPiko IP address: ' + address)
log.debug('KostalPiko battery storage: ' + bat_module)
if isinstance(component_config, KostalPikoInverterSetup):
dev.update()
elif isinstance(component_config, KostalPikoCounterSetup):
with SingleComponentUpdateContext(dev.components["componentNone"].component_info):
home_consumption, powers = dev.components["componentNone"].get_values()
if bat_module == "speicher_bydhv":
bat_power = METHOD_NAME(bat_ip, bat_username, bat_password, 1)
home_consumption += bat_power
dev.add_component(KostalPikoInverterSetup(
id=1, configuration=KostalPikoInverterConfiguration(bat_configured=True)))
inverter_power, _ = dev.components["component"+str(1)].update()
power = home_consumption + inverter_power
imported, exported = sim_count(power, prefix="bezug")
counter_state = CounterState(
imported=imported,
exported=exported,
power=power,
powers=powers
)
get_counter_value_store(None).set(counter_state)
def METHOD_NAME(bat_ip: str, bat_username: str, bat_password: str, num: int) -> float:
bat_dev = BYDDevice(BYD(configuration=BYDConfiguration(user=bat_username,
password=bat_password,
ip_address=bat_ip)))
bat_dev.add_component(BYDBatSetup(id=num))
bat_power, _ = bat_dev.components["component"+str(num)].get_values()
return bat_power
def main(argv: List[str]):
run_using_positional_cli_args(read_legacy, argv)
device_descriptor = DeviceDescriptor(configuration_factory=KostalPiko) | null |
add addon | import uuid
from django.conf import settings
from django.db import models
from django.urls import reverse
from olympia import activity, amo
from olympia.addons.models import Addon
from olympia.amo.fields import PositiveAutoField
from olympia.amo.models import ManagerBase, ModelBase
from olympia.translations.fields import (
LinkifiedField,
NoURLsField,
TranslatedField,
save_signal,
)
from olympia.users.models import UserProfile
class CollectionManager(ManagerBase):
def get_queryset(self):
qs = super().get_queryset()
return qs.transform(Collection.transformer)
class Collection(ModelBase):
id = PositiveAutoField(primary_key=True)
uuid = models.UUIDField(blank=True, unique=True, null=True)
name = TranslatedField(require_locale=False, max_length=50)
slug = models.CharField(max_length=30, blank=True, null=True)
# description can (and sometimes does) contain html and other unsanitized
# content. It must be cleaned before display - NoURLsField just strips the
# URL without doing any escaping.
description = NoURLsField(require_locale=False, max_length=280)
default_locale = models.CharField(
max_length=10, default='en-US', db_column='defaultlocale'
)
listed = models.BooleanField(
default=True, help_text='Collections are either listed or private.'
)
addon_count = models.PositiveIntegerField(default=0, db_column='addonCount')
addons = models.ManyToManyField(
Addon, through='CollectionAddon', related_name='collections'
)
author = models.ForeignKey(
UserProfile, null=True, related_name='collections', on_delete=models.CASCADE
)
objects = CollectionManager()
class Meta(ModelBase.Meta):
db_table = 'collections'
indexes = [
models.Index(fields=('created',), name='collections_created_idx'),
models.Index(fields=('listed',), name='collections_listed_idx'),
models.Index(fields=('slug',), name='collections_slug_idx'),
]
constraints = [
models.UniqueConstraint(fields=('author', 'slug'), name='author_id'),
]
def __str__(self):
return f'{self.name} ({self.addon_count})'
def save(self, **kw):
if not self.uuid:
self.uuid = uuid.uuid4()
if not self.slug:
# Work with both, strings (if passed manually on .create()
# and UUID instances)
self.slug = str(self.uuid).replace('-', '')[:30]
self.clean_slug()
super().save(**kw)
def clean_slug(self):
if not self.author:
return
qs = self.author.collections.using('default')
slugs = {slug: id for slug, id in qs.values_list('slug', 'id')}
if self.slug in slugs and slugs[self.slug] != self.id:
for idx in range(len(slugs)):
new = f'{self.slug}-{idx + 1}'
if new not in slugs:
self.slug = new
return
def get_url_path(self):
return reverse('collections.detail', args=[self.author_id, self.slug])
@classmethod
def get_fallback(cls):
return cls._meta.get_field('default_locale')
def METHOD_NAME(self, addon):
CollectionAddon.objects.get_or_create(addon=addon, collection=self)
def remove_addon(self, addon):
CollectionAddon.objects.filter(addon=addon, collection=self).delete()
def owned_by(self, user):
return user.id == self.author_id
def is_public(self):
return self.listed
@staticmethod
def transformer(collections):
if not collections:
return
author_ids = {c.author_id for c in collections}
authors = {u.id: u for u in UserProfile.objects.filter(id__in=author_ids)}
for c in collections:
c.author = authors.get(c.author_id)
@staticmethod
def post_save(sender, instance, **kwargs):
from . import tasks
if kwargs.get('raw'):
return
tasks.collection_meta.delay(instance.id)
models.signals.post_save.connect(
Collection.post_save, sender=Collection, dispatch_uid='coll.post_save'
)
models.signals.pre_save.connect(
save_signal, sender=Collection, dispatch_uid='coll_translations'
)
class CollectionAddon(ModelBase):
id = PositiveAutoField(primary_key=True)
addon = models.ForeignKey(Addon, on_delete=models.CASCADE)
collection = models.ForeignKey(Collection, on_delete=models.CASCADE)
# category (deprecated: for "Fashion Your Firefox")
comments = LinkifiedField(null=True, max_length=280)
user = models.ForeignKey(UserProfile, null=True, on_delete=models.CASCADE)
ordering = models.PositiveIntegerField(
default=0,
help_text='Add-ons are displayed in ascending order based on this field.',
)
class Meta(ModelBase.Meta):
db_table = 'addons_collections'
indexes = [
models.Index(
fields=('collection', 'created'), name='addons_collections_created_idx'
),
models.Index(fields=('addon',), name='addons_collections_addon_idx'),
models.Index(fields=('collection',), name='collection_id'),
models.Index(fields=('user',), name='addons_collections_user_id'),
]
constraints = [
models.UniqueConstraint(fields=('addon', 'collection'), name='addon_id_2'),
]
@staticmethod
def post_save(sender, instance, **kwargs):
"""Update Collection.addon_count and reindex add-on if the collection
is featured."""
from olympia.addons.tasks import index_addons
if kwargs.get('raw'):
return
if instance.collection.listed:
activity.log_create(
amo.LOG.ADD_TO_COLLECTION, instance.addon, instance.collection
)
kwargs['addons'] = [instance.addon]
Collection.post_save(sender, instance.collection, **kwargs)
if instance.collection.id == settings.COLLECTION_FEATURED_THEMES_ID:
# That collection is special: each add-on in it is considered
# recommended, so we need to index the corresponding add-on.
# (Note: we are considering the add-on in a given CollectionAddon
# never changes, to change add-ons belonging to a collection we
# add or remove CollectionAddon instances, we never modify the
# addon foreignkey of an existing instance).
index_addons.delay([instance.addon.id])
@staticmethod
def post_delete(sender, instance, **kwargs):
from olympia.addons.tasks import index_addons
if kwargs.get('raw'):
return
if instance.collection.listed:
activity.log_create(
amo.LOG.REMOVE_FROM_COLLECTION, instance.addon, instance.collection
)
kwargs['addons'] = [instance.addon]
Collection.post_save(sender, instance.collection, **kwargs)
if instance.collection.id == settings.COLLECTION_FEATURED_THEMES_ID:
# That collection is special: each add-on in it is considered
# recommended, so we need to index the add-on we just removed from
# it.
index_addons.delay([instance.addon.id])
models.signals.pre_save.connect(
save_signal, sender=CollectionAddon, dispatch_uid='coll_addon_translations'
)
# Update Collection.addon_count when a collectionaddon changes.
models.signals.post_save.connect(
CollectionAddon.post_save, sender=CollectionAddon, dispatch_uid='coll.post_save'
)
models.signals.post_delete.connect(
CollectionAddon.post_delete, sender=CollectionAddon, dispatch_uid='coll.post_delete'
) | null |
test and or | # SPDX-License-Identifier: MPL-2.0
# Copyright (C) 2020 - 2021 Vereniging van Nederlandse Gemeenten, Gemeente Amsterdam
import copy
import time
from datetime import datetime
from django.contrib.gis import geos
from django.test import TestCase
from freezegun import freeze_time
from signals.apps.dsl.ExpressionEvaluator import ExpressionEvaluator
class DslTest(TestCase):
def setUp(self):
self.compiler = ExpressionEvaluator()
poly = geos.Polygon(
((0.0, 0.0), (0.0, 50.0), (50.0, 50.0), (50.0, 0.0), (0.0, 0.0))
)
self.context = {
'testint': 1,
'location_1': geos.Point(66, 66),
'location_2': geos.Point(1, 1),
'maincat': 'dieren',
'subcat': 'subcat',
'time': time.strptime("16:00:00", "%H:%M:%S"),
'day': datetime.now().strftime("%A"),
'area': {
'stadsdeel': {
'oost': geos.MultiPolygon(poly)
}
},
'listval': 'geo1',
'list': set(['geo1', 'geo2'])
}
def test_numeric_operations(self):
c = self.compiler
self.assertTrue(c.compile('testint == 1').evaluate(self.context))
self.assertFalse(c.compile('testint == 0').evaluate(self.context))
self.assertFalse(c.compile('testint != 1').evaluate(self.context))
self.assertTrue(c.compile('testint != 0').evaluate(self.context))
self.assertFalse(c.compile('testint > 1').evaluate(self.context))
self.assertTrue(c.compile('testint >= 1').evaluate(self.context))
self.assertTrue(c.compile('testint > 0').evaluate(self.context))
self.assertTrue(c.compile('testint >= 0').evaluate(self.context))
self.assertFalse(c.compile('testint > 2').evaluate(self.context))
self.assertFalse(c.compile('testint >= 2').evaluate(self.context))
self.assertFalse(c.compile('testint < 1').evaluate(self.context))
self.assertTrue(c.compile('testint <= 1').evaluate(self.context))
self.assertFalse(c.compile('testint < 0').evaluate(self.context))
self.assertFalse(c.compile('testint <= 0').evaluate(self.context))
self.assertTrue(c.compile('testint < 2').evaluate(self.context))
self.assertTrue(c.compile('testint <= 2').evaluate(self.context))
def test_time_operations(self):
c = self.compiler
self.assertTrue(c.compile('time == 16:00:00').evaluate(self.context))
self.assertFalse(c.compile('time == 16:00:01').evaluate(self.context))
self.assertFalse(c.compile('time != 16:00:00').evaluate(self.context))
self.assertTrue(c.compile('time != 16:00:01').evaluate(self.context))
self.assertFalse(c.compile('time > 16:00:00').evaluate(self.context))
self.assertTrue(c.compile('time >= 16:00:00').evaluate(self.context))
self.assertTrue(c.compile('time > 15:00:00').evaluate(self.context))
self.assertTrue(c.compile('time >= 15:00:00').evaluate(self.context))
self.assertFalse(c.compile('time > 16:00:01').evaluate(self.context))
self.assertFalse(c.compile('time >= 16:00:01').evaluate(self.context))
self.assertFalse(c.compile('time < 16:00:00').evaluate(self.context))
self.assertTrue(c.compile('time <= 16:00:00').evaluate(self.context))
self.assertFalse(c.compile('time < 15:00:00').evaluate(self.context))
self.assertFalse(c.compile('time <= 15:00:00').evaluate(self.context))
self.assertTrue(c.compile('time < 16:00:01').evaluate(self.context))
self.assertTrue(c.compile('time <= 16:00:01').evaluate(self.context))
def test_string_operations(self):
c = self.compiler
self.assertFalse(c.compile('maincat == "test"').evaluate(self.context))
self.assertTrue(c.compile('maincat == "dieren"').evaluate(self.context))
self.assertTrue(c.compile('maincat != "test"').evaluate(self.context))
self.assertFalse(c.compile('maincat != "dieren"').evaluate(self.context))
def test_in_collection_operations(self):
c = self.compiler
self.assertTrue(c.compile('listval in list').evaluate(self.context))
self.assertFalse(c.compile('maincat in list').evaluate(self.context))
def test_in_geometry_operations(self):
c = self.compiler
self.assertFalse(c.compile('location_1 in area."stadsdeel"."oost"').evaluate(self.context))
self.assertTrue(c.compile('location_2 in area."stadsdeel"."oost"').evaluate(self.context))
def METHOD_NAME(self):
c = self.compiler
self.assertTrue(c.compile('testint == 0 or testint == 1').evaluate(self.context))
self.assertFalse(c.compile('testint == 0 and testint == 1').evaluate(self.context))
self.assertTrue(c.compile('testint == 1 and (time > 12:00 and time < 20:00)').evaluate(self.context))
self.assertTrue(c.compile('testint == 1 or (time > 12:00 and time < 20:00)').evaluate(self.context))
self.assertTrue(c.compile(
'location_2 in area."stadsdeel"."oost" and (testint > 0 or (testint == 1))'
).evaluate(self.context))
self.assertFalse(c.compile('maincat in list and (time > 12:00 and time < 20:00)').evaluate(self.context))
def test_day_operations(self):
context = copy.deepcopy(self.context)
c = self.compiler
with freeze_time('2021-07-15 12:00:00'): # Thursday
context['day'] = datetime.now().strftime("%A")
self.assertTrue(c.compile('day == "Thursday"').evaluate(context))
self.assertFalse(c.compile('day != "Thursday"').evaluate(context))
def test_grammar_multiple_and_or(self):
c = self.compiler
c.compile('testint == 0 or testint == 1 or testint == 2')
c.compile('testint == 0 and testint == 1 and testint == 2')
c.compile('testint == 0 and testint == 1 or testint == 2')
c.compile('testint == 0 or testint == 1 and testint == 2') | null |
hide submenus | from browser import console, document, html, window, alert
style_sheet = """
/* Classes for brython.widgets.menu */
:root {
--brython-menu-font-family: Arial;
--brython-menu-font-size: 100%;
--brython-menu-navbar-bgcolor: CadetBlue;
--brython-menu-navbar-bgcolor-selected: SkyBlue;
--brython-menu-navbar-color: #fff;
--brython-menu-color: #000;
--brython-menu-submenu-bgcolor: #fff;
--brython-menu-submenu-bgcolor-selected: SkyBlue;
}
/* Item in the main horizontal navigation bar */
.brython-menu-navbar-item {
font-family: var(--brython-menu-font-family);
font-size: var(--brython-menu-font-size);
background-color: var(--brython-menu-navbar-bgcolor);
color: var(--brython-menu-navbar-color);
padding: 0.5em 1em 0.5em 1em;
cursor: default;
}
.brython-menu-navbar-item:hover {
background-color: var(--brython-menu-navbar-bgcolor-selected);
}
.brython-menu-navbar-item-selected {
background-color: var(--brython-menu-navbar-bgcolor-selected);
}
/* Table for a submenu, opened by a click on an item */
.brython-menu-submenu {
font-family: var(--brython-menu-font-family);
font-size: var(--brython-menu-font-size);
background-color: var(--brython-menu-submenu-bgcolor);
position: absolute;
border-style: solid;
border-width: 1px;
border-color: var(--brython-menu-color);
border-spacing: 0;
}
/* TR for a submenu item row */
.brython-menu-submenu-row:hover {
color: var(--brython-menu-color);
background-color: var(--brython-menu-submenu-bgcolor-selected);
}
.brython-menu-submenu-row-selected {
color: var(--brython-menu-color);
background-color: var(--brython-menu-submenu-bgcolor-selected);
}
/*
TD for a cell in a submenu row
Each row has two cells, one for the item label, the other one
filled with a > if the item has a submenu
*/
.brython-menu-submenu-item {
font-family: var(--brython-menu-font-family);
padding: 0.3em 0.3em 0.3em 1em;
cursor: default;
}
/* end of browser.widgets.menu classes */
"""
class Menu:
def __init__(self, container=document.body, parent=None, default_css=True):
"""Create a new menu, inserted inside the container. For the top level
menu, parent is None, otherwise it is a SPAN element (top menu) or a
TR element (submenu)."""
self.container = container
self.parent = parent
if default_css:
# Insert default CSS stylesheet if not already loaded
for stylesheet in document.styleSheets:
if stylesheet.ownerNode.id == "brython-menu":
break
else:
document <= html.STYLE(style_sheet, id="brython-menu")
self.default_css = default_css
if parent:
parent.submenu = html.TABLE(Class="brython-menu-submenu")
parent.submenu.style.position = "absolute"
parent.submenu.style.display = "none"
self.container <= parent.submenu
parent.bind("click", self.unfold)
if not hasattr(self.container, "bind_document"):
# Click on the document outside of the menu removes all submenus
document.bind("click", self.hide_menus)
self.container.bind_document = True
def add_item(self, label, callback=None, menu=False):
if self.parent is None:
# First level
item = html.SPAN(label, Class="brython-menu-navbar-item")
self.container <= item
item.bind("click", self.hide_menus)
else:
# Next levels
item = html.TR(Class="brython-menu-submenu-row")
self.parent.submenu <= item
item <= html.TD(label, Class="brython-menu-submenu-item")
item <= html.TD(">" if menu else " ",
Class="brython-menu-submenu-item",
paddingLeft="2em")
if callback is not None:
item.bind("click", callback)
return item
def add_link(self, label, href):
"""Add a link to the specified address."""
if self.parent is None:
# First level
item = html.A(label, Class="brython-menu-navbar-link", href=href)
self.container <= item
else:
# Next levels
item = html.TR(Class="brython-menu-submenu-row")
self.parent.submenu <= item
item <= html.TD(html.A(label, Class="brython-menu-submenu-link",
href=href))
return item
def add_menu(self, label):
"""Add a new submenu in the current menu."""
# add an item
item = self.add_item(label, menu=True)
if self.parent is None:
# create a SPAN for the submenu
span = html.SPAN(Class="brython-menu-submenu")
span.style.position = "absolute"
return Menu(self.container, item, default_css=self.default_css)
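# Minimal usage sketch (labels, URL and the callback are illustrative only):
#   menu = Menu()
#   file_menu = menu.add_menu("File")
#   file_menu.add_item("Open...", callback=lambda ev: alert("open"))
#   file_menu.add_link("Brython site", "https://brython.info")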
def hide_menus(self, *args):
"""When user clicks outside of open submenus, close them all."""
for css in [".brython-menu-navbar-item-selected",
".brython-menu-submenu-row-selected"]:
for item in document.select(css):
item.classList.remove(css[1:])
for div in document.select(".brython-menu-submenu"):
if div.style.display != "none":
div.style.display = "none"
def METHOD_NAME(self, table):
"""Hide all submenus of specified table."""
for row in table.select("TR"):
if hasattr(row, "submenu"):
row.submenu.style.display = "none"
self.METHOD_NAME(row.submenu)
def unfold(self, ev):
"""Called when a label with a submenu is clicked."""
target = ev.target
if target.nodeName == "SPAN":
# click on a navbar item
selected = document.select(".brython-menu-navbar-item-selected")
if selected:
self.hide_menus()
for item in selected:
item.classList.remove("brython-menu-navbar-item-selected")
submenu = target.submenu
target.classList.add("brython-menu-navbar-item-selected")
submenu.style.left = f"{target.abs_left}px"
submenu.style.top = f"{target.abs_top + target.offsetHeight}px"
# Once an item has been selected, mouseenter on the other items
# unfolds them
if not selected:
for item in document.select(".brython-menu-navbar-item"):
item.bind("mouseenter", self.unfold)
# Display menu
submenu.style.display = "block"
else:
target = target.closest("TR")
# Remove other submenus
table = target.closest("TABLE")
self.METHOD_NAME(table)
# If another item in the table was selected, unselect it
selected = table.select(".brython-menu-submenu-row-selected")
for row in selected:
row.classList.remove("brython-menu-submenu-row-selected")
# Mark target as selected
target.classList.add("brython-menu-submenu-row-selected")
if hasattr(target, "submenu"):
# Set top and left of submenu and display it
target.submenu.style.top = f"{target.abs_top}px"
target.submenu.style.left = \
f"{target.abs_left + target.offsetWidth}px"
target.submenu.style.display = "block"
if not selected:
# Once an item has been selected, mouseenter on the other
# items unfolds them
for row in table.select("TR"):
row.bind("mouseenter", self.unfold)
# stop propagation, otherwise "click" is triggered on document,
# which removes all menus...
ev.stopPropagation( | null |
open | # Copyright 2017 Workiva
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from thrift.transport.TTransport import TTransportException, TMemoryBuffer
from tornado import gen
from frugal import _NATS_MAX_MESSAGE_SIZE
from frugal.transport import FPublisherTransportFactory
from frugal.transport import FPublisherTransport
from frugal.transport import FSubscriberTransportFactory
from frugal.transport import FSubscriberTransport
from frugal.exceptions import TTransportExceptionType
_FRAME_BUFFER_SIZE = 5
_FRUGAL_PREFIX = "frugal."
logger = logging.getLogger(__name__)
class FNatsPublisherTransportFactory(FPublisherTransportFactory):
def __init__(self, nats_client):
self._nats_client = nats_client
def get_transport(self):
return FNatsPublisherTransport(self._nats_client)
class FNatsPublisherTransport(FPublisherTransport):
def __init__(self, nats_client):
super(FNatsPublisherTransport, self).__init__(_NATS_MAX_MESSAGE_SIZE)
self._nats_client = nats_client
@gen.coroutine
def METHOD_NAME(self):
if not self._nats_client.is_connected:
raise TTransportException(
type=TTransportExceptionType.NOT_OPEN,
message="Nats not connected!")
@gen.coroutine
def close(self):
if not self.is_open():
return
yield self._nats_client.flush()
def is_open(self):
return self._nats_client.is_connected
@gen.coroutine
def publish(self, topic, data):
if not self.is_open():
raise TTransportException(
type=TTransportExceptionType.NOT_OPEN,
message='Nats not connected!')
if self._check_publish_size(data):
msg = 'Message exceeds NATS max message size'
raise TTransportException(
type=TTransportExceptionType.REQUEST_TOO_LARGE,
message=msg)
yield self._nats_client.publish('frugal.{0}'.format(topic), data)
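# Note on subject naming: publish() above prepends "frugal." to the topic, so a
# publisher on topic "events" and the subscriber transport below (which listens
# on 'frugal.{topic}') meet on the same NATS subject.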
class FNatsSubscriberTransportFactory(FSubscriberTransportFactory):
def __init__(self, nats_client, queue=''):
self._nats_client = nats_client
self._queue = queue
def get_transport(self):
return FNatsSubscriberTransport(self._nats_client, self._queue)
class FNatsSubscriberTransport(FSubscriberTransport):
def __init__(self, nats_client, queue):
self._nats_client = nats_client
self._queue = queue
self._is_subscribed = False
self._sub_id = None
@gen.coroutine
def subscribe(self, topic, callback):
if not self._nats_client.is_connected:
raise TTransportException(
type=TTransportExceptionType.NOT_OPEN,
message="Nats not connected!")
if self.is_subscribed():
raise TTransportException(
type=TTransportExceptionType.ALREADY_OPEN,
message="Already subscribed to nats topic!")
self._sub_id = yield self._nats_client.subscribe_async(
'frugal.{0}'.format(topic),
queue=self._queue,
cb=lambda message: callback(TMemoryBuffer(message.data[4:]))
)
self._is_subscribed = True
@gen.coroutine
def unsubscribe(self):
if not self.is_subscribed():
return
yield self._nats_client.unsubscribe(self._sub_id)
self._sub_id = None
self._is_subscribed = False
def is_subscribed(self):
return self._is_subscribed and self._nats_client.is_connected | null |
power | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""PowerTransform bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"PowerTransform",
]
class PowerTransform(bijector.Bijector):
"""Compute `Y = g(X) = (1 + X * c)**(1 / c), X >= -1 / c`.
The [power transform](https://en.wikipedia.org/wiki/Power_transform) maps
inputs from `[0, inf]` to `[-1/c, inf]`; this is equivalent to the `inverse`
of this bijector.
This bijector is equivalent to the `Exp` bijector when `c=0`.
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
METHOD_NAME=0.,
validate_args=False,
name="power_transform"):
"""Instantiates the `PowerTransform` bijector.
Args:
power: Python `float` scalar indicating the transform power, i.e.,
`Y = g(X) = (1 + X * c)**(1 / c)` where `c` is the `power`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
Raises:
ValueError: if `power < 0` or is not known statically.
"""
self._graph_parents = []
self._name = name
self._validate_args = validate_args
with self._name_scope("init", values=[METHOD_NAME]):
METHOD_NAME = tensor_util.constant_value(
ops.convert_to_tensor(METHOD_NAME, name="power"))
if METHOD_NAME is None or METHOD_NAME < 0:
raise ValueError("`power` must be a non-negative TF constant.")
self._power = METHOD_NAME
super(PowerTransform, self).__init__(
forward_min_event_ndims=0,
validate_args=validate_args,
name=name)
@property
def METHOD_NAME(self):
"""The `c` in: `Y = g(X) = (1 + X * c)**(1 / c)`."""
return self._power
def _forward(self, x):
x = self._maybe_assert_valid_x(x)
if self.METHOD_NAME == 0.:
return math_ops.exp(x)
# If large x accuracy is an issue, consider using:
# (1. + x * self.power)**(1. / self.power) when x >> 1.
return math_ops.exp(math_ops.log1p(x * self.METHOD_NAME) / self.METHOD_NAME)
def _inverse(self, y):
y = self._maybe_assert_valid_y(y)
if self.METHOD_NAME == 0.:
return math_ops.log(y)
# If large y accuracy is an issue, consider using:
# (y**self.power - 1.) / self.power when y >> 1.
return math_ops.expm1(math_ops.log(y) * self.METHOD_NAME) / self.METHOD_NAME
def _inverse_log_det_jacobian(self, y):
y = self._maybe_assert_valid_y(y)
return (self.METHOD_NAME - 1.) * math_ops.log(y)
def _forward_log_det_jacobian(self, x):
x = self._maybe_assert_valid_x(x)
if self.METHOD_NAME == 0.:
return x
return (1. / self.METHOD_NAME - 1.) * math_ops.log1p(x * self.METHOD_NAME)
def _maybe_assert_valid_x(self, x):
if not self.validate_args or self.METHOD_NAME == 0.:
return x
is_valid = check_ops.assert_non_negative(
1. + self.METHOD_NAME * x,
message="Forward transformation input must be at least {}.".format(
-1. / self.METHOD_NAME))
return control_flow_ops.with_dependencies([is_valid], x)
def _maybe_assert_valid_y(self, y):
if not self.validate_args:
return y
is_valid = check_ops.assert_positive(
y, message="Inverse transformation input must be greater than 0.")
return control_flow_ops.with_dependencies([is_valid], y) | null |
write settings | """
netcdf exporter of scalar products for 1d simulations
"""
from collections import namedtuple
from copy import deepcopy
import numpy as np
from scipy.io import netcdf_file
class NetCDFExporter_1d: # pylint: disable=too-few-public-methods,too-many-instance-attributes
def __init__( # pylint: disable = too-many-arguments
self, data, settings, simulator, filename, exclude_particle_reservoir=True
):
self.data = data
self.settings = settings
self.simulator = simulator
self.vars = None
self.filename = filename
self.nz_export = (
int(self.settings.z_max / self.settings.dz)
if exclude_particle_reservoir
else settings.nz
)
self.z0 = (
0.0 if exclude_particle_reservoir else -settings.particle_reservoir_depth
)
self.n_save_spec = len(self.settings.save_spec_and_attr_times)
def METHOD_NAME(self, ncdf):
for setting in dir(self.settings):
setattr(ncdf, setting, getattr(self.settings, setting))
def _create_dimensions(self, ncdf):
ncdf.createDimension("time", self.settings.nt + 1)
ncdf.createDimension("height", self.nz_export)
if self.n_save_spec != 0:
ncdf.createDimension("time_save_spec", self.n_save_spec)
for name, instance in self.simulator.particulator.products.items():
if len(instance.shape) == 2:
dim_name = name.replace(" ", "_") + "_bin_index"
ncdf.createDimension(dim_name, self.settings.number_of_bins)
def _create_variables(self, ncdf):
self.vars = {}
self.vars["time"] = ncdf.createVariable("time", "f", ["time"])
self.vars["time"][:] = self.settings.dt * np.arange(self.settings.nt + 1)
self.vars["time"].units = "seconds"
self.vars["height"] = ncdf.createVariable("height", "f", ["height"])
self.vars["height"][:] = self.z0 + self.settings.dz * (
1 / 2 + np.arange(self.nz_export)
)
self.vars["height"].units = "metres"
if self.n_save_spec != 0:
self.vars["time_save_spec"] = ncdf.createVariable(
"time_save_spec", "f", ["time_save_spec"]
)
self.vars["time_save_spec"][:] = self.settings.save_spec_and_attr_times
self.vars["time_save_spec"].units = "seconds"
for name, instance in self.simulator.particulator.products.items():
if len(instance.shape) == 2:
label = name.replace(" ", "_") + "_bin_index"
self.vars[label] = ncdf.createVariable(label, "f", (label,))
self.vars[label][:] = np.arange(1, self.settings.number_of_bins + 1)
for name, instance in self.simulator.particulator.products.items():
if name in self.vars:
raise AssertionError(
f"product ({name}) has same name as one of netCDF dimensions"
)
n_dimensions = len(instance.shape)
if n_dimensions == 1:
dimensions = ("height", "time")
elif n_dimensions == 2:
dim_name = name.replace(" ", "_") + "_bin_index"
if self.n_save_spec == 0:
continue
if self.n_save_spec == 1:
dimensions = ("height", f"{dim_name}")
else:
dimensions = ("height", f"{dim_name}", "time_save_spec")
else:
raise NotImplementedError()
self.vars[name] = ncdf.createVariable(name, "f", dimensions)
self.vars[name].units = instance.unit
def _write_variables(self):
for var in self.simulator.particulator.products.keys():
n_dimensions = len(self.simulator.particulator.products[var].shape)
if n_dimensions == 1:
self.vars[var][:, :] = self.data[var][-self.nz_export :, :]
elif n_dimensions == 2:
if self.n_save_spec == 0:
continue
if self.n_save_spec == 1:
self.vars[var][:, :] = self.data[var][-self.nz_export :, :, 0]
else:
self.vars[var][:, :, :] = self.data[var][-self.nz_export :, :, :]
else:
raise NotImplementedError()
def run(self):
with netcdf_file(self.filename, mode="w") as ncdf:
self.METHOD_NAME(ncdf)
self._create_dimensions(ncdf)
self._create_variables(ncdf)
self._write_variables()
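# Hypothetical usage sketch (the variable names below are illustrative and are not
# defined in this module): once a 1d simulation has filled `data` with the products
# of `simulator.particulator`, the exporter is typically driven as
#
#     NetCDFExporter_1d(data, settings, simulator, "output.nc").run()
#     output = readNetCDF_1d("output.nc")  # namedtuple with .settings and .products
#
# Scalar products are written as (height, time) variables and binned products as
# (height, bin_index[, time_save_spec]) variables, mirroring _create_variables().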
def readNetCDF_1d(file):
f = netcdf_file(file, "r")
output_settings = deepcopy(f._attributes)
_products = deepcopy(f.variables)
output_products = {}
for k, v in _products.items():
output_products[k] = v[:]
output_dicts = namedtuple("output_dicts", "settings products")
outputs = output_dicts(output_settings, output_products)
f.close()
return outputs | null |
prepare optparser | #v 1.0
import sys, os, time, re
import urllib, urllib2
from optparse import OptionParser
from HTMLParser import HTMLParser
from xml.dom.minidom import parseString
#id mapping
def getPDBIdandChain(uniprotID):
#data from http://www.bioinf.org.uk/pdbsws/
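    # Each mapping line is expected to carry three whitespace-separated columns:
    # PDB id, chain and UniProt accession, e.g. "1abc A P12345" (illustrative values).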
dirPath = os.path.dirname(sys.argv[0])
fileName ='pdb_uniprot_chain_map.lst.2'
if len(dirPath) != 0:
fileName = dirPath + "/pdb_uniprot_chain_map.lst.2"
fileName = fileName.replace('\\', '/')
f = open (fileName, 'r')
pdbId = ''
chain = ''
while(1):
line = f.readline()
if not line:
break
line = line.rstrip('\n')
columns = line.split()
if len(columns) != 3:
continue
if columns[2] == uniprotID:
pdbId = columns[0]
chain = columns[1]
f.close()
return pdbId, chain
# urllib2 opener handler that returns redirect responses instead of following them
class NoRedirectHandler(urllib2.HTTPRedirectHandler):
def http_error_302(self, req, fp, code, msg, headers):
infourl = urllib.addinfourl(fp, headers, req.get_full_url())
infourl.status = code
infourl.code = code
return infourl
http_error_300 = http_error_302
http_error_301 = http_error_302
http_error_303 = http_error_302
http_error_307 = http_error_302
def METHOD_NAME():
"""Prepare optparser object. New options will be added in this
function first.
"""
usage = "usage: %prog <-p STRING -c STRING -m STRING> [options]"
description = "SNP2PDBSite."
optparser = OptionParser(version="%prog v1.0", description=description, usage=usage, add_help_option=False)
optparser.add_option("-h","--help",action="help",help="Show this help message and exit.")
optparser.add_option("-p","--protId",dest="protId",type="string",
help="Enter protein (uniprot) ID.")
optparser.add_option("-c", "--chain", dest="chain",type="string",
help="Enter chain.")
optparser.add_option("-m", "--mut", dest="mutation",type="string",
help="Enter mutation.")
return optparser
def opt_validate(optparser):
"""Validate options from a OptParser object.
Return: Validated options object.
"""
(options,args) = optparser.parse_args()
if not (options.protId and options.chain and options.mutation):
optparser.print_help()
sys.exit(1)
if '"' in options.protId:
options.protId = options.protId.replace('"','')
if '"' in options.mutation:
options.mutation = options.mutation.replace('"','')
id, chain = getPDBIdandChain(options.protId)
if not (id and chain):
sys.exit(1)
options.chain = chain
options.protId = id
mutList = list(options.mutation)
mutList[0] = chain
options.mutation = "".join(mutList)
return options
class Snp2pdbsite:
def __init__(self, options):
self.protId = options.protId
if not options.chain:
options.chain = ""
self.chain = options.chain
if not options.mutation:
options.mutation = ""
self.mutation = options.mutation
self.url = 'http://www-bionet.sscc.ru/psd2/rest.php?tool=snp2pdbsite'
def sendQuery(self):
opener = urllib2.build_opener(NoRedirectHandler())
urllib2.install_opener(opener)
# do POST
params = urllib.urlencode({'pdb': self.protId, 'chain': self.chain, 'mut': self.mutation})
req = urllib2.Request(self.url, params)
rsp = urllib2.urlopen(req)
content = rsp.read()
res = content.find('qid=')
content = content[res+5:]
res = content.find('"')
content = content[:res]
self.url = "http://www-bionet.sscc.ru/psd2/rest.php?tool=snp2pdbsite&q=%s" %content
self.getResult()
def getResult(self):
opener = urllib2.build_opener(NoRedirectHandler())
urllib2.install_opener(opener)
content = "working"
while content.find('working') >= 0:
req = urllib2.Request(self.url)
rsp = urllib2.urlopen(req)
content = rsp.read()
time.sleep(0.1)
self.parseOutput(content)
def parseOutput(self, content):
if len(content) == 0:
print "Result is empty"
sys.exit(1)
xmldoc = parseString(content)
itemlist = xmldoc.getElementsByTagName('aa')
if itemlist.length <= 0:
print "Result is empty"
sys.exit(1)
for item in itemlist:
print "PDB_SITE" + ':' + item.getAttribute("pos") + item.getAttribute("aa") + ';'
def main():
opts=opt_validate(METHOD_NAME())
g = Snp2pdbsite(opts)
g.sendQuery()
if __name__ == "__main__":
main()
| null |
test no param | import re
import unittest
from ...core.inputscanner import InputScanner
class TestInputScanner(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
def setUp(self):
self.value = "howdy"
self.inputscanner = InputScanner(self.value)
def test_new(self):
inputscanner = InputScanner(None)
self.assertEqual(inputscanner.hasNext(), False)
def test_next(self):
self.assertEqual(self.inputscanner.next(), self.value[0])
self.assertEqual(self.inputscanner.next(), self.value[1])
        # should return None if index is at the end of the value
pattern = re.compile(r"howdy")
self.inputscanner.readUntilAfter(pattern)
self.assertEqual(self.inputscanner.next(), None)
def test_peek(self):
self.assertEqual(self.inputscanner.peek(3), self.value[3])
self.inputscanner.next()
self.assertEqual(self.inputscanner.peek(3), self.value[4])
# should return None if index is less than 0 or greater than text length
self.assertEqual(self.inputscanner.peek(-2), None)
self.assertEqual(self.inputscanner.peek(5), None)
def METHOD_NAME(self):
self.assertEqual(self.inputscanner.peek(), self.value[0])
self.inputscanner.next()
self.assertEqual(self.inputscanner.peek(), self.value[1])
def test_pattern(self):
pattern = re.compile(r"how")
index = 0
self.assertEqual(self.inputscanner.test(pattern, index), True)
self.inputscanner.next()
self.assertEqual(self.inputscanner.test(pattern, index), False)
def test_Char(self):
pattern = re.compile(r"o")
index = 1
self.assertEqual(self.inputscanner.testChar(pattern, index), True)
def test_restart(self):
# should reset index to 0
self.inputscanner.next()
self.assertEqual(self.inputscanner.peek(), self.value[1])
self.inputscanner.restart()
self.assertEqual(self.inputscanner.peek(), self.value[0])
def test_back(self):
# should move the index one place back if current position is not 0
self.inputscanner.next()
self.assertEqual(self.inputscanner.peek(), self.value[1])
self.inputscanner.back()
self.assertEqual(self.inputscanner.peek(), self.value[0])
# should not move the index back if current position is 0
self.inputscanner.back()
self.assertEqual(self.inputscanner.peek(), self.value[0])
def test_hasNext(self):
# should return true if index is not at the last position
pattern = re.compile(r"howd")
self.inputscanner.readUntilAfter(pattern)
self.assertEqual(self.inputscanner.hasNext(), True)
# should return false if index is at the last position
self.inputscanner.next()
self.assertEqual(self.inputscanner.hasNext(), False)
def test_match(self):
# should return details of pattern match and move index to next position
pattern = re.compile(r"how")
patternmatch = self.inputscanner.match(pattern)
self.assertEqual(self.inputscanner.peek(), self.value[3])
self.assertNotEqual(patternmatch, None)
self.assertEqual(patternmatch.group(0), "how")
self.inputscanner.restart()
# should return None and not move index if there is no match
pattern = re.compile(r"test")
patternmatch = self.inputscanner.match(pattern)
self.assertEqual(self.inputscanner.peek(), self.value[0])
self.assertEqual(patternmatch, None)
def test_read(self):
# should return the matched substring
pattern = re.compile(r"how")
patternmatch = self.inputscanner.read(pattern)
self.assertEqual(patternmatch, "how")
self.inputscanner.restart()
# should return the empty string if there is no match
pattern = re.compile(r"ow")
patternmatch = self.inputscanner.read(pattern)
self.assertEqual(patternmatch, "")
self.inputscanner.restart()
        # should return substring from start until the pattern when untilAfter is true
startPattern = re.compile(r"how")
untilPattern = re.compile(r"dy")
untilAfter = True
patternmatch = self.inputscanner.read(startPattern, untilPattern, untilAfter)
self.assertEqual(patternmatch, "howdy")
self.inputscanner.restart()
        # should return the substring matched for startPattern when untilPattern is given but untilAfter is false
startPattern = re.compile(r"how")
untilPattern = re.compile(r"dy")
untilAfter = False
patternmatch = self.inputscanner.read(startPattern, untilPattern, untilAfter)
self.assertEqual(patternmatch, "how")
self.inputscanner.restart()
# should return substring matched for untilPattern when startPattern is None
startPattern = None
untilPattern = re.compile(r"how")
untilAfter = True
        patternmatch = self.inputscanner.read(startPattern, untilPattern, untilAfter)
        self.assertEqual(patternmatch, "how")
        self.inputscanner.restart()
# should return substring matched for untilPattern when startPattern is None and untilAfter is false
startPattern = None
untilPattern = re.compile(r"how")
untilAfter = False
patternmatch = self.inputscanner.read(startPattern, untilPattern, untilAfter)
self.assertEqual(patternmatch, "")
def test_readUntil(self):
# should return substring matched for pattern when untilAfter is true
pattern = re.compile(r"how")
untilAfter = True
patternmatch = self.inputscanner.readUntil(pattern, untilAfter)
self.assertEqual(patternmatch, "how")
self.inputscanner.restart()
# should return substring from index 0 to start index of matched substring when untilAfter is false
pattern = re.compile(r"wd")
untilAfter = False
patternmatch = self.inputscanner.readUntil(pattern, untilAfter)
self.assertEqual(patternmatch, "ho")
self.inputscanner.restart()
# should return empty string when start index of matched substring is 0 and untilAfter is false
pattern = re.compile(r"how")
untilAfter = False
patternmatch = self.inputscanner.readUntil(pattern, untilAfter)
self.assertEqual(patternmatch, "")
def test_readUntilAfter(self):
# should return matched substring
pattern = re.compile(r"how")
patternmatch = self.inputscanner.readUntilAfter(pattern)
self.assertEqual(patternmatch, "how")
def test_get_regexp(self):
# should return regex pattern for string passed
pattern = re.compile(r"ow")
self.assertEqual(self.inputscanner.get_regexp("ow"), pattern)
def test_peekUntilAfter(self):
# should return matched substring and retain index position
pattern = re.compile(r"how")
self.assertEqual(self.inputscanner.peek(), self.value[0])
self.assertEqual(self.inputscanner.peekUntilAfter(pattern), "how")
self.assertEqual(self.inputscanner.peek(), self.value[0])
def test_lookBack(self):
# should return whether testVal is obtained by shifting index to the left
testVal = "how"
pattern = re.compile(r"howd")
self.inputscanner.readUntilAfter(pattern)
self.assertEqual(self.inputscanner.lookBack(testVal), True)
testVal = "ho"
self.assertEqual(self.inputscanner.lookBack(testVal), False)
if __name__ == "__main__":
unittest.main() | null |
aten fft ihfft | # --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
# mypy: disable-error-code="misc,arg-type,type-arg,valid-type,assignment,return-value"
"""torch.ops.aten operators under the `fft` module.
- No inplace operators.
- All functions should not have the script() decorator. This is because
we want to delay the compilation of the function.
"""
from __future__ import annotations
from typing import Optional, Sequence
from onnxscript.onnx_types import TensorType
def aten_fft_fft(
self: TensorType, n: Optional[int] = None, dim: int = -1, norm: Optional[str] = None
) -> TensorType:
"""fft_fft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor"""
raise NotImplementedError()
def aten_fft_fft2(
self: TensorType,
s: Optional[int] = None,
dim: Sequence[int] = (-2, -1),
norm: Optional[str] = None,
) -> TensorType:
"""fft_fft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor"""
raise NotImplementedError()
def aten_fft_fftfreq(n: int, d: float = 1.0) -> TensorType:
"""fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"""
raise NotImplementedError()
def aten_fft_fftn(
self: TensorType,
s: Optional[int] = None,
dim: Optional[int] = None,
norm: Optional[str] = None,
) -> TensorType:
"""fft_fftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor"""
raise NotImplementedError()
def aten_fft_fftshift(self: TensorType, dim: Optional[int] = None) -> TensorType:
"""fft_fftshift(Tensor self, int[1]? dim=None) -> Tensor"""
raise NotImplementedError()
def aten_fft_hfft(
self: TensorType, n: Optional[int] = None, dim: int = -1, norm: Optional[str] = None
) -> TensorType:
"""fft_hfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor"""
raise NotImplementedError()
def aten_fft_hfft2(
self: TensorType,
s: Optional[int] = None,
dim: Sequence[int] = (-2, -1),
norm: Optional[str] = None,
) -> TensorType:
"""fft_hfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor"""
raise NotImplementedError()
def aten_fft_hfftn(
self: TensorType,
s: Optional[int] = None,
dim: Optional[int] = None,
norm: Optional[str] = None,
) -> TensorType:
"""fft_hfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor"""
raise NotImplementedError()
def aten_fft_ifft(
self: TensorType, n: Optional[int] = None, dim: int = -1, norm: Optional[str] = None
) -> TensorType:
"""fft_ifft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor"""
raise NotImplementedError()
def aten_fft_ifft2(
self: TensorType,
s: Optional[int] = None,
dim: Sequence[int] = (-2, -1),
norm: Optional[str] = None,
) -> TensorType:
"""fft_ifft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor"""
raise NotImplementedError()
def aten_fft_ifftn(
self: TensorType,
s: Optional[int] = None,
dim: Optional[int] = None,
norm: Optional[str] = None,
) -> TensorType:
"""fft_ifftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor"""
raise NotImplementedError()
def aten_fft_ifftshift(self: TensorType, dim: Optional[int] = None) -> TensorType:
"""fft_ifftshift(Tensor self, int[1]? dim=None) -> Tensor"""
raise NotImplementedError()
def METHOD_NAME(
self: TensorType, n: Optional[int] = None, dim: int = -1, norm: Optional[str] = None
) -> TensorType:
"""fft_ihfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor"""
raise NotImplementedError()
def aten_fft_ihfft2(
self: TensorType,
s: Optional[int] = None,
dim: Sequence[int] = (-2, -1),
norm: Optional[str] = None,
) -> TensorType:
"""fft_ihfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor"""
raise NotImplementedError()
def aten_fft_ihfftn(
self: TensorType,
s: Optional[int] = None,
dim: Optional[int] = None,
norm: Optional[str] = None,
) -> TensorType:
"""fft_ihfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor"""
raise NotImplementedError()
def aten_fft_irfft(
self: TensorType, n: Optional[int] = None, dim: int = -1, norm: Optional[str] = None
) -> TensorType:
"""fft_irfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor"""
raise NotImplementedError()
def aten_fft_irfft2(
self: TensorType,
s: Optional[int] = None,
dim: Sequence[int] = (-2, -1),
norm: Optional[str] = None,
) -> TensorType:
"""fft_irfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor"""
raise NotImplementedError()
def aten_fft_irfftn(
self: TensorType,
s: Optional[int] = None,
dim: Optional[int] = None,
norm: Optional[str] = None,
) -> TensorType:
"""fft_irfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor"""
raise NotImplementedError()
def aten_fft_rfft(
self: TensorType, n: Optional[int] = None, dim: int = -1, norm: Optional[str] = None
) -> TensorType:
"""fft_rfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor"""
raise NotImplementedError()
def aten_fft_rfft2(
self: TensorType,
s: Optional[int] = None,
dim: Sequence[int] = (-2, -1),
norm: Optional[str] = None,
) -> TensorType:
"""fft_rfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor"""
raise NotImplementedError()
def aten_fft_rfftfreq(n: int, d: float = 1.0) -> TensorType:
"""fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"""
raise NotImplementedError()
def aten_fft_rfftn(
self: TensorType,
s: Optional[int] = None,
dim: Optional[int] = None,
norm: Optional[str] = None,
) -> TensorType:
"""fft_rfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor"""
raise NotImplementedError() | null |
set module | #!/usr/bin/env python
"""
Do the initial installation and configuration of a DIRAC component
"""
from DIRAC import S_OK
from DIRAC import exit as DIRACexit
from DIRAC import gConfig, gLogger
from DIRAC.Core.Base.Script import Script
from DIRAC.Core.Utilities.Extensions import extensionsByPriority
from DIRAC.FrameworkSystem.Utilities import MonitoringUtilities
class Params:
"""Class holding the parameters, and callbacks for their respective switches."""
def __init__(self) -> None:
"""C'or"""
self.overwrite = False
self.module = ""
self.specialOptions = {}
self.switches = [
("w", "overwrite", "Overwrite the configuration in the global CS", self.setOverwrite),
("m:", "module=", "Python module name for the component code", self.METHOD_NAME),
("p:", "parameter=", "Special component option ", self.setSpecialOption),
]
def setOverwrite(self):
self.overwrite = True
return S_OK()
def METHOD_NAME(self, value):
self.specialOptions["Module"] = value
self.module = value
return S_OK()
def setSpecialOption(self, value):
option, val = value.split("=")
self.specialOptions[option] = val
return S_OK()
@Script()
def main():
params = Params()
Script.registerSwitches(params.switches)
Script.registerArgument(
(
"System/Component: Full component name (ie: WorkloadManagement/JobMonitoring)",
"System: Name of the DIRAC system (ie: WorkloadManagement)",
)
)
Script.registerArgument(" Component: Name of the DIRAC service (ie: JobMonitoring)", mandatory=False)
Script.parseCommandLine(ignoreErrors=False)
args = Script.getPositionalArgs()
if not args or len(args) > 2:
Script.showHelp(exitCode=1)
# System/Component
if len(args) == 1:
args = args[0].split("/")
system = args[0]
component = args[1]
compOrMod = params.module or component
# Now actually doing things
from DIRAC.FrameworkSystem.Client.ComponentInstaller import gComponentInstaller
gComponentInstaller.exitOnError = True
result = gComponentInstaller.getSoftwareComponents(extensionsByPriority())
if not result["OK"]:
gLogger.error(result["Message"])
DIRACexit(1)
availableComponents = result["Value"]
for compType in availableComponents:
if system in availableComponents[compType] and compOrMod in availableComponents[compType][system]:
cType = compType[:-1].lower()
break
else:
gLogger.error(f"Component {system}/{component} is not available for installation")
DIRACexit(1)
if params.module:
result = gComponentInstaller.addDefaultOptionsToCS(
gConfig, cType, system, params.module, extensionsByPriority(), overwrite=params.overwrite
)
result = gComponentInstaller.addDefaultOptionsToCS(
gConfig,
cType,
system,
component,
extensionsByPriority(),
specialOptions=params.specialOptions,
overwrite=params.overwrite,
addDefaultOptions=False,
)
else:
result = gComponentInstaller.addDefaultOptionsToCS(
gConfig,
cType,
system,
component,
extensionsByPriority(),
specialOptions=params.specialOptions,
overwrite=params.overwrite,
)
if not result["OK"]:
gLogger.error(result["Message"])
DIRACexit(1)
if component.startswith("Tornado"):
result = gComponentInstaller.installTornado()
if not result["OK"]:
gLogger.error(result["Message"])
DIRACexit(1)
else:
result = gComponentInstaller.installComponent(cType, system, component, extensionsByPriority(), params.module)
if not result["OK"]:
gLogger.error(result["Message"])
DIRACexit(1)
gLogger.notice(f"Successfully installed component {component} in {system} system, now setting it up")
if component.startswith("Tornado"):
result = gComponentInstaller.setupTornadoService(system, component)
if not result["OK"]:
gLogger.error(result["Message"])
DIRACexit(1)
else:
result = gComponentInstaller.setupComponent(cType, system, component, extensionsByPriority(), params.module)
if not result["OK"]:
gLogger.error(result["Message"])
DIRACexit(1)
if "ComponentMonitoring" in component:
result = MonitoringUtilities.monitorInstallation("DB", system, "InstalledComponentsDB")
if not result["OK"]:
gLogger.error(result["Message"])
DIRACexit(1)
result = MonitoringUtilities.monitorInstallation(cType, system, component, params.module)
if not result["OK"]:
gLogger.error(result["Message"])
DIRACexit(1)
gLogger.notice(f"Successfully completed the installation of {system}/{component}")
DIRACexit()
if __name__ == "__main__":
main() | null |
source | from conan import ConanFile
from conan.tools.microsoft import is_msvc
from conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy, rmdir, replace_in_file
from conan.tools.scm import Version
from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
import os
required_conan_version = ">=1.53.0"
class LibgdConan(ConanFile):
name = "libgd"
description = ("GD is an open source code library for the dynamic"
"creation of images by programmers.")
license = "BSD-like"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://libgd.github.io"
topics = ("images", "graphics")
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"with_png": [True, False],
"with_jpeg": [True, False],
"with_tiff": [True, False],
"with_freetype": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"with_png": False,
"with_jpeg": False,
"with_tiff": False,
"with_freetype": False,
}
def export_sources(self):
export_conandata_patches(self)
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
self.options.rm_safe("fPIC")
self.settings.rm_safe("compiler.libcxx")
self.settings.rm_safe("compiler.cppstd")
def layout(self):
cmake_layout(self, src_folder="src")
def requirements(self):
self.requires("zlib/1.2.13")
if self.options.with_png:
self.requires("libpng/1.6.39")
if is_msvc(self):
self.requires("getopt-for-visual-studio/20200201")
if self.options.with_jpeg:
self.requires("libjpeg/9e")
if self.options.with_tiff:
self.requires("libtiff/4.4.0")
if self.options.with_freetype:
self.requires("freetype/2.12.1")
def METHOD_NAME(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
tc = CMakeToolchain(self)
tc.variables["CMAKE_C_STANDARD"] = "99"
tc.variables["BUILD_STATIC_LIBS"] = not self.options.shared
if Version(self.version) >= "2.3.0":
tc.variables["ENABLE_GD_FORMATS"] = True
tc.variables["ENABLE_PNG"] = self.options.with_png
tc.variables["ENABLE_LIQ"] = False
tc.variables["ENABLE_JPEG"] = self.options.with_jpeg
tc.variables["ENABLE_TIFF"] = self.options.with_tiff
tc.variables["ENABLE_ICONV"] = False
tc.variables["ENABLE_XPM"] = False
tc.variables["ENABLE_FREETYPE"] = self.options.with_freetype
tc.variables["ENABLE_FONTCONFIG"] = False
tc.variables["ENABLE_WEBP"] = False
if Version(self.version) >= "2.3.2":
tc.variables["ENABLE_HEIF"] = False
tc.variables["ENABLE_AVIF"] = False
if Version(self.version) >= "2.3.0":
tc.variables["ENABLE_RAQM"] = False
tc.cache_variables["CMAKE_POLICY_DEFAULT_CMP0077"] = "NEW"
tc.generate()
deps = CMakeDeps(self)
deps.generate()
def _patch(self):
apply_conandata_patches(self)
cmakelists = os.path.join(self.source_folder, "CMakeLists.txt")
replace_in_file(self, cmakelists, "${CMAKE_SOURCE_DIR}",
"${CMAKE_CURRENT_SOURCE_DIR}")
replace_in_file(self, cmakelists,
"SET(CMAKE_MODULE_PATH \"${GD_SOURCE_DIR}/cmake/modules\")",
"LIST(APPEND CMAKE_MODULE_PATH \"${GD_SOURCE_DIR}/cmake/modules\")")
replace_in_file(self, os.path.join(self.source_folder, "src", "CMakeLists.txt"),
"RUNTIME DESTINATION bin",
"RUNTIME DESTINATION bin BUNDLE DESTINATION bin")
def build(self):
self._patch()
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
copy(self, pattern="COPYING", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
cmake = CMake(self)
cmake.install()
rmdir(self, os.path.join(self.package_folder, "share"))
rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
def package_info(self):
prefix_libs = "lib" if self.settings.os == "Windows" else ""
suffix_libs = "_static" if self.settings.os == "Windows" and not self.options.shared else ""
self.cpp_info.libs = [f"{prefix_libs}gd{suffix_libs}"]
self.cpp_info.set_property("pkg_config_name", "gdlib")
if self.settings.os == "Windows" and not self.options.shared:
self.cpp_info.defines.append("BGD_NONDLL")
self.cpp_info.defines.append("BGDWIN32")
if self.settings.os in ("FreeBSD", "Linux", "Android", "SunOS", "AIX"):
self.cpp_info.system_libs.append("m")
bin_path = os.path.join(self.package_folder, "bin")
self.output.info("Appending PATH environment variable: {}".format(bin_path))
self.env_info.PATH.append(bin_path)
# TODO: to remove in conan v2 once cmake_find_package_* generators removed
self.cpp_info.names["pkg_config"]= "gdlib" | null |
reverse permute data | import numpy as np
import nibabel as nib
from nilearn.image import new_img_like, resample_to_img
import random
import itertools
def scale_image(image, scale_factor):
scale_factor = np.asarray(scale_factor)
new_affine = np.copy(image.affine)
new_affine[:3, :3] = image.affine[:3, :3] * scale_factor
new_affine[:, 3][:3] = image.affine[:, 3][:3] + (image.shape * np.diag(image.affine)[:3] * (1 - scale_factor)) / 2
return new_img_like(image, data=image.get_data(), affine=new_affine)
def flip_image(image, axis):
try:
new_data = np.copy(image.get_data())
for axis_index in axis:
new_data = np.flip(new_data, axis=axis_index)
except TypeError:
new_data = np.flip(image.get_data(), axis=axis)
return new_img_like(image, data=new_data)
def random_flip_dimensions(n_dimensions):
axis = list()
for dim in range(n_dimensions):
if random_boolean():
axis.append(dim)
return axis
def random_scale_factor(n_dim=3, mean=1, std=0.25):
return np.random.normal(mean, std, n_dim)
def random_boolean():
return np.random.choice([True, False])
def distort_image(image, flip_axis=None, scale_factor=None):
if flip_axis:
image = flip_image(image, flip_axis)
if scale_factor is not None:
image = scale_image(image, scale_factor)
return image
def augment_data(data, truth, affine, scale_deviation=None, flip=True):
n_dim = len(truth.shape)
if scale_deviation:
scale_factor = random_scale_factor(n_dim, std=scale_deviation)
else:
scale_factor = None
if flip:
flip_axis = random_flip_dimensions(n_dim)
else:
flip_axis = None
data_list = list()
for data_index in range(data.shape[0]):
image = get_image(data[data_index], affine)
data_list.append(resample_to_img(distort_image(image, flip_axis=flip_axis,
scale_factor=scale_factor), image,
interpolation="continuous").get_data())
data = np.asarray(data_list)
truth_image = get_image(truth, affine)
truth_data = resample_to_img(distort_image(truth_image, flip_axis=flip_axis, scale_factor=scale_factor),
truth_image, interpolation="nearest").get_data()
return data, truth_data
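# Minimal usage sketch (not part of the original pipeline; shapes and the std value
# are illustrative): augment a two-modality volume using an identity affine.
def _example_augment_data():
    data = np.random.rand(2, 16, 16, 16)  # (n_modalities, x, y, z)
    truth = (np.random.rand(16, 16, 16) > 0.5).astype(np.int8)
    affine = np.eye(4)  # identity voxel-to-world transform
    augmented_data, augmented_truth = augment_data(
        data, truth, affine, scale_deviation=0.1, flip=True)
    return augmented_data, augmented_truth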
def get_image(data, affine, nib_class=nib.Nifti1Image):
return nib_class(dataobj=data, affine=affine)
def generate_permutation_keys():
"""
This function returns a set of "keys" that represent the 48 unique rotations &
reflections of a 3D matrix.
Each item of the set is a tuple:
((rotate_y, rotate_z), flip_x, flip_y, flip_z, transpose)
As an example, ((0, 1), 0, 1, 0, 1) represents a permutation in which the data is
rotated 90 degrees around the z-axis, then reversed on the y-axis, and then
transposed.
48 unique rotations & reflections:
https://en.wikipedia.org/wiki/Octahedral_symmetry#The_isometries_of_the_cube
"""
return set(itertools.product(
itertools.combinations_with_replacement(range(2), 2), range(2), range(2), range(2), range(2)))
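# Small sanity-check sketch (not in the original module): the key set holds
# 3 * 2 * 2 * 2 * 2 = 48 entries and the all-zero key is the identity permutation.
def _example_permutation_keys():
    keys = generate_permutation_keys()
    assert len(keys) == 48
    identity_key = ((0, 0), 0, 0, 0, 0)
    sample = np.arange(2 * 3 * 3 * 3).reshape((2, 3, 3, 3))
    assert np.array_equal(permute_data(sample, identity_key), sample)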
def random_permutation_key():
"""
Generates and randomly selects a permutation key. See the documentation for the
"generate_permutation_keys" function.
"""
return random.choice(list(generate_permutation_keys()))
def permute_data(data, key):
"""
Permutes the given data according to the specification of the given key. Input data
must be of shape (n_modalities, x, y, z).
    Input key is a tuple: ((rotate_y, rotate_z), flip_x, flip_y, flip_z, transpose)
As an example, ((0, 1), 0, 1, 0, 1) represents a permutation in which the data is
rotated 90 degrees around the z-axis, then reversed on the y-axis, and then
transposed.
"""
data = np.copy(data)
(rotate_y, rotate_z), flip_x, flip_y, flip_z, transpose = key
if rotate_y != 0:
data = np.rot90(data, rotate_y, axes=(1, 3))
if rotate_z != 0:
data = np.rot90(data, rotate_z, axes=(2, 3))
if flip_x:
data = data[:, ::-1]
if flip_y:
data = data[:, :, ::-1]
if flip_z:
data = data[:, :, :, ::-1]
if transpose:
for i in range(data.shape[0]):
data[i] = data[i].T
return data
def random_permutation_x_y(x_data, y_data):
"""
Performs random permutation on the data.
:param x_data: numpy array containing the data. Data must be of shape (n_modalities, x, y, z).
:param y_data: numpy array containing the data. Data must be of shape (n_modalities, x, y, z).
:return: the permuted data
"""
key = random_permutation_key()
return permute_data(x_data, key), permute_data(y_data, key)
def METHOD_NAME(data, key):
key = reverse_permutation_key(key)
data = np.copy(data)
(rotate_y, rotate_z), flip_x, flip_y, flip_z, transpose = key
if transpose:
for i in range(data.shape[0]):
data[i] = data[i].T
if flip_z:
data = data[:, :, :, ::-1]
if flip_y:
data = data[:, :, ::-1]
if flip_x:
data = data[:, ::-1]
if rotate_z != 0:
data = np.rot90(data, rotate_z, axes=(2, 3))
if rotate_y != 0:
data = np.rot90(data, rotate_y, axes=(1, 3))
return data
def reverse_permutation_key(key):
rotation = tuple([-rotate for rotate in key[0]])
return rotation, key[1], key[2], key[3], key[4] | null |
get track groups | # Copyright (C) 2014 Dustin Spicuzza
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
from gi.repository import Gio
from os.path import dirname, join
from contextlib import closing
from xl.nls import gettext as _
from xl.metadata.tags import tag_data
from xlgui.widgets import menu
from .analyzer_dialog import AnalyzerDialog
class PlaylistAnalyzerPlugin:
def __init__(self):
self.menu_items = []
self.dialog = None
self._get_track_groups = None
self.d3_loc = join(dirname(__file__), 'ext', 'd3.min.js')
def enable(self, exaile):
self.exaile = exaile
def on_gui_loaded(self):
# register menu items
item = menu.simple_menu_item(
'pz-run', [], _('Analyze playlists'), callback=self.on_analyze_playlists
)
item.register('menubar-tools-menu')
self.menu_items.append(item)
item = menu.simple_menu_item(
'pz-run',
['export-files'],
_('Analyze playlist'),
callback=self.on_analyze_playlist,
)
item.register('playlist-panel-context-menu')
self.menu_items.append(item)
# -> this could have a submenu that gets filled in with all
# of the presets
def on_exaile_loaded(self):
pass
def disable(self, exaile):
if self.dialog is not None:
self.dialog.destroy()
self.dialog = None
for menu_item in self.menu_items:
menu_item.unregister()
#
# Misc
#
def METHOD_NAME(self, track):
if self._get_track_groups is None:
if 'grouptagger' not in self.exaile.plugins.enabled_plugins:
raise ValueError(
"GroupTagger plugin must be loaded to use the GroupTagger tag"
)
self._get_track_groups = self.exaile.plugins.enabled_plugins[
'grouptagger'
].METHOD_NAME
return self._get_track_groups(track)
#
# Menu functions
#
def on_analyze_playlist(self, widget, name, parent, context):
"""
:param parent: The PlaylistsPanel that triggered this callback
"""
if self.dialog is None:
self.dialog = AnalyzerDialog(
self, parent.parent, context['selected-playlist']
)
def on_analyze_playlists(self, widget, name, parent, context):
"""
:param parent: The Exaile MainWindow object
"""
if self.dialog is None:
self.dialog = AnalyzerDialog(self, parent.window)
#
# Functions to generate the analysis
#
def get_tag(self, track, tagname, extra):
data = tag_data.get(tagname)
if data is not None:
if data.type == 'int':
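                # 'extra' acts as a bin width for integer tags, e.g. extra=10
                # groups a year tag of 1994 into the 1990 bucket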
ret = track.get_tag_raw(tagname, join=True)
if ret is not None:
if extra == 0:
return int(ret)
else:
return int(ret) - (int(ret) % extra)
return
if data.use_disk:
return track.get_tag_disk(tagname)
if tagname == '__grouptagger':
return list(self.METHOD_NAME(track))
return track.get_tag_raw(tagname, join=True)
def generate_data(self, tracks, tagdata):
data = []
for track in tracks:
if track is None:
data.append(None)
else:
data.append([self.get_tag(track, tag, extra) for tag, extra in tagdata])
return data
def write_to_file(self, tmpl, uri, **kwargs):
"""
Opens a template file, performs substitution, writes it to the
output URI, and also writes d3.min.js to the output directory.
:param tmpl: Local pathname to template file
:param uri: URI of output file suitable for passing to Gio.File
:param kwargs: Named parameters to substitute in template
"""
# read the template file
# NOTE: must be opened in non-binary mode because we treat the
# contents as string
with open(tmpl, 'r') as fp:
contents = fp.read()
try:
contents = contents % kwargs
except Exception:
raise RuntimeError(
"Format string error in template (probably has unescaped % in it)"
)
outfile = Gio.File.new_for_uri(uri)
parent_dir = outfile.get_parent()
if parent_dir:
parent_dir = parent_dir.get_child("d3.min.js")
with closing(
outfile.replace(None, False, Gio.FileCreateFlags.NONE, None)
) as fp:
fp.write(
contents.encode('utf-8')
) # Gio.FileOutputStream.write() needs bytes argument
# copy d3 to the destination
# -> TODO: add checkbox to indicate whether it should write d3 there or not
if parent_dir:
# Open in binary mode, so that we can directly read bytes
# and write them via Gio.FileOutputStream.write()
with open(self.d3_loc, 'rb') as d3fp:
with closing(
parent_dir.replace(None, False, Gio.FileCreateFlags.NONE, None)
) as pfp:
pfp.write(d3fp.read())
# New plugin API; requires exaile 3.4.0 or later
plugin_class = PlaylistAnalyzerPlugin | null |
run timer | # Copyright (C) 2021 Open Source Robotics Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import unittest
from datetime import datetime, timedelta
from gz.math7 import Stopwatch
class TestBox(unittest.TestCase):
# Helper function that runs a few tests
def METHOD_NAME(self, _time):
handleSteadyClock = timedelta(0)
# Start the timer
self.assertTrue(_time.start())
# The timer should be running.
self.assertTrue(_time.running())
# The start time should be greater than the stop time.
self.assertGreater(_time.start_time(), _time.stop_time())
# The elapsed stop time should still be zero.
self.assertEqual(timedelta(0), _time.elapsed_stop_time())
# Wait for some time...
time.sleep(1)
# Now the elapsed time should be greater than or equal to the time slept.
self.assertGreaterEqual(_time.elapsed_run_time() + handleSteadyClock, timedelta(seconds=1))
# Stop the timer.
self.assertTrue(_time.stop())
# The timer should not be running.
self.assertFalse(_time.running())
# The stop time should be greater than the start time.
self.assertGreater(_time.stop_time(), _time.start_time())
# The elapsed time should still be greater than the time slept.
self.assertGreaterEqual(_time.elapsed_run_time() + handleSteadyClock, timedelta(seconds=1))
# Save the elapsed time.
elapsedTime = _time.elapsed_run_time()
# The timer is now stopped, let's sleep some more.
time.sleep(1)
# The elapsed stop time should be greater than or equal to the time
# slept.
self.assertGreaterEqual(_time.elapsed_stop_time() + handleSteadyClock, timedelta(seconds=1))
# The elapsed time should be the same.
self.assertEqual(elapsedTime, _time.elapsed_run_time())
# Start the timer again.
self.assertTrue(_time.start())
# Store the elapsed stop time.
elapsedStopTime = _time.elapsed_stop_time()
# The timer should be running.
self.assertTrue(_time.running())
# Sleep for some time.
time.sleep(1)
# The elapsed stop time should remain the same
self.assertEqual(elapsedStopTime, _time.elapsed_stop_time())
# The elapsed time should be greater than the previous elapsed time.
self.assertGreater(_time.elapsed_run_time(), elapsedTime)
# The elapsed time should be greater than or equal to the the previous
# two sleep times.
self.assertGreaterEqual(_time.elapsed_run_time() + handleSteadyClock, timedelta(seconds=2))
def test_constructor(self):
watch = Stopwatch()
self.assertFalse(watch.running())
self.assertEqual(watch.stop_time(), watch.start_time())
self.assertEqual(timedelta(0), watch.elapsed_run_time())
self.assertEqual(timedelta(0), watch.elapsed_stop_time())
self.METHOD_NAME(watch)
watch2 = watch
self.assertEqual(watch, watch2)
watch3 = watch2
self.assertEqual(watch, watch3)
def test_equal_operator(self):
watch = Stopwatch()
watch2 = Stopwatch()
watch3 = Stopwatch()
self.assertEqual(watch, watch2)
self.assertEqual(watch, watch3)
self.METHOD_NAME(watch)
self.METHOD_NAME(watch2)
self.METHOD_NAME(watch3)
self.assertNotEqual(watch, watch2)
self.assertNotEqual(watch, watch3)
watch2 = watch
self.assertEqual(watch, watch2)
watch3 = watch2
self.assertEqual(watch, watch3)
def test_start_stop_reset(self):
watch = Stopwatch()
self.METHOD_NAME(watch)
watch.reset()
self.assertFalse(watch.running())
self.assertEqual(watch.stop_time(), watch.start_time())
self.assertEqual(timedelta(0), watch.elapsed_run_time())
self.assertEqual(timedelta(0), watch.elapsed_stop_time())
self.METHOD_NAME(watch)
self.assertTrue(watch.running())
watch.start(True)
self.assertTrue(watch.running())
self.assertLess(watch.stop_time(), watch.start_time())
self.assertNotEqual(timedelta(0), watch.elapsed_run_time())
self.assertEqual(timedelta(0), watch.elapsed_stop_time())
def test_fail_start_stop(self):
watch = Stopwatch()
# Can't stop while not running
self.assertFalse(watch.stop())
self.assertFalse(watch.running())
# Can start while not running
self.assertTrue(watch.start())
self.assertTrue(watch.running())
# Can't start while running
self.assertFalse(watch.start())
self.assertTrue(watch.running())
# Can stop while running
self.assertTrue(watch.stop())
self.assertFalse(watch.running())
# Can start while not running
self.assertTrue(watch.start())
self.assertTrue(watch.running())
if __name__ == '__main__':
unittest.main() | null |
replacefunc |
# Copyright 2010-2016 Jaap Karssenberg <[email protected]>
'''Thin wrapper for 'datetime' module from the standard library.
Provides timezone info for the local time. Based on example code
from standard library datetime documentation.
Main usage of this module is the function L{now()}. It imports all
from the standard datetime, so it can be used as a transparant
replacement.
Also adds a L{strfcal()} method and extends L{strftime()} to deal
with weeknumbers correctly.
'''
import re
import locale
from datetime import *
import logging
logger = logging.getLogger('zim')
def now():
'''Like C{datetime.now()} but with local timezone info'''
# Also setting microsecond to zero, to give isoformat() a nicer look
return datetime.now(LocalTimezone()).replace(microsecond=0)
# A class capturing the platform's idea of local time.
import time as _time
ZERO = timedelta(0)
STDOFFSET = timedelta(seconds = -_time.timezone)
if _time.daylight:
DSTOFFSET = timedelta(seconds = -_time.altzone)
else:
DSTOFFSET = STDOFFSET
DSTDIFF = DSTOFFSET - STDOFFSET
class LocalTimezone(tzinfo):
'''Implementation of tzinfo with the current time zone, based on
the platform's idea of local time
'''
def utcoffset(self, dt):
if self._isdst(dt):
return DSTOFFSET
else:
return STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return DSTDIFF
else:
return ZERO
def tzname(self, dt):
return _time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, -1)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
# Initialize setting for first day of the week. This is locale
# dependent, and the Gtk.Calendar widget already has good code to find it out.
# Unfortunately, the widget keeps that data private *%#*$()()*) !
MONDAY = 0 # iso calendar starts week at Monday
SUNDAY = 6
FIRST_DAY_OF_WEEK = None
def init_first_day_of_week():
global FIRST_DAY_OF_WEEK
try:
import babel
mylocale = babel.Locale(locale.getdefaultlocale()[0])
if mylocale.first_week_day == 0:
FIRST_DAY_OF_WEEK = MONDAY
else:
FIRST_DAY_OF_WEEK = SUNDAY
logger.debug('According to babel first day of week is %i', FIRST_DAY_OF_WEEK)
except Exception as e:
if not isinstance(e, ImportError):
logger.exception('Exception while loading \'babel\' library for first day of week')
# Fallback gleaned from gtkcalendar.c - hence the inconsistency
# with weekday numbers in iso calendar...
t = _("calendar:week_start:0")
# T: Translate to "calendar:week_start:0" if you want Sunday to be the first day of the week or to "calendar:week_start:1" if you want Monday to be the first day of the week
if t[-1] == '0':
FIRST_DAY_OF_WEEK = SUNDAY
elif t[-1] == '1':
FIRST_DAY_OF_WEEK = MONDAY
else:
logger.warning("Whoever translated 'calendar:week_start:0' did so wrongly.")
FIRST_DAY_OF_WEEK = SUNDAY
def dates_for_week(year, week):
'''Returns the first and last day of the week for a given
week number of a given year.
@param year: year as int (e.g. 2012)
@param week: week number as int (0 .. 53)
@returns: a 2-tuple of:
- a C{datetime.date} object for the start date of the week
	- a C{datetime.date} object for the end date of the week
@note: first day of the week can be either C{MONDAY} or C{SUNDAY},
this is configured in C{FIRST_DAY_OF_WEEK} based on the locale.
'''
# Note that the weeknumber in the isocalendar does NOT depend on the
# first day being Sunday or Monday, but on the first Thursday in the
# new year. See datetime.isocalendar() for details.
# If the year starts with e.g. a Friday, January 1st still belongs
# to week 53 of the previous year.
# Day of week in isocalendar starts with 1 for Mon and is 7 for Sun,
# and week starts on Monday.
if FIRST_DAY_OF_WEEK is None:
init_first_day_of_week()
jan1 = date(year, 1, 1)
_, jan1_week, jan1_weekday = jan1.isocalendar()
if FIRST_DAY_OF_WEEK == MONDAY:
days = jan1_weekday - 1
# if Jan 1 is a Monday, days is 0
else:
days = jan1_weekday
# if Jan 1 is a Monday, days is 1
# for Sunday it becomes 7 (or -1 week)
if jan1_week == 1:
weeks = week - 1
else:
# Jan 1st is still wk53 of the previous year
weeks = week
start = jan1 + timedelta(days=-days, weeks=weeks)
end = start + timedelta(days=6)
return start, end
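# Worked example (assuming a locale where Monday starts the week): 1 Jan 2012 falls
# on a Sunday and still belongs to ISO week 52 of 2011, so dates_for_week(2012, 1)
# yields Mon 2 Jan 2012 .. Sun 8 Jan 2012.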
def weekcalendar(date):
'''Get the year, week number and week day for a specific date.
Like C{datetime.date.isocalendar()} but takes into account
C{FIRST_DAY_OF_WEEK} correctly.
@param date: a C{datetime.date} or C{datetime.datetime} object
@returns: a year, a week number and a weekday as integers
The weekday numbering depends on locale, 1 is always first day
of the week, either a Sunday or a Monday.
'''
	# Both strftime %W and %U are not correct, they use a different
# week number count than the isocalendar. See datetime
# module for details.
# In short Jan 1st can still be week 53 of the previous year
# So we can use isocalendar(), however this does not take
	# into account FIRST_DAY_OF_WEEK, see comment in dates_for_week()
if FIRST_DAY_OF_WEEK is None:
init_first_day_of_week()
year, week, weekday = date.isocalendar()
if FIRST_DAY_OF_WEEK == SUNDAY and weekday == 7:
# iso calendar gives us the week ending this sunday,
# we want the next week
monday = date + timedelta(days=1)
year, week, weekday = monday.isocalendar()
elif FIRST_DAY_OF_WEEK == SUNDAY:
weekday += 1
return year, week, weekday
def strfcal(format, date):
'''Method similar to strftime, but dealing with the weeknumber,
day of the week and the year of that week.
Week 1 is the first week where the Thursday is in the new year. So e.g. the
last day of 2012 is a Monday. And therefore the calendar week for 31 Dec 2012
is already week 1 2013.
The locale decides whether a week starts on Monday (as the ISO standard would have
it) or on Sunday. So depending on your locale Sun 6 Jan 2013 is either still week
1 or already the first day of week 2.
Codes supported by this method:
- C{%w} is replaced by the weekday as a decimal number [1,7], with 1 representing
either Monday or Sunday depending on the locale
- C{%W} is replaced by the weeknumber depending on the locale
- C{%Y} is replaced by the year with century as a decimal number, the year depends
on the weeknumber depending on the locale
- C{%%} is replaced by %
Difference between this method and strftime is that:
1. It uses locale to determine the first day of the week
2. It returns the year that goes with the weeknumber
'''
# TODO: may want to add version of the codes that allow forcing
# Monday or Sunday as first day, e.g. using %u %U %X and %v %V %Z
year, week, weekday = weekcalendar(date)
def METHOD_NAME(matchobj):
code = matchobj.group(0)
if code == '%w':
return str(weekday)
elif code == '%W':
return '%02d' % week
elif code == '%Y':
return str(year)
elif code == '%%':
return '%'
else:
return code # ignore unsupported codes
return re.sub(r'%.', METHOD_NAME, format)
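# Worked example (assuming a Monday-start locale): 31 Dec 2012 is a Monday that
# already belongs to week 1 of 2013, so strfcal('%Y week %W day %w', date(2012, 12, 31))
# gives '2013 week 01 day 1'.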
def strftime(format, date):
# TODO: deprecate this function
return date.strftime(format)
if __name__ == '__main__': #pragma: no cover
import gettext
gettext.install('zim', None, names=('_', 'gettext', 'ngettext'))
init_first_day_of_week()
if FIRST_DAY_OF_WEEK == SUNDAY:
print('First day of week: Sunday')
else:
print('First day of week: Monday')
print('Now:', now().isoformat(), strftime("%z, %Z", now()))
print('Calendar:', strfcal('day %w of week %W %Y', now())) | null |
trainable args update method | import tempfile
import pytest
from ray import tune
from autogluon.core.hpo.ray_hpo import AutommRayTuneAdapter, RayTuneAdapter, TabularRayTuneAdapter, run
from autogluon.core.hpo.ray_tune_constants import SCHEDULER_PRESETS, SEARCHER_PRESETS
class DummyAdapter(RayTuneAdapter):
supported_searchers = list(SEARCHER_PRESETS.keys())
supported_schedulers = list(SCHEDULER_PRESETS.keys())
@property
def adapter_type(self):
return "dummy"
def get_resource_calculator(self, **kwargs):
pass
def get_resources_per_trial(self, total_resources, num_samples, **kwargs):
return {"cpu": 1}
def METHOD_NAME(self, trainable_args):
return {}
DUMMY_SEARCH_SPACE = {"a": tune.uniform(0, 1), "b": tune.uniform(0, 20)}
def _dummy_objective(x, a, b):
return a * (x**0.5) + b
def _dummy_trainable(config):
for x in range(20):
score = _dummy_objective(x, config["a"], config["b"])
tune.report(score=score)
def test_invalid_searcher():
hyperparameter_tune_kwargs = dict(
searcher="abc",
scheduler="FIFO",
num_trials=1,
)
with tempfile.TemporaryDirectory() as root:
with pytest.raises(Exception) as e_info:
run(
trainable=_dummy_trainable,
trainable_args=dict(),
search_space=DUMMY_SEARCH_SPACE,
hyperparameter_tune_kwargs=hyperparameter_tune_kwargs,
metric="score",
mode="min",
save_dir=root,
ray_tune_adapter=DummyAdapter(),
)
def test_invalid_scheduler():
hyperparameter_tune_kwargs = dict(
searcher="random",
scheduler="abc",
num_trials=1,
)
with tempfile.TemporaryDirectory() as root:
with pytest.raises(Exception) as e_info:
run(
trainable=_dummy_trainable,
trainable_args=dict(),
search_space=DUMMY_SEARCH_SPACE,
hyperparameter_tune_kwargs=hyperparameter_tune_kwargs,
metric="score",
mode="min",
save_dir=root,
ray_tune_adapter=DummyAdapter(),
)
def test_invalid_preset():
hyperparameter_tune_kwargs = "abc"
with tempfile.TemporaryDirectory() as root:
with pytest.raises(Exception) as e_info:
run(
trainable=_dummy_trainable,
trainable_args=dict(),
search_space=DUMMY_SEARCH_SPACE,
hyperparameter_tune_kwargs=hyperparameter_tune_kwargs,
metric="score",
mode="min",
save_dir=root,
ray_tune_adapter=DummyAdapter(),
)
def test_empty_search_space():
hyperparameter_tune_kwargs = dict(
searcher="random",
scheduler="FIFO",
num_trials=1,
)
with tempfile.TemporaryDirectory() as root:
with pytest.raises(Exception) as e_info:
run(
trainable=_dummy_trainable,
trainable_args=dict(),
search_space=dict(),
hyperparameter_tune_kwargs=hyperparameter_tune_kwargs,
metric="score",
mode="min",
save_dir=root,
ray_tune_adapter=DummyAdapter(),
)
@pytest.mark.platform
@pytest.mark.parametrize("searcher", list(SEARCHER_PRESETS.keys()))
@pytest.mark.parametrize("scheduler", list(SCHEDULER_PRESETS.keys()))
def test_run(searcher, scheduler):
hyperparameter_tune_kwargs = dict(
searcher=searcher,
scheduler=scheduler,
num_trials=2,
)
with tempfile.TemporaryDirectory() as root:
analysis = run(
trainable=_dummy_trainable,
trainable_args=dict(),
search_space=DUMMY_SEARCH_SPACE,
hyperparameter_tune_kwargs=hyperparameter_tune_kwargs,
metric="score",
mode="min",
save_dir=root,
ray_tune_adapter=DummyAdapter(),
)
assert analysis is not None | null |
check and add owner | from __future__ import annotations
import base64
import zlib
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
from DIRAC.Core.Utilities.DErrno import EWMSSUBM, EWMSJMAN
from DIRAC.Core.Utilities.ObjectLoader import ObjectLoader
from DIRAC.WorkloadManagementSystem.Client.JobState.JobManifest import JobManifest
from DIRAC.WorkloadManagementSystem.Client import JobStatus
from DIRAC.Core.Utilities.ReturnValues import returnValueOrRaise
getDIRACPlatform = returnValueOrRaise(
ObjectLoader().loadObject("ConfigurationSystem.Client.Helpers.Resources", "getDIRACPlatform")
)
def compressJDL(jdl):
"""Return compressed JDL string."""
return base64.b64encode(zlib.compress(jdl.encode(), -1)).decode()
def extractJDL(compressedJDL):
"""Return decompressed JDL string."""
    # the starting bracket is guaranteed by JobManager.submitJob
# we need the check to be backward compatible
if isinstance(compressedJDL, bytes):
if compressedJDL.startswith(b"["):
return compressedJDL.decode()
else:
if compressedJDL.startswith("["):
return compressedJDL
return zlib.decompress(base64.b64decode(compressedJDL)).decode()
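# Minimal round-trip sketch (not part of the original module): extractJDL() should
# undo compressJDL(), and a JDL that already starts with "[" passes through untouched.
# The JDL snippet below is a made-up example.
def _example_jdl_roundtrip():
    jdl = '[Executable = "my_script.sh";]'
    assert extractJDL(compressJDL(jdl)) == jdl
    assert extractJDL(jdl) == jdl
    assert extractJDL(jdl.encode()) == jdl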
def METHOD_NAME(jdl: str, owner: str, ownerGroup: str) -> JobManifest:
jobManifest = JobManifest()
res = jobManifest.load(jdl)
if not res["OK"]:
return res
jobManifest.setOptionsFromDict({"Owner": owner, "OwnerGroup": ownerGroup})
res = jobManifest.check()
if not res["OK"]:
return res
return S_OK(jobManifest)
def fixJDL(jdl: str) -> str:
# 1.- insert original JDL on DB and get new JobID
# Fix the possible lack of the brackets in the JDL
if jdl.strip()[0].find("[") != 0:
jdl = "[" + jdl + "]"
return jdl
def checkAndPrepareJob(jobID, classAdJob, classAdReq, owner, ownerGroup, jobAttrs, vo):
error = ""
jdlOwner = classAdJob.getAttributeString("Owner")
jdlOwnerGroup = classAdJob.getAttributeString("OwnerGroup")
jdlVO = classAdJob.getAttributeString("VirtualOrganization")
# The below is commented out since this is always overwritten by the submitter IDs
    # but the check allows detecting inconsistent client environments
if jdlOwner and jdlOwner != owner:
error = "Wrong Owner in JDL"
elif jdlOwnerGroup and jdlOwnerGroup != ownerGroup:
error = "Wrong Owner Group in JDL"
elif jdlVO and jdlVO != vo:
error = "Wrong Virtual Organization in JDL"
classAdJob.insertAttributeString("Owner", owner)
classAdJob.insertAttributeString("OwnerGroup", ownerGroup)
if vo:
classAdJob.insertAttributeString("VirtualOrganization", vo)
classAdReq.insertAttributeString("Owner", owner)
classAdReq.insertAttributeString("OwnerGroup", ownerGroup)
if vo:
classAdReq.insertAttributeString("VirtualOrganization", vo)
inputDataPolicy = Operations(vo=vo).getValue("InputDataPolicy/InputDataModule")
if inputDataPolicy and not classAdJob.lookupAttribute("InputDataModule"):
classAdJob.insertAttributeString("InputDataModule", inputDataPolicy)
# priority
priority = classAdJob.getAttributeInt("Priority")
if priority is None:
priority = 0
classAdReq.insertAttributeInt("UserPriority", priority)
# CPU time
cpuTime = classAdJob.getAttributeInt("CPUTime")
if cpuTime is None:
opsHelper = Operations(group=ownerGroup)
cpuTime = opsHelper.getValue("JobDescription/DefaultCPUTime", 86400)
classAdReq.insertAttributeInt("CPUTime", cpuTime)
# platform(s)
platformList = classAdJob.getListFromExpression("Platform")
if platformList:
result = getDIRACPlatform(platformList)
if not result["OK"]:
return result
if result["Value"]:
classAdReq.insertAttributeVectorString("Platforms", result["Value"])
else:
error = "OS compatibility info not found"
if error:
retVal = S_ERROR(EWMSSUBM, error)
retVal["JobId"] = jobID
retVal["Status"] = JobStatus.FAILED
retVal["MinorStatus"] = error
jobAttrs["Status"] = JobStatus.FAILED
jobAttrs["MinorStatus"] = error
return retVal
return S_OK()
def createJDLWithInitialStatus(
classAdJob, classAdReq, jdl2DBParameters, jobAttrs, initialStatus, initialMinorStatus, *, modern=False
):
"""
:param modern: if True, store boolean instead of string for VerifiedFlag (used by diracx only)
"""
priority = classAdJob.getAttributeInt("Priority")
if priority is None:
priority = 0
jobAttrs["UserPriority"] = priority
for jdlName in jdl2DBParameters:
# Defaults are set by the DB.
jdlValue = classAdJob.getAttributeString(jdlName)
if jdlValue:
jobAttrs[jdlName] = jdlValue
jdlValue = classAdJob.getAttributeString("Site")
if jdlValue:
if jdlValue.find(",") != -1:
jobAttrs["Site"] = "Multiple"
else:
jobAttrs["Site"] = jdlValue
jobAttrs["VerifiedFlag"] = True if modern else "True"
jobAttrs["Status"] = initialStatus
jobAttrs["MinorStatus"] = initialMinorStatus
reqJDL = classAdReq.asJDL()
classAdJob.insertAttributeInt("JobRequirements", reqJDL)
return classAdJob.asJDL() | null |
make locations list | #!/usr/bin/env python
"""Various helper functions for workqueue"""
import logging
import os
from WMCore.Services.CRIC.CRIC import CRIC
__dbses = {}
def get_dbs(url):
"""Return DBS object for url"""
try:
return __dbses[url]
except KeyError:
from WMCore.Services.DBS.DBSReader import DBSReader
__dbses[url] = DBSReader(url)
return __dbses[url]
__cric = None
__cmsSiteNames = []
def cmsSiteNames():
"""Get all cms sites"""
global __cmsSiteNames
if __cmsSiteNames:
return __cmsSiteNames
global __cric
if not __cric:
__cric = CRIC()
try:
__cmsSiteNames = __cric.getAllPSNs()
except Exception:
pass
return __cmsSiteNames
def METHOD_NAME(siteWhitelist, siteBlacklist):
"""
_makeLocationsList_
Make a location list based on the intersection between a site white list
and blacklist, if none specified then all sites are listed.
"""
sites = cmsSiteNames()
if siteWhitelist:
# Just get the CMS sites matching the whitelists
sites = list(set(sites) & set(siteWhitelist))
if siteBlacklist:
# Get all CMS sites less the blacklist
sites = list(set(sites) - set(siteBlacklist))
return sites
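# Illustrative example (hypothetical site names): with sites ['T1_A', 'T2_B', 'T2_C'], a whitelist
# ['T2_B', 'T2_C'] and a blacklist ['T2_C'] leave ['T2_B']; ordering is not guaranteed because of
# the set operations.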
def queueFromConfig(config):
"""Create a queue from the config object"""
config = queueConfigFromConfigObject(config)
if config.WorkQueueManager.level == 'GlobalQueue':
from WMCore.WorkQueue.WorkQueue import globalQueue
return globalQueue(**config.WorkQueueManager.queueParams)
elif config.WorkQueueManager.level == 'LocalQueue':
from WMCore.WorkQueue.WorkQueue import localQueue
return localQueue(**config.WorkQueueManager.queueParams)
else:
from WMCore.WorkQueue.WorkQueue import WorkQueue
return WorkQueue(**config.WorkQueueManager.queueParams)
def queueConfigFromConfigObject(config):
"""From a config object create a config dict suitable for a queue object"""
from os import path
wqManager = config.section_('WorkQueueManager')
if not hasattr(wqManager, 'componentDir'):
wqManager.componentDir = path.join(config.General.WorkDir,
'WorkQueueManager')
if not hasattr(wqManager, 'namespace'):
wqManager.namespace = 'WMComponent.WorkQueueManager.WorkQueueManager'
if not hasattr(wqManager, 'logLevel'):
wqManager.logLevel = 'INFO'
if not hasattr(wqManager, 'pollInterval'):
wqManager.pollInterval = 600
# WorkQueue config
if not hasattr(wqManager, 'queueParams'):
wqManager.queueParams = {}
qConfig = wqManager.queueParams
# Rucio-related config
if hasattr(wqManager, 'rucioUrl'):
qConfig['rucioUrl'] = wqManager.rucioUrl
if hasattr(wqManager, 'rucioAuthUrl'):
qConfig['rucioAuthUrl'] = wqManager.rucioAuthUrl
if hasattr(wqManager, 'couchurl'):
qConfig['CouchUrl'] = wqManager.couchurl
if hasattr(wqManager, 'dbname'):
qConfig['DbName'] = wqManager.dbname
if hasattr(wqManager, 'inboxDatabase'):
qConfig['InboxDbName'] = wqManager.inboxDatabase
# pull some info we need from other areas of the config
if "BossAirConfig" not in qConfig and hasattr(config, 'BossAir'):
qConfig["BossAirConfig"] = config
qConfig['BossAirConfig'].section_("Agent").agentName = config.Agent.agentName
if "JobDumpConfig" not in qConfig and hasattr(config, 'JobStateMachine'):
qConfig["JobDumpConfig"] = config
if "CacheDir" not in qConfig and getattr(config.WorkQueueManager, 'componentDir', None):
qConfig['CacheDir'] = os.path.join(config.WorkQueueManager.componentDir, 'cache')
if 'Team' not in qConfig and hasattr(config.Agent, 'teamName'):
qConfig['Team'] = config.Agent.teamName
if 'logger' not in qConfig:
import threading
myThread = threading.currentThread()
if not hasattr(myThread, 'logger'):
loggingLevelName = getattr(wqManager, 'logLevel', 'INFO')
logging.basicConfig(format='%(asctime)-15s %(levelname)-8s %(module)s: %(message)s',
level=getattr(logging, loggingLevelName))
myThread.logger = logging.getLogger('workqueue')
qConfig['logger'] = myThread.logger
# ReqMgr params
if not hasattr(wqManager, 'reqMgrConfig'):
wqManager.reqMgrConfig = {}
return config | null |
a | import re
def _prefer_non_zero(*args):
for arg in args:
if arg != 0:
return arg
return 0.0
def _ntos(n):
# %f likes to add unnecessary 0's, %g isn't consistent about # decimals
return ("%.3f" % n).rstrip("0").rstrip(".")
def _strip_xml_ns(tag):
# ElementTree API doesn't provide a way to ignore XML namespaces in tags
# so we here strip them ourselves: cf. https://bugs.python.org/issue18304
return tag.split("}", 1)[1] if "}" in tag else tag
def _transform(raw_value):
# TODO assumes a 'matrix' transform.
# No other transform functions are supported at the moment.
# https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/transform
# start simple: if you aren't exactly matrix(...) then no love
match = re.match(r"matrix\((.*)\)", raw_value)
if not match:
raise NotImplementedError
matrix = tuple(float(p) for p in re.split(r"\s+|,", match.group(1)))
if len(matrix) != 6:
raise ValueError("wrong # of terms in %s" % raw_value)
return matrix
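# Illustrative example: _transform("matrix(1 0 0 1 10 20)") returns (1.0, 0.0, 0.0, 1.0, 10.0, 20.0);
# any transform attribute that is not exactly matrix(...) raises NotImplementedError.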
class PathBuilder(object):
def __init__(self):
self.paths = []
self.transforms = []
def _start_path(self, initial_path=""):
self.paths.append(initial_path)
self.transforms.append(None)
def _end_path(self):
self._add("z")
def _add(self, path_snippet):
path = self.paths[-1]
if path:
path += " " + path_snippet
else:
path = path_snippet
self.paths[-1] = path
def _move(self, c, x, y):
self._add("%s%s,%s" % (c, _ntos(x), _ntos(y)))
def M(self, x, y):
self._move("M", x, y)
def m(self, x, y):
self._move("m", x, y)
def _arc(self, c, rx, ry, x, y, large_arc):
self._add(
"%s%s,%s 0 %d 1 %s,%s"
% (c, _ntos(rx), _ntos(ry), large_arc, _ntos(x), _ntos(y))
)
def METHOD_NAME(self, rx, ry, x, y, large_arc=0):
self._arc("A", rx, ry, x, y, large_arc)
def a(self, rx, ry, x, y, large_arc=0):
self._arc("a", rx, ry, x, y, large_arc)
def _vhline(self, c, x):
self._add("%s%s" % (c, _ntos(x)))
def H(self, x):
self._vhline("H", x)
def h(self, x):
self._vhline("h", x)
def V(self, y):
self._vhline("V", y)
def v(self, y):
self._vhline("v", y)
def _line(self, c, x, y):
self._add("%s%s,%s" % (c, _ntos(x), _ntos(y)))
def L(self, x, y):
self._line("L", x, y)
def l(self, x, y):
self._line("l", x, y)
def _parse_line(self, line):
x1 = float(line.attrib.get("x1", 0))
y1 = float(line.attrib.get("y1", 0))
x2 = float(line.attrib.get("x2", 0))
y2 = float(line.attrib.get("y2", 0))
self._start_path()
self.M(x1, y1)
self.L(x2, y2)
def _parse_rect(self, rect):
x = float(rect.attrib.get("x", 0))
y = float(rect.attrib.get("y", 0))
w = float(rect.attrib.get("width"))
h = float(rect.attrib.get("height"))
rx = float(rect.attrib.get("rx", 0))
ry = float(rect.attrib.get("ry", 0))
rx = _prefer_non_zero(rx, ry)
ry = _prefer_non_zero(ry, rx)
# TODO there are more rules for adjusting rx, ry
self._start_path()
self.M(x + rx, y)
self.H(x + w - rx)
if rx > 0:
self.METHOD_NAME(rx, ry, x + w, y + ry)
self.V(y + h - ry)
if rx > 0:
self.METHOD_NAME(rx, ry, x + w - rx, y + h)
self.H(x + rx)
if rx > 0:
self.METHOD_NAME(rx, ry, x, y + h - ry)
self.V(y + ry)
if rx > 0:
self.METHOD_NAME(rx, ry, x + rx, y)
self._end_path()
def _parse_path(self, path):
if "d" in path.attrib:
self._start_path(initial_path=path.attrib["d"])
def _parse_polygon(self, poly):
if "points" in poly.attrib:
self._start_path("M" + poly.attrib["points"])
self._end_path()
def _parse_polyline(self, poly):
if "points" in poly.attrib:
self._start_path("M" + poly.attrib["points"])
def _parse_circle(self, circle):
cx = float(circle.attrib.get("cx", 0))
cy = float(circle.attrib.get("cy", 0))
r = float(circle.attrib.get("r"))
# arc doesn't seem to like being a complete shape, draw two halves
self._start_path()
self.M(cx - r, cy)
self.METHOD_NAME(r, r, cx + r, cy, large_arc=1)
self.METHOD_NAME(r, r, cx - r, cy, large_arc=1)
def _parse_ellipse(self, ellipse):
cx = float(ellipse.attrib.get("cx", 0))
cy = float(ellipse.attrib.get("cy", 0))
rx = float(ellipse.attrib.get("rx"))
ry = float(ellipse.attrib.get("ry"))
# arc doesn't seem to like being a complete shape, draw two halves
self._start_path()
self.M(cx - rx, cy)
self.METHOD_NAME(rx, ry, cx + rx, cy, large_arc=1)
self.METHOD_NAME(rx, ry, cx - rx, cy, large_arc=1)
def add_path_from_element(self, el):
tag = _strip_xml_ns(el.tag)
parse_fn = getattr(self, "_parse_%s" % tag.lower(), None)
if not callable(parse_fn):
return False
parse_fn(el)
if "transform" in el.attrib:
self.transforms[-1] = _transform(el.attrib["transform"])
return True | null |
reindent | #! /usr/bin/env python
"""
This script generates ReST files based on the contents of the GPI.
These files are then parsed by Sphinx to create the GPI documentation.
"""
from __future__ import print_function, absolute_import
import sys
import os
import inspect
from itertools import zip_longest
import shutil
doc_dir = os.path.dirname(os.path.realpath(__file__))
python_dir = doc_dir + '/../ganga'
sys.path.insert(0, os.path.abspath(os.path.join(doc_dir, '..', 'ganga')))
from GangaCore.GPIDev.Base.Proxy import GPIProxyObject, stripProxy, getName
print('Generating GPI documentation')
## LOADING GANGA ##
import GangaCore.PACKAGE
GangaCore.PACKAGE.standardSetup()
import GangaCore.Runtime
gangadir = os.path.expandvars('$HOME/gangadir_sphinx_dummy')
this_argv = [
'ganga', # `argv[0]` is usually the name of the program so fake that here
'-o[Configuration]RUNTIME_PATH=GangaCore',
'-o[Configuration]gangadir={gangadir}'.format(gangadir=gangadir),
]
# Actually parse the options
GangaCore.Runtime._prog = GangaCore.Runtime.GangaProgram(argv=this_argv)
GangaCore.Runtime._prog.parseOptions()
# Perform the configuration and bootstrap steps in ganga
GangaCore.Runtime._prog.configure()
GangaCore.Runtime._prog.initEnvironment()
GangaCore.Runtime._prog.bootstrap(interactive=False)
## FINISHED LOADING GANGA ##
def indent(s, depth=''):
# type: (str, str) -> str
"""
    Adds `depth` to the beginning of every line
"""
return '\n'.join(depth+l for l in s.splitlines())
def METHOD_NAME(docstring, depth=''):
# type: (str, str) -> str
"""
Returns ``docstring`` trimmed and then each line prepended with ``depth``
"""
return indent(inspect.cleandoc(docstring), depth)
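# Illustrative example: applied to an indented docstring with depth='    ', the helper first runs
# inspect.cleandoc() to strip the common leading whitespace and then prefixes every line with four spaces.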
def signature(func, name=None):
"""
Args:
func: a function object
name: an optional name for the function in case the function has been aliased
Returns: a string representing its signature as would be written in code
"""
args, varargs, varkw, defaults = inspect.getargspec(func)
defaults = defaults or [] # If there are no defaults, set it to an empty list
defaults = [repr(d) for d in defaults] # Type to get a useful string representing the default
# Based on a signature like foo(a, b, c=None, d=4, *args, **kwargs)
# we get args=['a', 'b', 'c', 'd'] and defaults=['None', '4']
# We must match them backwards from the end and pad the beginning with None
# to get arg_pairs=[('a', None), ('b', None), ('c', 'None'), ('d', '4')]
arg_pairs = reversed([(a, d) for a, d in zip_longest(reversed(args), reversed(defaults), fillvalue=None)])
# Based on arg_pairs we convert it into
    # arg_strings=['a', 'b', 'c=None', 'd=4']
arg_strings = []
for arg, default in arg_pairs:
full_arg = arg
if default is not None:
full_arg += '='+default
arg_strings.append(full_arg)
# and append args and kwargs if necessary to get
    # arg_strings=['a', 'b', 'c=None', 'd=4', '*args', '**kwargs']
if varargs is not None:
arg_strings.append('*'+varargs)
if varkw is not None:
arg_strings.append('**'+varkw)
# Signature is then 'foo(a, b, c=None, d=4, *args, **kwargs)'
return '{name}({args})'.format(name=name or func.__name__, args=', '.join(arg_strings))
# First we get all objects that are in Ganga.GPI and filter out any non-GangaObjects
gpi_classes = [stripProxy(o) for name, o in GangaCore.GPI.__dict__.items() if isinstance(o, type) and issubclass(o, GPIProxyObject)]
with open(doc_dir+'/GPI/classes.rst', 'w') as cf:
print('GPI classes', file=cf)
print('===========', file=cf)
print('', file=cf)
# For each class we generate a set of ReST using '.. class::' etc.
for c in gpi_classes:
print('.. class:: {module_name}'.format(module_name=getName(c)), file=cf)
if c.__doc__:
print('', file=cf)
print(METHOD_NAME(c.__doc__, ' '), file=cf)
print('', file=cf)
print(METHOD_NAME('Plugin category: ' + c._category, ' '), file=cf)
print('', file=cf)
# Go through each schema item and document it if it's not hidden
visible_items = ((name, item) for name, item in c._schema.allItems() if not item['hidden'])
for name, item in visible_items:
# These are the only ones we want to show in the docs
properties_we_care_about = ['protected', 'defvalue', 'changable_at_resubmit']
props = dict((k, v) for k, v in item._meta.items() if k in properties_we_care_about)
print(' .. attribute:: {name}'.format(name=name), file=cf)
print('', file=cf)
print(METHOD_NAME(item['doc'], depth=' '), file=cf)
print(METHOD_NAME(str(props), depth=' '), file=cf)
print('', file=cf)
# Add documentation for each exported method
for method_name in c._exportmethods:
try:
f = getattr(c, method_name).__func__
except AttributeError as e:
# Some classes have erroneous export lists so we catch them here and print a warning
print('WARNING on class', getName(c), ':', end=' ', file=sys.stderr)
print(e, file=sys.stderr)
continue
print(' .. method:: {signature}'.format(signature=signature(f)), file=cf)
if f.__doc__:
print('', file=cf)
print(METHOD_NAME(f.__doc__, ' '), file=cf)
print('', file=cf)
print('', file=cf)
print('', file=cf)
# Looking through the plugin list helps categorise the GPI objects
with open(doc_dir+'/GPI/plugins.rst', 'w') as pf:
from GangaCore.Utility.Plugin.GangaPlugin import allPlugins
print('Plugins', file=pf)
print('=======', file=pf)
print('', file=pf)
for c, ps in allPlugins.allCategories().items():
print(c, file=pf)
print('-'*len(c), file=pf)
print('', file=pf)
for name, c in ps.items():
if c._declared_property('hidden'):
continue
print('* :class:`{name}`'.format(name=name), file=pf)
print('', file=pf)
print('')
# All objects that are not proxied GangaObjects
gpi_objects = dict((name, stripProxy(o)) for name, o in GangaCore.GPI.__dict__.items() if stripProxy(o) not in gpi_classes and not name.startswith('__'))
# Any objects which are not exposed as proxies (mostly exceptions)
non_proxy_classes = dict((k, v) for k, v in gpi_objects.items() if inspect.isclass(v))
# Anything which is callable
callables = dict((k, v) for k, v in gpi_objects.items() if callable(v) and v not in non_proxy_classes.values())
# Things which were declared as actual functions
functions = dict((k, v) for k, v in callables.items() if inspect.isfunction(v) or inspect.ismethod(v))
with open(doc_dir+'/GPI/functions.rst', 'w') as ff:
print('Functions', file=ff)
print('=========', file=ff)
print('', file=ff)
for name, func in functions.items():
print('.. function:: {signature}'.format(signature=signature(func, name)), file=ff)
print('', file=ff)
print(METHOD_NAME(func.__doc__ or '', ' '), file=ff)
print('', file=ff)
## EXITING GANGA ##
from GangaCore.Core.InternalServices import ShutdownManager
# make sure we don't have an interactive shutdown policy
from GangaCore.Core.GangaThread import GangaThreadPool
GangaThreadPool.shutdown_policy = 'batch'
# This should now be safe
ShutdownManager._protected_ganga_exitfuncs()
shutil.rmtree(gangadir, ignore_errors=True)
## FINISHED EXITING GANGA ## | null |
setup | # Copyright (c) 2008, Thomas Hurst <[email protected]>
#
# Use of this file is unrestricted provided this notice is retained.
# If you use it, it'd be nice if you dropped me a note. Also beer.
from terminatorlib.util import dbg, err
from terminatorlib.version import APP_NAME, APP_VERSION
import socket
import threading
import sys
if sys.version_info < (3, 0):
import SocketServer as socketserver
else:
import socketserver
import code
import readline
import rlcompleter
import re
def ddbg(msg):
# uncomment this to get lots of spam from debugserver
return
dbg(msg)
class PythonConsoleServer(socketserver.BaseRequestHandler):
env = None
def METHOD_NAME(self):
dbg('debugserver: connect from %s' % str(self.client_address))
ddbg('debugserver: env=%r' % PythonConsoleServer.env)
self.console = TerminatorConsole(PythonConsoleServer.env)
def handle(self):
ddbg("debugserver: handling")
try:
self.socketio = self.request.makefile(mode='rw')
sys.stdout = self.socketio
sys.stdin = self.socketio
sys.stderr = self.socketio
self.console.run(self)
finally:
sys.stdout = sys.__stdout__
sys.stdin = sys.__stdin__
sys.stderr = sys.__stderr__
self.socketio.close()
ddbg("debugserver: done handling")
def verify_request(self, request, client_address):
return True
def finish(self):
ddbg('debugserver: disconnect from %s' % str(self.client_address))
# rfc1116/rfc1184
LINEMODE = chr(34) # Linemode negotiation
NULL = chr(0)
ECHO = chr(1)
CR = chr(13)
LF = chr(10)
SE = chr(240) # End subnegotiation
NOP = chr(241)
DM = chr(242) # Data Mark
BRK = chr(243) # Break
IP = chr(244) # Interrupt Process
AO = chr(245) # Abort Output
AYT = chr(246) # Are You There
EC = chr(247) # Erase Character
EL = chr(248) # Erase Line
GA = chr(249) # Go Ahead
SB = chr(250) # Subnegotiation follows
WILL = chr(251) # Subnegotiation commands
WONT = chr(252)
DO = chr(253)
DONT = chr(254)
IAC = chr(255) # Interpret As Command
UIAC = '(^|[^' + IAC + '])' + IAC # Unescaped IAC
BareLF = re.compile('([^' + CR + '])' + CR)
DoDont = re.compile(UIAC +'[' + DO + DONT + '](.)')
WillWont = re.compile(UIAC + '[' + WILL + WONT + '](.)')
AreYouThere = re.compile(UIAC + AYT)
IpTelnet = re.compile(UIAC + IP)
OtherTelnet = re.compile(UIAC + '[^' + IAC + ']')
# See http://blade.nagaokaut.ac.jp/cgi-bin/scat.rb/ruby/ruby-talk/205335 for telnet bits
# Python doesn't make this an especially neat conversion :(
class TerminatorConsole(code.InteractiveConsole):
def parse_telnet(self, data):
odata = data
data = re.sub(BareLF, '\\1', data)
data = data.replace(CR + NULL, '')
data = data.replace(NULL, '')
bits = re.findall(DoDont, data)
ddbg("bits = %r" % bits)
if bits:
data = re.sub(DoDont, '\\1', data)
ddbg("telnet: DO/DON'T answer")
# answer DO and DON'T with WON'T
for bit in bits:
self.write(IAC + WONT + bit[1])
bits = re.findall(WillWont, data)
if bits:
data = re.sub(WillWont, '\\1', data)
ddbg("telnet: WILL/WON'T answer")
for bit in bits:
                # answer WILLs and WON'Ts with DON'Ts
self.write(IAC + DONT + bit[1])
bits = re.findall(AreYouThere, data)
if bits:
ddbg("telnet: am I there answer")
data = re.sub(AreYouThere, '\\1', data)
for bit in bits:
self.write("Yes, I'm still here, I think.\n")
(data, interrupts) = re.subn(IpTelnet, '\\1', data)
if interrupts:
ddbg("debugserver: Ctrl-C detected")
raise KeyboardInterrupt
data = re.sub(OtherTelnet, '\\1', data) # and any other Telnet codes
data = data.replace(IAC + IAC, IAC) # and handle escapes
if data != odata:
ddbg("debugserver: Replaced %r with %r" % (odata, data))
return data
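    # Illustrative exchange for the negotiation above: a client sending IAC DO ECHO is answered
    # with IAC WONT ECHO, and the option bytes are stripped from the data that is returned.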
def raw_input(self, prompt = None):
ddbg("debugserver: raw_input prompt = %r" % prompt)
if prompt:
self.write(prompt)
buf = ''
compstate = 0
while True:
data = self.server.socketio.read(1)
ddbg('raw_input: char=%r' % data)
if data == LF or data == '\006':
buf = self.parse_telnet(buf + data)
if buf != '':
return buf
elif data == '\004' or data == '': # ^D
raise EOFError
else:
buf += data
def write(self, data):
ddbg("debugserver: write %r" % data)
self.server.socketio.write(data)
self.server.socketio.flush()
def run(self, server):
self.server = server
self.write("Welcome to the %s-%s debug server, have a nice stay\n" % (APP_NAME, APP_VERSION))
self.interact()
try:
self.write("Time to go. Bye!\n")
except:
pass
def spawn(env):
PythonConsoleServer.env = env
tcpserver = socketserver.TCPServer(('127.0.0.1', 0), PythonConsoleServer)
dbg("debugserver: listening on %s" % str(tcpserver.server_address))
debugserver = threading.Thread(target=tcpserver.serve_forever, name="DebugServer")
debugserver.setDaemon(True)
debugserver.start()
return(debugserver, tcpserver)
| null |
on get item image | from __future__ import absolute_import, division, print_function
import wxtbx.bitmaps
from libtbx.queuing_system_utils import sge_utils
from libtbx.utils import Sorry
import wx
try :
from wx.lib.agw.genericmessagedialog import GenericMessageDialog
except ImportError :
GenericMessageDialog = wx.MessageBox
import sys, time
import getpass
user = getpass.getuser()
job_attrs = ["job_id", "state", "name", "user", "submit", "queue"]
job_labels = ["Job ID", "Status", "Name", "User", "Start time", "Queue"]
status_codes = ["d", "E", "h", "r", "R", "s", "S", "t", "T", "w"]
status_imgs = [3, 4, 0, 1, 0, 0, 0, 0, 0, 2]
col_sizes = [wx.LIST_AUTOSIZE] * 4 + [200,200]
class qsub_list_data(object):
def __init__(self):
self._data = []
self._sortby = None
self._sort_descending = False
def Refresh(self):
self._data = sge_utils.qstat_parse()
if self._sortby is not None :
self.SortItems(self._sortby, swap_order=False)
def GetItemCount(self):
return len(self._data)
def GetItemText(self, item, col):
return getattr(self._data[item], job_attrs[col])
def GetItemImage(self, item):
status = self._data[item].state[-1]
img_id = status_imgs[status_codes.index(status)]
return img_id
def SortItems(self, col, swap_order=True):
if swap_order :
if self._sortby == col :
self._sort_descending = (not self._sort_descending)
else :
self._sort_descending = False
if col == 0 :
self._data.sort(key=lambda element: int(element.job_id))
elif col == 4 :
fmt = "%m/%d/%Y %H:%M:%S"
self._data.sort(key=lambda element: time.strptime(element.submit, fmt))
else :
attr = job_attrs[col]
self._data.sort(key=lambda element: getattr(element, attr))
if self._sort_descending :
self._data.reverse()
self._sortby = col
def GetOwners(self, job_ids, as_list=False):
names = []
for job in self._data :
if job.job_id in job_ids :
names.append(job.user)
if as_list :
return names
return list(set(names))
def GetNames(self, job_ids):
names = []
for job in self._data :
if job.job_id in job_ids :
names.append(job.name)
return names
class qsub_list_view(wx.ListCtrl):
def __init__(self, *args, **kwds):
wx.ListCtrl.__init__(self, *args, **kwds)
self.SetupImages()
self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnSelect, self)
self.Bind(wx.EVT_LIST_ITEM_DESELECTED, self.OnDeSelect, self)
self.Bind(wx.EVT_LEFT_DCLICK, self.OnDoubleClick, self)
self.Bind(wx.EVT_RIGHT_DOWN, self.OnRightClick, self)
self.Bind(wx.EVT_LIST_COL_CLICK, self.OnSort, self)
for i, label in enumerate(job_labels):
self.InsertColumn(i, label)
self.SetColumnWidth(i, col_sizes[i]) #wx.LIST_AUTOSIZE)
self.dataSource = qsub_list_data()
self.RefreshAllItems()
def SetupImages(self):
if wxtbx.bitmaps.icon_lib is None :
return
il = wx.ImageList(16, 16, True)
#il.Add(wx.EmptyBitmap(16,16)) #EmptyImage(16,16).ConvertToBitmap())
for icon in ["blank", "run", "recur", "stop", "status_unknown"] :
bmp = wxtbx.bitmaps.fetch_icon_bitmap("actions", icon, 16)
il.Add(bmp)
self.AssignImageList(il, wx.IMAGE_LIST_SMALL)
def METHOD_NAME(self, item):
return self.dataSource.GetItemImage(item)
def OnGetItemAttr(self, item):
pass
def OnGetItemText(self, item, col):
return self.dataSource.GetItemText(item, col)
def RefreshAllItems(self):
n_items = self.dataSource.GetItemCount()
self.SetItemCount(n_items)
if (n_items > 0):
self.RefreshItems(0, n_items - 1)
def GetSelectedJobIDs(self):
jobs = []
item = self.GetFirstSelected()
while item != -1 :
jobs.append(self.dataSource.GetItemText(item, 0))
item = self.GetNextSelected(item)
return jobs
def GetOwners(self, job_ids, as_list=False):
return self.dataSource.GetOwners(job_ids, as_list=as_list)
def GetNames(self, job_ids):
return self.dataSource.GetNames(job_ids)
def OnSelect(self, event):
pass
def OnDeSelect(self, event):
pass
def OnDoubleClick(self, event):
pass
def OnRightClick(self, event):
pass
def OnSort(self, event):
col = event.GetColumn()
self.dataSource.SortItems(col)
self.RefreshAllItems()
def Update(self):
self.dataSource.Refresh()
self.RefreshAllItems()
class queue_list_frame(wx.Frame):
def __init__(self, *args, **kwds):
wx.Frame.__init__(self, *args, **kwds)
self.statusbar = self.CreateStatusBar()
self.SetupToolbar()
self.list_ctrl = qsub_list_view(parent=self,
id=-1,
size=(800,600),
style=wx.LC_REPORT|wx.LC_VIRTUAL)
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.Bind(wx.EVT_WINDOW_DESTROY, self.OnDestroy)
self.Update()
self._timer = wx.Timer(owner=self)
self.Bind(wx.EVT_TIMER, self.OnUpdate)
self.statusbar.Bind(wx.EVT_LEFT_DCLICK, self.OnUpdate)
self._timer.Start(10000)
def SetupToolbar(self):
if wxtbx.bitmaps.icon_lib is None :
return
self.toolbar = self.CreateToolBar(style=wx.TB_TEXT)
commands = [
("actions","reload", "OnUpdate", "Refresh list"),
("actions","editdelete", "OnDelete", "Delete selected"),
]
for (icon_class, icon_name, fname, label) in commands :
bmp = wxtbx.bitmaps.fetch_icon_bitmap(icon_class, icon_name, 32)
tool_button = self.toolbar.AddLabelTool(-1, label, bmp,
shortHelp=label, kind=wx.ITEM_NORMAL)
self.Bind(wx.EVT_MENU, getattr(self, fname), tool_button)
self.toolbar.Realize()
def OnClose(self, event):
self.Destroy()
def OnDestroy(self, event):
pass
def OnUpdate(self, event):
self.Update()
def OnDelete(self, event):
job_ids = self.list_ctrl.GetSelectedJobIDs()
if len(job_ids) == 0 :
return
users = self.list_ctrl.GetOwners(job_ids)
if (len(users) > 1) or (not user in users):
raise Sorry("At least one job selected for deletion is owned by a "
"different user; this interface only allows you to delete your own "+
"jobs.")
if self.ConfirmDelete(job_ids):
try :
success = sge_utils.qdel(job_ids=job_ids)
except RuntimeError as e :
raise Sorry("Error executing 'qdel' command: %s" % str(e))
else :
GenericMessageDialog("Job(s) deleted successfuly.", style=wx.OK)
def SetUpdateInterval(self, interval) : # in seconds, not ms
self._timer.Stop()
self._timer.Start(interval * 1000)
def Update(self):
self.list_ctrl.Update()
self.statusbar.SetStatusText("Last updated at %s" % get_time())
def ConfirmDelete(self, job_ids):
pass
def get_time():
return time.strftime("%m-%d-%y %H:%M:%S", time.localtime())
#-----------------------------------------------------------------------
def run(args):
app = wx.App(0)
frame = queue_list_frame(None, -1, "SGE Queue Status")
frame.Show()
frame.Fit()
app.MainLoop()
if __name__ == "__main__" :
run(sys.argv[1:]) | null |
find replacement | # Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
from dataclasses import dataclass
from itertools import chain
from typing import Any
from pants.backend.docker.goals.package_image import DockerPackageFieldSet
from pants.backend.docker.subsystems import dockerfile_parser
from pants.backend.docker.subsystems.docker_options import DockerOptions
from pants.backend.docker.target_types import DockerImageTags, DockerImageTagsRequest
from pants.backend.docker.util_rules import (
docker_binary,
docker_build_args,
docker_build_context,
docker_build_env,
dockerfile,
)
from pants.backend.docker.util_rules.docker_build_context import (
DockerBuildContext,
DockerBuildContextRequest,
)
from pants.backend.helm.dependency_inference.deployment import (
FirstPartyHelmDeploymentMapping,
FirstPartyHelmDeploymentMappingRequest,
)
from pants.backend.helm.subsystems import post_renderer
from pants.backend.helm.subsystems.post_renderer import SetupHelmPostRenderer
from pants.backend.helm.target_types import HelmDeploymentFieldSet
from pants.engine.addresses import Address, Addresses
from pants.engine.engine_aware import EngineAwareParameter
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import Targets, WrappedTarget, WrappedTargetRequest
from pants.engine.unions import UnionMembership
from pants.util.logging import LogLevel
from pants.util.strutil import bullet_list, softwrap
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class HelmDeploymentPostRendererRequest(EngineAwareParameter):
field_set: HelmDeploymentFieldSet
def debug_hint(self) -> str | None:
return self.field_set.address.spec
def metadata(self) -> dict[str, Any] | None:
return {"address": self.field_set.address.spec}
async def _obtain_custom_image_tags(
address: Address, union_membership: UnionMembership
) -> DockerImageTags:
wrapped_target = await Get(
WrappedTarget, WrappedTargetRequest(address, description_of_origin="<infallible>")
)
image_tags_requests = union_membership.get(DockerImageTagsRequest)
found_image_tags = await MultiGet(
Get(DockerImageTags, DockerImageTagsRequest, image_tags_request_cls(wrapped_target.target))
for image_tags_request_cls in image_tags_requests
if image_tags_request_cls.is_applicable(wrapped_target.target)
)
return DockerImageTags(chain.from_iterable(found_image_tags))
@rule(desc="Prepare Helm deployment post-renderer", level=LogLevel.DEBUG)
async def prepare_post_renderer_for_helm_deployment(
request: HelmDeploymentPostRendererRequest,
union_membership: UnionMembership,
docker_options: DockerOptions,
) -> SetupHelmPostRenderer:
mapping = await Get(
FirstPartyHelmDeploymentMapping, FirstPartyHelmDeploymentMappingRequest(request.field_set)
)
docker_addresses = [addr for _, addr in mapping.indexed_docker_addresses.values()]
logger.debug(
softwrap(
f"""
Resolving Docker image references for targets:
{bullet_list([addr.spec for addr in docker_addresses])}
"""
)
)
docker_contexts = await MultiGet(
Get(
DockerBuildContext,
DockerBuildContextRequest(
address=addr,
build_upstream_images=False,
),
)
for addr in docker_addresses
)
docker_targets = await Get(Targets, Addresses(docker_addresses))
field_sets = [DockerPackageFieldSet.create(tgt) for tgt in docker_targets]
async def resolve_docker_image_ref(address: Address, context: DockerBuildContext) -> str | None:
docker_field_sets = [fs for fs in field_sets if fs.address == address]
if not docker_field_sets:
return None
additional_image_tags = await _obtain_custom_image_tags(address, union_membership)
docker_field_set = docker_field_sets[0]
image_refs = docker_field_set.image_refs(
default_repository=docker_options.default_repository,
registries=docker_options.registries(),
interpolation_context=context.interpolation_context,
additional_tags=tuple(additional_image_tags),
)
# Choose first non-latest image reference found, or fallback to 'latest'.
found_ref: str | None = None
fallback_ref: str | None = None
for registry in image_refs:
for tag in registry.tags:
ref = tag.full_name
if ref.endswith(":latest"):
fallback_ref = ref
else:
found_ref = ref
break
resolved_ref = found_ref or fallback_ref
if resolved_ref:
logger.debug(f"Resolved Docker image ref '{resolved_ref}' for address {address}.")
else:
logger.warning(f"Could not resolve a valid image ref for Docker target {address}.")
return resolved_ref
docker_addr_ref_mapping = {
addr: await resolve_docker_image_ref(addr, ctx)
for addr, ctx in zip(docker_addresses, docker_contexts)
}
def METHOD_NAME(value: tuple[str, Address]) -> str | None:
_, addr = value
return docker_addr_ref_mapping.get(addr)
replacements = mapping.indexed_docker_addresses.transform_values(METHOD_NAME)
return SetupHelmPostRenderer(
replacements, description_of_origin=f"the `helm_deployment` {request.field_set.address}"
)
def rules():
return [
*collect_rules(),
*docker_binary.rules(),
*docker_build_args.rules(),
*docker_build_context.rules(),
*docker_build_env.rules(),
*dockerfile.rules(),
*dockerfile_parser.rules(),
*post_renderer.rules(),
] | null |
print errors | """
Test Result
-----------
Provides a TextTestResult that extends unittest's _TextTestResult to
provide support for error classes (such as the builtin skip and
deprecated classes), and hooks for plugins to take over or extend
reporting.
"""
import logging
try:
# 2.7+
from unittest.runner import _TextTestResult
except ImportError:
from unittest import _TextTestResult
from nose.config import Config
from nose.util import isclass, ln as _ln # backwards compat
log = logging.getLogger('nose.result')
def _exception_detail(exc):
# this is what stdlib module traceback does
try:
return str(exc)
except:
return '<unprintable %s object>' % type(exc).__name__
class TextTestResult(_TextTestResult):
"""Text test result that extends unittest's default test result
support for a configurable set of errorClasses (eg, Skip,
Deprecated, TODO) that extend the errors/failures/success triad.
"""
def __init__(self, stream, descriptions, verbosity, config=None,
errorClasses=None):
if errorClasses is None:
errorClasses = {}
self.errorClasses = errorClasses
if config is None:
config = Config()
self.config = config
_TextTestResult.__init__(self, stream, descriptions, verbosity)
def addSkip(self, test, reason):
# 2.7 skip compat
from nose.plugins.skip import SkipTest
if SkipTest in self.errorClasses:
storage, label, isfail = self.errorClasses[SkipTest]
storage.append((test, reason))
self.printLabel(label, (SkipTest, reason, None))
def addError(self, test, err):
"""Overrides normal addError to add support for
errorClasses. If the exception is a registered class, the
error will be added to the list for that class, not errors.
"""
ec, ev, tb = err
try:
exc_info = self._exc_info_to_string(err, test)
except TypeError:
# 2.3 compat
exc_info = self._exc_info_to_string(err)
for cls, (storage, label, isfail) in list(self.errorClasses.items()):
#if 'Skip' in cls.__name__ or 'Skip' in ec.__name__:
# from nose.tools import set_trace
# set_trace()
if isclass(ec) and issubclass(ec, cls):
if isfail:
test.passed = False
storage.append((test, exc_info))
self.printLabel(label, err)
return
self.errors.append((test, exc_info))
test.passed = False
self.printLabel('ERROR')
# override to bypass changes in 2.7
def getDescription(self, test):
if self.descriptions:
return test.shortDescription() or str(test)
else:
return str(test)
def printLabel(self, label, err=None):
# Might get patched into a streamless result
stream = getattr(self, 'stream', None)
if stream is not None:
if self.showAll:
message = [label]
if err:
detail = _exception_detail(err[1])
if detail:
message.append(detail)
stream.writeln(": ".join(message))
elif self.dots:
stream.write(label[:1])
def METHOD_NAME(self):
"""Overrides to print all errorClasses errors as well.
"""
_TextTestResult.METHOD_NAME(self)
for cls in list(self.errorClasses.keys()):
storage, label, isfail = self.errorClasses[cls]
if isfail:
self.printErrorList(label, storage)
# Might get patched into a result with no config
if hasattr(self, 'config'):
self.config.plugins.report(self.stream)
def printSummary(self, start, stop):
"""Called by the test runner to print the final summary of test
run results.
"""
write = self.stream.write
writeln = self.stream.writeln
taken = float(stop - start)
run = self.testsRun
plural = run != 1 and "s" or ""
writeln(self.separator2)
writeln("Ran %s test%s in %.3fs" % (run, plural, taken))
writeln()
summary = {}
eckeys = list(self.errorClasses.keys())
for cls in eckeys:
storage, label, isfail = self.errorClasses[cls]
count = len(storage)
if not count:
continue
summary[label] = count
if len(self.failures):
summary['failures'] = len(self.failures)
if len(self.errors):
summary['errors'] = len(self.errors)
if not self.wasSuccessful():
write("FAILED")
else:
write("OK")
items = list(summary.items())
if items:
items.sort()
write(" (")
write(", ".join(["%s=%s" % (label, count) for
label, count in items]))
writeln(")")
else:
writeln()
def wasSuccessful(self):
"""Overrides to check that there are no errors in errorClasses
lists that are marked as errors and should cause a run to
fail.
"""
if self.errors or self.failures:
return False
for cls in list(self.errorClasses.keys()):
storage, label, isfail = self.errorClasses[cls]
if not isfail:
continue
if storage:
return False
return True
def _addError(self, test, err):
try:
exc_info = self._exc_info_to_string(err, test)
except TypeError:
# 2.3: does not take test arg
exc_info = self._exc_info_to_string(err)
self.errors.append((test, exc_info))
if self.showAll:
self.stream.write('ERROR')
elif self.dots:
self.stream.write('E')
def _exc_info_to_string(self, err, test=None):
# 2.7 skip compat
from nose.plugins.skip import SkipTest
if isclass(err[0]) and issubclass(err[0], SkipTest):
return str(err[1])
# 2.3/2.4 -- 2.4 passes test, 2.3 does not
try:
return _TextTestResult._exc_info_to_string(self, err, test)
except TypeError:
# 2.3: does not take test arg
return _TextTestResult._exc_info_to_string(self, err)
def ln(*arg, **kw):
from warnings import warn
warn("ln() has moved to nose.util from nose.result and will be removed "
"from nose.result in a future release. Please update your imports ",
DeprecationWarning)
return _ln(*arg, **kw)
| null |
tls mac add | # This file is part of Scapy
# Copyright (C) 2007, 2008, 2009 Arnaud Ebalard
# 2015, 2016, 2017 Maxence Tury
# This program is published under a GPLv2 license
"""
TLS helpers, provided as out-of-context methods.
"""
from __future__ import absolute_import
import struct
from scapy.compat import orb, chb
from scapy.error import warning
from scapy.fields import (ByteEnumField, ShortEnumField,
FieldLenField, StrLenField)
from scapy.packet import Packet
from scapy.layers.tls.basefields import _tls_type, _tls_version
class TLSPlaintext(Packet):
name = "TLS Plaintext"
fields_desc = [ByteEnumField("type", None, _tls_type),
ShortEnumField("version", None, _tls_version),
FieldLenField("len", None, length_of="data", fmt="!H"),
StrLenField("data", "",
length_from=lambda pkt: pkt.len)]
class TLSCompressed(TLSPlaintext):
name = "TLS Compressed"
class TLSCiphertext(TLSPlaintext):
name = "TLS Ciphertext"
def _tls_compress(alg, p):
"""
Compress p (a TLSPlaintext instance) using compression algorithm instance
alg and return a TLSCompressed instance.
"""
c = TLSCompressed()
c.type = p.type
c.version = p.version
c.data = alg.compress(p.data)
c.len = len(c.data)
return c
def _tls_decompress(alg, c):
"""
Decompress c (a TLSCompressed instance) using compression algorithm
instance alg and return a TLSPlaintext instance.
"""
p = TLSPlaintext()
p.type = c.type
p.version = c.version
p.data = alg.decompress(c.data)
p.len = len(p.data)
return p
def METHOD_NAME(alg, c, write_seq_num):
"""
Compute the MAC using provided MAC alg instance over TLSCiphertext c using
current write sequence number write_seq_num. Computed MAC is then appended
to c.data and c.len is updated to reflect that change. It is the
    caller's responsibility to increment the sequence number after the operation.
The function has no return value.
"""
write_seq_num = struct.pack("!Q", write_seq_num)
h = alg.digest(write_seq_num + bytes(c))
c.data += h
c.len += alg.hash_len
def _tls_mac_verify(alg, p, read_seq_num):
"""
Verify if the MAC in provided message (message resulting from decryption
and padding removal) is valid. Current read sequence number is used in
the verification process.
If the MAC is valid:
- The function returns True
- The packet p is updated in the following way: trailing MAC value is
removed from p.data and length is updated accordingly.
In case of error, False is returned, and p may have been modified.
Also note that it is the caller's responsibility to update the read
sequence number after the operation.
"""
h_size = alg.hash_len
if p.len < h_size:
return False
received_h = p.data[-h_size:]
p.len -= h_size
p.data = p.data[:-h_size]
read_seq_num = struct.pack("!Q", read_seq_num)
h = alg.digest(read_seq_num + bytes(p))
return h == received_h
def _tls_add_pad(p, block_size):
"""
Provided with cipher block size parameter and current TLSCompressed packet
p (after MAC addition), the function adds required, deterministic padding
to p.data before encryption step, as it is defined for TLS (i.e. not
SSL and its allowed random padding). The function has no return value.
"""
padlen = -p.len % block_size
padding = chb(padlen) * (padlen + 1)
p.len += len(padding)
p.data += padding
def _tls_del_pad(p):
"""
Provided with a just decrypted TLSCiphertext (now a TLSPlaintext instance)
p, the function removes the trailing padding found in p.data. It also
performs some sanity checks on the padding (length, content, ...). False
is returned if one of the check fails. Otherwise, True is returned,
indicating that p.data and p.len have been updated.
"""
if p.len < 1:
warning("Message format is invalid (padding)")
return False
padlen = orb(p.data[-1])
padsize = padlen + 1
if p.len < padsize:
warning("Invalid padding length")
return False
if p.data[-padsize:] != chb(padlen) * padsize:
warning("Padding content is invalid %s", repr(p.data[-padsize:]))
return False
p.data = p.data[:-padsize]
p.len -= padsize
return True
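# Padding convention shared by _tls_add_pad and _tls_del_pad (TLS, not SSLv3): the plaintext ends
# with padlen + 1 bytes that all carry the value padlen, the last of them being the padding-length field.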
def _tls_encrypt(alg, p):
"""
Provided with an already MACed TLSCompressed packet, and a stream or block
cipher alg, the function converts it into a TLSCiphertext (i.e. encrypts it
and updates length). The function returns a newly created TLSCiphertext
instance.
"""
c = TLSCiphertext()
c.type = p.type
c.version = p.version
c.data = alg.encrypt(p.data)
c.len = len(c.data)
return c
def _tls_decrypt(alg, c):
"""
Provided with a TLSCiphertext instance c, and a stream or block cipher alg,
the function decrypts c.data and returns a newly created TLSPlaintext.
"""
p = TLSPlaintext()
p.type = c.type
p.version = c.version
p.data = alg.decrypt(c.data)
p.len = len(p.data)
return p
def _tls_aead_auth_encrypt(alg, p, write_seq_num):
"""
Provided with a TLSCompressed instance p, the function applies AEAD
cipher alg to p.data and builds a new TLSCiphertext instance. Unlike
for block and stream ciphers, for which the authentication step is done
separately, AEAD alg does it simultaneously: this is the reason why
write_seq_num is passed to the function, to be incorporated in
    authenticated data. Note that it is the caller's responsibility to increment
write_seq_num afterwards.
"""
P = bytes(p)
write_seq_num = struct.pack("!Q", write_seq_num)
A = write_seq_num + P[:5]
c = TLSCiphertext()
c.type = p.type
c.version = p.version
c.data = alg.auth_encrypt(P, A, write_seq_num)
c.len = len(c.data)
return c
def _tls_aead_auth_decrypt(alg, c, read_seq_num):
"""
Provided with a TLSCiphertext instance c, the function applies AEAD
cipher alg auth_decrypt function to c.data (and additional data)
in order to authenticate the data and decrypt c.data. When those
steps succeed, the result is a newly created TLSCompressed instance.
On error, None is returned. Note that it is the caller's responsibility to
increment read_seq_num afterwards.
"""
# 'Deduce' TLSCompressed length from TLSCiphertext length
# There is actually no guaranty of this equality, but this is defined as
# such in TLS 1.2 specifications, and it works for GCM and CCM at least.
#
plen = c.len - getattr(alg, "nonce_explicit_len", 0) - alg.tag_len
read_seq_num = struct.pack("!Q", read_seq_num)
A = read_seq_num + struct.pack('!BHH', c.type, c.version, plen)
p = TLSCompressed()
p.type = c.type
p.version = c.version
p.len = plen
p.data = alg.auth_decrypt(A, c.data, read_seq_num)
if p.data is None: # Verification failed.
return None
return p | null |
load service with agent image | import datetime
import os
import kubernetes.client
import kubernetes.config
import kubernetes.dynamic
import pytest
from junit_report import JunitTestSuite
from service_client import log
from tests.base_test import BaseTest
class TestUpgradeAgent(BaseTest):
@pytest.fixture(scope="session")
def namespace(self) -> str:
return os.environ.get("NAMESPACE", "assisted-isntaller")
@pytest.fixture(scope="session")
def k8s_client(self) -> kubernetes.dynamic.DynamicClient:
return kubernetes.dynamic.DynamicClient(
kubernetes.client.api_client.ApiClient(
configuration=kubernetes.config.load_kube_config(),
),
)
@classmethod
def _get_other_agent_image(cls) -> str:
"""Returns the reference to the other agent image."""
return os.environ.get("OTHER_AGENT_IMAGE", "quay.io/edge-infrastructure/assisted-installer-agent:v2.20.1")
@classmethod
def _get_current_agent_image(
cls,
client: kubernetes.dynamic.DynamicClient,
namespace: str,
) -> str:
"""Returns the agent image the is currently used by the service."""
configmaps_api = client.resources.get(api_version="v1", kind="ConfigMap")
configmap = configmaps_api.get(namespace=namespace, name="assisted-service-config")
return configmap.data["AGENT_DOCKER_IMAGE"]
@classmethod
def METHOD_NAME(
cls,
client: kubernetes.dynamic.DynamicClient,
namespace: str,
image: str,
) -> None:
"""
Checks if the service is already using the given agent image. If it isn't using it then it changes the configmap
to use it and restarts the deployment.
"""
# Check if the service is already using the given agent image:
current = cls._get_current_agent_image(client, namespace)
if current == image:
log.info(f"Service is already using agent image '{image}'")
return
# Update the configuration:
configmaps_api = client.resources.get(api_version="v1", kind="ConfigMap")
configmaps_api.patch(
namespace=namespace,
name="assisted-service-config",
body={
"data": {
"AGENT_DOCKER_IMAGE": image,
},
},
)
log.info(f"Updated configuration with agent image '{image}'")
# Restart the deployment:
deployments_api = client.resources.get(api_version="apps/v1", kind="Deployment")
date = datetime.datetime.now(datetime.timezone.utc).isoformat()
deployments_api.patch(
namespace=namespace,
name="assisted-service",
body={
"spec": {
"template": {
"metadata": {
"annotations": {
"kubectl.kubernetes.io/restartedAt": date,
},
},
},
},
},
)
log.info(f"Restarted deployment with agent image '{image}'")
@JunitTestSuite()
def test_upgrade_agent(self, cluster, namespace, k8s_client):
"""
This test prepares the cluster with an image different to the current one. Once it is ready to install it
restarts the current image and wait till all the hosts have been upgraded to use it.
"""
assert (current_image := self._get_current_agent_image(k8s_client, namespace))
assert (other_image := self._get_other_agent_image())
log.info(f"Other agent image is '{other_image}'")
log.info(f"Current agent image is '{current_image}'")
try:
# Prepare the cluster for installation using the other image:
log.info("Waiting for cluster to be ready to install with agent image '%s'", other_image)
self.METHOD_NAME(k8s_client, namespace, other_image)
cluster.prepare_for_installation()
# Restart the service with the current agent image and wait till all host are using it:
log.info("Waiting for hosts to use agent image '%s'", current_image)
self.METHOD_NAME(k8s_client, namespace, current_image)
cluster.wait_until_hosts_use_agent_image(current_image)
finally:
self.METHOD_NAME(k8s_client, namespace, current_image) | null |
remove file ext | from __future__ import print_function, division, absolute_import
import os
import re
try:
import tesedml as libsedml
except ImportError:
import libsedml
#Only load this class if phrasedml exists
try:
import phrasedml
class phrasedmlImporter(object):
def __init__(self, sbml_map={}):
""" Constructor. """
self.sedml_str = None
self.sedml_path = None
self.sbml_map = sbml_map
@classmethod
def fromContent(cls, sedml_str, sbml_map={}):
# FIXME: bad hack for https://github.com/fbergmann/libSEDML/issues/47
# test for JWS quirks
if 'xmlns="http://sed-ml.org/sed-ml/level1/version3"' in sedml_str:
# import xml.etree.ElementTree as ElementTree
# root = ElementTree.fromstring(sedml_str)
# for p in root.findall('{http://sed-ml.org/sed-ml/level1/version3}plot2D'):
# if not 'logX' in p.attrib or not 'logY' in p.attrib:
# logX = False
# logY = False
# for l in p.findall('{http://sed-ml.org/sed-ml/level1/version3}listOfCurves'):
# for c in l.findall('{http://sed-ml.org/sed-ml/level1/version3}curve'):
# if 'logX' in c.attrib and c.attrib['logX'].lower() == 'true':
# logX = True
# if 'logY' in c.attrib and c.attrib['logY'].lower() == 'true':
# logY = True
# p.set('logX', logX)
# p.set('logY', logY)
# sedml_str = (ElementTree.tostring(root, encoding='utf8', method='xml')).decode('utf8')
while True:
p = sedml_str.find('plot2D')
if p < 0:
break
b = sedml_str.find('>', p)
if b < 0:
break
l = sedml_str.find('logX', p)
if l < 0 or b < l:
sedml_str = sedml_str[:p] + 'plot2D logX="false" logY="false" ' + sedml_str[p+len('plot2D'):]
else:
break
print(sedml_str)
importer = phrasedmlImporter(sbml_map)
importer.sedml_str = sedml_str
# test for errors
result = importer.toPhrasedml()
if result is None:
# get errors from libsedml
doc = libsedml.SedReader().readSedMLFromString(sedml_str)
if doc.getNumErrors():
max_len = 100
message = doc.getError(doc.getNumErrors()-1).getMessage()
message = message[:max_len] + '...' if len(message) > max_len else message
raise RuntimeError('Errors reading SED-ML: {}'.format(message))
else:
raise RuntimeError('Unable to read SED-ML.')
return importer
def isInRootDir(self, file):
d = os.path.split(file)[0]
return d == '' or d == '.'
def METHOD_NAME(self, filename):
return os.path.splitext(filename)[0]
def formatResource(self, filename):
""" Normalizes and also strips xml extension."""
return self.METHOD_NAME(os.path.normpath(filename))
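        # Illustrative example: formatResource('./myModel.xml') normalises the path and strips
        # the extension, yielding 'myModel'.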
def fixModelRefs(self, phrasedml_str):
''' Changes all references of type myModel.xml to myModel.'''
model_ref = re.compile(r'^.*\s*model\s*"([^"]*)"\s*$')
out_str = ''
for line in phrasedml_str.splitlines():
match = model_ref.match(line)
if match:
filename = match.group(1)
if self.isInRootDir(filename):
line = line.replace(filename,self.formatResource(filename))
out_str += line+'\n'
return out_str
def toPhrasedml(self):
# assign sbml resources
# print('toPhrasedml sbml resources:')
phrasedml.clearReferencedSBML()
for sbml_resource in self.sbml_map:
# print(' {} -> {}'.format(sbml_resource, self.sbml_map[sbml_resource][:30]))
phrasedml.setReferencedSBML(sbml_resource, self.sbml_map[sbml_resource])
# convert to phrasedml
if self.sedml_str:
result = phrasedml.convertString(self.sedml_str)
if result is None:
raise RuntimeError(phrasedml.getLastError())
return self.fixModelRefs(phrasedml.getLastPhraSEDML())
elif self.sedml_path:
result = phrasedml.convertFile(self.sedml_str)
if result is None:
raise RuntimeError(phrasedml.getLastError())
return self.fixModelRefs(phrasedml.getLastPhraSEDML())
except:
    pass
get header | """
Generates setups for 2d shallow water MMS tests
"""
import sympy
from sympy import init_printing
init_printing()
# coordinates
x, y, z = sympy.symbols('x[0] x[1] x[2]')
z_tilde = sympy.symbols('z_tilde')
# domain length, x in [0, Lx], y in [0, Ly]
lx, ly = sympy.symbols('lx ly')
# depth scale
h0 = sympy.symbols('h0', positive=True)
# coriolis scale
f0 = sympy.symbols('f0', positive=True)
# viscosity scale
nu0 = sympy.symbols('nu0', positive=True)
# gravitational acceleration
g = sympy.symbols('g')
# time
t = sympy.symbols('t', positive=True)
T = sympy.symbols('T', positive=True)
def get_ufl_expr(w):
"""
    Generates a string that can be thought of as a UFL Expression"""
return str(w)
def get_scalar_entry(name, u=None, v=None, w=None):
"""Generates an entry for a scalar expression"""
t = """ out['{name}'] = {u}\n"""
def fds(u):
return '0.0' if u is None else get_ufl_expr(u)
return t.format(name=name, u=fds(u))
def get_vector_entry(name, u=None, v=None):
"""Generates an entry for a 2d vector expression"""
t = """ out['{name}'] = as_vector(
[
{u},
{v},
])\n"""
def fds(u):
return '0.0' if u is None else get_ufl_expr(u)
return t.format(name=name, u=fds(u), v=fds(v))
def METHOD_NAME(name, description):
t = '''def {name}(x, lx, ly, h0, f0, nu0, g):
"""
{txt}
"""
out = {{}}\n'''
return t.format(name=name, txt=description)
def get_footer():
t = """
# NOTE boundary condititions must be set manually to something meaningful
out['bnd_funcs'] = {1: {'uv': None},
2: {'uv': None},
3: {'uv': None},
4: {'uv': None},
}
return out"""
return t
def evaluate_source_term(eta, u, v, h, f, nu, nonlin=True):
# evaluate equations
if nonlin:
depth = eta + h
else:
depth = h
div_hu = sympy.diff(depth*u, x) + sympy.diff(depth*v, y)
res_elev = sympy.diff(eta, t) + div_hu
u_x = sympy.diff(u, x)
u_y = sympy.diff(u, y)
v_x = sympy.diff(v, x)
v_y = sympy.diff(v, y)
if nonlin:
adv_u = u*u_x + v*u_y
adv_v = u*v_x + v*v_y
else:
adv_u = adv_v = 0
cori_u = -f*v
cori_v = f*u
pg_u = g*sympy.diff(eta, x)
pg_v = g*sympy.diff(eta, y)
visc_u = -(2*sympy.diff(nu*sympy.diff(u, x), x)
+ sympy.diff(nu*sympy.diff(u, y), y)
+ sympy.diff(nu*sympy.diff(v, x), y))
visc_v = -(2*sympy.diff(nu*sympy.diff(v, y), y)
+ sympy.diff(nu*sympy.diff(v, x), x)
+ sympy.diff(nu*sympy.diff(u, y), x))
visc_u += -sympy.diff(depth, x)/depth * nu * 2 * sympy.diff(u, x)
visc_v += -sympy.diff(depth, y)/depth * nu * 2 * sympy.diff(v, y)
res_u = sympy.diff(u, t) + adv_u + cori_u + pg_u + visc_u
res_v = sympy.diff(v, t) + adv_v + cori_v + pg_v + visc_v
return res_elev, res_u, res_v
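# The residuals above correspond to the 2D shallow water equations: the elevation residual is
# d(eta)/dt + div(H*uv) with H = eta + h (nonlinear) or H = h (linear), and the momentum residuals
# collect advection, Coriolis, the pressure gradient g*grad(eta) and depth-averaged viscous terms.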
def generate_setup(name, description, h, f, eta, u, v, nu):
"""
Generates setup function that can be copied to mms test.
"""
res_elev, res_u, res_v = evaluate_source_term(eta, u, v, h, f, nu)
txt = ''
txt += METHOD_NAME(name, description)
txt += get_scalar_entry('bath_expr', h)
if f != 0.0:
txt += get_scalar_entry('cori_expr', f)
if nu != 0.0:
txt += get_scalar_entry('visc_expr', nu)
txt += get_scalar_entry('elev_expr', eta)
txt += get_vector_entry('uv_expr', u=u, v=v)
txt += get_scalar_entry('res_elev_expr', res_elev)
txt += get_vector_entry('res_uv_expr', u=res_u, v=res_v)
txt += get_footer()
print('')
print('')
print(txt)
name = 'setup7'
description = """Non-trivial Coriolis, bath, elev, u and v, tangential velocity is zero at bnd to test flux BCs"""
h = 4.0 + h0*sympy.sqrt(0.3*x*x + 0.2*y*y + 0.1)/lx
f = f0*sympy.cos(sympy.pi*(x + y)/lx)
nu = 0
eta = sympy.cos(sympy.pi*(3.0*x + 1.0*y)/lx)
u = sympy.sin(sympy.pi*(-2.0*x + 1.0*y)/lx)*sympy.sin(sympy.pi*y/ly)
v = 0.5*sympy.sin(sympy.pi*x/lx)*sympy.sin(sympy.pi*(-3.0*x + 1.0*y)/lx)
generate_setup(name, description, h, f, eta, u, v, nu)
name = 'setup8'
description = """Non-trivial Coriolis, bath, elev, u and v, tangential velocity is non-zero at bnd, must prescribe uv at boundary."""
h = 4.0 + h0*sympy.sqrt(0.3*x*x + 0.2*y*y + 0.1)/lx
f = f0*sympy.cos(sympy.pi*(x + y)/lx)
nu = 0
eta = sympy.cos(sympy.pi*(3.0*x + 1.0*y)/lx)
u = sympy.sin(sympy.pi*(-2.0*x + 1.0*y)/lx)
v = 0.5*sympy.sin(sympy.pi*(-3.0*x + 1.0*y)/lx)
generate_setup(name, description, h, f, eta, u, v, nu)
name = 'setup9'
description = 'No Coriolis, non-trivial bath, viscosity, elev, u and v.'
h = 4.0 + h0*sympy.sqrt(0.3*x*x + 0.2*y*y + 0.1)/lx
f = 0
eta = sympy.cos(sympy.pi*(3.0*x + 1.0*y)/lx)
u = sympy.sin(sympy.pi*(-2.0*x + 1.0*y)/lx)
v = 0.5*sympy.sin(sympy.pi*(-3.0*x + 1.0*y)/lx)
nu = nu0*(1.0 + x/lx)
generate_setup(name, description, h, f, eta, u, v, nu) | null |
build sequence | # Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and associated
# documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import tensorflow as tf
from tensorflow.python.ops.rnn_cell_impl import RNNCell
class GRU:
'''
GRU class.
'''
def __init__(self, name, input_dim, hidden_dim):
self.name = '/'.join([name, 'gru'])
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.w_matrix = None
self.U = None
self.bias = None
def define_params(self):
'''
Define parameters.
'''
input_dim = self.input_dim
hidden_dim = self.hidden_dim
prefix = self.name
self.w_matrix = tf.Variable(tf.random_normal([input_dim, 3 * hidden_dim], stddev=0.1),
name='/'.join([prefix, 'W']))
self.U = tf.Variable(tf.random_normal([hidden_dim, 3 * hidden_dim], stddev=0.1),
name='/'.join([prefix, 'U']))
self.bias = tf.Variable(tf.random_normal([1, 3 * hidden_dim], stddev=0.1),
name='/'.join([prefix, 'b']))
return self
def build(self, x, h, mask=None):
'''
Build the GRU cell.
'''
xw = tf.split(tf.matmul(x, self.w_matrix) + self.bias, 3, 1)
hu = tf.split(tf.matmul(h, self.U), 3, 1)
r = tf.sigmoid(xw[0] + hu[0])
z = tf.sigmoid(xw[1] + hu[1])
h1 = tf.tanh(xw[2] + r * hu[2])
next_h = h1 * (1 - z) + h * z
if mask is not None:
next_h = next_h * mask + h * (1 - mask)
return next_h
def METHOD_NAME(self, xs, masks, init, is_left_to_right):
'''
Build GRU sequence.
'''
states = []
last = init
if is_left_to_right:
for i, xs_i in enumerate(xs):
h = self.build(xs_i, last, masks[i])
states.append(h)
last = h
else:
for i in range(len(xs) - 1, -1, -1):
h = self.build(xs[i], last, masks[i])
states.insert(0, h)
last = h
return states
class XGRUCell(RNNCell):
def __init__(self, hidden_dim, reuse=None):
        super(XGRUCell, self).__init__(_reuse=reuse)
self._num_units = hidden_dim
self._activation = tf.tanh
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def call(self, inputs, state):
input_dim = inputs.get_shape()[-1]
assert input_dim is not None, "input dimension must be defined"
W = tf.get_variable(
name="W", shape=[input_dim, 3 * self._num_units], dtype=tf.float32)
U = tf.get_variable(
name='U', shape=[self._num_units, 3 * self._num_units], dtype=tf.float32)
b = tf.get_variable(
name='b', shape=[1, 3 * self._num_units], dtype=tf.float32)
xw = tf.split(tf.matmul(inputs, W) + b, 3, 1)
hu = tf.split(tf.matmul(state, U), 3, 1)
r = tf.sigmoid(xw[0] + hu[0])
z = tf.sigmoid(xw[1] + hu[1])
h1 = self._activation(xw[2] + r * hu[2])
next_h = h1 * (1 - z) + state * z
        return next_h, next_h
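# --- Editor's note: hedged usage sketch, not part of the original module ---
# Both classes implement the standard GRU update
#   r = sigmoid(x*W_r + h*U_r),   z = sigmoid(x*W_z + h*U_z)
#   h_tilde = tanh(x*W_h + r*(h*U_h)),   h_next = (1 - z)*h_tilde + z*h
# The GRU class above unrolls this update step by step over a list of inputs,
# while XGRUCell packages it as a drop-in RNNCell, so under the TensorFlow 1.x
# graph-mode API it can be handed to tf.nn.dynamic_rnn.  The shapes below
# (batch 32, 20 steps, 128-d inputs, 64-d state) are illustrative only.
example_inputs = tf.placeholder(tf.float32, [32, 20, 128])
example_cell = XGRUCell(hidden_dim=64)
example_outputs, example_state = tf.nn.dynamic_rnn(
    example_cell, example_inputs, dtype=tf.float32)
# example_outputs has shape [32, 20, 64]; example_state has shape [32, 64]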
evaluate list

import sys
from typing import Any
from typing import Iterator
from typing import Tuple
from typing import Type
from typing import TypeVar
from typing import Union
from typing_extensions import get_args
from typing_extensions import get_origin
NONE_TYPE = type(None)
try:
from types import UnionType # type: ignore
except ImportError:
UnionType = () # type: ignore
if (3, 9) <= sys.version_info[:2] <= (3, 10):
# Backport this fix for python 3.9 and 3.10
# https://github.com/python/cpython/pull/30900
from types import GenericAlias
from typing import ForwardRef
from typing import _eval_type as __eval_type # type: ignore
def _eval_type(tp: Any, globalns: Any, localns: Any) -> Any:
if isinstance(tp, GenericAlias):
args = tuple(
ForwardRef(arg) if isinstance(arg, str) else arg for arg in tp.__args__
)
tp = tp.__origin__[args] # type: ignore
return __eval_type(tp, globalns, localns)
else:
from typing import _eval_type # type: ignore
intern_typing = sys.intern("typing.")
def is_from_typing(tp: Any) -> bool:
return str(tp).startswith(intern_typing)
def evaluate(
tp: Any,
globalns: Any = None,
localns: Any = None,
) -> Tuple[Type, ...]:
return tuple(_evaluate(_eval_type(tp, globalns, localns)))
def _evaluate(tp: Any) -> Iterator[Type]:
if tp in (dict, list, tuple):
origin = tp
elif isinstance(tp, TypeVar):
origin = TypeVar
else:
origin = get_origin(tp)
if origin:
try:
yield from __evaluations__[origin](tp)
except KeyError:
raise TypeError()
elif is_from_typing(tp):
raise TypeError()
else:
yield tp
def _evaluate_type(tp: Any) -> Iterator[Type]:
args = get_args(tp)
if not args or isinstance(args[0], TypeVar):
raise TypeError()
yield from _evaluate(args[0])
def _evaluate_mapping(tp: Any) -> Iterator[Type]:
yield dict
args = get_args(tp)
if not args:
yield str
yield str
for arg in args:
if isinstance(arg, TypeVar):
try:
next(_evaluate_typevar(arg))
except TypeError:
yield str
else:
raise TypeError()
elif is_from_typing(arg) or get_origin(arg) is not None:
raise TypeError()
else:
yield arg
def METHOD_NAME(tp: Any) -> Iterator[Type]:
yield list
args = get_args(tp)
if not args:
yield str
for arg in args:
yield from _evaluate_array_arg(arg)
def _evaluate_array_arg(arg: Any) -> Iterator[Type]:
if isinstance(arg, TypeVar):
yield from _evaluate_typevar(arg)
else:
origin = get_origin(arg)
if origin is None and not is_from_typing(arg):
yield arg
elif origin in (Union, UnionType, list, tuple):
yield from __evaluations__[origin](arg)
else:
raise TypeError()
def _evaluate_tuple(tp: Any) -> Iterator[Type]:
yield tuple
args = get_args(tp)
if not args:
yield str
for arg in args:
if arg is Ellipsis:
continue
yield from _evaluate_array_arg(arg)
def _evaluate_union(tp: Any) -> Iterator[Type]:
origin_locked = False
for arg in get_args(tp):
if arg is NONE_TYPE:
continue
if isinstance(arg, TypeVar):
yield from _evaluate_typevar(arg)
else:
origin = get_origin(arg)
if origin is list and not origin_locked:
yield from METHOD_NAME(arg)
origin_locked = True
elif origin is None and not is_from_typing(arg):
yield arg
else:
raise TypeError()
def _evaluate_typevar(tp: TypeVar):
if tp.__bound__:
yield from _evaluate(tp.__bound__)
elif tp.__constraints__:
for arg in tp.__constraints__:
yield from _evaluate(arg)
else:
raise TypeError()
__evaluations__ = {
tuple: _evaluate_tuple,
list: METHOD_NAME,
dict: _evaluate_mapping,
Union: _evaluate_union,
UnionType: _evaluate_union,
type: _evaluate_type,
TypeVar: _evaluate_typevar,
}
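# --- Editor's note: hedged usage sketch, not part of the original module ---
# evaluate() flattens a typing annotation into the tuple of concrete runtime
# classes it can hold; the expected flattenings are asserted below.
if __name__ == "__main__":
    from typing import Dict, List, Optional
    assert evaluate(List[int]) == (list, int)
    assert evaluate(Dict[str, float]) == (dict, str, float)
    assert evaluate(Optional[List[int]]) == (list, int)
    assert evaluate(Tuple[int, ...]) == (tuple, int)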
laletterbuilder pdf response

import base64
from project.util.demo_deployment import is_not_demo_deployment
from project.util.letter_sending import (
render_multilingual_letter,
send_letter_via_lob,
)
from . import models
from io import BytesIO
import logging
from django.http import FileResponse
from django.utils import timezone
from django.db import transaction
from users.models import JustfixUser
from frontend.static_content import (
react_render,
email_react_rendered_content_with_attachment,
ContentType,
)
from project.util.site_util import SITE_CHOICES
from project import slack, locales
from laletterbuilder.models import LA_MAILING_CHOICES
# The URL, relative to the localized site root, that renders the LA Letter builder
# letter PDF.
LALETTERBUILDER_LETTER_PDF_URL = "letter.pdf"
# The URL, relative to the localized site root, that renders the LA Letter builder
# email to the landlord.
LALETTERBUILDER_EMAIL_TO_LANDLORD_URL = "letter-email.txt"
# The URL, relative to the localized site root, that renders the LA Letter builder
# email to the user.
LALETTERBUILDER_EMAIL_TO_USER_URL = "letter-email-to-user.html"
# Set to true when we are ready to test LOB letter sending
LETTER_SENDING_ENABLED = True
logger = logging.getLogger(__name__)
def METHOD_NAME(pdf_bytes: bytes, letter_type: str) -> FileResponse:
"""
Creates a FileResponse for the given PDF bytes and an
appropriate filename for the LA letter.
"""
return FileResponse(BytesIO(pdf_bytes), filename=f"{letter_type}-letter.pdf")
def email_letter_to_landlord(letter: models.Letter, pdf_bytes: bytes):
if letter.letter_emailed_at is not None:
logger.info(f"{letter} has already been emailed to the landlord.")
return False
ld = letter.user.landlord_details
assert ld.email
if is_not_demo_deployment(f"emailing {letter} to landlord"):
letter_type = letter.get_letter_type()
email_react_rendered_content_with_attachment(
SITE_CHOICES.LALETTERBUILDER,
letter.user,
LALETTERBUILDER_EMAIL_TO_LANDLORD_URL,
recipients=[ld.email],
attachment=METHOD_NAME(pdf_bytes, letter_type),
# Force the locale of this email to English, since that's what the
# landlord will read the email as.
locale=locales.DEFAULT,
)
letter.letter_emailed_at = timezone.now()
letter.save()
return True
def email_letter_to_user(letter: models.Letter, pdf_bytes: bytes):
if is_not_demo_deployment(f"emailing {letter} to user"):
letter_type = letter.get_letter_type()
email_react_rendered_content_with_attachment(
SITE_CHOICES.LALETTERBUILDER,
letter.user,
LALETTERBUILDER_EMAIL_TO_USER_URL,
is_html_email=True,
recipients=[letter.user.email],
attachment=METHOD_NAME(pdf_bytes, letter_type),
# Use the user's preferred locale, since they will be the one
# reading it.
locale=letter.user.locale,
)
return True
def create_letter(user: JustfixUser) -> models.Letter:
"""
Create a blank Letter model. HTML content is required but it will be trivial until
the user sends the letter.
"""
with transaction.atomic():
# TODO: Make this work for any type of letter
letter = models.HabitabilityLetter(
user=user,
locale=user.locale,
html_content="<>",
)
letter.full_clean()
letter.save()
return letter
def send_letter(letter: models.Letter):
user = letter.user
html_content = react_render(
SITE_CHOICES.LALETTERBUILDER,
locales.DEFAULT,
LALETTERBUILDER_LETTER_PDF_URL,
ContentType.PDF,
user=user,
).html
localized_html_content = ""
if user.locale != locales.DEFAULT:
localized_html_content = react_render(
SITE_CHOICES.LALETTERBUILDER,
user.locale,
LALETTERBUILDER_LETTER_PDF_URL,
ContentType.PDF,
user=user,
).html
with transaction.atomic():
letter.html_content = html_content
letter.localized_html_content = localized_html_content
letter.save()
pdf_bytes = render_multilingual_letter(letter)
letter_type = letter.get_letter_type() # TODO: localize this somewhere
ld = user.landlord_details
if ld.email and letter.email_to_landlord:
email_letter_to_landlord(letter, pdf_bytes)
if (
ld.address_lines_for_mailing
and letter.mail_choice == LA_MAILING_CHOICES.WE_WILL_MAIL
and LETTER_SENDING_ENABLED
):
send_letter_via_lob(
letter,
pdf_bytes,
letter_description=f"{letter_type} letter",
)
if user.email:
email_letter_to_user(letter, pdf_bytes)
slack.sendmsg_async(
f"{slack.hyperlink(text=user.best_first_name, href=user.admin_url)} "
f"has sent a {letter_type} letter!",
is_safe=True,
)
with transaction.atomic():
letter.pdf_base64 = base64.b64encode(pdf_bytes).decode("utf-8")
letter.fully_processed_at = timezone.now()
letter.full_clean()
        letter.save()
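# --- Editor's note: hedged usage sketch, not part of the original app; it
# assumes a configured Django project and an already-processed letter. ---
# send_letter() stores the rendered PDF on the letter as base64
# (letter.pdf_base64); serving it back is just the reverse transformation
# wrapped in a FileResponse.  The filename below is illustrative only.
def example_letter_download(letter: models.Letter) -> FileResponse:
    pdf_bytes = base64.b64decode(letter.pdf_base64)  # undo the b64encode in send_letter
    return FileResponse(BytesIO(pdf_bytes), filename="habitability-letter.pdf")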
register task

import logging
import os
import threading
import time
from collections import defaultdict
from typing import List
from croniter import croniter
from robusta.core.persistency.scheduled_jobs_states_dal import SchedulerDal
from robusta.core.schedule.model import DynamicDelayRepeat, JobStatus, ScheduledJob, SchedulingInfo, CronScheduleRepeat
# this initial delay is important for when the robusta-runner version is updated
# at the same time a scheduled playbook is added to the configuration
# for a short time there are two runners running (old+new version) and they both
# see the new scheduled playbook. we add a delay so that only the new runner ends up
# running the scheduled playbook
INITIAL_SCHEDULE_DELAY_SEC = os.environ.get("INITIAL_SCHEDULE_DELAY_SEC", 120)
class Scheduler:
scheduled_jobs = defaultdict(None)
registered_runnables = {}
dal = None
def METHOD_NAME(self, runnable_name: str, func):
self.registered_runnables[runnable_name] = func
def init_scheduler(self):
self.dal = SchedulerDal()
# schedule standalone tasks
for job in self.__get_standalone_jobs():
logging.info(f"Scheduling standalone task {job.job_id}")
self.schedule_job(job)
def schedule_job(self, job: ScheduledJob):
if job.replace_existing:
self.__remove_scheduler_job(job.job_id)
saved_job = None # Don't load the existing state. Will be overridden with a new state
else:
if self.is_scheduled(job.job_id):
logging.info(f"job {job.job_id} already scheduled")
return # job is already scheduled, no need to re-schedule. (this is a reload playbooks scenario)
saved_job = self.dal.get_scheduled_job(job.job_id)
if saved_job is None: # save new job
self.dal.save_scheduled_job(job)
saved_job = job
elif saved_job.state.job_status == JobStatus.DONE:
logging.info(f"Scheduled job already done. Skipping scheduling. job {saved_job.job_id}")
return
next_delay = self.__calc_job_delay_for_next_run(saved_job)
logging.info(f"scheduling job {saved_job.job_id} params {saved_job.scheduling_params} will run in {next_delay}")
self.__schedule_job_internal(next_delay, saved_job.job_id, self.__on_task_execution, {"job": saved_job})
def list_scheduled_jobs(self) -> List[ScheduledJob]:
return self.dal.list_scheduled_jobs()
def unschedule_job(self, job_id):
self.__remove_scheduler_job(job_id)
self.dal.del_scheduled_job(job_id)
def is_scheduled(self, job_id):
return self.scheduled_jobs.get(job_id) is not None
def __on_task_execution(self, job: ScheduledJob):
logging.info(f"running scheduled job {job.job_id}")
if job.state.job_status == JobStatus.NEW:
job.state.job_status = JobStatus.RUNNING
job.state.last_exec_time_sec = round(time.time())
func = self.registered_runnables.get(job.runnable_name)
if not func:
logging.error(f"Scheduled runnable name not registered {job.runnable_name}")
self.__on_job_done(job)
return
try:
func(
runnable_params=job.runnable_params,
schedule_info=SchedulingInfo(execution_count=job.state.exec_count),
)
except Exception:
logging.exception(
f"failed to execute runnable {job.runnable_name}. job_id {job.job_id} exec_count {job.state.exec_count}"
)
job.state.exec_count += 1
if self.__is_job_done(job):
self.__on_job_done(job)
else:
self.dal.save_scheduled_job(job)
next_delay = self.__calc_job_delay_for_next_run(job)
self.__schedule_job_internal(
next_delay,
job.job_id,
self.__on_task_execution,
{"job": job},
)
def __get_standalone_jobs(self) -> List[ScheduledJob]:
return [job for job in self.dal.list_scheduled_jobs() if job.standalone_task]
def __on_job_done(self, job: ScheduledJob):
job.state.job_status = JobStatus.DONE
# need to persist jobs state before unscheduling the job. (to avoid race condition, on configuration reload)
if job.standalone_task:
self.dal.del_scheduled_job(job.job_id)
else:
self.dal.save_scheduled_job(job)
del self.scheduled_jobs[job.job_id]
logging.info(f"Scheduled job done. job_id {job.job_id} executions {job.state.exec_count}")
def __schedule_job_internal(self, delay, job_id, func, kwargs):
job = threading.Timer(delay, func, kwargs=kwargs)
self.scheduled_jobs[job_id] = job
job.start()
def __remove_scheduler_job(self, job_id):
job = self.scheduled_jobs.get(job_id)
if job is not None:
job.cancel()
del self.scheduled_jobs[job_id]
def __is_job_done(self, job: ScheduledJob) -> bool:
if isinstance(job.scheduling_params, DynamicDelayRepeat):
return job.state.exec_count >= len(job.scheduling_params.delay_periods)
elif isinstance(job.scheduling_params, CronScheduleRepeat):
return False
else: # default, FIXED_DELAY_REPEAT
return (job.state.exec_count >= job.scheduling_params.repeat) and job.scheduling_params.repeat != -1
def __calc_job_delay_for_next_run(self, job: ScheduledJob):
if job.state.job_status == JobStatus.NEW:
if isinstance(job.scheduling_params, DynamicDelayRepeat):
return job.scheduling_params.delay_periods[0]
else:
return INITIAL_SCHEDULE_DELAY_SEC
if isinstance(job.scheduling_params, DynamicDelayRepeat):
next_delay_idx = min(job.state.exec_count, len(job.scheduling_params.delay_periods) - 1)
next_delay = job.scheduling_params.delay_periods[next_delay_idx]
elif isinstance(job.scheduling_params, CronScheduleRepeat):
now = time.time()
next_delay = croniter(job.scheduling_params.cron_expression, now).get_next() - now
else: # FIXED_DELAY_REPEAT type
next_delay = job.scheduling_params.seconds_delay
return max(
job.state.last_exec_time_sec + next_delay - round(time.time()),
INITIAL_SCHEDULE_DELAY_SEC,
        )
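# --- Editor's note: hedged usage sketch, not part of the original module ---
# For CronScheduleRepeat jobs, the delay computed above is simply
# (next cron fire time - now) as reported by croniter; a threading.Timer is
# then armed for that many seconds.  The expression "*/5 * * * *"
# (every five minutes) is illustrative only.
if __name__ == "__main__":
    _now = time.time()
    _delay = croniter("*/5 * * * *", _now).get_next() - _now
    print(f"next cron-scheduled run in {_delay:.0f} seconds")  # always within (0, 300]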