the-stack_0_1702
import aiohttp
import aiosqlite
import asyncio
import discord
from discord.ext import commands
from backend.config import DATABASE_LOCATION
if __name__ == "__main__":
with open("token.txt") as file:
TOKEN = file.read().strip()
bot = commands.Bot(".", case_insensitive=True)
@bot.event
async def on_ready():
bot._session = aiohttp.ClientSession()
print('Logged in as')
print(bot.user.name)
print(bot.user.id)
print('------')
print(discord.utils.oauth_url(bot.user.id))
async def connect_db():
bot._db = await aiosqlite.connect(DATABASE_LOCATION)
if __name__ == "__main__":
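# Open the SQLite connection on the bot's event loop before the bot starts.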
bot.loop.run_until_complete(connect_db())
extensions = [
"cogs.rolehelper",
"cogs.moderation",
"cogs.fun",
"cogs.rtfm",
"cogs.dasmooi",
"cogs.laf",
"cogs.admin"
]
for ext in extensions:
    bot.load_extension(ext)
bot.run(TOKEN)

the-stack_0_1703
# Copyright 2014 - 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Any, Optional, Tuple
import attr
import msgpack
from unpaddedbase64 import decode_base64, encode_base64
from synapse.api.constants import (
EventContentFields,
EventTypes,
GuestAccess,
HistoryVisibility,
JoinRules,
)
from synapse.api.errors import (
Codes,
HttpResponseException,
RequestSendFailed,
SynapseError,
)
from synapse.types import JsonDict, ThirdPartyInstanceID
from synapse.util.caches.descriptors import _CacheContext, cached
from synapse.util.caches.response_cache import ResponseCache
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
REMOTE_ROOM_LIST_POLL_INTERVAL = 60 * 1000
# This is used to indicate we should only return rooms published to the main list.
EMPTY_THIRD_PARTY_ID = ThirdPartyInstanceID(None, None)
class RoomListHandler:
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastore()
self.hs = hs
self.enable_room_list_search = hs.config.roomdirectory.enable_room_list_search
self.response_cache: ResponseCache[
Tuple[Optional[int], Optional[str], Optional[ThirdPartyInstanceID]]
] = ResponseCache(hs.get_clock(), "room_list")
self.remote_response_cache: ResponseCache[
Tuple[str, Optional[int], Optional[str], bool, Optional[str]]
] = ResponseCache(hs.get_clock(), "remote_room_list", timeout_ms=30 * 1000)
async def get_local_public_room_list(
self,
limit: Optional[int] = None,
since_token: Optional[str] = None,
search_filter: Optional[dict] = None,
network_tuple: Optional[ThirdPartyInstanceID] = EMPTY_THIRD_PARTY_ID,
from_federation: bool = False,
) -> JsonDict:
"""Generate a local public room list.
There are multiple different lists: the main one plus one per third
party network. A client can ask for a specific list or to return all.
Args:
limit
since_token
search_filter
network_tuple: Which public list to use.
This can be (None, None) to indicate the main list, or a particular
appservice and network id to use an appservice specific one.
Setting to None returns all public rooms across all lists.
from_federation: true iff the request comes from the federation API
"""
if not self.enable_room_list_search:
return {"chunk": [], "total_room_count_estimate": 0}
logger.info(
"Getting public room list: limit=%r, since=%r, search=%r, network=%r",
limit,
since_token,
bool(search_filter),
network_tuple,
)
if search_filter:
# We explicitly don't bother caching searches or requests for
# appservice specific lists.
logger.info("Bypassing cache as search request.")
return await self._get_public_room_list(
limit,
since_token,
search_filter,
network_tuple=network_tuple,
from_federation=from_federation,
)
key = (limit, since_token, network_tuple)
return await self.response_cache.wrap(
key,
self._get_public_room_list,
limit,
since_token,
network_tuple=network_tuple,
from_federation=from_federation,
)
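# Usage sketch (illustrative only, not exercised by this module): a caller such
# as a REST servlet would typically do
#     rooms = await hs.get_room_list_handler().get_local_public_room_list(
#         limit=10, network_tuple=None)
# where network_tuple=None merges every published list, EMPTY_THIRD_PARTY_ID
# selects only the main list, and a concrete ThirdPartyInstanceID selects a
# single appservice-specific list.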
async def _get_public_room_list(
self,
limit: Optional[int] = None,
since_token: Optional[str] = None,
search_filter: Optional[dict] = None,
network_tuple: Optional[ThirdPartyInstanceID] = EMPTY_THIRD_PARTY_ID,
from_federation: bool = False,
) -> JsonDict:
"""Generate a public room list.
Args:
limit: Maximum amount of rooms to return.
since_token:
search_filter: Dictionary to filter rooms by.
network_tuple: Which public list to use.
This can be (None, None) to indicate the main list, or a particular
appservice and network id to use an appservice specific one.
Setting to None returns all public rooms across all lists.
from_federation: Whether this request originated from a
federating server or a client. Used for room filtering.
"""
# Pagination tokens work by storing the room ID sent in the last batch,
# plus the direction (forwards or backwards). Next batch tokens always
# go forwards, prev batch tokens always go backwards.
if since_token:
batch_token = RoomListNextBatch.from_token(since_token)
bounds: Optional[Tuple[int, str]] = (
batch_token.last_joined_members,
batch_token.last_room_id,
)
forwards = batch_token.direction_is_forward
has_batch_token = True
else:
bounds = None
forwards = True
has_batch_token = False
# we request one more than wanted to see if there are more pages to come
probing_limit = limit + 1 if limit is not None else None
results = await self.store.get_largest_public_rooms(
network_tuple,
search_filter,
probing_limit,
bounds=bounds,
forwards=forwards,
ignore_non_federatable=from_federation,
)
def build_room_entry(room: JsonDict) -> JsonDict:
entry = {
"room_id": room["room_id"],
"name": room["name"],
"topic": room["topic"],
"canonical_alias": room["canonical_alias"],
"num_joined_members": room["joined_members"],
"avatar_url": room["avatar"],
"world_readable": room["history_visibility"]
== HistoryVisibility.WORLD_READABLE,
"guest_can_join": room["guest_access"] == "can_join",
"join_rule": room["join_rules"],
}
# Filter out Nones – rather omit the field altogether
return {k: v for k, v in entry.items() if v is not None}
results = [build_room_entry(r) for r in results]
response: JsonDict = {}
num_results = len(results)
if limit is not None:
more_to_come = num_results == probing_limit
# Depending on direction we trim either the front or back.
if forwards:
results = results[:limit]
else:
results = results[-limit:]
else:
more_to_come = False
if num_results > 0:
final_entry = results[-1]
initial_entry = results[0]
if forwards:
if has_batch_token:
# If there was a token given then we assume that there
# must be previous results.
response["prev_batch"] = RoomListNextBatch(
last_joined_members=initial_entry["num_joined_members"],
last_room_id=initial_entry["room_id"],
direction_is_forward=False,
).to_token()
if more_to_come:
response["next_batch"] = RoomListNextBatch(
last_joined_members=final_entry["num_joined_members"],
last_room_id=final_entry["room_id"],
direction_is_forward=True,
).to_token()
else:
if has_batch_token:
response["next_batch"] = RoomListNextBatch(
last_joined_members=final_entry["num_joined_members"],
last_room_id=final_entry["room_id"],
direction_is_forward=True,
).to_token()
if more_to_come:
response["prev_batch"] = RoomListNextBatch(
last_joined_members=initial_entry["num_joined_members"],
last_room_id=initial_entry["room_id"],
direction_is_forward=False,
).to_token()
response["chunk"] = results
response["total_room_count_estimate"] = await self.store.count_public_rooms(
network_tuple, ignore_non_federatable=from_federation
)
return response
@cached(num_args=1, cache_context=True)
async def generate_room_entry(
self,
room_id: str,
num_joined_users: int,
cache_context: _CacheContext,
with_alias: bool = True,
allow_private: bool = False,
) -> Optional[JsonDict]:
"""Returns the entry for a room
Args:
room_id: The room's ID.
num_joined_users: Number of users in the room.
cache_context: Information for cached responses.
with_alias: Whether to return the room's aliases in the result.
allow_private: Whether invite-only rooms should be shown.
Returns:
Returns a room entry as a dictionary, or None if this
room was determined not to be shown publicly.
"""
result = {"room_id": room_id, "num_joined_members": num_joined_users}
if with_alias:
aliases = await self.store.get_aliases_for_room(
room_id, on_invalidate=cache_context.invalidate
)
if aliases:
result["aliases"] = aliases
current_state_ids = await self.store.get_current_state_ids(
room_id, on_invalidate=cache_context.invalidate
)
if not current_state_ids:
# We're not in the room, so may as well bail out here.
return result
event_map = await self.store.get_events(
[
event_id
for key, event_id in current_state_ids.items()
if key[0]
in (
EventTypes.Create,
EventTypes.JoinRules,
EventTypes.Name,
EventTypes.Topic,
EventTypes.CanonicalAlias,
EventTypes.RoomHistoryVisibility,
EventTypes.GuestAccess,
"m.room.avatar",
)
]
)
current_state = {(ev.type, ev.state_key): ev for ev in event_map.values()}
# Double check that this is actually a public room.
join_rules_event = current_state.get((EventTypes.JoinRules, ""))
if join_rules_event:
join_rule = join_rules_event.content.get("join_rule", None)
if not allow_private and join_rule and join_rule != JoinRules.PUBLIC:
return None
# Return whether this room is open to federation users or not
create_event = current_state[EventTypes.Create, ""]
result["m.federate"] = create_event.content.get(
EventContentFields.FEDERATE, True
)
name_event = current_state.get((EventTypes.Name, ""))
if name_event:
name = name_event.content.get("name", None)
if name:
result["name"] = name
topic_event = current_state.get((EventTypes.Topic, ""))
if topic_event:
topic = topic_event.content.get("topic", None)
if topic:
result["topic"] = topic
canonical_event = current_state.get((EventTypes.CanonicalAlias, ""))
if canonical_event:
canonical_alias = canonical_event.content.get("alias", None)
if canonical_alias:
result["canonical_alias"] = canonical_alias
visibility_event = current_state.get((EventTypes.RoomHistoryVisibility, ""))
visibility = None
if visibility_event:
visibility = visibility_event.content.get("history_visibility", None)
result["world_readable"] = visibility == HistoryVisibility.WORLD_READABLE
guest_event = current_state.get((EventTypes.GuestAccess, ""))
guest = None
if guest_event:
guest = guest_event.content.get(EventContentFields.GUEST_ACCESS)
result["guest_can_join"] = guest == GuestAccess.CAN_JOIN
avatar_event = current_state.get(("m.room.avatar", ""))
if avatar_event:
avatar_url = avatar_event.content.get("url", None)
if avatar_url:
result["avatar_url"] = avatar_url
return result
async def get_remote_public_room_list(
self,
server_name: str,
limit: Optional[int] = None,
since_token: Optional[str] = None,
search_filter: Optional[dict] = None,
include_all_networks: bool = False,
third_party_instance_id: Optional[str] = None,
) -> JsonDict:
"""Get the public room list from remote server
Raises:
SynapseError
"""
if not self.enable_room_list_search:
return {"chunk": [], "total_room_count_estimate": 0}
if search_filter:
# Searching across federation is defined in MSC2197.
# However, the remote homeserver may or may not actually support it.
# So we first try an MSC2197 remote-filtered search, then fall back
# to a locally-filtered search if we must.
try:
res = await self._get_remote_list_cached(
server_name,
limit=limit,
since_token=since_token,
include_all_networks=include_all_networks,
third_party_instance_id=third_party_instance_id,
search_filter=search_filter,
)
return res
except HttpResponseException as hre:
syn_err = hre.to_synapse_error()
if hre.code in (404, 405) or syn_err.errcode in (
Codes.UNRECOGNIZED,
Codes.NOT_FOUND,
):
logger.debug("Falling back to locally-filtered /publicRooms")
else:
# Not an error that should trigger a fallback.
raise SynapseError(502, "Failed to fetch room list")
except RequestSendFailed:
# Not an error that should trigger a fallback.
raise SynapseError(502, "Failed to fetch room list")
# if we reach this point, then we fall back to the situation where
# we currently don't support searching across federation, so we have
# to do it manually without pagination
limit = None
since_token = None
try:
res = await self._get_remote_list_cached(
server_name,
limit=limit,
since_token=since_token,
include_all_networks=include_all_networks,
third_party_instance_id=third_party_instance_id,
)
except (RequestSendFailed, HttpResponseException):
raise SynapseError(502, "Failed to fetch room list")
if search_filter:
res = {
"chunk": [
entry
for entry in list(res.get("chunk", []))
if _matches_room_entry(entry, search_filter)
]
}
return res
async def _get_remote_list_cached(
self,
server_name: str,
limit: Optional[int] = None,
since_token: Optional[str] = None,
search_filter: Optional[dict] = None,
include_all_networks: bool = False,
third_party_instance_id: Optional[str] = None,
) -> JsonDict:
"""Wrapper around FederationClient.get_public_rooms that caches the
result.
"""
repl_layer = self.hs.get_federation_client()
if search_filter:
# We can't cache when asking for search
return await repl_layer.get_public_rooms(
server_name,
limit=limit,
since_token=since_token,
search_filter=search_filter,
include_all_networks=include_all_networks,
third_party_instance_id=third_party_instance_id,
)
key = (
server_name,
limit,
since_token,
include_all_networks,
third_party_instance_id,
)
return await self.remote_response_cache.wrap(
key,
repl_layer.get_public_rooms,
server_name,
limit=limit,
since_token=since_token,
search_filter=search_filter,
include_all_networks=include_all_networks,
third_party_instance_id=third_party_instance_id,
)
@attr.s(slots=True, frozen=True, auto_attribs=True)
class RoomListNextBatch:
last_joined_members: int # The count to get rooms after/before
last_room_id: str # The room_id to get rooms after/before
direction_is_forward: bool # True if this is a next_batch, false if prev_batch
KEY_DICT = {
"last_joined_members": "m",
"last_room_id": "r",
"direction_is_forward": "d",
}
REVERSE_KEY_DICT = {v: k for k, v in KEY_DICT.items()}
@classmethod
def from_token(cls, token: str) -> "RoomListNextBatch":
decoded = msgpack.loads(decode_base64(token), raw=False)
return RoomListNextBatch(
**{cls.REVERSE_KEY_DICT[key]: val for key, val in decoded.items()}
)
def to_token(self) -> str:
return encode_base64(
msgpack.dumps(
{self.KEY_DICT[key]: val for key, val in attr.asdict(self).items()}
)
)
def copy_and_replace(self, **kwds: Any) -> "RoomListNextBatch":
return attr.evolve(self, **kwds)
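# A minimal sketch (not part of Synapse itself) of how the pagination comment in
# _get_public_room_list plays out: a RoomListNextBatch has its field names
# shortened via KEY_DICT, is msgpack-encoded and unpadded-base64-encoded into an
# opaque token, and from_token() reverses the process. The values are made up.
def _example_pagination_token_round_trip() -> None:
    batch = RoomListNextBatch(
        last_joined_members=42,
        last_room_id="!example:localhost",
        direction_is_forward=True,
    )
    token = batch.to_token()
    assert RoomListNextBatch.from_token(token) == batch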
def _matches_room_entry(room_entry: JsonDict, search_filter: dict) -> bool:
if search_filter and search_filter.get("generic_search_term", None):
generic_search_term = search_filter["generic_search_term"].upper()
if generic_search_term in room_entry.get("name", "").upper():
return True
elif generic_search_term in room_entry.get("topic", "").upper():
return True
elif generic_search_term in room_entry.get("canonical_alias", "").upper():
return True
else:
return True
return False

the-stack_0_1704
import unittest
import os
import json
import numpy as np
import rastervision as rv
from rastervision.core import Box
from rastervision.data.raster_source import RasterSourceConfig
from rastervision.data.crs_transformer import IdentityCRSTransformer
from rastervision.utils.files import str_to_file
from rastervision.rv_config import RVConfig
class TestGeoJSONSource(unittest.TestCase):
def setUp(self):
self.crs_transformer = IdentityCRSTransformer()
self.extent = Box.make_square(0, 0, 10)
self.tmp_dir = RVConfig.get_tmp_dir()
self.class_id = 2
self.background_class_id = 3
self.line_buffer = 1
self.uri = os.path.join(self.tmp_dir.name, 'temp.json')
def build_source(self, geojson):
str_to_file(json.dumps(geojson), self.uri)
config = RasterSourceConfig.builder(rv.GEOJSON_SOURCE) \
.with_uri(self.uri) \
.with_rasterizer_options(self.background_class_id, self.line_buffer) \
.build()
# Convert to proto and back as a test.
config = RasterSourceConfig.builder(rv.GEOJSON_SOURCE) \
.from_proto(config.to_proto()) \
.build()
source = config.create_source(self.uri, self.extent,
self.crs_transformer)
return source
def tearDown(self):
self.tmp_dir.cleanup()
def test_get_chip(self):
geojson = {
'type':
'FeatureCollection',
'features': [{
'type': 'Feature',
'geometry': {
'type':
'Polygon',
'coordinates': [[[0., 0.], [0., 5.], [5., 5.], [5., 0.],
[0., 0.]]]
},
'properties': {
'class_id': self.class_id,
}
}, {
'type': 'Feature',
'geometry': {
'type': 'LineString',
'coordinates': [[7., 0.], [7., 9.]]
},
'properties': {
'class_id': self.class_id
}
}]
}
source = self.build_source(geojson)
self.assertEqual(source.get_extent(), self.extent)
chip = source.get_image_array()
self.assertEqual(chip.shape, (10, 10, 1))
expected_chip = self.background_class_id * np.ones((10, 10, 1))
expected_chip[0:5, 0:5, 0] = self.class_id
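# The LineString at x=7, buffered by line_buffer=1, rasterizes to a band two
# pixels wide (columns 6-7) spanning the full height of the chip.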
expected_chip[0:10, 6:8] = self.class_id
np.testing.assert_array_equal(chip, expected_chip)
def test_get_chip_no_polygons(self):
geojson = {'type': 'FeatureCollection', 'features': []}
source = self.build_source(geojson)
self.assertEqual(source.get_extent(), self.extent)
chip = source.get_image_array()
self.assertEqual(chip.shape, (10, 10, 1))
expected_chip = self.background_class_id * np.ones((10, 10, 1))
np.testing.assert_array_equal(chip, expected_chip)
if __name__ == '__main__':
unittest.main()

the-stack_0_1705
"""
Copyright 2021 Merck & Co., Inc. Kenilworth, NJ, USA.
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
#from .fixtures import AUTH_HEADERS, BASE_API_PATH
from requests import post as reqpost
from os import getenv
BASE_API_PATH = getenv('USER_FACING_API_HTTP_PATH', 'http://dp-api:9000')
AUTH_HEADERS = {'X-Username': 'test-developer', 'X-Api-Key': getenv('INTTESTPASS', 'local-developer')}
# Can't test on standalone
def test_experimental_join_stats():
# routes: 225
# AccumuloControllerExperimental
# POST /experimental/joinStats controllers.accumulo.AccumuloControllerExperimental.joinStats(request: Request)
# JoinScanSpec: 406
body = {
'dataset_a': 'int_basic_test_data',
'table_a': 'int_basic_test_data',
'col_a': 'State Name',
'dataset_b': 'int_cell_level_visibilities_test_data',
'table_b': 'int_cell_level_visibilities_test_data',
'col_b': 'Location',
'limit': 1
}
req_path = f'{BASE_API_PATH}/experimental/joinStats'
res = reqpost(req_path, headers=AUTH_HEADERS, json=body)
assert res.status_code == 200
assert res.json() != {}
if __name__ == '__main__':
test_experimental_join_stats()
print('Complete')

the-stack_0_1706
from ..data_class.data_manager import DataManager
from ..directories import data_loaded
import numpy as np
import scipy.sparse as sp
class TpfaScheme(DataManager):
def __init__(self, data_impress, elements_lv0, data_name: str='TpfaScheme.npz', load=False):
super().__init__(data_name, load=load)
self.data_impress = data_impress
self.elements_lv0 = elements_lv0
self.n_volumes = self.data_impress.len_entities['volumes']
self.gravity = data_loaded['gravity']
def get_transmissibility_matrix_without_boundary_conditions(self) -> None:
vols_viz_internal_faces = self.elements_lv0['neig_internal_faces']
v0 = vols_viz_internal_faces
internal_faces = self.elements_lv0['internal_faces']
transmissibility_faces = self.data_impress['transmissibility']
transmissibility_internal_faces = transmissibility_faces[internal_faces]
t0 = transmissibility_internal_faces
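# Assemble the matrix face by face: each internal face between volumes i and j
# contributes +t0 to the off-diagonal entries (i, j) and (j, i) and -t0 to the
# diagonal entries (i, i) and (j, j), so every row of T sums to zero.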
lines = np.array([v0[:, 0], v0[:, 1], v0[:, 0], v0[:, 1]]).flatten()
cols = np.array([v0[:, 1], v0[:, 0], v0[:, 0], v0[:, 1]]).flatten()
data = np.array([t0, t0, -t0, -t0]).flatten()
T = sp.csc_matrix((data, (lines, cols)), shape=(self.n_volumes, self.n_volumes))
self['Tini'] = T

the-stack_0_1707
"""
Filter data from DBPedia for inclusion in ConceptNet.
The data we use here is far from the entirety of DBPedia, which lists millions
of things that are not general knowledge, such as villages with a population of
20, individual roads, sports team rosters from a particular season, and so on.
We try to filter this information to get data that's suitable for ConceptNet.
We only extract information from the 'instance_types', 'interlanguage_links',
and 'mappingbased_objects' files, using 'page_links' as a filter.
We filter for relevant concepts in the following way:
- Use only pages whose English version is the target of at least 100 Wikipedia
links, as seen in the 'page_links_en' file.
- Skip pages that are lists or Wikipedia internals.
- Filter out the instances of specific types, such as 'Settlement' and 'Road'.
- Use only pages that have been translated to at least 5 languages in the
'interlanguage_links' file.
- Use only pages that have a Wikidata ID of less than 1000000. This is a crude
heuristic, based on the fact that higher-numbered pages are likely to be
less well known, but it lets us cut off reading the translation file early.
We extract types and certain relations from the pages that remain using the
'instance_types' and 'mappingbased_objects' files.
"""
from conceptnet5.language.token_utils import un_camel_case
from conceptnet5.uri import Licenses, uri_prefix, split_uri
from conceptnet5.nodes import standardized_concept_uri, topic_to_concept
from conceptnet5.edges import make_edge
from conceptnet5.languages import ALL_LANGUAGES, LCODE_ALIASES
from conceptnet5.formats.msgpack_stream import MsgpackStreamWriter
from conceptnet5.formats.semantic_web import resource_name, parse_nquads
import urllib
import bz2
import pathlib
from operator import itemgetter
import itertools
parse_url = urllib.parse.urlparse
RELATIONS = {
'isPartOf': '/r/PartOf',
'series': '/r/PartOf',
'languageFamily': '/r/PartOf',
'location': '/r/AtLocation',
'place': '/r/AtLocation',
'locatedInArea': '/r/AtLocation',
'spokenIn': '/r/AtLocation',
# leave out differentFrom, as it is mostly about confusable names
'sameAs': '/r/Synonym',
'similar': '/r/SimilarTo',
'related': '/r/RelatedTo',
'seeAlso': '/r/RelatedTo',
'type': '/r/InstanceOf',
'field': '/r/dbpedia/field',
'academicDiscipline': '/r/dbpedia/field',
'genre': '/r/dbpedia/genre',
'literaryGenre': '/r/dbpedia/genre',
'influencedBy': '/r/dbpedia/influencedBy',
'knownFor': '/r/dbpedia/knownFor',
'notableIdea': '/r/dbpedia/knownFor',
'notableWork': '/r/dbpedia/knownFor',
'language': '/r/dbpedia/language',
'occupation': '/r/dbpedia/occupation',
'profession': '/r/dbpedia/occupation',
#'author': '/r/dbpedia/writer',
#'writer': '/r/dbpedia/writer',
#'director': '/r/dbpedia/director',
#'starring': '/r/dbpedia/starring',
#'producer': '/r/dbpedia/producer',
#'associatedBand': '/r/dbpedia/associatedBand',
#'associatedMusicalArtist': '/r/dbpedia/associatedMusicalArtist',
#'bandMember': '/r/dbpedia/bandMember',
#'artist': '/r/dbpedia/artist',
#'musicalArtist': '/r/dbpedia/artist',
#'musicalBand': '/r/dbpedia/artist',
'genus': '/r/dbpedia/genus',
'leader': '/r/dbpedia/leader',
'capital': '/r/dbpedia/capital',
'service': '/r/dbpedia/product',
'product': '/r/dbpedia/product',
}
# Ban some concepts that are way too generic and often differ from the common
# way that people use these words
CONCEPT_BLACKLIST = {
'/c/en/work/n', '/c/en/agent/n', '/c/en/artist/n', '/c/en/thing/n'
}
TYPE_BLACKLIST = {
'Settlement', 'Railway Line', 'Road', 'Sports Event', 'Event',
'Olympic Event', 'Soccer Tournament', 'Election', 'Diocese',
'Year', 'Football League Season', 'Grand Prix'
}
def translate_dbpedia_url(url):
"""
Convert an object that's defined by a DBPedia URL to a ConceptNet
URI. We do this by finding the part of the URL that names the object,
and using that as surface text for ConceptNet.
This is, in some ways, abusing a naming convention in the Semantic Web.
The URL of an object doesn't have to mean anything at all. The
human-readable name is supposed to be a string, specified by the "name"
relation.
The problem here is that the "name" relation is not unique in either
direction. A URL can have many names, and the same name can refer to
many URLs, and some of these names are rarely used or are the result of
parsing glitches. The URL itself is a stable thing that we can build a
ConceptNet URI from, on the other hand.
"""
if '__' in url or 'dbpedia.org' not in url:
return None
parsed = parse_url(url)
domain = parsed.netloc
if '.' not in domain:
return None
if domain == 'dbpedia.org':
# Handle old DBPedia URLs that had no language code
domain = 'en.dbpedia.org'
domain_parts = domain.split('.', 1)
if domain_parts[1] == 'dbpedia.org':
lang = domain_parts[0]
if lang in LCODE_ALIASES:
lang = LCODE_ALIASES[lang]
if lang not in ALL_LANGUAGES:
return None
text = resource_name(url).replace('_', ' ')
uri = topic_to_concept(lang, text)
if uri in CONCEPT_BLACKLIST:
return None
else:
return uri
else:
return None
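# For example, a URL such as 'http://fr.dbpedia.org/resource/Jazz' would map to
# a French ConceptNet topic URI (roughly '/c/fr/jazz'), while URLs containing
# '__' or hosted outside dbpedia.org return None. (Illustrative values only,
# not doctests.)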
def map_dbpedia_relation(url):
"""
Recognize some relations that we can extract from DBPedia, and convert
them to ConceptNet relations. If the relation is specific to DBPedia, it'll
be in the '/r/dbpedia' namespace.
>>> map_dbpedia_relation('http://www.w3.org/1999/02/22-rdf-syntax-ns#type')
'/r/InstanceOf'
>>> map_dbpedia_relation('http://dbpedia.org/ontology/location')
'/r/AtLocation'
>>> map_dbpedia_relation('http://dbpedia.org/ontology/genre')
'/r/dbpedia/genre'
"""
name = resource_name(url)
if name in RELATIONS:
return RELATIONS[name]
else:
return None
def get_urls_from_degree_file(in_degree_file):
urls = set()
for line in open(in_degree_file, encoding='utf-8'):
line = line.strip()
if line:
count_str, url_str = line.split(' ', 1)
assert url_str[0] == '<'
assert url_str[-1] == '>'
url = url_str[1:-1]
urls.add(url)
return urls
def read_concept_file(concept_file):
concepts = set()
for line in open(concept_file, encoding='utf-8'):
concept = uri_prefix(line.strip())
concepts.add(concept)
return concepts
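# Illustrative sketch only (not called by this pipeline): the page-level
# inclusion criteria from the module docstring expressed as a single predicate.
# The thresholds (at least 100 inbound links, at least 5 translations, Wikidata
# ID below 1000000) come from that docstring; the parameter names are
# hypothetical.
def _page_passes_basic_filters(
    inbound_links: int, translations: int, wikidata_id: int
) -> bool:
    return (
        inbound_links >= 100
        and translations >= 5
        and wikidata_id < 1000000
    )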
def process_dbpedia(input_dir, output_file, concept_file):
"""
Read through multiple DBPedia files and output filtered assertions to
`output_file`.
"""
ok_concepts = read_concept_file(concept_file)
input_path = pathlib.Path(input_dir)
interlang_path = input_path / 'interlanguage_links_en.tql.bz2'
mapped_urls = interlanguage_mapping(interlang_path, ok_concepts)
out = MsgpackStreamWriter(output_file)
types_path = input_path / 'instance_types_en.tql.bz2'
quads = parse_nquads(bz2.open(str(types_path), 'rt'))
for subj, pred, obj, _graph in quads:
subj_url = subj['url']
if (
'Category:' in subj_url or 'File:' in subj_url or
'List_of' in subj_url or '__' in subj_url or
'Template:' in subj_url
):
continue
if subj_url in mapped_urls:
subj_concept = translate_dbpedia_url(subj_url)
obj_type = un_camel_case(resource_name(obj['url']))
if obj_type not in TYPE_BLACKLIST:
obj_concept = standardized_concept_uri('en', obj_type, 'n')
if obj_concept not in CONCEPT_BLACKLIST:
edge = make_edge(
'/r/IsA', subj_concept, obj_concept,
dataset='/d/dbpedia/en',
license=Licenses.cc_sharealike,
sources=[{'contributor': '/s/resource/dbpedia/2015/en'}],
weight=0.5,
surfaceStart=url_to_label(subj['url']),
surfaceEnd=url_to_label(obj['url'])
)
out.write(edge)
for other_url in mapped_urls[subj_url]:
if other_url.startswith('http://wikidata.dbpedia.org/'):
urledge = make_edge(
'/r/ExternalURL',
subj_concept, other_url,
dataset='/d/dbpedia/en',
license=Licenses.cc_sharealike,
sources=[{'contributor': '/s/resource/dbpedia/2015/en'}],
weight=1.0
)
out.write(urledge)
else:
other_concept = translate_dbpedia_url(other_url)
if other_concept:
urledge = make_edge(
'/r/ExternalURL',
other_concept, other_url,
dataset='/d/dbpedia/en',
license=Licenses.cc_sharealike,
sources=[{'contributor': '/s/resource/dbpedia/2015/en'}],
weight=1.0
)
out.write(urledge)
edge = make_edge(
'/r/Synonym',
other_concept, subj_concept,
dataset='/d/dbpedia/en',
license=Licenses.cc_sharealike,
sources=[{'contributor': '/s/resource/dbpedia/2015/en'}],
weight=0.5,
surfaceStart=url_to_label(other_url),
surfaceEnd=url_to_label(subj_url)
)
out.write(edge)
relations_path = input_path / 'mappingbased_objects_en.tql.bz2'
quads = parse_nquads(bz2.open(str(relations_path), 'rt'))
for subj, pred, obj, _graph in quads:
subj_concept = translate_dbpedia_url(subj['url'])
obj_concept = translate_dbpedia_url(obj['url'])
rel_name = resource_name(pred['url'])
if (
subj_concept and obj_concept and
subj['url'] in mapped_urls and obj['url'] in mapped_urls
):
if rel_name in RELATIONS:
rel = RELATIONS[rel_name]
edge = make_edge(
rel, subj_concept, obj_concept,
dataset='/d/dbpedia/en',
license=Licenses.cc_sharealike,
sources=[{'contributor': '/s/resource/dbpedia/2015/en'}],
weight=0.5,
surfaceStart=url_to_label(subj['url']),
surfaceEnd=url_to_label(obj['url'])
)
out.write(edge)
out.close()
def url_to_label(url):
return resource_name(url).replace('_', ' ')
def interlanguage_mapping(interlang_path, ok_concepts):
quads = parse_nquads(bz2.open(str(interlang_path), 'rt'))
mapping = {}
for subj, values in itertools.groupby(quads, itemgetter(0)):
subj_url = subj['url']
subj_concept = translate_dbpedia_url(subj_url)
pieces = split_uri(subj_concept)
if len(pieces) >= 6:
sense = pieces[5]
if 'album' in sense or 'film' in sense or 'series' in sense or 'disambiguation' in sense or 'song' in sense or 'band' in sense:
continue
if uri_prefix(subj_concept) in ok_concepts:
targets = [subj_url]
for _subj, _pred, obj, _graph in values:
url = obj['url']
if 'www.wikidata.org' in url:
continue
if url.startswith('http://wikidata.dbpedia.org/'):
wikidata_id = resource_name(url)
# Return early when we see a high-numbered Wikidata ID
if int(wikidata_id[1:]) >= 1000000:
return mapping
targets.append(url)
mapping[subj_url] = targets
return mapping
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help="Directory containing DBPedia files")
parser.add_argument('output_file', help='msgpack file to output to')
parser.add_argument('concept_file', help="Text file of concepts used elsewhere in ConceptNet")
args = parser.parse_args()
process_dbpedia(args.input_dir, args.output_file, args.concept_file)
if __name__ == '__main__':
main()

the-stack_0_1708
import unittest
import uuid
from aioresponses import aioresponses
from canvasaio import Canvas
from canvasaio.assignment import (
Assignment,
AssignmentGroup,
AssignmentOverride,
AssignmentExtension,
)
from canvasaio.exceptions import CanvasException, RequiredFieldMissing
from canvasaio.peer_review import PeerReview
from canvasaio.progress import Progress
from canvasaio.submission import Submission
from canvasaio.user import UserDisplay
from tests import settings
from tests.util import register_uris, cleanup_file, aioresponse_mock
@aioresponse_mock
class TestAssignment(unittest.IsolatedAsyncioTestCase):
async def asyncSetUp(self):
self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)
with aioresponses() as m:
register_uris({"course": ["get_by_id", "get_assignment_by_id"]}, m)
self.course = await self.canvas.get_course(1)
self.assignment = await self.course.get_assignment(1)
async def asyncTearDown(self):
await self.canvas.close()
async def test__init__overrides(self, m):
register_uris({"assignment": ["get_assignment_with_overrides"]}, m)
assignment = await self.course.get_assignment(1)
self.assertTrue(hasattr(assignment, "overrides"))
self.assertIsInstance(assignment.overrides, list)
self.assertEqual(len(assignment.overrides), 1)
self.assertIsInstance(assignment.overrides[0], AssignmentOverride)
# create_override()
async def test_create_override(self, m):
register_uris({"assignment": ["create_override"]}, m)
override = await self.assignment.create_override(
assignment_override={
"student_ids": [1, 2, 3],
"title": "New Assignment Override",
}
)
self.assertIsInstance(override, AssignmentOverride)
self.assertEqual(override.title, "New Assignment Override")
# delete()
async def test_delete_assignments(self, m):
register_uris({"assignment": ["delete_assignment"]}, m)
deleted_assignment = await self.assignment.delete()
self.assertIsInstance(deleted_assignment, Assignment)
# edit()
async def test_edit_assignment(self, m):
register_uris({"assignment": ["edit_assignment"]}, m)
name = "New Name"
edited_assignment = await self.assignment.edit(assignment={"name": name})
self.assertIsInstance(edited_assignment, Assignment)
self.assertTrue(hasattr(edited_assignment, "name"))
self.assertEqual(edited_assignment.name, name)
# get_gradeable_students()
async def test_get_gradeable_students(self, m):
register_uris({"course": ["list_gradeable_students"]}, m)
students = self.assignment.get_gradeable_students()
student_list = [student async for student in students]
self.assertEqual(len(student_list), 2)
self.assertIsInstance(student_list[0], UserDisplay)
# get_override()
async def test_get_override(self, m):
register_uris({"assignment": ["get_assignment_override"]}, m)
override = await self.assignment.get_override(1)
self.assertIsInstance(override, AssignmentOverride)
# get_overrides()
async def test_get_overrides(self, m):
register_uris(
{
"assignment": [
"list_assignment_overrides",
"list_assignment_overrides_p2",
]
},
m,
)
overrides = self.assignment.get_overrides()
override_list = [override async for override in overrides]
self.assertEqual(len(override_list), 4)
self.assertIsInstance(override_list[0], AssignmentOverride)
self.assertIsInstance(override_list[3], AssignmentOverride)
# get_peer_reviews()
async def test_get_peer_reviews(self, m):
register_uris({"assignment": ["list_peer_reviews"]}, m)
peer_reviews = self.assignment.get_peer_reviews()
peer_review_list = [peer_review async for peer_review in peer_reviews]
self.assertEqual(len(peer_review_list), 2)
self.assertIsInstance(peer_review_list[0], PeerReview)
# get_submission()
async def test_get_submission(self, m):
register_uris({"submission": ["get_by_id_course"], "user": ["get_by_id"]}, m)
user_id = 1
submission_by_id = await self.assignment.get_submission(user_id)
self.assertIsInstance(submission_by_id, Submission)
self.assertTrue(hasattr(submission_by_id, "submission_type"))
user = await self.canvas.get_user(user_id)
submission_by_obj = await self.assignment.get_submission(user)
self.assertIsInstance(submission_by_obj, Submission)
self.assertTrue(hasattr(submission_by_obj, "submission_type"))
# get_submissions()
async def test_get_submissions(self, m):
register_uris({"submission": ["list_submissions"]}, m)
submissions = self.assignment.get_submissions()
submission_list_by_id = [submission async for submission in submissions]
self.assertEqual(len(submission_list_by_id), 2)
self.assertIsInstance(submission_list_by_id[0], Submission)
# set_extensions()
async def test_set_extensions(self, m):
register_uris({"assignment": ["set_extensions"]}, m)
extension = await self.assignment.set_extensions(
[{"user_id": 3, "extra_attempts": 2}, {"user_id": 2, "extra_attempts": 2}]
)
self.assertIsInstance(extension, list)
self.assertEqual(len(extension), 2)
self.assertIsInstance(extension[0], AssignmentExtension)
self.assertEqual(extension[0].user_id, 3)
self.assertTrue(hasattr(extension[0], "extra_attempts"))
self.assertEqual(extension[0].extra_attempts, 2)
self.assertIsInstance(extension[1], AssignmentExtension)
self.assertEqual(extension[1].user_id, 2)
self.assertTrue(hasattr(extension[1], "extra_attempts"))
self.assertEqual(extension[1].extra_attempts, 2)
async def test_set_extensions_not_list(self, m):
with self.assertRaises(ValueError):
await self.assignment.set_extensions({"user_id": 3, "extra_attempts": 2})
async def test_set_extensions_empty_list(self, m):
with self.assertRaises(ValueError):
await self.assignment.set_extensions([])
async def test_set_extensions_non_dicts(self, m):
with self.assertRaises(ValueError):
await self.assignment.set_extensions([("user_id", 1), ("extra_attempts", 2)])
async def test_set_extensions_missing_key(self, m):
with self.assertRaises(RequiredFieldMissing):
await self.assignment.set_extensions([{"extra_attempts": 3}])
# submit()
async def test_submit(self, m):
register_uris({"assignment": ["submit"]}, m)
sub_type = "online_upload"
sub_dict = {"submission_type": sub_type}
submission = await self.assignment.submit(sub_dict)
self.assertIsInstance(submission, Submission)
self.assertTrue(hasattr(submission, "submission_type"))
self.assertEqual(submission.submission_type, sub_type)
async def test_submit_fail(self, m):
with self.assertRaises(RequiredFieldMissing):
await self.assignment.submit({})
async def test_submit_file(self, m):
register_uris({"assignment": ["submit", "upload", "upload_final"]}, m)
filename = "testfile_assignment_{}".format(uuid.uuid4().hex)
try:
with open(filename, "w+") as file:
sub_type = "online_upload"
sub_dict = {"submission_type": sub_type}
submission = await self.assignment.submit(sub_dict, file)
self.assertIsInstance(submission, Submission)
self.assertTrue(hasattr(submission, "submission_type"))
self.assertEqual(submission.submission_type, sub_type)
finally:
cleanup_file(filename)
async def test_submit_file_wrong_type(self, m):
filename = "testfile_assignment_{}".format(uuid.uuid4().hex)
sub_type = "online_text_entry"
sub_dict = {"submission_type": sub_type}
with self.assertRaises(ValueError):
await self.assignment.submit(sub_dict, filename)
async def test_submit_file_upload_failure(self, m):
register_uris({"assignment": ["submit", "upload", "upload_fail"]}, m)
filename = "testfile_assignment_{}".format(uuid.uuid4().hex)
try:
with open(filename, "w+") as file:
sub_type = "online_upload"
sub_dict = {"submission_type": sub_type}
with self.assertRaises(CanvasException):
await self.assignment.submit(sub_dict, file)
finally:
cleanup_file(filename)
# __str__()
def test__str__(self, m):
string = str(self.assignment)
self.assertIsInstance(string, str)
# submissions_bulk_update()
async def test_submissions_bulk_update(self, m):
register_uris({"assignment": ["update_submissions"]}, m)
register_uris({"progress": ["course_progress"]}, m)
progress = await self.assignment.submissions_bulk_update(
grade_data={"1": {"posted_grade": 97}, "2": {"posted_grade": 98}}
)
self.assertIsInstance(progress, Progress)
self.assertTrue(progress.context_type == "Course")
progress = await progress.query()
self.assertTrue(progress.context_type == "Course")
# upload_to_submission()
async def test_upload_to_submission_self(self, m):
register_uris({"assignment": ["upload", "upload_final"]}, m)
filename = "testfile_assignment_{}".format(uuid.uuid4().hex)
try:
with open(filename, "w+") as file:
response = await self.assignment.upload_to_submission(file)
self.assertTrue(response[0])
self.assertIsInstance(response[1], dict)
self.assertIn("url", response[1])
finally:
cleanup_file(filename)
async def test_upload_to_submission_user(self, m):
register_uris({"assignment": ["upload_by_id", "upload_final"]}, m)
filename = "testfile_assignment_{}".format(uuid.uuid4().hex)
user_id = 1
try:
with open(filename, "w+") as file:
response = await self.assignment.upload_to_submission(file, user_id)
self.assertTrue(response[0])
self.assertIsInstance(response[1], dict)
self.assertIn("url", response[1])
finally:
cleanup_file(filename)
@aioresponse_mock
class TestAssignmentExtension(unittest.IsolatedAsyncioTestCase):
def setUp(self):
self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)
self.extension = AssignmentExtension(
self.canvas._Canvas__requester,
{"assignment_id": 2, "user_id": 3, "extra_attempts": 2},
)
# __str__()
def test__str__(self, m):
string = str(self.extension)
self.assertIsInstance(string, str)
@aioresponse_mock
class TestAssignmentGroup(unittest.IsolatedAsyncioTestCase):
async def asyncSetUp(self):
self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)
with aioresponses() as m:
register_uris(
{"course": ["get_by_id"], "assignment": ["get_assignment_group"]}, m
)
self.course = await self.canvas.get_course(1)
self.assignment_group = await self.course.get_assignment_group(5)
async def asyncTearDown(self):
await self.canvas.close()
# edit()
async def test_edit_assignment_group(self, m):
register_uris({"assignment": ["edit_assignment_group"]}, m)
name = "New Name"
edited_assignment_group = await self.assignment_group.edit(
assignment_group={"name": name}
)
self.assertIsInstance(edited_assignment_group, AssignmentGroup)
self.assertTrue(hasattr(edited_assignment_group, "name"))
self.assertEqual(edited_assignment_group.name, name)
# delete()
async def test_delete_assignment_group(self, m):
register_uris({"assignment": ["delete_assignment_group"]}, m)
deleted_assignment_group = await self.assignment_group.delete()
self.assertIsInstance(deleted_assignment_group, AssignmentGroup)
self.assertTrue(hasattr(deleted_assignment_group, "name"))
self.assertEqual(deleted_assignment_group.name, "Assignment Group 5")
# __str__()
def test__str__(self, m):
string = str(self.assignment_group)
self.assertIsInstance(string, str)
@aioresponse_mock()
class TestAssignmentOverride(unittest.IsolatedAsyncioTestCase):
async def asyncSetUp(self):
self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)
with aioresponses() as m:
register_uris(
{
"course": ["get_by_id", "get_assignment_by_id"],
"assignment": ["get_assignment_override"],
},
m,
)
self.course = await self.canvas.get_course(1)
self.assignment = await self.course.get_assignment(1)
self.assignment_override = await self.assignment.get_override(1)
async def asyncTearDown(self):
await self.canvas.close()
# __str__()
def test__str__(self, m):
string = str(self.assignment_override)
self.assertIsInstance(string, str)
self.assertEqual(string, "Assignment Override 1 (1)")
# delete()
async def test_delete(self, m):
register_uris({"assignment": ["delete_override"]}, m)
deleted = await self.assignment_override.delete()
self.assertIsInstance(deleted, AssignmentOverride)
self.assertEqual(deleted.id, self.assignment_override.id)
# edit()
async def test_edit(self, m):
register_uris({"assignment": ["edit_override"]}, m)
edited = await self.assignment_override.edit(
assignment_override={
"title": "New Title",
"student_ids": self.assignment_override.student_ids,
}
)
self.assertEqual(edited, self.assignment_override)
self.assertIsInstance(self.assignment_override, AssignmentOverride)
self.assertEqual(edited.title, "New Title")

the-stack_0_1709
# Copyright 2011 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import hashlib
import importlib
import logging
import os
import os.path
import socket
import struct
import tempfile
import eventlet
import mock
import netaddr
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_context import context as common_context
from oslo_context import fixture as context_fixture
from oslo_utils import encodeutils
from oslo_utils import fixture as utils_fixture
from oslo_utils import units
import six
import nova
from nova import context
from nova import exception
from nova import test
from nova import utils
CONF = cfg.CONF
class GenericUtilsTestCase(test.NoDBTestCase):
def test_parse_server_string(self):
result = utils.parse_server_string('::1')
self.assertEqual(('::1', ''), result)
result = utils.parse_server_string('[::1]:8773')
self.assertEqual(('::1', '8773'), result)
result = utils.parse_server_string('2001:db8::192.168.1.1')
self.assertEqual(('2001:db8::192.168.1.1', ''), result)
result = utils.parse_server_string('[2001:db8::192.168.1.1]:8773')
self.assertEqual(('2001:db8::192.168.1.1', '8773'), result)
result = utils.parse_server_string('192.168.1.1')
self.assertEqual(('192.168.1.1', ''), result)
result = utils.parse_server_string('192.168.1.2:8773')
self.assertEqual(('192.168.1.2', '8773'), result)
result = utils.parse_server_string('192.168.1.3')
self.assertEqual(('192.168.1.3', ''), result)
result = utils.parse_server_string('www.example.com:8443')
self.assertEqual(('www.example.com', '8443'), result)
result = utils.parse_server_string('www.example.com')
self.assertEqual(('www.example.com', ''), result)
# error case
result = utils.parse_server_string('www.exa:mple.com:8443')
self.assertEqual(('', ''), result)
result = utils.parse_server_string('')
self.assertEqual(('', ''), result)
def test_hostname_unicode_sanitization(self):
hostname = u"\u7684.test.example.com"
self.assertEqual("test.example.com",
utils.sanitize_hostname(hostname))
def test_hostname_sanitize_periods(self):
hostname = "....test.example.com..."
self.assertEqual("test.example.com",
utils.sanitize_hostname(hostname))
def test_hostname_sanitize_dashes(self):
hostname = "----test.example.com---"
self.assertEqual("test.example.com",
utils.sanitize_hostname(hostname))
def test_hostname_sanitize_characters(self):
hostname = "(#@&$!(@*--#&91)(__=+--test-host.example!!.com-0+"
self.assertEqual("91----test-host.example.com-0",
utils.sanitize_hostname(hostname))
def test_hostname_translate(self):
hostname = "<}\x1fh\x10e\x08l\x02l\x05o\x12!{>"
self.assertEqual("hello", utils.sanitize_hostname(hostname))
def test_hostname_has_default(self):
hostname = u"\u7684hello"
defaultname = "Server-1"
self.assertEqual("hello", utils.sanitize_hostname(hostname,
defaultname))
def test_hostname_empty_has_default(self):
hostname = u"\u7684"
defaultname = "Server-1"
self.assertEqual(defaultname, utils.sanitize_hostname(hostname,
defaultname))
def test_hostname_empty_has_default_too_long(self):
hostname = u"\u7684"
defaultname = "a" * 64
self.assertEqual("a" * 63, utils.sanitize_hostname(hostname,
defaultname))
def test_hostname_empty_no_default(self):
hostname = u"\u7684"
self.assertEqual("", utils.sanitize_hostname(hostname))
def test_hostname_empty_minus_period(self):
hostname = "---..."
self.assertEqual("", utils.sanitize_hostname(hostname))
def test_hostname_with_space(self):
hostname = " a b c "
self.assertEqual("a-b-c", utils.sanitize_hostname(hostname))
def test_hostname_too_long(self):
hostname = "a" * 64
self.assertEqual(63, len(utils.sanitize_hostname(hostname)))
def test_hostname_truncated_no_hyphen(self):
hostname = "a" * 62
hostname = hostname + '-' + 'a'
res = utils.sanitize_hostname(hostname)
# we trim to 63 and then trim the trailing dash
self.assertEqual(62, len(res))
self.assertFalse(res.endswith('-'), 'The hostname ends with a -')
def test_generate_password(self):
password = utils.generate_password()
self.assertTrue([c for c in password if c in '0123456789'])
self.assertTrue([c for c in password
if c in 'abcdefghijklmnopqrstuvwxyz'])
self.assertTrue([c for c in password
if c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'])
def test_read_file_as_root(self):
def fake_execute(*args, **kwargs):
if args[1] == 'bad':
raise processutils.ProcessExecutionError()
return 'fakecontents', None
self.stub_out('nova.utils.execute', fake_execute)
contents = utils.read_file_as_root('good')
self.assertEqual(contents, 'fakecontents')
self.assertRaises(exception.FileNotFound,
utils.read_file_as_root, 'bad')
def test_temporary_chown(self):
def fake_execute(*args, **kwargs):
if args[0] == 'chown':
fake_execute.uid = args[1]
self.stub_out('nova.utils.execute', fake_execute)
with tempfile.NamedTemporaryFile() as f:
with utils.temporary_chown(f.name, owner_uid=2):
self.assertEqual(fake_execute.uid, 2)
self.assertEqual(fake_execute.uid, os.getuid())
def test_xhtml_escape(self):
self.assertEqual('"foo"', utils.xhtml_escape('"foo"'))
self.assertEqual(''foo'', utils.xhtml_escape("'foo'"))
self.assertEqual('&', utils.xhtml_escape('&'))
self.assertEqual('>', utils.xhtml_escape('>'))
self.assertEqual('<', utils.xhtml_escape('<'))
self.assertEqual('<foo>', utils.xhtml_escape('<foo>'))
def test_is_valid_ipv6_cidr(self):
self.assertTrue(utils.is_valid_ipv6_cidr("2600::/64"))
self.assertTrue(utils.is_valid_ipv6_cidr(
"abcd:ef01:2345:6789:abcd:ef01:192.168.254.254/48"))
self.assertTrue(utils.is_valid_ipv6_cidr(
"0000:0000:0000:0000:0000:0000:0000:0001/32"))
self.assertTrue(utils.is_valid_ipv6_cidr(
"0000:0000:0000:0000:0000:0000:0000:0001"))
self.assertFalse(utils.is_valid_ipv6_cidr("foo"))
self.assertFalse(utils.is_valid_ipv6_cidr("127.0.0.1"))
def test_get_shortened_ipv6(self):
self.assertEqual("abcd:ef01:2345:6789:abcd:ef01:c0a8:fefe",
utils.get_shortened_ipv6(
"abcd:ef01:2345:6789:abcd:ef01:192.168.254.254"))
self.assertEqual("::1", utils.get_shortened_ipv6(
"0000:0000:0000:0000:0000:0000:0000:0001"))
self.assertEqual("caca::caca:0:babe:201:102",
utils.get_shortened_ipv6(
"caca:0000:0000:caca:0000:babe:0201:0102"))
self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6,
"127.0.0.1")
self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6,
"failure")
def test_get_shortened_ipv6_cidr(self):
self.assertEqual("2600::/64", utils.get_shortened_ipv6_cidr(
"2600:0000:0000:0000:0000:0000:0000:0000/64"))
self.assertEqual("2600::/64", utils.get_shortened_ipv6_cidr(
"2600::1/64"))
self.assertRaises(netaddr.AddrFormatError,
utils.get_shortened_ipv6_cidr,
"127.0.0.1")
self.assertRaises(netaddr.AddrFormatError,
utils.get_shortened_ipv6_cidr,
"failure")
def test_safe_ip_format(self):
self.assertEqual("[::1]", utils.safe_ip_format("::1"))
self.assertEqual("127.0.0.1", utils.safe_ip_format("127.0.0.1"))
self.assertEqual("[::ffff:127.0.0.1]", utils.safe_ip_format(
"::ffff:127.0.0.1"))
self.assertEqual("localhost", utils.safe_ip_format("localhost"))
def test_format_remote_path(self):
self.assertEqual("[::1]:/foo/bar",
utils.format_remote_path("::1", "/foo/bar"))
self.assertEqual("127.0.0.1:/foo/bar",
utils.format_remote_path("127.0.0.1", "/foo/bar"))
self.assertEqual("[::ffff:127.0.0.1]:/foo/bar",
utils.format_remote_path("::ffff:127.0.0.1",
"/foo/bar"))
self.assertEqual("localhost:/foo/bar",
utils.format_remote_path("localhost", "/foo/bar"))
self.assertEqual("/foo/bar", utils.format_remote_path(None,
"/foo/bar"))
def test_get_hash_str(self):
base_str = b"foo"
base_unicode = u"foo"
value = hashlib.md5(base_str).hexdigest()
self.assertEqual(
value, utils.get_hash_str(base_str))
self.assertEqual(
value, utils.get_hash_str(base_unicode))
def test_use_rootwrap(self):
self.flags(disable_rootwrap=False, group='workarounds')
self.flags(rootwrap_config='foo')
cmd = utils.get_root_helper()
self.assertEqual('sudo nova-rootwrap foo', cmd)
@mock.patch('nova.utils.RootwrapProcessHelper')
def test_get_root_helper_proc(self, mock_proc_helper):
self.flags(use_rootwrap_daemon=False)
self.flags(rootwrap_config="/path/to/conf")
utils._get_rootwrap_helper()
mock_proc_helper.assert_called_once_with()
@mock.patch('nova.utils.RootwrapDaemonHelper')
def test_get_root_helper_daemon(self, mock_daemon_helper):
conf_path = '/path/to/conf'
self.flags(use_rootwrap_daemon=True)
self.flags(rootwrap_config=conf_path)
utils._get_rootwrap_helper()
mock_daemon_helper.assert_called_once_with(conf_path)
def test_use_sudo(self):
self.flags(disable_rootwrap=True, group='workarounds')
cmd = utils.get_root_helper()
self.assertEqual('sudo', cmd)
def test_ssh_execute(self):
expected_args = ('ssh', '-o', 'BatchMode=yes',
'remotehost', 'ls', '-l')
with mock.patch('nova.utils.execute') as mock_method:
utils.ssh_execute('remotehost', 'ls', '-l')
mock_method.assert_called_once_with(*expected_args)
class TestCachedFile(test.NoDBTestCase):
@mock.patch('os.path.getmtime', return_value=1)
def test_read_cached_file(self, getmtime):
utils._FILE_CACHE = {
'/this/is/a/fake': {"data": 1123, "mtime": 1}
}
fresh, data = utils.read_cached_file("/this/is/a/fake")
fdata = utils._FILE_CACHE['/this/is/a/fake']["data"]
self.assertEqual(fdata, data)
@mock.patch('os.path.getmtime', return_value=2)
def test_read_modified_cached_file(self, getmtime):
utils._FILE_CACHE = {
'/this/is/a/fake': {"data": 1123, "mtime": 1}
}
fake_contents = "lorem ipsum"
with mock.patch('six.moves.builtins.open',
mock.mock_open(read_data=fake_contents)):
fresh, data = utils.read_cached_file("/this/is/a/fake")
self.assertEqual(data, fake_contents)
self.assertTrue(fresh)
def test_delete_cached_file(self):
filename = '/this/is/a/fake/deletion/of/cached/file'
utils._FILE_CACHE = {
filename: {"data": 1123, "mtime": 1}
}
self.assertIn(filename, utils._FILE_CACHE)
utils.delete_cached_file(filename)
self.assertNotIn(filename, utils._FILE_CACHE)
def test_delete_cached_file_not_exist(self):
# We expect that if the cached file does not exist, no exception is raised.
filename = '/this/is/a/fake/deletion/attempt/of/not/cached/file'
self.assertNotIn(filename, utils._FILE_CACHE)
utils.delete_cached_file(filename)
self.assertNotIn(filename, utils._FILE_CACHE)
class RootwrapDaemonTestCase(test.NoDBTestCase):
@mock.patch('oslo_rootwrap.client.Client')
def test_get_client(self, mock_client):
mock_conf = mock.MagicMock()
utils.RootwrapDaemonHelper(mock_conf)
mock_client.assert_called_once_with(
["sudo", "nova-rootwrap-daemon", mock_conf])
@mock.patch('nova.utils.LOG.info')
def test_execute(self, mock_info):
mock_conf = mock.MagicMock()
daemon = utils.RootwrapDaemonHelper(mock_conf)
daemon.client = mock.MagicMock()
daemon.client.execute = mock.Mock(return_value=(0, None, None))
daemon.execute('a', 1, foo='bar', run_as_root=True)
daemon.client.execute.assert_called_once_with(['a', '1'], None)
mock_info.assert_has_calls([mock.call(
u'Executing RootwrapDaemonHelper.execute cmd=[%(cmd)r] '
u'kwargs=[%(kwargs)r]',
{'cmd': u'a 1', 'kwargs': {'run_as_root': True, 'foo': 'bar'}})])
def test_execute_with_kwargs(self):
mock_conf = mock.MagicMock()
daemon = utils.RootwrapDaemonHelper(mock_conf)
daemon.client = mock.MagicMock()
daemon.client.execute = mock.Mock(return_value=(0, None, None))
daemon.execute('a', 1, foo='bar', run_as_root=True, process_input=True)
daemon.client.execute.assert_called_once_with(['a', '1'], True)
def test_execute_fail(self):
mock_conf = mock.MagicMock()
daemon = utils.RootwrapDaemonHelper(mock_conf)
daemon.client = mock.MagicMock()
daemon.client.execute = mock.Mock(return_value=(-2, None, None))
self.assertRaises(processutils.ProcessExecutionError,
daemon.execute, 'b', 2)
def test_execute_pass_with_check_exit_code(self):
mock_conf = mock.MagicMock()
daemon = utils.RootwrapDaemonHelper(mock_conf)
daemon.client = mock.MagicMock()
daemon.client.execute = mock.Mock(return_value=(-2, None, None))
daemon.execute('b', 2, check_exit_code=[-2])
def test_execute_fail_with_retry(self):
mock_conf = mock.MagicMock()
daemon = utils.RootwrapDaemonHelper(mock_conf)
daemon.client = mock.MagicMock()
daemon.client.execute = mock.Mock(return_value=(-2, None, None))
self.assertRaises(processutils.ProcessExecutionError,
daemon.execute, 'b', 2, attempts=2)
daemon.client.execute.assert_has_calls(
[mock.call(['b', '2'], None),
mock.call(['b', '2'], None)])
@mock.patch('nova.utils.LOG.log')
def test_execute_fail_and_logging(self, mock_log):
mock_conf = mock.MagicMock()
daemon = utils.RootwrapDaemonHelper(mock_conf)
daemon.client = mock.MagicMock()
daemon.client.execute = mock.Mock(return_value=(-2, None, None))
self.assertRaises(processutils.ProcessExecutionError,
daemon.execute, 'b', 2,
attempts=2,
loglevel=logging.CRITICAL,
log_errors=processutils.LOG_ALL_ERRORS)
mock_log.assert_has_calls(
[
mock.call(logging.CRITICAL, u'Running cmd (subprocess): %s',
u'b 2'),
mock.call(logging.CRITICAL,
'CMD "%(sanitized_cmd)s" returned: %(return_code)s '
'in %(end_time)0.3fs',
{'sanitized_cmd': u'b 2', 'return_code': -2,
'end_time': mock.ANY}),
mock.call(logging.CRITICAL,
u'%(desc)r\ncommand: %(cmd)r\nexit code: %(code)r'
u'\nstdout: %(stdout)r\nstderr: %(stderr)r',
{'code': -2, 'cmd': u'b 2', 'stdout': u'None',
'stderr': u'None', 'desc': None}),
mock.call(logging.CRITICAL, u'%r failed. Retrying.', u'b 2'),
mock.call(logging.CRITICAL, u'Running cmd (subprocess): %s',
u'b 2'),
mock.call(logging.CRITICAL,
'CMD "%(sanitized_cmd)s" returned: %(return_code)s '
'in %(end_time)0.3fs',
{'sanitized_cmd': u'b 2', 'return_code': -2,
'end_time': mock.ANY}),
mock.call(logging.CRITICAL,
u'%(desc)r\ncommand: %(cmd)r\nexit code: %(code)r'
u'\nstdout: %(stdout)r\nstderr: %(stderr)r',
{'code': -2, 'cmd': u'b 2', 'stdout': u'None',
'stderr': u'None', 'desc': None}),
mock.call(logging.CRITICAL, u'%r failed. Not Retrying.',
u'b 2')]
)
def test_trycmd(self):
mock_conf = mock.MagicMock()
daemon = utils.RootwrapDaemonHelper(mock_conf)
daemon.client = mock.MagicMock()
daemon.client.execute = mock.Mock(return_value=(0, None, None))
daemon.trycmd('a', 1, foo='bar', run_as_root=True)
daemon.client.execute.assert_called_once_with(['a', '1'], None)
def test_trycmd_with_kwargs(self):
mock_conf = mock.MagicMock()
daemon = utils.RootwrapDaemonHelper(mock_conf)
daemon.execute = mock.Mock(return_value=('out', 'err'))
daemon.trycmd('a', 1, foo='bar', run_as_root=True,
loglevel=logging.WARN,
log_errors=True,
process_input=True,
delay_on_retry=False,
attempts=5,
check_exit_code=[200])
daemon.execute.assert_called_once_with('a', 1, attempts=5,
check_exit_code=[200],
delay_on_retry=False, foo='bar',
log_errors=True, loglevel=30,
process_input=True,
run_as_root=True)
def test_trycmd_fail(self):
mock_conf = mock.MagicMock()
daemon = utils.RootwrapDaemonHelper(mock_conf)
daemon.client = mock.MagicMock()
daemon.client.execute = mock.Mock(return_value=(-2, None, None))
expected_err = six.text_type('''\
Unexpected error while running command.
Command: a 1
Exit code: -2''')
out, err = daemon.trycmd('a', 1, foo='bar', run_as_root=True)
daemon.client.execute.assert_called_once_with(['a', '1'], None)
self.assertIn(expected_err, err)
def test_trycmd_fail_with_retry(self):
mock_conf = mock.MagicMock()
daemon = utils.RootwrapDaemonHelper(mock_conf)
daemon.client = mock.MagicMock()
daemon.client.execute = mock.Mock(return_value=(-2, None, None))
expected_err = six.text_type('''\
Unexpected error while running command.
Command: a 1
Exit code: -2''')
out, err = daemon.trycmd('a', 1, foo='bar', run_as_root=True,
attempts=3)
self.assertIn(expected_err, err)
daemon.client.execute.assert_has_calls(
[mock.call(['a', '1'], None),
mock.call(['a', '1'], None),
mock.call(['a', '1'], None)])
class VPNPingTestCase(test.NoDBTestCase):
"""Unit tests for utils.vpn_ping()."""
def setUp(self):
super(VPNPingTestCase, self).setUp()
self.port = 'fake'
self.address = 'fake'
self.session_id = 0x1234
self.fmt = '!BQxxxxxQxxxx'
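# Layout of self.fmt (descriptive note): '!' network byte order, 'B' the
# one-byte packet id, the first 'Q' an 8-byte field packed as 0 in
# fake_reply_packet(), pad bytes, the second 'Q' the session id, more pads.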
def fake_reply_packet(self, pkt_id=0x40):
return struct.pack(self.fmt, pkt_id, 0x0, self.session_id)
def setup_socket(self, mock_socket, return_value, side_effect=None):
socket_obj = mock.MagicMock()
if side_effect is not None:
socket_obj.recv.side_effect = side_effect
else:
socket_obj.recv.return_value = return_value
mock_socket.return_value = socket_obj
@mock.patch.object(socket, 'socket')
def test_vpn_ping_timeout(self, mock_socket):
"""Server doesn't reply within timeout."""
self.setup_socket(mock_socket, None, socket.timeout)
rc = utils.vpn_ping(self.address, self.port,
session_id=self.session_id)
self.assertFalse(rc)
@mock.patch.object(socket, 'socket')
def test_vpn_ping_bad_len(self, mock_socket):
"""Test a short/invalid server reply."""
self.setup_socket(mock_socket, 'fake_reply')
rc = utils.vpn_ping(self.address, self.port,
session_id=self.session_id)
self.assertFalse(rc)
@mock.patch.object(socket, 'socket')
def test_vpn_ping_bad_id(self, mock_socket):
"""Server sends an unknown packet ID."""
self.setup_socket(mock_socket, self.fake_reply_packet(pkt_id=0x41))
rc = utils.vpn_ping(self.address, self.port,
session_id=self.session_id)
self.assertFalse(rc)
@mock.patch.object(socket, 'socket')
def test_vpn_ping_ok(self, mock_socket):
self.setup_socket(mock_socket, self.fake_reply_packet())
rc = utils.vpn_ping(self.address, self.port,
session_id=self.session_id)
self.assertTrue(rc)
class MonkeyPatchTestCase(test.NoDBTestCase):
"""Unit test for utils.monkey_patch()."""
def setUp(self):
super(MonkeyPatchTestCase, self).setUp()
self.example_package = 'nova.tests.unit.monkey_patch_example.'
self.flags(
monkey_patch=True,
monkey_patch_modules=[self.example_package + 'example_a' + ':'
+ self.example_package + 'example_decorator'])
def test_monkey_patch(self):
utils.monkey_patch()
nova.tests.unit.monkey_patch_example.CALLED_FUNCTION = []
from nova.tests.unit.monkey_patch_example import example_a
from nova.tests.unit.monkey_patch_example import example_b
self.assertEqual('Example function', example_a.example_function_a())
exampleA = example_a.ExampleClassA()
exampleA.example_method()
ret_a = exampleA.example_method_add(3, 5)
self.assertEqual(ret_a, 8)
self.assertEqual('Example function', example_b.example_function_b())
exampleB = example_b.ExampleClassB()
exampleB.example_method()
ret_b = exampleB.example_method_add(3, 5)
self.assertEqual(ret_b, 8)
package_a = self.example_package + 'example_a.'
self.assertIn(package_a + 'example_function_a',
nova.tests.unit.monkey_patch_example.CALLED_FUNCTION)
self.assertIn(package_a + 'ExampleClassA.example_method',
nova.tests.unit.monkey_patch_example.CALLED_FUNCTION)
self.assertIn(package_a + 'ExampleClassA.example_method_add',
nova.tests.unit.monkey_patch_example.CALLED_FUNCTION)
package_b = self.example_package + 'example_b.'
self.assertNotIn(package_b + 'example_function_b',
nova.tests.unit.monkey_patch_example.CALLED_FUNCTION)
self.assertNotIn(package_b + 'ExampleClassB.example_method',
nova.tests.unit.monkey_patch_example.CALLED_FUNCTION)
self.assertNotIn(package_b + 'ExampleClassB.example_method_add',
nova.tests.unit.monkey_patch_example.CALLED_FUNCTION)
class MonkeyPatchDefaultTestCase(test.NoDBTestCase):
"""Unit test for default monkey_patch_modules value."""
def setUp(self):
super(MonkeyPatchDefaultTestCase, self).setUp()
self.flags(
monkey_patch=True)
def test_monkey_patch_default_mod(self):
# monkey_patch_modules is defined to be
# <module_to_patch>:<decorator_to_patch_with>
# Here we check that both parts of the default values are
# valid
for module in CONF.monkey_patch_modules:
m = module.split(':', 1)
# Check we can import the module to be patched
importlib.import_module(m[0])
# check the decorator is valid
decorator_name = m[1].rsplit('.', 1)
decorator_module = importlib.import_module(decorator_name[0])
getattr(decorator_module, decorator_name[1])
class AuditPeriodTest(test.NoDBTestCase):
def setUp(self):
super(AuditPeriodTest, self).setUp()
# a fairly random time to test with
self.useFixture(utils_fixture.TimeFixture(
datetime.datetime(second=23,
minute=12,
hour=8,
day=5,
month=3,
year=2012)))
def test_hour(self):
begin, end = utils.last_completed_audit_period(unit='hour')
self.assertEqual(begin, datetime.datetime(
hour=7,
day=5,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(
hour=8,
day=5,
month=3,
year=2012))
def test_hour_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='hour@10')
self.assertEqual(begin, datetime.datetime(
minute=10,
hour=7,
day=5,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(
minute=10,
hour=8,
day=5,
month=3,
year=2012))
def test_hour_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='hour@30')
self.assertEqual(begin, datetime.datetime(
minute=30,
hour=6,
day=5,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(
minute=30,
hour=7,
day=5,
month=3,
year=2012))
def test_day(self):
begin, end = utils.last_completed_audit_period(unit='day')
self.assertEqual(begin, datetime.datetime(
day=4,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(
day=5,
month=3,
year=2012))
def test_day_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='day@6')
self.assertEqual(begin, datetime.datetime(
hour=6,
day=4,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(
hour=6,
day=5,
month=3,
year=2012))
def test_day_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='day@10')
self.assertEqual(begin, datetime.datetime(
hour=10,
day=3,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(
hour=10,
day=4,
month=3,
year=2012))
def test_month(self):
begin, end = utils.last_completed_audit_period(unit='month')
self.assertEqual(begin, datetime.datetime(
day=1,
month=2,
year=2012))
self.assertEqual(end, datetime.datetime(
day=1,
month=3,
year=2012))
def test_month_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='month@2')
self.assertEqual(begin, datetime.datetime(
day=2,
month=2,
year=2012))
self.assertEqual(end, datetime.datetime(
day=2,
month=3,
year=2012))
def test_month_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='month@15')
self.assertEqual(begin, datetime.datetime(
day=15,
month=1,
year=2012))
self.assertEqual(end, datetime.datetime(
day=15,
month=2,
year=2012))
def test_year(self):
begin, end = utils.last_completed_audit_period(unit='year')
self.assertEqual(begin, datetime.datetime(
day=1,
month=1,
year=2011))
self.assertEqual(end, datetime.datetime(
day=1,
month=1,
year=2012))
def test_year_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='year@2')
self.assertEqual(begin, datetime.datetime(
day=1,
month=2,
year=2011))
self.assertEqual(end, datetime.datetime(
day=1,
month=2,
year=2012))
def test_year_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='year@6')
self.assertEqual(begin, datetime.datetime(
day=1,
month=6,
year=2010))
self.assertEqual(end, datetime.datetime(
day=1,
month=6,
year=2011))
class MkfsTestCase(test.NoDBTestCase):
@mock.patch('nova.utils.execute')
def test_mkfs_ext4(self, mock_execute):
utils.mkfs('ext4', '/my/block/dev')
mock_execute.assert_called_once_with('mkfs', '-t', 'ext4', '-F',
'/my/block/dev', run_as_root=False)
@mock.patch('nova.utils.execute')
def test_mkfs_msdos(self, mock_execute):
utils.mkfs('msdos', '/my/msdos/block/dev')
mock_execute.assert_called_once_with('mkfs', '-t', 'msdos',
'/my/msdos/block/dev', run_as_root=False)
@mock.patch('nova.utils.execute')
def test_mkfs_swap(self, mock_execute):
utils.mkfs('swap', '/my/swap/block/dev')
mock_execute.assert_called_once_with('mkswap', '/my/swap/block/dev',
run_as_root=False)
@mock.patch('nova.utils.execute')
def test_mkfs_ext4_withlabel(self, mock_execute):
utils.mkfs('ext4', '/my/block/dev', 'ext4-vol')
mock_execute.assert_called_once_with('mkfs', '-t', 'ext4', '-F',
'-L', 'ext4-vol', '/my/block/dev', run_as_root=False)
@mock.patch('nova.utils.execute')
def test_mkfs_msdos_withlabel(self, mock_execute):
utils.mkfs('msdos', '/my/msdos/block/dev', 'msdos-vol')
mock_execute.assert_called_once_with('mkfs', '-t', 'msdos',
'-n', 'msdos-vol', '/my/msdos/block/dev', run_as_root=False)
@mock.patch('nova.utils.execute')
def test_mkfs_swap_withlabel(self, mock_execute):
utils.mkfs('swap', '/my/swap/block/dev', 'swap-vol')
mock_execute.assert_called_once_with('mkswap', '-L', 'swap-vol',
'/my/swap/block/dev', run_as_root=False)
class LastBytesTestCase(test.NoDBTestCase):
"""Test the last_bytes() utility method."""
def setUp(self):
super(LastBytesTestCase, self).setUp()
self.f = six.BytesIO(b'1234567890')
def test_truncated(self):
self.f.seek(0, os.SEEK_SET)
out, remaining = utils.last_bytes(self.f, 5)
self.assertEqual(out, b'67890')
self.assertTrue(remaining > 0)
def test_read_all(self):
self.f.seek(0, os.SEEK_SET)
out, remaining = utils.last_bytes(self.f, 1000)
self.assertEqual(out, b'1234567890')
self.assertFalse(remaining > 0)
def test_seek_too_far_real_file(self):
# StringIO doesn't raise IOError if you seek past the start of the file.
with tempfile.TemporaryFile() as flo:
content = b'1234567890'
flo.write(content)
self.assertEqual((content, 0), utils.last_bytes(flo, 1000))
class MetadataToDictTestCase(test.NoDBTestCase):
def test_metadata_to_dict(self):
self.assertEqual(utils.metadata_to_dict(
[{'key': 'foo1', 'value': 'bar'},
{'key': 'foo2', 'value': 'baz'}]),
{'foo1': 'bar', 'foo2': 'baz'})
def test_metadata_to_dict_with_include_deleted(self):
metadata = [{'key': 'foo1', 'value': 'bar', 'deleted': 1442875429,
'other': 'stuff'},
{'key': 'foo2', 'value': 'baz', 'deleted': 0,
'other': 'stuff2'}]
self.assertEqual({'foo1': 'bar', 'foo2': 'baz'},
utils.metadata_to_dict(metadata,
include_deleted=True))
self.assertEqual({'foo2': 'baz'},
utils.metadata_to_dict(metadata,
include_deleted=False))
# verify correct default behavior
self.assertEqual(utils.metadata_to_dict(metadata),
utils.metadata_to_dict(metadata,
include_deleted=False))
def test_metadata_to_dict_empty(self):
self.assertEqual({}, utils.metadata_to_dict([]))
self.assertEqual({}, utils.metadata_to_dict([], include_deleted=True))
self.assertEqual({}, utils.metadata_to_dict([], include_deleted=False))
def test_dict_to_metadata(self):
def sort_key(adict):
return sorted(adict.items())
metadata = utils.dict_to_metadata(dict(foo1='bar1', foo2='bar2'))
expected = [{'key': 'foo1', 'value': 'bar1'},
{'key': 'foo2', 'value': 'bar2'}]
self.assertEqual(sorted(metadata, key=sort_key),
sorted(expected, key=sort_key))
def test_dict_to_metadata_empty(self):
self.assertEqual(utils.dict_to_metadata({}), [])
class ExpectedArgsTestCase(test.NoDBTestCase):
def test_passes(self):
@utils.expects_func_args('foo', 'baz')
def dec(f):
return f
@dec
def func(foo, bar, baz="lol"):
pass
# Call to ensure nothing errors
func(None, None)
def test_raises(self):
@utils.expects_func_args('foo', 'baz')
def dec(f):
return f
def func(bar, baz):
pass
self.assertRaises(TypeError, dec, func)
def test_var_no_of_args(self):
@utils.expects_func_args('foo')
def dec(f):
return f
@dec
def func(bar, *args, **kwargs):
pass
# Call to ensure nothing errors
func(None)
def test_more_layers(self):
@utils.expects_func_args('foo', 'baz')
def dec(f):
return f
def dec_2(f):
def inner_f(*a, **k):
return f()
return inner_f
@dec_2
def func(bar, baz):
pass
self.assertRaises(TypeError, dec, func)
class StringLengthTestCase(test.NoDBTestCase):
def test_check_string_length(self):
self.assertIsNone(utils.check_string_length(
'test', 'name', max_length=255))
self.assertRaises(exception.InvalidInput,
utils.check_string_length,
11, 'name', max_length=255)
self.assertRaises(exception.InvalidInput,
utils.check_string_length,
'', 'name', min_length=1)
self.assertRaises(exception.InvalidInput,
utils.check_string_length,
'a' * 256, 'name', max_length=255)
def test_check_string_length_noname(self):
self.assertIsNone(utils.check_string_length(
'test', max_length=255))
self.assertRaises(exception.InvalidInput,
utils.check_string_length,
11, max_length=255)
self.assertRaises(exception.InvalidInput,
utils.check_string_length,
'', min_length=1)
self.assertRaises(exception.InvalidInput,
utils.check_string_length,
'a' * 256, max_length=255)
class ValidateIntegerTestCase(test.NoDBTestCase):
def test_valid_inputs(self):
self.assertEqual(
utils.validate_integer(42, "answer"), 42)
self.assertEqual(
utils.validate_integer("42", "answer"), 42)
self.assertEqual(
utils.validate_integer(
"7", "lucky", min_value=7, max_value=8), 7)
self.assertEqual(
utils.validate_integer(
7, "lucky", min_value=6, max_value=7), 7)
self.assertEqual(
utils.validate_integer(
300, "Spartaaa!!!", min_value=300), 300)
self.assertEqual(
utils.validate_integer(
"300", "Spartaaa!!!", max_value=300), 300)
def test_invalid_inputs(self):
self.assertRaises(exception.InvalidInput,
utils.validate_integer,
"im-not-an-int", "not-an-int")
self.assertRaises(exception.InvalidInput,
utils.validate_integer,
3.14, "Pie")
self.assertRaises(exception.InvalidInput,
utils.validate_integer,
"299", "Sparta no-show",
min_value=300, max_value=300)
self.assertRaises(exception.InvalidInput,
utils.validate_integer,
55, "doing 55 in a 54",
max_value=54)
self.assertRaises(exception.InvalidInput,
utils.validate_integer,
six.unichr(129), "UnicodeError",
max_value=1000)
class ValidateNeutronConfiguration(test.NoDBTestCase):
def test_nova_network(self):
self.assertFalse(utils.is_neutron())
def test_neutron(self):
self.flags(use_neutron=True)
self.assertTrue(utils.is_neutron())
class AutoDiskConfigUtilTestCase(test.NoDBTestCase):
def test_is_auto_disk_config_disabled(self):
self.assertTrue(utils.is_auto_disk_config_disabled("Disabled "))
def test_is_auto_disk_config_disabled_none(self):
self.assertFalse(utils.is_auto_disk_config_disabled(None))
def test_is_auto_disk_config_disabled_false(self):
self.assertFalse(utils.is_auto_disk_config_disabled("false"))
class GetSystemMetadataFromImageTestCase(test.NoDBTestCase):
def get_image(self):
image_meta = {
"id": "fake-image",
"name": "fake-name",
"min_ram": 1,
"min_disk": 1,
"disk_format": "raw",
"container_format": "bare",
}
return image_meta
def get_flavor(self):
flavor = {
"id": "fake.flavor",
"root_gb": 10,
}
return flavor
def test_base_image_properties(self):
image = self.get_image()
# Verify that we inherit all the needed keys
sys_meta = utils.get_system_metadata_from_image(image)
for key in utils.SM_INHERITABLE_KEYS:
sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
self.assertEqual(image[key], sys_meta.get(sys_key))
# Verify that everything else is ignored
self.assertEqual(len(sys_meta), len(utils.SM_INHERITABLE_KEYS))
def test_inherit_image_properties(self):
image = self.get_image()
image["properties"] = {"foo1": "bar", "foo2": "baz"}
sys_meta = utils.get_system_metadata_from_image(image)
# Verify that we inherit all the image properties
for key, expected in six.iteritems(image["properties"]):
sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
self.assertEqual(sys_meta[sys_key], expected)
def test_skip_image_properties(self):
image = self.get_image()
image["properties"] = {
"foo1": "bar", "foo2": "baz",
"mappings": "wizz", "img_block_device_mapping": "eek",
}
sys_meta = utils.get_system_metadata_from_image(image)
# Verify that we inherit all the image properties
for key, expected in six.iteritems(image["properties"]):
sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
if key in utils.SM_SKIP_KEYS:
self.assertNotIn(sys_key, sys_meta)
else:
self.assertEqual(sys_meta[sys_key], expected)
def test_vhd_min_disk_image(self):
image = self.get_image()
flavor = self.get_flavor()
image["disk_format"] = "vhd"
sys_meta = utils.get_system_metadata_from_image(image, flavor)
# Verify that the min_disk property is taken from
# flavor's root_gb when using vhd disk format
sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, "min_disk")
self.assertEqual(sys_meta[sys_key], flavor["root_gb"])
def test_dont_inherit_empty_values(self):
image = self.get_image()
for key in utils.SM_INHERITABLE_KEYS:
image[key] = None
sys_meta = utils.get_system_metadata_from_image(image)
# Verify that the empty properties have not been inherited
for key in utils.SM_INHERITABLE_KEYS:
sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
self.assertNotIn(sys_key, sys_meta)
class GetImageFromSystemMetadataTestCase(test.NoDBTestCase):
def get_system_metadata(self):
sys_meta = {
"image_min_ram": 1,
"image_min_disk": 1,
"image_disk_format": "raw",
"image_container_format": "bare",
}
return sys_meta
def test_image_from_system_metadata(self):
sys_meta = self.get_system_metadata()
sys_meta["%soo1" % utils.SM_IMAGE_PROP_PREFIX] = "bar"
sys_meta["%soo2" % utils.SM_IMAGE_PROP_PREFIX] = "baz"
sys_meta["%simg_block_device_mapping" %
utils.SM_IMAGE_PROP_PREFIX] = "eek"
image = utils.get_image_from_system_metadata(sys_meta)
# Verify that we inherit all the needed keys
for key in utils.SM_INHERITABLE_KEYS:
sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
self.assertEqual(image[key], sys_meta.get(sys_key))
# Verify that we inherit the rest of metadata as properties
self.assertIn("properties", image)
for key, value in six.iteritems(image["properties"]):
sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
self.assertEqual(image["properties"][key], sys_meta[sys_key])
self.assertNotIn("img_block_device_mapping", image["properties"])
def test_dont_inherit_empty_values(self):
sys_meta = self.get_system_metadata()
for key in utils.SM_INHERITABLE_KEYS:
sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
sys_meta[sys_key] = None
image = utils.get_image_from_system_metadata(sys_meta)
# Verify that the empty properties have not been inherited
for key in utils.SM_INHERITABLE_KEYS:
self.assertNotIn(key, image)
class GetImageMetadataFromVolumeTestCase(test.NoDBTestCase):
def test_inherit_image_properties(self):
properties = {"fake_prop": "fake_value"}
volume = {"volume_image_metadata": properties}
image_meta = utils.get_image_metadata_from_volume(volume)
self.assertEqual(properties, image_meta["properties"])
def test_image_size(self):
volume = {"size": 10}
image_meta = utils.get_image_metadata_from_volume(volume)
self.assertEqual(10 * units.Gi, image_meta["size"])
def test_image_status(self):
volume = {}
image_meta = utils.get_image_metadata_from_volume(volume)
self.assertEqual("active", image_meta["status"])
def test_values_conversion(self):
properties = {"min_ram": "5", "min_disk": "7"}
volume = {"volume_image_metadata": properties}
image_meta = utils.get_image_metadata_from_volume(volume)
self.assertEqual(5, image_meta["min_ram"])
self.assertEqual(7, image_meta["min_disk"])
def test_suppress_not_image_properties(self):
properties = {"min_ram": "256", "min_disk": "128",
"image_id": "fake_id", "image_name": "fake_name",
"container_format": "ami", "disk_format": "ami",
"size": "1234", "checksum": "fake_checksum"}
volume = {"volume_image_metadata": properties}
image_meta = utils.get_image_metadata_from_volume(volume)
self.assertEqual({}, image_meta["properties"])
self.assertEqual(0, image_meta["size"])
# volume's properties should not be touched
self.assertNotEqual({}, properties)
class ResourceFilterTestCase(test.NoDBTestCase):
def _assert_filtering(self, res_list, filts, expected_tags):
actual_tags = utils.filter_and_format_resource_metadata('instance',
res_list, filts, 'metadata')
self.assertJsonEqual(expected_tags, actual_tags)
def test_filter_and_format_resource_metadata(self):
# Create some tags
# One overlapping pair, and one different key value pair
# i1 : foo=bar, bax=wibble
# i2 : foo=bar, baz=quux
# resources
i1 = {
'uuid': '1',
'metadata': {'foo': 'bar', 'bax': 'wibble'},
}
i2 = {
'uuid': '2',
'metadata': {'foo': 'bar', 'baz': 'quux'},
}
# Resources list
rl = [i1, i2]
# tags
i11 = {'instance_id': '1', 'key': 'foo', 'value': 'bar'}
i12 = {'instance_id': '1', 'key': 'bax', 'value': 'wibble'}
i21 = {'instance_id': '2', 'key': 'foo', 'value': 'bar'}
i22 = {'instance_id': '2', 'key': 'baz', 'value': 'quux'}
# No filter
self._assert_filtering(rl, [], [i11, i12, i21, i22])
self._assert_filtering(rl, {}, [i11, i12, i21, i22])
# Key search
# Both should have tags with key 'foo' and value 'bar'
self._assert_filtering(rl, {'key': 'foo', 'value': 'bar'}, [i11, i21])
# Both should have tags with key 'foo'
self._assert_filtering(rl, {'key': 'foo'}, [i11, i21])
# Only i2 should have tags with key 'baz' and value 'quux'
self._assert_filtering(rl, {'key': 'baz', 'value': 'quux'}, [i22])
# Only i2 should have tags with value 'quux'
self._assert_filtering(rl, {'value': 'quux'}, [i22])
# Empty list should be returned when no tags match
self._assert_filtering(rl, {'key': 'split', 'value': 'banana'}, [])
# Multiple values
# Only i2 should have tags with key 'baz' and values in the set
# ['quux', 'wibble']
self._assert_filtering(rl, {'key': 'baz', 'value': ['quux', 'wibble']},
[i22])
# But when specified as two different filters, no tags should be
# returned. This is because the filter means "return tags which
# have (key=baz AND value=quux) AND (key=baz AND value=wibble)".
self._assert_filtering(rl, [{'key': 'baz', 'value': 'quux'},
{'key': 'baz', 'value': 'wibble'}], [])
# Test for regex
self._assert_filtering(rl, {'value': '\\Aqu..*\\Z(?s)'}, [i22])
# Make sure bug #1365887 is fixed
i1['metadata']['key3'] = 'a'
self._assert_filtering(rl, {'value': 'banana'}, [])
class SafeTruncateTestCase(test.NoDBTestCase):
def test_exception_to_dict_with_long_message_3_bytes(self):
# Generate a Chinese string that encodes to 300 bytes in UTF-8: each of
# the 100 characters occupies 3 bytes. After truncating to 255 bytes, the
# encoded length should be exactly 255.
msg = u'\u8d75' * 100
truncated_msg = utils.safe_truncate(msg, 255)
byte_message = encodeutils.safe_encode(truncated_msg)
self.assertEqual(255, len(byte_message))
def test_exception_to_dict_with_long_message_2_bytes(self):
# Generate a Russian string that encodes to 300 bytes in UTF-8: each of
# the 150 characters occupies 2 bytes. After truncating, the encoded length
# should be 254, since truncation cannot split a 2-byte character.
msg = encodeutils.safe_decode('\xd0\x92' * 150)
truncated_msg = utils.safe_truncate(msg, 255)
byte_message = encodeutils.safe_encode(truncated_msg)
self.assertEqual(254, len(byte_message))
class SpawnNTestCase(test.NoDBTestCase):
def setUp(self):
super(SpawnNTestCase, self).setUp()
self.useFixture(context_fixture.ClearRequestContext())
self.spawn_name = 'spawn_n'
def test_spawn_n_no_context(self):
self.assertIsNone(common_context.get_current())
def _fake_spawn(func, *args, **kwargs):
# call the method to ensure no error is raised
func(*args, **kwargs)
self.assertEqual('test', args[0])
def fake(arg):
pass
with mock.patch.object(eventlet, self.spawn_name, _fake_spawn):
getattr(utils, self.spawn_name)(fake, 'test')
self.assertIsNone(common_context.get_current())
def test_spawn_n_context(self):
self.assertIsNone(common_context.get_current())
ctxt = context.RequestContext('user', 'project')
def _fake_spawn(func, *args, **kwargs):
# call the method to ensure no error is raised
func(*args, **kwargs)
self.assertEqual(ctxt, args[0])
self.assertEqual('test', kwargs['kwarg1'])
def fake(context, kwarg1=None):
pass
with mock.patch.object(eventlet, self.spawn_name, _fake_spawn):
getattr(utils, self.spawn_name)(fake, ctxt, kwarg1='test')
self.assertEqual(ctxt, common_context.get_current())
def test_spawn_n_context_different_from_passed(self):
self.assertIsNone(common_context.get_current())
ctxt = context.RequestContext('user', 'project')
ctxt_passed = context.RequestContext('user', 'project',
overwrite=False)
self.assertEqual(ctxt, common_context.get_current())
def _fake_spawn(func, *args, **kwargs):
# call the method to ensure no error is raised
func(*args, **kwargs)
self.assertEqual(ctxt_passed, args[0])
self.assertEqual('test', kwargs['kwarg1'])
def fake(context, kwarg1=None):
pass
with mock.patch.object(eventlet, self.spawn_name, _fake_spawn):
getattr(utils, self.spawn_name)(fake, ctxt_passed, kwarg1='test')
self.assertEqual(ctxt, common_context.get_current())
class SpawnTestCase(SpawnNTestCase):
def setUp(self):
super(SpawnTestCase, self).setUp()
self.spawn_name = 'spawn'
class UT8TestCase(test.NoDBTestCase):
def test_none_value(self):
self.assertIsInstance(utils.utf8(None), type(None))
def test_bytes_value(self):
some_value = b"fake data"
return_value = utils.utf8(some_value)
# check that the type of the returned value hasn't changed
self.assertIsInstance(return_value, type(some_value))
self.assertEqual(some_value, return_value)
def test_not_text_type(self):
return_value = utils.utf8(1)
self.assertEqual(b"1", return_value)
self.assertIsInstance(return_value, six.binary_type)
def test_text_type_with_encoding(self):
some_value = 'test\u2026config'
self.assertEqual(some_value, utils.utf8(some_value).decode("utf-8"))
|
the-stack_0_1712 | # Script to sort the large NZ Charities files into smaller files by year
# Alasdair Rutherford, Diarmuid McDonnell
# Created: 29 March 2018
# Last edited: Github history - https://github.com/DiarmuidM/mission_accomp/tree/master/syntax/data_collection/nz
# Edited by Tom Wallace
# Because of download limitations of the API, the larger files had to be downloaded in chunks. This file re-combines the chunked files for 'vOfficerOrganisations' and 'GrpOrgAllReturns'.
# Data guide: https://www.charities.govt.nz/charities-in-new-zealand/the-charities-register/open-data/
#######Import packages#######
import csv
import re
import requests
import os
import os.path
import errno
from time import sleep
import sys
sys.path.insert(0, './Functions_scripts') # Allows setting a different path for the scripts being called below (but only if it branches off of the root dir)
from downloaddate_function import downloaddate, longtime
from nz_rowfixer import row_fixer
from loggenerator import gen_log
#######Toggles#######
stata = True
#######Initialization#######
# Run the downloaddate function to get the date
ddate = downloaddate()
log_starttime = longtime() # When the script starts for the logfile
# Path to save the downloaded data
datapath = './data_raw/' # Dropbox folder for project
# Variables to store OData endpoint and database tables #
# Add $returnall=true to every url
baseurl = 'http://www.odata.charities.govt.nz/'
register = 'Organisations' # This is returned as xml due to the number of records - $returnall=true
grpannreturns = 'GrpOrgAllReturns'
#'GrpOrgAllReturns?$returnall=true' # This is returned as xml due to the number of records - $returnall=true
activities = 'Activities'
area = 'AreaOfOperations'
beneficiaries = 'Beneficiaries'
group = 'Groups'
officers = 'Officers'
sectors = 'Sectors'
funds = 'SourceOfFunds'
vorgs = 'vOrganisations'
voff = 'vOfficerOrganisations'
#######Functions#######
# Split the downloaded annual return files into calendar years
processedfiles=[]
def splitfilesbyyear(filename, data, column, length, width, splityear=0, splitmonth=0, splitday=0, splitemp=0, spliteymonth=0):
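# Parameter notes (added for clarity; inferred from the calls in the main
# program below): `column` is the 0-indexed CSV column holding the date,
# `length` is how many trailing characters of that value make up the year,
# `width` is the expected number of columns per row, and the split* arguments
# select which chunked input file to read, e.g.
# splitfilesbyyear(filename, voff, 14, 2, filewidth, splityear=2009)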
inputfilepath = datapath + '/' + data + '/' + 'nz_' + data + '_y' + str(splityear) + '_m' + str(spliteymonth) + '_p' + str(splitemp) + '.csv'
processedfiles.append(inputfilepath)
with open(inputfilepath, 'rb') as file:
filedata = file.read()
# Replace any non-ASCII bytes with an underscore
pattern = re.compile(b'[^\x00-\x7F]')
filedata = re.sub(pattern, b'_', filedata) #filedata.replace('[^\x00-\x7F]', '_')
# Write the file out again
with open(datapath + '/' + 'nz_temp.csv', 'wb') as file:
file.write(filedata)
outputfiles = {}
for year in range(2007,2020):
outputfiles[str(year)] = open(filename + str(year) + '.csv', 'a', newline='')
outputfiles[str(year) + 'a'] = csv.writer(outputfiles[str(year)])
outputfiles['error'] = open(filename + 'error' + '.csv', 'a', newline='')
outputfiles['errora'] = csv.writer(outputfiles['error'])
with open(datapath + '/' + 'nz_temp.csv', 'r', newline='') as inCSVfile:
reader = csv.reader(inCSVfile)
print('-')
print(inputfilepath)
startrow = 1
rowcounter=0
while rowcounter<startrow:
next(reader)
rowcounter+=1
for row in reader:
#if len(row)==width: # this was the simple check before the function was written, can switch back to it by commenting out the 2 lines below if the fixer breaks things
out_row, fixed = row_fixer(row, width)
if fixed==True:
try:
yearend = out_row[column][len(out_row[column])-length:] # Take the year out of the YearEnded column
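# e.g. (illustrative only, assuming a value such as '31/03/2012' and
# length=4) this slice yields '2012'; with length=2 it would yield '12'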
year = int(yearend)
#yearend = yearend[2 - len(yearend):]
if year>=0 and year <=20:
yearend = '20' + yearend
elif year >20 and year<=99:
yearend = 2000
except:
yearend=0
#print(inputfilepath, rowcounter)
#print(' ', row[column], ' | -', yearend, '-')
else:
yearend=0
# Recode the missing values for Stata
if stata == True:
out_row = [x if x != 'Null' else '.' for x in out_row]
if int(yearend) in range(2007, 2020): # ['2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017']:
outputfiles[str(yearend) + 'a'].writerow(out_row)
#print('.', end='')
else:
outputfiles['errora'].writerow(out_row)
#print('*', end='')
rowcounter+=1
for year in range(2007,2020): # close every year file opened above
outputfiles[str(year)].close()
outputfiles['error'].close()
# Creates the header rows for the files by year
def createannreturnfiles(filename, source):
with open(source, 'r', newline='') as inCSVfile:
reader = csv.reader(inCSVfile)
row = reader.__next__()
for year in range(2007,2020):
outputfile = open(filename + str(year) + '.csv', 'w', newline='')
outputfilew= csv.writer(outputfile)
outputfilew.writerow(row)
outputfile.close()
outputfile = open(filename + 'error' + '.csv', 'w', newline='')
outputfilew= csv.writer(outputfile)
outputfilew.writerow(row)
outputfile.close()
return len(row)
#######Main program#######
search = []
search_big = [voff, grpannreturns] # []
writtenfiles=[]
for data in search_big:
filename = datapath + '/' + data +'/' + data + '_yr'
# nz_vOfficerOrganisations_y2017_m0_p0_20180330.csv
for year in range(2007,2020): # This loop creates the output names manually for the log file, so it will need to be updated when 2020 is added - making it automatic was taking too long, but it could be done
writtenfiles.append(filename+str(year)+'.csv')
writtenfiles.append(filename+'error'+'.csv')
filewidth = createannreturnfiles(filename, datapath + '/' + data + '/' + 'nz_' + data + '_y2017' + '_m0' + '_p0' + '.csv')
print('Organise', data, 'by year')
for year in [2008]:
if data == grpannreturns:
print('')
print('grpannreturns', year)
for month in range(1,13,1):
splitfilesbyyear(filename, data, 103, 4, filewidth, splityear=year, spliteymonth=month, splitemp=1) # Using column 103 (index from 0) 'CZ' to regroup the files 'YearEnded'
splitfilesbyyear(filename, data, 103, 4, filewidth, splityear=year, spliteymonth=month, splitemp=2)
elif data == voff:
print('')
print('voff')
for month in range(1,13,1):
splitfilesbyyear(filename, data, 14, 2, filewidth, splityear=year, spliteymonth=month) # Using column 14 (index from 0) 'O' to regroup the files 'PositionAppointmentDate'
#logcsv.writerow([datetime.today().strftime('%Y%m%d %H:%M'), filename, searchurl, success, fails]) # record in logfile
for year in [2007, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017]:
if data == grpannreturns:
print('')
print('grpannreturns', year)
splitfilesbyyear(filename, data, 103, 4, filewidth, splityear=year)
elif data == voff:
print('')
print('voff', year)
splitfilesbyyear(filename, data, 14, 2, filewidth, splityear=year) # Get csv
print('')
print('Done sorting ' + data)
print('------------------------------------------------------------------------------')
os.remove(datapath + '/' + 'nz_temp.csv')
print('Removed temp file:',datapath + '/' + 'nz_temp.csv')
#Log generator
finishtime = longtime() # Get ending time
scriptname = os.path.basename(__file__) # Get the current scriptname as a variable
scriptpath = (os.path.dirname(os.path.realpath(__file__))) # Get the absolute dir the script is in
scriptdesc = 'Because of download limitations of the API, the larger files had to be downloaded in chunks. This file re-combines the chunked files for "vOfficerOrganisations" and "GrpOrgAllReturns".'
processedfiles = processedfiles # Get the input file details
writtenfiles = writtenfiles # Get list of created files WARNING: this list has been created manually and will not update in future years
settings_toggles = {'stata': stata}
gen_log(log_starttime, finishtime, scriptname, scriptpath, scriptdesc, processedfiles, writtenfiles, str(settings_toggles)) # Pass info to log file generator
print('\nAll done!') |
the-stack_0_1713 | import unittest
import os
from conans.paths import EXPORT_SOURCES_DIR_OLD
from conans.util.files import tar_extract
from conans.test.utils.tools import TestServer, TestClient
from conans.model.ref import ConanFileReference
from conans.test.utils.test_files import temp_folder
class DoNotKeepOldExportSourcesLayoutTest(unittest.TestCase):
def test_basic(self):
""" check that we do not generate anymore tgz with .c_src.
also, they are not present any more in the cache layout, even if they come from a .c_src
tgz server file
"""
test_server = TestServer()
servers = {"default": test_server}
client = TestClient(servers=servers, users={"default": [("lasote", "mypass")]})
client.save({"conanfile.py": """from conans import ConanFile
class MyPkg(ConanFile):
name= "Pkg"
version = "0.1"
exports_sources = "*.txt"
""", "myfile.txt": "Hello world"})
client.run("export . lasote/testing")
client.run("upload Pkg/0.1@lasote/testing")
client.run("remove * -f")
client.run("search")
self.assertIn("There are no packages", client.user_io.out)
conan_reference = ConanFileReference.loads("Pkg/0.1@lasote/testing")
path = test_server.paths.export(conan_reference)
sources_tgz = os.path.join(path, "conan_sources.tgz")
self.assertTrue(os.path.exists(sources_tgz))
folder = temp_folder()
with open(sources_tgz, 'rb') as file_handler:
tar_extract(file_handler, folder)
self.assertEqual(os.listdir(folder), ["myfile.txt"])
# Now install again
client.run("install Pkg/0.1@lasote/testing --build=missing")
export = client.client_cache.export(conan_reference)
self.assertNotIn(EXPORT_SOURCES_DIR_OLD, os.listdir(export))
export_sources = client.client_cache.export_sources(conan_reference)
self.assertEqual(os.listdir(export_sources), ["myfile.txt"])
|
the-stack_0_1714 | from copy import copy, deepcopy
from typing import (
Any,
Collection,
Dict,
List,
NamedTuple,
Optional,
Set,
Union,
cast,
)
from ..error import GraphQLError
from ..language import ast
from ..pyutils import inspect, is_collection, is_description, FrozenList
from .definition import (
GraphQLAbstractType,
GraphQLInterfaceType,
GraphQLInputObjectType,
GraphQLNamedType,
GraphQLObjectType,
GraphQLUnionType,
GraphQLType,
GraphQLWrappingType,
get_named_type,
is_input_object_type,
is_interface_type,
is_object_type,
is_union_type,
is_wrapping_type,
)
from .directives import GraphQLDirective, specified_directives, is_directive
from .introspection import introspection_types
__all__ = ["GraphQLSchema", "is_schema", "assert_schema"]
TypeMap = Dict[str, GraphQLNamedType]
class InterfaceImplementations(NamedTuple):
objects: List[GraphQLObjectType]
interfaces: List[GraphQLInterfaceType]
class GraphQLSchema:
"""Schema Definition
A Schema is created by supplying the root types of each type of operation, query
and mutation (optional). A schema definition is then supplied to the validator
and executor.
Example::
MyAppSchema = GraphQLSchema(
query=MyAppQueryRootType,
mutation=MyAppMutationRootType)
Note: When the schema is constructed, by default only the types that are
reachable by traversing the root types are included; other types must be
explicitly referenced.
Example::
character_interface = GraphQLInterfaceType('Character', ...)
human_type = GraphQLObjectType(
'Human', interfaces=[character_interface], ...)
droid_type = GraphQLObjectType(
'Droid', interfaces: [character_interface], ...)
schema = GraphQLSchema(
query=GraphQLObjectType('Query',
fields={'hero': GraphQLField(character_interface, ....)}),
...
# Since this schema references only the `Character` interface it's
# necessary to explicitly list the types that implement it if
# you want them to be included in the final schema.
types=[human_type, droid_type])
Note: If a list of ``directives`` is provided to GraphQLSchema, that will be the
exact list of directives represented and allowed. If ``directives`` is not provided,
then a default set of the specified directives (e.g. @include and @skip) will be
used. If you wish to provide *additional* directives to these specified directives,
you must explicitly declare them. Example::
MyAppSchema = GraphQLSchema(
...
directives=specified_directives + [my_custom_directive])
"""
query_type: Optional[GraphQLObjectType]
mutation_type: Optional[GraphQLObjectType]
subscription_type: Optional[GraphQLObjectType]
type_map: TypeMap
directives: FrozenList[GraphQLDirective]
description: Optional[str]
extensions: Optional[Dict[str, Any]]
ast_node: Optional[ast.SchemaDefinitionNode]
extension_ast_nodes: Optional[FrozenList[ast.SchemaExtensionNode]]
_implementations_map: Dict[str, InterfaceImplementations]
_sub_type_map: Dict[str, Set[str]]
_validation_errors: Optional[List[GraphQLError]]
def __init__(
self,
query: Optional[GraphQLObjectType] = None,
mutation: Optional[GraphQLObjectType] = None,
subscription: Optional[GraphQLObjectType] = None,
types: Optional[Collection[GraphQLNamedType]] = None,
directives: Optional[Collection[GraphQLDirective]] = None,
description: Optional[str] = None,
extensions: Optional[Dict[str, Any]] = None,
ast_node: Optional[ast.SchemaDefinitionNode] = None,
extension_ast_nodes: Optional[Collection[ast.SchemaExtensionNode]] = None,
assume_valid: bool = False,
) -> None:
"""Initialize GraphQL schema.
If this schema was built from a source known to be valid, then it may be marked
with ``assume_valid`` to avoid an additional type system validation.
"""
self._validation_errors = [] if assume_valid else None
# Check for common mistakes during construction to produce clear and early
# error messages, but we leave the specific tests for the validation.
if query and not isinstance(query, GraphQLType):
raise TypeError("Expected query to be a GraphQL type.")
if mutation and not isinstance(mutation, GraphQLType):
raise TypeError("Expected mutation to be a GraphQL type.")
if subscription and not isinstance(subscription, GraphQLType):
raise TypeError("Expected subscription to be a GraphQL type.")
if types is None:
types = []
else:
if not is_collection(types) or not all(
isinstance(type_, GraphQLType) for type_ in types
):
raise TypeError(
"Schema types must be specified as a collection of GraphQL types."
)
if directives is not None:
# noinspection PyUnresolvedReferences
if not is_collection(directives):
raise TypeError("Schema directives must be a collection.")
if not isinstance(directives, FrozenList):
directives = FrozenList(directives)
if description is not None and not is_description(description):
raise TypeError("Schema description must be a string.")
if extensions is not None and (
not isinstance(extensions, dict)
or not all(isinstance(key, str) for key in extensions)
):
raise TypeError("Schema extensions must be a dictionary with string keys.")
if ast_node and not isinstance(ast_node, ast.SchemaDefinitionNode):
raise TypeError("Schema AST node must be a SchemaDefinitionNode.")
if extension_ast_nodes:
if not is_collection(extension_ast_nodes) or not all(
isinstance(node, ast.SchemaExtensionNode)
for node in extension_ast_nodes
):
raise TypeError(
"Schema extension AST nodes must be specified"
" as a collection of SchemaExtensionNode instances."
)
if not isinstance(extension_ast_nodes, FrozenList):
extension_ast_nodes = FrozenList(extension_ast_nodes)
self.description = description
self.extensions = extensions
self.ast_node = ast_node
self.extension_ast_nodes = (
cast(FrozenList[ast.SchemaExtensionNode], extension_ast_nodes)
if extension_ast_nodes
else None
)
self.query_type = query
self.mutation_type = mutation
self.subscription_type = subscription
# Provide specified directives (e.g. @include and @skip) by default
self.directives = (
specified_directives
if directives is None
else cast(FrozenList[GraphQLDirective], directives)
)
# To preserve the order of user-provided types, we add them to the set of
# "collected" types first, so that `collect_referenced_types` ignores them.
if types:
all_referenced_types = TypeSet.with_initial_types(types)
collect_referenced_types = all_referenced_types.collect_referenced_types
for type_ in types:
# When we are ready to process this type, we remove it from "collected"
# types and then add it together with all dependent types in the correct
# position.
del all_referenced_types[type_]
collect_referenced_types(type_)
else:
all_referenced_types = TypeSet()
collect_referenced_types = all_referenced_types.collect_referenced_types
if query:
collect_referenced_types(query)
if mutation:
collect_referenced_types(mutation)
if subscription:
collect_referenced_types(subscription)
for directive in self.directives:
# Directives are not validated until validate_schema() is called.
if is_directive(directive):
for arg in directive.args.values():
collect_referenced_types(arg.type)
collect_referenced_types(introspection_types["__Schema"])
# Storing the resulting map for reference by the schema.
type_map: TypeMap = {}
self.type_map = type_map
self._sub_type_map = {}
# Keep track of all implementations by interface name.
implementations_map: Dict[str, InterfaceImplementations] = {}
self._implementations_map = implementations_map
for named_type in all_referenced_types:
if not named_type:
continue
type_name = getattr(named_type, "name", None)
if not type_name:
raise TypeError(
"One of the provided types for building the Schema"
" is missing a name.",
)
if type_name in type_map:
raise TypeError(
"Schema must contain uniquely named types"
f" but contains multiple types named '{type_name}'."
)
type_map[type_name] = named_type
if is_interface_type(named_type):
named_type = cast(GraphQLInterfaceType, named_type)
# Store implementations by interface.
for iface in named_type.interfaces:
if is_interface_type(iface):
iface = cast(GraphQLInterfaceType, iface)
if iface.name in implementations_map:
implementations = implementations_map[iface.name]
else:
implementations = implementations_map[
iface.name
] = InterfaceImplementations(objects=[], interfaces=[])
implementations.interfaces.append(named_type)
elif is_object_type(named_type):
named_type = cast(GraphQLObjectType, named_type)
# Store implementations by objects.
for iface in named_type.interfaces:
if is_interface_type(iface):
iface = cast(GraphQLInterfaceType, iface)
if iface.name in implementations_map:
implementations = implementations_map[iface.name]
else:
implementations = implementations_map[
iface.name
] = InterfaceImplementations(objects=[], interfaces=[])
implementations.objects.append(named_type)
def to_kwargs(self) -> Dict[str, Any]:
return dict(
query=self.query_type,
mutation=self.mutation_type,
subscription=self.subscription_type,
types=FrozenList(self.type_map.values()) or None,
directives=self.directives[:],
description=self.description,
extensions=self.extensions,
ast_node=self.ast_node,
extension_ast_nodes=self.extension_ast_nodes or FrozenList(),
assume_valid=self._validation_errors is not None,
)
def __copy__(self) -> "GraphQLSchema": # pragma: no cover
return self.__class__(**self.to_kwargs())
def __deepcopy__(self, memo_: Dict) -> "GraphQLSchema":
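# Descriptive note: copy every non-built-in named type and every custom
# directive, then remap internal references so the copied schema shares no
# mutable type objects with the original.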
from ..type import (
is_introspection_type,
is_specified_scalar_type,
is_specified_directive,
)
type_map: TypeMap = {
name: copy(type_)
for name, type_ in self.type_map.items()
if not is_introspection_type(type_) and not is_specified_scalar_type(type_)
}
types = type_map.values()
for type_ in types:
remap_named_type(type_, type_map)
directives = [
directive if is_specified_directive(directive) else copy(directive)
for directive in self.directives
]
return self.__class__(
self.query_type and cast(GraphQLObjectType, type_map[self.query_type.name]),
self.mutation_type
and cast(GraphQLObjectType, type_map[self.mutation_type.name]),
self.subscription_type
and cast(GraphQLObjectType, type_map[self.subscription_type.name]),
types,
directives,
self.description,
extensions=deepcopy(self.extensions),
ast_node=deepcopy(self.ast_node),
extension_ast_nodes=deepcopy(self.extension_ast_nodes),
assume_valid=True,
)
def get_type(self, name: str) -> Optional[GraphQLNamedType]:
return self.type_map.get(name)
def get_possible_types(
self, abstract_type: GraphQLAbstractType
) -> List[GraphQLObjectType]:
"""Get list of all possible concrete types for given abstract type."""
return (
cast(GraphQLUnionType, abstract_type).types
if is_union_type(abstract_type)
else self.get_implementations(
cast(GraphQLInterfaceType, abstract_type)
).objects
)
def get_implementations(
self, interface_type: GraphQLInterfaceType
) -> InterfaceImplementations:
return self._implementations_map.get(
interface_type.name, InterfaceImplementations(objects=[], interfaces=[])
)
def is_possible_type(
self, abstract_type: GraphQLAbstractType, possible_type: GraphQLObjectType
) -> bool:
"""Check whether a concrete type is possible for an abstract type.
Deprecated: Use is_sub_type() instead.
"""
return self.is_sub_type(abstract_type, possible_type)
def is_sub_type(
self,
abstract_type: GraphQLAbstractType,
maybe_sub_type: GraphQLNamedType,
) -> bool:
"""Check whether a type is a subtype of a given abstract type."""
types = self._sub_type_map.get(abstract_type.name)
if types is None:
types = set()
add = types.add
if is_union_type(abstract_type):
for type_ in cast(GraphQLUnionType, abstract_type).types:
add(type_.name)
else:
implementations = self.get_implementations(
cast(GraphQLInterfaceType, abstract_type)
)
for type_ in implementations.objects:
add(type_.name)
for type_ in implementations.interfaces:
add(type_.name)
self._sub_type_map[abstract_type.name] = types
return maybe_sub_type.name in types
def get_directive(self, name: str) -> Optional[GraphQLDirective]:
for directive in self.directives:
if directive.name == name:
return directive
return None
@property
def validation_errors(self) -> Optional[List[GraphQLError]]:
return self._validation_errors
class TypeSet(Dict[GraphQLNamedType, None]):
"""An ordered set of types that can be collected starting from initial types."""
@classmethod
def with_initial_types(cls, types: Collection[GraphQLType]) -> "TypeSet":
return cast(TypeSet, super().fromkeys(types))
def collect_referenced_types(self, type_: GraphQLType) -> None:
"""Recursive function supplementing the type starting from an initial type."""
named_type = get_named_type(type_)
if named_type in self:
return
self[named_type] = None
collect_referenced_types = self.collect_referenced_types
if is_union_type(named_type):
named_type = cast(GraphQLUnionType, named_type)
for member_type in named_type.types:
collect_referenced_types(member_type)
elif is_object_type(named_type) or is_interface_type(named_type):
named_type = cast(
Union[GraphQLObjectType, GraphQLInterfaceType], named_type
)
for interface_type in named_type.interfaces:
collect_referenced_types(interface_type)
for field in named_type.fields.values():
collect_referenced_types(field.type)
for arg in field.args.values():
collect_referenced_types(arg.type)
elif is_input_object_type(named_type):
named_type = cast(GraphQLInputObjectType, named_type)
for field in named_type.fields.values():
collect_referenced_types(field.type)
def is_schema(schema: Any) -> bool:
"""Test if the given value is a GraphQL schema."""
return isinstance(schema, GraphQLSchema)
def assert_schema(schema: Any) -> GraphQLSchema:
if not is_schema(schema):
raise TypeError(f"Expected {inspect(schema)} to be a GraphQL schema.")
return cast(GraphQLSchema, schema)
def remapped_type(type_: GraphQLType, type_map: TypeMap) -> GraphQLType:
"""Get a copy of the given type that uses this type map."""
if is_wrapping_type(type_):
type_ = cast(GraphQLWrappingType, type_)
return type_.__class__(remapped_type(type_.of_type, type_map))
type_ = cast(GraphQLNamedType, type_)
return type_map.get(type_.name, type_)
def remap_named_type(type_: GraphQLNamedType, type_map: TypeMap) -> None:
"""Change all references in the given named type to use this type map."""
if is_union_type(type_):
type_ = cast(GraphQLUnionType, type_)
type_.types = [
type_map.get(member_type.name, member_type) for member_type in type_.types
]
elif is_object_type(type_) or is_interface_type(type_):
type_ = cast(Union[GraphQLObjectType, GraphQLInterfaceType], type_)
type_.interfaces = [
type_map.get(interface_type.name, interface_type)
for interface_type in type_.interfaces
]
fields = type_.fields
for field_name, field in fields.items():
field = copy(field)
field.type = remapped_type(field.type, type_map)
args = field.args
for arg_name, arg in args.items():
arg = copy(arg)
arg.type = remapped_type(arg.type, type_map)
args[arg_name] = arg
fields[field_name] = field
elif is_input_object_type(type_):
type_ = cast(GraphQLInputObjectType, type_)
fields = type_.fields
for field_name, field in fields.items():
field = copy(field)
field.type = remapped_type(field.type, type_map)
fields[field_name] = field
|
the-stack_0_1717 | from django.db import migrations, models
def forwards_func(apps, schema_editor):
Beer = apps.get_model("beerfest", "Beer")
for beer in Beer.objects.all():
old_abv = beer.abv
if old_abv is None:
continue # skip null values
new_abv = old_abv / 10
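# e.g. a stored value of 45 becomes 4.5 -- the old integer field appears
# to have held ABV in tenths of a percent (illustrative note)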
if new_abv >= 100:
raise ValueError(
f"ABV {new_abv:.1f}% greater than or equal to 100%. "
"Aborting migration"
)
beer.abv = new_abv
beer.save()
def reverse_func(apps, schema_editor):
Beer = apps.get_model("beerfest", "Beer")
for beer in Beer.objects.all():
old_abv = beer.abv
if old_abv is None:
continue # skip null values
beer.abv = int(old_abv * 10)
beer.save()
class Migration(migrations.Migration):
dependencies = [
('beerfest', '0004_auto_20181026_0212'),
]
operations = [
migrations.AlterField(
model_name='beer',
name='abv',
field=models.DecimalField(
blank=True, decimal_places=1, max_digits=5, null=True
),
),
migrations.RunPython(forwards_func, reverse_func),
migrations.AlterField(
model_name='beer',
name='abv',
field=models.DecimalField(
blank=True, decimal_places=1, max_digits=3, null=True),
),
]
|
the-stack_0_1719 | import collections
import sys
from .compat import recursive_repr, abc
from _pmem import ffi # XXX refactor to make this import unneeded?
# XXX: refactor to allocate this instead of hardcoding it.
LIST_POBJPTR_ARRAY_TYPE_NUM = 30
class PersistentList(abc.MutableSequence):
"""Persistent version of the 'list' type."""
# XXX locking!
# XXX All bookkeeping attrs should be _v_xxxx so that all other attrs
# (other than _p_mm) can be made persistent.
def __init__(self, *args, **kw):
if not args:
return
if len(args) != 1:
raise TypeError("PersistentList takes at most 1"
" argument, {} given".format(len(args)))
self.extend(args[0])
def _p_new(self, manager):
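# Descriptive note: allocate a fresh PListObject in persistent memory and
# tag it with the type code registered for PersistentList.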
mm = self._p_mm = manager
with mm.transaction():
# XXX Will want to implement a freelist here, like CPython
self._p_oid = mm.zalloc(ffi.sizeof('PListObject'))
ob = ffi.cast('PObject *', mm.direct(self._p_oid))
ob.ob_type = mm._get_type_code(PersistentList)
self._body = ffi.cast('PListObject *', mm.direct(self._p_oid))
def _p_resurrect(self, manager, oid):
mm = self._p_mm = manager
self._p_oid = oid
self._body = ffi.cast('PListObject *', mm.direct(oid))
# Methods and properties needed to implement the ABC required methods.
@property
def _size(self):
return ffi.cast('PVarObject *', self._body).ob_size
@property
def _allocated(self):
return self._body.allocated
@property
def _items(self):
mm = self._p_mm
ob_items = mm.otuple(self._body.ob_items)
if ob_items == mm.OID_NULL:
return None
return ffi.cast('PObjPtr *', mm.direct(ob_items))
def _resize(self, newsize):
# Note that resize does *not* set self._size. That needs to be done by
# the caller so that we never expose invalid item cells. The size field
# is covered by a snapshot done here, though.
mm = self._p_mm
allocated = self._allocated
# Only realloc if we don't have enough space already.
if (allocated >= newsize and newsize >= allocated >> 1):
assert self._items != None or newsize == 0
with mm.transaction():
ob = ffi.cast('PVarObject *', self._body)
mm.snapshot_range(ffi.addressof(ob, 'ob_size'),
ffi.sizeof('size_t'))
ob.ob_size = newsize
return
# We use CPython's overallocation algorithm.
new_allocated = (newsize >> 3) + (3 if newsize < 9 else 6) + newsize
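# Worked examples of the growth formula (illustrative):
#   newsize=1   -> 0 + 3 + 1    = 4
#   newsize=9   -> 1 + 6 + 9    = 16
#   newsize=100 -> 12 + 6 + 100 = 118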
if newsize == 0:
new_allocated = 0
items = self._items
with mm.transaction():
if items is None:
items = mm.zalloc(new_allocated * ffi.sizeof('PObjPtr'),
type_num=LIST_POBJPTR_ARRAY_TYPE_NUM)
else:
items = mm.zrealloc(self._body.ob_items,
new_allocated * ffi.sizeof('PObjPtr'),
LIST_POBJPTR_ARRAY_TYPE_NUM)
mm.snapshot_range(self._body, ffi.sizeof('PListObject'))
self._body.ob_items = items
self._body.allocated = new_allocated
def insert(self, index, value):
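# Descriptive note: grow the backing array, clamp the index, shift trailing
# item pointers right by one, then persist, incref and store the new value.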
mm = self._p_mm
size = self._size
newsize = size + 1
with mm.transaction():
self._resize(newsize)
if index < 0:
index += size
if index < 0:
index = 0
if index > size:
index = size
items = self._items
mm.snapshot_range(items + index,
ffi.offsetof('PObjPtr *', newsize))
for i in range(size, index, -1):
items[i] = items[i-1]
v_oid = mm.persist(value)
mm.incref(v_oid)
items[index] = v_oid
ffi.cast('PVarObject *', self._body).ob_size = newsize
def _normalize_index(self, index):
try:
index = int(index)
except TypeError:
# Assume it is a slice
# XXX fixme
raise NotImplementedError("Slicing not yet implemented")
if index < 0:
index += self._size
if index < 0 or index >= self._size:
raise IndexError(index)
return index
def __setitem__(self, index, value):
mm = self._p_mm
index = self._normalize_index(index)
items = self._items
with mm.transaction():
v_oid = mm.persist(value)
mm.snapshot_range(ffi.addressof(items, index),
ffi.sizeof('PObjPtr *'))
mm.xdecref(items[index])
items[index] = v_oid
mm.incref(v_oid)
def __delitem__(self, index):
mm = self._p_mm
index = self._normalize_index(index)
size = self._size
newsize = size - 1
items = self._items
with mm.transaction():
ffi.cast('PVarObject *', self._body).ob_size = newsize
# We can't completely hide the process of transformation...this
# really needs a lock (or translation to GIL-locked C).
mm.snapshot_range(ffi.addressof(items, index),
ffi.offsetof('PObjPtr *', size))
oid = mm.otuple(items[index])
for i in range(index, newsize):
items[i] = items[i+1]
mm.decref(oid)
self._resize(newsize)
def __getitem__(self, index):
index = self._normalize_index(index)
items = self._items
return self._p_mm.resurrect(items[index])
def __len__(self):
return self._size
# Additional list methods not provided by the ABC.
@recursive_repr()
def __repr__(self):
return "{}([{}])".format(self.__class__.__name__,
', '.join("{!r}".format(x) for x in self))
def __eq__(self, other):
if not (isinstance(other, PersistentList) or
isinstance(other, list)):
return NotImplemented
if len(self) != len(other):
return False
for i in range(len(self)):
if self[i] != other[i]:
return False
return True
if sys.version_info[0] < 3:
def __ne__(self, other):
return not self == other
def clear(self):
mm = self._p_mm
if self._size == 0:
return
items = self._items
with mm.transaction():
size = self._size
# Set size to zero now so we never have an invalid state.
ffi.cast('PVarObject *', self._body).ob_size = 0
for i in range(size):
# Grab oid in tuple form so the assignment can't change it
oid = mm.otuple(items[i])
items[i] = mm.OID_NULL
mm.decref(oid)
self._resize(0)
# Additional methods required by the pmemobj API.
def _p_traverse(self):
items = self._items
for i in range(len(self)):
yield items[i]
def _p_substructures(self):
return ((self._body.ob_items, LIST_POBJPTR_ARRAY_TYPE_NUM),)
def _p_deallocate(self):
self.clear()
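# Illustrative usage sketch (not part of the original module). How a
# PersistentList instance is obtained depends on the surrounding pmemobj
# pool/manager API, which is assumed here and not shown in this file:
#
#     plist.insert(0, 'first')   # grows the PObjPtr array, increfs the value
#     plist[0] = 'replaced'      # snapshots the cell, decrefs the old oid
#     value = plist[0]           # resurrects the stored oid as a Python object
#     del plist[0]               # shifts items left, decrefs, shrinks the array
#     plist.clear()              # drops every reference, resizes to zero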
|
the-stack_0_1720 | # Copyright 2022 Kaiyu Zheng
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sciex import Experiment, Trial, Event, Result
from moos3d.tests.experiments.runner import *
from moos3d.tests.experiments.experiment import make_domain, make_trial
from moos3d import *
import matplotlib.pyplot as plt
import os
import random
ABS_PATH = os.path.dirname(os.path.abspath(__file__))
output_dir = os.path.join(ABS_PATH, "results")
prior_type = "uniform"
discount_factor = 0.99
detect_after_look = True
"""
This experiment investigates sensor quality. Fix alpha, alter beta.
Or fix beta, alter alpha.
The alpha will be fixed at 1e6 and the beta at 0, so as to separate
out any possible interaction between the two numbers.
We run the experiments on 16x16x16 worlds with 4 objects.
"""
def main():
# Check experiment_scalability.py for comments on `num_trials`
num_trials = 14 # running on 3 computers. so 14*3 = 42 > 40.
domain = (16, 2, 10, 10, 3.0, 500, 360)
n, k, d, max_depth, planning_time, max_steps, max_time = domain
if n == 16:
setting_hier = [(1,1,max_depth), (2,2,max_depth), (4,4,max_depth)]
setting_op = [(1,1,max_depth), (1,2,max_depth), (1,4,max_depth)]
## parameters
big = 1000
small = 1
exploration_const = 1000
params = {"prior_type": prior_type,
"discount_factor": discount_factor,
"max_depth": max_depth,
"planning_time": planning_time,
"max_steps": max_steps,
"max_time": max_time,
"detect_after_look": detect_after_look,
"big": big,
"small": small,
"exploration_const": exploration_const}
alpha_fixed = 1e5
beta_fixed = 0
# SIMPLER to understand!
scenarios = [(1e1, 0.3), # severe noise
(1e1, 0.8),
(1e2, 0.3),
(1e2, 0.8),
(5e2, 0.3),
(5e2, 0.8),
(1e3, 0.3),
(1e3, 0.8),
(1e4, 0.3),
(1e4, 0.8),
(1e5, 0.3),
(1e5, 0.8)] # no noise
all_trials = []
# Generate a world. For the same world, run different sensors & baselines.
    # Do this for num_trials different worlds.
for t in range(num_trials):
seed = random.randint(1, 1000000)
# build world
worldstr = make_domain(n, k, d)
# Run different sensors and baselines
for i in range(len(scenarios)):
alpha, beta = scenarios[i]
params['alpha'] = alpha
params['beta'] = beta
trial_name = "quality%s_%s" % (str(scenarios[i]).replace(", ", "-"), str(seed))
pouct_trial = make_trial(trial_name, worldstr,
"pouct", "octree", **params)
multires_trial = make_trial(trial_name, worldstr,
"hierarchical", "octree",
setting=setting_hier, **params)
options_trial = make_trial(trial_name, worldstr,
"options", "octree",
setting=setting_op, **params)
pomcp_trial = make_trial(trial_name, worldstr,
"pomcp", "particles",
num_particles=1000, **params)
random_trial = make_trial(trial_name, worldstr,
"purelyrandom", "octree", **params)
porollout_trial = make_trial(trial_name, worldstr,
"porollout", "octree",
porollout_policy=PolicyModel(detect_after_look=detect_after_look),
**params)
all_trials.extend([pouct_trial,
multires_trial,
options_trial,
pomcp_trial,
porollout_trial,
random_trial])
# Generate scripts to run experiments and gather results
exp = Experiment("QualitySensorCC", all_trials, output_dir, verbose=True)
exp.generate_trial_scripts(split=5)
print("Find multiple computers to run these experiments.")
if __name__ == "__main__":
main()
|
the-stack_0_1722 | # -*- coding: utf-8 -*-
PROVINCE_CHOICES = (
('AG', 'Agrigento'),
('AL', 'Alessandria'),
('AN', 'Ancona'),
('AO', 'Aosta'),
('AR', 'Arezzo'),
('AP', 'Ascoli Piceno'),
('AT', 'Asti'),
('AV', 'Avellino'),
('BA', 'Bari'),
('BT', 'Barletta-Andria-Trani'), # active starting from 2009
('BL', 'Belluno'),
('BN', 'Benevento'),
('BG', 'Bergamo'),
('BI', 'Biella'),
('BO', 'Bologna'),
('BZ', 'Bolzano/Bozen'),
('BS', 'Brescia'),
('BR', 'Brindisi'),
('CA', 'Cagliari'),
('CL', 'Caltanissetta'),
('CB', 'Campobasso'),
('CI', 'Carbonia-Iglesias'),
('CE', 'Caserta'),
('CT', 'Catania'),
('CZ', 'Catanzaro'),
('CH', 'Chieti'),
('CO', 'Como'),
('CS', 'Cosenza'),
('CR', 'Cremona'),
('KR', 'Crotone'),
('CN', 'Cuneo'),
('EN', 'Enna'),
('FM', 'Fermo'), # active starting from 2009
('FE', 'Ferrara'),
('FI', 'Firenze'),
('FG', 'Foggia'),
('FC', 'Forlì-Cesena'),
('FR', 'Frosinone'),
('GE', 'Genova'),
('GO', 'Gorizia'),
('GR', 'Grosseto'),
('IM', 'Imperia'),
('IS', 'Isernia'),
('SP', 'La Spezia'),
('AQ', u'L’Aquila'),
('LT', 'Latina'),
('LE', 'Lecce'),
('LC', 'Lecco'),
('LI', 'Livorno'),
('LO', 'Lodi'),
('LU', 'Lucca'),
('MC', 'Macerata'),
('MN', 'Mantova'),
('MS', 'Massa-Carrara'),
('MT', 'Matera'),
('VS', 'Medio Campidano'),
('ME', 'Messina'),
('MI', 'Milano'),
('MO', 'Modena'),
('MB', 'Monza e Brianza'), # active starting from 2009
('NA', 'Napoli'),
('NO', 'Novara'),
('NU', 'Nuoro'),
('OG', 'Ogliastra'),
('OT', 'Olbia-Tempio'),
('OR', 'Oristano'),
('PD', 'Padova'),
('PA', 'Palermo'),
('PR', 'Parma'),
('PV', 'Pavia'),
('PG', 'Perugia'),
('PU', 'Pesaro e Urbino'),
('PE', 'Pescara'),
('PC', 'Piacenza'),
('PI', 'Pisa'),
('PT', 'Pistoia'),
('PN', 'Pordenone'),
('PZ', 'Potenza'),
('PO', 'Prato'),
('RG', 'Ragusa'),
('RA', 'Ravenna'),
('RC', 'Reggio Calabria'),
('RE', 'Reggio Emilia'),
('RI', 'Rieti'),
('RN', 'Rimini'),
('RM', 'Roma'),
('RO', 'Rovigo'),
('SA', 'Salerno'),
('SS', 'Sassari'),
('SV', 'Savona'),
('SI', 'Siena'),
('SR', 'Siracusa'),
('SO', 'Sondrio'),
('TA', 'Taranto'),
('TE', 'Teramo'),
('TR', 'Terni'),
('TO', 'Torino'),
('TP', 'Trapani'),
('TN', 'Trento'),
('TV', 'Treviso'),
('TS', 'Trieste'),
('UD', 'Udine'),
('VA', 'Varese'),
('VE', 'Venezia'),
('VB', 'Verbano Cusio Ossola'),
('VC', 'Vercelli'),
('VR', 'Verona'),
('VV', 'Vibo Valentia'),
('VI', 'Vicenza'),
('VT', 'Viterbo'),
)
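# Hedged usage sketch (assumes Django is installed; the form and field names
# below are illustrative, not part of this module):
#
#     from django import forms
#
#     class AddressForm(forms.Form):
#         province = forms.ChoiceField(choices=PROVINCE_CHOICES)
#
# The two-letter code (e.g. 'RM') is what gets stored, while the full name
# ('Roma') is what the rendered widget displays.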
|
the-stack_0_1723 | """ Integral Transforms """
from __future__ import print_function, division
from sympy.core import S
from sympy.core.compatibility import reduce
from sympy.core.function import Function
from sympy.core.numbers import oo
from sympy.core.symbol import Dummy
from sympy.integrals import integrate, Integral
from sympy.integrals.meijerint import _dummy
from sympy.logic.boolalg import to_cnf, conjuncts, disjuncts, Or, And
from sympy.simplify import simplify
from sympy.utilities import default_sort_key
##########################################################################
# Helpers / Utilities
##########################################################################
class IntegralTransformError(NotImplementedError):
"""
Exception raised in relation to problems computing transforms.
    This class is mostly used internally; if integrals cannot be computed,
    objects representing unevaluated transforms are usually returned.
The hint ``needeval=True`` can be used to disable returning transform
objects, and instead raise this exception if an integral cannot be
computed.
"""
def __init__(self, transform, function, msg):
super(IntegralTransformError, self).__init__(
"%s Transform could not be computed: %s." % (transform, msg))
self.function = function
class IntegralTransform(Function):
"""
Base class for integral transforms.
This class represents unevaluated transforms.
To implement a concrete transform, derive from this class and implement
the _compute_transform(f, x, s, **hints) and _as_integral(f, x, s)
functions. If the transform cannot be computed, raise IntegralTransformError.
Also set cls._name.
Implement self._collapse_extra if your function returns more than just a
number and possibly a convergence condition.
"""
nargs = 3
@property
def function(self):
""" The function to be transformed. """
return self.args[0]
@property
def function_variable(self):
""" The dependent variable of the function to be transformed. """
return self.args[1]
@property
def transform_variable(self):
""" The independent transform variable. """
return self.args[2]
@property
def free_symbols(self):
"""
This method returns the symbols that will exist when the transform
is evaluated.
"""
return self.function.free_symbols.union(set([self.transform_variable])) \
- set([self.function_variable])
def _compute_transform(self, f, x, s, **hints):
raise NotImplementedError
def _as_integral(self, f, x, s):
raise NotImplementedError
def _collapse_extra(self, extra):
from sympy import And
cond = And(*extra)
if cond == False:
raise IntegralTransformError(self.__class__.name, None, '')
def doit(self, **hints):
"""
Try to evaluate the transform in closed form.
This general function handles linearity, but apart from that leaves
pretty much everything to _compute_transform.
Standard hints are the following:
- ``simplify``: whether or not to simplify the result
- ``noconds``: if True, don't return convergence conditions
- ``needeval``: if True, raise IntegralTransformError instead of
returning IntegralTransform objects
The default values of these hints depend on the concrete transform,
usually the default is
``(simplify, noconds, needeval) = (True, False, False)``.
"""
from sympy import Add, expand_mul, Mul
from sympy.core.function import AppliedUndef
needeval = hints.pop('needeval', False)
try_directly = not any(func.has(self.function_variable)
for func in self.function.atoms(AppliedUndef))
if try_directly:
try:
return self._compute_transform(self.function,
self.function_variable, self.transform_variable, **hints)
except IntegralTransformError:
pass
fn = self.function
if not fn.is_Add:
fn = expand_mul(fn)
if fn.is_Add:
hints['needeval'] = needeval
res = [self.__class__(*([x] + list(self.args[1:]))).doit(**hints)
for x in fn.args]
extra = []
ress = []
for x in res:
if not isinstance(x, tuple):
x = [x]
ress.append(x[0])
if len(x) > 1:
extra += [x[1:]]
res = Add(*ress)
if not extra:
return res
try:
extra = self._collapse_extra(extra)
return tuple([res]) + tuple(extra)
except IntegralTransformError:
pass
if needeval:
raise IntegralTransformError(
self.__class__._name, self.function, 'needeval')
# TODO handle derivatives etc
# pull out constant coefficients
coeff, rest = fn.as_coeff_mul(self.function_variable)
return coeff*self.__class__(*([Mul(*rest)] + list(self.args[1:])))
@property
def as_integral(self):
return self._as_integral(self.function, self.function_variable,
self.transform_variable)
def _eval_rewrite_as_Integral(self, *args):
return self.as_integral
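# Rough sketch of what a concrete subclass looks like, following the contract
# described in the IntegralTransform docstring (the name MyTransform and the
# kernel my_kernel are made up; see MellinTransform below for a real one):
#
#     class MyTransform(IntegralTransform):
#         _name = 'My'
#
#         def _compute_transform(self, f, x, s, **hints):
#             # return the closed form, or raise IntegralTransformError
#             ...
#
#         def _as_integral(self, f, x, s):
#             from sympy import Integral, oo
#             return Integral(f*my_kernel(x, s), (x, 0, oo))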
from sympy.solvers.inequalities import _solve_inequality
def _simplify(expr, doit):
from sympy import powdenest, piecewise_fold
if doit:
return simplify(powdenest(piecewise_fold(expr), polar=True))
return expr
def _noconds_(default):
"""
This is a decorator generator for dropping convergence conditions.
Suppose you define a function ``transform(*args)`` which returns a tuple of
the form ``(result, cond1, cond2, ...)``.
Decorating it ``@_noconds_(default)`` will add a new keyword argument
``noconds`` to it. If ``noconds=True``, the return value will be altered to
be only ``result``, whereas if ``noconds=False`` the return value will not
be altered.
The default value of the ``noconds`` keyword will be ``default`` (i.e. the
argument of this function).
"""
def make_wrapper(func):
from sympy.core.decorators import wraps
@wraps(func)
def wrapper(*args, **kwargs):
noconds = kwargs.pop('noconds', default)
res = func(*args, **kwargs)
if noconds:
return res[0]
return res
return wrapper
return make_wrapper
_noconds = _noconds_(False)
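# Example of what the decorator does to a backend function (the function name
# here is hypothetical):
#
#     @_noconds_(True)
#     def my_backend(f, x, s):
#         return F, cond                      # always build the full tuple
#
#     my_backend(f, x, s)                     # -> F (noconds defaults to True)
#     my_backend(f, x, s, noconds=False)      # -> (F, cond)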
##########################################################################
# Mellin Transform
##########################################################################
def _default_integrator(f, x):
return integrate(f, (x, 0, oo))
@_noconds
def _mellin_transform(f, x, s_, integrator=_default_integrator, simplify=True):
""" Backend function to compute Mellin transforms. """
from sympy import re, Max, Min, count_ops
# We use a fresh dummy, because assumptions on s might drop conditions on
# convergence of the integral.
s = _dummy('s', 'mellin-transform', f)
F = integrator(x**(s - 1) * f, x)
if not F.has(Integral):
return _simplify(F.subs(s, s_), simplify), (-oo, oo), True
if not F.is_Piecewise:
raise IntegralTransformError('Mellin', f, 'could not compute integral')
F, cond = F.args[0]
if F.has(Integral):
raise IntegralTransformError(
'Mellin', f, 'integral in unexpected form')
def process_conds(cond):
"""
Turn ``cond`` into a strip (a, b), and auxiliary conditions.
"""
a = -oo
b = oo
aux = True
conds = conjuncts(to_cnf(cond))
t = Dummy('t', real=True)
for c in conds:
a_ = oo
b_ = -oo
aux_ = []
for d in disjuncts(c):
d_ = d.replace(
re, lambda x: x.as_real_imag()[0]).subs(re(s), t)
if not d.is_Relational or \
d.rel_op not in ('>', '>=', '<', '<=') \
or d_.has(s) or not d_.has(t):
aux_ += [d]
continue
soln = _solve_inequality(d_, t)
if not soln.is_Relational or \
soln.rel_op not in ('>', '>=', '<', '<='):
aux_ += [d]
continue
if soln.lts == t:
b_ = Max(soln.gts, b_)
else:
a_ = Min(soln.lts, a_)
if a_ != oo and a_ != b:
a = Max(a_, a)
elif b_ != -oo and b_ != a:
b = Min(b_, b)
else:
aux = And(aux, Or(*aux_))
return a, b, aux
conds = [process_conds(c) for c in disjuncts(cond)]
conds = [x for x in conds if x[2] != False]
conds.sort(key=lambda x: (x[0] - x[1], count_ops(x[2])))
if not conds:
raise IntegralTransformError('Mellin', f, 'no convergence found')
a, b, aux = conds[0]
return _simplify(F.subs(s, s_), simplify), (a, b), aux
class MellinTransform(IntegralTransform):
"""
Class representing unevaluated Mellin transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute Mellin transforms, see the :func:`mellin_transform`
docstring.
"""
_name = 'Mellin'
def _compute_transform(self, f, x, s, **hints):
return _mellin_transform(f, x, s, **hints)
def _as_integral(self, f, x, s):
from sympy import Integral
return Integral(f*x**(s - 1), (x, 0, oo))
def _collapse_extra(self, extra):
from sympy import And, Max, Min
a = []
b = []
cond = []
for (sa, sb), c in extra:
a += [sa]
b += [sb]
cond += [c]
res = (Max(*a), Min(*b)), And(*cond)
if (res[0][0] >= res[0][1]) == True or res[1] == False:
raise IntegralTransformError(
'Mellin', None, 'no combined convergence.')
return res
def mellin_transform(f, x, s, **hints):
r"""
Compute the Mellin transform `F(s)` of `f(x)`,
.. math :: F(s) = \int_0^\infty x^{s-1} f(x) \mathrm{d}x.
For all "sensible" functions, this converges absolutely in a strip
`a < \operatorname{Re}(s) < b`.
The Mellin transform is related via change of variables to the Fourier
transform, and also to the (bilateral) Laplace transform.
This function returns ``(F, (a, b), cond)``
where ``F`` is the Mellin transform of ``f``, ``(a, b)`` is the fundamental strip
(as above), and ``cond`` are auxiliary convergence conditions.
If the integral cannot be computed in closed form, this function returns
an unevaluated :class:`MellinTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`. If ``noconds=False``,
then only `F` will be returned (i.e. not ``cond``, and also not the strip
``(a, b)``).
>>> from sympy.integrals.transforms import mellin_transform
>>> from sympy import exp
>>> from sympy.abc import x, s
>>> mellin_transform(exp(-x), x, s)
(gamma(s), (0, oo), True)
See Also
========
inverse_mellin_transform, laplace_transform, fourier_transform
hankel_transform, inverse_hankel_transform
"""
return MellinTransform(f, x, s).doit(**hints)
def _rewrite_sin(m_n, s, a, b):
"""
Re-write the sine function ``sin(m*s + n)`` as gamma functions, compatible
with the strip (a, b).
Return ``(gamma1, gamma2, fac)`` so that ``f == fac/(gamma1 * gamma2)``.
>>> from sympy.integrals.transforms import _rewrite_sin
>>> from sympy import pi, S
>>> from sympy.abc import s
>>> _rewrite_sin((pi, 0), s, 0, 1)
(gamma(s), gamma(-s + 1), pi)
>>> _rewrite_sin((pi, 0), s, 1, 0)
(gamma(s - 1), gamma(-s + 2), -pi)
>>> _rewrite_sin((pi, 0), s, -1, 0)
(gamma(s + 1), gamma(-s), -pi)
>>> _rewrite_sin((pi, pi/2), s, S(1)/2, S(3)/2)
(gamma(s - 1/2), gamma(-s + 3/2), -pi)
>>> _rewrite_sin((pi, pi), s, 0, 1)
(gamma(s), gamma(-s + 1), -pi)
>>> _rewrite_sin((2*pi, 0), s, 0, S(1)/2)
(gamma(2*s), gamma(-2*s + 1), pi)
>>> _rewrite_sin((2*pi, 0), s, S(1)/2, 1)
(gamma(2*s - 1), gamma(-2*s + 2), -pi)
"""
# (This is a separate function because it is moderately complicated,
# and I want to doctest it.)
# We want to use pi/sin(pi*x) = gamma(x)*gamma(1-x).
    # But there is one complication: the gamma functions determine the
    # integration contour in the definition of the G-function. Usually
# it would not matter if this is slightly shifted, unless this way
# we create an undefined function!
# So we try to write this in such a way that the gammas are
# eminently on the right side of the strip.
from sympy import expand_mul, pi, ceiling, gamma, re
m, n = m_n
m = expand_mul(m/pi)
n = expand_mul(n/pi)
r = ceiling(-m*a - n.as_real_imag()[0]) # Don't use re(n), does not expand
return gamma(m*s + n + r), gamma(1 - n - r - m*s), (-1)**r*pi
class MellinTransformStripError(ValueError):
"""
Exception raised by _rewrite_gamma. Mainly for internal use.
"""
pass
def _rewrite_gamma(f, s, a, b):
"""
Try to rewrite the product f(s) as a product of gamma functions,
so that the inverse Mellin transform of f can be expressed as a meijer
G function.
Return (an, ap), (bm, bq), arg, exp, fac such that
G((an, ap), (bm, bq), arg/z**exp)*fac is the inverse Mellin transform of f(s).
Raises IntegralTransformError or MellinTransformStripError on failure.
It is asserted that f has no poles in the fundamental strip designated by
(a, b). One of a and b is allowed to be None. The fundamental strip is
important, because it determines the inversion contour.
This function can handle exponentials, linear factors, trigonometric
functions.
This is a helper function for inverse_mellin_transform that will not
attempt any transformations on f.
>>> from sympy.integrals.transforms import _rewrite_gamma
>>> from sympy.abc import s
>>> from sympy import oo
>>> _rewrite_gamma(s*(s+3)*(s-1), s, -oo, oo)
(([], [-3, 0, 1]), ([-2, 1, 2], []), 1, 1, -1)
>>> _rewrite_gamma((s-1)**2, s, -oo, oo)
(([], [1, 1]), ([2, 2], []), 1, 1, 1)
Importance of the fundamental strip:
>>> _rewrite_gamma(1/s, s, 0, oo)
(([1], []), ([], [0]), 1, 1, 1)
>>> _rewrite_gamma(1/s, s, None, oo)
(([1], []), ([], [0]), 1, 1, 1)
>>> _rewrite_gamma(1/s, s, 0, None)
(([1], []), ([], [0]), 1, 1, 1)
>>> _rewrite_gamma(1/s, s, -oo, 0)
(([], [1]), ([0], []), 1, 1, -1)
>>> _rewrite_gamma(1/s, s, None, 0)
(([], [1]), ([0], []), 1, 1, -1)
>>> _rewrite_gamma(1/s, s, -oo, None)
(([], [1]), ([0], []), 1, 1, -1)
>>> _rewrite_gamma(2**(-s+3), s, -oo, oo)
(([], []), ([], []), 1/2, 1, 8)
"""
from itertools import repeat
from sympy import (Poly, gamma, Mul, re, RootOf, exp as exp_, E, expand,
roots, ilcm, pi, sin, cos, tan, cot, igcd, exp_polar)
# Our strategy will be as follows:
# 1) Guess a constant c such that the inversion integral should be
# performed wrt s'=c*s (instead of plain s). Write s for s'.
# 2) Process all factors, rewrite them independently as gamma functions in
# argument s, or exponentials of s.
# 3) Try to transform all gamma functions s.t. they have argument
# a+s or a-s.
# 4) Check that the resulting G function parameters are valid.
# 5) Combine all the exponentials.
a_, b_ = S([a, b])
def left(c, is_numer):
"""
Decide whether pole at c lies to the left of the fundamental strip.
"""
# heuristically, this is the best chance for us to solve the inequalities
c = expand(re(c))
if a_ is None:
return c < b_
if b_ is None:
return c <= a_
if (c >= b_) is True:
return False
if (c <= a_) is True:
return True
if is_numer:
return None
if a_.free_symbols or b_.free_symbols or c.free_symbols:
return None # XXX
#raise IntegralTransformError('Inverse Mellin', f,
# 'Could not determine position of singularity %s'
# ' relative to fundamental strip' % c)
raise MellinTransformStripError('Pole inside critical strip?')
# 1)
s_multipliers = []
for g in f.atoms(gamma):
if not g.has(s):
continue
arg = g.args[0]
if arg.is_Add:
arg = arg.as_independent(s)[1]
coeff, _ = arg.as_coeff_mul(s)
s_multipliers += [coeff]
for g in f.atoms(sin, cos, tan, cot):
if not g.has(s):
continue
arg = g.args[0]
if arg.is_Add:
arg = arg.as_independent(s)[1]
coeff, _ = arg.as_coeff_mul(s)
s_multipliers += [coeff/pi]
s_multipliers = [abs(x) for x in s_multipliers if x.is_real]
common_coefficient = S(1)
for x in s_multipliers:
if not x.is_Rational:
common_coefficient = x
break
s_multipliers = [x/common_coefficient for x in s_multipliers]
if any(not x.is_Rational for x in s_multipliers):
raise NotImplementedError
s_multiplier = common_coefficient/reduce(ilcm, [S(x.q)
for x in s_multipliers], S(1))
if s_multiplier == common_coefficient:
if len(s_multipliers) == 0:
s_multiplier = common_coefficient
else:
s_multiplier = common_coefficient \
*reduce(igcd, [S(x.p) for x in s_multipliers])
exponent = S(1)
fac = S(1)
f = f.subs(s, s/s_multiplier)
fac /= s_multiplier
exponent = 1/s_multiplier
if a_ is not None:
a_ *= s_multiplier
if b_ is not None:
b_ *= s_multiplier
# 2)
numer, denom = f.as_numer_denom()
numer = Mul.make_args(numer)
denom = Mul.make_args(denom)
args = list(zip(numer, repeat(True))) + list(zip(denom, repeat(False)))
facs = []
dfacs = []
# *_gammas will contain pairs (a, c) representing Gamma(a*s + c)
numer_gammas = []
denom_gammas = []
# exponentials will contain bases for exponentials of s
exponentials = []
def exception(fact):
return IntegralTransformError("Inverse Mellin", f, "Unrecognised form '%s'." % fact)
while args:
fact, is_numer = args.pop()
if is_numer:
ugammas, lgammas = numer_gammas, denom_gammas
ufacs, lfacs = facs, dfacs
else:
ugammas, lgammas = denom_gammas, numer_gammas
ufacs, lfacs = dfacs, facs
def linear_arg(arg):
""" Test if arg is of form a*s+b, raise exception if not. """
if not arg.is_polynomial(s):
raise exception(fact)
p = Poly(arg, s)
if p.degree() != 1:
raise exception(fact)
return p.all_coeffs()
# constants
if not fact.has(s):
ufacs += [fact]
# exponentials
elif fact.is_Pow or isinstance(fact, exp_):
if fact.is_Pow:
base = fact.base
exp = fact.exp
else:
base = exp_polar(1)
exp = fact.args[0]
if exp.is_Integer:
cond = is_numer
if exp < 0:
cond = not cond
args += [(base, cond)]*abs(exp)
continue
elif not base.has(s):
a, b = linear_arg(exp)
if not is_numer:
base = 1/base
exponentials += [base**a]
facs += [base**b]
else:
raise exception(fact)
# linear factors
elif fact.is_polynomial(s):
p = Poly(fact, s)
if p.degree() != 1:
# We completely factor the poly. For this we need the roots.
# Now roots() only works in some cases (low degree), and RootOf
# only works without parameters. So try both...
coeff = p.LT()[1]
rs = roots(p, s)
if len(rs) != p.degree():
rs = RootOf.all_roots(p)
ufacs += [coeff]
args += [(s - c, is_numer) for c in rs]
continue
a, c = p.all_coeffs()
ufacs += [a]
c /= -a
# Now need to convert s - c
if left(c, is_numer):
ugammas += [(S(1), -c + 1)]
lgammas += [(S(1), -c)]
else:
ufacs += [-1]
ugammas += [(S(-1), c + 1)]
lgammas += [(S(-1), c)]
elif isinstance(fact, gamma):
a, b = linear_arg(fact.args[0])
if is_numer:
if (a > 0 and (left(-b/a, is_numer) is False)) or \
(a < 0 and (left(-b/a, is_numer) is True)):
raise NotImplementedError(
'Gammas partially over the strip.')
ugammas += [(a, b)]
elif isinstance(fact, sin):
# We try to re-write all trigs as gammas. This is not in
# general the best strategy, since sometimes this is impossible,
# but rewriting as exponentials would work. However trig functions
# in inverse mellin transforms usually all come from simplifying
# gamma terms, so this should work.
a = fact.args[0]
if is_numer:
# No problem with the poles.
gamma1, gamma2, fac_ = gamma(a/pi), gamma(1 - a/pi), pi
else:
gamma1, gamma2, fac_ = _rewrite_sin(linear_arg(a), s, a_, b_)
args += [(gamma1, not is_numer), (gamma2, not is_numer)]
ufacs += [fac_]
elif isinstance(fact, tan):
a = fact.args[0]
args += [(sin(a, evaluate=False), is_numer),
(sin(pi/2 - a, evaluate=False), not is_numer)]
elif isinstance(fact, cos):
a = fact.args[0]
args += [(sin(pi/2 - a, evaluate=False), is_numer)]
elif isinstance(fact, cot):
a = fact.args[0]
args += [(sin(pi/2 - a, evaluate=False), is_numer),
(sin(a, evaluate=False), not is_numer)]
else:
raise exception(fact)
fac *= Mul(*facs)/Mul(*dfacs)
# 3)
an, ap, bm, bq = [], [], [], []
for gammas, plus, minus, is_numer in [(numer_gammas, an, bm, True),
(denom_gammas, bq, ap, False)]:
while gammas:
a, c = gammas.pop()
if a != -1 and a != +1:
# We use the gamma function multiplication theorem.
p = abs(S(a))
newa = a/p
newc = c/p
assert a.is_Integer
for k in range(p):
gammas += [(newa, newc + k/p)]
if is_numer:
fac *= (2*pi)**((1 - p)/2) * p**(c - S(1)/2)
exponentials += [p**a]
else:
fac /= (2*pi)**((1 - p)/2) * p**(c - S(1)/2)
exponentials += [p**(-a)]
continue
if a == +1:
plus.append(1 - c)
else:
minus.append(c)
# 4)
# TODO
# 5)
arg = Mul(*exponentials)
# for testability, sort the arguments
an.sort(key=default_sort_key)
ap.sort(key=default_sort_key)
bm.sort(key=default_sort_key)
bq.sort(key=default_sort_key)
return (an, ap), (bm, bq), arg, exponent, fac
@_noconds_(True)
def _inverse_mellin_transform(F, s, x_, strip, as_meijerg=False):
""" A helper for the real inverse_mellin_transform function, this one here
assumes x to be real and positive. """
from sympy import (expand, expand_mul, hyperexpand, meijerg, And, Or,
arg, pi, re, factor, Heaviside, gamma, Add)
x = _dummy('t', 'inverse-mellin-transform', F, positive=True)
# Actually, we won't try integration at all. Instead we use the definition
# of the Meijer G function as a fairly general inverse mellin transform.
F = F.rewrite(gamma)
for g in [factor(F), expand_mul(F), expand(F)]:
if g.is_Add:
# do all terms separately
ress = [_inverse_mellin_transform(G, s, x, strip, as_meijerg,
noconds=False)
for G in g.args]
conds = [p[1] for p in ress]
ress = [p[0] for p in ress]
res = Add(*ress)
if not as_meijerg:
res = factor(res, gens=res.atoms(Heaviside))
return res.subs(x, x_), And(*conds)
try:
a, b, C, e, fac = _rewrite_gamma(g, s, strip[0], strip[1])
except IntegralTransformError:
continue
G = meijerg(a, b, C/x**e)
if as_meijerg:
h = G
else:
try:
h = hyperexpand(G)
except NotImplementedError as detail:
raise IntegralTransformError(
'Inverse Mellin', F, 'Could not calculate integral')
if h.is_Piecewise and len(h.args) == 3:
# XXX we break modularity here!
h = Heaviside(x - abs(C))*h.args[0].args[0] \
+ Heaviside(abs(C) - x)*h.args[1].args[0]
        # We must ensure that the integral along the line we want converges,
# and return that value.
# See [L], 5.2
cond = [abs(arg(G.argument)) < G.delta*pi]
        # Note: we allow ">=" here; this corresponds to convergence if we let
        # limits go to oo symmetrically. ">" corresponds to absolute convergence.
cond += [And(Or(len(G.ap) != len(G.bq), 0 >= re(G.nu) + 1),
abs(arg(G.argument)) == G.delta*pi)]
cond = Or(*cond)
if cond == False:
raise IntegralTransformError(
'Inverse Mellin', F, 'does not converge')
return (h*fac).subs(x, x_), cond
raise IntegralTransformError('Inverse Mellin', F, '')
_allowed = None
class InverseMellinTransform(IntegralTransform):
"""
Class representing unevaluated inverse Mellin transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute inverse Mellin transforms, see the
:func:`inverse_mellin_transform` docstring.
"""
nargs = 5
_name = 'Inverse Mellin'
_none_sentinel = Dummy('None')
_c = Dummy('c')
def __new__(cls, F, s, x, a, b, **opts):
if a is None:
a = InverseMellinTransform._none_sentinel
if b is None:
b = InverseMellinTransform._none_sentinel
return IntegralTransform.__new__(cls, F, s, x, a, b, **opts)
@property
def fundamental_strip(self):
a, b = self.args[3], self.args[4]
if a is InverseMellinTransform._none_sentinel:
a = None
if b is InverseMellinTransform._none_sentinel:
b = None
return a, b
def _compute_transform(self, F, s, x, **hints):
from sympy import postorder_traversal
global _allowed
if _allowed is None:
from sympy import (
exp, gamma, sin, cos, tan, cot, cosh, sinh, tanh,
coth, factorial, rf)
_allowed = set(
[exp, gamma, sin, cos, tan, cot, cosh, sinh, tanh, coth,
factorial, rf])
for f in postorder_traversal(F):
if f.is_Function and f.has(s) and f.func not in _allowed:
raise IntegralTransformError('Inverse Mellin', F,
'Component %s not recognised.' % f)
strip = self.fundamental_strip
return _inverse_mellin_transform(F, s, x, strip, **hints)
def _as_integral(self, F, s, x):
from sympy import Integral, I, oo
c = self.__class__._c
return Integral(F*x**(-s), (s, c - I*oo, c + I*oo))
def inverse_mellin_transform(F, s, x, strip, **hints):
r"""
Compute the inverse Mellin transform of `F(s)` over the fundamental
strip given by ``strip=(a, b)``.
This can be defined as
.. math:: f(x) = \int_{c - i\infty}^{c + i\infty} x^{-s} F(s) \mathrm{d}s,
for any `c` in the fundamental strip. Under certain regularity
conditions on `F` and/or `f`,
this recovers `f` from its Mellin transform `F`
(and vice versa), for positive real `x`.
One of `a` or `b` may be passed as ``None``; a suitable `c` will be
inferred.
If the integral cannot be computed in closed form, this function returns
an unevaluated :class:`InverseMellinTransform` object.
Note that this function will assume x to be positive and real, regardless
of the sympy assumptions!
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
>>> from sympy.integrals.transforms import inverse_mellin_transform
>>> from sympy import oo, gamma
>>> from sympy.abc import x, s
>>> inverse_mellin_transform(gamma(s), s, x, (0, oo))
exp(-x)
The fundamental strip matters:
>>> f = 1/(s**2 - 1)
>>> inverse_mellin_transform(f, s, x, (-oo, -1))
(x/2 - 1/(2*x))*Heaviside(x - 1)
>>> inverse_mellin_transform(f, s, x, (-1, 1))
-x*Heaviside(-x + 1)/2 - Heaviside(x - 1)/(2*x)
>>> inverse_mellin_transform(f, s, x, (1, oo))
(-x/2 + 1/(2*x))*Heaviside(-x + 1)
See Also
========
mellin_transform
hankel_transform, inverse_hankel_transform
"""
return InverseMellinTransform(F, s, x, strip[0], strip[1]).doit(**hints)
##########################################################################
# Laplace Transform
##########################################################################
def _simplifyconds(expr, s, a):
r"""
    Naively simplify some conditions occurring in ``expr``, given that `\operatorname{Re}(s) > a`.
>>> from sympy.integrals.transforms import _simplifyconds as simp
>>> from sympy.abc import x
>>> from sympy import sympify as S
>>> simp(abs(x**2) < 1, x, 1)
False
>>> simp(abs(x**2) < 1, x, 2)
False
>>> simp(abs(x**2) < 1, x, 0)
Abs(x**2) < 1
>>> simp(abs(1/x**2) < 1, x, 1)
True
>>> simp(S(1) < abs(x), x, 1)
True
>>> simp(S(1) < abs(1/x), x, 1)
False
>>> from sympy import Ne
>>> simp(Ne(1, x**3), x, 1)
True
>>> simp(Ne(1, x**3), x, 2)
True
>>> simp(Ne(1, x**3), x, 0)
1 != x**3
"""
from sympy.core.relational import ( StrictGreaterThan, StrictLessThan,
Unequality )
from sympy import Abs
def power(ex):
if ex == s:
return 1
if ex.is_Pow and ex.base == s:
return ex.exp
return None
def bigger(ex1, ex2):
""" Return True only if |ex1| > |ex2|, False only if |ex1| < |ex2|.
Else return None. """
if ex1.has(s) and ex2.has(s):
return None
if ex1.func is Abs:
ex1 = ex1.args[0]
if ex2.func is Abs:
ex2 = ex2.args[0]
if ex1.has(s):
return bigger(1/ex2, 1/ex1)
n = power(ex2)
if n is None:
return None
if n > 0 and (abs(ex1) <= abs(a)**n) is True:
return False
if n < 0 and (abs(ex1) >= abs(a)**n) is True:
return True
def replie(x, y):
""" simplify x < y """
if not (x.is_positive or x.func is Abs) \
or not (y.is_positive or y.func is Abs):
return (x < y)
r = bigger(x, y)
if r is not None:
return not r
return (x < y)
def replue(x, y):
if bigger(x, y) in (True, False):
return True
return Unequality(x, y)
def repl(ex, *args):
if isinstance(ex, bool):
return ex
return ex.replace(*args)
expr = repl(expr, StrictLessThan, replie)
expr = repl(expr, StrictGreaterThan, lambda x, y: replie(y, x))
expr = repl(expr, Unequality, replue)
return expr
@_noconds
def _laplace_transform(f, t, s_, simplify=True):
""" The backend function for Laplace transforms. """
from sympy import (re, Max, exp, pi, Abs, Min, periodic_argument as arg,
cos, Wild, symbols, polar_lift)
s = Dummy('s')
F = integrate(exp(-s*t) * f, (t, 0, oo))
if not F.has(Integral):
return _simplify(F.subs(s, s_), simplify), -oo, True
if not F.is_Piecewise:
raise IntegralTransformError(
'Laplace', f, 'could not compute integral')
F, cond = F.args[0]
if F.has(Integral):
raise IntegralTransformError(
'Laplace', f, 'integral in unexpected form')
def process_conds(conds):
""" Turn ``conds`` into a strip and auxiliary conditions. """
a = -oo
aux = True
conds = conjuncts(to_cnf(conds))
u = Dummy('u', real=True)
p, q, w1, w2, w3, w4, w5 = symbols(
'p q w1 w2 w3 w4 w5', cls=Wild, exclude=[s])
for c in conds:
a_ = oo
aux_ = []
for d in disjuncts(c):
m = d.match(abs(arg((s + w3)**p*q, w1)) < w2)
if not m:
m = d.match(abs(arg((s + w3)**p*q, w1)) <= w2)
if not m:
m = d.match(abs(arg((polar_lift(s + w3))**p*q, w1)) < w2)
if not m:
m = d.match(abs(arg((polar_lift(s + w3))**p*q, w1)) <= w2)
if m:
if m[q] > 0 and m[w2]/m[p] == pi/2:
d = re(s + m[w3]) > 0
m = d.match(
0 < cos(abs(arg(s**w1*w5, q))*w2)*abs(s**w3)**w4 - p)
if not m:
m = d.match(0 < cos(abs(
arg(polar_lift(s)**w1*w5, q))*w2)*abs(s**w3)**w4 - p)
if m and all(m[wild] > 0 for wild in [w1, w2, w3, w4, w5]):
d = re(s) > m[p]
d_ = d.replace(
re, lambda x: x.expand().as_real_imag()[0]).subs(re(s), t)
if not d.is_Relational or \
d.rel_op not in ('>', '>=', '<', '<=') \
or d_.has(s) or not d_.has(t):
aux_ += [d]
continue
soln = _solve_inequality(d_, t)
if not soln.is_Relational or \
soln.rel_op not in ('>', '>=', '<', '<='):
aux_ += [d]
continue
if soln.lts == t:
raise IntegralTransformError('Laplace', f,
'convergence not in half-plane?')
else:
a_ = Min(soln.lts, a_)
if a_ != oo:
a = Max(a_, a)
else:
aux = And(aux, Or(*aux_))
return a, aux
conds = [process_conds(c) for c in disjuncts(cond)]
conds2 = [x for x in conds if x[1] != False and x[0] != -oo]
if not conds2:
conds2 = [x for x in conds if x[1] != False]
conds = conds2
def cnt(expr):
if isinstance(expr, bool):
return 0
return expr.count_ops()
conds.sort(key=lambda x: (-x[0], cnt(x[1])))
if not conds:
raise IntegralTransformError('Laplace', f, 'no convergence found')
a, aux = conds[0]
def sbs(expr):
if isinstance(expr, bool):
return expr
return expr.subs(s, s_)
if simplify:
F = _simplifyconds(F, s, a)
aux = _simplifyconds(aux, s, a)
return _simplify(F.subs(s, s_), simplify), sbs(a), sbs(aux)
class LaplaceTransform(IntegralTransform):
"""
Class representing unevaluated Laplace transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute Laplace transforms, see the :func:`laplace_transform`
docstring.
"""
_name = 'Laplace'
def _compute_transform(self, f, t, s, **hints):
return _laplace_transform(f, t, s, **hints)
def _as_integral(self, f, t, s):
from sympy import Integral, exp
return Integral(f*exp(-s*t), (t, 0, oo))
def _collapse_extra(self, extra):
from sympy import And, Max
conds = []
planes = []
for plane, cond in extra:
conds.append(cond)
planes.append(plane)
cond = And(*conds)
plane = Max(*planes)
if cond == False:
raise IntegralTransformError(
'Laplace', None, 'No combined convergence.')
return plane, cond
def laplace_transform(f, t, s, **hints):
r"""
Compute the Laplace Transform `F(s)` of `f(t)`,
.. math :: F(s) = \int_0^\infty e^{-st} f(t) \mathrm{d}t.
For all "sensible" functions, this converges absolutely in a
half plane `a < \operatorname{Re}(s)`.
This function returns ``(F, a, cond)``
where ``F`` is the Laplace transform of ``f``, `\operatorname{Re}(s) > a` is the half-plane
of convergence, and ``cond`` are auxiliary convergence conditions.
If the integral cannot be computed in closed form, this function returns
an unevaluated :class:`LaplaceTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`. If ``noconds=True``,
only `F` will be returned (i.e. not ``cond``, and also not the plane ``a``).
>>> from sympy.integrals import laplace_transform
>>> from sympy.abc import t, s, a
>>> laplace_transform(t**a, t, s)
(s**(-a)*gamma(a + 1)/s, 0, -re(a) < 1)
See Also
========
inverse_laplace_transform, mellin_transform, fourier_transform
hankel_transform, inverse_hankel_transform
"""
return LaplaceTransform(f, t, s).doit(**hints)
@_noconds_(True)
def _inverse_laplace_transform(F, s, t_, plane, simplify=True):
""" The backend function for inverse Laplace transforms. """
from sympy import exp, Heaviside, log, expand_complex, Integral, Piecewise
from sympy.integrals.meijerint import meijerint_inversion, _get_coeff_exp
# There are two strategies we can try:
# 1) Use inverse mellin transforms - related by a simple change of variables.
# 2) Use the inversion integral.
t = Dummy('t', real=True)
def pw_simp(*args):
""" Simplify a piecewise expression from hyperexpand. """
# XXX we break modularity here!
if len(args) != 3:
return Piecewise(*args)
arg = args[2].args[0].argument
coeff, exponent = _get_coeff_exp(arg, t)
e1 = args[0].args[0]
e2 = args[1].args[0]
return Heaviside(1/abs(coeff) - t**exponent)*e1 \
+ Heaviside(t**exponent - 1/abs(coeff))*e2
try:
f, cond = inverse_mellin_transform(F, s, exp(-t), (None, oo),
needeval=True, noconds=False)
except IntegralTransformError:
f = None
if f is None:
f = meijerint_inversion(F, s, t)
if f is None:
raise IntegralTransformError('Inverse Laplace', f, '')
if f.is_Piecewise:
f, cond = f.args[0]
if f.has(Integral):
raise IntegralTransformError('Inverse Laplace', f,
'inversion integral of unrecognised form.')
else:
cond = True
f = f.replace(Piecewise, pw_simp)
if f.is_Piecewise:
# many of the functions called below can't work with piecewise
# (b/c it has a bool in args)
return f.subs(t, t_), cond
u = Dummy('u')
def simp_heaviside(arg):
a = arg.subs(exp(-t), u)
if a.has(t):
return Heaviside(arg)
rel = _solve_inequality(a > 0, u)
if rel.lts == u:
k = log(rel.gts)
return Heaviside(t + k)
else:
k = log(rel.lts)
return Heaviside(-(t + k))
f = f.replace(Heaviside, simp_heaviside)
def simp_exp(arg):
return expand_complex(exp(arg))
f = f.replace(exp, simp_exp)
# TODO it would be nice to fix cosh and sinh ... simplify messes these
# exponentials up
return _simplify(f.subs(t, t_), simplify), cond
class InverseLaplaceTransform(IntegralTransform):
"""
Class representing unevaluated inverse Laplace transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute inverse Laplace transforms, see the
:func:`inverse_laplace_transform` docstring.
"""
nargs = 4
_name = 'Inverse Laplace'
_none_sentinel = Dummy('None')
_c = Dummy('c')
def __new__(cls, F, s, x, plane, **opts):
if plane is None:
plane = InverseLaplaceTransform._none_sentinel
return IntegralTransform.__new__(cls, F, s, x, plane, **opts)
@property
def fundamental_plane(self):
plane = self.args[3]
if plane is InverseLaplaceTransform._none_sentinel:
plane = None
return plane
def _compute_transform(self, F, s, t, **hints):
return _inverse_laplace_transform(F, s, t, self.fundamental_plane, **hints)
def _as_integral(self, F, s, t):
from sympy import I, Integral, exp
c = self.__class__._c
return Integral(exp(s*t)*F, (s, c - I*oo, c + I*oo))
def inverse_laplace_transform(F, s, t, plane=None, **hints):
r"""
Compute the inverse Laplace transform of `F(s)`, defined as
.. math :: f(t) = \int_{c-i\infty}^{c+i\infty} e^{st} F(s) \mathrm{d}s,
for `c` so large that `F(s)` has no singularites in the
half-plane `\operatorname{Re}(s) > c-\epsilon`.
The plane can be specified by
argument ``plane``, but will be inferred if passed as None.
Under certain regularity conditions, this recovers `f(t)` from its
Laplace Transform `F(s)`, for non-negative `t`, and vice
versa.
If the integral cannot be computed in closed form, this function returns
an unevaluated :class:`InverseLaplaceTransform` object.
Note that this function will always assume `t` to be real,
regardless of the sympy assumption on `t`.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
>>> from sympy.integrals.transforms import inverse_laplace_transform
>>> from sympy import exp, Symbol
>>> from sympy.abc import s, t
>>> a = Symbol('a', positive=True)
>>> inverse_laplace_transform(exp(-a*s)/s, s, t)
Heaviside(-a + t)
See Also
========
laplace_transform
hankel_transform, inverse_hankel_transform
"""
return InverseLaplaceTransform(F, s, t, plane).doit(**hints)
##########################################################################
# Fourier Transform
##########################################################################
@_noconds_(True)
def _fourier_transform(f, x, k, a, b, name, simplify=True):
"""
Compute a general Fourier-type transform
F(k) = a int_-oo^oo exp(b*I*x*k) f(x) dx.
For suitable choice of a and b, this reduces to the standard Fourier
and inverse Fourier transforms.
"""
from sympy import exp, I, oo
F = integrate(a*f*exp(b*I*x*k), (x, -oo, oo))
if not F.has(Integral):
return _simplify(F, simplify), True
if not F.is_Piecewise:
raise IntegralTransformError(name, f, 'could not compute integral')
F, cond = F.args[0]
if F.has(Integral):
raise IntegralTransformError(name, f, 'integral in unexpected form')
return _simplify(F, simplify), cond
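# The (a, b) pairs used by the concrete subclasses below are
#
#     FourierTransform:        a = 1, b = -2*pi  ->  F(k) = int f(x)*exp(-2*pi*I*x*k) dx
#     InverseFourierTransform: a = 1, b = +2*pi  ->  f(x) = int F(k)*exp(+2*pi*I*x*k) dk
#
# Other conventions (for example the unitary angular-frequency pair
# a = 1/sqrt(2*pi), b = -1) can be obtained by calling _fourier_transform
# directly with those values; that usage is a suggestion, not an API exposed
# elsewhere in this file.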
class FourierTypeTransform(IntegralTransform):
""" Base class for Fourier transforms.
Specify cls._a and cls._b.
"""
def _compute_transform(self, f, x, k, **hints):
return _fourier_transform(f, x, k,
self.__class__._a, self.__class__._b,
self.__class__._name, **hints)
def _as_integral(self, f, x, k):
from sympy import Integral, exp, I
a = self.__class__._a
b = self.__class__._b
return Integral(a*f*exp(b*I*x*k), (x, -oo, oo))
class FourierTransform(FourierTypeTransform):
"""
Class representing unevaluated Fourier transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute Fourier transforms, see the :func:`fourier_transform`
docstring.
"""
_name = 'Fourier'
_a = 1
_b = -2*S.Pi
def fourier_transform(f, x, k, **hints):
r"""
Compute the unitary, ordinary-frequency Fourier transform of `f`, defined
as
.. math:: F(k) = \int_{-\infty}^\infty f(x) e^{-2\pi i x k} \mathrm{d} x.
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`FourierTransform` object.
For other Fourier transform conventions, see the function
:func:`sympy.integrals.transforms._fourier_transform`.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
>>> from sympy import fourier_transform, exp
>>> from sympy.abc import x, k
>>> fourier_transform(exp(-x**2), x, k)
sqrt(pi)*exp(-pi**2*k**2)
>>> fourier_transform(exp(-x**2), x, k, noconds=False)
(sqrt(pi)*exp(-pi**2*k**2), True)
See Also
========
inverse_fourier_transform
sine_transform, inverse_sine_transform
cosine_transform, inverse_cosine_transform
hankel_transform, inverse_hankel_transform
mellin_transform, laplace_transform
"""
return FourierTransform(f, x, k).doit(**hints)
class InverseFourierTransform(FourierTypeTransform):
"""
Class representing unevaluated inverse Fourier transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute inverse Fourier transforms, see the
:func:`inverse_fourier_transform` docstring.
"""
_name = 'Inverse Fourier'
_a = 1
_b = 2*S.Pi
def inverse_fourier_transform(F, k, x, **hints):
r"""
Compute the unitary, ordinary-frequency inverse Fourier transform of `F`,
defined as
.. math:: f(x) = \int_{-\infty}^\infty F(k) e^{2\pi i x k} \mathrm{d} k.
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`InverseFourierTransform` object.
For other Fourier transform conventions, see the function
:func:`sympy.integrals.transforms._fourier_transform`.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
>>> from sympy import inverse_fourier_transform, exp, sqrt, pi
>>> from sympy.abc import x, k
>>> inverse_fourier_transform(sqrt(pi)*exp(-(pi*k)**2), k, x)
exp(-x**2)
>>> inverse_fourier_transform(sqrt(pi)*exp(-(pi*k)**2), k, x, noconds=False)
(exp(-x**2), True)
See Also
========
fourier_transform
sine_transform, inverse_sine_transform
cosine_transform, inverse_cosine_transform
hankel_transform, inverse_hankel_transform
mellin_transform, laplace_transform
"""
return InverseFourierTransform(F, k, x).doit(**hints)
##########################################################################
# Fourier Sine and Cosine Transform
##########################################################################
from sympy import sin, cos, sqrt, pi, I, oo
@_noconds_(True)
def _sine_cosine_transform(f, x, k, a, b, K, name, simplify=True):
"""
Compute a general sine or cosine-type transform
        F(k) = a int_0^oo sin(b*x*k) f(x) dx.
        F(k) = a int_0^oo cos(b*x*k) f(x) dx.
For suitable choice of a and b, this reduces to the standard sine/cosine
and inverse sine/cosine transforms.
"""
F = integrate(a*f*K(b*x*k), (x, 0, oo))
if not F.has(Integral):
return _simplify(F, simplify), True
if not F.is_Piecewise:
raise IntegralTransformError(name, f, 'could not compute integral')
F, cond = F.args[0]
if F.has(Integral):
raise IntegralTransformError(name, f, 'integral in unexpected form')
return _simplify(F, simplify), cond
class SineCosineTypeTransform(IntegralTransform):
"""
Base class for sine and cosine transforms.
Specify cls._a and cls._b and cls._kern.
"""
def _compute_transform(self, f, x, k, **hints):
return _sine_cosine_transform(f, x, k,
self.__class__._a, self.__class__._b,
self.__class__._kern,
self.__class__._name, **hints)
def _as_integral(self, f, x, k):
from sympy import Integral, exp, I
a = self.__class__._a
b = self.__class__._b
K = self.__class__._kern
return Integral(a*f*K(b*x*k), (x, 0, oo))
class SineTransform(SineCosineTypeTransform):
"""
Class representing unevaluated sine transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute sine transforms, see the :func:`sine_transform`
docstring.
"""
_name = 'Sine'
_kern = sin
_a = sqrt(2)/sqrt(pi)
_b = 1
def sine_transform(f, x, k, **hints):
r"""
Compute the unitary, ordinary-frequency sine transform of `f`, defined
as
.. math:: F(k) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty f(x) \sin(2\pi x k) \mathrm{d} x.
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`SineTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
>>> from sympy import sine_transform, exp
>>> from sympy.abc import x, k, a
>>> sine_transform(x*exp(-a*x**2), x, k)
sqrt(2)*k*exp(-k**2/(4*a))/(4*a**(3/2))
>>> sine_transform(x**(-a), x, k)
2**(-a + 1/2)*k**(a - 1)*gamma(-a/2 + 1)/gamma(a/2 + 1/2)
See Also
========
fourier_transform, inverse_fourier_transform
inverse_sine_transform
cosine_transform, inverse_cosine_transform
hankel_transform, inverse_hankel_transform
mellin_transform, laplace_transform
"""
return SineTransform(f, x, k).doit(**hints)
class InverseSineTransform(SineCosineTypeTransform):
"""
Class representing unevaluated inverse sine transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute inverse sine transforms, see the
:func:`inverse_sine_transform` docstring.
"""
_name = 'Inverse Sine'
_kern = sin
_a = sqrt(2)/sqrt(pi)
_b = 1
def inverse_sine_transform(F, k, x, **hints):
r"""
Compute the unitary, ordinary-frequency inverse sine transform of `F`,
defined as
.. math:: f(x) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty F(k) \sin(2\pi x k) \mathrm{d} k.
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`InverseSineTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
>>> from sympy import inverse_sine_transform, exp, sqrt, gamma, pi
>>> from sympy.abc import x, k, a
>>> inverse_sine_transform(2**((1-2*a)/2)*k**(a - 1)*
... gamma(-a/2 + 1)/gamma((a+1)/2), k, x)
x**(-a)
>>> inverse_sine_transform(sqrt(2)*k*exp(-k**2/(4*a))/(4*sqrt(a)**3), k, x)
x*exp(-a*x**2)
See Also
========
fourier_transform, inverse_fourier_transform
sine_transform
cosine_transform, inverse_cosine_transform
hankel_transform, inverse_hankel_transform
mellin_transform, laplace_transform
"""
return InverseSineTransform(F, k, x).doit(**hints)
class CosineTransform(SineCosineTypeTransform):
"""
Class representing unevaluated cosine transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute cosine transforms, see the :func:`cosine_transform`
docstring.
"""
_name = 'Cosine'
_kern = cos
_a = sqrt(2)/sqrt(pi)
_b = 1
def cosine_transform(f, x, k, **hints):
r"""
Compute the unitary, ordinary-frequency cosine transform of `f`, defined
as
.. math:: F(k) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty f(x) \cos(2\pi x k) \mathrm{d} x.
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`CosineTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
>>> from sympy import cosine_transform, exp, sqrt, cos
>>> from sympy.abc import x, k, a
>>> cosine_transform(exp(-a*x), x, k)
sqrt(2)*a/(sqrt(pi)*(a**2 + k**2))
>>> cosine_transform(exp(-a*sqrt(x))*cos(a*sqrt(x)), x, k)
a*exp(-a**2/(2*k))/(2*k**(3/2))
See Also
========
fourier_transform, inverse_fourier_transform,
sine_transform, inverse_sine_transform
inverse_cosine_transform
hankel_transform, inverse_hankel_transform
mellin_transform, laplace_transform
"""
return CosineTransform(f, x, k).doit(**hints)
class InverseCosineTransform(SineCosineTypeTransform):
"""
Class representing unevaluated inverse cosine transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute inverse cosine transforms, see the
:func:`inverse_cosine_transform` docstring.
"""
_name = 'Inverse Cosine'
_kern = cos
_a = sqrt(2)/sqrt(pi)
_b = 1
def inverse_cosine_transform(F, k, x, **hints):
r"""
Compute the unitary, ordinary-frequency inverse cosine transform of `F`,
defined as
.. math:: f(x) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty F(k) \cos(2\pi x k) \mathrm{d} k.
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`InverseCosineTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
>>> from sympy import inverse_cosine_transform, exp, sqrt, pi
>>> from sympy.abc import x, k, a
>>> inverse_cosine_transform(sqrt(2)*a/(sqrt(pi)*(a**2 + k**2)), k, x)
exp(-a*x)
>>> inverse_cosine_transform(1/sqrt(k), k, x)
1/sqrt(x)
See Also
========
fourier_transform, inverse_fourier_transform,
sine_transform, inverse_sine_transform
cosine_transform
hankel_transform, inverse_hankel_transform
mellin_transform, laplace_transform
"""
return InverseCosineTransform(F, k, x).doit(**hints)
##########################################################################
# Hankel Transform
##########################################################################
@_noconds_(True)
def _hankel_transform(f, r, k, nu, name, simplify=True):
    r"""
Compute a general Hankel transform
.. math:: F_\nu(k) = \int_{0}^\infty f(r) J_\nu(k r) r \mathrm{d} r.
"""
from sympy import besselj, oo
F = integrate(f*besselj(nu, k*r)*r, (r, 0, oo))
if not F.has(Integral):
return _simplify(F, simplify), True
if not F.is_Piecewise:
raise IntegralTransformError(name, f, 'could not compute integral')
F, cond = F.args[0]
if F.has(Integral):
raise IntegralTransformError(name, f, 'integral in unexpected form')
return _simplify(F, simplify), cond
class HankelTypeTransform(IntegralTransform):
"""
Base class for Hankel transforms.
"""
nargs = 4
def doit(self, **hints):
return self._compute_transform(self.function,
self.function_variable,
self.transform_variable,
self.args[3],
**hints)
def _compute_transform(self, f, r, k, nu, **hints):
return _hankel_transform(f, r, k, nu, self._name, **hints)
def _as_integral(self, f, r, k, nu):
from sympy import Integral, besselj, oo
return Integral(f*besselj(nu, k*r)*r, (r, 0, oo))
@property
def as_integral(self):
return self._as_integral(self.function,
self.function_variable,
self.transform_variable,
self.args[3])
class HankelTransform(HankelTypeTransform):
"""
Class representing unevaluated Hankel transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute Hankel transforms, see the :func:`hankel_transform`
docstring.
"""
_name = 'Hankel'
def hankel_transform(f, r, k, nu, **hints):
r"""
Compute the Hankel transform of `f`, defined as
.. math:: F_\nu(k) = \int_{0}^\infty f(r) J_\nu(k r) r \mathrm{d} r.
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`HankelTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
>>> from sympy import hankel_transform, inverse_hankel_transform
>>> from sympy import gamma, exp, sinh, cosh
>>> from sympy.abc import r, k, m, nu, a
>>> ht = hankel_transform(1/r**m, r, k, nu)
>>> ht
2*2**(-m)*k**(m - 2)*gamma(-m/2 + nu/2 + 1)/gamma(m/2 + nu/2)
>>> inverse_hankel_transform(ht, k, r, nu)
r**(-m)
>>> ht = hankel_transform(exp(-a*r), r, k, 0)
>>> ht
a/(k**3*(a**2/k**2 + 1)**(3/2))
>>> inverse_hankel_transform(ht, k, r, 0)
exp(-a*r)
See Also
========
fourier_transform, inverse_fourier_transform
sine_transform, inverse_sine_transform
cosine_transform, inverse_cosine_transform
inverse_hankel_transform
mellin_transform, laplace_transform
"""
return HankelTransform(f, r, k, nu).doit(**hints)
class InverseHankelTransform(HankelTypeTransform):
"""
Class representing unevaluated inverse Hankel transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute inverse Hankel transforms, see the
:func:`inverse_hankel_transform` docstring.
"""
_name = 'Inverse Hankel'
def inverse_hankel_transform(F, k, r, nu, **hints):
r"""
Compute the inverse Hankel transform of `F` defined as
.. math:: f(r) = \int_{0}^\infty F_\nu(k) J_\nu(k r) k \mathrm{d} k.
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`InverseHankelTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
>>> from sympy import hankel_transform, inverse_hankel_transform, gamma
>>> from sympy import gamma, exp, sinh, cosh
>>> from sympy.abc import r, k, m, nu, a
>>> ht = hankel_transform(1/r**m, r, k, nu)
>>> ht
2*2**(-m)*k**(m - 2)*gamma(-m/2 + nu/2 + 1)/gamma(m/2 + nu/2)
>>> inverse_hankel_transform(ht, k, r, nu)
r**(-m)
>>> ht = hankel_transform(exp(-a*r), r, k, 0)
>>> ht
a/(k**3*(a**2/k**2 + 1)**(3/2))
>>> inverse_hankel_transform(ht, k, r, 0)
exp(-a*r)
See Also
========
fourier_transform, inverse_fourier_transform
sine_transform, inverse_sine_transform
cosine_transform, inverse_cosine_transform
hankel_transform
mellin_transform, laplace_transform
"""
return InverseHankelTransform(F, k, r, nu).doit(**hints)
|
the-stack_0_1724 | #!/usr/bin/env python
from collections import OrderedDict
import json
import numpy
from kernel_tuner import tune_kernel
from tune_utils import get_kernel_path
def tune_expdist():
device = 2
tune_params = OrderedDict()
tune_params["block_size_x"] = [32] #[2**i for i in range(5,10)]
tune_params["block_size_y"] = [2**i for i in range(6)]
tune_params["tile_size_x"] = [2**i for i in range(4)]
tune_params["tile_size_y"] = [2**i for i in range(4)]
tune_params["use_shared_mem"] = [1] #[0, 1]
#setup test input
alloc_size = 22000
size = numpy.int32(20000)
max_blocks = numpy.int32( numpy.ceil(size / float(numpy.amin(tune_params["block_size_x"]))) *
numpy.ceil(size / float(numpy.amin(tune_params["block_size_y"]))) )
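    # Worked example of this sizing (editorial note): with the smallest tuned
    # block sizes, block_size_x = 32 and block_size_y = 1, the grid can contain
    # ceil(20000 / 32) * ceil(20000 / 1) = 625 * 20000 = 12,500,000 blocks, so
    # the per-block partial-cost buffer allocated below covers that worst case
    # (tile sizes only ever shrink the grid, never grow it).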
ndim = numpy.int32(2)
A = numpy.random.randn(alloc_size*ndim).astype(numpy.float64)
B = A+0.00001*numpy.random.randn(alloc_size*ndim).astype(numpy.float64)
scale_A = numpy.absolute(0.01*numpy.random.randn(alloc_size).astype(numpy.float64))
scale_B = numpy.absolute(0.01*numpy.random.randn(alloc_size).astype(numpy.float64))
cost = numpy.zeros((max_blocks)).astype(numpy.float64)
#time the reference function
#arguments = [cost, A, B, size, size, ndim, scale_A, scale_B]
#with open(get_kernel_path('expdist')+'expdist_c.cpp', 'r') as f:
# kernel_string = f.read()
#print("CPU timing")
#tune_kernel("time_expdist", kernel_string, size, arguments, {"block_size_x": [1]},
# lang="C", compiler_options=['-I'+get_kernel_path('expdist'), '-O3'], device=2)
#tune the GPU function
print("GPU timing")
with open(get_kernel_path('expdist')+'kernels.cu', 'r') as f:
kernel_string = f.read()
arguments = [A, B, size, size, scale_A, scale_B, cost]
cp = ['-O3']
grid_div_x = ["block_size_x", "tile_size_x"]
grid_div_y = ["block_size_y", "tile_size_y"]
kernel1 = tune_kernel("ExpDist", kernel_string, (size, size), arguments, tune_params,
compiler_options=cp, grid_div_x=grid_div_x, grid_div_y=grid_div_y, device=2)
with open("expdist.json", 'w') as fp:
json.dump(kernel1, fp)
best_config1 = min(kernel1[0], key=lambda x:x['time'])
nblocks = numpy.int32( numpy.ceil(size / float(best_config1["block_size_x"]*best_config1["tile_size_x"])) *
numpy.ceil(size / float(best_config1["block_size_y"]*best_config1["tile_size_y"])) )
tune_params = OrderedDict()
tune_params["block_size_x"] = [32*i for i in range(1,33)]
arguments = [numpy.zeros(1).astype(numpy.float64), cost, size, size, nblocks]
kernel2 = tune_kernel("reduce_cross_term", kernel_string, 1, arguments, tune_params,
grid_div_x=[], compiler_options=cp, device=2)
best_config2 = min(kernel2[0], key=lambda x:x['time'])
print("best GPU configuration, total time=", best_config1['time'] + best_config2['time'])
print(best_config1)
print(best_config2)
if __name__ == "__main__":
tune_expdist()
|
the-stack_0_1725 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libxxf86vm(AutotoolsPackage):
"""libXxf86vm - Extension library for the XFree86-VidMode X extension."""
homepage = "http://cgit.freedesktop.org/xorg/lib/libXxf86vm"
url = "https://www.x.org/archive/individual/lib/libXxf86vm-1.1.4.tar.gz"
version('1.1.4', sha256='5108553c378a25688dcb57dca383664c36e293d60b1505815f67980ba9318a99')
    depends_on('libx11@1.6:')
depends_on('libxext')
depends_on('xproto', type='build')
depends_on('xextproto', type='build')
    depends_on('xf86vidmodeproto@2.2.99.1:', type='build')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
|
the-stack_0_1726 | from django.contrib import admin
from django.forms.models import ModelForm
import django
from moderation.models import ModeratedObject, MODERATION_DRAFT_STATE,\
MODERATION_STATUS_PENDING, MODERATION_STATUS_REJECTED,\
MODERATION_STATUS_APPROVED
from django.utils.translation import ugettext as _
from moderation.forms import BaseModeratedObjectForm
from moderation.helpers import automoderate
from moderation.diff import get_changes_between_models
def approve_objects(modeladmin, request, queryset):
for obj in queryset:
obj.approve(moderated_by=request.user)
approve_objects.short_description = "Approve selected moderated objects"
def reject_objects(modeladmin, request, queryset):
for obj in queryset:
obj.reject(moderated_by=request.user)
reject_objects.short_description = "Reject selected moderated objects"
def set_objects_as_pending(modeladmin, request, queryset):
queryset.update(moderation_status=MODERATION_STATUS_PENDING)
set_objects_as_pending.short_description = "Set selected moderated objects "\
"as Pending"
class ModerationAdmin(admin.ModelAdmin):
admin_integration_enabled = True
def get_form(self, request, obj=None):
if obj and self.admin_integration_enabled:
return self.get_moderated_object_form(obj.__class__)
return super(ModerationAdmin, self).get_form(request, obj)
def change_view(self, request, object_id, extra_context=None):
if self.admin_integration_enabled:
self.send_message(request, object_id)
return super(ModerationAdmin, self).change_view(request, object_id)
def send_message(self, request, object_id):
try:
obj = self.model.objects.get(pk=object_id)
moderated_obj = ModeratedObject.objects.get_for_instance(obj)
msg = self.get_moderation_message(moderated_obj.moderation_status,
moderated_obj.moderation_reason)
except ModeratedObject.DoesNotExist:
msg = self.get_moderation_message()
self.message_user(request, msg)
def save_model(self, request, obj, form, change):
obj.save()
automoderate(obj, request.user)
def get_moderation_message(self, moderation_status=None, reason=None):
if moderation_status == MODERATION_STATUS_PENDING:
return _(u"Object is not viewable on site, "\
"it will be visible when moderator will accept it")
elif moderation_status == MODERATION_STATUS_REJECTED:
return _(u"Object has been rejected by moderator, "\
"reason: %s" % reason)
elif moderation_status == MODERATION_STATUS_APPROVED:
return _(u"Object has been approved by moderator "\
"and is visible on site")
elif moderation_status is None:
return _("This object is not registered with "\
"the moderation system.")
def get_moderated_object_form(self, model_class):
class ModeratedObjectForm(BaseModeratedObjectForm):
class Meta:
model = model_class
return ModeratedObjectForm
class ModeratedObjectAdmin(admin.ModelAdmin):
date_hierarchy = 'date_created'
list_display = ('content_object', 'content_type', 'date_created',
'moderation_status', 'moderated_by', 'moderation_date')
list_filter = ['content_type', 'moderation_status']
change_form_template = 'moderation/moderate_object.html'
change_list_template = 'moderation/moderated_objects_list.html'
actions = [reject_objects, approve_objects, set_objects_as_pending]
fieldsets = (
('Object moderation', {'fields': ('moderation_reason',)}),
)
def get_actions(self, request):
actions = super(ModeratedObjectAdmin, self).get_actions(request)
del actions['delete_selected']
return actions
def content_object(self, obj):
return unicode(obj.changed_object)
def queryset(self, request):
qs = super(ModeratedObjectAdmin, self).queryset(request)
return qs.exclude(moderation_state=MODERATION_DRAFT_STATE)
def get_moderated_object_form(self, model_class):
class ModeratedObjectForm(ModelForm):
class Meta:
model = model_class
return ModeratedObjectForm
def change_view(self, request, object_id, extra_context=None):
from moderation import moderation
moderated_object = ModeratedObject.objects.get(pk=object_id)
changed_object = moderated_object.changed_object
moderator = moderation.get_moderator(changed_object.__class__)
changes = get_changes_between_models(
moderated_object.get_object_for_this_type(),
changed_object,
moderator.fields_exclude).values()
if request.POST:
admin_form = self.get_form(request, moderated_object)(request.POST)
if admin_form.is_valid():
reason = admin_form.cleaned_data['moderation_reason']
if 'approve' in request.POST:
moderated_object.approve(request.user, reason)
elif 'reject' in request.POST:
moderated_object.reject(request.user, reason)
extra_context = {'changes': changes,
'django_version': django.get_version()[:3]}
return super(ModeratedObjectAdmin, self).change_view(request,
object_id,
extra_context)
admin.site.register(ModeratedObject, ModeratedObjectAdmin)
|
the-stack_0_1727 | import sys
import collections
import operator
import itertools
from bisect import bisect_left
import os
import glob
import concurrent.futures
import math
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import DIcommonfn
import DIread
import DIreadlib
Ent=DIcommonfn.Ent
param_set={
"mzML_files",
"library",
"ms1_ppm",
"ms2_ppm",
"MS2_score",
"sample_info",
}
param_dict=DIcommonfn.read_param(param_set)
mzML_files=sorted(glob.glob(param_dict["mzML_files"]))
basename_l=[os.path.basename(x)for x in mzML_files]
ms1ppm=float(param_dict['ms1_ppm'])/1e6
MS2_score=float(param_dict["MS2_score"])
def get_sample_info():
sample_info_file=glob.glob(param_dict["sample_info"])[0]
sample_type=dict()
with open(sample_info_file)as sfile:
next(sfile)
for line in sfile:
if line[0]!='#'and line.rstrip():
lsp=line.rstrip('\n').split('\t')
sample_type[lsp[0]]=lsp[1].upper()
return sample_type
def cos_sim(list1,list2):
if len(list1)!=len(list2):
        print('cos_sim: input intensity lists must have the same length')
sys.exit()
if sum(list1)<=0 or sum(list2)<=0: return 0
return sum(math.sqrt(x*y) for x,y in zip(list1,list2))/math.sqrt(sum(list1)*sum(list2))
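# Editorial note (not part of the original script): cos_sim above is the cosine
# similarity of square-root-transformed intensity vectors, a common MS/MS
# spectral match score. Identical spectra score 1.0 and spectra with no shared
# peaks score 0.0, for example:
#   cos_sim([1.0, 4.0, 9.0], [1.0, 4.0, 9.0])  # -> 1.0
#   cos_sim([1.0, 0.0, 0.0], [0.0, 1.0, 0.0])  # -> 0.0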
def aveMS1spec(mzML_file):
basename0=os.path.basename(mzML_file)
print(basename0)
ms1_scans,ms2_scans,_=DIread.print_eic_ms(mzML_file)
ms1_peaks=sorted((mz,i,ii) for ii,ms1_ in enumerate(ms1_scans) for mz,i in zip(ms1_.mz_l,ms1_.I_l))
avespec=[]
while ms1_peaks:
maxI=max(ms1_peaks,key=operator.itemgetter(1))[0]
pos0=bisect_left(ms1_peaks,(maxI-.005,))
pos1=bisect_left(ms1_peaks,(maxI+.005,))
if len(set(ii for _,_,ii in ms1_peaks[pos0:pos1]))>len(ms1_scans)/3:
avespec.append((maxI,sum(i for _,i,_ in ms1_peaks[pos0:pos1])/len(ms1_scans)))
del ms1_peaks[pos0:pos1]
return sorted(avespec),ms2_scans,basename0
from multiprocessing import freeze_support
if __name__ == '__main__':
start_time = time.time()
print(len(mzML_files),'mzML files')
freeze_support()
sample_type=get_sample_info()
with concurrent.futures.ProcessPoolExecutor(max_workers=9) as executor:
avespec_l=list(executor.map(aveMS1spec, mzML_files))
ms1_peaks=sorted((mz,i,ii) for ms1_,_,ii in avespec_l for mz,i in ms1_)
conspec=[]
while ms1_peaks:
maxI=max(ms1_peaks,key=operator.itemgetter(1))[0]
pos0=bisect_left(ms1_peaks,(maxI-.005,))
pos1=bisect_left(ms1_peaks,(maxI+.005,))
if len(set(ii for _,_,ii in ms1_peaks[pos0:pos1]if sample_type[ii]!='BLANK'))>=5:
conspec.append(maxI)
del ms1_peaks[pos0:pos1]
lib_ent=DIreadlib.get_cpds()
con_tab=dict()
con_ms1=dict()
for ii,conmz in enumerate(sorted(conspec),1):
con_tab[(conmz,ii)]=dict()
err_bd=DIcommonfn.bound_ppm(conmz*ms1ppm)
pos_0=bisect_left(lib_ent,(conmz-err_bd,))
pos_1=bisect_left(lib_ent,(conmz+err_bd,))
ms1name=[x.replace('\n','---') for _,x in sorted(set((abs(ent.Mmass-conmz),ent.name)for ent in lib_ent[pos_0:pos_1]))]
con_ms1[conmz]=ms1name
for avespec,ms2_scans,basename0 in avespec_l:
pos=bisect_left(ms2_scans,(conmz,))
if pos>0 and ms2_scans[pos][0]-conmz > conmz-ms2_scans[pos-1][0]:
pos-=1
expMS2=ms2_scans[pos]
top10=sorted(expMS2[1].I_l,reverse=True)[min(len(expMS2[1].I_l)-1,9)]
topN=[x for x in zip(expMS2[1].mz_l,expMS2[1].I_l)if x[1]>=top10]
topmz=[x for x,_ in topN]
topI=[x for _,x in topN]
if abs(conmz-expMS2[0])>.51:
print('{} {} {} {}'.format(abs(conmz-expMS2[0]),expMS2[0],conmz,basename0))
score_ent=[]
for ent in lib_ent[pos_0:pos_1]:
ms2_I=[]
ent_I=[]
xfrag=set()
hpeak=0
mpeak=0
fent=sorted([(y,x)for x,y in zip(ent.mz,ent.I)if(ent.charge*conmz-x)>3.3],reverse=True)[:10]
for f_I,f_mz in fent:
err_bd=.01
pos0=bisect_left(topmz,f_mz-err_bd)
pos1=bisect_left(topmz,f_mz+err_bd,lo=pos0)
ent_I.append(f_I)
if pos0!=pos1:
ms2_I.append(max(topI[pos0:pos1]))
for i in range(pos0,pos1): xfrag.add(i)
if f_I==fent[0][0]: hpeak=1
mpeak+=1
else:
ms2_I.append(0)
if hpeak:
for nn,(f_mz,f_I) in enumerate(zip(topmz,topI)):
if nn not in xfrag and (ent.charge*conmz-f_mz)>3.3:
ms2_I.append(f_I)
ent_I.append(0)
cs=cos_sim(ent_I,ms2_I)
score_ent.append((cs,ent,mpeak))
pos0=bisect_left(avespec,(conmz-.005,))
pos1=bisect_left(avespec,(conmz+.005,))
aveI=sum(x for _,x in avespec[pos0:pos1])
ave_mz=(sum(mz*i for mz,i in avespec[pos0:pos1])/aveI if aveI>0 else None)
if score_ent:
score_ent=max(score_ent)
sc=score_ent[0]
mpeak=score_ent[2]
ent=score_ent[1]
exp_mz=expMS2[1].mz_l
exp_I=expMS2[1].I_l
else:
sc=None
mpeak=None
ent=Ent(conmz,'m/z={:.4f}'.format(conmz),tuple(),tuple(),'',1,None,'')
exp_mz=exp_I=tuple()
con_tab[(conmz,ii)][basename0]=(aveI,sc,ent,exp_mz,exp_I,ave_mz,mpeak)
for basename0 in basename_l:
open('ann_{}.txt'.format(basename0),'w')
frago=open('quant_frag.txt','w')
frago.write('group\tID\talt_IDs\tMS1\tmz\tadduct\tID\tcount\tfrag_m/z\t'+'\t'.join(basename_l)+'\t'+'\t'.join('score_'+x for x in basename_l)+'\t'+'\t'.join('mass_error_'+x for x in basename_l)+'\n')
for x,y in sorted(con_tab.items(),key=lambda x:x[0][0]):
c=collections.Counter(yy[2] for yy in y.values())
ent,cn=c.most_common(1)[0]
identified=(''if all(yy[2].name.startswith('m/z=')for yy in y.values())else'*')
id_with_count='{} ({})'.format(ent.name.replace('\n','---'),cn)
alt_id_with_count=' --- '.join('{} ({})'.format(ent.name.replace('\n','---'),cn) for ent,cn in c.most_common()[1:])
count_pos=sum(1 for qs in y.values() if qs[0]>0)
frago.write('{}\t{}\t{}\t{}\t{:.4f}\t{}\t{}\t{}\t{}'.format(x[1],id_with_count,alt_id_with_count,' --- '.join(con_ms1[x[0]]),x[0],ent.adduct,identified,count_pos,'precursor'))
for basename0 in basename_l:
qs=y[basename0]
frago.write('\t{:.1f}'.format(qs[0]))
for basename0 in basename_l:
qs=y[basename0]
frago.write('\t{:.2f}'.format(qs[1],qs[6])if qs and qs[1] else '\t')
for basename0 in basename_l:
qs=y[basename0]
frago.write('\t{:.3f}'.format(ent.Mmass-qs[5])if qs[1]and qs[5]else '\t')
frago.write('\n')
if ent.mz:
mzML_f=dict()
for basename0 in basename_l:
qs=y[basename0]
mzML_f[basename0]=dict()
for f_mz in ent.mz:
pos0=bisect_left(qs[3],f_mz-.01)
pos1=bisect_left(qs[3],f_mz+.01)
mzML_f[basename0][f_mz]=(max(qs[4][pos0:pos1])if pos0<pos1 else 0)
maxf=max(mzML_f[basename0].values())
if maxf>0:
for f_mz in ent.mz:
mzML_f[basename0][f_mz]/=maxf
for f_mz,_ in sorted(zip(ent.mz,ent.I),key=operator.itemgetter(1),reverse=True):
frago.write('{}\t{}\t{}\t{}\t{:.4f}\t{}\t{}\t{}\t{}'.format(x[1],id_with_count,alt_id_with_count,'',x[0],ent.adduct,identified,sum(1 for basename0 in basename_l if mzML_f[basename0][f_mz]>0),f_mz))
for basename0 in basename_l:
frago.write('\t{:.2f}'.format(mzML_f[basename0][f_mz]))
frago.write('\n')
for basename0 in basename_l:
qs=y[basename0]
if qs[1]:
with open('ann_{}.txt'.format(os.path.basename(basename0)),'a')as ann:
ann.write('NAME:\n')
ann.write('{}\n'.format(qs[2].name))
ann.write('ADDUCT: {}\n'.format(qs[2].adduct))
ann.write('TARGET_M/Z: {:.6f}\n'.format(x[0]))
ann.write('DOT_PRODUCT: {:.3f}\n'.format(qs[1]))
ann.write('EXPERIMENTAL_SPECTRUM:\n')
for mz,i in sorted(zip(qs[3],qs[4]),key=operator.itemgetter(1),reverse=True):
ann.write('{:.6f} {:.2f}\n'.format(mz,i))
ann.write('LIBRARY_SPECTRUM:\n')
for mz,i in zip(qs[2].mz,qs[2].I):
ann.write('{:.6f} {:.2f}\n'.format(mz,i))
ann.write('\n')
with PdfPages('aveMS1spec.pdf') as pdf0:
for avespec,_,basename0 in avespec_l:
plt.figure(figsize=(9, 4))
ax=plt.subplot(1,1,1)
ax.set_title('{} ave. MS1'.format(basename0[:-5]))
exp_=ax.vlines(x=[mz for mz,_ in avespec], ymin=0, ymax=[i for _,i in avespec], color='black',lw=.5)
ax.set_xlabel('m/z')#,fontsize=22)
ax.set_ylabel('intensity')#,fontsize=22)
pdf0.savefig()
plt.close()
print("Run time = {:.1f} mins".format(((time.time() - start_time)/60)))
|
the-stack_0_1728 | #!/usr/bin/env python3
# Copyright (c) 2021 The Elixir Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""RPCs that handle raw transaction packages."""
from decimal import Decimal
from io import BytesIO
import random
from test_framework.address import ADDRESS_BCRT1_P2WSH_OP_TRUE
from test_framework.test_framework import ElixirTestFramework
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
COIN,
CTransaction,
CTxInWitness,
)
from test_framework.script import (
CScript,
OP_TRUE,
)
from test_framework.util import (
assert_equal,
hex_str_to_bytes,
)
class RPCPackagesTest(ElixirTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def assert_testres_equal(self, package_hex, testres_expected):
"""Shuffle package_hex and assert that the testmempoolaccept result matches testres_expected. This should only
be used to test packages where the order does not matter. The ordering of transactions in package_hex and
testres_expected must match.
"""
shuffled_indeces = list(range(len(package_hex)))
random.shuffle(shuffled_indeces)
shuffled_package = [package_hex[i] for i in shuffled_indeces]
shuffled_testres = [testres_expected[i] for i in shuffled_indeces]
assert_equal(shuffled_testres, self.nodes[0].testmempoolaccept(shuffled_package))
def run_test(self):
self.log.info("Generate blocks to create UTXOs")
node = self.nodes[0]
self.privkeys = [node.get_deterministic_priv_key().key]
self.address = node.get_deterministic_priv_key().address
self.coins = []
# The last 100 coinbase transactions are premature
for b in node.generatetoaddress(200, self.address)[:100]:
coinbase = node.getblock(blockhash=b, verbosity=2)["tx"][0]
self.coins.append({
"txid": coinbase["txid"],
"amount": coinbase["vout"][0]["value"],
"scriptPubKey": coinbase["vout"][0]["scriptPubKey"],
})
# Create some transactions that can be reused throughout the test. Never submit these to mempool.
self.independent_txns_hex = []
self.independent_txns_testres = []
for _ in range(3):
coin = self.coins.pop()
rawtx = node.createrawtransaction([{"txid": coin["txid"], "vout": 0}],
{self.address : coin["amount"] - Decimal("0.0001")})
signedtx = node.signrawtransactionwithkey(hexstring=rawtx, privkeys=self.privkeys)
assert signedtx["complete"]
testres = node.testmempoolaccept([signedtx["hex"]])
assert testres[0]["allowed"]
self.independent_txns_hex.append(signedtx["hex"])
# testmempoolaccept returns a list of length one, avoid creating a 2D list
self.independent_txns_testres.append(testres[0])
self.independent_txns_testres_blank = [{
"txid": res["txid"], "wtxid": res["wtxid"]} for res in self.independent_txns_testres]
self.test_independent()
self.test_chain()
self.test_multiple_children()
self.test_multiple_parents()
self.test_conflicting()
self.test_rbf()
def chain_transaction(self, parent_txid, parent_value, n=0, parent_locking_script=None):
"""Build a transaction that spends parent_txid.vout[n] and produces one output with
amount = parent_value with a fee deducted.
Return tuple (CTransaction object, raw hex, nValue, scriptPubKey of the output created).
"""
node = self.nodes[0]
inputs = [{"txid": parent_txid, "vout": n}]
my_value = parent_value - Decimal("0.0001")
outputs = {self.address : my_value}
rawtx = node.createrawtransaction(inputs, outputs)
prevtxs = [{
"txid": parent_txid,
"vout": n,
"scriptPubKey": parent_locking_script,
"amount": parent_value,
}] if parent_locking_script else None
signedtx = node.signrawtransactionwithkey(hexstring=rawtx, privkeys=self.privkeys, prevtxs=prevtxs)
tx = CTransaction()
assert signedtx["complete"]
tx.deserialize(BytesIO(hex_str_to_bytes(signedtx["hex"])))
return (tx, signedtx["hex"], my_value, tx.vout[0].scriptPubKey.hex())
def test_independent(self):
self.log.info("Test multiple independent transactions in a package")
node = self.nodes[0]
# For independent transactions, order doesn't matter.
self.assert_testres_equal(self.independent_txns_hex, self.independent_txns_testres)
self.log.info("Test an otherwise valid package with an extra garbage tx appended")
garbage_tx = node.createrawtransaction([{"txid": "00" * 32, "vout": 5}], {self.address: 1})
tx = CTransaction()
tx.deserialize(BytesIO(hex_str_to_bytes(garbage_tx)))
# Only the txid and wtxids are returned because validation is incomplete for the independent txns.
# Package validation is atomic: if the node cannot find a UTXO for any single tx in the package,
# it terminates immediately to avoid unnecessary, expensive signature verification.
package_bad = self.independent_txns_hex + [garbage_tx]
testres_bad = self.independent_txns_testres_blank + [{"txid": tx.rehash(), "wtxid": tx.getwtxid(), "allowed": False, "reject-reason": "missing-inputs"}]
self.assert_testres_equal(package_bad, testres_bad)
self.log.info("Check testmempoolaccept tells us when some transactions completed validation successfully")
coin = self.coins.pop()
tx_bad_sig_hex = node.createrawtransaction([{"txid": coin["txid"], "vout": 0}],
{self.address : coin["amount"] - Decimal("0.0001")})
tx_bad_sig = CTransaction()
tx_bad_sig.deserialize(BytesIO(hex_str_to_bytes(tx_bad_sig_hex)))
testres_bad_sig = node.testmempoolaccept(self.independent_txns_hex + [tx_bad_sig_hex])
# By the time the signature for the last transaction is checked, all the other transactions
# have been fully validated, which is why the node returns full validation results for all
# transactions here but empty results in other cases.
assert_equal(testres_bad_sig, self.independent_txns_testres + [{
"txid": tx_bad_sig.rehash(),
"wtxid": tx_bad_sig.getwtxid(), "allowed": False,
"reject-reason": "mandatory-script-verify-flag-failed (Operation not valid with the current stack size)"
}])
self.log.info("Check testmempoolaccept reports txns in packages that exceed max feerate")
coin = self.coins.pop()
tx_high_fee_raw = node.createrawtransaction([{"txid": coin["txid"], "vout": 0}],
{self.address : coin["amount"] - Decimal("0.999")})
tx_high_fee_signed = node.signrawtransactionwithkey(hexstring=tx_high_fee_raw, privkeys=self.privkeys)
assert tx_high_fee_signed["complete"]
tx_high_fee = CTransaction()
tx_high_fee.deserialize(BytesIO(hex_str_to_bytes(tx_high_fee_signed["hex"])))
testres_high_fee = node.testmempoolaccept([tx_high_fee_signed["hex"]])
assert_equal(testres_high_fee, [
{"txid": tx_high_fee.rehash(), "wtxid": tx_high_fee.getwtxid(), "allowed": False, "reject-reason": "max-fee-exceeded"}
])
package_high_fee = [tx_high_fee_signed["hex"]] + self.independent_txns_hex
testres_package_high_fee = node.testmempoolaccept(package_high_fee)
assert_equal(testres_package_high_fee, testres_high_fee + self.independent_txns_testres_blank)
def test_chain(self):
node = self.nodes[0]
first_coin = self.coins.pop()
# Chain of 25 transactions
parent_locking_script = None
txid = first_coin["txid"]
chain_hex = []
chain_txns = []
value = first_coin["amount"]
for _ in range(25):
(tx, txhex, value, parent_locking_script) = self.chain_transaction(txid, value, 0, parent_locking_script)
txid = tx.rehash()
chain_hex.append(txhex)
chain_txns.append(tx)
self.log.info("Check that testmempoolaccept requires packages to be sorted by dependency")
assert_equal(node.testmempoolaccept(rawtxs=chain_hex[::-1]),
[{"txid": tx.rehash(), "wtxid": tx.getwtxid(), "package-error": "package-not-sorted"} for tx in chain_txns[::-1]])
self.log.info("Testmempoolaccept a chain of 25 transactions")
testres_multiple = node.testmempoolaccept(rawtxs=chain_hex)
testres_single = []
# Test accept and then submit each one individually, which should be identical to package test accept
for rawtx in chain_hex:
testres = node.testmempoolaccept([rawtx])
testres_single.append(testres[0])
# Submit the transaction now so its child should have no problem validating
node.sendrawtransaction(rawtx)
assert_equal(testres_single, testres_multiple)
# Clean up by clearing the mempool
node.generate(1)
def test_multiple_children(self):
node = self.nodes[0]
self.log.info("Testmempoolaccept a package in which a transaction has two children within the package")
first_coin = self.coins.pop()
value = (first_coin["amount"] - Decimal("0.0002")) / 2 # Deduct reasonable fee and make 2 outputs
inputs = [{"txid": first_coin["txid"], "vout": 0}]
outputs = [{self.address : value}, {ADDRESS_BCRT1_P2WSH_OP_TRUE : value}]
rawtx = node.createrawtransaction(inputs, outputs)
parent_signed = node.signrawtransactionwithkey(hexstring=rawtx, privkeys=self.privkeys)
parent_tx = CTransaction()
assert parent_signed["complete"]
parent_tx.deserialize(BytesIO(hex_str_to_bytes(parent_signed["hex"])))
parent_txid = parent_tx.rehash()
assert node.testmempoolaccept([parent_signed["hex"]])[0]["allowed"]
parent_locking_script_a = parent_tx.vout[0].scriptPubKey.hex()
child_value = value - Decimal("0.0001")
# Child A
(_, tx_child_a_hex, _, _) = self.chain_transaction(parent_txid, child_value, 0, parent_locking_script_a)
assert not node.testmempoolaccept([tx_child_a_hex])[0]["allowed"]
# Child B
rawtx_b = node.createrawtransaction([{"txid": parent_txid, "vout": 1}], {self.address : child_value})
tx_child_b = CTransaction()
tx_child_b.deserialize(BytesIO(hex_str_to_bytes(rawtx_b)))
tx_child_b.wit.vtxinwit = [CTxInWitness()]
tx_child_b.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
tx_child_b_hex = tx_child_b.serialize().hex()
assert not node.testmempoolaccept([tx_child_b_hex])[0]["allowed"]
self.log.info("Testmempoolaccept with entire package, should work with children in either order")
testres_multiple_ab = node.testmempoolaccept(rawtxs=[parent_signed["hex"], tx_child_a_hex, tx_child_b_hex])
testres_multiple_ba = node.testmempoolaccept(rawtxs=[parent_signed["hex"], tx_child_b_hex, tx_child_a_hex])
assert all([testres["allowed"] for testres in testres_multiple_ab + testres_multiple_ba])
testres_single = []
# Test accept and then submit each one individually, which should be identical to package testaccept
for rawtx in [parent_signed["hex"], tx_child_a_hex, tx_child_b_hex]:
testres = node.testmempoolaccept([rawtx])
testres_single.append(testres[0])
# Submit the transaction now so its child should have no problem validating
node.sendrawtransaction(rawtx)
assert_equal(testres_single, testres_multiple_ab)
def create_child_with_parents(self, parents_tx, values, locking_scripts):
"""Creates a transaction that spends the first output of each parent in parents_tx."""
num_parents = len(parents_tx)
total_value = sum(values)
inputs = [{"txid": tx.rehash(), "vout": 0} for tx in parents_tx]
outputs = {self.address : total_value - num_parents * Decimal("0.0001")}
rawtx_child = self.nodes[0].createrawtransaction(inputs, outputs)
prevtxs = []
for i in range(num_parents):
prevtxs.append({"txid": parents_tx[i].rehash(), "vout": 0, "scriptPubKey": locking_scripts[i], "amount": values[i]})
signedtx_child = self.nodes[0].signrawtransactionwithkey(hexstring=rawtx_child, privkeys=self.privkeys, prevtxs=prevtxs)
assert signedtx_child["complete"]
return signedtx_child["hex"]
def test_multiple_parents(self):
node = self.nodes[0]
self.log.info("Testmempoolaccept a package in which a transaction has multiple parents within the package")
for num_parents in [2, 10, 24]:
# Test a package with num_parents parents and 1 child transaction.
package_hex = []
parents_tx = []
values = []
parent_locking_scripts = []
for _ in range(num_parents):
parent_coin = self.coins.pop()
value = parent_coin["amount"]
(tx, txhex, value, parent_locking_script) = self.chain_transaction(parent_coin["txid"], value)
package_hex.append(txhex)
parents_tx.append(tx)
values.append(value)
parent_locking_scripts.append(parent_locking_script)
child_hex = self.create_child_with_parents(parents_tx, values, parent_locking_scripts)
# Package accept should work with the parents in any order (as long as parents come before child)
for _ in range(10):
random.shuffle(package_hex)
testres_multiple = node.testmempoolaccept(rawtxs=package_hex + [child_hex])
assert all([testres["allowed"] for testres in testres_multiple])
testres_single = []
# Test accept and then submit each one individually, which should be identical to package testaccept
for rawtx in package_hex + [child_hex]:
testres_single.append(node.testmempoolaccept([rawtx])[0])
# Submit the transaction now so its child should have no problem validating
node.sendrawtransaction(rawtx)
assert_equal(testres_single, testres_multiple)
def test_conflicting(self):
node = self.nodes[0]
prevtx = self.coins.pop()
inputs = [{"txid": prevtx["txid"], "vout": 0}]
output1 = {node.get_deterministic_priv_key().address: 50 - 0.00125}
output2 = {ADDRESS_BCRT1_P2WSH_OP_TRUE: 50 - 0.00125}
# tx1 and tx2 share the same inputs
rawtx1 = node.createrawtransaction(inputs, output1)
rawtx2 = node.createrawtransaction(inputs, output2)
signedtx1 = node.signrawtransactionwithkey(hexstring=rawtx1, privkeys=self.privkeys)
signedtx2 = node.signrawtransactionwithkey(hexstring=rawtx2, privkeys=self.privkeys)
tx1 = CTransaction()
tx1.deserialize(BytesIO(hex_str_to_bytes(signedtx1["hex"])))
tx2 = CTransaction()
tx2.deserialize(BytesIO(hex_str_to_bytes(signedtx2["hex"])))
assert signedtx1["complete"]
assert signedtx2["complete"]
# Ensure tx1 and tx2 are valid by themselves
assert node.testmempoolaccept([signedtx1["hex"]])[0]["allowed"]
assert node.testmempoolaccept([signedtx2["hex"]])[0]["allowed"]
self.log.info("Test duplicate transactions in the same package")
testres = node.testmempoolaccept([signedtx1["hex"], signedtx1["hex"]])
assert_equal(testres, [
{"txid": tx1.rehash(), "wtxid": tx1.getwtxid(), "package-error": "conflict-in-package"},
{"txid": tx1.rehash(), "wtxid": tx1.getwtxid(), "package-error": "conflict-in-package"}
])
self.log.info("Test conflicting transactions in the same package")
testres = node.testmempoolaccept([signedtx1["hex"], signedtx2["hex"]])
assert_equal(testres, [
{"txid": tx1.rehash(), "wtxid": tx1.getwtxid(), "package-error": "conflict-in-package"},
{"txid": tx2.rehash(), "wtxid": tx2.getwtxid(), "package-error": "conflict-in-package"}
])
def test_rbf(self):
node = self.nodes[0]
coin = self.coins.pop()
inputs = [{"txid": coin["txid"], "vout": 0, "sequence": BIP125_SEQUENCE_NUMBER}]
fee = Decimal('0.00125000')
output = {node.get_deterministic_priv_key().address: 50 - fee}
raw_replaceable_tx = node.createrawtransaction(inputs, output)
signed_replaceable_tx = node.signrawtransactionwithkey(hexstring=raw_replaceable_tx, privkeys=self.privkeys)
testres_replaceable = node.testmempoolaccept([signed_replaceable_tx["hex"]])
replaceable_tx = CTransaction()
replaceable_tx.deserialize(BytesIO(hex_str_to_bytes(signed_replaceable_tx["hex"])))
assert_equal(testres_replaceable, [
{"txid": replaceable_tx.rehash(), "wtxid": replaceable_tx.getwtxid(),
"allowed": True, "vsize": replaceable_tx.get_vsize(), "fees": { "base": fee }}
])
# Replacement transaction is identical except has double the fee
replacement_tx = CTransaction()
replacement_tx.deserialize(BytesIO(hex_str_to_bytes(signed_replaceable_tx["hex"])))
replacement_tx.vout[0].nValue -= int(fee * COIN) # Doubled fee
signed_replacement_tx = node.signrawtransactionwithkey(replacement_tx.serialize().hex(), self.privkeys)
replacement_tx.deserialize(BytesIO(hex_str_to_bytes(signed_replacement_tx["hex"])))
self.log.info("Test that transactions within a package cannot replace each other")
testres_rbf_conflicting = node.testmempoolaccept([signed_replaceable_tx["hex"], signed_replacement_tx["hex"]])
assert_equal(testres_rbf_conflicting, [
{"txid": replaceable_tx.rehash(), "wtxid": replaceable_tx.getwtxid(), "package-error": "conflict-in-package"},
{"txid": replacement_tx.rehash(), "wtxid": replacement_tx.getwtxid(), "package-error": "conflict-in-package"}
])
self.log.info("Test that packages cannot conflict with mempool transactions, even if a valid BIP125 RBF")
node.sendrawtransaction(signed_replaceable_tx["hex"])
testres_rbf_single = node.testmempoolaccept([signed_replacement_tx["hex"]])
# This transaction is a valid BIP125 replace-by-fee
assert testres_rbf_single[0]["allowed"]
testres_rbf_package = self.independent_txns_testres_blank + [{
"txid": replacement_tx.rehash(), "wtxid": replacement_tx.getwtxid(), "allowed": False, "reject-reason": "txn-mempool-conflict"
}]
self.assert_testres_equal(self.independent_txns_hex + [signed_replacement_tx["hex"]], testres_rbf_package)
if __name__ == "__main__":
RPCPackagesTest().main()
|
the-stack_0_1729 | #
# This file is part of the MicroPython project, http://micropython.org/
#
# The MIT License (MIT)
#
# Copyright (c) 2017 SummerGift <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import os
from sys import argv
import argparse
from gen_qstr import make_bytes
def qstr_end_idx(str):
    # Return the index of the first delimiter that ends a qstr name in this
    # line fragment, defaulting to 255 when no delimiter is found.
    end = 255
    for ch in (')', ',', ':', '=', ' ', ';', '<', '}', '{', '.'):
        idx = str.find(ch)
        if idx > -1 and idx < end:
            end = idx
    return end
def spec_charactor_filter(string):
    # Reject candidate qstrs that contain characters which cannot appear in a
    # valid identifier (comment markers, brackets, whitespace, backslash, '-').
    for ch in ('#', '/', '*', '(', ')', '{', '}', ' ', '\\', '-'):
        if ch in string:
            return False
    return True
def read_qstr(file,list):
fd = open(file)
try:
for line in fd:
idx = 0
for i in range(5):
idx = line.find('MP_QSTR_',idx)
if (idx >= 0):
str = line[idx+len('MP_QSTR_'):-1]
lidx = qstr_end_idx(str)
qstr = str[0:lidx]
if spec_charactor_filter(qstr):
if qstr not in list:
list.append(qstr)
idx = idx +1
else:
idx = line.find('Q(')
if idx ==0:
#print("**:",line+' in '+file)
str = line[idx+len('Q('):-1]
lidx = qstr_end_idx(str)
qstr = str[0:lidx]
if spec_charactor_filter(qstr):
if qstr not in list:
list.append(qstr)
break
except UnicodeDecodeError as e:
print(e)
print('in',file)
fd.close()
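# Editorial note (not part of the original script): read_qstr extracts qstr
# names from two kinds of lines. A C source line such as
#   { MP_ROM_QSTR(MP_QSTR_hello), MP_ROM_PTR(&fun_obj) },
# yields "hello" (everything after "MP_QSTR_" up to the first delimiter found
# by qstr_end_idx), and a qstrdefs-style line such as
#   Q(hello)
# also yields "hello" via the Q( branch above. The surrounding C identifiers in
# the example line are illustrative only.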
def gen_qstr(path,hash_len=1):
headerfile = './qstrdefs.generated.h'
headerdef = '// This file was automatically generated by auto_generate_qstr.py' + '\n'+ '\n'
headerdef = headerdef + '\n' + ' QDEF(MP_QSTRnull, (const byte*)"\\x00\\x00\\x00" "") '+ '\n'
try:
os.remove(headerfile)
    except OSError as e:  # WindowsError is an alias of OSError on Windows
print(e)
qstr_list = []
total = 0
for (root, dirs, files) in os.walk(path):
for file in files:
if (file.split(".")[-1] == 'c'):
read_qstr(root + '/' + file, qstr_list)
if (file.split(".")[-1] == 'h'):
read_qstr(root + '/' + file, qstr_list)
outfd = open(headerfile, 'wt')
outfd.writelines(headerdef)
for qstr in qstr_list:
out = make_bytes(1, hash_len, qstr)
outfd.writelines(out + '\n');
# print(out)
outfd.close()
print('Generate QString count:', len(qstr_list))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--dir', type=str, default='../',
help='txt file')
args, unparsed = parser.parse_known_args()
gen_qstr(args.dir)
|
the-stack_0_1731 | from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
class CommandsTestCase(TestCase):
def test_wait_for_db_ready(self):
"""Test waiting for db when db is available"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.return_value = True
call_command('wait_for_db')
self.assertEqual(gi.call_count, 1)
@patch('time.sleep', return_value=None)
def test_wait_for_db(self, ts):
"""Test waiting for db"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.side_effect = [OperationalError] * 5 + [True]
call_command('wait_for_db')
self.assertEqual(gi.call_count, 6) |
the-stack_0_1733 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Implementation of Neural Net (NN) functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import candidate_sampling_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
def log_poisson_loss(targets, log_input, compute_full_loss=False, name=None):
"""Computes log Poisson loss given `log_input`.
Gives the log-likelihood loss between the prediction and the target under the
assumption that the target has a Poisson distribution.
Caveat: By default, this is not the exact loss, but the loss minus a
constant term [log(z!)]. That has no effect for optimization, but
does not play well with relative loss comparisons. To compute an
approximation of the log factorial term, specify
compute_full_loss=True to enable Stirling's Approximation.
For brevity, let `c = log(x) = log_input`, `z = targets`. The log Poisson
loss is
-log(exp(-x) * (x^z) / z!)
= -log(exp(-x) * (x^z)) + log(z!)
~ -log(exp(-x)) - log(x^z) [+ z * log(z) - z + 0.5 * log(2 * pi * z)]
[ Note the second term is the Stirling's Approximation for log(z!).
It is invariant to x and does not affect optimization, though
important for correct relative loss comparisons. It is only
computed when compute_full_loss == True. ]
= x - z * log(x) [+ z * log(z) - z + 0.5 * log(2 * pi * z)]
= exp(c) - z * c [+ z * log(z) - z + 0.5 * log(2 * pi * z)]
Args:
targets: A `Tensor` of the same type and shape as `log_input`.
log_input: A `Tensor` of type `float32` or `float64`.
compute_full_loss: whether to compute the full loss. If false, a constant
term is dropped in favor of more efficient optimization.
name: A name for the operation (optional).
Returns:
A `Tensor` of the same shape as `log_input` with the componentwise
logistic losses.
Raises:
ValueError: If `log_input` and `targets` do not have the same shape.
"""
with ops.name_scope(name, "log_poisson_loss", [log_input, targets]) as name:
log_input = ops.convert_to_tensor(log_input, name="log_input")
targets = ops.convert_to_tensor(targets, name="targets")
try:
targets.get_shape().merge_with(log_input.get_shape())
except ValueError:
raise ValueError(
"log_input and targets must have the same shape (%s vs %s)" %
(log_input.get_shape(), targets.get_shape()))
result = math_ops.exp(log_input) - log_input * targets
if compute_full_loss:
# need to create constant tensors here so that their dtypes can be matched
# to that of the targets.
point_five = constant_op.constant(0.5, dtype=targets.dtype)
two_pi = constant_op.constant(2 * math.pi, dtype=targets.dtype)
stirling_approx = (targets * math_ops.log(targets)) - targets + (
point_five * math_ops.log(two_pi * targets))
zeros = array_ops.zeros_like(targets, dtype=targets.dtype)
ones = array_ops.ones_like(targets, dtype=targets.dtype)
cond = math_ops.logical_and(targets >= zeros, targets <= ones)
result += array_ops.where(cond, zeros, stirling_approx)
return result
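# Illustrative sketch (editorial addition, not part of the original module and
# never called here): the "full" loss above approximates log(z!) with the
# Stirling term z*log(z) - z + 0.5*log(2*pi*z). This is a quick check against
# the exact value via math.lgamma; the helper name is an assumption.
def _stirling_term_demo(z=10.0):
  import math
  stirling = z * math.log(z) - z + 0.5 * math.log(2.0 * math.pi * z)
  exact = math.lgamma(z + 1.0)  # log(z!) computed exactly
  return stirling, exact        # ~15.096 vs ~15.104 for z = 10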
def sigmoid_cross_entropy_with_logits(_sentinel=None, # pylint: disable=invalid-name
labels=None, logits=None,
name=None):
"""Computes sigmoid cross entropy given `logits`.
Measures the probability error in discrete classification tasks in which each
class is independent and not mutually exclusive. For instance, one could
perform multilabel classification where a picture can contain both an elephant
and a dog at the same time.
For brevity, let `x = logits`, `z = labels`. The logistic loss is
z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
= z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
= z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
= z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x))
= (1 - z) * x + log(1 + exp(-x))
= x - x * z + log(1 + exp(-x))
For x < 0, to avoid overflow in exp(-x), we reformulate the above
x - x * z + log(1 + exp(-x))
= log(exp(x)) - x * z + log(1 + exp(-x))
= - x * z + log(1 + exp(x))
Hence, to ensure stability and avoid overflow, the implementation uses this
equivalent formulation
max(x, 0) - x * z + log(1 + exp(-abs(x)))
`logits` and `labels` must have the same type and shape.
Args:
_sentinel: Used to prevent positional parameters. Internal, do not use.
labels: A `Tensor` of the same type and shape as `logits`.
logits: A `Tensor` of type `float32` or `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of the same shape as `logits` with the componentwise
logistic losses.
Raises:
ValueError: If `logits` and `labels` do not have the same shape.
"""
# pylint: disable=protected-access
nn_ops._ensure_xent_args("sigmoid_cross_entropy_with_logits",
_sentinel, labels, logits)
# pylint: enable=protected-access
with ops.name_scope(name, "logistic_loss", [logits, labels]) as name:
logits = ops.convert_to_tensor(logits, name="logits")
labels = ops.convert_to_tensor(labels, name="labels")
try:
labels.get_shape().merge_with(logits.get_shape())
except ValueError:
raise ValueError("logits and labels must have the same shape (%s vs %s)"
% (logits.get_shape(), labels.get_shape()))
# The logistic loss formula from above is
# x - x * z + log(1 + exp(-x))
# For x < 0, a more numerically stable formula is
# -x * z + log(1 + exp(x))
# Note that these two expressions can be combined into the following:
# max(x, 0) - x * z + log(1 + exp(-abs(x)))
# To allow computing gradients at zero, we define custom versions of max and
# abs functions.
zeros = array_ops.zeros_like(logits, dtype=logits.dtype)
cond = (logits >= zeros)
relu_logits = array_ops.where(cond, logits, zeros)
neg_abs_logits = array_ops.where(cond, -logits, logits)
return math_ops.add(relu_logits - logits * labels,
math_ops.log1p(math_ops.exp(neg_abs_logits)),
name=name)
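# Illustrative sketch (editorial addition, not part of the original module and
# never called here): a NumPy check of the numerically stable rearrangement
# max(x, 0) - x * z + log(1 + exp(-abs(x))) derived in the docstring above.
# The naive form x - x * z + log(1 + exp(-x)) overflows for very negative
# logits, while the stable form stays finite; the helper name is an assumption.
def _stable_sigmoid_xent_demo():
  import numpy as np
  x = np.array([-1000.0, -2.0, 0.0, 2.0, 1000.0])  # logits
  z = np.array([1.0, 0.0, 0.5, 1.0, 0.0])          # labels
  naive = x - x * z + np.log1p(np.exp(-x))         # inf (overflow) at x = -1000
  stable = np.maximum(x, 0) - x * z + np.log1p(np.exp(-np.abs(x)))
  return naive, stable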
def weighted_cross_entropy_with_logits(targets, logits, pos_weight, name=None):
"""Computes a weighted cross entropy.
This is like `sigmoid_cross_entropy_with_logits()` except that `pos_weight`,
allows one to trade off recall and precision by up- or down-weighting the
cost of a positive error relative to a negative error.
The usual cross-entropy cost is defined as:
targets * -log(sigmoid(logits)) + (1 - targets) * -log(1 - sigmoid(logits))
The argument `pos_weight` is used as a multiplier for the positive targets:
targets * -log(sigmoid(logits)) * pos_weight +
(1 - targets) * -log(1 - sigmoid(logits))
For brevity, let `x = logits`, `z = targets`, `q = pos_weight`.
The loss is:
qz * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
= qz * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
= qz * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
= qz * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x))
= (1 - z) * x + (qz + 1 - z) * log(1 + exp(-x))
= (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(-x))
Setting `l = (1 + (q - 1) * z)`, to ensure stability and avoid overflow,
the implementation uses
(1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0))
`logits` and `targets` must have the same type and shape.
Args:
targets: A `Tensor` of the same type and shape as `logits`.
logits: A `Tensor` of type `float32` or `float64`.
pos_weight: A coefficient to use on the positive examples.
name: A name for the operation (optional).
Returns:
A `Tensor` of the same shape as `logits` with the componentwise
weighted logistic losses.
Raises:
ValueError: If `logits` and `targets` do not have the same shape.
"""
with ops.name_scope(name, "logistic_loss", [logits, targets]) as name:
logits = ops.convert_to_tensor(logits, name="logits")
targets = ops.convert_to_tensor(targets, name="targets")
try:
targets.get_shape().merge_with(logits.get_shape())
except ValueError:
raise ValueError("logits and targets must have the same shape (%s vs %s)"
% (logits.get_shape(), targets.get_shape()))
# The logistic loss formula from above is
# (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(-x))
# For x < 0, a more numerically stable formula is
# (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(x)) - l * x
# To avoid branching, we use the combined version
# (1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0))
log_weight = 1 + (pos_weight - 1) * targets
return math_ops.add(
(1 - targets) * logits,
log_weight * (math_ops.log1p(math_ops.exp(-math_ops.abs(logits))) +
nn_ops.relu(-logits)),
name=name)
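# Illustrative sketch (editorial addition, not part of the original module and
# never called here): a NumPy check that the rearranged form used above,
# (1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0)) with l = 1 + (q - 1) * z,
# matches the defining expression q*z*-log(sigmoid(x)) + (1-z)*-log(1-sigmoid(x)).
# The helper name is an assumption for demonstration only.
def _weighted_xent_demo(q=3.0):
  import numpy as np
  x = np.array([-3.0, -0.5, 0.0, 0.5, 3.0])  # logits
  z = np.array([0.0, 1.0, 0.5, 0.0, 1.0])    # targets
  sig = 1.0 / (1.0 + np.exp(-x))
  by_definition = q * z * -np.log(sig) + (1 - z) * -np.log(1 - sig)
  l = 1 + (q - 1) * z
  rearranged = (1 - z) * x + l * (np.log1p(np.exp(-np.abs(x))) + np.maximum(-x, 0))
  return by_definition, rearranged  # elementwise equal up to rounding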
def relu_layer(x, weights, biases, name=None):
"""Computes Relu(x * weight + biases).
Args:
x: a 2D tensor. Dimensions typically: batch, in_units
weights: a 2D tensor. Dimensions typically: in_units, out_units
biases: a 1D tensor. Dimensions: out_units
name: A name for the operation (optional). If not specified
"nn_relu_layer" is used.
Returns:
A 2-D Tensor computing relu(matmul(x, weights) + biases).
Dimensions typically: batch, out_units.
"""
with ops.name_scope(name, "relu_layer", [x, weights, biases]) as name:
x = ops.convert_to_tensor(x, name="x")
weights = ops.convert_to_tensor(weights, name="weights")
biases = ops.convert_to_tensor(biases, name="biases")
xw_plus_b = nn_ops.bias_add(math_ops.matmul(x, weights), biases)
return nn_ops.relu(xw_plus_b, name=name)
def l2_normalize(x, dim, epsilon=1e-12, name=None):
"""Normalizes along dimension `dim` using an L2 norm.
For a 1-D tensor with `dim = 0`, computes
output = x / sqrt(max(sum(x**2), epsilon))
For `x` with more dimensions, independently normalizes each 1-D slice along
dimension `dim`.
Args:
x: A `Tensor`.
dim: Dimension along which to normalize. A scalar or a vector of
integers.
epsilon: A lower bound value for the norm. Will use `sqrt(epsilon)` as the
divisor if `norm < sqrt(epsilon)`.
name: A name for this operation (optional).
Returns:
A `Tensor` with the same shape as `x`.
"""
with ops.name_scope(name, "l2_normalize", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
square_sum = math_ops.reduce_sum(math_ops.square(x), dim, keep_dims=True)
x_inv_norm = math_ops.rsqrt(math_ops.maximum(square_sum, epsilon))
return math_ops.multiply(x, x_inv_norm, name=name)
def zero_fraction(value, name=None):
"""Returns the fraction of zeros in `value`.
If `value` is empty, the result is `nan`.
This is useful in summaries to measure and report sparsity. For example,
```python
z = tf.Relu(...)
summ = tf.contrib.deprecated.scalar_summary('sparsity',
tf.nn.zero_fraction(z))
```
Args:
value: A tensor of numeric type.
name: A name for the operation (optional).
Returns:
The fraction of zeros in `value`, with type `float32`.
"""
with ops.name_scope(name, "zero_fraction", [value]):
value = ops.convert_to_tensor(value, name="value")
zero = constant_op.constant(0, dtype=value.dtype, name="zero")
return math_ops.reduce_mean(
math_ops.cast(math_ops.equal(value, zero), dtypes.float32))
# pylint: disable=redefined-builtin
def depthwise_conv2d(input, filter, strides, padding, rate=None, name=None):
"""Depthwise 2-D convolution.
Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
and a filter tensor of shape
`[filter_height, filter_width, in_channels, channel_multiplier]`
containing `in_channels` convolutional filters of depth 1, `depthwise_conv2d`
applies a different filter to each input channel (expanding from 1 channel
to `channel_multiplier` channels for each), then concatenates the results
together. The output has `in_channels * channel_multiplier` channels.
In detail,
output[b, i, j, k * channel_multiplier + q] = sum_{di, dj}
filter[di, dj, k, q] * input[b, strides[1] * i + rate[0] * di,
strides[2] * j + rate[1] * dj, k]
Must have `strides[0] = strides[3] = 1`. For the most common case of the
same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
If any value in `rate` is greater than 1, we perform atrous depthwise
convolution, in which case all values in the `strides` tensor must be equal
to 1.
Args:
input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
filter: 4-D with shape
`[filter_height, filter_width, in_channels, channel_multiplier]`.
strides: 1-D of size 4. The stride of the sliding window for each
dimension of `input`.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the @{tf.nn.convolution$comment here}
rate: 1-D of size 2. The dilation rate in which we sample input values
across the `height` and `width` dimensions in atrous convolution. If it is
greater than 1, then all values of strides must be 1.
name: A name for this operation (optional).
Returns:
A 4-D `Tensor` of shape
`[batch, out_height, out_width, in_channels * channel_multiplier].`
"""
with ops.name_scope(name, "depthwise", [input, filter]) as name:
input = ops.convert_to_tensor(input, name="tensor_in")
filter = ops.convert_to_tensor(filter, name="filter_in")
if rate is None:
rate = [1, 1]
def op(input_converted, _, padding):
return nn_ops.depthwise_conv2d_native(
input=input_converted,
filter=filter,
strides=strides,
padding=padding,
name=name)
return nn_ops.with_space_to_batch(
input=input,
filter_shape=array_ops.shape(filter),
dilation_rate=rate,
padding=padding,
op=op)
# pylint: enable=redefined-builtin
# pylint: disable=redefined-builtin,line-too-long
def separable_conv2d(input,
depthwise_filter,
pointwise_filter,
strides,
padding,
rate=None,
name=None):
"""2-D convolution with separable filters.
Performs a depthwise convolution that acts separately on channels followed by
a pointwise convolution that mixes channels. Note that this is separability
between dimensions `[1, 2]` and `3`, not spatial separability between
dimensions `1` and `2`.
In detail,
output[b, i, j, k] = sum_{di, dj, q, r]
input[b, strides[1] * i + di, strides[2] * j + dj, q] *
depthwise_filter[di, dj, q, r] *
pointwise_filter[0, 0, q * channel_multiplier + r, k]
`strides` controls the strides for the depthwise convolution only, since
the pointwise convolution has implicit strides of `[1, 1, 1, 1]`. Must have
`strides[0] = strides[3] = 1`. For the most common case of the same
horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
If any value in `rate` is greater than 1, we perform atrous depthwise
convolution, in which case all values in the `strides` tensor must be equal
to 1.
Args:
input: 4-D `Tensor` with shape `[batch, in_height, in_width, in_channels]`.
depthwise_filter: 4-D `Tensor` with shape
`[filter_height, filter_width, in_channels, channel_multiplier]`.
Contains `in_channels` convolutional filters of depth 1.
pointwise_filter: 4-D `Tensor` with shape
`[1, 1, channel_multiplier * in_channels, out_channels]`. Pointwise
filter to mix channels after `depthwise_filter` has convolved spatially.
strides: 1-D of size 4. The strides for the depthwise convolution for
each dimension of `input`.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the @{tf.nn.convolution$comment here}
rate: 1-D of size 2. The dilation rate in which we sample input values
across the `height` and `width` dimensions in atrous convolution. If it is
greater than 1, then all values of strides must be 1.
name: A name for this operation (optional).
Returns:
A 4-D `Tensor` of shape `[batch, out_height, out_width, out_channels]`.
Raises:
ValueError: If channel_multiplier * in_channels > out_channels,
which means that the separable convolution is overparameterized.
"""
with ops.name_scope(name, "separable_conv2d",
[input, depthwise_filter, pointwise_filter]) as name:
input = ops.convert_to_tensor(input, name="tensor_in")
depthwise_filter = ops.convert_to_tensor(
depthwise_filter, name="depthwise_filter")
pointwise_filter = ops.convert_to_tensor(
pointwise_filter, name="pointwise_filter")
pointwise_filter_shape = pointwise_filter.get_shape().with_rank(4)
pointwise_filter_shape[0].assert_is_compatible_with(1)
pointwise_filter_shape[1].assert_is_compatible_with(1)
channel_multiplier = depthwise_filter.get_shape().with_rank(4)[3]
in_channels = input.get_shape().with_rank(4)[3]
out_channels = pointwise_filter_shape[3]
if rate is None:
rate = [1, 1]
# If any of channel numbers is unknown, then the comparison below returns
# None. See TensorShape.__gt__().
if channel_multiplier * in_channels > out_channels:
raise ValueError("Refusing to perform an overparameterized separable "
"convolution: channel_multiplier * in_channels = "
"%d * %d = %d > %d = out_channels" %
(channel_multiplier, in_channels,
channel_multiplier * in_channels, out_channels))
# The layout of the ops in the graph are expected to be as follows:
    # depthwise_conv2d // Conv2D op corresponding to native depthwise conv.
# separable_conv2d // Conv2D op corresponding to the pointwise conv.
def op(input_converted, _, padding):
return nn_ops.depthwise_conv2d_native(
input=input_converted,
filter=depthwise_filter,
strides=strides,
padding=padding,
name="depthwise")
depthwise = nn_ops.with_space_to_batch(
input=input,
filter_shape=array_ops.shape(depthwise_filter),
dilation_rate=rate,
padding=padding,
op=op)
return nn_ops.conv2d(
depthwise, pointwise_filter, [1, 1, 1, 1], padding="VALID", name=name)
# pylint: enable=redefined-builtin,line-too-long
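# Illustrative shape example for the two stages of separable_conv2d above
# (editorial note, not part of the original module): with input
# [batch=1, 8, 8, in_channels=3], depthwise_filter [3, 3, 3, channel_multiplier=2]
# and pointwise_filter [1, 1, 6, out_channels=16], the depthwise stage produces
# [1, 8, 8, 3*2=6] (with 'SAME' padding and unit strides) and the 1x1 pointwise
# convolution mixes those 6 channels into [1, 8, 8, 16]; the guard above
# requires channel_multiplier * in_channels = 6 <= out_channels = 16.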
def sufficient_statistics(x, axes, shift=None, keep_dims=False, name=None):
"""Calculate the sufficient statistics for the mean and variance of `x`.
These sufficient statistics are computed using the one pass algorithm on
an input that's optionally shifted. See:
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data
Args:
x: A `Tensor`.
axes: Array of ints. Axes along which to compute mean and variance.
shift: A `Tensor` containing the value by which to shift the data for
numerical stability, or `None` if no shift is to be performed. A shift
close to the true mean provides the most numerically stable results.
keep_dims: produce statistics with the same dimensionality as the input.
name: Name used to scope the operations that compute the sufficient stats.
Returns:
Four `Tensor` objects of the same type as `x`:
* the count (number of elements to average over).
* the (possibly shifted) sum of the elements in the array.
* the (possibly shifted) sum of squares of the elements in the array.
* the shift by which the mean must be corrected or None if `shift` is None.
"""
axes = list(set(axes))
with ops.name_scope(name, "sufficient_statistics", [x, shift]):
x = ops.convert_to_tensor(x, name="x")
x_shape = x.get_shape()
if all(x_shape[d].value is not None for d in axes):
counts = 1
for d in axes:
counts *= x_shape[d].value
counts = constant_op.constant(counts, dtype=x.dtype)
else: # shape needs to be inferred at runtime.
x_dims = array_ops.gather(
math_ops.cast(array_ops.shape(x), x.dtype), axes)
counts = math_ops.reduce_prod(x_dims, name="count")
if shift is not None:
shift = ops.convert_to_tensor(shift, name="shift")
m_ss = math_ops.subtract(x, shift)
v_ss = math_ops.squared_difference(x, shift)
else: # no shift.
m_ss = x
v_ss = math_ops.square(x)
m_ss = math_ops.reduce_sum(m_ss, axes, keep_dims=keep_dims, name="mean_ss")
v_ss = math_ops.reduce_sum(v_ss, axes, keep_dims=keep_dims, name="var_ss")
return counts, m_ss, v_ss, shift
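# Illustrative sketch (not part of the original source): a small hand-checked
# call to sufficient_statistics.
#
#   x = constant_op.constant([[1., 2.], [3., 4.]])
#   counts, m_ss, v_ss, shift = sufficient_statistics(x, axes=[0])
#   # counts = 2.0
#   # m_ss   = [4., 6.]    (per-column sums)
#   # v_ss   = [10., 20.]  (per-column sums of squares)
#   # shift  = None        (no shift was requested)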
def normalize_moments(counts, mean_ss, variance_ss, shift, name=None):
"""Calculate the mean and variance of based on the sufficient statistics.
Args:
counts: A `Tensor` containing a the total count of the data (one value).
mean_ss: A `Tensor` containing the mean sufficient statistics: the (possibly
shifted) sum of the elements to average over.
variance_ss: A `Tensor` containing the variance sufficient statistics: the
(possibly shifted) squared sum of the data to compute the variance over.
shift: A `Tensor` containing the value by which the data is shifted for
numerical stability, or `None` if no shift was performed.
name: Name used to scope the operations that compute the moments.
Returns:
Two `Tensor` objects: `mean` and `variance`.
"""
with ops.name_scope(name, "normalize", [counts, mean_ss, variance_ss, shift]):
divisor = math_ops.reciprocal(counts, name="divisor")
if shift is not None:
shifted_mean = math_ops.multiply(mean_ss, divisor, name="shifted_mean")
mean = math_ops.add(shifted_mean, shift, name="mean")
else: # no shift.
shifted_mean = math_ops.multiply(mean_ss, divisor, name="mean")
mean = shifted_mean
variance = math_ops.subtract(math_ops.multiply(variance_ss, divisor),
math_ops.square(shifted_mean),
name="variance")
return (mean, variance)
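# Illustrative sketch (not part of the original source): feeding the sufficient
# statistics from the example above into normalize_moments.
#
#   mean, variance = normalize_moments(counts, m_ss, v_ss, shift)
#   # mean     = [2., 3.]   ([4., 6.] / 2)
#   # variance = [1., 1.]   ([10., 20.] / 2 - mean ** 2)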
def moments(x, axes, shift=None, name=None, keep_dims=False):
"""Calculate the mean and variance of `x`.
The mean and variance are calculated by aggregating the contents of `x`
across `axes`. If `x` is 1-D and `axes = [0]` this is just the mean
and variance of a vector.
  Note: for numerical stability, when shift=None, the true mean
  is computed and used as the shift.
When using these moments for batch normalization (see
`tf.nn.batch_normalization`):
* for so-called "global normalization", used with convolutional filters with
shape `[batch, height, width, depth]`, pass `axes=[0, 1, 2]`.
* for simple batch normalization pass `axes=[0]` (batch only).
Args:
x: A `Tensor`.
axes: Array of ints. Axes along which to compute mean and
variance.
shift: A `Tensor` containing the value by which to shift the data for
numerical stability, or `None` in which case the true mean of the data is
used as shift. A shift close to the true mean provides the most
numerically stable results.
name: Name used to scope the operations that compute the moments.
keep_dims: produce moments with the same dimensionality as the input.
Returns:
Two `Tensor` objects: `mean` and `variance`.
"""
with ops.name_scope(name, "moments", [x, axes, shift]):
# The dynamic range of fp16 is too limited to support the collection of
# sufficient statistics. As a workaround we simply perform the operations
# on 32-bit floats before converting the mean and variance back to fp16
y = math_ops.cast(x, dtypes.float32) if x.dtype == dtypes.float16 else x
if shift is None:
# Compute true mean while keeping the dims for proper broadcasting.
shift = array_ops.stop_gradient(
math_ops.reduce_mean(y, axes, keep_dims=True))
else:
shift = math_ops.cast(shift, y.dtype)
counts, m_ss, v_ss, shift = sufficient_statistics(
y, axes, shift=shift, keep_dims=keep_dims, name=name)
# Reshape shift as needed.
shift = array_ops.reshape(shift, array_ops.shape(m_ss))
shift.set_shape(m_ss.get_shape())
with ops.control_dependencies([counts, m_ss, v_ss]):
mean, variance = normalize_moments(counts, m_ss, v_ss, shift, name=name)
if x.dtype == dtypes.float16:
return (math_ops.cast(mean, dtypes.float16),
math_ops.cast(variance, dtypes.float16))
else:
return (mean, variance)
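# Illustrative sketch (not part of the original source): the typical
# batch-normalization use of moments(); the input shape is assumed purely for
# demonstration.
#
#   images = array_ops.ones([8, 32, 32, 16])          # [batch, h, w, depth]
#   mean, variance = moments(images, axes=[0, 1, 2])  # "global normalization"
#   # mean and variance each have shape [16] -- one value per channel.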
def weighted_moments(x, axes, frequency_weights, name=None, keep_dims=False):
"""Returns the frequency-weighted mean and variance of `x`.
Args:
x: A tensor.
axes: 1-d tensor of int32 values; these are the axes along which
to compute mean and variance.
frequency_weights: A tensor of positive weights which can be
broadcast with x.
name: Name used to scope the operation.
keep_dims: Produce moments with the same dimensionality as the input.
Returns:
Two tensors: `weighted_mean` and `weighted_variance`.
"""
with ops.name_scope(name, "weighted_moments", [x, frequency_weights, axes]):
x = ops.convert_to_tensor(x, name="x")
frequency_weights = ops.convert_to_tensor(
frequency_weights, name="frequency_weights")
# Unlike moments(), this just uses a simpler two-pass method.
# See comment in moments() WRT precision; it applies here too.
needs_cast = x.dtype == dtypes.float16
if needs_cast:
x = math_ops.cast(x, dtypes.float32)
if frequency_weights.dtype != x.dtype:
frequency_weights = math_ops.cast(frequency_weights, x.dtype)
# Note that we use keep_dims=True for our reductions regardless of the arg;
# this is so that the results remain broadcast-compatible with the inputs.
weighted_input_sum = math_ops.reduce_sum(
frequency_weights * x, axes, name="weighted_input_sum", keep_dims=True)
# The shape of the weights isn't necessarily the same as x's
# shape, just broadcast-compatible with it -- so this expression
# performs broadcasting to give a per-item weight, with the same
    # shape as (frequency_weights * x). This avoids having to reason
# through all the broadcast logic to compute a correct
# sum_of_weights.
broadcasted_weights = frequency_weights + array_ops.zeros_like(x)
sum_of_weights = math_ops.reduce_sum(
broadcasted_weights, axes, name="sum_of_weights", keep_dims=True)
divisor = math_ops.reciprocal(sum_of_weights, name="inv_weight_sum")
weighted_mean = math_ops.multiply(weighted_input_sum, divisor)
# Have the weighted mean; now on to variance:
weighted_distsq = math_ops.reduce_sum(
frequency_weights * math_ops.squared_difference(x, weighted_mean),
axes,
name="weighted_distsq",
keep_dims=True)
weighted_variance = math_ops.multiply(weighted_distsq, divisor)
if not keep_dims:
weighted_mean = array_ops.squeeze(weighted_mean, squeeze_dims=axes)
weighted_variance = array_ops.squeeze(
weighted_variance, squeeze_dims=axes)
if needs_cast:
weighted_mean = math_ops.cast(weighted_mean, dtypes.float16)
weighted_variance = math_ops.cast(weighted_variance, dtypes.float16)
return weighted_mean, weighted_variance
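# Illustrative sketch (not part of the original source): a small hand-checked
# weighted_moments call.
#
#   x = constant_op.constant([2., 4.])
#   w = constant_op.constant([1., 3.])
#   mean, variance = weighted_moments(x, axes=[0], frequency_weights=w)
#   # mean     = (1*2 + 3*4) / (1 + 3)                    = 3.5
#   # variance = (1*(2-3.5)**2 + 3*(4-3.5)**2) / (1 + 3)  = 0.75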
def batch_normalization(x,
mean,
variance,
offset,
scale,
variance_epsilon,
name=None):
r"""Batch normalization.
As described in http://arxiv.org/abs/1502.03167.
Normalizes a tensor by `mean` and `variance`, and applies (optionally) a
`scale` \\(\gamma\\) to it, as well as an `offset` \\(\beta\\):
\\(\frac{\gamma(x-\mu)}{\sigma}+\beta\\)
`mean`, `variance`, `offset` and `scale` are all expected to be of one of two
shapes:
* In all generality, they can have the same number of dimensions as the
input `x`, with identical sizes as `x` for the dimensions that are not
normalized over (the 'depth' dimension(s)), and dimension 1 for the
others which are being normalized over.
`mean` and `variance` in this case would typically be the outputs of
`tf.nn.moments(..., keep_dims=True)` during training, or running averages
thereof during inference.
* In the common case where the 'depth' dimension is the last dimension in
the input tensor `x`, they may be one dimensional tensors of the same
size as the 'depth' dimension.
This is the case for example for the common `[batch, depth]` layout of
fully-connected layers, and `[batch, height, width, depth]` for
convolutions.
`mean` and `variance` in this case would typically be the outputs of
`tf.nn.moments(..., keep_dims=False)` during training, or running averages
thereof during inference.
Args:
x: Input `Tensor` of arbitrary dimensionality.
mean: A mean `Tensor`.
variance: A variance `Tensor`.
offset: An offset `Tensor`, often denoted \\(\beta\\) in equations, or
None. If present, will be added to the normalized tensor.
scale: A scale `Tensor`, often denoted \\(\gamma\\) in equations, or
`None`. If present, the scale is applied to the normalized tensor.
variance_epsilon: A small float number to avoid dividing by 0.
name: A name for this operation (optional).
Returns:
the normalized, scaled, offset tensor.
"""
with ops.name_scope(name, "batchnorm", [x, mean, variance, scale, offset]):
inv = math_ops.rsqrt(variance + variance_epsilon)
if scale is not None:
inv *= scale
return x * inv + (offset - mean * inv
if offset is not None else -mean * inv)
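# Illustrative sketch (not part of the original source): normalizing a small
# [batch, depth] tensor with its own moments.
#
#   x = constant_op.constant([[1., 2.], [3., 4.]])
#   mean, variance = moments(x, axes=[0])
#   gamma = array_ops.ones([2])
#   beta = array_ops.zeros([2])
#   y = batch_normalization(x, mean, variance, beta, gamma, 1e-3)
#   # y has the same shape as x; each column now has roughly zero mean and
#   # unit variance (up to the 1e-3 epsilon).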
def fused_batch_norm(
x,
scale,
offset, # pylint: disable=invalid-name
mean=None,
variance=None,
epsilon=0.001,
data_format="NHWC",
is_training=True,
name=None):
r"""Batch normalization.
As described in http://arxiv.org/abs/1502.03167.
Args:
x: Input `Tensor` of 4 dimensions.
scale: A `Tensor` of 1 dimension for scaling.
offset: A `Tensor` of 1 dimension for bias.
mean: A `Tensor` of 1 dimension for population mean used for inference.
variance: A `Tensor` of 1 dimension for population variance
used for inference.
epsilon: A small float number added to the variance of x.
data_format: The data format for x. Either "NHWC" (default) or "NCHW".
is_training: A bool value to specify if the operation is used for
training or inference.
name: A name for this operation (optional).
Returns:
    y: A 4D Tensor for the normalized, scaled, offset x.
batch_mean: A 1D Tensor for the mean of x.
batch_var: A 1D Tensor for the variance of x.
Raises:
ValueError: If mean or variance is not None when is_training is True.
"""
x = ops.convert_to_tensor(x, name="input")
scale = ops.convert_to_tensor(scale, name="scale")
offset = ops.convert_to_tensor(offset, name="offset")
if is_training:
if (mean is not None) or (variance is not None):
raise ValueError("Both 'mean' and 'variance' must be None "
"if is_training is True.")
if mean is None:
mean = constant_op.constant([])
if variance is None:
variance = constant_op.constant([])
# Add 1e-12 to epsilon when epsilon <= 1e-5 to prevent CUDNN exception.
epsilon = epsilon if epsilon > 1e-5 else epsilon + 1e-12
# pylint: disable=protected-access
y, batch_mean, batch_var, _, _ = gen_nn_ops._fused_batch_norm(
x,
scale,
offset,
mean,
variance,
epsilon=epsilon,
data_format=data_format,
is_training=is_training,
name=name)
return y, batch_mean, batch_var
# pylint: enable=protected-access
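# Illustrative sketch (not part of the original source): a training-mode call
# to fused_batch_norm on an assumed NHWC input; the batch statistics are
# returned so callers can maintain their own running averages.
#
#   x = array_ops.ones([8, 16, 16, 4])
#   scale = array_ops.ones([4])
#   offset = array_ops.zeros([4])
#   y, batch_mean, batch_var = fused_batch_norm(x, scale, offset,
#                                               is_training=True)
#   # y: [8, 16, 16, 4]; batch_mean and batch_var: [4].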
def batch_norm_with_global_normalization(t,
m,
v,
beta,
gamma,
variance_epsilon,
scale_after_normalization,
name=None):
"""Batch normalization.
This op is deprecated. See `tf.nn.batch_normalization`.
Args:
t: A 4D input Tensor.
m: A 1D mean Tensor with size matching the last dimension of t.
This is the first output from tf.nn.moments,
or a saved moving average thereof.
v: A 1D variance Tensor with size matching the last dimension of t.
This is the second output from tf.nn.moments,
or a saved moving average thereof.
beta: A 1D beta Tensor with size matching the last dimension of t.
An offset to be added to the normalized tensor.
gamma: A 1D gamma Tensor with size matching the last dimension of t.
If "scale_after_normalization" is true, this tensor will be multiplied
with the normalized tensor.
variance_epsilon: A small float number to avoid dividing by 0.
scale_after_normalization: A bool indicating whether the resulted tensor
needs to be multiplied with gamma.
name: A name for this operation (optional).
Returns:
A batch-normalized `t`.
"""
return batch_normalization(t, m, v, beta, gamma if scale_after_normalization
else None, variance_epsilon, name)
def _sum_rows(x):
"""Returns a vector summing up each row of the matrix x."""
# _sum_rows(x) is equivalent to math_ops.reduce_sum(x, 1) when x is
# a matrix. The gradient of _sum_rows(x) is more efficient than
# reduce_sum(x, 1)'s gradient in today's implementation. Therefore,
# we use _sum_rows(x) in the nce_loss() computation since the loss
# is mostly used for training.
cols = array_ops.shape(x)[1]
ones_shape = array_ops.stack([cols, 1])
ones = array_ops.ones(ones_shape, x.dtype)
return array_ops.reshape(math_ops.matmul(x, ones), [-1])
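# Illustrative sketch (not part of the original source): what _sum_rows
# computes for a small matrix.
#
#   x = constant_op.constant([[1., 2.], [3., 4.]])
#   _sum_rows(x)  # -> [3., 7.], the same values as math_ops.reduce_sum(x, 1)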
def _compute_sampled_logits(weights,
biases,
labels,
inputs,
num_sampled,
num_classes,
num_true=1,
sampled_values=None,
subtract_log_q=True,
remove_accidental_hits=False,
partition_strategy="mod",
name=None):
"""Helper function for nce_loss and sampled_softmax_loss functions.
Computes sampled output training logits and labels suitable for implementing
e.g. noise-contrastive estimation (see nce_loss) or sampled softmax (see
sampled_softmax_loss).
Note: In the case where num_true > 1, we assign to each target class
the target probability 1 / num_true so that the target probabilities
sum to 1 per-example.
Args:
weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
objects whose concatenation along dimension 0 has shape
`[num_classes, dim]`. The (possibly-partitioned) class embeddings.
biases: A `Tensor` of shape `[num_classes]`. The class biases.
labels: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes. Note that this format differs from
the `labels` argument of `nn.softmax_cross_entropy_with_logits`.
inputs: A `Tensor` of shape `[batch_size, dim]`. The forward
activations of the input network.
num_sampled: An `int`. The number of classes to randomly sample per batch.
num_classes: An `int`. The number of possible classes.
num_true: An `int`. The number of target classes per training example.
sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
`sampled_expected_count`) returned by a `*_candidate_sampler` function.
(if None, we default to `log_uniform_candidate_sampler`)
subtract_log_q: A `bool`. whether to subtract the log expected count of
the labels in the sample to get the logits of the true labels.
Default is True. Turn off for Negative Sampling.
remove_accidental_hits: A `bool`. whether to remove "accidental hits"
where a sampled class equals one of the target classes. Default is
False.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.
Default is `"mod"`. See `tf.nn.embedding_lookup` for more details.
name: A name for the operation (optional).
Returns:
out_logits, out_labels: `Tensor` objects each with shape
`[batch_size, num_true + num_sampled]`, for passing to either
`nn.sigmoid_cross_entropy_with_logits` (NCE) or
`nn.softmax_cross_entropy_with_logits` (sampled softmax).
"""
if isinstance(weights, variables.PartitionedVariable):
weights = list(weights)
if not isinstance(weights, list):
weights = [weights]
with ops.name_scope(name, "compute_sampled_logits",
weights + [biases, inputs, labels]):
if labels.dtype != dtypes.int64:
labels = math_ops.cast(labels, dtypes.int64)
labels_flat = array_ops.reshape(labels, [-1])
# Sample the negative labels.
# sampled shape: [num_sampled] tensor
# true_expected_count shape = [batch_size, 1] tensor
# sampled_expected_count shape = [num_sampled] tensor
if sampled_values is None:
sampled_values = candidate_sampling_ops.log_uniform_candidate_sampler(
true_classes=labels,
num_true=num_true,
num_sampled=num_sampled,
unique=True,
range_max=num_classes)
# NOTE: pylint cannot tell that 'sampled_values' is a sequence
# pylint: disable=unpacking-non-sequence
sampled, true_expected_count, sampled_expected_count = sampled_values
# pylint: enable=unpacking-non-sequence
# labels_flat is a [batch_size * num_true] tensor
# sampled is a [num_sampled] int tensor
all_ids = array_ops.concat([labels_flat, sampled], 0)
# weights shape is [num_classes, dim]
all_w = embedding_ops.embedding_lookup(
weights, all_ids, partition_strategy=partition_strategy)
all_b = embedding_ops.embedding_lookup(biases, all_ids)
# true_w shape is [batch_size * num_true, dim]
# true_b is a [batch_size * num_true] tensor
true_w = array_ops.slice(
all_w, [0, 0], array_ops.stack([array_ops.shape(labels_flat)[0], -1]))
true_b = array_ops.slice(all_b, [0], array_ops.shape(labels_flat))
# inputs shape is [batch_size, dim]
# true_w shape is [batch_size * num_true, dim]
# row_wise_dots is [batch_size, num_true, dim]
dim = array_ops.shape(true_w)[1:2]
new_true_w_shape = array_ops.concat([[-1, num_true], dim], 0)
row_wise_dots = math_ops.multiply(
array_ops.expand_dims(inputs, 1),
array_ops.reshape(true_w, new_true_w_shape))
# We want the row-wise dot plus biases which yields a
# [batch_size, num_true] tensor of true_logits.
dots_as_matrix = array_ops.reshape(row_wise_dots,
array_ops.concat([[-1], dim], 0))
true_logits = array_ops.reshape(_sum_rows(dots_as_matrix), [-1, num_true])
true_b = array_ops.reshape(true_b, [-1, num_true])
true_logits += true_b
# Lookup weights and biases for sampled labels.
# sampled_w shape is [num_sampled, dim]
# sampled_b is a [num_sampled] float tensor
sampled_w = array_ops.slice(
all_w, array_ops.stack([array_ops.shape(labels_flat)[0], 0]), [-1, -1])
sampled_b = array_ops.slice(all_b, array_ops.shape(labels_flat), [-1])
# inputs has shape [batch_size, dim]
# sampled_w has shape [num_sampled, dim]
# sampled_b has shape [num_sampled]
# Apply X*W'+B, which yields [batch_size, num_sampled]
sampled_logits = math_ops.matmul(
inputs, sampled_w, transpose_b=True) + sampled_b
if remove_accidental_hits:
acc_hits = candidate_sampling_ops.compute_accidental_hits(
labels, sampled, num_true=num_true)
acc_indices, acc_ids, acc_weights = acc_hits
# This is how SparseToDense expects the indices.
acc_indices_2d = array_ops.reshape(acc_indices, [-1, 1])
acc_ids_2d_int32 = array_ops.reshape(
math_ops.cast(acc_ids, dtypes.int32), [-1, 1])
sparse_indices = array_ops.concat([acc_indices_2d, acc_ids_2d_int32], 1,
"sparse_indices")
# Create sampled_logits_shape = [batch_size, num_sampled]
sampled_logits_shape = array_ops.concat(
[array_ops.shape(labels)[:1], array_ops.expand_dims(num_sampled, 0)],
0)
if sampled_logits.dtype != acc_weights.dtype:
acc_weights = math_ops.cast(acc_weights, sampled_logits.dtype)
sampled_logits += sparse_ops.sparse_to_dense(
sparse_indices,
sampled_logits_shape,
acc_weights,
default_value=0.0,
validate_indices=False)
if subtract_log_q:
# Subtract log of Q(l), prior probability that l appears in sampled.
true_logits -= math_ops.log(true_expected_count)
sampled_logits -= math_ops.log(sampled_expected_count)
# Construct output logits and labels. The true labels/logits start at col 0.
out_logits = array_ops.concat([true_logits, sampled_logits], 1)
# true_logits is a float tensor, ones_like(true_logits) is a float tensor
# of ones. We then divide by num_true to ensure the per-example labels sum
# to 1.0, i.e. form a proper probability distribution.
out_labels = array_ops.concat([
array_ops.ones_like(true_logits) / num_true,
array_ops.zeros_like(sampled_logits)
], 1)
return out_logits, out_labels
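# Illustrative sketch (not part of the original source): shapes flowing through
# _compute_sampled_logits for assumed sizes batch_size=32, dim=128,
# num_classes=10000, num_true=1 and num_sampled=64.
#
#   # weights: [10000, 128], biases: [10000]
#   # labels:  [32, 1] int64, inputs: [32, 128]
#   out_logits, out_labels = _compute_sampled_logits(
#       weights, biases, labels, inputs, num_sampled=64, num_classes=10000)
#   # out_logits, out_labels: [32, 1 + 64]; column 0 holds the true class.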
def nce_loss(weights,
biases,
labels,
inputs,
num_sampled,
num_classes,
num_true=1,
sampled_values=None,
remove_accidental_hits=False,
partition_strategy="mod",
name="nce_loss"):
"""Computes and returns the noise-contrastive estimation training loss.
See [Noise-contrastive estimation: A new estimation principle for
unnormalized statistical
models](http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf).
Also see our [Candidate Sampling Algorithms
Reference](../../extras/candidate_sampling.pdf)
Note: By default this uses a log-uniform (Zipfian) distribution for sampling,
so your labels must be sorted in order of decreasing frequency to achieve
good results. For more details, see
@{tf.nn.log_uniform_candidate_sampler}.
Note: In the case where `num_true` > 1, we assign to each target class
the target probability 1 / `num_true` so that the target probabilities
sum to 1 per-example.
Note: It would be useful to allow a variable number of target classes per
example. We hope to provide this functionality in a future release.
For now, if you have a variable number of target classes, you can pad them
out to a constant number by either repeating them or by padding
with an otherwise unused class.
Args:
weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
objects whose concatenation along dimension 0 has shape
[num_classes, dim]. The (possibly-partitioned) class embeddings.
biases: A `Tensor` of shape `[num_classes]`. The class biases.
labels: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes.
inputs: A `Tensor` of shape `[batch_size, dim]`. The forward
activations of the input network.
num_sampled: An `int`. The number of classes to randomly sample per batch.
num_classes: An `int`. The number of possible classes.
num_true: An `int`. The number of target classes per training example.
sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
`sampled_expected_count`) returned by a `*_candidate_sampler` function.
(if None, we default to `log_uniform_candidate_sampler`)
remove_accidental_hits: A `bool`. Whether to remove "accidental hits"
where a sampled class equals one of the target classes. If set to
`True`, this is a "Sampled Logistic" loss instead of NCE, and we are
learning to generate log-odds instead of log probabilities. See
our [Candidate Sampling Algorithms Reference]
(../../extras/candidate_sampling.pdf).
Default is False.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.
Default is `"mod"`. See `tf.nn.embedding_lookup` for more details.
name: A name for the operation (optional).
Returns:
A `batch_size` 1-D tensor of per-example NCE losses.
"""
logits, labels = _compute_sampled_logits(
weights=weights,
biases=biases,
labels=labels,
inputs=inputs,
num_sampled=num_sampled,
num_classes=num_classes,
num_true=num_true,
sampled_values=sampled_values,
subtract_log_q=True,
remove_accidental_hits=remove_accidental_hits,
partition_strategy=partition_strategy,
name=name)
sampled_losses = sigmoid_cross_entropy_with_logits(
labels=labels, logits=logits, name="sampled_losses")
# sampled_losses is batch_size x {true_loss, sampled_losses...}
# We sum out true and sampled losses.
return _sum_rows(sampled_losses)
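# Illustrative sketch (not part of the original source): a word2vec-style use
# of nce_loss; the variables and sizes below are assumptions and their
# creation is elided.
#
#   # nce_weights: [vocab_size, embed_dim] variable, nce_biases: [vocab_size]
#   # embed: [batch_size, embed_dim], train_labels: [batch_size, 1] int64
#   loss = math_ops.reduce_mean(
#       nce_loss(nce_weights, nce_biases, train_labels, embed,
#                num_sampled=64, num_classes=vocab_size))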
def sampled_softmax_loss(weights,
biases,
labels,
inputs,
num_sampled,
num_classes,
num_true=1,
sampled_values=None,
remove_accidental_hits=True,
partition_strategy="mod",
name="sampled_softmax_loss"):
"""Computes and returns the sampled softmax training loss.
This is a faster way to train a softmax classifier over a huge number of
classes.
This operation is for training only. It is generally an underestimate of
the full softmax loss.
At inference time, you can compute full softmax probabilities with the
expression `tf.nn.softmax(tf.matmul(inputs, tf.transpose(weights)) + biases)`.
See our [Candidate Sampling Algorithms Reference]
(../../extras/candidate_sampling.pdf)
Also see Section 3 of [Jean et al., 2014](http://arxiv.org/abs/1412.2007)
([pdf](http://arxiv.org/pdf/1412.2007.pdf)) for the math.
Args:
weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
objects whose concatenation along dimension 0 has shape
[num_classes, dim]. The (possibly-sharded) class embeddings.
biases: A `Tensor` of shape `[num_classes]`. The class biases.
labels: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes. Note that this format differs from
the `labels` argument of `nn.softmax_cross_entropy_with_logits`.
inputs: A `Tensor` of shape `[batch_size, dim]`. The forward
activations of the input network.
num_sampled: An `int`. The number of classes to randomly sample per batch.
num_classes: An `int`. The number of possible classes.
num_true: An `int`. The number of target classes per training example.
sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
`sampled_expected_count`) returned by a `*_candidate_sampler` function.
(if None, we default to `log_uniform_candidate_sampler`)
remove_accidental_hits: A `bool`. whether to remove "accidental hits"
where a sampled class equals one of the target classes. Default is
True.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.
Default is `"mod"`. See `tf.nn.embedding_lookup` for more details.
name: A name for the operation (optional).
Returns:
A `batch_size` 1-D tensor of per-example sampled softmax losses.
"""
logits, labels = _compute_sampled_logits(
weights=weights,
biases=biases,
labels=labels,
inputs=inputs,
num_sampled=num_sampled,
num_classes=num_classes,
num_true=num_true,
sampled_values=sampled_values,
subtract_log_q=True,
remove_accidental_hits=remove_accidental_hits,
partition_strategy=partition_strategy,
name=name)
sampled_losses = nn_ops.softmax_cross_entropy_with_logits(labels=labels,
logits=logits)
# sampled_losses is a [batch_size] tensor.
return sampled_losses
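# Illustrative sketch (not part of the original source): sampled softmax during
# training with the full softmax at inference time; the names and sizes are
# assumptions.
#
#   # Training: per-example losses over num_true + num_sampled classes.
#   train_loss = math_ops.reduce_mean(
#       sampled_softmax_loss(weights, biases, labels, inputs,
#                            num_sampled=64, num_classes=num_classes))
#   # Inference: full logits, as suggested in the docstring above.
#   logits = math_ops.matmul(inputs, weights, transpose_b=True) + biases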
|
the-stack_0_1734 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .tracked_resource import TrackedResource
class StepResource(TrackedResource):
"""The resource representation of a rollout step.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource Id for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
:vartype id: str
:ivar name: The name of the resource
:vartype name: str
:ivar type: The type of the resource. Ex-
Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
:vartype type: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives
:type location: str
:param properties: Required. The properties that define the step.
:type properties: ~azure.mgmt.deploymentmanager.models.StepProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'StepProperties'},
}
def __init__(self, **kwargs):
super(StepResource, self).__init__(**kwargs)
self.properties = kwargs.get('properties', None)
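# Illustrative sketch (not part of the original source): constructing a
# StepResource. 'step_properties' stands for an instance of a concrete
# StepProperties subclass (e.g. a wait step), which is an assumption here.
#
#   step = StepResource(
#       location='eastus',
#       tags={'env': 'test'},
#       properties=step_properties)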
|
the-stack_0_1735 | #!/usr/bin/env python
# Zed Attack Proxy (ZAP) and its related class files.
#
# ZAP is an HTTP/HTTPS proxy for assessing web application security.
#
# Copyright 2017 ZAP Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script runs a full scan against an API defined by OpenAPI/Swagger or SOAP
# using ZAP
#
# It can either be run 'standalone', in which case it depends on
# https://pypi.python.org/pypi/python-owasp-zap-v2.4 and Docker, or it can be run
# inside one of the ZAP docker containers. It automatically detects if it is
# running in docker so the parameters are the same.
#
# It currently support APIS defined by:
# OpenAPI/Swagger URL
# OpenAPI/Swagger file
# SOAP URL
# SOAP File
# It will exit with codes of:
# 0: Success
# 1: At least 1 FAIL
# 2: At least one WARN and no FAILs
# 3: Any other failure
# By default all alerts found by ZAP will be treated as WARNings.
# You can use the -c or -u parameters to specify a configuration file to override
# this.
# You can generate a template configuration file using the -g parameter. You will
# then need to change 'WARN' to 'FAIL', 'INFO' or 'IGNORE' for the rules you want
# to be handled differently.
# You can also add your own messages for the rules by appending them after a tab
# at the end of each line.
# By default the active scan rules run are hardcoded in the API-Minimal.policy
# file but you can change them by supplying a configuration file with the rules
# you don't want to be run set to IGNORE.
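# Example invocations (illustrative additions, not part of the original header;
# the URL and file names below are placeholders):
#   python zap-api-scan.py -t https://www.example.com/openapi.json -f openapi -r report.html
#   python zap-api-scan.py -t https://www.example.com/openapi.json -f openapi -g gen.conf
#   python zap-api-scan.py -t https://www.example.com/openapi.json -f openapi -c gen.conf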
import getopt
import json
import logging
import os
import os.path
import subprocess
import sys
import time
from datetime import datetime
from six.moves.urllib.parse import urljoin
from six.moves.urllib.request import urlopen
from zapv2 import ZAPv2
from zap_common import *
class NoUrlsException(Exception):
pass
config_dict = {}
config_msg = {}
out_of_scope_dict = {}
min_level = 0
# Scan rules that aren't really relevant, eg the examples rules in the alpha set
blacklist = ['-1', '50003', '60000', '60001']
# Scan rules that are being addressed
in_progress_issues = {}
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
# Hide "Starting new HTTP connection" messages
logging.getLogger("requests").setLevel(logging.WARNING)
def usage():
print('Usage: zap-api-scan.py -t <target> -f <format> [options]')
print(' -t target target API definition, OpenAPI or SOAP, local file or URL, eg https://www.example.com/openapi.json')
print(' -f format either openapi or soap')
print('Options:')
print(' -h print this help message')
print(' -c config_file config file to use to INFO, IGNORE or FAIL warnings')
print(' -u config_url URL of config file to use to INFO, IGNORE or FAIL warnings')
print(' -g gen_file generate default config file(all rules set to WARN)')
print(' -r report_html file to write the full ZAP HTML report')
print(' -w report_md file to write the full ZAP Wiki(Markdown) report')
print(' -x report_xml file to write the full ZAP XML report')
print(' -J report_json file to write the full ZAP JSON document')
print(' -a include the alpha passive scan rules as well')
print(' -d show debug messages')
print(' -P specify listen port')
print(' -D delay in seconds to wait for passive scanning ')
print(' -i default rules not in the config file to INFO')
print(' -l level minimum level to show: PASS, IGNORE, INFO, WARN or FAIL, use with -s to hide example URLs')
print(' -n context_file context file which will be loaded prior to scanning the target')
print(' -p progress_file progress file which specifies issues that are being addressed')
print(' -s short output format - dont show PASSes or example URLs')
print(' -T max time in minutes to wait for ZAP to start and the passive scan to run')
print(' -O the hostname to override in the (remote) OpenAPI spec')
print(' -z zap_options ZAP command line options e.g. -z "-config aaa=bbb -config ccc=ddd"')
print(' --hook path to python file that define your custom hooks')
print('')
print('For more details see https://github.com/zaproxy/zaproxy/wiki/ZAP-API-Scan')
def main(argv):
global min_level
global in_progress_issues
cid = ''
context_file = ''
progress_file = ''
config_file = ''
config_url = ''
generate = ''
port = 0
detailed_output = True
report_html = ''
report_md = ''
report_xml = ''
report_json = ''
target = ''
target_file = ''
target_url = ''
host_override = ''
format = ''
zap_alpha = False
info_unspecified = False
base_dir = ''
zap_ip = 'localhost'
zap_options = ''
delay = 0
timeout = 0
hook_file = None
pass_count = 0
warn_count = 0
fail_count = 0
info_count = 0
ignore_count = 0
warn_inprog_count = 0
fail_inprog_count = 0
try:
opts, args = getopt.getopt(argv, "t:f:c:u:g:m:n:r:J:w:x:l:hdaijp:sz:P:D:T:O:", ["hook="])
except getopt.GetoptError as exc:
logging.warning('Invalid option ' + exc.opt + ' : ' + exc.msg)
usage()
sys.exit(3)
for opt, arg in opts:
if opt == '-h':
usage()
sys.exit(0)
elif opt == '-t':
target = arg
logging.debug('Target: ' + target)
elif opt == '-f':
format = arg
elif opt == '-c':
config_file = arg
elif opt == '-u':
config_url = arg
elif opt == '-g':
generate = arg
elif opt == '-d':
logging.getLogger().setLevel(logging.DEBUG)
elif opt == '-P':
port = int(arg)
elif opt == '-D':
delay = int(arg)
elif opt == '-n':
context_file = arg
elif opt == '-p':
progress_file = arg
elif opt == '-r':
report_html = arg
elif opt == '-J':
report_json = arg
elif opt == '-w':
report_md = arg
elif opt == '-x':
report_xml = arg
elif opt == '-a':
zap_alpha = True
elif opt == '-i':
info_unspecified = True
elif opt == '-l':
try:
min_level = zap_conf_lvls.index(arg)
except ValueError:
logging.warning('Level must be one of ' + str(zap_conf_lvls))
usage()
sys.exit(3)
elif opt == '-z':
zap_options = arg
elif opt == '-s':
detailed_output = False
elif opt == '-T':
timeout = int(arg)
elif opt == '-O':
host_override = arg
elif opt == '--hook':
hook_file = arg
check_zap_client_version()
load_custom_hooks(hook_file)
trigger_hook('cli_opts', opts)
# Check target supplied and ok
if len(target) == 0:
usage()
sys.exit(3)
if format != 'openapi' and format != 'soap':
logging.warning('Format must be either \'openapi\' or \'soap\'')
usage()
sys.exit(3)
if running_in_docker():
base_dir = '/zap/wrk/'
if config_file or generate or report_html or report_xml or report_json or progress_file or context_file or target_file:
# Check directory has been mounted
if not os.path.exists(base_dir):
logging.warning('A file based option has been specified but the directory \'/zap/wrk\' is not mounted ')
usage()
sys.exit(3)
if target.startswith('http://') or target.startswith('https://'):
target_url = target
else:
# assume its a file
if not os.path.exists(base_dir + target):
logging.warning('Target must either start with \'http://\' or \'https://\' or be a local file')
logging.warning('File does not exist: ' + base_dir + target)
usage()
sys.exit(3)
else:
target_file = target
# Choose a random 'ephemeral' port and check its available if it wasn't specified with -P option
if port == 0:
port = get_free_port()
logging.debug('Using port: ' + str(port))
if config_file:
# load config file from filestore
with open(base_dir + config_file) as f:
try:
load_config(f, config_dict, config_msg, out_of_scope_dict)
except ValueError as e:
logging.warning("Failed to load config file " + base_dir + config_file + " " + str(e))
sys.exit(3)
elif config_url:
# load config file from url
try:
config_data = urlopen(config_url).read().decode('UTF-8').splitlines()
load_config(config_data, config_dict, config_msg, out_of_scope_dict)
except ValueError as e:
logging.warning("Failed to read configs from " + config_url + " " + str(e))
sys.exit(3)
except:
logging.warning('Failed to read configs from ' + config_url)
sys.exit(3)
if progress_file:
# load progress file from filestore
with open(base_dir + progress_file) as f:
progress = json.load(f)
# parse into something more useful...
# in_prog_issues = map of vulnid -> {object with everything in}
for issue in progress["issues"]:
if issue["state"] == "inprogress":
in_progress_issues[issue["id"]] = issue
if running_in_docker():
try:
params = [
'-addonupdate',
'-addoninstall', 'pscanrulesBeta'] # In case we're running in the stable container
if zap_alpha:
params.append('-addoninstall')
params.append('pscanrulesAlpha')
add_zap_options(params, zap_options)
start_zap(port, params)
except OSError:
logging.warning('Failed to start ZAP :(')
sys.exit(3)
else:
# Not running in docker, so start one
mount_dir = ''
if context_file:
mount_dir = os.path.dirname(os.path.abspath(context_file))
params = ['-addonupdate']
if (zap_alpha):
params.extend(['-addoninstall', 'pscanrulesAlpha'])
add_zap_options(params, zap_options)
try:
cid = start_docker_zap('owasp/zap2docker-weekly', port, params, mount_dir)
zap_ip = ipaddress_for_cid(cid)
logging.debug('Docker ZAP IP Addr: ' + zap_ip)
# Copy across the files that may not be in all of the docker images
try:
subprocess.check_output(['docker', 'exec', '-t', cid, 'mkdir', '-p', '/home/zap/.ZAP_D/scripts/scripts/httpsender/'])
cp_to_docker(cid, 'scripts/scripts/httpsender/Alert_on_HTTP_Response_Code_Errors.js', '/home/zap/.ZAP_D/')
cp_to_docker(cid, 'scripts/scripts/httpsender/Alert_on_Unexpected_Content_Types.js', '/home/zap/.ZAP_D/')
cp_to_docker(cid, 'policies/API-Minimal.policy', '/home/zap/.ZAP_D/')
if target_file:
cp_to_docker(cid, target_file, '/zap/')
except OSError:
logging.warning('Failed to copy one of the required files')
sys.exit(3)
except OSError:
logging.warning('Failed to start ZAP in docker :(')
sys.exit(3)
try:
zap = ZAPv2(proxies={'http': 'http://' + zap_ip + ':' + str(port), 'https': 'http://' + zap_ip + ':' + str(port)})
wait_for_zap_start(zap, timeout * 60)
trigger_hook('zap_started', zap, target)
if context_file:
# handle the context file, cant use base_dir as it might not have been set up
zap_import_context(zap, '/zap/wrk/' + os.path.basename(context_file))
# Enable scripts
zap.script.load('Alert_on_HTTP_Response_Code_Errors.js', 'httpsender', 'Oracle Nashorn', '/home/zap/.ZAP_D/scripts/scripts/httpsender/Alert_on_HTTP_Response_Code_Errors.js')
zap.script.enable('Alert_on_HTTP_Response_Code_Errors.js')
zap.script.load('Alert_on_Unexpected_Content_Types.js', 'httpsender', 'Oracle Nashorn', '/home/zap/.ZAP_D/scripts/scripts/httpsender/Alert_on_Unexpected_Content_Types.js')
zap.script.enable('Alert_on_Unexpected_Content_Types.js')
# Import the API defn
if format == 'openapi':
trigger_hook('importing_openapi', target_url, target_file)
if target_url:
logging.debug('Import OpenAPI URL ' + target_url)
res = zap.openapi.import_url(target, host_override)
urls = zap.core.urls()
if host_override:
target = urljoin(target_url, '//' + host_override)
logging.info('Using host override, new target: {0}'.format(target))
else:
logging.debug('Import OpenAPI File ' + target_file)
res = zap.openapi.import_file(base_dir + target_file)
urls = zap.core.urls()
if len(urls) > 0:
                    # Choose the first one - the path will be stripped off below
target = urls[0]
logging.debug('Using target from imported file: {0}'.format(target))
else:
trigger_hook('importing_soap', target_url, target_file)
if target_url:
logging.debug('Import SOAP URL ' + target_url)
res = zap._request(zap.base + 'soap/action/importUrl/', {'url':target})
urls = zap.core.urls()
else:
logging.debug('Import SOAP File ' + target_file)
res = zap._request(zap.base + 'soap/action/importFile/', {'file': base_dir + target_file})
urls = zap.core.urls()
if len(urls) > 0:
                    # Choose the first one - the path will be stripped off below
target = urls[0]
logging.debug('Using target from imported file: {0}'.format(target))
logging.info('Number of Imported URLs: ' + str(len(urls)))
logging.debug('Import warnings: ' + str(res))
if len(urls) == 0:
logging.warning('Failed to import any URLs')
# No point continue, there's nothing to scan.
raise NoUrlsException()
if target.count('/') > 2:
old_target = target
# The url can include a valid path, but always reset to scan the host
target = target[0:target.index('/', 8)+1]
logging.debug('Normalised target from {0} to {1}'.format(old_target, target))
# Wait for a delay if specified with -D option
if (delay):
start_scan = datetime.now()
while((datetime.now() - start_scan).seconds < delay ):
time.sleep(5)
logging.debug('Delay active scan ' + str(delay -(datetime.now() - start_scan).seconds) + ' seconds')
# Set up the scan policy
scan_policy = 'API-Minimal'
if config_dict:
# They have supplied a config file, use this to define the ascan rules
# Use the default one as the script might not have write access to the one just copied across
scan_policy = 'Default Policy'
zap.ascan.enable_all_scanners(scanpolicyname=scan_policy)
for scanner, state in config_dict.items():
if state == 'IGNORE':
                    # Don't bother checking the result - this will fail for pscan rules
zap.ascan.set_scanner_alert_threshold(id=scanner, alertthreshold='OFF', scanpolicyname=scan_policy)
zap_active_scan(zap, target, scan_policy)
zap_wait_for_passive_scan(zap, timeout * 60)
# Print out a count of the number of urls
num_urls = len(zap.core.urls())
if num_urls == 0:
logging.warning('No URLs found - is the target URL accessible? Local services may not be accessible from the Docker container')
else:
if detailed_output:
print('Total of ' + str(num_urls) + ' URLs')
alert_dict = zap_get_alerts(zap, target, blacklist, out_of_scope_dict)
all_ascan_rules = zap.ascan.scanners('Default Policy')
all_pscan_rules = zap.pscan.scanners
all_dict = {}
for rule in all_pscan_rules:
plugin_id = rule.get('id')
if plugin_id in blacklist:
continue
all_dict[plugin_id] = rule.get('name') + ' - Passive/' + rule.get('quality')
for rule in all_ascan_rules:
plugin_id = rule.get('id')
if plugin_id in blacklist:
continue
all_dict[plugin_id] = rule.get('name') + ' - Active/' + rule.get('quality')
if generate:
# Create the config file
with open(base_dir + generate, 'w') as f:
f.write('# zap-api-scan rule configuration file\n')
f.write('# Change WARN to IGNORE to ignore rule or FAIL to fail if rule matches\n')
f.write('# Active scan rules set to IGNORE will not be run which will speed up the scan\n')
f.write('# Only the rule identifiers are used - the names are just for info\n')
f.write('# You can add your own messages to each rule by appending them after a tab on each line.\n')
                for key, rule in sorted(all_dict.items()):
f.write(key + '\tWARN\t(' + rule + ')\n')
# print out the passing rules
pass_dict = {}
for rule in all_pscan_rules:
plugin_id = rule.get('id')
if plugin_id in blacklist:
continue
            if plugin_id not in alert_dict:
pass_dict[plugin_id] = rule.get('name')
for rule in all_ascan_rules:
plugin_id = rule.get('id')
if plugin_id in blacklist:
continue
            if plugin_id not in alert_dict and not (plugin_id in config_dict and config_dict[plugin_id] == 'IGNORE'):
pass_dict[plugin_id] = rule.get('name')
if min_level == zap_conf_lvls.index("PASS") and detailed_output:
            for key, rule in sorted(pass_dict.items()):
print('PASS: ' + rule + ' [' + key + ']')
pass_count = len(pass_dict)
if detailed_output:
            # print out the ignored ascan rules (there will be no alerts for these as they were not run)
for rule in all_ascan_rules:
plugin_id = rule.get('id')
if plugin_id in blacklist:
continue
                if plugin_id in config_dict and config_dict[plugin_id] == 'IGNORE':
print('SKIP: ' + rule.get('name') + ' [' + plugin_id + ']')
# print out the ignored rules
ignore_count, not_used = print_rules(alert_dict, 'IGNORE', config_dict, config_msg, min_level,
inc_ignore_rules, True, detailed_output, {})
# print out the info rules
info_count, not_used = print_rules(alert_dict, 'INFO', config_dict, config_msg, min_level,
inc_info_rules, info_unspecified, detailed_output, in_progress_issues)
# print out the warning rules
warn_count, warn_inprog_count = print_rules(alert_dict, 'WARN', config_dict, config_msg, min_level,
inc_warn_rules, not info_unspecified, detailed_output, in_progress_issues)
# print out the failing rules
fail_count, fail_inprog_count = print_rules(alert_dict, 'FAIL', config_dict, config_msg, min_level,
inc_fail_rules, True, detailed_output, in_progress_issues)
if report_html:
# Save the report
write_report(base_dir + report_html, zap.core.htmlreport())
if report_json:
# Save the report
write_report(base_dir + report_json, zap.core.jsonreport())
if report_md:
# Save the report
write_report(base_dir + report_md, zap.core.mdreport())
if report_xml:
# Save the report
write_report(base_dir + report_xml, zap.core.xmlreport())
print('FAIL-NEW: ' + str(fail_count) + '\tFAIL-INPROG: ' + str(fail_inprog_count) +
'\tWARN-NEW: ' + str(warn_count) + '\tWARN-INPROG: ' + str(warn_inprog_count) +
'\tINFO: ' + str(info_count) + '\tIGNORE: ' + str(ignore_count) + '\tPASS: ' + str(pass_count))
trigger_hook('zap_pre_shutdown', zap)
# Stop ZAP
zap.core.shutdown()
except IOError as e:
if hasattr(e, 'args') and len(e.args) > 1:
            errno, strerror = e.args[0], e.args[1]
print("ERROR " + str(strerror))
logging.warning('I/O error(' + str(errno) + '): ' + str(strerror))
else:
print("ERROR %s" % e)
logging.warning('I/O error: ' + str(e))
dump_log_file(cid)
except NoUrlsException:
dump_log_file(cid)
except:
print("ERROR " + str(sys.exc_info()[0]))
logging.warning('Unexpected error: ' + str(sys.exc_info()[0]))
dump_log_file(cid)
if not running_in_docker():
stop_docker(cid)
trigger_hook('pre_exit', fail_count, warn_count, pass_count)
if fail_count > 0:
sys.exit(1)
elif warn_count > 0:
sys.exit(2)
elif pass_count > 0:
sys.exit(0)
else:
sys.exit(3)
if __name__ == "__main__":
main(sys.argv[1:])
|
the-stack_0_1736 |
# CONFIG -----------------------------------------------------------------------------------------------------------#
# Here are the input and output data paths (Note: you can override wav_path in preprocess.py)
wav_path = '../database/blizzard2013/segmented/small_wavn_lead_trail_silence_removed_16000'
data_path = 'data_blizzard2013_16000/'
# model ids are separate - that way you can use a new tts with an old wavernn and vice versa
# NB: expect undefined behaviour if models were trained on different DSP settings
voc_model_id = 'blizzard2013_mol'
tts_model_id = 'blizzard2013_lsa_smooth_attention_pytorch_multihead'
# set this to True if you are only interested in WaveRNN
ignore_tts = False
# DSP --------------------------------------------------------------------------------------------------------------#
# Settings for all models
sample_rate = 16000
n_fft = 2048
fft_bins = n_fft // 2 + 1
num_mels = 80
hop_length = 200 # 12.5ms - in line with Tacotron 2 paper
win_length = 800 # 50ms - same reason as above
fmin = 40
min_level_db = -100
ref_level_db = 20
bits = 9 # bit depth of signal
mu_law = True # Recommended to suppress noise if using raw bits in hp.voc_mode below
peak_norm = False # Normalise to the peak of each wav file
# WAVERNN / VOCODER ------------------------------------------------------------------------------------------------#
# Model Hparams
voc_mode = 'MOL' # either 'RAW' (softmax on raw bits) or 'MOL' (sample from mixture of logistics)
voc_upsample_factors = (5, 5, 8) # NB - this needs to correctly factorise hop_length
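# Sanity check (added note): 5 * 5 * 8 = 200, which equals hop_length above.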
voc_rnn_dims = 512
voc_fc_dims = 512
voc_compute_dims = 128
voc_res_out_dims = 128
voc_res_blocks = 10
# Training
voc_batch_size = 64
voc_lr = 1e-4
voc_checkpoint_every = 25_000
voc_gen_at_checkpoint = 5 # number of samples to generate at each checkpoint
voc_total_steps = 2_000_000 # Total number of training steps
voc_test_samples = 50 # How many unseen samples to put aside for testing
voc_pad = 2 # this will pad the input so that the resnet can 'see' wider than input length
voc_seq_len = hop_length * 5 # must be a multiple of hop_length
# Generating / Synthesizing
voc_gen_batched = True # very fast (realtime+) single utterance batched generation
voc_target = 11_000 # target number of samples to be generated in each batch entry
voc_overlap = 550 # number of samples for crossfading between batches
# TACOTRON/TTS -----------------------------------------------------------------------------------------------------#
# Model Hparams
tts_r = 1 # model predicts r frames per output step
tts_embed_dims = 256 # embedding dimension for the graphemes/phoneme inputs
tts_encoder_dims = 128
tts_decoder_dims = 256
tts_postnet_dims = 128
tts_encoder_K = 16
tts_lstm_dims = 512
tts_postnet_K = 8
tts_num_highways = 4
tts_dropout = 0.5
tts_cleaner_names = ['english_cleaners']
# Training
tts_schedule = [(7, 1e-3, 10_000, 8), # progressive training schedule
(5, 1e-4, 100_000, 8), # (r, lr, step, batch_size)
(2, 1e-4, 180_000, 8),
(1, 1e-4, 350_000, 8),
(1, 1e-4, 1000_000, 8)]
tts_max_mel_len = 1250 # if you have a couple of extremely long spectrograms you might want to use this
tts_bin_lengths = True # bins the spectrogram lengths before sampling in data loader - speeds up training
tts_clip_grad_norm = 1.0 # clips the gradient norm to prevent explosion - set to None if not needed
tts_checkpoint_every = 2_000 # checkpoints the model every X steps
# TODO: tts_phoneme_prob = 0.0 # [0 <-> 1] probability for feeding model phonemes vs graphemes
# ------------------------------------------------------------------------------------------------------------------#
# reference encoder
ref_enc_filters = [32, 32, 64, 64, 128, 128]
reference_depth = 128
# Global style token
num_gst = 10
num_heads = 4 # Head number for multi-head attention
style_embed_depth = 256
style_att_dim = 128
|
the-stack_0_1738 | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import asyncio
import logging
import signal
import sys
import traceback
from typing import Any, List, Optional, TYPE_CHECKING, Union
import aiohttp
from .user import User
from .invite import Invite
from .template import Template
from .widget import Widget
from .guild import Guild
from .channel import _channel_factory
from .enums import ChannelType
from .mentions import AllowedMentions
from .errors import *
from .enums import Status, VoiceRegion
from .flags import ApplicationFlags
from .gateway import *
from .activity import BaseActivity, create_activity
from .voice_client import VoiceClient
from .http import HTTPClient
from .state import ConnectionState
from . import utils
from .object import Object
from .backoff import ExponentialBackoff
from .webhook import Webhook
from .iterators import GuildIterator
from .appinfo import AppInfo
__all__ = (
'Client',
)
if TYPE_CHECKING:
from .abc import SnowflakeTime
log = logging.getLogger(__name__)
def _cancel_tasks(loop):
tasks = {t for t in asyncio.all_tasks(loop=loop) if not t.done()}
if not tasks:
return
log.info('Cleaning up after %d tasks.', len(tasks))
for task in tasks:
task.cancel()
loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True))
log.info('All tasks finished cancelling.')
for task in tasks:
if task.cancelled():
continue
if task.exception() is not None:
loop.call_exception_handler({
'message': 'Unhandled exception during Client.run shutdown.',
'exception': task.exception(),
'task': task
})
def _cleanup_loop(loop):
try:
_cancel_tasks(loop)
loop.run_until_complete(loop.shutdown_asyncgens())
finally:
log.info('Closing the event loop.')
loop.close()
class Client:
r"""Represents a client connection that connects to Discord.
This class is used to interact with the Discord WebSocket and API.
A number of options can be passed to the :class:`Client`.
Parameters
-----------
max_messages: Optional[:class:`int`]
The maximum number of messages to store in the internal message cache.
This defaults to ``1000``. Passing in ``None`` disables the message cache.
.. versionchanged:: 1.3
Allow disabling the message cache and change the default size to ``1000``.
loop: Optional[:class:`asyncio.AbstractEventLoop`]
The :class:`asyncio.AbstractEventLoop` to use for asynchronous operations.
Defaults to ``None``, in which case the default event loop is used via
:func:`asyncio.get_event_loop()`.
connector: :class:`aiohttp.BaseConnector`
The connector to use for connection pooling.
proxy: Optional[:class:`str`]
Proxy URL.
proxy_auth: Optional[:class:`aiohttp.BasicAuth`]
An object that represents proxy HTTP Basic Authorization.
shard_id: Optional[:class:`int`]
Integer starting at ``0`` and less than :attr:`.shard_count`.
shard_count: Optional[:class:`int`]
The total number of shards.
application_id: :class:`int`
The client's application ID.
intents: :class:`Intents`
The intents that you want to enable for the session. This is a way of
disabling and enabling certain gateway events from triggering and being sent.
If not given, defaults to a regularly constructed :class:`Intents` class.
.. versionadded:: 1.5
member_cache_flags: :class:`MemberCacheFlags`
Allows for finer control over how the library caches members.
If not given, defaults to cache as much as possible with the
currently selected intents.
.. versionadded:: 1.5
chunk_guilds_at_startup: :class:`bool`
Indicates if :func:`.on_ready` should be delayed to chunk all guilds
at start-up if necessary. This operation is incredibly slow for large
amounts of guilds. The default is ``True`` if :attr:`Intents.members`
is ``True``.
.. versionadded:: 1.5
status: Optional[:class:`.Status`]
A status to start your presence with upon logging on to Discord.
activity: Optional[:class:`.BaseActivity`]
An activity to start your presence with upon logging on to Discord.
allowed_mentions: Optional[:class:`AllowedMentions`]
Control how the client handles mentions by default on every message sent.
.. versionadded:: 1.4
heartbeat_timeout: :class:`float`
        The maximum number of seconds before timing out and restarting the
        WebSocket in the case of not receiving a HEARTBEAT_ACK. Useful if
        processing the initial packets takes too long to the point of disconnecting
        you. The default timeout is 60 seconds.
guild_ready_timeout: :class:`float`
The maximum number of seconds to wait for the GUILD_CREATE stream to end before
preparing the member cache and firing READY. The default timeout is 2 seconds.
.. versionadded:: 1.4
assume_unsync_clock: :class:`bool`
Whether to assume the system clock is unsynced. This applies to the ratelimit handling
code. If this is set to ``True``, the default, then the library uses the time to reset
a rate limit bucket given by Discord. If this is ``False`` then your system clock is
used to calculate how long to sleep for. If this is set to ``False`` it is recommended to
sync your system clock to Google's NTP server.
.. versionadded:: 1.3
Attributes
-----------
ws
The websocket gateway the client is currently connected to. Could be ``None``.
loop: :class:`asyncio.AbstractEventLoop`
The event loop that the client uses for asynchronous operations.
"""
def __init__(self, *, loop=None, **options):
self.ws = None
self.loop = asyncio.get_event_loop() if loop is None else loop
self._listeners = {}
self.shard_id = options.get('shard_id')
self.shard_count = options.get('shard_count')
connector = options.pop('connector', None)
proxy = options.pop('proxy', None)
proxy_auth = options.pop('proxy_auth', None)
unsync_clock = options.pop('assume_unsync_clock', True)
self.http = HTTPClient(connector, proxy=proxy, proxy_auth=proxy_auth, unsync_clock=unsync_clock, loop=self.loop)
self._handlers = {
'ready': self._handle_ready
}
self._hooks = {
'before_identify': self._call_before_identify_hook
}
self._connection = self._get_state(**options)
self._connection.shard_count = self.shard_count
self._closed = False
self._ready = asyncio.Event()
self._connection._get_websocket = self._get_websocket
self._connection._get_client = lambda: self
if VoiceClient.warn_nacl:
VoiceClient.warn_nacl = False
log.warning("PyNaCl is not installed, voice will NOT be supported")
# internals
def _get_websocket(self, guild_id=None, *, shard_id=None):
return self.ws
def _get_state(self, **options):
return ConnectionState(dispatch=self.dispatch, handlers=self._handlers,
hooks=self._hooks, http=self.http, loop=self.loop, **options)
def _handle_ready(self):
self._ready.set()
@property
def latency(self):
""":class:`float`: Measures latency between a HEARTBEAT and a HEARTBEAT_ACK in seconds.
This could be referred to as the Discord WebSocket protocol latency.
"""
ws = self.ws
return float('nan') if not ws else ws.latency
def is_ws_ratelimited(self):
""":class:`bool`: Whether the websocket is currently rate limited.
This can be useful to know when deciding whether you should query members
using HTTP or via the gateway.
.. versionadded:: 1.6
"""
if self.ws:
return self.ws.is_ratelimited()
return False
@property
def user(self):
"""Optional[:class:`.ClientUser`]: Represents the connected client. ``None`` if not logged in."""
return self._connection.user
@property
def guilds(self):
"""List[:class:`.Guild`]: The guilds that the connected client is a member of."""
return self._connection.guilds
@property
def emojis(self):
"""List[:class:`.Emoji`]: The emojis that the connected client has."""
return self._connection.emojis
@property
def cached_messages(self):
"""Sequence[:class:`.Message`]: Read-only list of messages the connected client has cached.
.. versionadded:: 1.1
"""
return utils.SequenceProxy(self._connection._messages or [])
@property
def private_channels(self):
"""List[:class:`.abc.PrivateChannel`]: The private channels that the connected client is participating on.
.. note::
This returns only up to 128 of the most recent private channels due to an internal
limitation in how Discord deals with private channels.
"""
return self._connection.private_channels
@property
def voice_clients(self):
"""List[:class:`.VoiceProtocol`]: Represents a list of voice connections.
These are usually :class:`.VoiceClient` instances.
"""
return self._connection.voice_clients
@property
def application_id(self):
"""Optional[:class:`int`]: The client's application ID.
If this is not passed via ``__init__`` then this is retrieved
through the gateway when an event contains the data. Usually
after :func:`~discord.on_connect` is called.
"""
return self._connection.application_id
@property
def application_flags(self) -> ApplicationFlags:
""":class:`~discord.ApplicationFlags`: The client's application flags.
.. versionadded:: 2.0
"""
return self._connection.application_flags # type: ignore
def is_ready(self):
""":class:`bool`: Specifies if the client's internal cache is ready for use."""
return self._ready.is_set()
async def _run_event(self, coro, event_name, *args, **kwargs):
try:
await coro(*args, **kwargs)
except asyncio.CancelledError:
pass
except Exception:
try:
await self.on_error(event_name, *args, **kwargs)
except asyncio.CancelledError:
pass
def _schedule_event(self, coro, event_name, *args, **kwargs):
wrapped = self._run_event(coro, event_name, *args, **kwargs)
# Schedules the task
return asyncio.create_task(wrapped, name=f'discord.py: {event_name}')
def dispatch(self, event, *args, **kwargs):
log.debug('Dispatching event %s', event)
method = 'on_' + event
listeners = self._listeners.get(event)
if listeners:
removed = []
for i, (future, condition) in enumerate(listeners):
if future.cancelled():
removed.append(i)
continue
try:
result = condition(*args)
except Exception as exc:
future.set_exception(exc)
removed.append(i)
else:
if result:
if len(args) == 0:
future.set_result(None)
elif len(args) == 1:
future.set_result(args[0])
else:
future.set_result(args)
removed.append(i)
if len(removed) == len(listeners):
self._listeners.pop(event)
else:
for idx in reversed(removed):
del listeners[idx]
try:
coro = getattr(self, method)
except AttributeError:
pass
else:
self._schedule_event(coro, method, *args, **kwargs)
async def on_error(self, event_method, *args, **kwargs):
"""|coro|
The default error handler provided by the client.
By default this prints to :data:`sys.stderr`; however, it can be
overridden to provide a different implementation.
Check :func:`~discord.on_error` for more details.
"""
print(f'Ignoring exception in {event_method}', file=sys.stderr)
traceback.print_exc()
# hooks
async def _call_before_identify_hook(self, shard_id, *, initial=False):
# This hook is an internal hook that actually calls the public one.
# It allows the library to have its own hook without stepping on the
# toes of those who need to override their own hook.
await self.before_identify_hook(shard_id, initial=initial)
async def before_identify_hook(self, shard_id, *, initial=False):
"""|coro|
A hook that is called before IDENTIFYing a session. This is useful
if you wish to have more control over the synchronization of multiple
IDENTIFYing clients.
The default implementation sleeps for 5 seconds.
.. versionadded:: 1.4
Parameters
------------
shard_id: :class:`int`
The shard ID that requested being IDENTIFY'd
initial: :class:`bool`
Whether this IDENTIFY is the first initial IDENTIFY.
"""
if not initial:
await asyncio.sleep(5.0)
# login state management
async def login(self, token):
"""|coro|
Logs in the client with the specified credentials.
Parameters
-----------
token: :class:`str`
The authentication token. Do not prefix this token with
anything as the library will do it for you.
Raises
------
:exc:`.LoginFailure`
The wrong credentials are passed.
:exc:`.HTTPException`
An unknown HTTP related error occurred,
usually when it isn't 200 or the known incorrect credentials
passing status code.
"""
log.info('logging in using static token')
await self.http.static_login(token.strip())
async def connect(self, *, reconnect=True):
"""|coro|
Creates a websocket connection and lets the websocket listen
to messages from Discord. This is a loop that runs the entire
event system and miscellaneous aspects of the library. Control
is not resumed until the WebSocket connection is terminated.
Parameters
-----------
reconnect: :class:`bool`
If we should attempt reconnecting, either due to internet
failure or a specific failure on Discord's part. Certain
disconnects that lead to bad state will not be handled (such as
invalid sharding payloads or bad tokens).
Raises
-------
:exc:`.GatewayNotFound`
If the gateway to connect to Discord is not found. Usually if this
is thrown then there is a Discord API outage.
:exc:`.ConnectionClosed`
The websocket connection has been terminated.
"""
backoff = ExponentialBackoff()
ws_params = {
'initial': True,
'shard_id': self.shard_id,
}
while not self.is_closed():
try:
coro = DiscordWebSocket.from_client(self, **ws_params)
self.ws = await asyncio.wait_for(coro, timeout=60.0)
ws_params['initial'] = False
while True:
await self.ws.poll_event()
except ReconnectWebSocket as e:
log.info('Got a request to %s the websocket.', e.op)
self.dispatch('disconnect')
ws_params.update(sequence=self.ws.sequence, resume=e.resume, session=self.ws.session_id)
continue
except (OSError,
HTTPException,
GatewayNotFound,
ConnectionClosed,
aiohttp.ClientError,
asyncio.TimeoutError) as exc:
self.dispatch('disconnect')
if not reconnect:
await self.close()
if isinstance(exc, ConnectionClosed) and exc.code == 1000:
# clean close, don't re-raise this
return
raise
if self.is_closed():
return
# If we get connection reset by peer then try to RESUME
if isinstance(exc, OSError) and exc.errno in (54, 10054):
ws_params.update(sequence=self.ws.sequence, initial=False, resume=True, session=self.ws.session_id)
continue
# We should only get this when an unhandled close code happens,
# such as a clean disconnect (1000) or a bad state (bad token, no sharding, etc).
# Sometimes Discord sends us 1000 for unknown reasons, so we reconnect
# regardless and rely on is_closed instead.
if isinstance(exc, ConnectionClosed):
if exc.code == 4014:
raise PrivilegedIntentsRequired(exc.shard_id) from None
if exc.code != 1000:
await self.close()
raise
retry = backoff.delay()
log.exception("Attempting a reconnect in %.2fs", retry)
await asyncio.sleep(retry)
# Always try to RESUME the connection
# If the connection is not RESUME-able then the gateway will invalidate the session.
# This is apparently what the official Discord client does.
ws_params.update(sequence=self.ws.sequence, resume=True, session=self.ws.session_id)
async def close(self):
"""|coro|
Closes the connection to Discord.
"""
if self._closed:
return
await self.http.close()
self._closed = True
for voice in self.voice_clients:
try:
await voice.disconnect()
except Exception:
# if an error happens during disconnects, disregard it.
pass
if self.ws is not None and self.ws.open:
await self.ws.close(code=1000)
self._ready.clear()
def clear(self):
"""Clears the internal state of the bot.
After this, the bot can be considered "re-opened", i.e. :meth:`is_closed`
and :meth:`is_ready` both return ``False`` along with the bot's internal
cache cleared.
"""
self._closed = False
self._ready.clear()
self._connection.clear()
self.http.recreate()
async def start(self, token, *, reconnect=True):
"""|coro|
A shorthand coroutine for :meth:`login` + :meth:`connect`.
Raises
-------
TypeError
An unexpected keyword argument was received.
"""
await self.login(token)
await self.connect(reconnect=reconnect)
def run(self, *args, **kwargs):
"""A blocking call that abstracts away the event loop
initialisation from you.
If you want more control over the event loop then this
function should not be used. Use :meth:`start` coroutine
or :meth:`connect` + :meth:`login`.
Roughly Equivalent to: ::
try:
loop.run_until_complete(start(*args, **kwargs))
except KeyboardInterrupt:
loop.run_until_complete(close())
# cancel all tasks lingering
finally:
loop.close()
.. warning::
This function must be the last function called because it is blocking.
That means that events registered, or anything else invoked, after this
call will not execute until it returns.
"""
loop = self.loop
try:
loop.add_signal_handler(signal.SIGINT, lambda: loop.stop())
loop.add_signal_handler(signal.SIGTERM, lambda: loop.stop())
except NotImplementedError:
pass
async def runner():
try:
await self.start(*args, **kwargs)
finally:
if not self.is_closed():
await self.close()
def stop_loop_on_completion(f):
loop.stop()
future = asyncio.ensure_future(runner(), loop=loop)
future.add_done_callback(stop_loop_on_completion)
try:
loop.run_forever()
except KeyboardInterrupt:
log.info('Received signal to terminate bot and event loop.')
finally:
future.remove_done_callback(stop_loop_on_completion)
log.info('Cleaning up tasks.')
_cleanup_loop(loop)
if not future.cancelled():
try:
return future.result()
except KeyboardInterrupt:
# I am unsure why this gets raised here but suppress it anyway
return None
# properties
def is_closed(self):
""":class:`bool`: Indicates if the websocket connection is closed."""
return self._closed
@property
def activity(self):
"""Optional[:class:`.BaseActivity`]: The activity being used upon
logging in.
"""
return create_activity(self._connection._activity)
@activity.setter
def activity(self, value):
if value is None:
self._connection._activity = None
elif isinstance(value, BaseActivity):
self._connection._activity = value.to_dict()
else:
raise TypeError('activity must derive from BaseActivity.')
@property
def allowed_mentions(self):
"""Optional[:class:`~discord.AllowedMentions`]: The allowed mention configuration.
.. versionadded:: 1.4
"""
return self._connection.allowed_mentions
@allowed_mentions.setter
def allowed_mentions(self, value):
if value is None or isinstance(value, AllowedMentions):
self._connection.allowed_mentions = value
else:
raise TypeError(f'allowed_mentions must be AllowedMentions not {value.__class__!r}')
@property
def intents(self):
""":class:`~discord.Intents`: The intents configured for this connection.
.. versionadded:: 1.5
"""
return self._connection.intents
# helpers/getters
@property
def users(self):
"""List[:class:`~discord.User`]: Returns a list of all the users the bot can see."""
return list(self._connection._users.values())
def get_channel(self, id):
"""Returns a channel with the given ID.
Parameters
-----------
id: :class:`int`
The ID to search for.
Returns
--------
Optional[Union[:class:`.abc.GuildChannel`, :class:`.abc.PrivateChannel`]]
The returned channel or ``None`` if not found.
"""
return self._connection.get_channel(id)
def get_guild(self, id):
"""Returns a guild with the given ID.
Parameters
-----------
id: :class:`int`
The ID to search for.
Returns
--------
Optional[:class:`.Guild`]
The guild or ``None`` if not found.
"""
return self._connection._get_guild(id)
def get_user(self, id):
"""Returns a user with the given ID.
Parameters
-----------
id: :class:`int`
The ID to search for.
Returns
--------
Optional[:class:`~discord.User`]
The user or ``None`` if not found.
"""
return self._connection.get_user(id)
def get_emoji(self, id):
"""Returns an emoji with the given ID.
Parameters
-----------
id: :class:`int`
The ID to search for.
Returns
--------
Optional[:class:`.Emoji`]
The custom emoji or ``None`` if not found.
"""
return self._connection.get_emoji(id)
def get_all_channels(self):
"""A generator that retrieves every :class:`.abc.GuildChannel` the client can 'access'.
This is equivalent to: ::
for guild in client.guilds:
for channel in guild.channels:
yield channel
.. note::
Just because you receive a :class:`.abc.GuildChannel` does not mean that
you can communicate in said channel. :meth:`.abc.GuildChannel.permissions_for` should
be used for that.
Yields
------
:class:`.abc.GuildChannel`
A channel the client can 'access'.
"""
for guild in self.guilds:
yield from guild.channels
def get_all_members(self):
"""Returns a generator with every :class:`.Member` the client can see.
This is equivalent to: ::
for guild in client.guilds:
for member in guild.members:
yield member
Yields
------
:class:`.Member`
A member the client can see.
"""
for guild in self.guilds:
yield from guild.members
# listeners/waiters
async def wait_until_ready(self):
"""|coro|
Waits until the client's internal cache is all ready.
"""
await self._ready.wait()
def wait_for(self, event, *, check=None, timeout=None):
"""|coro|
Waits for a WebSocket event to be dispatched.
This could be used to wait for a user to reply to a message,
or to react to a message, or to edit a message in a self-contained
way.
The ``timeout`` parameter is passed onto :func:`asyncio.wait_for`. By default,
it does not timeout. Note that this does propagate the
:exc:`asyncio.TimeoutError` for you in case of timeout and is provided for
ease of use.
In case the event returns multiple arguments, a :class:`tuple` containing those
arguments is returned instead. Please check the
:ref:`documentation <discord-api-events>` for a list of events and their
parameters.
This function returns the **first event that meets the requirements**.
Examples
---------
Waiting for a user reply: ::
@client.event
async def on_message(message):
if message.content.startswith('$greet'):
channel = message.channel
await channel.send('Say hello!')
def check(m):
return m.content == 'hello' and m.channel == channel
msg = await client.wait_for('message', check=check)
await channel.send(f'Hello {msg.author}!')
Waiting for a thumbs up reaction from the message author: ::
@client.event
async def on_message(message):
if message.content.startswith('$thumb'):
channel = message.channel
await channel.send('Send me that \N{THUMBS UP SIGN} reaction, mate')
def check(reaction, user):
return user == message.author and str(reaction.emoji) == '\N{THUMBS UP SIGN}'
try:
reaction, user = await client.wait_for('reaction_add', timeout=60.0, check=check)
except asyncio.TimeoutError:
await channel.send('\N{THUMBS DOWN SIGN}')
else:
await channel.send('\N{THUMBS UP SIGN}')
Parameters
------------
event: :class:`str`
The event name, similar to the :ref:`event reference <discord-api-events>`,
but without the ``on_`` prefix, to wait for.
check: Optional[Callable[..., :class:`bool`]]
A predicate to check what to wait for. The arguments must meet the
parameters of the event being waited for.
timeout: Optional[:class:`float`]
The number of seconds to wait before timing out and raising
:exc:`asyncio.TimeoutError`.
Raises
-------
asyncio.TimeoutError
If a timeout is provided and it was reached.
Returns
--------
Any
Returns no arguments, a single argument, or a :class:`tuple` of multiple
arguments that mirrors the parameters passed in the
:ref:`event reference <discord-api-events>`.
"""
future = self.loop.create_future()
if check is None:
def _check(*args):
return True
check = _check
ev = event.lower()
try:
listeners = self._listeners[ev]
except KeyError:
listeners = []
self._listeners[ev] = listeners
listeners.append((future, check))
return asyncio.wait_for(future, timeout)
# event registration
def event(self, coro):
"""A decorator that registers an event to listen to.
You can find more info about the events on the :ref:`documentation below <discord-api-events>`.
The event must be a :ref:`coroutine <coroutine>`; if it is not, :exc:`TypeError` is raised.
Example
---------
.. code-block:: python3
@client.event
async def on_ready():
print('Ready!')
Raises
--------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError('event registered must be a coroutine function')
setattr(self, coro.__name__, coro)
log.debug('%s has successfully been registered as an event', coro.__name__)
return coro
async def change_presence(self, *, activity=None, status=None, afk=False):
"""|coro|
Changes the client's presence.
Example
---------
.. code-block:: python3
game = discord.Game("with the API")
await client.change_presence(status=discord.Status.idle, activity=game)
Parameters
----------
activity: Optional[:class:`.BaseActivity`]
The activity being done. ``None`` if no currently active activity is done.
status: Optional[:class:`.Status`]
Indicates what status to change to. If ``None``, then
:attr:`.Status.online` is used.
afk: Optional[:class:`bool`]
Indicates if you are going AFK. This allows the discord
client to know how to handle push notifications better
for you in case you are actually idle and not lying.
Raises
------
:exc:`.InvalidArgument`
If the ``activity`` parameter is not the proper type.
"""
if status is None:
status = 'online'
status_enum = Status.online
elif status is Status.offline:
status = 'invisible'
status_enum = Status.offline
else:
status_enum = status
status = str(status)
await self.ws.change_presence(activity=activity, status=status, afk=afk)
for guild in self._connection.guilds:
me = guild.me
if me is None:
continue
if activity is not None:
me.activities = (activity,)
else:
me.activities = ()
me.status = status_enum
# Guild stuff
def fetch_guilds(self, *, limit: int = 100, before: SnowflakeTime = None, after: SnowflakeTime = None) -> List[Guild]:
"""Retrieves an :class:`.AsyncIterator` that enables receiving your guilds.
.. note::
Using this, you will only receive :attr:`.Guild.owner`, :attr:`.Guild.icon`,
:attr:`.Guild.id`, and :attr:`.Guild.name` per :class:`.Guild`.
.. note::
This method is an API call. For general usage, consider :attr:`guilds` instead.
Examples
---------
Usage ::
async for guild in client.fetch_guilds(limit=150):
print(guild.name)
Flattening into a list ::
guilds = await client.fetch_guilds(limit=150).flatten()
# guilds is now a list of Guild...
All parameters are optional.
Parameters
-----------
limit: Optional[:class:`int`]
The number of guilds to retrieve.
If ``None``, it retrieves every guild you have access to. Note, however,
that this would make it a slow operation.
Defaults to ``100``.
before: Union[:class:`.abc.Snowflake`, :class:`datetime.datetime`]
Retrieves guilds before this date or object.
If a datetime is provided, it is recommended to use a UTC aware datetime.
If the datetime is naive, it is assumed to be local time.
after: Union[:class:`.abc.Snowflake`, :class:`datetime.datetime`]
Retrieve guilds after this date or object.
If a datetime is provided, it is recommended to use a UTC aware datetime.
If the datetime is naive, it is assumed to be local time.
Raises
------
:exc:`.HTTPException`
Getting the guilds failed.
Yields
--------
:class:`.Guild`
The guild with the guild data parsed.
"""
return GuildIterator(self, limit=limit, before=before, after=after)
async def fetch_template(self, code):
"""|coro|
Gets a :class:`.Template` from a discord.new URL or code.
Parameters
-----------
code: Union[:class:`.Template`, :class:`str`]
The Discord Template Code or URL (must be a discord.new URL).
Raises
-------
:exc:`.NotFound`
The template is invalid.
:exc:`.HTTPException`
Getting the template failed.
Returns
--------
:class:`.Template`
The template from the URL/code.
"""
code = utils.resolve_template(code)
data = await self.http.get_template(code)
return Template(data=data, state=self._connection) # type: ignore
async def fetch_guild(self, guild_id):
"""|coro|
Retrieves a :class:`.Guild` from an ID.
.. note::
Using this, you will **not** receive :attr:`.Guild.channels`, :attr:`.Guild.members`,
:attr:`.Member.activity` and :attr:`.Member.voice` per :class:`.Member`.
.. note::
This method is an API call. For general usage, consider :meth:`get_guild` instead.
Parameters
-----------
guild_id: :class:`int`
The guild's ID to fetch from.
Raises
------
:exc:`.Forbidden`
You do not have access to the guild.
:exc:`.HTTPException`
Getting the guild failed.
Returns
--------
:class:`.Guild`
The guild from the ID.
"""
data = await self.http.get_guild(guild_id)
return Guild(data=data, state=self._connection)
async def create_guild(self, name: str, region: Optional[VoiceRegion] = None, icon: Any = None, *, code: str = None):
"""|coro|
Creates a :class:`.Guild`.
Bot accounts in more than 10 guilds are not allowed to create guilds.
Parameters
----------
name: :class:`str`
The name of the guild.
region: :class:`.VoiceRegion`
The region for the voice communication server.
Defaults to :attr:`.VoiceRegion.us_west`.
icon: :class:`bytes`
The :term:`py:bytes-like object` representing the icon. See :meth:`.ClientUser.edit`
for more details on what is expected.
code: Optional[:class:`str`]
The code for a template to create the guild with.
.. versionadded:: 1.4
Raises
------
:exc:`.HTTPException`
Guild creation failed.
:exc:`.InvalidArgument`
Invalid icon image format given. Must be PNG or JPG.
Returns
-------
:class:`.Guild`
The guild created. This is not the same guild that is
added to cache.
"""
if icon is not None:
icon = utils._bytes_to_base64_data(icon)
region = region or VoiceRegion.us_west
region_value = region.value
if code:
data = await self.http.create_from_template(code, name, region_value, icon)
else:
data = await self.http.create_guild(name, region_value, icon)
return Guild(data=data, state=self._connection)
# Invite management
async def fetch_invite(self, url: Union[Invite, str], *, with_counts: bool = True, with_expiration: bool = True) -> Invite:
"""|coro|
Gets an :class:`.Invite` from a discord.gg URL or ID.
.. note::
If the invite is for a guild you have not joined, the guild and channel
attributes of the returned :class:`.Invite` will be :class:`.PartialInviteGuild` and
:class:`.PartialInviteChannel` respectively.
Parameters
-----------
url: Union[:class:`.Invite`, :class:`str`]
The Discord invite ID or URL (must be a discord.gg URL).
with_counts: :class:`bool`
Whether to include count information in the invite. This fills the
:attr:`.Invite.approximate_member_count` and :attr:`.Invite.approximate_presence_count`
fields.
with_expiration: :class:`bool`
Whether to include the expiration date of the invite. This fills the
:attr:`.Invite.expires_at` field.
.. versionadded:: 2.0
Raises
-------
:exc:`.NotFound`
The invite has expired or is invalid.
:exc:`.HTTPException`
Getting the invite failed.
Returns
--------
:class:`.Invite`
The invite from the URL/ID.
"""
invite_id = utils.resolve_invite(url)
data = await self.http.get_invite(invite_id, with_counts=with_counts, with_expiration=with_expiration)
return Invite.from_incomplete(state=self._connection, data=data)
async def delete_invite(self, invite: Union[Invite, str]) -> None:
"""|coro|
Revokes an :class:`.Invite`, URL, or ID to an invite.
You must have the :attr:`~.Permissions.manage_channels` permission in
the associated guild to do this.
Parameters
----------
invite: Union[:class:`.Invite`, :class:`str`]
The invite to revoke.
Raises
-------
:exc:`.Forbidden`
You do not have permissions to revoke invites.
:exc:`.NotFound`
The invite is invalid or expired.
:exc:`.HTTPException`
Revoking the invite failed.
"""
invite_id = utils.resolve_invite(invite)
await self.http.delete_invite(invite_id)
# Miscellaneous stuff
async def fetch_widget(self, guild_id):
"""|coro|
Gets a :class:`.Widget` from a guild ID.
.. note::
The guild must have the widget enabled to get this information.
Parameters
-----------
guild_id: :class:`int`
The ID of the guild.
Raises
-------
:exc:`.Forbidden`
The widget for this guild is disabled.
:exc:`.HTTPException`
Retrieving the widget failed.
Returns
--------
:class:`.Widget`
The guild's widget.
"""
data = await self.http.get_widget(guild_id)
return Widget(state=self._connection, data=data)
async def application_info(self):
"""|coro|
Retrieves the bot's application information.
Raises
-------
:exc:`.HTTPException`
Retrieving the information failed somehow.
Returns
--------
:class:`.AppInfo`
The bot's application information.
"""
data = await self.http.application_info()
if 'rpc_origins' not in data:
data['rpc_origins'] = None
return AppInfo(self._connection, data)
async def fetch_user(self, user_id):
"""|coro|
Retrieves a :class:`~discord.User` based on their ID.
You do not have to share any guilds with the user to get this information,
however many operations do require that you do.
.. note::
This method is an API call. If you have :attr:`discord.Intents.members` and member cache enabled, consider :meth:`get_user` instead.
Parameters
-----------
user_id: :class:`int`
The user's ID to fetch from.
Raises
-------
:exc:`.NotFound`
A user with this ID does not exist.
:exc:`.HTTPException`
Fetching the user failed.
Returns
--------
:class:`~discord.User`
The user you requested.
"""
data = await self.http.get_user(user_id)
return User(state=self._connection, data=data)
async def fetch_channel(self, channel_id):
"""|coro|
Retrieves a :class:`.abc.GuildChannel` or :class:`.abc.PrivateChannel` with the specified ID.
.. note::
This method is an API call. For general usage, consider :meth:`get_channel` instead.
.. versionadded:: 1.2
Raises
-------
:exc:`.InvalidData`
An unknown channel type was received from Discord.
:exc:`.HTTPException`
Retrieving the channel failed.
:exc:`.NotFound`
Invalid Channel ID.
:exc:`.Forbidden`
You do not have permission to fetch this channel.
Returns
--------
Union[:class:`.abc.GuildChannel`, :class:`.abc.PrivateChannel`]
The channel from the ID.
"""
data = await self.http.get_channel(channel_id)
factory, ch_type = _channel_factory(data['type'])
if factory is None:
raise InvalidData('Unknown channel type {type} for channel ID {id}.'.format_map(data))
if ch_type in (ChannelType.group, ChannelType.private):
channel = factory(me=self.user, data=data, state=self._connection)
else:
guild_id = int(data['guild_id'])
guild = self.get_guild(guild_id) or Object(id=guild_id)
channel = factory(guild=guild, state=self._connection, data=data)
return channel
async def fetch_webhook(self, webhook_id):
"""|coro|
Retrieves a :class:`.Webhook` with the specified ID.
Raises
--------
:exc:`.HTTPException`
Retrieving the webhook failed.
:exc:`.NotFound`
Invalid webhook ID.
:exc:`.Forbidden`
You do not have permission to fetch this webhook.
Returns
---------
:class:`.Webhook`
The webhook you requested.
"""
data = await self.http.get_webhook(webhook_id)
return Webhook.from_state(data, state=self._connection)
async def create_dm(self, user):
"""|coro|
Creates a :class:`.DMChannel` with this user.
This should be rarely called, as this is done transparently for most
people.
.. versionadded:: 2.0
Parameters
-----------
user: :class:`~discord.abc.Snowflake`
The user to create a DM with.
Returns
-------
:class:`.DMChannel`
The channel that was created.
"""
state = self._connection
found = state._get_private_channel_by_user(user.id)
if found:
return found
data = await state.http.start_private_message(user.id)
return state.add_dm_channel(data)
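# A minimal, hypothetical usage sketch of the Client above. It assumes a valid
# bot token is available in the DISCORD_TOKEN environment variable; the event
# names follow the on_ready/on_message events documented earlier in this class.
if __name__ == '__main__':
    import os

    class _ExampleClient(Client):
        async def on_ready(self):
            print(f'Logged in as {self.user} (ID: {self.user.id})')

        async def on_message(self, message):
            # ignore our own messages and reply to a simple command
            if message.author == self.user:
                return
            if message.content.startswith('$ping'):
                await message.channel.send('pong')

    _ExampleClient().run(os.environ['DISCORD_TOKEN'])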
|
the-stack_0_1739 | import logging
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
LOGLEVEL = (('debug', logging.DEBUG),
('info', logging.INFO),
('warn', logging.WARN),
('error', logging.ERROR))
LOG = logging.getLogger(__name__)
_train_file = 'dataset/train.npz'
_test_file = 'dataset/test.npz'
_embedding_file = 'dataset/text-embedding.npz'
_split_file = 'dataset/train-split.npz'
class ToxicTrainData(Dataset):
def __init__(self, train_file, train_embedding):
super(ToxicTrainData, self).__init__()
self.text = train_embedding
train_npz = np.load(train_file)
self.X = train_npz['X'].astype(np.float32)
self.y = train_npz['y'].astype(np.float32)
def __len__(self):
return len(self.y)
def __getitem__(self, idx):
return self.text[idx], self.X[idx], self.y[idx]
class ToxicTestData(Dataset):
def __init__(self, test_file, test_embedding):
super(ToxicTestData, self).__init__()
self.text = test_embedding
test_npz = np.load(test_file)
self.X = test_npz['X'].astype(np.float32)
def __len__(self):
return len(self.X)
def __getitem__(self, idx):
return self.text[idx], self.X[idx], 0
class ToxicDataStride(Dataset):
def __init__(self, dataset, indices, embedding_list):
super(ToxicDataStride, self).__init__()
self.dataset = dataset
self.indices = indices
self.embedding_list = embedding_list
def __len__(self):
return len(self.indices)
def __getitem__(self, idx):
text, X, y = self.dataset[self.indices[idx]]
text = self.embedding_list[text]
return torch.from_numpy(text), torch.from_numpy(X), y
class ToxicTrainSplitter(object):
def __init__(self, dataset, split_file, embedding_list):
self.dataset = dataset
self.embedding_list = embedding_list
self.indices = np.load(split_file)['indices']
def split(self):
valid_indices = self.indices[0]
train_indices = []
for indices in self.indices[1:]:
train_indices += indices
train_stride = ToxicDataStride(self.dataset, train_indices,
self.embedding_list)
valid_stride = ToxicDataStride(self.dataset, valid_indices,
self.embedding_list)
return train_stride, valid_stride
def kfold(self):
for i in range(len(self.indices)):
valid_indices = self.indices[i]
train_indices = []
for j in range(len(self.indices)):
if j != i:
train_indices += self.indices[j]
train_stride = ToxicDataStride(self.dataset, train_indices,
self.embedding_list)
valid_stride = ToxicDataStride(self.dataset, valid_indices,
self.embedding_list)
yield train_stride, valid_stride
class ToxicTrainLoader(object):
def __init__(self, batch_size, cv, n_workers=0):
self.batch_size = batch_size
self.cv = cv
self.n_workers = n_workers
embedding_npz = np.load(_embedding_file)
train_embedding = embedding_npz['train_embedding']
embedding_list = embedding_npz['embedding_list']
dataset = ToxicTrainData(_train_file, train_embedding)
self.splitter = ToxicTrainSplitter(dataset, _split_file, embedding_list)
def __call__(self):
if self.cv:
strides = self.splitter.kfold()
for train_stride, valid_stride in strides:
train_loader = DataLoader(train_stride, shuffle=True,
batch_size=self.batch_size,
num_workers=self.n_workers)
valid_loader = DataLoader(valid_stride, shuffle=False,
batch_size=self.batch_size,
num_workers=self.n_workers)
yield train_loader, valid_loader
else:
train_stride, valid_stride = self.splitter.split()
train_loader = DataLoader(train_stride, shuffle=True,
batch_size=self.batch_size,
num_workers=self.n_workers)
valid_loader = DataLoader(valid_stride, shuffle=False,
batch_size=self.batch_size,
num_workers=self.n_workers)
yield train_loader, valid_loader
class ToxicTestLoader(object):
def __init__(self, batch_size, n_workers=0, validate=False):
self.batch_size = batch_size
self.n_workers = n_workers
embedding_npz = np.load(_embedding_file)
train_embedding = embedding_npz['train_embedding']
test_embedding = embedding_npz['test_embedding']
embedding_list = embedding_npz['embedding_list']
if validate:
dataset = ToxicTestData(_train_file, train_embedding)
else:
dataset = ToxicTestData(_test_file, test_embedding)
indices = np.arange(len(dataset))
self.stride = ToxicDataStride(dataset, indices, embedding_list)
def __call__(self):
return DataLoader(self.stride, batch_size=self.batch_size,
num_workers=self.n_workers, shuffle=False)
############################################################
# learning rate scheduler
############################################################
class LRSchedNone(object):
""" No learning rate adjustment """
def __init__(self, param_groups, lr):
self.param_groups = param_groups
self.set_lr(lr)
def set_lr(self, lr):
for param_group in self.param_groups:
param_group['lr'] = lr
self.lr = lr
def update(self, loss, **kwargs):
pass
class LRSchedStep(LRSchedNone):
""" Learning rate scheduler based on predefine (loss, lr) pairs """
def __init__(self, param_groups, lr, *steps):
super(LRSchedStep, self).__init__(param_groups, lr)
self.steps = steps
def update(self, loss, **kwargs):
maximize = kwargs.get('maximize', False)
for step_loss, step_lr in self.steps:
adjust_needed = (maximize and loss > step_loss) or \
((not maximize) and loss < step_loss)
if adjust_needed and self.lr > step_lr:
self.set_lr(step_lr)
LOG.info('Update learning rate to {:.5f}'.format(step_lr))
class LRSchedDecay(LRSchedNone):
""" Learning rate decay on each epoch """
def __init__(self, param_groups, lr, decay, lr_min=0.0):
super(LRSchedDecay, self).__init__(param_groups, lr)
self.decay = decay
self.lr_min = lr_min
def update(self, loss, **kwargs):
if self.lr > self.lr_min:
lr = max(self.lr*self.decay, self.lr_min)
self.set_lr(lr)
LOG.debug('Update learning rate to {:.5f}'.format(lr))
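# A minimal, hypothetical usage sketch that wires the loader to a training loop
# and a step scheduler. It assumes the dataset .npz files referenced above exist;
# the model forward/backward pass is omitted as a placeholder.
if __name__ == '__main__':
    loaders = ToxicTrainLoader(batch_size=64, cv=False)
    for train_loader, valid_loader in loaders():
        for text, X, y in train_loader:
            # text: embedded token sequences, X: dense features, y: labels
            break
    # Step scheduler example: drop the learning rate to 5e-4 once the
    # monitored loss falls below 0.05 (values are illustrative only).
    params = [{'lr': 1e-3}]
    sched = LRSchedStep(params, 1e-3, (0.05, 5e-4))
    sched.update(0.04)
    LOG.info('lr is now %s', params[0]['lr'])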
|
the-stack_0_1741 | import fxcmpy
import pandas as pd
import numpy as np
import datetime as dt
from pyti.simple_moving_average import simple_moving_average as sma
#from pyti.exponential_moving_average import exponential_moving_average as sma
#con = fxcmpy.fxcmpy(config_file='fxcm.cfg')
# Allows for printing the whole data frame
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# df = con.get_candles('EUR/USD', period='D1', start= dt.datetime(2021, 12, 15), end = dt.datetime(2022, 1, 30))
#df_data = con.get_candles('EUR/USD', period='m5', number=10000)
# saving the dataframe
#df_data.to_csv('EUR_USD.csv')
df = pd.read_csv('EUR_USD_M5_SLOW_1000_FAST_100.csv')
# Define our pip cost and lot size
pip_cost = 1
lot_size = 10
# Define fast / slow moving average window lengths
fast = 100
slow = 1000
# Fast and slow simple moving averages (the columns keep the legacy 'hma_*' names)
df['hma_fast'] = sma(df['askclose'], fast)
df['hma_slow'] = sma(df['askclose'], slow)
df['sell'] = (df['hma_fast'] < df['hma_slow'])
df['buy'] = (df['hma_fast'] > df['hma_slow'])
df['signal'] = np.where(df['hma_fast'] < df['hma_slow'], 1, 0)
df['position_operation'] = df['signal'].diff()
begin_prices_buy = []
end_prices_buy = []
begin_prices_sell = []
end_prices_sell = []
profits = 0
# get open/close price for each open position for sells
for i, row in df.iterrows():
if df.loc[i, 'position_operation'] == 1 and df.loc[i, 'sell'] == True and df.loc[i, 'signal'] == 1:
begin_prices_sell.append(float(df.loc[i, 'askclose']))
index = i
while index < len(df.index):
if df.loc[index, 'position_operation'] == -1 and df.loc[index, 'sell'] == False:
end_prices_sell.append(float(df.loc[index, 'askclose']))
index = len(df.index)
index += 1
# get open/close price for each open position for buys
i = 0
for i, row in df.iterrows():
if df.loc[i, 'position_operation'] == -1 and df.loc[i, 'buy'] == True and df.loc[i, 'signal'] == 0:
begin_prices_buy.append(float(df.loc[i, 'askclose']))
index = i
while index < len(df.index):
if df.loc[index, 'position_operation'] == 1 and df.loc[index, 'buy'] == False:
end_prices_buy.append(float(df.loc[index, 'askclose']))
index = len(df.index)
index += 1
# # Calculating the profit / loss
# for i in range(len(begin_prices_buy)):
# profit = (end_prices_buy[i] - begin_prices_buy[i]) * 100 * pip_cost * lot_size
# profits += profit
# print("The return for trade " + str(i + 1) + " is: " + str(int(profit)))
# Calculating the profit / loss
# for i in range(len(begin_prices_buy)):
# profit = (begin_prices_buy[i] - end_prices_buy[i]) * 1000 * pip_cost * lot_size
# profits += profit
# print("The return for trade BUY" + str(i + 1) + " is: " + str(int(profit)))
# Drop positions that were opened but never closed (incomplete trades)
begin_prices_sell = begin_prices_sell[:len(end_prices_sell)]
begin_prices_buy = begin_prices_buy[:len(end_prices_buy)]
# Calculating the profit / loss SELL
i = 0
for i in range(len(begin_prices_sell)):
profit = (begin_prices_sell[i] - end_prices_sell[i]) * 1000 * pip_cost * lot_size
profit = profit - 2
profits += profit
print("The return for trade SELL" + str(i + 1) + " is: " + str(int(profit)))
i = 0
for i in range(len(begin_prices_buy)):
    profit = (end_prices_buy[i] - begin_prices_buy[i]) * 1000 * pip_cost * lot_size
    profit = profit - 2  # subtract commission before accumulating, matching the SELL loop
    profits += profit
    print("The return for trade BUY" + str(i + 1) + " is: " + str(int(profit)))
print("Profit: " + str(profits))
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(24, 16))
ax1 = fig.add_subplot(111, ylabel='EUR/USD Price')
# Plotting market prices and moving averages
df['askclose'].plot(ax=ax1, color='r', lw=1.)
df[['hma_fast', 'hma_slow']].plot(ax=ax1, lw=2.)
# Placing purple markers for position entry
ax1.plot(df.loc[df.position_operation == 1.0].index,
df.hma_fast[df.position_operation == 1.0],
'v', markersize=10, color='red')
# Placing black markers for position exit
ax1.plot(df.loc[df.position_operation == -1.0].index,
df.hma_slow[df.position_operation == -1.0],
'^', markersize=10, color='green')
# Plotting of returns
ax2 = ax1.twinx()
ax2.grid(False)
ax2.set_ylabel('Profits in $')
ax2.plot(df['total'], color='green')
plt.show()
|
the-stack_0_1742 | #!/usr/bin/python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Simple client for the Gerrit REST API.
Example usage:
./gerrit_client.py [command] [args]
"""
from __future__ import print_function
import json
import logging
import optparse
import subcommand
import sys
if sys.version_info.major == 2:
import urlparse
from urllib import quote_plus
else:
from urllib.parse import quote_plus
import urllib.parse as urlparse
import fix_encoding
import gerrit_util
import setup_color
__version__ = '0.1'
def write_result(result, opt):
if opt.json_file:
with open(opt.json_file, 'w') as json_file:
json_file.write(json.dumps(result))
@subcommand.usage('[args ...]')
def CMDmovechanges(parser, args):
parser.add_option('-p', '--param', dest='params', action='append',
help='repeatable query parameter, format: -p key=value')
parser.add_option('--destination_branch', dest='destination_branch',
help='where to move changes to')
(opt, args) = parser.parse_args(args)
assert opt.destination_branch, "--destination_branch not defined"
for p in opt.params:
assert '=' in p, '--param is key=value, not "%s"' % p
host = urlparse.urlparse(opt.host).netloc
limit = 100
while True:
result = gerrit_util.QueryChanges(
host,
list(tuple(p.split('=', 1)) for p in opt.params),
limit=limit,
)
for change in result:
gerrit_util.MoveChange(host, change['id'], opt.destination_branch)
if len(result) < limit:
break
logging.info("Done")
@subcommand.usage('[args ...]')
def CMDbranchinfo(parser, args):
parser.add_option('--branch', dest='branch', help='branch name')
(opt, args) = parser.parse_args(args)
host = urlparse.urlparse(opt.host).netloc
project = quote_plus(opt.project)
branch = quote_plus(opt.branch)
result = gerrit_util.GetGerritBranch(host, project, branch)
logging.info(result)
write_result(result, opt)
@subcommand.usage('[args ...]')
def CMDbranch(parser, args):
parser.add_option('--branch', dest='branch', help='branch name')
parser.add_option('--commit', dest='commit', help='commit hash')
(opt, args) = parser.parse_args(args)
assert opt.project, "--project not defined"
assert opt.branch, "--branch not defined"
assert opt.commit, "--commit not defined"
project = quote_plus(opt.project)
host = urlparse.urlparse(opt.host).netloc
branch = quote_plus(opt.branch)
commit = quote_plus(opt.commit)
result = gerrit_util.CreateGerritBranch(host, project, branch, commit)
logging.info(result)
write_result(result, opt)
@subcommand.usage('[args ...]')
def CMDchanges(parser, args):
parser.add_option('-p', '--param', dest='params', action='append',
help='repeatable query parameter, format: -p key=value')
parser.add_option('-o', '--o-param', dest='o_params', action='append',
help='gerrit output parameters, e.g. ALL_REVISIONS')
parser.add_option('--limit', dest='limit', type=int,
help='maximum number of results to return')
parser.add_option('--start', dest='start', type=int,
help='how many changes to skip '
'(starting with the most recent)')
(opt, args) = parser.parse_args(args)
for p in opt.params:
assert '=' in p, '--param is key=value, not "%s"' % p
result = gerrit_util.QueryChanges(
urlparse.urlparse(opt.host).netloc,
list(tuple(p.split('=', 1)) for p in opt.params),
start=opt.start, # Default: None
limit=opt.limit, # Default: None
o_params=opt.o_params, # Default: None
)
logging.info('Change query returned %d changes.', len(result))
write_result(result, opt)
@subcommand.usage('')
def CMDabandon(parser, args):
parser.add_option('-c', '--change', type=int, help='change number')
parser.add_option('-m', '--message', default='', help='reason for abandoning')
(opt, args) = parser.parse_args(args)
assert opt.change, "-c not defined"
result = gerrit_util.AbandonChange(
urlparse.urlparse(opt.host).netloc,
opt.change, opt.message)
logging.info(result)
write_result(result, opt)
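# Example invocations (host/change values below are hypothetical):
#   ./gerrit_client.py changes --host https://example-review.googlesource.com \
#       -p project=example/src -p status=open --limit 10 --json_file out.json
#   ./gerrit_client.py abandon --host https://example-review.googlesource.com \
#       -c 12345 -m "No longer needed"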
class OptionParser(optparse.OptionParser):
"""Creates the option parse and add --verbose support."""
def __init__(self, *args, **kwargs):
optparse.OptionParser.__init__(self, *args, version=__version__, **kwargs)
self.add_option(
'--verbose', action='count', default=0,
help='Use 2 times for more debugging info')
self.add_option('--host', dest='host', help='Url of host.')
self.add_option('--project', dest='project', help='project name')
self.add_option(
'--json_file', dest='json_file', help='output json filepath')
def parse_args(self, args=None, values=None):
options, args = optparse.OptionParser.parse_args(self, args, values)
# Host is always required
assert options.host, "--host not defined."
levels = [logging.WARNING, logging.INFO, logging.DEBUG]
logging.basicConfig(level=levels[min(options.verbose, len(levels) - 1)])
return options, args
def main(argv):
if sys.hexversion < 0x02060000:
print('\nYour python version %s is unsupported, please upgrade.\n'
% (sys.version.split(' ', 1)[0],),
file=sys.stderr)
return 2
dispatcher = subcommand.CommandDispatcher(__name__)
return dispatcher.execute(OptionParser(), argv)
if __name__ == '__main__':
# These affect sys.stdout so do it outside of main() to simplify mocks in
# unit testing.
fix_encoding.fix_encoding()
setup_color.init()
try:
sys.exit(main(sys.argv[1:]))
except KeyboardInterrupt:
sys.stderr.write('interrupted\n')
sys.exit(1)
|
the-stack_0_1743 | #!/usr/bin/env python3
import os
from mowgli.infrastructure import endpoints
def run():
port = int(os.environ.get('PORT', 8080))
endpoints.APP.run(port=port, debug=True, host='0.0.0.0')
if __name__ == '__main__':
run()
|
the-stack_0_1744 | """
Copyright 2021 the CVXPY developers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cvxpy.atoms.elementwise.entr import entr
from cvxpy.atoms.elementwise.log import log
from cvxpy.atoms.elementwise.maximum import maximum
# flake8: noqa: E501
def loggamma(x):
"""Elementwise log of the gamma function.
Implementation has modest accuracy over the full range, approaching perfect
accuracy as x goes to infinity. For details on the nature of the approximation,
refer to `CVXPY GitHub Issue #228 <https://github.com/cvxpy/cvxpy/issues/228#issuecomment-544281906>`_.
"""
return maximum(
2.18382 - 3.62887*x,
1.79241 - 2.4902*x,
1.21628 - 1.37035*x,
0.261474 - 0.28904*x,
0.577216 - 0.577216*x,
-0.175517 + 0.03649*x,
-1.27572 + 0.621514*x,
-0.845568 + 0.422784*x,
-0.577216*x - log(x),
0.918939 - x - entr(x) - 0.5*log(x),
)
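# A small, hypothetical numerical check of the approximation against
# math.lgamma. loggamma() returns a CVXPY expression, so .value is used to
# evaluate it on constant inputs.
if __name__ == "__main__":
    import math
    for x0 in (0.5, 1.0, 5.0, 50.0):
        print(x0, float(loggamma(x0).value), math.lgamma(x0))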
|
the-stack_0_1746 | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for backend.lib.user."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mock
import endpoints
from loaner.web_app.backend.lib import user
from loaner.web_app.backend.testing import loanertest
class UserTest(loanertest.EndpointsTestCase):
def test_get_user_email(self):
self.login_user()
self.assertEqual(user.get_user_email(), loanertest.USER_EMAIL)
def test_get_endpoints_user_email(self):
self.login_endpoints_user()
self.assertEqual(user.get_user_email(), loanertest.USER_EMAIL)
@mock.patch('__main__.user.endpoints.get_current_user')
@mock.patch('__main__.user.users.get_current_user')
def test_endpoints_error(
self, mock_currentuser, mock_currentuser_endpoint):
mock_currentuser.return_value = None
mock_currentuser_endpoint.side_effect = endpoints.InvalidGetUserCall
with self.assertRaises(user.UserNotFound):
user.get_user_email()
@mock.patch('__main__.user.endpoints.get_current_user')
@mock.patch('__main__.user.users.get_current_user')
def test_get_user_email_no_user_found(
self, mock_currentuser, mock_currentuser_endpoint):
mock_currentuser.return_value = None
mock_currentuser_endpoint.return_value = None
with self.assertRaises(user.UserNotFound):
user.get_user_email()
if __name__ == '__main__':
loanertest.main()
|
the-stack_0_1747 | """Union-find data structure."""
# based on https://www.ics.uci.edu/~eppstein/PADS/UnionFind.py
class UnionFind:
"""Union-find data structure."""
def __init__(self, n):
"""Create a new empty union-find structure."""
# we have singletons
self.array = list(range(n))
self.size = n
# We give the clusters as a dict
self.clusters = {i: {i} for i in range(n)}
def __getitem__(self, element):
"""Find and return the name of the set containing the element."""
return self.array[element]
def union(self, group1, group2):
"""Find the sets containing the objects and merge them all."""
if len(self.clusters[group1]) > len(self.clusters[group2]):
normal_order = True
to_extend = group1
to_delete = group2
else:
normal_order = False
to_extend = group2
to_delete = group1
# update elements
for i in self.clusters[to_delete]:
self.array[i] = to_extend
# updating the clusters
self.clusters[to_extend].update(self.clusters[to_delete])
del self.clusters[to_delete]
# which cluster is bigger?
return normal_order
def get_cluster(self, group):
"""List of element with id group."""
if group in self.clusters:
if self.clusters[group] == set():
print("Something went wrong!")
return self.clusters[group]
else:
return None
def move(self, elem, group):
"""Move element into group."""
elem_group = self.array[elem]
self.clusters[elem_group].remove(elem)
if self.clusters[elem_group] == set():
del self.clusters[elem_group]
self.array[elem] = group
if group in self.clusters:
self.clusters[group].add(elem)
else:
self.clusters[group] = {elem}
def escape(self, elem, lower, upper):
"""Move element somewhere else."""
elem_group = self.array[elem]
self.clusters[elem_group].remove(elem)
for i in range(lower, upper):
if i not in self.clusters: # if the cluster is empty
self.array[elem] = i
self.clusters[i] = {elem}
break
def __repr__(self):
"""To print only."""
return str(self.array)+str(self.clusters)
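# A small, hypothetical demonstration of the structure.
if __name__ == "__main__":
    uf = UnionFind(5)
    uf.union(uf[0], uf[1])   # merge the clusters containing elements 0 and 1
    uf.move(4, uf[0])        # move element 4 into the cluster of element 0
    print(uf)                # prints the name array and the cluster map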
|
the-stack_0_1750 | from flask import Flask, render_template, session, redirect, url_for, flash
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_wtf import Form
from wtforms import StringField, SubmitField
from wtforms.validators import Required, ValidationError
from datetime import datetime
app = Flask(__name__)
bootstrap = Bootstrap(app)
moment = Moment(app)
# Example 4.1 requirement
# Not creating some crazy string because it's
# exposed in this github repo anyways
app.config['SECRET_KEY'] = 'key'
@app.route('/', methods=['GET', 'POST'])
def index():
form = UofTForm()
if form.validate_on_submit():
old_name = session.get('name')
print(old_name)
print(form.name.data)
if old_name is not None and old_name != form.name.data:
flash('Looks like you have changed your name!')
session['name'] = form.name.data
session['email'] = form.email.data
return redirect(url_for('index'))
return render_template(
'index.html',
current_time=datetime.utcnow(),
form=form,
name=session.get('name'),
email=session.get('email')
)
@app.route('/user/<name>')
def user(name):
return render_template('user.html', name=name)
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
class EmailValidator(object):
"""
Checks for the presence of an '@' symbol.
"""
def __call__(self, form, field):
if '@' not in field.data:
message = 'Please include an \'@\' in the email address. \'' + field.data + '\' is missing an \'@\'.'
raise ValidationError(message)
class UofTForm(Form):
name = StringField('What is your name?', validators=[Required()])
email = StringField('What is your UofT Email address?', validators=[EmailValidator()])
submit = SubmitField('Submit')
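# Note: EmailValidator above only checks for the presence of an '@'. A stricter
# alternative (not used here) would be wtforms.validators.Email, which relies on
# the external `email_validator` package.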
if __name__ == '__main__':
app.run(debug=True)
|
the-stack_0_1751 | #!/usr/bin/env python
"""
Copyright (c) 2020 NIDDS developers (https://github.com/prasanthc41m/nidds/)
See the file 'LICENSE' for copying permission
"""
import re
from core.common import retrieve_content
__url__ = "https://cybercrime-tracker.net/all.php"
__check__ = "cp.php?m=login"
__info__ = "malware"
__reference__ = "cybercrime-tracker.net"
def fetch():
retval = {}
content = retrieve_content(__url__)
if __check__ in content:
content = content.replace("<br />", '\n')
for line in content.split('\n'):
line = line.strip()
if not line or line.startswith('#') or '(SSL)' in line:
continue
if '://' in line:
line = re.search(r"://(.*)", line).group(1)
line = line.rstrip('/')
if '/' in line:
retval[line] = (__info__, __reference__)
line = line.split('/')[0]
if ':' in line:
line = line.split(':')[0]
if re.search(r"\A\d+\.\d+\.\d+\.\d+\Z", line):
retval[line] = ("potential malware site", __reference__)
else:
retval[line] = (__info__, __reference__)
return retval
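# A minimal, hypothetical usage sketch: prints each blacklisted host/URL with
# its (info, reference) tuple. Requires network access via retrieve_content.
if __name__ == "__main__":
    for entry, (info, reference) in fetch().items():
        print(entry, info, reference)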
|
the-stack_0_1752 | import numpy as np
import torch
from torch.utils.data import Dataset
import h5py
def rotate_point_cloud(batch_data):
""" Randomly rotate the point clouds to augument the dataset
rotation is per shape based along up direction
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, rotated batch of point clouds
"""
rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
for k in range(batch_data.shape[0]):
rotation_angle = np.random.uniform() * 2 * np.pi
cosval = np.cos(rotation_angle)
sinval = np.sin(rotation_angle)
rotation_matrix = np.array([[cosval, 0, sinval],
[0, 1, 0],
[-sinval, 0, cosval]])
shape_pc = batch_data[k, ...]
rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)
return rotated_data
def jitter_point_cloud(batch_data, sigma=0.01, clip=0.05):
""" Randomly jitter points. jittering is per point.
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, jittered batch of point clouds
"""
B, N, C = batch_data.shape
assert(clip > 0)
jittered_data = np.clip(sigma * np.random.randn(B, N, C), -1*clip, clip)
jittered_data += batch_data
return jittered_data
def center_data(pcs):
for pc in pcs:
centroid = np.mean(pc, axis=0)
pc[:,0]-=centroid[0]
pc[:,1]-=centroid[1]
pc[:,2]-=centroid[2]
return pcs
def normalize_data(pcs):
for pc in pcs:
#get furthest point distance then normalize
d = max(np.sum(np.abs(pc)**2,axis=-1)**(1./2))
pc /= d
# pc[:,0]/=max(abs(pc[:,0]))
# pc[:,1]/=max(abs(pc[:,1]))
# pc[:,2]/=max(abs(pc[:,2]))
return pcs
def load_withmask_h5(h5_filename):
f = h5py.File(h5_filename, 'r')
data = f['data'][:]
label = f['label'][:]
mask = f['mask'][:]
return data, label, mask
def convert_to_binary_mask(masks):
binary_masks = []
for i in range(masks.shape[0]):
binary_mask = np.ones(masks[i].shape)
bg_idx = np.where(masks[i, :] == -1)
binary_mask[bg_idx] = 0
binary_masks.append(binary_mask)
binary_masks = np.array(binary_masks)
return binary_masks
class ScanObjectNN(Dataset):
def __init__(self, data_dir, center=True, normalize=True, train=False, subsample=None):
self.data, self.label, self.mask = load_withmask_h5(data_dir)
self.mask = convert_to_binary_mask(self.mask)
if center:
self.data = center_data(self.data)
if normalize:
self.data = normalize_data(self.data)
self.train = train
self.subsample = subsample
def __getitem__(self, item):
pointcloud = self.data[item][None]
label = self.label[item]
mask = self.mask[item]
if self.train:
pointcloud = jitter_point_cloud(pointcloud)
pointcloud = rotate_point_cloud(pointcloud)
pc_np = pointcloud[0].copy()
ma_np = mask.copy()
if self.subsample is not None:
idx = np.random.choice(pc_np.shape[0], size=self.subsample, replace=False)
pc_np = pc_np[idx]
ma_np = ma_np[idx]
pc = torch.from_numpy(pc_np).type(torch.FloatTensor)
ma = torch.from_numpy(ma_np).type(torch.LongTensor)
return pc, label, ma
def __len__(self):
return self.data.shape[0]
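# A minimal, hypothetical usage sketch; the .h5 path is a placeholder and must
# contain 'data', 'label' and 'mask' datasets as loaded by load_withmask_h5.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    dataset = ScanObjectNN("training_objectdataset.h5", train=True, subsample=1024)
    loader = DataLoader(dataset, batch_size=16, shuffle=True)
    pc, label, mask = next(iter(loader))
    print(pc.shape, label.shape, mask.shape)  # e.g. [16, 1024, 3], [16], [16, 1024]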
|
the-stack_0_1753 | #!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 19121 if testnet else 9121
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
        if kb > 1 and (total_in-total_out) < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and (total_in-total_out) < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
the-stack_0_1756 | # (c) 2016 James Turner <[email protected]>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: aws_service_ip_ranges
author:
- James Turner <[email protected]>
version_added: "2.5"
requirements:
- must have public internet connectivity
short_description: Look up the IP ranges for services provided in AWS such as EC2 and S3.
description:
- AWS publishes IP ranges used on the public internet by EC2, S3, CloudFront, CodeBuild, Route53, and Route53 Health Checking.
- This module produces a list of all the ranges (by default) or can narrow down the list to the specified region or service.
options:
service:
    description: 'The service to filter ranges by. Options: EC2, S3, CLOUDFRONT, CODEBUILD, ROUTE53, ROUTE53_HEALTHCHECKS'
default: null
region:
description: 'The AWS region to narrow the ranges to. Examples: us-east-1, eu-west-2, ap-southeast-1'
default: null
"""
EXAMPLES = """
vars:
ec2_ranges: "{{ lookup('aws_service_ip_ranges', region='ap-southeast-2', service='EC2', wantlist=True) }}"
tasks:
- name: "use list return option and iterate as a loop"
debug: msg="{% for cidr in ec2_ranges %}{{ cidr }} {% endfor %}"
# "52.62.0.0/15 52.64.0.0/17 52.64.128.0/17 52.65.0.0/16 52.95.241.0/24 52.95.255.16/28 54.66.0.0/16 "
- name: "Pull S3 IP ranges, and print the default return style"
debug: msg="{{ lookup('aws_service_ip_ranges', region='us-east-1', service='S3') }}"
# "52.92.16.0/20,52.216.0.0/15,54.231.0.0/17"
"""
RETURN = """
_raw:
description: comma-separated list of CIDR ranges
"""
import json
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
from ansible.module_utils._text import to_native
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
try:
resp = open_url('https://ip-ranges.amazonaws.com/ip-ranges.json')
amazon_response = json.load(resp)['prefixes']
except getattr(json.decoder, 'JSONDecodeError', ValueError) as e:
# on Python 3+, json.decoder.JSONDecodeError is raised for bad
# JSON. On 2.x it's a ValueError
raise AnsibleError("Could not decode AWS IP ranges: %s" % to_native(e))
except HTTPError as e:
raise AnsibleError("Received HTTP error while pulling IP ranges: %s" % to_native(e))
except SSLValidationError as e:
raise AnsibleError("Error validating the server's certificate for: %s" % to_native(e))
except URLError as e:
raise AnsibleError("Failed look up IP range service: %s" % to_native(e))
except ConnectionError as e:
raise AnsibleError("Error connecting to IP range service: %s" % to_native(e))
if 'region' in kwargs:
region = kwargs['region']
amazon_response = (item for item in amazon_response if item['region'] == region)
if 'service' in kwargs:
service = str.upper(kwargs['service'])
amazon_response = (item for item in amazon_response if item['service'] == service)
return [item['ip_prefix'] for item in amazon_response]
|
the-stack_0_1758 | #!/usr/bin/env python
import sys, traceback
import cv2
import os
import re
import numpy as np
import argparse
import string
import plantcv as pcv
def options():
parser = argparse.ArgumentParser(description="Imaging processing with opencv")
parser.add_argument("-i", "--image", help="Input image file.", required=True)
parser.add_argument("-o", "--outdir", help="Output directory for image files.", required=False)
parser.add_argument("-r","--result", help="result file.", required= False )
parser.add_argument("-r2","--coresult", help="result file.", default=None )
parser.add_argument("-w","--writeimg", help="write out images.", default=False, action="store_true")
parser.add_argument("-D", "--debug", help="Turn on debug, prints intermediate images.", action=None)
args = parser.parse_args()
return args
### Main pipeline
def main():
# Get options
args = options()
# Read image
img, path, filename = pcv.readimage(args.image)
# Pipeline step
device = 0
# Convert RGB to HSV and extract the Saturation channel
device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug)
# Threshold the Saturation image
device, s_thresh = pcv.binary_threshold(s, 30, 255, 'light', device, args.debug)
# Median Filter
device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug)
device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug)
# Fill small objects
#device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug)
# Convert RGB to LAB and extract the Blue channel
device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug)
# Threshold the blue image
device, b_thresh = pcv.binary_threshold(b, 130, 255, 'light', device, args.debug)
device, b_cnt = pcv.binary_threshold(b, 130, 255, 'light', device, args.debug)
# Fill small objects
#device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, args.debug)
# Join the thresholded saturation and blue-yellow images
device, bs = pcv.logical_and(s_mblur, b_cnt, device, args.debug)
# Apply Mask (for vis images, mask_color=white)
device, masked = pcv.apply_mask(img, bs, 'white', device, args.debug)
# Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, args.debug)
device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, args.debug)
# Threshold the green-magenta and blue images
device, maskeda_thresh = pcv.binary_threshold(masked_a, 127, 255, 'dark', device, args.debug)
device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light', device, args.debug)
# Join the thresholded saturation and blue-yellow images (OR)
device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug)
device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug)
# Fill small noise
device, ab_fill1 = pcv.fill(ab, ab_cnt, 200, device, args.debug)
# Dilate to join small objects with larger ones
device, ab_cnt1=pcv.dilate(ab_fill1, 3, 2, device, args.debug)
device, ab_cnt2=pcv.dilate(ab_fill1, 3, 2, device, args.debug)
# Fill dilated image mask
device, ab_cnt3=pcv.fill(ab_cnt2,ab_cnt1,150,device,args.debug)
device, masked2 = pcv.apply_mask(masked, ab_cnt3, 'white', device, args.debug)
# Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
device, masked2_a = pcv.rgb2gray_lab(masked2, 'a', device, args.debug)
device, masked2_b = pcv.rgb2gray_lab(masked2, 'b', device, args.debug)
# Threshold the green-magenta and blue images
device, masked2a_thresh = pcv.binary_threshold(masked2_a, 127, 255, 'dark', device, args.debug)
device, masked2b_thresh = pcv.binary_threshold(masked2_b, 128, 255, 'light', device, args.debug)
device, ab_fill = pcv.logical_or(masked2a_thresh, masked2b_thresh, device, args.debug)
# Identify objects
device, id_objects,obj_hierarchy = pcv.find_objects(masked2, ab_fill, device, args.debug)
# Define ROI
device, roi1, roi_hierarchy= pcv.define_roi(masked2,'rectangle', device, None, 'default', args.debug,True, 590, 0,-490,-375)
# Decide which objects to keep
device,roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img,'partial',roi1,roi_hierarchy,id_objects,obj_hierarchy,device, args.debug)
# Object combine kept objects
device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug)
############## VIS Analysis ################
outfile=False
if args.writeimg==True:
outfile=args.outdir+"/"+filename
# Find shape properties, output shape image (optional)
device, shape_header,shape_data,shape_img = pcv.analyze_object(img, args.image, obj, mask, device,args.debug,outfile)
# Shape properties relative to user boundary line (optional)
device, boundary_header,boundary_data, boundary_img1= pcv.analyze_bound(img, args.image,obj, mask, 384, device,args.debug,outfile)
# Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
device, color_header,color_data,color_img= pcv.analyze_color(img, args.image, mask, 256, device, args.debug,None,'v','img',300,outfile)
# Output shape and color data
result=open(args.result,"a")
result.write('\t'.join(map(str,shape_header)))
result.write("\n")
result.write('\t'.join(map(str,shape_data)))
result.write("\n")
for row in shape_img:
result.write('\t'.join(map(str,row)))
result.write("\n")
result.write('\t'.join(map(str,color_header)))
result.write("\n")
result.write('\t'.join(map(str,color_data)))
result.write("\n")
result.write('\t'.join(map(str,boundary_header)))
result.write("\n")
result.write('\t'.join(map(str,boundary_data)))
result.write("\n")
result.write('\t'.join(map(str,boundary_img1)))
result.write("\n")
for row in color_img:
result.write('\t'.join(map(str,row)))
result.write("\n")
result.close()
############################# Use VIS image mask for NIR image#########################
# Find matching NIR image
if args.coresult is not None:
device, nirpath=pcv.get_nir(path,filename,device,args.debug)
nir, path1, filename1=pcv.readimage(nirpath)
nir2=cv2.imread(nirpath,0)
# Flip mask
device, f_mask= pcv.flip(mask,"vertical",device,args.debug)
device, f_mask= pcv.flip(f_mask,"vertical",device,args.debug)
    # Resize mask
device, nmask = pcv.resize(f_mask, 0.2591687042,0.2591687042, device, args.debug)
# position, and crop mask
device,newmask=pcv.crop_position_mask(nir,nmask,device,30,7,"top","right",args.debug)
# Identify objects
device, nir_objects,nir_hierarchy = pcv.find_objects(nir, newmask, device, args.debug)
# Object combine kept objects
device, nir_combined, nir_combinedmask = pcv.object_composition(nir, nir_objects, nir_hierarchy, device, args.debug)
####################################### Analysis #############################################
outfile1=False
if args.writeimg==True:
outfile1=args.outdir+"/"+filename1
device,nhist_header, nhist_data,nir_imgs= pcv.analyze_NIR_intensity(nir2, filename1, nir_combinedmask, 256, device,False, args.debug, outfile1)
device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(nir2, filename1, nir_combined, nir_combinedmask, device, args.debug, outfile1)
coresult=open(args.coresult,"a")
coresult.write('\t'.join(map(str,nhist_header)))
coresult.write("\n")
coresult.write('\t'.join(map(str,nhist_data)))
coresult.write("\n")
for row in nir_imgs:
coresult.write('\t'.join(map(str,row)))
coresult.write("\n")
coresult.write('\t'.join(map(str,nshape_header)))
coresult.write("\n")
coresult.write('\t'.join(map(str,nshape_data)))
coresult.write("\n")
coresult.write('\t'.join(map(str,nir_shape)))
coresult.write("\n")
coresult.close()
if __name__ == '__main__':
main()
|
the-stack_0_1761 | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Auto Tokenizer class. """
from collections import OrderedDict
from ...configuration_utils import PretrainedConfig
from ...file_utils import is_sentencepiece_available, is_tokenizers_available
from ...utils import logging
from ..bart.tokenization_bart import BartTokenizer
from ..bert.tokenization_bert import BertTokenizer
from ..bert_japanese.tokenization_bert_japanese import BertJapaneseTokenizer
from ..bertweet.tokenization_bertweet import BertweetTokenizer
from ..blenderbot.tokenization_blenderbot import BlenderbotTokenizer
from ..blenderbot_small.tokenization_blenderbot_small import BlenderbotSmallTokenizer
from ..convbert.tokenization_convbert import ConvBertTokenizer
from ..ctrl.tokenization_ctrl import CTRLTokenizer
from ..deberta.tokenization_deberta import DebertaTokenizer
from ..distilbert.tokenization_distilbert import DistilBertTokenizer
from ..dpr.tokenization_dpr import DPRQuestionEncoderTokenizer
from ..electra.tokenization_electra import ElectraTokenizer
from ..flaubert.tokenization_flaubert import FlaubertTokenizer
from ..fsmt.tokenization_fsmt import FSMTTokenizer
from ..funnel.tokenization_funnel import FunnelTokenizer
from ..gpt2.tokenization_gpt2 import GPT2Tokenizer
from ..herbert.tokenization_herbert import HerbertTokenizer
from ..layoutlm.tokenization_layoutlm import LayoutLMTokenizer
from ..led.tokenization_led import LEDTokenizer
from ..longformer.tokenization_longformer import LongformerTokenizer
from ..lxmert.tokenization_lxmert import LxmertTokenizer
from ..mobilebert.tokenization_mobilebert import MobileBertTokenizer
from ..mpnet.tokenization_mpnet import MPNetTokenizer
from ..openai.tokenization_openai import OpenAIGPTTokenizer
from ..phobert.tokenization_phobert import PhobertTokenizer
from ..prophetnet.tokenization_prophetnet import ProphetNetTokenizer
from ..rag.tokenization_rag import RagTokenizer
from ..retribert.tokenization_retribert import RetriBertTokenizer
from ..roberta.tokenization_roberta import RobertaTokenizer
from ..squeezebert.tokenization_squeezebert import SqueezeBertTokenizer
from ..tapas.tokenization_tapas import TapasTokenizer
from ..transfo_xl.tokenization_transfo_xl import TransfoXLTokenizer
from ..wav2vec2.tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
from ..xlm.tokenization_xlm import XLMTokenizer
from .configuration_auto import (
AlbertConfig,
AutoConfig,
BartConfig,
BertConfig,
BertGenerationConfig,
BlenderbotConfig,
BlenderbotSmallConfig,
CamembertConfig,
ConvBertConfig,
CTRLConfig,
DebertaConfig,
DebertaV2Config,
DistilBertConfig,
DPRConfig,
ElectraConfig,
EncoderDecoderConfig,
FlaubertConfig,
FSMTConfig,
FunnelConfig,
GPT2Config,
IBertConfig,
LayoutLMConfig,
LEDConfig,
LongformerConfig,
LxmertConfig,
MarianConfig,
MBartConfig,
MobileBertConfig,
MPNetConfig,
MT5Config,
OpenAIGPTConfig,
PegasusConfig,
ProphetNetConfig,
RagConfig,
ReformerConfig,
RetriBertConfig,
RobertaConfig,
SqueezeBertConfig,
T5Config,
TapasConfig,
TransfoXLConfig,
Wav2Vec2Config,
XLMConfig,
XLMProphetNetConfig,
XLMRobertaConfig,
XLNetConfig,
replace_list_option_in_docstrings,
)
if is_sentencepiece_available():
from ..albert.tokenization_albert import AlbertTokenizer
from ..barthez.tokenization_barthez import BarthezTokenizer
from ..bert_generation.tokenization_bert_generation import BertGenerationTokenizer
from ..camembert.tokenization_camembert import CamembertTokenizer
from ..deberta_v2.tokenization_deberta_v2 import DebertaV2Tokenizer
from ..marian.tokenization_marian import MarianTokenizer
from ..mbart.tokenization_mbart import MBartTokenizer
from ..mt5 import MT5Tokenizer
from ..pegasus.tokenization_pegasus import PegasusTokenizer
from ..reformer.tokenization_reformer import ReformerTokenizer
from ..t5.tokenization_t5 import T5Tokenizer
from ..xlm_prophetnet.tokenization_xlm_prophetnet import XLMProphetNetTokenizer
from ..xlm_roberta.tokenization_xlm_roberta import XLMRobertaTokenizer
from ..xlnet.tokenization_xlnet import XLNetTokenizer
else:
AlbertTokenizer = None
BarthezTokenizer = None
BertGenerationTokenizer = None
CamembertTokenizer = None
DebertaV2Tokenizer = None
MarianTokenizer = None
MBartTokenizer = None
MT5Tokenizer = None
PegasusTokenizer = None
ReformerTokenizer = None
T5Tokenizer = None
XLMRobertaTokenizer = None
XLNetTokenizer = None
XLMProphetNetTokenizer = None
if is_tokenizers_available():
from ..albert.tokenization_albert_fast import AlbertTokenizerFast
from ..bart.tokenization_bart_fast import BartTokenizerFast
from ..barthez.tokenization_barthez_fast import BarthezTokenizerFast
from ..bert.tokenization_bert_fast import BertTokenizerFast
from ..camembert.tokenization_camembert_fast import CamembertTokenizerFast
from ..convbert.tokenization_convbert_fast import ConvBertTokenizerFast
from ..distilbert.tokenization_distilbert_fast import DistilBertTokenizerFast
from ..dpr.tokenization_dpr_fast import DPRQuestionEncoderTokenizerFast
from ..electra.tokenization_electra_fast import ElectraTokenizerFast
from ..funnel.tokenization_funnel_fast import FunnelTokenizerFast
from ..gpt2.tokenization_gpt2_fast import GPT2TokenizerFast
from ..herbert.tokenization_herbert_fast import HerbertTokenizerFast
from ..layoutlm.tokenization_layoutlm_fast import LayoutLMTokenizerFast
from ..led.tokenization_led_fast import LEDTokenizerFast
from ..longformer.tokenization_longformer_fast import LongformerTokenizerFast
from ..lxmert.tokenization_lxmert_fast import LxmertTokenizerFast
from ..mbart.tokenization_mbart_fast import MBartTokenizerFast
from ..mobilebert.tokenization_mobilebert_fast import MobileBertTokenizerFast
from ..mpnet.tokenization_mpnet_fast import MPNetTokenizerFast
from ..mt5 import MT5TokenizerFast
from ..openai.tokenization_openai_fast import OpenAIGPTTokenizerFast
from ..pegasus.tokenization_pegasus_fast import PegasusTokenizerFast
from ..reformer.tokenization_reformer_fast import ReformerTokenizerFast
from ..retribert.tokenization_retribert_fast import RetriBertTokenizerFast
from ..roberta.tokenization_roberta_fast import RobertaTokenizerFast
from ..squeezebert.tokenization_squeezebert_fast import SqueezeBertTokenizerFast
from ..t5.tokenization_t5_fast import T5TokenizerFast
from ..xlm_roberta.tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
from ..xlnet.tokenization_xlnet_fast import XLNetTokenizerFast
else:
AlbertTokenizerFast = None
BartTokenizerFast = None
BarthezTokenizerFast = None
BertTokenizerFast = None
CamembertTokenizerFast = None
ConvBertTokenizerFast = None
DistilBertTokenizerFast = None
DPRQuestionEncoderTokenizerFast = None
ElectraTokenizerFast = None
FunnelTokenizerFast = None
GPT2TokenizerFast = None
HerbertTokenizerFast = None
LayoutLMTokenizerFast = None
LEDTokenizerFast = None
LongformerTokenizerFast = None
LxmertTokenizerFast = None
MBartTokenizerFast = None
MobileBertTokenizerFast = None
MPNetTokenizerFast = None
MT5TokenizerFast = None
OpenAIGPTTokenizerFast = None
PegasusTokenizerFast = None
ReformerTokenizerFast = None
RetriBertTokenizerFast = None
RobertaTokenizerFast = None
SqueezeBertTokenizerFast = None
T5TokenizerFast = None
XLMRobertaTokenizerFast = None
XLNetTokenizerFast = None
logger = logging.get_logger(__name__)
TOKENIZER_MAPPING = OrderedDict(
[
(RetriBertConfig, (RetriBertTokenizer, RetriBertTokenizerFast)),
(T5Config, (T5Tokenizer, T5TokenizerFast)),
(MT5Config, (MT5Tokenizer, MT5TokenizerFast)),
(MobileBertConfig, (MobileBertTokenizer, MobileBertTokenizerFast)),
(DistilBertConfig, (DistilBertTokenizer, DistilBertTokenizerFast)),
(AlbertConfig, (AlbertTokenizer, AlbertTokenizerFast)),
(CamembertConfig, (CamembertTokenizer, CamembertTokenizerFast)),
(PegasusConfig, (PegasusTokenizer, PegasusTokenizerFast)),
(MBartConfig, (MBartTokenizer, MBartTokenizerFast)),
(XLMRobertaConfig, (XLMRobertaTokenizer, XLMRobertaTokenizerFast)),
(MarianConfig, (MarianTokenizer, None)),
(BlenderbotSmallConfig, (BlenderbotSmallTokenizer, None)),
(BlenderbotConfig, (BlenderbotTokenizer, None)),
(LongformerConfig, (LongformerTokenizer, LongformerTokenizerFast)),
(BartConfig, (BartTokenizer, BartTokenizerFast)),
(RobertaConfig, (RobertaTokenizer, RobertaTokenizerFast)),
(ReformerConfig, (ReformerTokenizer, ReformerTokenizerFast)),
(ElectraConfig, (ElectraTokenizer, ElectraTokenizerFast)),
(FunnelConfig, (FunnelTokenizer, FunnelTokenizerFast)),
(LxmertConfig, (LxmertTokenizer, LxmertTokenizerFast)),
(LayoutLMConfig, (LayoutLMTokenizer, LayoutLMTokenizerFast)),
(DPRConfig, (DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast)),
(SqueezeBertConfig, (SqueezeBertTokenizer, SqueezeBertTokenizerFast)),
(BertConfig, (BertTokenizer, BertTokenizerFast)),
(OpenAIGPTConfig, (OpenAIGPTTokenizer, OpenAIGPTTokenizerFast)),
(GPT2Config, (GPT2Tokenizer, GPT2TokenizerFast)),
(TransfoXLConfig, (TransfoXLTokenizer, None)),
(XLNetConfig, (XLNetTokenizer, XLNetTokenizerFast)),
(FlaubertConfig, (FlaubertTokenizer, None)),
(XLMConfig, (XLMTokenizer, None)),
(CTRLConfig, (CTRLTokenizer, None)),
(FSMTConfig, (FSMTTokenizer, None)),
(BertGenerationConfig, (BertGenerationTokenizer, None)),
(DebertaConfig, (DebertaTokenizer, None)),
(DebertaV2Config, (DebertaV2Tokenizer, None)),
(RagConfig, (RagTokenizer, None)),
(XLMProphetNetConfig, (XLMProphetNetTokenizer, None)),
(ProphetNetConfig, (ProphetNetTokenizer, None)),
(MPNetConfig, (MPNetTokenizer, MPNetTokenizerFast)),
(TapasConfig, (TapasTokenizer, None)),
(LEDConfig, (LEDTokenizer, LEDTokenizerFast)),
(ConvBertConfig, (ConvBertTokenizer, ConvBertTokenizerFast)),
(IBertConfig, (RobertaTokenizer, RobertaTokenizerFast)),
(Wav2Vec2Config, (Wav2Vec2CTCTokenizer, None)),
]
)
# For tokenizers which are not directly mapped from a config
NO_CONFIG_TOKENIZER = [
BertJapaneseTokenizer,
BertweetTokenizer,
HerbertTokenizer,
HerbertTokenizerFast,
PhobertTokenizer,
BarthezTokenizer,
BarthezTokenizerFast,
]
SLOW_TOKENIZER_MAPPING = {
k: (v[0] if v[0] is not None else v[1])
for k, v in TOKENIZER_MAPPING.items()
if (v[0] is not None or v[1] is not None)
}
def tokenizer_class_from_name(class_name: str):
all_tokenizer_classes = (
[v[0] for v in TOKENIZER_MAPPING.values() if v[0] is not None]
+ [v[1] for v in TOKENIZER_MAPPING.values() if v[1] is not None]
+ NO_CONFIG_TOKENIZER
)
for c in all_tokenizer_classes:
if c.__name__ == class_name:
return c
class AutoTokenizer:
r"""
This is a generic tokenizer class that will be instantiated as one of the tokenizer classes of the library when
created with the :meth:`AutoTokenizer.from_pretrained` class method.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoTokenizer is designed to be instantiated "
"using the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)` method."
)
@classmethod
@replace_list_option_in_docstrings(SLOW_TOKENIZER_MAPPING)
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
r"""
Instantiate one of the tokenizer classes of the library from a pretrained model vocabulary.
The tokenizer class to instantiate is selected based on the :obj:`model_type` property of the config object
(either passed as an argument or loaded from :obj:`pretrained_model_name_or_path` if possible), or when it's
missing, by falling back to using pattern matching on :obj:`pretrained_model_name_or_path`:
List options
Params:
pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`):
Can be either:
- A string, the `model id` of a predefined tokenizer hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under
a user or organization name, like ``dbmdz/bert-base-german-cased``.
- A path to a `directory` containing vocabulary files required by the tokenizer, for instance saved
using the :func:`~transformers.PreTrainedTokenizer.save_pretrained` method, e.g.,
``./my_model_directory/``.
- A path or url to a single saved vocabulary file if and only if the tokenizer only requires a
single vocabulary file (like Bert or XLNet), e.g.: ``./my_model_directory/vocab.txt``. (Not
applicable to all derived classes)
inputs (additional positional arguments, `optional`):
Will be passed along to the Tokenizer ``__init__()`` method.
config (:class:`~transformers.PreTrainedConfig`, `optional`)
                The configuration object used to determine the tokenizer class to instantiate.
cache_dir (:obj:`str` or :obj:`os.PathLike`, `optional`):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force the (re-)download the model weights and configuration files and override the
cached versions if they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
proxies (:obj:`Dict[str, str]`, `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
revision(:obj:`str`, `optional`, defaults to :obj:`"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
identifier allowed by git.
subfolder (:obj:`str`, `optional`):
In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. for
facebook/rag-token-base), specify it here.
use_fast (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to try to load the fast version of the tokenizer.
kwargs (additional keyword arguments, `optional`):
Will be passed to the Tokenizer ``__init__()`` method. Can be used to set special tokens like
``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``,
``mask_token``, ``additional_special_tokens``. See parameters in the ``__init__()`` for more details.
Examples::
>>> from transformers import AutoTokenizer
>>> # Download vocabulary from huggingface.co and cache.
>>> tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
>>> # Download vocabulary from huggingface.co (user-uploaded) and cache.
>>> tokenizer = AutoTokenizer.from_pretrained('dbmdz/bert-base-german-cased')
>>> # If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`)
>>> tokenizer = AutoTokenizer.from_pretrained('./test/bert_saved_model/')
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
use_fast = kwargs.pop("use_fast", True)
if config.tokenizer_class is not None:
tokenizer_class = None
if use_fast and not config.tokenizer_class.endswith("Fast"):
tokenizer_class_candidate = f"{config.tokenizer_class}Fast"
tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate)
if tokenizer_class is None:
tokenizer_class_candidate = config.tokenizer_class
tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate)
if tokenizer_class is None:
raise ValueError(
"Tokenizer class {} does not exist or is not currently imported.".format(tokenizer_class_candidate)
)
return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
# if model is an encoder decoder, the encoder tokenizer class is used by default
if isinstance(config, EncoderDecoderConfig):
if type(config.decoder) is not type(config.encoder): # noqa: E721
logger.warn(
f"The encoder model config class: {config.encoder.__class__} is different from the decoder model "
f"config class: {config.decoder.__class}. It is not recommended to use the "
"`AutoTokenizer.from_pretrained()` method in this case. Please use the encoder and decoder "
"specific tokenizer classes."
)
config = config.encoder
if type(config) in TOKENIZER_MAPPING.keys():
tokenizer_class_py, tokenizer_class_fast = TOKENIZER_MAPPING[type(config)]
if tokenizer_class_fast and (use_fast or tokenizer_class_py is None):
return tokenizer_class_fast.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
else:
if tokenizer_class_py is not None:
return tokenizer_class_py.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
else:
raise ValueError(
"This tokenizer cannot be instantiated. Please make sure you have `sentencepiece` installed "
"in order to use this tokenizer."
)
raise ValueError(
"Unrecognized configuration class {} to build an AutoTokenizer.\n"
"Model type should be one of {}.".format(
config.__class__, ", ".join(c.__name__ for c in TOKENIZER_MAPPING.keys())
)
)
|
the-stack_0_1763 | """
spyre.Tools.wfm_writer.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tool to write waveform in a .WFM format
Authors: Alexandre Bourassa, Kevin Miao
Date: 20/04/2016
"""
import struct
import numpy as _np
from datetime import datetime as _dt
import time as _t
import sys
def array_to_ieee_block(analog, marker1, marker2, prepend_length=True):
"""
Produces a little-endian 4-byte floating point + 1-byte marker representation of analog
:param analog: Array of numpy float32
:param marker1: Array of numpy int8
:param marker2: Array of numpy int8
:return: Byte Stream in the WFM format
"""
num_bytes = 5 * len(analog)
num_digit = len(str(num_bytes))
if not marker1.dtype==_np.int8: marker1 = _np.asarray(marker1, dtype=_np.int8)
if not marker2.dtype==_np.int8: marker2 = _np.asarray(marker2, dtype=_np.int8)
if not analog.dtype == _np.float32: analog = _np.asarray(analog, dtype=_np.float32)
points = _np.zeros(len(analog), dtype='<f4, i1')
points['f1'] = (marker1 + ((marker2)<<1))<<6
points['f0'] = analog
#Makes sure that the byteordering is 'little'
if not sys.byteorder == 'little': points = points.newbyteorder('<')
bin_all = points.tobytes()
if prepend_length: return bytes('#{:d}{:d}'.format(num_digit, num_bytes), encoding='ascii') + bin_all
else : return bin_all
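# Illustrative usage (a sketch, not part of the original module; the sample
# values below are made up purely to show the expected byte layout):
#
#   analog = _np.array([0.0, 0.5, -1.0], dtype=_np.float32)
#   m1 = _np.array([1, 0, 1], dtype=_np.int8)
#   m2 = _np.array([0, 1, 1], dtype=_np.int8)
#   block = array_to_ieee_block(analog, m1, m2)
#   # -> b'#215' (15 payload bytes) followed by three 5-byte records, each a
#   #    little-endian float32 plus one marker byte carrying m1/m2 in bits 6/7.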
def iee_block_to_array(block):
"""
    Decodes an IEEE-formatted block into three arrays
    :param block: IEEE formatted block
:return: analog, marker1, marker2
"""
block = block.rstrip()
#Check for a '#'
    if block[0:1] != b'#': raise ValueError("Argument is not an IEEE formatted block")
#Check for that there is the correct number of bytes
num_digit = int(block[1:2])
num_bytes = int(block[2:2+num_digit])
block = block[2 + num_digit:]
    if len(block) != num_bytes: raise ValueError("Argument is not an IEEE formatted block")
n_points = int(num_bytes/5)
array = struct.unpack('<'+'fB'*n_points, block)
analog = _np.array(array[::2])
marker = _np.array(array[1::2])
marker1 = _np.right_shift(_np.bitwise_and(marker, 64), 6)
marker2 = _np.right_shift(marker, 7)
return analog, marker1, marker2
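# Round-trip sketch (a quick sanity check for the two helpers above; names are
# illustrative only):
#
#   a, m1_back, m2_back = iee_block_to_array(array_to_ieee_block(analog, m1, m2))
#   # a matches `analog` to float32 precision; m1_back/m2_back equal m1/m2.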
class AWG_Record(object):
def __init__(self, name, data, data_type=None):
self.name = name
self.data = data
if data_type is None:
if type(data) == str: self.data_type = 'char'
elif type(data) == float: self.data_type = 'double'
elif type(data) == bytes : self.data_type = 'bytes'
else: self.data_type = 'short'
else:
if not data_type in ['char', 'double', 'long', 'short', 'bytes']: raise Exception("Invalid data type!")
self.data_type= data_type
def _get_format_str(self, data, type):
if type == 'char': return '%ds'%(len(data)+1)
elif type == 'double': return 'd'
elif type == 'long': return 'l'
elif type == 'short': return 'h'
elif type == 'bytes': return '%ds'%(len(data))
else: raise Exception("Invalid data type!")
def _get_length(self, data, type):
if type == 'char': return len(data) + 1
elif type == 'bytes': return len(data)
elif type == 'double': return 8
elif type == 'long': return 4
elif type == 'short': return 2
else :raise Exception("Invalid data type!")
def get_bytes(self):
fmt = '<ii'+self._get_format_str(self.name, 'char')+self._get_format_str(self.data, self.data_type)
name_l = self._get_length(self.name, 'char')
data_l = self._get_length(self.data, self.data_type)
if self.data_type == 'char': data = bytes(self.data, encoding='ascii')
else : data = self.data
return struct.pack(fmt, name_l, data_l, bytes(self.name, encoding='ascii'), data)
class AWG_File_Writer(object):
def __init__(self):
self.records = ([],[],[],[],[],[],[],)
self.add_record("MAGIC", 5000, 1)
self.add_record("VERSION", 1, 1)
self.wfm = list()
self.n_seq_lines = 0
def add_record(self, name, data, group, data_type=None):
group -= 1
if not group in range(7): raise Exception("Invalid group!")
self.records[group].append(AWG_Record(name, data, data_type=data_type))
def add_waveform(self, name, analog, marker1, marker2):
if len(self.wfm)>=32000: raise Exception("Maximum 32000 waveform in .AWG file...")
if len(analog)<250:
print("WARNING: The AWG will use the software sequencer because this waveform has less than 250 points")
data = array_to_ieee_block(analog, marker1, marker2, prepend_length=False)
t = _dt.now()
tm = [t.year, t.month, t.weekday(), t.day, t.hour, t.minute, t.second, t.microsecond // 1000]
self.wfm.append(name)
N = len(self.wfm)
self.add_record("WAVEFORM_NAME_{}".format(N), name, 5)
self.add_record("WAVEFORM_TYPE_{}".format(N), 2, 5)
self.add_record("WAVEFORM_LENGTH_{}".format(N), len(analog), 5, data_type='long')
self.add_record("WAVEFORM_TIMESTAMP_{}".format(N), struct.pack('<' + 'h' * 8, *tm), 5, data_type='bytes')
self.add_record("WAVEFORM_DATA_{}".format(N), data, 5)
def add_sequence_line(self, wfm=("", "", "", ""), use_sub_seq = False, sub_seq_name="",
repeat_count=0, wait_for_trigger=False, jump_target=0, goto_target=0):
if self.n_seq_lines >= 8000: raise Exception("Maximum 8000 lines for main sequence in .AWG file...")
N = self.n_seq_lines + 1
if not (len(wfm) == 4): raise Exception("There should be 4 entries in the wfm tuples")
if not 65536 >= repeat_count >= 0: raise Exception("Maximum of 65536 for repeat_count")
if not use_sub_seq and wfm[0] == wfm[1] == wfm[2] == wfm[3] == "": raise Exception("At least one channel must have non-empty wfm")
if use_sub_seq and sub_seq_name=="": raise Exception("sub_seq_name is empty")
self.add_record('SEQUENCE_WAIT_{}'.format(N), wait_for_trigger, 6)
self.add_record('SEQUENCE_LOOP_{}'.format(N), repeat_count, 6, data_type='long')
self.add_record('SEQUENCE_JUMP_{}'.format(N), jump_target, 6)
self.add_record('SEQUENCE_GOTO_{}'.format(N), goto_target, 6)
# Add the wfm / subseq
if use_sub_seq:
wfm = ("", "", "", "")
else:
sub_seq_name = ""
for i in range(len(wfm)):
if not use_sub_seq:
if wfm[i] != "": self.add_record("SEQUENCE_WAVEFORM_NAME_CH_{}_{}".format(i + 1, N), wfm[i], 6)
self.add_record("SEQUENCE_IS_SUBSEQ_{}".format(N), int(use_sub_seq), 6, data_type='long')
self.add_record("SEQUENCE_SUBSEQ_NAME_{}".format(N), sub_seq_name, 6)
self.n_seq_lines += 1
def add_subseq(self, name):
ss = Sub_Sequence(name)
self.records[6].append(ss)
return ss
def get_bytes(self):
ans = list()
for i in range(len(self.records)):
group_list = self.records[i]
if not i == 6:
ans.extend([entry.get_bytes() for entry in group_list])
else:
# Special treatement for subseq group
subseq_number, cummul_line = 1, 0
for ss in group_list:
if len(ss.lines) != 0:
ans += ss.get_bytes(subseq_number,cummul_line)
subseq_number += 1
cummul_line += len(ss.lines)
return b''.join(ans)
class Sub_Sequence(object):
def __init__(self, name):
self.name = name
self.lines = list()
def add_line(self, wfm=("", "", "", ""), repeat_count=1):
if not 65536 >= repeat_count >= 0: raise Exception("Maximum of 65536 for repeat_count")
if not (len(wfm) == 4): raise Exception("There should be 4 entries in the wfm tuples")
if wfm[0] == wfm[1] == wfm[2] == wfm[3] == "": raise Exception("At least one channel must have non-empty wfm")
self.lines.append([repeat_count, wfm])
# line = list()
# n = len(self.lines + 1)
# line.append(AWG_Record("SUBSEQ_LOOP_{}_{}_{}".format(n,self.o,n), , data_type=data_type))
def get_bytes(self, subseq_number, cummul_line):
ans = b''
u = cummul_line + 1
o = subseq_number
t = _dt.now()
tm = [t.year, t.month, t.weekday(), t.day, t.hour, t.minute, t.second, t.microsecond // 1000]
rec = [
AWG_Record("SUBSEQ_NAME_{}".format(o), self.name),
AWG_Record("SUBSEQ_TIMESTAMP_{}".format(o), struct.pack('<' + 'h' * 8, *tm), data_type='bytes'),
AWG_Record("SUBSEQ_LENGTH_{}".format(o), len(self.lines), data_type='long')
]
n = 1
for line in self.lines:
rec.append(AWG_Record("SUBSEQ_LOOP_{}_{}_{}".format(n,o,u), line[0], data_type='long'))
wfm = line[1]
for i in range(len(wfm)):
if wfm[i] != "":
rec.append(AWG_Record("SUBSEQ_WAVEFORM_NAME_CH_{}_{}_{}_{}".format(i + 1, n, o, u), wfm[i]))
n += 1
u += 1
for entry in rec:
ans += entry.get_bytes()
return ans
# -----------------------------------
# DEPRECATED
# -----------------------------------
def create_wfm(analog, marker1, marker2, clock=None):
"""
Generate the byte stream for a WFM file given 3 arrays (analog, marker1 and marker2)
:param analog: Array of float
:param marker1: Array of bool (or 1/0)
:param marker2: Array of bool (or 1/0)
:param clock: The clock speed that the waveform should be run at
:return: Byte Stream in the WFM format
"""
if not (len(analog) == len(marker1) == len(marker2)):
raise ValueError('Mismatched analog and marker lengths')
if max(analog) > 1.0 or min(analog) < -1.0:
raise ValueError('analog values out of range')
header = b'MAGIC 1000\r\n'
trailer = bytes('CLOCK {:1.10E}\r\n'.format(clock), encoding='ascii') if clock is not None else b''
    body = array_to_ieee_block(analog, marker1, marker2)
return b''.join((header, body, trailer))
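# Example call for the deprecated helper above (illustrative values only, not
# from the original code):
#
#   analog = _np.array([0.0, 0.25, -0.25], dtype=_np.float32)
#   m1 = _np.array([1, 0, 0], dtype=_np.int8)
#   m2 = _np.array([0, 0, 1], dtype=_np.int8)
#   wfm_bytes = create_wfm(analog, m1, m2, clock=1.0e9)
#   # -> b'MAGIC 1000\r\n' + IEEE data block + b'CLOCK 1.0000000000E+09\r\n'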
class Sequence(object):
def __init__(self):
self.seq = []
def add_line(self, ch1_wfm="", ch2_wfm="", ch3_wfm="", ch4_wfm="", repeat_count=0, wait_for_trigger=False, logic_jump_target=0, finished_goto=0):
"""
This defines a new sequence line to be added to this SEQ file
:param ch1_wfm: wfm (or pat) file to be used for CH1 on this line.
:param ch2_wfm: wfm (or pat) file to be used for CH2 on this line.
:param ch3_wfm: wfm (or pat) file to be used for CH3 on this line.
:param ch4_wfm: wfm (or pat) file to be used for CH4 on this line.
:param repeat_count: Repeat count for the line. 0 is infinity
:param wait_for_trigger: Specify whether or not to wait for a trigger before running the wfm
:param logic_jump_target: Line number where to jump upon EVENT IN input or FORCE EVENT triggers.
0 is Off, -1 is next, and -2 is Table-jump
:param finished_goto: Line to go after current line. 0 is Next. Maximum 8000.
:return:
"""
wait_for_trigger = 1 if bool(wait_for_trigger) else 0
line = '"{}","{}","{}","{}",{},{},{},{},{}\r\n'.format(ch1_wfm, ch2_wfm, ch3_wfm, ch4_wfm,int(repeat_count),
wait_for_trigger, 0, int(logic_jump_target), finished_goto)
self.seq.append(line)
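    # Illustrative sketch (hypothetical waveform names, not from the original code):
    #
    #   seq = Sequence()
    #   seq.add_line(ch1_wfm="pulse.wfm", repeat_count=1)
    #   seq.add_line(ch1_wfm="readout.wfm", repeat_count=0, wait_for_trigger=True)
    #   text = seq.get_str()   # "MAGIC 3004A" header, a LINES count, then one CSV record per line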
def verify_line(self, line):
line = line.strip()
args = line.split(",")
print(line)
print(args)
if len(args) != 9: raise Exception("The number of paramter in the line <{}> is incorrect".format(line))
if args[0]==args[1]==args[2]==args[3]=="": raise Exception("At least one channel must have non-empty wfm")
if not 0<=int(args[4])<=65536: raise Exception("Invalid repeat_counts (must be 0 for infinity or [1,65536])")
if not args[5] in ["0","1"]: raise Exception("wait_for_trigger must be 0 or 1")
if not args[6] == "0": raise Exception("goto_one is not implemented and therefore must be set to 0")
        if not -2<=int(args[7])<=len(self.seq): raise Exception("Invalid logic_jump_target argument (must be in [-2, N] where N is the number of lines in the sequence)")
        if not 0<=int(args[8])<=len(self.seq): raise Exception("Invalid finished_goto argument (must be in [0, N] where N is the number of lines in the sequence)")
def get_str(self):
s = "MAGIC 3004A\r\nLINES {}".format(len(self.seq))
if len(self.seq)>8000: raise Exception("More than 8000 lines may not work...")
for line in self.seq:
self.verify_line(line)
s+= line
return s
def get_bytes(self):
return bytes(self.get_str() ,encoding='ascii')
|
the-stack_0_1765 | import random
top20 = open("../Dataset/Top20.tsv").read().split("\n")
docs = open("../Dataset/Documents.tsv").read().split("\n")
queries = open("../Dataset/queries.tsv").read().split("\n")
OUT = open("../Dataset/finetune_en.tsv",'w')
doc_list = {}
for doc in docs:
if doc != "":
doc_id = doc.split("\t")[0]
doc = doc.split("\t")[1]
doc_list[doc_id] = doc
doc_len = len(doc_list.keys())
query_list = {}
for query in queries:
if query != "":
query_id = query.split("\t")[0]
query_text = query.split("\t")[1]
query_list[query_id] = query_text
cnt = 0
print(doc_len)
for top20_ in top20:
if top20_ is not "":
cnt += 1
top20_ = top20_.split("\t")
query_id = top20_[0]
doc_id = top20_[1] ## positive
#### negative sampling ####
neg_id = top20_[1]
while str(neg_id) in top20_:
neg_id = random.randint(0,doc_len-1)
print("aaaaa")
#print("#########",doc_list[str(neg_id)])
OUT.write(query_list[query_id]+"\t"+doc_list[doc_id]+"\t"+doc_list[str(neg_id)]+"\n")
print(cnt)
|
the-stack_0_1767 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
class Namespace:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
@pytest.fixture(scope='session')
def cloud_config():
"""Provides a configuration object as a proxy to environment variables."""
return Namespace(
project=os.environ.get('GCLOUD_PROJECT'),
storage_bucket=os.environ.get('CLOUD_STORAGE_BUCKET'),
client_secrets=os.environ.get('GOOGLE_CLIENT_SECRETS'),
bigtable_cluster=os.environ.get('BIGTABLE_CLUSTER'),
bigtable_zone=os.environ.get('BIGTABLE_ZONE'))
def get_resource_path(resource, local_path):
local_resource_path = os.path.join(local_path, 'resources', *resource)
if os.path.exists(local_resource_path):
return local_resource_path
else:
raise EnvironmentError('Resource {} not found.'.format(
os.path.join(*resource)))
@pytest.fixture(scope='module')
def resource(request):
"""Provides a function that returns the full path to a local or global
testing resource"""
local_path = os.path.dirname(request.module.__file__)
return lambda *args: get_resource_path(args, local_path)
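# Usage sketch (hypothetical test module, not part of this conftest):
#
#   def test_reads_sample(resource, cloud_config):
#       path = resource('data', 'sample.txt')   # <test dir>/resources/data/sample.txt
#       assert cloud_config.project is not None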
|
the-stack_0_1768 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import logging
import os.path
import re
import subprocess
import textwrap
from reno import create
from reno import scanner
from reno.tests import base
from reno import utils
import fixtures
import mock
from testtools.content import text_content
_SETUP_TEMPLATE = """
import setuptools
try:
import multiprocessing # noqa
except ImportError:
pass
setuptools.setup(
setup_requires=['pbr'],
pbr=True)
"""
_CFG_TEMPLATE = """
[metadata]
name = testpkg
summary = Test Package
[files]
packages =
testpkg
"""
class GPGKeyFixture(fixtures.Fixture):
"""Creates a GPG key for testing.
It's recommended that this be used in concert with a unique home
directory.
"""
def setUp(self):
super(GPGKeyFixture, self).setUp()
tempdir = self.useFixture(fixtures.TempDir())
        gnupg_version_re = re.compile(r'^gpg\s.*\s(\d+)\.(\d+)\.(\d+)')
gnupg_version = utils.check_output(['gpg', '--version'],
cwd=tempdir.path)
for line in gnupg_version[0].split('\n'):
gnupg_version = gnupg_version_re.match(line)
if gnupg_version:
gnupg_version = (int(gnupg_version.group(1)),
int(gnupg_version.group(2)),
int(gnupg_version.group(3)))
break
else:
if gnupg_version is None:
gnupg_version = (0, 0, 0)
config_file = tempdir.path + '/key-config'
f = open(config_file, 'wt')
try:
if gnupg_version[0] == 2 and gnupg_version[1] >= 1:
f.write("""
%no-protection
%transient-key
""")
f.write("""
%no-ask-passphrase
Key-Type: RSA
Name-Real: Example Key
Name-Comment: N/A
            Name-Email: example@example.com
Expire-Date: 2d
Preferences: (setpref)
%commit
""")
finally:
f.close()
# Note that --quick-random (--debug-quick-random in GnuPG 2.x)
# does not have a corresponding preferences file setting and
# must be passed explicitly on the command line instead
# if gnupg_version[0] == 1:
# gnupg_random = '--quick-random'
# elif gnupg_version[0] >= 2:
# gnupg_random = '--debug-quick-random'
# else:
# gnupg_random = ''
subprocess.check_call(
['gpg', '--gen-key', '--batch',
# gnupg_random,
config_file],
cwd=tempdir.path)
class Base(base.TestCase):
def _run_git(self, *args):
return utils.check_output(
['git'] + list(args),
cwd=self.reporoot,
)
def _git_setup(self):
os.makedirs(self.reporoot)
self._run_git('init', '.')
        self._run_git('config', '--local', 'user.email', 'example@example.com')
self._run_git('config', '--local', 'user.name', 'reno developer')
self._run_git('config', '--local', 'user.signingkey',
                      'example@example.com')
def _git_commit(self, message='commit message'):
self._run_git('add', '.')
self._run_git('commit', '-m', message)
def _add_other_file(self, name):
with open(os.path.join(self.reporoot, name), 'w') as f:
f.write('adding %s\n' % name)
self._git_commit('add %s' % name)
def _add_notes_file(self, slug='slug', commit=True, legacy=False):
n = self.get_note_num()
if legacy:
basename = '%016x-%s.yaml' % (n, slug)
else:
basename = '%s-%016x.yaml' % (slug, n)
filename = os.path.join(self.reporoot, 'releasenotes', 'notes',
basename)
create._make_note_file(filename)
self._git_commit('add %s' % basename)
return os.path.join('releasenotes', 'notes', basename)
def _make_python_package(self):
setup_name = os.path.join(self.reporoot, 'setup.py')
with open(setup_name, 'w') as f:
f.write(_SETUP_TEMPLATE)
cfg_name = os.path.join(self.reporoot, 'setup.cfg')
with open(cfg_name, 'w') as f:
f.write(_CFG_TEMPLATE)
pkgdir = os.path.join(self.reporoot, 'testpkg')
os.makedirs(pkgdir)
init = os.path.join(pkgdir, '__init__.py')
with open(init, 'w') as f:
f.write("Test package")
self._git_commit('add test package')
def setUp(self):
super(Base, self).setUp()
self.logger = self.useFixture(
fixtures.FakeLogger(
format='%(levelname)8s %(name)s %(message)s',
level=logging.DEBUG,
nuke_handlers=True,
)
)
# Older git does not have config --local, so create a temporary home
# directory to permit using git config --global without stepping on
# developer configuration.
self.useFixture(fixtures.TempHomeDir())
self.useFixture(GPGKeyFixture())
self.useFixture(fixtures.NestedTempfile())
self.temp_dir = self.useFixture(fixtures.TempDir()).path
self.reporoot = os.path.join(self.temp_dir, 'reporoot')
self.notesdir = os.path.join(self.reporoot,
'releasenotes',
'notes',
)
self._git_setup()
self._counter = itertools.count(1)
self.get_note_num = lambda: next(self._counter)
class BasicTest(Base):
def test_non_python_no_tags(self):
filename = self._add_notes_file()
raw_results = scanner.get_notes_by_version(
self.reporoot,
'releasenotes/notes',
)
results = {
k: [f for (f, n) in v]
for (k, v) in raw_results.items()
}
self.assertEqual(
{'0.0.0': [filename]},
results,
)
def test_python_no_tags(self):
self._make_python_package()
filename = self._add_notes_file()
raw_results = scanner.get_notes_by_version(
self.reporoot,
'releasenotes/notes',
)
results = {
k: [f for (f, n) in v]
for (k, v) in raw_results.items()
}
self.assertEqual(
{'0.0.0': [filename]},
results,
)
def test_note_before_tag(self):
filename = self._add_notes_file()
self._add_other_file('not-a-release-note.txt')
self._run_git('tag', '-s', '-m', 'first tag', '1.0.0')
raw_results = scanner.get_notes_by_version(
self.reporoot,
'releasenotes/notes',
)
results = {
k: [f for (f, n) in v]
for (k, v) in raw_results.items()
}
self.assertEqual(
{'1.0.0': [filename]},
results,
)
def test_note_commit_tagged(self):
filename = self._add_notes_file()
self._run_git('tag', '-s', '-m', 'first tag', '1.0.0')
raw_results = scanner.get_notes_by_version(
self.reporoot,
'releasenotes/notes',
)
results = {
k: [f for (f, n) in v]
for (k, v) in raw_results.items()
}
self.assertEqual(
{'1.0.0': [filename]},
results,
)
def test_note_commit_after_tag(self):
self._make_python_package()
self._run_git('tag', '-s', '-m', 'first tag', '1.0.0')
filename = self._add_notes_file()
raw_results = scanner.get_notes_by_version(
self.reporoot,
'releasenotes/notes',
)
results = {
k: [f for (f, n) in v]
for (k, v) in raw_results.items()
}
self.assertEqual(
{'1.0.0-1': [filename]},
results,
)
def test_other_commit_after_tag(self):
filename = self._add_notes_file()
self._add_other_file('ignore-1.txt')
self._run_git('tag', '-s', '-m', 'first tag', '1.0.0')
self._add_other_file('ignore-2.txt')
raw_results = scanner.get_notes_by_version(
self.reporoot,
'releasenotes/notes',
)
results = {
k: [f for (f, n) in v]
for (k, v) in raw_results.items()
}
self.assertEqual(
{'1.0.0': [filename]},
results,
)
def test_multiple_notes_after_tag(self):
self._make_python_package()
self._run_git('tag', '-s', '-m', 'first tag', '1.0.0')
f1 = self._add_notes_file()
f2 = self._add_notes_file()
raw_results = scanner.get_notes_by_version(
self.reporoot,
'releasenotes/notes',
)
results = {
k: [f for (f, n) in v]
for (k, v) in raw_results.items()
}
self.assertEqual(
{'1.0.0-2': [f1, f2]},
results,
)
def test_multiple_notes_within_tag(self):
self._make_python_package()
f1 = self._add_notes_file(commit=False)
f2 = self._add_notes_file()
self._run_git('tag', '-s', '-m', 'first tag', '1.0.0')
raw_results = scanner.get_notes_by_version(
self.reporoot,
'releasenotes/notes',
)
results = {
k: [f for (f, n) in v]
for (k, v) in raw_results.items()
}
self.assertEqual(
{'1.0.0': [f1, f2]},
results,
)
def test_multiple_tags(self):
self._make_python_package()
self._run_git('tag', '-s', '-m', 'first tag', '1.0.0')
f1 = self._add_notes_file()
self._run_git('tag', '-s', '-m', 'first tag', '2.0.0')
f2 = self._add_notes_file()
raw_results = scanner.get_notes_by_version(
self.reporoot,
'releasenotes/notes',
)
results = {
k: [f for (f, n) in v]
for (k, v) in raw_results.items()
}
self.assertEqual(
{'2.0.0': [f1],
'2.0.0-1': [f2],
},
results,
)
def test_rename_file(self):
self._make_python_package()
self._run_git('tag', '-s', '-m', 'first tag', '1.0.0')
f1 = self._add_notes_file('slug1')
self._run_git('tag', '-s', '-m', 'first tag', '2.0.0')
f2 = f1.replace('slug1', 'slug2')
self._run_git('mv', f1, f2)
self._git_commit('rename note file')
raw_results = scanner.get_notes_by_version(
self.reporoot,
'releasenotes/notes',
)
results = {
k: [f for (f, n) in v]
for (k, v) in raw_results.items()
}
self.assertEqual(
{'2.0.0': [f2],
},
results,
)
def test_rename_file_sort_earlier(self):
self._make_python_package()
self._run_git('tag', '-s', '-m', 'first tag', '1.0.0')
f1 = self._add_notes_file('slug1')
self._run_git('tag', '-s', '-m', 'first tag', '2.0.0')
f2 = f1.replace('slug1', 'slug0')
self._run_git('mv', f1, f2)
self._git_commit('rename note file')
raw_results = scanner.get_notes_by_version(
self.reporoot,
'releasenotes/notes',
)
results = {
k: [f for (f, n) in v]
for (k, v) in raw_results.items()
}
self.assertEqual(
{'2.0.0': [f2],
},
results,
)
def test_edit_file(self):
self._make_python_package()
self._run_git('tag', '-s', '-m', 'first tag', '1.0.0')
f1 = self._add_notes_file()
self._run_git('tag', '-s', '-m', 'first tag', '2.0.0')
with open(os.path.join(self.reporoot, f1), 'w') as f:
f.write('---\npreamble: new contents for file')
self._git_commit('edit note file')
raw_results = scanner.get_notes_by_version(
self.reporoot,
'releasenotes/notes',
)
results = {
k: [f for (f, n) in v]
for (k, v) in raw_results.items()
}
self.assertEqual(
{'2.0.0': [f1],
},
results,
)
def test_legacy_file(self):
self._make_python_package()
self._run_git('tag', '-s', '-m', 'first tag', '1.0.0')
f1 = self._add_notes_file('slug1', legacy=True)
self._run_git('tag', '-s', '-m', 'first tag', '2.0.0')
f2 = f1.replace('slug1', 'slug2')
self._run_git('mv', f1, f2)
self._git_commit('rename note file')
raw_results = scanner.get_notes_by_version(
self.reporoot,
'releasenotes/notes',
)
results = {
k: [f for (f, n) in v]
for (k, v) in raw_results.items()
}
self.assertEqual(
{'2.0.0': [f2],
},
results,
)
def test_rename_legacy_file_to_new(self):
self._make_python_package()
self._run_git('tag', '-s', '-m', 'first tag', '1.0.0')
f1 = self._add_notes_file('slug1', legacy=True)
self._run_git('tag', '-s', '-m', 'first tag', '2.0.0')
# Rename the file with the new convention of placing the UUID
# after the slug instead of before.
f2 = f1.replace('0000000000000001-slug1',
'slug1-0000000000000001')
self._run_git('mv', f1, f2)
self._git_commit('rename note file')
raw_results = scanner.get_notes_by_version(
self.reporoot,
'releasenotes/notes',
)
results = {
k: [f for (f, n) in v]
for (k, v) in raw_results.items()
}
self.assertEqual(
{'2.0.0': [f2],
},
results,
)
class MergeCommitTest(Base):
def test_1(self):
# Create changes on master and in the branch
# in order so the history is "normal"
n1 = self._add_notes_file()
self._run_git('tag', '-s', '-m', 'first tag', '1.0.0')
self._run_git('checkout', '-b', 'test_merge_commit')
n2 = self._add_notes_file()
self._run_git('checkout', 'master')
self._add_other_file('ignore-1.txt')
self._run_git('merge', '--no-ff', 'test_merge_commit')
self._add_other_file('ignore-2.txt')
self._run_git('tag', '-s', '-m', 'second tag', '2.0.0')
raw_results = scanner.get_notes_by_version(
self.reporoot,
'releasenotes/notes',
)
results = {
k: [f for (f, n) in v]
for (k, v) in raw_results.items()
}
self.assertEqual(
{'1.0.0': [n1],
'2.0.0': [n2]},
results,
)
self.assertEqual(
['2.0.0', '1.0.0'],
list(raw_results.keys()),
)
def test_2(self):
# Create changes on the branch before the tag into which it is
# actually merged.
self._add_other_file('ignore-0.txt')
self._run_git('checkout', '-b', 'test_merge_commit')
n1 = self._add_notes_file()
self._run_git('checkout', 'master')
n2 = self._add_notes_file()
self._run_git('tag', '-s', '-m', 'first tag', '1.0.0')
self._add_other_file('ignore-1.txt')
self._run_git('merge', '--no-ff', 'test_merge_commit')
self._add_other_file('ignore-2.txt')
self._run_git('tag', '-s', '-m', 'second tag', '2.0.0')
raw_results = scanner.get_notes_by_version(
self.reporoot,
'releasenotes/notes',
)
results = {
k: [f for (f, n) in v]
for (k, v) in raw_results.items()
}
self.assertEqual(
{'1.0.0': [n2],
'2.0.0': [n1]},
results,
)
self.assertEqual(
['2.0.0', '1.0.0'],
list(raw_results.keys()),
)
def test_3(self):
# Create changes on the branch before the tag into which it is
# actually merged, with another tag in between the time of the
# commit and the time of the merge. This should reflect the
# order of events described in bug #1522153.
self._add_other_file('ignore-0.txt')
self._run_git('checkout', '-b', 'test_merge_commit')
n1 = self._add_notes_file()
self._run_git('checkout', 'master')
n2 = self._add_notes_file()
self._run_git('tag', '-s', '-m', 'first tag', '1.0.0')
self._add_other_file('ignore-1.txt')
self._run_git('tag', '-s', '-m', 'second tag', '1.1.0')
self._run_git('merge', '--no-ff', 'test_merge_commit')
self._add_other_file('ignore-2.txt')
self._run_git('tag', '-s', '-m', 'third tag', '2.0.0')
self._add_other_file('ignore-3.txt')
raw_results = scanner.get_notes_by_version(
self.reporoot,
'releasenotes/notes',
)
results = {
k: [f for (f, n) in v]
for (k, v) in raw_results.items()
}
# Since the 1.1.0 tag has no notes files, it does not appear
# in the output. It's only there to trigger the bug as it was
# originally reported.
self.assertEqual(
{'1.0.0': [n2],
'2.0.0': [n1]},
results,
)
self.assertEqual(
['2.0.0', '1.0.0'],
list(raw_results.keys()),
)
def test_4(self):
# Create changes on the branch before the tag into which it is
# actually merged, with another tag in between the time of the
# commit and the time of the merge. This should reflect the
# order of events described in bug #1522153.
self._add_other_file('ignore-0.txt')
self._run_git('checkout', '-b', 'test_merge_commit')
n1 = self._add_notes_file()
self._run_git('checkout', 'master')
n2 = self._add_notes_file()
self._run_git('tag', '-s', '-m', 'first tag', '1.0.0')
self._add_other_file('ignore-1.txt')
n3 = self._add_notes_file()
self._run_git('tag', '-s', '-m', 'second tag', '1.1.0')
self._run_git('merge', '--no-ff', 'test_merge_commit')
self._add_other_file('ignore-2.txt')
self._run_git('tag', '-s', '-m', 'third tag', '2.0.0')
self._add_other_file('ignore-3.txt')
raw_results = scanner.get_notes_by_version(
self.reporoot,
'releasenotes/notes',
)
results = {
k: [f for (f, n) in v]
for (k, v) in raw_results.items()
}
self.assertEqual(
{'1.0.0': [n2],
'1.1.0': [n3],
'2.0.0': [n1]},
results,
)
self.assertEqual(
['2.0.0', '1.1.0', '1.0.0'],
list(raw_results.keys()),
)
class UniqueIdTest(Base):
def test_legacy(self):
uid = scanner._get_unique_id(
'releasenotes/notes/0000000000000001-slug1.yaml'
)
self.assertEqual('0000000000000001', uid)
def test_modern(self):
uid = scanner._get_unique_id(
'releasenotes/notes/slug1-0000000000000001.yaml'
)
self.assertEqual('0000000000000001', uid)
class BranchTest(Base):
def setUp(self):
super(BranchTest, self).setUp()
self._make_python_package()
self.f1 = self._add_notes_file('slug1')
self._run_git('tag', '-s', '-m', 'first tag', '1.0.0')
self.f2 = self._add_notes_file('slug2')
self._run_git('tag', '-s', '-m', 'first tag', '2.0.0')
self._add_notes_file('slug3')
self._run_git('tag', '-s', '-m', 'first tag', '3.0.0')
def test_files_current_branch(self):
self._run_git('checkout', '2.0.0')
self._run_git('checkout', '-b', 'stable/2')
f21 = self._add_notes_file('slug21')
log_text = self._run_git('log')
self.addDetail('git log', text_content(log_text))
raw_results = scanner.get_notes_by_version(
self.reporoot,
'releasenotes/notes',
)
results = {
k: [f for (f, n) in v]
for (k, v) in raw_results.items()
}
self.assertEqual(
{
'1.0.0': [self.f1],
'2.0.0': [self.f2],
'2.0.0-1': [f21],
},
results,
)
def test_files_stable_from_master(self):
self._run_git('checkout', '2.0.0')
self._run_git('checkout', '-b', 'stable/2')
f21 = self._add_notes_file('slug21')
self._run_git('checkout', 'master')
log_text = self._run_git('log', '--pretty=%x00%H %d', '--name-only',
'stable/2')
self.addDetail('git log', text_content(log_text))
raw_results = scanner.get_notes_by_version(
self.reporoot,
'releasenotes/notes',
'stable/2',
)
results = {
k: [f for (f, n) in v]
for (k, v) in raw_results.items()
}
self.assertEqual(
{
'1.0.0': [self.f1],
'2.0.0': [self.f2],
'2.0.0-1': [f21],
},
results,
)
class GetTagsParseTest(base.TestCase):
EXPECTED = [
'2.0.0',
'1.8.1',
'1.8.0',
'1.7.1',
'1.7.0',
'1.6.0',
'1.5.0',
'1.4.0',
'1.3.0',
'1.2.0',
'1.1.0',
'1.0.0',
'0.11.2',
'0.11.1',
'0.11.0',
'0.10.1',
'0.10.0',
'0.9.0',
'0.8.0',
'0.7.1',
'0.7.0',
'0.6.0',
'0.5.1',
'0.5.0',
'0.4.2',
'0.4.1',
'0.4.0',
'0.3.2',
'0.3.1',
'0.3.0',
'0.2.5',
'0.2.4',
'0.2.3',
'0.2.2',
'0.2.1',
'0.2.0',
'0.1.3',
'0.1.2',
'0.1.1',
'0.1.0',
]
def test_keystoneclient_ubuntu_1_9_1(self):
# git 1.9.1 as it produces output on ubuntu for python-keystoneclient
# git log --simplify-by-decoration --pretty="%d"
tag_list_output = textwrap.dedent("""
(HEAD, origin/master, origin/HEAD, gerrit/master, master)
(apu/master)
(tag: 2.0.0)
(tag: 1.8.1)
(tag: 1.8.0)
(tag: 1.7.1)
(tag: 1.7.0)
(tag: 1.6.0)
(tag: 1.5.0)
(tag: 1.4.0)
(uncap-requirements)
(tag: 1.3.0)
(tag: 1.2.0)
(tag: 1.1.0)
(tag: 1.0.0)
(tag: 0.11.2)
(tag: 0.11.1)
(tag: 0.11.0)
(tag: 0.10.1)
(tag: 0.10.0)
(tag: 0.9.0)
(tag: 0.8.0)
(tag: 0.7.1)
(tag: 0.7.0)
(tag: 0.6.0)
(tag: 0.5.1)
(tag: 0.5.0)
(tag: 0.4.2)
(tag: 0.4.1)
(tag: 0.4.0)
(tag: 0.3.2)
(tag: 0.3.1)
(tag: 0.3.0)
(tag: 0.2.5)
(tag: 0.2.4)
(tag: 0.2.3)
(tag: 0.2.2)
(tag: 0.2.1)
(tag: 0.2.0)
(origin/feature/keystone-v3, gerrit/feature/keystone-v3)
(tag: 0.1.3)
(tag: 0.1.2)
(tag: 0.1.1)
(tag: 0.1.0)
(tag: folsom-1)
(tag: essex-rc1)
(tag: essex-4)
(tag: essex-3)
""")
with mock.patch('reno.utils.check_output') as co:
co.return_value = tag_list_output
actual = scanner._get_version_tags_on_branch('reporoot',
branch=None)
self.assertEqual(self.EXPECTED, actual)
def test_keystoneclient_rhel_1_7_1(self):
# git 1.7.1 as it produces output on RHEL 6 for python-keystoneclient
# git log --simplify-by-decoration --pretty="%d"
tag_list_output = textwrap.dedent("""
(HEAD, origin/master, origin/HEAD, master)
(tag: 2.0.0)
(tag: 1.8.1)
(tag: 1.8.0)
(tag: 1.7.1)
(tag: 1.7.0)
(tag: 1.6.0)
(tag: 1.5.0)
(tag: 1.4.0)
(tag: 1.3.0)
(tag: 1.2.0)
(tag: 1.1.0)
(tag: 1.0.0)
(tag: 0.11.2)
(tag: 0.11.1)
(tag: 0.11.0)
(tag: 0.10.1)
(tag: 0.10.0)
(tag: 0.9.0)
(tag: 0.8.0)
(tag: 0.7.1)
(tag: 0.7.0)
(tag: 0.6.0)
(tag: 0.5.1)
(tag: 0.5.0)
(tag: 0.4.2)
(tag: 0.4.1)
(tag: 0.4.0)
(tag: 0.3.2)
(tag: 0.3.1)
(tag: 0.3.0)
(tag: 0.2.5)
(tag: 0.2.4)
(tag: 0.2.3)
(tag: 0.2.2)
(tag: 0.2.1)
(tag: 0.2.0)
(tag: 0.1.3)
(0.1.2)
(tag: 0.1.1)
(0.1.0)
(tag: folsom-1)
(tag: essex-rc1)
(essex-4)
(essex-3)
""")
with mock.patch('reno.utils.check_output') as co:
co.return_value = tag_list_output
actual = scanner._get_version_tags_on_branch('reporoot',
branch=None)
self.assertEqual(self.EXPECTED, actual)
|
the-stack_0_1770 | # Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from test_tools.generators import scalar_gen
import cunumeric as num
def test():
test_values = [1, np.e, np.e ** 2, 0]
for a in test_values:
for (la, na) in zip(scalar_gen(num, a), scalar_gen(np, a)):
assert np.array_equal(num.exp2(la), np.exp2(na))
if __name__ == "__main__":
test()
|
the-stack_0_1772 | # encoding: utf-8
import datetime
__all__ = [
'info',
]
def info():
return {
'birthday': datetime.date(1992, 11, 15),
'class': 1,
'family_name_en': u'minegishi',
'family_name_kana': u'みねぎし',
'first_name_en': u'minami',
'first_name_kana': u'みなみ',
'graduate_date': None,
'hometown': u'東京',
'name_en': u'Minegishi Minami',
'name_ja': u'峯岸みなみ',
'name_kana': u'みねぎし みなみ',
'nick': u'みいちゃん',
'team': u'K',
}
|
the-stack_0_1775 | import os
from sandbox.rocky.tf.baselines.nn_baseline import NNBaseline
from sandbox.rocky.tf.core.network import ConvNetwork
from sandbox.rocky.tf.policies.gaussian_conv_feature_policy import GaussianConvFeaturePolicy
from sandbox.rocky.tf.policies.gaussian_conv_policy import GaussianConvPolicy
from rllab.baselines.zero_baseline import ZeroBaseline
from rllab.envs.normalized_env import normalize
from sandbox.rocky.tf.samplers.batch_sampler import BatchSampler
from sandbox.rocky.tf.algos.trpo import TRPO
from rllab.misc.instrument import stub, run_experiment_lite
from sandbox.rocky.tf.envs.base import TfEnv
from rllab.envs.gym_env import GymEnv
from railrl.algos.icm_trpo_tf import ICM
import itertools
import tensorflow as tf
stub(globals())
# Params range
seeds = range(0, 5)
for seed in seeds:
env = TfEnv(normalize(env=GymEnv('Box3dReachPixel-v11',record_video=False, \
log_dir='/tmp/gym_test',record_log=False)))
env_spec = env.spec
policy_cnn = ConvNetwork(
name="policy_conv_network",
input_shape=env_spec.observation_space.shape,
output_dim=env_spec.action_space.flat_dim,
conv_filters=(64, 64, 64, 32),
conv_filter_sizes=((5,5),(3,3),(3,3),(3,3)),
conv_strides=(3, 3, 3, 2),
conv_pads=('SAME', 'SAME', 'SAME', 'SAME'),
hidden_sizes=(256,),
hidden_nonlinearity=tf.nn.relu,
output_nonlinearity=None,
)
baseline_cnn = ConvNetwork(
name="baseline_conv_network",
input_shape=env_spec.observation_space.shape,
output_dim=env_spec.action_space.flat_dim,
conv_filters=(64, 64, 64, 32),
conv_filter_sizes=((5,5),(3,3),(3,3),(3,3)),
conv_strides=(3, 3, 3, 2),
conv_pads=('SAME', 'SAME', 'SAME', 'SAME'),
hidden_sizes=(256,),
hidden_nonlinearity=tf.nn.relu,
output_nonlinearity=None,
)
policy = GaussianConvFeaturePolicy(
"conv_feature_policy",
env_spec=env_spec,
feature_network=policy_cnn,
hidden_sizes=(128,64),
clip_action=False,
)
baseline = NNBaseline(
env_spec=env_spec,
feature_network=baseline_cnn,
hidden_sizes=(128,64),
hidden_nonlinearity=tf.nn.relu,
init_lr=0.001,
n_itr=5,
train_feature_network=True,
)
batch_size = 9600
algo = TRPO(
env=env,
policy=policy,
baseline=baseline,
batch_size=batch_size,
whole_paths=True,
max_path_length=1000,
n_itr=2000,
step_size=0.01,
subsample_factor=0.2,
sampler_cls=BatchSampler,
optimizer_args={
'num_slices' : 4,
}
)
icm = ICM(
env,
algo,
'/z/dianchen/box3d/trpo_box3d_pixel_v11_tf_icm_pretrained_cnn_norew_fw0.01_%d'%seed,
forward_weight=0.01,
external_reward_weight=0.0,
init_learning_rate=1e-4,
forward_cos=True,
replay_pool_size=100000,
n_updates_per_iter=1000,
normalize_input=True,
obs_dtype='uint8',
pretrained_icm=True,
pretrained_icm_path='/z/dianchen/tfmodel_box3d/icm_supervised_box3dpixel_v11_box_dense_2e3_fw_0.01_lr_5e-4.pkl',
)
run_experiment_lite(
icm.train(),
exp_prefix='trpo_box3d_pixel_v11_tf_icm_pretrained_cnn_norew_fw0.01',
n_parallel=12,
snapshot_mode="gap",
snapshot_gap=200,
seed=seed,
mode="local"
)
|
the-stack_0_1776 | from flask import Blueprint, render_template, request
from geopy.geocoders import Nominatim
from geopy import distance
import pandas as pd
from shapely.geometry import Polygon
from shapely.geometry import Point
import logging
# Constants
MKAD_LOCATION = (55.8277252, 37.6387268)
# Nominatim geolocation service
geolocator = Nominatim(user_agent="myapp.py")
# Creating the Blueprint
distance_bp = Blueprint('distance_bp', __name__)
# Configure the logging
logging.basicConfig(filename = 'result.log', level=logging.DEBUG)
# Functions
def create_polygon():
"""Return a polygon created with the set of coordinates that delimit the
MKAD area.
"""
df = pd.read_csv(r'mkad_coordinates.csv')
df = df.rename(columns = {'37.842762': 'lat', "55.774558" : 'long'})
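    # Note added for clarity (assuming the CSV header row is itself the first
    # coordinate pair): the column of longitude values ends up named 'lat' and
    # the column of latitude values 'long'. The pairs built below are therefore
    # (latitude, longitude), which matches the Point(lat, long) order used in
    # get_distance(), so the containment test stays consistent.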
polygon_matrix = [[df['long'][ind], df['lat'][ind]] for ind in df.index]
return Polygon(polygon_matrix)
def get_distance(lat, long):
"""If the point related to lat and long coordinates is inside MKAD
area, distance is not calculated else, it assigns the geodesic distance.
"""
if(create_polygon().contains(Point(lat, long))):
dist = "Address is inside of MKAD"
logging.debug("distance: {}".format(dist))
else:
# Use geopy to get the geodesic distance
dist = distance.distance(MKAD_LOCATION, (lat, long))
logging.debug("distance: {}".format(dist))
return dist
def get_location_parameters(loc):
"""If the address is found with geopy, it obtains the lat, long, address and
distance. Otherwise it assigns null values to the previous
mentioned variables.
"""
location = geolocator.geocode(loc)
if(location is not None): # If the address exists in database
latitude = location.latitude
longitude = location.longitude
formatted_address = location.address
dist = get_distance(latitude, longitude)
else:
latitude, longitude, formatted_address, dist = null_address()
return latitude, longitude, formatted_address, dist
def no_input():
"""Fills variables for null address."""
return "Null input", "Null input", "Null input","Null input"
def null_address():
"""Fills variables for an unknown address."""
return "Null address", "Null address", "Null address", "Null address"
@distance_bp.route('/result',methods = ['POST', 'GET'])
def result():
"""The blueprint finds the distance from Moscow Ring Road to an addressed
passed to the application in an HTTP request. If the address is located
inside the MKAD, the distance is stated this way; if the geolocator is
unable to find the address the results are stated as null address; and if
the input address is null, the results are stated as null input.
"""
if request.method == 'POST':
location = request.form["location"]
if(location.strip()): # If location is not null
(latitude,
longitude,
formatted_address,
dist) = get_location_parameters(location)
else:
(latitude,
longitude,
formatted_address,
dist) = no_input()
# Write address and distance into .log file
logging.debug("{} distance to Moscow Ring Road: {}".format(formatted_address,
dist))
return render_template("result.html",
result = formatted_address,
Latitude=latitude,
longitude=longitude,
distance=dist)
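# Illustrative sketch (added; not part of the original module). The factory
# name below is an assumption for the example only: it shows one way the
# blueprint defined above might be mounted on an application.
def create_example_app():
    """Build a minimal Flask app that serves the /result endpoint above."""
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(distance_bp)
    return app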
|
the-stack_0_1778 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# system, numpy
import os
import time
import numpy as np
# pytorch, torch vision
import torch
import torch.optim as optim
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from torch.utils.data.sampler import WeightedRandomSampler
# user defined
import utils
from models import SEM_PCYC
from logger import Logger, AverageMeter
from options import Options
from test import validate
from data import DataGeneratorPaired, DataGeneratorSketch, DataGeneratorImage
np.random.seed(0)
def main():
gc.collect()
torch.cuda.empty_cache()
# Parse options
args = Options().parse()
print('Parameters:\t' + str(args))
if args.filter_sketch:
assert args.dataset == 'Sketchy'
if args.split_eccv_2018:
assert args.dataset == 'Sketchy_extended' or args.dataset == 'Sketchy'
if args.gzs_sbir:
args.test = True
# Read the config file and
config = utils.read_config()
path_dataset = config['path_dataset']
path_aux = config['path_aux']
# modify the log and check point paths
ds_var = None
if '_' in args.dataset:
token = args.dataset.split('_')
print('-----token-----')
print(token)
args.dataset = token[0]
ds_var = token[1]
str_aux = ''
if args.split_eccv_2018:
str_aux = 'split_eccv_2018'
if args.gzs_sbir:
str_aux = os.path.join(str_aux, 'generalized')
args.semantic_models = sorted(args.semantic_models)
model_name = '+'.join(args.semantic_models)
root_path = os.path.join(path_dataset, args.dataset)
path_sketch_model = os.path.join(path_aux, 'CheckPoints', args.dataset, 'sketch')
print('-----path_sketch_model-----')
print(path_sketch_model)
path_image_model = os.path.join(path_aux, 'CheckPoints', args.dataset, 'image')
print('-----path_image_model-----')
print(path_image_model)
path_cp = os.path.join(path_aux, 'CheckPoints', args.dataset, str_aux, model_name, str(args.dim_out))
print('-----path_cp-----')
print(path_cp)
path_log = os.path.join(path_aux, 'LogFiles', args.dataset, str_aux, model_name, str(args.dim_out))
path_results = os.path.join(path_aux, 'Results', args.dataset, str_aux, model_name, str(args.dim_out))
files_semantic_labels = []
sem_dim = 0
for f in args.semantic_models:
fi = os.path.join(path_aux, 'Semantic', args.dataset, f + '.npy')
files_semantic_labels.append(fi)
sem_dim += list(np.load(fi, allow_pickle=True).item().values())[0].shape[0]
print('Checkpoint path: {}'.format(path_cp))
print('Logger path: {}'.format(path_log))
print('Result path: {}'.format(path_results))
# Parameters for transforming the images
transform_image = transforms.Compose([transforms.Resize((args.im_sz, args.im_sz)), transforms.ToTensor()])
transform_sketch = transforms.Compose([transforms.Resize((args.sk_sz, args.sk_sz)), transforms.ToTensor()])
# Load the dataset
print('Loading data...', end='')
if args.dataset == 'Sketchy':
if ds_var == 'extended':
photo_dir = 'extended_photo' # photo or extended_photo
photo_sd = ''
else:
photo_dir = 'photo'
photo_sd = 'tx_000000000000'
sketch_dir = 'sketch'
sketch_sd = 'tx_000000000000'
splits = utils.load_files_sketchy_zeroshot(root_path=root_path, split_eccv_2018=args.split_eccv_2018,
photo_dir=photo_dir, sketch_dir=sketch_dir, photo_sd=photo_sd,
sketch_sd=sketch_sd)
elif args.dataset == 'TU-Berlin':
photo_dir = 'images'
sketch_dir = 'sketches'
photo_sd = ''
sketch_sd = ''
splits = utils.load_files_tuberlin_zeroshot(root_path=root_path, photo_dir=photo_dir, sketch_dir=sketch_dir,
photo_sd=photo_sd, sketch_sd=sketch_sd)
elif args.dataset == 'intersection':
photo_dir = 'images'
sketch_dir = 'sketches'
photo_sd = ''
sketch_sd = ''
splits = utils.load_files_tuberlin_zeroshot(root_path=root_path, photo_dir=photo_dir, sketch_dir=sketch_dir,
photo_sd=photo_sd, sketch_sd=sketch_sd)
else:
raise Exception('Wrong dataset.')
# Combine the valid and test set into test set
splits['te_fls_sk'] = np.concatenate((splits['va_fls_sk'], splits['te_fls_sk']), axis=0)
splits['te_clss_sk'] = np.concatenate((splits['va_clss_sk'], splits['te_clss_sk']), axis=0)
splits['te_fls_im'] = np.concatenate((splits['va_fls_im'], splits['te_fls_im']), axis=0)
splits['te_clss_im'] = np.concatenate((splits['va_clss_im'], splits['te_clss_im']), axis=0)
print('--------args.gzs_sbir-----------')
print(args.gzs_sbir)
if args.gzs_sbir:
perc = 0.2
_, idx_sk = np.unique(splits['tr_fls_sk'], return_index=True)
tr_fls_sk_ = splits['tr_fls_sk'][idx_sk]
tr_clss_sk_ = splits['tr_clss_sk'][idx_sk]
_, idx_im = np.unique(splits['tr_fls_im'], return_index=True)
tr_fls_im_ = splits['tr_fls_im'][idx_im]
tr_clss_im_ = splits['tr_clss_im'][idx_im]
if args.dataset == 'Sketchy' and args.filter_sketch:
_, idx_sk = np.unique([f.split('-')[0] for f in tr_fls_sk_], return_index=True)
tr_fls_sk_ = tr_fls_sk_[idx_sk]
tr_clss_sk_ = tr_clss_sk_[idx_sk]
idx_sk = np.sort(np.random.choice(tr_fls_sk_.shape[0], int(perc * splits['te_fls_sk'].shape[0]), replace=False))
idx_im = np.sort(np.random.choice(tr_fls_im_.shape[0], int(perc * splits['te_fls_im'].shape[0]), replace=False))
splits['te_fls_sk'] = np.concatenate((tr_fls_sk_[idx_sk], splits['te_fls_sk']), axis=0)
splits['te_clss_sk'] = np.concatenate((tr_clss_sk_[idx_sk], splits['te_clss_sk']), axis=0)
splits['te_fls_im'] = np.concatenate((tr_fls_im_[idx_im], splits['te_fls_im']), axis=0)
splits['te_clss_im'] = np.concatenate((tr_clss_im_[idx_im], splits['te_clss_im']), axis=0)
# class dictionary
dict_clss = utils.create_dict_texts(splits['tr_clss_im'])
data_train = DataGeneratorPaired(args.dataset, root_path, photo_dir, sketch_dir, photo_sd, sketch_sd,
splits['tr_fls_sk'], splits['tr_fls_im'], splits['tr_clss_im'],
transforms_sketch=transform_sketch, transforms_image=transform_image)
data_valid_sketch = DataGeneratorSketch(args.dataset, root_path, sketch_dir, sketch_sd, splits['va_fls_sk'],
splits['va_clss_sk'], transforms=transform_sketch)
data_valid_image = DataGeneratorImage(args.dataset, root_path, photo_dir, photo_sd, splits['va_fls_im'],
splits['va_clss_im'], transforms=transform_image)
data_test_sketch = DataGeneratorSketch(args.dataset, root_path, sketch_dir, sketch_sd, splits['te_fls_sk'],
splits['te_clss_sk'], transforms=transform_sketch)
data_test_image = DataGeneratorImage(args.dataset, root_path, photo_dir, photo_sd, splits['te_fls_im'],
splits['te_clss_im'], transforms=transform_image)
print('Done')
train_sampler = WeightedRandomSampler(data_train.get_weights(), num_samples=args.epoch_size * args.batch_size,
replacement=True)
# PyTorch train loader
train_loader = DataLoader(dataset=data_train, batch_size=args.batch_size, sampler=train_sampler,
num_workers=args.num_workers, pin_memory=True)
# PyTorch valid loader for sketch
valid_loader_sketch = DataLoader(dataset=data_valid_sketch, batch_size=args.batch_size, shuffle=False,
num_workers=args.num_workers, pin_memory=True)
# PyTorch valid loader for image
valid_loader_image = DataLoader(dataset=data_valid_image, batch_size=args.batch_size, shuffle=False,
num_workers=args.num_workers, pin_memory=True)
# PyTorch test loader for sketch
test_loader_sketch = DataLoader(dataset=data_test_sketch, batch_size=args.batch_size, shuffle=False,
num_workers=args.num_workers, pin_memory=True)
# PyTorch test loader for image
test_loader_image = DataLoader(dataset=data_test_image, batch_size=args.batch_size, shuffle=False,
num_workers=args.num_workers, pin_memory=True)
# Model parameters
params_model = dict()
# Paths to pre-trained sketch and image models
params_model['path_sketch_model'] = path_sketch_model
params_model['path_image_model'] = path_image_model
# Dimensions
params_model['dim_out'] = args.dim_out
params_model['sem_dim'] = sem_dim
# Number of classes
params_model['num_clss'] = len(dict_clss)
# Weight (on losses) parameters
params_model['lambda_se'] = args.lambda_se
params_model['lambda_im'] = args.lambda_im
params_model['lambda_sk'] = args.lambda_sk
params_model['lambda_gen_cyc'] = args.lambda_gen_cyc
params_model['lambda_gen_adv'] = args.lambda_gen_adv
params_model['lambda_gen_cls'] = args.lambda_gen_cls
params_model['lambda_gen_reg'] = args.lambda_gen_reg
params_model['lambda_disc_se'] = args.lambda_disc_se
params_model['lambda_disc_sk'] = args.lambda_disc_sk
params_model['lambda_disc_im'] = args.lambda_disc_im
params_model['lambda_regular'] = args.lambda_regular
# Optimizers' parameters
params_model['lr'] = args.lr
params_model['momentum'] = args.momentum
params_model['milestones'] = args.milestones
params_model['gamma'] = args.gamma
# Files with semantic labels
params_model['files_semantic_labels'] = files_semantic_labels
# Class dictionary
params_model['dict_clss'] = dict_clss
print('-------------------model------------------------')
print(params_model)
# Model
sem_pcyc_model = SEM_PCYC(params_model)
cudnn.benchmark = True
# Logger
print('Setting logger...', end='')
logger = Logger(path_log, force=True)
print('Done')
# Check cuda
print('Checking cuda...', end='')
# Check if CUDA is enabled
    if args.ngpu > 0 and torch.cuda.is_available():
print('*Cuda exists*...', end='')
sem_pcyc_model = sem_pcyc_model.cuda()
print('Done')
best_map = 0
early_stop_counter = 0
# Epoch for loop
if not args.test:
print('***Train***')
for epoch in range(args.epochs):
sem_pcyc_model.scheduler_gen.step()
sem_pcyc_model.scheduler_disc.step()
sem_pcyc_model.scheduler_ae.step()
# train on training set
losses = train(train_loader, sem_pcyc_model, epoch, args)
# evaluate on validation set, map_ since map is already there
print('***Validation***')
valid_data = validate(valid_loader_sketch, valid_loader_image, sem_pcyc_model, epoch, args)
map_ = np.mean(valid_data['aps@all'])
print('mAP@all on validation set after {0} epochs: {1:.4f} (real), {2:.4f} (binary)'
.format(epoch + 1, map_, np.mean(valid_data['aps@all_bin'])))
del valid_data
if map_ > best_map:
best_map = map_
early_stop_counter = 0
utils.save_checkpoint({'epoch': epoch + 1, 'state_dict': sem_pcyc_model.state_dict(), 'best_map':
best_map}, directory=path_cp)
# utils.save_checkpoint({'epoch': epoch + 1, 'state_dict': sem_pcyc_model, 'best_map':
# best_map, 'test':'tooning'}, directory=path_cp)
# utils.save_checkpoint(sem_pcyc_model, directory=path_cp)
else:
if args.early_stop == early_stop_counter:
break
early_stop_counter += 1
# Logger step
logger.add_scalar('semantic autoencoder loss', losses['aut_enc'].avg)
logger.add_scalar('generator adversarial loss', losses['gen_adv'].avg)
logger.add_scalar('generator cycle consistency loss', losses['gen_cyc'].avg)
logger.add_scalar('generator classification loss', losses['gen_cls'].avg)
logger.add_scalar('generator regression loss', losses['gen_reg'].avg)
logger.add_scalar('generator loss', losses['gen'].avg)
logger.add_scalar('semantic discriminator loss', losses['disc_se'].avg)
logger.add_scalar('sketch discriminator loss', losses['disc_sk'].avg)
logger.add_scalar('image discriminator loss', losses['disc_im'].avg)
logger.add_scalar('discriminator loss', losses['disc'].avg)
logger.add_scalar('mean average precision', map_)
logger.step()
# load the best model yet
best_model_file = os.path.join(path_cp, 'model_best.pth')
if os.path.isfile(best_model_file):
print("Loading best model from '{}'".format(best_model_file))
checkpoint = torch.load(best_model_file)
epoch = checkpoint['epoch']
best_map = checkpoint['best_map']
sem_pcyc_model.load_state_dict(checkpoint['state_dict'])
print("Loaded best model '{0}' (epoch {1}; mAP@all {2:.4f})".format(best_model_file, epoch, best_map))
print('***Test***')
valid_data = validate(test_loader_sketch, test_loader_image, sem_pcyc_model, epoch, args)
print('Results on test set: mAP@all = {1:.4f}, Prec@100 = {0:.4f}, mAP@200 = {3:.4f}, Prec@200 = {2:.4f}, '
'Time = {4:.6f} || mAP@all (binary) = {6:.4f}, Prec@100 (binary) = {5:.4f}, mAP@200 (binary) = {8:.4f}, '
'Prec@200 (binary) = {7:.4f}, Time (binary) = {9:.6f} '
.format(valid_data['prec@100'], np.mean(valid_data['aps@all']), valid_data['prec@200'],
np.mean(valid_data['aps@200']), valid_data['time_euc'], valid_data['prec@100_bin'],
np.mean(valid_data['aps@all_bin']), valid_data['prec@200_bin'], np.mean(valid_data['aps@200_bin'])
, valid_data['time_bin']))
print('Saving qualitative results...', end='')
path_qualitative_results = os.path.join(path_results, 'qualitative_results')
utils.save_qualitative_results(root_path, sketch_dir, sketch_sd, photo_dir, photo_sd, splits['te_fls_sk'],
splits['te_fls_im'], path_qualitative_results, valid_data['aps@all'],
valid_data['sim_euc'], valid_data['str_sim'], save_image=args.save_image_results,
nq=args.number_qualit_results, best=args.save_best_results)
print('Done')
else:
print("No best model found at '{}'. Exiting...".format(best_model_file))
exit()
def train(train_loader, sem_pcyc_model, epoch, args):
# Switch to train mode
sem_pcyc_model.train()
batch_time = AverageMeter()
losses_gen_adv = AverageMeter()
losses_gen_cyc = AverageMeter()
losses_gen_cls = AverageMeter()
losses_gen_reg = AverageMeter()
losses_gen = AverageMeter()
losses_disc_se = AverageMeter()
losses_disc_sk = AverageMeter()
losses_disc_im = AverageMeter()
losses_disc = AverageMeter()
losses_aut_enc = AverageMeter()
# Start counting time
time_start = time.time()
for i, (sk, im, cl) in enumerate(train_loader):
# Transfer sk and im to cuda
if torch.cuda.is_available():
sk, im = sk.cuda(), im.cuda()
# Optimize parameters
loss = sem_pcyc_model.module.optimize_params(sk, im, cl)
# Store losses for visualization
losses_aut_enc.update(loss['aut_enc'].item(), sk.size(0))
losses_gen_adv.update(loss['gen_adv'].item(), sk.size(0))
losses_gen_cyc.update(loss['gen_cyc'].item(), sk.size(0))
losses_gen_cls.update(loss['gen_cls'].item(), sk.size(0))
losses_gen_reg.update(loss['gen_reg'].item(), sk.size(0))
losses_gen.update(loss['gen'].item(), sk.size(0))
losses_disc_se.update(loss['disc_se'].item(), sk.size(0))
losses_disc_sk.update(loss['disc_sk'].item(), sk.size(0))
losses_disc_im.update(loss['disc_im'].item(), sk.size(0))
losses_disc.update(loss['disc'].item(), sk.size(0))
# time
time_end = time.time()
batch_time.update(time_end - time_start)
time_start = time_end
if (i + 1) % args.log_interval == 0:
print('[Train] Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Gen. Loss {loss_gen.val:.4f} ({loss_gen.avg:.4f})\t'
'Disc. Loss {loss_disc.val:.4f} ({loss_disc.avg:.4f})\t'
.format(epoch + 1, i + 1, len(train_loader), batch_time=batch_time, loss_gen=losses_gen,
loss_disc=losses_disc))
losses = {'aut_enc': losses_aut_enc, 'gen_adv': losses_gen_adv, 'gen_cyc': losses_gen_cyc, 'gen_cls':
losses_gen_cls, 'gen_reg': losses_gen_reg, 'gen': losses_gen, 'disc_se': losses_disc_se, 'disc_sk':
losses_disc_sk, 'disc_im': losses_disc_im, 'disc': losses_disc}
return losses
if __name__ == '__main__':
    main()
|
the-stack_0_1779 | # -*- coding: utf-8 -*-
'''
unit.loader
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Test Salt's loader
'''
# Import Python libs
from __future__ import absolute_import
import inspect
import tempfile
import shutil
import os
import collections
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
import tests.integration
# Import Salt libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import salt.ext.six as six
from salt.ext.six.moves import range
from salt.config import minion_config
# pylint: enable=no-name-in-module,redefined-builtin
from salt.loader import LazyLoader, _module_dirs, grains
class LazyLoaderVirtualEnabledTest(TestCase):
'''
Test the base loader of salt.
'''
def setUp(self):
self.opts = minion_config(None)
self.opts['disable_modules'] = ['pillar']
self.opts['grains'] = grains(self.opts)
self.loader = LazyLoader(_module_dirs(self.opts, 'modules', 'module'),
self.opts,
tag='module')
def test_basic(self):
'''
Ensure that it only loads stuff when needed
'''
# make sure it starts empty
self.assertEqual(self.loader._dict, {})
# get something, and make sure its a func
self.assertTrue(inspect.isfunction(self.loader['test.ping']))
# make sure we only loaded "test" functions
for key, val in six.iteritems(self.loader._dict):
self.assertEqual(key.split('.', 1)[0], 'test')
        # make sure the depends decorator worked (double-check of the depends
        # testing, since the loader does the calling magically)
self.assertFalse('test.missing_func' in self.loader._dict)
def test_badkey(self):
with self.assertRaises(KeyError):
self.loader[None] # pylint: disable=W0104
with self.assertRaises(KeyError):
self.loader[1] # pylint: disable=W0104
def test_disable(self):
self.assertNotIn('pillar.items', self.loader)
def test_len_load(self):
'''
Since LazyLoader is a MutableMapping, if someone asks for len() we have
to load all
'''
self.assertEqual(self.loader._dict, {})
len(self.loader) # force a load all
self.assertNotEqual(self.loader._dict, {})
def test_iter_load(self):
'''
Since LazyLoader is a MutableMapping, if someone asks to iterate we have
to load all
'''
self.assertEqual(self.loader._dict, {})
# force a load all
for key, func in six.iteritems(self.loader):
break
self.assertNotEqual(self.loader._dict, {})
def test_context(self):
'''
Make sure context is shared across modules
'''
# make sure it starts empty
self.assertEqual(self.loader._dict, {})
# get something, and make sure its a func
func = self.loader['test.ping']
func.__globals__['__context__']['foo'] = 'bar'
self.assertEqual(self.loader['test.echo'].__globals__['__context__']['foo'], 'bar')
self.assertEqual(self.loader['grains.get'].__globals__['__context__']['foo'], 'bar')
def test_globals(self):
func_globals = self.loader['test.ping'].__globals__
self.assertEqual(func_globals['__grains__'], self.opts.get('grains', {}))
self.assertEqual(func_globals['__pillar__'], self.opts.get('pillar', {}))
# the opts passed into modules is at least a subset of the whole opts
for key, val in six.iteritems(func_globals['__opts__']):
self.assertEqual(self.opts[key], val)
def test_pack(self):
self.loader.pack['__foo__'] = 'bar'
func_globals = self.loader['test.ping'].__globals__
self.assertEqual(func_globals['__foo__'], 'bar')
def test_virtual(self):
self.assertNotIn('test_virtual.ping', self.loader)
class LazyLoaderVirtualDisabledTest(TestCase):
'''
Test the loader of salt without __virtual__
'''
def setUp(self):
self.opts = _config = minion_config(None)
self.opts['grains'] = grains(self.opts)
self.loader = LazyLoader(_module_dirs(self.opts, 'modules', 'module'),
self.opts,
tag='module',
virtual_enable=False)
def test_virtual(self):
self.assertTrue(inspect.isfunction(self.loader['test_virtual.ping']))
class LazyLoaderWhitelistTest(TestCase):
'''
Test the loader of salt with a whitelist
'''
def setUp(self):
self.opts = _config = minion_config(None)
self.loader = LazyLoader(_module_dirs(self.opts, 'modules', 'module'),
self.opts,
tag='module',
whitelist=['test', 'pillar'])
def test_whitelist(self):
self.assertTrue(inspect.isfunction(self.loader['test.ping']))
self.assertTrue(inspect.isfunction(self.loader['pillar.get']))
self.assertNotIn('grains.get', self.loader)
module_template = '''
__load__ = ['test', 'test_alias']
__func_alias__ = dict(test_alias='working_alias')
from salt.utils.decorators import depends
def test():
return {count}
def test_alias():
return True
def test2():
return True
@depends('non_existantmodulename')
def test3():
return True
@depends('non_existantmodulename', fallback_function=test)
def test4():
return True
'''
class LazyLoaderReloadingTest(TestCase):
'''
Test the loader of salt with changing modules
'''
module_name = 'loadertest'
module_key = 'loadertest.test'
def setUp(self):
self.opts = _config = minion_config(None)
self.opts['grains'] = grains(self.opts)
self.tmp_dir = tempfile.mkdtemp(dir=tests.integration.TMP)
self.count = 0
dirs = _module_dirs(self.opts, 'modules', 'module')
dirs.append(self.tmp_dir)
self.loader = LazyLoader(dirs,
self.opts,
tag='module')
def tearDown(self):
shutil.rmtree(self.tmp_dir)
def update_module(self):
self.count += 1
with open(self.module_path, 'wb') as fh:
fh.write(module_template.format(count=self.count))
fh.flush()
os.fsync(fh.fileno()) # flush to disk
# pyc files don't like it when we change the original quickly
# since the header bytes only contain the timestamp (granularity of seconds)
# TODO: don't write them? Is *much* slower on re-load (~3x)
# https://docs.python.org/2/library/sys.html#sys.dont_write_bytecode
try:
os.unlink(self.module_path + 'c')
except OSError:
pass
def rm_module(self):
os.unlink(self.module_path)
os.unlink(self.module_path + 'c')
@property
def module_path(self):
return os.path.join(self.tmp_dir, '{0}.py'.format(self.module_name))
def test_alias(self):
'''
Make sure that you can access alias-d modules
'''
# ensure it doesn't exist
self.assertNotIn(self.module_key, self.loader)
self.update_module()
self.assertNotIn('{0}.test_alias'.format(self.module_name), self.loader)
self.assertTrue(inspect.isfunction(self.loader['{0}.working_alias'.format(self.module_name)]))
def test_clear(self):
self.assertTrue(inspect.isfunction(self.loader['test.ping']))
self.update_module() # write out out custom module
self.loader.clear() # clear the loader dict
# force a load of our module
self.assertTrue(inspect.isfunction(self.loader[self.module_key]))
# make sure we only loaded our custom module
# which means that we did correctly refresh the file mapping
for k, v in six.iteritems(self.loader._dict):
self.assertTrue(k.startswith(self.module_name))
def test_load(self):
# ensure it doesn't exist
self.assertNotIn(self.module_key, self.loader)
self.update_module()
self.assertTrue(inspect.isfunction(self.loader[self.module_key]))
def test__load__(self):
'''
If a module specifies __load__ we should only load/expose those modules
'''
self.update_module()
# ensure it doesn't exist
self.assertNotIn(self.module_key + '2', self.loader)
def test__load__and_depends(self):
'''
        Functions left out of __load__ are not exposed even if they use the
        @depends decorator (with or without a fallback)
'''
self.update_module()
# ensure it doesn't exist
self.assertNotIn(self.module_key + '3', self.loader)
self.assertNotIn(self.module_key + '4', self.loader)
def test_reload(self):
# ensure it doesn't exist
self.assertNotIn(self.module_key, self.loader)
# make sure it updates correctly
for x in range(1, 3):
self.update_module()
self.loader.clear()
self.assertEqual(self.loader[self.module_key](), self.count)
self.rm_module()
# make sure that even if we remove the module, its still loaded until a clear
self.assertEqual(self.loader[self.module_key](), self.count)
self.loader.clear()
self.assertNotIn(self.module_key, self.loader)
submodule_template = '''
import lib
def test():
return ({count}, lib.test())
'''
submodule_lib_template = '''
def test():
return {count}
'''
class LazyLoaderSubmodReloadingTest(TestCase):
'''
Test the loader of salt with changing modules
'''
module_name = 'loadertestsubmod'
module_key = 'loadertestsubmod.test'
def setUp(self):
self.opts = _config = minion_config(None)
self.opts['grains'] = grains(self.opts)
self.tmp_dir = tempfile.mkdtemp(dir=tests.integration.TMP)
os.makedirs(self.module_dir)
self.count = 0
self.lib_count = 0
dirs = _module_dirs(self.opts, 'modules', 'module')
dirs.append(self.tmp_dir)
self.loader = LazyLoader(dirs,
self.opts,
tag='module')
def tearDown(self):
shutil.rmtree(self.tmp_dir)
def update_module(self):
self.count += 1
with open(self.module_path, 'wb') as fh:
fh.write(submodule_template.format(count=self.count))
fh.flush()
os.fsync(fh.fileno()) # flush to disk
# pyc files don't like it when we change the original quickly
# since the header bytes only contain the timestamp (granularity of seconds)
# TODO: don't write them? Is *much* slower on re-load (~3x)
# https://docs.python.org/2/library/sys.html#sys.dont_write_bytecode
try:
os.unlink(self.module_path + 'c')
except OSError:
pass
def rm_module(self):
os.unlink(self.module_path)
os.unlink(self.module_path + 'c')
def update_lib(self):
self.lib_count += 1
with open(self.lib_path, 'wb') as fh:
fh.write(submodule_lib_template.format(count=self.lib_count))
fh.flush()
os.fsync(fh.fileno()) # flush to disk
# pyc files don't like it when we change the original quickly
# since the header bytes only contain the timestamp (granularity of seconds)
# TODO: don't write them? Is *much* slower on re-load (~3x)
# https://docs.python.org/2/library/sys.html#sys.dont_write_bytecode
try:
os.unlink(self.lib_path + 'c')
except OSError:
pass
def rm_lib(self):
os.unlink(self.lib_path)
os.unlink(self.lib_path + 'c')
@property
def module_dir(self):
return os.path.join(self.tmp_dir, self.module_name)
@property
def module_path(self):
return os.path.join(self.module_dir, '__init__.py')
@property
def lib_path(self):
return os.path.join(self.module_dir, 'lib.py')
def test_basic(self):
# ensure it doesn't exist
self.assertNotIn(self.module_key, self.loader)
self.update_module()
self.update_lib()
self.loader.clear()
self.assertIn(self.module_key, self.loader)
def test_reload(self):
# ensure it doesn't exist
self.assertNotIn(self.module_key, self.loader)
# update both the module and the lib
for x in range(1, 3):
self.update_module()
self.update_lib()
self.loader.clear()
self.assertEqual(self.loader[self.module_key](), (self.count, self.lib_count))
# update just the module
for x in range(1, 3):
self.update_module()
self.loader.clear()
self.assertEqual(self.loader[self.module_key](), (self.count, self.lib_count))
# update just the lib
for x in range(1, 3):
self.update_lib()
self.loader.clear()
self.assertEqual(self.loader[self.module_key](), (self.count, self.lib_count))
self.rm_module()
# make sure that even if we remove the module, its still loaded until a clear
self.assertEqual(self.loader[self.module_key](), (self.count, self.lib_count))
self.loader.clear()
self.assertNotIn(self.module_key, self.loader)
def test_reload_missing_lib(self):
# ensure it doesn't exist
self.assertNotIn(self.module_key, self.loader)
# update both the module and the lib
self.update_module()
self.update_lib()
self.loader.clear()
self.assertEqual(self.loader[self.module_key](), (self.count, self.lib_count))
# remove the lib, this means we should fail to load the module next time
self.rm_lib()
self.loader.clear()
self.assertNotIn(self.module_key, self.loader)
deep_init_base = '''
import top_lib
import top_lib.mid_lib
import top_lib.mid_lib.bot_lib
def top():
return top_lib.test()
def mid():
return top_lib.mid_lib.test()
def bot():
return top_lib.mid_lib.bot_lib.test()
'''
class LazyLoaderDeepSubmodReloadingTest(TestCase):
module_name = 'loadertestsubmoddeep'
libs = ('top_lib', 'mid_lib', 'bot_lib')
def setUp(self):
self.opts = _config = minion_config(None)
self.tmp_dir = tempfile.mkdtemp(dir=tests.integration.TMP)
os.makedirs(self.module_dir)
self.lib_count = collections.defaultdict(int) # mapping of path -> count
# bootstrap libs
with open(os.path.join(self.module_dir, '__init__.py'), 'w') as fh:
fh.write(deep_init_base)
fh.flush()
os.fsync(fh.fileno()) # flush to disk
self.lib_paths = {}
dir_path = self.module_dir
for lib_name in self.libs:
dir_path = os.path.join(dir_path, lib_name)
self.lib_paths[lib_name] = dir_path
os.makedirs(dir_path)
self.update_lib(lib_name)
dirs = _module_dirs(self.opts, 'modules', 'module')
dirs.append(self.tmp_dir)
self.loader = LazyLoader(dirs,
self.opts,
tag='module')
@property
def module_dir(self):
return os.path.join(self.tmp_dir, self.module_name)
def update_lib(self, lib_name):
path = os.path.join(self.lib_paths[lib_name], '__init__.py')
self.lib_count[lib_name] += 1
with open(path, 'wb') as fh:
fh.write(submodule_lib_template.format(count=self.lib_count[lib_name]))
fh.flush()
os.fsync(fh.fileno()) # flush to disk
# pyc files don't like it when we change the original quickly
# since the header bytes only contain the timestamp (granularity of seconds)
# TODO: don't write them? Is *much* slower on re-load (~3x)
# https://docs.python.org/2/library/sys.html#sys.dont_write_bytecode
try:
os.unlink(path + 'c')
except OSError:
pass
def tearDown(self):
shutil.rmtree(self.tmp_dir)
def test_basic(self):
self.assertIn('{0}.top'.format(self.module_name), self.loader)
def _verify_libs(self):
for lib in self.libs:
self.assertEqual(self.loader['{0}.{1}'.format(self.module_name, lib.replace('_lib', ''))](),
self.lib_count[lib])
def test_reload(self):
'''
Make sure that we can reload all libraries of arbitrary depth
'''
self._verify_libs()
# update them all
for lib in self.libs:
            for x in range(5):
self.update_lib(lib)
self.loader.clear()
self._verify_libs()
|
the-stack_0_1781 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
import torch
from torch.nn import DataParallel
from torch.nn.parallel import DistributedDataParallel
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.trainer.states import RunningStage
from pytorch_lightning.utilities.warnings import WarningCache
warning_cache = WarningCache()
class _LightningModuleWrapperBase(torch.nn.Module):
def __init__(self, pl_module: LightningModule):
"""
Wraps the user's LightningModule and redirects the forward call to the appropriate
method, either ``training_step``, ``validation_step`` or ``test_step``.
If the LightningModule is in none of the states `training`, `testing` or `validation`,
the inputs will be redirected to the
:meth:`~pytorch_lightning.core.lightning.LightningModule.predict` method.
Inheriting classes may also modify the inputs or outputs of forward.
Args:
pl_module: the model to wrap
"""
super().__init__()
self.module = pl_module
def forward(self, *inputs, **kwargs):
running_stage = self.module.running_stage
if running_stage == RunningStage.TRAINING:
output = self.module.training_step(*inputs, **kwargs)
# In manual_optimization, we need to prevent DDP reducer as
# it is done manually in ``LightningModule.manual_backward``
# `require_backward_grad_sync` will be reset in the
# ddp_plugin ``post_training_step`` hook
if not self.module.automatic_optimization:
self.module.trainer.model.require_backward_grad_sync = False
warn_if_output_is_none(output, "training_step")
elif running_stage == RunningStage.TESTING:
output = self.module.test_step(*inputs, **kwargs)
warn_if_output_is_none(output, "test_step")
elif running_stage == RunningStage.EVALUATING:
output = self.module.validation_step(*inputs, **kwargs)
warn_if_output_is_none(output, "validation_step")
else:
output = self.module.predict(*inputs, **kwargs)
return output
def warn_if_output_is_none(output: Any, method_name: str) -> None:
""" Warns user about which method returned None. """
if output is None:
warning_cache.warn(f'Your {method_name} returned None. Did you forget to return an output?')
def unwrap_lightning_module(wrapped_model) -> LightningModule:
model = wrapped_model
if isinstance(model, (DistributedDataParallel, DataParallel)):
model = model.module
if isinstance(model, _LightningModuleWrapperBase):
model = model.module
return model
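# Illustrative sketch (added; not part of the original file): wrapping a
# LightningModule for DataParallel and recovering it again with the helper
# above. "pl_module" is any LightningModule instance supplied by the caller.
def _example_wrap_and_unwrap(pl_module: LightningModule) -> LightningModule:
    wrapped = DataParallel(_LightningModuleWrapperBase(pl_module))
    return unwrap_lightning_module(wrapped)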
|
the-stack_0_1782 | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 2 09:35:39 2016
@author: bing
"""
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 25 09:42:22 2016
@author: bing
"""
import numpy as np
import scipy
import numba
import sys
bohr_angstrom = 0.52917721092
hartree_wavenumber = 219474.63
#hartree_wavenumber = scipy.constants.value(u'hartree-inverse meter relationship') / 1e2
Vmin = -24.2288
b = np.array([-6.631e-02, 1.346e-01, -3.300e-02, 6e0, -1.4e01, -1.193e02, 2.290e02, \
1.110e03, -1.850e03, -3.5e03, 6.0e03])
@numba.autojit
def derivs(x):
"""
Morse potential
"""
PES = 'pH2'
if PES == 'Morse':
a, x0 = 1.02, 1.4
De = 0.176 / 100.0
d = (1.0-np.exp(-a*x))
v0 = De*d**2
dv = 2. * De * d * a * np.exp(-a*x)
elif PES == 'HO':
v0 = x**2/2.0
dv = x
#ddv = 2.0 * De * (-d*np.exp(-a*((x-x0)))*a**2 + (np.exp(-a*(x-x0)))**2*a**2)
elif PES == 'pH2':
dx = 1e-4
v0 = np.zeros(Ntraj)
dv = np.zeros(Ntraj)
for i in range(Ntraj):
v0[i] = vpot(x[i])
dv[i] = ( vpot(x[i] + dx) - v0[i])/dx
return v0,dv
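# Reference note (added): for the Morse branch above the implemented formulas are
#   V(x)  = De * (1 - exp(-a*x))**2
#   dV/dx = 2 * De * a * (1 - exp(-a*x)) * exp(-a*x)
# while the 'pH2' branch evaluates the fitted potential in vpot() and takes its
# derivative by a one-sided finite difference with step dx = 1e-4.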
@numba.autojit
def qpot(x,p,r,w):
"""
Linear Quantum Force : direct polynomial fitting of derivative-log density (amplitude)
curve_fit : randomly choose M points and do a nonlinear least-square fitting to a
predefined functional form
"""
#tau = (max(xdata) - min(xdata))/(max(x) - min(x))
#if tau > 0.6:
# pass
#else:
# print('Data points are not sampled well.'
Nb = 4
S = np.zeros((Nb,Nb))
for j in range(Nb):
for k in range(Nb):
S[j,k] = np.dot(x**(j+k), w)
bp = np.zeros(Nb)
br = np.zeros(Nb)
for n in range(Nb):
bp[n] = np.dot(x**n * p, w)
br[n] = np.dot(x**n * r, w)
cp = np.linalg.solve(S,bp)
cr = np.linalg.solve(S,br)
#unit = np.identity(Nb)
#r_approx = cr[0] * unit + cr[1] * x + cr[2] * x**2 + cr[3] * x**3
#p_approx = cp[0] * unit + cp[1] * x + cp[2] * x**2 + cp[3] * x**3
dr = cr[1] + 2. * cr[2] * x + 3. * cr[3] * x**2 #+ 4.0 * cr[4] * x**3
dp = cp[1] + 2. * cp[2] * x + 3. * cp[3] * x**2 #+ 4.0 * cp[4] * x**3
ddr = 2. * cr[2] + 6. * cr[3] * x # + 12.0 * cr[4] * x**2
ddp = 2. * cp[2] + 6. * cp[3] * x #+ 12.0 * cp[4] * x**2
fr = -1./2./am * (2. * r * dp + ddp)
fq = 1./2./am * (2. * r * dr + ddr)
Eu = -1./2./am * np.dot(r**2 + dr,w)
return Eu,fq,fr
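# Added cross-check sketch (an assumption, not part of the original script):
# the S/b normal equations solved in qpot() are a weighted least-squares fit of
# p and r by a cubic polynomial, so the coefficients can be compared against
# numpy's polyfit, which minimizes sum((wi*(yi - poly(xi)))**2) and therefore
# takes the square root of the trajectory weights here.
def _polyfit_check(x, y, w):
    """Return cubic fit coefficients, lowest order first, for comparison with cp/cr."""
    return np.polyfit(x, y, 3, w=np.sqrt(w))[::-1]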
@numba.autojit
def sym(V):
n = V.shape[-1]
for i in range(n):
for j in range(i):
V[j,i] = V[i,j]
return V
@numba.autojit
def vpot(r):
re = 3.47005
De = 24.2288
r = r * bohr_angstrom
beta_inf = np.log(2.0 * De / u_LR(re))
s = 0.0
for j in range(11):
s += b[j] * y_ref(r,1)**j
beta = y_ref(r,6) * beta_inf + (1.0 - y_ref(r,6)) * s
vpot = De * (1.0 - u_LR(r)/u_LR(re) * np.exp(- beta * y_eq(r,6)))**2
vpot = vpot + Vmin
vpot = vpot / hartree_wavenumber
return vpot
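# Reference note (added): vpot() evaluates a Morse/long-range (MLR) style form,
#   V(r) = De * (1 - [u_LR(r)/u_LR(re)] * exp(-beta(r) * y_eq(r,6)))**2 + Vmin,
# where beta(r) is switched between the polynomial expansion in b[] and its
# asymptotic value beta_inf through y_ref(r,6), and u_LR(r) is built from damped
# C6/C8/C10 dispersion terms. The result is converted from cm^-1 to hartree.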
@numba.autojit
def y_eq(r,n):
re = 3.47005
y_eq = (r**n - re**n)/(r**n + re**n)
return y_eq
@numba.autojit
def y_ref(r,n):
r_ref = 4.60
z = (r**n - r_ref**n)/(r**n + r_ref**n)
return z
@numba.autojit
def u_LR(r):
C6 = 5.820364e04
C8 = 2.87052154e05
C10 = 1.80757343e06
z = damp(r,6) * C6/r**6 + damp(r,8) * C8/r**8 + damp(r,10) * C10 / r**10
return z
@numba.autojit
def damp(r,n):
den = 1.10
z = (1.0 - np.exp(-3.30 * den * r / n - 0.423 * (den * r)**2/np.sqrt(float(n))))**(n-1)
return z
# initialization
Ntraj = 4000
a0 = 0.5
x0 = 9.0
x = np.random.randn(Ntraj)
#x = np.zeros(Ntraj)
#for k in range(Ntraj):
# x[k] = np.random.randn()
# while x[k] > 3.0:
# x[k] = np.random.randn()
x = x / np.sqrt(2.0 * a0) + x0
p = np.zeros(Ntraj)
r = - a0 * (x-x0)
w = np.array([1./Ntraj]*Ntraj)
am = 1837.0
Nt = 16000
dt = 1.0
dt2 = dt/2.0
t = 0.0
f = open('traj.dat','w')
fe = open('en.out','w')
f_MSE = open('rMSE.out','w')
nout = 20 # number of trajectories to print
fmt = ' {}' * (nout+1) + '\n'
Eu = 0.
Ndim = 1 # dimensionality of the system
fric_cons = 0.0004 # friction constant
v0, dv = derivs(x)
Eu,fq,fr = qpot(x,p,r,w)
for k in range(Nt):
t = t + dt
p += (- dv + fq) * dt2 - fric_cons * p * dt2
r += fr * dt2
x += p*dt/am
# force field
Eu, fq, fr = qpot(x,p,r,w)
if Eu < 0:
print('Error: U = {} should not be negative. \n'.format(Eu))
sys.exit()
v0, dv = derivs(x)
p += (- dv + fq) * dt2 - fric_cons * p * dt2
r += fr * dt2
f.write(fmt.format(t,*x[0:nout]))
Ek = np.dot(p*p,w)/2./am * hartree_wavenumber
Ev = np.dot(v0,w) * hartree_wavenumber
Eu = Eu * hartree_wavenumber
Etot = Ek + Ev + Eu
fe.write('{} {} {} {} {} \n'.format(t,Ek,Ev,Eu,Etot))
if k == Nt-1:
print('The total energy = {} cm-1. \n'.format(Etot))
fe.close()
f.close()
#a, x0, De = 1.02, 1.4, 0.176/100
#print('The well depth = {} cm-1. \n'.format(De * hartree_wavenumber))
#
#omega = a * np.sqrt(2. * De / am )
#E0 = omega/2. - omega**2/16./De
#dE = (Etot-E0) * hartree_wavenumber
#print('Exact ground-state energy = {} Hartree. \nEnergy deviation = {} cm-1. \n'.format(E0,dE))
#
|
the-stack_0_1785 | """
Modified from https://github.com/allenai/allennlp-models/blob/main/allennlp_models/tagging/models/crf_tagger.py
Removed some features that we don't need.
"""
from typing import Dict, Optional, List, Any, cast
from overrides import overrides
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.modules import Seq2SeqEncoder, TextFieldEmbedder
from allennlp.modules import ConditionalRandomField
from allennlp.modules.conditional_random_field import allowed_transitions
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator
import allennlp.nn.util as util
from allennlp.training.metrics import SpanBasedF1Measure
from allennlpadd.common.util import construct_from_params
@Model.register("my_crf_tagger")
class CrfTagger(Model):
"""
The `CrfTagger` encodes a sequence of text with a `Seq2SeqEncoder`,
then uses a Conditional Random Field model to predict a tag for each token in the sequence.
Registered as a `Model` with name "crf_tagger".
# Parameters
vocab : `Vocabulary`, required
A Vocabulary, required in order to compute sizes for input/output projections.
text_field_embedder : `TextFieldEmbedder`, required
Used to embed the tokens `TextField` we get as input to the model.
encoder : `Seq2SeqEncoder`
The encoder that we will use in between embedding tokens and predicting output tags.
label_namespace : `str`, optional (default=`labels`)
This is needed to compute the SpanBasedF1Measure metric.
Unless you did something unusual, the default value should be what you want.
    label_encoding : `str`, optional (default=`"BIO"`)
        Label encoding to use when calculating span f1 and constraining
        the CRF at decoding time. Valid options are "BIO", "BIOUL", "IOB1", "BMES".
        Required if `constrain_crf_decoding` is true.
include_start_end_transitions : `bool`, optional (default=`True`)
Whether to include start and end transition parameters in the CRF.
constrain_crf_decoding : `bool`, optional (default=`True`)
If `True`, the CRF is constrained at decoding time to
produce valid sequences of tags. If this is `True`, then
`label_encoding` is required. If `None` and
label_encoding is specified, this is set to `True`.
If `None` and label_encoding is not specified, it defaults
to `False`.
dropout: `float`, optional (default=`None`)
Dropout probability.
verbose_metrics : `bool`, optional (default = `False`)
If true, metrics will be returned per label class in addition
to the overall statistics.
initializer : `InitializerApplicator`, optional (default=`InitializerApplicator()`)
Used to initialize the model parameters.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
encoder: Dict[str, Any],
label_namespace: str = "labels",
label_encoding: str = "BIO",
include_start_end_transitions: bool = True,
constrain_crf_decoding: bool = True,
dropout: Optional[float] = None,
verbose_metrics: bool = False,
initializer: InitializerApplicator = InitializerApplicator(),
**kwargs,
) -> None:
super().__init__(vocab, **kwargs)
self.label_namespace = label_namespace
self.text_field_embedder = text_field_embedder
self.num_tags = self.vocab.get_vocab_size(label_namespace)
self.encoder = construct_from_params(
Seq2SeqEncoder, input_size=text_field_embedder.get_output_dim(), **encoder)
self._verbose_metrics = verbose_metrics
if dropout:
self.dropout = torch.nn.Dropout(dropout)
else:
self.dropout = None
self.tag_projection_layer = torch.nn.Linear(self.encoder.get_output_dim(), self.num_tags)
self.label_encoding = label_encoding
if constrain_crf_decoding:
if not label_encoding:
raise ConfigurationError(
"constrain_crf_decoding is True, but no label_encoding was specified."
)
labels = self.vocab.get_index_to_token_vocabulary(label_namespace)
constraints = allowed_transitions(label_encoding, labels)
else:
constraints = None
self.crf = ConditionalRandomField(
self.num_tags, constraints, include_start_end_transitions=include_start_end_transitions
)
self._f1_metric = SpanBasedF1Measure(
vocab, tag_namespace=label_namespace, label_encoding=label_encoding
)
initializer(self)
@overrides
def forward(
self, # type: ignore
tokens: TextFieldTensors,
tags: torch.LongTensor = None,
metadata: List[Dict[str, Any]] = None,
**kwargs, # to allow for a more general dataset reader that passes args we don't need
) -> Dict[str, torch.Tensor]:
"""
# Parameters
tokens : `TextFieldTensors`, required
The output of `TextField.as_array()`, which should typically be passed directly to a
`TextFieldEmbedder`. This output is a dictionary mapping keys to `TokenIndexer`
tensors. At its most basic, using a `SingleIdTokenIndexer` this is : `{"tokens":
Tensor(batch_size, num_tokens)}`. This dictionary will have the same keys as were used
for the `TokenIndexers` when you created the `TextField` representing your
sequence. The dictionary is designed to be passed directly to a `TextFieldEmbedder`,
which knows how to combine different word representations into a single vector per
token in your input.
tags : `torch.LongTensor`, optional (default = `None`)
A torch tensor representing the sequence of integer gold class labels of shape
`(batch_size, num_tokens)`.
        metadata : `List[Dict[str, Any]]`, optional (default = `None`)
            metadata containing the original words in the sentence to be tagged under a 'words' key.
# Returns
An output dictionary consisting of:
logits : `torch.FloatTensor`
The logits that are the output of the `tag_projection_layer`
mask : `torch.BoolTensor`
The text field mask for the input tokens
tags : `List[List[int]]`
The predicted tags using the Viterbi algorithm.
loss : `torch.FloatTensor`, optional
A scalar loss to be optimised. Only computed if gold label `tags` are provided.
"""
embedded_text_input = self.text_field_embedder(tokens, **kwargs)
mask = util.get_text_field_mask(tokens)
if self.dropout:
embedded_text_input = self.dropout(embedded_text_input)
encoded_text = self.encoder(embedded_text_input, mask)
if self.dropout:
encoded_text = self.dropout(encoded_text)
logits = self.tag_projection_layer(encoded_text)
best_paths = self.crf.viterbi_tags(logits, mask)
# Just get the top tags and ignore the scores.
predicted_tags = cast(List[List[int]], [x[0] for x in best_paths])
output = {"logits": logits, "mask": mask, "tags": predicted_tags}
if tags is not None:
# Add negative log-likelihood as loss
log_likelihood = self.crf(logits, tags, mask)
output["loss"] = -log_likelihood
# Represent viterbi tags as "class probabilities" that we can
# feed into the metrics
class_probabilities = logits * 0.0
for i, instance_tags in enumerate(predicted_tags):
for j, tag_id in enumerate(instance_tags):
class_probabilities[i, j, tag_id] = 1
self._f1_metric(class_probabilities, tags, mask)
if metadata is not None:
output["words"] = [x["words"] for x in metadata]
return output
@overrides
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
"""
Converts the tag ids to the actual tags.
`output_dict["tags"]` is a list of lists of tag_ids,
so we use an ugly nested list comprehension.
"""
def decode_tags(tags):
return [
self.vocab.get_token_from_index(tag, namespace=self.label_namespace) for tag in tags
]
output_dict["tags"] = [decode_tags(t) for t in output_dict["tags"]]
return output_dict
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
f1_dict = self._f1_metric.get_metric(reset=reset)
if self._verbose_metrics:
return f1_dict
else:
return {x: y for x, y in f1_dict.items() if "overall" in x}
default_predictor = "sentence_tagger"
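# Illustrative sketch (added; not part of the original file): how the constrained
# decoding set used above is typically built.  Assumes AllenNLP's
# `allowed_transitions(label_encoding, {index: label})` helper imported earlier.
_example_labels = {0: "O", 1: "B-PER", 2: "I-PER"}
_example_constraints = allowed_transitions("BIO", _example_labels)
# `_example_constraints` lists the permitted (from_tag, to_tag) index pairs, so a
# constrained Viterbi decode can never emit "I-PER" directly after "O".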
|
the-stack_0_1786 | from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from pdb import set_trace as stop
def batch_product(iput, mat2):
result = None
for i in range(iput.size()[0]):
op = torch.mm(iput[i], mat2)
op = op.unsqueeze(0)
if(result is None):
result = op
else:
result = torch.cat((result,op),0)
return result.squeeze(2)
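# Shape note (illustrative; not part of the original file): for iput of shape
# (batch, source_length, rep) and mat2 of shape (rep, 1), batch_product returns a
# (batch, source_length) tensor and is equivalent to one batched matmul:
#   torch.matmul(iput, mat2).squeeze(2)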
class rec_attention(nn.Module):
# attention with bin context vector per HM and HM context vector
def __init__(self,hm,args):
super(rec_attention,self).__init__()
self.num_directions=2 if args.bidirectional else 1
if (hm==False):
self.bin_rep_size=args.bin_rnn_size*self.num_directions
else:
self.bin_rep_size=args.bin_rnn_size
self.bin_context_vector=nn.Parameter(torch.Tensor(self.bin_rep_size,1),requires_grad=True)
self.softmax=nn.Softmax(dim=1)
self.bin_context_vector.data.uniform_(-0.1, 0.1)
def forward(self,iput):
alpha=self.softmax(batch_product(iput,self.bin_context_vector))
[batch_size,source_length,bin_rep_size2]=iput.size()
repres=torch.bmm(alpha.unsqueeze(2).view(batch_size,-1,source_length),iput)
return repres,alpha
class recurrent_encoder(nn.Module):
# modular LSTM encoder
def __init__(self,n_bins,ip_bin_size,hm,args):
super(recurrent_encoder,self).__init__()
self.bin_rnn_size=args.bin_rnn_size
self.ipsize=ip_bin_size
self.seq_length=n_bins
self.num_directions=2 if args.bidirectional else 1
if (hm==False):
self.bin_rnn_size=args.bin_rnn_size
else:
self.bin_rnn_size=args.bin_rnn_size // 2
self.bin_rep_size=self.bin_rnn_size*self.num_directions
self.rnn=nn.LSTM(self.ipsize,self.bin_rnn_size,num_layers=args.num_layers,dropout=args.dropout,bidirectional=args.bidirectional)
self.bin_attention=rec_attention(hm,args)
def outputlength(self):
return self.bin_rep_size
def forward(self,single_hm,hidden=None):
bin_output, hidden = self.rnn(single_hm,hidden)
bin_output = bin_output.permute(1,0,2)
hm_rep,bin_alpha = self.bin_attention(bin_output)
return hm_rep,bin_alpha
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class att_chrome(nn.Module):
def __init__(self,args):
super(att_chrome,self).__init__()
self.n_hms=args.n_hms
self.n_bins=args.n_bins
self.ip_bin_size=1
self.rnn_hms=nn.ModuleList()
for i in range(self.n_hms):
self.rnn_hms.append(recurrent_encoder(self.n_bins,self.ip_bin_size,False,args))
self.opsize = self.rnn_hms[0].outputlength()
self.hm_level_rnn_1=recurrent_encoder(self.n_hms,self.opsize,True,args)
self.opsize2=self.hm_level_rnn_1.outputlength()
self.diffopsize=2*(self.opsize2)
self.fdiff1_1=nn.Linear(self.opsize2,1)
    def forward(self,iput):
        # iput: (batch_size, n_bins, n_hms); each histone mark (HM) is encoded separately
        bin_a=None
        level1_rep=None
        [batch_size,_,_]=iput.size()
        for hm,hm_encdr in enumerate(self.rnn_hms):
            hmod=iput[:,:,hm].contiguous()
            hmod=torch.t(hmod).unsqueeze(2)  # -> (n_bins, batch_size, 1) for the LSTM
op,a= hm_encdr(hmod)
if level1_rep is None:
level1_rep=op
bin_a=a
else:
level1_rep=torch.cat((level1_rep,op),1)
bin_a=torch.cat((bin_a,a),1)
level1_rep=level1_rep.permute(1,0,2)
final_rep_1,hm_level_attention_1=self.hm_level_rnn_1(level1_rep)
final_rep_1=final_rep_1.squeeze(1)
prediction_m=((self.fdiff1_1(final_rep_1)))
return prediction_m
args_dict = {'lr': 0.0001, 'model_name': 'attchrome', 'clip': 1, 'epochs': 2, 'batch_size': 10, 'dropout': 0.5, 'cell_1': 'Cell1', 'save_root': 'Results/Cell1', 'data_root': 'data/', 'gpuid': 0, 'gpu': 0, 'n_hms': 5, 'n_bins': 200, 'bin_rnn_size': 32, 'num_layers': 1, 'unidirectional': False, 'save_attention_maps': False, 'attentionfilename': 'beta_attention.txt', 'test_on_saved_model': False, 'bidirectional': True, 'dataset': 'Cell1'}
att_chrome_args = AttrDict(args_dict)
att_chrome_model = att_chrome(att_chrome_args)
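# Illustrative smoke test (added; not part of the original script). The model
# expects input of shape (batch_size, n_bins, n_hms) and returns one score per
# sample with shape (batch_size, 1).
if __name__ == "__main__":
    dummy_batch = torch.randn(att_chrome_args.batch_size,
                              att_chrome_args.n_bins,
                              att_chrome_args.n_hms)
    with torch.no_grad():
        scores = att_chrome_model(dummy_batch)
    print(scores.shape)  # torch.Size([10, 1])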
|
the-stack_0_1792 |
import importlib
import datetime as dt
import xml.etree.ElementTree as ET
from .session import session
# Utility Objects and Functions
NS = {
'ft': 'http://www.epo.org/fulltext',
'ops': 'http://ops.epo.org',
'ex': 'http://www.epo.org/exchange'
}
def etree_els_to_text(els) -> str:
segments = [' '.join(e.itertext()) for e in els]
return "\n".join(segments)
def docid_to_inpadoc(doc, model_name='Inpadoc'):
klass = getattr(importlib.import_module('patent_client.epo.inpadoc.model'), model_name)
date = doc.find('./ex:date', NS)
if date is not None:
date = dt.datetime.strptime(date.text, '%Y%m%d').date()
return klass(
doc_type=doc.attrib['document-id-type'],
country=doc.find('./ex:country', NS).text,
number=doc.find('./ex:doc-number', NS).text,
kind_code=doc.find('./ex:kind', NS).text,
date=date,
)
def parse_family_member(member):
family_class = getattr(importlib.import_module('patent_client.epo.inpadoc.model'), 'InpadocFamilyMember')
priority_claim_class = getattr(importlib.import_module('patent_client.epo.inpadoc.model'), 'InpadocFamilyPriorityClaim')
pub = member.find('.//ex:publication-reference/ex:document-id[@document-id-type="docdb"]', NS)
app = member.find('.//ex:application-reference/ex:document-id[@document-id-type="docdb"]', NS)
family_id = int(member.attrib['family-id'])
priority = member.findall('.//ex:priority-claim', NS)
priority_claims = list()
for c in priority:
doc = docid_to_inpadoc(c.find('.//ex:document-id[@document-id-type="docdb"]', NS), 'Inpadoc')
active = c.find('.//ex:priority-active-indicator', NS).text == 'YES'
link_type = c.find('.//ex:priority-linkage-type', NS)
link_type = link_type.text if link_type is not None else link_type
seq = c.attrib['sequence']
kind = c.attrib.get('kind', None)
priority_claims.append(priority_claim_class(**{
'seq': int(seq),
'kind': kind,
'link_type': link_type,
'active': active,
'doc': doc,
}))
return family_class(**{
'publication': docid_to_inpadoc(pub, 'InpadocPublication'),
'application': docid_to_inpadoc(app, 'InpadocApplication'),
'priority_claims': priority_claims,
'family_id': family_id,
})
# Lookup Functions
def lookup_claims():
@property
def get(self) -> str:
url = f"http://ops.epo.org/3.2/rest-services/published-data/publication/{self.doc_type}/{self.num}/claims"
response = session.get(url)
claim_els = ET.fromstring(response.text).findall('.//ft:claim-text', NS)
return etree_els_to_text(claim_els)
return get
def lookup_description():
@property
def get(self) -> str:
url = f"http://ops.epo.org/3.2/rest-services/published-data/publication/{self.doc_type}/{self.num}/description"
response = session.get(url)
description_els = ET.fromstring(response.text).findall('.//ft:p', NS)
return etree_els_to_text(description_els)
return get
def lookup_family():
@property
def get(self):
url = f"http://ops.epo.org/3.2/rest-services/family/publication/{self.doc_type}/{self.num}"
response = session.get(url)
members = ET.fromstring(response.text).findall('.//ops:family-member', NS)
return [parse_family_member(m) for m in members]
return get
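# Hedged usage sketch (assumption; the real model classes live in
# patent_client.epo.inpadoc.model): each lookup_* factory returns a lazy property
# that fetches and parses OPS data the first time it is accessed, roughly:
#
#   class Inpadoc:
#       doc_type = "epodoc"      # hypothetical attribute values
#       num = "EP1000000"
#       claims = lookup_claims()
#       description = lookup_description()
#       family = lookup_family()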
|
the-stack_0_1794 | """Support for Anthem Network Receivers and Processors."""
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.components.media_player.const import (
SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PORT, EVENT_HOMEASSISTANT_STOP, STATE_OFF,
STATE_ON)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'anthemav'
DEFAULT_PORT = 14999
SUPPORT_ANTHEMAV = SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up our socket to the AVR."""
import anthemav
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
name = config.get(CONF_NAME)
device = None
_LOGGER.info("Provisioning Anthem AVR device at %s:%d", host, port)
def async_anthemav_update_callback(message):
"""Receive notification from transport that new data exists."""
_LOGGER.info("Received update callback from AVR: %s", message)
hass.async_create_task(device.async_update_ha_state())
avr = await anthemav.Connection.create(
host=host, port=port, loop=hass.loop,
update_callback=async_anthemav_update_callback)
device = AnthemAVR(avr, name)
_LOGGER.debug("dump_devicedata: %s", device.dump_avrdata)
_LOGGER.debug("dump_conndata: %s", avr.dump_conndata)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, device.avr.close)
async_add_entities([device])
class AnthemAVR(MediaPlayerDevice):
"""Entity reading values from Anthem AVR protocol."""
def __init__(self, avr, name):
"""Initialize entity with transport."""
super().__init__()
self.avr = avr
self._name = name
def _lookup(self, propname, dval=None):
return getattr(self.avr.protocol, propname, dval)
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_ANTHEMAV
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return name of device."""
return self._name or self._lookup('model')
@property
def state(self):
"""Return state of power on/off."""
pwrstate = self._lookup('power')
if pwrstate is True:
return STATE_ON
if pwrstate is False:
return STATE_OFF
return None
@property
def is_volume_muted(self):
"""Return boolean reflecting mute state on device."""
return self._lookup('mute', False)
@property
def volume_level(self):
"""Return volume level from 0 to 1."""
return self._lookup('volume_as_percentage', 0.0)
@property
def media_title(self):
"""Return current input name (closest we have to media title)."""
return self._lookup('input_name', 'No Source')
@property
def app_name(self):
"""Return details about current video and audio stream."""
return self._lookup('video_input_resolution_text', '') + ' ' \
+ self._lookup('audio_input_name', '')
@property
def source(self):
"""Return currently selected input."""
return self._lookup('input_name', "Unknown")
@property
def source_list(self):
"""Return all active, configured inputs."""
return self._lookup('input_list', ["Unknown"])
async def async_select_source(self, source):
"""Change AVR to the designated source (by name)."""
self._update_avr('input_name', source)
async def async_turn_off(self):
"""Turn AVR power off."""
self._update_avr('power', False)
async def async_turn_on(self):
"""Turn AVR power on."""
self._update_avr('power', True)
async def async_set_volume_level(self, volume):
"""Set AVR volume (0 to 1)."""
self._update_avr('volume_as_percentage', volume)
async def async_mute_volume(self, mute):
"""Engage AVR mute."""
self._update_avr('mute', mute)
def _update_avr(self, propname, value):
"""Update a property in the AVR."""
_LOGGER.info(
"Sending command to AVR: set %s to %s", propname, str(value))
setattr(self.avr.protocol, propname, value)
@property
def dump_avrdata(self):
"""Return state of avr object for debugging forensics."""
attrs = vars(self)
return(
'dump_avrdata: '
+ ', '.join('%s: %s' % item for item in attrs.items()))
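# Example configuration.yaml entry for this platform (illustrative; the host is a
# placeholder and the keys mirror PLATFORM_SCHEMA above):
#
#   media_player:
#     - platform: anthemav
#       host: 192.168.1.50
#       port: 14999
#       name: Anthem AVR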
|
the-stack_0_1795 | import os
import string
import random
import tabulate
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Embedding, GRU
from tensorflow.keras.losses import sparse_categorical_crossentropy
from tensorflow.keras.models import load_model
vocab = sorted(set(string.printable))
char_to_ind = {u: i for i, u in enumerate(vocab)}
ind_to_char = np.array(vocab)
class Generator(object):
def __init__(self):
self.model = None
self.vocab = sorted(set(string.printable))
self.vocab_size = len(self.vocab)
self.hparams = {'rnn_neurons' : 256,
'embed_dim' : 64,
'learning_rate' : 1e-4,
'dropout' : 0.3}
def _createModel(self, batch_size):
model = Sequential()
model.add(Embedding(self.vocab_size, self.hparams['embed_dim'],batch_input_shape=[batch_size, None]))
model.add(GRU(self.hparams['rnn_neurons'] ,return_sequences=True, stateful=True, recurrent_initializer='glorot_uniform', dropout=self.hparams['dropout']))
model.add(GRU(self.hparams['rnn_neurons'] ,return_sequences=True, stateful=True, recurrent_initializer='glorot_uniform', dropout=self.hparams['dropout']))
model.add(Dense(self.vocab_size))
opt = tf.keras.optimizers.Adam(learning_rate=self.hparams['learning_rate'])
model.compile(optimizer=opt, loss=self._sparse_cat_loss)
self.model = model
def _sparse_cat_loss(self, y_true, y_pred):
return sparse_categorical_crossentropy(y_true, y_pred, from_logits=True)
def load_weights(self, weight_file_path):
'''
Constructs the model and loads the weights
Parameters:
weight_file_path (str): Path to weights location
Returns:
None
'''
#if os.path.exists(weight_file_path):
self._createModel(batch_size = 1)
self.model.load_weights(weight_file_path)
self.model.build(tf.TensorShape([1, None]))
def train(self, data, epochs=1, verbose=1, save_at=5):
'''
Trains the model for a given number of epochs
Parameters:
epochs (int) : number of epochs to train on
verbose (bool) : to print loss and epoch number of not to
save_at (int) : to save at ever n th epoch
Returns:
None
'''
self._createModel(batch_size = 128)
for epoch in range(1, epochs + 1):
            print('Epoch ' + str(epoch) + '/' + str(epochs))
self.model.fit(data, epochs=1, verbose=verbose)
if (epoch + 1) % save_at == 0:
self.model.save('model-' + str(epoch) + '-epochs-256-neurons.h5')
    def predict(self, start_seed, gen_size=100, temp=None):
        '''
        Generates further texts according to the seed text
        Parameters:
            start_seed (str) : seed that model will use to generate further texts
            gen_size (int) : number of characters to generate; 700-1000 tends to work best
            temp (float) : sampling temperature; a random value in (0, 1) is drawn if omitted
        Returns:
            str : the seed text followed by the generated characters
        '''
        if self.model is None:
            raise ValueError('Model Object cannot be NoneType')
        num_generate = gen_size
        input_eval = [char_to_ind[s] for s in start_seed]
        input_eval = tf.expand_dims(input_eval, 0)
        text_generated = []
        temperature = temp if temp is not None else random.uniform(0, 1)
self.model.reset_states()
for _ in range(num_generate):
predictions = self.model(input_eval)
predictions = tf.squeeze(predictions, 0)
predictions = predictions / temperature
predicted_id = tf.random.categorical(predictions, num_samples=1)[-1, 0].numpy()
input_eval = tf.expand_dims([predicted_id], 0)
text_generated.append(ind_to_char[predicted_id])
return (start_seed + ''.join(text_generated))
def hyperparams(self):
print('Hyper Parameters')
print('+--------------------------+')
for key, value in self.hparams.items():
print("|{: <13} | {: >10}|".format(key, value))
print('+--------------------------+')
def summary(self):
self.model.summary()
@property
def __doc__(self):
return '''
Generator object can construct the model,
save the weights, load the weights train the model,
and make predictions
---------------------------------------------------
        Training example :
model = Generator() # creating an instance of model
model.train(dataset, epochs = 5, verbose=1, save_at=1) # training the model
----------------------------------------------------
Continue training from a saved weights file :
model = Generator() # creating an instance of model
        model.load_weights('model-3-epochs.h5') # loading the weights
model.train(dataset, epochs = 5, verbose=1, save_at=1) # training the model
-----------------------------------------------------
Preditction example :
model = Generator() # creating an instance of model
model.load_weights('model-10-epochs.h5') # loading the weights
print(model.predict('hello')) # making prediction and printing
-----------------------------------------------------
'''
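# Hedged sketch (assumption; not part of the original module) of a dataset that
# matches Generator.train(): batches of 128 (input, target) character-index
# sequences, where the target is the input shifted forward by one character.
def make_char_dataset(text, seq_len=120, batch_size=128):
    ids = np.array([char_to_ind[c] for c in text if c in char_to_ind])
    ds = tf.data.Dataset.from_tensor_slices(ids)
    ds = ds.batch(seq_len + 1, drop_remainder=True)
    ds = ds.map(lambda chunk: (chunk[:-1], chunk[1:]))
    # drop_remainder keeps every batch at exactly 128 sequences, which the
    # stateful GRUs built with batch_size=128 in train() require.
    return ds.shuffle(10000).batch(batch_size, drop_remainder=True)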
|
the-stack_0_1796 | # -*- coding: utf-8 -*-
"""
mslib.msui.mss_qt
~~~~~~~~~~~~~~~~~
This module helps with qt
This file is part of mss.
:copyright: Copyright 2017-2018 Joern Ungermann, Reimar Bauer
:copyright: Copyright 2017-2020 by the mss team, see AUTHORS.
:license: APACHE-2.0, see LICENSE for details.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import importlib
import logging
import os
import platform
import sys
import traceback
from fslib.fs_filepicker import getSaveFileName, getOpenFileName, getExistingDirectory
from PyQt5 import QtCore, QtWidgets # noqa
from mslib.utils import config_loader, FatalUserError
def get_open_filename_qt(*args):
filename = QtWidgets.QFileDialog.getOpenFileName(*args)
return filename[0] if isinstance(filename, tuple) else str(filename)
def get_open_filenames_qt(*args):
"""
To select multiple files simultaneously
"""
filenames = QtWidgets.QFileDialog.getOpenFileNames(*args)
return filenames[0] if isinstance(filenames, tuple) else str(filenames)
def get_save_filename_qt(*args):
filename = QtWidgets.QFileDialog.getSaveFileName(*args)
return filename[0] if isinstance(filename, tuple) else str(filename)
def get_existing_directory_qt(*args):
dirname = QtWidgets.QFileDialog.getExistingDirectory(*args)
return dirname[0] if isinstance(dirname, tuple) else str(dirname)
def get_pickertype(tag, typ):
default = config_loader(dataset="filepicker_default")
if typ is None:
if tag is None:
typ = default
else:
typ = config_loader(dataset=tag)
return typ
def get_open_filename(parent, title, dirname, filt, pickertag=None, pickertype=None):
pickertype = get_pickertype(pickertag, pickertype)
if pickertype == "fs":
filename = getOpenFileName(parent, dirname, filt, title="Import Flight Track")
elif pickertype in ["qt", "default"]:
filename = get_open_filename_qt(parent, title, os.path.expanduser(dirname), filt)
else:
raise FatalUserError(f"Unknown file picker type '{pickertype}'.")
logging.debug("Selected '%s'", filename)
if filename == "":
filename = None
return filename
def get_open_filenames(parent, title, dirname, filt, pickertag=None, pickertype=None):
"""
Opens multiple files simultaneously
Currently implemented only in kmloverlay_dockwidget.py
"""
pickertype = get_pickertype(pickertag, pickertype)
if pickertype in ["qt", "default"]:
filename = get_open_filenames_qt(parent, title, os.path.expanduser(dirname), filt)
else:
raise FatalUserError(f"Unknown file picker type '{pickertype}'.")
logging.debug("Selected '%s'", filename)
if filename == "":
filename = None
return filename
def get_save_filename(parent, title, filename, filt, pickertag=None, pickertype=None):
pickertype = get_pickertype(pickertag, pickertype)
if pickertype == "fs":
dirname, filename = os.path.split(filename)
filename = getSaveFileName(
parent, dirname, filt, title=title, default_filename=filename, show_save_action=True)
elif pickertype in ["qt", "default"]:
filename = get_save_filename_qt(parent, title, os.path.expanduser(filename), filt)
else:
raise FatalUserError(f"Unknown file picker type '{pickertype}'.")
logging.debug("Selected '%s'", filename)
if filename == "":
filename = None
return filename
def get_existing_directory(parent, title, defaultdir, pickertag=None, pickertype=None):
pickertype = get_pickertype(pickertag, pickertype)
if pickertype == "fs":
dirname = getExistingDirectory(parent, title=title, fs_url=defaultdir)[0]
elif pickertype in ["qt", "default"]:
dirname = get_existing_directory_qt(parent, title, defaultdir)
else:
raise FatalUserError(f"Unknown file picker type '{pickertype}'.")
logging.debug("Selected '%s'", dirname)
if dirname == "":
dirname = None
return dirname
def variant_to_string(variant):
if isinstance(variant, QtCore.QVariant):
return str(variant.value())
return str(variant)
def variant_to_float(variant, locale=QtCore.QLocale()):
if isinstance(variant, QtCore.QVariant):
value = variant.value()
else:
value = variant
if isinstance(value, (int, float)):
return value
try:
float_value, ok = locale.toDouble(value)
if not ok:
raise ValueError
except TypeError: # neither float nor string, try Python conversion
logging.error("Unexpected type in float conversion: %s=%s",
type(value), value)
float_value = float(value)
return float_value
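# Illustrative examples (not part of the original module): variant_to_float accepts
# QVariant, numeric, or locale-formatted string input, e.g.
#   variant_to_float(3.5) == 3.5
#   variant_to_float("3.5", QtCore.QLocale("en_US")) == 3.5
#   variant_to_float("3,5", QtCore.QLocale("de_DE")) == 3.5   # comma decimal mark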
# Import all Dialogues from the proper module directory.
for mod in [
"ui_about_dialog",
"ui_hexagon_dockwidget",
"ui_kmloverlay_dockwidget",
"ui_customize_kml",
"ui_mainwindow",
"ui_performance_settings",
"ui_remotesensing_dockwidget",
"ui_satellite_dockwidget",
"ui_sideview_options",
"ui_sideview_window",
"ui_tableview_window",
"ui_topview_mapappearance",
"ui_topview_window",
"ui_wms_capabilities",
"ui_wms_dockwidget",
"ui_wms_password_dialog"]:
globals()[mod] = importlib.import_module("mslib.msui.qt5." + mod)
# to store config by QSettings
QtCore.QCoreApplication.setOrganizationName("mss")
# PyQt5 silently aborts on a Python Exception
def excepthook(type_, value, traceback_):
"""
This dumps the error to console, logging (i.e. logfile), and tries to open a MessageBox for GUI users.
"""
import mslib
import mslib.utils
tb = "".join(traceback.format_exception(type_, value, traceback_))
traceback.print_exception(type_, value, traceback_)
logging.critical("MSS Version: %s", mslib.__version__)
logging.critical("Python Version: %s", sys.version)
logging.critical("Platform: %s (%s)", platform.platform(), platform.architecture())
logging.critical("Fatal error: %s", tb)
if type_ is mslib.utils.FatalUserError:
QtWidgets.QMessageBox.critical(
None, "fatal error",
f"Fatal user error in MSS {mslib.__version__} on {platform.platform()}\n"
f"Python {sys.version}\n"
f"\n"
f"{value}")
else:
QtWidgets.QMessageBox.critical(
None, "fatal error",
f"Fatal error in MSS {mslib.__version__} on {platform.platform()}\n"
f"Python {sys.version}\n"
f"\n"
f"Please report bugs in MSS to https://github.com/Open-MSS/MSS\n"
f"\n"
f"Information about the fatal error:\n"
f"\n"
f"{tb}")
sys.excepthook = excepthook
|
the-stack_0_1798 | #!/usr/bin/env python
import sys
import Bio
from Bio import SeqIO, SeqFeature
from Bio.SeqRecord import SeqRecord
import os
# Copyright(C) 2009 Iddo Friedberg & Ian MC Fleming
# Released under Biopython license. http://www.biopython.org/DIST/LICENSE
# Do not remove this comment
def get_interregions(genbank_path,intergene_length=1):
seq_record = SeqIO.parse(open(genbank_path), "genbank").__next__()
cds_list_plus = []
cds_list_minus = []
intergenic_records = []
# Loop over the genome file, get the CDS features on each of the strands
for feature in seq_record.features:
if feature.type == 'CDS':
mystart = feature.location._start.position
myend = feature.location._end.position
if feature.strand == -1:
cds_list_minus.append((mystart,myend,-1))
elif feature.strand == 1:
cds_list_plus.append((mystart,myend,1))
else:
sys.stderr.write("No strand indicated %d-%d. Assuming +\n" %
(mystart, myend))
cds_list_plus.append((mystart,myend,1))
for i,pospair in enumerate(cds_list_plus[1:]):
# Compare current start position to previous end position
last_end = cds_list_plus[i][1]
this_start = pospair[0]
strand = pospair[2]
if this_start - last_end >= intergene_length:
intergene_seq = seq_record.seq[last_end:this_start]
strand_string = "+"
intergenic_records.append(
SeqRecord(intergene_seq,id="%s-ign-%d" % (seq_record.name,i),
description="%s %d-%d %s" % (seq_record.name, last_end+1,
this_start,strand_string)))
for i,pospair in enumerate(cds_list_minus[1:]):
last_end = cds_list_minus[i][1]
this_start = pospair[0]
strand = pospair[2]
if this_start - last_end >= intergene_length:
intergene_seq = seq_record.seq[last_end:this_start]
strand_string = "-"
intergenic_records.append(
SeqRecord(intergene_seq,id="%s-ign-%d" % (seq_record.name,i),
description="%s %d-%d %s" % (seq_record.name, last_end+1,
this_start,strand_string)))
outpath = os.path.splitext(os.path.basename(genbank_path))[0] + "_ign.fasta"
SeqIO.write(intergenic_records, open(outpath,"w"), "fasta")
if __name__ == '__main__':
if len(sys.argv) == 2:
get_interregions(sys.argv[1])
elif len(sys.argv) == 3:
get_interregions(sys.argv[1],int(sys.argv[2]))
else:
print("Usage: get_intergenic.py gb_file [intergenic_length]")
sys.exit(0)
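# Example invocation (illustrative): extract intergenic regions of at least 50 bp
#   python get_intergenic.py NC_000913.gb 50
# which writes NC_000913_ign.fasta to the current working directory.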
|
the-stack_0_1799 | #!/usr/bin/env python3
"""Discord bot for organizing PUGs (pick-up games).
Built for Neotokyo, but should work for any two-team game with even number
of players total.
Usage:
Commands:
Commands are prefixed with a character defined by the config value
"NTBOT_CMD_PREFIX", by default "!", so the command pug becomes "!pug" in
the Discord chat, and so on.
- clearpuggers — Empty the PUG queue.
Command access can be restricted by role(s) with the
config value NTBOT_PUG_ADMIN_ROLES.
- ping — Bot will simply respond with "Pong". Use to test if
the bot is still online and responsive.
- ping_puggers — Ping all the players currently in the PUG queue.
Can be used to manually organize games with smaller
than expected number of players. Expects a message
after the command, eg: "!ping_puggers Play 4v4?"
- pug — Join the PUG queue if there is room.
- puggers — List players currently in the PUG queue.
- scramble — Suggest randomly scrambled teams for the last full PUG
for balancing reasons. Can be repeated until a
satisfactory scramble is reached.
- unpug — Leave the PUG queue.
Config values:
The config values have been documented as comments in the config.yml
file itself.
For more information, please see the repository at:
https://github.com/Rainyan/discord-bot-ntpug
"""
from ast import literal_eval
import asyncio
from datetime import datetime, timedelta, timezone
import os
import time
import random
import discord
from discord.ext import commands, tasks
import pendulum
from strictyaml import load, Bool, EmptyList, Float, Int, Map, Seq, Str
from strictyaml.ruamel.comments import CommentedSeq
# May encounter breaking changes otherwise
# NOTE: Discord API "decomissions" are scheduled for April 30, 2022:
# https://github.com/discord/discord-api-docs/discussions/4510
# Probably have to upgrade to pycord 2.X dev branch, or
# some original discord.py project equivalent whenever it releases.
assert discord.version_info.major == 1 and discord.version_info.minor == 7
SCRIPT_NAME = "NT Pug Bot"
SCRIPT_VERSION = "0.14.3"
CFG_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"config.yml")
assert os.path.isfile(CFG_PATH)
with open(file=CFG_PATH, mode="r", encoding="utf-8") as f_config:
YAML_CFG_SCHEMA = Map({
"NTBOT_SECRET_TOKEN": Str(),
"NTBOT_CMD_PREFIX": Str(),
"NTBOT_PUG_CHANNEL": Str(),
"NTBOT_PLAYERS_REQUIRED_TOTAL": Int(),
"NTBOT_DEBUG_ALLOW_REQUEUE": Bool(),
"NTBOT_POLLING_INTERVAL_SECS": Int(),
"NTBOT_PRESENCE_INTERVAL_SECS": Int(),
"NTBOT_PUGGER_ROLE": Str(),
"NTBOT_PUGGER_ROLE_PING_THRESHOLD": Float(),
"NTBOT_PUGGER_ROLE_PING_MIN_INTERVAL_HOURS": Float(),
"NTBOT_PUG_ADMIN_ROLES": Seq(Str()) | EmptyList(),
"NTBOT_IDLE_THRESHOLD_HOURS": Int(),
"NTBOT_PING_PUGGERS_COOLDOWN_SECS": Float(),
})
CFG = load(f_config.read(), YAML_CFG_SCHEMA)
assert CFG is not None
def cfg(key):
"""Returns a bot config value from environment variable or config file,
in that order. If using an env var, its format has to be constructible
to the type determined by the config file's strictyaml schema.
"""
if os.environ.get(key):
ret_type = type(CFG[key].value)
if ret_type == CommentedSeq:
return CFG[key]
return ret_type(literal_eval(os.environ.get(key)))
return CFG[key].value
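# Illustrative note (added for clarity): environment overrides are parsed with
# literal_eval and must therefore evaluate to the type produced by the YAML schema,
# e.g. (values here are hypothetical)
#   NTBOT_PLAYERS_REQUIRED_TOTAL=8          -> int
#   NTBOT_DEBUG_ALLOW_REQUEUE=True          -> bool
#   NTBOT_PUGGER_ROLE_PING_THRESHOLD=0.5    -> float
#   NTBOT_PUG_CHANNEL="'pug-queue'"         -> str (quoted so literal_eval sees a string)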
bot = commands.Bot(command_prefix=cfg("NTBOT_CMD_PREFIX"))
NUM_PLAYERS_REQUIRED = cfg("NTBOT_PLAYERS_REQUIRED_TOTAL")
assert NUM_PLAYERS_REQUIRED > 0, "Need positive number of players"
assert NUM_PLAYERS_REQUIRED % 2 == 0, "Need even number of players"
DEBUG_ALLOW_REQUEUE = cfg("NTBOT_DEBUG_ALLOW_REQUEUE")
PUG_CHANNEL_NAME = cfg("NTBOT_PUG_CHANNEL")
BOT_SECRET_TOKEN = cfg("NTBOT_SECRET_TOKEN")
assert 0 <= cfg("NTBOT_PUGGER_ROLE_PING_THRESHOLD") <= 1
PUGGER_ROLE = cfg("NTBOT_PUGGER_ROLE")
assert len(PUGGER_ROLE) > 0
# This is a variable because the text used for detecting previous PUGs when
# restoring status during restart.
PUG_READY_TITLE = "**PUG is now ready!**"
print(f"Now running {SCRIPT_NAME} v.{SCRIPT_VERSION}", flush=True)
class PugStatus():
"""Object for containing and operating on one Discord server's PUG
information.
"""
# pylint: disable=too-many-instance-attributes
# This might need revisiting, but deal with it for now.
def __init__(self, guild_channel, players_required=NUM_PLAYERS_REQUIRED,
guild_roles=None):
self.guild_roles = [] if guild_roles is None else guild_roles
self.guild_channel = guild_channel
self.jin_players = []
self.nsf_players = []
self.prev_puggers = []
self.players_required_total = players_required
self.players_per_team = int(self.players_required_total / 2)
self.last_changed_presence = 0
self.last_presence = None
self.lock = asyncio.Lock()
self.last_role_ping = None
async def reset(self):
"""Stores the previous puggers, and then resets current pugger queue.
"""
async with self.lock:
self.prev_puggers = self.jin_players + self.nsf_players
self.jin_players.clear()
self.nsf_players.clear()
async def player_join(self, player, team=None):
"""If there is enough room in this PUG queue, assigns this player
to a random team to wait in, until the PUG is ready to be started.
The specific team rosters can later be shuffled by a !scramble.
"""
async with self.lock:
if not DEBUG_ALLOW_REQUEUE and \
(player in self.jin_players or player in self.nsf_players):
return False, (f"{player.mention} You are already queued! "
"If you wanted to un-PUG, please use **"
f"{bot.command_prefix}unpug** "
"instead.")
if team is None:
team = random.randint(0, 1) # flip a coin between jin/nsf
if team == 0:
if len(self.jin_players) < self.players_per_team:
self.jin_players.append(player)
return True, ""
if len(self.nsf_players) < self.players_per_team:
self.nsf_players.append(player)
return True, ""
return False, (f"{player.mention} Sorry, this PUG is currently "
"full!")
async def reload_puggers(self):
"""Iterate PUG channel's recent message history to figure out who
should be pugged. This is used both for restoring puggers after a
bot restart, but also for dropping inactive players from the queue
after inactivity of "NTBOT_IDLE_THRESHOLD_HOURS" period.
"""
limit_hrs = cfg("NTBOT_IDLE_THRESHOLD_HOURS")
assert limit_hrs > 0
after = pendulum.now().subtract(hours=limit_hrs)
# Because Pycord 1.7.3 wants non timezone aware "after" date.
after = datetime.fromisoformat(after.in_timezone("UTC").isoformat())
after = after.replace(tzinfo=None)
def is_cmd(msg, cmd):
"""Predicate for whether message equals a specific PUG command.
"""
return msg.content == f"{bot.command_prefix}{cmd}"
def is_pug_start(msg):
"""Predicate for whether a message signals PUG start.
"""
return msg.author.bot and msg.content.startswith(PUG_READY_TITLE)
backup_nsf = self.nsf_players.copy()
backup_jin = self.jin_players.copy()
backup_prev = self.prev_puggers.copy()
try:
# First reset the PUG queue, and then replay the pug/unpug traffic
# within the acceptable "restore_puggers_limit_hours" history range
await self.reset()
# We remove the default max retrieved messages history limit
# because we need to always retrieve the full order of events here.
# This can be a slow operation if the channel is heavily congested
# within the "now-after" search range, but it's acceptable here
# because this code only runs on bot init, and then once per
# clear_inactive_puggers() task loop period, which is at most once
# per hour.
async for msg in self.guild_channel.history(limit=None,
after=after,
oldest_first=True).\
filter(lambda msg: any((is_cmd(msg, "pug"),
is_cmd(msg, "unpug"),
is_pug_start(msg)))):
if is_pug_start(msg):
await self.reset()
elif is_cmd(msg, "pug"):
await self.player_join(msg.author)
else:
await self.player_leave(msg.author)
# Discord frequently HTTP 500's, so need to have pug queue backups.
# We can also hit a HTTP 429 here, which might be a pycord bug(?)
# as I don't think we're being unreasonable with the history range.
except discord.errors.HTTPException as err:
self.nsf_players = backup_nsf.copy()
self.jin_players = backup_jin.copy()
self.prev_puggers = backup_prev.copy()
raise err
async def player_leave(self, player):
"""Removes a player from the pugger queue if they were in it.
"""
async with self.lock:
num_before = self.num_queued()
self.jin_players = [p for p in self.jin_players if p != player]
self.nsf_players = [p for p in self.nsf_players if p != player]
num_after = self.num_queued()
left_queue = (num_after != num_before)
if left_queue:
return True, ""
return False, (f"{player.mention} You are not currently in the "
"PUG queue")
def num_queued(self):
"""Returns the number of puggers currently in the PUG queue.
"""
return len(self.jin_players) + len(self.nsf_players)
def num_expected(self):
"""Returns the number of puggers expected, total, to start a PUG.
"""
return self.players_required_total
def num_more_needed(self):
"""Returns how many more puggers are needed to start a PUG.
"""
return max(0, self.num_expected() - self.num_queued())
def is_full(self):
"""Whether the PUG queue is currently full or not."
"""
return self.num_queued() >= self.num_expected()
async def start_pug(self):
"""Starts a PUG match.
"""
async with self.lock:
if len(self.jin_players) == 0 or len(self.nsf_players) == 0:
await self.reset()
return False, "Error: team was empty"
msg = f"{PUG_READY_TITLE}\n"
msg += "_Jinrai players:_\n"
for player in self.jin_players:
msg += f"{player.mention}, "
msg = msg[:-2] # trailing ", "
msg += "\n_NSF players:_\n"
for player in self.nsf_players:
msg += f"{player.mention}, "
msg = msg[:-2] # trailing ", "
msg += ("\n\nTeams unbalanced? Use **"
f"{bot.command_prefix}scramble** to suggest new "
"random teams.")
return True, msg
async def update_presence(self):
"""Updates the bot's status message ("presence").
This is used for displaying things like the PUG queue status.
"""
async with self.lock:
delta_time = int(time.time()) - self.last_changed_presence
if delta_time < cfg("NTBOT_PRESENCE_INTERVAL_SECS") + 2:
return
presence = self.last_presence
if presence is None:
presence = {
"activity": discord.BaseActivity(),
"status": discord.Status.idle
}
puggers_needed = self.num_more_needed()
# Need to keep flipping status because activity update in itself
# doesn't seem to propagate that well.
status = discord.Status.idle
if presence["status"] == status:
status = discord.Status.online
if puggers_needed > 0:
text = f"for {puggers_needed} more pugger"
if puggers_needed > 1:
text += "s" # plural
else:
text += "!" # need one more!
activity = discord.Activity(type=discord.ActivityType.watching,
name=text)
else:
text = "a PUG! 🐩"
activity = discord.Activity(type=discord.ActivityType.playing,
name=text)
presence["activity"] = activity
presence["status"] = status
await bot.change_presence(activity=presence["activity"],
status=presence["status"])
self.last_presence = presence
self.last_changed_presence = int(time.time())
async def role_ping_deltatime(self):
"""Returns a datetime.timedelta of latest role ping, or None if no such
ping was found.
"""
after = pendulum.now().subtract(
hours=cfg("NTBOT_PUGGER_ROLE_PING_MIN_INTERVAL_HOURS"))
# Because Pycord 1.7.3 wants non timezone aware "after" date.
after = datetime.fromisoformat(after.in_timezone("UTC").isoformat())
after = after.replace(tzinfo=None)
try:
async for msg in self.guild_channel.history(limit=None,
after=after,
oldest_first=False):
if PUGGER_ROLE in [role.name for role in msg.role_mentions]:
# Because Pycord 1.7.3 returns non timezone aware UTC date,
# and we need to subtract a timedelta using it.
naive_utc_now = datetime.now(timezone.utc)
naive_utc_now = naive_utc_now.replace(tzinfo=None)
self.last_role_ping = msg.created_at
return naive_utc_now - msg.created_at
except discord.errors.HTTPException as err:
# If it's not a library error, and we got a HTTP 5xx response,
# err on the side of caution and treat it as if we found a recent
# ping by returning a zeroed timedelta, so that the bot will try
# again later. The Discord API throws server side HTTP 5xx errors
# pretty much daily, so silently ignoring them here keeps the bot
# side error logs cleaner since the Discord bugs aren't really
# actionable for us as the API user.
if err.code == 0 and str(err.status)[:1] == "5":
                return timedelta()
raise err
return None
async def ping_role(self):
"""Pings the puggers Discord server role, if it's currently allowed.
Frequency of these pings is restricted to avoid being too spammy.
"""
async with self.lock:
if self.num_more_needed() == 0:
return
pugger_ratio = self.num_queued() / self.num_expected()
ping_ratio = cfg("NTBOT_PUGGER_ROLE_PING_THRESHOLD")
if pugger_ratio < ping_ratio:
return
last_ping_dt = await self.role_ping_deltatime()
hours_limit = cfg("NTBOT_PUGGER_ROLE_PING_MIN_INTERVAL_HOURS")
if last_ping_dt is not None:
last_ping_hours = last_ping_dt.total_seconds() / 60 / 60
if last_ping_hours < hours_limit:
return
for role in self.guild_roles:
if role.name == PUGGER_ROLE:
min_nag_hours = f"{hours_limit:.1f}"
min_nag_hours = min_nag_hours.rstrip("0").rstrip(".")
msg = (f"{role.mention} Need **"
f"{self.num_more_needed()} more puggers** "
"for a game!\n_(This is an automatic ping "
"to all puggers, because the PUG queue is "
f"{(ping_ratio * 100):.0f}% full.\nRest "
"assured, I will only ping you once per "
f"{min_nag_hours} hours, at most.\n"
"If you don't want any of these "
"notifications, please consider "
"temporarily muting this bot or leaving "
f"the {role.mention} server role._)")
await self.guild_channel.send(msg)
break
pug_guilds = {}
@bot.command(brief="Test if bot is active")
async def ping(ctx):
"""Just a standard Discord bot ping test command for confirming whether
the bot is online or not.
"""
await ctx.send("pong")
@bot.command(brief="Join the PUG queue")
async def pug(ctx):
"""Player command for joining the PUG queue.
"""
if ctx.guild not in pug_guilds or not ctx.channel.name == PUG_CHANNEL_NAME:
return
response = ""
join_success, response = await pug_guilds[ctx.guild].player_join(
ctx.message.author)
if join_success:
response = (f"{ctx.message.author.name} has joined the PUG queue "
f"({pug_guilds[ctx.guild].num_queued()} / "
f"{pug_guilds[ctx.guild].num_expected()})")
await ctx.send(f"{response}")
@bot.command(brief="Leave the PUG queue")
async def unpug(ctx):
"""Player command for leaving the PUG queue.
"""
if ctx.guild not in pug_guilds or not ctx.channel.name == PUG_CHANNEL_NAME:
return
leave_success, msg = await pug_guilds[ctx.guild].player_leave(
ctx.message.author)
if leave_success:
msg = (f"{ctx.message.author.name} has left the PUG queue "
f"({pug_guilds[ctx.guild].num_queued()} / "
f"{pug_guilds[ctx.guild].num_expected()})")
await ctx.send(msg)
@bot.command(brief="Empty the server's PUG queue")
async def clearpuggers(ctx):
"""Player command for clearing the PUG queue.
This can be restricted to Discord guild specific admin roles.
"""
if ctx.guild not in pug_guilds or not ctx.channel.name == PUG_CHANNEL_NAME:
return
# If zero pug admin roles are configured, assume anyone can !clearpuggers
if len(cfg("NTBOT_PUG_ADMIN_ROLES")) == 0:
is_allowed = True
else:
pug_admin_roles = [role.value for role in cfg("NTBOT_PUG_ADMIN_ROLES")]
user_roles = [role.name for role in ctx.message.author.roles]
is_allowed = any(role in pug_admin_roles for role in user_roles)
if is_allowed:
await pug_guilds[ctx.guild].reset()
await ctx.send(f"{ctx.message.author.name} has reset the PUG queue")
else:
await ctx.send(f"{ctx.message.author.mention} The PUG queue can only "
f"be reset by users with role(s): _{pug_admin_roles}_")
@bot.command(brief="Get new random teams suggestion for the latest PUG")
async def scramble(ctx):
"""Player command for scrambling the latest full PUG queue.
Can be called multiple times for generating new random teams.
"""
msg = ""
if len(pug_guilds[ctx.guild].prev_puggers) == 0:
msg = (f"{ctx.message.author.mention} Sorry, no previous PUG found to "
"scramble")
else:
random.shuffle(pug_guilds[ctx.guild].prev_puggers)
msg = f"{ctx.message.author.name} suggests scrambled teams:\n"
msg += f"_(random shuffle id: {random_human_readable_phrase()})_\n"
msg += "_Jinrai players:_\n"
for i in range(int(len(pug_guilds[ctx.guild].prev_puggers) / 2)):
msg += f"{pug_guilds[ctx.guild].prev_puggers[i].name}, "
msg = msg[:-2] # trailing ", "
msg += "\n_NSF players:_\n"
for i in range(int(len(pug_guilds[ctx.guild].prev_puggers) / 2),
len(pug_guilds[ctx.guild].prev_puggers)):
msg += f"{pug_guilds[ctx.guild].prev_puggers[i].name}, "
msg = msg[:-2] # trailing ", "
msg += ("\n\nTeams still unbalanced? Use **"
f"{bot.command_prefix}scramble** to suggest new random teams.")
await ctx.send(msg)
@bot.command(brief="List players currently queueing for PUG")
async def puggers(ctx):
"""Player command for listing players currently in the PUG queue.
"""
if ctx.guild not in pug_guilds or not ctx.channel.name == PUG_CHANNEL_NAME:
return
msg = (f"{pug_guilds[ctx.guild].num_queued()} / "
f"{pug_guilds[ctx.guild].num_expected()} player(s) currently "
"queued")
if pug_guilds[ctx.guild].num_queued() > 0:
all_players_queued = pug_guilds[ctx.guild].jin_players + \
pug_guilds[ctx.guild].nsf_players
msg += ": "
for player in all_players_queued:
msg += f"{player.name}, "
msg = msg[:-2] # trailing ", "
await ctx.send(msg)
@commands.cooldown(rate=1, per=cfg("NTBOT_PING_PUGGERS_COOLDOWN_SECS"),
type=commands.BucketType.user)
@bot.command(brief="Ping all players currently queueing for PUG")
async def ping_puggers(ctx):
"""Player command to ping all players currently inside the PUG queue.
"""
if ctx.guild not in pug_guilds or not ctx.channel.name == PUG_CHANNEL_NAME:
# Don't set cooldown for failed invocations.
ping_puggers.reset_cooldown(ctx)
return
pug_admin_roles = [role.value for role in cfg("NTBOT_PUG_ADMIN_ROLES")]
user_roles = [role.name for role in ctx.message.author.roles]
is_admin = any(role in pug_admin_roles for role in user_roles)
# Only admins and players in the queue themselves are allowed to ping queue
if not is_admin:
if ctx.message.author not in pug_guilds[ctx.guild].jin_players and \
ctx.message.author not in pug_guilds[ctx.guild].nsf_players:
if pug_guilds[ctx.guild].num_queued() == 0:
await ctx.send(f"{ctx.author.mention} PUG queue is currently "
"empty.")
else:
await ctx.send(f"{ctx.author.mention} Sorry, to be able to "
"ping the PUG queue, you have to be queued "
"yourself, or have the role(s): "
f"_{pug_admin_roles}_")
ping_puggers.reset_cooldown(ctx)
return
async with pug_guilds[ctx.guild].lock:
# Comparing <=1 instead of 0 because it makes no sense to ping others
# if you're the only one currently in the queue.
if pug_guilds[ctx.guild].num_queued() <= 1:
await ctx.send(f"{ctx.author.mention} There are no other players "
"in the queue to ping!")
ping_puggers.reset_cooldown(ctx)
return
# Require an info message instead of forcing pingees to spend time figuring
# out why they were pinged. We will construct a jump_url to this message.
args = ctx.message.content.split(" ", maxsplit=1)
if len(args) <= 1 or len(args[1].strip()) == 0:
await ctx.send(f"{ctx.author.mention} Please include a message after "
"the command, describing why you pinged the PUG queue.")
ping_puggers.reset_cooldown(ctx)
return
msg = ""
async with pug_guilds[ctx.guild].lock:
for player in [p for p in pug_guilds[ctx.guild].jin_players
if p != ctx.author]:
msg += f"{player.mention}, "
for player in [p for p in pug_guilds[ctx.guild].nsf_players
if p != ctx.author]:
msg += f"{player.mention}, "
msg = msg[:-2] # trailing ", "
msg += (f" User {ctx.author.mention} is pinging the PUG queue: "
f"{ctx.message.jump_url}")
await ctx.send(msg)
# No cooldown for admin pings.
if is_admin:
ping_puggers.reset_cooldown(ctx)
def random_human_readable_phrase():
"""Generates a random human readable phrase to work as an identifier.
Can be used for the !scrambles, to make it easier for players to refer
to specific scramble permutations via voice chat by using these phrases.
"""
base_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"static", "phrase_gen")
with open(file=os.path.join(base_path, "nouns.txt"), mode="r",
encoding="utf-8") as f_nouns:
nouns = f_nouns.readlines()
with open(file=os.path.join(base_path, "adjectives.txt"), mode="r",
encoding="utf-8") as f_adjs:
adjectives = f_adjs.readlines()
phrase = (f"{adjectives[random.randint(0, len(adjectives) - 1)]} "
f"{nouns[random.randint(0, len(nouns) - 1)]}")
return phrase.replace("\n", "").lower()
class ErrorHandlerCog(commands.Cog):
"""Helper class for error handling.
"""
def __init__(self, parent_bot):
self.bot = parent_bot
@commands.Cog.listener()
# pylint: disable=no-self-use
async def on_command_error(self, ctx, err):
"""Error handler for bot commands.
"""
# This could be a typo, or a command meant for another bot.
if isinstance(err, discord.ext.commands.errors.CommandNotFound):
print(f"Ignoring unknown command: \"{ctx.message.content}\"")
return
# This command is on cooldown from being used too often.
if isinstance(err, discord.ext.commands.errors.CommandOnCooldown):
# Returns a human readable "<so and so long> before" string.
retry_after = pendulum.now().diff_for_humans(pendulum.now().add(
seconds=err.retry_after))
await ctx.send(f"{ctx.message.author.mention} You're doing it too "
f"much! Please wait {retry_after} trying again.")
return
# Something else happened! Just raise the error for the logs to catch.
raise err
class PugQueueCog(commands.Cog):
"""PUG queue main event loop.
"""
def __init__(self, parent_bot):
"""Acquire lock for asynchronous queue polling,
and start the queue loop.
"""
# pylint: disable=no-member
self.bot = parent_bot
self.lock = asyncio.Lock()
self.poll_queue.start()
self.clear_inactive_puggers.start()
@tasks.loop(seconds=cfg("NTBOT_POLLING_INTERVAL_SECS"))
async def poll_queue(self):
"""Poll the PUG queue to see if we're ready to play,
and to possibly update our status in various ways.
Iterating and caching per-guild to support multiple Discord
channels simultaneously using the same bot instance with their
own independent player pools.
"""
async with self.lock:
for guild in bot.guilds:
for channel in guild.channels:
if channel.name != PUG_CHANNEL_NAME:
continue
if guild not in pug_guilds:
pug_guilds[guild] = PugStatus(guild_channel=channel,
guild_roles=guild.roles)
await pug_guilds[guild].reload_puggers()
if pug_guilds[guild].is_full():
pug_start_success, msg = \
await pug_guilds[guild].start_pug()
if pug_start_success:
# Before starting pug and resetting queue, manually
# update presence, so we're guaranteed to have the
# presence status fully up-to-date here.
pug_guilds[guild].last_changed_presence = 0
await pug_guilds[guild].update_presence()
# Ping the puggers
await channel.send(msg)
# And finally reset the queue, so we're ready for
# the next PUGs.
await pug_guilds[guild].reset()
else:
await pug_guilds[guild].update_presence()
await pug_guilds[guild].ping_role()
@tasks.loop(hours=1)
async def clear_inactive_puggers(self):
"""Periodically clear inactive puggers from the queue(s).
"""
async with self.lock:
for guild in bot.guilds:
if guild not in pug_guilds:
continue
if pug_guilds[guild].is_full():
continue
for channel in guild.channels:
if channel.name != PUG_CHANNEL_NAME:
continue
await pug_guilds[guild].reload_puggers()
break
bot.add_cog(ErrorHandlerCog(bot))
bot.add_cog(PugQueueCog(bot))
bot.run(BOT_SECRET_TOKEN)
|
the-stack_0_1800 | #!/usr/bin/env python3
import sys
# Read input and output files
infile = sys.argv[1]
outfile = sys.argv[2]
# Create list to store features in
features = []
# Iterate over lines in input Genbank file
for line in open(infile).readlines():
# Store current sequence name
if line.startswith("LOCUS"):
sequence = line.split()[1]
# Store current feature properties
if not line.startswith(" "*6) and line.startswith(" "*5):
# Remove leading and trailing whitespace and split
line = line.strip().split()
# Determine feature type and initialize feature in features list
features.append({"sequence":sequence, "feature":line[0]})
# Determine feature strand
if "complement" in line[1]:
features[-1]["strand"] = "-"
else:
features[-1]["strand"] = "+"
# Remove junk from range
line[1] = line[1].replace("join", "").replace("complement", "")
line[1] = line[1].replace("(", "").replace(")", "")
# Determine feature range
range_values = line[1].replace(",", "..").split("..")
from_to = [range_values[0], range_values[-1]]
# Fix for "join" ranges
if len(range_values) == 4:
if range_values[0] < range_values[3]:
from_to = [range_values[2], range_values[1]]
# Store initial feature attributes
features[-1].update({
"start":from_to[0].replace("<","").replace(">",""),
"end":from_to[1].replace("<","").replace(">",""),
"pseudo":False, "product":""
})
# Skip features with "order"
order = "order" in line[1]
# Determine attributes of interest
elif line.startswith(" "*21):
# Skip features with "order"
if order:
continue
# Remove leading and trailing whitespace
line = line.strip()
# Check for current attribute
if line.startswith("/"):
line = line.lstrip("/").split("=", maxsplit=1)
attribute = line[0]
# Store attribute value
if len(line) > 1:
features[-1][attribute] = line[1].strip('"')
else:
features[-1][attribute] = True
else:
# Continue adding to value from following rows
if not attribute == "translation":
features[-1][attribute] += " "
features[-1][attribute] += line.split('=', maxsplit=1)[0].strip('"')
# Count all old_locus_tag, locus_tag, and gene to find non-unique tags
tag_counts = {"old_locus_tag":{}, "locus_tag":{}, "gene":{}}
for i in range(len(features)):
# Only consider coding sequences
if not features[i]['feature'] == "CDS":
continue
# Count old_locus_tag
try:
tag_counts['old_locus_tag'][features[i]['old_locus_tag']] += 1
except KeyError:
try:
tag_counts['old_locus_tag'][features[i]['old_locus_tag']] = 1
except KeyError:
pass
# Count locus_tag
try:
tag_counts['locus_tag'][features[i]['locus_tag']] += 1
except KeyError:
try:
tag_counts['locus_tag'][features[i]['locus_tag']] = 1
except KeyError:
pass
# Count gene
try:
tag_counts['gene'][features[i]['gene']] += 1
except KeyError:
try:
tag_counts['gene'][features[i]['gene']] = 1
except KeyError:
pass
# Identify all non-unique old_locus_tag, locus_tag, and gene tags
non_uniq = {
'old_locus_tag':set(filter(
lambda x: tag_counts['old_locus_tag'][x] > 1,
tag_counts['old_locus_tag']
)),
'locus_tag':set(filter(
lambda x: tag_counts['locus_tag'][x] > 1,
tag_counts['locus_tag']
)),
'gene':set(filter(
lambda x: tag_counts['gene'][x] > 1,
tag_counts['gene']
))
}
# Rename all features that are non-unique
def rename_feature(feature, feature_type):
try:
if feature[feature_type] in non_uniq[feature_type]:
feature[feature_type] = "_".join([
feature[feature_type], feature['sequence'],
feature['start'], feature['end'], feature['strand']
])
except KeyError:
pass
return feature
# Write features to GFF
outfile = open(outfile, 'w')
for i in range(len(features)):
# Select feature
feature = features[i]
# Only consider coding sequences
if not feature['feature'] == "CDS":
continue
# Rename non-unique tags
for feature_type in ['old_locus_tag', 'locus_tag', 'gene']:
feature = rename_feature(feature, feature_type)
# Write column 1: Sequence
output = feature['sequence'] + "\t"
# Write column 2: Source
output += 'Custom' + "\t"
# Write column 3: Type
output += 'gene' + "\t"
# Write column 4: Start
output += feature['start'] + "\t"
# Write column 5: End
output += feature['end'] + "\t"
# Write column 6: Score
output += '.' + "\t"
# Write column 7: Strand
output += feature['strand'] + "\t"
# Write column 8: Frame
output += '0' + "\t"
# Write column 9: Attributes
try:
locus_tag = feature['old_locus_tag']
except KeyError:
try:
locus_tag = feature['locus_tag']
except KeyError:
locus_tag = feature['gene']
try:
ID = feature['locus_tag']
except KeyError:
ID = feature['gene']
locus_tag = "locus_tag=" + locus_tag
ID = "ID=" + ID
product = "product=" + feature['product'].replace(";", "_")
output += ";".join([product, locus_tag, ID]) + "\n"
junk = outfile.write(output)
outfile.close()
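# Example of a single output line produced above (illustrative values; the nine
# GFF columns are tab-separated):
# NC_000913  Custom  gene  190  255  .  +  0  product=thr operon leader peptide;locus_tag=b0001;ID=b0001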
|
the-stack_0_1801 | import math
import torch
from flambe.metric import MultiLabelCrossEntropy, MultiLabelNLLLoss
def test_cross_entropy_one_hot():
"""Test cross entropy loss when one hot"""
y_pred = torch.tensor([[0.2, 0.8], [0.9, 0.1]])
y_true = torch.tensor([[1, 0], [1, 0]])
loss = MultiLabelCrossEntropy()
assert abs(loss(y_pred, y_true).item() - 0.70429) < 1e-2
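    # Worked check (illustrative): with logits x and true class 0, the per-row
    # cross entropy is log(1 + exp(x_1 - x_0)):
    #   row 1: log(1 + e**0.6)  ~= 1.0375
    #   row 2: log(1 + e**-0.8) ~= 0.3711
    # and their mean ~= 0.7043, matching the 0.70429 asserted above.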
def test_nllloss_one_hot():
"""Test negative log likelihood loss when one hot"""
y_pred = torch.tensor([[0.2, 0.8], [0.9, 0.1]])
y_true = torch.tensor([[1, 0], [1, 0]])
loss = MultiLabelNLLLoss()
assert abs(loss(y_pred, y_true).item() + 0.55) < 1e-2
|
the-stack_0_1803 | from django.conf import settings
from django.urls import include, path
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
path("", TemplateView.as_view(template_name="pages/home.html"), name="home"),
path(
"about/", TemplateView.as_view(template_name="pages/about.html"), name="about"
),
# Django Admin, use {% url 'admin:index' %}
path(settings.ADMIN_URL, admin.site.urls),
# User management
path("users/", include("project.users.urls", namespace="users")),
path("accounts/", include("allauth.urls")),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
path(
"400/",
default_views.bad_request,
kwargs={"exception": Exception("Bad Request!")},
),
path(
"403/",
default_views.permission_denied,
kwargs={"exception": Exception("Permission Denied")},
),
path(
"404/",
default_views.page_not_found,
kwargs={"exception": Exception("Page not Found")},
),
path("500/", default_views.server_error),
]
if "debug_toolbar" in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
|
the-stack_0_1805 | #!/home/lichess4545/web/www.lichess4545.com/env/bin/python
"""
WSGI config for heltour project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
local_dir = os.path.join(os.path.dirname(__file__))
activate_this = '/home/lichess4545/web/www.lichess4545.com/env/bin/activate_this.py'
if os.path.exists(activate_this):
exec(compile(open(activate_this).read(), activate_this, 'exec'), dict(__file__=activate_this))
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("HELTOUR_ENV", "LIVE")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "heltour.settings")
application = get_wsgi_application()
|
the-stack_0_1807 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import boto3
import sure # noqa
from moto import mock_emr
@mock_emr
def test_run_job_flow():
client = boto3.client('emr', region_name='us-east-1')
cluster_id = client.run_job_flow(
Name='cluster',
Instances={
'MasterInstanceType': 'c3.xlarge',
'SlaveInstanceType': 'c3.xlarge',
'InstanceCount': 3,
'Placement': {'AvailabilityZone': 'us-east-1a'},
'KeepJobFlowAliveWhenNoSteps': True,
},
VisibleToAllUsers=True,
)
cluster_id.should.have.key('JobFlowId')
@mock_emr
def test_list_clusters():
client = boto3.client('emr', region_name='us-east-1')
client.run_job_flow(
Name='cluster',
Instances={
'MasterInstanceType': 'c3.xlarge',
'SlaveInstanceType': 'c3.xlarge',
'InstanceCount': 3,
'Placement': {'AvailabilityZone': 'us-east-1a'},
'KeepJobFlowAliveWhenNoSteps': True,
},
VisibleToAllUsers=True,
)
summary = client.list_clusters()
clusters = summary['Clusters']
clusters.should.have.length_of(1)
cluster = clusters[0]
cluster['NormalizedInstanceHours'].should.equal(0)
cluster['Status']['State'].should.equal("RUNNING")
|
the-stack_0_1808 | from kivymd.app import MDApp
from kivymd.uix.button import MDFloatingActionButton, MDRectangleFlatButton,MDFlatButton
from kivymd.uix.screen import Screen
from tkinter.filedialog import askdirectory
from tkinter import Tk
from kivymd.uix.dialog import MDDialog
import time
import os
import shutil
from pydrive.auth import GoogleAuth#this is to import google auth
from pydrive.drive import GoogleDrive#this will import the google drive module
class MainApp(MDApp):
def build(self):
screen = Screen()
btn1 = MDRectangleFlatButton(text='Select Client No', pos_hint={'center_x': 0.5, 'center_y': 0.5},on_release=self.select_client_no)
btn3 = MDRectangleFlatButton(text='Import', pos_hint={'center_x': 0.2, 'center_y': 0.5},on_release=self.func_imp)
btn2 = MDRectangleFlatButton(text='Start', pos_hint={'center_x': 0.8, 'center_y': 0.5},on_release=self.run_prog)
screen.add_widget(btn3)
screen.add_widget(btn2)
screen.add_widget(btn1)
return screen
def func_imp(self,obj):
global path
root=Tk()
path=askdirectory(title="Please select a directory to import")
root.update()
root.destroy()
def select_client_no(self,obj):
self.dialog = MDDialog(title='Select a client no',
size_hint=(0.8, 1),
buttons=[MDRectangleFlatButton(text='2', on_release=self.press_2),
MDRectangleFlatButton(text='1',on_release=self.press_1)])
self.dialog.open()
def press_1(self,obj):
global clientno
clientno="1"
self.dialog.dismiss()
def press_2(self,obj):
global clientno
clientno='2'
self.dialog.dismiss()
def run_prog(self,obj):
        #clientno = the actual client no
        #opp_clientno = the opposite client no
def first_login():#this function will be used when a user needs to login for the first time.
global drive
gauth = GoogleAuth()
gauth.LocalWebserverAuth()
gauth.SaveCredentialsFile("mycreds.txt")
drive = GoogleDrive(gauth)
def not_first_login():#this function will be used when a user had already logged in before.
global drive
gauth = GoogleAuth()
gauth.LoadCredentialsFile("mycreds.txt")
drive=GoogleDrive(gauth)
def exist_notexist():#returns first or not first
try:
with open('mycreds.txt') as reader:
confirmsize=reader.read()
                if len(confirmsize)>2:
return 'not_first'
else:
return 'first'
except:
return 'first'
#this will upload the files
def file_upload(item_file):
upload_file = drive.CreateFile()
upload_file.SetContentFile(item_file) #load local file data into the File instance
            upload_file.Upload() #creates a file in your Drive from the local file's contents
#this will delete the files in the drive
def filedelete(item_file):
file_list = drive.ListFile({'q': "title contains "+"'"+item_file+"'"+" and trashed=false"}).GetList() #find the file using file name.
file_id = file_list[0]['id'] #get the file ID.
file = drive.CreateFile({'id': file_id})
file.Delete()
#this will get the paths of the clients and convert them to lists
        def os_file_list(path_of_the_folder_to_sync):#the output will be received in two variables: the first one is the folder paths list and the second part is the file paths list
global folderpaths_of_client
global filepaths_of_client
folderpaths_of_client=list()
filepaths_of_client=list()
#this will walk through all the folders and subfolders to gather the file paths
for folders,subfolders,files in os.walk(path_of_the_folder_to_sync):#Make a fuction for path!
folderpaths_of_client.append(folders[len(path_of_the_folder_to_sync):])
for file in files:
filepaths_of_client.append(folders[len(path_of_the_folder_to_sync):]+"\\"+file)
folderpaths_of_client.sort()
filepaths_of_client.sort()
return folderpaths_of_client,filepaths_of_client
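        # Illustrative note (example paths are assumptions): for a sync root "C:/sync"
        # containing "C:/sync/docs/a.txt", os_file_list returns folder paths relative to
        # the root (e.g. "" and "\docs") and file paths relative to the root (e.g.
        # "\docs\a.txt"), both sorted, because each os.walk path has the root prefix of
        # length len(path_of_the_folder_to_sync) sliced off.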
def list_writer(list_you_want_to_write_into_a_text_file,nameofthedoc):#you need to give the list first and then the name of the document or textfile
with open(nameofthedoc+'.txt','w') as write_handler:
for item in list_you_want_to_write_into_a_text_file:
write_handler.write(item+'\n')#this will write the files/paths in order.
#This function takes in the document files and converts them to a list.
def list_reader(filename_of_the_textfile):#this will return a list.
try:
with open(filename_of_the_textfile,'r') as reader_handle:
tempreader=reader_handle.read()
return tempreader.split('\n')
except:
log_client('failed to open in list_reader',filename_of_the_textfile)
        def log_client(string,string2optional=''):#can take in two strings, the second string's default value is an empty string.
with open('logfile.txt','a+') as log_writer:
log_writer.write(string+' '+string2optional+'\n')
def copy_file(copyname,copypath):
shutil.copy2(copyname,copypath)
def file_download(item_to_download): #downloading the files from the drive
downloadtry=0
while True:
try:
time.sleep(2)
file_list = drive.ListFile({'q': "title contains "+"'"+item_to_download+"'"+" and trashed=false"}).GetList()#find the file using file name.
file_id = file_list[0]['id'] # get the file ID.
file = drive.CreateFile({'id': file_id})
file.GetContentFile(item_to_download) # downloads the file content and file.
file.Delete()
break
                except: #log the failure and retry (the skip-after-3-tries logic below is commented out)
log_client('failed to download :',item_to_download)
continue
'''downloadtry+=1
if downloadtry>=10:
downloadtry=0
break'''
def file_delete(item_to_delete):#this fuction will be used to delete items
os.remove(item_to_delete)
#Syncing Part Starts here
#this part will take care of signing in
signinvar=exist_notexist()
if signinvar=='first':
first_login()
if signinvar=='not_first':
not_first_login()
#this part of the code will upload the os_file_list() files
#clientno=input('Enter the client no : ')
#path=askdirectory(title='Import the folder you want to sync')#after done the tinkter window needs to be closed.
folderPaths,filePaths= os_file_list(path)
        list_writer(folderPaths,'folderpath'+clientno)#remember folderpath.
        list_writer(filePaths,'filepath'+clientno)#remember file path.
        file_upload('folderpath'+clientno+'.txt')#this will upload the files to the drive.
file_upload('filepath'+clientno+'.txt')#this will upload the files to the drive.
file_delete('folderpath'+clientno+'.txt')#this will delete file paths from the pc.
file_delete('filepath'+clientno+'.txt')#this will delete file paths from the pc.
#This part of the code will download the file paths from the other client.
if clientno=='1':
opp_clientno='2'
if clientno=='2':
opp_clientno='1'
        #we never need to think about the opposite client no again.
file_download('folderpath'+opp_clientno+'.txt')
file_download('filepath'+opp_clientno+'.txt')
#this part of the code will convert the downloaded files into lists
files_from_the_other_client=list_reader('filepath'+opp_clientno+'.txt')
folders_from_the_other_client=list_reader('folderpath'+opp_clientno+'.txt')
file_delete('folderpath'+opp_clientno+'.txt')
file_delete('filepath'+opp_clientno+'.txt')
#this part of the code will compare the lists from the other client and this client:
missing_files_from_this_client=list()
missing_folders_from_this_client=list()
#this will filter the files
for item in files_from_the_other_client:
if item not in filepaths_of_client:
missing_files_from_this_client.append(item)
#this will filter the folder
for item in folders_from_the_other_client:
if item not in folderpaths_of_client:
missing_folders_from_this_client.append(item)
#this part of the code will upload the filelist missing on this client.
#making a list of files that the other client needs to upload
list_writer(missing_files_from_this_client,'filestoupload'+clientno)
file_upload('filestoupload'+clientno+'.txt')
file_delete('filestoupload'+clientno+'.txt')
#this part of the code will download the uploadfilelist
file_download('filestoupload'+opp_clientno+'.txt')
files_to_upload=list_reader('filestoupload'+opp_clientno+'.txt')
file_delete('filestoupload'+opp_clientno+'.txt')
files_to_upload.sort()
#This is the part of code where folders/files will start Syncing.
for item in missing_folders_from_this_client:
if item=='':
continue
os.mkdir(path+item)
if clientno=='1':
#this part will take care of uploading
for item in files_to_upload:
if item=='':
continue
file_upload(path+item) #we might need to move the upload files to the actual path.
#this part will take care of the downloads
for item in missing_files_from_this_client:
if item=='':
continue
name_splitter=item.split('\\')
file=name_splitter[-1]
subtract_file_name=len(item)-len(file)
file_download(file)
while True:
try:
shutil.move(os.getcwd()+'\\'+file,path+item[:subtract_file_name])
log_client(os.getcwd()+'\\'+file+'\n',path+item[:subtract_file_name])
break
except:
log_client(os.getcwd()+'\\'+file+'\n',path+item[:subtract_file_name])
if clientno=='2':
for item in missing_files_from_this_client:
if item=='':
continue
name_splitter=item.split('\\')
file=name_splitter[-1]
subtract_file_name=len(item)-len(file)
file_download(file)
while True:
try:
shutil.move(os.getcwd()+'\\'+file,path+item[:subtract_file_name])
log_client(os.getcwd()+'\\'+file+'\n',path+item[:subtract_file_name])
break
except:
log_client(os.getcwd()+'\\'+file+'\n',path+item[:subtract_file_name])
#this part will take care of uploading
for item in files_to_upload:
if item=='':
continue
file_upload(path+item) #we might need to move the upload files to the actual path.
MainApp().run() |
the-stack_0_1810 | # coding=utf-8
# Copyright 2020 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Some utilities for self-attention estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow as tf
from tensorflow_gan.examples.self_attention_estimator import eval_lib
import tensorflow_gan as tfgan # tf
def get_tpu_run_config_from_hparams(hparams):
"""Create a TPU-suitable RunConfig from HParams."""
tf.compat.v1.logging.info('tpu_location: %s', hparams.tpu_params.tpu_location)
tf.compat.v1.logging.info('gcp_project: %s', hparams.tpu_params.gcp_project)
tf.compat.v1.logging.info('tpu_zone: %s', hparams.tpu_params.tpu_zone)
cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
tpu=hparams.tpu_params.tpu_location,
project=hparams.tpu_params.gcp_project,
zone=hparams.tpu_params.tpu_zone)
if hparams.debug_params.eval_on_tpu:
eval_training_input_configuration = tf.compat.v1.estimator.tpu.InputPipelineConfig.SLICED
else:
# InputPipelineConfig.SLICED is not supported when running on CPU.
eval_training_input_configuration = tf.compat.v1.estimator.tpu.InputPipelineConfig.PER_HOST_V1
return tf.compat.v1.estimator.tpu.RunConfig(
model_dir=hparams.model_dir,
cluster=cluster_resolver,
save_checkpoints_steps=hparams.train_steps_per_eval,
tpu_config=tf.compat.v1.estimator.tpu.TPUConfig(
iterations_per_loop=hparams.tpu_params.tpu_iterations_per_loop,
eval_training_input_configuration=eval_training_input_configuration))
def get_run_config_from_hparams(hparams):
mirrored_strategy = tf.distribute.MirroredStrategy()
return tf.estimator.RunConfig(
model_dir=hparams.model_dir,
save_checkpoints_steps=hparams.train_steps_per_eval,
train_distribute=mirrored_strategy)
def get_tpu_estimator(generator, discriminator, hparams, config):
return tfgan.estimator.TPUGANEstimator(
generator_fn=generator,
discriminator_fn=discriminator,
generator_loss_fn=tfgan.losses.wasserstein_hinge_generator_loss,
discriminator_loss_fn=tfgan.losses.wasserstein_hinge_discriminator_loss,
generator_optimizer=tf.compat.v1.train.AdamOptimizer(
hparams.generator_lr, hparams.beta1),
discriminator_optimizer=tf.compat.v1.train.AdamOptimizer(
hparams.discriminator_lr, hparams.beta1),
prepare_arguments_for_eval_metric_fn=prepare_metric_arguments,
get_eval_metric_ops_fn=functools.partial(get_metrics, hparams=hparams),
eval_on_tpu=hparams.debug_params.eval_on_tpu,
train_batch_size=hparams.train_batch_size,
eval_batch_size=hparams.eval_batch_size,
predict_batch_size=hparams.predict_batch_size,
use_tpu=hparams.debug_params.use_tpu,
config=config,
params=hparams._asdict())
def get_gpu_estimator(generator, discriminator, hparams, config):
"""Returns an Estimator object to be used for training with GPUs."""
def gpu_get_metric(gan_model):
"""A function compatible with GANEstimator's get_eval_metric_ops_fn arg."""
metrics_arguments = prepare_metric_arguments(
gan_model.generator_inputs, gan_model.generated_data,
gan_model.real_data, gan_model.discriminator_real_outputs,
gan_model.discriminator_gen_outputs)
metrics = get_metrics(hparams=hparams, **metrics_arguments)
# Generate image summaries.
real_data = gan_model.real_data
generated_data = gan_model.generated_data
real_images = (
real_data['images'] if isinstance(real_data, dict) else real_data)
gen_images = (
generated_data['images']
if isinstance(generated_data, dict) else generated_data)
metrics.update(_generator_summary_ops(gen_images, real_images))
return metrics
return tfgan.estimator.GANEstimator(
generator_fn=generator,
discriminator_fn=discriminator,
generator_loss_fn=tfgan.losses.wasserstein_hinge_generator_loss,
discriminator_loss_fn=tfgan.losses.wasserstein_hinge_discriminator_loss,
generator_optimizer=tf.compat.v1.train.AdamOptimizer(
hparams.generator_lr, hparams.beta1),
discriminator_optimizer=tf.compat.v1.train.AdamOptimizer(
hparams.discriminator_lr, hparams.beta1),
get_eval_metric_ops_fn=gpu_get_metric,
config=config,
params=hparams._asdict())
def prepare_metric_arguments(generator_inputs, generated_data, real_data,
discriminator_real_outputs,
discriminator_gen_outputs):
"""Prepares the arguments needed for get_metrics.
When training on TPUs, this function should be executed on TPU.
Args:
generator_inputs: Inputs to the generator fn.
generated_data: Output from the generator.
real_data: A sample of real data.
discriminator_real_outputs: Discriminator output on real data.
discriminator_gen_outputs: Discriminator output on generated data.
Returns:
A metric dictionary.
"""
del generator_inputs, discriminator_real_outputs, discriminator_gen_outputs
real_images = (real_data['images'] if isinstance(real_data, dict) else
real_data)
gen_images = (generated_data['images'] if isinstance(generated_data, dict)
else generated_data)
# Get logits and pools for real and generated images.
real_logits, real_pools = eval_lib.get_activations(
lambda: real_images, num_batches=1, get_logits=True)
fake_logits, fake_pools = eval_lib.get_activations(
lambda: gen_images, num_batches=1, get_logits=True)
return {
'real_logits': real_logits,
'real_pools': real_pools,
'fake_logits': fake_logits,
'fake_pools': fake_pools
}
def get_metrics(real_logits, real_pools, fake_logits, fake_pools, hparams):
"""Return metrics for SAGAN experiment on TPU, CPU, or GPU.
When training on TPUs, this function should be executed on the CPU.
Args:
    real_logits: The real_logits object returned by prepare_metric_arguments.
    real_pools: The real_pools object returned by prepare_metric_arguments.
    fake_logits: The fake_logits object returned by prepare_metric_arguments.
    fake_pools: The fake_pools object returned by prepare_metric_arguments.
hparams: An hparams object.
Returns:
A metric dictionary.
"""
del hparams
metric_dict = {
'eval/real_incscore':
tfgan.eval.classifier_score_from_logits_streaming(real_logits),
'eval/incscore':
tfgan.eval.classifier_score_from_logits_streaming(fake_logits),
'eval/fid':
tfgan.eval.frechet_classifier_distance_from_activations_streaming(
real_pools, fake_pools),
}
return metric_dict
def _generator_summary_ops(generated_images, real_images):
"""Creates a dictionary of image summaries."""
real_img_summ = tf.compat.v1.summary.image('real_images', real_images)
gen_img_summ = tf.compat.v1.summary.image('gen_images', generated_images)
real_img_grid = tf.compat.v1.summary.image(
'real_images_grid',
tfgan.eval.image_grid(
real_images[:16],
grid_shape=(4, 4),
image_shape=(128, 128),
num_channels=3))
gen_img_grid = tf.compat.v1.summary.image(
'generated_images_grid',
tfgan.eval.image_grid(
generated_images[:16],
grid_shape=(4, 4),
image_shape=(128, 128),
num_channels=3))
return {
'images/real': (real_img_summ, tf.no_op()),
'images/gen': (gen_img_summ, tf.no_op()),
'image_grid/real': (real_img_grid, tf.no_op()),
'image_grid/gen': (gen_img_grid, tf.no_op()),
}
|
the-stack_0_1812 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from recommonmark.parser import CommonMarkParser
from unittest.mock import MagicMock
from recommonmark.transform import AutoStructify
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
# TODO: https://github.com/rtfd/recommonmark/issues/93
# TODO https://github.com/rtfd/recommonmark/issues/120
# This patch helps in linking markdown files within mardown files
from recommonmark.states import DummyStateMachine
# Monkey patch to fix recommonmark 0.4 doc reference issues.
orig_run_role = DummyStateMachine.run_role
def run_role(self, name, options=None, content=None):
if name == 'doc':
name = 'any'
return orig_run_role(self, name, options, content)
DummyStateMachine.run_role = run_role
# -- Project information -----------------------------------------------------
project = 'Mozhi'
copyright = '2021, Mozhi'
author = 'Mageswaran'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['recommonmark',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx_markdown_tables',
# 'sphinxarg.ext',
# 'm2r', # https://github.com/miyakogi/m2r/pull/55
'sphinx.ext.githubpages']
# 'sphinxcontrib.bibtex',
# 'sphinx.ext.napoleon',
# 'nbsphinx', #https://nbsphinx.readthedocs.io/en/0.6.0/
# 'sphinx_issues', # https://github.com/sloria/sphinx-issues
# 'sphinx_copybutton']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# www.sphinx-doc.org/en/stable/markdown.html
# https://github.com/sphinx-doc/sphinx/issues/7000
source_suffix = {
'.rst': 'restructuredtext',
'.txt': 'markdown',
'.md': 'markdown',
}
source_parsers = {
'.md': CommonMarkParser,
}
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'karma_sphinx_theme'
# html_theme = 'sphinx_book_theme'
# html_theme = 'sphinx_redactor_theme'
html_css_files = [
'mozhi_style.css',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# html_theme_options = {'body_max_width': '90%'}
# Output file base name for HTML help builder.
html_theme_options = {
'navigation_depth': 3,
'includehidden': True,
'titles_only': False
}
htmlhelp_basename = 'mozhi'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_context = {
'css_files': [
'https://fonts.googleapis.com/css?family=Lato',
'_static/css/mozhi_style.css'
],
}
# At the bottom of conf.py
# https://recommonmark.readthedocs.io/en/latest/auto_structify.html
def setup(app):
app.add_config_value('recommonmark_config', {
'enable_auto_toc_tree' : True,
'enable_math': True,
'enable_inline_math': True,
}, True)
app.add_transform(AutoStructify)
app.add_css_file('custom.css') |
the-stack_0_1814 | numeros = ('zero', 'um', 'dois', 'tres', 'quatro', 'cinco',
'seis', 'sete', 'oito', 'nove', 'dez', 'onze',
           'doze', 'treze', 'catorze', 'quinze', 'dezesseis',
           'dezessete', 'dezoito', 'dezenove', 'vinte')
while True:
num = int(input('Digite um número entre 0 e 20: '))
    if 0 <= num <= 20:
break
print('Tente novamente. ', end='')
print(f'Você digitou o número {numeros[num]}.') |
the-stack_0_1816 | import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
from typing import Any, Tuple, Dict, List, Callable
import sqlalchemy
import pymysql
import traceback
import hashlib
import logging
from sqlalchemy.sql import text
try:
# if integration is using an older image (4.5 Server) we don't have expiringdict
from expiringdict import ExpiringDict # pylint: disable=E0401
except Exception:
pass
# In order to use and convert from pymysql to MySQL this line is necessary
pymysql.install_as_MySQLdb()
GLOBAL_CACHE_ATTR = '_generic_sql_engine_cache'
DEFAULT_POOL_TTL = 600
class Client:
"""
Client to use in the SQL databases integration. Overrides BaseClient
makes the connection to the DB server
"""
def __init__(self, dialect: str, host: str, username: str, password: str, port: str,
database: str, connect_parameters: str, ssl_connect: bool, use_pool=False, pool_ttl=DEFAULT_POOL_TTL):
self.dialect = dialect
self.host = host
self.username = username
self.password = password
self.port = port
self.dbname = database
self.connect_parameters = connect_parameters
self.ssl_connect = ssl_connect
self.use_pool = use_pool
self.pool_ttl = pool_ttl
self.connection = self._create_engine_and_connect()
@staticmethod
def _convert_dialect_to_module(dialect: str) -> str:
"""
Converting a dialect to the correct string needed in order to connect the wanted dialect
:param dialect: the SQL db
:return: a key string needed for the connection
"""
if dialect == "MySQL":
module = "mysql"
elif dialect == "PostgreSQL":
module = "postgresql"
elif dialect == "Oracle":
module = "oracle"
elif dialect == "Microsoft SQL Server":
module = "mssql+pyodbc"
else:
module = str(dialect)
return module
@staticmethod
def _get_cache_string(url: str, connect_args: dict) -> str:
to_hash = url + repr(connect_args)
return hashlib.sha256(to_hash.encode('utf-8')).hexdigest()
def _get_global_cache(self) -> dict:
cache = getattr(sqlalchemy, GLOBAL_CACHE_ATTR, None)
if cache is None:
cache = ExpiringDict(100, max_age_seconds=self.pool_ttl)
setattr(sqlalchemy, GLOBAL_CACHE_ATTR, cache)
return cache
def _create_engine_and_connect(self) -> sqlalchemy.engine.base.Connection:
"""
Creating and engine according to the instance preferences and connecting
:return: a connection object that will be used in order to execute SQL queries
"""
module = self._convert_dialect_to_module(self.dialect)
port_part = ''
if self.port:
port_part = f':{self.port}'
db_preferences = f'{module}://{self.username}:{self.password}@{self.host}{port_part}/{self.dbname}'
ssl_connection = {}
if self.dialect == "Microsoft SQL Server":
db_preferences += "?driver=FreeTDS"
if self.connect_parameters and self.dialect == "Microsoft SQL Server":
db_preferences += f'&{self.connect_parameters}'
elif self.connect_parameters and self.dialect != "Microsoft SQL Server":
# a "?" was already added when the driver was defined
db_preferences += f'?{self.connect_parameters}'
if self.ssl_connect:
ssl_connection = {'ssl': {'ssl-mode': 'preferred'}}
engine: sqlalchemy.engine.Engine = None
if self.use_pool:
if 'expiringdict' not in sys.modules:
raise ValueError('Usage of connection pool is not support in this docker image')
cache = self._get_global_cache()
cache_key = self._get_cache_string(db_preferences, ssl_connection)
engine = cache.get(cache_key, None)
if engine is None: # (first time or expired) need to initialize
engine = sqlalchemy.create_engine(db_preferences, connect_args=ssl_connection)
cache[cache_key] = engine
else:
demisto.debug('Initializing engine with no pool (NullPool)')
engine = sqlalchemy.create_engine(db_preferences, connect_args=ssl_connection, poolclass=sqlalchemy.pool.NullPool)
return engine.connect()
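    # Example of the URL built above (illustrative values only): dialect "MySQL" with
    # user "ro_user", host "db.local", port "3306" and database "events" yields
    #   mysql://ro_user:<password>@db.local:3306/events
    # plus an optional "?<connect_parameters>" suffix, while "Microsoft SQL Server"
    # instead gets "?driver=FreeTDS" and "&<connect_parameters>".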
def sql_query_execute_request(self, sql_query: str, bind_vars: Any) -> Tuple[Dict, List]:
"""Execute query in DB via engine
:param bind_vars: in case there are names and values - a bind_var dict, in case there are only values - list
:param sql_query: the SQL query
:return: results of query, table headers
"""
        if isinstance(bind_vars, dict):
sql_query = text(sql_query)
result = self.connection.execute(sql_query, bind_vars)
results = result.fetchall()
headers = []
if results:
# if the table isn't empty
headers = results[0].keys()
return results, headers
def generate_default_port_by_dialect(dialect: str) -> str:
"""
In case no port was chosen, a default port will be chosen according to the SQL db type. Only return a port for
Microsoft SQL Server where it seems to be required. For the other drivers an empty port is supported.
:param dialect: sql db type
:return: default port needed for connection
"""
if dialect == "Microsoft SQL Server":
return "1433"
else:
# use default port supported by the driver
return ""
def generate_bind_vars(bind_variables_names: str, bind_variables_values: str) -> Any:
"""
The bind variables can be given in 2 legal ways: as 2 lists - names and values, or only values
any way defines a different executing way, therefore there are 2 legal return types
:param bind_variables_names: the names of the bind variables, must be in the length of the values list
:param bind_variables_values: the values of the bind variables, can be in the length of the names list
or in case there is no name lists - at any length
:return: a dict or lists of the bind variables
"""
bind_variables_names_list = argToList(bind_variables_names)
bind_variables_values_list = argToList(bind_variables_values)
if bind_variables_values and not bind_variables_names:
        return argToList(bind_variables_values)
    elif len(bind_variables_names_list) == len(bind_variables_values_list):
return dict(zip(bind_variables_names_list, bind_variables_values_list))
else:
raise Exception("The bind variables lists are not is the same length")
def test_module(client: Client, *_) -> Tuple[str, Dict[Any, Any], List[Any]]:
"""
If the connection in the client was successful the test will return OK
if it wasn't an exception will be raised
"""
return 'ok', {}, []
def sql_query_execute(client: Client, args: dict, *_) -> Tuple[str, Dict[str, Any], List[Dict[str, Any]]]:
"""
Executes the sql query with the connection that was configured in the client
:param client: the client object with the db connection
:param args: demisto.args() including the sql query
:return: Demisto outputs
"""
try:
sql_query = str(args.get('query'))
limit = int(args.get('limit', 50))
skip = int(args.get('skip', 0))
bind_variables_names = args.get('bind_variables_names', "")
bind_variables_values = args.get('bind_variables_values', "")
bind_variables = generate_bind_vars(bind_variables_names, bind_variables_values)
result, headers = client.sql_query_execute_request(sql_query, bind_variables)
# converting an sqlalchemy object to a table
converted_table = [dict(row) for row in result]
# converting b'' and datetime objects to readable ones
table = [{str(key): str(value) for key, value in dictionary.items()} for dictionary in converted_table]
table = table[skip:skip + limit]
human_readable = tableToMarkdown(name="Query result:", t=table, headers=headers,
removeNull=True)
context = {
'Result': table,
'Query': sql_query,
'InstanceName': f'{client.dialect}_{client.dbname}'
}
entry_context: Dict = {'GenericSQL(val.Query && val.Query === obj.Query)': {'GenericSQL': context}}
return human_readable, entry_context, table
except Exception as err:
# In case there is no query executed and only an action e.g - insert, delete, update
# the result will raise an exception when we try to read the data from it
if str(err) == "This result object does not return rows. It has been closed automatically.":
human_readable = "Command executed"
return human_readable, {}, []
raise err
# list of loggers we should set to debug when running in debug_mode
# taken from: https://docs.sqlalchemy.org/en/13/core/engines.html#configuring-logging
DEBUG_LOGGERS = [
'sqlalchemy.engine',
'sqlalchemy.pool',
'sqlalchemy.dialects',
]
def main():
sql_loggers: list = [] # saves the debug loggers
try:
if is_debug_mode():
for lgr_name in DEBUG_LOGGERS:
lgr = logging.getLogger(lgr_name)
sql_loggers.append(lgr)
demisto.debug(f'setting DEBUG for logger: {repr(lgr)}')
lgr.setLevel(logging.DEBUG)
params = demisto.params()
dialect = params.get('dialect')
port = params.get('port')
if not port:
port = generate_default_port_by_dialect(dialect)
user = params.get("credentials").get("identifier")
password = params.get("credentials").get("password")
host = params.get('host')
database = params.get('dbname')
ssl_connect = params.get('ssl_connect')
connect_parameters = params.get('connect_parameters')
use_pool = params.get('use_pool', False)
pool_ttl = int(params.get('pool_ttl') or DEFAULT_POOL_TTL)
if pool_ttl <= 0:
pool_ttl = DEFAULT_POOL_TTL
command = demisto.command()
LOG(f'Command being called in SQL is: {command}')
client = Client(dialect=dialect, host=host, username=user, password=password,
port=port, database=database, connect_parameters=connect_parameters,
ssl_connect=ssl_connect, use_pool=use_pool, pool_ttl=pool_ttl)
commands: Dict[str, Callable[[Client, Dict[str, str], str], Tuple[str, Dict[Any, Any], List[Any]]]] = {
'test-module': test_module,
'query': sql_query_execute,
'sql-command': sql_query_execute
}
if command in commands:
return_outputs(*commands[command](client, demisto.args(), command))
else:
raise NotImplementedError(f'{command} is not an existing Generic SQL command')
except Exception as err:
return_error(f'Unexpected error: {str(err)} \nquery: {demisto.args().get("query")} \n{traceback.format_exc()}')
finally:
try:
if client.connection:
client.connection.close()
except Exception as ex:
            demisto.error(f'Failed closing connection: {str(ex)}')
if sql_loggers:
for lgr in sql_loggers:
demisto.debug(f'setting WARN for logger: {repr(lgr)}')
lgr.setLevel(logging.WARN)
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
the-stack_0_1818 | # -----------------------------------------------------------------------------
#
# Copyright (c) 2017 Sam Cox, Roberto Sommariva
#
# This file is part of the AtChem2 software package.
#
# This file is covered by the MIT license which can be found in the file
# LICENSE.md at the top level of the AtChem2 distribution.
#
# -----------------------------------------------------------------------------
## Plotting tool for the AtChem2 model output
## --> Python version [requires numpy & matplotlib]
##
## Acknowledgements: M. Panagi
##
## ARGUMENT:
## - directory with the model output
##
## USAGE:
## python ./tools/plot/plot-atchem2-numpy.py ./model/output/
## ---------------------------------------------- ##
from __future__ import print_function
import os, sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
os.chdir(sys.argv[1])
print(os.getcwd())
with open('speciesConcentrations.output') as f:
var1 = f.readline().split()
with open('environmentVariables.output') as f:
var2 = f.readline().split()
with open('photolysisRates.output') as f:
var3 = f.readline().split()
with open('photolysisRatesParameters.output') as f:
var4 = f.readline().split()
df1 = np.loadtxt('speciesConcentrations.output', skiprows=1, unpack=True)
df2 = np.loadtxt('environmentVariables.output', skiprows=1, unpack=True)
df3 = np.loadtxt('photolysisRates.output', skiprows=1, unpack=True)
df4 = np.loadtxt('photolysisRatesParameters.output', skiprows=1, unpack=True)
nc1 = df1.shape[0]
nc2 = df2.shape[0]
nc3 = df3.shape[0]
nc4 = df4.shape[0]
## ---------------------------- ##
with PdfPages('atchem2_output.pdf') as pdf:
## speciesConcentrations.output
fig = plt.figure(figsize=(11,7))
j = 1
for i in range(1,nc1):
ax = fig.add_subplot(3,2,j)
ax.plot(df1[0], df1[i], linestyle='-', color='black')
ax.set(title=var1[i], xlabel='seconds', ylabel='')
plt.tight_layout()
plt.ticklabel_format(style='sci', axis='y', useMathText=True)
if j == 6:
pdf.savefig(fig)
fig = plt.figure(figsize=(11,7))
j = 1
else:
j = j + 1
pdf.savefig(fig)
## environmentVariables.output
fig = plt.figure(figsize=(11,7))
j = 1
for i in range(1,nc2):
ax = fig.add_subplot(3,2,j)
ax.plot(df2[0], df2[i], linestyle='-', color='black')
ax.set(title=var2[i], xlabel='seconds', ylabel='')
plt.tight_layout()
plt.ticklabel_format(style='sci', axis='y', useMathText=True)
if j == 6:
pdf.savefig(fig)
fig = plt.figure(figsize=(11,7))
j = 1
else:
j = j + 1
pdf.savefig(fig)
## photolysisRates.output
fig = plt.figure(figsize=(11,7))
j = 1
for i in range(1,nc3):
ax = fig.add_subplot(3,2,j)
ax.plot(df3[0], df3[i], linestyle='-', color='black')
ax.set(title=var3[i], xlabel='seconds', ylabel='')
plt.tight_layout()
plt.ticklabel_format(style='sci', axis='y', useMathText=True)
if j == 6:
pdf.savefig(fig)
fig = plt.figure(figsize=(11,7))
j = 1
else:
j = j + 1
pdf.savefig(fig)
## photolysisRatesParameters.output
fig = plt.figure(figsize=(11,7))
j = 1
for i in range(1,nc4):
ax = fig.add_subplot(3,2,j)
ax.plot(df4[0], df4[i], linestyle='-', color='black')
ax.set(title=var4[i], xlabel='seconds', ylabel='')
plt.tight_layout()
plt.ticklabel_format(style='sci', axis='y', useMathText=True)
if j == 6:
pdf.savefig(fig)
fig = plt.figure(figsize=(11,7))
j = 1
else:
j = j + 1
pdf.savefig(fig)
## ---------------------------- ##
print("\n===> atchem2_output.pdf created in directory:", sys.argv[1], "\n\n")
|
the-stack_0_1822 | import os
import sys
import h5py
import numpy as np
import pandas as pd
import tensorflow as tf
from fm_preprocessing import DeepFmData, Dataset
from nn_loss_metrics import get_config
from utils import top_ratio_hit_rate, train_sampling, calc_threshold_vs_depth
from deepFM import DeepFM
from xDeepFM import xDeepFM
from AFM import AFM
from utils import colorize
from sklearn.metrics import roc_auc_score
import warnings
warnings.filterwarnings(action='ignore')
def train_test_split(Xv,Xi,y, test_ratio=0.1, seed=None):
index = list(range(len(Xv)))
Xv = np.asarray(Xv)
Xi = np.asarray(Xi)
y = np.asarray(y)
np.random.seed(seed)
np.random.shuffle(index)
test_size = int(len(Xv)*test_ratio)
test_index = index[-test_size:]
train_index = index[:-test_size]
train_Xv = Xv[train_index]
test_Xv = Xv[test_index]
train_Xi = Xi[train_index]
test_Xi = Xi[test_index]
train_y = y[train_index]
test_y = y[test_index]
return train_Xv.tolist(), test_Xv.tolist(), train_Xi.tolist(),test_Xi.tolist(), train_y, test_y
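# Illustrative sketch (made-up toy data, not part of the original script): the split
# shuffles row indices once with the given seed, then takes the last `test_ratio`
# fraction of that shuffled order as the test split, keeping Xv/Xi/y aligned row-for-row.
def _split_example():
    Xv = [[0.1, 1.0], [0.2, 1.0], [0.3, 1.0], [0.4, 1.0]]
    Xi = [[0, 3], [1, 3], [2, 3], [0, 4]]
    y = [[0], [1], [0], [1]]
    return train_test_split(Xv, Xi, y, test_ratio=0.25, seed=0)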
def data_preprocess(train_data, test_data=None, label='is_y2', deepEnc = None, batch_size=128,
skew_threshold=5, val_ratio=0.2, double_process='z-score', save_h5_file=None,
seed=None):
train_y = train_data[label].values.reshape(-1,1)
train_data.drop(columns=[label],inplace=True)
# ---------------train data
if deepEnc is None:
enc = DeepFmData(skew_threshold=skew_threshold,double_process=double_process)
enc.fit(train_data,y=None)
else:
enc = deepEnc
train_feat_val, train_feat_index = enc.transform(train_data, y=None, normalize_double=True) #list of list
#-----------------val data
if val_ratio is not None:
(train_feat_val, val_feat_val,
train_feat_index, val_feat_index,
train_y,val_y ) = train_test_split(train_feat_val, train_feat_index, train_y,test_ratio=val_ratio, seed=seed)
else:
(val_feat_val, val_feat_index,val_y) =[None]*3
train_data = Dataset(train_feat_val, train_feat_index, train_y, batch_size, shuffle=True)
#---------------test_data-----------------
if test_data is not None:
test_y = test_data[label].values.reshape(-1,1)
test_data.drop(columns=[label],inplace=True)
test_feat_val, test_feat_index = enc.transform(test_data, y=None, normalize_double=True)
test_data = Dataset(test_feat_val, test_feat_index,test_y, batch_size)
else:
(test_feat_val, test_feat_index,test_y) =[None]*3
if save_h5_file is not None:
with h5py.File(save_h5_file,'w') as fw:
train = fw.create_group('train')
train.create_dataset('train_feat_val', data = np.array(train_feat_val))
train.create_dataset('train_feat_index',data = np.array(train_feat_index))
train.create_dataset('train_y', data= np.array(train_y))
val = fw.create_group('val')
val.create_dataset('val_feat_val', data =np.array(val_feat_val))
val.create_dataset('val_feat_index',data= np.array(val_feat_index))
val.create_dataset('val_y', data=np.array(val_y))
test = fw.create_group('test')
test.create_dataset('test_feat_val', data =np.array(test_feat_val))
test.create_dataset('test_feat_index',data= np.array(test_feat_index))
test.create_dataset('test_y', data=np.array(test_y))
return enc, train_data, test_data, train_feat_val, train_feat_index, train_y, val_feat_val, val_feat_index, val_y
def load_h5_data(h5file, batch_size=128, shuffle=True):
assert os.path.exists(h5file)
with h5py.File(h5file, 'r') as fr:
print('train-null', np.isnan(fr['train']['train_feat_val'].value).sum())
train_feat_val = fr['train']['train_feat_val'].value.tolist()
train_feat_index = fr['train']['train_feat_index'].value.tolist()
train_y = fr['train']['train_y'].value
train_data = Dataset(train_feat_val, train_feat_index, train_y, batch_size, shuffle=True)
val_feat_val = fr['val']['val_feat_val'].value.tolist()
val_feat_index = fr['val']['val_feat_index'].value.tolist()
val_y = fr['val']['val_y'].value
test_feat_val = fr['test']['test_feat_val'].value.tolist()
test_feat_index = fr['test']['test_feat_index'].value.tolist()
test_y = fr['test']['test_y'].value
test_data = Dataset(test_feat_val, test_feat_index,test_y, batch_size)
return train_data, test_data, train_feat_val, train_feat_index, train_y, val_feat_val, val_feat_index, val_y
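# Cache layout note: the h5 file read above is the one written by data_preprocess when
# save_h5_file is given -- groups 'train', 'val' and 'test', each holding
# '<split>_feat_val', '<split>_feat_index' and '<split>_y' datasets -- so repeated runs
# can skip refitting DeepFmData and re-transforming the raw frames.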
if __name__ == '__main__':
import yaml,json
# pd.set_option('max_colwidth',10)
# os.environ["CUDA_VISIBLE_DEVICES"] ='0'
sess_config = get_config(frac=0.4, allow_growth=True, gpu="1")
pd.set_option('display.max_columns', 20)
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
with open('./conf.yaml','r') as fp:
        config = yaml.safe_load(fp)
model_type = config['model']
config = config.get(model_type)
print(json.dumps(config,indent=2))
# config = config['deepFM'] if model_type=='deepFM' else config['xDeepFM']
data_config, params = config['data'],config['params']
print(' {} '.format(model_type).center(50,'='))
train_file = data_config['train'] #"/home/yuanyuqing163/hb_rl/data/raw/train_bj_dl_200.pkl"
test_file = data_config['test'] #"/home/yuanyuqing163/hb_rl/data/raw/val_bj_dl_200.pkl"
train_data = pd.read_pickle(train_file)
test_data = pd.read_pickle(test_file)
train_data = train_sampling(train_data, col='is_y2', method='down', pn_ratio=0.2, seed=2020)
# train_data = train_sampling(train_data, col='is_y2', method='up', pn_ratio=0.5, seed=2019)
# train_data = train_sampling(train_data, col='is_y2', method='down', pn_ratio=0.5, seed=2019)
# train_data = train_sampling(train_data, col='is_y2', method='down', pn_ratio=0.05, seed=2019)
if data_config.pop('load_cache'):
enc = DeepFmData()
enc.load(data_config['enc_file']) #'./model/DeepFmData_bjdl200.pkl'
(train_data, test_data,
train_feat_val, train_feat_index, train_y,
val_feat_val, val_feat_index, val_y) = load_h5_data(data_config['cache_file'], batch_size= params['batch_size'], shuffle=True) #'./data/bj_dl_200.h5'
else:
(enc, train_data, test_data,
train_feat_val, train_feat_index, train_y,
val_feat_val, val_feat_index, val_y) = data_preprocess(train_data, test_data,
deepEnc = None, batch_size= params['batch_size'], skew_threshold=5, val_ratio=0.2,
double_process='min-max', save_h5_file=data_config['cache_file'],label='is_y2')
enc.save(data_config['enc_file'])
print(enc._field_dim, enc._feature_dim)
params.update({'feature_size':enc._feature_dim})
params.update({'field_size':enc._field_dim})
if model_type.lower()=='deepfm':
model = DeepFM(params)
elif model_type.lower() =='xdeepfm':
model = xDeepFM(params)
elif model_type.lower() =='afm':
model = AFM(params)
else:
raise ValueError('{} not supported yet'.format(model_type))
with tf.Session(config=sess_config) as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer()) # global_step counter etc.
sys.stdout.flush()
best_hit_rate = 0
best_epoch = 0
best_loss = np.finfo('float32').max
stop_cnt = 0
if params['training_model']:
#---------------training---------------------------------
for epoch in range(params['epoch']):
print('epoch ={}'.format(epoch).center(50,'-'))
for batch, (xi, xv, y) in enumerate(train_data):
# print(xv)
step, prob = model.train(sess, xi, xv, y)
# print(prob.min(),prob.max())
if batch %1000 ==0:
train_loss, train_entropy, train_reg = model.evaluate(sess, train_feat_index, train_feat_val, train_y, batch_size=128)
print('train_loss=%.4f,\ttrain_ce=%.4f,\treg=%.4f'% (train_loss, train_entropy, train_reg))
val_loss,val_entropy, val_reg = model.evaluate(sess, val_feat_index, val_feat_val, val_y, batch_size=128)
print('val_loss=%.4f,\tval_ce=%.4f,\treg=%.4f' %(val_loss, val_entropy, val_reg))
# if epoch%10 ==0 or epoch == params['epoch']-1:
model.save(sess, model.checkpoint_dir, epoch)
prob = model.predict(sess, train_feat_index, train_feat_val, batch_size=128)
hit_rate, top_k = top_ratio_hit_rate(np.array(train_y).ravel(), prob, top_ratio=0.001) # ravel return view, flatten return copy
train_auc = roc_auc_score(np.array(train_y).ravel(), prob)
print(colorize('\nk={}, train_1/1000 ={:.4}'.format(top_k ,hit_rate),'cyan',True))
#-----------------test-----------------------------------
probs =[]
ys=[]
for xi, xv, y in test_data:
prob = model.predict(sess, xi, xv) # list of np.ndarry->array
probs.extend(prob.tolist())
ys.extend(y.tolist())
hit_rate, top_k = top_ratio_hit_rate(np.array(ys).ravel(), np.array(probs), top_ratio=0.001)
val_auc = roc_auc_score(np.array(ys).ravel(), np.array(probs))
print(colorize('k={}, test_1/1000 ={:.4}'.format(top_k ,hit_rate),'cyan',True))
print(colorize('train_auc={:.4}, val_auc={:.4}'.format(train_auc,val_auc),'cyan', True))
if hit_rate > best_hit_rate:
best_hit_rate, best_epoch = hit_rate, epoch
print(colorize('cur_best_rate ={:.4}'.format(best_hit_rate),'cyan',True))
if hit_rate>0.8:
calc_threshold_vs_depth(np.asarray(ys).ravel(), np.asarray(probs))
# early stopping
if (val_entropy+5e-5)<best_loss:
best_loss = val_entropy
stop_cnt = 0
else:
stop_cnt += 1
if stop_cnt > 20:
break
print(colorize('epoch={}, best_hit_rate={}'.format(best_epoch, best_hit_rate),'cyan',True))
else:
model.restore(sess, os.path.split(model.checkpoint_dir)[0])
probs=[]
ys =[]
for xi, xv, y in train_data:
prob = model.predict(sess, xi, xv) # np.ndarry
probs.extend(prob[0].ravel().tolist())
ys.extend(y.tolist())
hit_rate, top_k = top_ratio_hit_rate(np.array(ys).ravel(), np.array(probs).ravel(), top_ratio=0.001)
print('train-top-k={}, train-hit-rate={}'.format(top_k ,hit_rate))
probs=[]
ys=[]
for xi, xv, y in test_data:
prob = model.predict(sess, xi, xv) # np.ndarry
# print(type(prob), prob[0])
probs.extend(prob[0].ravel().tolist())
ys.extend(y.tolist())
hit_rate, top_k = top_ratio_hit_rate(np.array(ys).ravel(), np.array(probs).ravel(), top_ratio=0.001)
print('test-top-k={}, test-hit-rate={}'.format(top_k ,hit_rate)) |
the-stack_0_1823 | # Copyright (c) 2011-2020 Eric Froemling
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
"""Functionality related to player-controlled Spazzes."""
from __future__ import annotations
from typing import TYPE_CHECKING, TypeVar, overload
import ba
from bastd.actor.spaz import Spaz
if TYPE_CHECKING:
from typing import Any, Sequence, Tuple, Optional, Type
from typing_extensions import Literal
PlayerType = TypeVar('PlayerType', bound=ba.Player)
TeamType = TypeVar('TeamType', bound=ba.Team)
class PlayerSpazHurtMessage:
"""A message saying a ba.PlayerSpaz was hurt.
category: Message Classes
Attributes:
spaz
The ba.PlayerSpaz that was hurt
"""
def __init__(self, spaz: PlayerSpaz):
"""Instantiate with the given ba.Spaz value."""
self.spaz = spaz
class PlayerSpaz(Spaz):
"""A ba.Spaz subclass meant to be controlled by a ba.Player.
category: Gameplay Classes
When a PlayerSpaz dies, it delivers a ba.PlayerDiedMessage
to the current ba.Activity. (unless the death was the result of the
player leaving the game, in which case no message is sent)
When a PlayerSpaz is hurt, it delivers a ba.PlayerSpazHurtMessage
to the current ba.Activity.
"""
def __init__(self,
player: ba.Player,
color: Sequence[float] = (1.0, 1.0, 1.0),
highlight: Sequence[float] = (0.5, 0.5, 0.5),
character: str = 'Spaz',
powerups_expire: bool = True):
"""Create a spaz for the provided ba.Player.
Note: this does not wire up any controls;
you must call connect_controls_to_player() to do so.
"""
super().__init__(color=color,
highlight=highlight,
character=character,
source_player=player,
start_invincible=True,
powerups_expire=powerups_expire)
self.last_player_attacked_by: Optional[ba.Player] = None
self.last_attacked_time = 0.0
self.last_attacked_type: Optional[Tuple[str, str]] = None
self.held_count = 0
self.last_player_held_by: Optional[ba.Player] = None
self._player = player
self._drive_player_position()
@overload
def getplayer(self,
playertype: Type[PlayerType],
doraise: Literal[False] = False) -> Optional[PlayerType]:
...
@overload
def getplayer(self, playertype: Type[PlayerType],
doraise: Literal[True]) -> PlayerType:
...
def getplayer(self,
playertype: Type[PlayerType],
doraise: bool = False) -> Optional[PlayerType]:
"""Get the ba.Player associated with this Spaz.
By default this will return None if the Player no longer exists.
If you are logically certain that the Player still exists, pass
doraise=False to get a non-optional return type.
"""
player: Any = self._player
assert isinstance(player, playertype)
if not player.exists() and doraise:
raise ba.PlayerNotFoundError()
return player if player.exists() else None
def connect_controls_to_player(self,
enable_jump: bool = True,
enable_punch: bool = True,
enable_pickup: bool = True,
enable_bomb: bool = True,
enable_run: bool = True,
enable_fly: bool = True) -> None:
"""Wire this spaz up to the provided ba.Player.
Full control of the character is given by default
but can be selectively limited by passing False
to specific arguments.
"""
player = self.getplayer(ba.Player)
assert player
# Reset any currently connected player and/or the player we're
# wiring up.
if self._connected_to_player:
if player != self._connected_to_player:
player.reset_input()
self.disconnect_controls_from_player()
else:
player.reset_input()
player.assign_input_call('upDown', self.on_move_up_down)
player.assign_input_call('leftRight', self.on_move_left_right)
player.assign_input_call('holdPositionPress',
self._on_hold_position_press)
player.assign_input_call('holdPositionRelease',
self._on_hold_position_release)
if enable_jump:
player.assign_input_call('jumpPress', self.on_jump_press)
player.assign_input_call('jumpRelease', self.on_jump_release)
if enable_pickup:
player.assign_input_call('pickUpPress', self.on_pickup_press)
player.assign_input_call('pickUpRelease', self.on_pickup_release)
if enable_punch:
player.assign_input_call('punchPress', self.on_punch_press)
player.assign_input_call('punchRelease', self.on_punch_release)
if enable_bomb:
player.assign_input_call('bombPress', self.on_bomb_press)
player.assign_input_call('bombRelease', self.on_bomb_release)
if enable_run:
player.assign_input_call('run', self.on_run)
if enable_fly:
player.assign_input_call('flyPress', self.on_fly_press)
player.assign_input_call('flyRelease', self.on_fly_release)
self._connected_to_player = player
def disconnect_controls_from_player(self) -> None:
"""
Completely sever any previously connected
ba.Player from control of this spaz.
"""
if self._connected_to_player:
self._connected_to_player.reset_input()
self._connected_to_player = None
# Send releases for anything in case its held.
self.on_move_up_down(0)
self.on_move_left_right(0)
self._on_hold_position_release()
self.on_jump_release()
self.on_pickup_release()
self.on_punch_release()
self.on_bomb_release()
self.on_run(0.0)
self.on_fly_release()
else:
print('WARNING: disconnect_controls_from_player() called for'
' non-connected player')
def handlemessage(self, msg: Any) -> Any:
# FIXME: Tidy this up.
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
# pylint: disable=too-many-nested-blocks
if __debug__:
self._handlemessage_sanity_check()
# Keep track of if we're being held and by who most recently.
if isinstance(msg, ba.PickedUpMessage):
super().handlemessage(msg) # Augment standard behavior.
self.held_count += 1
picked_up_by = ba.playercast_o(type(self._player),
msg.node.source_player)
if picked_up_by:
self.last_player_held_by = picked_up_by
elif isinstance(msg, ba.DroppedMessage):
super().handlemessage(msg) # Augment standard behavior.
self.held_count -= 1
if self.held_count < 0:
print('ERROR: spaz held_count < 0')
# Let's count someone dropping us as an attack.
try:
picked_up_by_2 = ba.playercast_o(type(self._player),
msg.node.source_player)
except Exception:
picked_up_by_2 = None
if picked_up_by_2:
self.last_player_attacked_by = picked_up_by_2
self.last_attacked_time = ba.time()
self.last_attacked_type = ('picked_up', 'default')
elif isinstance(msg, ba.StandMessage):
super().handlemessage(msg) # Augment standard behavior.
# Our Spaz was just moved somewhere. Explicitly update
# our associated player's position in case it is being used
# for logic (otherwise it will be out of date until next step)
self._drive_player_position()
elif isinstance(msg, ba.DieMessage):
# Report player deaths to the game.
if not self._dead:
# Immediate-mode or left-game deaths don't count as 'kills'.
killed = (not msg.immediate
and msg.how is not ba.DeathType.LEFT_GAME)
activity = self._activity()
player = self.getplayer(ba.Player, doraise=False)
if not killed:
killerplayer = None
else:
# If this player was being held at the time of death,
# the holder is the killer.
if self.held_count > 0 and self.last_player_held_by:
killerplayer = self.last_player_held_by
else:
# Otherwise, if they were attacked by someone in the
# last few seconds, that person is the killer.
# Otherwise it was a suicide.
# FIXME: Currently disabling suicides in Co-Op since
# all bot kills would register as suicides; need to
# change this from last_player_attacked_by to
# something like last_actor_attacked_by to fix that.
if (self.last_player_attacked_by
and ba.time() - self.last_attacked_time < 4.0):
killerplayer = self.last_player_attacked_by
else:
# ok, call it a suicide unless we're in co-op
if (activity is not None and not isinstance(
activity.session, ba.CoopSession)):
killerplayer = player
else:
killerplayer = None
# We should never wind up with a dead-reference here;
# we want to use None in that case.
assert killerplayer is None or killerplayer
# Only report if both the player and the activity still exist.
if killed and activity is not None and player:
activity.handlemessage(
ba.PlayerDiedMessage(player, killed, killerplayer,
msg.how))
super().handlemessage(msg) # Augment standard behavior.
# Keep track of the player who last hit us for point rewarding.
elif isinstance(msg, ba.HitMessage):
source_player = msg.get_source_player(type(self._player))
if source_player:
self.last_player_attacked_by = source_player
self.last_attacked_time = ba.time()
self.last_attacked_type = (msg.hit_type, msg.hit_subtype)
super().handlemessage(msg) # Augment standard behavior.
activity = self._activity()
if activity is not None and self._player.exists():
activity.handlemessage(PlayerSpazHurtMessage(self))
else:
super().handlemessage(msg)
def _drive_player_position(self) -> None:
"""Drive our ba.Player's official position
If our position is changed explicitly, this should be called again
to instantly update the player position (otherwise it would be out
of date until the next sim step)
"""
player = self._player
if player:
assert self.node
assert player.node
self.node.connectattr('torso_position', player.node, 'position')
|
the-stack_0_1826 | """
Helper functions for command line utilities.
"""
import argparse
import json
import logging
import logging.handlers
import os
import sys
import threading
import time
import warnings
from smqtk.utils.base_object import SmqtkObject
from smqtk.utils.dict import merge_dict
def initialize_logging(logger, stream_level=logging.WARNING,
output_filepath=None, file_level=None):
"""
Standard logging initialization.
:param logger: Logger instance to initialize
:type logger: logging.Logger
:param stream_level: Logging level to set for the stderr stream formatter.
:type stream_level: int
:param output_filepath: Output logging from the given logger to the provided
file path. Currently, we log to that file indefinitely, i.e. no
rollover. Rollover may be added in the future if the need arises.
:type output_filepath: str
    :param file_level: Logging level to output to the file. This is the same
        as the stream level by default.
"""
log_formatter = logging.Formatter(
"%(levelname)7s - %(asctime)s - %(name)s.%(funcName)s - %(message)s"
)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(log_formatter)
stream_handler.setLevel(stream_level)
logger.addHandler(stream_handler)
if output_filepath:
# TODO: Setup rotating part of the handler?
file_handler = logging.handlers.RotatingFileHandler(
output_filepath, mode='w', delay=True
)
file_handler.setFormatter(log_formatter)
file_handler.setLevel(file_level or stream_level)
logger.addHandler(file_handler)
# Because there are two levels checked before a logging message is emitted:
# * the logging object's level
# * The stream handlers level
logger.setLevel(min(stream_level, file_level or stream_level))
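# --- Illustrative usage sketch (not part of the original module). The logger
# name and file path below are assumptions chosen for this example.
def _example_initialize_logging():
    """Hedged sketch: stderr at INFO plus an optional DEBUG file log."""
    log = logging.getLogger("example.tool")  # assumed logger name
    initialize_logging(log, stream_level=logging.INFO,
                       output_filepath="/tmp/example_tool.log",  # assumed path
                       file_level=logging.DEBUG)
    log.debug("recorded by the file handler only")
    log.info("recorded by both stderr and the file handler")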
def load_config(config_path, defaults=None):
"""
Load the JSON configuration dictionary from the specified filepath.
If the given path does not point to a valid file, we return an empty
dictionary or the default dictionary if one was provided, returning False
as our second return argument.
:param config_path: Path to the (valid) JSON configuration file.
:type config_path: str
:param defaults: Optional default configuration dictionary to merge loaded
configuration into. If provided, it will be modified in place.
:type defaults: dict | None
:return: The result configuration dictionary and if we successfully loaded
a JSON dictionary from the given filepath.
:rtype: (dict, bool)
"""
if defaults is None:
defaults = {}
loaded = False
if config_path and os.path.isfile(config_path):
with open(config_path) as cf:
merge_dict(defaults, json.load(cf))
loaded = True
return defaults, loaded
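# --- Illustrative usage sketch (not part of the original module). The config
# path and default keys are assumptions for this example.
def _example_load_config():
    """Hedged sketch: merge an on-disk JSON config over in-code defaults."""
    defaults = {"verbose": False, "batch_size": 32}  # assumed defaults
    config, loaded = load_config("/tmp/example_config.json", defaults)
    if not loaded:
        # No readable file at that path; ``config`` is just the defaults.
        pass
    return config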
def output_config(output_path, config_dict, log=None, overwrite=False,
error_rc=1):
"""
If a valid output configuration path is provided, we output the given
configuration dictionary as JSON or error if the file already exists (when
overwrite is False) or cannot be written. We exit the program as long as
``output_path`` was given a value, with a return code of 0 if the file was
written successfully, or the supplied return code (default of 1) if the
write failed.
Specified error return code cannot be 0, which is reserved for successful
operation.
:raises ValueError: If the given error return code is 0.
:param output_path: Path to write the configuration file to.
:type output_path: str
:param config_dict: Configuration dictionary containing JSON-compliant
values.
:type config_dict: dict
:param overwrite: If we should clobber any existing file at the specified
path. We exit with the error code if this is false and a file exists at
``output_path``.
:type overwrite: bool
:param error_rc: Custom integer error return code to use instead of 1.
:type error_rc: int
:param log: Optionally logging instance. Otherwise we use a local one.
:type log: logging.Logger
"""
error_rc = int(error_rc)
if error_rc == 0:
raise ValueError("Error return code cannot be 0.")
if log is None:
log = logging.getLogger(__name__)
if output_path:
if os.path.exists(output_path) and not overwrite:
log.error("Output configuration file path already exists! (%s)",
output_path)
sys.exit(error_rc)
else:
log.info("Outputting JSON configuration to: %s", output_path)
with open(output_path, 'w') as f:
json.dump(config_dict, f, indent=4, check_circular=True,
separators=(',', ': '), sort_keys=True)
sys.exit(0)
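# --- Illustrative usage sketch (not part of the original module). Note that
# ``output_config`` calls ``sys.exit`` whenever an output path is given, so it
# is normally the last step of a "--generate-config" branch.
def _example_output_config(generate_path):
    """Hedged sketch: dump assumed defaults and exit if a path was given."""
    config = {"plugin": {"type": None}}  # assumed default configuration
    # No-op when ``generate_path`` is empty/None; exits the process otherwise.
    output_config(generate_path, config, overwrite=False)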
class ProgressReporter (SmqtkObject):
"""
Helper utility for reporting the state of a loop and the rate at which
looping is occurring based on lapsed wall-time and a given reporting
interval.
Includes optional methods that are thread-safe.
TODO: Add parameter for an optionally known total number of increments.
"""
def __init__(self, log_func, interval, what_per_second="Loops"):
"""
Initialize this reporter.
:param log_func: Logging function to use.
:type log_func: (str, *args, **kwds) -> None
:param interval: Time interval to perform reporting in seconds. If no
reporting during incrementation should occur, infinity should be
passed.
:type interval: float
:param str what_per_second:
String label about what is happening or being iterated over per
second. The provided string should make sense when followed by
" per second ...".
"""
self.log_func = log_func
self.interval = float(interval)
self.what_per_second = what_per_second
self.lock = threading.RLock()
# c_last : Increment count at the time of the last report. Updated after
# report in ``increment_report``.
# c : Current Increment count, updated in ``increment_report``.
# c_delta: Delta between the increment current and previous count at the
# time of the last report. Updated at the time of reporting in
# ``increment_report``.
self.c_last = self.c = self.c_delta = 0
# t_last : Time of the last report. Updated after report in
# ``increment_report``.
# t : Current time, Updated in ``increment_report``
# t_delta: Delta between current time and the time of the last report.
# Updated in ``increment_report``.
self.t_last = self.t = self.t_delta = self.t_start = 0.0
self.started = False
def start(self):
""" Start the timing state of this reporter.
Repeated calls to this method resets the state of the reporting for
multiple uses.
This method is thread-safe.
:returns: Self
:rtype: ProgressReporter
"""
with self.lock:
self.started = True
self.c_last = self.c = self.c_delta = 0
self.t_last = self.t = self.t_start = time.time()
self.t_delta = 0.0
return self
def increment_report(self):
"""
Increment counter and time since last report, reporting if delta exceeds
the set reporting interval period.
"""
if not self.started:
raise RuntimeError("Reporter needs to be started first.")
self.c += 1
self.c_delta = self.c - self.c_last
self.t = time.time()
self.t_delta = self.t - self.t_last
# Only report if its been ``interval`` seconds since the last
# report.
if self.t_delta >= self.interval:
self.report()
self.t_last = self.t
self.c_last = self.c
def increment_report_threadsafe(self):
"""
The same as ``increment_report`` but additionally acquires a lock on
resources first for thread-safety.
This version of the method is a little more costly due to the lock
acquisition.
"""
with self.lock:
self.increment_report()
def report(self):
"""
Report the current state.
Does nothing if no increments have occurred yet.
"""
if not self.started:
raise RuntimeError("Reporter needs to be started first.")
# divide-by-zero safeguard
if self.t_delta > 0 and (self.t - self.t_start) > 0:
self.log_func("%s per second %f (avg %f) "
"(%d current interval / %d total)"
% (self.what_per_second,
self.c_delta / self.t_delta,
self.c / (self.t - self.t_start),
self.c_delta,
self.c))
def report_threadsafe(self):
"""
The same as ``report`` but additionally acquires a lock on
resources first for thread-safety.
This version of the method is a little more costly due to the lock
acquisition.
"""
with self.lock:
self.report()
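# --- Illustrative usage sketch (not part of the original module). The loop
# body and the 2-second interval are assumptions for this example.
def _example_progress_reporter(items):
    """Hedged sketch of the start / increment_report / report cycle."""
    log = logging.getLogger(__name__)
    reporter = ProgressReporter(log.info, interval=2.0,
                                what_per_second="Items").start()
    for _ in items:
        # ... process one item here ...
        reporter.increment_report()  # logs at most every ~2 seconds
    reporter.report()  # final summary regardless of the interval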
def report_progress(log, state, interval):
"""
Loop progress reporting function that logs (when in debug) loops per
second, loops in the last reporting period and total loops executed.
The ``state`` given to this function must be a list of 7 integers, initially
all set to 0. This function will update the fields of the state as its is
called to control when reporting should happen and what to report.
    A report can be effectively forced for a call by setting ``state[3] = 0``
    or ``interval`` to ``0``.
:param log: Logger logging function to use to send reporting message to.
:type log: (str, *args, **kwargs) -> None
    :param state: Reporting state. This should be initialized to a list of 7
        zeros (floats), and then should not be modified externally from this
        function.
:type state: list[float]
:param interval: Frequency in seconds that reporting messages should be
made. This should be greater than 0.
:type interval: float
"""
warnings.warn("``report_progress`` is deprecated. Please use the"
"``ProgressReporter`` class instead.",
DeprecationWarning)
# State format (c=count, t=time:
# [last_c, c, delta_c, last_t, t, delta_t, starting_t]
# [ 0, 1, 2, 3, 4, 5, 6 ]
# Starting time
if not state[6]:
state[3] = state[6] = time.time()
state[1] += 1
state[4] = time.time()
state[5] = state[4] - state[3]
if state[5] >= interval:
state[2] = state[1] - state[0]
# TODO: Could possibly to something with ncurses
# - to maintain a single line.
try:
loops_per_second = state[2] / state[5]
avg_loops_per_second = state[1] / (state[4] - state[6])
except ZeroDivisionError:
loops_per_second = 0
avg_loops_per_second = 0
log("Loops per second %f (avg %f) (%d this interval / %d total)"
% (loops_per_second,
avg_loops_per_second,
state[2], state[1]))
state[3] = state[4]
state[0] = state[1]
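# --- Illustrative usage sketch (not part of the original module) for the
# deprecated 7-element ``state`` protocol documented above.
def _example_report_progress(items):
    """Hedged sketch: seven zeros, reporting roughly every second."""
    log = logging.getLogger(__name__)
    state = [0.0] * 7  # [last_c, c, delta_c, last_t, t, delta_t, start_t]
    for _ in items:
        report_progress(log.debug, state, interval=1.0)
    state[3] = 0  # force one final report on the next call
    report_progress(log.debug, state, interval=1.0)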
def basic_cli_parser(description=None, configuration_group=True):
"""
Generate an ``argparse.ArgumentParser`` with the given description and the
basic options for verbosity and configuration/generation paths.
The returned parser instance has an option for extra verbosity
    (-v/--verbose) and a group for configuration specification (-c/--config)
    and configuration generation (-g/--generate-config) if enabled (true by
    default).
:param description: Optional description string for the parser.
:type description: str
:param configuration_group: Whether or not to include the configuration
group options.
:type configuration_group: bool
:return: Argument parser instance with basic options.
:rtype: argparse.ArgumentParser
"""
parser = argparse.ArgumentParser(
description=description,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('-v', '--verbose',
default=False, action='store_true',
help='Output additional debug logging.')
if configuration_group:
g_config = parser.add_argument_group('Configuration')
g_config.add_argument('-c', '--config',
metavar="PATH",
help='Path to the JSON configuration file.')
g_config.add_argument('-g', '--generate-config',
metavar="PATH",
help='Optionally generate a default '
'configuration file at the specified path. '
'If a configuration file was provided, we '
'update the default configuration with the '
'contents of the given configuration.')
return parser
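# --- Illustrative usage sketch (not part of the original module). The
# description string and the extra option are assumptions for this example.
def _example_basic_cli_parser():
    """Hedged sketch: extend the basic parser with a tool-specific option."""
    parser = basic_cli_parser("Example SMQTK-style utility.")
    parser.add_argument("--dry-run", action="store_true",
                        help="Parse the configuration but do nothing.")
    return parser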
def utility_main_helper(default_config, args, additional_logging_domains=(),
skip_logging_init=False, default_config_valid=False):
"""
Helper function for utilities standardizing logging initialization, CLI
parsing and configuration loading/generation.
Specific utilities should use this in their main function. This
encapsulates the following standard actions:
- using ``argparse`` parser results to drive logging initialization
(can be skipped if initialized externally)
- handling loaded configuration merger onto the default
- handling configuration generation based on given default and possibly
specified input config.
:param default_config: Function returning default configuration (JSON)
dictionary for the utility. This should take no arguments.
:type default_config: () -> dict
:param args: Parsed arguments from argparse.ArgumentParser instance as
returned from ``parser.parse_args()``.
:type args: argparse.Namespace
:param additional_logging_domains: We initialize logging on the base
``smqtk`` and ``__main__`` namespace. Any additional namespaces under
which logging should be reported should be added here as an iterable.
:type additional_logging_domains: collections.abc.Iterable[str]
:param skip_logging_init: Skip initialize logging in this function because
it is done elsewhere externally.
:type skip_logging_init: bool
:param default_config_valid: Whether the default config returned from the
generator is a valid config to continue execution with or not.
:type default_config_valid: bool
:return: Loaded configuration dictionary.
:rtype: dict
"""
# noinspection PyUnresolvedReferences
config_filepath = args.config
# noinspection PyUnresolvedReferences
config_generate = args.generate_config
# noinspection PyUnresolvedReferences
verbose = args.verbose
if not skip_logging_init:
llevel = logging.INFO
if verbose:
llevel = logging.DEBUG
initialize_logging(logging.getLogger('smqtk'), llevel)
initialize_logging(logging.getLogger('__main__'), llevel)
for d in additional_logging_domains:
initialize_logging(logging.getLogger(d), llevel)
config, config_loaded = load_config(config_filepath, default_config())
output_config(config_generate, config, overwrite=True)
if not (config_loaded or default_config_valid):
raise RuntimeError("No configuration loaded (not trusting default).")
return config
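# --- Illustrative end-to-end sketch (not part of the original module). The
# default-config contents and the trailing logic are assumptions.
def _example_utility_main():
    """Hedged sketch of a utility main() built on the helpers above."""
    def default_config():
        return {"input_path": None, "verbose_extra": False}  # assumed defaults
    args = basic_cli_parser("Example utility.").parse_args()
    # Trust the in-code defaults when no -c/--config is supplied (assumption
    # made for this sketch via default_config_valid=True).
    config = utility_main_helper(default_config, args,
                                 default_config_valid=True)
    logging.getLogger(__name__).info("Loaded configuration: %s", config)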
|
the-stack_0_1827 | from param import args
import sys
# sys.path.insert(0, '/R2R-EnvDrop/build')
if args.upload:
sys.path.insert(0, '/R2R-Aux/build')
else:
sys.path.insert(0, 'build')
# setup_seed(args.seed)
import torch
import os
import time
import json
import numpy as np
from collections import defaultdict
from speaker import Speaker
from utils import read_vocab,write_vocab,build_vocab,Tokenizer,padding_idx,timeSince, read_img_features, get_sync_dir
import utils
from env import R2RBatch
from agent import Seq2SeqAgent
from eval import Evaluation
from polyaxon_client.tracking import get_outputs_refs_paths
if args.train == 'validlistener' and args.upload:
refs_paths = get_outputs_refs_paths()['experiments'][0]
print(refs_paths)
load_model = os.path.join(refs_paths,args.load)
print(load_model)
import warnings
warnings.filterwarnings("ignore")
from tensorboardX import SummaryWriter
from polyaxon_client.tracking import get_outputs_path
if args.upload:
train_vocab = get_sync_dir(os.path.join(args.upload_path,args.TRAIN_VOCAB))
trainval_vocab = get_sync_dir(os.path.join(args.upload_path,args.TRAINVAL_VOCAB))
features = get_sync_dir(os.path.join(args.upload_path,args.IMAGENET_FEATURES))
output_dir = get_outputs_path()
log_dir = os.path.join(output_dir, "snap", args.name)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
# sparse_obj_feat = get_sync_dir(os.path.join(args.upload_path, args.SPARSE_OBJ_FEATURES))
# dense_obj_feat1 = get_sync_dir(os.path.join(args.upload_path, args.DENSE_OBJ_FEATURES1))
# dense_obj_feat2 = get_sync_dir(os.path.join(args.upload_path, args.DENSE_OBJ_FEATURES2))
# bbox = get_sync_dir(os.path.join(args.upload_path, args.BBOX_FEATURES))
else:
train_vocab = os.path.join(args.R2R_Aux_path,args.TRAIN_VOCAB)
trainval_vocab = os.path.join(args.R2R_Aux_path,args.TRAINVAL_VOCAB)
features = os.path.join(args.R2R_Aux_path,args.IMAGENET_FEATURES)
log_dir = 'snap/%s' % args.name
if not os.path.exists(log_dir):
os.makedirs(log_dir)
# sparse_obj_feat = os.path.join(args.R2R_Aux_path, args.SPARSE_OBJ_FEATURES)
# dense_obj_feat1 = os.path.join(args.R2R_Aux_path, args.DENSE_OBJ_FEATURES1)
# dense_obj_feat2 = os.path.join(args.R2R_Aux_path, args.DENSE_OBJ_FEATURES2)
# bbox = os.path.join(args.R2R_Aux_path, args.BBOX_FEATURES)
if args.fast_train:
name, ext = os.path.splitext(features)
features = name + "-fast" + ext
feedback_method = args.feedback # teacher or sample
print(args)
def train_speaker(train_env, tok, n_iters, log_every=500, val_envs={}):
writer = SummaryWriter(logdir=log_dir)
listner = Seq2SeqAgent(train_env, "", tok, args.maxAction)
speaker = Speaker(train_env, listner, tok)
if args.fast_train:
log_every = 40
best_bleu = defaultdict(lambda: 0)
best_loss = defaultdict(lambda: 1232)
for idx in range(0, n_iters, log_every):
interval = min(log_every, n_iters - idx)
# Train for log_every interval
speaker.env = train_env
speaker.train(interval) # Train interval iters
print()
print("Iter: %d" % idx)
# Evaluation
for env_name, (env, evaluator) in val_envs.items():
if 'train' in env_name: # Ignore the large training set for the efficiency
continue
print("............ Evaluating %s ............." % env_name)
speaker.env = env
path2inst, loss, word_accu, sent_accu = speaker.valid()
path_id = next(iter(path2inst.keys()))
print("Inference: ", tok.decode_sentence(path2inst[path_id]))
print("GT: ", evaluator.gt[str(path_id)]['instructions'])
bleu_score, precisions = evaluator.bleu_score(path2inst)
# Tensorboard log
writer.add_scalar("bleu/%s" % (env_name), bleu_score, idx)
writer.add_scalar("loss/%s" % (env_name), loss, idx)
writer.add_scalar("word_accu/%s" % (env_name), word_accu, idx)
writer.add_scalar("sent_accu/%s" % (env_name), sent_accu, idx)
writer.add_scalar("bleu4/%s" % (env_name), precisions[3], idx)
# Save the model according to the bleu score
if bleu_score > best_bleu[env_name]:
best_bleu[env_name] = bleu_score
print('Save the model with %s BEST env bleu %0.4f' % (env_name, bleu_score))
speaker.save(idx, os.path.join(log_dir, 'state_dict', 'best_%s_bleu' % env_name))
if loss < best_loss[env_name]:
best_loss[env_name] = loss
print('Save the model with %s BEST env loss %0.4f' % (env_name, loss))
speaker.save(idx, os.path.join(log_dir, 'state_dict', 'best_%s_loss' % env_name))
# Screen print out
print("Bleu 1: %0.4f Bleu 2: %0.4f, Bleu 3 :%0.4f, Bleu 4: %0.4f" % tuple(precisions))
def train(train_env, tok, n_iters, log_every=100, val_envs={}, aug_env=None):
writer = SummaryWriter(logdir=log_dir)
listner = Seq2SeqAgent(train_env, "", tok, args.maxAction)
speaker = None
if args.self_train:
speaker = Speaker(train_env, listner, tok)
if args.speaker is not None:
if args.upload:
print("Load the speaker from %s." % args.speaker)
speaker.load(get_sync_dir(os.path.join(args.upload_path,args.speaker)))
else:
print("Load the speaker from %s." % args.speaker)
speaker.load(os.path.join(args.R2R_Aux_path, args.speaker))
start_iter = 0
if args.load is not None:
if args.upload:
refs_paths = get_outputs_refs_paths()['experiments'][0]
print(refs_paths)
load_model = os.path.join(refs_paths, args.load)
print(load_model)
print("LOAD THE listener from %s" % load_model)
start_iter = listner.load(load_model)
else:
print("LOAD THE listener from %s" % args.load)
start_iter = listner.load(os.path.join(args.R2R_Aux_path, args.load))
start = time.time()
best_val = {'val_seen': {"accu": 0., "state":"", 'update':False},
'val_unseen': {"accu": 0., "state":"", 'update':False}}
if args.fast_train:
log_every = 40
for idx in range(start_iter, start_iter+n_iters, log_every):
listner.logs = defaultdict(list)
interval = min(log_every, start_iter+n_iters-idx)
iter = idx + interval
# Train for log_every interval
if aug_env is None: # The default training process
listner.env = train_env
listner.train(interval, feedback=feedback_method) # Train interval iters
else:
if args.accumulate_grad:
for _ in range(interval // 2):
listner.zero_grad()
listner.env = train_env
# Train with GT data
args.ml_weight = 0.2
listner.accumulate_gradient(feedback_method)
listner.env = aug_env
# Train with Back Translation
args.ml_weight = 0.6 # Sem-Configuration
listner.accumulate_gradient(feedback_method, speaker=speaker)
listner.optim_step()
else:
for _ in range(interval // 2):
# Train with GT data
listner.env = train_env
args.ml_weight = 0.2
listner.train(1, feedback=feedback_method)
# Train with Back Translation
listner.env = aug_env
args.ml_weight = 0.6
listner.train(1, feedback=feedback_method, speaker=speaker)
# Log the training stats to tensorboard
total = max(sum(listner.logs['total']), 1)
length = max(len(listner.logs['critic_loss']), 1)
critic_loss = sum(listner.logs['critic_loss']) / total #/ length / args.batchSize
entropy = sum(listner.logs['entropy']) / total #/ length / args.batchSize
predict_loss = sum(listner.logs['us_loss']) / max(len(listner.logs['us_loss']), 1)
writer.add_scalar("loss/critic", critic_loss, idx)
writer.add_scalar("policy_entropy", entropy, idx)
writer.add_scalar("loss/unsupervised", predict_loss, idx)
writer.add_scalar("total_actions", total, idx)
writer.add_scalar("max_length", length, idx)
print("total_actions", total)
print("max_length", length)
# Run validation
loss_str = ""
for env_name, (env, evaluator) in val_envs.items():
listner.env = env
# Get validation loss under the same conditions as training
iters = None if args.fast_train or env_name != 'train' else 20 # 20 * 64 = 1280
# Get validation distance from goal under test evaluation conditions
listner.test(use_dropout=False, feedback='argmax', iters=iters)
result = listner.get_results()
score_summary, _ = evaluator.score(result)
loss_str += "%s " % env_name
for metric,val in score_summary.items():
if metric in ['success_rate']:
writer.add_scalar("%s/accuracy" % env_name, val, idx)
if env_name in best_val:
if val > best_val[env_name]['accu']:
best_val[env_name]['accu'] = val
best_val[env_name]['update'] = True
if metric in ['spl']:
writer.add_scalar("%s/spl" % env_name, val, idx)
loss_str += ', %s: %.3f' % (metric, val)
loss_str += '\n'
loss_str += '\n'
for env_name in best_val:
if best_val[env_name]['update']:
best_val[env_name]['state'] = 'Iter %d \n%s' % (iter, loss_str)
best_val[env_name]['update'] = False
                file_dir = os.path.join(log_dir, "state_dict", "best_%s" % (env_name))
listner.save(idx, file_dir)
print(('%s (%d %d%%) \n%s' % (timeSince(start, float(iter)/n_iters),
iter, float(iter)/n_iters*100, loss_str)))
if iter % 1000 == 0:
print("BEST RESULT TILL NOW")
for env_name in best_val:
print(env_name, best_val[env_name]['state'])
if iter % 40000 == 0:
            file_dir = os.path.join(log_dir, "state_dict", "Iter_%06d" % (iter))
listner.save(idx, file_dir)
# file_dir = os.path.join(output_dir, "snap", args.name, "state_dict", "LAST_iter%d" % (idx))
# listner.save(idx, file_dir)
def valid(train_env, tok, val_envs={}):
agent = Seq2SeqAgent(train_env, "", tok, args.maxAction)
if args.upload:
print("Loaded the listener model at iter %d from %s" % (agent.load(load_model), load_model))
else:
print("Loaded the listener model at iter %d from %s" % (agent.load(os.path.join(args.R2R_Aux_path, args.load)),
os.path.join(args.R2R_Aux_path, args.load)))
for env_name, (env, evaluator) in val_envs.items():
agent.logs = defaultdict(list)
agent.env = env
iters = None
agent.test(use_dropout=False, feedback='argmax', iters=iters)
result = agent.get_results()
if env_name != '':
score_summary, _ = evaluator.score(result)
loss_str = "Env name: %s" % env_name
for metric,val in score_summary.items():
loss_str += ', %s: %.4f' % (metric, val)
print(loss_str)
if args.submit:
json.dump(
result,
open(os.path.join(log_dir, "submit_%s.json" % env_name), 'w'),
sort_keys=True, indent=4, separators=(',', ': ')
)
def beam_valid(train_env, tok, val_envs={}):
listener = Seq2SeqAgent(train_env, "", tok, args.maxAction)
speaker = Speaker(train_env, listener, tok)
if args.speaker is not None:
print("Load the speaker from %s." % args.speaker)
speaker.load(args.speaker)
print("Loaded the listener model at iter % d" % listener.load(args.load))
final_log = ""
for env_name, (env, evaluator) in val_envs.items():
listener.logs = defaultdict(list)
listener.env = env
listener.beam_search_test(speaker)
results = listener.results
def cal_score(x, alpha, avg_speaker, avg_listener):
speaker_score = sum(x["speaker_scores"]) * alpha
if avg_speaker:
speaker_score /= len(x["speaker_scores"])
# normalizer = sum(math.log(k) for k in x['listener_actions'])
normalizer = 0.
listener_score = (sum(x["listener_scores"]) + normalizer) * (1-alpha)
if avg_listener:
listener_score /= len(x["listener_scores"])
return speaker_score + listener_score
if args.param_search:
# Search for the best speaker / listener ratio
interval = 0.01
logs = []
for avg_speaker in [False, True]:
for avg_listener in [False, True]:
for alpha in np.arange(0, 1 + interval, interval):
result_for_eval = []
for key in results:
result_for_eval.append({
"instr_id": key,
"trajectory": max(results[key]['paths'],
key=lambda x: cal_score(x, alpha, avg_speaker, avg_listener)
)['trajectory']
})
score_summary, _ = evaluator.score(result_for_eval)
for metric,val in score_summary.items():
if metric in ['success_rate']:
print("Avg speaker %s, Avg listener %s, For the speaker weight %0.4f, the result is %0.4f" %
(avg_speaker, avg_listener, alpha, val))
logs.append((avg_speaker, avg_listener, alpha, val))
tmp_result = "Env Name %s\n" % (env_name) + \
"Avg speaker %s, Avg listener %s, For the speaker weight %0.4f, the result is %0.4f\n" % max(logs, key=lambda x: x[3])
print(tmp_result)
# print("Env Name %s" % (env_name))
# print("Avg speaker %s, Avg listener %s, For the speaker weight %0.4f, the result is %0.4f" %
# max(logs, key=lambda x: x[3]))
final_log += tmp_result
print()
else:
avg_speaker = True
avg_listener = True
alpha = args.alpha
result_for_eval = []
for key in results:
result_for_eval.append({
"instr_id": key,
"trajectory": [(vp, 0, 0) for vp in results[key]['dijk_path']] + \
max(results[key]['paths'],
key=lambda x: cal_score(x, alpha, avg_speaker, avg_listener)
)['trajectory']
})
# result_for_eval = utils.add_exploration(result_for_eval)
score_summary, _ = evaluator.score(result_for_eval)
if env_name != 'test':
loss_str = "Env Name: %s" % env_name
for metric, val in score_summary.items():
if metric in ['success_rate']:
print("Avg speaker %s, Avg listener %s, For the speaker weight %0.4f, the result is %0.4f" %
(avg_speaker, avg_listener, alpha, val))
loss_str += ",%s: %0.4f " % (metric, val)
print(loss_str)
print()
if args.submit:
json.dump(
result_for_eval,
open(os.path.join(log_dir, "submit_%s.json" % env_name), 'w'),
sort_keys=True, indent=4, separators=(',', ': ')
)
print(final_log)
def setup():
torch.manual_seed(1)
torch.cuda.manual_seed(1)
# Check for vocabs
if not os.path.exists(train_vocab):
write_vocab(build_vocab(splits=['train']), train_vocab)
if not os.path.exists(trainval_vocab):
write_vocab(build_vocab(splits=['train','val_seen','val_unseen']), trainval_vocab)
def train_val():
''' Train on the training set, and validate on seen and unseen splits. '''
# args.fast_train = True
setup()
# Create a batch training environment that will also preprocess text
vocab = read_vocab(train_vocab)
tok = Tokenizer(vocab=vocab, encoding_length=args.maxInput)
feat_dict = read_img_features(features)
featurized_scans = set([key.split("_")[0] for key in list(feat_dict.keys())])
train_env = R2RBatch(feat_dict, batch_size=args.batchSize, splits=['train'], tokenizer=tok)
from collections import OrderedDict
val_env_names = ['val_unseen', 'val_seen']
if args.submit:
val_env_names.append('test')
else:
pass
#val_env_names.append('train')
if not args.beam:
val_env_names.append("train")
val_envs = OrderedDict(
((split,
(R2RBatch(feat_dict, batch_size=args.batchSize, splits=[split], tokenizer=tok),
Evaluation([split], featurized_scans, tok))
)
for split in val_env_names
)
)
if args.train == 'listener':
train(train_env, tok, args.iters, val_envs=val_envs)
elif args.train == 'validlistener':
if args.beam:
beam_valid(train_env, tok, val_envs=val_envs)
else:
valid(train_env, tok, val_envs=val_envs)
elif args.train == 'speaker':
train_speaker(train_env, tok, args.iters, val_envs=val_envs)
elif args.train == 'validspeaker':
valid_speaker(tok, val_envs)
else:
assert False
def valid_speaker(tok, val_envs):
import tqdm
listner = Seq2SeqAgent(None, "", tok, args.maxAction)
speaker = Speaker(None, listner, tok)
speaker.load(args.load)
for env_name, (env, evaluator) in val_envs.items():
if env_name == 'train':
continue
print("............ Evaluating %s ............." % env_name)
speaker.env = env
path2inst, loss, word_accu, sent_accu = speaker.valid(wrapper=tqdm.tqdm)
path_id = next(iter(path2inst.keys()))
print("Inference: ", tok.decode_sentence(path2inst[path_id]))
print("GT: ", evaluator.gt[path_id]['instructions'])
pathXinst = list(path2inst.items())
name2score = evaluator.lang_eval(pathXinst, no_metrics={'METEOR'})
score_string = " "
for score_name, score in name2score.items():
score_string += "%s_%s: %0.4f " % (env_name, score_name, score)
print("For env %s" % env_name)
print(score_string)
print("Average Length %0.4f" % utils.average_length(path2inst))
def train_val_augment():
"""
Train the listener with the augmented data
"""
setup()
# Create a batch training environment that will also preprocess text
vocab = read_vocab(train_vocab)
tok = Tokenizer(vocab=vocab, encoding_length=args.maxInput)
# Load the env img features
feat_dict = read_img_features(features)
featurized_scans = set([key.split("_")[0] for key in list(feat_dict.keys())])
# Load the augmentation data
if args.upload:
aug_path = get_sync_dir(os.path.join(args.upload_path, args.aug))
else:
        aug_path = os.path.join(args.R2R_Aux_path, args.aug)
# Create the training environment
train_env = R2RBatch(feat_dict, batch_size=args.batchSize,
splits=['train'], tokenizer=tok)
aug_env = R2RBatch(feat_dict, batch_size=args.batchSize,
splits=[aug_path], tokenizer=tok, name='aug')
# Printing out the statistics of the dataset
stats = train_env.get_statistics()
print("The training data_size is : %d" % train_env.size())
print("The average instruction length of the dataset is %0.4f." % (stats['length']))
print("The average action length of the dataset is %0.4f." % (stats['path']))
stats = aug_env.get_statistics()
print("The augmentation data size is %d" % aug_env.size())
print("The average instruction length of the dataset is %0.4f." % (stats['length']))
print("The average action length of the dataset is %0.4f." % (stats['path']))
# Setup the validation data
val_envs = {split: (R2RBatch(feat_dict, batch_size=args.batchSize, splits=[split],
tokenizer=tok), Evaluation([split], featurized_scans, tok))
for split in ['train', 'val_seen', 'val_unseen']}
# Start training
train(train_env, tok, args.iters, val_envs=val_envs, aug_env=aug_env)
if __name__ == "__main__":
if args.train in ['speaker', 'rlspeaker', 'validspeaker',
'listener', 'validlistener']:
train_val()
elif args.train == 'auglistener':
train_val_augment()
else:
assert False
|
the-stack_0_1828 | # coding: utf-8
"""
Sentim's Emotion APIs
An emotion recognition api that tells you the emotion of text, and not just the connotation. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ResultItem(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'index': 'int',
'emotion': 'str',
'emotion_score': 'EmotionScore'
}
attribute_map = {
'index': 'Index',
'emotion': 'Emotion',
'emotion_score': 'EmotionScore'
}
def __init__(self, index=None, emotion=None, emotion_score=None): # noqa: E501
"""ResultItem - a model defined in OpenAPI""" # noqa: E501
self._index = None
self._emotion = None
self._emotion_score = None
self.discriminator = None
if index is not None:
self.index = index
if emotion is not None:
self.emotion = emotion
if emotion_score is not None:
self.emotion_score = emotion_score
@property
def index(self):
"""Gets the index of this ResultItem. # noqa: E501
The index of the conversation or list that was classified. # noqa: E501
:return: The index of this ResultItem. # noqa: E501
:rtype: int
"""
return self._index
@index.setter
def index(self, index):
"""Sets the index of this ResultItem.
The index of the conversation or list that was classified. # noqa: E501
:param index: The index of this ResultItem. # noqa: E501
:type: int
"""
self._index = index
@property
def emotion(self):
"""Gets the emotion of this ResultItem. # noqa: E501
The classified emotion of the message. # noqa: E501
:return: The emotion of this ResultItem. # noqa: E501
:rtype: str
"""
return self._emotion
@emotion.setter
def emotion(self, emotion):
"""Sets the emotion of this ResultItem.
The classified emotion of the message. # noqa: E501
:param emotion: The emotion of this ResultItem. # noqa: E501
:type: str
"""
self._emotion = emotion
@property
def emotion_score(self):
"""Gets the emotion_score of this ResultItem. # noqa: E501
:return: The emotion_score of this ResultItem. # noqa: E501
:rtype: EmotionScore
"""
return self._emotion_score
@emotion_score.setter
def emotion_score(self, emotion_score):
"""Sets the emotion_score of this ResultItem.
:param emotion_score: The emotion_score of this ResultItem. # noqa: E501
:type: EmotionScore
"""
self._emotion_score = emotion_score
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResultItem):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
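# --- Illustrative usage sketch (not part of the generated module). The field
# values below are assumptions for this example.
def _example_result_item():
    """Hedged sketch: build a ResultItem and serialize it to a plain dict."""
    item = ResultItem(index=0, emotion="joy")  # emotion_score left unset
    # Expected shape: {'index': 0, 'emotion': 'joy', 'emotion_score': None}
    return item.to_dict()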
|
the-stack_0_1829 | # ---------------------------------------------------------------------
# f5.BIGIP.get_interfaces
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
from collections import defaultdict
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetinterfaces import IGetInterfaces
class Script(BaseScript):
name = "f5.BIGIP.get_interfaces"
cache = True
interface = IGetInterfaces
rx_self = re.compile(r"^net self \S+ {", re.MULTILINE | re.DOTALL)
rx_self_a = re.compile(
r"^\s+address\s+(?P<address>\S+).+" r"^\s+vlan\s+(?P<vlan>\S+)", re.DOTALL | re.MULTILINE
)
def parse_kv(self, s):
r = {}
for l in s.splitlines():
k, v = l.rsplit(" ", 1)
r[k.strip()] = v
return r
def execute(self):
# Get self ip
addresses = defaultdict(list)
v = self.cli("list /net self")
for data in self.rx_self.split(v):
match = self.rx_self_a.search(data)
if match:
addresses[match.group("vlan")] += [match.group("address")]
# Get VLAN mappings
vlans = {} # tag -> data
trunks = {} # name -> [members]
aggregated = {} # name -> aggregated interface
current_vlan = None
current_trunk = None
lacp_interfaces = set()
interfaces = set()
v = self.cli("show /net vlan")
for h, data in self.parse_blocks(v):
if h.startswith("Net::Vlan: "):
d = self.parse_kv(data)
name = h[11:]
current_vlan = {
"name": name,
"mac": d.get("Mac Address (True)"),
"mtu": d.get("MTU"),
"tag": d.get("Tag"),
"tagged": [],
"untagged": [],
"ipv4_addresses": [a for a in addresses[name] if ":" not in a],
"ipv6_addresses": [a for a in addresses[name] if ":" in a],
}
vlans[name] = current_vlan
current_trunk = None
elif h.startswith("Net::Vlan-Member: "):
name = h[18:]
d = self.parse_kv(data)
tagged = d.get("Tagged") == "yes"
if tagged:
current_vlan["tagged"] += [name]
else:
current_vlan["untagged"] += [name]
interfaces.add(name)
elif h.startswith("Net::Trunk"):
name = data.splitlines()[0].split(" ", 1)[0]
current_trunk = {"name": name, "members": []}
trunks[name] = current_trunk
interfaces.add(name)
elif h.startswith("Net::Interface"):
if current_trunk:
for l in data.splitlines():
i = l.split(" ", 1)[0]
current_trunk["members"] += [i]
interfaces.add(i)
aggregated[i] = current_trunk["name"]
elif h.startswith("Net::LACP Status (interface: "):
name = h[29:-1]
lacp_interfaces.add(name)
# Build result
ifaces = []
tagged = defaultdict(list) # interface -> [vlans]
untagged = {} # interface -> vlan
for vlan in vlans:
# SVI
v = vlans[vlan]
enabled_afi = []
tag = int(v["tag"])
if v["ipv4_addresses"]:
enabled_afi += ["IPv4"]
if v["ipv6_addresses"]:
enabled_afi += ["IPv6"]
if enabled_afi:
iface = {
"name": v["name"],
"type": "SVI",
"mac": v["mac"],
"mtu": v["mtu"],
"admin_status": True,
"oper_status": True,
"subinterfaces": [
{
"name": v["name"],
"vlan_ids": [tag],
"enabled_afi": enabled_afi,
"ipv4_addresses": v["ipv4_addresses"],
"ipv6_addresses": v["ipv6_addresses"],
"admin_status": True,
"oper_status": True,
}
],
}
ifaces += [iface]
for i in v["tagged"]:
tagged[i] += [tag]
for i in v["untagged"]:
untagged[i] = tag
for i in interfaces:
itype = "physical" if i not in trunks else "aggregated"
iface = {
"name": i,
"type": itype,
# "mac": v["mac"],
# "mtu": v["mtu"],
"admin_status": True,
"oper_status": True,
"enabled_protocols": [],
"subinterfaces": [],
}
if i in tagged or i in untagged:
si = {
"name": i,
"enabled_afi": ["BRIDGE"],
"admin_status": True,
"oper_status": True,
}
if i in tagged:
si["tagged_vlans"] = sorted(tagged[i])
if i in untagged:
si["untagged_vlan"] = untagged[i]
iface["subinterfaces"] = [si]
if i in lacp_interfaces:
iface["enabled_protocols"] += ["LACP"]
if i in aggregated:
iface["aggregated_interface"] = aggregated[i]
ifaces += [iface]
return [{"interfaces": sorted(ifaces, key=lambda x: x["name"])}]
|
the-stack_0_1830 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *

class PyAzureMgmtContainerregistry(PythonPackage):
"""Microsoft Azure Container Registry Client Library for Python."""
homepage = "https://github.com/Azure/azure-sdk-for-python"
url = "https://pypi.io/packages/source/a/azure-mgmt-containerregistry/azure-mgmt-containerregistry-2.8.0.zip"
# Release candidate needed for py-azure-cli
version('3.0.0rc14', sha256='d23ce93ec5903d00f79f0ac995e16bf47197130239f7f182509add3277b73071')
version('2.8.0', sha256='b24be1050d54f3158e8be7f6ad677f0c8888dddefd09fb8391ebfc73d40173a4', preferred=True)
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:1.999', type=('build', 'run'))
depends_on('[email protected]:1.999', type=('build', 'run'))
depends_on('py-azure-mgmt-nspkg', when='^python@:2', type=('build', 'run'))
|
the-stack_0_1831 | # Copyright (c) Nanjing University, Vision Lab.
# Jianqiang Wang ([email protected]), Zhan Ma ([email protected]); Nanjing University, Vision Lab.
# Last update: 2020.06.06
import numpy as np
import h5py
import os, sys
import torch
import torch.nn as nn
import MinkowskiEngine as ME
import MinkowskiEngine.MinkowskiFunctional as MF
from models.BasicBlock import ResNet, InceptionResNet
import time
class Encoder(nn.Module):
"""
Encoder
"""
def __init__(self, channels, block_layers, block):
nn.Module.__init__(self)
in_nchannels=1
ch = [16, 32, 64, 32, channels]
if block == 'ResNet':
self.block = ResNet
elif block == 'InceptionResNet':
self.block = InceptionResNet
self.conv0 = ME.MinkowskiConvolution(
in_channels=in_nchannels,
out_channels=ch[0],
kernel_size=3,
stride=1,
bias=True,
dimension=3)
self.down0 = ME.MinkowskiConvolution(
in_channels=ch[0],
out_channels=ch[1],
kernel_size=2,
stride=2,
bias=True,
dimension=3)
self.block0 = self.make_layer(
self.block, block_layers, ch[1])
self.conv1 = ME.MinkowskiConvolution(
in_channels=ch[1],
out_channels=ch[1],
kernel_size=3,
stride=1,
bias=True,
dimension=3)
self.down1 = ME.MinkowskiConvolution(
in_channels=ch[1],
out_channels=ch[2],
kernel_size=2,
stride=2,
bias=True,
dimension=3)
self.block1 = self.make_layer(
self.block, block_layers, ch[2])
self.conv2 = ME.MinkowskiConvolution(
in_channels=ch[2],
out_channels=ch[2],
kernel_size=3,
stride=1,
bias=True,
dimension=3)
self.down2 = ME.MinkowskiConvolution(
in_channels=ch[2],
out_channels=ch[3],
kernel_size=2,
stride=2,
bias=True,
dimension=3)
self.block2 = self.make_layer(
self.block, block_layers, ch[3])
self.conv3 = ME.MinkowskiConvolution(
in_channels=ch[3],
out_channels=ch[4],
kernel_size=3,
stride=1,
bias=True,
dimension=3)
self.relu = ME.MinkowskiReLU(inplace=True)
def make_layer(self, block, block_layers, channels):
layers = []
for i in range(block_layers):
layers.append(block(channels=channels))
return nn.Sequential(*layers)
def forward(self, x):
out0 = self.relu(self.down0(self.relu(self.conv0(x))))
out0 = self.block0(out0)
out1 = self.relu(self.down1(self.relu(self.conv1(out0))))
out1 = self.block1(out1)
out2 = self.relu(self.down2(self.relu(self.conv2(out1))))
out2 = self.block2(out2)
out2 = self.conv3(out2)
return [out2, out1, out0]
class Decoder(nn.Module):
"""
Decoder
"""
def __init__(self, channels, block_layers, block):
nn.Module.__init__(self)
out_nchannel=1
ch = [channels, 64, 32, 16]
if block == 'ResNet':
self.block = ResNet
elif block == 'InceptionResNet':
self.block = InceptionResNet
self.up0 = ME.MinkowskiGenerativeConvolutionTranspose(
in_channels=ch[0],
out_channels=ch[1],
kernel_size= 2,
stride=2,
bias=True,
dimension=3)
self.conv0 = ME.MinkowskiConvolution(
in_channels=ch[1],
out_channels=ch[1],
kernel_size= 3,
stride=1,
bias=True,
dimension=3)
self.block0 = self.make_layer(
self.block, block_layers, ch[1])
self.conv0_cls = ME.MinkowskiConvolution(
in_channels=ch[1],
out_channels=out_nchannel,
kernel_size= 3,
stride=1,
bias=True,
dimension=3)
self.up1 = ME.MinkowskiGenerativeConvolutionTranspose(
in_channels=ch[1],
out_channels=ch[2],
kernel_size= 2,
stride=2,
bias=True,
dimension=3)
self.conv1 = ME.MinkowskiConvolution(
in_channels=ch[2],
out_channels=ch[2],
kernel_size= 3,
stride=1,
bias=True,
dimension=3)
self.block1 = self.make_layer(
self.block, block_layers, ch[2])
self.conv1_cls = ME.MinkowskiConvolution(
in_channels=ch[2],
out_channels=out_nchannel,
kernel_size= 3,
stride=1,
bias=True,
dimension=3)
self.up2 = ME.MinkowskiGenerativeConvolutionTranspose(
in_channels=ch[2],
out_channels=ch[3],
kernel_size= 2,
stride=2,
bias=True,
dimension=3)
self.conv2 = ME.MinkowskiConvolution(
in_channels=ch[3],
out_channels=ch[3],
kernel_size= 3,
stride=1,
bias=True,
dimension=3)
self.block2 = self.make_layer(
self.block, block_layers, ch[3])
self.conv2_cls = ME.MinkowskiConvolution(
in_channels=ch[3],
out_channels=out_nchannel,
kernel_size= 3,
stride=1,
bias=True,
dimension=3)
self.relu = ME.MinkowskiReLU(inplace=True)
# self.relu = ME.MinkowskiELU(inplace=True)
# pruning
self.pruning = ME.MinkowskiPruning()
def make_layer(self, block, block_layers, channels):
layers = []
for i in range(block_layers):
layers.append(block(channels=channels))
return nn.Sequential(*layers)
# get target from label key or sparse tensor.
def get_target_by_key(self, out, target_key):
with torch.no_grad():
target = torch.zeros(len(out), dtype=torch.bool)
cm = out.coords_man
strided_target_key = cm.stride(
target_key, out.tensor_stride[0], force_creation=True)
# kernel size = 1
ins, outs = cm.get_kernel_map(
out.coords_key,
strided_target_key,
kernel_size=1,
region_type=1)
for curr_in in ins:
target[curr_in] = 1
return target.bool()
def get_target_by_sp_tensor(self, out, target_sp_tensor):
with torch.no_grad():
def ravel_multi_index(coords, step):
coords = coords.long()
step = step.long()
coords_sum = coords[:, 0] \
+ coords[:, 1]*step \
+ coords[:, 2]*step*step \
+ coords[:, 3]*step*step*step
return coords_sum
step = max(out.C.max(), target_sp_tensor.C.max()) + 1
out_sp_tensor_coords_1d = ravel_multi_index(out.C, step)
in_sp_tensor_coords_1d = ravel_multi_index(target_sp_tensor.C, step)
# test whether each element of a 1-D array is also present in a second array.
target = np.in1d(out_sp_tensor_coords_1d.cpu().numpy(),
in_sp_tensor_coords_1d.cpu().numpy())
return torch.Tensor(target).bool()
def get_coords_nums_by_key(self, out, target_key):
with torch.no_grad():
cm = out.coords_man
strided_target_key = cm.stride(target_key, out.tensor_stride[0], force_creation=True)
ins, outs = cm.get_kernel_map(
out.coords_key,
strided_target_key,
kernel_size=1,
region_type=1)
row_indices_per_batch = cm.get_row_indices_per_batch(out.coords_key)
coords_nums = [len(np.in1d(row_indices,ins[0]).nonzero()[0]) for _, row_indices in enumerate(row_indices_per_batch)]
# coords_nums = [len(np.intersect1d(row_indices,ins[0])) for _, row_indices in enumerate(row_indices_per_batch)]
return coords_nums
def keep_adaptive(self, out, coords_nums, rho=1.0):
with torch.no_grad():
keep = torch.zeros(len(out), dtype=torch.bool)
# get row indices per batch.
# row_indices_per_batch = out.coords_man.get_row_indices_per_batch(out.coords_key)
row_indices_per_batch = out._batchwise_row_indices
for row_indices, ori_coords_num in zip(row_indices_per_batch, coords_nums):
coords_num = min(len(row_indices), ori_coords_num*rho)# select top k points.
values, indices = torch.topk(out.F[row_indices].squeeze(), int(coords_num))
keep[row_indices[indices]]=True
return keep
def forward(self, x, target_label, adaptive, rhos=[1.0, 1.0, 1.0], training=True):
if isinstance(target_label, ME.CoordinateMapKey):
target_format = 'key'
elif isinstance(target_label, list):
if isinstance(target_label[0], ME.SparseTensor):
target_format = 'sp_tensor'
elif isinstance(target_label[0], int):
target_format = 'num'
else:
print('Target Label Format Error!')
sys.exit(0)
targets = []
out_cls = []
keeps = []
# Decode 0.
out0 = self.relu(self.conv0(self.relu(self.up0(x))))
out0 = self.block0(out0)
out0_cls = self.conv0_cls(out0)
# get target 0.
if target_format == 'key':
target0 = self.get_target_by_key(out0, target_label)
elif target_format == 'sp_tensor':
target0 = self.get_target_by_sp_tensor(out0, target_label[0])
elif target_format == 'num':
target0 = target_label[0]
targets.append(target0)
out_cls.append(out0_cls)
# get keep 0.
if adaptive:
if target_format == 'key':
coords_nums0 = self.get_coords_nums_by_key(out0, target_label)
elif target_format == 'sp_tensor':
coords_nums0 = [len(coords) for coords in target_label[0].decomposed_coordinates]
elif target_format == 'num':
coords_nums0 = [target_label[0]]
keep0 = self.keep_adaptive(out0_cls, coords_nums0, rho=rhos[0])
else:
keep0 = (out0_cls.F > 0).cpu().squeeze()
if out0_cls.F.max() < 0:
# keep at least one points.
print('===0; max value < 0', out0_cls.F.max())
_, idx = torch.topk(out0_cls.F.squeeze(), 1)
keep0[idx] = True
keeps.append(keep0)
# If training, force target shape generation, use net.eval() to disable
if training:
keep0 += target0
# Remove voxels
out0_pruned = self.pruning(out0, keep0.to(out0.device))
# Decode 1.
out1 = self.relu(self.conv1(self.relu(self.up1(out0_pruned))))
out1 = self.block1(out1)
out1_cls = self.conv1_cls(out1)
# get target 1.
if target_format == 'key':
target1 = self.get_target_by_key(out1, target_label)
elif target_format == 'sp_tensor':
target1 = self.get_target_by_sp_tensor(out1, target_label[1])
elif target_format == 'num':
target1 = target_label[1]
targets.append(target1)
out_cls.append(out1_cls)
# get keep 1.
if adaptive:
if target_format == 'key':
coords_nums1 = self.get_coords_nums_by_key(out1, target_label)
elif target_format == 'sp_tensor':
coords_nums1 = [len(coords) for coords in target_label[1].decomposed_coordinates]
elif target_format == 'num':
coords_nums1 = [target_label[1]]
keep1 = self.keep_adaptive(out1_cls, coords_nums1, rho=rhos[1])
else:
keep1 = (out1_cls.F > 0).cpu().squeeze()
if out1_cls.F.max() < 0:
# keep at least one points.
print('===1; max value < 0', out1_cls.F.max())
_, idx = torch.topk(out1_cls.F.squeeze(), 1)
keep1[idx] = True
keeps.append(keep1)
if training:
keep1 += target1
# Remove voxels
out1_pruned = self.pruning(out1, keep1.to(out1.device))
# Decode 2.
out2 = self.relu(self.conv2(self.relu(self.up2(out1_pruned))))
out2 = self.block2(out2)
out2_cls = self.conv2_cls(out2)
# get target 2.
if target_format == 'key':
target2 = self.get_target_by_key(out2, target_label)
elif target_format == 'sp_tensor':
target2 = self.get_target_by_sp_tensor(out2, target_label[2])
elif target_format == 'num':
target2 = target_label[2]
targets.append(target2)
out_cls.append(out2_cls)
# get keep 2.
if adaptive:
if target_format == 'key':
coords_nums2 = self.get_coords_nums_by_key(out2, target_label)
elif target_format == 'sp_tensor':
coords_nums2 = [len(coords) for coords in target_label[2].decomposed_coordinates]
elif target_format == 'num':
coords_nums2 = [target_label[2]]
keep2 = self.keep_adaptive(out2_cls, coords_nums2, rho=rhos[2])
else:
keep2 = (out2_cls.F > 0).cpu().squeeze()
if out2_cls.F.max() < 0:
# keep at least one points.
print('===2; max value < 0', out2_cls.F.max())
_, idx = torch.topk(out2_cls.F.squeeze(), 1)
keep2[idx] = True
keeps.append(keep2)
# Remove voxels
out2_pruned = self.pruning(out2_cls, keep2.to(out2_cls.device))
return out2_pruned, out_cls, targets, keeps
if __name__ == '__main__':
encoder = Encoder(8)
print(encoder)
decoder = Decoder(8)
print(decoder)
|
the-stack_0_1834 | # Copyright (C) 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for audio streams."""
import array
import logging
import math
import time
import threading
import wave
import click
import sounddevice as sd
DEFAULT_AUDIO_SAMPLE_RATE = 16000
DEFAULT_AUDIO_SAMPLE_WIDTH = 2
DEFAULT_AUDIO_ITER_SIZE = 3200
DEFAULT_AUDIO_DEVICE_BLOCK_SIZE = 6400
DEFAULT_AUDIO_DEVICE_FLUSH_SIZE = 25600
def normalize_audio_buffer(buf, volume_percentage, sample_width=2):
"""Adjusts the loudness of the audio data in the given buffer.
Volume normalization is done by scaling the amplitude of the audio
in the buffer by a scale factor of 2^(volume_percentage/100)-1.
For example, 50% volume scales the amplitude by a factor of 0.414,
and 75% volume scales the amplitude by a factor of 0.681.
    For now we only support sample_width 2.
Args:
buf: byte string containing audio data to normalize.
volume_percentage: volume setting as an integer percentage (1-100).
sample_width: size of a single sample in bytes.
"""
if sample_width != 2:
raise Exception('unsupported sample width:', sample_width)
scale = math.pow(2, 1.0*volume_percentage/100)-1
# Construct array from bytes based on sample_width, multiply by scale
# and convert it back to bytes
arr = array.array('h', buf)
for idx in range(0, len(arr)):
arr[idx] = int(arr[idx]*scale)
    buf = arr.tobytes()  # tobytes() is the non-deprecated equivalent of tostring()
return buf
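# --- Illustrative numeric check (not part of the original module), spelling
# out the 2**(volume_percentage/100) - 1 scale factor quoted in the docstring.
def _example_normalize_scale():
    """Hedged sketch: print the amplitude scale for a few volume settings."""
    for pct in (50, 75, 100):
        scale = math.pow(2, 1.0 * pct / 100) - 1
        # 50% -> ~0.414, 75% -> ~0.68, 100% -> 1.0
        print('%3d%% volume -> amplitude scale %.3f' % (pct, scale))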
def align_buf(buf, sample_width):
"""In case of buffer size not aligned to sample_width pad it with 0s"""
remainder = len(buf) % sample_width
if remainder != 0:
buf += b'\0' * (sample_width - remainder)
return buf
class WaveSource(object):
"""Audio source that reads audio data from a WAV file.
Reads are throttled to emulate the given sample rate and silence
is returned when the end of the file is reached.
Args:
fp: file-like stream object to read from.
sample_rate: sample rate in hertz.
sample_width: size of a single sample in bytes.
"""
def __init__(self, fp, sample_rate, sample_width):
self._fp = fp
try:
self._wavep = wave.open(self._fp, 'r')
except wave.Error as e:
logging.warning('error opening WAV file: %s, '
'falling back to RAW format', e)
self._fp.seek(0)
self._wavep = None
self._sample_rate = sample_rate
self._sample_width = sample_width
self._sleep_until = 0
def read(self, size):
"""Read bytes from the stream and block until sample rate is achieved.
Args:
size: number of bytes to read from the stream.
"""
now = time.time()
missing_dt = self._sleep_until - now
if missing_dt > 0:
time.sleep(missing_dt)
self._sleep_until = time.time() + self._sleep_time(size)
data = (self._wavep.readframes(size)
if self._wavep
else self._fp.read(size))
# When reach end of audio stream, pad remainder with silence (zeros).
if not data:
return b'\x00' * size
return data
def close(self):
"""Close the underlying stream."""
if self._wavep:
self._wavep.close()
self._fp.close()
def _sleep_time(self, size):
sample_count = size / float(self._sample_width)
sample_rate_dt = sample_count / float(self._sample_rate)
return sample_rate_dt
def start(self):
pass
def stop(self):
pass
@property
def sample_rate(self):
return self._sample_rate
class WaveSink(object):
"""Audio sink that writes audio data to a WAV file.
Args:
fp: file-like stream object to write data to.
sample_rate: sample rate in hertz.
sample_width: size of a single sample in bytes.
"""
def __init__(self, fp, sample_rate, sample_width):
self._fp = fp
self._wavep = wave.open(self._fp, 'wb')
self._wavep.setsampwidth(sample_width)
self._wavep.setnchannels(1)
self._wavep.setframerate(sample_rate)
def write(self, data):
"""Write bytes to the stream.
Args:
data: frame data to write.
"""
self._wavep.writeframes(data)
def close(self):
"""Close the underlying stream."""
self._wavep.close()
self._fp.close()
def start(self):
pass
def stop(self):
pass
def flush(self):
pass
class SoundDeviceStream(object):
"""Audio stream based on an underlying sound device.
It can be used as an audio source (read) and a audio sink (write).
Args:
sample_rate: sample rate in hertz.
sample_width: size of a single sample in bytes.
block_size: size in bytes of each read and write operation.
flush_size: size in bytes of silence data written during flush operation.
"""
def __init__(self, sample_rate, sample_width, block_size, flush_size):
if sample_width == 2:
audio_format = 'int16'
else:
raise Exception('unsupported sample width:', sample_width)
self._audio_stream = sd.RawStream(
samplerate=sample_rate, dtype=audio_format, channels=1,
blocksize=int(block_size/2), # blocksize is in number of frames.
)
self._block_size = block_size
self._flush_size = flush_size
self._sample_rate = sample_rate
def read(self, size):
"""Read bytes from the stream."""
buf, overflow = self._audio_stream.read(size)
if overflow:
logging.warning('SoundDeviceStream read overflow (%d, %d)',
size, len(buf))
return bytes(buf)
def write(self, buf):
"""Write bytes to the stream."""
underflow = self._audio_stream.write(buf)
if underflow:
logging.warning('SoundDeviceStream write underflow (size: %d)',
len(buf))
return len(buf)
def flush(self):
if self._audio_stream.active and self._flush_size > 0:
self._audio_stream.write(b'\x00' * self._flush_size)
def start(self):
"""Start the underlying stream."""
if not self._audio_stream.active:
self._audio_stream.start()
def stop(self):
"""Stop the underlying stream."""
if self._audio_stream.active:
self._audio_stream.stop()
def close(self):
"""Close the underlying stream and audio interface."""
if self._audio_stream:
self.stop()
self._audio_stream.close()
self._audio_stream = None
@property
def sample_rate(self):
return self._sample_rate
class ConversationStream(object):
"""Audio stream that supports half-duplex conversation.
A conversation is the alternance of:
- a recording operation
- a playback operation
    Expected usage:
For each conversation:
- start_recording()
- read() or iter()
- stop_recording()
- start_playback()
- write()
- stop_playback()
When conversations are finished:
- close()
Args:
source: file-like stream object to read input audio bytes from.
sink: file-like stream object to write output audio bytes to.
iter_size: read size in bytes for each iteration.
sample_width: size of a single sample in bytes.
"""
def __init__(self, source, sink, iter_size, sample_width):
self._source = source
self._sink = sink
self._iter_size = iter_size
self._sample_width = sample_width
self._volume_percentage = 50
self._stop_recording = threading.Event()
self._source_lock = threading.RLock()
self._recording = False
self._playing = False
def start_recording(self):
"""Start recording from the audio source."""
self._recording = True
self._stop_recording.clear()
self._source.start()
def stop_recording(self):
"""Stop recording from the audio source."""
self._stop_recording.set()
with self._source_lock:
self._source.stop()
self._recording = False
def start_playback(self):
"""Start playback to the audio sink."""
self._playing = True
self._sink.start()
def stop_playback(self):
"""Stop playback from the audio sink."""
self._sink.flush()
self._sink.stop()
self._playing = False
@property
def recording(self):
return self._recording
@property
def playing(self):
return self._playing
@property
def volume_percentage(self):
"""The current volume setting as an integer percentage (1-100)."""
return self._volume_percentage
@volume_percentage.setter
def volume_percentage(self, new_volume_percentage):
self._volume_percentage = new_volume_percentage
def read(self, size):
"""Read bytes from the source (if currently recording).
"""
with self._source_lock:
return self._source.read(size)
def write(self, buf):
"""Write bytes to the sink (if currently playing).
"""
buf = align_buf(buf, self._sample_width)
buf = normalize_audio_buffer(buf, self.volume_percentage)
return self._sink.write(buf)
def close(self):
"""Close source and sink."""
self._source.close()
self._sink.close()
def __iter__(self):
"""Returns a generator reading data from the stream."""
while True:
if self._stop_recording.is_set():
return
yield self.read(self._iter_size)
@property
def sample_rate(self):
return self._source._sample_rate
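# The helper below is not part of the original module; it is a minimal sketch of
# the half-duplex protocol described in the ConversationStream docstring
# (record, then play back through the same device), reusing the module's own
# DEFAULT_* constants. The CLI command `main` further down does the same thing
# with configurable options.
def _half_duplex_sketch(seconds=2):
    device = SoundDeviceStream(sample_rate=DEFAULT_AUDIO_SAMPLE_RATE,
                               sample_width=DEFAULT_AUDIO_SAMPLE_WIDTH,
                               block_size=DEFAULT_AUDIO_DEVICE_BLOCK_SIZE,
                               flush_size=DEFAULT_AUDIO_DEVICE_FLUSH_SIZE)
    stream = ConversationStream(source=device, sink=device,
                                iter_size=DEFAULT_AUDIO_ITER_SIZE,
                                sample_width=DEFAULT_AUDIO_SAMPLE_WIDTH)
    chunks = []
    stream.start_recording()
    deadline = time.time() + seconds
    while time.time() < deadline:
        chunks.append(stream.read(DEFAULT_AUDIO_DEVICE_BLOCK_SIZE))
    stream.stop_recording()
    stream.start_playback()
    for chunk in chunks:
        stream.write(chunk)
    stream.stop_playback()
    stream.close()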
@click.command()
@click.option('--record-time', default=5,
metavar='<record time>', show_default=True,
help='Record time in secs')
@click.option('--audio-sample-rate',
default=DEFAULT_AUDIO_SAMPLE_RATE,
metavar='<audio sample rate>', show_default=True,
help='Audio sample rate in hertz.')
@click.option('--audio-sample-width',
default=DEFAULT_AUDIO_SAMPLE_WIDTH,
metavar='<audio sample width>', show_default=True,
help='Audio sample width in bytes.')
@click.option('--audio-iter-size',
default=DEFAULT_AUDIO_ITER_SIZE,
metavar='<audio iter size>', show_default=True,
help='Size of each read during audio stream iteration in bytes.')
@click.option('--audio-block-size',
default=DEFAULT_AUDIO_DEVICE_BLOCK_SIZE,
metavar='<audio block size>', show_default=True,
help=('Block size in bytes for each audio device '
                    'read and write operation.'))
@click.option('--audio-flush-size',
default=DEFAULT_AUDIO_DEVICE_FLUSH_SIZE,
metavar='<audio flush size>', show_default=True,
help=('Size of silence data in bytes written '
                    'during flush operation.'))
def main(record_time, audio_sample_rate, audio_sample_width,
audio_iter_size, audio_block_size, audio_flush_size):
"""Helper command to test audio stream processing.
    - Record --record-time seconds (5 by default) of 16-bit samples at 16 kHz.
    - Play back the recorded samples.
"""
end_time = time.time() + record_time
audio_device = SoundDeviceStream(sample_rate=audio_sample_rate,
sample_width=audio_sample_width,
block_size=audio_block_size,
flush_size=audio_flush_size)
stream = ConversationStream(source=audio_device,
sink=audio_device,
iter_size=audio_iter_size,
sample_width=audio_sample_width)
samples = []
logging.basicConfig(level=logging.INFO)
logging.info('Starting audio test.')
stream.start_recording()
logging.info('Recording samples.')
while time.time() < end_time:
samples.append(stream.read(audio_block_size))
logging.info('Finished recording.')
stream.stop_recording()
stream.start_playback()
logging.info('Playing back samples.')
while len(samples):
stream.write(samples.pop(0))
logging.info('Finished playback.')
stream.stop_playback()
logging.info('audio test completed.')
stream.close()
if __name__ == '__main__':
main()
|
the-stack_0_1835 | ll_proto = \
"""
node life {
has anchor owner;
can infer.year_from_date;
}
node year {
has anchor year;
can infer.month_from_date;
}
node month {
has anchor month;
can infer.year_from_date;
can infer.week_from_date;
}
node week {
has anchor week;
can infer.month_from_date;
can infer.day_from_date, date.day_from_date;
}
node day: has anchor day;
node workette {
has name, order, date, owner, status, snooze_till;
has note, is_MIT, is_ritual;
}
edge past;
edge parent;
walker get_day {
has date;
life: take --> node::year == infer.year_from_date(date);
year: take --> node::month == infer.month_from_date(date);
month: take --> node::week == infer.week_from_date(date);
week: take --> node::day == date.day_from_date(date);
day: report here;
report false;
}
walker get_latest_day {
has before_date;
has anchor latest_day;
if(!before_date): before_date = std.time_now();
if(!latest_day): latest_day = 0;
life {
ignore --> node::year > infer.year_from_date(before_date);
take net.max(--> node::year);
}
year {
ignore node::month > infer.month_from_date(before_date);
take net.max(--> node::month)
else {
ignore here;
take <-- node::life;
}
}
month {
ignore node::week > infer.week_from_date(before_date);
take net.max(--> node::week)
else {
ignore here;
take <-- node::year == infer.year_from_date(before_date);
}
}
week {
ignore node::day > infer.day_from_date(before_date);
take net.max(--> node::day)
else {
ignore here;
take <-- node::month == infer.month_from_date(before_date);
}
}
day {
latest_day = here;
report here;
}
}
walker get_gen_day {
has date;
has anchor day_node;
if(!date): date=std.time_now();
root: take --> node::life;
life: take --> node::year == infer.year_from_date(date) else {
new = spawn here --> node::year ;
new.year = infer.year_from_date(date);
take --> node::year == infer.year_from_date(date);
}
year: take --> node::month == infer.month_from_date(date) else {
new = spawn here --> node::month;
new.month = infer.month_from_date(date);
take --> node::month == infer.month_from_date(date);
}
month: take --> node::week == infer.week_from_date(date) else {
new = spawn here --> node::week;
new.week = infer.week_from_date(date);
take --> node::week == infer.week_from_date(date);
}
week: take --> node::day == infer.day_from_date(date) else {
latest_day = spawn here walker::get_latest_day;
new = spawn here --> node::day;
new.day = infer.day_from_date(date);
if(latest_day and infer.day_from_date(date) ==
infer.day_from_date(std.time_now())) {
spawn latest_day walker::carry_forward(parent=new);
take new;
}
elif(latest_day) {
take latest_day;
}
else: take new;
}
day {
day_node = here;
take --> node::workette;
}
workette {
report here;
take --> node::workette;
}
}
walker get_sub_workettes {
report here;
workette: take --> node::workette;
}
walker carry_forward {
has parent;
day {
take --> node::workette;
}
workette {
if(here.status == 'done' or
here.status == 'eliminated') {
disengage;
}
new_workette = spawn here <-[past]- node::workette;
new_workette <-[parent]- parent;
new_workette := here;
spawn --> node::workette
walker::carry_forward(parent=new_workette);
}
}
walker gen_rand_life {
has num_workettes;
root: take --> node::life;
life {
num_workettes = 10;
num_days = rand.integer(2, 4);
for i=0 to i<num_days by i+=1 {
spawn here walker::get_gen_day(
date=rand.time("2019-01-01", "2019-12-31")
);
}
take -->;
}
year, month, week { take -->; }
day, workette {
if(num_workettes == 0): disengage;
gen_num = rand.integer(5, 8);
for i=0 to i<gen_num by i+=1 {
spawn here -[parent]-> node::workette(name=rand.sentence());
}
take --> ;
num_workettes -= 1;
}
}
walker init {
has owner;
has anchor life_node;
take (--> node::life == owner) else {
life_node = spawn here --> node::life;
life_node.owner = owner;
disengage;
}
}
"""
prog0 = \
"""
node testnode:0 {
has a, b, c;
can std.log::a,b::>c with exit;
}
walker testwalk {
testnode {
here.a = 43;
here.b = 'Yeah \\n"fools"!';
report here.b;
if(4 > 6) { std.log("a"); }
elif(5>6) { std.log("b"); }
elif(6>6) { std.log("c"); }
elif(7>6) { std.log(576); }
}
}
node life:0 {
}
node year {
has anchor year;
}
walker another {
life {
here.a = 43;
here.b = 'Yeah \\n"fools"!';
report here.b;
if("4 > 6" == "4 > 6") { std.log("a"); }
}
}
"""
prog1 = \
"""
node testnode:0 {
has a, b, c;
can std.log::a,b::>c with exit;
}
walker testwalk {
testnode {
here.a = 43;
here.b = 'Yeah \\n"fools"!';
report here.b;
if(4 > 6) { std.log("a"); }
elif(5>6) { std.log("b"); }
elif(6>6) { std.log("c"); }
elif(7>6) { std.log(576); }
}
}
node life:0 {
}
node year {
has anchor year;
}
node month {
has anchor month;
}
node week {
has anchor week;
}
node day {
has anchor day;
}
node workette {
has date, owner, status, snooze_till;
has note, is_MIT, is_ritual;
}
walker use_test {
can use.enc_question, use.enc_answer, use.qa_score;
has output;
q = use.enc_question(["How old are you?",
"which animal is the best?"]);
std.log(q);
a = use.enc_answer(["I'm 40 years old.", "Elephants rule."]);
std.log(a);
output = use.qa_score(q, a);
report output;
}
walker use_test_with_ctx {
can use.enc_question, use.enc_answer, use.qa_score, use.dist_score;
has output;
q = use.enc_question("Who are you?");
a = use.enc_answer("I am jason");
output = use.qa_score(q, a);
report output;
a = use.enc_answer("You are jon");
output = use.qa_score(q, a);
report output;
a = use.enc_answer("Who are you? You are jon");
output = use.qa_score(q, a);
report output;
a = use.enc_answer("Who are you? You are jon");
output = use.qa_score(q, a);
report output;
q1 = use.enc_question("Who are you?");
q2 = use.enc_question("Who you be?");
q3 = use.enc_question("Who I be?");
output = use.dist_score(q1, q2);
report output;
output = use.dist_score(q1, q3);
report output;
output = use.qa_score(q2, use.enc_answer("Who are you? You are jon"));
report output;
output = use.qa_score(q3, use.enc_answer("Who are you? You are jon"));
report output;
output = use.qa_score(q2, use.enc_answer("I am jason"));
report output;
output = use.qa_score(q3, use.enc_answer("I am jason"));
report output;
}
walker use_test_with_ctx2 {
can use.enc_question, use.enc_answer, use.qa_score, use.dist_score;
q1 = use.enc_question("Who are you?");
q2 = use.enc_question("Who you be?");
q3 = use.enc_question("Who I be?");
report use.dist_score(q1, q2);
report use.dist_score(q1, q3);
report use.qa_score(q2, use.enc_answer("Who are you? You are jon"));
report use.qa_score(q3, use.enc_answer("Who are you? You are jon"));
report use.qa_score(q2, use.enc_answer("I am jason"));
report use.qa_score(q3, use.enc_answer("I am jason"));
report use.qa_score(q3, use.enc_answer("I am jason","Who I be?"));
report use.qa_score(q3, use.enc_answer("I am jason Who I be?"));
}
walker use_test_single {
can use.enc_question, use.enc_answer, use.qa_score;
has output;
q = use.enc_question("Who's your daddy?");
a = use.enc_answer("I'm your father.");
output = use.qa_score(q, a);
report output;
}
walker get_day {
has date;
life: take infer.year_from_date(date);
year: take infer.month_from_date(date);
month: take infer.week_from_date(date);
week: take infer.day_from_date(date);
day: report --> ;
}
walker get_gen_day {
has date;
can infer.year_from_date;
can infer.month_from_date;
can infer.week_from_date;
can infer.day_from_date;
life: take --> node::year == infer.year_from_date(date) else {
new = spawn here --> node::year;
new.year = infer.year_from_date(date);
take --> node::year == infer.year_from_date(date);
}
year: take --> node::month == infer.month_from_date(date) else {
new = spawn here --> node::month;
new.month = infer.month_from_date(date);
take --> node::month == infer.month_from_date(date);
}
month: take --> node::week == infer.week_from_date(date) else {
new = spawn here --> node::week;
new.week = infer.week_from_date(date);
take --> node::week == infer.week_from_date(date);
}
week: take --> node::day == infer.day_from_date(date) else {
new = spawn here --> node::day;
new.day = infer.day_from_date(date);
take --> node::day == infer.day_from_date(date);
}
day: report --> ;
}
walker get_sub_workettes {
workette: report --> node::workette;
}
walker get_latest_day {
life: take year.max_outbound;
year: take month.max_outbound;
month: take week.max_outbound;
week: report day.max_outbound;
}
walker carry_forward {
has my_root;
day {
new_day = spawn here --> node::day;
my_root = new_day;
take day.outbound_nodes;
}
workette {
if(workette.status == 'done' or
workette.status == 'eliminated') {
continue;
}
childern = workette.outbound_nodes;
new_workette = spawn here --> node::workette;
parent = me.spawn_history.last(-1);
new_workette <-- parent;
take --> node::workette;
}
report me.spawn_history;
report new_day;
}
"""
edgey = \
"""
node testnode;
edge apple;
edge banana;
walker init {
root {
a = spawn here --> node::testnode ;
here -[apple]-> a;
here -[banana]-> a;
}
}
"""
edgey2 = \
"""
node testnode;
edge apple;
edge banana;
walker init {
root {
a = spawn here --> node::testnode ;
here -[apple]-> a;
here -[banana]-> a;
here !--> a;
}
}
"""
edgey2b = \
"""
node testnode;
edge apple;
edge banana;
walker init {
root {
a = spawn here --> node::testnode ;
b = spawn here --> node::testnode ;
here -[apple]-> a;
here -[banana]-> a;
here !--> a;
}
}
"""
edgey2c = \
"""
node testnode;
edge apple;
edge banana;
walker init {
root {
a = spawn here --> node::testnode ;
b = spawn here --> node::testnode ;
here -[apple]-> a;
here -[banana]-> a;
here !-[apple]-> a;
}
}
"""
edgey3 = \
"""
node testnode;
edge apple;
edge banana;
walker init {
root {
a = spawn here --> node::testnode ;
b = spawn here --> node::testnode ;
here -[apple]-> a;
here -[apple]-> a;
here -[banana]-> a;
here -[banana]-> a;
here -[banana]-> a;
here !-[apple]-> a;
}
}
"""
edgey4 = \
"""
node testnode;
edge apple;
edge banana;
walker init {
root {
a = spawn here --> node::testnode ;
here --> a;
here -[apple]-> a;
here -[banana]-> a;
here !-[generic]-> a;
}
}
"""
edgey5 = \
"""
node testnode;
edge apple;
edge banana;
walker init {
root {
a = spawn here --> node::testnode ;
here --> a;
here --> a;
here -[apple]-> a;
here -[banana]-> a;
here !-[generic]-> -[generic]->;
}
}
"""
edgey6 = \
"""
node testnode;
edge apple;
edge banana;
walker init {
root {
a = spawn here --> node::testnode ;
b = spawn here --> node::testnode ;
here -[apple]-> -[generic]->;
}
}
"""
edgey7 = \
"""
node testnode;
edge apple;
edge banana;
walker init {
root {
a = spawn here --> node::testnode ;
b = spawn here --> node::testnode ;
here --> a;
here --> a;
here -[apple]-> a;
here -[apple]-> b;
here -[banana]-> a;
here !-[generic]-> -[apple]->;
}
}
"""
edge_access = \
"""
node testnode;
edge apple {
has v1, v2;
}
edge banana {
has x1, x2;
}
walker init {
root {
a = spawn here -[apple]-> node::testnode ;
b = spawn here -[banana]-> node::testnode ;
e = -[apple]->.edge[0];
e.v1 = 7;
e = --> node::testnode .edge[1];
e.x1=8;
}
}
"""
has_assign = \
"""
node testnode {
has a=8;
}
walker init {
root {
a = spawn here --> node::testnode ;
b = spawn here --> node::testnode ;
std.log(a.a, b.a);
}
}
"""
set_get_global = \
"""
walker setter {
root {
std.set_global('globby', 59);
}
}
walker getter {
has a;
root {
a=std.get_global('globby');
std.log(std.get_global('globby'));
}
}
"""
set_get_global_dict = \
"""
walker setter {
root {
std.set_global('globby',
{ "max_bot_count": 10, "max_ans_count": 100,
"max_txn_count": 50000, "max_test_suite": 5,
"max_test_cases": 50, "export_import": true,
"analytics": true, "integration": "All"
});
}
}
walker getter {
has a;
root {
a=std.get_global('globby');
std.log(std.get_global('globby'));
report std.get_global('globby');
}
}
"""
version_label = \
"""
version: "alpha-1.0"
walker setter {
root {
std.set_global('globby', 59);
}
}
walker getter {
has a;
root {
a=std.get_global('globby');
std.log(std.get_global('globby'));
}
}
"""
sharable = \
"""
node life {
}
walker init {
root {
new = spawn here --> node::life;
take -->;
}
life {
std.out(here);
}
}
"""
basic = \
"""
node life {
}
walker init {
root {
new = spawn here --> node::life;
take -->;
}
life {
}
}
"""
visibility_builtins = \
"""
node testnode {
has yo, mama;
}
edge apple {
has v1, v2;
}
edge banana {
has x1, x2;
}
walker init {
root {
a = spawn here -[apple]-> node::testnode ;
a.yo="Yeah i said";
a.mama="Yo Mama Fool!";
b = spawn here -[banana]-> node::testnode ;
e = -[apple]->.edge[0];
e.v1 = 7;
e = --> node::testnode .edge[1];
e.x1=8;
report [a.context, b.info, e.details];
}
}
"""
spawn_ctx_edge_node = \
"""
node person: has name, age, birthday, profession;
edge friend: has meeting_place;
edge family: has kind;
walker init {
person1 = spawn here -[friend(meeting_place = "college")]->
node::person(name = "Josh", age = 32);
person2 = spawn here -[family(kind = "sister")] ->
node::person(name = "Jane", age = 30);
for i in -->{
report i.context;
report i.edge[0].context;
}
}
"""
filter_ctx_edge_node = \
"""
node person: has name, age, birthday, profession;
edge friend: has meeting_place;
edge family: has kind;
walker init {
person1 = spawn here -[friend(meeting_place = "college")]->
node::person(name = "Josh", age = 32);
person2 = spawn here -[family(kind = "sister")] ->
node::person(name = "Jane", age = 30);
report --> node::person(name=='Jane')[0].context;
report -[family(kind=="brother")]->;
}
"""
null_handleing = \
"""
node person: has name, age, birthday, profession;
walker init {
person1 = spawn here -->
node::person(name = "Josh", age = 32);
if(person1.birthday==null): report true;
else: report false;
if(person1.name==null): report true;
else: report false;
person1.name=null;
report person1.name==null;
person1.name=0;
report person1.name==null;
}
"""
bool_type_convert = \
"""
node person: has name;
walker init {
p1 = spawn here -->
node::person(name = "Josh");
p1.name = true;
report p1.name;
std.log(p1.name);
report p1.context;
}
"""
typecasts = \
"""
walker init {
a=5.6;
report (a+2);
report (a+2).int;
report (a+2).str;
report (a+2).bool;
report (a+2).int.float;
if(a.str.type == str and !(a.int.type == str)
and a.int.type == int):
report "Types comes back correct";
}
"""
typecasts_error = \
"""
walker init {
a=5.6;
report (a+2);
report (a+2).int;
report (a+2).str;
report (a+2).edge;
report ("a+2").int.float;
if(a.str.type == str and !(a.int.type == str)
and a.int.type == int):
report "Types comes back correct";
}
"""
filter_on_context = \
"""
node testnode {
has yo, mama;
}
edge apple {
has v1, v2;
}
edge banana {
has x1, x2;
}
walker init {
root {
a = spawn here -[apple]-> node::testnode ;
a.yo="Yeah i said";
a.mama="Yo Mama Fool!";
b = spawn here -[banana]-> node::testnode ;
e = -[apple]->.edge[0];
e.v1 = 7;
e = --> node::testnode .edge[1];
e.x1=8;
report [a.context.{yo}, b.info.{jid,j_type}, e.details];
}
}
"""
string_manipulation = \
"""
walker init {
a=" tEsting me ";
report a[4];
report a[4:7];
report a[3:-1];
report a.str::upper;
report a.str::lower;
report a.str::title;
report a.str::capitalize;
report a.str::swap_case;
report a.str::is_alnum;
report a.str::is_alpha;
report a.str::is_digit;
report a.str::is_title;
report a.str::is_upper;
report a.str::is_lower;
report a.str::is_space;
report a.str::count('t');
report a.str::find('i');
report a.str::split;
report a.str::split('E');
report a.str::startswith('tEs');
report a.str::endswith('me');
report a.str::replace('me', 'you');
report a.str::strip;
report a.str::strip(' t');
report a.str::lstrip;
report a.str::lstrip(' tE');
report a.str::rstrip;
report a.str::rstrip(' e');
report a.str::upper.str::is_upper;
}
"""
string_join = \
"""
walker init {
a=['test', 'me', 'now'];
report '_'.str::join(a);
}
"""
sub_list = \
"""
walker init {
a=[1,2,3,4,5,6,7,8,9];
report a[4:7];
}
"""
destroy_and_misc = \
"""
node person: has name, age, birthday, profession;
edge friend: has meeting_place;
edge family: has kind;
walker init {
person1 = spawn here -[friend(meeting_place = "college")]->
node::person(name = "Josh", age = 32);
person2 = spawn here -[family(kind = "sister")] ->
node::person(name = "Jane", age = 30);
report person1.name;
destroy person1.name;
report person1.context;
person1.name="pete";
report person1.context;
a=[1,2,3];
destroy a[1];
report a;
b={'a': 'b', 'c':'d'};
destroy b['c'];
report b;
a=[1,2,3,5,6,7,8,9];
destroy a[2:4];
report a;
a[2:4]=[45,33];
report a;
destroy a;
report a;
person1.banana=45;
report person1.context;
report 'age' in person1.context;
}
"""
arbitrary_assign_on_element = \
"""
node person: has name, age, birthday, profession;
walker init {
some = spawn here --> node::person;
some.apple = 45;
report some.context;
}
"""
try_else_stmts = \
"""
walker init {
a=null;
try {a=2/0;}
else with err {report err;}
try {a=2/0;}
else {report 'dont need err';}
try {a=2/0;}
try {a=2/0;}
report a;
try {a=2/1;}
report a;
}
"""
node_edge_same_name = \
"""
node person: has name, age, birthday, profession;
edge person: has meeting_place;
walker init {
person1 = spawn here -[person(meeting_place = "college")]->
node::person(name = "Josh", age = 32);
report -->.edge[0].context;
report -->[0].context;
}
"""
testcases = \
"""
node testnode {
has yo, mama;
}
node apple {
has v1, v2;
}
node banana {
has x1, x2;
}
graph dummy {
has anchor graph_root;
spawn {
graph_root = spawn node::testnode (yo="Hey yo!");
n1=spawn node::apple(v1="I'm apple");
n2=spawn node::banana(x1="I'm banana");
graph_root --> n1 --> n2;
}
}
walker init {
has num=4;
report here.context;
report num;
take -->;
}
test "basic test with refs"
with graph::dummy by walker::init;
test "test with refs and assert block"
with graph::dummy by walker::init {
report "ASSERT BLOCK";
}
test "test with graph ref and walker block"
with graph::dummy by walker {
report here.context;
report "IN generic walker";
take -->;
}
test "test with graph block and walker ref"
with graph {
has anchor graph_root;
spawn {
graph_root = spawn node::testnode (yo="Hey yo!");
n1=spawn node::apple(v1="I'm apple");
n2=spawn node::banana(x1="I'm banana");
graph_root --> n1 --> n2;
graph_root --> n2;
}
} by walker::init {
report "ASSERT BLOCK";
}
"""
testcase_asserts = \
"""
node testnode {
has yo, mama;
}
node apple {
has v1, v2;
}
node banana {
has x1, x2;
}
graph dummy {
has anchor graph_root;
spawn {
graph_root = spawn node::testnode (yo="Hey yo!");
n1=spawn node::apple(v1="I'm apple");
n2=spawn node::banana(x1="I'm banana");
graph_root --> n1 --> n2;
}
}
walker init {
has num=4;
report here.context;
report num;
take -->;
}
test "assert should be valid"
with graph::dummy by walker::init {
assert (num==4);
assert (here.x1=="I'm banana");
assert <--[0].v1=="I'm apple";
}
test "assert should fail"
with graph::dummy by walker::init {
assert (num==4);
assert (here.x1=="I'm banana");
assert <--[0].v1=="I'm Apple";
}
test "assert should fail, add internal except"
with graph::dummy by walker::init {
assert (num==4);
assert (here.x1=="I'm banana");
assert <--[10].v1=="I'm apple";
}
"""
report_not_to_jacset = \
"""
node testnode {
has yo, mama;
}
walker init {
spawn here --> node::testnode;
report -->;
}
"""
walker_spawn_unwrap_check = \
"""
node testnode {
has yo, mama;
}
walker print {
has anchor nd;
nd=here;
}
walker init {
report &(spawn here walker::print);
}
"""
|
the-stack_0_1836 | from SloppyCell.ReactionNetworks import *
# Modifications to SBML...
# Removed function LD, because it used 'ceil' which is not something we can deal
# with
# Replaced variable value_of_LD with light (more descriptive name)
# Replaced calls to LD with light
# Removed timeOfDay and dayLength variables
net = IO.from_SBML_file('BIOMD055-noceil.xml', 'base')
net.compile()
# Set up a network that will switch light on/off at 12 hour intervals.
net1212 = net.copy('net_1212')
net1212.set_var_ic('light', 1)
net1212.add_parameter('turntime', 12, is_constant=False)
net1212.add_event('light_switch', 'gt(time, turntime)', {'light': '1-light',
'turntime': '12+time'})
mutant_net = net1212.copy('cca1lhy')
mutant_net.set_var_ic('p1', net.get_var_ic('p1')/1000)
# Run to the limit cycle
traj = Dynamics.integrate(net1212, [0, 24*10])
net1212.set_var_ics(traj.get_var_vals_index(-1))
# Go to limit cycle
traj = Dynamics.integrate(mutant_net, [0, 24*10])
mutant_net.set_var_ics(traj.get_var_vals_index(-1))
net_12L_12D_12L_D = net1212.copy('net_12L_12D_12L_D')
net_12L_12D_12L_D.remove_component('light_switch')
net_12L_12D_12L_D.remove_component('turntime')
net_12L_12D_12L_D.set_var_ic('light', 1)
net_12L_12D_12L_D.add_event('off_12', 'gt(time, 12)', {'light': 0})
net_12L_12D_12L_D.add_event('on_24', 'gt(time, 24)', {'light': 1})
net_12L_12D_12L_D.add_event('off_36', 'gt(time, 36)', {'light': 0})
# Run for twelve more hours to get to the dark part of the cycle
traj = Dynamics.integrate(net1212, [0, 12])
net1212.set_var_ics(traj.get_var_vals_index(-1))
net_12D_L = net1212.copy('net_12D_L')
net_12D_L.remove_component('light_switch')
net_12D_L.remove_component('turntime')
net_12D_L.set_var_ic('light', 0)
net_12D_L.add_event('on_12', 'gt(time, 12)', {'light': 1})
mutant_12L_12D_12L_D = mutant_net.copy('mutant_12L_12D_12L_D')
mutant_12L_12D_12L_D.remove_component('light_switch')
mutant_12L_12D_12L_D.remove_component('turntime')
mutant_12L_12D_12L_D.set_var_ic('light', 1)
mutant_12L_12D_12L_D.add_event('off_12', 'gt(time, 12)', {'light': 0})
mutant_12L_12D_12L_D.add_event('on_24', 'gt(time, 24)', {'light': 1})
mutant_12L_12D_12L_D.add_event('off_36', 'gt(time, 36)', {'light': 0})
trajm = Dynamics.integrate(mutant_12L_12D_12L_D, [0, 96])
# Run for twelve more hours to get to the dark part of the cycle
traj = Dynamics.integrate(mutant_net, [0, 12])
mutant_net.set_var_ics(traj.get_var_vals_index(-1))
mutant_12D_L = mutant_net.copy('mutant_12D_L')
mutant_12D_L.remove_component('light_switch')
mutant_12D_L.remove_component('turntime')
mutant_12D_L.set_var_ic('light', 0)
mutant_12D_L.add_event('on_12', 'gt(time, 12)', {'light': 1})
networks = [net_12L_12D_12L_D, net_12D_L, mutant_12L_12D_12L_D, mutant_12D_L]
int_times = [(0, 96), (0, 96), (0, 48), (0,48)]
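# Not part of the original script: a hedged sketch showing one way the
# `networks` / `int_times` pairs defined above could be used, reusing the same
# Dynamics.integrate call pattern as earlier in this file. The __main__ guard is
# an assumption about intended use (the lists may instead be imported elsewhere).
if __name__ == '__main__':
    trajectories = [Dynamics.integrate(net, list(times))
                    for net, times in zip(networks, int_times)]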
|
the-stack_0_1839 | # Copyright (c) 2017-2019 Datasud.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.core.exceptions import ValidationError
from django import forms
from django.forms.models import ModelChoiceIterator
from django.utils import timezone
from idgo_admin.models import Category
from idgo_admin.models import Dataset
from idgo_admin.models import DataType
from idgo_admin.models import Granularity
from idgo_admin.models import License
from idgo_admin.models import Organisation
from idgo_admin.models import Support
import re
from taggit.forms import TagField
from taggit.forms import TagWidget
CKAN_URL = settings.CKAN_URL
DEFAULT_CONTACT_EMAIL = settings.DEFAULT_CONTACT_EMAIL
DEFAULT_PLATFORM_NAME = settings.DEFAULT_PLATFORM_NAME
DOMAIN_NAME = settings.DOMAIN_NAME
GEONETWORK_URL = settings.GEONETWORK_URL
TODAY = timezone.now().date().strftime('%d/%m/%Y')
# DatatypeField definition
# ========================
class DatatypeModelIterator(ModelChoiceIterator):
def __iter__(self):
if self.field.empty_label is not None:
yield ("", self.field.empty_label)
queryset = self.queryset
if not queryset._prefetch_related_lookups:
queryset = queryset.iterator()
for obj in queryset:
if obj.slug == 'donnees-moissonnees':
continue
yield self.choice(obj)
class DatatypeModelMultipleChoiceField(forms.ModelMultipleChoiceField):
iterator = DatatypeModelIterator
class DatatypeField(DatatypeModelMultipleChoiceField):
def __init__(self, *args, **kwargs):
kwargs.setdefault('label', "Type de données")
kwargs.setdefault('required', False)
kwargs.setdefault('widget', forms.CheckboxSelectMultiple())
kwargs.setdefault('queryset', DataType.objects.all())
super().__init__(*args, **kwargs)
# =================
# Dataset edit form
# =================
class DatasetForm(forms.ModelForm):
class Meta(object):
model = Dataset
fields = (
'broadcaster_email',
'broadcaster_name',
'categories',
'data_type',
'date_creation',
'date_modification',
'date_publication',
'description',
'geocover',
'granularity',
'keywords',
'license',
'organisation',
'owner_email',
'owner_name',
'published',
'support',
'thumbnail',
'update_frequency',
'title',
'slug')
title = forms.CharField(
label="Titre*",
required=True,
widget=forms.Textarea(
attrs={
'placeholder': "Titre de votre jeu de données",
'rows': 1,
},
),
)
slug = forms.CharField(
label="URL du jeu de données",
required=False,
max_length=100,
widget=forms.TextInput(
attrs={
'addon_before': '{}/dataset/'.format(CKAN_URL),
'addon_before_class': 'input-group-addon',
'addon_after': '<button class="btn btn-default" type="button" />',
'addon_after_class': 'input-group-btn',
'autocomplete': 'off',
'readonly': True,
'placeholder': '',
},
),
)
description = forms.CharField(
label="Description",
required=False,
widget=forms.Textarea(
attrs={
'placeholder': "Vous pouvez utiliser le langage Markdown ici",
},
),
)
class CustomClearableFileInput(forms.ClearableFileInput):
template_name = 'idgo_admin/widgets/file_drop_zone.html'
thumbnail = forms.FileField(
label="Illustration",
required=False,
widget=CustomClearableFileInput(
attrs={
'value': None,
'max_size_info': 1048576,
},
),
)
keywords = TagField(
label="Liste de mots-clés",
required=False,
widget=TagWidget(
attrs={
'autocomplete': 'off',
'class': 'typeahead',
'placeholder': "Utilisez la virgule comme séparateur",
},
),
)
categories = forms.ModelMultipleChoiceField(
label="Catégories (sélectionnez dans la liste ci-dessous une ou plusieurs catégories)",
required=False,
queryset=Category.objects.all(),
widget=forms.CheckboxSelectMultiple(),
)
date_creation = forms.DateField(
label="Date de création",
required=False,
widget=forms.DateInput(
attrs={
'autocomplete': 'off',
'class': 'datepicker',
'placeholder': "{0} (par défaut)".format(TODAY),
},
),
)
date_modification = forms.DateField(
label="Date de dernière modification",
required=False,
widget=forms.DateInput(
attrs={
'autocomplete': 'off',
'class': 'datepicker',
'placeholder': "{0} (par défaut)".format(TODAY),
},
),
)
date_publication = forms.DateField(
label="Date de publication",
required=False,
widget=forms.DateInput(
attrs={
'autocomplete': 'off',
'class': 'datepicker',
'placeholder': "{0} (par défaut)".format(TODAY),
},
),
)
update_frequency = forms.ChoiceField(
label="Fréquence de mise à jour",
required=False,
choices=Dataset.FREQUENCY_CHOICES,
)
geocover = forms.ChoiceField(
label="Couverture géographique",
required=False,
choices=Dataset.GEOCOVER_CHOICES,
)
granularity = forms.ModelChoiceField(
label="Granularité de la couverture territoriale",
empty_label="Sélectionnez une valeur",
required=False,
queryset=Granularity.objects.all().order_by('order'),
)
organisation = forms.ModelChoiceField(
label="Organisation à laquelle est rattaché ce jeu de données*",
empty_label="Sélectionnez une organisation",
required=True,
queryset=Organisation.objects.all(),
)
license = forms.ModelChoiceField(
label="Licence*",
empty_label="Sélectionnez une licence",
required=True,
queryset=License.objects.all(),
)
support = forms.ModelChoiceField(
label="Support technique",
empty_label="Aucun",
required=False,
queryset=Support.objects.all(),
)
data_type = DatatypeField(
        # See the definition above
)
owner_name = forms.CharField(
label="Nom du producteur",
required=False,
)
owner_email = forms.EmailField(
label="Adresse e-mail du producteur",
required=False,
error_messages={
'invalid': "L'adresse e-mail est invalide.",
},
)
broadcaster_name = forms.CharField(
label="Nom du diffuseur",
required=False,
)
broadcaster_email = forms.EmailField(
label="Adresse e-mail du diffuseur",
required=False,
error_messages={
'invalid': "L'adresse e-mail est invalide.",
},
)
published = forms.BooleanField(
label="Publier le jeu de données",
required=False,
initial=True,
)
def __init__(self, *args, **kwargs):
self.include_args = kwargs.pop('include', {})
super().__init__(*args, **kwargs)
instance = kwargs.get('instance', None)
owner = instance \
and instance.editor or self.include_args.get('user')
self.fields['organisation'].queryset = Organisation.objects.filter(
liaisonscontributeurs__user=owner,
liaisonscontributeurs__validated_on__isnull=False)
self.fields['owner_name'].initial = owner.get_full_name()
self.fields['owner_name'].widget.attrs['placeholder'] = \
'{} (valeur par défaut)'.format(owner.get_full_name())
self.fields['owner_email'].initial = owner.email
self.fields['owner_email'].widget.attrs['placeholder'] = \
'{} (valeur par défaut)'.format(owner.email)
self.fields['broadcaster_name'].widget.attrs['placeholder'] = \
instance and instance.support and instance.support.name or DEFAULT_PLATFORM_NAME
self.fields['broadcaster_email'].widget.attrs['placeholder'] = \
instance and instance.support and instance.support.email or DEFAULT_CONTACT_EMAIL
if instance and instance.thumbnail:
self.fields['thumbnail'].widget.attrs['value'] = instance.thumbnail.url
if not instance:
self.fields['granularity'].initial = 'indefinie'
def clean(self):
title = self.cleaned_data.get('title')
if not re.match('^[a-z0-9\-]{1,100}$', self.cleaned_data.get('slug')):
self.add_error('slug', (
"Seuls les caractères alphanumériques et le tiret sont "
"autorisés (100 caractères maximum)."))
raise ValidationError('KeywordsError')
if self.include_args['identification']:
dataset = Dataset.objects.get(id=self.include_args['id'])
if title != dataset.title and Dataset.objects.filter(title=title).exists():
self.add_error('title', 'Ce nom est réservé.')
raise ValidationError("Dataset '{0}' already exists".format(title))
if not self.include_args['identification'] \
and Dataset.objects.filter(title=title).exists():
self.add_error('title', 'Le jeu de données "{0}" existe déjà'.format(title))
raise ValidationError("Dataset '{0}' already exists".format(title))
kwords = self.cleaned_data.get('keywords')
if kwords:
for w in kwords:
if len(w) < 2:
self.add_error('keywords', "La taille minimum pour un mot clé est de 2 caractères. ")
raise ValidationError("KeywordsError")
regex = '^[a-zA-Z0-9áàâäãåçéèêëíìîïñóòôöõúùûüýÿæœÁÀÂÄÃÅÇÉÈÊËÍÌÎÏÑÓÒÔÖÕÚÙÛÜÝŸÆŒ\._\-\s]*$'
if not re.match(regex, w):
self.add_error('keywords', "Les mots-clés ne peuvent pas contenir de caractères spéciaux. ")
raise ValidationError('KeywordsError')
return self.cleaned_data
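# Not part of the original module: a hypothetical helper sketching how the form
# appears to be bound, based on the `include` keys read in __init__ and clean()
# ('user', 'identification', 'id'). The request/dataset arguments are assumptions.
def _bind_dataset_form_sketch(request, dataset):
    return DatasetForm(request.POST, request.FILES,
                       include={'user': request.user,
                                'identification': True,
                                'id': dataset.id})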
|
the-stack_0_1840 | """
pytoch Edgecortix backend
"""
# pylint: disable=unused-argument,missing-docstring
import os
import torch
import backend
import ip_runtime
import torchvision
import torchvision.transforms as transforms
import tvm
from tvm import relay
from tvm.relay import mera
from tqdm import tqdm
from PIL import Image
from torch.utils.data import DataLoader, Dataset
class CalibrationDataset(Dataset):
def __init__(self, root, files, transform):
with open(files, 'r') as f:
self.files = [os.path.join(root, fn.strip()) for fn in f.readlines()]
self.transform = transform
def __getitem__(self, idx):
image = Image.open(self.files[idx]).convert('RGB')
image = self.transform(image)
return image
def __len__(self):
return len(self.files)
class BackendEdgecortix(backend.Backend):
def __init__(self, dataset_path, dataset_calibration_list):
super(BackendEdgecortix, self).__init__()
self.sess = None
self.model = None
self.iprt = None
self.device = "cpu"
self.dataset_path = dataset_path
self.dataset_calibration_list = dataset_calibration_list
def version(self):
return ""
def name(self):
return "edgecortix"
def image_format(self):
return "NHWC"
def quantize_model(self, transform, quantization_backend='fbgemm'):
print(torch.backends.quantized.supported_engines)
print(quantization_backend)
if quantization_backend not in torch.backends.quantized.supported_engines:
raise RuntimeError("Quantized backend not supported ")
torch.backends.quantized.engine = quantization_backend
self.model.cpu()
self.model.eval()
self.model.fuse_model()
dataset = CalibrationDataset(root=self.dataset_path, files=self.dataset_calibration_list, transform=transform)
dataloader = DataLoader(dataset, batch_size=1)
self.model.qconfig = torch.quantization.get_default_qconfig(quantization_backend)
torch.quantization.prepare(self.model, inplace=True)
for x in tqdm(dataloader):
self.model(x)
torch.quantization.convert(self.model, inplace=True)
def compile_model(self, input_shape, torch_input_shape, output_dir, config):
inputs = [("input0", input_shape)]
input_layout = self.image_format()
with torch.no_grad():
traced_model = torch.jit.trace(self.model, torch.rand(torch_input_shape)).eval()
mod, params = relay.frontend.from_pytorch(traced_model, inputs, layout=input_layout)
with mera.build_config(target="IP", **config):
mera.build(mod, params, output_dir=output_dir, host_arch="x86", layout=input_layout)
def load(self, model_path, inputs=None, outputs=None):
arch = {"arch": 400, "scheduler_config": {"mode": "Slow"}}
if model_path == 'torchvision-resnet50':
self.model = torchvision.models.quantization.resnet50(pretrained=True, progress=True, quantize=False)
self.model.eval()
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
input_shape = (1, 224, 224, 3) # NHWC
torch_input_shape = (1, 3, 224, 224) # NCHW
elif model_path == 'torchvision-mobilenetv2':
self.model = torchvision.models.quantization.mobilenet.mobilenet_v2(pretrained=True, progress=True, quantize=False)
self.model.eval()
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
input_shape = (1, 224, 224, 3) # NHWC
torch_input_shape = (1, 3, 224, 224) # NCHW
arch = {
"arch": 401,
"scheduler_config": {"mode": "Slow"}
}
else:
raise RuntimeError("Preset model not available: ", model_path)
ec_dir = "./ec-" + model_path
if not os.path.exists(ec_dir):
self.quantize_model(transform)
self.compile_model(input_shape, torch_input_shape, ec_dir, arch)
self.iprt = ip_runtime.IPRuntime()
self.iprt.Setup(ec_dir)
# dummy
self.inputs = ["input"]
self.outputs = ["output"]
return self
def predict(self, feed):
key=[key for key in feed.keys()][0]
output_ip = torch.from_numpy(self.iprt.Run(feed[key])[0])
return [output_ip]
|
the-stack_0_1841 | # kafka_install.py
# TODO: capture/pipe all output of these commands somewhere they can be used for debugging. add more confirmation output.
# resources:
# https://www.digitalocean.com/community/tutorials/initial-server-setup-with-ubuntu-18-04
# https://askubuntu.com/questions/94060/run-adduser-non-interactively
# https://www.digitalocean.com/community/tutorials/ufw-essentials-common-firewall-rules-and-commands
# https://askubuntu.com/questions/746413/trying-to-install-java-8-unable-to-locate-package-openjdk-8-jre
# https://www.digitalocean.com/community/tutorials/how-to-install-apache-kafka-on-ubuntu-18-04
# https://security.stackexchange.com/questions/45712/how-secure-is-nopasswd-in-passwordless-sudo-mode
# https://kafka-python.readthedocs.io/en/master/install.html
# https://help.ubuntu.com/lts/serverguide/firewall.html
import sys,os,subprocess,socket
from uuid import uuid4
_,install_command = sys.argv
ip_addresses = []
admin_user = ''
if install_command == 'install_root':
# add new user=ADMIN_USER and user=kafka
subprocess.call(['adduser','--disabled-password','--gecos','""',admin_user])
subprocess.call(['adduser','--disabled-password','--gecos','""','kafka'])
# grant ADMIN_USER and kafka sudo privileges
subprocess.call(['usermod','-aG','sudo',admin_user])
subprocess.call(['usermod','-aG','sudo','kafka'])
# setup firewall and let applications be managed by name using builtin ufw
subprocess.call(['ufw','app','list'])
subprocess.call(['ufw','allow','OpenSSH'])
subprocess.call(['ufw','--force','enable'])
subprocess.call(['ufw','status'])
# setup ssh access for user=ADMIN_USER and user=kafka
subprocess.call(['rsync --archive --chown='+admin_user+':'+admin_user+' ~/.ssh /home/'+admin_user],shell=True)
subprocess.call(['rsync --archive --chown=kafka:kafka ~/.ssh /home/kafka'],shell=True)
    # allow user=ADMIN_USER and user=kafka to execute sudo commands without password prompt
fn = '/etc/sudoers'
f = open(fn,'r')
s = f.read()
f.close()
s = s +'\n'+ admin_user+' ALL=(ALL) NOPASSWD:ALL'
s = s +'\n'+ 'kafka ALL=(ALL) NOPASSWD:ALL'
f = open(fn,'w')
f.write(s)
f.close()
elif install_command == 'install_kafka':
# install openjdk 8
subprocess.call(['sudo','add-apt-repository','ppa:openjdk-r/ppa','-y'])
subprocess.call(['sudo','apt-get','update'])
subprocess.call(['sudo','apt-get','install','openjdk-8-jre','-y'])
# downloads, extract, install
subprocess.call(['mkdir','/home/kafka/Downloads'])
subprocess.call(['curl','https://www.apache.org/dist/kafka/2.1.1/kafka_2.11-2.1.1.tgz','-o','/home/kafka/Downloads/kafka.tgz'])
subprocess.call(['mkdir','/home/kafka/kafka'])
os.chdir('/home/kafka/kafka')
subprocess.call(['tar','-xvzf','/home/kafka/Downloads/kafka.tgz','--strip','1'])
# set kafka configs
fn = '/home/kafka/kafka/config/server.properties'
f = open(fn,'r')
s = f.read()
f.close()
s = s +'\n'+ 'delete.topic.enable=true'
f = open(fn,'w')
f.write(s)
f.close()
# set zookeeper unit definition
fn = '/etc/systemd/system/zookeeper.service'
subprocess.call(['sudo','touch',fn])
subprocess.call(['sudo','chmod','777',fn])
unit_definition = "[Unit]\nRequires=network.target remote-fs.target\nAfter=network.target remote-fs.target\n\n[Service]\nType=simple\nUser=kafka\nExecStart=/home/kafka/kafka/bin/zookeeper-server-start.sh /home/kafka/kafka/config/zookeeper.properties\nExecStop=/home/kafka/kafka/bin/zookeeper-server-stop.sh\nRestart=on-abnormal\n\n[Install]\nWantedBy=multi-user.target"
f = open(fn,'w')
f.write(unit_definition)
f.close()
# set kafka unit definition
fn = '/etc/systemd/system/kafka.service'
subprocess.call(['sudo','touch',fn])
subprocess.call(['sudo','chmod','777',fn])
    unit_definition = "[Unit]\nRequires=zookeeper.service\nAfter=zookeeper.service\n\n[Service]\nType=simple\nUser=kafka\nExecStart=/bin/sh -c '/home/kafka/kafka/bin/kafka-server-start.sh /home/kafka/kafka/config/server.properties > /home/kafka/kafka/kafka.log 2>&1'\nExecStop=/home/kafka/kafka/bin/kafka-server-stop.sh\nRestart=on-abnormal\n\n[Install]\nWantedBy=multi-user.target"
f = open(fn,'w')
f.write(unit_definition)
f.close()
# prepare network for running kafka and zookeeper
ip_address = socket.gethostbyname(socket.gethostname())
subprocess.call(['sudo','hostnamectl','set-hostname',ip_address])
subprocess.call(['sudo','ufw','allow','9092'])
subprocess.call(['sudo','ufw','allow','2181'])
# start kafka and check
subprocess.call(['sudo','systemctl','start','kafka'])
subprocess.call(['sudo','journalctl','-u','kafka'])
# enable on boot
#subprocess.call(['sudo','systemctl','enable','kafka']) DOES NOT WORK.
# install pip3
subprocess.call(['sudo','apt-get','-y','install','python3-pip'])
# install kafka-python with optionals
subprocess.call(['sudo','pip3','install','kafka-python'])
subprocess.call(['sudo','pip3','install','lz4'])
subprocess.call(['sudo','pip3','install','crc32c'])
# assign unique broker_id and zookeeper connection params
fn = '/home/kafka/kafka/config/server.properties'
f = open(fn,'r')
s = f.read()
f.close()
broker_id = int(uuid4())
s = s.replace('broker.id=0','broker.id='+str(broker_id))
sources = []
for ip_address in ip_addresses:
source = ip_address+':2181'
sources.append(source)
sources_str = ','.join(sources)
s = s.replace('zookeeper.connect=localhost:2181','zookeeper.connect='+sources_str)
f = open(fn,'w')
f.write(s)
f.close()
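# --- Not part of the original script: optional smoke test --------------------
# A hedged sketch using kafka-python (installed above) to check that the broker
# answers on the default port. The command name 'smoke_test' and the topic
# 'install-smoke-test' are made up for illustration.
elif install_command == 'smoke_test':
    from kafka import KafkaProducer, KafkaConsumer
    producer = KafkaProducer(bootstrap_servers='localhost:9092')
    producer.send('install-smoke-test', b'hello from kafka_install')
    producer.flush()
    consumer = KafkaConsumer('install-smoke-test',
                             bootstrap_servers='localhost:9092',
                             auto_offset_reset='earliest',
                             consumer_timeout_ms=10000)
    for message in consumer:
        print(message.value)
        break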
|
the-stack_0_1842 | class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
dic = {}
for i in range(len(nums)):
if nums[i] in dic:
return [dic[nums[i]], i]
else:
dic[target-nums[i]] = i
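# Not part of the original submission: a quick sanity check of the
# complement-lookup idea using the classic LeetCode examples.
if __name__ == '__main__':
    assert Solution().twoSum([2, 7, 11, 15], 9) == [0, 1]
    assert Solution().twoSum([3, 2, 4], 6) == [1, 2]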
|
the-stack_0_1844 | import torch
import torch.nn as nn
from models.TransBTS.Transformer import TransformerModel
from models.TransBTS.PositionalEncoding import FixedPositionalEncoding,LearnedPositionalEncoding
from models.TransBTS.Unet_skipconnection import Unet
class TransformerBTS(nn.Module):
def __init__(
self,
img_dim,
patch_dim,
num_channels,
embedding_dim,
num_heads,
num_layers,
hidden_dim,
dropout_rate=0.0,
attn_dropout_rate=0.0,
conv_patch_representation=True,
positional_encoding_type="learned",
):
super(TransformerBTS, self).__init__()
assert embedding_dim % num_heads == 0
assert img_dim % patch_dim == 0
self.img_dim = img_dim
self.embedding_dim = embedding_dim
self.num_heads = num_heads
self.patch_dim = patch_dim
self.num_channels = num_channels
self.dropout_rate = dropout_rate
self.attn_dropout_rate = attn_dropout_rate
self.conv_patch_representation = conv_patch_representation
self.num_patches = int((img_dim // patch_dim) ** 3)
self.seq_length = self.num_patches
self.flatten_dim = 128 * num_channels
self.linear_encoding = nn.Linear(self.flatten_dim, self.embedding_dim)
if positional_encoding_type == "learned":
self.position_encoding = LearnedPositionalEncoding(
self.seq_length, self.embedding_dim, self.seq_length
)
elif positional_encoding_type == "fixed":
self.position_encoding = FixedPositionalEncoding(
self.embedding_dim,
)
self.pe_dropout = nn.Dropout(p=self.dropout_rate)
self.transformer = TransformerModel(
embedding_dim,
num_layers,
num_heads,
hidden_dim,
self.dropout_rate,
self.attn_dropout_rate,
)
self.pre_head_ln = nn.LayerNorm(embedding_dim)
if self.conv_patch_representation:
self.conv_x = nn.Conv3d(
128,
self.embedding_dim,
kernel_size=3,
stride=1,
padding=1
)
self.Unet = Unet(in_channels=4, base_channels=16, num_classes=4)
self.bn = nn.BatchNorm3d(128)
self.relu = nn.ReLU(inplace=True)
def encode(self, x):
if self.conv_patch_representation:
# combine embedding with conv patch distribution
x1_1, x2_1, x3_1, x = self.Unet(x)
x = self.bn(x)
x = self.relu(x)
x = self.conv_x(x)
x = x.permute(0, 2, 3, 4, 1).contiguous()
x = x.view(x.size(0), -1, self.embedding_dim)
else:
x = self.Unet(x)
x = self.bn(x)
x = self.relu(x)
x = (
x.unfold(2, 2, 2)
.unfold(3, 2, 2)
.unfold(4, 2, 2)
.contiguous()
)
x = x.view(x.size(0), x.size(1), -1, 8)
x = x.permute(0, 2, 3, 1).contiguous()
x = x.view(x.size(0), -1, self.flatten_dim)
x = self.linear_encoding(x)
x = self.position_encoding(x)
x = self.pe_dropout(x)
# apply transformer
x, intmd_x = self.transformer(x)
x = self.pre_head_ln(x)
return x1_1, x2_1, x3_1, x, intmd_x
def decode(self, x):
raise NotImplementedError("Should be implemented in child class!!")
def forward(self, x, auxillary_output_layers=[1, 2, 3, 4]):
x1_1, x2_1, x3_1, encoder_output, intmd_encoder_outputs = self.encode(x)
decoder_output = self.decode(
x1_1, x2_1, x3_1, encoder_output, intmd_encoder_outputs, auxillary_output_layers
)
if auxillary_output_layers is not None:
auxillary_outputs = {}
for i in auxillary_output_layers:
val = str(2 * i - 1)
_key = 'Z' + str(i)
auxillary_outputs[_key] = intmd_encoder_outputs[val]
return decoder_output
return decoder_output
def _get_padding(self, padding_type, kernel_size):
assert padding_type in ['SAME', 'VALID']
if padding_type == 'SAME':
_list = [(k - 1) // 2 for k in kernel_size]
return tuple(_list)
return tuple(0 for _ in kernel_size)
def _reshape_output(self, x):
x = x.view(
x.size(0),
int(self.img_dim / self.patch_dim),
int(self.img_dim / self.patch_dim),
int(self.img_dim / self.patch_dim),
self.embedding_dim,
)
x = x.permute(0, 4, 1, 2, 3).contiguous()
return x
class BTS(TransformerBTS):
def __init__(
self,
img_dim,
patch_dim,
num_channels,
num_classes,
embedding_dim,
num_heads,
num_layers,
hidden_dim,
dropout_rate=0.0,
attn_dropout_rate=0.0,
conv_patch_representation=True,
positional_encoding_type="learned",
):
super(BTS, self).__init__(
img_dim=img_dim,
patch_dim=patch_dim,
num_channels=num_channels,
embedding_dim=embedding_dim,
num_heads=num_heads,
num_layers=num_layers,
hidden_dim=hidden_dim,
dropout_rate=dropout_rate,
attn_dropout_rate=attn_dropout_rate,
conv_patch_representation=conv_patch_representation,
positional_encoding_type=positional_encoding_type,
)
self.num_classes = num_classes
self.Softmax = nn.Softmax(dim=1)
self.Enblock8_1 = EnBlock1(in_channels=self.embedding_dim)
self.Enblock8_2 = EnBlock2(in_channels=self.embedding_dim // 4)
self.DeUp4 = DeUp_Cat(in_channels=self.embedding_dim//4, out_channels=self.embedding_dim//8)
self.DeBlock4 = DeBlock(in_channels=self.embedding_dim//8)
self.DeUp3 = DeUp_Cat(in_channels=self.embedding_dim//8, out_channels=self.embedding_dim//16)
self.DeBlock3 = DeBlock(in_channels=self.embedding_dim//16)
self.DeUp2 = DeUp_Cat(in_channels=self.embedding_dim//16, out_channels=self.embedding_dim//32)
self.DeBlock2 = DeBlock(in_channels=self.embedding_dim//32)
self.endconv = nn.Conv3d(self.embedding_dim // 32, 4, kernel_size=1)
def decode(self, x1_1, x2_1, x3_1, x, intmd_x, intmd_layers=[1, 2, 3, 4]):
assert intmd_layers is not None, "pass the intermediate layers for MLA"
encoder_outputs = {}
all_keys = []
for i in intmd_layers:
val = str(2 * i - 1)
_key = 'Z' + str(i)
all_keys.append(_key)
encoder_outputs[_key] = intmd_x[val]
all_keys.reverse()
x8 = encoder_outputs[all_keys[0]]
x8 = self._reshape_output(x8)
x8 = self.Enblock8_1(x8)
x8 = self.Enblock8_2(x8)
y4 = self.DeUp4(x8, x3_1) # (1, 64, 32, 32, 32)
y4 = self.DeBlock4(y4)
y3 = self.DeUp3(y4, x2_1) # (1, 32, 64, 64, 64)
y3 = self.DeBlock3(y3)
y2 = self.DeUp2(y3, x1_1) # (1, 16, 128, 128, 128)
y2 = self.DeBlock2(y2)
y = self.endconv(y2) # (1, 4, 128, 128, 128)
y = self.Softmax(y)
return y
class EnBlock1(nn.Module):
def __init__(self, in_channels):
super(EnBlock1, self).__init__()
self.bn1 = nn.BatchNorm3d(512 // 4)
self.relu1 = nn.ReLU(inplace=True)
self.bn2 = nn.BatchNorm3d(512 // 4)
self.relu2 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv3d(in_channels, in_channels // 4, kernel_size=3, padding=1)
self.conv2 = nn.Conv3d(in_channels // 4, in_channels // 4, kernel_size=3, padding=1)
def forward(self, x):
x1 = self.conv1(x)
x1 = self.bn1(x1)
x1 = self.relu1(x1)
x1 = self.conv2(x1)
x1 = self.bn2(x1)
x1 = self.relu2(x1)
return x1
class EnBlock2(nn.Module):
def __init__(self, in_channels):
super(EnBlock2, self).__init__()
self.conv1 = nn.Conv3d(in_channels, in_channels, kernel_size=3, padding=1)
self.bn1 = nn.BatchNorm3d(512 // 4)
self.relu1 = nn.ReLU(inplace=True)
self.bn2 = nn.BatchNorm3d(512 // 4)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv3d(in_channels, in_channels, kernel_size=3, padding=1)
def forward(self, x):
x1 = self.conv1(x)
x1 = self.bn1(x1)
x1 = self.relu1(x1)
x1 = self.conv2(x1)
x1 = self.bn2(x1)
x1 = self.relu2(x1)
x1 = x1 + x
return x1
class DeUp_Cat(nn.Module):
def __init__(self, in_channels, out_channels):
super(DeUp_Cat, self).__init__()
self.conv1 = nn.Conv3d(in_channels, out_channels, kernel_size=1)
self.conv2 = nn.ConvTranspose3d(out_channels, out_channels, kernel_size=2, stride=2)
self.conv3 = nn.Conv3d(out_channels*2, out_channels, kernel_size=1)
def forward(self, x, prev):
x1 = self.conv1(x)
y = self.conv2(x1)
# y = y + prev
y = torch.cat((prev, y), dim=1)
y = self.conv3(y)
return y
class DeBlock(nn.Module):
def __init__(self, in_channels):
super(DeBlock, self).__init__()
self.bn1 = nn.BatchNorm3d(in_channels)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv3d(in_channels, in_channels, kernel_size=3, padding=1)
self.conv2 = nn.Conv3d(in_channels, in_channels, kernel_size=3, padding=1)
self.bn2 = nn.BatchNorm3d(in_channels)
self.relu2 = nn.ReLU(inplace=True)
def forward(self, x):
x1 = self.conv1(x)
x1 = self.bn1(x1)
x1 = self.relu1(x1)
x1 = self.conv2(x1)
x1 = self.bn2(x1)
x1 = self.relu2(x1)
x1 = x1 + x
return x1
def TransBTS(dataset='brats', _conv_repr=True, _pe_type="learned"):
if dataset.lower() == 'brats':
img_dim = 128
num_classes = 4
num_channels = 4
patch_dim = 8
aux_layers = [1, 2, 3, 4]
model = BTS(
img_dim,
patch_dim,
num_channels,
num_classes,
embedding_dim=512,
num_heads=8,
num_layers=4,
hidden_dim=4096,
dropout_rate=0.1,
attn_dropout_rate=0.1,
conv_patch_representation=_conv_repr,
positional_encoding_type=_pe_type,
)
return aux_layers, model
if __name__ == '__main__':
with torch.no_grad():
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
cuda0 = torch.device('cuda:0')
x = torch.rand((1, 4, 128, 128, 128), device=cuda0)
_, model = TransBTS(dataset='brats', _conv_repr=True, _pe_type="learned")
model.cuda()
y = model(x)
print(y.shape)
|
the-stack_0_1846 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import sys
import testtools
from tempest.api.compute import base
from tempest import clients
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest import test
CONF = config.CONF
class ServersNegativeTestJSON(base.BaseV2ComputeTest):
def setUp(self):
super(ServersNegativeTestJSON, self).setUp()
try:
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
except Exception:
self.__class__.server_id = self.rebuild_server(self.server_id)
def tearDown(self):
self.server_check_teardown()
super(ServersNegativeTestJSON, self).tearDown()
@classmethod
def resource_setup(cls):
super(ServersNegativeTestJSON, cls).resource_setup()
cls.client = cls.servers_client
cls.alt_os = clients.Manager(cls.isolated_creds.get_alt_creds())
cls.alt_client = cls.alt_os.servers_client
resp, server = cls.create_test_server(wait_until='ACTIVE')
cls.server_id = server['id']
@test.attr(type=['negative', 'gate'])
def test_server_name_blank(self):
# Create a server with name parameter empty
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
name='')
@test.attr(type=['negative', 'gate'])
def test_personality_file_contents_not_encoded(self):
# Use an unencoded file when creating a server with personality
file_contents = 'This is a test file.'
person = [{'path': '/etc/testfile.txt',
'contents': file_contents}]
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
personality=person)
@test.attr(type=['negative', 'gate'])
def test_create_with_invalid_image(self):
# Create a server with an unknown image
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
image_id=-1)
@test.attr(type=['negative', 'gate'])
def test_create_with_invalid_flavor(self):
# Create a server with an unknown flavor
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
flavor=-1,)
@test.attr(type=['negative', 'gate'])
def test_invalid_access_ip_v4_address(self):
# An access IPv4 address must match a valid address pattern
IPv4 = '1.1.1.1.1.1'
self.assertRaises(exceptions.BadRequest,
self.create_test_server, accessIPv4=IPv4)
@test.attr(type=['negative', 'gate'])
def test_invalid_ip_v6_address(self):
# An access IPv6 address must match a valid address pattern
IPv6 = 'notvalid'
self.assertRaises(exceptions.BadRequest,
self.create_test_server, accessIPv6=IPv6)
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@test.attr(type=['negative', 'gate'])
def test_resize_nonexistent_server(self):
# Resize a non-existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound,
self.client.resize,
nonexistent_server, self.flavor_ref)
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@test.attr(type=['negative', 'gate'])
def test_resize_server_with_non_existent_flavor(self):
# Resize a server with non-existent flavor
nonexistent_flavor = data_utils.rand_uuid()
self.assertRaises(exceptions.BadRequest, self.client.resize,
self.server_id, flavor_ref=nonexistent_flavor)
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@test.attr(type=['negative', 'gate'])
def test_resize_server_with_null_flavor(self):
# Resize a server with null flavor
self.assertRaises(exceptions.BadRequest, self.client.resize,
self.server_id, flavor_ref="")
@test.attr(type=['negative', 'gate'])
def test_reboot_non_existent_server(self):
# Reboot a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.client.reboot,
nonexistent_server, 'SOFT')
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@test.attr(type=['negative', 'gate'])
def test_pause_paused_server(self):
# Pause a paused server.
self.client.pause_server(self.server_id)
self.client.wait_for_server_status(self.server_id, 'PAUSED')
self.assertRaises(exceptions.Conflict,
self.client.pause_server,
self.server_id)
self.client.unpause_server(self.server_id)
@test.attr(type=['negative', 'gate'])
def test_rebuild_reboot_deleted_server(self):
# Rebuild and Reboot a deleted server
_, server = self.create_test_server()
self.client.delete_server(server['id'])
self.client.wait_for_server_termination(server['id'])
self.assertRaises(exceptions.NotFound,
self.client.rebuild,
server['id'], self.image_ref_alt)
self.assertRaises(exceptions.NotFound, self.client.reboot,
server['id'], 'SOFT')
@test.attr(type=['negative', 'gate'])
def test_rebuild_non_existent_server(self):
# Rebuild a non existent server
nonexistent_server = data_utils.rand_uuid()
meta = {'rebuild': 'server'}
new_name = data_utils.rand_name('server')
file_contents = 'Test server rebuild.'
personality = [{'path': '/etc/rebuild.txt',
'contents': base64.b64encode(file_contents)}]
self.assertRaises(exceptions.NotFound,
self.client.rebuild,
nonexistent_server,
self.image_ref_alt,
name=new_name, meta=meta,
personality=personality,
adminPass='rebuild')
@test.attr(type=['negative', 'gate'])
def test_create_numeric_server_name(self):
server_name = 12345
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
name=server_name)
@test.attr(type=['negative', 'gate'])
def test_create_server_name_length_exceeds_256(self):
# Create a server with name length exceeding 256 characters
server_name = 'a' * 256
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
name=server_name)
@test.attr(type=['negative', 'gate'])
def test_create_with_invalid_network_uuid(self):
# Pass invalid network uuid while creating a server
networks = [{'fixed_ip': '10.0.1.1', 'uuid': 'a-b-c-d-e-f-g-h-i-j'}]
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
networks=networks)
@test.attr(type=['negative', 'gate'])
def test_create_with_non_existent_keypair(self):
# Pass a non-existent keypair while creating a server
key_name = data_utils.rand_name('key')
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
key_name=key_name)
@test.attr(type=['negative', 'gate'])
def test_create_server_metadata_exceeds_length_limit(self):
# Pass really long metadata while creating a server
metadata = {'a': 'b' * 260}
self.assertRaises(exceptions.OverLimit,
self.create_test_server,
meta=metadata)
@test.attr(type=['negative', 'gate'])
def test_update_name_of_non_existent_server(self):
# Update name of a non-existent server
server_name = data_utils.rand_name('server')
new_name = data_utils.rand_name('server') + '_updated'
self.assertRaises(exceptions.NotFound, self.client.update_server,
server_name, name=new_name)
@test.attr(type=['negative', 'gate'])
def test_update_server_set_empty_name(self):
# Update name of the server to an empty string
server_name = data_utils.rand_name('server')
new_name = ''
self.assertRaises(exceptions.BadRequest, self.client.update_server,
server_name, name=new_name)
@test.attr(type=['negative', 'gate'])
def test_update_server_of_another_tenant(self):
# Update name of a server that belongs to another tenant
new_name = self.server_id + '_new'
self.assertRaises(exceptions.NotFound,
self.alt_client.update_server, self.server_id,
name=new_name)
@test.attr(type=['negative', 'gate'])
def test_update_server_name_length_exceeds_256(self):
        # Update the server name to exceed the name length limit
new_name = 'a' * 256
self.assertRaises(exceptions.BadRequest,
self.client.update_server,
self.server_id,
name=new_name)
@test.attr(type=['negative', 'gate'])
def test_delete_non_existent_server(self):
# Delete a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.client.delete_server,
nonexistent_server)
@test.attr(type=['negative', 'gate'])
def test_delete_a_server_of_another_tenant(self):
# Delete a server that belongs to another tenant
self.assertRaises(exceptions.NotFound,
self.alt_client.delete_server,
self.server_id)
@test.attr(type=['negative', 'gate'])
def test_delete_server_pass_negative_id(self):
# Pass an invalid string parameter to delete server
self.assertRaises(exceptions.NotFound, self.client.delete_server, -1)
@test.attr(type=['negative', 'gate'])
def test_delete_server_pass_id_exceeding_length_limit(self):
# Pass a server ID that exceeds length limit to delete server
self.assertRaises(exceptions.NotFound, self.client.delete_server,
sys.maxint + 1)
@test.attr(type=['negative', 'gate'])
def test_create_with_nonexistent_security_group(self):
# Create a server with a nonexistent security group
security_groups = [{'name': 'does_not_exist'}]
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
security_groups=security_groups)
@test.attr(type=['negative', 'gate'])
def test_get_non_existent_server(self):
# Get a non existent server details
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.client.get_server,
nonexistent_server)
@test.attr(type=['negative', 'gate'])
def test_stop_non_existent_server(self):
# Stop a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.servers_client.stop,
nonexistent_server)
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@test.attr(type=['negative', 'gate'])
def test_pause_non_existent_server(self):
# pause a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.client.pause_server,
nonexistent_server)
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@test.attr(type=['negative', 'gate'])
def test_unpause_non_existent_server(self):
# unpause a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.client.unpause_server,
nonexistent_server)
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@test.attr(type=['negative', 'gate'])
def test_unpause_server_invalid_state(self):
# unpause an active server.
self.assertRaises(exceptions.Conflict,
self.client.unpause_server,
self.server_id)
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@test.attr(type=['negative', 'gate'])
def test_suspend_non_existent_server(self):
# suspend a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.client.suspend_server,
nonexistent_server)
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@test.attr(type=['negative', 'gate'])
def test_suspend_server_invalid_state(self):
# suspend a suspended server.
resp, _ = self.client.suspend_server(self.server_id)
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'SUSPENDED')
self.assertRaises(exceptions.Conflict,
self.client.suspend_server,
self.server_id)
self.client.resume_server(self.server_id)
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@test.attr(type=['negative', 'gate'])
def test_resume_non_existent_server(self):
# resume a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.client.resume_server,
nonexistent_server)
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@test.attr(type=['negative', 'gate'])
def test_resume_server_invalid_state(self):
# resume an active server.
self.assertRaises(exceptions.Conflict,
self.client.resume_server,
self.server_id)
@test.attr(type=['negative', 'gate'])
def test_get_console_output_of_non_existent_server(self):
# get the console output for a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound,
self.client.get_console_output,
nonexistent_server, 10)
@test.attr(type=['negative', 'gate'])
def test_force_delete_nonexistent_server_id(self):
# force-delete a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound,
self.client.force_delete_server,
nonexistent_server)
@test.attr(type=['negative', 'gate'])
def test_restore_nonexistent_server_id(self):
# restore-delete a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound,
self.client.restore_soft_deleted_server,
nonexistent_server)
@test.attr(type=['negative', 'gate'])
def test_restore_server_invalid_state(self):
# we can only restore-delete a server in 'soft-delete' state
self.assertRaises(exceptions.Conflict,
self.client.restore_soft_deleted_server,
self.server_id)
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
@test.attr(type=['negative', 'gate'])
def test_shelve_non_existent_server(self):
# shelve a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.client.shelve_server,
nonexistent_server)
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
@test.attr(type=['negative', 'gate'])
def test_shelve_shelved_server(self):
# shelve a shelved server.
resp, server = self.client.shelve_server(self.server_id)
self.assertEqual(202, resp.status)
offload_time = CONF.compute.shelved_offload_time
if offload_time >= 0:
self.client.wait_for_server_status(self.server_id,
'SHELVED_OFFLOADED',
extra_timeout=offload_time)
else:
self.client.wait_for_server_status(self.server_id,
'SHELVED')
resp, server = self.client.get_server(self.server_id)
image_name = server['name'] + '-shelved'
params = {'name': image_name}
resp, images = self.images_client.list_images(params)
self.assertEqual(1, len(images))
self.assertEqual(image_name, images[0]['name'])
self.assertRaises(exceptions.Conflict,
self.client.shelve_server,
self.server_id)
self.client.unshelve_server(self.server_id)
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
@test.attr(type=['negative', 'gate'])
def test_unshelve_non_existent_server(self):
# unshelve a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.client.unshelve_server,
nonexistent_server)
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
@test.attr(type=['negative', 'gate'])
def test_unshelve_server_invalid_state(self):
# unshelve an active server.
self.assertRaises(exceptions.Conflict,
self.client.unshelve_server,
self.server_id)
|
the-stack_0_1848 | # Copyright (c) 2017 Yingxin Cheng
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class WFException(Exception):
def __init__(self, msg, e=None):
assert isinstance(msg, str)
if e:
assert isinstance(e, Exception)
self.e = e
super(WFException, self).__init__(msg)
def __str__(self):
return self._to_str(0)
def _to_str(self, indent):
ret = "\n%s%s" % ("> "*indent, self.args[0])
if self.e:
if isinstance(self.e, WFException):
ret += self.e._to_str(indent+1)
else:
ret += "\n%s%r" % ("> "*(indent+1), self.e)
return ret
|
the-stack_0_1849 | # coding: utf-8
"""
Selling Partner API for FBA Inventory
The Selling Partner API for FBA Inventory lets you programmatically retrieve information about inventory in Amazon's fulfillment network. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class InventorySummary(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'asin': 'str',
'fn_sku': 'str',
'seller_sku': 'str',
'condition': 'str',
'inventory_details': 'InventoryDetails',
'last_updated_time': 'datetime',
'product_name': 'str',
'total_quantity': 'int'
}
attribute_map = {
'asin': 'asin',
'fn_sku': 'fnSku',
'seller_sku': 'sellerSku',
'condition': 'condition',
'inventory_details': 'inventoryDetails',
'last_updated_time': 'lastUpdatedTime',
'product_name': 'productName',
'total_quantity': 'totalQuantity'
}
def __init__(self, asin=None, fn_sku=None, seller_sku=None, condition=None, inventory_details=None, last_updated_time=None, product_name=None, total_quantity=None): # noqa: E501
"""InventorySummary - a model defined in Swagger""" # noqa: E501
self._asin = None
self._fn_sku = None
self._seller_sku = None
self._condition = None
self._inventory_details = None
self._last_updated_time = None
self._product_name = None
self._total_quantity = None
self.discriminator = None
if asin is not None:
self.asin = asin
if fn_sku is not None:
self.fn_sku = fn_sku
if seller_sku is not None:
self.seller_sku = seller_sku
if condition is not None:
self.condition = condition
if inventory_details is not None:
self.inventory_details = inventory_details
if last_updated_time is not None:
self.last_updated_time = last_updated_time
if product_name is not None:
self.product_name = product_name
if total_quantity is not None:
self.total_quantity = total_quantity
@property
def asin(self):
"""Gets the asin of this InventorySummary. # noqa: E501
The Amazon Standard Identification Number (ASIN) of an item. # noqa: E501
:return: The asin of this InventorySummary. # noqa: E501
:rtype: str
"""
return self._asin
@asin.setter
def asin(self, asin):
"""Sets the asin of this InventorySummary.
The Amazon Standard Identification Number (ASIN) of an item. # noqa: E501
:param asin: The asin of this InventorySummary. # noqa: E501
:type: str
"""
self._asin = asin
@property
def fn_sku(self):
"""Gets the fn_sku of this InventorySummary. # noqa: E501
Amazon's fulfillment network SKU identifier. # noqa: E501
:return: The fn_sku of this InventorySummary. # noqa: E501
:rtype: str
"""
return self._fn_sku
@fn_sku.setter
def fn_sku(self, fn_sku):
"""Sets the fn_sku of this InventorySummary.
Amazon's fulfillment network SKU identifier. # noqa: E501
:param fn_sku: The fn_sku of this InventorySummary. # noqa: E501
:type: str
"""
self._fn_sku = fn_sku
@property
def seller_sku(self):
"""Gets the seller_sku of this InventorySummary. # noqa: E501
The seller SKU of the item. # noqa: E501
:return: The seller_sku of this InventorySummary. # noqa: E501
:rtype: str
"""
return self._seller_sku
@seller_sku.setter
def seller_sku(self, seller_sku):
"""Sets the seller_sku of this InventorySummary.
The seller SKU of the item. # noqa: E501
:param seller_sku: The seller_sku of this InventorySummary. # noqa: E501
:type: str
"""
self._seller_sku = seller_sku
@property
def condition(self):
"""Gets the condition of this InventorySummary. # noqa: E501
The condition of the item as described by the seller (for example, New Item). # noqa: E501
:return: The condition of this InventorySummary. # noqa: E501
:rtype: str
"""
return self._condition
@condition.setter
def condition(self, condition):
"""Sets the condition of this InventorySummary.
The condition of the item as described by the seller (for example, New Item). # noqa: E501
:param condition: The condition of this InventorySummary. # noqa: E501
:type: str
"""
self._condition = condition
@property
def inventory_details(self):
"""Gets the inventory_details of this InventorySummary. # noqa: E501
:return: The inventory_details of this InventorySummary. # noqa: E501
:rtype: InventoryDetails
"""
return self._inventory_details
@inventory_details.setter
def inventory_details(self, inventory_details):
"""Sets the inventory_details of this InventorySummary.
:param inventory_details: The inventory_details of this InventorySummary. # noqa: E501
:type: InventoryDetails
"""
self._inventory_details = inventory_details
@property
def last_updated_time(self):
"""Gets the last_updated_time of this InventorySummary. # noqa: E501
The date and time that any quantity was last updated. # noqa: E501
:return: The last_updated_time of this InventorySummary. # noqa: E501
:rtype: datetime
"""
return self._last_updated_time
@last_updated_time.setter
def last_updated_time(self, last_updated_time):
"""Sets the last_updated_time of this InventorySummary.
The date and time that any quantity was last updated. # noqa: E501
:param last_updated_time: The last_updated_time of this InventorySummary. # noqa: E501
:type: datetime
"""
self._last_updated_time = last_updated_time
@property
def product_name(self):
"""Gets the product_name of this InventorySummary. # noqa: E501
The localized language product title of the item within the specific marketplace. # noqa: E501
:return: The product_name of this InventorySummary. # noqa: E501
:rtype: str
"""
return self._product_name
@product_name.setter
def product_name(self, product_name):
"""Sets the product_name of this InventorySummary.
The localized language product title of the item within the specific marketplace. # noqa: E501
:param product_name: The product_name of this InventorySummary. # noqa: E501
:type: str
"""
self._product_name = product_name
@property
def total_quantity(self):
"""Gets the total_quantity of this InventorySummary. # noqa: E501
The total number of units in an inbound shipment or in Amazon fulfillment centers. # noqa: E501
:return: The total_quantity of this InventorySummary. # noqa: E501
:rtype: int
"""
return self._total_quantity
@total_quantity.setter
def total_quantity(self, total_quantity):
"""Sets the total_quantity of this InventorySummary.
The total number of units in an inbound shipment or in Amazon fulfillment centers. # noqa: E501
:param total_quantity: The total_quantity of this InventorySummary. # noqa: E501
:type: int
"""
self._total_quantity = total_quantity
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(InventorySummary, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InventorySummary):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
the-stack_0_1850 | import argparse
import time
import statistics
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import brainflow
from brainflow.board_shim import BoardShim, BrainFlowInputParams, BoardIds, LogLevels
from brainflow.data_filter import DataFilter, FilterTypes, AggOperations
def main():
parser = argparse.ArgumentParser()
# use docs to check which parameters are required for specific board, e.g. for Cyton - set serial port
parser.add_argument('--ip-port', type=int, help='ip port', required=False, default=0)
parser.add_argument('--ip-protocol', type=int, help='ip protocol, check IpProtocolType enum', required=False,
default=0)
parser.add_argument('--ip-address', type=str, help='ip address', required=False, default='')
parser.add_argument('--serial-port', type=str, help='serial port', required=False, default='')
parser.add_argument('--mac-address', type=str, help='mac address', required=False, default='')
parser.add_argument('--other-info', type=str, help='other info', required=False, default='')
parser.add_argument('--streamer-params', type=str, help='other info', required=False, default='')
parser.add_argument('--board-id', type=int, help='board id, check docs to get a list of supported boards',
required=True)
parser.add_argument('--log', action='store_true')
parser.add_argument('--run-time', type=int, help='run time for one iteration in sec', required=True)
parser.add_argument('--num-iters', type=int, help='number of iterations', default=1)
parser.add_argument('--channels', type=str, help='channels to plot in format 0,1,2 by default plot all channels',
default=None)
parser.add_argument('--config-file', type=str, help='file with strings to send to device', default=None)
args = parser.parse_args()
params = BrainFlowInputParams()
params.ip_port = args.ip_port
params.serial_port = args.serial_port
params.mac_address = args.mac_address
params.other_info = args.other_info
params.ip_address = args.ip_address
params.ip_protocol = args.ip_protocol
if (args.log):
BoardShim.enable_dev_board_logger()
else:
BoardShim.disable_board_logger()
# for streaming board need to use master board id
master_board_id = args.board_id
if args.board_id == BoardIds.STREAMING_BOARD.value:
master_board_id = int(params.other_info)
board = BoardShim(args.board_id, params)
board.prepare_session()
if args.config_file:
with open(args.config_file) as file:
lines = file.readlines()
for line in lines:
board.config_board(line)
buffer_size = int(BoardShim.get_sampling_rate(master_board_id) * args.run_time * 1.2) # + 20% for safety
if master_board_id in (
BoardIds.CYTON_BOARD.value, BoardIds.CYTON_WIFI_BOARD.value, BoardIds.GANGLION_WIFI_BOARD.value):
bytes_per_package = 33
    elif master_board_id in (BoardIds.CYTON_DAISY_BOARD.value, BoardIds.CYTON_DAISY_WIFI_BOARD.value):
bytes_per_package = 66
elif master_board_id == BoardIds.SYNTHETIC_BOARD.value:
bytes_per_package = 104
elif master_board_id == BoardIds.NOVAXR_BOARD.value:
bytes_per_package = 72
else:
raise ValueError('unsupported board')
timestamp_channel = BoardShim.get_timestamp_channel(master_board_id)
package_num_channel = BoardShim.get_package_num_channel(master_board_id)
try:
cur_id = 0
for i in range(args.num_iters):
# wait for an input
input('Press Enter to continue...')
BoardShim.log_message(LogLevels.LEVEL_INFO.value, '\nRunning iteration %d/%d\n' % (i, args.num_iters))
# start stream and get data
board.start_stream(buffer_size, args.streamer_params)
time.sleep(args.run_time)
board.stop_stream()
data = board.get_board_data()
if data.shape[1] == 0:
BoardShim.log_message(LogLevels.LEVEL_WARN.value, '\nNo data received!\n')
continue
# calculate some metrics
total_bytes_received = bytes_per_package * data.shape[1]
packages_per_sec = float(data.shape[1]) / float(args.run_time);
timestamp_array = data[timestamp_channel]
time_diff_array = list()
for j in range(0, timestamp_array.size - 1):
time_diff_array.append(timestamp_array[j + 1] - timestamp_array[j])
package_num_array = data[package_num_channel]
lost_packages = 0
expected = package_num_array[0]
while cur_id < package_num_array.size:
if expected == 256:
expected = 0
if package_num_array[cur_id] != expected:
BoardShim.log_message(LogLevels.LEVEL_WARN.value,
'package loss detected: position %d package_num value %d expected value %d' % (
cur_id, package_num_array[cur_id], expected))
lost_packages = lost_packages + 1
else:
cur_id = cur_id + 1
expected = expected + 1
package_loss = (lost_packages / data.shape[1]) * 100
# provide results for iteration
BoardShim.log_message(LogLevels.LEVEL_INFO.value, '\nResults:\n')
BoardShim.log_message(LogLevels.LEVEL_INFO.value, 'package loss percent %f' % package_loss)
BoardShim.log_message(LogLevels.LEVEL_INFO.value,
'average time delta %f' % statistics.mean(time_diff_array))
BoardShim.log_message(LogLevels.LEVEL_INFO.value,
'std deviation of time delta %f' % statistics.pstdev(time_diff_array))
BoardShim.log_message(LogLevels.LEVEL_INFO.value, 'total packages received %d' % data.shape[1])
BoardShim.log_message(LogLevels.LEVEL_INFO.value, 'packages per sec %f' % packages_per_sec)
BoardShim.log_message(LogLevels.LEVEL_INFO.value, 'total bytes received %d' % total_bytes_received)
# plot data
eeg_channels = BoardShim.get_eeg_channels(master_board_id)
emg_channels = BoardShim.get_emg_channels(master_board_id)
total_channels = list()
if args.channels is not None:
selected_channels = [int(x) for x in args.channels.split(',')]
temp_channels = eeg_channels
for ch in emg_channels:
if ch not in temp_channels:
temp_channels.append(ch)
for j in range(len(temp_channels)):
if j in selected_channels:
total_channels.append(temp_channels[j])
else:
# for cyton/ganglion eeg_channels and emg_channels are the same array because we can not split it
# for novaxr its 2 different arrays, join them
total_channels = eeg_channels
for ch in emg_channels:
if ch not in total_channels:
total_channels.append(ch)
total_channels.append(timestamp_channel)
columns = list()
for j in range(len(total_channels) - 1):
columns.append('channel_%d' % (int(total_channels[j]) - 1))
columns.append('timestamp')
df = pd.DataFrame(np.transpose(data))
df.to_csv('all_data_%d.csv' % i)
df_to_plot = df[total_channels]
df_to_plot.columns = columns
df_to_plot.to_csv('selected_data_%d.csv' % i)
df_to_plot.plot(subplots=True, x='timestamp', style='.-')
plt.show()
finally:
# release session in the end
board.release_session()
if __name__ == "__main__":
main()
|
the-stack_0_1852 | from common_fixtures import * # NOQA
TEST_SERVICE_OPT_IMAGE = 'ibuildthecloud/helloworld'
TEST_SERVICE_OPT_IMAGE_LATEST = TEST_SERVICE_OPT_IMAGE + ':latest'
TEST_SERVICE_OPT_IMAGE_UUID = 'docker:' + TEST_SERVICE_OPT_IMAGE_LATEST
LB_IMAGE_UUID = "docker:sangeetha/testlbsd:latest"
logger = logging.getLogger(__name__)
if_compose_data_files = pytest.mark.skipif(
not os.environ.get('CATTLE_TEST_DATA_DIR'),
reason='Docker compose files directory location not set')
def test_rancher_compose_service(client,
rancher_cli_container,
socat_containers):
vol_container = client.create_container(imageUuid=TEST_IMAGE_UUID,
name=random_str(),
labels={"c1": "vol"}
)
vol_container = client.wait_success(vol_container)
volume_in_host = "/test/container"
volume_in_container = "/test/vol1"
docker_vol_value = volume_in_host + ":" + volume_in_container + ":ro"
cap_add = ["CHOWN"]
cap_drop = ["KILL"]
restart_policy = {"maximumRetryCount": 10, "name": "on-failure"}
dns_search = ['1.2.3.4']
dns_name = ['1.2.3.4']
domain_name = "rancher.io"
host_name = "test"
user = "root"
command = ["sleep", "9000"]
env_var = {"TEST_FILE": "/etc/testpath.conf"}
memory = 8000000
cpu_set = "0"
cpu_shares = 400
# Not including "dataVolumesFrom": [vol_container.id] since it is not
# implemented yet
launch_config = {"imageUuid": TEST_SERVICE_OPT_IMAGE_UUID,
"command": command,
"dataVolumes": [docker_vol_value],
"environment": env_var,
"capAdd": cap_add,
"capDrop": cap_drop,
"dnsSearch": dns_search,
"dns": dns_name,
"privileged": True,
"domainName": domain_name,
"stdinOpen": True,
"tty": True,
"memory": memory,
"cpuSet": cpu_set,
"cpuShares": cpu_shares,
"restartPolicy": restart_policy,
"directory": "/",
"hostname": host_name,
"user": user,
"labels":
{"io.rancher.scheduler.affinity:container_label":
"c1=vol"}
}
scale = 1
service, env = create_env_and_svc(client, launch_config,
scale)
launch_rancher_compose(client, env)
rancher_envs = client.list_stack(name=env.name+"rancher").data
assert len(rancher_envs) == 1
rancher_env = rancher_envs[0]
rancher_service = get_rancher_compose_service(
client, rancher_env.id, service)
check_container_in_service(client, rancher_service)
container_list = get_service_container_list(client, rancher_service)
dns_name.append(RANCHER_DNS_SERVER)
dns_search.append(rancher_env.name+"."+RANCHER_DNS_SEARCH)
dns_search.append(
rancher_service.name+"."+rancher_env.name+"."+RANCHER_DNS_SEARCH)
dns_search.append(RANCHER_DNS_SEARCH)
for c in container_list:
print(c)
docker_client = get_docker_client(c.hosts[0])
inspect = docker_client.inspect_container(c.externalId)
assert docker_vol_value in inspect["HostConfig"]["Binds"]
# assert inspect["HostConfig"]["VolumesFrom"] == \
# [vol_container.externalId]
assert inspect["HostConfig"]["PublishAllPorts"] is False
assert inspect["HostConfig"]["Privileged"] is True
assert inspect["Config"]["OpenStdin"] is True
assert inspect["Config"]["Tty"] is True
assert inspect["HostConfig"]["Dns"] == dns_name
assert inspect["HostConfig"]["DnsSearch"] == dns_search
assert inspect["Config"]["Hostname"] == host_name
assert inspect["Config"]["Domainname"] == domain_name
assert inspect["Config"]["User"] == user
assert inspect["HostConfig"]["CapAdd"] == cap_add
assert inspect["HostConfig"]["CapDrop"] == cap_drop
# assert inspect["Config"]["Cpuset"] == cpu_set
# No support for restart
assert inspect["HostConfig"]["RestartPolicy"]["Name"] == ""
assert \
inspect["HostConfig"]["RestartPolicy"]["MaximumRetryCount"] == 0
assert inspect["Config"]["Cmd"] == command
assert inspect["HostConfig"]["Memory"] == memory
assert "TEST_FILE=/etc/testpath.conf" in inspect["Config"]["Env"]
assert inspect["HostConfig"]["CpuShares"] == cpu_shares
delete_all(client, [env, rancher_env])
def test_rancher_compose_service_option_2(client,
rancher_cli_container,
socat_containers):
hosts = client.list_host(kind='docker', removed_null=True, state="active").data
cpu_shares = 400
ulimit = {"hard": 1024, "name": "cpu", "soft": 1024}
ulimit_inspect = {"Hard": 1024, "Name": "cpu", "Soft": 1024}
ipcMode = "host"
sysctls = {"net.ipv4.ip_forward": "1"}
dev_opts = {
'/dev/null': {
'readIops': 2000,
'writeIops': 3000,
'readBps': 4000,
'writeBps': 200,
}
}
cpu_shares = 400
blkio_weight = 1000
cpu_period = 10000
cpu_quota = 20000
cpu_set = "0"
cpu_setmems = "0"
dns_opt = ["abc"]
group_add = ["root"]
kernel_memory = 6000000
memory_reservation = 5000000
memory_swap = -1
memory_swappiness = 100
oom_killdisable = True
oom_scoreadj = 100
read_only = True
shm_size = 1024
stop_signal = "SIGTERM"
uts = "host"
memory = 8000000
dev_opts_inspect = {u"Path": "/dev/null",
u"Rate": 400}
cgroup_parent = "xyz"
extraHosts = ["host1:10.1.1.1", "host2:10.2.2.2"]
tmp_fs = {"/tmp": "rw"}
security_opt = ["label=user:USER", "label=role:ROLE"]
launch_config = {"imageUuid": TEST_SERVICE_OPT_IMAGE_UUID,
"extraHosts": extraHosts,
"privileged": True,
"cpuShares": cpu_shares,
"blkioWeight": blkio_weight,
"blkioDeviceOptions": dev_opts,
"cgroupParent": cgroup_parent,
"cpuShares": cpu_shares,
"cpuPeriod": cpu_period,
"cpuQuota": cpu_quota,
"cpuSet": cpu_set,
"cpuSetMems": cpu_setmems,
"dnsOpt": dns_opt,
"groupAdd": group_add,
"kernelMemory": kernel_memory,
"memory": memory,
"memoryReservation": memory_reservation,
"memorySwap": memory_swap,
"memorySwappiness": memory_swappiness,
"oomKillDisable": oom_killdisable,
"oomScoreAdj": oom_scoreadj,
"readOnly": read_only,
"securityOpt": security_opt,
"shmSize": shm_size,
"stopSignal": stop_signal,
"sysctls": sysctls,
"tmpfs": tmp_fs,
"ulimits": [ulimit],
"ipcMode": ipcMode,
"uts": uts,
"requestedHostId": hosts[0].id
}
scale = 2
service, env = create_env_and_svc(client, launch_config,
scale)
launch_rancher_compose(client, env)
rancher_envs = client.list_stack(name=env.name+"rancher").data
assert len(rancher_envs) == 1
rancher_env = rancher_envs[0]
rancher_service = get_rancher_compose_service(
client, rancher_env.id, service)
check_container_in_service(client, rancher_service)
container_list = get_service_container_list(client, rancher_service)
for c in container_list:
docker_client = get_docker_client(c.hosts[0])
inspect = docker_client.inspect_container(c.externalId)
assert inspect["HostConfig"]["ExtraHosts"] == extraHosts
assert inspect["HostConfig"]["BlkioWeight"] == blkio_weight
dev_opts_inspect["Path"] = "/dev/null"
dev_opts_inspect["Rate"] = 4000
assert \
inspect["HostConfig"]["BlkioDeviceReadBps"] == [dev_opts_inspect]
dev_opts_inspect["Path"] = "/dev/null"
dev_opts_inspect["Rate"] = 200
assert \
inspect["HostConfig"]["BlkioDeviceWriteBps"] == [dev_opts_inspect]
dev_opts_inspect["Path"] = "/dev/null"
dev_opts_inspect["Rate"] = 2000
assert \
inspect["HostConfig"]["BlkioDeviceReadIOps"] == [dev_opts_inspect]
dev_opts_inspect["Path"] = "/dev/null"
dev_opts_inspect["Rate"] = 3000
assert \
inspect["HostConfig"]["BlkioDeviceWriteIOps"] == [dev_opts_inspect]
assert inspect["HostConfig"]["CpuShares"] == cpu_shares
assert inspect["HostConfig"]["CgroupParent"] == cgroup_parent
assert inspect["HostConfig"]["CpuPeriod"] == cpu_period
assert inspect["HostConfig"]["CpuQuota"] == cpu_quota
assert inspect["HostConfig"]["CpusetCpus"] == cpu_set
# Bug - 6700
"""
assert inspect["HostConfig"]["CpusetMems"] == cpu_setmems
assert inspect["HostConfig"]["KernelMemory"] == kernel_memory
"""
assert inspect["HostConfig"]["MemoryReservation"] == memory_reservation
assert inspect["HostConfig"]["MemorySwap"] == memory_swap
assert inspect["HostConfig"]["MemorySwappiness"] == memory_swappiness
assert inspect["HostConfig"]["OomKillDisable"]
assert inspect["HostConfig"]["OomScoreAdj"] == oom_scoreadj
assert inspect["HostConfig"]["ReadonlyRootfs"]
assert inspect["HostConfig"]["SecurityOpt"] == security_opt
assert inspect["HostConfig"]["Tmpfs"] == tmp_fs
assert inspect["HostConfig"]["ShmSize"] == shm_size
assert inspect["Config"]["StopSignal"] == stop_signal
assert inspect["HostConfig"]["Ulimits"] == [ulimit_inspect]
assert inspect["HostConfig"]["IpcMode"] == ipcMode
assert inspect["HostConfig"]["UTSMode"] == uts
assert inspect["HostConfig"]["DnsOptions"] == dns_opt
assert inspect["HostConfig"]["GroupAdd"] == group_add
delete_all(client, [env])
@pytest.mark.skipif(True, reason='not implemented yet')
def test_rancher_compose_services_port_and_link_options(
client, rancher_cli_container, socat_containers):
hosts = client.list_host(kind='docker', removed_null=True, state="active").data
host = hosts[0]
link_host = hosts[1]
link_name = "WEB1"
link_port = 80
exposed_port = 9999
link_container = client.create_container(
imageUuid=LB_IMAGE_UUID,
environment={'CONTAINER_NAME': link_name},
name=random_str(),
requestedHostId=host.id
)
link_container = client.wait_success(link_container)
launch_config = {"imageUuid": SSH_IMAGE_UUID,
"ports": [str(exposed_port)+":22/tcp"],
"instanceLinks": {
link_name:
link_container.id},
"requestedHostId": link_host.id,
}
service, env = create_env_and_svc(client, launch_config, 1)
launch_rancher_compose(client, env)
rancher_envs = client.list_stack(name=env.name+"rancher").data
assert len(rancher_envs) == 1
rancher_env = rancher_envs[0]
rancher_service = get_rancher_compose_service(
client, rancher_env.id, service)
container_name = get_container_name(rancher_env, rancher_service, 1)
containers = client.list_container(name=container_name, state="running").data
assert len(containers) == 1
con = containers[0]
validate_exposed_port_and_container_link(client, con, link_name,
link_port, exposed_port)
delete_all(client, [env, rancher_env, link_container])
def test_rancher_compose_lbservice(client,
rancher_cli_container):
port = "7900"
# Add LB service and do not activate services
service_scale = 2
lb_scale = 1
env, service, lb_service = create_env_with_svc_and_lb(
client, service_scale, lb_scale, port)
# Add another target to LB service
launch_config = {"imageUuid": WEB_IMAGE_UUID}
service_name = random_str()
service1 = client.create_service(name=service_name,
stackId=env.id,
launchConfig=launch_config,
scale=2)
service1 = client.wait_success(service1)
assert service1.state == "inactive"
service = activate_svc(client, service)
service1 = activate_svc(client, service1)
# Set LB targets
port_rules = lb_service.lbConfig.portRules
protocol = "http"
target_port = "80"
service_id = service1.id
port_rule = {"sourcePort": port, "protocol": protocol,
"serviceId": service_id, "targetPort": target_port}
port_rules.append(port_rule)
lb_service = client.update(lb_service,
lbConfig=create_lb_config(port_rules))
launch_rancher_compose(client, env)
rancher_envs = client.list_stack(name=env.name+"rancher").data
assert len(rancher_envs) == 1
rancher_env = rancher_envs[0]
rancher_lb_service = get_rancher_compose_service(
client, rancher_env.id, lb_service)
client.wait_success(rancher_lb_service)
rancher_service = get_rancher_compose_service(
client, rancher_env.id, service)
client.wait_success(rancher_service)
rancher_service1 = get_rancher_compose_service(
client, rancher_env.id, service1)
client.wait_success(rancher_service1)
validate_lb_service(client, rancher_lb_service, port,
[rancher_service, rancher_service1])
delete_all(client, [env, rancher_env])
def test_rancher_compose_lbservice_internal(client,
rancher_cli_container):
port = "7911"
con_port = "7912"
# Deploy container in same network to test accessibility of internal LB
hosts = client.list_host(kind='docker', removed_null=True, state="active").data
assert len(hosts) > 0
host = hosts[0]
client_con = client.create_container(
name=random_str(), imageUuid=SSH_IMAGE_UUID,
ports=[con_port+":22/tcp"], requestedHostId=host.id)
client_con = client.wait_success(client_con, 120)
assert client_con.state == "running"
# Add an internal LB service and do not activate services
service_scale = 2
lb_scale = 1
env, service, lb_service = create_env_with_svc_and_lb(
client, service_scale, lb_scale, port, internal=True)
# Add another target to LB service
launch_config = {"imageUuid": WEB_IMAGE_UUID}
service_name = random_str()
service1 = client.create_service(name=service_name,
stackId=env.id,
launchConfig=launch_config,
scale=2)
service1 = client.wait_success(service1)
assert service1.state == "inactive"
service = activate_svc(client, service)
service1 = activate_svc(client, service1)
# Set LB targets
port_rules = lb_service.lbConfig.portRules
protocol = "http"
target_port = "80"
service_id = service1.id
port_rule = {"sourcePort": port, "protocol": protocol,
"serviceId": service_id, "targetPort": target_port}
port_rules.append(port_rule)
lb_service = client.update(lb_service,
lbConfig=create_lb_config(port_rules))
launch_rancher_compose(client, env)
rancher_envs = client.list_stack(name=env.name+"rancher").data
assert len(rancher_envs) == 1
rancher_env = rancher_envs[0]
rancher_lb_service = get_rancher_compose_service(
client, rancher_env.id, lb_service)
client.wait_success(rancher_lb_service)
rancher_service = get_rancher_compose_service(
client, rancher_env.id, service)
client.wait_success(rancher_service)
rancher_service1 = get_rancher_compose_service(
client, rancher_env.id, service1)
client.wait_success(rancher_service1)
time.sleep(20)
validate_internal_lb(client, rancher_lb_service,
[rancher_service, rancher_service1],
host, con_port, port)
# Check that port in the host where LB Agent is running is not accessible
lb_containers = get_service_container_list(
client, rancher_lb_service)
assert len(lb_containers) == lb_service.scale
for lb_con in lb_containers:
host = client.by_id('host', lb_con.hosts[0].id)
assert check_for_no_access(host, port)
delete_all(client, [env, rancher_env])
def test_rancher_compose_service_links(client,
rancher_cli_container):
port = "7901"
service_scale = 1
consumed_service_scale = 2
env, service, consumed_service = create_env_with_2_svc(
client, service_scale, consumed_service_scale, port)
service_link = {"serviceId": consumed_service.id, "ports": ["80"]}
service.addservicelink(serviceLink=service_link)
service = client.wait_success(service, 120)
# Launch env using docker compose
launch_rancher_compose(client, env)
rancher_envs = client.list_stack(name=env.name+"rancher").data
assert len(rancher_envs) == 1
rancher_env = rancher_envs[0]
rancher_service = get_rancher_compose_service(
client, rancher_env.id, service)
rancher_consumed_service = get_rancher_compose_service(
client, rancher_env.id, consumed_service)
client.wait_success(rancher_service)
client.wait_success(rancher_consumed_service)
validate_add_service_link(client, rancher_service,
rancher_consumed_service)
validate_linked_service(client, rancher_service,
[rancher_consumed_service], port)
delete_all(client, [env, rancher_env])
def test_rancher_compose_dns_services(client,
rancher_cli_container):
port = "7902"
rancher_compose_dns_services(client, port,
rancher_cli_container)
def test_rancher_compose_dns_services_cross_stack(client,
rancher_cli_container):
port = "7903"
rancher_compose_dns_services(client, port,
rancher_cli_container, True)
def test_rancher_compose_external_services(client,
rancher_cli_container):
port = "7904"
service_scale = 1
env, service, ext_service, con_list = create_env_with_ext_svc(
client, service_scale, port)
service_link = {"serviceId": ext_service.id}
service.addservicelink(serviceLink=service_link)
# Launch env using docker compose
launch_rancher_compose(client, env)
rancher_envs = client.list_stack(name=env.name+"rancher").data
assert len(rancher_envs) == 1
rancher_env = rancher_envs[0]
rancher_service = get_rancher_compose_service(
client, rancher_env.id, service)
rancher_ext_service = get_rancher_compose_service(
client, rancher_env.id, ext_service)
client.wait_success(con_list[0])
client.wait_success(con_list[1])
client.wait_success(rancher_service)
client.wait_success(rancher_ext_service)
validate_add_service_link(client, rancher_service,
rancher_ext_service)
validate_external_service(client, rancher_service,
[rancher_ext_service],
port, con_list)
delete_all(client, [env, rancher_env])
def test_rancher_compose_lbservice_host_routing(client,
rancher_cli_container):
port1 = "7906"
service_scale = 2
lb_scale = 1
service_count = 3
port_rules = []
port_rule = {"hostname": "www.abc1.com",
"path": "/service1.html",
"serviceId": 0,
"sourcePort": port1,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc2.com",
"path": "/service2.html",
"serviceId": 0,
"sourcePort": port1,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc1.com",
"path": "/service1.html",
"serviceId": 1,
"sourcePort": port1,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc2.com",
"path": "/service2.html",
"serviceId": 1,
"sourcePort": port1,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc1.com",
"path": "/name.html",
"serviceId": 2,
"sourcePort": port1,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc2.com",
"path": "/name.html",
"serviceId": 2,
"sourcePort": port1,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
env, services, lb_service = create_env_with_multiple_svc_and_lb(
client, service_scale, lb_scale, [port1], service_count, port_rules)
launch_rancher_compose(client, env)
rancher_envs = client.list_stack(name=env.name+"rancher").data
assert len(rancher_envs) == 1
rancher_env = rancher_envs[0]
rancher_lb_service = get_rancher_compose_service(client, rancher_env.id,
lb_service)
client.wait_success(rancher_lb_service)
rancher_service = get_rancher_compose_service(
client, rancher_env.id, services[0])
client.wait_success(rancher_service)
rancher_service1 = get_rancher_compose_service(
client, rancher_env.id, services[1])
client.wait_success(rancher_service1)
rancher_service2 = get_rancher_compose_service(
client, rancher_env.id, services[2])
client.wait_success(rancher_service2)
validate_lb_service(client,
rancher_lb_service, port1,
[rancher_service, rancher_service1],
"www.abc1.com", "/service1.html")
validate_lb_service(client,
rancher_lb_service, port1,
[rancher_service, rancher_service1],
"www.abc2.com", "/service2.html")
validate_lb_service(client,
rancher_lb_service, port1, [rancher_service2],
"www.abc1.com", "/name.html")
validate_lb_service(client,
rancher_lb_service, port1, [rancher_service2],
"www.abc2.com", "/name.html")
validate_lb_service_for_no_access(client, rancher_lb_service, port1,
"www.abc1.com",
"/service2.html")
validate_lb_service_for_no_access(client, rancher_lb_service, port1,
"www.abc2.com",
"/service1.html")
delete_all(client, [env, rancher_env])
def test_rancher_compose_lbservice_multiple_port(client,
rancher_cli_container):
port1 = "7907"
port2 = "7908"
service_scale = 2
lb_scale = 1
service_count = 2
port_rules = []
port_rule = {"hostname": "www.abc1.com",
"path": "/service1.html",
"serviceId": 0,
"sourcePort": port1,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc2.com",
"path": "/service3.html",
"serviceId": 1,
"sourcePort": port2,
"targetPort": "81",
"protocol": "http"
}
port_rules.append(port_rule)
env, services, lb_service = create_env_with_multiple_svc_and_lb(
client, service_scale, lb_scale, [port1, port2],
service_count, port_rules)
launch_rancher_compose(client, env)
rancher_envs = client.list_stack(name=env.name+"rancher").data
assert len(rancher_envs) == 1
rancher_env = rancher_envs[0]
rancher_lb_service = get_rancher_compose_service(client, rancher_env.id,
lb_service)
client.wait_success(rancher_lb_service)
rancher_service = get_rancher_compose_service(
client, rancher_env.id, services[0])
client.wait_success(rancher_service)
rancher_service1 = get_rancher_compose_service(
client, rancher_env.id, services[1])
client.wait_success(rancher_service1)
validate_lb_service(client,
rancher_lb_service, port1, [rancher_service],
"www.abc1.com", "/service1.html")
validate_lb_service(client,
rancher_lb_service, port2, [rancher_service1],
"www.abc2.com", "/service3.html")
delete_all(client, [env, rancher_env])
def test_rancher_compose_external_services_hostname(client,
rancher_cli_container):
port = "7904"
service_scale = 1
env, service, ext_service, con_list = create_env_with_ext_svc(
client, service_scale, port, True)
service_link = {"serviceId": ext_service.id}
service.addservicelink(serviceLink=service_link)
# Launch env using docker compose
launch_rancher_compose(client, env)
rancher_envs = client.list_stack(name=env.name+"rancher").data
assert len(rancher_envs) == 1
rancher_env = rancher_envs[0]
rancher_service = get_rancher_compose_service(
client, rancher_env.id, service)
rancher_ext_service = get_rancher_compose_service(
client, rancher_env.id, ext_service)
client.wait_success(rancher_service)
client.wait_success(rancher_ext_service)
validate_add_service_link(client, rancher_service,
rancher_ext_service)
validate_external_service_for_hostname(client, rancher_service,
[rancher_ext_service], port)
delete_all(client, [env, rancher_env])
def rancher_compose_dns_services(client, port,
rancher_cli_container,
cross_linking=False):
service_scale = 1
consumed_service_scale = 2
env, service, consumed_service, consumed_service1, dns = \
create_env_with_2_svc_dns(
client, service_scale, consumed_service_scale, port, cross_linking)
service_link = {"serviceId": dns.id}
service.addservicelink(serviceLink=service_link)
service_link = {"serviceId": consumed_service.id}
dns.addservicelink(serviceLink=service_link)
service_link = {"serviceId": consumed_service1.id}
dns.addservicelink(serviceLink=service_link)
# Launch dns env using docker compose
launch_rancher_compose(client, env)
rancher_envs = client.list_stack(name=env.name+"rancher").data
assert len(rancher_envs) == 1
rancher_env = rancher_envs[0]
# Launch envs using docker compose
if cross_linking:
# Launch Consumed Service2
env_con = get_env(client, consumed_service)
env_con = env_con.activateservices()
env_con = client.wait_success(env_con, 120)
assert env_con.state == "active"
con_service1_id = env_con.id
# Launch Consumed Service1
env_con1 = get_env(client, consumed_service1)
env_con1 = env_con1.activateservices()
env_con1 = client.wait_success(env_con1, 120)
assert env_con1.state == "active"
con_service2_id = env_con1.id
else:
con_service1_id = rancher_env.id
con_service2_id = rancher_env.id
rancher_consumed_service = get_rancher_compose_service(
client, con_service1_id, consumed_service)
rancher_consumed_service1 = get_rancher_compose_service(
client, con_service2_id, consumed_service1)
rancher_dns = get_rancher_compose_service(
client, rancher_env.id, dns)
rancher_service = get_rancher_compose_service(
client, rancher_env.id, service)
client.wait_success(rancher_dns)
client.wait_success(rancher_consumed_service)
client.wait_success(rancher_consumed_service1)
client.wait_success(rancher_service)
validate_add_service_link(client, rancher_service,
rancher_dns)
validate_add_service_link(client, rancher_dns,
rancher_consumed_service)
validate_add_service_link(client, rancher_dns,
rancher_consumed_service1)
validate_dns_service(client, rancher_service,
[rancher_consumed_service, rancher_consumed_service1],
port, rancher_dns.name)
to_delete = [env, rancher_env]
if cross_linking:
to_delete.append(env_con)
to_delete.append(env_con1)
delete_all(client, to_delete)
def get_rancher_compose_service(client, rancher_env_id, service):
rancher_services = client.list_service(name=service.name,
stackId=rancher_env_id,
removed_null=True).data
assert len(rancher_services) == 1
rancher_service = rancher_services[0]
print(service.kind)
if service.kind != 'externalService' and service.kind != 'dnsService':
assert rancher_service.scale == service.scale
rancher_service = client.wait_success(rancher_service, 120)
return rancher_service
|
the-stack_0_1853 | ## MadLib
"""
Create a "MadLib" that asks the user for various pieces of
information. Store the information as strings. Once the
information has been collected, output a story using the
stored information. You can find a template if you don't want to
make one: http://www.madlibs.com/
"""
"""
This one is quite a step up. It involves a few new concepts, such
as storing user input.
First of all, I can see that we're going to want to repeatedly
ask a user for input, and store their answers somewhere. For this
task, we could use an array like FruitLoops, but let's not fill
it up yet:
"""
info_list = []
"""
Side note: this isn't really an array. It's a list. I lied.
Python is weird and does lots of strange stuff that other
languages cry about so it's not worth getting caught up on.
For our purposes, a list can be dynamically added to, which
is what we want.
Now let's define a function to get some input from the user and
put it in info_list:
"""
def get_input_from_user(prompt):
answer = input(prompt)
info_list.append(answer)
"""
Python's input() function handles most of the heavy lifting here.
Put simply, the code above allows us to pass a prompt to the
function, which it will then ask the user, and add the reply
to our list of information.
Next, we need a story of sorts. We can make up whatever we want.
I'm going to mark out a basic one here, with placeholders for
the words we want our user to fill in:
    This morning, I <past tense action> and then got up and
had a shower. In the shower, an <animal> flew in the window
and scared me so bad that I screamed for my <relative>.
Next, let's get our user to fill in our placeholders:
"""
get_input_from_user("Please give me a past tense action: ")
get_input_from_user("Please give me an animal: ")
get_input_from_user("Please give me a relative: ")
"""
Now that we've got those stored, let's construct our story string
and print it back out. We're going to use f-formatting here,
which allows us to use curled brackets around variables names
inside a string to save us doing lots of string + string = string
"""
my_story = f"This morning, I {info_list[0]} and then got up "\
f"and had a shower. In the shower, a"\
f" {info_list[1]} flew in the window and scared me "\
f"so bad that I screamed for my {info_list[2]}."
"""
Note the trailing backslashes, which continue the statement onto the
next line; Python then joins the adjacent string literals into one.
Now, we just print it out:
"""
print(my_story)
"""
Side note: For general purposes, it's nice to not have to think
about which index you're looking for in a data structure if
you're just iterating over it, as above. When using iterable
objects, we can define an iterator:
"""
info_iter = iter(info_list)
"""
And instead repeatedly call next() on it to achieve the same
result as above without hard-coding any particular values in:
"""
my_iterated_story = f"This morning, I {next(info_iter)} and "\
f"then got up and had a shower. In the "\
f"shower, a {next(info_iter)} flew in the "\
f"window and scared me so bad that I "\
f"screamed for my {next(info_iter)}"
|
the-stack_0_1855 | import xarray as xr
import numpy as np
import pandas as pd
def distrib_run_build_beam_pointing_vector(dat: list):
"""
Convenience function for mapping build_beam_pointing_vectors across cluster. Assumes that you are mapping this
function with a list of data.
    distrib functions also return a processing status array; here it is a beam-wise array filled with the value 2,
    indicating that all processed beams are at the 'beamvector' status level
Parameters
----------
dat
[hdng, bpa, tiltangle, tx_vecs, rx_vecs, tstmp, tx_reversed, rx_reversed]
Returns
-------
list
[relative azimuth, beam pointing angle, processing_status]
"""
ans = build_beam_pointing_vectors(dat[0], dat[1], dat[2], dat[3][0], dat[3][1], dat[4], dat[5])
# return processing status = 2 for all affected soundings
processing_status = xr.DataArray(np.full_like(dat[1], 2, dtype=np.uint8),
coords={'time': dat[1].coords['time'], 'beam': dat[1].coords['beam']},
dims=['time', 'beam'])
ans.append(processing_status)
return ans
def build_beam_pointing_vectors(hdng: xr.DataArray, bpa: xr.DataArray, tiltangle: xr.DataArray, tx_vecs: xr.DataArray,
rx_vecs: xr.DataArray, tx_reversed: bool = False, rx_reversed: bool = False):
"""
Beam pointing vector is the beam specific vector that arises from the intersection of the tx ping and rx cone
of sensitivity. Points at that area. Is in the geographic coordinate system, built using the tx/rx at time of
ping/receive.
Two components are returned. Relative azimuth, the angle relative to vessel heading that points at the beam
endpoint. Beam pointing angle, the roll corrected angle relative to the horizontal that points down at the beam
endpoint.
Parameters
----------
hdng
2d (time, beam) heading in degrees at ping time for each beam
bpa
2d (time, beam) receiver beam pointing angle
tiltangle
2d (time, beam) transmitter tiltangle on ping
tx_vecs
2 dim (time, xyz) representing tx 3d orientation in space across time
rx_vecs
3 dim (time, beam, xyz) representing rx 3d orientation in space across time/beam
tx_reversed
if true, the transmitter was installed 180° offset in yaw (i.e. backwards)
rx_reversed
if true, the receiver was installed 180° offset in yaw (i.e. backwards)
Returns
-------
xr.DataArray
2dim (time, beam), beam-wise beam azimuth values relative to vessel heading at time of ping
xr.DataArray
2 dim (time, beam) values for beampointingangle at each beam
"""
# main vec (primary head) is accessed using the primary system selection
rx_angle = np.deg2rad(bpa)
tx_angle = np.deg2rad(tiltangle)
if tx_reversed:
tx_angle = -tx_angle
if rx_reversed:
rx_angle = -rx_angle
beamvecs = construct_array_relative_beamvector(tx_vecs, rx_vecs, tx_angle, rx_angle)
rotgeo = return_array_geographic_rotation(tx_vecs, rx_vecs)
bv_geo = build_geographic_beam_vectors(rotgeo, beamvecs)
rel_azimuth = compute_relative_azimuth(bv_geo, hdng)
new_pointing_angle = compute_geo_beam_pointing_angle(bv_geo, rx_angle)
return [rel_azimuth, new_pointing_angle]
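# A minimal, hedged usage sketch (not part of the processing pipeline above): tiny synthetic
# inputs with the shapes and coordinate names described in the docstring. All values here are
# made up purely for illustration; real inputs would come from a converted multibeam dataset.
def _example_build_beam_pointing_vectors():
    times = np.array([0.0, 1.0])
    beams = np.arange(3)
    coords2d = {'time': times, 'beam': beams}
    # heading, receiver beam pointing angle and transmitter tilt, all 2d (time, beam) in degrees
    hdng = xr.DataArray(np.full((2, 3), 90.0), coords=coords2d, dims=['time', 'beam'])
    bpa = xr.DataArray(np.array([[-60.0, 0.0, 60.0], [-60.0, 0.0, 60.0]]), coords=coords2d, dims=['time', 'beam'])
    tilt = xr.DataArray(np.zeros((2, 3)), coords=coords2d, dims=['time', 'beam'])
    # idealized orientation vectors: tx pointing forward (+x), rx pointing to starboard (+y)
    tx_vecs = xr.DataArray(np.array([[1.0, 0.0, 0.0], [1.0, 0.0, 0.0]]),
                           coords={'time': times, 'xyz': ['x', 'y', 'z']}, dims=['time', 'xyz'])
    rx_vecs = xr.DataArray(np.tile(np.array([0.0, 1.0, 0.0]), (2, 3, 1)),
                           coords={'time': times, 'beam': beams, 'xyz': ['x', 'y', 'z']},
                           dims=['time', 'beam', 'xyz'])
    rel_azimuth, pointing_angle = build_beam_pointing_vectors(hdng, bpa, tilt, tx_vecs, rx_vecs)
    return rel_azimuth, pointing_angle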
def construct_array_relative_beamvector(maintx: xr.DataArray, mainrx: xr.DataArray, tx_angle: xr.DataArray,
rx_angle: xr.DataArray):
"""
Given the orientation vectors representing the transmitter/receiver at time of ping/receive (maintx, mainrx) and
the TX/RX steering angles (tx_angle, rx_angle), determine new 3d beam vector components at the midpoint between
the TX and RX. This would be the 'actual' array relative beam vector.
This is a simplification of the actual scenario, adding error in the xyz due to the difference in path length/
direction of the actual ray from tx-seafloor and seafloor-rx and this co-located assumption (tx-seafloor and
rx-seafloor are the same is the assumption)
x = +FORWARD, y=+STARBOARD, z=+DOWN
Returns:
3d beam vector in co-located array ref frame. Of shape (xyz, time, beam), with 10 times and 200 beams,
beamvecs shape would be (3, 10, 200)
| <xarray.DataArray 'tiltangle' (xyz: 3, time: 10, beam: 200)>
| dask.array<concatenate, shape=(3, 10, 200), dtype=float64, chunksize=(1, 10, 200), chunktype=numpy.ndarray>
| Coordinates:
| * time (time) float64 1.496e+09 1.496e+09 ...
| * beam (beam) int32 0 1 2 3 4 5 6 7 8 ... 194 195 196 197 198 199 200
| * xyz (xyz) object 'x' 'y' 'z'
Parameters
----------
maintx
orientation vector for transmitter at time of transmit, 2dim of shape (time, xyz)
mainrx
orientation vector for receiver at time of receive, 2dim of shape (time, xyz)
tx_angle
transmitter tiltangle for each ping time
rx_angle
receiver beam pointing angle for each ping time
Returns
-------
xr.DataArray
3d beam vector in co-located array ref frame
"""
# delta - alignment angle between tx/rx vecs
delt = np.arccos(xr.dot(maintx, mainrx, dims=['xyz'])) - np.pi / 2
ysub1 = -np.sin(rx_angle)
# solve for components of 3d beam vector
ysub1 = ysub1 / np.cos(delt)
ysub2 = np.sin(tx_angle) * np.tan(delt)
radial = np.sqrt((ysub1 + ysub2) ** 2 + np.sin(tx_angle) ** 2)
x = np.sin(tx_angle)
y = ysub1 + ysub2
z = np.sqrt(1 - radial ** 2)
# generate new dataarray object for beam vectors
newx, _ = xr.broadcast(x, y) # broadcast to duplicate x along beam dimension
beamvecs = xr.concat([newx, y, z], pd.Index(list('xyz'), name='xyz'))
return beamvecs
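# A hedged numerical sanity check (illustrative only, not part of the original module): with
# tx along +x and rx along +y the alignment angle delta is zero, so the math above reduces to
# x = sin(tx_angle), y = -sin(rx_angle), z = sqrt(1 - radial**2). The angles below are
# arbitrary example values.
def _check_aligned_array_beamvector():
    t = np.array([0.0])
    b = np.arange(2)
    maintx = xr.DataArray(np.array([[1.0, 0.0, 0.0]]),
                          coords={'time': t, 'xyz': ['x', 'y', 'z']}, dims=['time', 'xyz'])
    mainrx = xr.DataArray(np.array([[0.0, 1.0, 0.0]]),
                          coords={'time': t, 'xyz': ['x', 'y', 'z']}, dims=['time', 'xyz'])
    tx_angle = xr.DataArray(np.deg2rad([[5.0, 5.0]]), coords={'time': t, 'beam': b}, dims=['time', 'beam'])
    rx_angle = xr.DataArray(np.deg2rad([[30.0, -30.0]]), coords={'time': t, 'beam': b}, dims=['time', 'beam'])
    beamvecs = construct_array_relative_beamvector(maintx, mainrx, tx_angle, rx_angle)
    # expect dims ('xyz', 'time', 'beam') with x ~= sin(tx_angle) and y ~= -sin(rx_angle)
    return beamvecs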
def return_array_geographic_rotation(maintx: xr.DataArray, mainrx: xr.DataArray):
"""
    Use the transmitter/receiver array orientations to build a rotation matrix between the array-relative and
    geographic reference frames.
Returns rotation matrices at each time/beam, of shape (beam, rot_i, time, xyz)
| <xarray.DataArray 'getitem-82dd48467b1f4e8b4f56bbe5e841cc9f' (beam: 182, rot_i: 3, time: 2, xyz: 3)>
| dask.array<transpose, shape=(182, 3, 2, 3), dtype=float64, chunksize=(182, 3, 2, 1), chunktype=numpy.ndarray>
| Coordinates:
| * rot_i (rot_i) int32 0 1 2
| * time (time) float64 1.496e+09 1.496e+09
| * beam (beam) int32 0 1 2 3 4 5 6 7 8 ... 174 175 176 177 178 179 180 181
| * xyz (xyz) <U1 'x' 'y' 'z'
Parameters
----------
maintx
orientation vector for transmitter at time of transmit, 2dim of shape (time, xyz)
mainrx
orientation vector for receiver at time of receive, 2dim of shape (time, xyz)
Returns
-------
xr.DataArray
rotation matrices at each time/beam, of shape (beam, rot_i, time, xyz)
"""
# build rotation matrix for going from locally level to geographic coord sys
x_prime = maintx
z_prime = cross(x_prime, mainrx, 'xyz')
y_prime = cross(z_prime, x_prime, 'xyz')
rotgeo = xr.concat([x_prime, y_prime, z_prime], pd.Index([0, 1, 2], name='rot_j')).T
# to do the dot product correctly, you need to align the right dimension in both matrices by giving
# them the same name (xyz for rotgeo and bv_geo in this case)
rotgeo = rotgeo.rename({'xyz': 'rot_i'})
rotgeo.coords['rot_i'] = [0, 1, 2]
rotgeo = rotgeo.rename({'rot_j': 'xyz'})
rotgeo.coords['xyz'] = ['x', 'y', 'z']
return rotgeo
def cross(a: xr.DataArray, b: xr.DataArray, spatial_dim: str, output_dtype: np.dtype = None):
"""
Xarray-compatible cross product. Compatible with dask, parallelization uses a.dtype as output_dtype
Parameters
----------
a
xarray DataArray object with a spatial_dim
b
xarray DataArray object with a spatial_dim
spatial_dim
        dimension name to be multiplied through
output_dtype
dtype of output
Returns
-------
xr.DataArray
cross product of a and b along spatial_dim
"""
for d in (a, b):
if spatial_dim not in d.dims:
raise ValueError('dimension {} not in {}'.format(spatial_dim, d))
if d.sizes[spatial_dim] != 3:
            raise ValueError('dimension {} does not have length 3 in {}'.format(spatial_dim, d))
if output_dtype is None:
output_dtype = a.dtype
c = xr.apply_ufunc(np.cross, a, b,
input_core_dims=[[spatial_dim], [spatial_dim]],
output_core_dims=[[spatial_dim]],
dask='parallelized', output_dtypes=[output_dtype]
)
return c
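# Illustrative usage sketch (not part of the original module): cross() applied to two single-time unit
# vectors, assuming numpy/xarray are imported above as np/xr. In the x=+FORWARD, y=+STARBOARD, z=+DOWN
# frame, forward crossed with starboard should give down at the lone time step.
def _cross_usage_example():
    coords = {'time': [0.0], 'xyz': ['x', 'y', 'z']}
    fwd = xr.DataArray([[1.0, 0.0, 0.0]], dims=['time', 'xyz'], coords=coords)
    stbd = xr.DataArray([[0.0, 1.0, 0.0]], dims=['time', 'xyz'], coords=coords)
    down = cross(fwd, stbd, 'xyz')  # expect (0, 0, 1) for the single time step
    return down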
def build_geographic_beam_vectors(rotgeo: xr.DataArray, beamvecs: xr.DataArray):
"""
Apply rotation matrix to bring transducer rel. beam vectors to geographic ref frame
Parameters
----------
rotgeo
rotation matrices at each time/beam, of shape (beam, rot_i, time, xyz), see return_array_geographic_rotation
beamvecs
3d beam vector in co-located array ref frame (xyz, time, beam), see construct_array_relative_beamvector
Returns
-------
xr.DataArray
beam vectors in geographic ref frame, of shape (time, beam, bv_xyz)
"""
bv_geo = xr.dot(rotgeo, beamvecs, dims='xyz')
bv_geo = bv_geo.rename({'rot_i': 'bv_xyz'})
bv_geo.coords['bv_xyz'] = ['x', 'y', 'z']
bv_geo = bv_geo.transpose('time', 'beam', 'bv_xyz')
return bv_geo
def compute_relative_azimuth(bv_geo: xr.DataArray, heading: xr.DataArray):
"""
Compute the relative azimuth from array to end of beam vector in geographic ref frame
Parameters
----------
bv_geo
beam vectors in geographic ref frame, of shape (time, beam, bv_xyz), see build_geographic_beam_vectors
heading
1 dim array of heading values, coords=time
Returns
-------
xr.DataArray
2dim (time, beam), beam-wise beam azimuth values relative to vessel heading at time of ping
"""
# derive azimuth/angle from the newly created geographic beam vectors
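    # e.g. (illustrative numbers) a geographic beam azimuth of -170 deg with a heading of 350 deg gives
    # (-170 - 350 + 360) % 360 = 200 deg relative to the bow, which is returned below in radians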
bv_azimuth = np.rad2deg(np.arctan2(bv_geo.sel(bv_xyz='y'), bv_geo.sel(bv_xyz='x')))
rel_azimuth = np.deg2rad((bv_azimuth - heading + 360) % 360)
return rel_azimuth
def compute_geo_beam_pointing_angle(bv_geo: xr.DataArray, rx_angle: xr.DataArray):
"""
Build new beam pointing angle (rel to the vertical) and with the correct sign (+ to starboard) in the geographic
ref frame.
Parameters
----------
bv_geo
beam vectors in geographic ref frame, of shape (time, beam, bv_xyz), see build_geographic_beam_vectors
rx_angle
receiver beam pointing angle for each ping time
Returns
-------
xr.DataArray
2 dim (time, beam) values for beampointingangle at each beam
"""
bvangle_divisor = np.sqrt(np.square(bv_geo.sel(bv_xyz='x')) + np.square(bv_geo.sel(bv_xyz='y')))
# new pointing angle is equal to pi/2 - depression angle (depression angle relative to horiz, pointing
# angle is the incidence angle relative to vertical)
new_pointing_angle = (np.pi / 2) - np.arctan(bv_geo.sel(bv_xyz='z') / bvangle_divisor)
# flip the sign where the azimuth is pointing to port, allows us to maintain which side the angle is on
newindx = np.ones_like(new_pointing_angle)
newindx = np.negative(newindx, out=newindx, where=rx_angle < 0)
new_pointing_angle = new_pointing_angle * newindx
return new_pointing_angle
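# Illustrative end-to-end sketch (not part of the original module): run the helper functions above on tiny,
# numpy-backed synthetic inputs. The function name, array sizes and the idealized geometry (tx facing forward,
# rx facing starboard, zero heading, no tx tilt) are assumptions for demonstration only; real inputs would come
# from converted multibeam records and the receiver vectors may also carry a beam dimension.
def _beam_vector_pipeline_example():
    ntimes = 2
    steer_deg = np.array([-60.0, -30.0, 30.0, 60.0])  # avoid 0 deg so the horizontal divisor stays nonzero
    times = np.arange(ntimes, dtype=np.float64)
    beams = np.arange(steer_deg.size)
    vec_coords = {'time': times, 'xyz': ['x', 'y', 'z']}
    tx_vecs = xr.DataArray(np.tile([1.0, 0.0, 0.0], (ntimes, 1)), dims=['time', 'xyz'], coords=vec_coords)
    rx_vecs = xr.DataArray(np.tile([0.0, 1.0, 0.0], (ntimes, 1)), dims=['time', 'xyz'], coords=vec_coords)
    ang_coords = {'time': times, 'beam': beams}
    tx_angle = xr.DataArray(np.zeros((ntimes, steer_deg.size)), dims=['time', 'beam'], coords=ang_coords)
    rx_angle = xr.DataArray(np.tile(np.deg2rad(steer_deg), (ntimes, 1)), dims=['time', 'beam'], coords=ang_coords)
    heading = xr.DataArray(np.zeros(ntimes), dims=['time'], coords={'time': times})
    beamvecs = construct_array_relative_beamvector(tx_vecs, rx_vecs, tx_angle, rx_angle)
    rotgeo = return_array_geographic_rotation(tx_vecs, rx_vecs)
    bv_geo = build_geographic_beam_vectors(rotgeo, beamvecs)
    rel_azimuth = compute_relative_azimuth(bv_geo, heading)
    pointing_angle = compute_geo_beam_pointing_angle(bv_geo, rx_angle)
    return rel_azimuth, pointing_angle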
|
the-stack_0_1857 | # -*- coding: utf-8 -*-
#
# This file is part of tofbot, a friendly IRC bot.
# You may redistribute it under the Simplified BSD License.
# If we meet some day, and you think this stuff is worth it,
# you can buy us a beer in return.
#
# Copyright (c) 2011 Etienne Millon <[email protected]>
# Martin Kirchgessner <[email protected]>
from toflib import Plugin
from toflib import distance
class PluginDassin(Plugin):
def handle_msg(self, msg_text, chan, nick):
ete = [ "tu sais"
, "je n'ai jamais été aussi heureux que ce matin-là"
, "nous marchions sur une plage"
, "un peu comme celle-ci"
, "c'était l'automne"
, "un automne où il faisait beau"
, "une saison qui n'existe que dans le nord de l'amérique"
, "là-bas on l'appelle l'été indien"
, "mais c'était tout simplement le nôtre"
, "avec ta robe longue"
, "tu ressemblais à une aquarelle de marie laurencin"
, "et je me souviens"
, "je me souviens très bien de ce que je t'ai dit ce matin-là"
, "il y a un an"
, "il y a un siècle"
, "il y a une éternité"
, "on ira"
, "où tu voudras, quand tu voudras"
, "et l'on s'aimera encore"
, "lorsque l'amour sera mort"
, "toute la vie"
, "sera pareille à ce matin"
, "aux couleurs de l'été indien"
]
colline = [ "je l'ai vue près d'un laurier"
, "elle gardait ses blanches brebis"
, "quand j'ai demandé d'où venait sa peau fraîche elle m'a dit"
, "c'est de rouler dans la rosée qui rend les bergères jolies"
, "mais quand j'ai dit qu'avec elle"
, "je voudrais y rouler aussi"
, "elle m'a dit"
, "elle m'a dit d'aller siffler là-haut sur la colline"
, "de l'attendre avec un petit bouquet d'églantines"
, "j'ai cueilli des fleurs"
, "et j'ai sifflé tant que j'ai pu"
, "j'ai attendu, attendu"
, "elle n'est jamais venue"
, "zay zay zay zay"
]
        # Rone - Bora (vocal edit, lyrics by Alain Damasio), re-cut line by line so tof can serve it
bora = [ "il n'y pas de secret",
"pas de secrets",
"il y a une vérité",
"simple, sobre, crue, quoi",
"la horde du contrevent",
"tu la réussiras uniquement si tu t'isoles",
"si tu t'isoles quoi",
"tu comprends ce que ça veut dire isole ?",
"isola",
"l'ile quoi",
"tu crées ton ile et tu l'effaces au maximum",
"il faut que les gens soient extrêmement loin de toi",
"mais loin parce que ton univers sera vaste",
"sera immense",
"sera énorme",
"énorme univers",
"énorme puissance d'univers",
"caracole il existe en toi complètement",
"comme strochnis",
"qu'il soit toi",
"que pietro della rocca tu le deviennes",
"et la croute aussi",
"et tous l'univers"
"et tout le vent",
"tu vis complètement la dedans",
"c'est ca qu'il faut",
"y a que ca qu'il faut",
"tu restes collé au vent",
"collé au vent",
"collé au vent, toi",
"et que tu te battes",
"que tu ne fasses aucune concessison sur le reste",
"tu oublies tout",
"t'es pas consultant, t'es rien",
"le consulting c'est d'la merde",
"la seule chose qui a d'la valeur",
"c'est quand t'es capable de faire un chapitre comme celui-là",
"ça, ça restera, ça mérite que tu vives",
"tu peux vivre pour écrire ça",
"ça mérite que tu vives",
"là t'es pas né pour rien",
"t'es nécessaire",
"t'es pas surnuméraire",
"t'es pas superflu",
"là t'as une nécessité quand t'écris ça",
"la nécessité d'être",
"et c'est ça qu'il faut tenir mec",
"c'est ça qu'il faut putain de tenir",
"lâches pas le morceau",
"t'fais pas enculer",
"t'fais pas disperser",
"t'fais pas fragmenter",
"fais pas de concession",
"y'a pas de concession avec la vie",
"y'a pas de concession",
"tu vis et faut vivre à fond"
]
oizo = ["coucou","tu veux voir ma bite ?"]
hell = ["hell", "cook"]
chuck = ["nope", "it's just Chuck Testa !"]
hibernatus = [ "j'ai tout visité en 2 secondes"
, "Pékin, Tokyo, la Joconde"
, "j'ai fait tous les jobs possibles"
, "plombier, pute et belle fille"
, "j'ai sodomisé un louveteau"
, "avec le manche d'un marteau"
, "j'ai grandi à Harlem"
, "avec Paul Préboist et Vandel"
, "j'ai braqué le CIO"
, "pour m'acheter le Figaro"
, "j'ai buté ma grand-mére"
, "parce que je ne savais pas quoi faire"
, "j'ai aussi buté Diana"
, "mais pour de l'argent cette fois"
, "j'ai été chez un psy"
, "pour lui dire que j'étais guérie"
, "j'ai aussi mangé du dauphin"
, "flipper était pas si malin"
, "j'ai fais la Star Academy"
, "pour chanter avec Fiori"
, "j'ai inventé la bouffe congelée"
, "et j'me ferai cryogéniser"
, "j'ai déjà vu Hibernatus"
, "j'ai le Dvd dans mon anus"
, "j'suis déjà allée partout"
, "j'ai tout ramené, je connais tout"
, "j'ai pas besoin d'en apprendre plus"
, "j'ai le dvd dans mon anus"
, "j'suis déjà allée partout"
, "j'ai tout ramené, je connais tout"
, "j'ai pas besoin d'en apprendre plus"
, "j'ai le dvd dans mon anus"
]
songs = [oizo, ete, colline, bora, hell, hibernatus, chuck]
searched = msg_text.lower()
minDist = 9999999
best = ""
for song in songs:
            try:
                # stop before the last line so there is always a following lyric to answer with
                for i, line in enumerate(song[:-1]):
                    dist = distance(line, searched)
                    if dist < minDist:
                        best = song[i + 1]
                        minDist = dist
            except Exception:
                pass
if len(best) > 3 and minDist < (len(searched)/3):
self.say(best)
|
the-stack_0_1858 | from cConstants import cEPAConstants
from cEnum import eEPA
import cPlot3D
class cPlotFrame(cPlot3D.cPlotFrame):
def __init__(self, iParent, **kwargs):
cPlot3D.cPlotFrame.__init__(self, iParent, **kwargs)
def initPanel(self, *args, **kwargs):
self.m_PlotPanel = cPlotPanel(self, **kwargs)
class cPlotPanel(cPlot3D.cPlotPanel):
def __init__(self, iParent, iXAxisItem=eEPA.evaluation, iYAxisItem=eEPA.potency, iZAxisItem=eEPA.activity, iPlotType=eEPA.fundamental, **kwargs):
cPlot3D.cPlotPanel.__init__(self, iParent, **kwargs)
self.m_XAxisItem = iXAxisItem
self.m_YAxisItem = iYAxisItem
self.m_ZAxisItem = iZAxisItem
        self.m_PlotType = iPlotType
def getSentimentEPAIndex(self, iEPA, iSentiment):
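        # illustrative assumption: with cEPAConstants.m_Dimensions == 3 (E, P, A) and m_OtherMultiplier == 1,
        # potency (index 1) of the "other" sentiment maps to 1 + 3 * 1 == 4, while "self" (multiplier 0)
        # keeps the raw EPA index 0..2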
return iEPA + (cEPAConstants.m_Dimensions * iSentiment)
# Axis items are the enumerations of the elements in eEPA, so they're basically numbers
    def setAxis(self, iXAxisItem, iYAxisItem, iZAxisItem):
self.m_XAxisItem = iXAxisItem
self.m_YAxisItem = iYAxisItem
self.m_ZAxisItem = iZAxisItem
def plotEPA(self, iLearnerSamples, iSimulatorSamples):
self.clearAxes()
if (0 < len(iLearnerSamples)):
# Learner's sentiments on self and other, green and pink respectively
self.plotScatter(
iLearnerSamples[self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_SelfMultiplier)],
iLearnerSamples[self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_SelfMultiplier)],
iLearnerSamples[self.getSentimentEPAIndex(self.m_ZAxisItem, cEPAConstants.m_SelfMultiplier)],
iAutoScaling=False, iRedraw=False, iUpdate=False, marker="o", s=50, c="green", alpha=1, animated=False)
self.plotScatter(
iLearnerSamples[self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_OtherMultiplier)],
iLearnerSamples[self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_OtherMultiplier)],
iLearnerSamples[self.getSentimentEPAIndex(self.m_ZAxisItem, cEPAConstants.m_OtherMultiplier)],
iAutoScaling=False, iRedraw=False, iUpdate=False, marker="o", s=50, c="pink", alpha=1, animated=False)
if (0 < len(iSimulatorSamples)):
# Simulator's sentiments on self and other, goldenrod and blue respectively
self.plotScatter(
iSimulatorSamples[self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_SelfMultiplier)],
iSimulatorSamples[self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_SelfMultiplier)],
iSimulatorSamples[self.getSentimentEPAIndex(self.m_ZAxisItem, cEPAConstants.m_SelfMultiplier)],
iAutoScaling=False, iRedraw=False, iUpdate=False, marker="o", s=50, c="goldenrod", alpha=1, animated=False)
self.plotScatter(
iSimulatorSamples[self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_OtherMultiplier)],
iSimulatorSamples[self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_OtherMultiplier)],
iSimulatorSamples[self.getSentimentEPAIndex(self.m_ZAxisItem, cEPAConstants.m_OtherMultiplier)],
iAutoScaling=False, iRedraw=False, iUpdate=False, marker="o", s=50, c="blue", alpha=1, animated=False)
self.m_Axes.set_xlabel(cEPAConstants.m_EPALabels[self.m_XAxisItem])
self.m_Axes.set_ylabel(cEPAConstants.m_EPALabels[self.m_YAxisItem])
self.m_Axes.set_zlabel(cEPAConstants.m_EPALabels[self.m_ZAxisItem])
self.redrawAxes()
|
the-stack_0_1859 | """Test the permission utils."""
from homeassistant.auth.permissions import util
def test_test_all():
"""Test if we can test the all group."""
for val in (
None,
{},
{'all': None},
{'all': {}},
):
assert util.test_all(val, 'read') is False
for val in (
True,
{'all': True},
{'all': {'read': True}},
):
assert util.test_all(val, 'read') is True
|
the-stack_0_1860 | import setuptools
import os
import stat
from setuptools.command.install import install
from distutils import log
with open("README.md", "r") as fh:
long_description = fh.read()
"""
The below code is taken from https://github.com/Uberi/speech_recognition
See README.md for licence information
"""
FILES_TO_MARK_EXECUTABLE = ["flac-linux-x86", "flac-linux-x86_64", "flac-mac", "flac-win32.exe"]
class InstallWithExtraSteps(install):
def run(self):
install.run(self) # do the original install steps
# mark the FLAC executables as executable by all users (this fixes occasional issues when file permissions get messed up)
for output_path in self.get_outputs():
if os.path.basename(output_path) in FILES_TO_MARK_EXECUTABLE:
log.info("setting executable permissions on {}".format(output_path))
stat_info = os.stat(output_path)
os.chmod(
output_path,
stat_info.st_mode |
stat.S_IRUSR | stat.S_IXUSR | # owner can read/execute
stat.S_IRGRP | stat.S_IXGRP | # group can read/execute
stat.S_IROTH | stat.S_IXOTH # everyone else can read/execute
)
"""
Below is DanSpeech licence
"""
with open('requirements.txt') as f:
requirements = f.read().splitlines()
setuptools.setup(
name="danspeech",
version="1.0.3",
author="Rasmus Arpe Fogh Jensen, Martin Carsten Nielsen",
author_email="[email protected], [email protected],",
description="Speech recognition for Danish",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/danspeech/danspeech",
packages=setuptools.find_packages(),
include_package_data=True,
install_requires=requirements,
license_file="LICENCE.txt",
classifiers=[
"Programming Language :: Python :: 3",
'Development Status :: 5 - Production/Stable',
"Operating System :: OS Independent",
],
)
|