the-stack_0_1349

from netpyne import specs
from netpyne.batch import Batch
def batchTauWeight():
# Create variable of type ordered dictionary (NetPyNE's customized version)
params = specs.ODict()
# fill in with parameters to explore and range of values (key has to coincide with a variable in simConfig)
params['synMechTau2'] = [3.0, 5.0, 7.0]
params['connWeight'] = [0.005, 0.01, 0.15]
params[('analysis', 'plotTraces', 'saveFig')] = [True, False]
# create Batch object with parameters to modify, and specifying files to use
b = Batch(params=params, cfgFile='tut8_cfg.py', netParamsFile='tut8_netParams.py',)
# Set output folder, grid method (all param combinations), and run configuration
b.batchLabel = 'tauWeight'
b.saveFolder = 'tut8_data'
b.method = 'grid'
b.runCfg = {'type': 'mpi_bulletin',
'script': 'tut8_init.py',
'skip': True}
# Run batch simulations
b.run()
# Main code
if __name__ == '__main__':
    batchTauWeight()
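# A hedged sketch of how the batch parameters above map onto the cfg file: each key
# in `params` must match a variable on the SimConfig object defined in tut8_cfg.py,
# and tuple keys address nested entries (here cfg.analysis['plotTraces']['saveFig']).
# The cfg lines below are illustrative placeholders, not the actual tut8_cfg.py:
#
#     from netpyne import specs
#     cfg = specs.SimConfig()
#     cfg.synMechTau2 = 5.0      # swept over [3.0, 5.0, 7.0] by the batch
#     cfg.connWeight = 0.01      # swept over [0.005, 0.01, 0.15]
#     cfg.analysis['plotTraces'] = {'include': [0], 'saveFig': True}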

the-stack_0_1350

import sys
from PyQt5 import QtWidgets
# Press the green button in the gutter to run the script.
from PyQt5.QtGui import QPixmap, QIcon
from PyQt5.QtWidgets import QLabel, QSystemTrayIcon, QMenu
from ui.MainWindow import MainWindow
from PyQt5.QtCore import Qt, QFile, QTextStream
app = None
if __name__ == '__main__':
# define ico
# load the qt application
app = QtWidgets.QApplication(sys.argv)
tray_icon = QSystemTrayIcon(QIcon('media/icon.PNG'), parent=app)
tray_icon.setToolTip('RNApp')
tray_icon.show()
# splash screen
splash = QLabel()
pixmap = QPixmap('media/logo/RN.png')
# pixmap = pixmap.scaled(640, 640)
splash.setPixmap(pixmap)
splash.setWindowFlags(Qt.SplashScreen | Qt.FramelessWindowHint)
splash.show()
# main window init
window = MainWindow(app=app)
window.setWindowIcon(QIcon('media/logo/RN.png'))
# make tray menu
menu = QMenu()
exit_action = menu.addAction('Exit')
exit_action.triggered.connect(window.close)
# stylesheet init
stylesheet = QFile('ui/stylesheet/dark.qss')
stylesheet.open(QFile.ReadOnly | QFile.Text)
stream = QTextStream(stylesheet)
app.setStyleSheet(stream.readAll())
# splash screen destroy
splash.destroy()
window.show()
app.exec_()
print('Resuming Console Interaction.')

the-stack_0_1352

# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains mention auto-encoder implementation."""
import flax.linen as nn
import jax.numpy as jnp
from language.mentionmemory.encoders import base_encoder
from language.mentionmemory.encoders import encoder_registry
from language.mentionmemory.modules import embedding
from language.mentionmemory.modules import retrieval_update_layers
from language.mentionmemory.modules import transformer
from language.mentionmemory.utils import default_values
from language.mentionmemory.utils import jax_utils as jut
from language.mentionmemory.utils.custom_types import Array, Dtype, InitType # pylint: disable=g-multiple-import
import ml_collections
@encoder_registry.register_encoder('mauto')
class MautoEncoder(base_encoder.BaseEncoder):
"""Mention autoencoder.
More precisely, this model is designed to evaluate information stored by a
mention encoder, as well as varying methods for incorporating information into
a language model. During pre-training the model has access to oracle memory
entries corresponding to a subset of linked mentions in the passage.
Attributes:
vocab_size: size of token vocabulary.
hidden_size: dimensionality of token representations.
intermediate_dim: dimensionality of intermediate representations in MLP.
retrieval_dim: dimensionality of memory values.
retrieval_update_type: means by which retrieved memory vectors are
incorporated into input representation, such as simple addition or
concatenation + MLP.
retrieval_update_config: hyperparameters for the update layer, beyond input
dimension and datatype.
num_attention_heads: number of attention heads in Transformer layers.
num_initial_layers: number of layers in first Transformer block.
num_final_layers: number of layers in second Transformer block.
dtype: data type of encoding (bfloat16 or float32). Parameters and certain
parts of computation (i.e. loss) are always in float32.
max_positions: number of positions (for positional embeddings).
max_length: maximal number of tokens for pre-training.
dropout_rate: dropout rate in Transformer layers.
no_retrieval: if true, do not incorporate retrieved mentions into model.
num_segments: number of possible token types (for token type embeddings).
kernel_init: initialization function for model kernels.
bias_init: initialization function for model biases.
layer_norm_epsilon: layer norm constant for numerical stability.
"""
vocab_size: int
hidden_size: int
intermediate_dim: int
retrieval_dim: int
retrieval_update_type: str
retrieval_update_config: ml_collections.FrozenConfigDict
num_attention_heads: int
num_initial_layers: int
num_final_layers: int
dtype: Dtype
max_positions: int
max_length: int
dropout_rate: float
num_segments: int = 2
no_retrieval: bool = False
kernel_init: InitType = default_values.kernel_init
bias_init: InitType = default_values.bias_init
layer_norm_epsilon: float = default_values.layer_norm_epsilon
def setup(self):
self.embedder = embedding.DictEmbed({
'token_ids':
embedding.Embed(
num_embeddings=self.vocab_size,
embedding_dim=self.hidden_size,
dtype=self.dtype,
embedding_init=self.kernel_init,
),
'position_ids':
embedding.Embed(
num_embeddings=self.max_positions,
embedding_dim=self.hidden_size,
dtype=self.dtype,
embedding_init=self.kernel_init,
),
'segment_ids':
embedding.Embed(
num_embeddings=self.num_segments,
embedding_dim=self.hidden_size,
dtype=self.dtype,
embedding_init=self.kernel_init,
)
})
self.embeddings_layer_norm = nn.LayerNorm(epsilon=self.layer_norm_epsilon)
self.embeddings_dropout = nn.Dropout(rate=self.dropout_rate)
self.initial_encoder = transformer.TransformerBlock(
num_layers=self.num_initial_layers,
model_dim=self.hidden_size,
intermediate_dim=self.intermediate_dim,
num_heads=self.num_attention_heads,
dropout_rate=self.dropout_rate,
dtype=self.dtype,
kernel_init=self.kernel_init,
bias_init=self.bias_init,
layer_norm_epsilon=self.layer_norm_epsilon,
)
self.retrieval_update_layer = retrieval_update_layers.RETRIEVAL_UPDATE_REGISTRY[
self.retrieval_update_type](
input_dim=self.hidden_size,
dtype=self.dtype,
layer_norm_epsilon=self.layer_norm_epsilon,
**self.retrieval_update_config,
)
self.final_encoder = transformer.TransformerBlock(
num_layers=self.num_final_layers,
model_dim=self.hidden_size,
intermediate_dim=self.intermediate_dim,
num_heads=self.num_attention_heads,
dropout_rate=self.dropout_rate,
dtype=self.dtype,
kernel_init=self.kernel_init,
bias_init=self.bias_init,
layer_norm_epsilon=self.layer_norm_epsilon,
)
self.mention_projector = nn.Dense(
features=self.retrieval_dim,
dtype=self.dtype,
)
def forward(self, batch, deterministic):
loss_helpers = {}
logging_helpers = {}
embedded_input = self.embedder({
'token_ids': batch['text_ids'],
'position_ids': batch['position_ids'],
'segment_ids': batch['segment_ids']
})
embedded_input = self.embeddings_layer_norm(embedded_input)
embedded_input = self.embeddings_dropout(embedded_input, deterministic)
loss_helpers['word_embeddings'] = self.embedder.variables['params'][
'embedders_token_ids']['embedding']
attention_mask = batch['text_mask']
encoding = self.initial_encoder(
encoding=embedded_input,
attention_mask=attention_mask,
deterministic=deterministic)
if not self.no_retrieval:
encoding = self.retrieval_update_layer(
encoded_input=encoding,
retrieval_values=jnp.expand_dims(
# [max_retrieval_indices, retrieval_dim]
batch['retrieval_mention_values'],
-2),
retrieval_scores=jnp.expand_dims(
# [max_retrieval_indices]
batch['retrieval_mention_scores'],
-1),
mention_batch_positions=batch['retrieval_mention_batch_positions'],
mention_start_positions=batch['retrieval_mention_start_positions'],
mention_end_positions=batch['retrieval_mention_end_positions'],
mention_mask=batch['retrieval_mention_mask'],
deterministic=deterministic)
encoding = self.final_encoder(
encoding=encoding,
attention_mask=attention_mask,
deterministic=deterministic)
mention_target_batch_positions = jut.matmul_slice(
batch['mention_batch_positions'], batch['mention_target_indices'])
mention_target_start_positions = jut.matmul_slice(
batch['mention_start_positions'], batch['mention_target_indices'])
mention_target_end_positions = jut.matmul_slice(
batch['mention_end_positions'], batch['mention_target_indices'])
mention_start_final_encodings = jut.matmul_2d_index_select(
encoding,
(mention_target_batch_positions, mention_target_start_positions))
mention_end_final_encodings = jut.matmul_2d_index_select(
encoding,
(mention_target_batch_positions, mention_target_end_positions))
loss_helpers['target_mention_encodings'] = self.mention_projector(
jnp.concatenate(
(mention_start_final_encodings, mention_end_final_encodings),
axis=-1))
return encoding, loss_helpers, logging_helpers
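# A minimal construction sketch for the encoder above, with placeholder
# hyperparameters; the values are illustrative only, and 'additive' stands in for
# whichever key exists in retrieval_update_layers.RETRIEVAL_UPDATE_REGISTRY:
#
#     encoder = MautoEncoder(
#         vocab_size=32000, hidden_size=768, intermediate_dim=3072,
#         retrieval_dim=128, retrieval_update_type='additive',
#         retrieval_update_config=ml_collections.FrozenConfigDict({}),
#         num_attention_heads=12, num_initial_layers=4, num_final_layers=8,
#         dtype=jnp.float32, max_positions=512, max_length=128,
#         dropout_rate=0.1)
#
# As a flax.linen module, its parameters are then created with encoder.init(...)
# and applied with encoder.apply(...) on a batch dict of the kind consumed by
# forward() above.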

the-stack_0_1353

import os
import numpy as np
import pandas as pd
from keras.preprocessing.text import text_to_word_sequence, Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, SimpleRNN, LSTM, Dropout, Embedding
from keras.optimizers import Adam, SGD
from keras.metrics import categorical_accuracy
from itertools import chain
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras import layers
import matplotlib.pyplot as plt
'''
Trains a basic RNN and LSTM on the first five tasks of Facebook bAbI.
Inspiration for this code is taken from the Keras team's babi_rnn example.
Specifically, parse_stories and data_to_vector are adapted from babi_rnn;
credit goes to the Keras team.
Original comes from "Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks"
http://arxiv.org/abs/1502.05698
Task Number | FB LSTM Baseline | Keras QA
--- | --- | ---
QA1 - Single Supporting Fact | 50 | 100.0
QA2 - Two Supporting Facts | 20 | 50.0
QA3 - Three Supporting Facts | 20 | 20.5
QA4 - Two Arg. Relations | 61 | 62.9
QA5 - Three Arg. Relations | 70 | 61.9
QA6 - Yes/No Questions | 48 | 50.7
QA7 - Counting | 49 | 78.9
QA8 - Lists/Sets | 45 | 77.2
QA9 - Simple Negation | 64 | 64.0
QA10 - Indefinite Knowledge | 44 | 47.7
QA11 - Basic Coreference | 72 | 74.9
QA12 - Conjunction | 74 | 76.4
QA13 - Compound Coreference | 94 | 94.4
QA14 - Time Reasoning | 27 | 34.8
QA15 - Basic Deduction | 21 | 32.4
QA16 - Basic Induction | 23 | 50.6
QA17 - Positional Reasoning | 51 | 49.1
QA18 - Size Reasoning | 52 | 90.8
QA19 - Path Finding | 8 | 9.0
QA20 - Agent's Motivations | 91 | 90.7
bAbI Project Resources:
https://research.facebook.com/researchers/1543934539189348:
'''
def setup_local_files():
'''get files from local machine and return all training / testing text files in sorted order'''
path = 'tasks'
files = os.listdir(path)
all_training_files = []
all_testing_files = []
for fn in files:
if 'train' in fn:
all_training_files.append(fn)
if 'test' in fn:
all_testing_files.append(fn)
all_training_files = np.asarray(sorted(all_training_files))
all_testing_files = np.asarray(sorted(all_testing_files))
print(all_training_files)
print(all_testing_files)
return (all_training_files,all_testing_files)
# Setup local files
all_training_files,all_testing_files = setup_local_files()
def setup_dictionaries(training_files,testing_files):
'''take in all training / testing files and return as dictionaries
corresponding to tasks'''
training_tasks_dict = dict((k+1,v) for k,v in enumerate(training_files))
testing_tasks_dict = dict((k+1,v) for k,v in enumerate(testing_files))
return (training_tasks_dict,testing_tasks_dict)
# Dictionary setup to grab tasks
training_tasks_dict,testing_tasks_dict = setup_dictionaries(all_training_files,all_testing_files)
def txt_to_raw(task_file):
'''
take in a specific task file and return a raw corpus
'''
with open(f'{os.getcwd()}/tasks/{task_file}', 'r') as file:
raw_corpus = file.readlines()
return raw_corpus
def parse_story(story):
'''
parse the passed in raw text corpus. This is modeled from the babi_rnn source from the Keras team.
GitHub URL: https://github.com/keras-team/keras/blob/master/examples/babi_rnn.py
'''
related_content = []
data = []
for line in story:
line_id,line = line.split(' ',1)
line_id = int(line_id)
if line_id == 1:
related_content = []
if '\t' in line:
question,answer,supporting_facts = line.split('\t')
question = text_to_word_sequence(question,filters='?\n')
answer = [answer]
substory = [ss for ss in related_content if ss]
data.append((substory,question,answer))
related_content.append('')
else:
line = text_to_word_sequence(line,filters='.\n') + ['.']
for word in line:
related_content.append(word)
return data
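# A hedged sketch of the bAbI line format this parser expects and what it
# yields; the sample lines are illustrative, not copied from the dataset files:
#
#     1 Mary moved to the bathroom.
#     2 John went to the hallway.
#     3 Where is Mary?\tbathroom\t1
#
# For that block parse_story returns one (substory, question, answer) triple,
# with text_to_word_sequence lowercasing the tokens:
#
#     (['mary', 'moved', 'to', 'the', 'bathroom', '.',
#       'john', 'went', 'to', 'the', 'hallway', '.'],
#      ['where', 'is', 'mary'],
#      ['bathroom'])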
def get_unique_vocab(train_file,test_file):
'''opens up files and grabs unique vocabulary words from the text'''
with open(f'{os.getcwd()}/tasks/{train_file}','r') as train_file, open(f'{os.getcwd()}/tasks/{test_file}','r') as test_file:
raw_corpus_train = train_file.read()
raw_corpus_test = test_file.read()
train_tokenized = text_to_word_sequence(raw_corpus_train, filters='\n\t?123456789101112131415.')
test_tokenized = text_to_word_sequence(raw_corpus_test, filters='\n\t?123456789101112131415.')
return set(train_tokenized + test_tokenized + ['.'])
def data_to_vector(data,word_dictionary,vocab_size,sentence_limit,story_maxlen,question_maxlen):
'''
Stories and questions are represented as word embeddings and the answers are one-hot encoded.
Takes the stories, finds unique words, and then vectorizing them into pure numeric form.
Each word has a numeric index which it gets replaced by!
This is modeled from the babi_rnn source from the Keras team.
GitHub URL: https://github.com/keras-team/keras/blob/master/examples/babi_rnn.py
'''
STORY_VECTOR,QUESTION_VECTOR,ANSWER_VECTOR = [],[],[]
for story,question,answer in data:
# Encode the story representations
STORY_VECTOR.append([word_dictionary[word] for word in story])
# Encode the question representations
QUESTION_VECTOR.append([word_dictionary[word] for word in question])
ANSWER_VECTOR.append(word_dictionary[answer[0].lower()])
return pad_sequences(STORY_VECTOR,maxlen=story_maxlen),pad_sequences(QUESTION_VECTOR,maxlen=question_maxlen),np.array(ANSWER_VECTOR)
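# A small worked example, assuming a toy word_dictionary (indices and lengths
# are illustrative only):
#
#     word_dictionary = {'mary': 1, 'moved': 2, 'to': 3, 'the': 4,
#                        'bathroom': 5, '.': 6, 'where': 7, 'is': 8}
#     data = [(['mary', 'moved', 'to', 'the', 'bathroom', '.'],
#              ['where', 'is', 'mary'],
#              ['bathroom'])]
#
# With story_maxlen=8 and question_maxlen=4, data_to_vector pads on the left
# with zeros (the Keras pad_sequences default) and returns:
#
#     STORY_VECTOR    -> [[0, 0, 1, 2, 3, 4, 5, 6]]
#     QUESTION_VECTOR -> [[0, 7, 8, 1]]
#     ANSWER_VECTOR   -> [5]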
def zip_sq(story_training_input,question_training_input,story_testing_input,question_testing_input):
'''take story and question vectors and return a single
concatenated vector for both training and testing alongside combined max length'''
zipped_sq_training = list(zip(story_training_input,question_training_input))
zipped_sq_testing = list(zip(story_testing_input,question_testing_input))
sq_training_combined = []
sq_testing_combined = []
for sq in zipped_sq_training:
sq_training_combined.append(list(chain(sq[0],sq[1])))
for sq in zipped_sq_testing:
sq_testing_combined.append(list(chain(sq[0],sq[1])))
combined_maxlen = max(map(len,[sq for sq in sq_training_combined]))
return (sq_training_combined,sq_testing_combined,combined_maxlen)
def build_rnn(combined_maxlen,vocab_maxlen,embedding_size,dropout_rate,learning_rate,task_num):
'''build and return the model to be used'''
print(f'Building, training and evaluating RNN for {task_num}\n\n')
rnn_model = Sequential()
rnn_model.add(Embedding(input_shape=combined_maxlen,input_dim=vocab_maxlen,output_dim=embedding_size))
rnn_model.add(SimpleRNN(50,return_sequences=True))
rnn_model.add(SimpleRNN(50))
rnn_model.add(Dropout(dropout_rate))
rnn_model.add(Dense(vocab_maxlen,activation='softmax'))
rnn_model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam(lr=learning_rate), metrics=['accuracy'])
print('Build completed, returning RNN Model...')
return rnn_model
def run_rnn(rnn_model,x,y,testing_x,testing_y,epochs,task_num):
'''build and run the rnn model and return the history'''
print(f'Training and evaluating RNN for {task_num}\n\n')
train_history = rnn_model.fit(x=np.array(x),y=np.array(y),
epochs=epochs,validation_split=0.05)
loss, accuracy = rnn_model.evaluate(x=np.array(testing_x),
y=np.array(testing_y),
batch_size=32)
print(f'\n\nRNN Evaluation loss: {loss}, Evaluation accuracy: {accuracy} for task {task_num}\n\n')
return train_history, loss, accuracy
def build_lstm(combined_maxlen,vocab_maxlen,embedding_size,dropout_rate,learning_rate,task_num):
'''build and return the model to be used'''
lstm_model = Sequential()
lstm_model.add(Embedding(input_shape=combined_maxlen,input_dim=vocab_maxlen,output_dim=embedding_size))
lstm_model.add(LSTM(50,return_sequences=True))
lstm_model.add(LSTM(50))
lstm_model.add(Dropout(dropout_rate))
lstm_model.add(Dense(vocab_maxlen, activation='softmax'))
lstm_model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam(lr=learning_rate), metrics=['accuracy'])
print('Build completed, returning LSTM Model...')
return lstm_model
def run_lstm(lstm_model,x,y,testing_x,testing_y,epochs,task_num):
'''build and run the lstm model'''
print(f'Training and evaluating LSTM for {task_num}\n\n')
train_history = lstm_model.fit(np.array(x),np.array(y),
epochs=epochs,validation_split=0.05)
loss, accuracy = lstm_model.evaluate(x=np.array(testing_x),
y=np.array(testing_y),
batch_size=32)
print(f'\n\nLSTM Evaluation loss: {loss}, Evaluation accuracy: {accuracy} for task {task_num}\n\n')
return train_history, loss, accuracy
def predict_results(model,story_question_input,answer_testing_input):
'''predict and return results of prediction'''
def predictions_helper(expected,actuals):
'''given the expected answers and the actual answers compare and contrast '''
correct = 0
for i in range(len(expected)):
if expected[i] == actuals[i]:
correct += 1
print(f'\n\n----\nOut of 1000 possible answers the model correctly predicted: {correct}')
predictions = model.predict([np.array(story_question_input)])
idxs_of_preds = []
for preds in predictions:
for idx,ps in enumerate(preds):
if ps == max(preds):
idxs_of_preds.append(idx)
print(f'List of all the predictions made by our Model: \n\n{idxs_of_preds}')
print(f'\n\n---\n\n List of the expected values given by our testing: \n\n{answer_testing_input}')
predictions_helper(answer_testing_input,idxs_of_preds)
def plot_loss(training_history, model_type, task_num):
'''plot training vs validation loss'''
plt.plot(training_history.history['loss'], label='Training Loss')
plt.plot(training_history.history['val_loss'], label='Validation Loss')
plt.legend()
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title(f'{model_type} Training loss vs Evaluation loss for task {task_num}')
def plot_acc(training_history, model_type, task_num):
'''plot training vs validation accuracy'''
plt.plot(training_history.history['acc'], label='Training Accuracy')
plt.plot(training_history.history['val_acc'], label='Validation Accuracy')
plt.legend()
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.title(f'{model_type} Training accuracy vs Evaluation accuracy for task {task_num}')
def plot_all_training_losses_rnn(rnn_hist):
'''plot rnn training losses'''
rnn_loss_epoch_fig = plt.figure().add_subplot(1,1,1)
tasks = ['Single Supporting Fact', 'Two Supporting Facts', 'Three Supporting Facts',
'Two Arg. Relations', 'Three Arg. Relations']
for i in range(5):
rnn_loss_epoch_fig.plot(rnn_hist[i].history['loss'], label=f'Task {i+1} - {tasks[i]}')
rnn_loss_epoch_fig.legend()
rnn_loss_epoch_fig.legend(bbox_to_anchor=(1, 1))
rnn_loss_epoch_fig.set_xlabel('Epoch')
rnn_loss_epoch_fig.set_ylabel('Loss')
rnn_loss_epoch_fig.set_title(f'Loss rate for RNN for tasks 1 - 5 with Adam')
def plot_all_training_acc_rnn(rnn_hist):
rnn_acc_fig = plt.figure().add_subplot(1,1,1)
tasks = ['Single Supporting Fact', 'Two Supporting Facts', 'Three Supporting Facts',
'Two Arg. Relations', 'Three Arg. Relations']
for i in range(5):
rnn_acc_fig.plot(rnn_hist[i].history['acc'], label=f'Task {i+1} - {tasks[i]}')
rnn_acc_fig.legend(bbox_to_anchor=(1, 1))
rnn_acc_fig.set_xlabel('Epoch')
rnn_acc_fig.set_ylabel('Accuracy')
rnn_acc_fig.set_title('Accuracy for RNN for tasks 1 - 5')
def plot_all_training_losses_lstm(lstm_hist):
'''plot all lstm training losses'''
lstm_loss_epoch_fig = plt.figure().add_subplot(1,1,1)
tasks = ['Single Supporting Fact', 'Two Supporting Facts', 'Three Supporting Facts',
'Two Arg. Relations', 'Three Arg. Relations']
for i in range(5):
lstm_loss_epoch_fig.plot(lstm_hist[i].history['loss'], label=f'Task {i+1} - {tasks[i]}')
lstm_loss_epoch_fig.legend(bbox_to_anchor=(1, 1))
lstm_loss_epoch_fig.set_xlabel('Epoch')
lstm_loss_epoch_fig.set_ylabel('Loss')
lstm_loss_epoch_fig.set_title('Loss rate for LSTM for tasks 1 - 5 with Adam')
def plot_all_training_acc_lstm(lstm_hist):
lstm_acc_fig = plt.figure().add_subplot(1,1,1)
tasks = ['Single Supporting Fact', 'Two Supporting Facts', 'Three Supporting Facts',
'Two Arg. Relations', 'Three Arg. Relations']
for i in range(5):
lstm_acc_fig.plot(lstm_hist[i].history['acc'], label=f'Task {i+1} - {tasks[i]}')
lstm_acc_fig.legend(bbox_to_anchor=(1, 1))
lstm_acc_fig.set_xlabel('Epoch')
lstm_acc_fig.set_ylabel('Accuracy')
lstm_acc_fig.set_title('Accuracy for LSTM for tasks 1 - 5')
def run_all(embedding_size,dropout_rate,rnn_learning_rate,lstm_learning_rate,rnn_epochs,lstm_epochs):
'''run all tasks and return history along with evaluations'''
all_rnn_history = []
all_lstm_history = []
all_rnn_eval_loss = []
all_lstm_eval_loss = []
all_rnn_eval_acc = []
all_lstm_eval_acc = []
print('Running all tasks')
print(f'Passed in parameters are the following EMBEDDING SIZE: {embedding_size}, DROPOUT RATE: {dropout_rate}',\
f'LEARNING RATE FOR RNN: {rnn_learning_rate}, LEARNING RATE FOR LSTM: {lstm_learning_rate},\
, RNN EPOCHS: {rnn_epochs}, LSTM EPOCHS: {lstm_epochs}\n\n')
print('Building models...')
for task_number in range(1,6):
print(f'Running RNN and LSTM for Task {task_number}\n\n')
# Text to raw
task_training_corpus = txt_to_raw(training_tasks_dict[task_number])
        task_testing_corpus = txt_to_raw(testing_tasks_dict[task_number])
# Set up parsed stories
training_data = parse_story(task_training_corpus)
testing_data = parse_story(task_testing_corpus)
# Get unique vocabulary
vocab = get_unique_vocab(training_tasks_dict[task_number],testing_tasks_dict[task_number])
# Get max lengths
vocab_maxlen = len(vocab) + 1
story_maxlen = max(map(len,[s for s,_,_ in training_data]))
question_maxlen = max(map(len,[q for _,q,_ in training_data]))
# Set up word indices
word_index = dict((c, i + 1) for i, c in enumerate(vocab))
index_words = [''] + list(vocab)
# Vectorize stories, questions and answers
vocab_maxlen = len(vocab) + 1
sentence_limit = story_maxlen
vocab_size = vocab_maxlen
story_training_input,question_training_input,answer_training_input = data_to_vector(training_data,word_index,
vocab_size,sentence_limit,
story_maxlen,
question_maxlen)
story_testing_input,question_testing_input,answer_testing_input = data_to_vector(testing_data,word_index,
vocab_size,sentence_limit,
story_maxlen,
question_maxlen)
# Zip up story, questions
sq_training_combined,sq_testing_combined,combined_maxlen = zip_sq(story_training_input,question_training_input,
story_testing_input,question_testing_input)
print('Building model, training and evaluating...\n\n')
# Run and plot RNN / LSTM
rnn_model = build_rnn(combined_maxlen=(combined_maxlen,),vocab_maxlen=vocab_maxlen,embedding_size=embedding_size,dropout_rate=dropout_rate,
learning_rate=rnn_learning_rate,task_num=task_number)
lstm_model = build_lstm(combined_maxlen=(combined_maxlen,),vocab_maxlen=vocab_maxlen,embedding_size=embedding_size,dropout_rate=dropout_rate,
learning_rate=lstm_learning_rate,task_num=task_number)
rnn_history, rnn_eval_loss, rnn_eval_acc = run_rnn(rnn_model=rnn_model,x=sq_training_combined,
y=answer_training_input,
testing_x=sq_testing_combined,
testing_y=answer_testing_input,
epochs=rnn_epochs,task_num=task_number)
lstm_history, lstm_eval_loss, lstm_eval_acc = run_lstm(lstm_model=lstm_model,x=sq_training_combined,
y=answer_training_input,testing_x=sq_testing_combined,
testing_y=answer_testing_input,
epochs=lstm_epochs,task_num=task_number)
# Make Predictions
print(f'\n\n RNN Model Predictions for task {task_number}\n')
rnn_predictions = predict_results(rnn_model, sq_testing_combined, answer_testing_input)
print(f'\n\n LSTM Model Predictions for task {task_number}\n')
lstm_predictions = predict_results(lstm_model, sq_testing_combined, answer_testing_input)
all_rnn_history.append(rnn_history)
all_lstm_history.append(lstm_history)
all_rnn_eval_loss.append(rnn_eval_loss)
all_rnn_eval_acc.append(rnn_eval_acc)
all_lstm_eval_loss.append(lstm_eval_loss)
all_lstm_eval_acc.append(lstm_eval_acc)
print(f'End build for task {task_number}')
return (all_rnn_history,all_lstm_history,
all_rnn_eval_loss,all_rnn_eval_acc,
all_lstm_eval_loss,all_lstm_eval_acc)
# All history for the model runs
all_history_evaluations = run_all(embedding_size=50,dropout_rate=0.10,rnn_learning_rate=0.0001,
lstm_learning_rate=0.001,rnn_epochs=20,lstm_epochs=30)
# Separated histories for RNN / LSTM and Evaluation Loss / Accuracy
rnn_hist,lstm_hist,rnn_eval_loss,rnn_eval_acc,lstm_eval_loss,lstm_eval_acc = all_history_evaluations

the-stack_0_1354

import pandas as pd
import python_bitbankcc
import datetime
import numpy as np
import os
bitbank_pub = python_bitbankcc.public()
PATH = os.path.dirname(__file__)
def make_data(pair, start_day, end_day=None, return_window=12):
    """
    :param pair: currency pair
    :param start_day: first day to fetch data for (yyyymmdd)
    :param end_day: last day to fetch data for (yyyymmdd); defaults to today
    :param return_window: window width used for the return calculation
    """
str_pattern = "%Y%m%d"
col_names = ["open", "high", "low", "close", "vol", "timestamp"]
output_col_names = [
"open",
"high",
"low",
"close",
"vol",
"timestamp",
"VWAP",
"log_return",
"upper_shadow",
"lower_shadow",
]
    # Normalize the run date to 00:00:00
today_zero = datetime.datetime.today().strftime(str_pattern)
today_zero = datetime.datetime.strptime(today_zero, str_pattern)
    # If end_day is not given or lies beyond the run date, fall back to the run date
if end_day is None:
end_day = today_zero
else:
end_day = datetime.datetime.strptime(end_day, str_pattern)
if end_day >= today_zero:
end_day = today_zero
    # Convert to datetime for the while-loop condition
target_day = datetime.datetime.strptime(start_day, str_pattern)
    # Cap return_window so the return never spans more than one day (288 five-minute bars)
if return_window > 288:
return_window = 288
while target_day <= end_day:
        # Fetch the previous day's candles
target_yesterday = target_day - datetime.timedelta(days=1)
target_yesterday_str = target_yesterday.strftime(str_pattern)
pre_candles = bitbank_pub.get_candlestick(pair, "5min", target_yesterday_str)["candlestick"][0]["ohlcv"]
df_pre_candles = pd.DataFrame(np.array(pre_candles, dtype=float), columns=col_names)
        # Fetch the target day's candles
target_day_str = target_day.strftime(str_pattern)
candles = bitbank_pub.get_candlestick(pair, "5min", target_day_str)["candlestick"][0]["ohlcv"]
df_candles = pd.DataFrame(np.array(candles, dtype=float), columns=col_names)
        # Convert timestamps from milliseconds to datetimes
df_output = pd.concat([df_pre_candles, df_candles])
df_output["timestamp"] = df_output["timestamp"] / 1000
df_output["timestamp"] = pd.to_datetime(df_output["timestamp"], unit="s")
        # Compute VWAP over a rolling window of 288 five-minute bars (one day)
df_output["multiple"] = df_output["close"].multiply(df_output["vol"]).rolling(288).sum().values
df_output["vol_sum"] = df_output["vol"].rolling(288).sum().values
df_output["VWAP"] = df_output["multiple"] / df_output["vol_sum"]
# log return
# log(P(t)/P(t-window)) ~ P(t) / P(t-window) - 1
df_output["log_return"] = (df_output["close"] / df_output["close"].shift(periods=return_window)) - 1
        # Drop the previous day's rows from the output DataFrame
df_output = df_output[df_output["timestamp"] >= target_day]
        # Compute candle shadows (wicks)
df_output["upper_shadow"] = upper_shadow(df_output)
df_output["lower_shadow"] = lower_shadow(df_output)
        # Keep only the required columns
df_output = df_output[output_col_names]
        # Write the day's data to CSV
df_output.to_csv(PATH + "/data/" + target_day_str + ".csv")
        # Advance to the next target day
target_day += datetime.timedelta(days=1)
def upper_shadow(df):
    """
    Get the upper shadow (wick).
    :param df: OHLC data
    :return: upper shadow length
    """
return df["high"] - np.maximum(df["close"], df["open"])
def lower_shadow(df):
    """
    Get the lower shadow (wick).
    :param df: OHLC data
    :return: lower shadow length
    """
return np.minimum(df["close"], df["open"]) - df["low"]
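# A small worked example for the shadow helpers, using made-up OHLC values:
#
#     row = pd.DataFrame({'open': [100.0], 'high': [110.0],
#                         'low': [95.0], 'close': [105.0]})
#     upper_shadow(row)   # 110 - max(105, 100) = 5.0
#     lower_shadow(row)   # min(105, 100) - 95  = 5.0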
if __name__ == "__main__":
    # Quick functional test run
make_data("bat_jpy", "20220105", "20220106")

the-stack_0_1356

from __future__ import division
import collections
from collections import OrderedDict
import copy
from datetime import datetime
import functools
import itertools
import json
import math
import threading
import time
import warnings
try:
from bson import json_util, SON
except ImportError:
    json_util = SON = None
try:
import execjs
except ImportError:
execjs = None
try:
from pymongo import ReturnDocument
except ImportError:
class ReturnDocument(object):
BEFORE = False
AFTER = True
from sentinels import NOTHING
from six import iteritems
from six import iterkeys
from six import itervalues
from six import MAXSIZE
from six.moves import xrange
from six import string_types
from six import text_type
from mongomock.command_cursor import CommandCursor
from mongomock import DuplicateKeyError
from mongomock.filtering import filter_applies
from mongomock.filtering import iter_key_candidates
from mongomock import helpers
from mongomock import InvalidOperation
from mongomock import ObjectId
from mongomock import OperationFailure
from mongomock.results import BulkWriteResult
from mongomock.results import DeleteResult
from mongomock.results import InsertManyResult
from mongomock.results import InsertOneResult
from mongomock.results import UpdateResult
from mongomock.write_concern import WriteConcern
from mongomock import WriteError
lock = threading.RLock()
def validate_is_mapping(option, value):
if not isinstance(value, collections.Mapping):
raise TypeError('%s must be an instance of dict, bson.son.SON, or '
'other type that inherits from '
'collections.Mapping' % (option,))
def validate_is_mutable_mapping(option, value):
if not isinstance(value, collections.MutableMapping):
raise TypeError('%s must be an instance of dict, bson.son.SON, or '
'other type that inherits from '
'collections.MutableMapping' % (option,))
def validate_ok_for_replace(replacement):
validate_is_mapping('replacement', replacement)
if replacement:
first = next(iter(replacement))
if first.startswith('$'):
raise ValueError('replacement can not include $ operators')
def validate_ok_for_update(update):
validate_is_mapping('update', update)
if not update:
raise ValueError('update only works with $ operators')
first = next(iter(update))
if not first.startswith('$'):
raise ValueError('update only works with $ operators')
def validate_write_concern_params(**params):
if params:
WriteConcern(**params)
def get_value_by_dot(doc, key):
"""Get dictionary value using dotted key"""
result = doc
for i in key.split('.'):
result = result[i]
return result
def set_value_by_dot(doc, key, value):
"""Set dictionary value using dotted key"""
result = doc
keys = key.split('.')
for i in keys[:-1]:
if i not in result:
result[i] = {}
result = result[i]
result[keys[-1]] = value
return doc
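# Usage sketch for the dotted-key helpers above, on a toy document:
#
#     doc = {'address': {'city': 'Paris'}}
#     get_value_by_dot(doc, 'address.city')          # -> 'Paris'
#     set_value_by_dot(doc, 'address.zip', '75001')  # creates the nested key
#     # doc is now {'address': {'city': 'Paris', 'zip': '75001'}}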
class BulkWriteOperation(object):
def __init__(self, builder, selector, is_upsert=False):
self.builder = builder
self.selector = selector
self.is_upsert = is_upsert
def upsert(self):
assert not self.is_upsert
return BulkWriteOperation(self.builder, self.selector, is_upsert=True)
def register_remove_op(self, multi):
collection = self.builder.collection
selector = self.selector
def exec_remove():
op_result = collection.remove(selector, multi=multi)
if op_result.get("ok"):
return {'nRemoved': op_result.get('n')}
err = op_result.get("err")
if err:
return {"writeErrors": [err]}
return {}
self.builder.executors.append(exec_remove)
def remove(self):
assert not self.is_upsert
self.register_remove_op(multi=True)
def remove_one(self,):
assert not self.is_upsert
self.register_remove_op(multi=False)
def register_update_op(self, document, multi, **extra_args):
if not extra_args.get("remove"):
validate_ok_for_update(document)
collection = self.builder.collection
selector = self.selector
def exec_update():
result = collection._update(spec=selector, document=document,
multi=multi, upsert=self.is_upsert,
**extra_args)
ret_val = {}
if result.get('upserted'):
ret_val["upserted"] = result.get('upserted')
ret_val["nUpserted"] = result.get('n')
modified = result.get('nModified')
if modified is not None:
ret_val['nModified'] = modified
ret_val['nMatched'] = modified
if result.get('err'):
ret_val['err'] = result.get('err')
return ret_val
self.builder.executors.append(exec_update)
def update(self, document):
self.register_update_op(document, multi=True)
def update_one(self, document):
self.register_update_op(document, multi=False)
def replace_one(self, document):
self.register_update_op(document, multi=False, remove=True)
class BulkOperationBuilder(object):
def __init__(self, collection, ordered=False):
self.collection = collection
self.ordered = ordered
self.results = {}
self.executors = []
self.done = False
self._insert_returns_nModified = True
self._update_returns_nModified = True
def find(self, selector):
return BulkWriteOperation(self, selector)
def insert(self, doc):
def exec_insert():
self.collection.insert(doc)
return {'nInserted': 1}
self.executors.append(exec_insert)
def __aggregate_operation_result(self, total_result, key, value):
agg_val = total_result.get(key)
        assert agg_val is not None, "Unknown operation result %s=%s" \
            " (unrecognized key)" % (key, value)
if isinstance(agg_val, int):
total_result[key] += value
elif isinstance(agg_val, list):
if key == "upserted":
new_element = {"index": len(agg_val), "_id": value}
agg_val.append(new_element)
else:
agg_val.append(value)
else:
            assert False, "Fixme: missed aggregation rule for type: %s for" \
                " key {%s=%s}" % (type(agg_val), key, agg_val)
def _set_nModified_policy(self, insert, update):
self._insert_returns_nModified = insert
self._update_returns_nModified = update
def execute(self, write_concern=None):
if not self.executors:
raise InvalidOperation("Bulk operation empty!")
if self.done:
raise InvalidOperation("Bulk operation already executed!")
self.done = True
result = {'nModified': 0, 'nUpserted': 0, 'nMatched': 0,
'writeErrors': [], 'upserted': [], 'writeConcernErrors': [],
'nRemoved': 0, 'nInserted': 0}
has_update = False
has_insert = False
broken_nModified_info = False
for execute_func in self.executors:
exec_name = execute_func.__name__
op_result = execute_func()
for (key, value) in op_result.items():
self.__aggregate_operation_result(result, key, value)
if exec_name == "exec_update":
has_update = True
if "nModified" not in op_result:
broken_nModified_info = True
has_insert |= exec_name == "exec_insert"
if broken_nModified_info:
result.pop('nModified')
elif has_insert and self._insert_returns_nModified:
pass
elif has_update and self._update_returns_nModified:
pass
elif self._update_returns_nModified and self._insert_returns_nModified:
pass
else:
result.pop('nModified')
return result
def add_insert(self, doc):
self.insert(doc)
def add_update(self, selector, doc, multi, upsert):
write_operation = BulkWriteOperation(self, selector, is_upsert=upsert)
write_operation.register_update_op(doc, multi)
def add_replace(self, selector, doc, upsert):
write_operation = BulkWriteOperation(self, selector, is_upsert=upsert)
write_operation.replace_one(doc)
def add_delete(self, selector, just_one):
write_operation = BulkWriteOperation(self, selector, is_upsert=False)
write_operation.register_remove_op(not just_one)
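# A hedged usage sketch of the bulk-write builder above (mirroring PyMongo's
# legacy bulk API); the collection and documents are placeholders:
#
#     bulk = collection.initialize_ordered_bulk_op()
#     bulk.insert({'name': 'a'})
#     bulk.find({'name': 'a'}).update({'$set': {'seen': True}})
#     bulk.find({'name': 'b'}).upsert().update_one({'$set': {'seen': False}})
#     result = bulk.execute()   # dict with nInserted, nMatched, nModified, ...
#
# execute() raises InvalidOperation if the builder is empty or already executed.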
class Collection(object):
def __init__(self, db, name):
self.name = name
self.full_name = "{0}.{1}".format(db.name, name)
self.database = db
self._documents = OrderedDict()
self._uniques = []
def __repr__(self):
return "Collection({0}, '{1}')".format(self.database, self.name)
def __getitem__(self, name):
return self.database[self.name + '.' + name]
def __getattr__(self, name):
return self.__getitem__(name)
def initialize_unordered_bulk_op(self):
return BulkOperationBuilder(self, ordered=False)
def initialize_ordered_bulk_op(self):
return BulkOperationBuilder(self, ordered=True)
def insert(self, data, manipulate=True, check_keys=True,
continue_on_error=False, **kwargs):
warnings.warn("insert is deprecated. Use insert_one or insert_many "
"instead.", DeprecationWarning, stacklevel=2)
validate_write_concern_params(**kwargs)
return self._insert(data)
def insert_one(self, document):
validate_is_mutable_mapping('document', document)
return InsertOneResult(self._insert(document), acknowledged=True)
def insert_many(self, documents, ordered=True):
if not isinstance(documents, collections.Iterable) or not documents:
raise TypeError('documents must be a non-empty list')
for document in documents:
validate_is_mutable_mapping('document', document)
return InsertManyResult(self._insert(documents), acknowledged=True)
def _insert(self, data):
if isinstance(data, list):
return [self._insert(item) for item in data]
if not all(isinstance(k, string_types) for k in data):
raise ValueError("Document keys must be strings")
if '_id' not in data:
data['_id'] = ObjectId()
object_id = data['_id']
if isinstance(object_id, dict):
object_id = helpers.hashdict(object_id)
if object_id in self._documents:
raise DuplicateKeyError("Duplicate Key Error", 11000)
for unique in self._uniques:
find_kwargs = {}
for key, direction in unique:
find_kwargs[key] = data.get(key)
answer = self.find(find_kwargs)
if answer.count() > 0:
raise DuplicateKeyError("Duplicate Key Error", 11000)
with lock:
self._documents[object_id] = self._internalize_dict(data)
return data['_id']
def _internalize_dict(self, d):
return {k: copy.deepcopy(v) for k, v in iteritems(d)}
def _has_key(self, doc, key):
key_parts = key.split('.')
sub_doc = doc
for part in key_parts:
if part not in sub_doc:
return False
sub_doc = sub_doc[part]
return True
def _remove_key(self, doc, key):
key_parts = key.split('.')
sub_doc = doc
for part in key_parts[:-1]:
sub_doc = sub_doc[part]
del sub_doc[key_parts[-1]]
def update_one(self, filter, update, upsert=False):
validate_ok_for_update(update)
return UpdateResult(self._update(filter, update, upsert=upsert),
acknowledged=True)
def update_many(self, filter, update, upsert=False):
validate_ok_for_update(update)
return UpdateResult(self._update(filter, update, upsert=upsert,
multi=True),
acknowledged=True)
def replace_one(self, filter, replacement, upsert=False):
validate_ok_for_replace(replacement)
return UpdateResult(self._update(filter, replacement, upsert=upsert),
acknowledged=True)
def update(self, spec, document, upsert=False, manipulate=False,
multi=False, check_keys=False, **kwargs):
warnings.warn("update is deprecated. Use replace_one, update_one or "
"update_many instead.", DeprecationWarning, stacklevel=2)
return self._update(spec, document, upsert, manipulate, multi,
check_keys, **kwargs)
def _update(self, spec, document, upsert=False, manipulate=False,
multi=False, check_keys=False, **kwargs):
validate_is_mapping('spec', spec)
validate_is_mapping('document', document)
updated_existing = False
upserted_id = None
num_updated = 0
for existing_document in itertools.chain(self._iter_documents(spec), [None]):
# we need was_insert for the setOnInsert update operation
was_insert = False
# the sentinel document means we should do an upsert
if existing_document is None:
if not upsert or num_updated:
continue
_id = document.get('_id')
to_insert = dict(spec, _id=_id) if _id else spec
to_insert = self._expand_dots(to_insert)
upserted_id = self._insert(self._discard_operators(to_insert))
existing_document = self._documents[upserted_id]
was_insert = True
else:
updated_existing = True
num_updated += 1
first = True
subdocument = None
for k, v in iteritems(document):
if k in _updaters.keys():
updater = _updaters[k]
subdocument = self._update_document_fields_with_positional_awareness(
existing_document, v, spec, updater, subdocument)
elif k == '$setOnInsert':
if not was_insert:
continue
subdocument = self._update_document_fields_with_positional_awareness(
existing_document, v, spec, _set_updater, subdocument)
elif k == '$currentDate':
for value in itervalues(v):
if value == {'$type': 'timestamp'}:
raise NotImplementedError('timestamp is not supported so far')
subdocument = self._update_document_fields_with_positional_awareness(
existing_document, v, spec, _current_date_updater, subdocument)
elif k == '$addToSet':
for field, value in iteritems(v):
nested_field_list = field.rsplit('.')
if len(nested_field_list) == 1:
if field not in existing_document:
existing_document[field] = []
# document should be a list append to it
if isinstance(value, dict):
if '$each' in value:
# append the list to the field
existing_document[field] += [
obj for obj in list(value['$each'])
if obj not in existing_document[field]]
continue
if value not in existing_document[field]:
existing_document[field].append(value)
continue
# push to array in a nested attribute
else:
# create nested attributes if they do not exist
subdocument = existing_document
for field in nested_field_list[:-1]:
if field not in subdocument:
subdocument[field] = {}
subdocument = subdocument[field]
# we're pushing a list
push_results = []
if nested_field_list[-1] in subdocument:
# if the list exists, then use that list
push_results = subdocument[
nested_field_list[-1]]
if isinstance(value, dict) and '$each' in value:
push_results += [
obj for obj in list(value['$each'])
if obj not in push_results]
elif value not in push_results:
push_results.append(value)
subdocument[nested_field_list[-1]] = push_results
elif k == '$pull':
for field, value in iteritems(v):
nested_field_list = field.rsplit('.')
# nested fields includes a positional element
# need to find that element
if '$' in nested_field_list:
if not subdocument:
subdocument = self._get_subdocument(
existing_document, spec, nested_field_list)
# value should be a dictionary since we're pulling
pull_results = []
# and the last subdoc should be an array
for obj in subdocument[nested_field_list[-1]]:
if isinstance(obj, dict):
for pull_key, pull_value in iteritems(value):
if obj[pull_key] != pull_value:
pull_results.append(obj)
continue
if obj != value:
pull_results.append(obj)
# cannot write to doc directly as it doesn't save to
# existing_document
subdocument[nested_field_list[-1]] = pull_results
else:
arr = existing_document
for field in nested_field_list:
if field not in arr:
break
arr = arr[field]
if not isinstance(arr, list):
continue
if isinstance(value, dict):
for idx, obj in enumerate(arr):
if filter_applies(value, obj):
del arr[idx]
else:
for idx, obj in enumerate(arr):
if value == obj:
del arr[idx]
elif k == '$pullAll':
for field, value in iteritems(v):
nested_field_list = field.rsplit('.')
if len(nested_field_list) == 1:
if field in existing_document:
arr = existing_document[field]
existing_document[field] = [
obj for obj in arr if obj not in value]
continue
else:
subdocument = existing_document
for nested_field in nested_field_list[:-1]:
if nested_field not in subdocument:
break
subdocument = subdocument[nested_field]
if nested_field_list[-1] in subdocument:
arr = subdocument[nested_field_list[-1]]
subdocument[nested_field_list[-1]] = [
obj for obj in arr if obj not in value]
elif k == '$push':
for field, value in iteritems(v):
nested_field_list = field.rsplit('.')
if len(nested_field_list) == 1:
if field not in existing_document:
existing_document[field] = []
# document should be a list
# append to it
if isinstance(value, dict):
if '$each' in value:
# append the list to the field
existing_document[field] += list(value['$each'])
continue
existing_document[field].append(value)
continue
# nested fields includes a positional element
# need to find that element
elif '$' in nested_field_list:
if not subdocument:
subdocument = self._get_subdocument(
existing_document, spec, nested_field_list)
# we're pushing a list
push_results = []
if nested_field_list[-1] in subdocument:
# if the list exists, then use that list
push_results = subdocument[nested_field_list[-1]]
if isinstance(value, dict):
# check to see if we have the format
# { '$each': [] }
if '$each' in value:
push_results += list(value['$each'])
else:
push_results.append(value)
else:
push_results.append(value)
# cannot write to doc directly as it doesn't save to
# existing_document
subdocument[nested_field_list[-1]] = push_results
# push to array in a nested attribute
else:
# create nested attributes if they do not exist
subdocument = existing_document
for field in nested_field_list[:-1]:
if field not in subdocument:
subdocument[field] = {}
subdocument = subdocument[field]
# we're pushing a list
push_results = []
if nested_field_list[-1] in subdocument:
# if the list exists, then use that list
push_results = subdocument[nested_field_list[-1]]
if isinstance(value, dict) and '$each' in value:
push_results += list(value['$each'])
else:
push_results.append(value)
subdocument[nested_field_list[-1]] = push_results
else:
if first:
# replace entire document
for key in document.keys():
if key.startswith('$'):
# can't mix modifiers with non-modifiers in
# update
raise ValueError('field names cannot start with $ [{}]'.format(k))
_id = spec.get('_id', existing_document.get('_id'))
existing_document.clear()
if _id:
existing_document['_id'] = _id
existing_document.update(self._internalize_dict(document))
if existing_document['_id'] != _id:
raise OperationFailure(
"The _id field cannot be changed from {0} to {1}"
.format(existing_document['_id'], _id))
break
else:
# can't mix modifiers with non-modifiers in update
raise ValueError(
'Invalid modifier specified: {}'.format(k))
first = False
# if empty document comes
if len(document) == 0:
_id = spec.get('_id', existing_document.get('_id'))
existing_document.clear()
if _id:
existing_document['_id'] = _id
if not multi:
break
return {
text_type("connectionId"): self.database.client._id,
text_type("err"): None,
text_type("n"): num_updated,
text_type("nModified"): num_updated if updated_existing else 0,
text_type("ok"): 1,
text_type("upserted"): upserted_id,
text_type("updatedExisting"): updated_existing,
}
def _get_subdocument(self, existing_document, spec, nested_field_list):
"""This method retrieves the subdocument of the existing_document.nested_field_list.
It uses the spec to filter through the items. It will continue to grab nested documents
until it can go no further. It will then return the subdocument that was last saved.
'$' is the positional operator, so we use the $elemMatch in the spec to find the right
subdocument in the array.
"""
# current document in view
doc = existing_document
# previous document in view
subdocument = existing_document
# current spec in view
subspec = spec
# walk down the dictionary
for subfield in nested_field_list:
if subfield == '$':
# positional element should have the equivalent elemMatch in the
# query
subspec = subspec['$elemMatch']
for item in doc:
# iterate through
if filter_applies(subspec, item):
# found the matching item save the parent
subdocument = doc
# save the item
doc = item
break
continue
subdocument = doc
doc = doc[subfield]
if subfield not in subspec:
break
subspec = subspec[subfield]
return subdocument
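    # A hedged illustration of the positional lookup above, with made-up data:
    #
    #     existing_document = {'grades': [{'type': 'quiz', 'score': 5},
    #                                     {'type': 'exam', 'score': 9}]}
    #     spec = {'grades': {'$elemMatch': {'type': 'exam'}}}
    #     nested_field_list = ['grades', '$', 'score']
    #
    # Walking 'grades' and then '$' matches the 'exam' element via $elemMatch,
    # so the returned subdocument is that array element:
    #     {'type': 'exam', 'score': 9}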
def _expand_dots(self, doc):
expanded = {}
paths = {}
for k, v in iteritems(doc):
key_parts = k.split('.')
sub_doc = v
for i in reversed(range(1, len(key_parts))):
key = key_parts[i]
sub_doc = {key: sub_doc}
key = key_parts[0]
if key in expanded:
raise WriteError("cannot infer query fields to set, "
"both paths '%s' and '%s' are matched"
% (k, paths[key]))
paths[key] = k
expanded[key] = sub_doc
return expanded
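    # Example of the expansion performed above:
    #
    #     {'a.b.c': 1, 'x': 2}  ->  {'a': {'b': {'c': 1}}, 'x': 2}
    #
    # Conflicting dotted paths such as {'a.b': 1, 'a.c': 2} raise WriteError,
    # because both keys expand under the same top-level field 'a'.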
def _discard_operators(self, doc):
# TODO(this looks a little too naive...)
return {k: v for k, v in iteritems(doc) if not k.startswith("$")}
def find(self, filter=None, projection=None, skip=0, limit=0,
no_cursor_timeout=False, cursor_type=None, sort=None,
allow_partial_results=False, oplog_replay=False, modifiers=None,
batch_size=0, manipulate=True):
spec = filter
if spec is None:
spec = {}
validate_is_mapping('filter', spec)
return Cursor(self, spec, sort, projection, skip, limit)
def _get_dataset(self, spec, sort, fields, as_class, skip):
dataset = (self._copy_only_fields(document, fields, as_class)
for document in self._iter_documents(spec))
if sort:
for sortKey, sortDirection in reversed(sort):
dataset = iter(sorted(
dataset, key=lambda x: _resolve_sort_key(sortKey, x),
reverse=sortDirection < 0))
for i in xrange(skip):
try:
next(dataset)
except StopIteration:
pass
return dataset
def _copy_field(self, obj, container):
if isinstance(obj, list):
new = []
for item in obj:
new.append(self._copy_field(item, container))
return new
if isinstance(obj, dict):
new = container()
for key, value in obj.items():
new[key] = self._copy_field(value, container)
return new
else:
return copy.copy(obj)
def _extract_projection_operators(self, fields):
"""Removes and returns fields with projection operators."""
result = {}
allowed_projection_operators = set(['$elemMatch'])
for key, value in iteritems(fields):
if isinstance(value, dict):
for op in value:
if op not in allowed_projection_operators:
raise ValueError('Unsupported projection option: {}'.format(op))
result[key] = value
for key in result:
del fields[key]
return result
def _apply_projection_operators(self, ops, doc, doc_copy):
"""Applies projection operators to copied document."""
for field, op in iteritems(ops):
if field not in doc_copy:
if field in doc:
# field was not copied yet (since we are in include mode)
doc_copy[field] = doc[field]
else:
# field doesn't exist in original document, no work to do
continue
if '$elemMatch' in op:
if isinstance(doc_copy[field], list):
# find the first item that matches
matched = False
for item in doc_copy[field]:
if filter_applies(op['$elemMatch'], item):
matched = True
doc_copy[field] = [item]
break
# nothing have matched
if not matched:
del doc_copy[field]
else:
# remove the field since there is nothing to iterate
del doc_copy[field]
def _copy_only_fields(self, doc, fields, container):
"""Copy only the specified fields."""
if fields is None:
return self._copy_field(doc, container)
else:
if not fields:
fields = {"_id": 1}
if not isinstance(fields, dict):
fields = helpers._fields_list_to_dict(fields)
# we can pass in something like {"_id":0, "field":1}, so pull the id
# value out and hang on to it until later
id_value = fields.pop('_id', 1)
# filter out fields with projection operators, we will take care of them later
projection_operators = self._extract_projection_operators(fields)
# other than the _id field, all fields must be either includes or
# excludes, this can evaluate to 0
if len(set(list(fields.values()))) > 1:
raise ValueError(
'You cannot currently mix including and excluding fields.')
# if we have novalues passed in, make a doc_copy based on the
# id_value
if len(list(fields.values())) == 0:
if id_value == 1:
doc_copy = container()
else:
doc_copy = self._copy_field(doc, container)
# if 1 was passed in as the field values, include those fields
elif list(fields.values())[0] == 1:
doc_copy = container()
for key in fields:
key_parts = key.split('.')
subdocument = doc
subdocument_copy = doc_copy
full_key_path_found = True
for key_part in key_parts[:-1]:
if key_part not in subdocument:
full_key_path_found = False
break
subdocument = subdocument[key_part]
subdocument_copy = subdocument_copy.setdefault(key_part, {})
if not full_key_path_found or key_parts[-1] not in subdocument:
continue
subdocument_copy[key_parts[-1]] = subdocument[key_parts[-1]]
# otherwise, exclude the fields passed in
else:
doc_copy = self._copy_field(doc, container)
for key in fields:
key_parts = key.split('.')
subdocument_copy = doc_copy
full_key_path_found = True
for key_part in key_parts[:-1]:
if key_part not in subdocument_copy:
full_key_path_found = False
break
subdocument_copy = subdocument_copy[key_part]
if not full_key_path_found or key_parts[-1] not in subdocument_copy:
continue
del subdocument_copy[key_parts[-1]]
# set the _id value if we requested it, otherwise remove it
if id_value == 0:
doc_copy.pop('_id', None)
else:
if '_id' in doc:
doc_copy['_id'] = doc['_id']
fields['_id'] = id_value # put _id back in fields
# time to apply the projection operators and put back their fields
self._apply_projection_operators(projection_operators, doc, doc_copy)
for field, op in iteritems(projection_operators):
fields[field] = op
return doc_copy
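    # Projection behaviour of _copy_only_fields, illustrated on a toy document
    # (a plain dict used as the container for brevity):
    #
    #     doc = {'_id': 1, 'a': {'b': 2, 'c': 3}, 'd': 4}
    #     fields {'a.b': 1}          -> {'_id': 1, 'a': {'b': 2}}
    #     fields {'d': 0}            -> {'_id': 1, 'a': {'b': 2, 'c': 3}}
    #     fields {'_id': 0, 'd': 1}  -> {'d': 4}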
def _update_document_fields(self, doc, fields, updater):
"""Implements the $set behavior on an existing document"""
for k, v in iteritems(fields):
self._update_document_single_field(doc, k, v, updater)
def _update_document_fields_positional(self, doc, fields, spec, updater,
subdocument=None):
"""Implements the $set behavior on an existing document"""
for k, v in iteritems(fields):
if '$' in k:
field_name_parts = k.split('.')
if not subdocument:
current_doc = doc
subspec = spec
for part in field_name_parts[:-1]:
if part == '$':
subspec = subspec.get('$elemMatch', subspec)
for item in current_doc:
if filter_applies(subspec, item):
current_doc = item
break
continue
new_spec = {}
for el in subspec:
if el.startswith(part):
if len(el.split(".")) > 1:
new_spec[".".join(
el.split(".")[1:])] = subspec[el]
else:
new_spec = subspec[el]
subspec = new_spec
current_doc = current_doc[part]
subdocument = current_doc
if (field_name_parts[-1] == '$' and
isinstance(subdocument, list)):
for i, doc in enumerate(subdocument):
if filter_applies(subspec, doc):
subdocument[i] = v
break
continue
updater(subdocument, field_name_parts[-1], v)
continue
# otherwise, we handle it the standard way
self._update_document_single_field(doc, k, v, updater)
return subdocument
def _update_document_fields_with_positional_awareness(self, existing_document, v, spec,
updater, subdocument):
positional = any('$' in key for key in iterkeys(v))
if positional:
return self._update_document_fields_positional(
existing_document, v, spec, updater, subdocument)
self._update_document_fields(existing_document, v, updater)
return subdocument
def _update_document_single_field(self, doc, field_name, field_value, updater):
field_name_parts = field_name.split(".")
for part in field_name_parts[:-1]:
if isinstance(doc, list):
try:
if part == '$':
doc = doc[0]
else:
doc = doc[int(part)]
continue
except ValueError:
pass
elif isinstance(doc, dict):
doc = doc.setdefault(part, {})
else:
return
field_name = field_name_parts[-1]
if isinstance(doc, list):
try:
doc[int(field_name)] = field_value
except IndexError:
pass
else:
updater(doc, field_name, field_value)
def _iter_documents(self, filter=None):
return (document for document in list(itervalues(self._documents))
if filter_applies(filter, document))
def find_one(self, filter=None, *args, **kwargs):
# Allow calling find_one with a non-dict argument that gets used as
# the id for the query.
if filter is None:
filter = {}
if not isinstance(filter, collections.Mapping):
filter = {'_id': filter}
try:
return next(self.find(filter, *args, **kwargs))
except StopIteration:
return None
def find_one_and_delete(self, filter, projection=None, sort=None, **kwargs):
kwargs['remove'] = True
validate_is_mapping('filter', filter)
return self._find_and_modify(filter, projection, sort=sort, **kwargs)
def find_one_and_replace(self, filter, replacement,
projection=None, sort=None, upsert=False,
return_document=ReturnDocument.BEFORE, **kwargs):
validate_is_mapping('filter', filter)
validate_ok_for_replace(replacement)
return self._find_and_modify(filter, projection, replacement, upsert,
sort, return_document, **kwargs)
def find_one_and_update(self, filter, update,
projection=None, sort=None, upsert=False,
return_document=ReturnDocument.BEFORE, **kwargs):
validate_is_mapping('filter', filter)
validate_ok_for_update(update)
return self._find_and_modify(filter, projection, update, upsert,
sort, return_document, **kwargs)
def find_and_modify(self, query={}, update=None, upsert=False, sort=None,
full_response=False, manipulate=False, **kwargs):
warnings.warn("find_and_modify is deprecated, use find_one_and_delete"
", find_one_and_replace, or find_one_and_update instead",
DeprecationWarning, stacklevel=2)
return self._find_and_modify(query, update=update, upsert=upsert,
sort=sort, **kwargs)
def _find_and_modify(self, query, projection=None, update=None,
upsert=False, sort=None,
return_document=ReturnDocument.BEFORE, **kwargs):
remove = kwargs.get("remove", False)
if kwargs.get("new", False) and remove:
# message from mongodb
raise OperationFailure("remove and returnNew can't co-exist")
if not (remove or update):
raise ValueError("Must either update or remove")
if remove and update:
raise ValueError("Can't do both update and remove")
old = self.find_one(query, projection=projection, sort=sort)
if not old and not upsert:
return
if old and '_id' in old:
query = {'_id': old['_id']}
if remove:
self.delete_one(query)
else:
self._update(query, update, upsert)
if return_document is ReturnDocument.AFTER or kwargs.get('new'):
return self.find_one(query, projection)
return old
def save(self, to_save, manipulate=True, check_keys=True, **kwargs):
warnings.warn("save is deprecated. Use insert_one or replace_one "
"instead", DeprecationWarning, stacklevel=2)
validate_is_mutable_mapping("to_save", to_save)
validate_write_concern_params(**kwargs)
if "_id" not in to_save:
return self.insert(to_save)
else:
self._update({"_id": to_save["_id"]}, to_save, True,
manipulate, check_keys=True, **kwargs)
return to_save.get("_id", None)
def delete_one(self, filter):
validate_is_mapping('filter', filter)
return DeleteResult(self._delete(filter), True)
def delete_many(self, filter):
validate_is_mapping('filter', filter)
return DeleteResult(self._delete(filter, multi=True), True)
def _delete(self, filter, multi=False):
if filter is None:
filter = {}
if not isinstance(filter, collections.Mapping):
filter = {'_id': filter}
to_delete = list(self.find(filter))
deleted_count = 0
for doc in to_delete:
doc_id = doc['_id']
if isinstance(doc_id, dict):
doc_id = helpers.hashdict(doc_id)
del self._documents[doc_id]
deleted_count += 1
if not multi:
break
return {
"connectionId": self.database.client._id,
"n": deleted_count,
"ok": 1.0,
"err": None,
}
def remove(self, spec_or_id=None, multi=True, **kwargs):
warnings.warn("remove is deprecated. Use delete_one or delete_many "
"instead.", DeprecationWarning, stacklevel=2)
validate_write_concern_params(**kwargs)
return self._delete(spec_or_id, multi=multi)
def count(self, filter=None, **kwargs):
if filter is None:
return len(self._documents)
else:
return self.find(filter).count()
def drop(self):
self.database.drop_collection(self.name)
def ensure_index(self, key_or_list, cache_for=300, **kwargs):
self.create_index(key_or_list, cache_for, **kwargs)
def create_index(self, key_or_list, cache_for=300, **kwargs):
if 'unique' in kwargs and kwargs['unique']:
self._uniques.append(helpers._index_list(key_or_list))
def drop_index(self, index_or_name):
pass
def index_information(self):
return {}
def map_reduce(self, map_func, reduce_func, out, full_response=False,
query=None, limit=0):
if execjs is None:
raise NotImplementedError(
"PyExecJS is required in order to run Map-Reduce. "
"Use 'pip install pyexecjs pymongo' to support Map-Reduce mock."
)
if limit == 0:
limit = None
start_time = time.clock()
out_collection = None
reduced_rows = None
full_dict = {
'counts': {
'input': 0,
'reduce': 0,
'emit': 0,
'output': 0},
'timeMillis': 0,
'ok': 1.0,
'result': None}
map_ctx = execjs.compile("""
function doMap(fnc, docList) {
var mappedDict = {};
function emit(key, val) {
if (key['$oid']) {
mapped_key = '$oid' + key['$oid'];
}
else {
mapped_key = key;
}
if(!mappedDict[mapped_key]) {
mappedDict[mapped_key] = [];
}
mappedDict[mapped_key].push(val);
}
mapper = eval('('+fnc+')');
var mappedList = new Array();
for(var i=0; i<docList.length; i++) {
var thisDoc = eval('('+docList[i]+')');
var mappedVal = (mapper).call(thisDoc);
}
return mappedDict;
}
""")
reduce_ctx = execjs.compile("""
function doReduce(fnc, docList) {
var reducedList = new Array();
reducer = eval('('+fnc+')');
for(var key in docList) {
var reducedVal = {'_id': key,
'value': reducer(key, docList[key])};
reducedList.push(reducedVal);
}
return reducedList;
}
""")
doc_list = [json.dumps(doc, default=json_util.default)
for doc in self.find(query)]
mapped_rows = map_ctx.call('doMap', map_func, doc_list)
reduced_rows = reduce_ctx.call('doReduce', reduce_func, mapped_rows)[:limit]
for reduced_row in reduced_rows:
if reduced_row['_id'].startswith('$oid'):
reduced_row['_id'] = ObjectId(reduced_row['_id'][4:])
reduced_rows = sorted(reduced_rows, key=lambda x: x['_id'])
if full_response:
full_dict['counts']['input'] = len(doc_list)
for key in mapped_rows.keys():
emit_count = len(mapped_rows[key])
full_dict['counts']['emit'] += emit_count
if emit_count > 1:
full_dict['counts']['reduce'] += 1
full_dict['counts']['output'] = len(reduced_rows)
if isinstance(out, (str, bytes)):
out_collection = getattr(self.database, out)
out_collection.drop()
out_collection.insert(reduced_rows)
ret_val = out_collection
full_dict['result'] = out
elif isinstance(out, SON) and out.get('replace') and out.get('db'):
# Must be of the format SON([('replace','results'),('db','outdb')])
out_db = getattr(self.database._client, out['db'])
out_collection = getattr(out_db, out['replace'])
out_collection.insert(reduced_rows)
ret_val = out_collection
full_dict['result'] = {'db': out['db'], 'collection': out['replace']}
elif isinstance(out, dict) and out.get('inline'):
ret_val = reduced_rows
full_dict['result'] = reduced_rows
else:
raise TypeError("'out' must be an instance of string, dict or bson.SON")
full_dict['timeMillis'] = int(round((time.clock() - start_time) * 1000))
if full_response:
ret_val = full_dict
return ret_val
def inline_map_reduce(self, map_func, reduce_func, full_response=False,
query=None, limit=0):
return self.map_reduce(
map_func, reduce_func, {'inline': 1}, full_response, query, limit)
def distinct(self, key, filter=None):
return self.find(filter).distinct(key)
def group(self, key, condition, initial, reduce, finalize=None):
if execjs is None:
raise NotImplementedError(
"PyExecJS is required in order to use group. "
"Use 'pip install pyexecjs pymongo' to support group mock."
)
reduce_ctx = execjs.compile("""
function doReduce(fnc, docList) {
reducer = eval('('+fnc+')');
for(var i=0, l=docList.length; i<l; i++) {
try {
reducedVal = reducer(docList[i-1], docList[i]);
}
catch (err) {
continue;
}
}
return docList[docList.length - 1];
}
""")
ret_array = []
doc_list_copy = []
ret_array_copy = []
reduced_val = {}
doc_list = [doc for doc in self.find(condition)]
for doc in doc_list:
doc_copy = copy.deepcopy(doc)
for k in doc:
if isinstance(doc[k], ObjectId):
doc_copy[k] = str(doc[k])
if k not in key and k not in reduce:
del doc_copy[k]
for initial_key in initial:
if initial_key in doc.keys():
pass
else:
doc_copy[initial_key] = initial[initial_key]
doc_list_copy.append(doc_copy)
doc_list = doc_list_copy
for k in key:
doc_list = sorted(doc_list, key=lambda x: _resolve_key(k, x))
for k in key:
if not isinstance(k, helpers.basestring):
raise TypeError(
"Keys must be a list of key names, "
"each an instance of %s" % helpers.basestring.__name__)
for k2, group in itertools.groupby(doc_list, lambda item: item[k]):
group_list = ([x for x in group])
reduced_val = reduce_ctx.call('doReduce', reduce, group_list)
ret_array.append(reduced_val)
for doc in ret_array:
doc_copy = copy.deepcopy(doc)
for k in doc:
if k not in key and k not in initial.keys():
del doc_copy[k]
ret_array_copy.append(doc_copy)
ret_array = ret_array_copy
return ret_array
def aggregate(self, pipeline, **kwargs):
pipeline_operators = [
'$project',
'$match',
'$redact',
'$limit',
'$skip',
'$unwind',
'$group',
            '$sample',
            '$sort',
            '$geoNear',
            '$lookup',
            '$out',
'$indexStats']
group_operators = [
'$addToSet',
'$first',
'$last',
'$max',
'$min',
'$avg',
'$push',
'$sum',
'$stdDevPop',
'$stdDevSamp']
project_operators = [
'$max',
'$min',
'$avg',
'$sum',
'$stdDevPop',
'$stdDevSamp',
'$arrayElemAt'
]
boolean_operators = ['$and', '$or', '$not'] # noqa
set_operators = [ # noqa
'$setEquals',
'$setIntersection',
'$setDifference',
'$setUnion',
'$setIsSubset',
'$anyElementTrue',
'$allElementsTrue']
comparison_operators = [ # noqa
'$cmp',
'$eq',
'$gt',
'$gte',
'$lt',
'$lte',
'$ne']
arithmetic_operators = [ # noqa
'$abs',
'$add',
'$ceil',
'$divide',
'$exp',
'$floor',
'$ln',
'$log',
'$log10',
'$mod',
'$multiply',
'$pow',
'$sqrt',
'$subtract',
'$trunc']
string_operators = [ # noqa
'$concat',
'$strcasecmp',
'$substr',
'$toLower',
'$toUpper']
text_search_operators = ['$meta'] # noqa
array_operators = [ # noqa
'$arrayElemAt',
'$concatArrays',
'$filter',
'$isArray',
'$size',
'$slice']
projection_operators = ['$map', '$let', '$literal'] # noqa
date_operators = [ # noqa
'$dayOfYear',
'$dayOfMonth',
'$dayOfWeek',
'$year',
'$month',
'$week',
'$hour',
'$minute',
'$second',
'$millisecond',
'$dateToString']
def _handle_arithmetic_operator(operator, values, doc_dict):
if operator == '$abs':
return abs(_parse_expression(values, doc_dict))
elif operator == '$ceil':
return math.ceil(_parse_expression(values, doc_dict))
elif operator == '$divide':
assert len(values) == 2, 'divide must have only 2 items'
return _parse_expression(values[0], doc_dict) / _parse_expression(values[1],
doc_dict)
elif operator == '$exp':
return math.exp(_parse_expression(values, doc_dict))
elif operator == '$floor':
return math.floor(_parse_expression(values, doc_dict))
elif operator == '$ln':
return math.log(_parse_expression(values, doc_dict))
elif operator == '$log':
assert len(values) == 2, 'log must have only 2 items'
return math.log(_parse_expression(values[0], doc_dict),
_parse_expression(values[1], doc_dict))
elif operator == '$log10':
return math.log10(_parse_expression(values, doc_dict))
elif operator == '$mod':
assert len(values) == 2, 'mod must have only 2 items'
return math.fmod(_parse_expression(values[0], doc_dict),
_parse_expression(values[1], doc_dict))
elif operator == '$pow':
assert len(values) == 2, 'pow must have only 2 items'
return math.pow(_parse_expression(values[0], doc_dict),
_parse_expression(values[1], doc_dict))
elif operator == '$sqrt':
return math.sqrt(_parse_expression(values, doc_dict))
elif operator == '$subtract':
assert len(values) == 2, 'subtract must have only 2 items'
return _parse_expression(values[0], doc_dict) - _parse_expression(values[1],
doc_dict)
else:
raise NotImplementedError("Although '%s' is a valid aritmetic operator for the "
"aggregation pipeline, it is currently not implemented "
" in Mongomock." % operator)
def _handle_comparison_operator(operator, values, doc_dict):
assert len(values) == 2, 'Comparison requires two expressions'
if operator == '$eq':
return _parse_expression(values[0], doc_dict) == \
_parse_expression(values[1], doc_dict)
elif operator == '$gt':
return _parse_expression(values[0], doc_dict) > \
_parse_expression(values[1], doc_dict)
elif operator == '$gte':
return _parse_expression(values[0], doc_dict) >= \
_parse_expression(values[1], doc_dict)
elif operator == '$lt':
return _parse_expression(values[0], doc_dict) < \
_parse_expression(values[1], doc_dict)
elif operator == '$lte':
return _parse_expression(values[0], doc_dict) <= \
_parse_expression(values[1], doc_dict)
elif operator == '$ne':
return _parse_expression(values[0], doc_dict) != \
_parse_expression(values[1], doc_dict)
else:
raise NotImplementedError(
"Although '%s' is a valid comparison operator for the "
"aggregation pipeline, it is currently not implemented "
" in Mongomock." % operator)
def _handle_date_operator(operator, values, doc_dict):
out_value = _parse_expression(values, doc_dict)
if operator == '$dayOfYear':
return out_value.timetuple().tm_yday
elif operator == '$dayOfMonth':
return out_value.day
elif operator == '$dayOfWeek':
return out_value.isoweekday()
elif operator == '$year':
return out_value.year
elif operator == '$month':
return out_value.month
elif operator == '$week':
return out_value.isocalendar()[1]
elif operator == '$hour':
return out_value.hour
elif operator == '$minute':
return out_value.minute
elif operator == '$second':
return out_value.second
elif operator == '$millisecond':
return int(out_value.microsecond / 1000)
else:
raise NotImplementedError(
"Although '%s' is a valid date operator for the "
"aggregation pipeline, it is currently not implemented "
" in Mongomock." % operator)
def _handle_project_operator(operator, values, doc_dict):
if operator == '$min':
if len(values) > 2:
raise NotImplementedError("Although %d is a valid amount of elements in "
"aggregation pipeline, it is currently not "
" implemented in Mongomock" % len(values))
return min(_parse_expression(values[0], doc_dict),
_parse_expression(values[1], doc_dict))
elif operator == '$arrayElemAt':
key, index = values
array = _parse_basic_expression(key, doc_dict)
v = array[index]
return v
else:
raise NotImplementedError("Although '%s' is a valid project operator for the "
"aggregation pipeline, it is currently not implemented "
"in Mongomock." % operator)
def _parse_basic_expression(expression, doc_dict):
if isinstance(expression, str) and expression.startswith('$'):
get_value = helpers.embedded_item_getter(expression.replace('$', ''))
return get_value(doc_dict)
else:
return expression
def _parse_expression(expression, doc_dict):
if not isinstance(expression, dict):
return _parse_basic_expression(expression, doc_dict)
value_dict = {}
for k, v in iteritems(expression):
if k in arithmetic_operators:
return _handle_arithmetic_operator(k, v, doc_dict)
elif k in project_operators:
return _handle_project_operator(k, v, doc_dict)
elif k in comparison_operators:
return _handle_comparison_operator(k, v, doc_dict)
elif k in date_operators:
return _handle_date_operator(k, v, doc_dict)
else:
value_dict[k] = _parse_expression(v, doc_dict)
return value_dict
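        # Note: _parse_expression resolves an aggregation expression against a single
        # document. For example, with doc_dict = {'a': 3}, the plain field reference
        # '$a' evaluates to 3 via _parse_basic_expression, while {'$subtract': ['$a', 1]}
        # is dispatched to _handle_arithmetic_operator and evaluates to 2.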
def _extend_collection(out_collection, field, expression):
field_exists = False
for doc in out_collection:
if field in doc:
field_exists = True
break
if not field_exists:
for doc in out_collection:
if isinstance(expression, str) and expression.startswith('$'):
try:
doc[field] = get_value_by_dot(doc, expression.lstrip('$'))
except KeyError:
pass
else:
# verify expression has operator as first
doc[field] = _parse_expression(expression.copy(), doc)
return out_collection
conditional_operators = ['$cond', '$ifNull'] # noqa
out_collection = [doc for doc in self.find()]
for stage in pipeline:
for k, v in iteritems(stage):
if k == '$match':
out_collection = [doc for doc in out_collection
if filter_applies(v, doc)]
elif k == '$group':
grouped_collection = []
_id = stage['$group']['_id']
if _id:
key_getter = functools.partial(_parse_expression, _id)
out_collection = sorted(out_collection, key=key_getter)
grouped = itertools.groupby(out_collection, key_getter)
else:
grouped = [(None, out_collection)]
for doc_id, group in grouped:
group_list = ([x for x in group])
doc_dict = {'_id': doc_id}
for field, value in iteritems(v):
if field == '_id':
continue
for operator, key in iteritems(value):
if operator in (
"$sum",
"$avg",
"$min",
"$max",
"$first",
"$last",
"$addToSet",
'$push'
):
key_getter = functools.partial(_parse_expression, key)
values = [key_getter(doc) for doc in group_list]
if operator == "$sum":
val_it = (val or 0 for val in values)
doc_dict[field] = sum(val_it)
elif operator == "$avg":
values = [val or 0 for val in values]
doc_dict[field] = sum(values) / max(len(values), 1)
elif operator == "$min":
val_it = (val or MAXSIZE for val in values)
doc_dict[field] = min(val_it)
elif operator == "$max":
val_it = (val or -MAXSIZE for val in values)
doc_dict[field] = max(val_it)
elif operator == "$first":
doc_dict[field] = values[0]
elif operator == "$last":
doc_dict[field] = values[-1]
elif operator == "$addToSet":
val_it = (val or None for val in values)
doc_dict[field] = set(val_it)
elif operator == '$push':
if field not in doc_dict:
doc_dict[field] = []
doc_dict[field].extend(values)
else:
if operator in group_operators:
raise NotImplementedError(
"Although %s is a valid group operator for the "
"aggregation pipeline, it is currently not implemented "
"in Mongomock." % operator)
else:
raise NotImplementedError(
"%s is not a valid group operator for the aggregation "
"pipeline. See http://docs.mongodb.org/manual/meta/"
"aggregation-quick-reference/ for a complete list of "
"valid operators." % operator)
grouped_collection.append(doc_dict)
out_collection = grouped_collection
elif k == '$sort':
sort_array = []
for x, y in v.items():
sort_array.append({x: y})
for sort_pair in reversed(sort_array):
for sortKey, sortDirection in sort_pair.items():
out_collection = sorted(
out_collection,
key=lambda x: _resolve_sort_key(sortKey, x),
reverse=sortDirection < 0)
elif k == '$skip':
out_collection = out_collection[v:]
elif k == '$limit':
out_collection = out_collection[:v]
elif k == '$unwind':
if not isinstance(v, helpers.basestring) or v[0] != '$':
raise ValueError(
"$unwind failed: exception: field path references must be prefixed "
"with a '$' '%s'" % v)
unwound_collection = []
for doc in out_collection:
array_value = get_value_by_dot(doc, v[1:])
if array_value in (None, []):
continue
elif not isinstance(array_value, list):
raise TypeError(
'$unwind must specify an array field, field: '
'"%s", value found: %s' % (v, array_value))
for field_item in array_value:
unwound_collection.append(copy.deepcopy(doc))
unwound_collection[-1] = set_value_by_dot(
unwound_collection[-1], v[1:], field_item)
out_collection = unwound_collection
elif k == '$project':
filter_list = ['_id']
for field, value in iteritems(v):
if field == '_id' and not value:
filter_list.remove('_id')
elif value:
filter_list.append(field)
out_collection = _extend_collection(out_collection, field, value)
out_collection = [{k: v for (k, v) in x.items() if k in filter_list}
for x in out_collection]
elif k == '$out':
# TODO(MetrodataTeam): should leave the origin collection unchanged
collection = self.database.get_collection(v)
if collection.count() > 0:
collection.drop()
collection.insert_many(out_collection)
else:
if k in pipeline_operators:
raise NotImplementedError(
"Although '%s' is a valid operator for the aggregation pipeline, it is "
"currently not implemented in Mongomock." % k)
else:
raise NotImplementedError(
"%s is not a valid operator for the aggregation pipeline. "
"See http://docs.mongodb.org/manual/meta/aggregation-quick-reference/ "
"for a complete list of valid operators." % k)
return CommandCursor(out_collection)
def with_options(
self, codec_options=None, read_preference=None, write_concern=None, read_concern=None):
return self
def rename(self, new_name, **kwargs):
self.database.rename_collection(self.name, new_name, **kwargs)
def bulk_write(self, operations):
bulk = BulkOperationBuilder(self)
for operation in operations:
operation._add_to_bulk(bulk)
return BulkWriteResult(bulk.execute(), True)
def _resolve_key(key, doc):
return next(iter(iter_key_candidates(key, doc)), NOTHING)
def _resolve_sort_key(key, doc):
value = _resolve_key(key, doc)
# see http://docs.mongodb.org/manual/reference/method/cursor.sort/#ascending-descending-sort
if value is NOTHING:
return 0, value
return 1, value
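# Note: returning a (0, value) / (1, value) pair makes documents that lack the sort key
# order before documents that have it, because the leading flag is compared first;
# e.g. sorted() places (0, NOTHING) ahead of (1, 2) and (1, 5) without ever comparing
# NOTHING against a real value.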
class Cursor(object):
def __init__(self, collection, spec=None, sort=None, projection=None, skip=0, limit=0):
super(Cursor, self).__init__()
self.collection = collection
self._spec = spec
self._sort = sort
self._projection = projection
self._skip = skip
self._factory = functools.partial(collection._get_dataset,
spec, sort, projection, dict, skip)
# pymongo limit defaults to 0, returning everything
self._limit = limit if limit != 0 else None
self.rewind()
def __iter__(self):
return self
def clone(self):
return Cursor(self.collection,
self._spec, self._sort, self._projection, self._skip, self._limit)
def __next__(self):
if self._skip and not self._skipped:
for i in range(self._skip):
next(self._dataset)
self._skipped = self._skip
if self._limit is not None and self._limit <= self._emitted:
raise StopIteration()
if self._limit is not None:
self._emitted += 1
return {k: copy.deepcopy(v) for k, v in iteritems(next(self._dataset))}
next = __next__
def rewind(self):
self._dataset = self._factory()
self._emitted = 0
self._skipped = 0
def sort(self, key_or_list, direction=None):
if direction is None:
direction = 1
if isinstance(key_or_list, (tuple, list)):
for sortKey, sortDirection in reversed(key_or_list):
self._dataset = iter(
sorted(
self._dataset,
key=lambda x: _resolve_sort_key(
sortKey,
x),
reverse=sortDirection < 0))
else:
self._dataset = iter(
sorted(self._dataset,
key=lambda x: _resolve_sort_key(key_or_list, x),
reverse=direction < 0))
return self
def count(self, with_limit_and_skip=False):
arr = [x for x in self._dataset]
count = len(arr)
if with_limit_and_skip:
if self._skip:
count -= self._skip
if self._limit and count > self._limit:
count = self._limit
self._dataset = iter(arr)
return count
def skip(self, count):
self._skip = count
return self
def limit(self, count):
self._limit = count if count != 0 else None
return self
def batch_size(self, count):
return self
def close(self):
pass
def distinct(self, key):
if not isinstance(key, helpers.basestring):
raise TypeError('cursor.distinct key must be a string')
unique = set()
unique_dict_vals = []
for x in iter(self._dataset):
value = _resolve_key(key, x)
if value == NOTHING:
continue
if isinstance(value, dict):
if any(dict_val == value for dict_val in unique_dict_vals):
continue
unique_dict_vals.append(value)
else:
unique.update(
value if isinstance(
value, (tuple, list)) else [value])
return list(unique) + unique_dict_vals
def __getitem__(self, index):
if isinstance(index, slice):
# Limit the cursor to the given slice
self._dataset = (x for x in list(self._dataset)[index])
return self
elif not isinstance(index, int):
raise TypeError("index '%s' cannot be applied to Cursor instances" % index)
elif index < 0:
            raise IndexError('Cursor instances do not support negative indices')
else:
arr = [x for x in self._dataset]
self._dataset = iter(arr)
return arr[index]
def _set_updater(doc, field_name, value):
if isinstance(value, (tuple, list)):
value = copy.deepcopy(value)
if isinstance(doc, dict):
doc[field_name] = value
def _unset_updater(doc, field_name, value):
if isinstance(doc, dict):
doc.pop(field_name, None)
def _inc_updater(doc, field_name, value):
if isinstance(doc, dict):
doc[field_name] = doc.get(field_name, 0) + value
def _max_updater(doc, field_name, value):
if isinstance(doc, dict):
doc[field_name] = max(doc.get(field_name, value), value)
def _min_updater(doc, field_name, value):
if isinstance(doc, dict):
doc[field_name] = min(doc.get(field_name, value), value)
def _sum_updater(doc, field_name, current, result):
if isinstance(doc, dict):
        result = current + doc.get(field_name, 0)
return result
def _current_date_updater(doc, field_name, value):
if isinstance(doc, dict):
doc[field_name] = datetime.utcnow()
_updaters = {
'$set': _set_updater,
'$unset': _unset_updater,
'$inc': _inc_updater,
'$max': _max_updater,
'$min': _min_updater,
}
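# Illustrative example (values are made up): the mapping above lets the update code
# dispatch a MongoDB update operator to its helper, e.g.
#
#   doc = {'counter': 1}
#   _updaters['$inc'](doc, 'counter', 2)   # doc == {'counter': 3}
#   _updaters['$set'](doc, 'name', 'foo')  # doc == {'counter': 3, 'name': 'foo'}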
|
the-stack_0_1358 | import random
director_name = ['ahpwnzl mabfhlu','augsiq qbtxmw','gyrpvldoewfuin tmsgrjahozqwun',
'grecfblmuqz jkebcmdwqao','ljuemdby gfdewsck','dsrhgtxzivcuoya rcxmdlofqkgvphs',]
client_enterprise_name = ['hjwpfxevdlmcqz jexytvcfnrglaq','wrvjudn paydvfc','xuanq hxzve',
'drhelmfupxnz ljscbumproni','ugodj zutjm','yjgwlnzpbk bhevflixys',
'fiovwl hnzray','lsyexrpwzicdfq ckndofjeuzgpbi']
salesman_name = ['gndxzrfyulie qvfykexcuopw','lykjcqwteznomhp vlfsegqxodabznt','napdbvwkfytxzho gtsxmdyehwjblvc',
'bhrfsxwvpnoz deuqazrgnkot','pybwg chxeg','podxevkisw wrosfeuzvc',
'wvpgnuym mqktwoib','swmdinbcohtarlk iylvfhorbucpjem','agtwu ykazf']
salesman_number = ['11049911','19647906','17444041',
'15938426','19707413','14263792',
'18402745','15744174','19246098']
salesman_phone = ['15794620485','13261138409','16613282797',
'14268410346','12808971977','15530011206',
'17120631428','12505751408','16568866838']
product_code = ['N9C7h0Jx','i5gCFe4k','zsVaSZrH',
'uPUofy6q','yeKWEuzc','kpfj5GlW',
'1f7TCp9K','FJnygAXG','qkjmafCR']
product_name = ['nuoaxkqgcvt cfhrtbemaix','mzftpibjwo nzsgeyloru','cftxq ivajf',
'mzqhnsfbpoevjdk gefqcwlxvpionut','qyhlungdz rfuleqwnd','bfoau xajml',
'mbyjo lownu','rlgoemuwfkdqjs evfxmalobyuhij','qndtxlmfjcgaupe ytiwvjzqamxopeg']
product_model = ['yhzqi yzhdt','sxvwgua iyjxnwf','vwlybetukzcqn cvyirmsnebqod',
'ijkvtenzscupxq lktjzofacgxwen','ryeohdupljb usfbaohzyvx','mdlycxjvkzrpqb cqeflukpznxtgy',
'uonhcjasbypvxwe uweajmrlihgotbx','czhsruvj hqxrgudl','xzkbjvrqgdlmco ifqtgmzhuewkjc']
def generate_select_test_supply_center() -> list:
select_test = []
for i1 in range(10):
select_test.append('''
SELECT * FROM supply_center WHERE id = %d;
''' % random.randint(1, 93632))
for i in director_name:
select_test.append('''
SELECT * FROM supply_center WHERE director_name = '%s';
''' % i)
return select_test
def generate_select_test_client_enterprise() -> list:
select_test = []
for i1 in range(10):
select_test.append('''
SELECT * FROM client_enterprise WHERE id = %d;
''' % random.randint(1, 275302))
for i in client_enterprise_name:
select_test.append('''
SELECT * FROM client_enterprise WHERE name = '%s';
''' % i)
return select_test
def generate_select_test_salesman() -> list:
select_test = []
for i1 in range(10):
select_test.append('''
SELECT * FROM client WHERE id = %d;
''' % random.randint(1, 972749))
for i in salesman_name:
select_test.append('''
SELECT * FROM client WHERE name = '%s';
''' % i)
for i in salesman_number:
select_test.append('''
SELECT * FROM client WHERE number = '%s';
''' % i)
for i in salesman_phone:
select_test.append('''
SELECT * FROM client WHERE mobile_number = '%s';
''' % i)
return select_test
def generate_select_test_product() -> list:
select_test = []
for i1 in range(10):
select_test.append('''
SELECT * FROM product WHERE id = %d;
''' % random.randint(1, 962787))
for i in product_code:
select_test.append('''
SELECT * FROM product WHERE product_code = '%s';
''' % i)
for i in product_name:
select_test.append('''
SELECT * FROM product WHERE product_name = '%s';
''' % i)
return select_test
def generate_select_test_product_model() -> list:
select_test = []
for i1 in range(10):
select_test.append('''
SELECT * FROM product_model WHERE id = %d;
''' % random.randint(1, 3597940))
for i1 in range(10):
select_test.append('''
SELECT * FROM product_model WHERE product_id = %d;
''' % random.randint(1, 962787))
for i in product_model:
select_test.append('''
SELECT * FROM product_model WHERE product_model = '%s';
''' % i)
return select_test
def generate_select_test_contract() -> list:
select_test = []
for i1 in range(10):
select_test.append('''
SELECT * FROM contract WHERE id = %d;
''' % random.randint(1, 400000))
return select_test
def generate_select_test_contract_content() -> list:
select_test = []
for i1 in range(10):
select_test.append('''
SELECT * FROM contract_content WHERE id = %d;
''' % random.randint(1, 3597940))
return select_test
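# Illustrative driver (not part of the original module): each generator above returns a
# list of SQL strings, so a quick smoke test can print a few of them; swapping print()
# for a DB-API cursor.execute() call would run them against a real database.
if __name__ == '__main__':
    for query in generate_select_test_supply_center()[:3]:
        print(query.strip())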
|
the-stack_0_1361 | from PySide2.QtWidgets import *
import sys
if __name__ == "__main__":
app = QApplication(sys.argv)
bt1 = QPushButton("bt1")
bt2 = QPushButton("bt2")
bt3 = QPushButton("bt3")
layout2 = QHBoxLayout()
layout2.addWidget(QPushButton("ok"))
layout2.addWidget(QPushButton("cancel"))
layout = QVBoxLayout()
layout.addWidget(bt1)
layout.addWidget(bt2)
layout.addStretch()
layout.addLayout(layout2)
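    # The stretch added above absorbs the extra vertical space, so the nested
    # ok/cancel row contributed by layout2 stays pinned to the bottom of the window.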
widget = QWidget()
widget.setLayout(layout)
widget.show()
app.exec_()
|
the-stack_0_1362 | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import re
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
def read(*parts):
# intentionally *not* adding an encoding option to open
# see here: https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690
return open(path.join(here, *parts), 'r').read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
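# Note: find_version('schedule', '__init__.py') below scans that file for a line such
# as  __version__ = '0.1.0'  (value shown is hypothetical) and returns the quoted
# string, so the version number is single-sourced from the package itself.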
setup(
name='schedule',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=find_version('schedule', '__init__.py'),
#version='0.0.1',
description='batch scheduler',
long_description=long_description,
# The project's main homepage.
url='https://github.com/assethurajan/aws-batch-example',
# Author details
author='Sethu rajan',
author_email='[email protected]',
# Choose your license
license='Apache2.0',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: Apache Software License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
],
# What does your project relate to?
keywords='Amazon Batch Jobs',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
'boto3>=1.3.1'
],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
#extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
#},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'schedule = schedule:main'
],
},
)
|
the-stack_0_1365 | #!/usr/bin/python
#-*- encoding: utf-8 -*-
"""
Copyright (c) 2015 @myuuuuun
Released under the MIT license.
"""
import math
import numpy as np
import functools
import matplotlib.pyplot as plt
import matplotlib.cm as cm
EPSIRON = 1.0e-8
# Return the list of values at x of the Legendre polynomials P0 through P_(length-1)
def legendre(x, length):
values = [1, x]
for i in range(2, length):
v = ((2*i-1)*x*values[i-1] - (i-1) * values[i-2]) / i
values.append(v)
return values
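# Note: the loop above is Bonnet's recurrence,
#   i * P_i(x) = (2*i - 1) * x * P_{i-1}(x) - (i - 1) * P_{i-2}(x),
# seeded with P_0(x) = 1 and P_1(x) = x.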
# Return the list of values at x of the Chebyshev polynomials P0 through P_(length-1)
def chebyshev(x, length):
values = []
for i in range(length):
v = np.cos(i * np.arccos(x))
values.append(v)
return values
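# Note: np.cos(i * np.arccos(x)) is the closed form of the Chebyshev polynomial of the
# first kind T_i(x), valid for x in [-1, 1], which is why the demo below keeps x_list
# strictly inside that interval.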
if __name__ == '__main__':
    # Common settings
length = 6
x_list = np.arange(-0.99, 1.00, 0.01)
f_matrix = np.zeros((length, 199), dtype=float)
# legendre
"""
for i, x in enumerate(x_list):
values = legendre(x, length)
for j in range(length):
f_matrix[j][i] = values[j]
fig, ax = plt.subplots()
plt.title("Legendre多項式")
plt.xlabel("x")
plt.ylabel("f")
plt.xlim(-1, 1)
for j in range(length):
plt.plot(x_list, f_matrix[j], color=cm.gist_earth(j/length), label='P{0}'.format(j))
plt.legend()
plt.show()
"""
"""
# chebyshev
for i, x in enumerate(x_list):
values = chebyshev(x, length)
for j in range(length):
f_matrix[j][i] = values[j]
fig, ax = plt.subplots()
plt.title("Chebyshev多項式")
plt.xlabel("x")
plt.ylabel("f")
plt.xlim(-1, 1)
for j in range(length):
plt.plot(x_list, f_matrix[j], color=cm.gist_earth(j/length), label='P{0}'.format(j))
plt.legend()
plt.show()
"""
|
the-stack_0_1366 | #!/usr/bin/env python3
print("Starting server...")
from ev3dev2.console import Console
Console("Lat15-Terminus12x6")
print("Importing modules (this may take a while)...")
import time
t1 = time.perf_counter()
import json
import os
import subprocess
import time
import traceback
from base64 import b64decode
from shutil import which
from socket import gethostname
from threading import Thread, Lock
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.websocket
from ev3dev2.led import Leds
from ev3dev2.motor import list_motors, Motor, MoveJoystick, OUTPUT_B, OUTPUT_C
from ev3dev2.sensor import list_sensors, Sensor
t2 = time.perf_counter()
print("Imported in", t2-t1)
# has auth is True if users should be logged in to access the server
HAS_AUTH = (os.path.exists(".htpasswd") # check that password file exists ...
and which("htpasswd") is not None) # ... and that program 'htpasswd' exists
class BasicAuthHandler(tornado.web.RequestHandler):
def prepare(self):
if HAS_AUTH:
def request_auth():
self.set_header("WWW-Authenticate", 'Basic realm="Connect to ' + gethostname() + '"')
self.set_status(401)
self.finish()
tornado.web.Finish()
auth = self.request.headers.get("Authorization")
if auth is None or not auth.startswith("Basic "):
return request_auth()
try:
decoded = b64decode(auth.split(maxsplit=1)[1])
except Exception:
return request_auth()
user, pwd = decoded.split(b":", 1)
try:
proc = subprocess.run(["htpasswd", "-i", "-v", ".htpasswd", user], timeout=1, input=pwd)
except subprocess.TimeoutExpired:
return request_auth()
if proc.returncode != 0:
return request_auth()
LEDS = Leds()
LEDS.all_off()
LEDS.reset()
move_joystick = None
motors = {}
old_joystick_left_port = None
old_joystick_right_port = None
old_motor_1_port = None
old_motor_2_port = None
class EV3InfoHandler(BasicAuthHandler, tornado.websocket.WebSocketHandler):
websockets = set()
websockets_lock = Lock()
def open(self):
with EV3InfoHandler.websockets_lock:
EV3InfoHandler.websockets.add(self)
self.write_message(get_info(set(), set(), True)[0])
self.write_message("next") # inform client that it is allowed to send a new message
def on_close(self):
with EV3InfoHandler.websockets_lock:
EV3InfoHandler.websockets.remove(self)
def on_message(self, messages):
global move_joystick
try:
print("got messages", messages)
for message in json.loads(messages):
type_ = message["type"]
if type_ == "rc-joystick":
if message["leftPort"] != old_joystick_left_port or message["rightPort"] != old_joystick_right_port:
move_joystick = MoveJoystick(message["leftPort"], message["rightPort"])
if message["x"] == 0 and message["y"] == 0:
move_joystick.off(brake=False)
else:
move_joystick.on(message["x"], message["y"], 1)
elif type_ == "rc-motor":
if message["port"] in motors:
motor = motors[message["port"]]
else:
motor = motors[message["port"]] = Motor(message["port"])
motor.on(message["speed"]*100)
elif type_ == "sensor":
port = message["port"]
attributes = message["attributes"]
device = Sensor(port)
for name, value in attributes.items():
setattr(device, name, value)
# send changes to other clients
EV3InfoHandler.send_to_all(json.dumps({port: attributes}), {self})
elif type_ == "motor":
port = message["port"]
attributes = message["attributes"]
device = Motor(port)
for name, value in attributes.items():
setattr(device, name, value)
# send changes to other clients
EV3InfoHandler.send_to_all(json.dumps({port: attributes}), {self})
elif type_ == "led":
port = message["port"]
attributes = message["attributes"]
led_group = port.split(":")[1].lower()
for color_name, brightness in attributes.items():
LEDS.leds[color_name + "_" + led_group].brightness_pct = float(brightness)
# send changes to other clients
EV3InfoHandler.send_to_all(json.dumps({port: attributes}), {self})
else:
raise ValueError("Unknown message type '" + type_ + "'")
except Exception:
traceback.print_exc()
self.send_to_all("next")
@classmethod
def send_to_all(cls, message, exclude_websockets=None):
with cls.websockets_lock:
for websocket in cls.websockets:
if not exclude_websockets or websocket not in exclude_websockets:
try:
websocket.write_message(message)
except Exception:
traceback.print_exc()
"""
Returns a string containing a JSON object which describes the current motor/sensor values in the following format:
{
"<address (e.g. "ev3-ports:in1")>": {
// for both sensors and motors:
"driver_name": "<driver name>",
"command": [<list of possible commands>],
// for sensors:
"values": "<current sensor values, separated by space (max. 8)>",
"mode": {
"selected": "<currently selected mode>],
"values": [<list of possible modes>]
},
// for motors:
"position": "<current motor position>",
"duty_cycle_sp": "<duty cycle setpoint>",
"polarity": "normal" or "inversed",
"position_sp": "position setpoint",
"speed_sp": "speed setpoint",
"ramp_up_sp": "ramp up setpoint",
"ramp_down_sp": "ramp down setpoint",
"stop_action": {
"selected": "<currently selected stop_action>",
"values": [<list of possible stop_actions>]
},
"time_sp": "time setpoint",
}
}
Parameters 'old_sensor_addresses' and 'old_motor_addresses' are sets of previously available addresses.
If an address was previously available, only "values" attribute (for sensors) or "position" attribute (for motors) is included.
This is because these are the only properties that change while the user views the page.
If 'all_info' is True, additional info is added that clients need when they connect for the first time: Currently, this is only LED brightnesses.
When a WebSocket first connects with the server, get_info(set(), set()) is called so that initially the client receives all attributes (see EV3InfoHandler.open).
get_info returns: (string containing JSON object, new sensor addresses (for use in the next call of get_info), new motor addresses (for use in the next call of get_info)).
"""
def get_info(old_sensor_addresses, old_motor_addresses, all_info=False):
info = {"disconnected_devices": []}
if all_info:
for group_name, leds in LEDS.led_groups.items():
info["led:" + group_name] = {led.desc.split("_")[0]: led.brightness_pct for led in leds}
sensor_addresses = set()
for sensor in list_sensors("*"):
try:
address = sensor.address
if address.count(":") > 1:
# addresses for i2c sensors end with ':i2c*', remove this
address = address[:address.index(":", address.index(":")+1)]
if address in old_sensor_addresses:
old_sensor_addresses.remove(address)
info[address] = {
"values": " ".join(str(sensor.value(i)) for i in range(sensor.num_values))
}
else:
info[address] = {
"driver_name": sensor.driver_name,
"mode": {
"values": sensor.modes,
"selected": sensor.mode
},
"command": sensor.commands,
"values": " ".join(str(sensor.value(i)) for i in range(sensor.num_values)),
#"decimals": sensor.decimals,
}
sensor_addresses.add(address)
except Exception:
traceback.print_exc()
info["disconnected_devices"].extend(old_sensor_addresses)
motor_addresses = set()
for motor in list_motors("*"):
try:
address = motor.address
if address in old_motor_addresses:
old_motor_addresses.remove(address)
info[address] = {
"position": motor.position
}
else:
info[address] = {
"driver_name": motor.driver_name,
"duty_cycle_sp": motor.duty_cycle_sp,
"polarity": motor.polarity,
"position": motor.position,
"position_sp": motor.position_sp,
"speed_sp": motor.speed_sp,
"ramp_up_sp": motor.ramp_up_sp,
"ramp_down_sp": motor.ramp_down_sp,
"stop_action": {
"values": motor.stop_actions,
"selected": motor.stop_action
},
"time_sp": motor.time_sp,
"command": motor.commands
}
motor_addresses.add(address)
except Exception:
traceback.print_exc()
info["disconnected_devices"].extend(old_motor_addresses)
content = json.dumps(info).encode("utf-8")
return content, sensor_addresses, motor_addresses
def send_info():
old_sensor_addresses = set()
old_motor_addresses = set()
while True:
if len(EV3InfoHandler.websockets) == 0:
print("Waiting for clients to connect...")
while len(EV3InfoHandler.websockets) == 0:
time.sleep(0.5)
print("Clients connected!")
content, old_sensor_addresses, old_motor_addresses = get_info(old_sensor_addresses, old_motor_addresses)
EV3InfoHandler.send_to_all(content)
time.sleep(0.1)
class StaticFiles(BasicAuthHandler, tornado.web.StaticFileHandler):
def set_extra_headers(self, path):
self.set_header("Cache-Control", "no-store, no-cache, must-revalidate, max-age=0")
if __name__ == "__main__":
tornado.options.define("port", default=8000, help="run on the given port", type=int)
tornado.options.parse_command_line()
static_files = os.path.join(os.path.dirname(__file__), "website")
app = tornado.web.Application([
(r"/ev3-info", EV3InfoHandler),
(r"/(.*)", StaticFiles, {"path": static_files, "default_filename": "index.html"})
],
static_path=os.path.join(os.path.dirname(__file__), "website")
)
app.listen(tornado.options.options.port)
print("Serving on port", tornado.options.options.port)
if HAS_AUTH:
print("Basic auth is required when connecting")
ioloop = tornado.ioloop.IOLoop.current()
Thread(target=ioloop.start).start()
Thread(target=send_info).start()
|
the-stack_0_1367 | import configparser
import os
config = configparser.ConfigParser()
config.read(['config.ini', os.path.expanduser('~/.thorconfig.ini')],
encoding='utf8')
base_url = str(os.environ.get('THOR_BASE_URL', config['thor']['base_url']))
auth_token = str(os.environ.get('THOR_AUTH_TOKEN', config['thor']['auth_token']))
|
the-stack_0_1368 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
import sys
import pdb
import torch.optim as optim
from models.archs.base_networks import *
from torchvision.transforms import *
#from layer import *
#from vgg19 import VGG19
def make_model(args, parent=False):
    return EERSGAN()  # 'Net' is not defined in this file; EERSGAN is the model implemented below
class Get_gradient_nopadding(nn.Module):
def __init__(self):
super(Get_gradient_nopadding, self).__init__()
self.kernel_v = torch.from_numpy(np.array([[0, -1, 0],
[0, 0, 0],
[0, 1, 0]])).cuda().float()
self.kernel_h = torch.from_numpy(np.array([[0, 0, 0],
[-1, 0, 1],
[0, 0, 0]])).cuda().float()
self.kernel_h = self.kernel_h.unsqueeze(0).unsqueeze(0)
self.kernel_v = self.kernel_v.unsqueeze(0).unsqueeze(0)
self.gradient_h = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=3, padding=1, bias=False)
self.gradient_h.weight.data = self.kernel_h
self.gradient_h.weight.requires_grad = False
self.gradient_v = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=3, padding=1, bias=False)
self.gradient_v.weight.data = self.kernel_v
self.gradient_v.weight.requires_grad = False
def forward(self, x):
x_list = []
for i in range(x.shape[1]):
x_i = x[:, i]
x_i_v = self.gradient_v(x_i.unsqueeze(1))
x_i_h = self.gradient_h(x_i.unsqueeze(1))
x_i = torch.sqrt(torch.pow(x_i_v, 2) + torch.pow(x_i_h, 2) + 1e-6)
x_list.append(x_i)
x = torch.cat(x_list, dim = 1)
return x
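# Note on Get_gradient_nopadding above: kernel_v and kernel_h are fixed central-difference
# filters applied separately to every input channel, so forward() returns the per-pixel
# gradient magnitude sqrt(gx^2 + gy^2 + 1e-6) of each channel; the 1e-6 term keeps the
# square root differentiable at zero.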
class Laplacian:
def __init__(self):
self.weight = torch.from_numpy(np.array([
[[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]],[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]],[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]]],
[[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]],[[8.,0.,0.],[0.,8.,0.],[0.,0.,8.]],[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]]],
[[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]],[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]],[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]]]])).cuda().float()
self.frame = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=3, padding=1, bias=False)
self.frame.weight.data = self.weight
self.frame.weight.requires_grad = False
def __call__(self, x):
out = self.frame(x)
return out
class _Dense_Block(nn.Module):
def __init__(self, channel_in):
super(_Dense_Block, self).__init__()
self.relu = nn.PReLU()
self.conv1 = nn.Conv2d(in_channels=channel_in, out_channels=16, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3, stride=1, padding=1)
self.conv3 = nn.Conv2d(in_channels=32, out_channels=16, kernel_size=3, stride=1, padding=1)
self.conv4 = nn.Conv2d(in_channels=48, out_channels=16, kernel_size=3, stride=1, padding=1)
self.conv5 = nn.Conv2d(in_channels=64, out_channels=16, kernel_size=3, stride=1, padding=1)
self.conv6 = nn.Conv2d(in_channels=80, out_channels=16, kernel_size=3, stride=1, padding=1)
self.conv7 = nn.Conv2d(in_channels=96, out_channels=16, kernel_size=3, stride=1, padding=1)
self.conv8 = nn.Conv2d(in_channels=112, out_channels=16, kernel_size=3, stride=1, padding=1)
def forward(self, x):
conv1 = self.relu(self.conv1(x))
conv2 = self.relu(self.conv2(conv1))
cout2_dense = self.relu(torch.cat([conv1, conv2], 1))
conv3 = self.relu(self.conv3(cout2_dense))
cout3_dense = self.relu(torch.cat([conv1, conv2, conv3], 1))
conv4 = self.relu(self.conv4(cout3_dense))
cout4_dense = self.relu(torch.cat([conv1, conv2, conv3, conv4], 1))
conv5 = self.relu(self.conv5(cout4_dense))
cout5_dense = self.relu(torch.cat([conv1, conv2, conv3, conv4, conv5], 1))
conv6 = self.relu(self.conv6(cout5_dense))
cout6_dense = self.relu(torch.cat([conv1, conv2, conv3, conv4, conv5, conv6], 1))
conv7 = self.relu(self.conv7(cout6_dense))
cout7_dense = self.relu(torch.cat([conv1, conv2, conv3, conv4, conv5, conv6, conv7], 1))
conv8 = self.relu(self.conv8(cout7_dense))
cout8_dense = self.relu(torch.cat([conv1, conv2, conv3, conv4, conv5, conv6, conv7, conv8], 1))
return cout8_dense
class EERSGAN(nn.Module):
def __init__(self,num_channels=3, base_filter=64, feat = 256, num_stages=10, scale_factor=4):
super(EERSGAN, self ).__init__()
self.scale = scale_factor
self.lrelu = nn.PReLU()
self.conv1 = nn.Conv2d(in_channels=num_channels, out_channels=128, kernel_size=3, stride=1, padding=1)
self.denseblock1 = self.make_layer(_Dense_Block, 128)
self.denseblock2 = self.make_layer(_Dense_Block, 256)
self.denseblock3 = self.make_layer(_Dense_Block, 384)
self.denseblock4 = self.make_layer(_Dense_Block, 512)
self.denseblock5 = self.make_layer(_Dense_Block, 640)
self.denseblock6 = self.make_layer(_Dense_Block, 768)
self.bottleneck = nn.Conv2d(in_channels=896, out_channels=256, kernel_size=1, stride=1, padding=0, bias=False)
self.ps = nn.PixelShuffle(self.scale)
out_dim = int(256/self.scale/self.scale)
self.reconstruction = nn.Conv2d(in_channels=out_dim, out_channels=3, kernel_size=3, stride=1, padding=1, bias=False)
self.Laplacian = Laplacian()
en = [nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
nn.LeakyReLU(0.2),
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=1, bias=False),
nn.LeakyReLU(0.2),
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1, bias=False),
nn.LeakyReLU(0.2),
nn.Conv2d(in_channels=256, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
nn.LeakyReLU(0.2)]
self.en = nn.Sequential(*en)
self.denseblock_e1 = self.make_layer(_Dense_Block, 64)
self.denseblock_e2 = self.make_layer(_Dense_Block, 192)
self.denseblock_e3 = self.make_layer(_Dense_Block, 320)
self.bottleneck_2 = nn.Conv2d(in_channels=448, out_channels=64 , kernel_size=1, stride=1, padding=0, bias=False)
self.e8 = nn.Conv2d(in_channels=448, out_channels=256, kernel_size=3, stride=1, padding=1)
mask = [nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=2, padding=1, bias=False),
nn.LeakyReLU(0.2),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
nn.LeakyReLU(0.2),
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=1, bias=False),
nn.LeakyReLU(0.2),
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1, bias=False),
nn.LeakyReLU(0.2)]
self.mask = nn.Sequential(*mask)
self.ps2 = nn.PixelShuffle(self.scale)
out_dim = int(256 / self.scale / self.scale)
self.reconstruction_2 = nn.Conv2d(in_channels=out_dim, out_channels=3, kernel_size=3, stride=1, padding=1,
bias=False)
self.get_g_nopadding = Get_gradient_nopadding()
self.laplacian = Laplacian()
#weight = torch.FloatTensor([
#[[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]],[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]],[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]]],
#[[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]],[[8.,0.,0.],[0.,8.,0.],[0.,0.,8.]],[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]]],
#[[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]],[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]],[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]]]])
#self.weight = nn.Parameter(data = weight,requires_grad = False).cuda()
def make_layer(self, block, channel_in):
layers = []
layers.append(block(channel_in))
return nn.Sequential(*layers)
def forward(self, x):
#pdb.set_trace()
bic = F.upsample(x, size=(int(x.shape[2]*self.scale), int(x.shape[3]*self.scale)), mode='bilinear')
out = self.lrelu(self.conv1(x))
out1 = self.denseblock1(out)
concat = torch.cat([out,out1], 1)
out = self.denseblock2(concat)
concat = torch.cat([concat,out], 1)
out = self.denseblock3(concat)
concat = torch.cat([concat,out], 1)
out = self.denseblock4(concat)
concat = torch.cat([concat,out], 1)
out = self.denseblock5(concat)
concat = torch.cat([concat,out], 1)
out = self.denseblock6(concat)
concat = torch.cat([concat,out], 1)
#out = self.denseblock7(concat)
#concat = torch.cat([concat,out], 1)
out = self.bottleneck(concat)
out = self.ps(out)
sr_base = self.reconstruction(out) + bic
x_fa = self.laplacian(sr_base)
#pdb.set_trace()
x_f = self.en(x_fa.cuda())
x_f2 = self.denseblock_e1(x_f)
concat = torch.cat([x_f,x_f2], 1)
x_f = self.denseblock_e2(concat)
concat = torch.cat([concat,x_f], 1)
x_f = self.denseblock_e3(concat)
concat = torch.cat([concat,x_f], 1)
x_f = self.lrelu(self.e8(concat))
x_mask = self.mask(self.get_g_nopadding(sr_base))
frame_mask = torch.sigmoid(x_mask)
x_frame = frame_mask * x_f +x_f
#x_frame = self.bottleneck_2(x_frame)
x_frame = self.ps2(x_frame)
x_frame = self.reconstruction_2(x_frame)
x_sr = x_frame + sr_base - x_fa
frame_e = x_frame - x_fa
return frame_e, sr_base, x_sr
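# Note on EERSGAN.forward above: it returns the learned edge residual
# (frame_e = x_frame - laplacian(sr_base)), the dense-block super-resolved base image
# (sr_base), and the edge-enhanced output (x_sr = sr_base + frame_e).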
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.net = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, padding=1),
nn.LeakyReLU(0.2),
nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1),
nn.BatchNorm2d(64),
nn.LeakyReLU(0.2),
nn.Conv2d(64, 128, kernel_size=3, padding=1),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2),
nn.Conv2d(128, 128, kernel_size=3, stride=2, padding=1),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2),
nn.Conv2d(128, 256, kernel_size=3, padding=1),
nn.BatchNorm2d(256),
nn.LeakyReLU(0.2),
nn.Conv2d(256, 256, kernel_size=3, stride=2, padding=1),
nn.BatchNorm2d(256),
nn.LeakyReLU(0.2),
nn.Conv2d(256, 512, kernel_size=3, padding=1),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.2),
nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.2),
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(512, 1024, kernel_size=1),
nn.LeakyReLU(0.2),
nn.Conv2d(1024, 1, kernel_size=1)
)
def forward(self, x):
batch_size = x.size(0)
return torch.sigmoid(self.net(x).view(batch_size))
if __name__ == '__main__':
    model = EERSGAN(scale_factor=4).cuda()  # 'Generator' is not defined here; EERSGAN is the generator implemented above
img = torch.rand(3,64,64)
#img = img.unsqueeze(0)
img = img.unsqueeze(0)
img=img.cuda()
out=model(img)
|
the-stack_0_1369 | # -*- coding: utf-8 -*-
'''
The module used to execute states in salt. A state is unlike a module
execution in that instead of just executing a command it ensure that a
certain state is present on the system.
The data sent to the state calls is as follows:
{ 'state': '<state module name>',
'fun': '<state function name>',
'name': '<the name argument passed to all states>'
'argn': '<arbitrary argument, can have many of these>'
}
'''
# Import python libs
from __future__ import absolute_import
import os
import sys
import copy
import site
import fnmatch
import logging
import datetime
import traceback
import re
# Import salt libs
import salt.utils
import salt.loader
import salt.minion
import salt.pillar
import salt.fileclient
import salt.utils.event
import salt.utils.url
import salt.syspaths as syspaths
from salt.utils import immutabletypes
from salt.template import compile_template, compile_template_str
from salt.exceptions import (
SaltException,
SaltInvocationError,
SaltRenderError,
SaltReqTimeoutError
)
from salt.utils.odict import OrderedDict, DefaultOrderedDict
from salt.utils.locales import sdecode
# Explicit late import to avoid circular import. DO NOT MOVE THIS.
import salt.utils.yamlloader as yamlloader
# Import third party libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import salt.ext.six as six
from salt.ext.six.moves import map, range
# pylint: enable=import-error,no-name-in-module,redefined-builtin
log = logging.getLogger(__name__)
# These are keywords passed to state module functions which are to be used
# by salt in this state module and not on the actual state module function
STATE_REQUISITE_KEYWORDS = frozenset([
'onchanges',
'onfail',
'prereq',
'prerequired',
'watch',
'require',
'listen',
])
STATE_REQUISITE_IN_KEYWORDS = frozenset([
'onchanges_in',
'onfail_in',
'prereq_in',
'watch_in',
'require_in',
'listen_in',
])
STATE_RUNTIME_KEYWORDS = frozenset([
'fun',
'state',
'check_cmd',
'failhard',
'onlyif',
'unless',
'order',
'prereq',
'prereq_in',
'prerequired',
'reload_modules',
'reload_grains',
'reload_pillar',
'fire_event',
'saltenv',
'use',
'use_in',
'__env__',
'__sls__',
'__id__',
'__pub_user',
'__pub_arg',
'__pub_jid',
'__pub_fun',
'__pub_tgt',
'__pub_ret',
'__pub_pid',
'__pub_tgt_type',
'__prereq__',
])
STATE_INTERNAL_KEYWORDS = STATE_REQUISITE_KEYWORDS.union(STATE_REQUISITE_IN_KEYWORDS).union(STATE_RUNTIME_KEYWORDS)
VALID_PILLAR_ENC = ('gpg',)
def _odict_hashable(self):
return id(self)
OrderedDict.__hash__ = _odict_hashable
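# Note: pointing OrderedDict.__hash__ at _odict_hashable makes the ordered dictionaries
# used for state data hashable by object identity, so they can be placed in sets and
# used as dictionary keys even though dicts are normally unhashable.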
def split_low_tag(tag):
'''
Take a low tag and split it back into the low dict that it came from
'''
state, id_, name, fun = tag.split('_|-')
return {'state': state,
'__id__': id_,
'name': name,
'fun': fun}
def _gen_tag(low):
'''
Generate the running dict tag string from the low data structure
'''
return '{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(low)
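# Illustrative example (values are hypothetical): a low chunk such as
#   {'state': 'pkg', '__id__': 'install_vim', 'name': 'vim', 'fun': 'installed'}
# yields the tag 'pkg_|-install_vim_|-vim_|-installed', which split_low_tag above
# splits back into its four components.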
def _l_tag(name, id_):
low = {'name': 'listen_{0}'.format(name),
'__id__': 'listen_{0}'.format(id_),
'state': 'Listen_Error',
'fun': 'Listen_Error'}
return _gen_tag(low)
def trim_req(req):
'''
Trim any function off of a requisite
'''
reqfirst = next(iter(req))
if '.' in reqfirst:
return {reqfirst.split('.')[0]: req[reqfirst]}
return req
def state_args(id_, state, high):
'''
Return a set of the arguments passed to the named state
'''
args = set()
if id_ not in high:
return args
if state not in high[id_]:
return args
for item in high[id_][state]:
if not isinstance(item, dict):
continue
if len(item) != 1:
continue
args.add(next(iter(item)))
return args
def find_name(name, state, high):
'''
Scan high data for the id referencing the given name and return a list of (IDs, state) tuples that match
Note: if `state` is sls, then we are looking for all IDs that match the given SLS
'''
ext_id = []
if name in high:
ext_id.append((name, state))
# if we are requiring an entire SLS, then we need to add ourselves to everything in that SLS
elif state == 'sls':
        for nid, item in six.iteritems(high):
if item['__sls__'] == name:
ext_id.append((nid, next(iter(item))))
# otherwise we are requiring a single state, lets find it
else:
# We need to scan for the name
for nid in high:
if state in high[nid]:
if isinstance(high[nid][state], list):
for arg in high[nid][state]:
if not isinstance(arg, dict):
continue
if len(arg) != 1:
continue
if arg[next(iter(arg))] == name:
ext_id.append((name, state))
return ext_id
def format_log(ret):
'''
Format the state into a log message
'''
msg = ''
if isinstance(ret, dict):
# Looks like the ret may be a valid state return
if 'changes' in ret:
# Yep, looks like a valid state return
chg = ret['changes']
if not chg:
if ret['comment']:
msg = ret['comment']
else:
msg = 'No changes made for {0[name]}'.format(ret)
elif isinstance(chg, dict):
if 'diff' in chg:
if isinstance(chg['diff'], six.string_types):
msg = 'File changed:\n{0}'.format(chg['diff'])
if all([isinstance(x, dict) for x in six.itervalues(chg)]):
if all([('old' in x and 'new' in x)
for x in six.itervalues(chg)]):
msg = 'Made the following changes:\n'
for pkg in chg:
old = chg[pkg]['old']
if not old and old not in (False, None):
old = 'absent'
new = chg[pkg]['new']
if not new and new not in (False, None):
new = 'absent'
msg += '\'{0}\' changed from \'{1}\' to ' \
'\'{2}\'\n'.format(pkg, old, new)
if not msg:
msg = str(ret['changes'])
if ret['result'] is True or ret['result'] is None:
log.info(msg)
else:
log.error(msg)
else:
# catch unhandled data
log.info(str(ret))
def master_compile(master_opts, minion_opts, grains, id_, saltenv):
'''
Compile the master side low state data, and build the hidden state file
'''
st_ = MasterHighState(master_opts, minion_opts, grains, id_, saltenv)
return st_.compile_highstate()
def ishashable(obj):
try:
hash(obj)
except TypeError:
return False
return True
class StateError(Exception):
'''
Custom exception class.
'''
pass
class Compiler(object):
'''
Class used to compile and manage the High Data structure
'''
def __init__(self, opts, renderers):
self.opts = opts
self.rend = renderers
def render_template(self, template, **kwargs):
'''
Enforce the states in a template
'''
high = compile_template(
template, self.rend, self.opts['renderer'], **kwargs)
if not high:
return high
return self.pad_funcs(high)
def pad_funcs(self, high):
'''
Turns dot delimited function refs into function strings
'''
for name in high:
if not isinstance(high[name], dict):
if isinstance(high[name], six.string_types):
# Is this a short state? It needs to be padded!
if '.' in high[name]:
comps = high[name].split('.')
if len(comps) >= 2:
# Merge the comps
comps[1] = '.'.join(comps[1:len(comps)])
high[name] = {
# '__sls__': template,
# '__env__': None,
comps[0]: [comps[1]]
}
continue
continue
skeys = set()
for key in sorted(high[name]):
if key.startswith('_'):
continue
if not isinstance(high[name][key], list):
continue
if '.' in key:
comps = key.split('.')
if len(comps) >= 2:
# Merge the comps
comps[1] = '.'.join(comps[1:len(comps)])
# Salt doesn't support state files such as:
#
# /etc/redis/redis.conf:
# file.managed:
# - user: redis
# - group: redis
# - mode: 644
# file.comment:
# - regex: ^requirepass
if comps[0] in skeys:
continue
high[name][comps[0]] = high[name].pop(key)
high[name][comps[0]].append(comps[1])
skeys.add(comps[0])
continue
skeys.add(key)
return high
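# Illustrative example (hypothetical data): a short declaration rendered as
#   {'vim': 'pkg.installed'}
# is padded above into {'vim': {'pkg': ['installed']}} so the rest of the
# compiler only ever deals with the long form.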
def verify_high(self, high):
'''
Verify that the high data is viable and follows the data structure
'''
errors = []
if not isinstance(high, dict):
errors.append('High data is not a dictionary and is invalid')
reqs = {}
for name, body in six.iteritems(high):
if name.startswith('__'):
continue
if not isinstance(name, six.string_types):
errors.append(
'ID \'{0}\' in SLS \'{1}\' is not formed as a string, but '
'is a {2}'.format(
name,
body['__sls__'],
type(name).__name__
)
)
if not isinstance(body, dict):
err = ('The type {0} in {1} is not formatted as a dictionary'
.format(name, body))
errors.append(err)
continue
for state in body:
if state.startswith('__'):
continue
if not isinstance(body[state], list):
errors.append(
'State \'{0}\' in SLS \'{1}\' is not formed as a list'
.format(name, body['__sls__'])
)
else:
fun = 0
if '.' in state:
fun += 1
for arg in body[state]:
if isinstance(arg, six.string_types):
fun += 1
if ' ' in arg.strip():
errors.append(('The function "{0}" in state '
'"{1}" in SLS "{2}" has '
'whitespace, a function with whitespace is '
'not supported, perhaps this is an argument '
'that is missing a ":"').format(
arg,
name,
body['__sls__']))
elif isinstance(arg, dict):
# The arg is a dict, if the arg is require or
# watch, it must be a list.
#
# Add the requires to the reqs dict and check them
# all for recursive requisites.
argfirst = next(iter(arg))
if argfirst in ('require', 'watch', 'prereq', 'onchanges'):
if not isinstance(arg[argfirst], list):
errors.append(('The {0}'
' statement in state \'{1}\' in SLS \'{2}\' '
'needs to be formed as a list').format(
argfirst,
name,
body['__sls__']
))
# It is a list, verify that the members of the
# list are all single key dicts.
else:
reqs[name] = {'state': state}
for req in arg[argfirst]:
if not isinstance(req, dict):
err = ('Requisite declaration {0}'
' in SLS {1} is not formed as a'
' single key dictionary').format(
req,
body['__sls__'])
errors.append(err)
continue
req_key = next(iter(req))
req_val = req[req_key]
if '.' in req_key:
errors.append((
'Invalid requisite type \'{0}\' '
'in state \'{1}\', in SLS '
'\'{2}\'. Requisite types must '
'not contain dots, did you '
'mean \'{3}\'?'.format(
req_key,
name,
body['__sls__'],
req_key[:req_key.find('.')]
)
))
if not ishashable(req_val):
errors.append((
'Illegal requisite "{0}", '
'in SLS {1}\n'
).format(
str(req_val),
body['__sls__']))
continue
# Check for global recursive requisites
reqs[name][req_val] = req_key
# I am going beyond 80 chars on
# purpose, this is just too much
# of a pain to deal with otherwise
if req_val in reqs:
if name in reqs[req_val]:
if reqs[req_val][name] == state:
if reqs[req_val]['state'] == reqs[name][req_val]:
err = ('A recursive '
'requisite was found, SLS '
'"{0}" ID "{1}" ID "{2}"'
).format(
body['__sls__'],
name,
req_val
)
errors.append(err)
# Make sure that there is only one key in the
# dict
if len(list(arg)) != 1:
errors.append(('Multiple dictionaries '
'defined in argument of state \'{0}\' in SLS'
' \'{1}\'').format(
name,
body['__sls__']))
if not fun:
if state == 'require' or state == 'watch':
continue
errors.append(('No function declared in state \'{0}\' in'
' SLS \'{1}\'').format(state, body['__sls__']))
elif fun > 1:
errors.append(
'Too many functions declared in state \'{0}\' in '
'SLS \'{1}\''.format(state, body['__sls__'])
)
return errors
def order_chunks(self, chunks):
'''
Sort the chunk list verifying that the chunks follow the order
specified in the order options.
'''
cap = 1
for chunk in chunks:
if 'order' in chunk:
if not isinstance(chunk['order'], int):
continue
chunk_order = chunk['order']
if chunk_order > cap - 1 and chunk_order > 0:
cap = chunk_order + 100
for chunk in chunks:
if 'order' not in chunk:
chunk['order'] = cap
continue
if not isinstance(chunk['order'], (int, float)):
if chunk['order'] == 'last':
chunk['order'] = cap + 1000000
else:
chunk['order'] = cap
if 'name_order' in chunk:
chunk['order'] = chunk['order'] + chunk.pop('name_order') / 10000.0
if chunk['order'] < 0:
chunk['order'] = cap + 1000000 + chunk['order']
chunk['name'] = sdecode(chunk['name'])
chunks.sort(key=lambda chunk: (chunk['order'], '{0[state]}{0[name]}{0[fun]}'.format(chunk)))
return chunks
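# Rough sketch of the ordering rules applied above: positive integer 'order'
# values sort first and push the cap above them, chunks without an 'order'
# receive the cap, 'last' maps to cap + 1000000, negative orders wrap to the
# tail end, and 'name_order' from a 'names' expansion acts as a fractional
# tie-breaker.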
def compile_high_data(self, high):
'''
"Compile" the high data as it is retrieved from the CLI or YAML into
the individual state executor structures
'''
chunks = []
for name, body in six.iteritems(high):
if name.startswith('__'):
continue
for state, run in six.iteritems(body):
funcs = set()
names = set()
if state.startswith('__'):
continue
chunk = {'state': state,
'name': name}
if '__sls__' in body:
chunk['__sls__'] = body['__sls__']
if '__env__' in body:
chunk['__env__'] = body['__env__']
chunk['__id__'] = name
for arg in run:
if isinstance(arg, six.string_types):
funcs.add(arg)
continue
if isinstance(arg, dict):
for key, val in six.iteritems(arg):
if key == 'names':
names.update(val)
continue
else:
chunk.update(arg)
if names:
name_order = 1
for entry in names:
live = copy.deepcopy(chunk)
if isinstance(entry, dict):
low_name = next(six.iterkeys(entry))
live['name'] = low_name
list(map(live.update, entry[low_name]))
else:
live['name'] = entry
live['name_order'] = name_order
name_order = name_order + 1
for fun in funcs:
live['fun'] = fun
chunks.append(live)
else:
live = copy.deepcopy(chunk)
for fun in funcs:
live['fun'] = fun
chunks.append(live)
chunks = self.order_chunks(chunks)
return chunks
def apply_exclude(self, high):
'''
Read in the __exclude__ list and remove all excluded objects from the
high data
'''
if '__exclude__' not in high:
return high
ex_sls = set()
ex_id = set()
exclude = high.pop('__exclude__')
for exc in exclude:
if isinstance(exc, str):
# The exclude statement is a string, assume it is an sls
ex_sls.add(exc)
if isinstance(exc, dict):
# Explicitly declared exclude
if len(exc) != 1:
continue
key = next(six.iterkeys(exc))
if key == 'sls':
ex_sls.add(exc['sls'])
elif key == 'id':
ex_id.add(exc['id'])
# Now the excludes have been simplified, use them
if ex_sls:
# There are sls excludes, find the associated ids
for name, body in six.iteritems(high):
if name.startswith('__'):
continue
if body.get('__sls__', '') in ex_sls:
ex_id.add(name)
for id_ in ex_id:
if id_ in high:
high.pop(id_)
return high
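# Illustrative example (hypothetical data): an SLS exclude declaration rendered
# into __exclude__ entries such as [{'sls': 'ssh'}, {'id': 'old_user'}] removes
# every ID sourced from the 'ssh' SLS plus the single ID 'old_user' from the
# high data before compilation.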
class State(object):
'''
Class used to execute salt states
'''
def __init__(self, opts, pillar=None, jid=None, pillar_enc=None, proxy=None):
if 'grains' not in opts:
opts['grains'] = salt.loader.grains(opts)
self.opts = opts
self.proxy = proxy
self._pillar_override = pillar
if pillar_enc is not None:
try:
pillar_enc = pillar_enc.lower()
except AttributeError:
pillar_enc = str(pillar_enc).lower()
if pillar_enc not in VALID_PILLAR_ENC:
raise SaltInvocationError(
'Invalid pillar encryption type. Valid types are: {0}'
.format(', '.join(VALID_PILLAR_ENC))
)
self._pillar_enc = pillar_enc
self.opts['pillar'] = self._gather_pillar()
self.state_con = {}
self.load_modules(proxy=proxy)
self.active = set()
self.mod_init = set()
self.pre = {}
self.__run_num = 0
self.jid = jid
self.instance_id = str(id(self))
self.inject_globals = {}
def _decrypt_pillar_override(self):
'''
Decrypt CLI pillar overrides
'''
if not self._pillar_enc:
decrypt = None
else:
# Pillar data must be gathered before the modules are loaded, since
# it will be packed into each loaded function. Thus, we will not
# have access to the functions and must pass an empty dict here.
decrypt = salt.loader.render(
self.opts,
{}).get(self._pillar_enc)
try:
return decrypt(self._pillar_override, translate_newlines=True)
except TypeError:
return self._pillar_override
def _gather_pillar(self):
'''
Whenever a state run starts, gather the pillar data fresh
'''
pillar = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillar=self._pillar_override,
pillarenv=self.opts.get('pillarenv')
)
ret = pillar.compile_pillar()
if self._pillar_override:
if isinstance(self._pillar_override, dict):
ret.update(self._decrypt_pillar_override())
else:
decrypted = yamlloader.load(
self._decrypt_pillar_override(),
Loader=yamlloader.SaltYamlSafeLoader
)
if not isinstance(decrypted, dict):
log.error(
'Decrypted pillar data did not render to a dictionary'
)
else:
ret.update(decrypted)
return ret
def _mod_init(self, low):
'''
Check the module initialization function, if this is the first run
of a state package that has a mod_init function, then execute the
mod_init function in the state module.
'''
# ensure that the module is loaded
try:
self.states['{0}.{1}'.format(low['state'], low['fun'])] # pylint: disable=W0106
except KeyError:
return
minit = '{0}.mod_init'.format(low['state'])
if low['state'] not in self.mod_init:
if minit in self.states._dict:
mret = self.states[minit](low)
if not mret:
return
self.mod_init.add(low['state'])
def _mod_aggregate(self, low, running, chunks):
'''
Execute the aggregation systems to runtime modify the low chunk
'''
agg_opt = self.functions['config.option']('state_aggregate')
if 'aggregate' in low:
agg_opt = low['aggregate']
if agg_opt is True:
agg_opt = [low['state']]
else:
return low
if low['state'] in agg_opt and not low.get('__agg__'):
agg_fun = '{0}.mod_aggregate'.format(low['state'])
if agg_fun in self.states:
try:
low = self.states[agg_fun](low, chunks, running)
low['__agg__'] = True
except TypeError:
log.error('Failed to execute aggregate for state {0}'.format(low['state']))
return low
def _run_check(self, low_data):
'''
Evaluate the onlyif and unless checks: the state should run only if onlyif returns 0 and unless returns non-zero.
'''
ret = {'result': False}
cmd_opts = {}
if 'shell' in self.opts['grains']:
cmd_opts['shell'] = self.opts['grains'].get('shell')
if 'onlyif' in low_data:
if not isinstance(low_data['onlyif'], list):
low_data_onlyif = [low_data['onlyif']]
else:
low_data_onlyif = low_data['onlyif']
for entry in low_data_onlyif:
cmd = self.functions['cmd.retcode'](
entry, ignore_retcode=True, python_shell=True, **cmd_opts)
log.debug('Last command return code: {0}'.format(cmd))
if cmd != 0 and ret['result'] is False:
ret.update({'comment': 'onlyif execution failed',
'skip_watch': True,
'result': True})
return ret
elif cmd == 0:
ret.update({'comment': 'onlyif execution succeeded', 'result': False})
return ret
if 'unless' in low_data:
if not isinstance(low_data['unless'], list):
low_data_unless = [low_data['unless']]
else:
low_data_unless = low_data['unless']
for entry in low_data_unless:
cmd = self.functions['cmd.retcode'](
entry, ignore_retcode=True, python_shell=True, **cmd_opts)
log.debug('Last command return code: {0}'.format(cmd))
if cmd == 0 and ret['result'] is False:
ret.update({'comment': 'unless execution succeeded',
'skip_watch': True,
'result': True})
elif cmd != 0:
ret.update({'comment': 'unless execution failed', 'result': False})
return ret
# No reason to stop, return ret
return ret
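# Rough summary of the checks above: an 'onlyif' command exiting non-zero, or
# an 'unless' command exiting 0, flags the state as skipped ('result': True
# plus 'skip_watch'); otherwise 'result' stays False and the caller proceeds
# to run the state normally.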
def _run_check_cmd(self, low_data):
'''
Alter the way a successful state run is determined
'''
ret = {'result': False}
cmd_opts = {}
if 'shell' in self.opts['grains']:
cmd_opts['shell'] = self.opts['grains'].get('shell')
for entry in low_data['check_cmd']:
cmd = self.functions['cmd.retcode'](
entry, ignore_retcode=True, python_shell=True, **cmd_opts)
log.debug('Last command return code: {0}'.format(cmd))
if cmd == 0 and ret['result'] is False:
ret.update({'comment': 'check_cmd determined the state succeeded', 'result': True})
elif cmd != 0:
ret.update({'comment': 'check_cmd determined the state failed', 'result': False})
return ret
return ret
def reset_run_num(self):
'''
Reset the run_num value to 0
'''
self.__run_num = 0
def load_modules(self, data=None, proxy=None):
'''
Load the modules into the state
'''
log.info('Loading fresh modules for state activity')
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, self.state_con,
utils=self.utils,
proxy=proxy)
if isinstance(data, dict):
if data.get('provider', False):
if isinstance(data['provider'], str):
providers = [{data['state']: data['provider']}]
elif isinstance(data['provider'], list):
providers = data['provider']
else:
providers = {}
for provider in providers:
for mod in provider:
funcs = salt.loader.raw_mod(self.opts,
provider[mod],
self.functions)
if funcs:
for func in funcs:
f_key = '{0}{1}'.format(
mod,
func[func.rindex('.'):]
)
self.functions[f_key] = funcs[func]
self.serializers = salt.loader.serializers(self.opts)
self.states = salt.loader.states(self.opts, self.functions, self.utils, self.serializers)
self.rend = salt.loader.render(self.opts, self.functions, states=self.states)
def module_refresh(self):
'''
Refresh all the modules
'''
log.debug('Refreshing modules...')
if self.opts['grains'].get('os') != 'MacOS':
# In case a package has been installed into the current python
# process 'site-packages', the 'site' module needs to be reloaded in
# order for the newly installed package to be importable.
try:
reload(site)
except RuntimeError:
log.error('Error encountered during module reload. Modules were not reloaded.')
self.load_modules(proxy=self.proxy)
if not self.opts.get('local', False) and self.opts.get('multiprocessing', True):
self.functions['saltutil.refresh_modules']()
def check_refresh(self, data, ret):
'''
Check to see if the modules for this state instance need to be updated,
only update if the state is a file or a package and if it changed
something. If the file function is managed, check to see if the file is a
possible module type, e.g. a .py, .pyx, or .so file. Always refresh if the
function is recurse, since that can lay down anything.
'''
_reload_modules = False
if data.get('reload_grains', False):
log.debug('Refreshing grains...')
self.opts['grains'] = salt.loader.grains(self.opts)
_reload_modules = True
if data.get('reload_pillar', False):
log.debug('Refreshing pillar...')
self.opts['pillar'] = self._gather_pillar()
_reload_modules = True
if data.get('reload_modules', False) or _reload_modules:
# User explicitly requests a reload
self.module_refresh()
return
if not ret['changes']:
return
if data['state'] == 'file':
if data['fun'] == 'managed':
if data['name'].endswith(
('.py', '.pyx', '.pyo', '.pyc', '.so')):
self.module_refresh()
elif data['fun'] == 'recurse':
self.module_refresh()
elif data['fun'] == 'symlink':
if 'bin' in data['name']:
self.module_refresh()
elif data['state'] in ('pkg', 'ports'):
self.module_refresh()
def verify_ret(self, ret):
'''
Verify the state return data
'''
if not isinstance(ret, dict):
raise SaltException(
'Malformed state return, return must be a dict'
)
bad = []
for val in ['name', 'result', 'changes', 'comment']:
if val not in ret:
bad.append(val)
if bad:
raise SaltException(
('The following keys were not present in the state '
'return: {0}'
).format(','.join(bad)))
def verify_data(self, data):
'''
Verify the data, return an error statement if something is wrong
'''
errors = []
if 'state' not in data:
errors.append('Missing "state" data')
if 'fun' not in data:
errors.append('Missing "fun" data')
if 'name' not in data:
errors.append('Missing "name" data')
if data['name'] and not isinstance(data['name'], six.string_types):
errors.append(
'ID \'{0}\' in SLS \'{1}\' is not formed as a string, but is '
'a {2}'.format(
data['name'], data['__sls__'], type(data['name']).__name__)
)
if errors:
return errors
full = data['state'] + '.' + data['fun']
if full not in self.states:
if '__sls__' in data:
errors.append(
'State \'{0}\' was not found in SLS \'{1}\''.format(
full,
data['__sls__']
)
)
reason = self.states.missing_fun_string(full)
if reason:
errors.append('Reason: {0}'.format(reason))
else:
errors.append(
'Specified state \'{0}\' was not found'.format(
full
)
)
else:
# First verify that the parameters are met
aspec = salt.utils.args.get_function_argspec(self.states[full])
arglen = 0
deflen = 0
if isinstance(aspec.args, list):
arglen = len(aspec.args)
if isinstance(aspec.defaults, tuple):
deflen = len(aspec.defaults)
for ind in range(arglen - deflen):
if aspec.args[ind] not in data:
errors.append(
'Missing parameter {0} for state {1}'.format(
aspec.args[ind],
full
)
)
# If this chunk has a recursive require, then it will cause a
# recursive loop when executing, check for it
reqdec = ''
if 'require' in data:
reqdec = 'require'
if 'watch' in data:
# Check to see if the state has a mod_watch function; if it does
# not, fall back to a plain require by extending the require
# statement with the contents of watch, so that mod_watch is not
# called but the requisite capability is still honored
if '{0}.mod_watch'.format(data['state']) not in self.states:
if 'require' in data:
data['require'].extend(data.pop('watch'))
else:
data['require'] = data.pop('watch')
reqdec = 'require'
else:
reqdec = 'watch'
if reqdec:
for req in data[reqdec]:
reqfirst = next(iter(req))
if data['state'] == reqfirst:
if (fnmatch.fnmatch(data['name'], req[reqfirst])
or fnmatch.fnmatch(data['__id__'], req[reqfirst])):
err = ('Recursive require detected in SLS {0} for'
' require {1} in ID {2}').format(
data['__sls__'],
req,
data['__id__'])
errors.append(err)
return errors
def verify_high(self, high):
'''
Verify that the high data is viable and follows the data structure
'''
errors = []
if not isinstance(high, dict):
errors.append('High data is not a dictionary and is invalid')
reqs = {}
for name, body in six.iteritems(high):
try:
if name.startswith('__'):
continue
except AttributeError:
pass
if not isinstance(name, six.string_types):
errors.append(
'ID \'{0}\' in SLS \'{1}\' is not formed as a string, but '
'is a {2}. It may need to be quoted.'.format(
name, body['__sls__'], type(name).__name__)
)
if not isinstance(body, dict):
err = ('The type {0} in {1} is not formatted as a dictionary'
.format(name, body))
errors.append(err)
continue
for state in body:
if state.startswith('__'):
continue
if body[state] is None:
errors.append(
'ID \'{0}\' in SLS \'{1}\' contains a short declaration '
'({2}) with a trailing colon. When not passing any '
'arguments to a state, the colon must be omitted.'
.format(name, body['__sls__'], state)
)
continue
if not isinstance(body[state], list):
errors.append(
'State \'{0}\' in SLS \'{1}\' is not formed as a list'
.format(name, body['__sls__'])
)
else:
fun = 0
if '.' in state:
fun += 1
for arg in body[state]:
if isinstance(arg, six.string_types):
fun += 1
if ' ' in arg.strip():
errors.append(('The function "{0}" in state '
'"{1}" in SLS "{2}" has '
'whitespace, a function with whitespace is '
'not supported, perhaps this is an argument '
'that is missing a ":"').format(
arg,
name,
body['__sls__']))
elif isinstance(arg, dict):
# The arg is a dict, if the arg is require or
# watch, it must be a list.
#
# Add the requires to the reqs dict and check them
# all for recursive requisites.
argfirst = next(iter(arg))
if argfirst == 'names':
if not isinstance(arg[argfirst], list):
errors.append(
'The \'names\' argument in state '
'\'{0}\' in SLS \'{1}\' needs to be '
'formed as a list'
.format(name, body['__sls__'])
)
if argfirst in ('require', 'watch', 'prereq', 'onchanges'):
if not isinstance(arg[argfirst], list):
errors.append(
'The {0} statement in state \'{1}\' in '
'SLS \'{2}\' needs to be formed as a '
'list'.format(argfirst,
name,
body['__sls__'])
)
# It is a list, verify that the members of the
# list are all single key dicts.
else:
reqs[name] = {'state': state}
for req in arg[argfirst]:
if not isinstance(req, dict):
err = ('Requisite declaration {0}'
' in SLS {1} is not formed as a'
' single key dictionary').format(
req,
body['__sls__'])
errors.append(err)
continue
req_key = next(iter(req))
req_val = req[req_key]
if '.' in req_key:
errors.append((
'Invalid requisite type \'{0}\' '
'in state \'{1}\', in SLS '
'\'{2}\'. Requisite types must '
'not contain dots, did you '
'mean \'{3}\'?'.format(
req_key,
name,
body['__sls__'],
req_key[:req_key.find('.')]
)
))
if not ishashable(req_val):
errors.append((
'Illegal requisite "{0}", '
'please check your syntax.\n'
).format(str(req_val)))
continue
# Check for global recursive requisites
reqs[name][req_val] = req_key
# I am going beyond 80 chars on
# purpose, this is just too much
# of a pain to deal with otherwise
if req_val in reqs:
if name in reqs[req_val]:
if reqs[req_val][name] == state:
if reqs[req_val]['state'] == reqs[name][req_val]:
err = ('A recursive '
'requisite was found, SLS '
'"{0}" ID "{1}" ID "{2}"'
).format(
body['__sls__'],
name,
req_val
)
errors.append(err)
# Make sure that there is only one key in the
# dict
if len(list(arg)) != 1:
errors.append(
'Multiple dictionaries defined in '
'argument of state \'{0}\' in SLS \'{1}\''
.format(name, body['__sls__'])
)
if not fun:
if state == 'require' or state == 'watch':
continue
errors.append(
'No function declared in state \'{0}\' in SLS \'{1}\''
.format(state, body['__sls__'])
)
elif fun > 1:
errors.append(
'Too many functions declared in state \'{0}\' in '
'SLS \'{1}\''.format(state, body['__sls__'])
)
return errors
def verify_chunks(self, chunks):
'''
Verify the chunks in a list of low data structures
'''
err = []
for chunk in chunks:
err += self.verify_data(chunk)
return err
def order_chunks(self, chunks):
'''
Sort the chunk list verifying that the chunks follow the order
specified in the order options.
'''
cap = 1
for chunk in chunks:
if 'order' in chunk:
if not isinstance(chunk['order'], int):
continue
chunk_order = chunk['order']
if chunk_order > cap - 1 and chunk_order > 0:
cap = chunk_order + 100
for chunk in chunks:
if 'order' not in chunk:
chunk['order'] = cap
continue
if not isinstance(chunk['order'], (int, float)):
if chunk['order'] == 'last':
chunk['order'] = cap + 1000000
else:
chunk['order'] = cap
if 'name_order' in chunk:
chunk['order'] = chunk['order'] + chunk.pop('name_order') / 10000.0
if chunk['order'] < 0:
chunk['order'] = cap + 1000000 + chunk['order']
chunks.sort(key=lambda chunk: (chunk['order'], '{0[state]}{0[name]}{0[fun]}'.format(chunk)))
return chunks
def compile_high_data(self, high):
'''
"Compile" the high data as it is retrieved from the CLI or YAML into
the individual state executor structures
'''
chunks = []
for name, body in six.iteritems(high):
if name.startswith('__'):
continue
for state, run in six.iteritems(body):
funcs = set()
names = set()
if state.startswith('__'):
continue
chunk = {'state': state,
'name': name}
if '__sls__' in body:
chunk['__sls__'] = body['__sls__']
if '__env__' in body:
chunk['__env__'] = body['__env__']
chunk['__id__'] = name
for arg in run:
if isinstance(arg, six.string_types):
funcs.add(arg)
continue
if isinstance(arg, dict):
for key, val in six.iteritems(arg):
if key == 'names':
names.update(val)
elif key == 'state':
# Don't pass down a state override
continue
elif (key == 'name' and
not isinstance(val, six.string_types)):
# Invalid name, fall back to ID
chunk[key] = name
else:
chunk[key] = val
if names:
name_order = 1
for entry in names:
live = copy.deepcopy(chunk)
if isinstance(entry, dict):
low_name = next(six.iterkeys(entry))
live['name'] = low_name
list(map(live.update, entry[low_name]))
else:
live['name'] = entry
live['name_order'] = name_order
name_order = name_order + 1
for fun in funcs:
live['fun'] = fun
chunks.append(live)
else:
live = copy.deepcopy(chunk)
for fun in funcs:
live['fun'] = fun
chunks.append(live)
chunks = self.order_chunks(chunks)
return chunks
def reconcile_extend(self, high):
'''
Pull the extend data and add it to the respective high data
'''
errors = []
if '__extend__' not in high:
return high, errors
ext = high.pop('__extend__')
for ext_chunk in ext:
for name, body in six.iteritems(ext_chunk):
if name not in high:
state_type = next(
x for x in body if not x.startswith('__')
)
# Check for a matching 'name' override in high data
ids = find_name(name, state_type, high)
if len(ids) != 1:
errors.append(
'Cannot extend ID \'{0}\' in \'{1}:{2}\'. It is not '
'part of the high state.\n'
'This is likely due to a missing include statement '
'or an incorrectly typed ID.\nEnsure that a '
'state with an ID of \'{0}\' is available\nin '
'environment \'{1}\' and to SLS \'{2}\''.format(
name,
body.get('__env__', 'base'),
body.get('__sls__', 'base'))
)
continue
else:
name = ids[0][0]
for state, run in six.iteritems(body):
if state.startswith('__'):
continue
if state not in high[name]:
high[name][state] = run
continue
# high[name][state] is extended by run, both are lists
for arg in run:
update = False
for hind in range(len(high[name][state])):
if isinstance(arg, six.string_types) and isinstance(high[name][state][hind], six.string_types):
# replacing the function, replace the index
high[name][state].pop(hind)
high[name][state].insert(hind, arg)
update = True
continue
if isinstance(arg, dict) and isinstance(high[name][state][hind], dict):
# It is an option, make sure the options match
argfirst = next(iter(arg))
if argfirst == next(iter(high[name][state][hind])):
# If argfirst is a requisite then we must merge
# our requisite with that of the target state
if argfirst in STATE_REQUISITE_KEYWORDS:
high[name][state][hind][argfirst].extend(arg[argfirst])
# otherwise, its not a requisite and we are just extending (replacing)
else:
high[name][state][hind] = arg
update = True
if (argfirst == 'name' and
next(iter(high[name][state][hind])) == 'names'):
# If names are overwritten by name use the name
high[name][state][hind] = arg
if not update:
high[name][state].append(arg)
return high, errors
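# Illustrative example (hypothetical data): an SLS 'extend' block targeting an
# existing 'apache' ID arrives here as __extend__ chunks; requisite lists such
# as 'watch' or 'require' are appended to, while other arguments simply
# replace the values already present on the target state.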
def apply_exclude(self, high):
'''
Read in the __exclude__ list and remove all excluded objects from the
high data
'''
if '__exclude__' not in high:
return high
ex_sls = set()
ex_id = set()
exclude = high.pop('__exclude__')
for exc in exclude:
if isinstance(exc, str):
# The exclude statement is a string, assume it is an sls
ex_sls.add(exc)
if isinstance(exc, dict):
# Explicitly declared exclude
if len(exc) != 1:
continue
key = next(six.iterkeys(exc))
if key == 'sls':
ex_sls.add(exc['sls'])
elif key == 'id':
ex_id.add(exc['id'])
# Now the excludes have been simplified, use them
if ex_sls:
# There are sls excludes, find the associated ids
for name, body in six.iteritems(high):
if name.startswith('__'):
continue
sls = body.get('__sls__', '')
if not sls:
continue
for ex_ in ex_sls:
if fnmatch.fnmatch(sls, ex_):
ex_id.add(name)
for id_ in ex_id:
if id_ in high:
high.pop(id_)
return high
def requisite_in(self, high):
'''
Extend the data reference with requisite_in arguments
'''
req_in = set([
'require_in',
'watch_in',
'onfail_in',
'onchanges_in',
'use',
'use_in',
'prereq',
'prereq_in',
])
req_in_all = req_in.union(
set([
'require',
'watch',
'onfail',
'onchanges',
]))
extend = {}
errors = []
for id_, body in six.iteritems(high):
if not isinstance(body, dict):
continue
for state, run in six.iteritems(body):
if state.startswith('__'):
continue
for arg in run:
if isinstance(arg, dict):
# It is not a function, verify that the arg is a
# requisite in statement
if len(arg) < 1:
# Empty arg dict
# How did we get this far?
continue
# Split out the components
key = next(iter(arg))
if key not in req_in:
continue
rkey = key.split('_')[0]
items = arg[key]
if isinstance(items, dict):
# Formatted as a single req_in
for _state, name in six.iteritems(items):
# Not a use requisite_in
found = False
if name not in extend:
extend[name] = {}
if '.' in _state:
errors.append((
'Invalid requisite in {0}: {1} for '
'{2}, in SLS \'{3}\'. Requisites must '
'not contain dots, did you mean \'{4}\'?'
.format(
rkey,
_state,
name,
body['__sls__'],
_state[:_state.find('.')]
)
))
_state = _state.split(".")[0]
if _state not in extend[name]:
extend[name][_state] = []
extend[name]['__env__'] = body['__env__']
extend[name]['__sls__'] = body['__sls__']
for ind in range(len(extend[name][_state])):
if next(iter(
extend[name][_state][ind])) == rkey:
# Extending again
extend[name][_state][ind][rkey].append(
{state: id_}
)
found = True
if found:
continue
# The rkey is not present yet, create it
extend[name][_state].append(
{rkey: [{state: id_}]}
)
if isinstance(items, list):
# Formed as a list of requisite additions
for ind in items:
if not isinstance(ind, dict):
# Malformed req_in
continue
if len(ind) < 1:
continue
_state = next(iter(ind))
name = ind[_state]
if '.' in _state:
errors.append((
'Invalid requisite in {0}: {1} for '
'{2}, in SLS \'{3}\'. Requisites must '
'not contain dots, did you mean \'{4}\'?'
.format(
rkey,
_state,
name,
body['__sls__'],
_state[:_state.find('.')]
)
))
_state = _state.split(".")[0]
if key == 'prereq_in':
# Add prerequired to origin
if id_ not in extend:
extend[id_] = {}
if state not in extend[id_]:
extend[id_][state] = []
extend[id_][state].append(
{'prerequired': [{_state: name}]}
)
if key == 'prereq':
# Add prerequired to prereqs
ext_ids = find_name(name, _state, high)
for ext_id, _req_state in ext_ids:
if ext_id not in extend:
extend[ext_id] = {}
if _req_state not in extend[ext_id]:
extend[ext_id][_req_state] = []
extend[ext_id][_req_state].append(
{'prerequired': [{state: id_}]}
)
continue
if key == 'use_in':
# Add the running states args to the
# use_in states
ext_ids = find_name(name, _state, high)
for ext_id, _req_state in ext_ids:
if not ext_id:
continue
ext_args = state_args(ext_id, _state, high)
if ext_id not in extend:
extend[ext_id] = {}
if _req_state not in extend[ext_id]:
extend[ext_id][_req_state] = []
ignore_args = req_in_all.union(ext_args)
for arg in high[id_][state]:
if not isinstance(arg, dict):
continue
if len(arg) != 1:
continue
if next(iter(arg)) in ignore_args:
continue
# Don't use name or names
if next(six.iterkeys(arg)) == 'name':
continue
if next(six.iterkeys(arg)) == 'names':
continue
extend[ext_id][_req_state].append(arg)
continue
if key == 'use':
# Add the use state's args to the
# running state
ext_ids = find_name(name, _state, high)
for ext_id, _req_state in ext_ids:
if not ext_id:
continue
loc_args = state_args(id_, state, high)
if id_ not in extend:
extend[id_] = {}
if state not in extend[id_]:
extend[id_][state] = []
ignore_args = req_in_all.union(loc_args)
for arg in high[ext_id][_req_state]:
if not isinstance(arg, dict):
continue
if len(arg) != 1:
continue
if next(iter(arg)) in ignore_args:
continue
# Don't use name or names
if next(six.iterkeys(arg)) == 'name':
continue
if next(six.iterkeys(arg)) == 'names':
continue
extend[id_][state].append(arg)
continue
found = False
if name not in extend:
extend[name] = {}
if _state not in extend[name]:
extend[name][_state] = []
extend[name]['__env__'] = body['__env__']
extend[name]['__sls__'] = body['__sls__']
for ind in range(len(extend[name][_state])):
if next(iter(
extend[name][_state][ind])) == rkey:
# Extending again
extend[name][_state][ind][rkey].append(
{state: id_}
)
found = True
if found:
continue
# The rkey is not present yet, create it
extend[name][_state].append(
{rkey: [{state: id_}]}
)
high['__extend__'] = []
for key, val in six.iteritems(extend):
high['__extend__'].append({key: val})
req_in_high, req_in_errors = self.reconcile_extend(high)
errors.extend(req_in_errors)
return req_in_high, errors
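# Rough sketch of the transformation above (hypothetical data): a 'watch_in'
# entry pointing at {'service': 'sshd'} is rewritten into an __extend__ entry
# that gives the sshd service state a 'watch' requisite referring back to the
# declaring state, after which reconcile_extend() merges it into the high data.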
def call(self, low, chunks=None, running=None):
'''
Call a state directly with the low data structure, verify data
before processing.
'''
start_time = datetime.datetime.now()
log.info('Running state [{0}] at time {1}'.format(low['name'], start_time.time().isoformat()))
errors = self.verify_data(low)
if errors:
ret = {
'result': False,
'name': low['name'],
'changes': {},
'comment': '',
}
for err in errors:
ret['comment'] += '{0}\n'.format(err)
ret['__run_num__'] = self.__run_num
self.__run_num += 1
format_log(ret)
self.check_refresh(low, ret)
return ret
else:
ret = {'result': False, 'name': low['name'], 'changes': {}}
if not low.get('__prereq__'):
log.info(
'Executing state {0[state]}.{0[fun]} for {0[name]}'.format(
low
)
)
if 'provider' in low:
self.load_modules(low)
state_func_name = '{0[state]}.{0[fun]}'.format(low)
cdata = salt.utils.format_call(
self.states[state_func_name],
low,
initial_ret={'full': state_func_name},
expected_extra_kws=STATE_INTERNAL_KEYWORDS
)
inject_globals = {
# Pass a copy of the running dictionary, the low state chunks and
# the current state dictionaries.
# We pass deep copies here because we don't want any misbehaving
# state module to change these at runtime.
'__low__': immutabletypes.freeze(low),
'__running__': immutabletypes.freeze(running) if running else {},
'__instance_id__': self.instance_id,
'__lowstate__': immutabletypes.freeze(chunks) if chunks else {}
}
if self.inject_globals:
inject_globals.update(self.inject_globals)
if low.get('__prereq__'):
test = sys.modules[self.states[cdata['full']].__module__].__opts__['test']
sys.modules[self.states[cdata['full']].__module__].__opts__['test'] = True
try:
# Let's get a reference to the salt environment to use within this
# state call.
#
# If the state function accepts an 'env' keyword argument, it
# allows the state to be overridden (we look for that in cdata). If
# that's not found in cdata, we look for what we're being passed in
# the original data, namely, the special dunder __env__. If that's
# not found we default to 'base'
if ('unless' in low and '{0[state]}.mod_run_check'.format(low) not in self.states) or \
('onlyif' in low and '{0[state]}.mod_run_check'.format(low) not in self.states):
ret.update(self._run_check(low))
if 'saltenv' in low:
inject_globals['__env__'] = str(low['saltenv'])
elif isinstance(cdata['kwargs'].get('env', None), six.string_types):
# User is using a deprecated env setting which was parsed by
# format_call.
# We check for a string type since module functions which
# allow setting the OS environ also make use of the "env"
# keyword argument, which is not a string
inject_globals['__env__'] = str(cdata['kwargs']['env'])
elif '__env__' in low:
# The user is passing an alternative environment using __env__
# which is also not the appropriate choice, still, handle it
inject_globals['__env__'] = str(low['__env__'])
else:
# Let's use the default environment
inject_globals['__env__'] = 'base'
if 'result' not in ret or ret['result'] is False:
self.states.inject_globals = inject_globals
ret = self.states[cdata['full']](*cdata['args'],
**cdata['kwargs'])
self.states.inject_globals = {}
if 'check_cmd' in low and '{0[state]}.mod_run_check_cmd'.format(low) not in self.states:
ret.update(self._run_check_cmd(low))
self.verify_ret(ret)
except Exception:
trb = traceback.format_exc()
# There are a number of possibilities to not have the cdata
# populated with what we might have expected, so just be smart
# enough to not raise another KeyError as the name is easily
# guessable and fallback in all cases to present the real
# exception to the user
if len(cdata['args']) > 0:
name = cdata['args'][0]
elif 'name' in cdata['kwargs']:
name = cdata['kwargs']['name']
else:
name = low.get('name', low.get('__id__'))
ret = {
'result': False,
'name': name,
'changes': {},
'comment': 'An exception occurred in this state: {0}'.format(
trb)
}
finally:
if low.get('__prereq__'):
sys.modules[self.states[cdata['full']].__module__].__opts__[
'test'] = test
# If format_call got any warnings, let's show them to the user
if 'warnings' in cdata:
ret.setdefault('warnings', []).extend(cdata['warnings'])
if 'provider' in low:
self.load_modules()
if low.get('__prereq__'):
low['__prereq__'] = False
return ret
ret['__run_num__'] = self.__run_num
self.__run_num += 1
format_log(ret)
self.check_refresh(low, ret)
finish_time = datetime.datetime.now()
ret['start_time'] = start_time.time().isoformat()
delta = (finish_time - start_time)
# duration in milliseconds, carrying microsecond precision
duration = (delta.seconds * 1000000 + delta.microseconds)/1000.0
ret['duration'] = duration
ret['__id__'] = low['__id__']
log.info('Completed state [{0}] at time {1} duration_in_ms={2}'.format(low['name'], finish_time.time().isoformat(), duration))
return ret
def call_chunks(self, chunks):
'''
Iterate over a list of chunks and call them, checking for requires.
'''
running = {}
for low in chunks:
if '__FAILHARD__' in running:
running.pop('__FAILHARD__')
return running
tag = _gen_tag(low)
if tag not in running:
running = self.call_chunk(low, running, chunks)
if self.check_failhard(low, running):
return running
self.active = set()
return running
def check_failhard(self, low, running):
'''
Check if the low data chunk should send a failhard signal
'''
tag = _gen_tag(low)
if ((low.get('failhard', False) or self.opts['failhard'])
and tag in running):
return not running[tag]['result']
return False
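# failhard: when set on the chunk or globally in the minion opts, a failed
# chunk aborts the remainder of the run; call_chunks() consults this after
# every chunk and call_chunk() plants '__FAILHARD__' in the running dict to
# propagate the abort upward.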
def check_requisite(self, low, running, chunks, pre=False):
'''
Look into the running data to check the status of all requisite
states
'''
present = False
# If mod_watch is not available make it a require
if 'watch' in low:
if '{0}.mod_watch'.format(low['state']) not in self.states:
if 'require' in low:
low['require'].extend(low.pop('watch'))
else:
low['require'] = low.pop('watch')
else:
present = True
if 'require' in low:
present = True
if 'prerequired' in low:
present = True
if 'prereq' in low:
present = True
if 'onfail' in low:
present = True
if 'onchanges' in low:
present = True
if not present:
return 'met', ()
reqs = {
'require': [],
'watch': [],
'prereq': [],
'onfail': [],
'onchanges': []}
if pre:
reqs['prerequired'] = []
for r_state in reqs:
if r_state in low and low[r_state] is not None:
for req in low[r_state]:
req = trim_req(req)
found = False
for chunk in chunks:
req_key = next(iter(req))
req_val = req[req_key]
if req_val is None:
continue
if req_key == 'sls':
# Allow requisite tracking of entire sls files
if fnmatch.fnmatch(chunk['__sls__'], req_val):
found = True
reqs[r_state].append(chunk)
continue
if (fnmatch.fnmatch(chunk['name'], req_val) or
fnmatch.fnmatch(chunk['__id__'], req_val)):
if chunk['state'] == req_key:
found = True
reqs[r_state].append(chunk)
if not found:
return 'unmet', ()
fun_stats = set()
for r_state, chunks in six.iteritems(reqs):
if r_state == 'prereq':
run_dict = self.pre
else:
run_dict = running
for chunk in chunks:
tag = _gen_tag(chunk)
if tag not in run_dict:
fun_stats.add('unmet')
continue
if r_state == 'onfail':
if run_dict[tag]['result'] is True:
fun_stats.add('onfail')
continue
else:
if run_dict[tag]['result'] is False:
fun_stats.add('fail')
continue
if r_state == 'onchanges':
if not run_dict[tag]['changes']:
fun_stats.add('onchanges')
else:
fun_stats.add('onchangesmet')
continue
if r_state == 'watch' and run_dict[tag]['changes']:
fun_stats.add('change')
continue
if r_state == 'prereq' and run_dict[tag]['result'] is None:
fun_stats.add('premet')
if r_state == 'prereq' and run_dict[tag]['result'] is not None:
fun_stats.add('pre')
else:
fun_stats.add('met')
if 'unmet' in fun_stats:
status = 'unmet'
elif 'fail' in fun_stats:
status = 'fail'
elif 'pre' in fun_stats:
if 'premet' in fun_stats:
status = 'met'
else:
status = 'pre'
elif 'onfail' in fun_stats:
status = 'onfail'
elif 'onchanges' in fun_stats and 'onchangesmet' not in fun_stats:
status = 'onchanges'
elif 'change' in fun_stats:
status = 'change'
else:
status = 'met'
return status, reqs
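# Statuses returned above: 'met', 'unmet', 'fail', 'pre', 'onfail',
# 'onchanges', and 'change'; call_chunk() below branches on these to decide
# whether to run the chunk, defer it, or record a requisite failure.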
def event(self, chunk_ret, length, fire_event=False):
'''
Fire an event on the master bus
If `fire_event` is set to True an event will be sent with the
chunk name in the tag and the chunk result in the event data.
If `fire_event` is set to a string such as `mystate/is/finished`,
an event will be sent with the string added to the tag and the chunk
result in the event data.
If the `state_events` is set to True in the config, then after the
chunk is evaluated an event will be set up to the master with the
results.
'''
if not self.opts.get('local') and (self.opts.get('state_events', True) or fire_event) and self.opts.get('master_uri'):
ret = {'ret': chunk_ret}
if fire_event is True:
tag = salt.utils.event.tagify(
[self.jid, self.opts['id'], str(chunk_ret['name'])], 'state_result'
)
elif isinstance(fire_event, six.string_types):
tag = salt.utils.event.tagify(
[self.jid, self.opts['id'], str(fire_event)], 'state_result'
)
else:
tag = salt.utils.event.tagify(
[self.jid, 'prog', self.opts['id'], str(chunk_ret['__run_num__'])], 'job'
)
ret['len'] = length
preload = {'jid': self.jid}
self.functions['event.fire_master'](ret, tag, preload=preload)
def call_chunk(self, low, running, chunks):
'''
Check if a chunk has any requires, execute the requires and then
the chunk
'''
low = self._mod_aggregate(low, running, chunks)
self._mod_init(low)
tag = _gen_tag(low)
if not low.get('prerequired'):
self.active.add(tag)
requisites = ['require', 'watch', 'prereq', 'onfail', 'onchanges']
if not low.get('__prereq__'):
requisites.append('prerequired')
status, reqs = self.check_requisite(low, running, chunks, True)
else:
status, reqs = self.check_requisite(low, running, chunks)
if status == 'unmet':
lost = {}
reqs = []
for requisite in requisites:
lost[requisite] = []
if requisite not in low:
continue
for req in low[requisite]:
req = trim_req(req)
found = False
req_key = next(iter(req))
req_val = req[req_key]
for chunk in chunks:
if req_val is None:
continue
if req_key == 'sls':
# Allow requisite tracking of entire sls files
if fnmatch.fnmatch(chunk['__sls__'], req_val):
if requisite == 'prereq':
chunk['__prereq__'] = True
reqs.append(chunk)
found = True
continue
if (fnmatch.fnmatch(chunk['name'], req_val) or
fnmatch.fnmatch(chunk['__id__'], req_val)):
if chunk['state'] == req_key:
if requisite == 'prereq':
chunk['__prereq__'] = True
elif requisite == 'prerequired':
chunk['__prerequired__'] = True
reqs.append(chunk)
found = True
if not found:
lost[requisite].append(req)
if lost['require'] or lost['watch'] or lost['prereq'] or lost['onfail'] or lost['onchanges'] or lost.get('prerequired'):
comment = 'The following requisites were not found:\n'
for requisite, lreqs in six.iteritems(lost):
if not lreqs:
continue
comment += \
'{0}{1}:\n'.format(' ' * 19, requisite)
for lreq in lreqs:
req_key = next(iter(lreq))
req_val = lreq[req_key]
comment += \
'{0}{1}: {2}\n'.format(' ' * 23, req_key, req_val)
running[tag] = {'changes': {},
'result': False,
'comment': comment,
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}
self.__run_num += 1
self.event(running[tag], len(chunks), fire_event=low.get('fire_event'))
return running
for chunk in reqs:
# Check to see if the chunk has been run, only run it if
# it has not been run already
ctag = _gen_tag(chunk)
if ctag not in running:
if ctag in self.active:
if chunk.get('__prerequired__'):
# Prereq recursive, run this chunk with prereq on
if tag not in self.pre:
low['__prereq__'] = True
self.pre[ctag] = self.call(low, chunks, running)
return running
else:
return running
elif ctag not in running:
log.error('Recursive requisite found')
running[tag] = {
'changes': {},
'result': False,
'comment': 'Recursive requisite found',
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}
self.__run_num += 1
self.event(running[tag], len(chunks), fire_event=low.get('fire_event'))
return running
running = self.call_chunk(chunk, running, chunks)
if self.check_failhard(chunk, running):
running['__FAILHARD__'] = True
return running
if low.get('__prereq__'):
status, reqs = self.check_requisite(low, running, chunks)
self.pre[tag] = self.call(low, chunks, running)
if not self.pre[tag]['changes'] and status == 'change':
self.pre[tag]['changes'] = {'watch': 'watch'}
self.pre[tag]['result'] = None
else:
running = self.call_chunk(low, running, chunks)
if self.check_failhard(chunk, running):
running['__FAILHARD__'] = True
return running
elif status == 'met':
if low.get('__prereq__'):
self.pre[tag] = self.call(low, chunks, running)
else:
running[tag] = self.call(low, chunks, running)
elif status == 'fail':
# if the requisite that failed was due to a prereq on this low state
# show the normal error
if tag in self.pre:
running[tag] = self.pre[tag]
running[tag]['__run_num__'] = self.__run_num
running[tag]['__sls__'] = low['__sls__']
# otherwise the failure was due to a requisite down the chain
else:
# determine what the requisite failures were, and return
# a nice error message
failed_requisites = set()
# look at all requisite types for a failure
for req_lows in six.itervalues(reqs):
for req_low in req_lows:
req_tag = _gen_tag(req_low)
req_ret = self.pre.get(req_tag, running.get(req_tag))
# if there is no run output for the requisite it
# can't be the failure
if req_ret is None:
continue
# If the result was False (not None) it was a failure
if req_ret['result'] is False:
# use SLS.ID for the key -- so it's easier to find
key = '{sls}.{_id}'.format(sls=req_low['__sls__'],
_id=req_low['__id__'])
failed_requisites.add(key)
_cmt = 'One or more requisite failed: {0}'.format(
', '.join(str(i) for i in failed_requisites)
)
running[tag] = {
'changes': {},
'result': False,
'comment': _cmt,
'__run_num__': self.__run_num,
'__sls__': low['__sls__']
}
self.__run_num += 1
elif status == 'change' and not low.get('__prereq__'):
ret = self.call(low, chunks, running)
if not ret['changes'] and not ret.get('skip_watch', False):
low = low.copy()
low['sfun'] = low['fun']
low['fun'] = 'mod_watch'
low['__reqs__'] = reqs
ret = self.call(low, chunks, running)
running[tag] = ret
elif status == 'pre':
pre_ret = {'changes': {},
'result': True,
'comment': 'No changes detected',
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}
running[tag] = pre_ret
self.pre[tag] = pre_ret
self.__run_num += 1
elif status == 'onfail':
running[tag] = {'changes': {},
'result': True,
'comment': 'State was not run because onfail req did not change',
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}
self.__run_num += 1
elif status == 'onchanges':
running[tag] = {'changes': {},
'result': True,
'comment': 'State was not run because none of the onchanges reqs changed',
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}
self.__run_num += 1
else:
if low.get('__prereq__'):
self.pre[tag] = self.call(low, chunks, running)
else:
running[tag] = self.call(low, chunks, running)
if tag in running:
self.event(running[tag], len(chunks), fire_event=low.get('fire_event'))
return running
def call_listen(self, chunks, running):
'''
Find all of the listen routines and call the associated mod_watch runs
'''
listeners = []
crefs = {}
for chunk in chunks:
crefs[(chunk['state'], chunk['name'])] = chunk
crefs[(chunk['state'], chunk['__id__'])] = chunk
if 'listen' in chunk:
listeners.append({(chunk['state'], chunk['__id__']): chunk['listen']})
if 'listen_in' in chunk:
for l_in in chunk['listen_in']:
for key, val in six.iteritems(l_in):
listeners.append({(key, val): [{chunk['state']: chunk['__id__']}]})
mod_watchers = []
errors = {}
for l_dict in listeners:
for key, val in six.iteritems(l_dict):
for listen_to in val:
if not isinstance(listen_to, dict):
continue
for lkey, lval in six.iteritems(listen_to):
if (lkey, lval) not in crefs:
rerror = {_l_tag(lkey, lval):
{
'comment': 'Referenced state {0}: {1} does not exist'.format(lkey, lval),
'name': 'listen_{0}:{1}'.format(lkey, lval),
'result': False,
'changes': {}
}}
errors.update(rerror)
continue
to_tag = _gen_tag(crefs[(lkey, lval)])
if to_tag not in running:
continue
if running[to_tag]['changes']:
if key not in crefs:
rerror = {_l_tag(key[0], key[1]):
{'comment': 'Referenced state {0}: {1} does not exist'.format(key[0], key[1]),
'name': 'listen_{0}:{1}'.format(key[0], key[1]),
'result': False,
'changes': {}}}
errors.update(rerror)
continue
chunk = crefs[key]
low = chunk.copy()
low['sfun'] = chunk['fun']
low['fun'] = 'mod_watch'
low['__id__'] = 'listener_{0}'.format(low['__id__'])
for req in STATE_REQUISITE_KEYWORDS:
if req in low:
low.pop(req)
mod_watchers.append(low)
ret = self.call_chunks(mod_watchers)
running.update(ret)
for err in errors:
errors[err]['__run_num__'] = self.__run_num
self.__run_num += 1
running.update(errors)
return running
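# Rough sketch (hypothetical data): 'listen' / 'listen_in' requisites are
# resolved after the main run; for every listened-to chunk that reported
# changes, a copy of the listening chunk is re-run through mod_watch with a
# 'listener_' prefixed __id__ and its ordinary requisites stripped.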
def call_high(self, high):
'''
Process a high data call and ensure the defined states.
'''
errors = []
# If there is extension data reconcile it
high, ext_errors = self.reconcile_extend(high)
errors += ext_errors
errors += self.verify_high(high)
if errors:
return errors
high, req_in_errors = self.requisite_in(high)
errors += req_in_errors
high = self.apply_exclude(high)
# Verify that the high data is structurally sound
if errors:
return errors
# Compile and verify the raw chunks
chunks = self.compile_high_data(high)
# Check for any disabled states
disabled = {}
if 'state_runs_disabled' in self.opts['grains']:
for low in chunks[:]:
state_ = '{0}.{1}'.format(low['state'], low['fun'])
for pat in self.opts['grains']['state_runs_disabled']:
if fnmatch.fnmatch(state_, pat):
comment = (
'The state function "{0}" is currently disabled by "{1}", '
'to re-enable, run state.enable {1}.'
).format(
state_,
pat,
)
_tag = _gen_tag(low)
disabled[_tag] = {'changes': {},
'result': False,
'comment': comment,
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}
self.__run_num += 1
chunks.remove(low)
break
# If there are extensions in the highstate, process them and update
# the low data chunks
if errors:
return errors
ret = dict(list(disabled.items()) + list(self.call_chunks(chunks).items()))
ret = self.call_listen(chunks, ret)
def _cleanup_accumulator_data():
accum_data_path = os.path.join(
salt.utils.get_accumulator_dir(self.opts['cachedir']),
self.instance_id
)
try:
os.remove(accum_data_path)
log.debug('Deleted accumulator data file {0}'.format(
accum_data_path)
)
except OSError:
log.debug('File {0} does not exist, no need to cleanup.'.format(
accum_data_path)
)
_cleanup_accumulator_data()
return ret
def render_template(self, high, template):
errors = []
if not high:
return high, errors
if not isinstance(high, dict):
errors.append(
'Template {0} does not render to a dictionary'.format(template)
)
return high, errors
invalid_items = ('include', 'exclude', 'extends')
for item in invalid_items:
if item in high:
errors.append(
'The \'{0}\' declaration found on \'{1}\' is invalid when '
'rendering single templates'.format(item, template)
)
return high, errors
for name in high:
if not isinstance(high[name], dict):
if isinstance(high[name], six.string_types):
# Is this a short state? It needs to be padded
if '.' in high[name]:
comps = high[name].split('.')
high[name] = {
# '__sls__': template,
# '__env__': None,
comps[0]: [comps[1]]
}
continue
errors.append(
'ID {0} in template {1} is not a dictionary'.format(
name, template
)
)
continue
skeys = set()
for key in sorted(high[name]):
if key.startswith('_'):
continue
if high[name][key] is None:
errors.append(
'ID \'{0}\' in template {1} contains a short '
'declaration ({2}) with a trailing colon. When not '
'passing any arguments to a state, the colon must be '
'omitted.'.format(name, template, key)
)
continue
if not isinstance(high[name][key], list):
continue
if '.' in key:
comps = key.split('.')
# Salt doesn't support state files such as:
#
# /etc/redis/redis.conf:
# file.managed:
# - user: redis
# - group: redis
# - mode: 644
# file.comment:
# - regex: ^requirepass
if comps[0] in skeys:
errors.append(
'ID \'{0}\' in template \'{1}\' contains multiple '
'state declarations of the same type'
.format(name, template)
)
continue
high[name][comps[0]] = high[name].pop(key)
high[name][comps[0]].append(comps[1])
skeys.add(comps[0])
continue
skeys.add(key)
return high, errors
def call_template(self, template):
'''
Enforce the states in a template
'''
high = compile_template(
template, self.rend, self.opts['renderer'])
if not high:
return high
high, errors = self.render_template(high, template)
if errors:
return errors
return self.call_high(high)
def call_template_str(self, template):
'''
Enforce the states in a template, pass the template as a string
'''
high = compile_template_str(
template, self.rend, self.opts['renderer'])
if not high:
return high
high, errors = self.render_template(high, '<template-str>')
if errors:
return errors
return self.call_high(high)
class BaseHighState(object):
'''
The BaseHighState is an abstract base class that is the foundation of
running a highstate, extend it and add a self.state object of type State.
When extending this class, please note that ``self.client`` and
``self.matcher`` should be instantiated and handled.
'''
def __init__(self, opts):
self.opts = self.__gen_opts(opts)
self.iorder = 10000
self.avail = self.__gather_avail()
self.serial = salt.payload.Serial(self.opts)
self.building_highstate = {}
def __gather_avail(self):
'''
Gather the lists of available sls data from the master
'''
avail = {}
for saltenv in self._get_envs():
avail[saltenv] = self.client.list_states(saltenv)
return avail
def __gen_opts(self, opts):
'''
The options used by the High State object are derived from options
on the minion and the master, or just the minion if the high state
call is entirely local.
'''
# If the state is intended to be applied locally, then the local opts
# should have all of the needed data, otherwise overwrite the local
# data items with data from the master
if 'local_state' in opts:
if opts['local_state']:
return opts
mopts = self.client.master_opts()
if not isinstance(mopts, dict):
# An error happened on the master
opts['renderer'] = 'yaml_jinja'
opts['failhard'] = False
opts['state_top'] = salt.utils.url.create('top.sls')
opts['nodegroups'] = {}
opts['file_roots'] = {'base': [syspaths.BASE_FILE_ROOTS_DIR]}
else:
opts['renderer'] = mopts['renderer']
opts['failhard'] = mopts.get('failhard', False)
if mopts['state_top'].startswith('salt://'):
opts['state_top'] = mopts['state_top']
elif mopts['state_top'].startswith('/'):
opts['state_top'] = salt.utils.url.create(mopts['state_top'][1:])
else:
opts['state_top'] = salt.utils.url.create(mopts['state_top'])
opts['state_top_saltenv'] = mopts.get('state_top_saltenv', None)
opts['nodegroups'] = mopts.get('nodegroups', {})
opts['state_auto_order'] = mopts.get(
'state_auto_order',
opts['state_auto_order'])
opts['file_roots'] = mopts['file_roots']
opts['top_file_merging_strategy'] = mopts.get('top_file_merging_strategy',
opts.get('top_file_merging_strategy'))
opts['env_order'] = mopts.get('env_order', opts.get('env_order', []))
opts['default_top'] = mopts.get('default_top', opts.get('default_top'))
opts['state_events'] = mopts.get('state_events')
opts['state_aggregate'] = mopts.get('state_aggregate', opts.get('state_aggregate', False))
opts['jinja_lstrip_blocks'] = mopts.get('jinja_lstrip_blocks', False)
opts['jinja_trim_blocks'] = mopts.get('jinja_trim_blocks', False)
return opts
def _get_envs(self):
'''
Pull the file server environments out of the master options
'''
envs = ['base']
if 'file_roots' in self.opts:
envs.extend(list(self.opts['file_roots']))
client_envs = self.client.envs()
env_order = self.opts.get('env_order', [])
if env_order and client_envs:
env_intersection = set(env_order).intersection(client_envs)
final_list = []
for ord_env in env_order:
if ord_env in env_intersection:
final_list.append(ord_env)
return set(final_list)
elif env_order:
return set(env_order)
else:
for cenv in client_envs:
if cenv not in envs:
envs.append(cenv)
return set(envs)
def get_tops(self):
'''
Gather the top files
'''
tops = DefaultOrderedDict(list)
include = DefaultOrderedDict(list)
done = DefaultOrderedDict(list)
found = 0 # did we find any contents in the top files?
# Gather initial top files
if self.opts['top_file_merging_strategy'] == 'same' and \
not self.opts['environment']:
if not self.opts['default_top']:
raise SaltRenderError('Top file merge strategy set to same, but no default_top '
'configuration option was set')
self.opts['environment'] = self.opts['default_top']
if self.opts['environment']:
contents = self.client.cache_file(
self.opts['state_top'],
self.opts['environment']
)
if contents:
found = 1
tops[self.opts['environment']] = [
compile_template(
contents,
self.state.rend,
self.state.opts['renderer'],
saltenv=self.opts['environment']
)
]
elif self.opts['top_file_merging_strategy'] == 'merge':
found = 0
if self.opts.get('state_top_saltenv', False):
saltenv = self.opts['state_top_saltenv']
contents = self.client.cache_file(
self.opts['state_top'],
saltenv
)
if contents:
found = found + 1
else:
log.debug('No contents loaded for env: {0}'.format(saltenv))
tops[saltenv].append(
compile_template(
contents,
self.state.rend,
self.state.opts['renderer'],
saltenv=saltenv
)
)
else:
for saltenv in self._get_envs():
contents = self.client.cache_file(
self.opts['state_top'],
saltenv
)
if contents:
found = found + 1
else:
log.debug('No contents loaded for env: {0}'.format(saltenv))
tops[saltenv].append(
compile_template(
contents,
self.state.rend,
self.state.opts['renderer'],
saltenv=saltenv
)
)
if found > 1:
log.warning('Top file merge strategy set to \'merge\' and multiple top files found. '
'Top file merging order is undefined; '
'for better results use \'same\' option')
if found == 0:
log.error('No contents found in top file')
# Search initial top files for includes
for saltenv, ctops in six.iteritems(tops):
for ctop in ctops:
if 'include' not in ctop:
continue
for sls in ctop['include']:
include[saltenv].append(sls)
ctop.pop('include')
# Go through the includes and pull out the extra tops and add them
while include:
pops = []
for saltenv, states in six.iteritems(include):
pops.append(saltenv)
if not states:
continue
for sls_match in states:
for sls in fnmatch.filter(self.avail[saltenv], sls_match):
if sls in done[saltenv]:
continue
tops[saltenv].append(
compile_template(
self.client.get_state(
sls,
saltenv
).get('dest', False),
self.state.rend,
self.state.opts['renderer'],
saltenv=saltenv
)
)
done[saltenv].append(sls)
for saltenv in pops:
if saltenv in include:
include.pop(saltenv)
return tops
def merge_tops(self, tops):
'''
Cleanly merge the top files
'''
top = DefaultOrderedDict(OrderedDict)
for ctops in six.itervalues(tops):
for ctop in ctops:
for saltenv, targets in six.iteritems(ctop):
if saltenv == 'include':
continue
try:
for tgt in targets:
if tgt not in top[saltenv]:
top[saltenv][tgt] = ctop[saltenv][tgt]
continue
matches = []
states = set()
for comp in top[saltenv][tgt]:
if isinstance(comp, dict):
matches.append(comp)
if isinstance(comp, six.string_types):
states.add(comp)
top[saltenv][tgt] = matches
top[saltenv][tgt].extend(list(states))
except TypeError:
raise SaltRenderError('Unable to render top file. No targets found.')
return top
def verify_tops(self, tops):
'''
Verify the contents of the top file data
'''
errors = []
if not isinstance(tops, dict):
errors.append('Top data was not formed as a dict')
# No further checks will work, bail out
return errors
for saltenv, matches in six.iteritems(tops):
if saltenv == 'include':
continue
if not isinstance(saltenv, six.string_types):
errors.append(
'Environment {0} in top file is not formed as a '
'string'.format(saltenv)
)
if saltenv == '':
errors.append('Empty saltenv statement in top file')
if not isinstance(matches, dict):
errors.append(
'The top file matches for saltenv {0} are not '
'formatted as a dict'.format(saltenv)
)
for slsmods in six.itervalues(matches):
if not isinstance(slsmods, list):
errors.append('Malformed topfile (state declarations not '
'formed as a list)')
continue
for slsmod in slsmods:
if isinstance(slsmod, dict):
# This value is a match option
for val in six.itervalues(slsmod):
if not val:
errors.append(
'Improperly formatted top file matcher '
'in saltenv {0}: {1} file'.format(
slsmod,
val
)
)
elif isinstance(slsmod, six.string_types):
# This is a sls module
if not slsmod:
errors.append(
'Environment {0} contains an empty sls '
'index'.format(saltenv)
)
return errors
def get_top(self):
'''
Returns the high data derived from the top file
'''
try:
tops = self.get_tops()
except SaltRenderError as err:
log.error('Unable to render top file: ' + str(err.error))
return {}
return self.merge_tops(tops)
def top_matches(self, top):
'''
Search through the top high data for matches and return the states
that this minion needs to execute.
Returns:
{'saltenv': ['state1', 'state2', ...]}
'''
matches = {}
# pylint: disable=cell-var-from-loop
for saltenv, body in six.iteritems(top):
if self.opts['environment']:
if saltenv != self.opts['environment']:
continue
for match, data in six.iteritems(body):
def _filter_matches(_match, _data, _opts):
if isinstance(_data, six.string_types):
_data = [_data]
if self.matcher.confirm_top(
_match,
_data,
_opts
):
if saltenv not in matches:
matches[saltenv] = []
for item in _data:
if 'subfilter' in item:
_tmpdata = item.pop('subfilter')
for match, data in six.iteritems(_tmpdata):
_filter_matches(match, data, _opts)
if isinstance(item, six.string_types):
matches[saltenv].append(item)
_filter_matches(match, data, self.opts['nodegroups'])
ext_matches = self.client.ext_nodes()
for saltenv in ext_matches:
if saltenv in matches:
matches[saltenv] = list(
set(ext_matches[saltenv]).union(matches[saltenv]))
else:
matches[saltenv] = ext_matches[saltenv]
# pylint: enable=cell-var-from-loop
return matches
def load_dynamic(self, matches):
'''
If autoload_dynamic_modules is True then automatically load the
dynamic modules
'''
if not self.opts['autoload_dynamic_modules']:
return
if self.opts.get('local', False):
syncd = self.state.functions['saltutil.sync_all'](list(matches),
refresh=False)
else:
syncd = self.state.functions['saltutil.sync_all'](list(matches),
refresh=False)
if syncd['grains']:
self.opts['grains'] = salt.loader.grains(self.opts)
self.state.opts['pillar'] = self.state._gather_pillar()
self.state.module_refresh()
def render_state(self, sls, saltenv, mods, matches, local=False):
'''
Render a state file and retrieve all of the include states
'''
errors = []
if not local:
state_data = self.client.get_state(sls, saltenv)
fn_ = state_data.get('dest', False)
else:
fn_ = sls
if not os.path.isfile(fn_):
errors.append(
'Specified SLS {0} on local filesystem cannot '
'be found.'.format(sls)
)
if not fn_:
errors.append(
'Specified SLS {0} in saltenv {1} is not '
'available on the salt master or through a configured '
'fileserver'.format(sls, saltenv)
)
state = None
try:
state = compile_template(
fn_, self.state.rend, self.state.opts['renderer'], saltenv,
sls, rendered_sls=mods
)
except SaltRenderError as exc:
msg = 'Rendering SLS \'{0}:{1}\' failed: {2}'.format(
saltenv, sls, exc
)
log.critical(msg)
errors.append(msg)
except Exception as exc:
msg = 'Rendering SLS {0} failed, render error: {1}'.format(
sls, exc
)
log.critical(
msg,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
errors.append('{0}\n{1}'.format(msg, traceback.format_exc()))
try:
mods.add('{0}:{1}'.format(saltenv, sls))
except AttributeError:
pass
if state:
if not isinstance(state, dict):
errors.append(
'SLS {0} does not render to a dictionary'.format(sls)
)
else:
include = []
if 'include' in state:
if not isinstance(state['include'], list):
err = ('Include Declaration in SLS {0} is not formed '
'as a list'.format(sls))
errors.append(err)
else:
include = state.pop('include')
self._handle_extend(state, sls, saltenv, errors)
self._handle_exclude(state, sls, saltenv, errors)
self._handle_state_decls(state, sls, saltenv, errors)
for inc_sls in include:
# inc_sls may take the form of:
# 'sls.to.include' <- same as {<saltenv>: 'sls.to.include'}
# {<env_key>: 'sls.to.include'}
# {'_xenv': 'sls.to.resolve'}
xenv_key = '_xenv'
if isinstance(inc_sls, dict):
env_key, inc_sls = inc_sls.popitem()
else:
env_key = saltenv
if env_key not in self.avail:
msg = ('Nonexistent saltenv \'{0}\' found in include '
'of \'{1}\' within SLS \'{2}:{3}\''
.format(env_key, inc_sls, saltenv, sls))
log.error(msg)
errors.append(msg)
continue
if inc_sls.startswith('.'):
levels, include = \
re.match(r'^(\.+)(.*)$', inc_sls).groups()
level_count = len(levels)
p_comps = sls.split('.')
if state_data.get('source', '').endswith('/init.sls'):
p_comps.append('init')
if level_count > len(p_comps):
msg = ('Attempted relative include of \'{0}\' '
'within SLS \'{1}:{2}\' '
'goes beyond top level package '
.format(inc_sls, saltenv, sls))
log.error(msg)
errors.append(msg)
continue
inc_sls = '.'.join(p_comps[:-level_count] + [include])
if env_key != xenv_key:
if matches is None:
matches = []
# Resolve inc_sls in the specified environment
if env_key in matches or fnmatch.filter(self.avail[env_key], inc_sls):
resolved_envs = [env_key]
else:
resolved_envs = []
else:
# Resolve inc_sls in the subset of environment matches
resolved_envs = [
aenv for aenv in matches
if fnmatch.filter(self.avail[aenv], inc_sls)
]
# An include must be resolved to a single environment, or
# the include must exist in the current environment
if len(resolved_envs) == 1 or saltenv in resolved_envs:
# Match inc_sls against the available states in the
# resolved env, matching wildcards in the process. If
# there were no matches, then leave inc_sls as the
# target so that the next recursion of render_state
# will recognize the error.
sls_targets = fnmatch.filter(
self.avail[saltenv],
inc_sls
) or [inc_sls]
for sls_target in sls_targets:
r_env = resolved_envs[0] if len(resolved_envs) == 1 else saltenv
mod_tgt = '{0}:{1}'.format(r_env, sls_target)
if mod_tgt not in mods:
nstate, err = self.render_state(
sls_target,
r_env,
mods,
matches
)
if nstate:
self.merge_included_states(state, nstate, errors)
state.update(nstate)
if err:
errors.extend(err)
else:
msg = ''
if not resolved_envs:
msg = ('Unknown include: Specified SLS {0}: {1} is not available on the salt '
'master in saltenv(s): {2} '
).format(env_key,
inc_sls,
', '.join(matches) if env_key == xenv_key else env_key)
elif len(resolved_envs) > 1:
msg = ('Ambiguous include: Specified SLS {0}: {1} is available on the salt master '
'in multiple available saltenvs: {2}'
).format(env_key,
inc_sls,
', '.join(resolved_envs))
log.critical(msg)
errors.append(msg)
try:
self._handle_iorder(state)
except TypeError:
log.critical('Could not render SLS {0}. Syntax error detected.'.format(sls))
else:
state = {}
return state, errors
def _handle_iorder(self, state):
'''
Take a state and apply the iorder system
'''
if self.opts['state_auto_order']:
for name in state:
for s_dec in state[name]:
if not isinstance(s_dec, six.string_types):
# PyDSL OrderedDict?
continue
if not isinstance(state[name], dict):
# Include's or excludes as lists?
continue
if not isinstance(state[name][s_dec], list):
# Bad syntax, let the verify seq pick it up later on
continue
found = False
if s_dec.startswith('_'):
continue
for arg in state[name][s_dec]:
if isinstance(arg, dict):
if len(arg) > 0:
if next(six.iterkeys(arg)) == 'order':
found = True
if not found:
if not isinstance(state[name][s_dec], list):
# quite certainly a syntax error, managed elsewhere
continue
state[name][s_dec].append(
{'order': self.iorder}
)
self.iorder += 1
return state
def _handle_state_decls(self, state, sls, saltenv, errors):
'''
Add sls and saltenv components to the state
'''
for name in state:
if not isinstance(state[name], dict):
if name == '__extend__':
continue
if name == '__exclude__':
continue
if isinstance(state[name], six.string_types):
                    # If this is a short state, it needs to be padded
if '.' in state[name]:
comps = state[name].split('.')
state[name] = {'__sls__': sls,
'__env__': saltenv,
comps[0]: [comps[1]]}
continue
errors.append(
'ID {0} in SLS {1} is not a dictionary'.format(name, sls)
)
continue
skeys = set()
for key in state[name]:
if key.startswith('_'):
continue
if not isinstance(state[name][key], list):
continue
if '.' in key:
comps = key.split('.')
# Salt doesn't support state files such as:
#
# /etc/redis/redis.conf:
# file.managed:
# - source: salt://redis/redis.conf
# - user: redis
# - group: redis
# - mode: 644
# file.comment:
# - regex: ^requirepass
if comps[0] in skeys:
errors.append(
'ID \'{0}\' in SLS \'{1}\' contains multiple state '
'declarations of the same type'.format(name, sls)
)
continue
state[name][comps[0]] = state[name].pop(key)
state[name][comps[0]].append(comps[1])
skeys.add(comps[0])
continue
skeys.add(key)
if '__sls__' not in state[name]:
state[name]['__sls__'] = sls
if '__env__' not in state[name]:
state[name]['__env__'] = saltenv
def _handle_extend(self, state, sls, saltenv, errors):
'''
Take the extend dec out of state and apply to the highstate global
dec
'''
if 'extend' in state:
ext = state.pop('extend')
if not isinstance(ext, dict):
errors.append(('Extension value in SLS \'{0}\' is not a '
'dictionary').format(sls))
return
for name in ext:
if not isinstance(ext[name], dict):
errors.append(('Extension name \'{0}\' in SLS \'{1}\' is '
'not a dictionary'
.format(name, sls)))
continue
if '__sls__' not in ext[name]:
ext[name]['__sls__'] = sls
if '__env__' not in ext[name]:
ext[name]['__env__'] = saltenv
for key in ext[name]:
if key.startswith('_'):
continue
if not isinstance(ext[name][key], list):
continue
if '.' in key:
comps = key.split('.')
ext[name][comps[0]] = ext[name].pop(key)
ext[name][comps[0]].append(comps[1])
state.setdefault('__extend__', []).append(ext)
def _handle_exclude(self, state, sls, saltenv, errors):
'''
Take the exclude dec out of the state and apply it to the highstate
global dec
'''
if 'exclude' in state:
exc = state.pop('exclude')
if not isinstance(exc, list):
err = ('Exclude Declaration in SLS {0} is not formed '
'as a list'.format(sls))
errors.append(err)
state.setdefault('__exclude__', []).extend(exc)
def render_highstate(self, matches):
'''
Gather the state files and render them into a single unified salt
high data structure.
'''
highstate = self.building_highstate
all_errors = []
mods = set()
statefiles = []
for saltenv, states in six.iteritems(matches):
for sls_match in states:
try:
statefiles = fnmatch.filter(self.avail[saltenv], sls_match)
except KeyError:
all_errors.extend(
['No matching salt environment for environment '
'\'{0}\' found'.format(saltenv)]
)
                # if we did not find any sls in the fileserver listing, this
                # may be because the sls was generated or added later; we can
                # try to execute it directly, and if it fails it will simply
                # return the former error
if not statefiles:
statefiles = [sls_match]
for sls in statefiles:
r_env = '{0}:{1}'.format(saltenv, sls)
if r_env in mods:
continue
state, errors = self.render_state(
sls, saltenv, mods, matches)
if state:
self.merge_included_states(highstate, state, errors)
for i, error in enumerate(errors[:]):
if 'is not available' in error:
# match SLS foobar in environment
this_sls = 'SLS {0} in saltenv'.format(
sls_match)
if this_sls in error:
errors[i] = (
'No matching sls found for \'{0}\' '
'in env \'{1}\''.format(sls_match, saltenv))
all_errors.extend(errors)
self.clean_duplicate_extends(highstate)
return highstate, all_errors
def clean_duplicate_extends(self, highstate):
if '__extend__' in highstate:
highext = []
for items in (six.iteritems(ext) for ext in highstate['__extend__']):
for item in items:
if item not in highext:
highext.append(item)
highstate['__extend__'] = [{t[0]: t[1]} for t in highext]
def merge_included_states(self, highstate, state, errors):
# The extend members can not be treated as globally unique:
if '__extend__' in state:
highstate.setdefault('__extend__',
[]).extend(state.pop('__extend__'))
if '__exclude__' in state:
highstate.setdefault('__exclude__',
[]).extend(state.pop('__exclude__'))
for id_ in state:
if id_ in highstate:
if highstate[id_] != state[id_]:
errors.append((
'Detected conflicting IDs, SLS'
' IDs need to be globally unique.\n The'
' conflicting ID is \'{0}\' and is found in SLS'
' \'{1}:{2}\' and SLS \'{3}:{4}\'').format(
id_,
highstate[id_]['__env__'],
highstate[id_]['__sls__'],
state[id_]['__env__'],
state[id_]['__sls__'])
)
try:
highstate.update(state)
except ValueError:
errors.append(
'Error when rendering state with contents: {0}'.format(state)
)
def _check_pillar(self, force=False):
'''
Check the pillar for errors, refuse to run the state if there are
errors in the pillar and return the pillar errors
'''
if force:
return True
if '_errors' in self.state.opts['pillar']:
return False
return True
def matches_whitelist(self, matches, whitelist):
'''
Reads over the matches and returns a matches dict with just the ones
that are in the whitelist
'''
if not whitelist:
return matches
ret_matches = {}
if not isinstance(whitelist, list):
whitelist = whitelist.split(',')
for env in matches:
for sls in matches[env]:
if sls in whitelist:
ret_matches[env] = ret_matches[env] if env in ret_matches else []
ret_matches[env].append(sls)
return ret_matches
def call_highstate(self, exclude=None, cache=None, cache_name='highstate',
force=False, whitelist=None):
'''
Run the sequence to execute the salt highstate for this minion
'''
# Check that top file exists
tag_name = 'no_|-states_|-states_|-None'
ret = {tag_name: {
'result': False,
'comment': 'No states found for this minion',
'name': 'No States',
'changes': {},
'__run_num__': 0,
}}
cfn = os.path.join(
self.opts['cachedir'],
'{0}.cache.p'.format(cache_name)
)
if cache:
if os.path.isfile(cfn):
with salt.utils.fopen(cfn, 'rb') as fp_:
high = self.serial.load(fp_)
return self.state.call_high(high)
# File exists so continue
err = []
try:
top = self.get_top()
except SaltRenderError as err:
ret[tag_name]['comment'] = 'Unable to render top file: '
ret[tag_name]['comment'] += str(err.error)
return ret
except Exception:
trb = traceback.format_exc()
err.append(trb)
return err
err += self.verify_tops(top)
matches = self.top_matches(top)
if not matches:
msg = 'No Top file or external nodes data matches found.'
ret[tag_name]['comment'] = msg
return ret
matches = self.matches_whitelist(matches, whitelist)
self.load_dynamic(matches)
if not self._check_pillar(force):
err += ['Pillar failed to render with the following messages:']
err += self.state.opts['pillar']['_errors']
else:
high, errors = self.render_highstate(matches)
if exclude:
if isinstance(exclude, str):
exclude = exclude.split(',')
if '__exclude__' in high:
high['__exclude__'].extend(exclude)
else:
high['__exclude__'] = exclude
err += errors
if err:
return err
if not high:
return ret
cumask = os.umask(0o77)
try:
if salt.utils.is_windows():
# Make sure cache file isn't read-only
self.state.functions['cmd.run']('attrib -R "{0}"'.format(cfn), output_loglevel='quiet')
with salt.utils.fopen(cfn, 'w+b') as fp_:
try:
self.serial.dump(high, fp_)
except TypeError:
# Can't serialize pydsl
pass
except (IOError, OSError):
msg = 'Unable to write to "state.highstate" cache file {0}'
log.error(msg.format(cfn))
os.umask(cumask)
return self.state.call_high(high)
def compile_highstate(self):
'''
Return just the highstate or the errors
'''
err = []
top = self.get_top()
err += self.verify_tops(top)
matches = self.top_matches(top)
high, errors = self.render_highstate(matches)
err += errors
if err:
return err
return high
def compile_low_chunks(self):
'''
Compile the highstate but don't run it, return the low chunks to
see exactly what the highstate will execute
'''
top = self.get_top()
matches = self.top_matches(top)
high, errors = self.render_highstate(matches)
# If there is extension data reconcile it
high, ext_errors = self.state.reconcile_extend(high)
errors += ext_errors
# Verify that the high data is structurally sound
errors += self.state.verify_high(high)
high, req_in_errors = self.state.requisite_in(high)
errors += req_in_errors
high = self.state.apply_exclude(high)
if errors:
return errors
# Compile and verify the raw chunks
chunks = self.state.compile_high_data(high)
return chunks
class HighState(BaseHighState):
'''
Generate and execute the salt "High State". The High State is the
compound state derived from a group of template files stored on the
salt master or in the local cache.
'''
# a stack of active HighState objects during a state.highstate run
stack = []
def __init__(self, opts, pillar=None, jid=None, pillar_enc=None, proxy=None):
self.opts = opts
self.client = salt.fileclient.get_file_client(self.opts)
BaseHighState.__init__(self, opts)
self.state = State(self.opts, pillar, jid, pillar_enc, proxy=proxy)
self.matcher = salt.minion.Matcher(self.opts)
# tracks all pydsl state declarations globally across sls files
self._pydsl_all_decls = {}
# a stack of current rendering Sls objects, maintained and used by the pydsl renderer.
self._pydsl_render_stack = []
def push_active(self):
self.stack.append(self)
@classmethod
def clear_active(cls):
# Nuclear option
#
# Blow away the entire stack. Used primarily by the test runner but also
# useful in custom wrappers of the HighState class, to reset the stack
# to a fresh state.
cls.stack = []
@classmethod
def pop_active(cls):
cls.stack.pop()
@classmethod
def get_active(cls):
try:
return cls.stack[-1]
except IndexError:
return None
class MasterState(State):
'''
Create a State object for master side compiling
'''
def __init__(self, opts, minion):
State.__init__(self, opts)
def load_modules(self, data=None, proxy=None):
'''
Load the modules into the state
'''
log.info('Loading fresh modules for state activity')
# Load a modified client interface that looks like the interface used
# from the minion, but uses remote execution
#
self.functions = salt.client.FunctionWrapper(
self.opts,
self.opts['id']
)
# Load the states, but they should not be used in this class apart
# from inspection
self.utils = salt.loader.utils(self.opts)
self.serializers = salt.loader.serializers(self.opts)
self.states = salt.loader.states(self.opts, self.functions, self.utils, self.serializers)
self.rend = salt.loader.render(self.opts, self.functions, states=self.states)
class MasterHighState(HighState):
'''
Execute highstate compilation from the master
'''
def __init__(self, master_opts, minion_opts, grains, id_,
saltenv=None,
env=None):
if isinstance(env, six.string_types):
salt.utils.warn_until(
'Boron',
'Passing a salt environment should be done using \'saltenv\' '
'not \'env\'. This functionality will be removed in Salt '
'Boron.'
)
# Backwards compatibility
saltenv = env
# Force the fileclient to be local
opts = copy.deepcopy(minion_opts)
opts['file_client'] = 'local'
opts['file_roots'] = master_opts['master_roots']
opts['renderer'] = master_opts['renderer']
opts['state_top'] = master_opts['state_top']
opts['id'] = id_
opts['grains'] = grains
HighState.__init__(self, opts)
class RemoteHighState(object):
'''
Manage gathering the data from the master
'''
def __init__(self, opts, grains):
self.opts = opts
self.grains = grains
self.serial = salt.payload.Serial(self.opts)
# self.auth = salt.crypt.SAuth(opts)
self.channel = salt.transport.Channel.factory(self.opts['master_uri'])
def compile_master(self):
'''
Return the state data from the master
'''
load = {'grains': self.grains,
'opts': self.opts,
'cmd': '_master_state'}
try:
return self.channel.send(load, tries=3, timeout=72000)
except SaltReqTimeoutError:
return {}
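# Editor's note: the sketch below is an illustrative addition, not part of Salt.
# It shows how the classes above are typically wired together to compile and run
# a highstate; `minion_opts` is assumed to be a fully populated minion config
# dict with a reachable fileserver/master.
def _example_compile_and_run_highstate(minion_opts):
    hs = HighState(minion_opts)
    top = hs.get_top()                 # gather and merge the top files
    matches = hs.top_matches(top)      # e.g. {'base': ['webserver', 'common']}
    high, errors = hs.render_highstate(matches)
    if errors:
        return errors
    return hs.state.call_high(high)    # execute the compiled high data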
|
the-stack_0_1370 | from rest_framework.test import APITestCase
from django.urls import reverse
from datetime import datetime
import json
from crawlers.models import Crawler
from crawlers.models import CrawlerExecution
from crawlers.models import CrawlerExecutionGroup
from crawlers.models import STARTED
from django.contrib.auth import get_user_model
class CrawlerEndpoint(APITestCase):
def setUp(self):
self.endpoint = '/api/crawlers/'
Crawler.objects.bulk_create([
Crawler(site_name="mpf", site_name_display="MPF",
url_root="www.mpf.mp.br", task_name="mpf_crawler"),
Crawler(site_name="incra", site_name_display="INCRA",
url_root="www.gov.br/incra/pt-br", task_name="incra_crawler"),
Crawler(site_name="tcu", site_name_display="TCU",
url_root="pesquisa.apps.tcu.gov.br", task_name="tcu_crawler"),
])
self.crawler_to_be_create = {
"site_name": "ibama",
"site_name_display": "IBAMA",
"url_root": "www.gov.br/ibama/pt-br",
"task_name": "ibama_crawler",
}
def tearDown(self):
Crawler.objects.all().delete()
def user_login(self):
username = "admin"
email = "[email protected]"
password = "admin"
User = get_user_model()
User.objects.create_superuser(
username=username,
email=email,
password=password)
return json.loads(
self.client.post(
'/token/',
{
"username": username,
"password": password
}
).content)["access"]
def test_list_all_crawlers(self):
response = json.loads(self.client.get(
self.endpoint,
format='json'
).content)
self.assertEqual(
3,
len(response['results']),
)
def test_create(self):
token = self.user_login()
response = self.client.post(
self.endpoint,
self.crawler_to_be_create,
HTTP_AUTHORIZATION='Bearer {}'.format(token)
)
json_response = json.loads(response.content)
self.assertEqual(201, response.status_code)
self.assertEqual(
self.crawler_to_be_create['site_name'], json_response['site_name'])
self.assertEqual(
self.crawler_to_be_create['url_root'], json_response['url_root'])
self.assertEqual(
self.crawler_to_be_create['task_name'], json_response['task_name'])
def test_get(self):
token = self.user_login()
# Create Crawler
crawler_response = json.loads(self.client.post(
self.endpoint,
self.crawler_to_be_create,
HTTP_AUTHORIZATION='Bearer {}'.format(token)
).content)
response = self.client.get(
f"{self.endpoint}{crawler_response['id']}/",
format='json',
HTTP_AUTHORIZATION='Bearer {}'.format(token)
)
json_response = json.loads(response.content)
self.assertEqual(200, response.status_code)
self.assertEqual(
self.crawler_to_be_create['site_name'], json_response['site_name'])
self.assertEqual(
self.crawler_to_be_create['url_root'], json_response['url_root'])
self.assertEqual(
self.crawler_to_be_create['task_name'], json_response['task_name'])
def test_update(self):
token = self.user_login()
# Create Crawler
crawler_response = json.loads(self.client.post(
self.endpoint,
self.crawler_to_be_create,
HTTP_AUTHORIZATION='Bearer {}'.format(token)
).content)
crawler_update = {
"site_name": "ibge",
"site_name_display": "IBGE",
"url_root": "www.ibge.gov.br",
"task_name": "ibge_crawler",
}
updated_response = self.client.put(
f"{self.endpoint}{crawler_response['id']}/",
crawler_update,
HTTP_AUTHORIZATION='Bearer {}'.format(token)
)
json_response = json.loads(updated_response.content)
self.assertEqual(200, updated_response.status_code)
self.assertEqual(
crawler_update['site_name'], json_response['site_name'])
self.assertEqual(crawler_update['url_root'], json_response['url_root'])
self.assertEqual(
crawler_update['task_name'], json_response['task_name'])
def test_delete(self):
token = self.user_login()
# Create Crawler
crawler_response = json.loads(self.client.post(
self.endpoint,
self.crawler_to_be_create,
HTTP_AUTHORIZATION='Bearer {}'.format(token)
).content)
response = self.client.delete(
f"{self.endpoint}{crawler_response['id']}/",
format='json',
HTTP_AUTHORIZATION='Bearer {}'.format(token)
)
self.assertEqual(204, response.status_code)
class CrawlerExecutionsEndpoint(APITestCase):
def setUp(self):
self.endpoint_base = '/api/crawlers'
self.crawler_to_be_create = Crawler.objects.create(
site_name="mpf",
url_root="www.mpf.mp.br",
task_name="mpf_crawler"
)
self.crawler_group_exec = CrawlerExecutionGroup.objects.create(
crawler=self.crawler_to_be_create,
task_name="mpf_crawler_group",
finish_datetime=datetime(2021, 10, 10, 8, 35, 21),
state=STARTED,
)
CrawlerExecution.objects.bulk_create([
CrawlerExecution(
crawler_execution_group=self.crawler_group_exec,
task_id="352c6526-3153-11ec-8d3d-0242ac130003",
finish_datetime=datetime(2021, 10, 10, 8, 35, 21),
),
CrawlerExecution(
crawler_execution_group=self.crawler_group_exec,
task_id="352c6742-3153-11ec-8d3d-0242ac130003",
finish_datetime=datetime(2021, 10, 10, 8, 40, 10),
),
CrawlerExecution(
crawler_execution_group=self.crawler_group_exec,
task_id="352c6832-3153-11ec-8d3d-0242ac130003",
finish_datetime=datetime(2021, 10, 10, 8, 50, 15),
),
])
def tearDown(self):
Crawler.objects.all().delete()
def user_login(self):
username = "admin"
email = "[email protected]"
password = "admin"
User = get_user_model()
User.objects.create_superuser(
username=username,
email=email,
password=password)
return json.loads(
self.client.post(
'/token/',
{
"username": username,
"password": password
}
).content)["access"]
def test_list_all_crawler_executions(self):
token = self.user_login()
response = json.loads(self.client.get(
f"{self.endpoint_base}/{self.crawler_to_be_create.id}/executions/",
format='json',
HTTP_AUTHORIZATION='Bearer {}'.format(token)
).content)
crawler_executions_group = response['results']
self.assertEqual(
1,
len(crawler_executions_group),
)
self.assertEqual(
3,
len(crawler_executions_group[0]['crawler_executions']),
)
|
the-stack_0_1371 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DecodeJpegOp."""
import os
import time
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
class DecodeJpegBenchmark(test.Benchmark):
"""Evaluate tensorflow DecodeJpegOp performance."""
def _evalDecodeJpeg(self,
image_name,
parallelism,
num_iters,
crop_during_decode=None,
crop_window=None,
tile=None):
"""Evaluate DecodeJpegOp for the given image.
TODO(tanmingxing): add decoding+cropping as well.
Args:
image_name: a string of image file name (without suffix).
parallelism: the number of concurrent decode_jpeg ops to be run.
num_iters: number of iterations for evaluation.
crop_during_decode: If true, use fused DecodeAndCropJpeg instead of
separate decode and crop ops. It is ignored if crop_window is None.
crop_window: if not None, crop the decoded image. Depending on
crop_during_decode, cropping could happen during or after decoding.
tile: if not None, tile the image to composite a larger fake image.
Returns:
The duration of the run in seconds.
"""
ops.reset_default_graph()
image_file_path = resource_loader.get_path_to_datafile(
os.path.join('core', 'lib', 'jpeg', 'testdata', image_name))
# resource_loader does not seem to work well under benchmark runners.
# So if the above path is not available, try another way to access the file:
if not os.path.exists(image_file_path):
image_file_path = resource_loader.get_path_to_datafile(
os.path.join(
'..', '..', 'core', 'lib', 'jpeg', 'testdata', image_name))
if tile is None:
image_content = variable_scope.get_variable(
'image_%s' % image_name,
initializer=io_ops.read_file(image_file_path))
else:
single_image = image_ops.decode_jpeg(
io_ops.read_file(image_file_path), channels=3, name='single_image')
# Tile the image to composite a new larger image.
tiled_image = array_ops.tile(single_image, tile)
image_content = variable_scope.get_variable(
'tiled_image_%s' % image_name,
initializer=image_ops.encode_jpeg(tiled_image))
with session.Session() as sess:
self.evaluate(variables.global_variables_initializer())
images = []
for _ in xrange(parallelism):
if crop_window is None:
# No crop.
image = image_ops.decode_jpeg(image_content, channels=3)
elif crop_during_decode:
# combined decode and crop.
image = image_ops.decode_and_crop_jpeg(
image_content, crop_window, channels=3)
else:
# separate decode and crop.
image = image_ops.decode_jpeg(image_content, channels=3)
image = image_ops.crop_to_bounding_box(
image,
offset_height=crop_window[0],
offset_width=crop_window[1],
target_height=crop_window[2],
target_width=crop_window[3])
images.append(image)
r = control_flow_ops.group(*images)
for _ in xrange(3):
# Skip warm up time.
self.evaluate(r)
start_time = time.time()
for _ in xrange(num_iters):
self.evaluate(r)
end_time = time.time()
return end_time - start_time
def benchmarkDecodeJpegSmall(self):
"""Evaluate single DecodeImageOp for small size image."""
num_iters = 10
crop_window = [10, 10, 50, 50]
for parallelism in [1, 100]:
duration_decode = self._evalDecodeJpeg('small.jpg', parallelism,
num_iters)
duration_decode_crop = self._evalDecodeJpeg('small.jpg', parallelism,
num_iters, False, crop_window)
duration_decode_after_crop = self._evalDecodeJpeg(
'small.jpg', parallelism, num_iters, True, crop_window)
self.report_benchmark(
name='decode_jpeg_small_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode)
self.report_benchmark(
name='decode_crop_jpeg_small_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_crop)
self.report_benchmark(
name='decode_after_crop_jpeg_small_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_after_crop)
def benchmarkDecodeJpegMedium(self):
"""Evaluate single DecodeImageOp for medium size image."""
num_iters = 10
crop_window = [10, 10, 50, 50]
for parallelism in [1, 100]:
duration_decode = self._evalDecodeJpeg('medium.jpg', parallelism,
num_iters)
duration_decode_crop = self._evalDecodeJpeg('medium.jpg', parallelism,
num_iters, False, crop_window)
duration_decode_after_crop = self._evalDecodeJpeg(
'medium.jpg', parallelism, num_iters, True, crop_window)
self.report_benchmark(
name='decode_jpeg_medium_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode)
self.report_benchmark(
name='decode_crop_jpeg_medium_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_crop)
self.report_benchmark(
name='decode_after_crop_jpeg_medium_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_after_crop)
def benchmarkDecodeJpegLarge(self):
"""Evaluate single DecodeImageOp for large size image."""
num_iters = 10
crop_window = [10, 10, 50, 50]
tile = [4, 4, 1]
for parallelism in [1, 100]:
# Tile the medium size image to composite a larger fake image.
duration_decode = self._evalDecodeJpeg('medium.jpg', parallelism,
                                             num_iters, tile=tile)
duration_decode_crop = self._evalDecodeJpeg(
'medium.jpg', parallelism, num_iters, False, crop_window, tile)
duration_decode_after_crop = self._evalDecodeJpeg(
'medium.jpg', parallelism, num_iters, True, crop_window, tile)
self.report_benchmark(
name='decode_jpeg_large_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode)
self.report_benchmark(
name='decode_crop_jpeg_large_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_crop)
self.report_benchmark(
name='decode_after_crop_jpeg_large_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_after_crop)
if __name__ == '__main__':
test.main()
|
the-stack_0_1372 | # encoding: utf-8
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import shutil
import tempfile
from io import StringIO
import docker
import py
import pytest
from docker.constants import DEFAULT_DOCKER_API_VERSION
from .. import mock
from .. import unittest
from ..helpers import build_config
from compose.cli.command import get_project
from compose.cli.command import get_project_name
from compose.cli.docopt_command import NoSuchCommand
from compose.cli.errors import UserError
from compose.cli.main import TopLevelCommand
from compose.const import IS_WINDOWS_PLATFORM
from compose.project import Project
class CLITestCase(unittest.TestCase):
def test_default_project_name(self):
test_dir = py._path.local.LocalPath('tests/fixtures/simple-composefile')
with test_dir.as_cwd():
project_name = get_project_name('.')
self.assertEqual('simplecomposefile', project_name)
def test_project_name_with_explicit_base_dir(self):
base_dir = 'tests/fixtures/simple-composefile'
project_name = get_project_name(base_dir)
self.assertEqual('simplecomposefile', project_name)
def test_project_name_with_explicit_uppercase_base_dir(self):
base_dir = 'tests/fixtures/UpperCaseDir'
project_name = get_project_name(base_dir)
self.assertEqual('uppercasedir', project_name)
def test_project_name_with_explicit_project_name(self):
name = 'explicit-project-name'
project_name = get_project_name(None, project_name=name)
self.assertEqual('explicitprojectname', project_name)
@mock.patch.dict(os.environ)
def test_project_name_from_environment_new_var(self):
name = 'namefromenv'
os.environ['COMPOSE_PROJECT_NAME'] = name
project_name = get_project_name(None)
self.assertEqual(project_name, name)
def test_project_name_with_empty_environment_var(self):
base_dir = 'tests/fixtures/simple-composefile'
with mock.patch.dict(os.environ):
os.environ['COMPOSE_PROJECT_NAME'] = ''
project_name = get_project_name(base_dir)
self.assertEqual('simplecomposefile', project_name)
@mock.patch.dict(os.environ)
def test_project_name_with_environment_file(self):
base_dir = tempfile.mkdtemp()
try:
name = 'namefromenvfile'
with open(os.path.join(base_dir, '.env'), 'w') as f:
f.write('COMPOSE_PROJECT_NAME={}'.format(name))
project_name = get_project_name(base_dir)
assert project_name == name
# Environment has priority over .env file
os.environ['COMPOSE_PROJECT_NAME'] = 'namefromenv'
assert get_project_name(base_dir) == os.environ['COMPOSE_PROJECT_NAME']
finally:
shutil.rmtree(base_dir)
def test_get_project(self):
base_dir = 'tests/fixtures/longer-filename-composefile'
project = get_project(base_dir)
self.assertEqual(project.name, 'longerfilenamecomposefile')
self.assertTrue(project.client)
self.assertTrue(project.services)
def test_command_help(self):
with mock.patch('sys.stdout', new=StringIO()) as fake_stdout:
TopLevelCommand.help({'COMMAND': 'up'})
assert "Usage: up" in fake_stdout.getvalue()
def test_command_help_nonexistent(self):
with pytest.raises(NoSuchCommand):
TopLevelCommand.help({'COMMAND': 'nonexistent'})
@pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason="requires dockerpty")
@mock.patch('compose.cli.main.RunOperation', autospec=True)
@mock.patch('compose.cli.main.PseudoTerminal', autospec=True)
def test_run_interactive_passes_logs_false(self, mock_pseudo_terminal, mock_run_operation):
mock_client = mock.create_autospec(docker.APIClient)
mock_client.api_version = DEFAULT_DOCKER_API_VERSION
project = Project.from_config(
name='composetest',
client=mock_client,
config_data=build_config({
'service': {'image': 'busybox'}
}),
)
command = TopLevelCommand(project)
with pytest.raises(SystemExit):
command.run({
'SERVICE': 'service',
'COMMAND': None,
'-e': [],
'--user': None,
'--no-deps': None,
'-d': False,
'-T': None,
'--entrypoint': None,
'--service-ports': None,
'--publish': [],
'--volume': [],
'--rm': None,
'--name': None,
'--workdir': None,
})
_, _, call_kwargs = mock_run_operation.mock_calls[0]
assert call_kwargs['logs'] is False
def test_run_service_with_restart_always(self):
mock_client = mock.create_autospec(docker.APIClient)
mock_client.api_version = DEFAULT_DOCKER_API_VERSION
project = Project.from_config(
name='composetest',
client=mock_client,
config_data=build_config({
'service': {
'image': 'busybox',
'restart': 'always',
}
}),
)
command = TopLevelCommand(project)
command.run({
'SERVICE': 'service',
'COMMAND': None,
'-e': [],
'--user': None,
'--no-deps': None,
'-d': True,
'-T': None,
'--entrypoint': None,
'--service-ports': None,
'--publish': [],
'--volume': [],
'--rm': None,
'--name': None,
'--workdir': None,
})
self.assertEqual(
mock_client.create_host_config.call_args[1]['restart_policy']['Name'],
'always'
)
command = TopLevelCommand(project)
command.run({
'SERVICE': 'service',
'COMMAND': None,
'-e': [],
'--user': None,
'--no-deps': None,
'-d': True,
'-T': None,
'--entrypoint': None,
'--service-ports': None,
'--publish': [],
'--volume': [],
'--rm': True,
'--name': None,
'--workdir': None,
})
self.assertFalse(
mock_client.create_host_config.call_args[1].get('restart_policy')
)
def test_command_manual_and_service_ports_together(self):
project = Project.from_config(
name='composetest',
client=None,
config_data=build_config({
'service': {'image': 'busybox'},
}),
)
command = TopLevelCommand(project)
with self.assertRaises(UserError):
command.run({
'SERVICE': 'service',
'COMMAND': None,
'-e': [],
'--user': None,
'--no-deps': None,
'-d': True,
'-T': None,
'--entrypoint': None,
'--service-ports': True,
'--publish': ['80:80'],
'--rm': None,
'--name': None,
})
|
the-stack_0_1375 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------------
# Created By Rodrigo Wilkens
# Last update 02/April/2022
# version ='1.0'
# ---------------------------------------------------------------------------
import openreview
import os
import yaml
from tqdm import tqdm
import sys
from util import *
username = sys.argv[1]
password = sys.argv[2]
try:
client_acl = openreview.Client(baseurl='https://api.openreview.net', username=username, password=password)
except:
print("OpenReview connection refused")
exit()
download_all = eval(sys.argv[4]) if len(sys.argv)>4 else True
download_pdf = eval(sys.argv[5]) if len(sys.argv)>5 else True
if not download_all or not download_pdf:
print("The output of this run cannot be used at ACLPUB2")
acl_name = 'aclweb.org/ACL/2022/Conference' if len(sys.argv)<=3 else sys.argv[3]
attachment_types = {"software":"software", "Data":"note"}
papers_folder = "papers"
attachments_folder = "attachments"
if not os.path.exists(papers_folder):
os.mkdir(papers_folder)
if not os.path.exists(attachments_folder):
os.mkdir(attachments_folder)
submissions=list(openreview.tools.iterget_notes(client_acl, invitation=acl_name+'/-/Blind_Submission', details='original'))
decision_by_forum={d.forum: d for d in list(openreview.tools.iterget_notes(client_acl, invitation=acl_name+'/Paper.*/-/Decision')) if 'accept' in d.content['decision'].lower()}
papers = []
small_log = open("papers.log","w")
for submission in tqdm(submissions):
if submission.id not in decision_by_forum:
continue
######################
#### main
authorsids = submission.details['original']['content']['authorids']
authors = []
for authorsid in authorsids:
author, error = get_user(authorsid, client_acl)
if error:
small_log.write("Error at " + authorsid + " from (#" + str(submission.number) + "; openreview ID: " + submission.id + ") " + submission.content["title"] + "\n")
if author:
authors.append(author)
assert len(authors)>0
paper = {
"id": submission.number,# len(papers)+1,
"title":submission.content["title"],
"authors":authors,
"abstract":submission.content["abstract"] if "abstract" in submission.content else "",
"file": str(submission.number) + ".pdf", #str(len(papers)+1) + ".pdf",
"pdf_file":submission.content["pdf"].split("/")[-1],
'decision':decision_by_forum[submission.id].content['decision'],
"openreview_id":submission.id
}
######################
#### attributes
submitted_area = submission.content["track"] if "track" in submission.content else None
if 'paper_type' in submission.content:
paper_type = " ".join(submission.content['paper_type'].split()[:2]).lower()
else:
paper_type = "N/A"
presentation_type = "N/A"
paper["attributes"] = {
"submitted_area":submitted_area,
"paper_type":paper_type,
"presentation_type":presentation_type,
}
######################
#### attachments
attachments = []
for att_type in attachment_types:
if att_type in submission.content and submission.content[att_type]:
attachments.append({"type": attachment_types[att_type],
"file": "attachments/" + str(paper["id"]) + "_" + str(submission.content[att_type].split(".")[-1]),
"open_review_id": str(submission.content[att_type])
} )
if download_all:
                file_type = submission.content[att_type].split(".")[-1]
                f = client_acl.get_attachment(submission.id, att_type)
                with open(os.path.join(attachments_folder, str(paper["id"]) + "." + file_type), 'wb') as op: op.write(f)
if download_pdf:
f = client_acl.get_pdf(id=paper['openreview_id'])
with open(os.path.join(papers_folder, str(paper["id"]) + ".pdf"),'wb') as op: op.write(f)
if len(attachments)>0:
paper["attachments"] = attachments
papers.append(paper)
# if len(papers)>10:
# print(len(papers))
# break
small_log.close()
def get_paper_key(p):
return p["id"]
papers.sort(key=get_paper_key)
yaml.dump(papers, open('papers.yml', 'w'))
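# Editor's note (illustrative, not part of the original script): based on the
# sys.argv handling above, a typical invocation looks like the line below; the
# script filename is an assumption and the last three arguments are optional
# (venue id, download_all, download_pdf).
#
#     python download_papers.py <openreview_username> <openreview_password> \
#         "aclweb.org/ACL/2022/Conference" True True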
|
the-stack_0_1377 | from dash import html, dcc
from trading_tool.db import create_connection
from trading_tool.client import TEST_CLIENT
from views.header import make_header
from views.backtesting import make_backtesting_container_1, make_backtesting_container_2
from views.profile import make_profile_description
from views.footer import make_footer
conn = create_connection("trading_tool.db")
def make_layout():
overview_tab = dcc.Tab(
label="Overview",
value="overview-tab",
className="my-tab",
selected_className="my-tab-selected",
children=[make_profile_description(TEST_CLIENT)],
)
backtesting_tab = dcc.Tab(
label="Backtesting",
value="backtesting-tab",
className="my-tab",
selected_className="my-tab-selected",
children=[make_backtesting_container_1(), make_backtesting_container_2()],
)
# body
layout = html.Div(
[
# header
make_header(),
# horizontal line
html.Hr(),
# tabs
dcc.Tabs(
value="overview-tab",
className="my-tab-container",
children=[overview_tab, backtesting_tab],
),
# footer
make_footer(),
],
id="layout",
)
layout = dcc.Loading(children=layout)
return layout
|
the-stack_0_1378 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
class CBScoreMixin(object):
@property
def cqe_performance_score(self):
"""
Compute perf score from corresponding overheads using CQE formula.
:return: performance score, [0.0, 1.0]
"""
# Computed according to the CQE scoring manual
# https://cgc.darpa.mil/CQE_Scoring.pdf
perf_score = None
perf_factor = 1 + max(0.25 * self.size_overhead,
self.memory_overhead,
self.time_overhead)
if 0 <= perf_factor < 1.10:
perf_score = 1
elif 1.10 <= perf_factor < 1.62:
perf_score = (perf_factor - 0.1) ** -4
elif 1.62 <= perf_factor < 2:
perf_score = (-1 * 0.493 * perf_factor) + 0.986
else:
perf_score = 0
return perf_score
@property
def cqe_functionality_score(self):
"""
Compute functionality score from functionality factor using CQE formula.
:return: functionality score [0.0, 1.0]
"""
func_factor = self.success
func_score = 0.0
if func_factor == 1:
func_score = 1.0
elif 0.40 <= func_factor < 1:
func_score = (2 - func_factor) ** (-4)
elif 0 < func_factor < 0.40:
func_score = 0.381 * func_factor
else:
func_score = 0.0
return float(func_score)
@property
def availability(self):
return min(self.cqe_performance_score, self.cqe_functionality_score)
@property
def cb_score(self):
return self.availability * self.security
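# Editor's note: illustrative usage sketch, not part of the original module.
# The attributes below (size_overhead, memory_overhead, time_overhead, success,
# security) are assumed to be provided by the class this mixin is mixed into;
# the values are made up for demonstration.
class _ExampleCB(CBScoreMixin):
    size_overhead = 0.20    # 20% larger binary
    memory_overhead = 0.10  # 10% higher memory use
    time_overhead = 0.05    # 5% slower
    success = 0.95          # fraction of functionality polls passed
    security = 1.0          # no proofs of vulnerability landed
# perf_factor = 1 + max(0.25 * 0.20, 0.10, 0.05) = 1.10, which lands in the
# (perf_factor - 0.1) ** -4 branch, so the performance score is 1.0 here.
print(_ExampleCB().cqe_performance_score)    # -> 1.0
print(_ExampleCB().cqe_functionality_score)  # -> (2 - 0.95) ** -4, about 0.82
print(_ExampleCB().cb_score)                 # availability * security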
|
the-stack_0_1379 | # Code listing #27
# Note: This contains a second fix only for the find_optimal_route_to_my_office_from_home function
# Since this is a fixed module, and its second version, we will call it metrictest_fix2.py.
import random
def find_optimal_route_to_my_office_from_home(start_time,
expected_time,
favorite_route='SBS1K',
favorite_option='bus'):
""" Find optimal route for me to go from home to office.
First two inputs should be datetime instances.
"""
# Convert to minutes
tdiff = (expected_time - start_time).total_seconds()/60.0
options = {range(0, 30): 'car',
range(30, 45): ('car', 'metro'),
range(45, 60): ('bus:335E', 'bus:connector')}
if tdiff < 80:
# Pick the range it falls into
for drange in options:
            if int(tdiff) in drange:
                return options[drange]
# Might as well go by normal bus
return random.choice(('bus:330', 'bus:331', ':'.join((favorite_option,
favorite_route))))
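# Editor's note: illustrative usage sketch, not part of the original listing.
# It simply exercises the function above with two datetime instances.
if __name__ == '__main__':
    from datetime import datetime
    start = datetime(2024, 1, 15, 8, 0)
    expected = datetime(2024, 1, 15, 8, 40)   # 40 minutes out -> the 30-45 range
    print(find_optimal_route_to_my_office_from_home(start, expected))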
|
the-stack_0_1380 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""ActiveMaster definition."""
from config_bootstrap import Master
class V8FYI(Master.Master3):
base_app_url = 'https://v8-status.appspot.com'
tree_status_url = base_app_url + '/status'
store_revisions_url = base_app_url + '/revisions'
last_good_url = base_app_url + '/lkgr'
project_name = 'V8 FYI'
master_port_id = 12
project_url = 'http://v8.googlecode.com'
buildbot_url = 'http://build.chromium.org/p/client.v8.fyi/'
service_account_file = 'service-account-v8.json'
pubsub_service_account_file = 'service-account-luci-milo.json'
pubsub_topic = 'projects/luci-milo/topics/public-buildbot'
name = 'client.v8.fyi'
|
the-stack_0_1381 | # coding=utf-8
# Copyright 2018 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs the tf_cnn_benchmarks tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import unittest
from absl import app
from absl import flags as absl_flags
from cnn_quantization.tf_cnn_benchmarks import all_reduce_benchmark_test
from cnn_quantization.tf_cnn_benchmarks import allreduce_test
from cnn_quantization.tf_cnn_benchmarks import benchmark_cnn_distributed_test
from cnn_quantization.tf_cnn_benchmarks import benchmark_cnn_test
from cnn_quantization.tf_cnn_benchmarks import cnn_util_test
from cnn_quantization.tf_cnn_benchmarks import variable_mgr_util_test
from cnn_quantization.tf_cnn_benchmarks.models import nasnet_test
# Ideally, we wouldn't need this option, and run both distributed tests and non-
# distributed tests. But, TensorFlow allocates all the GPU memory by default, so
# the non-distributed tests allocate all the GPU memory. The distributed tests
# spawn processes that run TensorFlow, and cannot run if all the GPU memory is
# already allocated. If a non-distributed test is run and then a distributed test
# is run in the same process, the distributed test will fail because there is no
# more GPU memory for the spawned processes to allocate.
absl_flags.DEFINE_boolean('run_distributed_tests', False,
'If True, run the distributed tests. If False, the'
'non-distributed tests.')
absl_flags.DEFINE_boolean('full_tests', False,
'If True, all distributed or non-distributed tests '
'are run, which can take hours. If False, only a '
'subset of tests will be run. This subset runs much '
'faster and tests almost all the functionality as '
'the full set of tests, so it is recommended to keep '
'this option set to False.')
FLAGS = absl_flags.FLAGS
def main(_):
loader = unittest.defaultTestLoader
if FLAGS.full_tests:
suite = unittest.TestSuite([
loader.loadTestsFromModule(allreduce_test),
loader.loadTestsFromModule(cnn_util_test),
loader.loadTestsFromModule(variable_mgr_util_test),
loader.loadTestsFromModule(benchmark_cnn_test),
loader.loadTestsFromModule(all_reduce_benchmark_test),
loader.loadTestsFromModule(nasnet_test),
])
dist_suite = unittest.TestSuite([
loader.loadTestsFromModule(benchmark_cnn_distributed_test),
])
else:
suite = unittest.TestSuite([
loader.loadTestsFromModule(allreduce_test),
loader.loadTestsFromModule(cnn_util_test),
loader.loadTestsFromModule(all_reduce_benchmark_test),
loader.loadTestsFromModule(variable_mgr_util_test),
loader.loadTestsFromTestCase(benchmark_cnn_test.TestAlexnetModel),
loader.loadTestsFromTestCase(benchmark_cnn_test.TfCnnBenchmarksTest),
loader.loadTestsFromTestCase(benchmark_cnn_test.VariableUpdateTest),
loader.loadTestsFromTestCase(
benchmark_cnn_test.VariableMgrLocalReplicatedTest),
])
dist_suite = unittest.TestSuite([
loader.loadTestsFromNames([
'benchmark_cnn_distributed_test.DistributedVariableUpdateTest'
'.testVarUpdateDefault',
'benchmark_cnn_distributed_test.TfCnnBenchmarksDistributedTest'
'.testParameterServer',
]),
])
if FLAGS.run_distributed_tests:
print('Running distributed tests')
result = unittest.TextTestRunner(verbosity=2).run(dist_suite)
else:
print('Running non-distributed tests')
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not result.wasSuccessful())
if __name__ == '__main__':
app.run(main)
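# Editor's note: illustrative invocations, not part of the original file. The
# module filename is an assumption; the flags are the ones defined above.
#
#     python run_tests.py                          # fast subset, non-distributed
#     python run_tests.py --run_distributed_tests  # fast subset, distributed
#     python run_tests.py --full_tests             # full non-distributed suite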
|
the-stack_0_1382 | import os
import sys
import codecs
import pkg_resources
from setuptools import setup, find_packages
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, rel_path), 'r') as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith('__version__'):
# __version__ = "1.x.x"
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
else:
raise RuntimeError("Unable to find version string.")
def parse_requirements(filename):
""" load requirements from a pip requirements file. (replacing from pip.req import parse_requirements)"""
lineiter = (line.strip() for line in open(filename))
reqs = [line for line in lineiter if line and not line.startswith("#")]
if sys.platform == "win32":
reqs.append('pywin32')
if sys.version_info[:2] <= (3, 6) and \
"opencv-contrib-python" not in [d.project_name for d in pkg_resources.working_set]:
# If py<=3.6 and opencv-contrib-python has not been installed, install version==3.2.0.7
reqs.remove("opencv-contrib-python")
reqs.append("opencv-contrib-python==3.2.0.7")
if sys.version_info.major == 2:
# facebook-wda only supports py3
reqs.remove("facebook-wda>=1.3.3")
return reqs
setup(
name='airtest',
version=get_version("airtest/utils/version.py"),
author='Netease Games',
author_email='[email protected]',
description='UI Test Automation Framework for Games and Apps on Android/iOS/Windows/Linux',
long_description='UI Test Automation Framework for Games and Apps on Android/iOS/Windows, present by NetEase Games',
url='https://github.com/AirtestProject/Airtest',
license='Apache License 2.0',
keywords=['automation', 'automated-test', 'game', 'android', 'ios', 'windows', 'linux'],
packages=find_packages(exclude=['cover', 'playground', 'tests', 'dist']),
package_data={
'android_deps': ["*.apk", "airtest/core/android/static"],
'html_statics': ["airtest/report"],
'ios_deps': ["airtest/core/ios/iproxy"],
},
include_package_data=True,
install_requires=parse_requirements('requirements.txt'),
extras_require={
'tests': [
'nose',
],
'docs': [
'sphinx',
'recommonmark',
'sphinx_rtd_theme',
'mock',
]},
entry_points="""
[console_scripts]
airtest = airtest.cli.__main__:main
""",
classifiers=[
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
)
|
the-stack_0_1385 | # 354, https://leetcode.com/problems/russian-doll-envelopes/description/
# Sol-1 normal dp with time complexity: O(n^2), TLE
# no optimization
class Solution(object):
def maxEnvelopes(self, envelopes):
"""
:type envelopes: List[List[int]]
:rtype: int
"""
if not envelopes or len(envelopes) == 0:
return 0
# dp with time complexity: O(n^2)
# sort envelopes
envelopes.sort(key=lambda x:(x[0], x[1]))
dp = [1 for _ in range(len(envelopes))]
for i in range(1, len(envelopes)):
for j in range(i):
if envelopes[i][0] > envelopes[j][0] and envelopes[i][1] > envelopes[j][1]:
dp[i] = max(dp[i], dp[j] + 1)
return max(dp)
# dp with optimization based on 300) longest increasing sub-sequence
# time complexity: O(nlgn), Accepted, by huijiang
# good explanation: https://leetcode.com/problems/russian-doll-envelopes/discuss/82751/
# O(Nlog(N))-python-solution-explained
# e =[[5,4],[6,4],[6,7],[2,3]]
# e.sort(key=lambda x: (x[0], -x[1]))
# [[2, 3], [5, 4], [6, 7], [6, 4]]
# Since the width is increasing, we only need to consider height.
# [3, 4] cannot contain [3, 3], so we need to put [3, 4] before [3, 3]
# when sorting; otherwise the pair would be counted as increasing if the order were [3, 3], [3, 4]
class Solution(object):
def maxEnvelopes(self, envelopes):
"""
:type envelopes: List[List[int]]
:rtype: int
"""
envelopes.sort(key=lambda x:(x[0],-x[1]))
res = [0] * len(envelopes)
size = 0
for envelop in envelopes:
i, j = 0, size
while i != j:
m = (i + j) // 2
if envelop[1] > res[m]:
i = m + 1
else:
j = m
res[i] = envelop[1]
size = max(size, i + 1)
return size
s = Solution()
print(s.maxEnvelopes([[5,4],[6,4],[6,7],[2,3]]))
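# expected output: 3  ([2, 3] -> [5, 4] -> [6, 7])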
|
the-stack_0_1386 | import os
import board
import displayio
from adafruit_display_text.label import Label
from adafruit_bitmap_font import bitmap_font
# the current working directory (where this file is)
cwd = ("/"+__file__).rsplit('/', 1)[0]
fonts = [file for file in os.listdir(cwd+"/fonts/")
if (file.endswith(".bdf") and not file.startswith("._"))]
for i, filename in enumerate(fonts):
fonts[i] = cwd+"/fonts/"+filename
print(fonts)
##########################################################################
THE_FONT = fonts[0]
DISPLAY_STRING = "A multi-line-\nexample of\n font bounding!"
WRAP_CHARS = 40
##########################################################################
# Make the display context
splash = displayio.Group()
board.DISPLAY.show(splash)
# Make a background color fill
color_bitmap = displayio.Bitmap(320, 240, 1)
color_palette = displayio.Palette(1)
color_palette[0] = 0xFFFFFF
bg_sprite = displayio.TileGrid(color_bitmap,
pixel_shader=color_palette,
position=(0, 0))
splash.append(bg_sprite)
# Load the font
font = bitmap_font.load_font(THE_FONT)
font.load_glyphs(DISPLAY_STRING.encode('utf-8'))
print(DISPLAY_STRING)
text = Label(font, text=DISPLAY_STRING)
text.x = 20
text.y = 100
text.color = 0x0
# Make a background color fill
dims = text.bounding_box
print(dims)
textbg_bitmap = displayio.Bitmap(dims[2], dims[3], 1)
textbg_palette = displayio.Palette(1)
textbg_palette[0] = 0xFF0000
textbg_sprite = displayio.TileGrid(textbg_bitmap,
pixel_shader=textbg_palette,
position=(text.x+dims[0], text.y+dims[1]))
splash.append(textbg_sprite)
splash.append(text)
board.DISPLAY.refresh_soon()
board.DISPLAY.wait_for_frame()
while True:
pass
|
the-stack_0_1387 | """Ensures that the config for the package is handled correctly."""
import io
import pytest
import unittest.mock as mock
fake_io_file1 = io.StringIO('{"DEFAULT_DATABASE_ROOT": "./molscore_data"}')
fake_io_file2 = io.StringIO('')
fake_io_file2.close = lambda: None
def test_local_variables():
import molscore.config
"""ensure the config variables are initialized"""
assert type(molscore.config._initial_config) == dict,\
"Config should be loaded as dict"
assert type(molscore.config.VALID_CONFIG_PARAMETERS) == list,\
"Config param options should be list of str."
return
def test__check_config():
import molscore.config
"""Another layer of protection for ensuring configs are handled."""
# good dict, valid config params
# nothing should happen, eg nothing raised
good = {'DEFAULT_DATABASE_ROOT': './'}
molscore.config._check_config(good)
# bad dict, incorrect value
bad = {'DEFAULT_DATABASE_ROOT': 5}
with pytest.raises(TypeError):
molscore.config._check_config(bad)
# bad dict, folder does not exist
bad = {'DEFAULT_DATABASE_ROOT': './not_a_real_folder_hopefully/my_data'}
with pytest.raises(ValueError):
molscore.config._check_config(bad)
return
@mock.patch('molscore._set_globals')
@mock.patch('molscore.config.open', side_effect=[fake_io_file1, fake_io_file2])
def test_update(mocked_open, mocked_global_setter, working_test_dir):
import molscore.config
"""Update config without actually doing so."""
molscore.config.update('DEFAULT_DATABASE_ROOT',
f'{working_test_dir}/new_data')
assert mocked_global_setter.called,\
"""Did not update global variables"""
assert fake_io_file2.getvalue() == '{"DEFAULT_DATABASE_ROOT": "'+str(working_test_dir)+'/new_data"}',\
"Did not save to file the new variable"
return
|
the-stack_0_1389 | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # stops aggressive error message printing
import tensorflow as tf
import MLDashboard.MLDashboardBackend as MLDashboardBackend
import MLDashboard.MLCallbacksBackend as MLCallbacksBackend
from MLDashboard.MLCommunicationBackend import Message, MessageMode
import time
from MLDashboard.Examples.InteractiveDashboardDemo import get_model
class myCustomCallback(MLCallbacksBackend.DashboardCallbacks):
def __init__(self, updatelist, returnlist, model, x_train, y_train, x_test, y_test, labels, config):
super().__init__(updatelist, returnlist, model, x_train, y_train, x_test, y_test, labels, config)
def custom_on_test_begin(self, logs):
print("We are beginning the evaluation step.")
def run(testmode = False):
print("Starting custom callbacks demo...")
print("Setting up dashboard...")
#Create dashboard and return communication tools (this starts the process)
dashboardjsonfile = os.path.dirname(__file__) + '/dashboarddemo.json'
dashboardProcess, updatelist, returnlist = MLDashboardBackend.createDashboard(dashboardjsonfile,
openatend=not testmode)
print("Loading data...")
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
print("Formatting data...")
x_train = x_train.reshape(-1, 784).astype("float32") / 255.0
x_test = x_test.reshape(-1, 784).astype("float32") / 255.0
print("Sampling data...")
# Limit the train data to 10000 samples
x_train = x_train[:10000]
y_train = y_train[:10000]
# Limit test data to 1000 samples
x_test = x_test[:1000]
y_test = y_test[:1000]
print("Creating model...")
model = get_model()
print("Creating custom callbacks...")
#Callbacks require update and return list for communicating with dashboard
#Model and datasets are useful for sending that data to certain modules
config = MLCallbacksBackend.CallbackConfig()
labels = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']
callback = myCustomCallback(updatelist, returnlist, model, x_train, y_train, x_test, y_test, labels, config)
print("Starting training...")
trainingstarttime = time.time()
model.fit(x_train, y_train, epochs=20, callbacks=[callback])
print("Training finished in: ", round(time.time() - trainingstarttime, 3), " seconds.")
print("Evaluating model...")
model.evaluate(x_test, y_test, batch_size=128, callbacks=[callback])
updatelist.append(Message(MessageMode.End, {}))
print("Exiting cleanly...")
dashboardProcess.join()
print("Dashboard exited.")
#This handles any extra data that the dashboard sent, such as save commands
callback.HandleRemaingCommands()
if __name__ == '__main__':
run() |
the-stack_0_1391 | # import warnings
import numpy as np
from types import SimpleNamespace
import warnings
import matplotlib.pyplot as plt
from power_planner import graphs
# EXAMPLE DATA
instance = np.random.rand(1, 100, 100)
instance_corr = np.zeros((100, 100))
# corridor: 1 is feasible region, 0 is forbidden
# pad at the border, necessary for weighted_graph processing (np.roll function)
instance_corr[6:-6, 6:-6] = 1
instance_corr[:]
cfg = SimpleNamespace(
**{
# angle weight doesn't matter
"ANGLE_WEIGHT": 0,
# maximum angle -> needed to define half donut, can stay like that
"MAX_ANGLE": 1.57079,
"MAX_ANGLE_LG": 1.57079,
# scale can stay 1 as well, probably not used
"scale": 1,
# you need to set this, the pixel-wise minimum and maximum distance
# between pylons
"PYLON_DIST_MAX": 5.0,
"PYLON_DIST_MIN": 3.0,
# if you have only one category:
"class_weights": [1],
"layer_classes": ["resistance"],
# you need to set this, the start and destination points
"dest_inds": np.array([93, 90]),
"start_inds": np.array([7, 9])
}
)
graph = graphs.WeightedKSP(instance, instance_corr)
# single shortest path (building the graph)
path, path_cost, cost_sum = graph.single_sp(**vars(cfg))
print("output path:", path)
graph.get_shortest_path_tree()
# to output k paths
ksp = graph.find_ksp(5, overlap=0.5)
# ksp ist a list of form:
# [[path1, path_costs1, cost_sum1], [path2, path_costs2, cost_sum2], ...]
ksp_output_paths = [k[0] for k in ksp]
print(ksp_output_paths)
plt.figure(figsize=(10, 10))
plt.imshow(np.tile(np.expand_dims(instance[0], 2), 3))
for path in ksp_output_paths:
path = np.asarray(path)
# switched 0 and 1 because scatter and imshow have switched axes
plt.scatter(path[:, 1], path[:, 0], s=50)
plt.savefig("test_ksp.png")
|
the-stack_0_1394 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
-------------------------------------------------------------------------------
Python Script for Figure 2b
-------------------------------------------------------------------------------
Article Title: A Deep Learning-Based Model of Global Terrestrial Evaporation
Author: Akash Koppa
Affiliation: Hydro-Climate Extremes Lab (H-CEL), Ghent University, Belgium
Contact: Akash Koppa ([email protected])
-------------------------------------------------------------------------------
"""
## import required libraries
import pandas as pd
import os as os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mp
import matplotlib.lines as li
import seaborn as sb
## user defined configuration
inpdir = "<< Specify path to input data here >>"
reffil = {"ref": os.path.join(inpdir, "e_observed_sites.h5")}
modfil = {"mo1": os.path.join(inpdir, "e_process_sites.h5"),
"mo2": os.path.join(inpdir, "e_process_sites.h5")}
modmap = {"ref": "FLUXNET",
"mo1": "GLEAMv35b",
"mo2": "GLEAMHybrid"}
figmap = {"ref": "FLUXNET",
"mo1": "Process-Based Model",
"mo2": "Hybrid Model"}
marmap = {"ref": "FluxMarker",
"mo1": "GLEAMv35bMarker",
"mo2": "GLEAMHybridMarker"}
sizmap = {"ref": "FluxSize",
"mo1": "GLEAMv35bSize",
"mo2": "GLEAMHybridSize"}
sitfil = {"siteda": os.path.join(inpdir, "sites.h5")}
## main code
# read in the site data
sitdat = pd.read_hdf(sitfil["siteda"])
# read in the reference FLUXNET data
refdat = pd.read_hdf(reffil["ref"])
refdat[refdat < 0.0] = 0.0
# loop through the models and calculate the correlation for every site
corall = sitdat
stdall = sitdat
rmsall = sitdat
kgeall = sitdat
for modtmp in modfil.keys():
moddat = pd.read_hdf(modfil[modtmp])
moddat[moddat < 0.0] = 0.0
cortmp = []
stdtmp = []
rmstmp = []
kgetmp = []
# loop through the sites and calculate correlation and std
for sittmp in sitdat.index:
refsit = refdat[sittmp]
refsit.name = "ref"
modsit = moddat[sittmp]
modsit.name = modtmp
datsit = pd.concat([refsit, modsit], axis = 1)
datsit = datsit.dropna(how = "any")
datcor = datsit["ref"].corr(datsit[modtmp], method = "spearman")
modstd = datsit[modtmp].std()
datrms = ((datsit[modtmp] - datsit["ref"])**2).mean() ** 0.5
# kge
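        # Kling-Gupta Efficiency: KGE = 1 - sqrt((r - 1)^2 + (sigma_m/sigma_o - 1)^2 + (mu_m/mu_o - 1)^2)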
corrat = (datcor - 1)**2
stdrat = ((modstd/datsit["ref"].std()) - 1)**2
menrat = ((datsit[modtmp].mean()/datsit["ref"].mean()) - 1)**2
kgeval = 1 - np.sqrt(corrat + stdrat + menrat)
# append
kgetmp.append(kgeval)
cortmp.append(datcor)
stdtmp.append(modstd)
rmstmp.append(datrms)
# create a pandas series from the correlation and standard deviation data
cortm1 = pd.Series(cortmp, index = sitdat.index, name = modmap[modtmp])
stdtm1 = pd.Series(stdtmp, index = sitdat.index, name = modmap[modtmp])
rmstm1 = pd.Series(rmstmp, index = sitdat.index, name = modmap[modtmp])
kgetm1 = pd.Series(kgetmp, index = sitdat.index, name = modmap[modtmp])
# append the data to the final data frames
corall = pd.concat([corall, cortm1], axis = 1)
stdall = pd.concat([stdall, stdtm1], axis = 1)
rmsall = pd.concat([rmsall, rmstm1], axis = 1)
kgeall = pd.concat([kgeall, kgetm1], axis = 1)
# replace all infinite values with nan
stdall = stdall.replace(float('inf'), np.nan)
corall = corall.replace(float('inf'), np.nan)
rmsall = rmsall.replace(float('inf'), np.nan)
# melt all datasets
kgevio = kgeall[["svortv", "GLEAMHybrid","GLEAMv35b"]]
kgevio = kgevio.rename(columns = {"svortv": "Vegetation Type",
"GLEAMHybrid": "Hybrid Model",
"GLEAMv35b": "Process-Based Model"})
kgevio = kgevio.melt(id_vars = "Vegetation Type")
kgevio = kgevio.rename(columns = {"value": "Kling-Gupta Efficiency"})
kgevio.loc[kgevio["Kling-Gupta Efficiency"] < -1.5, "Kling-Gupta Efficiency"] = np.nan
# plot the violin plots
mm = 0.0393701
sb.set_theme(style = "darkgrid")
sb.set_style("ticks")
figure = mp.pyplot.figure(figsize = (89*mm, 89*mm))
figaxi = figure.add_subplot(1, 1, 1)
figaxi.set_title("Evaporation ($E$)", fontsize = 8)
figaxi = sb.violinplot(x = "Vegetation Type",
y = "Kling-Gupta Efficiency",
hue = "variable",
split = "true",
data = kgevio,
inner = "quartile",
palette = "Set2",
fontsize = 7,
linewidth = 1.0,
edgecolor = "black",
order = ["Short", "Tall"])
plt.legend(loc = "lower left", edgecolor = "black", fontsize = 7)
yticks = figaxi.get_yticks()
yticks[yticks == -0.5] = -0.41
figaxi.set_yticks(yticks)
figaxi.set_ylim(-2.0)
figaxi.set_xlabel(figaxi.get_xlabel(), fontsize = 8)
figaxi.set_ylabel(figaxi.get_ylabel(), fontsize = 8)
figaxi.tick_params(axis='both', which='major', labelsize=7)
plt.axhline(-0.41, color = "red",
linestyle = "solid",
linewidth = 1.0)
figure.tight_layout()
plt.savefig("<< Specify output path for the figure here >>")
|
the-stack_0_1396 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 18 00:38:17 2019
@author: yifan
"""
'''
Energy Evaluation function given a super cell
'''
import os
import json
import pickle
import random
import numpy as np
from ase import Atom, Atoms
from ase.build import surface
from ase.data import covalent_radii
from ase.io import read, write
from ase.visualize import view
from numpy.linalg import norm
from sklearn.metrics import mean_squared_error
from itertools import combinations
import lattice_functions as lf
from set_ce_lattice import dz, mother
import matplotlib
# matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
# plt.switch_backend('agg')
font = {'family': 'normal', 'size': 15}
matplotlib.rc('font', **font)
import platform
HomePath = os.path.expanduser('~')
ProjectPath = os.path.join(HomePath, 'Documents', 'GitHub', 'Pdn-CO-Stability')
if platform.system() == 'Linux':
ProjectPath = '/work/ccei_biomass/users/wangyf/cluster_project/CE_opt'
path = os.path.join(ProjectPath, 'Pdn-CE')
#%%
'''
Useful functions
'''
'''
ind - individual, one hot encoding array consisting of 0s and 1s
ind_index - config or occupied node index list consisting of integer numbers
'''
def occupancy():
'''
    Create an occupancy value (either 0 or 1) for a node in the configuration
'''
occ = random.randint(0, 1)
return occ
def one_hot_to_index(individual):
'''
Convert an individual from one hot encoding to a list index
'''
ind_index = list(np.nonzero(individual)[0])
return ind_index
def index_to_one_hot(ind_index, n_nodes):
'''
Convert an individual from a list index to one hot encoding
'''
individual = np.zeros(n_nodes, dtype=int)
individual[np.array(ind_index)] = 1
return individual
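# For example (illustrative only): with n_nodes = 5,
# index_to_one_hot([0, 2], 5) -> array([1, 0, 1, 0, 0])
# and one_hot_to_index(np.array([1, 0, 1, 0, 0])) -> [0, 2]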
def check_Pd_Pd_distance(ind_index, mother):
'''
    Takes in a configuration and returns False if any two atoms are closer than nearest neighbors
'''
acceptance_flag = True
combos = list(combinations(ind_index, 2))
ncombo = len(combos)
for i in range(ncombo):
pt1 = mother[combos[i][0]]
pt2 = mother[combos[i][1]]
distance = lf.two_points_D(pt1, pt2)
if distance < 1.0:
acceptance_flag = False
break
return acceptance_flag
def check_Pd_Pd_neighboring(occ_node_index, ind_index, mother):
'''
    Takes in a node index and the mother lattice and
    returns whether the node is a nearest neighbor of an existing node
'''
acceptance_flag = True
pt1 = mother[occ_node_index[0]]
min_distance = np.min([lf.two_points_D(pt1, pt2) for pt2 in mother[ind_index] if not np.all(pt2 == pt1)])
# print(min_distance)
if not min_distance == 1.0:
acceptance_flag = False
return acceptance_flag
def swap_occ_empty(ind):
'''
Core function of the random walk
Swap an occupied site and an empty site
takes in one hot numpy array - ind
return the new configuration and the chosen node
'''
x_new = ind.copy()
occ_indices = np.where(x_new == 1)[0]
chosen_occ_i = np.random.choice(occ_indices, 1)
x_new[chosen_occ_i] = 0
empty_indices = np.where(x_new == 0)[0]
chosen_empty_i = np.random.choice(empty_indices, 1)
x_new[chosen_empty_i] = 1
return x_new, chosen_empty_i, chosen_occ_i
def append_support(ind_index, mother, view_flag=False):
'''
Append the configuration onto a ceria support surface
- Inputs
- ind_index : the occupied nodes for a given configuration
- mother : the mother cell
- view_flag : show in ase GUI
'''
# Useful bond information
Pdr = covalent_radii[46]
#Or = covalent_radii[8]
PdPd = Pdr * 2
PdO = 2.1 # the average PdO length take from CONTCAR files
def ceria():
a = 5.49 # Lattice constant
CeO2 = Atoms('Ce4O8', scaled_positions=[(0., 0., 0.),
(0., 0.5, 0.5),
(0.5, 0., 0.5),
(0.5, 0.5, 0.),
(0.75, 0.25, 0.25),
(0.25, 0.75, 0.75),
(0.75, 0.75, 0.75),
(0.25, 0.25, 0.25),
(0.25, 0.25, 0.75),
(0.75, 0.75, 0.25),
(0.25, 0.75, 0.25),
(0.75, 0.25, 0.75)],
cell=[a, a, a],
pbc=True)
#(1,1,1) is the slab type. There are 2 unit cells along z direction
slab = surface(CeO2, (1, 1, 1), 2)
# Repeating the slab 5 unit cells in x and 5 unit cell in y directions
# At the end the ceria slab is 10 by 10
# the Pd supercell mother is also 10 by 10
slab = slab.repeat((5, 5, 1))
slab.center(vacuum=10.0, axis=2)
        # cleave the top layer O atoms
del slab[[atom.index for atom in slab if atom.z > 15]]
return slab
support = ceria()
# set origin value by looking at the ase GUI, pick one oxygen atom
origin_index = 17
origin = support[origin_index].position.copy()
origin[2] = origin[2] + PdO
# superpose the Pd lattice onto ceria lattice
mother_with_support = origin + (mother - mother[0]) * PdPd
# select the occupied nodes
Pdpos = mother_with_support[ind_index]
# append Pd atoms to the support
nPd = len(Pdpos)
for i in range(nPd):
support.append(Atom('Pd', position=Pdpos[i]))
'''
    Append an atom in the vacuum at the top corner
    for plotting purposes, just because POV is dumb
'''
dumb_x = 0 # support.cell[0][0] + support.cell[0][1]
dumb_y = 0 # support.cell[1][0] + support.cell[1][1]
dumb_z = support.cell[2][2] - 1
dumb_pos = np.array([dumb_x, dumb_y, dumb_z])
support.append(Atom('He', position=dumb_pos))
if view_flag:
view(support)
return support, mother_with_support
def check_floating_atoms(ind, mother):
'''
Check if the configuration has any floating atoms in the layer above base layer
    If floating_flag = True, the configuration is considered infeasible;
    if floating_flag = False, the configuration can be accepted.
    Input the individual one-hot coding and the mother coordinates
'''
# Convert to config list
config = one_hot_to_index(ind)
# Collect the atoms above the base layer
config_layer = lf.cal_layers(mother, dz, config)
config_base_above = list(np.array(config)[np.where(config_layer > 1 )])
# Check the CN of atoms above the base layer
Graphs = lf.initialize_graph_object(mother, dz, NN1 = 1)
Gm = Graphs.Gm
cn_list = []
for ci in config_base_above:
cni = len([i for i in list(Gm.neighbors(ci)) if i in config])
cn_list.append(cni)
# Isolated node list, CN < 2
iso_list = list(np.array(config_base_above)[np.where(np.array(cn_list) < 2)])
floating_flag = (len(iso_list) > 0)
return floating_flag
#%%
class Pdn():
def __init__(self, model_file, mother=mother, super_cell_flag=False):
'''
loading the regression results
'''
self.mother = mother
# The flag to inluce 1NN and edges shorter than 1NN
NN1 = 1
[self.Gcv, self.J, self.intercept, self.RMSE_test_atom, self.RMSE_test_site] = pickle.load(open(model_file, "rb"))
self.super_cell_flag = super_cell_flag
# Initialize graph object
self.Graphs = lf.initialize_graph_object(self.mother, dz, NN1 = 1)
# Initialize calculation object
empty = 'grey'
filled = 'r'
occ = [empty, filled]
self.Cal = lf.calculations(occ)
self.Gm = self.Graphs.Gm
def save_super_clusters(self):
'''
        save the significant clusters in super cell to a json file
called 'clusters_super_nonzero.json'
'''
with open('clusters_super_cell.json') as f:
Gcv_super = json.load(f)['Gcv']
Gcv_super_nonzero = []
Gcv_model_nonrepeat = [Gi[0] for Gi in self.Gcv] # take the first clusters in each list
for Gi_super in Gcv_super:
for Gi_model_nonrepeat in Gcv_model_nonrepeat: # check if the first one is in Gcv_super
if Gi_model_nonrepeat in Gi_super:
Gcv_super_nonzero.append(Gi_super)
# save to a json file
Gcv_super_nonzero_dict = {'Gcv': Gcv_super_nonzero}
with open(os.path.join(path, 'clusters_super_nonzero.json'), 'w') as outfile:
json.dump(Gcv_super_nonzero_dict, outfile)
def load_super_cluster(self):
'''
load 'cluster_super_cell.json'
'''
with open(os.path.join(path, 'clusters_super_nonzero.json')) as f:
self.Gcv_super = json.load(f)['Gcv']
        self.Gcv = self.Gcv_super # substitute the original Gcv
def load_config(self, ind_index):
'''
load the configuration into self.Graph.Gsv
'''
if self.super_cell_flag:
self.load_super_cluster()
self.Graphs.get_configs([ind_index])
def predict_E(self, ind_index):
'''
Predict Energy of the cluster only, take in ind index
'''
self.load_config(ind_index)
pi_pred = self.Cal.get_pi_matrix_l(self.Graphs.Gsv, self.Gcv)
E_pred = float(np.dot(pi_pred, self.J) + self.intercept)
# return Graphs
return E_pred, pi_pred
def swap_occ_empty_fast(self, ind):
'''
Core function of the random walk
Swap an occupied site and a NEARBY (must be 1NN) empty site
takes in one hot numpy array - ind
'''
x_new = ind.copy()
occ_indices = list(np.where(x_new == 1)[0])
self.load_config(occ_indices)
config_G = self.Graphs.Gsv[0]
NN1_list = []
for node_i in occ_indices:
NN1_list += list(config_G.neighbors(node_i))
NN1_list = list(set(NN1_list))
NN1_list_empty = [i for i in NN1_list if i not in occ_indices]
chosen_occ_i = np.random.choice(occ_indices, 1)
chosen_empty_i = np.random.choice(NN1_list_empty, 1)
if not chosen_occ_i == chosen_empty_i:
x_new[chosen_occ_i] = 0
x_new[chosen_empty_i] = 1
return x_new, chosen_empty_i, chosen_occ_i
def swap_occ_empty_reverse(self, ind):
'''
Core function of the random walk
Swap an occupied site to an empty site on the base
takes in one hot numpy array - ind
'''
x_new = ind.copy()
occ_indices = list(np.where(x_new == 1)[0])
base_indices = np.where(self.mother[:,2] == dz)[0]
base_indices_empty = list(np.where(x_new[base_indices] == 0)[0])
chosen_occ_i = np.random.choice(occ_indices, 1)
chosen_empty_i = np.random.choice(base_indices_empty, 1)
if not chosen_occ_i == chosen_empty_i:
x_new[chosen_occ_i] = 0
x_new[chosen_empty_i] = 1
return x_new, chosen_empty_i, chosen_occ_i
def swap_iso_neighbors(self, ind, alpha1=1.0, alpha2=0.25):
'''
New version
Core function of the random walk
if there is isolated nodes:
Randomly put n isolated nodes into NN1 nodes of the existing nodes
if there is non-isolated nodes:
Shuffle the occupied nodes to NN1 nodes of the existing nodes
takes in one hot numpy array - ind
'''
x_new = ind.copy()
config = one_hot_to_index(x_new) # convert to config
NN1_list = [] # the NN1 nodes to config
cn_list = [] # the cn number for each node
for ci in config:
NN1_neighbors_i = [i for i in list(self.Gm.neighbors(ci))]
cni = len([i for i in list(self.Gm.neighbors(ci)) if i in config])
cn_list.append(cni)
NN1_list += NN1_neighbors_i
# Unique NN1 nodes
NN1_list = list(set(NN1_list))
# All possible empty NN1 nodes
NN1_list_empty = [i for i in NN1_list if i not in config]
# Get both NN1 and NN2 nodes
NN2_list = []
for ci in NN1_list:
NN2_neighbors_i = [i for i in list(self.Gm.neighbors(ci))]
NN2_list += NN2_neighbors_i
# Unique NN1 nodes
NN2_list = list(set(NN2_list + NN1_list))
# All possible empty NN1 nodes
NN2_list_empty = [i for i in NN2_list if i not in config]
        # All isolated nodes with coordination number < 2
iso_list = list(np.array(config)[np.where(np.array(cn_list) < 2)])
        # Given an alpha, determine the number of nodes involved in the exchange
m = int(np.floor(min(len(iso_list), len(NN1_list_empty)) * alpha1))
if m > 0: # Randomly put n isolated nodes into NN1 nodes of the existing nodes
chosen_occ_i = np.unique(np.random.choice(iso_list, m, replace=False))
x_new[chosen_occ_i] = 0
chosen_empty_i = np.unique(np.random.choice(NN1_list_empty, m, replace= False))
x_new[chosen_empty_i] = 1
if m == 0: # Shuffle the occupied nodes to NN1 nodes of the existing nodes and choose n from it
# the number of occupied nodes
n = len(config)
n_possible = [n * alpha2, len(NN2_list_empty)]
if min(n_possible) > 1:
nswap = int(np.floor(min(n_possible)))
else: nswap = 1
#print('\t Swap {} atoms'.format(nswap))
chosen_occ_i = np.unique(np.random.choice(config, nswap, replace = False))
x_new[chosen_occ_i] = 0
chosen_empty_i = np.unique(np.random.choice(NN2_list_empty, nswap, replace= False))
x_new[chosen_empty_i] = 1
return x_new, chosen_empty_i, chosen_occ_i
|
the-stack_0_1397 | #
# Copyright 2019 Xilinx, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inaccel.coral as inaccel
import numpy as np
import time
BinomialTreeInputDataTypeDouble = np.dtype([('S', np.double), ('K', np.double), ('T', np.double), ('rf', np.double), ('V', np.double), ('q', np.double), ('N', np.int32), ('packed', np.int32, 3)])
class BinomialTree:
MAX_OPTION_CALCULATIONS = 1024;
BinomialTreeEuropeanPut = np.int32(1);
BinomialTreeEuropeanCall = np.int32(2);
BinomialTreeAmericanPut = np.int32(3);
BinomialTreeAmericanCall = np.int32(4);
def __init__(self):
with inaccel.allocator:
self.inputBuffer = np.ndarray(self.MAX_OPTION_CALCULATIONS, dtype = BinomialTreeInputDataTypeDouble)
self.outputBuffer = np.ndarray(self.MAX_OPTION_CALCULATIONS, dtype = np.double)
def run(self, optionType):
self.m_runStartTime = int(round(time.time() * 1000000))
numOptions = np.int32(self.inputBuffer.size)
startIndex = np.int32(0)
if ((numOptions % 8) != 0):
raise RuntimeError("[XLNX] BinomialTree::run - number of options to calculate should be a multiple of 8")
req = inaccel.request("com.xilinx.vitis.quantitativeFinance.binomialTree.engine")
req.arg(self.inputBuffer).arg(self.outputBuffer).arg(optionType).arg(numOptions).arg(startIndex)
inaccel.submit(req).result()
self.m_runEndTime = int(round(time.time() * 1000000))
def lastruntime(self):
duration = self.m_runEndTime - self.m_runStartTime
return duration
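# A minimal usage sketch (illustrative values only; assumes an InAccel-enabled
# FPGA deployment with the binomial-tree accelerator available):
if __name__ == '__main__':
    bt = BinomialTree()
    bt.inputBuffer['S'] = 110.0   # spot price
    bt.inputBuffer['K'] = 100.0   # strike price
    bt.inputBuffer['T'] = 1.0     # time to maturity in years
    bt.inputBuffer['rf'] = 0.05   # risk-free rate
    bt.inputBuffer['V'] = 0.2     # volatility
    bt.inputBuffer['q'] = 0.0     # dividend yield
    bt.inputBuffer['N'] = 1024    # assumed: number of tree steps
    bt.run(BinomialTree.BinomialTreeEuropeanCall)
    print(bt.outputBuffer[:8], 'computed in', bt.lastruntime(), 'microseconds')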
|
the-stack_0_1401 | import codecs
import time
from datetime import datetime
from script.util.Logger import Logger, pprint_logger
from script.util.misc_util import log_error_trace
def file_lines_job(func, in_file='input.txt', out_file='output.txt', encoding='UTF8'):
def wrapper():
read_time = time.time()
with codecs.open(in_file, 'r', encoding=encoding) as f:
lines = [line for line in f.readlines()]
new_lines = []
for line in lines:
line = line.replace('\r', '')
line = line.replace('\n', '')
new_lines += [line]
lines = new_lines
read_time = time.time() - read_time
old_len = len(lines)
print("read '{}', {} lines, {:.3f}'s elapsed".format(in_file, old_len, read_time))
func_time = time.time()
lines = func(lines)
func_time = time.time() - func_time
print("in func {:.3f}'s elapsed".format(func_time))
write_time = time.time()
if lines is not None:
new_lines = []
for line in lines:
line = str(line)
                if line[-1] != '\n':
new_lines += [line + '\n']
else:
new_lines += [line]
lines = new_lines
with codecs.open(out_file, 'w', encoding=encoding) as f:
f.writelines(lines)
write_time = time.time() - write_time
new_len = len(lines)
if old_len - new_len == 0:
print('same len')
elif old_len - new_len > 0:
print("del {} lines".format(old_len - new_len))
else:
print("add {} lines".format(-(old_len - new_len)))
print("write '{}', {} lines, {:.3f}'s elapsed".format(out_file, new_len, write_time))
else:
write_time = 0
print("total {:.4f}'s elapsed".format(read_time + func_time + write_time))
wrapper.__name__ = func.__name__
return wrapper
def file_str_job(func, in_file='input.txt', out_file='output.txt', encoding='UTF8'):
def wrapper():
with codecs.open(in_file, 'r', encoding=encoding) as f:
line = "".join([line for line in f.readlines()])
print("read '{}', {} length".format(in_file, len(line)))
line = func(line)
if line is not None:
with codecs.open(out_file, 'w', encoding=encoding) as f:
f.writelines(str(line))
print("write '{}', {} length".format(out_file, len(line)))
wrapper.__name__ = func.__name__
return wrapper
def deco_exception_handle(func):
"""decorator for catch exception and log"""
def wrapper(*args, **kwargs):
self = args[0]
log_func = self.log
try:
return func(*args, **kwargs)
except KeyboardInterrupt:
log_func("KeyboardInterrupt detected abort process")
except Exception as e:
log_error_trace(log_func, e)
wrapper.__name__ = func.__name__
return wrapper
def deco_log_func_name(func):
def wrapper(*args, **kwargs):
self = args[0]
log_func = self.log
return log_func(func.__name__, *args, **kwargs)
wrapper.__name__ = func.__name__
return wrapper
def deco_timeit(func):
def wrapper(*args, **kwargs):
date = datetime.now()
start = time.time()
try:
ret = func(*args, **kwargs)
except BaseException as e:
log_error_trace(print, e)
ret = None
finally:
elapse_time = time.time() - start
msg = f"in {func.__name__}(), time {time.time() - start:.4f}'s elapsed"
if elapse_time > 60:
now = datetime.now() - date
msg += f", {now}"
print(msg)
return ret
wrapper.__name__ = func.__name__
return wrapper
def deco_save_log(func):
def wrapper(*args, **kwargs):
logger = Logger(func.__name__, level='INFO')
print = logger.info
pprint = pprint_logger(print)
func_name = func.__name__
print('#' * 80)
print(f'begin {func_name}')
ret = func(print, pprint, *args, **kwargs)
print(f'end {func_name}')
print('#' * 80)
return ret
return wrapper
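# A minimal usage sketch (assumes an 'input.txt' exists next to this script;
# the decorated function below is illustrative, not part of this module):
if __name__ == '__main__':
    @deco_timeit
    @file_lines_job
    def drop_blank_lines(lines):
        # keep only non-empty lines; the wrapper writes the result to 'output.txt'
        return [line for line in lines if line.strip()]

    drop_blank_lines()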
|
the-stack_0_1402 | # Copyright (c) 2019, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: MIT
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/MIT
import torch
from ..base import BaseLearner
class BaseSkillDiscoveryLearner(BaseLearner):
def __init__(self, env_reward=False, hidden_size=128, num_layers=4, normalize_inputs=False, **kwargs):
self.env_reward = bool(env_reward)
self.hidden_size = int(hidden_size)
self.num_layers = int(num_layers)
self.normalize_inputs = bool(normalize_inputs)
super().__init__(**kwargs)
self.ep_summary_keys = ["cumulative_rew", "cumulative_im_rew", "cumulative_density_rew"]
def fill_summary(self, *values):
self._ep_summary = [float(sum([e['reward'] for e in self.agent.episode])),
float(sum([e.get('im_reward', 0.) for e in self.agent.episode])),
float(sum([e.get('density_model_reward', 0.) for e in self.agent.episode]))]
self._ep_summary += [v.item() for v in values]
def relabel_episode(self):
self._compress_me = []
for e in self.agent.episode:
# Optionally take into account extrinsic reward
r = e['env_reward'] * float(self.env_reward)
e['reward'] = r
self._compress_me.append(self.agent.episode)
# Add discriminator reward
self._add_im_reward()
def relabel_batch(self, batch):
# Compute intrinsic rewards
with torch.no_grad():
new_im_rew = self.im.surprisal(batch)
if self.density is not None:
new_density_rew = self.density.novelty(batch)
else:
new_density_rew = torch.zeros_like(new_im_rew)
# Make sure that weights for intrinsic rewards are not None
im_nu = self.im_nu if self.im_nu is not None else 0.
density_nu = self.density_nu if self.density_nu is not None else 0.
# Detach intrinsic rewards from computation graph
new_im_rew = new_im_rew.detach()
new_density_rew = new_density_rew.detach()
# Optionally take into account extrinsic reward
r = batch['env_reward'] * float(self.env_reward)
# Add intrinsic rewards
r += im_nu * new_im_rew + density_nu * new_density_rew
batch['reward'] = r
batch['im_reward'] = new_im_rew
batch['density_model_reward'] = new_density_rew
return batch
def _compute_surprisal(self, batched_episode):
return self.im.surprisal(batched_episode)
def _add_im_reward(self):
if self.im is not None:
for ep in self._compress_me:
batched_episode = {key: torch.stack([e[key] for e in ep]) for key in ep[0].keys()}
surprisals = self._compute_surprisal(batched_episode)
if self.im_scale:
self.train()
_ = self._im_bn(surprisals.view(-1, 1))
self.eval()
surprisals = surprisals / torch.sqrt(self._im_bn.running_var[0])
for e, s in zip(ep, surprisals):
e['reward'] += (self.im_nu * s.detach())
e['im_reward'] = s.detach()
def preprocess_skill(self, z, **kwargs):
return self.agent.preprocess_skill(z, **kwargs)
def get_values(self, batch):
return self.v_module(
batch['state'],
self.preprocess_skill(batch['skill']),
)
def get_terminal_values(self, batch):
return self.v_module(
batch['next_state'][-1:],
self.preprocess_skill(batch['skill'][-1:])
)
def get_policy_lprobs_and_nents(self, batch):
log_prob, n_ent, _ = self.policy(
batch['state'],
self.preprocess_skill(batch['skill']),
action_logit=batch['action_logit']
)
return log_prob.sum(dim=1), n_ent
def get_im_loss(self, batch):
return self.im(batch)
def soft_update(self):
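        # Polyak averaging: p_targ <- polyak * p_targ + (1 - polyak) * p, applied parameter-wise below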
module_pairs = [
dict(source=self.v_module, target=self.v_target),
]
for pair in module_pairs:
for p, p_targ in zip(pair['source'].parameters(), pair['target'].parameters()):
p_targ.data *= self.polyak
p_targ.data += (1 - self.polyak) * p.data
def _get_q_module(self, q_i):
q_i = q_i if q_i is not None else 1
assert q_i in [1, 2]
return [self.q1, self.q2][q_i - 1]
def get_action_qs(self, batch, q_i=None):
return self.get_curr_qs(batch, new_actions=None, q_i=q_i)
def get_policy_loss_and_actions(self, batch):
policy_actions, logprobs = self.sample_policy_actions_and_lprobs(batch)
p_obj = self.q1.q_no_grad(batch['state'], policy_actions, self.preprocess_skill(batch['skill']))
if hasattr(self, 'alpha'): # for SAC
p_obj -= self.alpha * logprobs
p_losses = -p_obj # flip sign to turn the maximization objective into a loss function to minimize
p_loss = p_losses.mean()
return p_loss, policy_actions
def get_curr_qs(self, batch, new_actions=None, q_i=None):
"""
Compute Q_i(s,a). Use new_actions to override the actions in the batch (e.g. for SAC).
q_i selects the index of the Q-function.
"""
action = new_actions if new_actions is not None else batch['action']
return self._get_q_module(q_i)(
batch['state'],
action,
self.preprocess_skill(batch['skill'])
)
def get_next_vs(self, batch):
return self.v_target(
batch['next_state'],
self.preprocess_skill(batch['skill']),
)
def sample_policy_actions_and_lprobs(self, batch): # For SAC; we need to sample new actions when updating V
""" Sample new actions. Returns (actions, logprobs) tuple. """
action, action_logit, lprobs, n_ent = self.policy(
batch['state'],
self.preprocess_skill(batch['skill'])
)
return action, lprobs.sum(dim=1)
|
the-stack_0_1411 | import numpy as np
import pytest
import pandas as pd
from pandas import CategoricalIndex, Index, Series
import pandas._testing as tm
class TestMap:
@pytest.mark.parametrize(
"data, categories",
[
(list("abcbca"), list("cab")),
(pd.interval_range(0, 3).repeat(3), pd.interval_range(0, 3)),
],
ids=["string", "interval"],
)
def test_map_str(self, data, categories, ordered):
# GH 31202 - override base class since we want to maintain categorical/ordered
index = CategoricalIndex(data, categories=categories, ordered=ordered)
result = index.map(str)
expected = CategoricalIndex(
map(str, data), categories=map(str, categories), ordered=ordered
)
tm.assert_index_equal(result, expected)
def test_map(self):
ci = pd.CategoricalIndex(list("ABABC"), categories=list("CBA"), ordered=True)
result = ci.map(lambda x: x.lower())
exp = pd.CategoricalIndex(list("ababc"), categories=list("cba"), ordered=True)
tm.assert_index_equal(result, exp)
ci = pd.CategoricalIndex(
list("ABABC"), categories=list("BAC"), ordered=False, name="XXX"
)
result = ci.map(lambda x: x.lower())
exp = pd.CategoricalIndex(
list("ababc"), categories=list("bac"), ordered=False, name="XXX"
)
tm.assert_index_equal(result, exp)
# GH 12766: Return an index not an array
tm.assert_index_equal(
ci.map(lambda x: 1), Index(np.array([1] * 5, dtype=np.int64), name="XXX")
)
# change categories dtype
ci = pd.CategoricalIndex(list("ABABC"), categories=list("BAC"), ordered=False)
def f(x):
return {"A": 10, "B": 20, "C": 30}.get(x)
result = ci.map(f)
exp = pd.CategoricalIndex(
[10, 20, 10, 20, 30], categories=[20, 10, 30], ordered=False
)
tm.assert_index_equal(result, exp)
result = ci.map(Series([10, 20, 30], index=["A", "B", "C"]))
tm.assert_index_equal(result, exp)
result = ci.map({"A": 10, "B": 20, "C": 30})
tm.assert_index_equal(result, exp)
def test_map_with_categorical_series(self):
# GH 12756
a = Index([1, 2, 3, 4])
b = Series(["even", "odd", "even", "odd"], dtype="category")
c = Series(["even", "odd", "even", "odd"])
exp = CategoricalIndex(["odd", "even", "odd", np.nan])
tm.assert_index_equal(a.map(b), exp)
exp = Index(["odd", "even", "odd", np.nan])
tm.assert_index_equal(a.map(c), exp)
@pytest.mark.parametrize(
("data", "f"),
(
([1, 1, np.nan], pd.isna),
([1, 2, np.nan], pd.isna),
([1, 1, np.nan], {1: False}),
([1, 2, np.nan], {1: False, 2: False}),
([1, 1, np.nan], Series([False, False])),
([1, 2, np.nan], Series([False, False, False])),
),
)
def test_map_with_nan(self, data, f): # GH 24241
values = pd.Categorical(data)
result = values.map(f)
if data[1] == 1:
expected = pd.Categorical([False, False, np.nan])
tm.assert_categorical_equal(result, expected)
else:
expected = Index([False, False, np.nan])
tm.assert_index_equal(result, expected)
def test_map_with_dict_or_series(self):
orig_values = ["a", "B", 1, "a"]
new_values = ["one", 2, 3.0, "one"]
cur_index = CategoricalIndex(orig_values, name="XXX")
expected = CategoricalIndex(new_values, name="XXX", categories=[3.0, 2, "one"])
mapper = Series(new_values[:-1], index=orig_values[:-1])
result = cur_index.map(mapper)
# Order of categories in result can be different
tm.assert_index_equal(result, expected)
mapper = {o: n for o, n in zip(orig_values[:-1], new_values[:-1])}
result = cur_index.map(mapper)
# Order of categories in result can be different
tm.assert_index_equal(result, expected)
|
the-stack_0_1412 | #!/usr/bin/env python
from __future__ import print_function
import cv2
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import time
import argparse
from opencvutils import Camera
import socket as Socket
from opencvutils import __version__ as VERSION
# import errno
# threaded version
# http://stackoverflow.com/questions/12650238/processing-simultaneous-asynchronous-requests-with-python-basehttpserver
def compress(orig, comp):
return float(orig) / float(comp)
class mjpgServer(BaseHTTPRequestHandler):
"""
A simple mjpeg server that either publishes images directly from a camera
or republishes images from another pygecko process.
"""
cam = None
ip = None
hostname = None
# def setUpCamera(self, cv=None, pi=None, win=(320, 240)):
# """
# cv - camera number, usually 0
# pi - set to True
# """
# if pi:
# self.cam = Camera('pi')
# self.cam.init(win=win)
# elif cv:
# self.cam = Camera('cv')
# self.cam.init(cameraNumber=cv, win=win)
#
# else:
# raise Exception('Error, you must specify "cv" or "pi" for camera type')
def getImage(self):
if self.cam:
print('cam')
return self.cam.read()
else:
# if not self.cam:
# raise Exception('Error, you must setup camera first')
# print('You should call setUpCamera() first ... let us try now and assume "cv=0"')
# self.setUpCamera(cv=0)
self.cam = Camera('cv')
self.cam.init(cameraNumber=0, win=(640, 480))
return False, None
# def do_HEAD(s):
# print 'do_HEAD'
# s.send_response(200)
# s.send_header("Content-type", "text/html")
# s.end_headers()
def do_GET(self):
print('connection from:', self.address_string())
# if self.ip is None or self.hostname is None:
# self.hostname = Socket.gethostname()
# self.ip = Socket.gethostbyname(Socket.gethostname())
if self.path == '/mjpg':
self.send_response(200)
self.send_header(
'Content-type',
'multipart/x-mixed-replace; boundary=--jpgboundary'
)
self.end_headers()
while True:
# ret, img = capture.read()
ret, img = self.getImage()
if not ret:
# print 'crap'
time.sleep(1)
continue
ret, jpg = cv2.imencode('.jpg', img)
# print 'Compression ratio: %d4.0:1'%(compress(img.size,jpg.size))
self.wfile.write("--jpgboundary")
self.send_header('Content-type', 'image/jpeg')
# self.send_header('Content-length',str(tmpFile.len))
self.send_header('Content-length', str(jpg.size))
self.end_headers()
self.wfile.write(jpg.tostring())
time.sleep(0.05)
# elif self.path == '/':
# # hn = self.server.server_address[0]
# port = self.server.server_address[1]
# ip = self.ip
# hostname = self.ip
#
# self.send_response(200)
# self.send_header('Content-type', 'text/html')
# self.end_headers()
# self.wfile.write('<html><head></head><body>')
# self.wfile.write('<h1>{0!s}[{1!s}]:{2!s}</h1>'.format(hostname, ip, port))
# self.wfile.write('<img src="http://{}:{}/mjpg"/>'.format(ip, port))
# self.wfile.write('<p>{0!s}</p>'.format((self.version_string())))
# self.wfile.write('<p>The mjpg stream can be accessed directly at:<ul>')
# self.wfile.write('<li><a href="http://{0!s}:{1!s}/mjpg"/>http://{0!s}:{1!s}/mjpg</a></li>'.format(ip, port))
# self.wfile.write('<li><a href="http://{0!s}:{1!s}/mjpg"/>http://{0!s}:{1!s}/mjpg</a></li>'.format(hostname, port))
# self.wfile.write('</p></ul>')
# self.wfile.write('<p>This only handles one connection at a time</p>')
# self.wfile.write('</body></html>')
else:
print('error', self.path)
self.send_response(404)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('<html><head></head><body>')
self.wfile.write('<h1>{0!s} not found</h1>'.format(self.path))
self.wfile.write('</body></html>')
def handleArgs():
parser = argparse.ArgumentParser(version=VERSION, description='A simple mjpeg server Example: mjpeg-server -p 8080 --camera 4')
parser.add_argument('-p', '--port', help='mjpeg publisher port, default is 9000', type=int, default=9000)
parser.add_argument('-c', '--camera', help='set opencv camera number, ex. -c 1', type=int, default=0)
parser.add_argument('-t', '--type', help='set camera type, either pi or cv, ex. -t pi', default='cv')
parser.add_argument('-s', '--size', help='set size', nargs=2, type=int, default=(320, 240))
args = vars(parser.parse_args())
args['size'] = (args['size'][0], args['size'][1])
return args
def main():
args = handleArgs()
try:
# win = args['size']
# if args['type'] is 'cv':
# cv = args['camera']
# mjpgServer.setUpCamera(cv=cv, win=win)
# else:
# mjpgServer.setUpCamera(pi=True, win=win)
server = HTTPServer(('0.0.0.0', args['port']), mjpgServer)
print("server started on {}:{}".format(Socket.gethostname(), args['port']))
server.serve_forever()
except KeyboardInterrupt:
print('KeyboardInterrupt')
server.socket.close()
if __name__ == '__main__':
main()
|
the-stack_0_1414 | #!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool descendants/ancestors information update.
Test mempool update of transaction descendants/ancestors information (count, size)
when transactions have been re-added from a disconnected block to the mempool.
"""
import time
from decimal import Decimal
from test_framework.test_framework import MERICATestFramework
from test_framework.util import assert_equal
class MempoolUpdateFromBlockTest(MERICATestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [['-limitdescendantsize=1000', '-limitancestorsize=1000']]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def transaction_graph_test(self, size, n_tx_to_mine=None, start_input_txid='', end_address='', fee=Decimal(0.00100000)):
"""Create an acyclic tournament (a type of directed graph) of transactions and use it for testing.
Keyword arguments:
size -- the order N of the tournament which is equal to the number of the created transactions
n_tx_to_mine -- the number of transaction that should be mined into a block
If all of the N created transactions tx[0]..tx[N-1] reside in the mempool,
the following holds:
the tx[K] transaction:
- has N-K descendants (including this one), and
- has K+1 ancestors (including this one)
More details: https://en.wikipedia.org/wiki/Tournament_(graph_theory)
"""
if not start_input_txid:
start_input_txid = self.nodes[0].getblock(self.nodes[0].getblockhash(1))['tx'][0]
if not end_address:
end_address = self.nodes[0].getnewaddress()
first_block_hash = ''
tx_id = []
tx_size = []
self.log.info('Creating {} transactions...'.format(size))
for i in range(0, size):
self.log.debug('Preparing transaction #{}...'.format(i))
# Prepare inputs.
if i == 0:
inputs = [{'txid': start_input_txid, 'vout': 0}]
inputs_value = self.nodes[0].gettxout(start_input_txid, 0)['value']
else:
inputs = []
inputs_value = 0
for j, tx in enumerate(tx_id[0:i]):
# Transaction tx[K] is a child of each of previous transactions tx[0]..tx[K-1] at their output K-1.
vout = i - j - 1
inputs.append({'txid': tx_id[j], 'vout': vout})
inputs_value += self.nodes[0].gettxout(tx, vout)['value']
self.log.debug('inputs={}'.format(inputs))
self.log.debug('inputs_value={}'.format(inputs_value))
# Prepare outputs.
tx_count = i + 1
if tx_count < size:
# Transaction tx[K] is an ancestor of each of subsequent transactions tx[K+1]..tx[N-1].
n_outputs = size - tx_count
output_value = ((inputs_value - fee) / Decimal(n_outputs)).quantize(Decimal('0.00000001'))
outputs = {}
for _ in range(n_outputs):
outputs[self.nodes[0].getnewaddress()] = output_value
else:
output_value = (inputs_value - fee).quantize(Decimal('0.00000001'))
outputs = {end_address: output_value}
self.log.debug('output_value={}'.format(output_value))
self.log.debug('outputs={}'.format(outputs))
# Create a new transaction.
unsigned_raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
signed_raw_tx = self.nodes[0].signrawtransactionwithwallet(unsigned_raw_tx)
tx_id.append(self.nodes[0].sendrawtransaction(signed_raw_tx['hex']))
tx_size.append(self.nodes[0].getrawmempool(True)[tx_id[-1]]['vsize'])
if tx_count in n_tx_to_mine:
# The created transactions are mined into blocks by batches.
self.log.info('The batch of {} transactions has been accepted into the mempool.'.format(len(self.nodes[0].getrawmempool())))
block_hash = self.nodes[0].generate(1)[0]
if not first_block_hash:
first_block_hash = block_hash
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.log.info('All of the transactions from the current batch have been mined into a block.')
elif tx_count == size:
# At the end all of the mined blocks are invalidated, and all of the created
# transactions should be re-added from disconnected blocks to the mempool.
self.log.info('The last batch of {} transactions has been accepted into the mempool.'.format(len(self.nodes[0].getrawmempool())))
start = time.time()
self.nodes[0].invalidateblock(first_block_hash)
end = time.time()
assert_equal(len(self.nodes[0].getrawmempool()), size)
self.log.info('All of the recently mined transactions have been re-added into the mempool in {} seconds.'.format(end - start))
self.log.info('Checking descendants/ancestors properties of all of the in-mempool transactions...')
for k, tx in enumerate(tx_id):
self.log.debug('Check transaction #{}.'.format(k))
assert_equal(self.nodes[0].getrawmempool(True)[tx]['descendantcount'], size - k)
assert_equal(self.nodes[0].getrawmempool(True)[tx]['descendantsize'], sum(tx_size[k:size]))
assert_equal(self.nodes[0].getrawmempool(True)[tx]['ancestorcount'], k + 1)
assert_equal(self.nodes[0].getrawmempool(True)[tx]['ancestorsize'], sum(tx_size[0:(k + 1)]))
def run_test(self):
# Use batch size limited by DEFAULT_ANCESTOR_LIMIT = 25 to not fire "too many unconfirmed parents" error.
self.transaction_graph_test(size=100, n_tx_to_mine=[25, 50, 75])
if __name__ == '__main__':
MempoolUpdateFromBlockTest().main()
|
the-stack_0_1415 | import re
from itertools import groupby
from operator import attrgetter
import itertools
import urllib
import json
from datetime import date, timedelta, datetime, MINYEAR
from dateutil.relativedelta import relativedelta
from dateutil import parser
import requests
import sqlalchemy as sa
from collections import namedtuple
import os
from haystack.backends.solr_backend import SolrSearchQuery
from haystack.query import SearchQuerySet
import pytz
from django.db import transaction, connection, connections
from django.conf import settings
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.decorators import login_required
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.shortcuts import render
from django.db.models.functions import Lower, Now, Cast
from django.db.models import (Max,
Min,
Prefetch,
Case,
When,
Value,
IntegerField,
Q)
from django.utils import timezone
from django.utils.text import slugify
from django.views.generic import TemplateView
from django.http import HttpResponse, HttpResponseRedirect, HttpResponsePermanentRedirect, HttpResponseNotFound
from django.shortcuts import render_to_response, redirect
from django.core import management
from django.core.serializers import serialize
from django.views.generic import View
from councilmatic_core.views import IndexView, BillDetailView, \
CouncilMembersView, AboutView, CommitteeDetailView, CommitteesView, \
PersonDetailView, EventDetailView, EventsView, CouncilmaticFacetedSearchView
from councilmatic_core.models import *
from opencivicdata.core.models import PersonLink
from lametro.models import LAMetroBill, LAMetroPost, LAMetroPerson, \
LAMetroEvent, LAMetroOrganization, LAMetroSubject
from lametro.forms import AgendaUrlForm, AgendaPdfForm, LAMetroCouncilmaticSearchForm
from councilmatic.settings_jurisdiction import MEMBER_BIOS
from councilmatic.settings import MERGER_BASE_URL, PIC_BASE_URL
from opencivicdata.legislative.models import EventDocument
app_timezone = pytz.timezone(settings.TIME_ZONE)
class LAMetroIndexView(IndexView):
template_name = 'lametro/index.html'
event_model = LAMetroEvent
@property
def extra_context(self):
extra = {}
extra['upcoming_board_meetings'] = self.event_model.upcoming_board_meetings()[:2]
extra['current_meeting'] = self.event_model.current_meeting()
extra['bilingual'] = bool([e for e in extra['current_meeting'] if e.bilingual])
extra['USING_ECOMMENT'] = settings.USING_ECOMMENT
extra['todays_meetings'] = self.event_model.todays_meetings().order_by('start_date')
extra['form'] = LAMetroCouncilmaticSearchForm()
return extra
class LABillDetail(BillDetailView):
model = LAMetroBill
template_name = 'lametro/legislation.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
bill = self.get_object()
context['attachments'] = bill.attachments.all().order_by(Lower('note'))
actions = bill.actions.all()
organization_lst = [action.organization for action in actions]
context['sponsorships'] = set(organization_lst)
related_bills = context['legislation']\
.related_bills\
.exclude(related_bill__isnull=True)\
.annotate(latest_date=Max('related_bill__actions__date'))\
.order_by('-latest_date')
context['related_bills'] = related_bills
context['actions'] = bill.actions_and_agendas
return context
class LAMetroEventDetail(EventDetailView):
model = LAMetroEvent
template_name = 'lametro/event.html'
def post(self, request, *args, **kwargs):
self.object = self.get_object() # Assign object to detail view, so that get_context_data can find this variable: https://stackoverflow.com/questions/34460708/checkoutview-object-has-no-attribute-object
event = self.get_object()
event_slug = event.slug
# Look for the button name and assign form values.
if 'url_form' in request.POST:
url_form = AgendaUrlForm(request.POST)
pdf_form = AgendaPdfForm()
elif 'pdf_form' in request.POST:
pdf_form = AgendaPdfForm(request.POST, request.FILES)
url_form = AgendaUrlForm()
# Validate forms and redirect.
if url_form.is_valid():
agenda_url = url_form['agenda'].value()
document_obj, created = EventDocument.objects.get_or_create(
event=event,
note='Event Document - Manual upload URL')
document_obj.date=timezone.now().date()
document_obj.save()
document_obj.links.create(url=agenda_url)
return HttpResponseRedirect('/event/%s' % event_slug)
elif pdf_form.is_valid() and 'pdf_form' in request.POST:
agenda_pdf = request.FILES['agenda']
handle_uploaded_agenda(agenda=agenda_pdf, event=event)
return HttpResponseRedirect('/event/%s' % event_slug)
else:
return self.render_to_response(self.get_context_data(url_form=url_form, pdf_form=pdf_form))
def get_object(self):
# Get the event with prefetched media_urls in proper order.
event = LAMetroEvent.objects.with_media().get(slug=self.kwargs['slug'])
return event
def get_context_data(self, **kwargs):
context = super(EventDetailView, self).get_context_data(**kwargs)
event = context['event']
# Metro admins should see a status report if Legistar is down.
# GET the calendar page, which contains relevant URL for agendas.
if self.request.user.is_authenticated:
r = requests.get('https://metro.legistar.com/calendar.aspx')
context['legistar_ok'] = r.ok
# GET the event URL; allow admin to delete event if 404
response = requests.get(event.api_source.url)
context['event_ok'] = response.ok
try:
context['minutes'] = event.documents.get(note__icontains='minutes')
except EventDocument.DoesNotExist:
pass
agenda_with_board_reports = event.agenda\
.filter(related_entities__bill__versions__isnull=False)\
.annotate(int_order=Cast('order', IntegerField()))\
.order_by('int_order')
# Find agenda link.
if event.documents.all():
for document in event.documents.all():
if "Agenda" in document.note:
context['agenda_url'] = document.links.first().url
context['document_timestamp'] = document.date
elif "Manual upload URL" in document.note:
context['uploaded_agenda_url'] = document.links.first().url
context['document_timestamp'] = document.date
elif "Manual upload PDF" in document.note:
context['uploaded_agenda_pdf'] = document.links.first().url
context['document_timestamp'] = document.date
'''
LA Metro Councilmatic uses the adv_cache library
to partially cache templates: in the event view, we cache
the entire template, except the iframe. (N.B. With
        this library, the views are not cached unless
        explicitly wrapped in a Django cache decorator.
Nonetheless, several popular browsers (e.g.,
Chrome and Firefox) retrieve cached iframe images,
regardless of the site's caching specifications.
We use the agenda's "date" timestamp to bust
the iframe cache: we save it inside context and
then assign it as the "name" of the iframe,
preventing the browser from retrieving a cached
iframe, when the timestamp changes.
'''
context['related_board_reports'] = agenda_with_board_reports
context['base_url'] = PIC_BASE_URL # Give JS access to this variable
context['has_agenda'] = (context.get('agenda_url') or
context.get('uploaded_agenda_url') or
context.get('uploaded_agenda_pdf'))
# Render forms if not a POST request
if 'url_form' not in context:
context['url_form'] = AgendaUrlForm()
if 'pdf_form' not in context:
context['pdf_form'] = AgendaPdfForm()
context['USING_ECOMMENT'] = settings.USING_ECOMMENT
return context
def handle_uploaded_agenda(agenda, event):
with open('lametro/static/pdf/agenda-%s.pdf' % event.slug, 'wb+') as destination:
for chunk in agenda.chunks():
destination.write(chunk)
# Create the document in database
document_obj, created = EventDocument.objects.get_or_create(
event=event,
note='Event Document - Manual upload PDF')
document_obj.date = timezone.now().date
document_obj.links.create(url='pdf/agenda-%s.pdf' % event.slug)
document_obj.save()
# Collect static to render PDF on server
management.call_command('collectstatic', '--noinput')
@login_required
def delete_submission(request, event_slug):
event = LAMetroEvent.objects.get(slug=event_slug)
event_doc = EventDocument.objects.filter(event_id=event.id, note__icontains='Manual upload')
for e in event_doc:
# Remove stored PDF from Metro app.
if 'Manual upload PDF' in e.note:
try:
os.remove('lametro/static/%s' % e.links.get().url )
except OSError:
pass
e.delete()
return HttpResponseRedirect('/event/%s' % event_slug)
@login_required
def delete_event(request, event_slug):
event = LAMetroEvent.objects.get(slug=event_slug)
event.delete()
return HttpResponseRedirect('/events/')
class LAMetroEventsView(EventsView):
template_name = 'lametro/events.html'
def get_context_data(self, **kwargs):
context = {}
# Did the user set date boundaries?
start_date_str = self.request.GET.get('from')
end_date_str = self.request.GET.get('to')
day_grouper = lambda x: (x.local_start_time.year, x.local_start_time.month, x.local_start_time.day)
minutes_queryset = EventDocument.objects.filter(note__icontains='minutes')
# If yes...
if start_date_str and end_date_str:
context['start_date'] = start_date_str
context['end_date'] = end_date_str
start_date_time = parser.parse(start_date_str)
end_date_time = parser.parse(end_date_str)
select_events = LAMetroEvent.objects\
.with_media()\
.filter(start_time__gt=start_date_time)\
.filter(start_time__lt=end_date_time)\
                .order_by('start_time')
select_events = select_events.prefetch_related(Prefetch('documents',
minutes_queryset,
to_attr='minutes'))\
.prefetch_related('minutes__links')
org_select_events = []
for event_date, events in itertools.groupby(select_events, key=day_grouper):
events = sorted(events, key=attrgetter('start_time'))
org_select_events.append([date(*event_date), events])
context['select_events'] = org_select_events
# If all meetings
elif self.request.GET.get('show'):
all_events = LAMetroEvent.objects\
.with_media()\
.order_by('-start_time')
org_all_events = []
for event_date, events in itertools.groupby(all_events, key=day_grouper):
events = sorted(events, key=attrgetter('start_time'))
org_all_events.append([date(*event_date), events])
context['all_events'] = org_all_events
# If no...
else:
# Upcoming events
future_events = LAMetroEvent.objects\
.with_media()\
.filter(start_time__gt=timezone.now())\
.order_by('start_time')
org_future_events = []
for event_date, events in itertools.groupby(future_events, key=day_grouper):
events = sorted(events, key=attrgetter('start_time'))
org_future_events.append([date(*event_date), events])
context['future_events'] = org_future_events
# Past events
past_events = LAMetroEvent.objects\
.with_media()\
.filter(start_time__lt=timezone.now())\
.order_by('-start_time')
past_events = past_events.prefetch_related(Prefetch('documents',
minutes_queryset,
to_attr='minutes'))\
.prefetch_related('minutes__links')
org_past_events = []
for event_date, events in itertools.groupby(past_events, key=day_grouper):
events = sorted(events, key=attrgetter('start_time'))
org_past_events.append([date(*event_date), events])
context['past_events'] = org_past_events
context['user_subscribed'] = False
if self.request.user.is_authenticated:
user = self.request.user
context['user'] = user
if settings.USING_NOTIFICATIONS:
if (len(user.eventssubscriptions.all()) > 0):
context['user_subscribed'] = True
return context
class LABoardMembersView(CouncilMembersView):
template_name = 'lametro/board_members.html'
def map(self):
maps = {'map_geojson_districts': {'type': 'FeatureCollection',
'features': []},
'map_geojson_sectors': {'type': 'FeatureCollection',
'features': []},
'map_geojson_city': {'type': 'FeatureCollection',
'features': []},
}
posts = LAMetroPost.objects\
.filter(shape__isnull=False)\
.exclude(label='Appointee of Mayor of the City of Los Angeles')
for post in posts:
district = post.label
try:
current_membership = post.memberships.get(end_date_dt__gt=Now())
except ObjectDoesNotExist:
council_member = 'Vacant'
detail_link = ''
else:
council_member = current_membership.person.name
detail_link = current_membership.person.slug
feature = {
'type': 'Feature',
'geometry': json.loads(post.shape.json),
'properties': {
'district': district,
'council_member': council_member,
'detail_link': '/person/' + detail_link,
'select_id': 'polygon-{}'.format(slugify(district)),
},
}
if 'council_district' in post.division_id:
maps['map_geojson_districts']['features'].append(feature)
if 'la_metro_sector' in post.division_id:
maps['map_geojson_sectors']['features'].append(feature)
if post.division_id == 'ocd-division/country:us/state:ca/place:los_angeles':
maps['map_geojson_city']['features'].append(feature)
return maps
def get_queryset(self):
board = Organization.objects.get(name=settings.OCD_CITY_COUNCIL_NAME)
memberships = board.memberships.filter(Q(role='Board Member') |
Q(role='Nonvoting Board Member'))\
.filter(end_date_dt__gte=Now())
display_order = {
'Chair': 0,
'Vice Chair': 1,
'1st Chair': 1,
'1st Vice Chair': 1,
'2nd Chair': 2,
'2nd Vice Chair': 2,
'Board Member': 3,
'Nonvoting Board Member': 4,
}
sortable_memberships = []
# Display board leadership first. Person.board_office is null for
# members without leadership roles, so fall back to using their
# board membership role to decide display order.
for m in memberships:
primary_post = m.person.board_office or m
m.index = display_order[primary_post.role]
sortable_memberships.append(m)
return sorted(sortable_memberships, key=lambda x: (
x.index,
x.person.family_name
))
def get_context_data(self, *args, **kwargs):
context = super(CouncilMembersView, self).get_context_data(**kwargs)
context['seo'] = self.get_seo_blob()
board = LAMetroOrganization.objects.get(name=settings.OCD_CITY_COUNCIL_NAME)
context['recent_activity'] = board.actions.order_by('-date', '-bill__identifier', '-order')
context['recent_events'] = board.recent_events
if settings.MAP_CONFIG:
context.update(self.map())
return context
class LAMetroAboutView(AboutView):
template_name = 'lametro/about.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['timestamp'] = datetime.datetime.now(app_timezone).strftime('%m%d%Y%s')
return context
class LACommitteesView(CommitteesView):
template_name = 'lametro/committees.html'
def get_queryset(self):
'''
We only want committees that have at least one member who is not
the CEO. We also do not want to count the CEO toward the
committee size.
'''
ceo = LAMetroPerson.ceo()
memberships = Membership.objects\
.exclude(person=ceo)\
.filter(end_date_dt__gt=Now(),
organization__classification='committee')
qs = LAMetroOrganization.objects\
.filter(classification='committee')\
.filter(memberships__in=memberships)\
.distinct()
qs = qs.prefetch_related(Prefetch('memberships',
memberships,
to_attr='current_members'))
return qs
class LACommitteeDetailView(CommitteeDetailView):
model = LAMetroOrganization
template_name = 'lametro/committee.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
committee = context['committee']
if getattr(settings, 'COMMITTEE_DESCRIPTIONS', None):
description = settings.COMMITTEE_DESCRIPTIONS.get(committee.slug)
context['committee_description'] = description
ceo = LAMetroPerson.ceo()
non_ceos = committee.all_members\
.annotate(index=Case(
When(role='Chair', then=Value(0)),
When(role='Vice Chair', then=Value(1)),
When(role='1st Vice Chair', then=Value(1)),
When(role='2nd Vice Chair', then=Value(2)),
When(role='Member', then=Value(3)),
default=Value(999),
output_field=IntegerField()))\
.exclude(person=ceo)\
.order_by('index', 'person__family_name', 'person__given_name')
context['non_ceos'] = non_ceos
context['ceo'] = ceo
return context
class LAPersonDetailView(PersonDetailView):
template_name = 'lametro/person.html'
model = LAMetroPerson
def dispatch(self, request, *args, **kwargs):
slug = self.kwargs['slug']
try:
person = self.model.objects.get(slug=slug)
except LAMetroPerson.DoesNotExist:
person = None
else:
response = super().dispatch(request, *args, **kwargs)
if not person:
# Grab the first and last name from slug like "john-smith-af5a8ab39aad"
short_slug = '-'.join(slug.split('-')[:-1])
try:
person = self.model.objects.get(slug__startswith=short_slug)
except (LAMetroPerson.DoesNotExist, LAMetroPerson.MultipleObjectsReturned):
# Return a 404 if more than one matching slug, or if there are no matching slugs
response = HttpResponseNotFound()
else:
response = HttpResponsePermanentRedirect(reverse('person', args=[person.slug]))
return response
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
person = context['person']
council_post = person.latest_council_membership.post
try:
context['qualifying_post'] = council_post.acting_label
except AttributeError:
context['qualifying_post'] = None
try:
if council_post.shape:
context['map_geojson'] = serialize('geojson',
[council_post],
geometry_field='shape',
fields=())
else:
context['map_geojson'] = None
except AttributeError:
context['map_geojson'] = None
if person.committee_sponsorships:
context['sponsored_legislation'] = person.committee_sponsorships
else:
context['sponsored_legislation'] = []
context['memberships_list'] = person.current_memberships\
.exclude(organization__name='Board of Directors')\
.annotate(index=Case(
When(role='Chair', then=Value(0)),
When(role='Vice Chair', then=Value(1)),
When(role='1st Vice Chair', then=Value(1)),
When(role='2nd Vice Chair', then=Value(2)),
When(role='Member', then=Value(3)),
default=Value(999),
output_field=IntegerField()))\
.order_by('index')
if person.slug in MEMBER_BIOS:
context['member_bio'] = MEMBER_BIOS[person.slug]
try:
context['website_url'] = person.links.get(note='web_site').url
except PersonLink.DoesNotExist:
pass
return context
class IdentifierBoostSearchQuery(SolrSearchQuery):
def run(self, spelling_query=None, **kwargs):
'''
If the search contains identifiers, boost results with matching
identifiers.
Reference:
https://medium.com/@pablocastelnovo/if-they-match-i-want-them-to-be-always-first-boosting-documents-in-apache-solr-with-the-boost-362abd36476c
'''
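# For instance, a query containing "2018-0123" (which matches the \d{4}-\d{4}
# pattern below) would add roughly:
#   kwargs.update({'defType': 'edismax', 'bq': 'identifier:"2018-0123"^2.0'})
# so results whose identifier field matches exactly are ranked first.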
identifiers = set(re.findall(r'\d{4}-\d{4}', self.build_query()))
if identifiers:
kwargs.update({
'defType': 'edismax',
'bq': '+'.join('identifier:"{}"^2.0'.format(i) for i in identifiers),
})
return super().run(spelling_query, **kwargs)
class LAMetroCouncilmaticFacetedSearchView(CouncilmaticFacetedSearchView):
def __init__(self, *args, **kwargs):
kwargs['form_class'] = LAMetroCouncilmaticSearchForm
super(LAMetroCouncilmaticFacetedSearchView, self).__init__(*args, **kwargs)
def extra_context(self):
extra_context = super().extra_context()
extra_context['topic_facets'] = [facet for facet, _ in LAMetroSubject.CLASSIFICATION_CHOICES]
return extra_context
def build_form(self, form_kwargs=None):
if not form_kwargs:
form_kwargs = {}
form_kwargs['selected_facets'] = self.request.GET.getlist("selected_facets")
form_kwargs['search_corpus'] = 'bills' if self.request.GET.get('search-reports') else 'all'
form_kwargs['result_type'] = self.request.GET.get('result_type', 'all')
sqs = SearchQuerySet(
query=IdentifierBoostSearchQuery('default')
).facet('bill_type', sort='index')\
.facet('sponsorships', sort='index')\
.facet('legislative_session', sort='index')\
.facet('inferred_status')\
.facet('topics')\
.facet('lines_and_ways')\
.facet('phase')\
.facet('project')\
.facet('metro_location')\
.facet('geo_admin_location')\
.facet('motion_by')\
.facet('significant_date')\
.facet('plan_program_policy')\
.highlight(**{'hl.fl': 'text,attachment_text'})
data = None
kwargs = {
'load_all': self.load_all,
}
if form_kwargs:
kwargs.update(form_kwargs)
dataDict = {}
if len(self.request.GET):
data = self.request.GET
dataDict = dict(data)
if self.searchqueryset is not None:
kwargs['searchqueryset'] = sqs
if dataDict.get('sort_by'):
for el in dataDict['sort_by']:
if el == 'date':
if dataDict.get('order_by') == ['asc']:
kwargs['searchqueryset'] = sqs.order_by('last_action_date')
else:
kwargs['searchqueryset'] = sqs.order_by('-last_action_date')
if el == 'title':
if dataDict.get('order_by') == ['desc']:
kwargs['searchqueryset'] = sqs.order_by('-sort_name')
else:
kwargs['searchqueryset'] = sqs.order_by('sort_name')
if el == 'relevance':
kwargs['searchqueryset'] = sqs
elif dataDict.get('q'):
kwargs['searchqueryset'] = sqs
else:
kwargs['searchqueryset'] = sqs.order_by('-last_action_date')
return self.form_class(data, **kwargs)
class GoogleView(IndexView):
template_name = 'lametro/google66b34bb6957ad66c.html'
class LAMetroArchiveSearch(TemplateView):
template_name = 'lametro/archive_search.html'
def metro_login(request):
logout(request)
if request.method == 'POST':
form = AuthenticationForm(data=request.POST)
if form.is_valid():
user = form.get_user()
if user is not None:
login(request, user)
return HttpResponseRedirect('/events/')
else:
form = AuthenticationForm()
return render(request, 'lametro/metro_login.html', {'form': form})
def metro_logout(request):
logout(request)
return HttpResponseRedirect('/')
|
the-stack_0_1416 | # encoding: UTF-8
# Copyright 2016 Google.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import tensorflowvisu
import math
import mnistdata
print("Tensorflow version " + tf.__version__)
tf.set_random_seed(0)
# neural network with 5 layers
#
# · · · · · · · · · · (input data, flattened pixels) X [batch, 784] # 784 = 28*28
# \x/x\x/x\x/x\x/x\x/ -- fully connected layer (sigmoid+BN) W1 [784, 200] B1[200]
# · · · · · · · · · Y1 [batch, 200]
# \x/x\x/x\x/x\x/ -- fully connected layer (sigmoid+BN) W2 [200, 100] B2[100]
# · · · · · · · Y2 [batch, 100]
# \x/x\x/x\x/ -- fully connected layer (sigmoid+BN) W3 [100, 60] B3[60]
# · · · · · Y3 [batch, 60]
# \x/x\x/ -- fully connected layer (sigmoid+BN) W4 [60, 30] B4[30]
# · · · Y4 [batch, 30]
# \x/ -- fully connected layer (softmax+BN) W5 [30, 10] B5[10]
# · Y5 [batch, 10]
# Download images and labels into mnist.test (10K images+labels) and mnist.train (60K images+labels)
mnist = mnistdata.read_data_sets("data", one_hot=True, reshape=False)
# input X: 28x28 grayscale images, the first dimension (None) will index the images in the mini-batch
X = tf.placeholder(tf.float32, [None, 28, 28, 1])
# correct answers will go here
Y_ = tf.placeholder(tf.float32, [None, 10])
# train/test selector for batch normalisation
tst = tf.placeholder(tf.bool)
# training iteration
iter = tf.placeholder(tf.int32)
# five layers and their number of neurons (tha last layer has 10 softmax neurons)
L = 200
M = 100
N = 60
P = 30
Q = 10
# Weights initialised with small random values between -0.2 and +0.2
# When using RELUs, make sure biases are initialised with small *positive* values for example 0.1 = tf.ones([K])/10
W1 = tf.Variable(tf.truncated_normal([784, L], stddev=0.1)) # 784 = 28 * 28
S1 = tf.Variable(tf.ones([L]))
O1 = tf.Variable(tf.zeros([L]))
W2 = tf.Variable(tf.truncated_normal([L, M], stddev=0.1))
S2 = tf.Variable(tf.ones([M]))
O2 = tf.Variable(tf.zeros([M]))
W3 = tf.Variable(tf.truncated_normal([M, N], stddev=0.1))
S3 = tf.Variable(tf.ones([N]))
O3 = tf.Variable(tf.zeros([N]))
W4 = tf.Variable(tf.truncated_normal([N, P], stddev=0.1))
S4 = tf.Variable(tf.ones([P]))
O4 = tf.Variable(tf.zeros([P]))
W5 = tf.Variable(tf.truncated_normal([P, Q], stddev=0.1))
B5 = tf.Variable(tf.zeros([Q]))
## Batch normalisation conclusions with sigmoid activation function:
# BN is applied between logits and the activation function
# On Sigmoids it is very clear that without BN, the sigmoids saturate, with BN, they output
# a clean gaussian distribution of values, especially with high initial learning rates.
# sigmoid, no batch-norm, lr(0.003, 0.0001, 2000) => 97.5%
# sigmoid, batch-norm lr(0.03, 0.0001, 1000) => 98%
# sigmoid, batch-norm, no offsets => 97.3%
# sigmoid, batch-norm, no scales => 98.1% but cannot hold fast learning rate at start
# sigmoid, batch-norm, no scales, no offsets => 96%
# Both scales and offsets are useful with sigmoids.
# With RELUs, the scale variables can be omitted.
# Biases are not useful with batch norm, offsets are to be used instead
# Steady 98.5% accuracy using these parameters:
# moving average decay: 0.998 (equivalent to averaging over two epochs)
# learning rate decay from 0.03 to 0.0001 speed 1000 => max 98.59 at 6500 iterations, 98.54 at 10K it, 98% at 1300it, 98.5% at 3200it
def batchnorm(Ylogits, Offset, Scale, is_test, iteration):
exp_moving_avg = tf.train.ExponentialMovingAverage(0.998, iteration) # passing the iteration prevents averaging across non-existent iterations
bnepsilon = 1e-5
mean, variance = tf.nn.moments(Ylogits, [0])
update_moving_averages = exp_moving_avg.apply([mean, variance])
m = tf.cond(is_test, lambda: exp_moving_avg.average(mean), lambda: mean)
v = tf.cond(is_test, lambda: exp_moving_avg.average(variance), lambda: variance)
Ybn = tf.nn.batch_normalization(Ylogits, m, v, Offset, Scale, bnepsilon)
return Ybn, update_moving_averages
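# For reference, tf.nn.batch_normalization above computes
#   Ybn = Scale * (Ylogits - m) / sqrt(v + bnepsilon) + Offset
# where m and v come from the current batch at training time and from the
# exponential moving averages at test time.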
def no_batchnorm(Ylogits, Offset, Scale, is_test, iteration):
return Ylogits, tf.no_op()
# The model
XX = tf.reshape(X, [-1, 784])
Y1l = tf.matmul(XX, W1)
Y1bn, update_ema1 = batchnorm(Y1l, O1, S1, tst, iter)
Y1 = tf.nn.sigmoid(Y1bn)
Y2l = tf.matmul(Y1, W2)
Y2bn, update_ema2 = batchnorm(Y2l, O2, S2, tst, iter)
Y2 = tf.nn.sigmoid(Y2bn)
Y3l = tf.matmul(Y2, W3)
Y3bn, update_ema3 = batchnorm(Y3l, O3, S3, tst, iter)
Y3 = tf.nn.sigmoid(Y3bn)
Y4l = tf.matmul(Y3, W4)
Y4bn, update_ema4 = batchnorm(Y4l, O4, S4, tst, iter)
Y4 = tf.nn.sigmoid(Y4bn)
Ylogits = tf.matmul(Y4, W5) + B5
Y = tf.nn.softmax(Ylogits)
update_ema = tf.group(update_ema1, update_ema2, update_ema3, update_ema4)
# cross-entropy loss function (= -sum(Y_i * log(Yi)) ), normalised for batches of 100 images
# TensorFlow provides the softmax_cross_entropy_with_logits function to avoid numerical stability
# problems with log(0) which is NaN
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_)
cross_entropy = tf.reduce_mean(cross_entropy)*100
# accuracy of the trained model, between 0 (worst) and 1 (best)
correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# matplotlib visualisation
allweights = tf.concat([tf.reshape(W1, [-1]), tf.reshape(W2, [-1]), tf.reshape(W3, [-1]), tf.reshape(W4, [-1]), tf.reshape(W5, [-1])], 0)
allbiases = tf.concat([tf.reshape(O1, [-1]), tf.reshape(O2, [-1]), tf.reshape(O3, [-1]), tf.reshape(O4, [-1]), tf.reshape(B5, [-1])], 0)
# to use for sigmoid
allactivations = tf.concat([tf.reshape(Y1, [-1]), tf.reshape(Y2, [-1]), tf.reshape(Y3, [-1]), tf.reshape(Y4, [-1])], 0)
# to use for RELU
#allactivations = tf.concat([tf.reduce_max(Y1, [0]), tf.reduce_max(Y2, [0]), tf.reduce_max(Y3, [0]), tf.reduce_max(Y4, [0])], 0)
alllogits = tf.concat([tf.reshape(Y1l, [-1]), tf.reshape(Y2l, [-1]), tf.reshape(Y3l, [-1]), tf.reshape(Y4l, [-1])], 0)
I = tensorflowvisu.tf_format_mnist_images(X, Y, Y_)
It = tensorflowvisu.tf_format_mnist_images(X, Y, Y_, 1000, lines=25)
datavis = tensorflowvisu.MnistDataVis(title4="Logits", title5="activations", histogram4colornum=2, histogram5colornum=2)
# training step
# the learning rate is: 0.0001 + 0.03 * (1/e)^(step/1000), i.e. exponential decay from 0.03->0.0001
lr = 0.0001 + tf.train.exponential_decay(0.03, iter, 1000, 1/math.e)
train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)
# init
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# You can call this function in a loop to train the model, 100 images at a time
def training_step(i, update_test_data, update_train_data):
# training on batches of 100 images with 100 labels
batch_X, batch_Y = mnist.train.next_batch(100)
# compute training values for visualisation
if update_train_data:
a, c, im, al, ac, l = sess.run([accuracy, cross_entropy, I, alllogits, allactivations, lr],
feed_dict={X: batch_X, Y_: batch_Y, iter: i, tst: False})
print(str(i) + ": accuracy:" + str(a) + " loss: " + str(c) + " (lr:" + str(l) + ")")
datavis.append_training_curves_data(i, a, c)
datavis.update_image1(im)
datavis.append_data_histograms(i, al, ac)
# compute test values for visualisation
if update_test_data:
a, c, im = sess.run([accuracy, cross_entropy, It], feed_dict={X: mnist.test.images, Y_: mnist.test.labels, tst: True})
print(str(i) + ": ********* epoch " + str(i*100//mnist.train.images.shape[0]+1) + " ********* test accuracy:" + str(a) + " test loss: " + str(c))
datavis.append_test_curves_data(i, a, c)
datavis.update_image2(im)
# the backpropagation training step, also updates exponential moving averages for batch norm
sess.run([train_step, update_ema], feed_dict={X: batch_X, Y_: batch_Y, iter: i, tst: False})
datavis.animate(training_step, iterations=10000+1, train_data_update_freq=20, test_data_update_freq=100, more_tests_at_start=True)
# to save the animation as a movie, add save_movie=True as an argument to datavis.animate
# to disable the visualisation use the following line instead of the datavis.animate line
# for i in range(10000+1): training_step(i, i % 100 == 0, i % 20 == 0)
print("max test accuracy: " + str(datavis.get_max_test_accuracy()))
# Some results to expect:
# (In all runs, if sigmoids are used, all biases are initialised at 0, if RELUs are used,
# all biases are initialised at 0.1 apart from the last one which is initialised at 0.)
## decaying learning rate from 0.003 to 0.0001 decay_speed 2000, 10K iterations
# final test accuracy = 0.9813 (sigmoid - training cross-entropy not stabilised)
# final test accuracy = 0.9842 (relu - training set fully learned, test accuracy stable)
|
the-stack_0_1419 | ############################################################################
# Copyright 2018 Anthony Ma & Stanford University #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
##############################################################################
# Imports
##############################################################################
from vmd import *
from .stratify_hbonds import *
from .stratify_ligand_hbonds import *
__all__ = ['compute_hydrogen_bonds']
##############################################################################
# Globals
##############################################################################
WATER_TO_PROTEIN_DIST = 5
WATER_TO_LIGAND_DIST = 12
##############################################################################
# Functions
##############################################################################
def filter_duplicates(donors, acceptors):
"""
Filter out duplicate donor acceptor atom pairs
"""
pairs = sorted(list(set([(d, acceptors[idx]) for idx, d in enumerate(donors)])))
new_donors, new_acceptors = [], []
for d, a in pairs:
new_donors.append(d)
new_acceptors.append(a)
return new_donors, new_acceptors
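# Small example: filter_duplicates([106, 91, 106], [91, 55, 91]) returns
# ([91, 106], [55, 91]) -- the repeated (106, 91) pair is dropped and the
# remaining pairs come back sorted by donor index.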
def calc_ligand_donor_acceptor_pairs(traj_frag_molid, frame_idx, solvent_resn, sele_id, ligands,
HBOND_CUTOFF_DISTANCE, HBOND_CUTOFF_ANGLE):
"""
Compute donor and acceptor atom pairs for hydrogen bonds in terms of numeric VMD indices
"""
donors, acceptors = [], []
for ligand in ligands:
if sele_id is None:
measure_hbonds_command = "measure hbonds %s %s [atomselect %s \"" \
"(resname %s and within %s of resname %s) or " \
"(not carbon and not sulfur and protein within %s of resname %s) or " \
"(not carbon and not sulfur and resname %s) and (not lipid)\" frame %s]" % \
(HBOND_CUTOFF_DISTANCE, HBOND_CUTOFF_ANGLE, traj_frag_molid, solvent_resn,
WATER_TO_LIGAND_DIST, ligand, WATER_TO_LIGAND_DIST, ligand, ligand, frame_idx)
else:
measure_hbonds_command = "measure hbonds %s %s [atomselect %s \"" \
"(resname %s and within %s of resname %s) or " \
"((not carbon and not sulfur and protein and (%s)) and within %s of resname %s) or " \
"(not carbon and not sulfur and resname %s) and (not lipid)\" frame %s]" % \
(HBOND_CUTOFF_DISTANCE, HBOND_CUTOFF_ANGLE, traj_frag_molid, solvent_resn,
WATER_TO_LIGAND_DIST, ligand, sele_id, WATER_TO_LIGAND_DIST, ligand, ligand,
frame_idx)
# donor_acceptor_indices should be of format "{106 91 85 99 120 130} {91 55 55 69 105 69} {107 92 86 100 121 131}"
donor_acceptor_indices = evaltcl(measure_hbonds_command)
# Parse atom indices
donor_acceptor_lists = donor_acceptor_indices.split("}")
# Filter out improperly parsed coordinates
if len(donor_acceptor_lists) != 4:
continue
donor_list = donor_acceptor_lists[0].split("{")[1].split(" ")
acceptor_list = donor_acceptor_lists[1].split("{")[1].split(" ")
for idx, d in enumerate(donor_list):
a = acceptor_list[idx]
if d == "" or a == "":
continue
donors.append(int(d))
acceptors.append(int(a))
return donors, acceptors
def calc_donor_acceptor_pairs(traj_frag_molid, frame_idx, solvent_resn, sele_id,
HBOND_CUTOFF_DISTANCE, HBOND_CUTOFF_ANGLE):
"""
Compute donor and acceptor atom pairs for hydrogen bonds in terms of numeric VMD indices
"""
# Measure Hbonds command
if sele_id is None:
measure_hbonds_command = "measure hbonds %s %s [atomselect %s \"" \
"(resname %s and within %s of protein) or " \
"protein and not lipid and not carbon and not sulfur\" frame %s]" % \
(HBOND_CUTOFF_DISTANCE, HBOND_CUTOFF_ANGLE, traj_frag_molid, solvent_resn,
WATER_TO_PROTEIN_DIST, frame_idx)
else:
measure_hbonds_command = "measure hbonds %s %s [atomselect %s \"" \
"(resname %s and within %s of (protein and (%s))) or " \
"protein and (%s) and not lipid and not carbon and not sulfur\" frame %s]" % \
(HBOND_CUTOFF_DISTANCE, HBOND_CUTOFF_ANGLE, traj_frag_molid, solvent_resn,
WATER_TO_PROTEIN_DIST, sele_id, sele_id, frame_idx)
# donor_acceptor_indices should be of format "{106 91 85 99 120 130} {91 55 55 69 105 69} {107 92 86 100 121 131}"
donor_acceptor_indices = evaltcl(measure_hbonds_command)
# Parse atom indices
donor_acceptor_lists = donor_acceptor_indices.split("}")
# Filter out improperly parsed coordinates
if len(donor_acceptor_lists) != 4:
return [], []
donor_list = donor_acceptor_lists[0].split("{")[1].split(" ")
acceptor_list = donor_acceptor_lists[1].split("{")[1].split(" ")
donors, acceptors = [], []
for idx, d in enumerate(donor_list):
a = acceptor_list[idx]
if d == "" or a == "":
continue
donors.append(int(d))
acceptors.append(int(a))
return donors, acceptors
def compute_hydrogen_bonds(traj_frag_molid, frame_idx, index_to_label, solvent_resn, sele_id, ligand=None,
HBOND_CUTOFF_DISTANCE=3.5, HBOND_CUTOFF_ANGLE=70):
"""
Compute hydrogen bonds involving protein for a single frame of simulation
Parameters
----------
traj_frag_molid: int
Specifies which trajectory fragment in VMD to perform computations upon
frame_idx: int
Specify frame index with respect to the smaller trajectory fragment
solvent_resn: string, default = TIP3
Denotes the resname of solvent in simulation
sele_id: string, default = None
Compute contacts on subset of atom selection based on VMD query
ligand: list of string, default = None
Residue names of ligands; if provided, hydrogen bonds are computed between the ligand(s), solvent, and nearby protein ("lhb") rather than protein only ("hb")
HBOND_CUTOFF_DISTANCE: float, default = 3.5 Angstroms
HBOND_CUTOFF_ANGLE: float, default = 70 degrees
Return
------
hbonds: list of tuples, [(frame_idx, atom1_label, atom2_label, itype), ...]
"""
itype = "hb"
if ligand:
itype = "lhb"
donors, acceptors = calc_ligand_donor_acceptor_pairs(traj_frag_molid, frame_idx, solvent_resn, sele_id, ligand,
HBOND_CUTOFF_DISTANCE, HBOND_CUTOFF_ANGLE)
else:
donors, acceptors = calc_donor_acceptor_pairs(traj_frag_molid, frame_idx, solvent_resn, sele_id,
HBOND_CUTOFF_DISTANCE, HBOND_CUTOFF_ANGLE)
donors, acceptors = filter_duplicates(donors, acceptors)
hbonds = []
for idx, donor in enumerate(donors):
acceptor = acceptors[idx]
donor_label, acceptor_label = index_to_label[donor], index_to_label[acceptor]
hbonds.append([frame_idx, donor_label, acceptor_label, itype])
# Perform post processing on hbonds list to stratify into different subtypes
if itype == "hb":
hbond_subtypes = stratify_hbond_subtypes(hbonds, solvent_resn)
elif itype == "lhb":
hbond_subtypes = stratify_ligand_hbond_subtypes(hbonds, solvent_resn, ligand)
return hbond_subtypes
|
the-stack_0_1421 | import random
import numpy as np
import cv2
import lmdb
import torch
import torch.utils.data as data
import data.util as util
class LQGTDataset(data.Dataset):
"""
Read LQ (Low Quality, e.g. LR (Low Resolution), blurry, etc) and GT image pairs.
If only GT images are provided, generate LQ images on-the-fly.
"""
def __init__(self, opt):
super(LQGTDataset, self).__init__()
self.opt = opt
self.data_type = self.opt['data_type']
self.paths_LQ, self.paths_GT = None, None
self.sizes_LQ, self.sizes_GT = None, None
self.LQ_env, self.GT_env = None, None # environments for lmdb
self.paths_GT, self.sizes_GT = util.get_image_paths(self.data_type, opt['dataroot_GT'])
self.paths_LQ, self.sizes_LQ = util.get_image_paths(self.data_type, opt['dataroot_LQ'])
assert self.paths_GT, 'Error: GT path is empty.'
if self.paths_LQ and self.paths_GT:
assert len(self.paths_LQ) == len(
self.paths_GT
), 'GT and LQ datasets have different number of images - {}, {}.'.format(
len(self.paths_LQ), len(self.paths_GT))
self.random_scale_list = [1]
def _init_lmdb(self):
# https://github.com/chainer/chainermn/issues/129
self.GT_env = lmdb.open(self.opt['dataroot_GT'], readonly=True, lock=False, readahead=False,
meminit=False)
if self.paths_LQ is not None:
self.LQ_env = lmdb.open(self.opt['dataroot_LQ'], readonly=True, lock=False, readahead=False,
meminit=False)
def __getitem__(self, index):
if self.data_type == 'lmdb' and (self.GT_env is None or self.LQ_env is None):
self._init_lmdb()
GT_path, LQ_path = None, None
scale = self.opt['scale']
GT_size = self.opt['GT_size']
# get GT image
GT_path = self.paths_GT[index]
resolution = [int(s) for s in self.sizes_GT[index].split('_')
] if self.data_type == 'lmdb' else None
img_GT = util.read_img(self.GT_env, GT_path, resolution)
if self.opt['phase'] != 'train': # modcrop in the validation / test phase
img_GT = util.modcrop(img_GT, scale)
if self.opt['color']: # change color space if necessary
img_GT = util.channel_convert(img_GT.shape[2], self.opt['color'], [img_GT])[0]
# get LQ image
if self.paths_LQ:
LQ_path = self.paths_LQ[index]
resolution = [int(s) for s in self.sizes_LQ[index].split('_')
] if self.data_type == 'lmdb' else None
img_LQ = util.read_img(self.LQ_env, LQ_path, resolution)
else: # down-sampling on-the-fly
# randomly scale during training
if self.opt['phase'] == 'train':
random_scale = random.choice(self.random_scale_list)
H_s, W_s, _ = img_GT.shape
def _mod(n, random_scale, scale, thres):
rlt = int(n * random_scale)
rlt = (rlt // scale) * scale
return thres if rlt < thres else rlt
H_s = _mod(H_s, random_scale, scale, GT_size)
W_s = _mod(W_s, random_scale, scale, GT_size)
img_GT = cv2.resize(img_GT, (W_s, H_s), interpolation=cv2.INTER_LINEAR)
if img_GT.ndim == 2:
img_GT = cv2.cvtColor(img_GT, cv2.COLOR_GRAY2BGR)
H, W, _ = img_GT.shape
# using matlab imresize
img_LQ = util.imresize_np(img_GT, 1 / scale, True)
if img_LQ.ndim == 2:
img_LQ = np.expand_dims(img_LQ, axis=2)
if self.opt['phase'] == 'train':
# if the image size is too small
H, W, _ = img_GT.shape
if H < GT_size or W < GT_size:
img_GT = cv2.resize(img_GT, (GT_size, GT_size), interpolation=cv2.INTER_LINEAR)
# using matlab imresize
img_LQ = util.imresize_np(img_GT, 1 / scale, True)
if img_LQ.ndim == 2:
img_LQ = np.expand_dims(img_LQ, axis=2)
H, W, C = img_GT.shape
LQ_size = GT_size // scale
# randomly crop
rnd_h = random.randint(0, max(0, H//scale - LQ_size))
rnd_w = random.randint(0, max(0, W//scale - LQ_size))
img_LQ = img_LQ[rnd_h:rnd_h + LQ_size, rnd_w:rnd_w + LQ_size, :]
rnd_h_GT, rnd_w_GT = int(rnd_h * scale), int(rnd_w * scale)
img_GT = img_GT[rnd_h_GT:rnd_h_GT + GT_size, rnd_w_GT:rnd_w_GT + GT_size, :]
# augmentation - flip, rotate
img_LQ, img_GT = util.augment([img_LQ, img_GT], self.opt['use_flip'],
self.opt['use_rot'])
if self.opt['color']: # change color space if necessary
img_LQ = util.channel_convert(img_LQ.shape[2], self.opt['color'], [img_LQ])[0]
# BGR to RGB, HWC to CHW, numpy to tensor
if img_GT.shape[2] == 3:
img_GT = img_GT[:, :, [2, 1, 0]]
img_LQ = img_LQ[:, :, [2, 1, 0]]
img_GT = torch.from_numpy(np.ascontiguousarray(np.transpose(img_GT, (2, 0, 1)))).float()
img_LQ = torch.from_numpy(np.ascontiguousarray(np.transpose(img_LQ, (2, 0, 1)))).float()
if LQ_path is None:
LQ_path = GT_path
return {'LQ': img_LQ, 'GT': img_GT, 'LQ_path': LQ_path, 'GT_path': GT_path}
def __len__(self):
return len(self.paths_GT)
|
the-stack_0_1425 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
"""Function to add two numbers together in a linked list notation
Args:
l1 (ListNode): Integer number 1 represented as a linked list
l2 (ListNode): Integer number 2 represented as a linked list
Returns:
ListNode: the resulting sum returned as a linked list
"""
head = ListNode(0)
cur = head
carry = 0
while l1 or l2 or carry:
# Get the value of each node, treating a missing node (None) as 0
val1 = l1.val if l1 else 0
val2 = l2.val if l2 else 0
# Find the value of the two nodes, and determine if there's a carry for next value
sum_value = val1 + val2 + carry
carry = sum_value // 10
sum_value = sum_value % 10
# Create node and append to the list
node = ListNode(sum_value)
# Move to the next node in each list
l1 = l1.next if l1 else None
l2 = l2.next if l2 else None
cur.next = node
cur = node
return head.next
|
the-stack_0_1426 | #!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin test framework primitive and message strcutures
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
bitcoin/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization."""
from codecs import encode
import copy
import hashlib
from io import BytesIO
import random
import socket
import struct
import time
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, bytes_to_hex_str
MIN_VERSION_SUPPORTED = 60001
MY_VERSION = 70918
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_INV_SZ = 50000
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 btc in satoshis
NODE_NETWORK = (1 << 0)
# NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def ripemd160(s):
return hashlib.new('ripemd160', s).digest()
def hash256(s):
return sha256(sha256(s))
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
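# A few encodings for reference (standard Bitcoin CompactSize):
#   ser_compact_size(252) == b'\xfc'
#   ser_compact_size(253) == b'\xfd\xfd\x00'
#   ser_compact_size(0x10000) == b'\xfe\x00\x00\x01\x00'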
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def ser_uint64(u):
rs = b""
for i in range(2):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
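# Example: uint256_from_compact(0x1d00ffff) expands the mantissa 0x00ffff by
# 8 * (0x1d - 3) bits, giving 0xffff << 208 -- the well-known difficulty-1
# target.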
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
else:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return bytes_to_hex_str(obj.serialize())
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress():
def __init__(self):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv():
typemap = {
0: "MSG_ERROR",
1: "MSG_TX",
2: "MSG_BLOCK",
3: "MSG_FILTERED_BLOCK",
4: "MSG_TXLOCK_REQUEST",
5: "MSG_TXLOCK_VOTE",
6: "MSG_SPORK",
7: "MSG_MASTERNODE_WINNER",
8: "MSG_MASTERNODE_SCANNING_ERROR",
9: "MSG_BUDGET_VOTE",
10: "MSG_BUDGET_PROPOSAL",
11: "MSG_BUDGET_FINALIZED",
12: "MSG_BUDGET_FINALIZED_VOTE",
13: "MSG_MASTERNODE_QUORUM",
14: "MSG_MASTERNODE_QUORUM",
15: "MSG_MASTERNODE_ANNOUNCE",
16: "MSG_MASTERNODE_PING",
17: "MSG_DSTX",
18: "MSG_PUBCOINS",
19: "MSG_GENWIT",
20: "MSG_ACC_VALUE"
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator():
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint():
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def serialize_uniqueness(self):
r = b""
r += struct.pack("<I", self.n)
r += ser_uint256(self.hash)
return r
def deserialize_uniqueness(self, f):
self.n = struct.unpack("<I", f.read(4))[0]
self.hash = deser_uint256(f)
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
NullOutPoint = COutPoint(0, 0xffffffff)
class CTxIn():
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
self.nSequence)
def is_zerocoinspend(self):
return bytes_to_hex_str(self.scriptSig)[:2] == "c2"
class CTxOut():
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
bytes_to_hex_str(self.scriptPubKey))
class CTransaction():
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
flags = 0
if len(self.vin) == 0:
flags = struct.unpack("<B", f.read(1))[0]
# Not sure why flags can't be zero, but this
# matches the implementation in bitcoind
if (flags != 0):
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
else:
self.vout = deser_vector(f, CTxOut)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
# Regular serialization is with witness -- must explicitly
# call serialize_without_witness to exclude witness data.
def serialize(self):
return self.serialize_without_witness()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
self.sha256 = None
self.calc_sha256()
# We will only cache the serialization without witness in
# self.sha256 and self.hash -- those are expected to be the txid.
def calc_sha256(self, with_witness=False):
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
self.hash = encode(hash256(self.serialize_without_witness())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def is_coinbase(self):
return (
len(self.vin) == 1 and
self.vin[0].prevout == NullOutPoint and
(not self.vin[0].is_zerocoinspend())
)
def is_coinstake(self):
return (
len(self.vin) == 1 and
len(self.vout) >= 2 and
self.vout[0] == CTxOut()
)
def from_hex(self, hexstring):
f = BytesIO(hex_str_to_bytes(hexstring))
self.deserialize(f)
def spends(self, outpoint):
return len([x for x in self.vin if
x.prevout.hash == outpoint.hash and x.prevout.n == outpoint.n]) > 0
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime)
class CBlockHeader():
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.nAccumulatorCheckpoint = header.nAccumulatorCheckpoint
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 4
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.nAccumulatorCheckpoint = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.nAccumulatorCheckpoint = deser_uint256(f)
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
r += ser_uint256(self.nAccumulatorCheckpoint)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
r += ser_uint256(self.nAccumulatorCheckpoint)
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
# oasis
def solve_stake(self, stakeInputs, prevModifier):
target0 = uint256_from_compact(self.nBits)
loop = True
while loop:
for uniqueness in stakeInputs:
nvalue, _, prevTime = stakeInputs[uniqueness]
target = int(target0 * nvalue / 100) % 2**256
data = b""
# always modifier V2 (256 bits) on regtest
data += ser_uint256(prevModifier)
data += struct.pack("<I", prevTime)
# prevout is CStake uniqueness
data += uniqueness
data += struct.pack("<I", self.nTime)
posHash = uint256_from_str(hash256(data))
if posHash <= target:
self.prevoutStake = uniqueness
loop = False
break
if loop:
self.nTime += 1
return True
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
self.sig_key = None # not serialized / used only to re_sign
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
self.sig_key = None
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
if with_witness:
r += ser_vector(self.vtx, "serialize_with_witness")
else:
r += ser_vector(self.vtx, "serialize_without_witness")
if hasattr(self, 'vchBlockSig'):
r += ser_string(self.vchBlockSig)
return r
# Calculate the merkle root given a vector of transaction hashes
@classmethod
def get_merkle_root(cls, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def calc_witness_merkle_root(self):
# For witness root purposes, the hash of the
# coinbase, with witness, is defined to be 0...0
hashes = [ser_uint256(0)]
for tx in self.vtx[1:]:
# Calculate the hashes with witness data
hashes.append(ser_uint256(tx.calc_sha256(True)))
return self.get_merkle_root(hashes)
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def sign_block(self, key, low_s=True):
data = b""
data += struct.pack("<i", self.nVersion)
data += ser_uint256(self.hashPrevBlock)
data += ser_uint256(self.hashMerkleRoot)
data += struct.pack("<I", self.nTime)
data += struct.pack("<I", self.nBits)
data += struct.pack("<I", self.nNonce)
data += ser_uint256(self.nAccumulatorCheckpoint)
sha256NoSig = hash256(data)
self.vchBlockSig = key.sign(sha256NoSig, low_s=low_s)
self.sig_key = key
self.low_s = low_s
def re_sign_block(self):
if self.sig_key == None:
raise Exception("Unable to re-sign block. Key Not present, use 'sign_block' first.")
return self.sign_block(self.sig_key, self.low_s)
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class PrefilledTransaction():
def __init__(self, index=0, tx = None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self, with_witness=True):
r = b""
r += ser_compact_size(self.index)
if with_witness:
r += self.tx.serialize_with_witness()
else:
r += self.tx.serialize_without_witness()
return r
def serialize_without_witness(self):
return self.serialize(with_witness=False)
def serialize_with_witness(self):
return self.serialize(with_witness=True)
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs():
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
# When using version 2 compact blocks, we must serialize with_witness.
def serialize(self, with_witness=False):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
r += ser_vector(self.prefilled_txn, "serialize_without_witness")
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
def serialize(self):
return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs():
def __init__(self, p2pheaders_and_shortids = None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
self.use_witness = False
if p2pheaders_and_shortids != None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
if self.use_witness:
ret = P2PHeaderAndShortWitnessIDs()
else:
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [ key0, key1 ]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
def initialize_from_block(self, block, nonce=0, prefill_list = [0], use_witness = False):
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
self.shortids = []
self.use_witness = use_witness
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
if use_witness:
tx_hash = block.vtx[i].calc_sha256(with_witness=True)
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest():
def __init__(self, blockhash=0, indexes = None):
self.blockhash = blockhash
self.indexes = indexes if indexes != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x-last_index-1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x+last_index+1)
last_index = absolute_indexes[-1]
return absolute_indexes
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions():
def __init__(self, blockhash=0, transactions = None):
self.blockhash = blockhash
self.transactions = transactions if transactions != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self, with_witness=True):
r = b""
r += ser_uint256(self.blockhash)
if with_witness:
r += ser_vector(self.transactions, "serialize_with_witness")
else:
r += ser_vector(self.transactions, "serialize_without_witness")
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
class CPartialMerkleTree():
def __init__(self):
self.nTransactions = 0
self.vHash = []
self.vBits = []
self.fBad = False
def deserialize(self, f):
self.nTransactions = struct.unpack("<i", f.read(4))[0]
self.vHash = deser_uint256_vector(f)
vBytes = deser_string(f)
self.vBits = []
for i in range(len(vBytes) * 8):
self.vBits.append(vBytes[i//8] & (1 << (i % 8)) != 0)
def serialize(self):
r = b""
r += struct.pack("<i", self.nTransactions)
r += ser_uint256_vector(self.vHash)
vBytesArray = bytearray([0x00] * ((len(self.vBits) + 7)//8))
for i in range(len(self.vBits)):
vBytesArray[i // 8] |= self.vBits[i] << (i % 8)
r += ser_string(bytes(vBytesArray))
return r
def __repr__(self):
return "CPartialMerkleTree(nTransactions=%d, vHash=%s, vBits=%s)" % (self.nTransactions, repr(self.vHash), repr(self.vBits))
class CMerkleBlock():
def __init__(self):
self.header = CBlockHeader()
self.txn = CPartialMerkleTree()
def deserialize(self, f):
self.header.deserialize(f)
self.txn.deserialize(f)
def serialize(self):
r = b""
r += self.header.serialize()
r += self.txn.serialize()
return r
def __repr__(self):
return "CMerkleBlock(header=%s, txn=%s)" % (repr(self.header), repr(self.txn))
# Objects that correspond to messages on the wire
class msg_version():
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = NODE_NETWORK
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
self.nRelay = MY_RELAY
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
if self.nVersion >= 70001:
# Relay field is optional for version 70001 onwards
try:
self.nRelay = struct.unpack("<b", f.read(1))[0]
            except struct.error:  # the optional relay byte may be absent from the stream
self.nRelay = 0
else:
self.nRelay = 0
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
r += struct.pack("<b", self.nRelay)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack():
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr():
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_inv():
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata():
command = b"getdata"
def __init__(self, inv=None):
self.inv = inv if inv != None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks():
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx():
command = b"tx"
    def __init__(self, tx=None):
        # avoid a shared mutable default argument: build a fresh CTransaction per message
        self.tx = tx if tx is not None else CTransaction()
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize_without_witness()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
def serialize(self):
return self.tx.serialize_with_witness()
class msg_block():
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize(with_witness=False)
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic():
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class msg_witness_block(msg_block):
def serialize(self):
r = self.block.serialize(with_witness=True)
return r
class msg_getaddr():
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping():
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong():
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool():
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders():
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders():
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers():
command = b"headers"
def __init__(self, headers=None):
self.headers = headers if headers is not None else []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
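        # wrap each header in a CBlock so serialization appends an empty tx vector (the trailing 0x00 count byte)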
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject():
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
class msg_feefilter():
command = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.feerate)
return r
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct():
command = b"sendcmpct"
def __init__(self):
self.announce = False
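        # BIP 152 compact-block protocol version; version 2 adds segwit (witness) support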
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<?", self.announce)
r += struct.pack("<Q", self.version)
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock():
command = b"cmpctblock"
def __init__(self, header_and_shortids = None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
r = b""
r += self.header_and_shortids.serialize()
return r
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn():
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn():
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=False)
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_witness_blocktxn(msg_blocktxn):
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=True)
return r
|
the-stack_0_1428 | #!/usr/bin/env python
# coding: utf-8
"""
pokepy.api
User interaction with this package is done through this file.
"""
import functools
import os
import sys
import types
from collections import namedtuple
import appdirs # dependency of FileCache
from beckett.clients import BaseClient
from beckett.constants import DEFAULT_VALID_STATUS_CODES
from .fcache.cache import FileCache
from . import resources_v2 as rv2
from . import __version__
class V2Client(BaseClient):
"""Pokéapi client"""
class Meta(BaseClient.Meta):
name = 'pokepy-v2-client-' + __version__
base_url = 'https://pokeapi.co/api/v2'
resources = (
rv2.BerryResource,
rv2.BerryFirmnessResource,
rv2.BerryFlavorResource,
rv2.ContestTypeResource,
rv2.ContestEffectResource,
rv2.SuperContestEffectResource,
rv2.EncounterMethodResource,
rv2.EncounterConditionResource,
rv2.EncounterConditionValueResource,
rv2.EvolutionChainResource,
rv2.EvolutionTriggerResource,
rv2.GenerationResource,
rv2.PokedexResource,
rv2.VersionResource,
rv2.VersionGroupResource,
rv2.ItemResource,
rv2.ItemAttributeResource,
rv2.ItemCategoryResource,
rv2.ItemFlingEffectResource,
rv2.ItemPocketResource,
rv2.MachineResource,
rv2.MoveResource,
rv2.MoveAilmentResource,
rv2.MoveBattleStyleResource,
rv2.MoveCategoryResource,
rv2.MoveDamageClassResource,
rv2.MoveLearnMethodResource,
rv2.MoveTargetResource,
rv2.LocationResource,
rv2.LocationAreaResource,
rv2.PalParkAreaResource,
rv2.RegionResource,
rv2.AbilityResource,
rv2.CharacteristicResource,
rv2.EggGroupResource,
rv2.GenderResource,
rv2.GrowthRateResource,
rv2.NatureResource,
rv2.PokeathlonStatResource,
rv2.PokemonResource,
rv2.PokemonColorResource,
rv2.PokemonFormResource,
rv2.PokemonHabitatResource,
rv2.PokemonShapeResource,
rv2.PokemonSpeciesResource,
rv2.StatResource,
rv2.TypeResource,
rv2.LanguageResource
)
def __init__(self, cache=None, cache_location=None, *args, **kwargs):
"""
Parameters
----------
cache: str
cache can be 'in_memory' or 'in_disk',
for memory-based or disk-based cache, respectively.
Optional.
cache_location: str
cache directory, for disk-based cache.
Optional.
"""
if cache is None: # empty wrapping function
def no_cache(func):
@functools.wraps(func)
def inner(*args, **kwargs):
return func(*args, **kwargs)
return inner
cache_function = no_cache
else:
if cache in ['in_memory', 'in_disk']:
cache_function = self._caching(cache.split('in_')[1], cache_location)
self.cache_type = cache
def cache_info_total(self):
return self._cache_info_(self._cache_hits_global,
self._cache_misses_global,
self._cache_len_global)
def cache_clear_total(self):
for get_method_name in self._all_get_methods_names:
getattr(self, get_method_name).cache_clear()
def cache_location_absolute(self):
return self._cache_location_global
# global cache related methods
self.cache_info = types.MethodType(cache_info_total, self)
self.cache_clear = types.MethodType(cache_clear_total, self)
self.cache_location = types.MethodType(cache_location_absolute, self)
self._cache_hits_global = 0
self._cache_misses_global = 0
self._cache_len_global = 0
self._cache_location_global = ''
self._cache_info_ = namedtuple('CacheInfo', ['hits', 'misses', 'size'])
else: # wrong cache parameter
raise ValueError('Accepted values for cache are "in_memory" or "in_disk"')
self._cache = cache_function
self._all_get_methods_names = []
super(V2Client, self).__init__(*args, **kwargs)
def _assign_method(self, resource_class, method_type):
"""
Exactly the same code as the original except:
- uid is now first parameter (after self). Therefore, no need to explicitly call 'uid='
- Ignored the other http methods besides GET (as they are not needed for the pokeapi.co API)
- Added cache wrapping function
- Added a way to list all get methods
- Added a filter for single element lists (extract element into a standalone object)
"""
method_name = resource_class.get_method_name(
resource_class, method_type)
valid_status_codes = getattr(
resource_class.Meta,
'valid_status_codes',
DEFAULT_VALID_STATUS_CODES
)
def extract_single_element_list(func):
@functools.wraps(func)
def inner(*args, **kwargs):
final = func(*args, **kwargs)
if len(final) == 1:
final = final[0]
return final
return inner
# uid is now the first argument (after self)
@self._cache
@extract_single_element_list
def get(self, uid=None, method_type=method_type,
method_name=method_name,
valid_status_codes=valid_status_codes,
resource=resource_class, data=None, **kwargs):
uid = uid.lower() if isinstance(uid, str) else uid
return self.call_api(
method_type, method_name,
valid_status_codes, resource,
data, uid=uid, **kwargs)
# only GET method is used
setattr(
self, method_name,
types.MethodType(get, self)
)
# for easier listing of get methods
self._all_get_methods_names.append(method_name)
def _caching(self, disk_or_memory, cache_directory=None):
"""
Decorator that allows caching the outputs of the BaseClient get methods.
Cache can be either disk- or memory-based.
Disk-based cache is reloaded automatically between runs if the same
cache directory is specified.
Cache is kept per each unique uid.
ex:
>> client.get_pokemon(1) -> output gets cached
>> client.get_pokemon(uid=1) -> output already cached
>> client.get_pokemon(2) -> output gets cached
Parameters
----------
disk_or_memory: str
Specify if the cache is disk- or memory-based. Accepts 'disk' or 'memory'.
cache_directory: str
Specify the directory for the disk-based cache.
Optional, will chose an appropriate and platform-specific directory if not specified.
Ignored if memory-based cache is selected.
"""
if disk_or_memory not in ('disk', 'memory'):
raise ValueError('Accepted values are "disk" or "memory"')
# Because of how BaseClient get methods are generated, they don't get a proper __name__.
# As such, it is hard to generate a specific cache directory name for each get method.
# Therefore, I decided to just generate a number for each folder, starting at zero.
# The same get methods get the same number every time because their order doesn't change.
# Also, variable is incremented inside a list because nonlocals are only python 3.0 and up.
get_methods_id = [0]
def memoize(func):
_global_cache_dir = ''
if disk_or_memory == 'disk':
if cache_directory:
# Python 2 and 3.4 workaround
if (sys.version_info[0] == 2 and not
isinstance(cache_directory, (str, unicode))) or (
sys.version_info[0:2] == (3, 4) and not
isinstance(cache_directory, str)):
raise TypeError('expected str, not %s' % cache_directory.__class__.__name__)
_global_cache_dir = os.path.join(cache_directory, 'pokepy_cache')
cache_dir = os.path.join(_global_cache_dir, str(get_methods_id[0]))
else:
_global_cache_dir = appdirs.user_cache_dir('pokepy_cache', False,
opinion=False)
cache_dir = os.path.join(_global_cache_dir, str(get_methods_id[0]))
cache = FileCache('pokepy', flag='cs', app_cache_dir=cache_dir)
get_methods_id[0] += 1
else: # 'memory'
cache = {}
_global_cache_dir = 'ram'
# global cache directory
# should only be set when setting the first get method
if not self._cache_location_global:
self._cache_location_global = _global_cache_dir
hits = [0]
misses = [0]
def cache_info():
return self._cache_info_(hits[0], misses[0], len(cache))
def cache_clear():
# global cache info
self._cache_hits_global -= hits[0]
self._cache_misses_global -= misses[0]
self._cache_len_global -= len(cache)
# local cache info
hits[0] = 0
misses[0] = 0
cache.clear() # for disk-based cache, files are deleted but not the directories
if disk_or_memory == 'disk':
cache.create() # recreate cache file handles
def cache_location():
return 'ram' if disk_or_memory == 'memory' else cache.cache_dir
@functools.wraps(func)
def memoizer(*args, **kwargs):
# arguments to the get methods can be a value or uid=value
key = str(args[1]) if len(args) > 1 else str(kwargs.get("uid"))
if key not in cache:
# local and global cache info
misses[0] += 1
self._cache_misses_global += 1
cache[key] = func(*args, **kwargs)
self._cache_len_global += 1
else:
self._cache_hits_global += 1 # global cache info
hits[0] += 1 # local cache info
return cache[key]
memoizer.cache_info = cache_info
memoizer.cache_clear = cache_clear
memoizer.cache_location = cache_location
return memoizer
return memoize
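# Example usage sketch (assumes pokeapi.co is reachable; method and cache names follow the docstrings above):
#     client = V2Client(cache='in_memory')
#     bulbasaur = client.get_pokemon(1)   # a second call with the same uid is served from the cache
#     client.cache_info()                 # aggregate hits/misses/size across all get methods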
|
the-stack_0_1431 | from dataset import *
from visualize import *
from forward import *
# utils
from libs.utils import _init_fn
from libs.load_model import *
def prepare_dataloader(cfg, dict_DB):
# train dataloader
if cfg.run_mode == 'train':
dataset = Train_Dataset_SLNet(cfg)
trainloader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=cfg.batch_size['img'],
shuffle=True,
num_workers=cfg.num_workers,
worker_init_fn=_init_fn)
dict_DB['trainloader'] = trainloader
val_dataset = Train_Dataset_SLNet(cfg, mode='val')
valloader = torch.utils.data.DataLoader(dataset=val_dataset,
batch_size=cfg.batch_size['img'],
shuffle=True,
num_workers=cfg.num_workers,
worker_init_fn=_init_fn)
dict_DB['valloader'] = valloader
# test dataloader
if cfg.dataset == 'SEL':
dataset = SEL_Test_Dataset(cfg)
testloader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=cfg.batch_test_size['img'],
shuffle=False,
num_workers=cfg.num_workers,
worker_init_fn=_init_fn)
else:
dataset = SEL_Hard_Test_Dataset(cfg)
testloader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=cfg.batch_size['img'],
shuffle=False,
num_workers=cfg.num_workers,
worker_init_fn=_init_fn)
dict_DB['testloader'] = testloader
return dict_DB
def prepare_model(cfg, dict_DB):
if 'test' in cfg.run_mode:
dict_DB = load_SLNet_for_test(cfg, dict_DB)
if 'train' in cfg.run_mode:
dict_DB = load_SLNet_for_train(cfg, dict_DB)
dict_DB['forward_model'] = Forward_Model(cfg=cfg)
return dict_DB
def prepare_visualization(cfg, dict_DB):
dict_DB['visualize'] = Visualize_plt(cfg=cfg)
return dict_DB
def prepare_training(cfg, dict_DB):
logfile = cfg.output_dir + 'train/log/logfile.txt'
mkdir(path=cfg.output_dir + 'train/log/')
if cfg.run_mode == 'train' and cfg.resume == True:
rmfile(path=logfile)
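    # placeholder validation metrics; presumably overwritten with the best AUC values found during validation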
val_result = {'AUC_A': 0, 'AUC_P': 0,
'AUC_R_upper_P_0.80': 0,
'AUC_R_upper_P_0.82': 0}
dict_DB['val_result'] = val_result
dict_DB['epoch'] = 0
dict_DB['logfile'] = logfile
return dict_DB
|
the-stack_0_1435 | # Practical Machine learning
# Bayesian learning - Naive Bayes example
# Chapter 9
from datatypes import Dataset
from classifier import naive_bayes, svm, naive_bayes_custom, knn
from feature_selection import univariate_feature_selection, lda, pca
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in scikit-learn 0.20
from numpy import mean, var, sum, diag, shape
def load_spam_ds():
"""
Loads the data from file and build the dataset in scikit format.
() -> Dataset
"""
data = []
target = []
i = 0
with open("data/spambase.data", "r") as f:
for line in f:
# Removes \r\n from line
line = line.replace("\n","").replace("\r","")
items = line.split(",")
features = [float(item) for item in items[:-1]]
spam_class = int(items[-1])
data.append(features)
target.append(spam_class)
return Dataset(data, target)
def split_train_test(ds):
"""
Given the dataset, split in two datasets:
One is the Training set. Other is the Test set.
The proportion is 80% to 20% Respectively
Dataset -> Dataset, Dataset
"""
samples_train, samples_test, classes_train, classes_test = train_test_split(ds.data, ds.target, test_size=0.2)
training_set = Dataset(samples_train, classes_train)
test_set = Dataset(samples_test, classes_test)
return training_set, test_set
def run(n=0, dimension_reduction=univariate_feature_selection, learning=naive_bayes_custom):
"""
Starts the classification Pipeline
"""
ds = load_spam_ds()
if n > 0 and n < len(ds.data):
ds = dimension_reduction(ds, n)
evaluate(ds, learning)
def evaluate(ds, classifier_class, iterations=10):
'''
    Train a given classifier `iterations` times
    and print its mean confusion matrix and accuracy
    with a margin of error (via Chebyshev's inequality)
'''
results = []
for i in range(iterations):
training_set, test_set = split_train_test(ds)
classifier = classifier_class(training_set)
cm = 1.0 * classifier.classify(test_set) / len(test_set.data)
results += [cm]
cm_mean = mean(results, axis=0)
cm_variance = var(results, axis=0)
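    # Chebyshev-style bound: the reported mean deviates by more than the printed margin with probability at most 1/iterations**2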
print ("Accuracy of", sum(diag(cm_mean))*100, "% (+-", iterations * sum(diag(cm_variance)), ") with", (1 - 1.0/(iterations*iterations)), "of certain." )
print ("\nConfusion Matrix:\n",cm_mean,"\n")
if __name__ == "__main__":
algo=[naive_bayes_custom, naive_bayes, knn, svm]
feature=[univariate_feature_selection, pca, lda]
num=[1,10,0]
for n in num:
for f in feature:
if (n==0):
print("\nUsing all features")
else:
print("\nUsing",n,"feature(s) (", f.__name__, ")" )
print("=======================================================\n")
for a in algo:
print("* Learning Algorithm:", a.__name__)
run(n, f, a)
if (n==0):
break
|
the-stack_0_1437 | #!/usr/bin/env python
import sys
import os
import rospy
import rospkg
from threading import Thread
from python_qt_binding import loadUi
from python_qt_binding import QtGui
from python_qt_binding.QtGui import QWidget
from trainergui import Ui_MainWindow
from inmoov_msgs.msg import MotorStatus
from inmoov_msgs.msg import MotorCommand
from inmoov_msgs.srv import MotorParameter
from sensor_msgs.msg import JointState
from std_msgs.msg import Header
# https://github.com/ColinDuquesnoy/QDarkStyleSheet
import qdarkstyle
# https://nikolak.com/pyqt-qt-designer-getting-started/
class ExampleApp(QtGui.QMainWindow, Ui_MainWindow):
def __init__(self):
# Explaining super is out of the scope of this article
        # So please google it if you're not familiar with it
# Simple reason why we use it here is that it allows us to
# access variables, methods etc in the design.py file
super(self.__class__, self).__init__()
self.setupUi(self) # This is defined in design.py file automatically
# It sets up layout and widgets that are defined
self.parameterTopic = ["servobus/torso/motorparameter","servobus/leftarm/motorparameter","servobus/rightarm/motorparameter"]
self.motorcommand = MotorCommand()
self.jointcommand = JointState()
self.jointNames = []
for servo in range (0, 11):
self.jointNames.append( rospy.get_param('servobus/torso/servomap/'+str(servo)+'/name'))
for servo in range (0, 11):
self.jointNames.append( rospy.get_param('servobus/leftarm/servomap/'+str(servo)+'/name'))
for servo in range (0, 11):
self.jointNames.append( rospy.get_param('servobus/rightarm/servomap/'+str(servo)+'/name'))
#print(self.jointNames)
#'right_pinky','right_ring','right_middle','right_index','right_thumb',
#'right_hand','right_bicep','right_bicep_rotate','right_shoulder_side','right_shoulder_up','','',
#'eye_leftright','eyes_updown','jaw','head_leftright','head_updown','head_tilt','waist_lean','waist_rotate','','','','',
#'left_pinky','left_ring','left_middle','left_index','left_thumb',
#'left_hand','left_bicep','left_bicep_rotate','left_shoulder_side','left_shoulder_up','','',
self.setupDropDowns()
self.cmbBus.currentIndexChanged.connect(self.busChanged)
self.cmbServo.currentIndexChanged.connect(self.servoChanged)
self.txtGoal.editingFinished.connect(self.setGoal)
self.txtMinPulse.editingFinished.connect(self.setMinPulse)
self.txtMaxPulse.editingFinished.connect(self.setMaxPulse)
self.txtMinGoal.editingFinished.connect(self.setMinGoal)
self.txtMaxGoal.editingFinished.connect(self.setMaxGoal)
self.txtMinSensor.editingFinished.connect(self.setMinSensor)
self.txtMaxSensor.editingFinished.connect(self.setMaxSensor)
self.chkEnabled.stateChanged.connect(self.setEnabled)
self.chkCalibrated.stateChanged.connect(self.setCalibrated)
self.sliderGoal.valueChanged.connect(self.sliderChanged)
rospy.init_node('trainer', anonymous=True)
self.commandPublisher = []
self.commandPublisher.append(rospy.Publisher("servobus/torso/motorcommand", MotorCommand, queue_size=10))
self.commandPublisher.append(rospy.Publisher("servobus/leftarm/motorcommand", MotorCommand, queue_size=10))
self.commandPublisher.append(rospy.Publisher("servobus/rightarm/motorcommand", MotorCommand, queue_size=10))
self.statusSubscriber = []
self.statusSubscriber.append(rospy.Subscriber("servobus/torso/motorstatus", MotorStatus, self.callback0))
self.statusSubscriber.append(rospy.Subscriber("servobus/leftarm/motorstatus", MotorStatus, self.callback1))
self.statusSubscriber.append(rospy.Subscriber("servobus/rightarm/motorstatus", MotorStatus, self.callback2))
self.jointPublisher = rospy.Publisher("joint_command", JointState, queue_size=10)
self.bus = 0
self.servo = 0
self.busChanged()
self.servoChanged()
def busChanged(self):
# unregister topics and reregister to the new ones
self.bus = self.cmbBus.currentIndex()
self.cmbServo.clear()
for s in range(0, 11):
self.cmbServo.addItem(self.jointNames[(self.bus * 11) + s])
#self.commandPublisher.unregister()
#self.commandPublisher = rospy.Publisher(self.commandTopic[bus], MotorCommand, queue_size=10)
#self.statusSubscriber.unregister()
#self.statusSubscriber = rospy.Subscriber(self.statusTopic[self.bus], MotorStatus, self.callback)
self.servoChanged()
def servoChanged(self):
if self.cmbServo.count() > 0:
self.servo = self.cmbServo.currentIndex()
self.getMinPulse()
self.getMaxPulse()
self.getMinGoal()
self.getMaxGoal()
self.getGoal()
self.getMinSensor()
self.getMaxSensor()
self.getEnabled()
self.getCalibrated()
def callback0(self, data):
if data.id == self.servo and self.bus == 0:
#print data.posraw
#self.chkEnabled.setChecked(bool(data.enabled))
self.txtPosition.setText(str(data.position))
self.txtSpeed.setText(str(data.presentspeed))
self.txtSensorRaw.setText(str(data.posraw))
self.chkMoving.setChecked(bool(data.moving))
self.chkPower.setChecked(bool(data.power))
#self.txtGoal.setText(str(data.goal))
def callback1(self, data):
if data.id == self.servo and self.bus == 1:
#print data.posraw
#self.chkEnabled.setChecked(bool(data.enabled))
self.txtPosition.setText(str(data.position))
self.txtSpeed.setText(str(data.presentspeed))
self.txtSensorRaw.setText(str(data.posraw))
self.chkMoving.setChecked(bool(data.moving))
self.chkPower.setChecked(bool(data.power))
#self.txtGoal.setText(str(data.goal))
def callback2(self, data):
if data.id == self.servo and self.bus == 2:
#print data.posraw
#self.chkEnabled.setChecked(bool(data.enabled))
self.txtPosition.setText(str(data.position))
self.txtSpeed.setText(str(data.presentspeed))
self.txtSensorRaw.setText(str(data.posraw))
self.chkMoving.setChecked(bool(data.moving))
self.chkPower.setChecked(bool(data.power))
#self.txtGoal.setText(str(data.goal))
def degreestoradians(self, d):
return d*(3.1415926/180.0)
def setupDropDowns(self):
self.cmbBus.addItem(rospy.get_param('/servobus/torso/name'))
self.cmbBus.addItem(rospy.get_param('/servobus/leftarm/name'))
self.cmbBus.addItem(rospy.get_param('/servobus/rightarm/name'))
for servo in range (0, 11):
print('/servobus/torso/servomap/' + str(servo) + '/name')
self.cmbServo.addItem(rospy.get_param('/servobus/torso/servomap/' + str(servo) + '/name'))
#self.cmbServo.addItem('Servo 00')
#self.cmbServo.addItem('Servo 01')
#self.cmbServo.addItem('Servo 02')
#self.cmbServo.addItem('Servo 03')
#self.cmbServo.addItem('Servo 04')
#self.cmbServo.addItem('Servo 05')
#self.cmbServo.addItem('Servo 06')
#self.cmbServo.addItem('Servo 07')
#self.cmbServo.addItem('Servo 08')
#self.cmbServo.addItem('Servo 09')
#self.cmbServo.addItem('Servo 10')
#self.cmbServo.addItem('Servo 11')
self.cmbSmoothing.addItem('0 - Instant')
self.cmbSmoothing.addItem('1 - Max Speed')
self.cmbSmoothing.addItem('2 - Linear Ramp')
self.cmbSmoothing.addItem('3 - COS Ramp')
self.cmbSmoothing.addItem('4 - COS^2 Ramp')
def sliderChanged(self, i):
self.txtGoal.setText(str(i/1000.0))
self.setGoal()
def setGoal(self):
#print(str(value))
self.motorcommand.id = self.cmbServo.currentIndex()
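        # parameter 0x1E selects the goal-position register; the register numbers appear to follow the Dynamixel control table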
self.motorcommand.parameter = 0x1E
self.motorcommand.value = float(self.txtGoal.text())
#print(self.motorcommand.value)
self.commandPublisher[self.bus].publish(self.motorcommand)
self.jointcommand.header = Header()
self.jointcommand.header.stamp = rospy.Time.now()
        self.jointcommand.name = [self.jointNames[((self.bus * 11) + self.servo)]]  # 11 joint names are stored per bus above
self.jointcommand.position = [self.degreestoradians(float(self.txtGoal.text()))]
self.jointcommand.velocity = []
self.jointcommand.effort = []
self.jointPublisher.publish(self.jointcommand)
def getGoal(self):
bus = self.cmbBus.currentIndex()
motorparameter = rospy.ServiceProxy(self.parameterTopic[bus], MotorParameter)
value = motorparameter(self.cmbServo.currentIndex(), 0x1E).data
self.txtGoal.setText(str(value))
self.sliderGoal.setValue(int(value * 1000.0))
def setMinPulse(self):
#print(str(value))
self.motorcommand.id = self.cmbServo.currentIndex()
self.motorcommand.parameter = 0x14
self.motorcommand.value = float(self.txtMinPulse.text())
self.commandPublisher[self.bus].publish(self.motorcommand)
def getMinPulse(self):
bus = self.cmbBus.currentIndex()
motorparameter = rospy.ServiceProxy(self.parameterTopic[bus], MotorParameter)
self.txtMinPulse.setText(str(motorparameter(self.cmbServo.currentIndex(), 0x14).data))
def setMaxPulse(self):
#print(str(value))
self.motorcommand.id = self.cmbServo.currentIndex()
self.motorcommand.parameter = 0x16
self.motorcommand.value = float(self.txtMaxPulse.text())
self.commandPublisher[self.bus].publish(self.motorcommand)
def getMaxPulse(self):
bus = self.cmbBus.currentIndex()
motorparameter = rospy.ServiceProxy(self.parameterTopic[bus], MotorParameter)
self.txtMaxPulse.setText(str(motorparameter(self.cmbServo.currentIndex(), 0x16).data))
def setMinGoal(self):
#print(str(value))
self.motorcommand.id = self.cmbServo.currentIndex()
self.motorcommand.parameter = 0x06
self.motorcommand.value = float(self.txtMinGoal.text())
self.sliderGoal.setMinimum(int(self.motorcommand.value * 1000.0))
self.commandPublisher[self.bus].publish(self.motorcommand)
def getMinGoal(self):
bus = self.cmbBus.currentIndex()
motorparameter = rospy.ServiceProxy(self.parameterTopic[bus], MotorParameter)
value = motorparameter(self.cmbServo.currentIndex(), 0x06).data
self.txtMinGoal.setText(str(value))
self.sliderGoal.setMinimum(int(value * 1000.0))
def setMaxGoal(self):
#print(str(value))
self.motorcommand.id = self.cmbServo.currentIndex()
self.motorcommand.parameter = 0x08
self.motorcommand.value = float(self.txtMaxGoal.text())
self.sliderGoal.setMaximum(int(self.motorcommand.value * 1000.0))
self.commandPublisher[self.bus].publish(self.motorcommand)
def getMaxGoal(self):
bus = self.cmbBus.currentIndex()
motorparameter = rospy.ServiceProxy(self.parameterTopic[bus], MotorParameter)
value = motorparameter(self.cmbServo.currentIndex(), 0x08).data
self.txtMaxGoal.setText(str(value))
self.sliderGoal.setMaximum(int(value * 1000.0))
def setMinSensor(self):
#print(str(value))
self.motorcommand.id = self.cmbServo.currentIndex()
self.motorcommand.parameter = 0xA2
self.motorcommand.value = float(self.txtMinSensor.text())
self.commandPublisher[self.bus].publish(self.motorcommand)
def getMinSensor(self):
bus = self.cmbBus.currentIndex()
motorparameter = rospy.ServiceProxy(self.parameterTopic[bus], MotorParameter)
self.txtMinSensor.setText(str(motorparameter(self.cmbServo.currentIndex(), 0xA2).data))
def setMaxSensor(self):
#print(str(value))
self.motorcommand.id = self.cmbServo.currentIndex()
self.motorcommand.parameter = 0xA4
self.motorcommand.value = float(self.txtMaxSensor.text())
self.commandPublisher[self.bus].publish(self.motorcommand)
def getMaxSensor(self):
bus = self.cmbBus.currentIndex()
motorparameter = rospy.ServiceProxy(self.parameterTopic[bus], MotorParameter)
self.txtMaxSensor.setText(str(motorparameter(self.cmbServo.currentIndex(), 0xA4).data))
def setEnabled(self):
self.motorcommand.id = self.cmbServo.currentIndex()
self.motorcommand.parameter = 0x18
self.motorcommand.value = float(self.chkEnabled.isChecked())
self.commandPublisher[self.bus].publish(self.motorcommand)
def getEnabled(self):
bus = self.cmbBus.currentIndex()
motorparameter = rospy.ServiceProxy(self.parameterTopic[bus], MotorParameter)
self.chkEnabled.setChecked(bool(motorparameter(self.cmbServo.currentIndex(), 0x18).data))
def setCalibrated(self):
self.motorcommand.id = self.cmbServo.currentIndex()
self.motorcommand.parameter = 0xA0
self.motorcommand.value = float(self.chkCalibrated.isChecked())
self.commandPublisher[self.bus].publish(self.motorcommand)
def getCalibrated(self):
bus = self.cmbBus.currentIndex()
motorparameter = rospy.ServiceProxy(self.parameterTopic[bus], MotorParameter)
self.chkCalibrated.setChecked(bool(motorparameter(self.cmbServo.currentIndex(), 0xA0).data))
def main():
app = QtGui.QApplication(sys.argv) # A new instance of QApplication
app.setStyleSheet(qdarkstyle.load_stylesheet(pyside=False))
form = ExampleApp() # We set the form to be our ExampleApp (design)
form.show() # Show the form
app.exec_() # and execute the app
if __name__ == '__main__': # if we're running file directly and not importing it
main()
|
the-stack_0_1440 | import numpy as np
import cv2
cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()
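# MOG2 models each pixel's background as a mixture of Gaussians and flags pixels that do not fit as foreground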
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
#blur the frame to get rid of noise. the kernel should be ODD
#frame = cv2.GaussianBlur(frame,(7,7),0)
# Our operations on the frame come here
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#apply the background subtraction
fgmask = fgbg.apply(frame)
kernel = np.ones((3,3),np.uint8)
kernel_lg = np.ones((7,7),np.uint8)
#erosion followed by dilation is called an opening
#mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
#erode the mask to get rid of noise
fgmask = cv2.erode(fgmask,kernel,iterations = 1)
#dialate it back to regain some lost area
fgmask = cv2.dilate(fgmask,kernel_lg,iterations = 1)
# Display the resulting frame
cv2.imshow('frame',gray)
cv2.imshow('vanish',fgmask)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
 |
the-stack_0_1441 | import re
import os
from subprocess import call
from clint.textui import colored, puts, puts_err, indent, progress
def command_out(message):
"""
Used to print shell commands to stderr
"""
puts_err(colored.green(message))
def message(message, n_indent = 4, color = "blue"):
with indent(n_indent):
if color == "blue":
puts_err(colored.blue('\n' + message + '\n'))
elif color == "red":
puts_err(colored.red('\n' + message + '\n'))
def boolify(s):
if s == 'True':
return True
if s == 'False':
return False
raise ValueError("huh?")
def autoconvert(s):
for fn in (boolify, int, float):
try:
return fn(s)
except ValueError:
pass
return s
def parse_region(region):
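    # e.g. "chrI:100-2000" -> ["chrI", "100", "2000"]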
return re.split("[:-]+", region)
# Stack Overflow: 377017
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def check_program_exists(program):
if which(program) is None:
exit(puts_err(colored.red("\n\t" + program + " not installed or on PATH.\n")))
def run_command(comm, shell = True):
"""
Runs a shell command
"""
sh_out = ' '.join(comm) if type(comm) == list else comm
command_out(sh_out)
out = call(comm, shell = shell)
if out != 0:
raise Exception(f"Error [{out}] running {sh_out}")
return out
# Levenshtein edit distnace
# https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python
def lev(s1, s2):
if len(s1) < len(s2):
return lev(s2, s1)
# len(s1) >= len(s2)
if len(s2) == 0:
return len(s1)
previous_row = list(range(len(s2) + 1))
for i, c1 in enumerate(s1):
current_row = [i + 1]
for j, c2 in enumerate(s2):
insertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer
deletions = current_row[j] + 1 # than s2
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
|
the-stack_0_1443 | """ Utility functions for photoscript """
import fnmatch
import os
import re
import subprocess
def ditto(src, dest, norsrc=False):
""" Copies a file or directory tree from src path to dest path
src: source path as string
dest: destination path as string
norsrc: (bool) if True, uses --norsrc flag with ditto so it will not copy
resource fork or extended attributes. May be useful on volumes that
don't work with extended attributes (likely only certain SMB mounts)
default is False
Uses ditto to perform copy; will silently overwrite dest if it exists
Raises exception if copy fails or either path is None """
if src is None or dest is None:
raise ValueError("src and dest must not be None", src, dest)
if norsrc:
command = ["/usr/bin/ditto", "--norsrc", src, dest]
else:
command = ["/usr/bin/ditto", src, dest]
# if error on copy, subprocess will raise CalledProcessError
result = subprocess.run(command, check=True, stderr=subprocess.PIPE)
return result.returncode
def findfiles(pattern, path_):
"""Returns list of filenames from path_ matched by pattern
shell pattern. Matching is case-insensitive.
If 'path_' is invalid/doesn't exist, returns []."""
if not os.path.isdir(path_):
return []
# See: https://gist.github.com/techtonik/5694830
rule = re.compile(fnmatch.translate(pattern), re.IGNORECASE)
return [name for name in os.listdir(path_) if rule.match(name)]
|
the-stack_0_1444 | # Copyright (c) 2018-2020, NVIDIA CORPORATION.
import pickle
import warnings
from numbers import Number
from types import SimpleNamespace
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
from numba import cuda, njit
import cudf
from cudf import _lib as libcudf
from cudf._lib.column import Column
from cudf._lib.null_mask import (
MaskState,
bitmask_allocation_size_bytes,
create_null_mask,
)
from cudf._lib.scalar import as_device_scalar
from cudf._lib.stream_compaction import distinct_count as cpp_distinct_count
from cudf._lib.transform import bools_to_mask
from cudf.core.abc import Serializable
from cudf.core.buffer import Buffer
from cudf.core.dtypes import CategoricalDtype
from cudf.utils import ioutils, utils
from cudf.utils.dtypes import (
NUMERIC_TYPES,
check_cast_unsupported_dtype,
cudf_dtypes_to_pandas_dtypes,
get_time_unit,
is_categorical_dtype,
is_list_dtype,
is_numerical_dtype,
is_scalar,
is_string_dtype,
is_struct_dtype,
min_signed_type,
min_unsigned_type,
np_to_pa_dtype,
)
from cudf.utils.utils import mask_dtype
class ColumnBase(Column, Serializable):
def __init__(
self,
data,
size,
dtype,
mask=None,
offset=0,
null_count=None,
children=(),
):
"""
Parameters
----------
data : Buffer
dtype
The type associated with the data Buffer
mask : Buffer, optional
children : tuple, optional
"""
super().__init__(
data,
size=size,
dtype=dtype,
mask=mask,
offset=offset,
children=children,
)
def as_frame(self):
"""
Converts a Column to Frame
"""
return cudf.core.frame.Frame({None: self.copy(deep=False)})
@property
def data_array_view(self):
"""
View the data as a device array object
"""
if self.dtype == "object":
raise ValueError("Cannot get an array view of a StringColumn")
if is_categorical_dtype(self.dtype):
return self.codes.data_array_view
else:
dtype = self.dtype
result = cuda.as_cuda_array(self.data)
# Workaround until `.view(...)` can change itemsize
# xref: https://github.com/numba/numba/issues/4829
result = cuda.devicearray.DeviceNDArray(
shape=(result.nbytes // dtype.itemsize,),
strides=(dtype.itemsize,),
dtype=dtype,
gpu_data=result.gpu_data,
)
return result
@property
def mask_array_view(self):
"""
View the mask as a device array
"""
result = cuda.as_cuda_array(self.mask)
dtype = mask_dtype
# Workaround until `.view(...)` can change itemsize
# xref: https://github.com/numba/numba/issues/4829
result = cuda.devicearray.DeviceNDArray(
shape=(result.nbytes // dtype.itemsize,),
strides=(dtype.itemsize,),
dtype=dtype,
gpu_data=result.gpu_data,
)
return result
def __len__(self):
return self.size
def to_pandas(self, index=None, nullable=False, **kwargs):
if nullable and self.dtype in cudf_dtypes_to_pandas_dtypes:
pandas_nullable_dtype = cudf_dtypes_to_pandas_dtypes[self.dtype]
arrow_array = self.to_arrow()
pandas_array = pandas_nullable_dtype.__from_arrow__(arrow_array)
pd_series = pd.Series(pandas_array, copy=False)
elif str(self.dtype) in NUMERIC_TYPES and self.null_count == 0:
pd_series = pd.Series(cupy.asnumpy(self.values), copy=False)
else:
pd_series = self.to_arrow().to_pandas(**kwargs)
if index is not None:
pd_series.index = index
return pd_series
def __iter__(self):
cudf.utils.utils.raise_iteration_error(obj=self)
@property
def values_host(self):
"""
Return a numpy representation of the Column.
"""
return self.data_array_view.copy_to_host()
@property
def values(self):
"""
Return a CuPy representation of the Column.
"""
if len(self) == 0:
return cupy.asarray([], dtype=self.dtype)
if self.has_nulls:
raise ValueError("Column must have no nulls.")
return cupy.asarray(self.data_array_view)
def clip(self, lo, hi):
if is_categorical_dtype(self):
input_col = self.astype(self.categories.dtype)
return libcudf.replace.clip(input_col, lo, hi).astype(self.dtype)
else:
return libcudf.replace.clip(self, lo, hi)
def equals(self, other, check_dtypes=False):
if self is other:
return True
if other is None or len(self) != len(other):
return False
if check_dtypes:
if self.dtype != other.dtype:
return False
return (self == other).min()
def all(self):
return bool(libcudf.reduce.reduce("all", self, dtype=np.bool_))
def any(self):
return bool(libcudf.reduce.reduce("any", self, dtype=np.bool_))
def __sizeof__(self):
n = self.data.size
if self.nullable:
n += bitmask_allocation_size_bytes(self.size)
return n
@classmethod
def _concat(cls, objs, dtype=None):
if len(objs) == 0:
dtype = pd.api.types.pandas_dtype(dtype)
if is_categorical_dtype(dtype):
dtype = CategoricalDtype()
return column_empty(0, dtype=dtype, masked=True)
# If all columns are `NumericalColumn` with different dtypes,
# we cast them to a common dtype.
# Notice, we can always cast pure null columns
not_null_cols = list(filter(lambda o: o.valid_count > 0, objs))
if len(not_null_cols) > 0 and (
len(
[
o
for o in not_null_cols
if not is_numerical_dtype(o.dtype)
or np.issubdtype(o.dtype, np.datetime64)
]
)
== 0
):
col_dtypes = [o.dtype for o in not_null_cols]
# Use NumPy to find a common dtype
common_dtype = np.find_common_type(col_dtypes, [])
# Cast all columns to the common dtype
for i in range(len(objs)):
objs[i] = objs[i].astype(common_dtype)
# Find the first non-null column:
head = objs[0]
for i, obj in enumerate(objs):
if obj.valid_count > 0:
head = obj
break
for i, obj in enumerate(objs):
# Check that all columns are the same type:
if not pd.api.types.is_dtype_equal(obj.dtype, head.dtype):
# if all null, cast to appropriate dtype
if obj.valid_count == 0:
objs[i] = column_empty_like(
head, dtype=head.dtype, masked=True, newsize=len(obj)
)
else:
raise ValueError("All columns must be the same type")
cats = None
is_categorical = all(is_categorical_dtype(o.dtype) for o in objs)
# Combine CategoricalColumn categories
if is_categorical:
# Combine and de-dupe the categories
cats = (
cudf.concat([o.cat().categories for o in objs])
.to_series()
.drop_duplicates(ignore_index=True)
._column
)
objs = [
o.cat()._set_categories(
o.cat().categories, cats, is_unique=True
)
for o in objs
]
# Map `objs` into a list of the codes until we port Categorical to
# use the libcudf++ Category data type.
objs = [o.cat().codes._column for o in objs]
head = head.cat().codes._column
newsize = sum(map(len, objs))
if newsize > libcudf.MAX_COLUMN_SIZE:
raise MemoryError(
f"Result of concat cannot have "
f"size > {libcudf.MAX_COLUMN_SIZE_STR}"
)
# Filter out inputs that have 0 length
objs = [o for o in objs if len(o) > 0]
# Perform the actual concatenation
if newsize > 0:
col = libcudf.concat.concat_columns(objs)
else:
col = column_empty(0, head.dtype, masked=True)
if is_categorical:
col = build_categorical_column(
categories=cats,
codes=as_column(col.base_data, dtype=col.dtype),
mask=col.base_mask,
size=col.size,
offset=col.offset,
)
return col
def dropna(self):
dropped_col = self.as_frame().dropna()._as_column()
return dropped_col
def to_arrow(self):
"""Convert to PyArrow Array
Examples
--------
>>> import cudf
>>> col = cudf.core.column.as_column([1, 2, 3, 4])
>>> col.to_arrow()
<pyarrow.lib.Int64Array object at 0x7f886547f830>
[
1,
2,
3,
4
]
"""
if isinstance(self, cudf.core.column.CategoricalColumn):
# arrow doesn't support unsigned codes
signed_type = (
min_signed_type(self.codes.max())
if self.codes.size > 0
else np.int8
)
codes = self.codes.astype(signed_type)
categories = self.categories
out_indices = codes.to_arrow()
out_dictionary = categories.to_arrow()
return pa.DictionaryArray.from_arrays(
out_indices, out_dictionary, ordered=self.ordered,
)
if isinstance(self, cudf.core.column.StringColumn) and (
self.null_count == len(self)
):
return pa.NullArray.from_buffers(
pa.null(), len(self), [pa.py_buffer((b""))]
)
return libcudf.interop.to_arrow(
libcudf.table.Table(
cudf.core.column_accessor.ColumnAccessor({"None": self})
),
[["None"]],
keep_index=False,
)["None"].chunk(0)
@classmethod
def from_arrow(cls, array):
"""
Convert PyArrow Array/ChunkedArray to column
Parameters
----------
array : PyArrow Array/ChunkedArray
Returns
-------
column
Examples
--------
>>> import pyarrow as pa
>>> import cudf
>>> cudf.core.column.ColumnBase.from_arrow(pa.array([1, 2, 3, 4]))
<cudf.core.column.numerical.NumericalColumn object at 0x7f8865497ef0>
"""
if not isinstance(array, (pa.Array, pa.ChunkedArray)):
raise TypeError("array should be PyArrow array or chunked array")
data = pa.table([array], [None])
if isinstance(array.type, pa.DictionaryType):
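            # dictionary-encoded (categorical) arrays: convert the indices and the dictionary separately, then rebuild a categorical column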
indices_table = pa.table(
{
"None": pa.chunked_array(
[chunk.indices for chunk in data["None"].chunks],
type=array.type.index_type,
)
}
)
dictionaries_table = pa.table(
{
"None": pa.chunked_array(
[chunk.dictionary for chunk in data["None"].chunks],
type=array.type.value_type,
)
}
)
codes = libcudf.interop.from_arrow(
indices_table, indices_table.column_names
)._data["None"]
categories = libcudf.interop.from_arrow(
dictionaries_table, dictionaries_table.column_names
)._data["None"]
return build_categorical_column(
categories=categories,
codes=codes,
mask=codes.base_mask,
size=codes.size,
ordered=array.type.ordered,
)
elif isinstance(array.type, pa.StructType):
return cudf.core.column.StructColumn.from_arrow(array)
return libcudf.interop.from_arrow(data, data.column_names)._data[
"None"
]
def _get_mask_as_column(self):
return libcudf.transform.mask_to_bools(
self.base_mask, self.offset, self.offset + len(self)
)
def _memory_usage(self, **kwargs):
return self.__sizeof__()
def to_gpu_array(self, fillna=None):
"""Get a dense numba device array for the data.
Parameters
----------
fillna : scalar, 'pandas', or None
See *fillna* in ``.to_array``.
Notes
-----
if ``fillna`` is ``None``, null values are skipped. Therefore, the
output size could be smaller.
"""
if fillna:
return self.fillna(self.default_na_value()).data_array_view
else:
return self.dropna().data_array_view
def to_array(self, fillna=None):
"""Get a dense numpy array for the data.
Parameters
----------
fillna : scalar, 'pandas', or None
Defaults to None, which will skip null values.
If it equals "pandas", null values are filled with NaNs.
Non integral dtype is promoted to np.float64.
Notes
-----
if ``fillna`` is ``None``, null values are skipped. Therefore, the
output size could be smaller.
"""
return self.to_gpu_array(fillna=fillna).copy_to_host()
def _fill(self, fill_value, begin=0, end=-1, inplace=False):
if end <= begin or begin >= self.size:
return self if inplace else self.copy()
if is_categorical_dtype(self.dtype):
return self._fill_categorical(fill_value, begin, end, inplace)
fill_scalar = as_device_scalar(fill_value, self.dtype)
if not inplace:
return libcudf.filling.fill(self, begin, end, fill_scalar)
if is_string_dtype(self.dtype):
return self._mimic_inplace(
libcudf.filling.fill(self, begin, end, fill_scalar),
inplace=True,
)
if fill_value is None and not self.nullable:
mask = create_null_mask(self.size, state=MaskState.ALL_VALID)
self.set_base_mask(mask)
libcudf.filling.fill_in_place(self, begin, end, fill_scalar)
return self
def _fill_categorical(self, fill_value, begin, end, inplace):
fill_code = self._encode(fill_value)
fill_scalar = as_device_scalar(fill_code, self.codes.dtype)
result = self if inplace else self.copy()
libcudf.filling.fill_in_place(result.codes, begin, end, fill_scalar)
return result
def shift(self, offset, fill_value):
return libcudf.copying.shift(self, offset, fill_value)
@property
def valid_count(self):
"""Number of non-null values"""
return len(self) - self.null_count
@property
def nullmask(self):
"""The gpu buffer for the null-mask
"""
if self.nullable:
return self.mask_array_view
else:
raise ValueError("Column has no null mask")
def copy(self, deep=True):
"""Columns are immutable, so a deep copy produces a copy of the
underlying data and mask and a shallow copy creates a new column and
copies the references of the data and mask.
"""
if deep:
return libcudf.copying.copy_column(self)
else:
return build_column(
self.base_data,
self.dtype,
mask=self.base_mask,
size=self.size,
offset=self.offset,
children=self.base_children,
)
def view(self, dtype):
"""
View the data underlying a column as different dtype.
The source column must divide evenly into the size of
the desired data type. Columns with nulls may only be
viewed as dtypes with size equal to source dtype size
Parameters
----------
dtype : NumPy dtype, string
The dtype to view the data as
"""
dtype = np.dtype(dtype)
        if dtype.kind in ("O", "U", "S"):  # NumPy kind codes for object, str and bytes are uppercase
raise TypeError(
"Bytes viewed as str without metadata is ambiguous"
)
if self.dtype.itemsize == dtype.itemsize:
return build_column(
self.base_data,
dtype=dtype,
mask=self.base_mask,
size=self.size,
offset=self.offset,
)
else:
if self.null_count > 0:
raise ValueError(
"Can not produce a view of a column with nulls"
)
if (self.size * self.dtype.itemsize) % dtype.itemsize:
raise ValueError(
f"Can not divide {self.size * self.dtype.itemsize}"
+ f" total bytes into {dtype} with size {dtype.itemsize}"
)
new_buf_ptr = (
self.base_data.ptr + self.offset * self.dtype.itemsize
)
new_buf_size = self.size * self.dtype.itemsize
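            # reinterpret the same device memory without copying: the offset is applied in units of the source dtype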
view_buf = Buffer(
data=new_buf_ptr,
size=new_buf_size,
owner=self.base_data._owner,
)
return build_column(view_buf, dtype=dtype)
def element_indexing(self, index):
"""Default implementation for indexing to an element
Raises
------
``IndexError`` if out-of-bound
"""
index = np.int32(index)
if index < 0:
index = len(self) + index
if index > len(self) - 1 or index < 0:
raise IndexError("single positional indexer is out-of-bounds")
return libcudf.copying.get_element(self, index).value
def __getitem__(self, arg):
if isinstance(arg, Number):
arg = int(arg)
return self.element_indexing(arg)
elif isinstance(arg, slice):
if is_categorical_dtype(self):
codes = self.codes[arg]
return build_categorical_column(
categories=self.categories,
codes=as_column(codes.base_data, dtype=codes.dtype),
mask=codes.base_mask,
ordered=self.ordered,
size=codes.size,
offset=codes.offset,
)
start, stop, stride = arg.indices(len(self))
if start < 0:
start = start + len(self)
if stop < 0:
stop = stop + len(self)
if start >= stop:
return column_empty(0, self.dtype, masked=True)
# compute mask slice
if stride == 1 or stride is None:
return libcudf.copying.column_slice(self, [start, stop])[0]
else:
# Need to create a gather map for given slice with stride
gather_map = arange(
start=start,
stop=stop,
step=stride,
dtype=np.dtype(np.int32),
)
return self.take(gather_map)
else:
arg = as_column(arg)
if len(arg) == 0:
arg = as_column([], dtype="int32")
if pd.api.types.is_integer_dtype(arg.dtype):
return self.take(arg)
if pd.api.types.is_bool_dtype(arg.dtype):
return self.apply_boolean_mask(arg)
raise NotImplementedError(type(arg))
def __setitem__(self, key, value):
"""
Set the value of self[key] to value.
If value and self are of different types,
value is coerced to self.dtype
"""
if isinstance(key, slice):
key_start, key_stop, key_stride = key.indices(len(self))
if key_start < 0:
key_start = key_start + len(self)
if key_stop < 0:
key_stop = key_stop + len(self)
if key_start >= key_stop:
return self.copy()
if (key_stride is None or key_stride == 1) and is_scalar(value):
return self._fill(value, key_start, key_stop, inplace=True)
if key_stride != 1 or key_stride is not None or is_scalar(value):
key = arange(
start=key_start,
stop=key_stop,
step=key_stride,
dtype=np.dtype(np.int32),
)
nelem = len(key)
else:
nelem = abs(key_stop - key_start)
else:
key = as_column(key)
if pd.api.types.is_bool_dtype(key.dtype):
if not len(key) == len(self):
raise ValueError(
"Boolean mask must be of same length as column"
)
key = arange(len(self))[key]
if hasattr(value, "__len__") and len(value) == len(self):
value = as_column(value)[key]
nelem = len(key)
if is_scalar(value):
if is_categorical_dtype(self.dtype):
value = self._encode(value)
else:
value = self.dtype.type(value) if value is not None else value
else:
if len(value) != nelem:
msg = (
f"Size mismatch: cannot set value "
f"of size {len(value)} to indexing result of size "
f"{nelem}"
)
raise ValueError(msg)
value = as_column(value).astype(self.dtype)
if is_categorical_dtype(value.dtype):
value = value.cat().set_categories(self.categories)
assert self.dtype == value.dtype
if (
isinstance(key, slice)
and (key_stride == 1 or key_stride is None)
and not is_scalar(value)
):
out = libcudf.copying.copy_range(
value, self, 0, nelem, key_start, key_stop, False
)
if is_categorical_dtype(value.dtype):
out = build_categorical_column(
categories=value.categories,
codes=as_column(out.base_data, dtype=out.dtype),
mask=out.base_mask,
size=out.size,
offset=out.offset,
ordered=value.ordered,
)
else:
try:
if is_scalar(value):
input = self
if is_categorical_dtype(self.dtype):
input = self.codes
out = input.as_frame()._scatter(key, [value])._as_column()
if is_categorical_dtype(self.dtype):
out = build_categorical_column(
categories=self.categories,
codes=as_column(out.base_data, dtype=out.dtype),
mask=out.base_mask,
size=out.size,
offset=out.offset,
ordered=self.ordered,
)
else:
if not isinstance(value, Column):
value = as_column(value)
out = (
self.as_frame()
._scatter(key, value.as_frame())
._as_column()
)
except RuntimeError as e:
if "out of bounds" in str(e):
raise IndexError(
f"index out of bounds for column of size {len(self)}"
) from e
raise
self._mimic_inplace(out, inplace=True)
def fillna(self, value=None, method=None, dtype=None):
"""Fill null values with ``value``.
Returns a copy with null filled.
"""
return libcudf.replace.replace_nulls(
input_col=self, replacement=value, method=method, dtype=dtype
)
def isnull(self):
"""Identify missing values in a Column.
"""
result = libcudf.unary.is_null(self)
if self.dtype.kind == "f":
# Need to consider `np.nan` values incase
# of a float column
result = result | libcudf.unary.is_nan(self)
return result
def isna(self):
"""Identify missing values in a Column. Alias for isnull.
"""
return self.isnull()
def notnull(self):
"""Identify non-missing values in a Column.
"""
result = libcudf.unary.is_valid(self)
if self.dtype.kind == "f":
# Need to consider `np.nan` values incase
# of a float column
result = result & libcudf.unary.is_non_nan(self)
return result
def notna(self):
"""Identify non-missing values in a Column. Alias for notnull.
"""
return self.notnull()
def find_first_value(self, value):
"""
Returns offset of first value that matches
"""
        # FIXME: inefficient; may need a libcudf API
index = cudf.core.index.RangeIndex(0, stop=len(self))
indices = index.take(self == value)
if not len(indices):
raise ValueError("value not found")
return indices[0]
def find_last_value(self, value):
"""
Returns offset of last value that matches
"""
        # FIXME: inefficient; may need a libcudf API
index = cudf.core.index.RangeIndex(0, stop=len(self))
indices = index.take(self == value)
if not len(indices):
raise ValueError("value not found")
return indices[-1]
def append(self, other):
return ColumnBase._concat([self, as_column(other)])
def quantile(self, q, interpolation, exact):
raise TypeError(f"cannot perform quantile with type {self.dtype}")
def median(self, skipna=None):
raise TypeError(f"cannot perform median with type {self.dtype}")
def take(self, indices, keep_index=True):
"""Return Column by taking values from the corresponding *indices*.
"""
# Handle zero size
if indices.size == 0:
return column_empty_like(self, newsize=0)
try:
return (
self.as_frame()
._gather(indices, keep_index=keep_index)
._as_column()
)
except RuntimeError as e:
if "out of bounds" in str(e):
raise IndexError(
f"index out of bounds for column of size {len(self)}"
) from e
raise
def isin(self, values):
"""Check whether values are contained in the Column.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a TypeError. Instead, turn a single string into a list
of one element.
Returns
-------
result: Column
Column of booleans indicating if each element is in values.
Raises
-------
TypeError
If values is a string
"""
if is_scalar(values):
raise TypeError(
"only list-like objects are allowed to be passed "
f"to isin(), you passed a [{type(values).__name__}]"
)
lhs = self
rhs = None
try:
# We need to convert values to same type as self,
# hence passing dtype=self.dtype
rhs = as_column(values, dtype=self.dtype)
# Short-circuit if rhs is all null.
if lhs.null_count == 0 and (rhs.null_count == len(rhs)):
return full(len(self), False, dtype="bool")
except ValueError:
# pandas functionally returns all False when cleansing via
# typecasting fails
return full(len(self), False, dtype="bool")
# If categorical, combine categories first
if is_categorical_dtype(lhs):
lhs_cats = lhs.cat().categories._values
rhs_cats = rhs.cat().categories._values
if not np.issubdtype(rhs_cats.dtype, lhs_cats.dtype):
# If they're not the same dtype, short-circuit if the values
# list doesn't have any nulls. If it does have nulls, make
# the values list a Categorical with a single null
if not rhs.has_nulls:
return full(len(self), False, dtype="bool")
rhs = as_column(pd.Categorical.from_codes([-1], categories=[]))
rhs = rhs.cat().set_categories(lhs_cats).astype(self.dtype)
lhs = cudf.DataFrame({"x": lhs, "orig_order": arange(len(lhs))})
rhs = cudf.DataFrame(
{"x": rhs, "bool": full(len(rhs), True, dtype="bool")}
)
res = lhs.merge(rhs, on="x", how="left").sort_values(by="orig_order")
res = res.drop_duplicates(subset="orig_order", ignore_index=True)
res = res._data["bool"].fillna(False)
return res
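    # Illustrative sketch (not part of the original source), at the Series level:
    #   cudf.Series([1, 2, 3]).isin([1, 3])  -> [True, False, True]
    #   cudf.Series(["a", "b"]).isin("a")    -> TypeError (scalars are rejected)
    # The merge-based implementation above restores the original row order via
    # the temporary "orig_order" column before returning the boolean column.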
def as_mask(self):
"""Convert booleans to bitmask
Returns
-------
Buffer
"""
if self.has_nulls:
raise ValueError("Column must have no nulls.")
return bools_to_mask(self)
@ioutils.doc_to_dlpack()
def to_dlpack(self):
"""{docstring}"""
return cudf.io.dlpack.to_dlpack(self)
@property
def is_unique(self):
return self.distinct_count() == len(self)
@property
def is_monotonic(self):
return self.is_monotonic_increasing
@property
def is_monotonic_increasing(self):
if not hasattr(self, "_is_monotonic_increasing"):
if self.has_nulls:
self._is_monotonic_increasing = False
else:
self._is_monotonic_increasing = self.as_frame()._is_sorted(
ascending=None, null_position=None
)
return self._is_monotonic_increasing
@property
def is_monotonic_decreasing(self):
if not hasattr(self, "_is_monotonic_decreasing"):
if self.has_nulls:
self._is_monotonic_decreasing = False
else:
self._is_monotonic_decreasing = self.as_frame()._is_sorted(
ascending=[False], null_position=None
)
return self._is_monotonic_decreasing
def get_slice_bound(self, label, side, kind):
"""
Calculate slice bound that corresponds to given label.
Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
"""
assert kind in ["ix", "loc", "getitem", None]
if side not in ("left", "right"):
raise ValueError(
"Invalid value for side kwarg,"
" must be either 'left' or 'right': %s" % (side,)
)
# TODO: Handle errors/missing keys correctly
# Not currently using `kind` argument.
if side == "left":
return self.find_first_value(label, closest=True)
if side == "right":
return self.find_last_value(label, closest=True) + 1
def sort_by_values(self, ascending=True, na_position="last"):
col_inds = self.as_frame()._get_sorted_inds(ascending, na_position)
col_keys = self[col_inds]
return col_keys, col_inds
def distinct_count(self, method="sort", dropna=True):
if method != "sort":
            msg = "non-sort-based distinct_count() is not implemented yet"
raise NotImplementedError(msg)
return cpp_distinct_count(self, ignore_nulls=dropna)
def astype(self, dtype, **kwargs):
if is_categorical_dtype(dtype):
return self.as_categorical_column(dtype, **kwargs)
elif pd.api.types.pandas_dtype(dtype).type in {
np.str_,
np.object_,
str,
}:
return self.as_string_column(dtype, **kwargs)
elif is_list_dtype(dtype):
if not self.dtype == dtype:
raise NotImplementedError(
"Casting list columns not currently supported"
)
return self
elif np.issubdtype(dtype, np.datetime64):
return self.as_datetime_column(dtype, **kwargs)
elif np.issubdtype(dtype, np.timedelta64):
return self.as_timedelta_column(dtype, **kwargs)
else:
return self.as_numerical_column(dtype)
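    # Illustrative sketch (not part of the original source) of the dispatch above:
    #   col.astype("category")         -> as_categorical_column
    #   col.astype("str")              -> as_string_column
    #   col.astype("datetime64[ns]")   -> as_datetime_column
    #   col.astype("timedelta64[ns]")  -> as_timedelta_column
    #   col.astype("int32")            -> as_numerical_column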
def as_categorical_column(self, dtype, **kwargs):
if "ordered" in kwargs:
ordered = kwargs["ordered"]
else:
ordered = False
sr = cudf.Series(self)
# Re-label self w.r.t. the provided categories
if isinstance(dtype, (cudf.CategoricalDtype, pd.CategoricalDtype)):
labels = sr.label_encoding(cats=dtype.categories)
if "ordered" in kwargs:
warnings.warn(
"Ignoring the `ordered` parameter passed in `**kwargs`, "
"will be using `ordered` parameter of CategoricalDtype"
)
return build_categorical_column(
categories=dtype.categories,
codes=labels._column,
mask=self.mask,
ordered=dtype.ordered,
)
cats = sr.unique().astype(sr.dtype)
label_dtype = min_unsigned_type(len(cats))
labels = sr.label_encoding(cats=cats, dtype=label_dtype, na_sentinel=1)
# columns include null index in factorization; remove:
if self.has_nulls:
cats = cats.dropna()
min_type = min_unsigned_type(len(cats), 8)
labels = labels - 1
if np.dtype(min_type).itemsize < labels.dtype.itemsize:
labels = labels.astype(min_type)
return build_categorical_column(
categories=cats._column,
codes=labels._column,
mask=self.mask,
ordered=ordered,
)
def as_numerical_column(self, dtype):
raise NotImplementedError
def as_datetime_column(self, dtype, **kwargs):
raise NotImplementedError
def as_timedelta_column(self, dtype, **kwargs):
raise NotImplementedError
def as_string_column(self, dtype, **kwargs):
raise NotImplementedError
def apply_boolean_mask(self, mask):
mask = as_column(mask, dtype="bool")
result = (
self.as_frame()._apply_boolean_mask(boolean_mask=mask)._as_column()
)
return result
def argsort(self, ascending=True, na_position="last"):
sorted_indices = self.as_frame()._get_sorted_inds(
ascending=ascending, na_position=na_position
)
return sorted_indices
@property
def __cuda_array_interface__(self):
output = {
"shape": (len(self),),
"strides": (self.dtype.itemsize,),
"typestr": self.dtype.str,
"data": (self.data_ptr, False),
"version": 1,
}
if self.nullable and self.has_nulls:
# Create a simple Python object that exposes the
# `__cuda_array_interface__` attribute here since we need to modify
# some of the attributes from the numba device array
mask = SimpleNamespace(
__cuda_array_interface__={
"shape": (len(self),),
"typestr": "<t1",
"data": (self.mask_ptr, True),
"version": 1,
}
)
output["mask"] = mask
return output
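    # Illustrative sketch (not part of the original source): the dict above lets
    # CUDA-aware libraries view the device buffer zero-copy, e.g. (hypothetical):
    #   import cupy
    #   device_view = cupy.asarray(col)
    # Consumers that do not understand the extra "mask" entry generally only
    # accept columns without nulls.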
def __add__(self, other):
return self.binary_operator("add", other)
def __sub__(self, other):
return self.binary_operator("sub", other)
def __mul__(self, other):
return self.binary_operator("mul", other)
def __eq__(self, other):
return self.binary_operator("eq", other)
def __ne__(self, other):
return self.binary_operator("ne", other)
def __or__(self, other):
return self.binary_operator("or", other)
def __and__(self, other):
return self.binary_operator("and", other)
def __floordiv__(self, other):
return self.binary_operator("floordiv", other)
def __truediv__(self, other):
return self.binary_operator("truediv", other)
def __mod__(self, other):
return self.binary_operator("mod", other)
def __pow__(self, other):
return self.binary_operator("pow", other)
def __lt__(self, other):
return self.binary_operator("lt", other)
def __gt__(self, other):
return self.binary_operator("gt", other)
def __le__(self, other):
return self.binary_operator("le", other)
def __ge__(self, other):
return self.binary_operator("ge", other)
def searchsorted(
self, value, side="left", ascending=True, na_position="last"
):
values = as_column(value).as_frame()
return self.as_frame().searchsorted(
values, side, ascending=ascending, na_position=na_position
)
def unique(self):
"""
Get unique values in the data
"""
return (
self.as_frame()
.drop_duplicates(keep="first", ignore_index=True)
._as_column()
)
def serialize(self):
header = {}
frames = []
header["type-serialized"] = pickle.dumps(type(self))
header["dtype"] = self.dtype.str
data_header, data_frames = self.data.serialize()
header["data"] = data_header
frames.extend(data_frames)
if self.nullable:
mask_header, mask_frames = self.mask.serialize()
header["mask"] = mask_header
frames.extend(mask_frames)
header["frame_count"] = len(frames)
return header, frames
@classmethod
def deserialize(cls, header, frames):
dtype = header["dtype"]
data = Buffer.deserialize(header["data"], [frames[0]])
mask = None
if "mask" in header:
mask = Buffer.deserialize(header["mask"], [frames[1]])
return build_column(data=data, dtype=dtype, mask=mask)
def min(self, skipna=None, dtype=None):
result_col = self._process_for_reduction(skipna=skipna)
if isinstance(result_col, ColumnBase):
return libcudf.reduce.reduce("min", result_col, dtype=dtype)
else:
return result_col
def max(self, skipna=None, dtype=None):
result_col = self._process_for_reduction(skipna=skipna)
if isinstance(result_col, ColumnBase):
return libcudf.reduce.reduce("max", result_col, dtype=dtype)
else:
return result_col
def sum(self, skipna=None, dtype=None, min_count=0):
raise TypeError(f"cannot perform sum with type {self.dtype}")
def product(self, skipna=None, dtype=None, min_count=0):
raise TypeError(f"cannot perform prod with type {self.dtype}")
def mean(self, skipna=None, dtype=None):
raise TypeError(f"cannot perform mean with type {self.dtype}")
def std(self, skipna=None, ddof=1, dtype=np.float64):
raise TypeError(f"cannot perform std with type {self.dtype}")
def var(self, skipna=None, ddof=1, dtype=np.float64):
raise TypeError(f"cannot perform var with type {self.dtype}")
def kurtosis(self, skipna=None):
raise TypeError(f"cannot perform kurt with type {self.dtype}")
def skew(self, skipna=None):
raise TypeError(f"cannot perform skew with type {self.dtype}")
def cov(self, other):
raise TypeError(
f"cannot perform covarience with types {self.dtype}, "
f"{other.dtype}"
)
def corr(self, other):
raise TypeError(
f"cannot perform corr with types {self.dtype}, {other.dtype}"
)
def nans_to_nulls(self):
if self.dtype.kind == "f":
col = self.fillna(np.nan)
newmask = libcudf.transform.nans_to_nulls(col)
return self.set_mask(newmask)
else:
return self
def _process_for_reduction(self, skipna=None, min_count=0):
skipna = True if skipna is None else skipna
if skipna:
result_col = self.nans_to_nulls()
if result_col.has_nulls:
result_col = result_col.dropna()
else:
if self.has_nulls:
return cudf.utils.dtypes._get_nan_for_dtype(self.dtype)
result_col = self
if min_count > 0:
valid_count = len(result_col) - result_col.null_count
if valid_count < min_count:
return cudf.utils.dtypes._get_nan_for_dtype(self.dtype)
elif min_count < 0:
warnings.warn(
f"min_count value cannot be negative({min_count}), will "
f"default to 0."
)
return result_col
def scatter_to_table(
self, row_indices, column_indices, names, nrows=None, ncols=None
):
"""
Scatters values from the column into a table.
Parameters
----------
row_indices
A column of the same size as `self` specifying the
row index to scatter each value to
column_indices
A column of the same size as `self` specifying the
column index to scatter each value to
names
The column names of the resulting table
        Returns
        -------
        Frame
            A new Frame containing this column's values scattered to the
            given row/column positions.
"""
if nrows is None:
nrows = 0
if len(row_indices) > 0:
nrows = int(row_indices.max() + 1)
if ncols is None:
ncols = 0
if len(column_indices) > 0:
ncols = int(column_indices.max() + 1)
if nrows * ncols == 0:
return cudf.core.frame.Frame({})
scatter_map = (column_indices * np.int32(nrows)) + row_indices
target = cudf.core.frame.Frame(
{None: column_empty_like(self, masked=True, newsize=nrows * ncols)}
)
target._data[None][scatter_map] = self
result_frames = target._split(range(nrows, nrows * ncols, nrows))
return cudf.core.frame.Frame(
{
name: next(iter(f._columns))
for name, f in zip(names, result_frames)
}
)
def column_empty_like(column, dtype=None, masked=False, newsize=None):
"""Allocate a new column like the given *column*
"""
if dtype is None:
dtype = column.dtype
row_count = len(column) if newsize is None else newsize
if (
hasattr(column, "dtype")
and is_categorical_dtype(column.dtype)
and dtype == column.dtype
):
codes = column_empty_like(column.codes, masked=masked, newsize=newsize)
return build_column(
data=None,
dtype=dtype,
mask=codes.base_mask,
children=(as_column(codes.base_data, dtype=codes.dtype),),
size=codes.size,
)
return column_empty(row_count, dtype, masked)
def column_empty_like_same_mask(column, dtype):
"""Create a new empty Column with the same length and the same mask.
Parameters
----------
dtype : np.dtype like
The dtype of the data buffer.
"""
result = column_empty_like(column, dtype)
if column.nullable:
result = result.set_mask(column.mask)
return result
def column_empty(row_count, dtype="object", masked=False):
"""Allocate a new column like the given row_count and dtype.
"""
dtype = pd.api.types.pandas_dtype(dtype)
children = ()
if is_categorical_dtype(dtype):
data = None
children = (
build_column(
data=Buffer.empty(row_count * np.dtype("int32").itemsize),
dtype="int32",
),
)
elif dtype.kind in "OU":
data = None
children = (
full(row_count + 1, 0, dtype="int32"),
build_column(
data=Buffer.empty(row_count * np.dtype("int8").itemsize),
dtype="int8",
),
)
else:
data = Buffer.empty(row_count * dtype.itemsize)
if masked:
mask = create_null_mask(row_count, state=MaskState.ALL_NULL)
else:
mask = None
return build_column(
data, dtype, mask=mask, size=row_count, children=children
)
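# Illustrative sketch (not part of the original source):
#   column_empty(3, dtype="int64", masked=True)  -> all-null int64 column, length 3
#   column_empty(4, dtype="object")              -> string layout with an int32
#                                                   offsets child of length 5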
def build_column(
data, dtype, mask=None, size=None, offset=0, null_count=None, children=()
):
"""
Build a Column of the appropriate type from the given parameters
Parameters
----------
data : Buffer
The data buffer (can be None if constructing certain Column
types like StringColumn, ListColumn, or CategoricalColumn)
dtype
The dtype associated with the Column to construct
mask : Buffer, optional
The mask buffer
size : int, optional
offset : int, optional
children : tuple, optional
"""
dtype = pd.api.types.pandas_dtype(dtype)
if is_categorical_dtype(dtype):
if not len(children) == 1:
raise ValueError(
"Must specify exactly one child column for CategoricalColumn"
)
if not isinstance(children[0], ColumnBase):
raise TypeError("children must be a tuple of Columns")
return cudf.core.column.CategoricalColumn(
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
children=children,
)
elif dtype.type is np.datetime64:
return cudf.core.column.DatetimeColumn(
data=data,
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
)
elif dtype.type is np.timedelta64:
return cudf.core.column.TimeDeltaColumn(
data=data,
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
)
elif dtype.type in (np.object_, np.str_):
return cudf.core.column.StringColumn(
mask=mask,
size=size,
offset=offset,
children=children,
null_count=null_count,
)
elif is_list_dtype(dtype):
return cudf.core.column.ListColumn(
size=size,
dtype=dtype,
mask=mask,
offset=offset,
null_count=null_count,
children=children,
)
elif is_struct_dtype(dtype):
return cudf.core.column.StructColumn(
data=data,
size=size,
dtype=dtype,
mask=mask,
null_count=null_count,
children=children,
)
else:
return cudf.core.column.NumericalColumn(
data=data,
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
)
def build_categorical_column(
categories,
codes,
mask=None,
size=None,
offset=0,
null_count=None,
ordered=None,
):
"""
Build a CategoricalColumn
Parameters
----------
categories : Column
Column of categories
codes : Column
Column of codes, the size of the resulting Column will be
the size of `codes`
mask : Buffer
Null mask
size : int, optional
offset : int, optional
ordered : bool
Indicates whether the categories are ordered
"""
codes_dtype = min_unsigned_type(len(categories))
codes = as_column(codes)
if codes.dtype != codes_dtype:
codes = codes.astype(codes_dtype)
dtype = CategoricalDtype(categories=as_column(categories), ordered=ordered)
return build_column(
data=None,
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
children=(codes,),
)
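# Illustrative sketch (not part of the original source):
#   cats  = as_column(["a", "b", "c"])
#   codes = as_column([0, 2, 1, 0])
#   col   = build_categorical_column(cats, codes, ordered=False)
# The result has len(codes) == 4 rows, and the codes are re-cast above to the
# smallest unsigned type able to index the categories (uint8 here).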
def as_column(arbitrary, nan_as_null=None, dtype=None, length=None):
"""Create a Column from an arbitrary object
Parameters
----------
arbitrary : object
Object to construct the Column from. See *Notes*.
nan_as_null : bool, optional, default None
If None (default), treats NaN values in arbitrary as null if there is
no mask passed along with it. If True, combines the mask and NaNs to
form a new validity mask. If False, leaves NaN values as is.
dtype : optional
Optionally typecast the constructed Column to the given
dtype.
length : int, optional
If `arbitrary` is a scalar, broadcast into a Column of
the given length.
Returns
-------
A Column of the appropriate type and size.
Notes
-----
    Currently supported inputs are:
* ``Column``
* ``Series``
* ``Index``
* Scalars (can be broadcasted to a specified `length`)
* Objects exposing ``__cuda_array_interface__`` (e.g., numba device arrays)
    * Objects exposing ``__array_interface__`` (e.g., numpy arrays)
* pyarrow array
* pandas.Categorical objects
"""
if isinstance(arbitrary, ColumnBase):
if dtype is not None:
return arbitrary.astype(dtype)
else:
return arbitrary
elif isinstance(arbitrary, cudf.Series):
data = arbitrary._column
if dtype is not None:
data = data.astype(dtype)
elif isinstance(arbitrary, cudf.Index):
data = arbitrary._values
if dtype is not None:
data = data.astype(dtype)
elif type(arbitrary) is Buffer:
if dtype is None:
raise TypeError("dtype cannot be None if 'arbitrary' is a Buffer")
data = build_column(arbitrary, dtype=dtype)
elif hasattr(arbitrary, "__cuda_array_interface__"):
desc = arbitrary.__cuda_array_interface__
current_dtype = np.dtype(desc["typestr"])
arb_dtype = check_cast_unsupported_dtype(current_dtype)
if desc.get("mask", None) is not None:
# Extract and remove the mask from arbitrary before
# passing to cupy.asarray
mask = _mask_from_cuda_array_interface_desc(arbitrary)
arbitrary = SimpleNamespace(__cuda_array_interface__=desc.copy())
arbitrary.__cuda_array_interface__["mask"] = None
desc = arbitrary.__cuda_array_interface__
else:
mask = None
arbitrary = cupy.asarray(arbitrary)
if arb_dtype != current_dtype:
arbitrary = arbitrary.astype(arb_dtype)
current_dtype = arb_dtype
if (
desc["strides"] is not None
and not (arbitrary.itemsize,) == arbitrary.strides
):
arbitrary = cupy.ascontiguousarray(arbitrary)
data = _data_from_cuda_array_interface_desc(arbitrary)
col = build_column(data, dtype=current_dtype, mask=mask)
if dtype is not None:
col = col.astype(dtype)
if isinstance(col, cudf.core.column.CategoricalColumn):
return col
elif np.issubdtype(col.dtype, np.floating):
if nan_as_null or (mask is None and nan_as_null is None):
mask = libcudf.transform.nans_to_nulls(col.fillna(np.nan))
col = col.set_mask(mask)
elif np.issubdtype(col.dtype, np.datetime64):
if nan_as_null or (mask is None and nan_as_null is None):
col = utils.time_col_replace_nulls(col)
return col
elif isinstance(arbitrary, (pa.Array, pa.ChunkedArray)):
col = ColumnBase.from_arrow(arbitrary)
if isinstance(arbitrary, pa.NullArray):
if type(dtype) == str and dtype == "empty":
new_dtype = pd.api.types.pandas_dtype(
arbitrary.type.to_pandas_dtype()
)
else:
new_dtype = pd.api.types.pandas_dtype(dtype)
col = col.astype(new_dtype)
return col
elif isinstance(arbitrary, (pd.Series, pd.Categorical)):
if isinstance(arbitrary, pd.Series) and isinstance(
arbitrary.array, pd.core.arrays.masked.BaseMaskedArray
):
return as_column(arbitrary.array)
if is_categorical_dtype(arbitrary):
data = as_column(pa.array(arbitrary, from_pandas=True))
elif arbitrary.dtype == np.bool:
data = as_column(cupy.asarray(arbitrary), dtype=arbitrary.dtype)
elif arbitrary.dtype.kind in ("f"):
arb_dtype = check_cast_unsupported_dtype(arbitrary.dtype)
data = as_column(
cupy.asarray(arbitrary, dtype=arb_dtype),
nan_as_null=nan_as_null,
dtype=dtype,
)
elif arbitrary.dtype.kind in ("u", "i"):
data = as_column(
cupy.asarray(arbitrary), nan_as_null=nan_as_null, dtype=dtype
)
else:
data = as_column(
pa.array(arbitrary, from_pandas=nan_as_null),
dtype=arbitrary.dtype,
)
if dtype is not None:
data = data.astype(dtype)
elif isinstance(arbitrary, (pd.Timestamp, pd.Timedelta)):
# This will always treat NaTs as nulls since it's not technically a
# discrete value like NaN
data = as_column(pa.array(pd.Series([arbitrary]), from_pandas=True))
if dtype is not None:
data = data.astype(dtype)
elif np.isscalar(arbitrary) and not isinstance(arbitrary, memoryview):
length = length or 1
if (
(nan_as_null is True)
and isinstance(arbitrary, (np.floating, float))
and np.isnan(arbitrary)
):
arbitrary = None
if dtype is None:
dtype = np.dtype("float64")
data = as_column(
utils.scalar_broadcast_to(arbitrary, length, dtype=dtype)
)
if not nan_as_null:
if np.issubdtype(data.dtype, np.floating):
data = data.fillna(np.nan)
elif np.issubdtype(data.dtype, np.datetime64):
data = data.fillna(np.datetime64("NaT"))
elif hasattr(arbitrary, "__array_interface__"):
# CUDF assumes values are always contiguous
desc = arbitrary.__array_interface__
shape = desc["shape"]
arb_dtype = np.dtype(desc["typestr"])
if len(shape) > 1:
raise ValueError("Data must be 1-dimensional")
arbitrary = np.asarray(arbitrary)
        # Handle case that `arbitrary` elements are cupy arrays
if (
shape
and shape[0]
and hasattr(arbitrary[0], "__cuda_array_interface__")
):
return as_column(
cupy.asarray(arbitrary, dtype=arbitrary[0].dtype),
nan_as_null=nan_as_null,
dtype=dtype,
length=length,
)
if not arbitrary.flags["C_CONTIGUOUS"]:
arbitrary = np.ascontiguousarray(arbitrary)
if dtype is not None:
arbitrary = arbitrary.astype(dtype)
if arb_dtype.kind == "M":
time_unit = get_time_unit(arbitrary)
cast_dtype = time_unit in ("D", "W", "M", "Y")
if cast_dtype:
arbitrary = arbitrary.astype(np.dtype("datetime64[s]"))
buffer = Buffer(arbitrary.view("|u1"))
mask = None
if nan_as_null is None or nan_as_null is True:
data = as_column(
buffer, dtype=arbitrary.dtype, nan_as_null=nan_as_null
)
data = utils.time_col_replace_nulls(data)
mask = data.mask
data = cudf.core.column.datetime.DatetimeColumn(
data=buffer, mask=mask, dtype=arbitrary.dtype
)
elif arb_dtype.kind == "m":
time_unit = get_time_unit(arbitrary)
cast_dtype = time_unit in ("D", "W", "M", "Y")
if cast_dtype:
arbitrary = arbitrary.astype(np.dtype("timedelta64[s]"))
buffer = Buffer(arbitrary.view("|u1"))
mask = None
if nan_as_null is None or nan_as_null is True:
data = as_column(
buffer, dtype=arbitrary.dtype, nan_as_null=nan_as_null
)
data = utils.time_col_replace_nulls(data)
mask = data.mask
data = cudf.core.column.timedelta.TimeDeltaColumn(
data=buffer, mask=mask, dtype=arbitrary.dtype
)
elif arb_dtype.kind in ("O", "U"):
data = as_column(
pa.Array.from_pandas(arbitrary), dtype=arbitrary.dtype
)
            # There is no cast operation available for pa.Array from int to
            # str, hence instead of handling it in the pa.Array block, we
            # have to type-cast here.
if dtype is not None:
data = data.astype(dtype)
elif arb_dtype.kind in ("f"):
arb_dtype = check_cast_unsupported_dtype(
arb_dtype if dtype is None else dtype
)
data = as_column(
cupy.asarray(arbitrary, dtype=arb_dtype),
nan_as_null=nan_as_null,
)
else:
data = as_column(cupy.asarray(arbitrary), nan_as_null=nan_as_null)
elif isinstance(arbitrary, pd.core.arrays.numpy_.PandasArray):
if is_categorical_dtype(arbitrary.dtype):
arb_dtype = arbitrary.dtype
else:
if arbitrary.dtype == pd.StringDtype():
arb_dtype = np.dtype("O")
else:
arb_dtype = check_cast_unsupported_dtype(arbitrary.dtype)
if arb_dtype != arbitrary.dtype.numpy_dtype:
arbitrary = arbitrary.astype(arb_dtype)
if arb_dtype.kind in ("O", "U"):
data = as_column(pa.Array.from_pandas(arbitrary), dtype=arb_dtype)
else:
data = as_column(
pa.array(
arbitrary,
from_pandas=True if nan_as_null is None else nan_as_null,
),
nan_as_null=nan_as_null,
)
if dtype is not None:
data = data.astype(dtype)
elif isinstance(arbitrary, memoryview):
data = as_column(
np.asarray(arbitrary), dtype=dtype, nan_as_null=nan_as_null
)
elif isinstance(arbitrary, cudf.Scalar):
data = libcudf.column.make_column_from_scalar(
arbitrary, length if length else 1
)
elif isinstance(arbitrary, pd.core.arrays.masked.BaseMaskedArray):
cudf_dtype = arbitrary._data.dtype
data = Buffer(arbitrary._data.view("|u1"))
data = as_column(data, dtype=cudf_dtype)
mask = arbitrary._mask
mask = bools_to_mask(as_column(mask).unary_operator("not"))
data = data.set_mask(mask)
else:
try:
data = as_column(
memoryview(arbitrary), dtype=dtype, nan_as_null=nan_as_null
)
except TypeError:
pa_type = None
np_type = None
try:
if dtype is not None:
if is_list_dtype(dtype):
data = pa.array(arbitrary)
if type(data) not in (pa.ListArray, pa.NullArray):
raise ValueError(
"Cannot create list column from given data"
)
return as_column(data, nan_as_null=nan_as_null)
dtype = pd.api.types.pandas_dtype(dtype)
if is_categorical_dtype(dtype):
raise TypeError
else:
np_type = np.dtype(dtype).type
if np_type == np.bool_:
pa_type = pa.bool_()
else:
pa_type = np_to_pa_dtype(np.dtype(dtype))
data = as_column(
pa.array(
arbitrary,
type=pa_type,
from_pandas=True
if nan_as_null is None
else nan_as_null,
),
dtype=dtype,
nan_as_null=nan_as_null,
)
except (pa.ArrowInvalid, pa.ArrowTypeError, TypeError):
if is_categorical_dtype(dtype):
sr = pd.Series(arbitrary, dtype="category")
data = as_column(sr, nan_as_null=nan_as_null, dtype=dtype)
elif np_type == np.str_:
sr = pd.Series(arbitrary, dtype="str")
data = as_column(sr, nan_as_null=nan_as_null)
else:
native_dtype = dtype
if dtype is None and pd.api.types.infer_dtype(
arbitrary
) in ("mixed", "mixed-integer"):
native_dtype = "object"
data = np.asarray(
arbitrary,
dtype=native_dtype
if native_dtype is None
else np.dtype(native_dtype),
)
data = as_column(
data, dtype=dtype, nan_as_null=nan_as_null
)
return data
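# Illustrative sketch (not part of the original source) of a few accepted inputs:
#   as_column([1, 2, 3])                        # host list -> NumericalColumn
#   as_column(np.arange(5), dtype="float64")    # __array_interface__ path
#   as_column(cupy.arange(5))                   # __cuda_array_interface__ path
#   as_column(7, length=4)                      # scalar broadcast to 4 rows
#   as_column(pd.Categorical(["a", "b", "a"]))  # pandas Categorical path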
def column_applymap(udf, column, out_dtype):
"""Apply an element-wise function to transform the values in the Column.
Parameters
----------
udf : function
Wrapped by numba jit for call on the GPU as a device function.
column : Column
The source column.
out_dtype : numpy.dtype
The dtype for use in the output.
Returns
-------
result : Column
"""
core = njit(udf)
results = column_empty(len(column), dtype=out_dtype)
values = column.data_array_view
if column.nullable:
# For masked columns
@cuda.jit
def kernel_masked(values, masks, results):
i = cuda.grid(1)
# in range?
if i < values.size:
# valid?
if utils.mask_get(masks, i):
# call udf
results[i] = core(values[i])
masks = column.mask_array_view
kernel_masked.forall(len(column))(values, masks, results)
else:
# For non-masked columns
@cuda.jit
def kernel_non_masked(values, results):
i = cuda.grid(1)
# in range?
if i < values.size:
# call udf
results[i] = core(values[i])
kernel_non_masked.forall(len(column))(values, results)
return as_column(results)
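# Illustrative sketch (not part of the original source):
#   squared = column_applymap(lambda x: x * x, col, out_dtype=np.dtype("int64"))
# The udf is compiled with numba and launched element-wise on the GPU; for a
# nullable column, positions whose mask bit is unset are simply not written.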
def _data_from_cuda_array_interface_desc(obj):
desc = obj.__cuda_array_interface__
ptr = desc["data"][0]
nelem = desc["shape"][0] if len(desc["shape"]) > 0 else 1
dtype = np.dtype(desc["typestr"])
data = Buffer(data=ptr, size=nelem * dtype.itemsize, owner=obj)
return data
def _mask_from_cuda_array_interface_desc(obj):
desc = obj.__cuda_array_interface__
mask = desc.get("mask", None)
if mask is not None:
desc = mask.__cuda_array_interface__
ptr = desc["data"][0]
nelem = desc["shape"][0]
typestr = desc["typestr"]
typecode = typestr[1]
if typecode == "t":
mask_size = bitmask_allocation_size_bytes(nelem)
mask = Buffer(data=ptr, size=mask_size, owner=obj)
elif typecode == "b":
col = as_column(mask)
mask = bools_to_mask(col)
else:
raise NotImplementedError(
f"Cannot infer mask from typestr {typestr}"
)
return mask
def serialize_columns(columns):
"""
Return the headers and frames resulting
from serializing a list of Column
Parameters
----------
columns : list
list of Columns to serialize
Returns
-------
headers : list
list of header metadata for each Column
frames : list
list of frames
"""
headers = []
frames = []
if len(columns) > 0:
header_columns = [c.serialize() for c in columns]
headers, column_frames = zip(*header_columns)
for f in column_frames:
frames.extend(f)
return headers, frames
def deserialize_columns(headers, frames):
"""
Construct a list of Columns from a list of headers
and frames.
"""
columns = []
for meta in headers:
col_frame_count = meta["frame_count"]
col_typ = pickle.loads(meta["type-serialized"])
colobj = col_typ.deserialize(meta, frames[:col_frame_count])
columns.append(colobj)
# Advance frames
frames = frames[col_frame_count:]
return columns
def arange(start, stop=None, step=1, dtype=None):
"""
Returns a column with evenly spaced values within a given interval.
Values are generated within the half-open interval [start, stop).
The first three arguments are mapped like the range built-in function,
i.e. start and step are optional.
Parameters
----------
start : int/float
Start of the interval.
stop : int/float, default is None
Stop of the interval.
step : int/float, default 1
Step width between each pair of consecutive values.
dtype : default None
Data type specifier. It is inferred from other arguments by default.
Returns
-------
cudf.core.column.NumericalColumn
Examples
--------
>>> import cudf
>>> col = cudf.core.column.arange(2, 7, 1, dtype='int16')
>>> col
<cudf.core.column.numerical.NumericalColumn object at 0x7ff7998f8b90>
>>> cudf.Series(col)
0 2
1 3
2 4
3 5
4 6
dtype: int16
"""
if stop is None:
stop = start
start = 0
if step is None:
step = 1
size = int(np.ceil((stop - start) / step))
return libcudf.filling.sequence(
size,
as_device_scalar(start, dtype=dtype),
as_device_scalar(step, dtype=dtype),
)
def full(size, fill_value, dtype=None):
"""
Returns a column of given size and dtype, filled with a given value.
Parameters
----------
size : int
size of the expected column.
fill_value : scalar
A scalar value to fill a new array.
dtype : default None
Data type specifier. It is inferred from other arguments by default.
Returns
-------
Column
Examples
--------
>>> import cudf
>>> col = cudf.core.column.full(size=5, fill_value=7, dtype='int8')
>>> col
<cudf.core.column.numerical.NumericalColumn object at 0x7fa0912e8b90>
>>> cudf.Series(col)
0 7
1 7
2 7
3 7
4 7
dtype: int8
"""
return libcudf.column.make_column_from_scalar(
cudf.Scalar(fill_value, dtype), size
)
|
the-stack_0_1446 | import copy
import logging
import os.path as osp
import numpy as np
import torch
from fvcore.common.file_io import PathManager
from PIL import Image
from pycocotools import mask as maskUtils
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.data.dataset_mapper import DatasetMapper
from detectron2.data.detection_utils import SizeMismatchError
from detectron2.structures import BoxMode
from .augmentation import RandomCropWithInstance
from .detection_utils import (annotations_to_instances, build_augmentation,
transform_instance_annotations)
import re
"""
This file contains the default mapping that's applied to "dataset dicts".
"""
__all__ = ["DatasetMapperWithBasis"]
logger = logging.getLogger(__name__)
def save_tmp_image(image, tmp_dir="tmp", img_name=None):
import os
if not os.path.exists(tmp_dir):
os.mkdir(tmp_dir)
if img_name is None:
tmp_id = len(os.listdir(tmp_dir))
img_name = "%d.png" % tmp_id
import cv2
    cv2.imwrite(os.path.join(tmp_dir, img_name), image)
def segmToRLE(segm, img_size):
h, w = img_size
if type(segm) == list:
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(segm, h, w)
rle = maskUtils.merge(rles)
elif type(segm["counts"]) == list:
# uncompressed RLE
rle = maskUtils.frPyObjects(segm, h, w)
else:
# rle
rle = segm
return rle
def segmToMask(segm, img_size):
rle = segmToRLE(segm, img_size)
m = maskUtils.decode(rle)
return m
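# Illustrative sketch (not part of the original source):
#   poly = [[10.0, 10.0, 60.0, 10.0, 60.0, 40.0]]   # one triangle polygon
#   mask = segmToMask(poly, img_size=(100, 100))    # (100, 100) uint8 array of 0/1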
def read_image_and_resize(file_name, shape, format=None):
"""
Read an image into the given format.
Will apply rotation and flipping if the image has such exif information.
Args:
file_name (str): image file path
format (str): one of the supported image modes in PIL, or "BGR" or "YUV-BT.601".
Returns:
image (np.ndarray): an HWC image in the given format, which is 0-255, uint8 for
supported image modes in PIL or "BGR"; float (0-1 for Y) for YUV-BT.601.
"""
with open(file_name, "rb") as f:
image = Image.open(f)
image = image.resize(shape)
# work around this bug: https://github.com/python-pillow/Pillow/issues/3973
image = utils._apply_exif_orientation(image)
return utils.convert_PIL_to_numpy(image, format)
def normalization(heatmap, target_min=-1, target_max=1):
input_min = np.min(heatmap[np.nonzero(heatmap)])
heatmap[np.nonzero(heatmap)] = heatmap[np.nonzero(heatmap)] - input_min
input_max = np.max(heatmap)
heatmap = heatmap / input_max * (target_max - target_min) + target_min
return heatmap
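# Illustrative worked example (not part of the original source): for
# heatmap = [0, 2, 4, 6] and the default target range [-1, 1], the nonzero
# minimum (2) is subtracted from the nonzero entries, giving [0, 0, 2, 4];
# dividing by the new maximum (4), scaling by 2 and shifting by -1 yields
# [-1, -1, 0, 1].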
class DatasetMapperWithBasis(DatasetMapper):
"""
This caller enables the default Detectron2 mapper to read an additional basis semantic label
"""
def __init__(self, cfg, is_train=True):
super().__init__(cfg, is_train)
# Rebuild augmentations
logger.info(
"Rebuilding the augmentations. The previous augmentations will be overridden."
)
self.augmentation = build_augmentation(cfg, is_train)
if cfg.INPUT.CROP.ENABLED and is_train:
self.augmentation.insert(
0,
RandomCropWithInstance(
cfg.INPUT.CROP.TYPE,
cfg.INPUT.CROP.SIZE,
cfg.INPUT.CROP.CROP_INSTANCE,
),
)
logging.getLogger(__name__).info(
"Cropping used in training: " + str(self.augmentation[0])
)
self.basis_loss_on = cfg.MODEL.BASIS_MODULE.LOSS_ON
self.ann_set = cfg.MODEL.BASIS_MODULE.ANN_SET
self.stcseg_enabled = cfg.MODEL.STCSEG.ENABLED
self.use_depth = cfg.MODEL.STCSEG.BOUNDARY.USE_DEPTH
self.use_optical_flow = cfg.MODEL.STCSEG.BOUNDARY.USE_OPTICAL_FLOW
if self.stcseg_enabled:
self.use_instance_mask = False
self.recompute_boxes = False
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
# USER: Write your own image loading if it's not from a file
try:
image = utils.read_image(
dataset_dict["file_name"], format=self.image_format
)
# print("%s image shape:" % dataset_dict["file_name"], image.shape)
except Exception as e:
print(dataset_dict["file_name"])
print(e)
raise e
try:
utils.check_image_size(dataset_dict, image)
except SizeMismatchError as e:
expected_wh = (dataset_dict["width"], dataset_dict["height"])
image_wh = (image.shape[1], image.shape[0])
if (image_wh[1], image_wh[0]) == expected_wh:
print("transposing image {}".format(dataset_dict["file_name"]))
image = image.transpose(1, 0, 2)
else:
raise e
# USER: Remove if you don't do semantic/panoptic segmentation.
if "sem_seg_file_name" in dataset_dict:
sem_seg_gt = utils.read_image(
dataset_dict.pop("sem_seg_file_name"), "L"
).squeeze(2)
else:
sem_seg_gt = None
boxes = np.asarray(
[
BoxMode.convert(
instance["bbox"], instance["bbox_mode"], BoxMode.XYXY_ABS
)
for instance in dataset_dict["annotations"]
]
)
aug_input = T.StandardAugInput(image, boxes=boxes, sem_seg=sem_seg_gt)
transforms = aug_input.apply_augmentations(self.augmentation)
image, sem_seg_gt = aug_input.image, aug_input.sem_seg
# save_tmp_image(image, img_name=dataset_dict["file_name"].split('/')[-1].split('.')[0] + '.png')
image_shape = image.shape[:2] # h, w
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
# Therefore it's important to use torch.Tensor.
dataset_dict["image"] = torch.as_tensor(
np.ascontiguousarray(image.transpose(2, 0, 1))
)
if sem_seg_gt is not None:
dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long"))
# ---------------------- Add Depth ---------------------------
if self.use_depth: # For kitti object
# print(dataset_dict["file_name"])
try:
if re.search("kitti_mot", dataset_dict["file_name"]):
# depth_image = utils.read_image(
# dataset_dict["file_name"].replace("image_02", "image_depth").replace(".png", "_disp.jpeg"), format=self.image_format
# )
# depth_image = np.load(dataset_dict["file_name"].replace("training", "Depth/training").replace(".png", "_disp.npy"))
# depth_image = depth_image[0].transpose(1,2,0) * (1, 1, 1) * 10
depth_image = utils.read_image(
dataset_dict["file_name"].replace("image_02", "depth"), format=self.image_format
)
elif re.search("kitti", dataset_dict["file_name"]):
depth_image = utils.read_image(
dataset_dict["file_name"].replace("image_2", "depth"), format=self.image_format
)
elif re.search("ytvis", dataset_dict["file_name"]):
depth_image = utils.read_image(
dataset_dict["file_name"].replace("JPEGImages", "Depth").replace(".jpg", ".png"), format=self.image_format
)
# print("%s depth shape:" % dataset_dict["file_name"], depth_image.shape)
# assert (depth_image.shape[1], depth_image.shape[0]) == (dataset_dict["width"], dataset_dict["height"]), dataset_dict["file_name"] + ": " + str(depth_image.shape)
else:
print(dataset_dict["file_name"])
print("!!!!!!! Please use kitti or ytvis !!!!!!!")
except Exception as e:
print("Depth file for ", dataset_dict["file_name"])
print(e)
raise e
try:
utils.check_image_size(dataset_dict, depth_image)
except SizeMismatchError as e:
expected_wh = (dataset_dict["width"], dataset_dict["height"])
depth_image_wh = (depth_image.shape[1], depth_image.shape[0])
if (depth_image_wh[1], depth_image_wh[0]) == expected_wh:
print("transposing image {}".format(dataset_dict["file_name"]))
depth_image = depth_image.transpose(1, 0, 2)
else:
raise e
# aug_depth_input = T.StandardAugInput(depth_image, boxes=boxes, sem_seg=sem_seg_gt)
# depth_transforms = aug_depth_input.apply_augmentations(self.augmentation)
# depth_image = aug_depth_input.image
depth_image = transforms.apply_image(depth_image)
# save_tmp_image(depth_image, img_name=dataset_dict["file_name"].split('/')[-1].split('.')[0] + '_depth.png')
dataset_dict["depth_image"] = torch.as_tensor(
np.ascontiguousarray(depth_image.transpose(2, 0, 1))
)
# ---------------------- Add Depth ---------------------------
# ---------------------- Add Flow ---------------------------
if self.use_optical_flow: # For kitti object
# print(dataset_dict["file_name"])
try:
if re.search("kitti_mot", dataset_dict["file_name"]):
flow_image_path = dataset_dict["file_name"].replace("image_02", "optical_flow")
elif re.search("ytvis", dataset_dict["file_name"]):
flow_image_path = dataset_dict["file_name"].replace("JPEGImages", "OpticalFlow").replace(".jpg", ".png")
else:
print(dataset_dict["file_name"])
print("!!!!!!! Please use kitti mot or ytvis !!!!!!!")
flow_image = read_image_and_resize(
flow_image_path, shape=(dataset_dict["width"], dataset_dict["height"]),
format=self.image_format
)
except Exception as e:
print(flow_image_path)
print(e)
raise e
try:
utils.check_image_size(dataset_dict, flow_image)
except SizeMismatchError as e:
expected_wh = (dataset_dict["width"], dataset_dict["height"])
flow_image_wh = (flow_image.shape[1], flow_image.shape[0])
if (flow_image_wh[1], flow_image_wh[0]) == expected_wh:
print("transposing image {}".format(dataset_dict["file_name"]))
flow_image = flow_image.transpose(1, 0, 2)
else:
raise e
# aug_flow_input = T.StandardAugInput(flow_image, boxes=boxes, sem_seg=sem_seg_gt)
# flow_transforms = aug_flow_input.apply_augmentations(self.augmentation)
# flow_image = aug_flow_input.image
flow_image = transforms.apply_image(flow_image)
# save_tmp_image(flow_image, img_name=dataset_dict["file_name"].split('/')[-1].split('.')[0] + '_flow.png')
dataset_dict["flow_image"] = torch.as_tensor(
np.ascontiguousarray(flow_image.transpose(2, 0, 1))
)
# ---------------------- Add Flow ---------------------------
# USER: Remove if you don't use pre-computed proposals.
# Most users would not need this feature.
if self.proposal_topk:
utils.transform_proposals(
dataset_dict,
image_shape,
transforms,
proposal_topk=self.proposal_topk,
min_box_size=self.proposal_min_box_size,
)
if not self.is_train:
dataset_dict.pop("annotations", None)
dataset_dict.pop("sem_seg_file_name", None)
dataset_dict.pop("pano_seg_file_name", None)
return dataset_dict
if "annotations" in dataset_dict:
# USER: Modify this if you want to keep them for some reason.
for anno in dataset_dict["annotations"]:
if not self.use_instance_mask:
anno.pop("segmentation", None)
if not self.use_keypoint:
anno.pop("keypoints", None)
# USER: Implement additional transformations if you have other types of data
annos = [
transform_instance_annotations(
obj,
transforms,
image_shape,
keypoint_hflip_indices=self.keypoint_hflip_indices,
)
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
instances = annotations_to_instances(
annos, image_shape, mask_format=self.instance_mask_format
)
# After transforms such as cropping are applied, the bounding box may no longer
# tightly bound the object. As an example, imagine a triangle object
# [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight
            # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not
            # equal to the intersection of the original bounding box and the cropping box.
if self.recompute_boxes:
instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
dataset_dict["instances"] = utils.filter_empty_instances(instances)
if self.basis_loss_on and self.is_train:
# load basis supervisions
if self.ann_set == "coco":
basis_sem_path = (
dataset_dict["file_name"]
.replace("train2017", "thing_train2017")
.replace("image/train", "thing_train")
)
else:
basis_sem_path = (
dataset_dict["file_name"]
.replace("coco", "lvis")
.replace("train2017", "thing_train")
)
# change extension to npz
basis_sem_path = osp.splitext(basis_sem_path)[0] + ".npz"
basis_sem_gt = np.load(basis_sem_path)["mask"]
basis_sem_gt = transforms.apply_segmentation(basis_sem_gt)
basis_sem_gt = torch.as_tensor(basis_sem_gt.astype("long"))
dataset_dict["basis_sem"] = basis_sem_gt
return dataset_dict
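# Illustrative usage sketch (not part of the original source), hypothetical:
#   mapper = DatasetMapperWithBasis(cfg, is_train=True)
#   loader = build_detection_train_loader(cfg, mapper=mapper)
# i.e. the mapper is passed to detectron2's data-loader builder in place of the
# default DatasetMapper.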
|
the-stack_0_1451 | # system_project_folder.py (c) 2010 Dany Lebel (Axon_D)
#
# ***** BEGIN GPL LICENSE BLOCK *****
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENCE BLOCK *****
bl_info = {
"name": "Project Folder",
"author": "Dany Lebel (Axon_D), Spirou4D",
"version": (0, 3, 1),
"blender": (2, 80, 0),
"location": "Info -> File Menu -> Project Folder",
"description": "Open the project folder in a file browser",
"warning": "",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/Scripts/System/Project_Folder",
"tracker_url": "https://developer.blender.org/maniphest/task/edit/form/2/",
"category": "System"}
import bpy
import os
from platform import system as currentOS
class ProjectFolder(bpy.types.Operator):
"""Open the Project Folder in a file Browser"""
bl_idname = "file.project_folder"
bl_label = "Project Folder"
def execute(self, context):
try :
path = self.path()
except ValueError:
self.report({'INFO'}, "No project folder yet")
return {'FINISHED'}
bpy.ops.wm.path_open(filepath=path)
return {'FINISHED'}
def path(self):
filepath = bpy.data.filepath
relpath = bpy.path.relpath(filepath)
path = filepath[0: -1 * (relpath.__len__() - 2)]
return path
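    # Illustrative sketch (not part of the original add-on): for a saved file with
    #   bpy.data.filepath == "/home/user/project/scene.blend"
    #   bpy.path.relpath(filepath) == "//scene.blend"
    # len(relpath) - 2 is the length of "scene.blend", so the slice above keeps
    # "/home/user/project/", the folder containing the .blend file.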
# Registration
def menu_func(self, context):
self.layout.operator(
ProjectFolder.bl_idname,
text="Project Folder",
icon='FILEBROWSER')
def register():
bpy.utils.register_class(ProjectFolder)
bpy.types.TOPBAR_MT_file.prepend(menu_func)
def unregister():
bpy.utils.unregister_class(ProjectFolder)
bpy.types.TOPBAR_MT_file.remove(menu_func)
if __name__ == "__main__":
register()
|
the-stack_0_1452 | import os
from numpy.distutils.core import setup
from numpy.distutils.misc_util import Configuration
from numpy import get_include
from scipy._build_utils import numpy_nodepr_api
def configuration(parent_package='', top_path=None):
config = Configuration('ndimage', parent_package, top_path)
include_dirs = ['src',
get_include(),
os.path.join(os.path.dirname(__file__), '..', '_lib', 'src')]
config.add_extension("_nd_image",
sources=["src/nd_image.c",
"src/ni_filters.c",
"src/ni_fourier.c",
"src/ni_interpolation.c",
"src/ni_measure.c",
"src/ni_morphology.c",
"src/ni_splines.c",
"src/ni_support.c"],
include_dirs=include_dirs,
**numpy_nodepr_api)
# Cython wants the .c and .pyx to have the underscore.
config.add_extension("_ni_label",
sources=["src/_ni_label.c",],
include_dirs=['src']+[get_include()])
config.add_extension("_ctest",
sources=["src/_ctest.c"],
include_dirs=[get_include()],
**numpy_nodepr_api)
config.add_extension("_cytest",
sources=["src/_cytest.c"])
config.add_data_dir('tests')
return config
if __name__ == '__main__':
setup(**configuration(top_path='').todict())
|
the-stack_0_1453 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for allowing TF ops to work with Keras Functional API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.keras.saving import model_config
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
def _single_op_at_end():
inputs = keras.Input(shape=(10,))
x = keras.layers.Dense(10)(inputs)
outputs = gen_nn_ops.relu(x)
return keras.Model(inputs, outputs)
def _single_identity_op_at_end():
inputs = keras.Input(shape=(10,))
x = keras.layers.Dense(10)(inputs)
outputs = array_ops.identity(x)
assert 'Identity' in outputs.name
return keras.Model(inputs, outputs)
def _multiple_ops_at_end():
inputs = keras.Input(shape=(10,))
x = keras.layers.Dense(10)(inputs)
x = gen_nn_ops.relu(x)
outputs = gen_nn_ops.relu(x)
return keras.Model(inputs, outputs)
def _single_op_in_middle():
inputs = keras.Input(shape=(10,))
x = keras.layers.Dense(10)(inputs)
x = gen_nn_ops.relu(x)
outputs = keras.layers.Dense(10)(x)
return keras.Model(inputs, outputs)
def _multiple_ops_in_middle():
inputs = keras.Input(shape=(10,))
x = keras.layers.Dense(10)(inputs)
x = gen_nn_ops.relu(x)
x = gen_nn_ops.relu(x)
outputs = keras.layers.Dense(10)(x)
return keras.Model(inputs, outputs)
def _single_standalone_branch():
inputs = keras.Input(shape=(10,))
x = keras.layers.Dense(10)(inputs)
outputs = x * 2
return keras.Model(inputs, outputs)
def _single_op_with_attrs():
inputs = keras.Input(shape=(10,))
x = math_ops.reduce_mean(inputs, axis=1, keepdims=True)
outputs = keras.layers.Dense(10)(x)
return keras.Model(inputs, outputs)
def _multiple_uses():
inputs = keras.Input(shape=(10,))
x = math_ops.reduce_mean(inputs, axis=1, keepdims=True)
x1 = keras.layers.Dense(10)(x)
x2 = keras.layers.Dense(10)(x)
outputs = x1 + x2
return keras.Model(inputs, outputs)
def _op_with_tensor_list():
inputs = keras.Input(shape=(10,))
x = array_ops.concat([inputs, inputs], axis=1)
outputs = keras.layers.Dense(10)(x)
return keras.Model(inputs, outputs)
def _add_n():
inputs = keras.Input(shape=(10,))
outputs = math_ops.add_n([inputs, inputs, inputs])
return keras.Model(inputs, outputs)
def _reuse_op():
inputs = keras.Input(shape=(10,))
# This op needs to be checked multiple times.
x = gen_nn_ops.relu(inputs)
y = keras.layers.Dense(10)(x)
x2 = x * 2
y2 = keras.layers.Dense(10)(x2)
outputs = y + y2
return keras.Model(inputs, outputs)
def _float64_op():
inputs = keras.Input(shape=(10,))
x = keras.layers.Dense(10, dtype='float64')(inputs)
x = gen_nn_ops.relu(x)
assert x.dtype == 'float64', 'x has dtype: %s' % x.dtype
outputs = keras.layers.Dense(10)(x)
return keras.Model(inputs, outputs)
class MyAdd(keras.layers.Layer):
def call(self, x, y):
return x + y
def _layer_with_tensor_arg():
inputs = keras.Input(shape=(10,))
x = inputs * 2
outputs = MyAdd()(inputs, x)
return keras.Model(inputs, outputs)
class LayerWithLayer(keras.layers.Layer):
def build(self, input_shape):
self.bias = self.add_weight(name='bias', dtype='float32')
self.layer = keras.layers.Dense(10)
def call(self, inputs):
inputs = inputs * self.bias
# Would throw an error if Keras History was created here.
return self.layer(inputs)
def _inner_layer():
inputs = keras.Input(shape=(10,))
outputs = LayerWithLayer()(inputs)
return keras.Model(inputs, outputs)
def _reuse_ancillary_layer():
inputs = (keras.Input(shape=(5,)), keras.Input(shape=(5,)))
base_model = keras.Sequential([
keras.layers.Dense(3, input_shape=(5,)),
])
outputs = base_model(inputs[0])
model = keras.Model(inputs, outputs)
# The second input is only involved in ancillary layers.
outputs_delta = outputs - base_model(0.5 * inputs[1])
l2_loss = math_ops.reduce_mean(
math_ops.reduce_sum(math_ops.square(outputs_delta), -1))
model.add_loss(l2_loss)
model.add_metric(l2_loss, aggregation='mean', name='l2_loss')
l1_loss = 0.01 * math_ops.reduce_mean(
math_ops.reduce_sum(math_ops.abs(outputs_delta), -1))
model.add_loss(l1_loss)
model.add_metric(l1_loss, aggregation='mean', name='l1_loss')
return model
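# Illustrative note (not part of the original test): the two add_loss calls above
# attach penalties computed from symbolic tensors, so after compile() the total
# training loss is the compiled loss plus l2_loss plus l1_loss, and the same two
# quantities are reported as the 'l2_loss' / 'l1_loss' metrics.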
@keras_parameterized.run_all_keras_modes
class AutoLambdaTest(keras_parameterized.TestCase):
@parameterized.named_parameters(
('single_op_at_end', _single_op_at_end),
('single_identity_op_at_end', _single_identity_op_at_end),
('multiple_ops_at_end', _multiple_ops_at_end),
('single_op_in_middle', _single_op_in_middle),
('multiple_ops_in_middle', _multiple_ops_in_middle),
('single_standalone_branch', _single_standalone_branch),
('single_op_with_attrs', _single_op_with_attrs),
('multiple_uses', _multiple_uses),
('op_with_tensor_list', _op_with_tensor_list),
('add_n', _add_n),
('_reuse_op', _reuse_op),
('_float64_op', _float64_op),
('_inner_layer', _inner_layer),
('_reuse_ancillary_layer', _reuse_ancillary_layer),
('_layer_with_tensor_arg', _layer_with_tensor_arg),
)
def test_autolambda(self, model_fn):
model = model_fn()
model.compile(
adam.Adam(0.001),
'mse',
run_eagerly=testing_utils.should_run_eagerly())
np_inputs = nest.map_structure(
lambda x: np.ones((10,) + tuple(x.shape[1:]), 'float32'), model.inputs)
np_outputs = nest.map_structure(
lambda x: np.ones((10,) + tuple(x.shape[1:]), 'float32'), model.outputs)
model.fit(np_inputs, np_outputs, batch_size=2)
model(np_inputs) # Test calling the model directly on inputs.
new_model = keras.Model.from_config(
model.get_config(),
custom_objects={
'LayerWithLayer': LayerWithLayer,
'MyAdd': MyAdd
})
new_model.compile(
adam.Adam(0.001),
'mse',
run_eagerly=testing_utils.should_run_eagerly())
new_model.fit(np_inputs, np_outputs, batch_size=2)
new_model(np_inputs) # Test calling the new model directly on inputs.
# Assert that metrics are preserved and in the right order.
self.assertAllEqual(model.metrics_names, new_model.metrics_names)
# Assert that layer names don't change.
self.assertAllEqual([layer.name for layer in model.layers],
[layer.name for layer in new_model.layers])
def test_numerical_correctness_simple(self):
x = ops.convert_to_tensor_v2([[-1., 0., -2., 1.]])
inputs = keras.Input(shape=(4,))
outputs = gen_nn_ops.relu(inputs)
model = keras.Model(inputs, outputs)
y = self.evaluate(model(x))
self.assertAllClose(y, [[0., 0., 0., 1.]])
def test_numerical_correctness_with_attrs(self):
x = ops.convert_to_tensor_v2([[1.5, 1.5], [2.5, 3.5]])
inputs = keras.Input(shape=(10,))
outputs = math_ops.reduce_mean(inputs, axis=1)
model = keras.Model(inputs, outputs)
y = self.evaluate(model(x))
self.assertAllClose(y, [1.5, 3.])
def test_numerical_correctness_serialization(self):
x = ops.convert_to_tensor_v2([-1., 0., -2., 1.])
inputs = keras.Input(shape=(4,))
outputs = gen_nn_ops.relu(inputs)
model1 = keras.Model(inputs, outputs)
y1 = self.evaluate(model1(x))
model2 = keras.Model.from_config(model1.get_config())
y2 = self.evaluate(model2(x))
self.assertAllClose(y1, y2)
def test_gradient_tape_in_function(self):
z = keras.Input((1,))
x = math_ops.matmul(z, constant_op.constant(2.0, shape=(1, 1)))
x = math_ops.reduce_mean(x, axis=0, keepdims=True)
h = gen_nn_ops.relu(x)
m = keras.Model(z, h)
@def_function.function()
def f(x):
with backprop.GradientTape() as t:
t.watch(x)
z = m(x ** 2)
grads = t.gradient(z, x)
return grads
self.assertAllEqual(f(constant_op.constant(10.0, shape=(1, 1))),
constant_op.constant(40.0, shape=(1, 1)))
f = def_function.function(f)
self.assertAllEqual(f(constant_op.constant(10.0, shape=(1, 1))),
constant_op.constant(40.0, shape=(1, 1)))
def test_no_tracking(self):
if not context.executing_eagerly():
x = constant_op.constant(1.0, shape=(10, 10))
keras.layers.Dense(1)(x)
self.assertTrue(x._keras_history_checked)
def test_timing_scales_linearly(self):
def _construct_graph_of_size(size):
start = time.time()
x = keras.backend.placeholder(shape=(10, 4))
for _ in range(size):
x = keras.layers.Dense(4)(x)
x = gen_nn_ops.relu(x)
end = time.time()
return end - start
size_50 = _construct_graph_of_size(50)
size_500 = _construct_graph_of_size(500)
# Check construction time grows approx. linearly with size.
e = 3 # Fudge factor to prevent flakiness.
self.assertLess(size_500, (10 * e) * size_50)
def test_no_mask_tracking(self):
x = keras.backend.placeholder((10, 10))
y = keras.layers.Masking(0.)(x)
self.assertTrue(y._keras_mask._keras_history_checked)
def test_built(self):
inputs = keras.Input(shape=(10,))
outputs = gen_nn_ops.relu(inputs)
model = keras.Model(inputs, outputs)
model.compile('sgd', 'mse')
for layer in model.layers:
self.assertTrue(layer.built)
# Test something that requires Layers to be built.
model.summary()
def test_json_serialization(self):
inputs = keras.Input(shape=(4,), dtype='uint8')
outputs = math_ops.cast(inputs, 'float32') / 4.
model = model_config.model_from_json(keras.Model(inputs, outputs).to_json())
self.assertAllEqual(
self.evaluate(model(np.array([0, 64, 128, 192], np.uint8))),
[0., 16., 32., 48.])
model.summary()
class InputInEagerTest(test.TestCase):
"""Tests ops on graph tensors in Eager runtime.
Input returns graph/symbolic tensors in the Eager runtime (this
happens, for example, with tensors returned from Keras layers). These
should be routed to the graph-style branch of these ops (b/134715641)
"""
def test_identity(self):
with context.eager_mode():
x = keras.Input(shape=(1,))
self.assertTrue(hasattr(x, 'graph'))
ident = array_ops.identity(x)
# This is now a graph tensor, and should be able to continue in graphland
self.assertIn('Identity', ident.name)
def test_size(self):
with context.eager_mode():
x = keras.Input(shape=(3,))
self.assertTrue(hasattr(x, 'graph'))
self.assertAllEqual(x.get_shape().as_list(), [None, 3])
sz = array_ops.size(x)
# This is now a graph tensor, and should be able to continue in graphland
self.assertIn('Size', sz.name)
if __name__ == '__main__':
test.main()
|
the-stack_0_1454 | #
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorflow as tf
class SharedRunningStats(object):
def __init__(self, replicated_device=None, epsilon=1e-2, name="", create_ops=True):
self.sess = None
self.name = name
self.replicated_device = replicated_device
self.epsilon = epsilon
self.ops_were_created = False
if create_ops:
with tf.device(replicated_device):
self.create_ops()
def create_ops(self, shape=[1], clip_values=None):
self.clip_values = clip_values
with tf.variable_scope(self.name):
self._sum = tf.get_variable(
dtype=tf.float64,
initializer=tf.constant_initializer(0.0),
name="running_sum", trainable=False, shape=shape, validate_shape=False,
collections=[tf.GraphKeys.GLOBAL_VARIABLES])
self._sum_squared = tf.get_variable(
dtype=tf.float64,
initializer=tf.constant_initializer(self.epsilon),
name="running_sum_squared", trainable=False, shape=shape, validate_shape=False,
collections=[tf.GraphKeys.GLOBAL_VARIABLES])
self._count = tf.get_variable(
dtype=tf.float64,
shape=(),
initializer=tf.constant_initializer(self.epsilon),
name="count", trainable=False, collections=[tf.GraphKeys.GLOBAL_VARIABLES])
self._shape = None
self._mean = tf.div(self._sum, self._count, name="mean")
self._std = tf.sqrt(tf.maximum((self._sum_squared - self._count*tf.square(self._mean))
/ tf.maximum(self._count-1, 1), self.epsilon), name="stdev")
self.tf_mean = tf.cast(self._mean, 'float32')
self.tf_std = tf.cast(self._std, 'float32')
self.new_sum = tf.placeholder(dtype=tf.float64, name='sum')
self.new_sum_squared = tf.placeholder(dtype=tf.float64, name='var')
self.newcount = tf.placeholder(shape=[], dtype=tf.float64, name='count')
self._inc_sum = tf.assign_add(self._sum, self.new_sum, use_locking=True)
self._inc_sum_squared = tf.assign_add(self._sum_squared, self.new_sum_squared, use_locking=True)
self._inc_count = tf.assign_add(self._count, self.newcount, use_locking=True)
self.raw_obs = tf.placeholder(dtype=tf.float64, name='raw_obs')
self.normalized_obs = (self.raw_obs - self._mean) / self._std
if self.clip_values is not None:
self.clipped_obs = tf.clip_by_value(self.normalized_obs, self.clip_values[0], self.clip_values[1])
self.ops_were_created = True
def set_session(self, sess):
self.sess = sess
def push(self, x):
x = x.astype('float64')
self.sess.run([self._inc_sum, self._inc_sum_squared, self._inc_count],
feed_dict={
self.new_sum: x.sum(axis=0).ravel(),
self.new_sum_squared: np.square(x).sum(axis=0).ravel(),
self.newcount: np.array(len(x), dtype='float64')
})
if self._shape is None:
self._shape = x.shape
@property
def n(self):
return self.sess.run(self._count)
@property
def mean(self):
return self.sess.run(self._mean)
@property
def var(self):
return self.std ** 2
@property
def std(self):
return self.sess.run(self._std)
@property
def shape(self):
return self._shape
@shape.setter
def shape(self, val):
self._shape = val
self.new_sum.set_shape(val)
self.new_sum_squared.set_shape(val)
self.tf_mean.set_shape(val)
self.tf_std.set_shape(val)
self._sum.set_shape(val)
self._sum_squared.set_shape(val)
def normalize(self, batch):
if self.clip_values is not None:
return self.sess.run(self.clipped_obs, feed_dict={self.raw_obs: batch})
else:
return self.sess.run(self.normalized_obs, feed_dict={self.raw_obs: batch})
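

# Illustrative usage sketch (not part of the original module). It shows how the
# running statistics could be driven inside a plain TF1 session; the feature
# dimension (3), the batch size and the random data below are arbitrary assumptions.
if __name__ == '__main__':
    stats = SharedRunningStats(name="obs_stats", create_ops=False)
    stats.create_ops(shape=[3], clip_values=(-5.0, 5.0))
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        stats.set_session(sess)
        batch = np.random.randn(16, 3)
        stats.push(batch)                  # accumulates sum, sum of squares and count
        print(stats.mean, stats.std)       # running statistics seen so far
        print(stats.normalize(batch[:2]))  # normalized (and clipped) observations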
|
the-stack_0_1459 | import os
import mmcv
import numpy as np
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmpose.datasets.pipelines import Compose
from mmpose.models import build_posenet
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
def init_pose_model(config, checkpoint=None, device='cuda:0'):
"""Initialize a pose model from config file.
Args:
config (str or :obj:`mmcv.Config`): Config file path or the config
object.
checkpoint (str, optional): Checkpoint path. If left as None, the model
will not load any weights.
Returns:
        nn.Module: The constructed pose model.
"""
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
config.model.pretrained = None
model = build_posenet(config.model)
if checkpoint is not None:
# load model checkpoint
load_checkpoint(model, checkpoint, map_location=device)
# save the config in the model for convenience
model.cfg = config
model.to(device)
model.eval()
return model
def _xyxy2xywh(bbox_xyxy):
"""Transform the bbox format from x1y1x2y2 to xywh.
Args:
bbox_xyxy (np.ndarray): Bounding boxes (with scores), shaped (n, 4) or
(n, 5). (left, top, right, bottom, [score])
Returns:
np.ndarray: Bounding boxes (with scores),
shaped (n, 4) or (n, 5). (left, top, width, height, [score])
"""
bbox_xywh = bbox_xyxy.copy()
bbox_xywh[:, 2] = bbox_xywh[:, 2] - bbox_xywh[:, 0] + 1
bbox_xywh[:, 3] = bbox_xywh[:, 3] - bbox_xywh[:, 1] + 1
return bbox_xywh
def _xywh2xyxy(bbox_xywh):
"""Transform the bbox format from xywh to x1y1x2y2.
Args:
bbox_xywh (ndarray): Bounding boxes (with scores),
shaped (n, 4) or (n, 5). (left, top, width, height, [score])
Returns:
np.ndarray: Bounding boxes (with scores), shaped (n, 4) or
(n, 5). (left, top, right, bottom, [score])
"""
bbox_xyxy = bbox_xywh.copy()
bbox_xyxy[:, 2] = bbox_xyxy[:, 2] + bbox_xyxy[:, 0] - 1
bbox_xyxy[:, 3] = bbox_xyxy[:, 3] + bbox_xyxy[:, 1] - 1
return bbox_xyxy
def _box2cs(cfg, box):
"""This encodes bbox(x,y,w,h) into (center, scale)
Args:
x, y, w, h
Returns:
tuple: A tuple containing center and scale.
- np.ndarray[float32](2,): Center of the bbox (x, y).
- np.ndarray[float32](2,): Scale of the bbox w & h.
"""
x, y, w, h = box[:4]
input_size = cfg.data_cfg['image_size']
aspect_ratio = input_size[0] / input_size[1]
center = np.array([x + w * 0.5, y + h * 0.5], dtype=np.float32)
if w > aspect_ratio * h:
h = w * 1.0 / aspect_ratio
elif w < aspect_ratio * h:
w = h * aspect_ratio
# pixel std is 200.0
scale = np.array([w / 200.0, h / 200.0], dtype=np.float32)
scale = scale * 1.25
return center, scale
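

# Worked example for _box2cs (illustrative; the 192x256 model input size is an
# assumption, giving aspect_ratio = 0.75): for box = (10, 20, 30, 40),
#   center = (10 + 0.5 * 30, 20 + 0.5 * 40) = (25.0, 40.0)
#   w (=30) equals aspect_ratio * h (=0.75 * 40 = 30), so neither side is stretched,
#   and with the 1.25 padding factor
#   scale = 1.25 * (30 / 200, 40 / 200) = (0.1875, 0.25)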
class LoadImage:
"""A simple pipeline to load image."""
def __init__(self, color_type='color', channel_order='rgb'):
self.color_type = color_type
self.channel_order = channel_order
def __call__(self, results):
"""Call function to load images into results.
Args:
results (dict): A result dict contains the file name
of the image to be read.
Returns:
dict: ``results`` will be returned containing loaded image.
"""
if isinstance(results['img_or_path'], str):
results['image_file'] = results['img_or_path']
else:
results['image_file'] = ''
img = mmcv.imread(results['img_or_path'], self.color_type,
self.channel_order)
results['img'] = img
return results
def _inference_single_pose_model(model, img_or_path, bbox, dataset):
"""Inference a single bbox.
num_keypoints: K
Args:
model (nn.Module): The loaded pose model.
        img_or_path (str | np.ndarray): Image filename or loaded image.
bbox (list | np.ndarray): Bounding boxes (with scores),
shaped (4, ) or (5, ). (left, top, width, height, [score])
dataset (str): Dataset name.
Returns:
ndarray[Kx3]: Predicted pose x, y, score.
"""
cfg = model.cfg
device = next(model.parameters()).device
# build the data pipeline
test_pipeline = [LoadImage()] + cfg.test_pipeline[1:]
test_pipeline = Compose(test_pipeline)
assert len(bbox) in [4, 5]
center, scale = _box2cs(cfg, bbox)
flip_pairs = None
if dataset == 'TopDownCocoDataset' or dataset == 'TopDownOCHumanDataset':
flip_pairs = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12],
[13, 14], [15, 16]]
elif dataset == 'TopDownCocoWholeBodyDataset':
body = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16]]
foot = [[17, 20], [18, 21], [19, 22]]
face = [[23, 39], [24, 38], [25, 37], [26, 36], [27, 35], [28, 34],
[29, 33], [30, 32], [40, 49], [41, 48], [42, 47], [43, 46],
[44, 45], [54, 58], [55, 57], [59, 68], [60, 67], [61, 66],
[62, 65], [63, 70], [64, 69], [71, 77], [72, 76], [73, 75],
[78, 82], [79, 81], [83, 87], [84, 86], [88, 90]]
hand = [[91, 112], [92, 113], [93, 114], [94, 115], [95, 116],
[96, 117], [97, 118], [98, 119], [99, 120], [100, 121],
[101, 122], [102, 123], [103, 124], [104, 125], [105, 126],
[106, 127], [107, 128], [108, 129], [109, 130], [110, 131],
[111, 132]]
flip_pairs = body + foot + face + hand
elif dataset == 'TopDownAicDataset':
flip_pairs = [[0, 3], [1, 4], [2, 5], [6, 9], [7, 10], [8, 11]]
elif (dataset == 'TopDownOneHand10KDataset'
or dataset == 'TopDownFreiHandDataset'
or dataset == 'TopDownPanopticDataset'):
flip_pairs = []
else:
raise NotImplementedError()
# prepare data
data = {
'img_or_path':
img_or_path,
'center':
center,
'scale':
scale,
'bbox_score':
bbox[4] if len(bbox) == 5 else 1,
'dataset':
dataset,
'joints_3d':
np.zeros((cfg.data_cfg.num_joints, 3), dtype=np.float32),
'joints_3d_visible':
np.zeros((cfg.data_cfg.num_joints, 3), dtype=np.float32),
'rotation':
0,
'ann_info': {
'image_size': cfg.data_cfg['image_size'],
'num_joints': cfg.data_cfg['num_joints'],
'flip_pairs': flip_pairs
}
}
data = test_pipeline(data)
data = collate([data], samples_per_gpu=1)
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device])[0]
else:
# just get the actual data from DataContainer
data['img_metas'] = data['img_metas'].data[0]
# forward the model
with torch.no_grad():
all_preds, _, _ = model(
return_loss=False, img=data['img'], img_metas=data['img_metas'])
return all_preds[0]
def inference_top_down_pose_model(model,
img_or_path,
person_bboxes,
bbox_thr=None,
format='xywh',
dataset='TopDownCocoDataset'):
"""Inference a single image with a list of person bounding boxes.
num_people: P
num_keypoints: K
bbox height: H
bbox width: W
Args:
model (nn.Module): The loaded pose model.
        img_or_path (str | np.ndarray): Image filename or loaded image.
person_bboxes: (np.ndarray[P x 4] or [P x 5]): Each person bounding box
shaped (4, ) or (5, ), contains 4 box coordinates (and score).
bbox_thr: Threshold for bounding boxes. Only bboxes with higher scores
will be fed into the pose detector. If bbox_thr is None, ignore it.
format: bbox format ('xyxy' | 'xywh'). Default: 'xywh'.
'xyxy' means (left, top, right, bottom),
'xywh' means (left, top, width, height).
dataset (str): Dataset name, e.g. 'TopDownCocoDataset'.
Returns:
list[dict]: The bbox & pose info.
Each item in the list is a dictionary,
containing the bbox: (left, top, right, bottom, [score])
and the pose (ndarray[Kx3]): x, y, score
"""
# only two kinds of bbox format is supported.
assert format in ['xyxy', 'xywh']
# transform the bboxes format to xywh
if format == 'xyxy':
person_bboxes = _xyxy2xywh(np.array(person_bboxes))
pose_results = []
if len(person_bboxes) > 0:
if bbox_thr is not None:
person_bboxes = person_bboxes[person_bboxes[:, 4] > bbox_thr]
for bbox in person_bboxes:
pose = _inference_single_pose_model(model, img_or_path, bbox,
dataset)
pose_results.append({
'bbox':
_xywh2xyxy(np.expand_dims(np.array(bbox), 0)),
'keypoints':
pose,
})
return pose_results
def inference_bottom_up_pose_model(model, img_or_path):
"""Inference a single image.
num_people: P
num_keypoints: K
bbox height: H
bbox width: W
Args:
model (nn.Module): The loaded pose model.
        img_or_path (str | np.ndarray): Image filename or loaded image.
Returns:
list[ndarray]: The predicted pose info.
The length of the list
is the number of people (P). Each item in the
list is a ndarray, containing each person's
pose (ndarray[Kx3]): x, y, score
"""
pose_results = []
cfg = model.cfg
device = next(model.parameters()).device
# build the data pipeline
test_pipeline = [LoadImage()] + cfg.test_pipeline[1:]
test_pipeline = Compose(test_pipeline)
# prepare data
data = {
'img_or_path': img_or_path,
'dataset': 'coco',
'ann_info': {
'image_size':
cfg.data_cfg['image_size'],
'num_joints':
cfg.data_cfg['num_joints'],
'flip_index':
[0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15],
}
}
data = test_pipeline(data)
data = collate([data], samples_per_gpu=1)
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device])[0]
else:
# just get the actual data from DataContainer
data['img_metas'] = data['img_metas'].data[0]
# forward the model
with torch.no_grad():
all_preds, _, _ = model(
return_loss=False, img=data['img'], img_metas=data['img_metas'])
for pred in all_preds:
pose_results.append({
'keypoints': pred[:, :3],
})
return pose_results
def vis_pose_result(model,
img,
result,
kpt_score_thr=0.3,
dataset='TopDownCocoDataset',
show=False,
out_file=None):
"""Visualize the detection results on the image.
Args:
model (nn.Module): The loaded detector.
img (str | np.ndarray): Image filename or loaded image.
result (list[dict]): The results to draw over `img`
(bbox_result, pose_result).
kpt_score_thr (float): The threshold to visualize the keypoints.
        dataset (str): Dataset name, used to pick the skeleton and colors.
        show (bool): Whether to show the image. Default: False.
out_file (str|None): The filename of the output visualization image.
"""
if hasattr(model, 'module'):
model = model.module
palette = np.array([[255, 128, 0], [255, 153, 51], [255, 178, 102],
[230, 230, 0], [255, 153, 255], [153, 204, 255],
[255, 102, 255], [255, 51, 255], [102, 178, 255],
[51, 153, 255], [255, 153, 153], [255, 102, 102],
[255, 51, 51], [153, 255, 153], [102, 255, 102],
[51, 255, 51], [0, 255, 0], [0, 0, 255], [255, 0, 0],
[255, 255, 255]])
radius = 4
if dataset == 'TopDownCocoDataset' or dataset == 'BottomUpCocoDataset' \
or dataset == 'TopDownOCHumanDataset':
# show the results
skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12],
[7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3],
[1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]
pose_limb_color = palette[[
0, 0, 0, 0, 7, 7, 7, 9, 9, 9, 9, 9, 16, 16, 16, 16, 16, 16, 16
]]
pose_kpt_color = palette[[
16, 16, 16, 16, 16, 9, 9, 9, 9, 9, 9, 0, 0, 0, 0, 0, 0
]]
elif dataset == 'TopDownCocoWholeBodyDataset':
# show the results
skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12],
[7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3],
[1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7], [16, 18],
[16, 19], [16, 20], [17, 21], [17, 22], [17, 23], [92, 93],
[93, 94], [94, 95], [95, 96], [92, 97], [97, 98], [98, 99],
[99, 100], [92, 101], [101, 102], [102, 103], [103, 104],
[92, 105], [105, 106], [106, 107], [107, 108], [92, 109],
[109, 110], [110, 111], [111, 112], [113, 114], [114, 115],
[115, 116], [116, 117], [113, 118], [118, 119], [119, 120],
[120, 121], [113, 122], [122, 123], [123, 124], [124, 125],
[113, 126], [126, 127], [127, 128], [128, 129], [113, 130],
[130, 131], [131, 132], [132, 133]]
pose_limb_color = palette[
[0, 0, 0, 0, 7, 7, 7, 9, 9, 9, 9, 9, 16, 16, 16, 16, 16, 16, 16] +
[16, 16, 16, 16, 16, 16] + [
0, 0, 0, 0, 4, 4, 4, 4, 8, 8, 8, 8, 12, 12, 12, 12, 16, 16, 16,
16
] + [
0, 0, 0, 0, 4, 4, 4, 4, 8, 8, 8, 8, 12, 12, 12, 12, 16, 16, 16,
16
]]
pose_kpt_color = palette[
[16, 16, 16, 16, 16, 9, 9, 9, 9, 9, 9, 0, 0, 0, 0, 0, 0] +
[0, 0, 0, 0, 0, 0] + [
19,
] * (68 + 42)]
radius = 1
elif dataset == 'TopDownAicDataset':
skeleton = [[3, 2], [2, 1], [1, 14], [14, 4], [4, 5], [5, 6], [9, 8],
[8, 7], [7, 10], [10, 11], [11, 12], [13, 14], [1, 7],
[4, 10]]
pose_limb_color = palette[[
9, 9, 9, 9, 9, 9, 16, 16, 16, 16, 16, 0, 7, 7
]]
pose_kpt_color = palette[[
9, 9, 9, 9, 9, 9, 16, 16, 16, 16, 16, 16, 0, 0
]]
elif (dataset == 'TopDownOneHand10KDataset'
or dataset == 'TopDownFreiHandDataset'
or dataset == 'TopDownPanopticDataset'):
skeleton = [[1, 2], [2, 3], [3, 4], [4, 5], [1, 6], [6, 7], [7, 8],
[8, 9], [1, 10], [10, 11], [11, 12], [12, 13], [1, 14],
[14, 15], [15, 16], [16, 17], [1, 18], [18, 19], [19, 20],
[20, 21]]
pose_limb_color = palette[[
0, 0, 0, 0, 4, 4, 4, 4, 8, 8, 8, 8, 12, 12, 12, 12, 16, 16, 16, 16
]]
pose_kpt_color = palette[[
0, 0, 0, 0, 0, 4, 4, 4, 4, 8, 8, 8, 8, 12, 12, 12, 12, 16, 16, 16,
16
]]
else:
raise NotImplementedError()
img = model.show_result(
img,
result,
skeleton,
radius=radius,
pose_kpt_color=pose_kpt_color,
pose_limb_color=pose_limb_color,
kpt_score_thr=kpt_score_thr,
show=show,
out_file=out_file)
return img
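

# End-to-end usage sketch (illustrative only, not part of the original module).
# The config/checkpoint paths, image name, device and the hand-written person box
# below are placeholder assumptions, not files or values shipped with this code.
if __name__ == '__main__':
    pose_model = init_pose_model(
        'configs/top_down/hrnet/coco/hrnet_w32_coco_256x192.py',
        'hrnet_w32_coco_256x192.pth',
        device='cuda:0')
    person_bboxes = np.array([[50, 60, 200, 400, 0.98]])  # x, y, w, h, score
    pose_results = inference_top_down_pose_model(
        pose_model, 'demo.jpg', person_bboxes, bbox_thr=0.3, format='xywh',
        dataset='TopDownCocoDataset')
    vis_pose_result(pose_model, 'demo.jpg', pose_results,
                    kpt_score_thr=0.3, dataset='TopDownCocoDataset',
                    out_file='vis_demo.jpg')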
|
the-stack_0_1460 | # Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_log import versionutils
from glance.common import wsgi
from glance.i18n import _LW
LOG = logging.getLogger(__name__)
"""v3 controller stub
Since Glance Artifact Service was released in Liberty as experimental Glance v3
API, its router was referenced in paste configuration as glance.api.v3.router
In Mitaka the Artifacts Service was moved into a standalone process and its
router was renamed to glance.api.artifacts.router.
However, in existing deployments the glance-api-paste.ini may still reference
the glance.api.v3.router. To not break these deployments this stub is included
to redirect the v3 request to glare service (if it is present) or return a 410
otherwise.
This stub controller should be removed in future releases.
"""
class API(wsgi.Router):
def __init__(self, mapper):
versionutils.report_deprecated_feature(
LOG,
_LW('/v3 controller is deprecated and will be removed from '
'glance-api soon. Remove the reference to it from '
'glance-api-paste.ini configuration file and use Glance '
'Artifact Service API instead'))
redirector = self._get_redirector()
mapper.connect(None, "/artifacts",
controller=redirector, action='redirect')
mapper.connect(None, "/artifacts/{path:.*}",
controller=redirector, action='redirect')
super(API, self).__init__(mapper)
def _get_redirector(self):
return wsgi.Resource(RedirectController(),
serializer=RedirectResponseSerializer())
class RedirectController(object):
def redirect(self, req, path=None):
try:
glare_endpoint = next((s['endpoints']
for s in req.context.service_catalog
if s['type'] == 'artifact'))[0]['publicURL']
if path:
path = '/' + path
return '{0}/v0.1/artifacts{1}'.format(glare_endpoint, path or "")
except StopIteration:
return None
class RedirectResponseSerializer(wsgi.JSONResponseSerializer):
def default(self, response, res):
if res:
response.location = res
response.status_int = 301
else:
response.status_int = 410
|
the-stack_0_1463 | #
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import math
import os
import shutil
try:
import cudf
import dask_cudf
except ImportError:
cudf = None
dask_cudf = None
import numpy as np
import pytest
from pandas.api.types import is_integer_dtype
import nvtabular as nvt
from merlin.core.dispatch import HAS_GPU, make_df
from merlin.dag import ColumnSelector, postorder_iter_nodes
from merlin.schema import Schema, Tags
from nvtabular import Dataset, Workflow, ops
from nvtabular.utils import set_dask_client
from tests.conftest import assert_eq, get_cats, mycols_csv
def test_workflow_double_fit():
raw_df = make_df({"user_session": ["1", "2", "4", "4", "5"]})
cat_feats = ["user_session"] >> nvt.ops.Categorify()
for _ in [1, 2]:
df_event = nvt.Dataset(raw_df)
workflow = nvt.Workflow(cat_feats)
workflow.fit(df_event)
workflow.transform(df_event).to_ddf().compute()
@pytest.mark.parametrize("engine", ["parquet"])
def test_workflow_fit_op_rename(tmpdir, dataset, engine):
# NVT
schema = dataset.schema
for name in schema.column_names:
dataset.schema.column_schemas[name] = dataset.schema.column_schemas[name].with_tags(
[Tags.USER]
)
selector = nvt.ColumnSelector(tags=[Tags.USER])
workflow_ops_1 = selector >> nvt.ops.Rename(postfix="_1")
workflow_1 = nvt.Workflow(workflow_ops_1)
workflow_1.fit(dataset)
workflow_1.save(str(tmpdir / "one"))
new_dataset = workflow_1.transform(dataset).to_ddf().compute()
assert len(new_dataset.columns) > 0
assert all("_1" in col for col in new_dataset.columns)
@pytest.mark.parametrize("engine", ["parquet"])
def test_grab_additional_input_columns(dataset, engine):
schema = Schema(["x", "y"])
node1 = ["x"] >> ops.FillMissing()
node2 = node1 >> ops.Clip(min_value=0)
add_node = node2 + ["y"]
workflow = Workflow(add_node).fit_schema(schema)
output_df = workflow.transform(dataset).to_ddf().compute()
assert len(workflow.output_node.input_columns.names) == 2
assert workflow.output_node.input_columns.names == ["x", "y"]
assert len(workflow.output_node.output_columns.names) == 2
assert workflow.output_node.output_columns.names == ["x", "y"]
assert len(output_df.columns) == 2
assert output_df.columns.tolist() == ["x", "y"]
@pytest.mark.parametrize("gpu_memory_frac", [0.01, 0.1])
@pytest.mark.parametrize("engine", ["parquet", "csv", "csv-no-header"])
@pytest.mark.parametrize("dump", [True, False])
@pytest.mark.parametrize("use_client", [True, False])
def test_gpu_workflow_api(tmpdir, client, df, dataset, gpu_memory_frac, engine, dump, use_client):
cat_names = ["name-cat", "name-string"] if engine == "parquet" else ["name-string"]
cont_names = ["x", "y", "id"]
label_name = ["label"]
set_dask_client(client=client if use_client else None)
norms = ops.Normalize()
cat_features = cat_names >> ops.Categorify(cat_cache="host")
cont_features = cont_names >> ops.FillMissing() >> ops.Clip(min_value=0) >> ops.LogOp >> norms
workflow = Workflow(cat_features + cont_features + label_name)
workflow.fit(dataset)
if dump:
workflow_dir = os.path.join(tmpdir, "workflow")
workflow.save(workflow_dir)
workflow = None
workflow = Workflow.load(workflow_dir)
def get_norms(tar):
gdf = tar.fillna(0)
gdf = gdf * (gdf >= 0).astype("int")
gdf = np.log(gdf + 1)
return gdf
    # Check mean and std - the raw column must first get the same FillMissing/Clip/Log preprocessing applied above
assert math.isclose(get_norms(df.y).mean(), norms.means["y"], rel_tol=1e-1)
assert math.isclose(get_norms(df.y).std(), norms.stds["y"], rel_tol=1e-1)
assert math.isclose(get_norms(df.x).mean(), norms.means["x"], rel_tol=1e-1)
assert math.isclose(get_norms(df.x).std(), norms.stds["x"], rel_tol=1e-1)
# Check that categories match
if engine == "parquet":
cats_expected0 = df["name-cat"].unique().values_host if HAS_GPU else df["name-cat"].unique()
cats0 = get_cats(workflow, "name-cat")
# adding the None entry as a string because of move from gpu
assert all(cat in [None] + sorted(cats_expected0.tolist()) for cat in cats0.tolist())
assert len(cats0.tolist()) == len(cats_expected0.tolist() + [None])
if HAS_GPU:
cats_expected1 = (
df["name-string"].unique().values_host if HAS_GPU else df["name-string"].unique()
)
else:
cats_expected1 = df["name-string"].unique()
cats1 = get_cats(workflow, "name-string")
# adding the None entry as a string because of move from gpu
assert all(cat in [None] + sorted(cats_expected1.tolist()) for cat in cats1.tolist())
assert len(cats1.tolist()) == len(cats_expected1.tolist() + [None])
# Write to new "shuffled" and "processed" dataset
workflow.transform(dataset).to_parquet(
tmpdir,
out_files_per_proc=10,
shuffle=nvt.io.Shuffle.PER_PARTITION,
)
dataset_2 = Dataset(glob.glob(str(tmpdir) + "/*.parquet"), part_mem_fraction=gpu_memory_frac)
df_pp = nvt.dispatch.concat(list(dataset_2.to_iter()), axis=0)
if engine == "parquet":
assert is_integer_dtype(df_pp["name-cat"].dtype)
assert is_integer_dtype(df_pp["name-string"].dtype)
num_rows, num_row_groups, col_names = nvt.dispatch.read_parquet_metadata(
str(tmpdir) + "/_metadata"
)
assert num_rows == len(df_pp)
@pytest.mark.parametrize("engine", ["csv", "csv-no-header"])
def test_gpu_dataset_iterator_csv(df, dataset, engine):
df_itr = nvt.dispatch.concat(list(dataset.to_iter(columns=mycols_csv)), axis=0)
assert_eq(df_itr.reset_index(drop=True), df.reset_index(drop=True))
def test_spec_set(tmpdir, client):
gdf_test = make_df(
{
"ad_id": [1, 2, 2, 6, 6, 8, 3, 3],
"source_id": [2, 4, 4, 7, 5, 2, 5, 2],
"platform": [1, 2, np.nan, 2, 1, 3, 3, 1],
"cont": [1, 2, np.nan, 2, 1, 3, 3, 1],
"clicked": [1, 0, 1, 0, 0, 1, 1, 0],
}
)
cats = ColumnSelector(["ad_id", "source_id", "platform"])
cat_features = cats >> ops.Categorify
cont_features = ColumnSelector(["cont"]) >> ops.FillMissing >> ops.Normalize
te_features = cats >> ops.TargetEncoding("clicked", kfold=5, fold_seed=42, p_smooth=20)
set_dask_client(client=client)
p = Workflow(cat_features + cont_features + te_features)
p.fit_transform(nvt.Dataset(gdf_test)).to_ddf().compute()
@pytest.mark.parametrize("gpu_memory_frac", [0.01, 0.1])
@pytest.mark.parametrize("engine", ["parquet", "csv", "csv-no-header"])
@pytest.mark.parametrize("dump", [True, False])
def test_gpu_workflow(tmpdir, df, dataset, gpu_memory_frac, engine, dump):
cat_names = ["name-cat", "name-string"] if engine == "parquet" else ["name-string"]
cont_names = ["x", "y", "id"]
label_name = ["label"]
norms = ops.Normalize()
conts = cont_names >> ops.FillMissing() >> ops.Clip(min_value=0) >> norms
cats = cat_names >> ops.Categorify()
workflow = nvt.Workflow(conts + cats + label_name)
workflow.fit(dataset)
if dump:
workflow_dir = os.path.join(tmpdir, "workflow")
workflow.save(workflow_dir)
workflow = None
workflow = Workflow.load(workflow_dir)
def get_norms(tar):
gdf = tar.fillna(0)
gdf = gdf * (gdf >= 0).astype("int")
return gdf
assert math.isclose(get_norms(df.x).mean(), norms.means["x"], rel_tol=1e-4)
assert math.isclose(get_norms(df.y).mean(), norms.means["y"], rel_tol=1e-4)
assert math.isclose(get_norms(df.x).std(), norms.stds["x"], rel_tol=1e-3)
assert math.isclose(get_norms(df.y).std(), norms.stds["y"], rel_tol=1e-3)
# Check that categories match
if engine == "parquet":
cats_expected0 = df["name-cat"].unique().values_host if HAS_GPU else df["name-cat"].unique()
cats0 = get_cats(workflow, "name-cat")
# adding the None entry as a string because of move from gpu
assert all(cat in [None] + sorted(cats_expected0.tolist()) for cat in cats0.tolist())
assert len(cats0.tolist()) == len(cats_expected0.tolist() + [None])
cats_expected1 = (
df["name-string"].unique().values_host if HAS_GPU else df["name-string"].unique()
)
cats1 = get_cats(workflow, "name-string")
# adding the None entry as a string because of move from gpu
assert all(cat in [None] + sorted(cats_expected1.tolist()) for cat in cats1.tolist())
assert len(cats1.tolist()) == len(cats_expected1.tolist() + [None])
# Write to new "shuffled" and "processed" dataset
workflow.transform(dataset).to_parquet(
output_path=tmpdir, out_files_per_proc=10, shuffle=nvt.io.Shuffle.PER_PARTITION
)
dataset_2 = Dataset(glob.glob(str(tmpdir) + "/*.parquet"), part_mem_fraction=gpu_memory_frac)
df_pp = nvt.dispatch.concat(list(dataset_2.to_iter()), axis=0)
if engine == "parquet":
assert is_integer_dtype(df_pp["name-cat"].dtype)
assert is_integer_dtype(df_pp["name-string"].dtype)
num_rows, num_row_groups, col_names = nvt.dispatch.read_parquet_metadata(
str(tmpdir) + "/_metadata"
)
assert num_rows == len(df_pp)
@pytest.mark.parametrize("gpu_memory_frac", [0.01, 0.1])
@pytest.mark.parametrize("engine", ["parquet", "csv", "csv-no-header"])
@pytest.mark.parametrize("dump", [True, False])
@pytest.mark.parametrize("replace", [True, False])
def test_gpu_workflow_config(tmpdir, client, df, dataset, gpu_memory_frac, engine, dump, replace):
cat_names = ["name-cat", "name-string"] if engine == "parquet" else ["name-string"]
cont_names = ["x", "y", "id"]
label_name = ["label"]
norms = ops.Normalize()
cat_features = cat_names >> ops.Categorify()
if replace:
cont_features = cont_names >> ops.FillMissing() >> ops.LogOp >> norms
else:
fillmissing_logop = (
cont_names
>> ops.FillMissing()
>> ops.LogOp
>> ops.Rename(postfix="_FillMissing_1_LogOp_1")
)
cont_features = cont_names + fillmissing_logop >> norms
set_dask_client(client=client)
workflow = Workflow(cat_features + cont_features + label_name)
workflow.fit(dataset)
if dump:
workflow_dir = os.path.join(tmpdir, "workflow")
workflow.save(workflow_dir)
workflow = None
workflow = Workflow.load(workflow_dir)
def get_norms(tar):
ser_median = tar.dropna().quantile(0.5, interpolation="linear")
gdf = tar.fillna(ser_median)
gdf = np.log(gdf + 1)
return gdf
    # Check mean and std - the raw column must first get the same FillMissing/Log preprocessing applied above
concat_ops = "_FillMissing_1_LogOp_1"
if replace:
concat_ops = ""
assert math.isclose(get_norms(df.x).mean(), norms.means["x" + concat_ops], rel_tol=1e-1)
assert math.isclose(get_norms(df.y).mean(), norms.means["y" + concat_ops], rel_tol=1e-1)
assert math.isclose(get_norms(df.x).std(), norms.stds["x" + concat_ops], rel_tol=1e-1)
assert math.isclose(get_norms(df.y).std(), norms.stds["y" + concat_ops], rel_tol=1e-1)
# Check that categories match
if engine == "parquet":
cats_expected0 = df["name-cat"].unique().values_host if HAS_GPU else df["name-cat"].unique()
cats0 = get_cats(workflow, "name-cat")
# adding the None entry as a string because of move from gpu
assert all(cat in [None] + sorted(cats_expected0.tolist()) for cat in cats0.tolist())
assert len(cats0.tolist()) == len(cats_expected0.tolist() + [None])
cats_expected1 = (
df["name-string"].unique().values_host if HAS_GPU else df["name-string"].unique()
)
cats1 = get_cats(workflow, "name-string")
# adding the None entry as a string because of move from gpu
assert all(cat in [None] + sorted(cats_expected1.tolist()) for cat in cats1.tolist())
assert len(cats1.tolist()) == len(cats_expected1.tolist() + [None])
# Write to new "shuffled" and "processed" dataset
workflow.transform(dataset).to_parquet(
tmpdir,
out_files_per_proc=10,
shuffle=nvt.io.Shuffle.PER_PARTITION,
)
dataset_2 = Dataset(glob.glob(str(tmpdir) + "/*.parquet"), part_mem_fraction=gpu_memory_frac)
df_pp = nvt.dispatch.concat(list(dataset_2.to_iter()), axis=0)
if engine == "parquet":
assert is_integer_dtype(df_pp["name-cat"].dtype)
assert is_integer_dtype(df_pp["name-string"].dtype)
num_rows, num_row_groups, col_names = nvt.dispatch.read_parquet_metadata(
str(tmpdir) + "/_metadata"
)
assert num_rows == len(df_pp)
@pytest.mark.parametrize("shuffle", [nvt.io.Shuffle.PER_WORKER, nvt.io.Shuffle.PER_PARTITION, None])
@pytest.mark.parametrize("use_client", [True, False])
def test_parquet_output(client, use_client, tmpdir, shuffle):
out_files_per_proc = 2
set_dask_client(client=client if use_client else None)
n_workers = len(client.cluster.workers) if use_client else 1
out_path = str(tmpdir.mkdir("processed"))
path = str(tmpdir.join("simple.parquet"))
size = 25
row_group_size = 5
df = make_df({"a": np.arange(size)})
df.to_parquet(path, row_group_size=row_group_size, engine="pyarrow")
columns = ["a"]
dataset = nvt.Dataset(path, engine="parquet", row_groups_per_part=1)
workflow = nvt.Workflow(columns >> ops.Normalize())
workflow.fit_transform(dataset).to_parquet(
output_path=out_path, shuffle=shuffle, out_files_per_proc=out_files_per_proc
)
# Check that the number of output files is correct
result = glob.glob(os.path.join(out_path, "*.parquet"))
assert len(result) == out_files_per_proc * n_workers
# Make sure _metadata exists
meta_path = os.path.join(out_path, "_metadata")
assert os.path.exists(meta_path)
# Make sure _metadata makes sense
_metadata = nvt.dispatch.read_parquet_metadata(meta_path)
assert _metadata[0] == size
assert _metadata[2] == columns
@pytest.mark.parametrize("engine", ["parquet"])
def test_join_external_workflow(tmpdir, df, dataset, engine):
# Define "external" table
how = "left"
drop_duplicates = True
cache = "device"
shift = 100
df_ext = df[["id"]].copy().sort_values("id")
df_ext["new_col"] = df_ext["id"] + shift
df_ext["new_col_2"] = "keep"
df_ext["new_col_3"] = "ignore"
df_ext_check = df_ext.copy()
# Define Op
on = "id"
columns_left = list(df.columns)
columns_ext = ["id", "new_col", "new_col_2"]
df_ext_check = df_ext_check[columns_ext]
if drop_duplicates:
df_ext_check.drop_duplicates(ignore_index=True, inplace=True)
joined = ColumnSelector(columns_left) >> nvt.ops.JoinExternal(
df_ext,
on,
how=how,
columns_ext=columns_ext,
cache=cache,
drop_duplicates_ext=drop_duplicates,
)
# Define Workflow
gdf = df.reset_index()
dataset = nvt.Dataset(gdf)
processor = nvt.Workflow(joined)
processor.fit(dataset)
new_gdf = processor.transform(dataset).to_ddf().compute().reset_index()
# Validate
check_gdf = gdf.merge(df_ext_check, how=how, on=on)
assert len(check_gdf) == len(new_gdf)
assert (new_gdf["id"] + shift).all() == new_gdf["new_col"].all()
assert gdf["id"].all() == new_gdf["id"].all()
assert "new_col_2" in new_gdf.columns
assert "new_col_3" not in new_gdf.columns
@pytest.mark.parametrize("shuffle", [nvt.io.Shuffle.PER_WORKER, nvt.io.Shuffle.PER_PARTITION, None])
@pytest.mark.parametrize("use_client", [True, False])
@pytest.mark.parametrize("apply_offline", [True, False])
def test_workflow_apply(client, use_client, tmpdir, shuffle, apply_offline):
set_dask_client(client=client if use_client else None)
out_files_per_proc = 2
out_path = str(tmpdir.mkdir("processed"))
path = str(tmpdir.join("simple.parquet"))
size = 25
row_group_size = 5
cont_names = ["cont1", "cont2"]
cat_names = ["cat1", "cat2"]
label_name = ["label"]
df = make_df(
{
"cont1": np.arange(size, dtype=np.float64),
"cont2": np.arange(size, dtype=np.float64),
"cat1": np.arange(size, dtype=np.int32),
"cat2": np.arange(size, dtype=np.int32),
"label": np.arange(size, dtype=np.float64),
}
)
df.to_parquet(path, row_group_size=row_group_size, engine="pyarrow")
dataset = nvt.Dataset(path, engine="parquet", row_groups_per_part=1)
cat_features = cat_names >> ops.Categorify()
cont_features = cont_names >> ops.FillMissing() >> ops.Clip(min_value=0) >> ops.LogOp
workflow = Workflow(cat_features + cont_features + label_name)
workflow.fit(dataset)
# Force dtypes
dict_dtypes = {}
for col in cont_names:
dict_dtypes[col] = np.float32
for col in cat_names:
dict_dtypes[col] = np.float32
for col in label_name:
dict_dtypes[col] = np.int64
workflow.transform(dataset).to_parquet(
# apply_offline=apply_offline, Not any more?
# record_stats=apply_offline, Not any more?
output_path=out_path,
shuffle=shuffle,
out_files_per_proc=out_files_per_proc,
dtypes=dict_dtypes,
)
# Check dtypes
for filename in glob.glob(os.path.join(out_path, "*.parquet")):
gdf = nvt.dispatch.read_dispatch(filename)(filename)
assert dict(gdf.dtypes) == dict_dtypes
@pytest.mark.parametrize("use_parquet", [True, False])
def test_workflow_generate_columns(tmpdir, use_parquet):
out_path = str(tmpdir.mkdir("processed"))
path = str(tmpdir.join("simple.parquet"))
# Stripped down dataset with geo_locaiton codes like in outbrains
df = make_df({"geo_location": ["US>CA", "CA>BC", "US>TN>659"]})
# defining a simple workflow that strips out the country code from the first two digits of the
# geo_location code and sticks in a new 'geo_location_country' field
country = (
["geo_location"]
>> ops.LambdaOp(
f=lambda col: col.str.slice(0, 2),
)
>> ops.Rename(postfix="_country")
)
cat_features = ["geo_location"] + country >> ops.Categorify()
workflow = Workflow(cat_features)
if use_parquet:
df.to_parquet(path)
dataset = nvt.Dataset(path)
else:
dataset = nvt.Dataset(df)
# just make sure this works without errors
workflow.fit(dataset)
workflow.transform(dataset).to_parquet(out_path)
def test_fit_simple():
data = make_df({"x": [0, 1, 2, None, 0, 1, 2], "y": [None, 3, 4, 5, 3, 4, 5]})
dataset = Dataset(data)
workflow = Workflow(["x", "y"] >> ops.FillMedian() >> ops.LambdaOp(lambda x: x * x))
workflow.fit(dataset)
transformed = workflow.transform(dataset).to_ddf().compute()
expected = make_df({"x": [0, 1, 4, 1, 0, 1, 4], "y": [16, 9, 16, 25, 9, 16, 25]})
if not HAS_GPU:
transformed["x"] = transformed["x"].astype(expected["x"].dtype)
transformed["y"] = transformed["y"].astype(expected["y"].dtype)
assert_eq(expected, transformed)
@pytest.mark.skipif(not cudf, reason="needs cudf")
def test_transform_geolocation():
raw = """US>SC>519 US>CA>807 US>MI>505 US>CA>510 CA>NB US>CA>534""".split()
data = make_df({"geo_location": raw})
geo_location = ColumnSelector(["geo_location"])
state = (
geo_location
>> ops.LambdaOp(lambda col: col.str.slice(0, 5))
>> ops.Rename(postfix="_state")
)
country = (
geo_location
>> ops.LambdaOp(lambda col: col.str.slice(0, 2))
>> ops.Rename(postfix="_country")
)
geo_features = state + country + geo_location >> ops.HashBucket(num_buckets=100)
# for this workflow we don't have any statoperators, so we can get away without fitting
workflow = Workflow(geo_features)
transformed = workflow.transform(Dataset(data)).to_ddf().compute()
expected = make_df()
expected["geo_location_state"] = data["geo_location"].str.slice(0, 5).hash_values() % 100
expected["geo_location_country"] = data["geo_location"].str.slice(0, 2).hash_values() % 100
expected["geo_location"] = data["geo_location"].hash_values() % 100
expected = expected.astype(np.int32)
assert_eq(expected, transformed)
def test_workflow_move_saved(tmpdir):
raw = """US>SC>519 US>CA>807 US>MI>505 US>CA>510 CA>NB US>CA>534""".split()
data = make_df({"geo": raw})
geo_location = ColumnSelector(["geo"])
state = (
geo_location
>> ops.LambdaOp(lambda col: col.str.slice(0, 5))
>> ops.Rename(postfix="_state")
)
country = (
geo_location
>> ops.LambdaOp(lambda col: col.str.slice(0, 2))
>> ops.Rename(postfix="_country")
)
geo_features = state + country + geo_location >> ops.Categorify()
# create the workflow and transform the input
workflow = Workflow(geo_features)
expected = workflow.fit_transform(Dataset(data)).to_ddf().compute()
# save the workflow (including categorical mapping parquet files)
# and then verify we can load the saved workflow after moving the directory
out_path = os.path.join(tmpdir, "output", "workflow")
workflow.save(out_path)
moved_path = os.path.join(tmpdir, "output", "workflow2")
shutil.move(out_path, moved_path)
workflow2 = Workflow.load(moved_path)
# also check that when transforming our input we get the same results after loading
transformed = workflow2.transform(Dataset(data)).to_ddf().compute()
assert_eq(expected, transformed)
def test_workflow_input_output_dtypes():
df = make_df({"genre": ["drama", "comedy"], "user": ["a", "b"], "unneeded": [1, 2]})
features = [["genre", "user"], "genre"] >> ops.Categorify(encode_type="combo")
workflow = Workflow(features)
workflow.fit(Dataset(df))
assert "unneeded" not in workflow.input_dtypes
assert set(workflow.input_dtypes.keys()) == {"genre", "user"}
assert set(workflow.output_dtypes.keys()) == {"genre_user", "genre"}
@pytest.mark.skipif(not cudf, reason="needs cudf")
def test_workflow_transform_ddf_dtypes():
# Initial Dataset
dtypes = {"name": str, "id": int, "x": float, "y": float}
df = cudf.datasets.timeseries(dtypes=dtypes).reset_index()
ddf = dask_cudf.from_cudf(df, npartitions=2)
dataset = Dataset(ddf)
# Create and Execute Workflow
cols = ["name", "x", "y", "timestamp"]
cat_cols = ["id"] >> ops.Normalize()
workflow = Workflow(cols + cat_cols)
workflow.fit(dataset)
transformed_ddf = workflow.transform(dataset).to_ddf()
# no transforms on the pass through cols, should have original dtypes
for col in cols:
assert_eq(ddf.dtypes[col], transformed_ddf.dtypes[col])
# Followup dask-cudf sorting used to throw an exception because of dtype issues,
# check that it works now
transformed_ddf.sort_values(["id", "timestamp"]).compute()
def test_workflow_saved_schema(tmpdir):
raw = """US>SC>519 US>CA>807 US>MI>505 US>CA>510 CA>NB US>CA>534""".split()
data = make_df({"geo": raw})
geo_location = ColumnSelector(["geo"])
state = (
geo_location
>> ops.LambdaOp(lambda col: col.str.slice(0, 5))
>> ops.Rename(postfix="_state")
)
country = (
geo_location
>> ops.LambdaOp(lambda col: col.str.slice(0, 2))
>> ops.Rename(postfix="_country")
)
geo_features = state + country + geo_location >> ops.Categorify()
# create the workflow and transform the input
workflow = Workflow(geo_features)
workflow.fit(Dataset(data))
real_input_schema = workflow.input_schema
real_output_schema = workflow.output_schema
# save the workflow (including categorical mapping parquet files)
# and then verify we can load the saved workflow after moving the directory
out_path = os.path.join(tmpdir, "output", "workflow")
workflow.save(out_path)
workflow2 = Workflow.load(out_path)
assert workflow2.input_schema == real_input_schema
assert workflow2.output_schema == real_output_schema
for node in postorder_iter_nodes(workflow2.output_node):
assert node.input_schema is not None
assert node.output_schema is not None
|
the-stack_0_1464 | #!/usr/bin/env python
##############################################################################
# Copyright (c) 2017 Intel Corporation
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
# Unittest for yardstick.benchmark.orchestrator.heat
import unittest
import mock
from yardstick.orchestrator.kubernetes import KubernetesObject
from yardstick.orchestrator.kubernetes import KubernetesTemplate
class GetTemplateTestCase(unittest.TestCase):
def test_get_template(self):
output_t = {
"apiVersion": "v1",
"kind": "ReplicationController",
"metadata": {
"name": "host-k8s-86096c30"
},
"spec": {
"replicas": 1,
"template": {
"metadata": {
"labels": {
"app": "host-k8s-86096c30"
}
},
"spec": {
"containers": [
{
"args": [
"-c",
"chmod 700 ~/.ssh; chmod 600 ~/.ssh/*; \
service ssh restart;while true ; do sleep 10000; done"
],
"command": [
"/bin/bash"
],
"image": "openretriever/yardstick",
"name": "host-k8s-86096c30-container",
"volumeMounts": [
{
"mountPath": "/root/.ssh/",
"name": "k8s-86096c30-key"
}
]
}
],
"volumes": [
{
"configMap": {
"name": "k8s-86096c30-key"
},
"name": "k8s-86096c30-key"
}
],
"nodeSelector": {
"kubernetes.io/hostname": "node-01"
}
}
}
}
}
input_s = {
'command': '/bin/bash',
'args': ['-c', 'chmod 700 ~/.ssh; chmod 600 ~/.ssh/*; \
service ssh restart;while true ; do sleep 10000; done'],
'ssh_key': 'k8s-86096c30-key',
'nodeSelector': { 'kubernetes.io/hostname': 'node-01'}
}
name = 'host-k8s-86096c30'
output_r = KubernetesObject(name, **input_s).get_template()
self.assertEqual(output_r, output_t)
class GetRcPodsTestCase(unittest.TestCase):
@mock.patch('yardstick.orchestrator.kubernetes.k8s_utils.get_pod_list')
def test_get_rc_pods(self, mock_get_pod_list):
servers = {
'host': {
'image': 'openretriever/yardstick',
'command': '/bin/bash',
'args': ['-c', 'chmod 700 ~/.ssh; chmod 600 ~/.ssh/*; \
service ssh restart;while true ; do sleep 10000; done']
},
'target': {
'image': 'openretriever/yardstick',
'command': '/bin/bash',
'args': ['-c', 'chmod 700 ~/.ssh; chmod 600 ~/.ssh/*; \
service ssh restart;while true ; do sleep 10000; done']
}
}
k8s_template = KubernetesTemplate('k8s-86096c30', servers)
mock_get_pod_list.return_value.items = []
pods = k8s_template.get_rc_pods()
self.assertEqual(pods, [])
def main():
unittest.main()
if __name__ == '__main__':
main()
|
the-stack_0_1465 | #!/usr/bin/env python
r"""Import BTi / 4D MagnesWH3600 data to fif file.
Notes
-----
1. Currently direct inclusion of reference channel weights
is not supported. Please use \'mne_create_comp_data\' to include
the weights or use the low level functions from this module to
include them by yourself.
2. The informed guess for the 4D name is E31 for the ECG channel and
   E63, E64 for the EOG channels. Please check and adjust if those channels
are present in your dataset but 'ECG 01' and 'EOG 01', 'EOG 02' don't
appear in the channel names of the raw object.
Examples
--------
.. code-block:: console
$ mne bti2fiff --pdf C,rfDC -o my_raw.fif
"""
# Authors: Denis A. Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Alexandre Gramfort <[email protected]>
# Matti Hamalainen <[email protected]>
# Yuval Harpaz <[email protected]>
#
# simplified bsd-3 license
import sys
import mne
from mne.io import read_raw_bti
def run():
"""Run command."""
from mne.commands.utils import get_optparser
parser = get_optparser(__file__)
parser.add_option('-p', '--pdf', dest='pdf_fname',
help='Input data file name', metavar='FILE')
parser.add_option('-c', '--config', dest='config_fname',
help='Input config file name', metavar='FILE',
default='config')
parser.add_option('--head_shape', dest='head_shape_fname',
help='Headshape file name', metavar='FILE',
default='hs_file')
parser.add_option('-o', '--out_fname', dest='out_fname',
help='Name of the resulting fiff file',
default='as_data_fname')
parser.add_option('-r', '--rotation_x', dest='rotation_x', type='float',
help='Compensatory rotation about Neuromag x axis, deg',
default=2.0)
parser.add_option('-T', '--translation', dest='translation', type='str',
help='Default translation, meter',
default=(0.00, 0.02, 0.11))
parser.add_option('--ecg_ch', dest='ecg_ch', type='str',
help='4D ECG channel name',
default='E31')
parser.add_option('--eog_ch', dest='eog_ch', type='str',
help='4D EOG channel names',
default='E63,E64')
options, args = parser.parse_args()
pdf_fname = options.pdf_fname
if pdf_fname is None:
parser.print_help()
sys.exit(1)
config_fname = options.config_fname
head_shape_fname = options.head_shape_fname
out_fname = options.out_fname
rotation_x = options.rotation_x
translation = options.translation
ecg_ch = options.ecg_ch
    eog_ch = options.eog_ch.split(',')  # read the EOG option (was mistakenly reading the ECG one)
if out_fname == 'as_data_fname':
out_fname = pdf_fname + '_raw.fif'
raw = read_raw_bti(pdf_fname=pdf_fname, config_fname=config_fname,
head_shape_fname=head_shape_fname,
rotation_x=rotation_x, translation=translation,
ecg_ch=ecg_ch, eog_ch=eog_ch)
raw.save(out_fname)
raw.close()
mne.utils.run_command_if_main()
|
the-stack_0_1467 | # coding=utf-8
import urllib
from bs4 import BeautifulSoup
def run(bot, chat_id, user, keyConfig, message, totalResults=1):
requestText = message.replace(bot.name, '').strip().upper()
code = urllib.urlopen('http://www.abbreviations.com/' + requestText).read()
resultsList = acronym_results_parser(code)
result = ''
if resultsList:
searchResults = acronym_results_printer(requestText, resultsList)
result = user + ', ' + searchResults
else:
        result = 'I\'m sorry ' + (user if user != '' else 'Dave') + \
            ', I\'m afraid I can\'t find the acronym *' + \
            str(requestText) + '*'
try:
bot.sendMessage(chat_id=chat_id, text=result, parse_mode='Markdown')
except:
bot.sendMessage(chat_id=chat_id, text=result.replace('*', ''))
def acronym_results_parser(code):
soup = BeautifulSoup(code, 'html.parser')
resultList = []
for resultRow in soup.findAll('p', attrs={'class':'desc'}):
resultList.append(resultRow.string)
return resultList
def acronym_results_printer(request, list):
AllGameDetailsFormatted = '*' + str(request) + '* could mean:'
for item in list:
encodedItem = str(item)
if (encodedItem != 'None'):
AllGameDetailsFormatted += '\n'
            for char in encodedItem.replace('Definition', '').replace('*', '\\*'):
if char.isupper():
AllGameDetailsFormatted += '*' + char + '*'
else:
AllGameDetailsFormatted += char
return AllGameDetailsFormatted
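

# Small illustrative check of the parser/printer pair (not part of the original
# bot). The HTML fragment below only mimics the structure the parser expects
# (<p class="desc"> entries); it is not a real abbreviations.com page.
if __name__ == '__main__':
    sample_html = ('<html><body>'
                   '<p class="desc">Application Programming Interface</p>'
                   '<p class="desc">Academic Performance Index</p>'
                   '</body></html>')
    meanings = acronym_results_parser(sample_html)
    print(acronym_results_printer('API', meanings))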
|
the-stack_0_1468 | # coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
import os
from pyiron.lammps.base import Input
from pyiron.lammps.interactive import LammpsInteractive
from pyiron_contrib.atomistics.mlip.mlip import read_cgfs
from pyiron_base import GenericParameters
__author__ = "Jan Janssen"
__copyright__ = "Copyright 2020, Max-Planck-Institut für Eisenforschung GmbH - " \
"Computational Materials Design (CM) Department"
__version__ = "1.0"
__maintainer__ = "Jan Janssen"
__email__ = "[email protected]"
__status__ = "development"
__date__ = "Sep 1, 2018"
class LammpsMlip(LammpsInteractive):
def __init__(self, project, job_name):
super(LammpsMlip, self).__init__(project, job_name)
self.input = MlipInput()
self.__name__ = "LammpsMlip"
self.__version__ = None # Reset the version number to the executable is set automatically
self._executable = None
self._executable_activate()
def set_input_to_read_only(self):
"""
        This function enforces read-only mode for the input classes, but it has to be implemented in the individual
classes.
"""
super(LammpsMlip, self).set_input_to_read_only()
self.input.mlip.read_only = True
def write_input(self):
super(LammpsMlip, self).write_input()
if self.input.mlip['mlip:load-from'] == 'auto':
self.input.mlip['mlip:load-from'] = os.path.basename(self.potential['Filename'][0][0])
self.input.mlip.write_file(file_name="mlip.ini", cwd=self.working_directory)
def enable_active_learning(self):
self.input.mlip.load_string("""\
abinitio void
mlip mtpr
mlip:load-from Trained.mtp_
calculate-efs TRUE
fit FALSE
select TRUE
select:site-en-weight 0.0
select:energy-weight 1.0
select:force-weight 0.0
select:stress-weight 0.0
select:threshold-init 1e-5
select:threshold 2.0
select:threshold-swap 1.000001
select:threshold-break 5.0
select:save-selected selected.cfg
select:save-state selection.mvs
select:load-state state.mvs
select:efs-ignored FALSE
select:log selection.log
write-cfgs:skip 0
log lotf.log""")
def collect_output(self):
super(LammpsMlip, self).collect_output()
if 'select:save-selected' in self.input.mlip._dataset['Parameter']:
file_name = os.path.join(self.working_directory, self.input.mlip['select:save-selected'])
if os.path.exists(file_name):
cell, positions, forces, stress, energy, indicies, grades, jobids, timesteps = read_cgfs(file_name=file_name)
with self.project_hdf5.open("output/mlip") as hdf5_output:
hdf5_output['forces'] = forces
hdf5_output['energy_tot'] = energy
hdf5_output['pressures'] = stress
hdf5_output['cells'] = cell
hdf5_output['positions'] = positions
hdf5_output['indicies'] = indicies
class MlipInput(Input):
def __init__(self):
self.mlip = MlipParameter()
super(MlipInput, self).__init__()
def to_hdf(self, hdf5):
"""
Args:
hdf5:
Returns:
"""
with hdf5.open("input") as hdf5_input:
self.mlip.to_hdf(hdf5_input)
super(MlipInput, self).to_hdf(hdf5)
def from_hdf(self, hdf5):
"""
Args:
hdf5:
Returns:
"""
with hdf5.open("input") as hdf5_input:
self.mlip.from_hdf(hdf5_input)
super(MlipInput, self).from_hdf(hdf5)
class MlipParameter(GenericParameters):
def __init__(self, separator_char=' ', comment_char='#', table_name="mlip_inp"):
super(MlipParameter, self).__init__(separator_char=separator_char, comment_char=comment_char, table_name=table_name)
def load_default(self, file_content=None):
if file_content is None:
file_content = '''\
abinitio void
mlip mtpr
mlip:load-from auto
calculate-efs TRUE
fit FALSE
select FALSE
'''
self.load_string(file_content)
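

# Rough usage sketch (illustrative only; the project name, structure, potential
# name and the exact job-creation call are assumptions and may differ between
# pyiron versions):
#
#   from pyiron import Project
#   pr = Project('mlip_demo')
#   job = pr.create_job(LammpsMlip, 'lmp_mlip')
#   job.structure = pr.create_ase_bulk('Fe', cubic=True)
#   job.potential = 'Fe_mlip_potential'   # must point at a trained .mtp file
#   job.enable_active_learning()
#   job.run()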
|
the-stack_0_1469 | from torch import nn
from torchvision.models import resnet
from torch.utils import model_zoo
class ResEnc(resnet.ResNet):
def __init__(self, block, layers, url=None):
self.url = url
super().__init__(block, layers)
del self.avgpool
del self.fc
def initialize(self):
if self.url:
self.load_state_dict(model_zoo.load_url(self.url), strict=False)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x2 = self.layer1(x)
x3 = self.layer2(x2)
x4 = self.layer3(x3)
x5 = self.layer4(x4)
return [x2, x3, x4, x5]
def res18_enc():
encoder = ResEnc(resnet.BasicBlock, [2, 2, 2, 2], resnet.model_urls['resnet18'])
encoder.initialize()
return encoder
def res34_enc():
encoder = ResEnc(resnet.BasicBlock, [3, 4, 6, 3], resnet.model_urls['resnet34'])
encoder.initialize()
return encoder
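

# Illustrative sketch (not part of the original module): feeding a dummy batch
# through the ResNet-18 encoder yields four feature maps whose channel counts
# match res_inchannels below. The 224x224 input size is an arbitrary assumption,
# and initialize() downloads the pretrained ResNet-18 weights on first use.
if __name__ == '__main__':
    import torch
    enc = res18_enc()
    feats = enc(torch.randn(1, 3, 224, 224))
    print([tuple(f.shape) for f in feats])
    # -> (1, 64, 56, 56), (1, 128, 28, 28), (1, 256, 14, 14), (1, 512, 7, 7)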
res_inchannels = [64, 128, 256, 512]
|
the-stack_0_1470 | r"""
Relative finite field extensions
Considering an *absolute field* `F_{q^m}` and a *relative field* `F_q`, with
`q = p^s`, `p` being a prime and `s, m` being integers, this file
contains a class to take care of the representation of `F_{q^m}`-elements
as `F_q`-elements.
.. WARNING::
As this code is experimental, a warning is thrown when a
relative finite field extension is created for the first time
in a session (see :class:`sage.misc.superseded.experimental`).
TESTS::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: RelativeFiniteFieldExtension(Fqm, Fq)
doctest:...: FutureWarning: This class/method/function is marked as experimental. It, its functionality or its interface might change without a formal deprecation.
See http://trac.sagemath.org/20284 for details.
Relative field extension between Finite Field in aa of size 2^4 and Finite Field in a of size 2^2
"""
# ****************************************************************************
# Copyright (C) 2016 David Lucas <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.misc.cachefunc import cached_method
from sage.structure.sage_object import SageObject
from sage.categories.homset import Hom
from sage.matrix.constructor import column_matrix
from sage.modules.free_module_element import vector
from sage.misc.superseded import experimental
class RelativeFiniteFieldExtension(SageObject):
r"""
    Considering a prime number `p`, an integer `n` and three finite fields
`F_p`, `F_q` and `F_{q^m}`, this class contains a set of methods
to manage the representation of elements of the relative extension
`F_{q^m}` over `F_q`.
INPUT:
- ``absolute_field``, ``relative_field`` -- two finite fields, ``relative_field``
being a subfield of ``absolute_field``
    - ``embedding`` -- (default: ``None``) a homomorphism from ``relative_field`` to
``absolute_field``. If ``None`` is provided, it will default to the first
homomorphism of the list of homomorphisms Sage can build.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: RelativeFiniteFieldExtension(Fqm, Fq)
Relative field extension between Finite Field in aa of size 2^4 and Finite Field in a of size 2^2
It is possible to specify the embedding to use
from ``relative_field`` to ``absolute_field``::
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq, embedding=Hom(Fq, Fqm)[1])
sage: FE.embedding() == Hom(Fq, Fqm)[1]
True
"""
@experimental(trac_number=20284)
def __init__(self, absolute_field, relative_field, embedding=None):
r"""
TESTS:
If ``absolute_field`` is not a finite field, an error is raised::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm = RR
sage: Fq.<a> = GF(4)
sage: RelativeFiniteFieldExtension(Fqm, Fq)
Traceback (most recent call last):
...
ValueError: absolute_field has to be a finite field
Same for ``relative_field``::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq = RR
sage: RelativeFiniteFieldExtension(Fqm, Fq)
Traceback (most recent call last):
...
ValueError: relative_field has to be a finite field
If ``relative_field`` is not a subfield of ``absolute_field``, an exception
is raised::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(8)
sage: RelativeFiniteFieldExtension(Fqm, Fq)
Traceback (most recent call last):
...
ValueError: relative_field has to be a subfield of absolute_field
"""
if not absolute_field.is_finite():
raise ValueError("absolute_field has to be a finite field")
if not relative_field.is_finite():
raise ValueError("relative_field has to be a finite field")
s = relative_field.degree()
sm = absolute_field.degree()
if not s.divides(sm):
raise ValueError("relative_field has to be a subfield of absolute_field")
H = Hom(relative_field, absolute_field)
if embedding is not None and embedding not in H:
raise ValueError("embedding has to be an embedding from relative_field to absolute_field")
elif embedding is not None:
self._phi = embedding
else:
self._phi = H[0]
self._prime_field = relative_field.base_ring()
self._relative_field = relative_field
self._absolute_field = absolute_field
alpha = relative_field.gen()
beta = absolute_field.gen()
self._alphas = [alpha ** i for i in range(s)]
self._betas = [beta ** i for i in range(sm)]
self._relative_field_degree = s
self._absolute_field_degree = sm
def _repr_(self):
r"""
Returns a string representation of ``self``.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: RelativeFiniteFieldExtension(Fqm, Fq)
Relative field extension between Finite Field in aa of size 2^4 and Finite Field in a of size 2^2
"""
return "Relative field extension between %s and %s" % (self.absolute_field(), self.relative_field())
def _latex_(self):
r"""
Returns a latex representation of ``self``.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: latex(RelativeFiniteFieldExtension(Fqm, Fq))
\textnormal{Relative field extension between \Bold{F}_{2^{4}} and \Bold{F}_{2^{2}}}
"""
return "\\textnormal{Relative field extension between %s and %s}" % (self.absolute_field()._latex_(),
self.relative_field()._latex_())
def __eq__(self, other):
r"""
Tests equality between embeddings.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fq = GF(4)
sage: FQ = GF(4**3)
sage: H = Hom(Fq, FQ)
sage: E1 = RelativeFiniteFieldExtension(FQ, Fq)
sage: E2 = RelativeFiniteFieldExtension(FQ, Fq, H[0])
sage: E3 = RelativeFiniteFieldExtension(FQ, Fq, H[1])
sage: E1 == E2
True
sage: E1 == E3
False
"""
return isinstance(other, RelativeFiniteFieldExtension) \
and self.embedding() == other.embedding()
@cached_method
def _representation_matrix(self):
r"""
        Returns the matrix used to represent elements of the absolute field
as vectors in the basis of the relative field over the prime field.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: FE._representation_matrix()
[1 0 0 0]
[0 0 1 1]
[0 1 1 1]
[0 0 0 1]
"""
s = self.relative_field_degree()
m = self.extension_degree()
betas = self.absolute_field_basis()
phi_alphas = [ self._phi(self._alphas[i]) for i in range(s) ]
A = column_matrix([vector(betas[i] * phi_alphas[j])
for i in range(m) for j in range(s)])
return A.inverse()
def _flattened_relative_field_representation(self, b):
r"""
Returns a vector representation of ``b`` in the basis of
the relative field over the prime field.
INPUT:
- ``b`` -- an element of the absolute field
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: b = aa^3 + aa^2 + aa + 1
sage: FE._flattened_relative_field_representation(b)
(1, 0, 1, 1)
"""
if b not in self.absolute_field():
raise ValueError("The input has to be an element of the absolute field")
return self._representation_matrix() * vector(b)
def relative_field_representation(self, b):
r"""
Returns a vector representation of the field element ``b`` in the basis
of the absolute field over the relative field.
INPUT:
- ``b`` -- an element of the absolute field
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: b = aa^3 + aa^2 + aa + 1
sage: FE.relative_field_representation(b)
(1, a + 1)
"""
if b not in self.absolute_field():
raise ValueError("The input has to be an element of the absolute field")
s = self.relative_field_degree()
if s == 1:
return vector(b)
Fq = self.relative_field()
vect = self._flattened_relative_field_representation(b)
sm = self.absolute_field_degree()
list_elts = []
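        # The flattened vector holds sm prime-field coordinates; consecutive
        # blocks of length s give the prime-field representation of each of
        # the m coefficients over the relative field.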
for i in range(0, sm, s):
list_elts.append(Fq(vect[i:i + s]))
return vector(Fq, list_elts)
def absolute_field_representation(self, a):
r"""
Returns an absolute field representation of the relative field
vector ``a``.
INPUT:
- ``a`` -- a vector in the relative extension field
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: b = aa^3 + aa^2 + aa + 1
sage: rel = FE.relative_field_representation(b)
sage: FE.absolute_field_representation(rel) == b
True
"""
s = self.relative_field_degree()
m = self.extension_degree()
if len(a) != m:
raise ValueError("The input has to be a vector with length equal to the order of the absolute field")
if not a.base_ring() == self.relative_field():
raise ValueError("The input has to be over the prime field")
alphas = self.relative_field_basis()
betas = self.absolute_field_basis()
phi = self.embedding()
b = self.absolute_field().zero()
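        # Flatten the m relative-field coefficients of ``a`` into their
        # prime-field coordinates, then rebuild the absolute field element as
        # sum_i betas[i] * phi(sum_j c_{i,j} * alphas[j]).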
flattened_relative_field_rep_list = []
for i in a:
tmp = vector(i).list()
for j in tmp:
flattened_relative_field_rep_list.append(j)
flattened_relative_field_rep = vector(flattened_relative_field_rep_list)
for i in range(m):
b += betas[i] * phi(sum([flattened_relative_field_rep[j] * alphas[j%s] for j in range(i*s, i*s + s)]))
return b
def is_in_relative_field(self, b):
r"""
Returns ``True`` if ``b`` is in the relative field.
INPUT:
- ``b`` -- an element of the absolute field.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: FE.is_in_relative_field(aa^2 + aa)
True
sage: FE.is_in_relative_field(aa^3)
False
"""
vect = self.relative_field_representation(b)
return vect[1:vect.length()].is_zero()
def cast_into_relative_field(self, b, check=True):
r"""
Casts an absolute field element into the relative field (if possible).
This is the inverse function of the field embedding.
INPUT:
- ``b`` -- an element of the absolute field which also lies in the
relative field.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: phi = FE.embedding()
sage: b = aa^2 + aa
sage: FE.is_in_relative_field(b)
True
sage: FE.cast_into_relative_field(b)
a
sage: phi(FE.cast_into_relative_field(b)) == b
True
"""
if check:
if not self.is_in_relative_field(b):
raise ValueError("%s does not belong to the relative field" % b)
return self.relative_field_representation(b)[0]
def embedding(self):
r"""
Returns the embedding which is used to go from the
relative field to the absolute field.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: FE.embedding()
Ring morphism:
From: Finite Field in a of size 2^2
To: Finite Field in aa of size 2^4
Defn: a |--> aa^2 + aa
"""
return self._phi
def relative_field_basis(self):
r"""
Returns a basis of the relative field over the prime field.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: FE.relative_field_basis()
[1, a]
"""
return self._alphas
def absolute_field_basis(self):
r"""
Returns a basis of the absolute field over the prime field.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: FE.absolute_field_basis()
[1, aa, aa^2, aa^3]
"""
return self._betas
def relative_field_degree(self):
r"""
Let `F_p` be the base field of our relative field `F_q`.
Returns `s` where `p^s = q`
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: FE.relative_field_degree()
2
"""
return self._relative_field_degree
def absolute_field_degree(self):
r"""
Let `F_p` be the base field of our absolute field `F_{q^m}`.
Returns `sm` where `p^{sm} = q^{m}`
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: FE.absolute_field_degree()
4
"""
return self._absolute_field_degree
def extension_degree(self):
r"""
Return `m`, the extension degree of the absolute field over
the relative field.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(64)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: FE.extension_degree()
3
"""
return self.absolute_field_degree() // self.relative_field_degree()
def prime_field(self):
r"""
Returns the base field of our absolute and relative fields.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: FE.prime_field()
Finite Field of size 2
"""
return self._prime_field
def relative_field(self):
r"""
Returns the relative field of ``self``.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: FE.relative_field()
Finite Field in a of size 2^2
"""
return self._relative_field
def absolute_field(self):
r"""
Returns the absolute field of ``self``.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: FE.absolute_field()
Finite Field in aa of size 2^4
"""
return self._absolute_field
|
the-stack_0_1472 | from __future__ import absolute_import, division, print_function
from stripe_modern import util
from stripe_modern.api_resources.customer import Customer
from stripe_modern.api_resources.abstract import APIResource
from stripe_modern.six.moves.urllib.parse import quote_plus
class CustomerBalanceTransaction(APIResource):
OBJECT_NAME = "customer_balance_transaction"
def instance_url(self):
token = util.utf8(self.id)
customer = util.utf8(self.customer)
base = Customer.class_url()
cust_extn = quote_plus(customer)
extn = quote_plus(token)
return "%s/%s/balance_transactions/%s" % (base, cust_extn, extn)
@classmethod
def retrieve(cls, id, api_key=None, **params):
raise NotImplementedError(
"Can't retrieve a Customer Balance Transaction without a Customer ID. "
"Use Customer.retrieve_customer_balance_transaction('cus_123', 'cbtxn_123')"
)
|
the-stack_0_1473 | import os
import json
import base64
import random
import hashlib
import jinja2
import webapp2
from google.appengine.ext import ndb
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), "templates")),
autoescape=True
)
class CounterConfig(ndb.Model):
shards = ndb.IntegerProperty(default=50, indexed=False)
class CounterShard(ndb.Model):
count = ndb.IntegerProperty(default=0, indexed=False)
class Hash(ndb.Model):
pass
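# Keys are derived by hashing the base64-encoded parts with a NUL separator,
# so e.g. ("ab", "c") and ("a", "bc") cannot yield the same key; the digest is
# base64-encoded again to keep the resulting key printable.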
def _key(*parts):
hasher = hashlib.sha256()
for part in parts:
hasher.update(base64.b64encode(part) + "\x00")
return base64.b64encode(hasher.digest())
def _shard_key(team, attr, shard):
return _key(team, attr, str(shard))
@ndb.tasklet
def _shards():
cache_key = _key("shards")
context = ndb.get_context()
shards = yield context.memcache_get(cache_key)
if shards is None:
config = yield CounterConfig.get_or_insert_async("main")
shards = config.shards
yield context.memcache_add(cache_key, shards)
raise ndb.Return(shards)
@ndb.tasklet
def mark_tasklet(team, attr, value):
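    # Memcache is only a cheap first-pass duplicate check; the transactional
    # Hash entity lookup in _mark_tasklet() is authoritative, so a memcache
    # miss costs at most an extra datastore read and never a double count.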
cache_key = _key("hash", team, attr, value)
context = ndb.get_context()
exists = yield context.memcache_get(cache_key)
if exists:
return
yield [
context.memcache_add(cache_key, True),
_mark_tasklet(team, attr, value)
]
@ndb.transactional_tasklet(xg=True)
def _mark_tasklet(team, attr, value):
key_name = _key(team, attr, value)
hash_entity = yield Hash.get_by_id_async(key_name)
if hash_entity is not None:
return
yield [
Hash(key=ndb.Key(Hash, key_name)).put_async(),
ndb.get_context().memcache_incr(_key("count", team, attr)),
_incr_tasklet(team, attr)
]
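# Sharded counter pattern: each increment updates one of `shards` CounterShard
# entities chosen at random, spreading writes so that concurrent marks do not
# contend on a single datastore entity; count_tasklet() sums over all shards.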
@ndb.transactional_tasklet
def _incr_tasklet(team, attr):
shards = yield _shards()
shard = random.randint(0, shards - 1)
counter = yield CounterShard.get_or_insert_async(_shard_key(team, attr, shard))
counter.count += 1
yield counter.put_async()
@ndb.tasklet
def count_tasklet(team, attr, force_recount=False):
cache_key = _key("count", team, attr)
context = ndb.get_context()
if not force_recount:
count = yield context.memcache_get(cache_key)
if count is not None:
raise ndb.Return((team, attr, count))
shards = yield _shards()
keys = [ndb.Key(CounterShard, _shard_key(team, attr, shard)) for shard in xrange(shards)]
results = yield ndb.get_multi_async(keys)
count = 0
for counter in results:
if counter is None:
continue
count += counter.count
cache_key = _key("count", team, attr)
context = ndb.get_context()
yield context.memcache_set(cache_key, count, random.randint(90, 120))
raise ndb.Return((team, attr, count))
@ndb.synctasklet
def scores(teams=["yellow", "blue", "red"], force_recount=False):
tasklets = []
for team in teams:
tasklets.extend([
count_tasklet(team, "user_agents", force_recount),
count_tasklet(team, "remote_addrs", force_recount)
])
results = yield tasklets
scores = {}
for team, attr, count in results:
scores.setdefault(team, {}).setdefault(attr, count)
raise ndb.Return(scores)
class TeamPage(webapp2.RequestHandler):
@ndb.synctasklet
def get(self, team):
team = team.lower()
user_agent = self.request.headers.get("user-agent", "")
remote_addr = self.request.remote_addr
yield [
mark_tasklet(team, "user_agents", user_agent),
mark_tasklet(team, "remote_addrs", remote_addr)
]
template = env.get_template("team.html")
self.response.write(template.render({
"team": team.capitalize(),
"image": {
"yellow": "/static/yellowteam.png",
"blue": "/static/blueteam.png",
"red": "/static/redteam.png"
}.get(team, "/static/unknown.png"),
"color": jinja2.Markup({
"yellow": "#FFEF00",
"red": "#53140A",
"blue": "#0056B9"
}.get(team, "#777777"))
}))
class ScorePage(webapp2.RequestHandler):
def get(self):
template = env.get_template("scores.html")
self.response.write(template.render({}))
class ScoreAPI(webapp2.RequestHandler):
def get(self):
self.response.headers["Content-Type"] = "application/json"
self.response.write(json.dumps(scores()))
class RecalcTask(webapp2.RequestHandler):
def get(self):
scores(force_recount=True)
class MainPage(webapp2.RequestHandler):
def get(self):
template = env.get_template("index.html")
self.response.write(template.render({}))
app = webapp2.WSGIApplication(routes=[
("(?i)/(yellow|blue|red)/?", TeamPage),
("(?i)/scores/api/?", ScoreAPI),
("(?i)/scores/?", ScorePage),
("/", MainPage)
])
tasks = webapp2.WSGIApplication(routes=[
("/tasks/recalc_scores", RecalcTask)
])
|
the-stack_0_1476 | import json
from controller.client import Client
def anunciarview():
isvalid, trades = _anunciar()
if isvalid:
print("--------------------- LISTA DE TROCAS -------------------")
for trade in trades:
print("Usuário {", trade.name, '} - Código da Troca: {', trade.idTrade, '}')
print("Oferece -> ID figura: ", trade.offerID, '- Nome: ', trade.offerName, ' - Raridade: ',
trade.offerRarity)
print("Deseja <- ID figura: ", trade.takingID, '- Nome: ', trade.takingName, ' - Raridade: ',
trade.takingRarity)
print('--------------------- ------*----- -------------------')
return trades
else:
        print('We are sorry, but it was not possible to display the trades')
return None
def _anunciar():
client = Client()
response = client.listTrade()
isvalid = response.response
trades = response.list
return isvalid, trades
|
the-stack_0_1477 | from __future__ import unicode_literals
import errno
import os
import re
import socket
import sys
from datetime import datetime
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.core.servers.basehttp import get_internal_wsgi_application, run
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.exceptions import MigrationSchemaMissing
from django.db.migrations.executor import MigrationExecutor
from django.utils import autoreload, six
from django.utils.encoding import force_text, get_system_encoding
naiveip_re = re.compile(r"""^(?:
(?P<addr>
(?P<ipv4>\d{1,3}(?:\.\d{1,3}){3}) | # IPv4 address
(?P<ipv6>\[[a-fA-F0-9:]+\]) | # IPv6 address
(?P<fqdn>[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN
):)?(?P<port>\d+)$""", re.X)
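# Examples of accepted values: "8000", "127.0.0.1:8000", "localhost:8000",
# "[::1]:8000".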
class Command(BaseCommand):
help = "Starts a lightweight Web server for development."
# Validation is called explicitly each time the server is reloaded.
requires_system_checks = False
leave_locale_alone = True
default_port = '8000'
def add_arguments(self, parser):
parser.add_argument('addrport', nargs='?',
help='Optional port number, or ipaddr:port')
parser.add_argument('--ipv6', '-6', action='store_true', dest='use_ipv6', default=False,
help='Tells Django to use an IPv6 address.')
parser.add_argument('--nothreading', action='store_false', dest='use_threading', default=True,
help='Tells Django to NOT use threading.')
parser.add_argument('--noreload', action='store_false', dest='use_reloader', default=True,
help='Tells Django to NOT use the auto-reloader.')
def execute(self, *args, **options):
if options.get('no_color'):
# We rely on the environment because it's currently the only
# way to reach WSGIRequestHandler. This seems an acceptable
# compromise considering `runserver` runs indefinitely.
os.environ[str("DJANGO_COLORS")] = str("nocolor")
super(Command, self).execute(*args, **options)
def get_handler(self, *args, **options):
"""
Returns the default WSGI handler for the runner.
"""
return get_internal_wsgi_application()
def handle(self, *args, **options):
from django.conf import settings
if not settings.DEBUG and not settings.ALLOWED_HOSTS:
raise CommandError('You must set secure.ALLOWED_HOSTS if DEBUG is False.')
self.use_ipv6 = options.get('use_ipv6')
if self.use_ipv6 and not socket.has_ipv6:
raise CommandError('Your Python does not support IPv6.')
self._raw_ipv6 = False
if not options.get('addrport'):
self.addr = ''
self.port = self.default_port
else:
m = re.match(naiveip_re, options['addrport'])
if m is None:
raise CommandError('"%s" is not a valid port number '
'or address:port pair.' % options['addrport'])
self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups()
if not self.port.isdigit():
raise CommandError("%r is not a valid port number." % self.port)
if self.addr:
if _ipv6:
self.addr = self.addr[1:-1]
self.use_ipv6 = True
self._raw_ipv6 = True
elif self.use_ipv6 and not _fqdn:
raise CommandError('"%s" is not a valid IPv6 address.' % self.addr)
if not self.addr:
self.addr = '::1' if self.use_ipv6 else '127.0.0.1'
self._raw_ipv6 = bool(self.use_ipv6)
self.run(**options)
def run(self, **options):
"""
Runs the server, using the autoreloader if needed
"""
use_reloader = options.get('use_reloader')
if use_reloader:
autoreload.main(self.inner_run, None, options)
else:
self.inner_run(None, **options)
def inner_run(self, *args, **options):
# If an exception was silenced in ManagementUtility.execute in order
# to be raised in the child process, raise it now.
autoreload.raise_last_exception()
threading = options.get('use_threading')
shutdown_message = options.get('shutdown_message', '')
quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'
self.stdout.write("Performing system checks...\n\n")
self.check(display_num_errors=True)
self.check_migrations()
now = datetime.now().strftime('%B %d, %Y - %X')
if six.PY2:
now = now.decode(get_system_encoding())
self.stdout.write(now)
self.stdout.write((
"Django version %(version)s, using secure %(secure)r\n"
"Starting development server at http://%(addr)s:%(port)s/\n"
"Quit the server with %(quit_command)s.\n"
) % {
"version": self.get_version(),
"secure": settings.SETTINGS_MODULE,
"addr": '[%s]' % self.addr if self._raw_ipv6 else self.addr,
"port": self.port,
"quit_command": quit_command,
})
try:
handler = self.get_handler(*args, **options)
run(self.addr, int(self.port), handler,
ipv6=self.use_ipv6, threading=threading)
except socket.error as e:
# Use helpful error messages instead of ugly tracebacks.
ERRORS = {
errno.EACCES: "You don't have permission to access that port.",
errno.EADDRINUSE: "That port is already in use.",
errno.EADDRNOTAVAIL: "That IP address can't be assigned to.",
}
try:
error_text = ERRORS[e.errno]
except KeyError:
error_text = force_text(e)
self.stderr.write("Error: %s" % error_text)
# Need to use an OS exit because sys.exit doesn't work in a thread
os._exit(1)
except KeyboardInterrupt:
if shutdown_message:
self.stdout.write(shutdown_message)
sys.exit(0)
def check_migrations(self):
"""
Checks to see if the set of migrations on disk matches the
migrations in the database. Prints a warning if they don't match.
"""
try:
executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
except ImproperlyConfigured:
# No databases are configured (or the dummy one)
return
except MigrationSchemaMissing:
self.stdout.write(self.style.NOTICE(
"\nNot checking migrations as it is not possible to access/create the django_migrations table."
))
return
plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
if plan:
self.stdout.write(self.style.NOTICE(
"\nYou have unapplied migrations; your app may not work properly until they are applied."
))
self.stdout.write(self.style.NOTICE("Run 'python manage.py migrate' to apply them.\n"))
# Kept for backward compatibility
BaseRunserverCommand = Command
|
the-stack_0_1478 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
from enum import Enum
import logging
import optparse
import os
import pdb
import shutil
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
get_datadir_path,
initialize_datadir,
p2p_port,
set_node_times,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
class BitcoinTestFramework():
"""Base class for a nodepay test script.
Individual nodepay test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
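    # A minimal subclass sketch (hypothetical example, for illustration only):
    #
    #     class ExampleTest(BitcoinTestFramework):
    #         def set_test_params(self):
    #             self.num_nodes = 1
    #             self.setup_clean_chain = True
    #         def run_test(self):
    #             self.nodes[0].generate(10)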
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.setup_clean_chain = False
self.nodes = []
self.mocktime = 0
self.supports_cli = False
self.set_test_params()
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave nodepayds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop nodepayds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
help="Source directory containing nodepayd/nodepay-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_option("--configfile", dest="configfile",
help="Location of the test framework config file")
parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_option("--usecli", dest="usecli", default=False, action="store_true",
help="use bitcoin-cli instead of RPC for all commands")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix="test")
self._start_logging()
success = TestStatus.FAILED
try:
if self.options.usecli and not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.setup_chain()
self.setup_network()
time.sleep(5)
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info("Note: nodepayds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up")
shutil.rmtree(self.options.tmpdir)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
if success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
exit_code = TEST_EXIT_FAILED
logging.shutdown()
sys.exit(exit_code)
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, rpchost=None, timewait=None, binary=None):
"""Instantiate TestNode objects"""
if extra_args is None:
extra_args = [[]] * num_nodes
if binary is None:
binary = [None] * num_nodes
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, use_cli=self.options.usecli))
def start_node(self, i, *args, **kwargs):
"""Start a nodepayd"""
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
time.sleep(10)
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
"""Start multiple nodepayds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
time.sleep(10)
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i):
"""Stop a nodepayd test node"""
self.nodes[i].stop_node()
self.nodes[i].wait_until_stopped()
def stop_nodes(self):
"""Stop multiple nodepayd test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node()
for node in self.nodes:
# Wait for nodes to stop
time.sleep(5)
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
"""Stop and start a test node"""
self.stop_node(i)
self.start_node(i, extra_args)
def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None, *args, **kwargs):
with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
try:
self.start_node(i, extra_args, stderr=log_stderr, *args, **kwargs)
self.stop_node(i)
except Exception as e:
assert 'nodepayd exited' in str(e) # node must have shutdown
self.nodes[i].running = False
self.nodes[i].process = None
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8')
if expected_msg not in stderr:
raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
else:
if expected_msg is None:
assert_msg = "nodepayd should have exited with an error"
else:
assert_msg = "nodepayd should have exited with expected error " + expected_msg
raise AssertionError(assert_msg)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
def enable_mocktime(self):
"""Enable mocktime for the script.
mocktime may be needed for scripts that use the cached version of the
blockchain. If the cached version of the blockchain is used without
mocktime then the mempools will not sync due to IBD.
        For backward compatibility of the python scripts with previous
versions of the cache, this helper function sets mocktime to Jan 1,
2014 + (201 * 10 * 60)"""
self.mocktime = 1454124732 + (201 * 10 * 60)
def disable_mocktime(self):
self.mocktime = 0
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as nodepayd's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache."""
assert self.num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(get_datadir_path(self.options.cachedir, i)):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
# find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(get_datadir_path(self.options.cachedir, i)):
shutil.rmtree(get_datadir_path(self.options.cachedir, i))
# Create cache directories, run bitcoinds:
for i in range(MAX_NODES):
datadir = initialize_datadir(self.options.cachedir, i)
args = [os.getenv("BITCOIND", "nodepayd"), "-spendzeroconfchange=1", "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]
if i > 0:
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
self.nodes.append(TestNode(i, self.options.cachedir, extra_args=[], rpchost=None, timewait=None, binary=None, stderr=None, mocktime=self.mocktime, coverage_dir=None))
self.nodes[i].args = args
self.start_node(i)
# Wait for RPC connections to be ready
for node in self.nodes:
node.wait_for_rpc_connection()
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 10 minutes apart
# starting from 2010 minutes in the past
self.enable_mocktime()
block_time = self.mocktime - (201 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generate(1)
block_time += 60
# Must sync before next peer starts generating blocks
sync_blocks(self.nodes)
# Shut them down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
self.disable_mocktime()
def cache_path(n, *paths):
return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths)
for i in range(MAX_NODES):
for entry in os.listdir(cache_path(i)):
if entry not in ['wallet.dat', 'chainstate', 'blocks', 'sporks', 'zerocoin', 'backups']:
os.remove(cache_path(i, entry))
for i in range(self.num_nodes):
from_dir = get_datadir_path(self.options.cachedir, i)
to_dir = get_datadir_path(self.options.tmpdir, i)
shutil.copytree(from_dir, to_dir)
initialize_datadir(self.options.tmpdir, i) # Overwrite port/rpcport in bitcoin.conf
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i)
class ComparisonTestFramework(BitcoinTestFramework):
"""Test framework for doing p2p comparison testing
Sets up some nodepayd binaries:
- 1 binary: test binary
- 2 binaries: 1 test binary, 1 ref binary
- n>2 binaries: 1 test binary, n-1 ref binaries"""
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "nodepayd"),
help="nodepayd binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("BITCOIND", "nodepayd"),
help="nodepayd binary to use for reference nodes (if any)")
def setup_network(self):
extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args,
binary=[self.options.testbinary] +
[self.options.refbinary] * (self.num_nodes - 1))
self.start_nodes()
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
|
the-stack_0_1479 | #! /usr/bin/python3
"""
The database connections are read‐only, so SQL injection attacks can’t be a
problem.
"""
import sys
import os
import threading
import decimal
import time
import json
import re
import requests
import collections
import logging
logger = logging.getLogger(__name__)
from logging import handlers as logging_handlers
D = decimal.Decimal
import binascii
import struct
import apsw
import flask
from flask.ext.httpauth import HTTPBasicAuth
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
import jsonrpc
from jsonrpc import dispatcher
import inspect
from counterpartylib.lib import config
from counterpartylib.lib import exceptions
from counterpartylib.lib import util
from counterpartylib.lib import check
from counterpartylib.lib import backend
from counterpartylib.lib import database
from counterpartylib.lib import transaction
from counterpartylib.lib import blocks
from counterpartylib.lib import script
from counterpartylib.lib.messages import send
from counterpartylib.lib.messages import order
from counterpartylib.lib.messages import btcpay
from counterpartylib.lib.messages import issuance
from counterpartylib.lib.messages import broadcast
from counterpartylib.lib.messages import bet
from counterpartylib.lib.messages import dividend
from counterpartylib.lib.messages import burn
from counterpartylib.lib.messages import cancel
from counterpartylib.lib.messages import rps
from counterpartylib.lib.messages import rpsresolve
from counterpartylib.lib.messages import publish
from counterpartylib.lib.messages import execute
API_TABLES = ['assets', 'balances', 'credits', 'debits', 'bets', 'bet_matches',
'broadcasts', 'btcpays', 'burns', 'cancels',
'dividends', 'issuances', 'orders', 'order_matches', 'sends',
'bet_expirations', 'order_expirations', 'bet_match_expirations',
'order_match_expirations', 'bet_match_resolutions', 'rps',
'rpsresolves', 'rps_matches', 'rps_expirations', 'rps_match_expirations',
'mempool']
API_TRANSACTIONS = ['bet', 'broadcast', 'btcpay', 'burn', 'cancel',
'dividend', 'issuance', 'order', 'send',
'rps', 'rpsresolve', 'publish', 'execute']
COMMONS_ARGS = ['encoding', 'fee_per_kb', 'regular_dust_size',
'multisig_dust_size', 'op_return_value', 'pubkey',
'allow_unconfirmed_inputs', 'fee', 'fee_provided']
API_MAX_LOG_SIZE = 10 * 1024 * 1024 #max log size of 10 MB before rotation (make configurable later)
API_MAX_LOG_COUNT = 10
current_api_status_code = None #is updated by the APIStatusPoller
current_api_status_response_json = None #is updated by the APIStatusPoller
class APIError(Exception):
pass
# TODO: ALL queries EVERYWHERE should be done with these methods
def db_query(db, statement, bindings=(), callback=None, **callback_args):
"""Allow direct access to the database in a parametrized manner."""
cursor = db.cursor()
if hasattr(callback, '__call__'):
cursor.execute(statement, bindings)
for row in cursor:
callback(row, **callback_args)
results = None
else:
results = list(cursor.execute(statement, bindings))
cursor.close()
return results
def get_rows(db, table, filters=None, filterop='AND', order_by=None, order_dir=None, start_block=None, end_block=None,
status=None, limit=1000, offset=0, show_expired=True):
"""SELECT * FROM wrapper. Filters results based on a filter data structure (as used by the API)."""
if filters == None:
filters = []
def value_to_marker(value):
# if value is an array place holder is (?,?,?,..)
if isinstance(value, list):
return '''({})'''.format(','.join(['?' for e in range(0, len(value))]))
else:
return '''?'''
# TODO: Document that op can be anything that SQLite3 accepts.
if not table or table.lower() not in API_TABLES:
raise APIError('Unknown table')
if filterop and filterop.upper() not in ['OR', 'AND']:
raise APIError('Invalid filter operator (OR, AND)')
if order_dir and order_dir.upper() not in ['ASC', 'DESC']:
raise APIError('Invalid order direction (ASC, DESC)')
if not isinstance(limit, int):
raise APIError('Invalid limit')
elif limit > 1000:
raise APIError('Limit should be lower or equal to 1000')
if not isinstance(offset, int):
raise APIError('Invalid offset')
# TODO: accept an object: {'field1':'ASC', 'field2': 'DESC'}
if order_by and not re.compile('^[a-z0-9_]+$').match(order_by):
raise APIError('Invalid order_by, must be a field name')
if isinstance(filters, dict): #single filter entry, convert to a one entry list
filters = [filters,]
elif not isinstance(filters, list):
filters = []
# TODO: Document this! (Each filter can be an ordered list.)
new_filters = []
for filter_ in filters:
if type(filter_) in (list, tuple) and len(filter_) in [3, 4]:
new_filter = {'field': filter_[0], 'op': filter_[1], 'value': filter_[2]}
if len(filter_) == 4:
new_filter['case_sensitive'] = filter_[3]
new_filters.append(new_filter)
elif type(filter_) == dict:
new_filters.append(filter_)
else:
raise APIError('Unknown filter type')
filters = new_filters
# validate filter(s)
for filter_ in filters:
for field in ['field', 'op', 'value']: #should have all fields
if field not in filter_:
raise APIError("A specified filter is missing the '%s' field" % field)
if not isinstance(filter_['value'], (str, int, float, list)):
raise APIError("Invalid value for the field '%s'" % filter_['field'])
if isinstance(filter_['value'], list) and filter_['op'].upper() not in ['IN', 'NOT IN']:
raise APIError("Invalid value for the field '%s'" % filter_['field'])
if filter_['op'].upper() not in ['=', '==', '!=', '>', '<', '>=', '<=', 'IN', 'LIKE', 'NOT IN', 'NOT LIKE']:
raise APIError("Invalid operator for the field '%s'" % filter_['field'])
if 'case_sensitive' in filter_ and not isinstance(filter_['case_sensitive'], bool):
raise APIError("case_sensitive must be a boolean")
# SELECT
statement = '''SELECT * FROM {}'''.format(table)
# WHERE
bindings = []
conditions = []
for filter_ in filters:
case_sensitive = False if 'case_sensitive' not in filter_ else filter_['case_sensitive']
if filter_['op'] == 'LIKE' and case_sensitive == False:
filter_['field'] = '''UPPER({})'''.format(filter_['field'])
filter_['value'] = filter_['value'].upper()
marker = value_to_marker(filter_['value'])
conditions.append('''{} {} {}'''.format(filter_['field'], filter_['op'], marker))
if isinstance(filter_['value'], list):
bindings += filter_['value']
else:
bindings.append(filter_['value'])
# AND filters
more_conditions = []
if table not in ['balances', 'order_matches', 'bet_matches']:
if start_block != None:
more_conditions.append('''block_index >= ?''')
bindings.append(start_block)
if end_block != None:
more_conditions.append('''block_index <= ?''')
bindings.append(end_block)
elif table in ['order_matches', 'bet_matches']:
if start_block != None:
more_conditions.append('''tx0_block_index >= ?''')
bindings.append(start_block)
if end_block != None:
more_conditions.append('''tx1_block_index <= ?''')
bindings.append(end_block)
# status
if isinstance(status, list) and len(status) > 0:
more_conditions.append('''status IN {}'''.format(value_to_marker(status)))
bindings += status
elif isinstance(status, str) and status != '':
more_conditions.append('''status == ?''')
bindings.append(status)
# legacy filters
if not show_expired and table == 'orders':
#Ignore BTC orders one block early.
expire_index = util.CURRENT_BLOCK_INDEX + 1
more_conditions.append('''((give_asset == ? AND expire_index > ?) OR give_asset != ?)''')
bindings += [config.BTC, expire_index, config.BTC]
if (len(conditions) + len(more_conditions)) > 0:
statement += ''' WHERE'''
all_conditions = []
if len(conditions) > 0:
all_conditions.append('''({})'''.format(''' {} '''.format(filterop.upper()).join(conditions)))
if len(more_conditions) > 0:
all_conditions.append('''({})'''.format(''' AND '''.join(more_conditions)))
statement += ''' {}'''.format(''' AND '''.join(all_conditions))
# ORDER BY
if order_by != None:
statement += ''' ORDER BY {}'''.format(order_by)
if order_dir != None:
statement += ''' {}'''.format(order_dir.upper())
# LIMIT
if limit:
statement += ''' LIMIT {}'''.format(limit)
if offset:
statement += ''' OFFSET {}'''.format(offset)
return db_query(db, statement, tuple(bindings))
def compose_transaction(db, name, params,
encoding='auto',
fee_per_kb=config.DEFAULT_FEE_PER_KB,
regular_dust_size=config.DEFAULT_REGULAR_DUST_SIZE,
multisig_dust_size=config.DEFAULT_MULTISIG_DUST_SIZE,
op_return_value=config.DEFAULT_OP_RETURN_VALUE,
pubkey=None,
allow_unconfirmed_inputs=False,
fee=None,
fee_provided=0):
"""Create and return a transaction."""
# Get provided pubkeys.
if type(pubkey) == str:
provided_pubkeys = [pubkey]
elif type(pubkey) == list:
provided_pubkeys = pubkey
elif pubkey == None:
provided_pubkeys = []
else:
assert False
# Get additional pubkeys from `source` and `destination` params.
# Convert `source` and `destination` to pubkeyhash form.
for address_name in ['source', 'destination']:
if address_name in params:
address = params[address_name]
provided_pubkeys += script.extract_pubkeys(address)
params[address_name] = script.make_pubkeyhash(address)
# Check validity of collected pubkeys.
for pubkey in provided_pubkeys:
if not script.is_fully_valid(binascii.unhexlify(pubkey)):
raise script.AddressError('invalid public key: {}'.format(pubkey))
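    # Look up the message's compose() function by name and pad any of its
    # parameters the caller did not supply with None, so that optional fields
    # may be omitted from the API call.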
compose_method = sys.modules['counterpartylib.lib.messages.{}'.format(name)].compose
compose_params = inspect.getargspec(compose_method)[0]
missing_params = [p for p in compose_params if p not in params and p != 'db']
for param in missing_params:
params[param] = None
# try: # NOTE: For debugging, e.g. with `Invalid Params` error.
tx_info = compose_method(db, **params)
return transaction.construct(db, tx_info, encoding=encoding,
fee_per_kb=fee_per_kb,
regular_dust_size=regular_dust_size,
multisig_dust_size=multisig_dust_size,
op_return_value=op_return_value,
provided_pubkeys=provided_pubkeys,
allow_unconfirmed_inputs=allow_unconfirmed_inputs,
exact_fee=fee,
fee_provided=fee_provided)
# except:
# import traceback
# traceback.print_exc()
def sign_transaction(unsigned_tx_hex, private_key_wif):
"""Sign the transaction."""
    return transaction.sign_tx(unsigned_tx_hex,
                               private_key_wif=private_key_wif)
def broadcast_transaction(signed_tx_hex):
"""Broadcast a transaction."""
if not config.TESTNET and config.BROADCAST_TX_MAINNET in ['bci', 'bci-failover']:
url = "https://blockchain.info/pushtx"
params = {'tx': signed_tx_hex}
response = requests.post(url, data=params)
if response.text.lower() != 'transaction submitted' or response.status_code != 200:
if config.BROADCAST_TX_MAINNET == 'bci-failover':
return transaction.broadcast_tx(signed_tx_hex)
else:
raise APIError(response.text)
return response.text
else:
return transaction.broadcast_tx(signed_tx_hex)
def do_transaction(db, name, params, private_key_wif, **kwargs):
"""Create, sign and broadcast transaction."""
    unsigned_tx = compose_transaction(db, name, params, **kwargs)
    signed_tx = sign_transaction(unsigned_tx, private_key_wif=private_key_wif)
    return broadcast_transaction(signed_tx)
def init_api_access_log():
"""Init API logger."""
if config.API_LOG:
api_logger = logging.getLogger("tornado")
h = logging_handlers.RotatingFileHandler(config.API_LOG, 'a', API_MAX_LOG_SIZE, API_MAX_LOG_COUNT)
api_logger.setLevel(logging.INFO)
api_logger.addHandler(h)
api_logger.propagate = False
class APIStatusPoller(threading.Thread):
"""Perform regular checks on the state of the backend and the database."""
def __init__(self):
self.last_database_check = 0
threading.Thread.__init__(self)
self.stop_event = threading.Event()
def stop(self):
self.stop_event.set()
def run(self):
logger.debug('Starting API Status Poller.')
global current_api_status_code, current_api_status_response_json
db = database.get_connection(read_only=True, integrity_check=False)
while self.stop_event.is_set() != True:
try:
# Check that bitcoind is running, communicable, and caught up with the blockchain.
# Check that the database has caught up with bitcoind.
if time.time() - self.last_database_check > 10 * 60: # Ten minutes since last check.
code = 11
logger.debug('Checking backend state.')
check.backend_state()
code = 12
logger.debug('Checking database state.')
check.database_state(db, backend.getblockcount())
self.last_database_check = time.time()
except (check.BackendError, check.DatabaseError) as e:
exception_name = e.__class__.__name__
exception_text = str(e)
logger.debug("API Status Poller: %s", exception_text)
jsonrpc_response = jsonrpc.exceptions.JSONRPCServerError(message=exception_name, data=exception_text)
current_api_status_code = code
current_api_status_response_json = jsonrpc_response.json.encode()
else:
current_api_status_code = None
current_api_status_response_json = None
time.sleep(config.BACKEND_POLL_INTERVAL)
class APIServer(threading.Thread):
"""Handle JSON-RPC API calls."""
def __init__(self):
self.is_ready = False
threading.Thread.__init__(self)
self.stop_event = threading.Event()
self.ioloop = IOLoop.instance()
def stop(self):
self.ioloop.stop()
self.join()
self.stop_event.set()
def run(self):
logger.info('Starting API Server.')
db = database.get_connection(read_only=True, integrity_check=False)
app = flask.Flask(__name__)
auth = HTTPBasicAuth()
@auth.get_password
def get_pw(username):
if username == config.RPC_USER:
return config.RPC_PASSWORD
return None
######################
#READ API
# Generate dynamically get_{table} methods
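        # (one JSON-RPC method per entry in API_TABLES, e.g. get_balances or
        # get_sends, each forwarding its keyword arguments to get_rows)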
def generate_get_method(table):
def get_method(**kwargs):
try:
return get_rows(db, table=table, **kwargs)
except TypeError as e: #TODO: generalise for all API methods
raise APIError(str(e))
return get_method
for table in API_TABLES:
new_method = generate_get_method(table)
new_method.__name__ = 'get_{}'.format(table)
dispatcher.add_method(new_method)
@dispatcher.add_method
def sql(query, bindings=None):
if bindings == None:
bindings = []
return db_query(db, query, tuple(bindings))
######################
#WRITE/ACTION API
# Generate dynamically create_{transaction} and do_{transaction} methods
def generate_create_method(tx):
def split_params(**kwargs):
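                # Partition the JSON-RPC kwargs: transaction-construction
                # options listed in COMMONS_ARGS, the 'privkey' used for
                # signing, and the remaining message-specific parameters.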
transaction_args = {}
common_args = {}
private_key_wif = None
for key in kwargs:
if key in COMMONS_ARGS:
common_args[key] = kwargs[key]
elif key == 'privkey':
private_key_wif = kwargs[key]
else:
transaction_args[key] = kwargs[key]
return transaction_args, common_args, private_key_wif
def create_method(**kwargs):
try:
transaction_args, common_args, private_key_wif = split_params(**kwargs)
return compose_transaction(db, name=tx, params=transaction_args, **common_args)
except TypeError as e: #TODO: generalise for all API methods
raise APIError(str(e))
def do_method(**kwargs):
try:
transaction_args, common_args, private_key_wif = split_params(**kwargs)
return do_transaction(db, name=tx, params=transaction_args, private_key_wif=private_key_wif, **common_args)
except TypeError as e: #TODO: generalise for all API methods
raise APIError(str(e))
return create_method, do_method
for tx in API_TRANSACTIONS:
create_method, do_method = generate_create_method(tx)
create_method.__name__ = 'create_{}'.format(tx)
do_method.__name__ = 'do_{}'.format(tx)
dispatcher.add_method(create_method)
dispatcher.add_method(do_method)
@dispatcher.add_method
def sign_tx(unsigned_tx_hex, privkey):
return sign_transaction(unsigned_tx_hex, private_key_wif=privkey)
@dispatcher.add_method
def broadcast_tx(signed_tx_hex):
return broadcast_transaction(signed_tx_hex)
@dispatcher.add_method
def get_messages(block_index):
if not isinstance(block_index, int):
raise APIError("block_index must be an integer.")
cursor = db.cursor()
cursor.execute('select * from messages where block_index = ? order by message_index asc', (block_index,))
messages = cursor.fetchall()
cursor.close()
return messages
@dispatcher.add_method
def get_messages_by_index(message_indexes):
"""Get specific messages from the feed, based on the message_index.
@param message_index: A single index, or a list of one or more message indexes to retrieve.
"""
if not isinstance(message_indexes, list):
message_indexes = [message_indexes,]
for idx in message_indexes: #make sure the data is clean
if not isinstance(idx, int):
raise APIError("All items in message_indexes are not integers")
cursor = db.cursor()
cursor.execute('SELECT * FROM messages WHERE message_index IN (%s) ORDER BY message_index ASC'
% (','.join([str(x) for x in message_indexes]),))
messages = cursor.fetchall()
cursor.close()
return messages
@dispatcher.add_method
def get_xcp_supply():
return util.xcp_supply(db)
@dispatcher.add_method
def get_asset_info(assets):
if not isinstance(assets, list):
raise APIError("assets must be a list of asset names, even if it just contains one entry")
assetsInfo = []
for asset in assets:
# BTC and XCP.
if asset in [config.BTC, config.XCP]:
if asset == config.BTC:
supply = backend.get_btc_supply(normalize=False)
else:
supply = util.xcp_supply(db)
assetsInfo.append({
'asset': asset,
'owner': None,
'divisible': True,
'locked': False,
'supply': supply,
'description': '',
'issuer': None
})
continue
# User‐created asset.
cursor = db.cursor()
issuances = list(cursor.execute('''SELECT * FROM issuances WHERE (status = ? AND asset = ?) ORDER BY block_index ASC''', ('valid', asset)))
cursor.close()
if not issuances:
continue #asset not found, most likely
else:
last_issuance = issuances[-1]
locked = False
for e in issuances:
if e['locked']: locked = True
assetsInfo.append({
'asset': asset,
'owner': last_issuance['issuer'],
'divisible': bool(last_issuance['divisible']),
'locked': locked,
'supply': util.asset_supply(db, asset),
'description': last_issuance['description'],
'issuer': last_issuance['issuer']})
return assetsInfo
@dispatcher.add_method
def get_block_info(block_index):
assert isinstance(block_index, int)
cursor = db.cursor()
cursor.execute('''SELECT * FROM blocks WHERE block_index = ?''', (block_index,))
blocks = list(cursor)
if len(blocks) == 1:
block = blocks[0]
elif len(blocks) == 0:
raise exceptions.DatabaseError('No blocks found.')
else:
assert False
cursor.close()
return block
@dispatcher.add_method
def get_blocks(block_indexes):
"""fetches block info and messages for the specified block indexes"""
if not isinstance(block_indexes, (list, tuple)):
raise APIError("block_indexes must be a list of integers.")
if len(block_indexes) >= 250:
raise APIError("can only specify up to 250 indexes at a time.")
block_indexes_str = ','.join([str(x) for x in block_indexes])
cursor = db.cursor()
cursor.execute('SELECT * FROM blocks WHERE block_index IN (%s) ORDER BY block_index ASC'
% (block_indexes_str,))
blocks = cursor.fetchall()
cursor.execute('SELECT * FROM messages WHERE block_index IN (%s) ORDER BY block_index ASC, message_index ASC'
% (block_indexes_str,))
messages = collections.deque(cursor.fetchall())
for block in blocks:
# messages_in_block = []
block['_messages'] = []
while len(messages) and messages[0]['block_index'] == block['block_index']:
block['_messages'].append(messages.popleft())
assert not len(messages) #should have been cleared out
cursor.close()
return blocks
@dispatcher.add_method
def get_running_info():
latestBlockIndex = backend.getblockcount()
try:
check.database_state(db, latestBlockIndex)
except exceptions.DatabaseError:
caught_up = False
else:
caught_up = True
try:
last_block = util.CURRENT_BLOCK_INDEX
except:
last_block = None
try:
last_message = util.last_message(db)
except:
last_message = None
return {
'db_caught_up': caught_up,
'bitcoin_block_count': latestBlockIndex,
'last_block': last_block,
'last_message_index': last_message['message_index'] if last_message else -1,
'running_testnet': config.TESTNET,
'running_testcoin': config.TESTCOIN,
'version_major': config.VERSION_MAJOR,
'version_minor': config.VERSION_MINOR,
'version_revision': config.VERSION_REVISION
}
@dispatcher.add_method
def get_element_counts():
counts = {}
cursor = db.cursor()
for element in ['transactions', 'blocks', 'debits', 'credits', 'balances', 'sends', 'orders',
'order_matches', 'btcpays', 'issuances', 'broadcasts', 'bets', 'bet_matches', 'dividends',
'burns', 'cancels', 'order_expirations', 'bet_expirations', 'order_match_expirations',
'bet_match_expirations', 'messages']:
cursor.execute("SELECT COUNT(*) AS count FROM %s" % element)
count_list = cursor.fetchall()
assert len(count_list) == 1
counts[element] = count_list[0]['count']
cursor.close()
return counts
@dispatcher.add_method
def get_asset_names():
cursor = db.cursor()
names = [row['asset'] for row in cursor.execute("SELECT DISTINCT asset FROM issuances WHERE status = 'valid' ORDER BY asset ASC")]
cursor.close()
return names
@dispatcher.add_method
def get_holder_count(asset):
holders = util.holders(db, asset)
addresses = []
for holder in holders:
addresses.append(holder['address'])
return {asset: len(set(addresses))}
@dispatcher.add_method
def get_holders(asset):
holders = util.holders(db, asset)
return holders
@dispatcher.add_method
def search_raw_transactions(address):
return backend.searchrawtransactions(address)
@dispatcher.add_method
def get_unspent_txouts(address, return_confirmed=False):
result = backend.get_unspent_txouts(address, return_confirmed=return_confirmed)
if return_confirmed:
return {'all': result[0], 'confirmed': result[1]}
else:
return result
@dispatcher.add_method
def get_tx_info(tx_hex):
source, destination, btc_amount, fee, data = blocks.get_tx_info(tx_hex)
return source, destination, btc_amount, fee, util.hexlify(data)
@dispatcher.add_method
def unpack(data_hex):
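            # The first four bytes of the payload encode the message type ID;
            # the remainder is the type-specific message body.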
data = binascii.unhexlify(data_hex)
message_type_id = struct.unpack(config.TXTYPE_FORMAT, data[:4])[0]
message = data[4:]
# TODO: This works for precisely those messages for which
# `unpack()` is defined.
for message_type in API_TRANSACTIONS:
if message_type_id == sys.modules['lib.messages.{}'.format(message_type)].ID:
unpack_method = sys.modules['lib.messages.{}'.format(message_type)].unpack
unpacked = unpack_method(db, message, util.CURRENT_BLOCK_INDEX)
return message_type_id, unpacked
@dispatcher.add_method
def search_pubkey(pubkeyhash, provided_pubkeys=None):
return backend.pubkeyhash_to_pubkey(pubkeyhash, provided_pubkeys=provided_pubkeys)
def _set_cors_headers(response):
if config.RPC_ALLOW_CORS:
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'GET, POST, OPTIONS'
response.headers['Access-Control-Allow-Headers'] = 'DNT,X-Mx-ReqToken,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'
@app.route('/', methods=["OPTIONS",])
@app.route('/api/', methods=["OPTIONS",])
def handle_options():
response = flask.Response('', 204)
_set_cors_headers(response)
return response
@app.route('/', methods=["POST",])
@app.route('/api/', methods=["POST",])
@auth.login_required
def handle_post():
try:
request_json = flask.request.get_data().decode('utf-8')
request_data = json.loads(request_json)
assert 'id' in request_data and request_data['jsonrpc'] == "2.0" and request_data['method']
# params may be omitted
except:
obj_error = jsonrpc.exceptions.JSONRPCInvalidRequest(data="Invalid JSON-RPC 2.0 request format")
return flask.Response(obj_error.json.encode(), 200, mimetype='application/json')
#only arguments passed as a dict are supported
if request_data.get('params', None) and not isinstance(request_data['params'], dict):
obj_error = jsonrpc.exceptions.JSONRPCInvalidRequest(
data='Arguments must be passed as a JSON object (list of unnamed arguments not supported)')
return flask.Response(obj_error.json.encode(), 200, mimetype='application/json')
#return an error if API fails checks
if not config.FORCE and current_api_status_code:
return flask.Response(current_api_status_response_json, 200, mimetype='application/json')
jsonrpc_response = jsonrpc.JSONRPCResponseManager.handle(request_json, dispatcher)
response = flask.Response(jsonrpc_response.json.encode(), 200, mimetype='application/json')
_set_cors_headers(response)
return response
init_api_access_log()
http_server = HTTPServer(WSGIContainer(app), xheaders=True)
try:
http_server.listen(config.RPC_PORT, address=config.RPC_HOST)
self.is_ready = True
self.ioloop.start()
except OSError:
raise APIError("Cannot start the API subsystem. Is {} already running, or is something else listening on port {}?".format(config.XCP_CLIENT, config.RPC_PORT))
db.close()
http_server.stop()
self.ioloop.close()
return
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
the-stack_0_1481 | from datetime import date, datetime
from functools import reduce
from typing import Any, Union
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
import polars as pl
from polars import testing
from polars.datatypes import Float64, Int32, Int64, UInt32, UInt64
def _getattr_multi(obj: object, op: str) -> Any:
""" "
Allows `op` to be multiple layers deep, i.e. op="str.lengths" will mean we first
get the attribute "str", and then the attribute "lengths"
"""
op_list = op.split(".")
return reduce(lambda o, m: getattr(o, m), op_list, obj)
def verify_series_and_expr_api(
input: pl.Series, expected: pl.Series, op: str, *args: Any, **kwargs: Any
) -> None:
"""
Small helper function to test element-wise functions for both the series and expressions api.
Examples
--------
>>> s = pl.Series([1, 3, 2])
>>> expected = pl.Series([1, 2, 3])
>>> verify_series_and_expr_api(s, expected, "sort")
"""
expr = _getattr_multi(pl.col("*"), op)(*args, **kwargs)
result_expr: pl.Series = input.to_frame().select(expr)[:, 0] # type: ignore
result_series = _getattr_multi(input, op)(*args, **kwargs)
testing.assert_series_equal(result_expr, expected)
testing.assert_series_equal(result_series, expected)
def test_cum_agg() -> None:
s = pl.Series("a", [1, 2, 3, 2])
verify_series_and_expr_api(s, pl.Series("a", [1, 3, 6, 8]), "cumsum")
verify_series_and_expr_api(s, pl.Series("a", [1, 1, 1, 1]), "cummin")
verify_series_and_expr_api(s, pl.Series("a", [1, 2, 3, 3]), "cummax")
verify_series_and_expr_api(s, pl.Series("a", [1, 2, 6, 12]), "cumprod")
def test_init_inputs() -> None:
# Good inputs
pl.Series("a", [1, 2])
pl.Series("a", values=[1, 2])
pl.Series(name="a", values=[1, 2])
pl.Series(values=[1, 2], name="a")
assert pl.Series([1, 2]).dtype == pl.Int64
assert pl.Series(values=[1, 2]).dtype == pl.Int64
assert pl.Series("a").dtype == pl.Float32 # f32 type used in case of no data
assert pl.Series().dtype == pl.Float32
assert pl.Series(values=[True, False]).dtype == pl.Boolean
assert pl.Series(values=np.array([True, False])).dtype == pl.Boolean
assert pl.Series(values=np.array(["foo", "bar"])).dtype == pl.Utf8
assert pl.Series(values=["foo", "bar"]).dtype == pl.Utf8
assert pl.Series("a", [pl.Series([1, 2, 4]), pl.Series([3, 2, 1])]).dtype == pl.List
assert pl.Series(pd.Series([1, 2])).dtype == pl.Int64
assert pl.Series("a", [10000, 20000, 30000], dtype=pl.Time).dtype == pl.Time
# 2d numpy array
res = pl.Series(name="a", values=np.array([[1, 2], [3, 4]]))
assert all(res[0] == np.array([1, 2]))
assert all(res[1] == np.array([3, 4]))
assert (
pl.Series(values=np.array([["foo", "bar"], ["foo2", "bar2"]])).dtype
== pl.Object
)
# Bad inputs
with pytest.raises(ValueError):
pl.Series([1, 2, 3], [1, 2, 3])
with pytest.raises(ValueError):
pl.Series({"a": [1, 2, 3]})
with pytest.raises(OverflowError):
pl.Series("bigint", [2 ** 64])
def test_concat() -> None:
s = pl.Series("a", [2, 1, 3])
assert pl.concat([s, s]).len() == 6
# check if s remains unchanged
assert s.len() == 3
def test_to_frame() -> None:
s = pl.Series([1, 2])
assert s.to_frame().shape == (2, 1)
def test_bitwise_ops() -> None:
a = pl.Series([True, False, True])
b = pl.Series([False, True, True])
assert (a & b).series_equal(pl.Series([False, False, True]))
assert (a | b).series_equal(pl.Series([True, True, True]))
assert (a ^ b).series_equal(pl.Series([True, True, False]))
assert (~a).series_equal(pl.Series([False, True, False]))
# rand/rxor/ror we trigger by casting the left hand to a list here in the test
# Note that the type annotations only allow Series to be passed in, but there is
# specific code to deal with non-Series inputs.
assert (True & a).series_equal(pl.Series([True, False, True])) # type: ignore
assert (True | a).series_equal(pl.Series([True, True, True])) # type: ignore
assert (True ^ a).series_equal(pl.Series([False, True, False])) # type: ignore
def test_bitwise_floats_invert() -> None:
a = pl.Series([2.0, 3.0, 0.0])
assert ~a == NotImplemented
def test_equality() -> None:
a = pl.Series("a", [1, 2])
b = a
cmp = a == b
assert isinstance(cmp, pl.Series)
assert cmp.sum() == 2
assert (a != b).sum() == 0
assert (a >= b).sum() == 2
assert (a <= b).sum() == 2
assert (a > b).sum() == 0
assert (a < b).sum() == 0
assert a.sum() == 3
assert a.series_equal(b)
a = pl.Series("name", ["ham", "foo", "bar"])
testing.assert_series_equal((a == "ham"), pl.Series("name", [True, False, False]))
def test_agg() -> None:
series = pl.Series("a", [1, 2])
assert series.mean() == 1.5
assert series.min() == 1
assert series.max() == 2
@pytest.mark.parametrize(
"s", [pl.Series([1, 2], dtype=Int64), pl.Series([1, 2], dtype=Float64)]
)
def test_arithmetic(s: pl.Series) -> None:
a = s
b = s
assert ((a * b) == [1, 4]).sum() == 2
assert ((a / b) == [1.0, 1.0]).sum() == 2
assert ((a + b) == [2, 4]).sum() == 2
assert ((a - b) == [0, 0]).sum() == 2
assert ((a + 1) == [2, 3]).sum() == 2
assert ((a - 1) == [0, 1]).sum() == 2
assert ((a / 1) == [1.0, 2.0]).sum() == 2
assert ((a // 2) == [0, 1]).sum() == 2
assert ((a * 2) == [2, 4]).sum() == 2
assert ((1 + a) == [2, 3]).sum() == 2
assert ((1 - a) == [0, -1]).sum() == 2
assert ((1 * a) == [1, 2]).sum() == 2
# integer division
testing.assert_series_equal(1 / a, pl.Series([1.0, 0.5])) # type: ignore
if s.dtype == Int64:
expected = pl.Series([1, 0])
else:
expected = pl.Series([1.0, 0.5])
testing.assert_series_equal(1 // a, expected)
# modulo
assert ((1 % a) == [0, 1]).sum() == 2
assert ((a % 1) == [0, 0]).sum() == 2
# negate
assert (-a == [-1, -2]).sum() == 2
# wrong dtypes in rhs operands
assert ((1.0 - a) == [0, -1]).sum() == 2
assert ((1.0 / a) == [1.0, 0.5]).sum() == 2
assert ((1.0 * a) == [1, 2]).sum() == 2
assert ((1.0 + a) == [2, 3]).sum() == 2
assert ((1.0 % a) == [0, 1]).sum() == 2
a = pl.Series("a", [datetime(2021, 1, 1)])
with pytest.raises(ValueError):
a // 2
with pytest.raises(ValueError):
a / 2
with pytest.raises(ValueError):
a * 2
with pytest.raises(ValueError):
a % 2
with pytest.raises(ValueError):
a ** 2
with pytest.raises(ValueError):
2 / a
with pytest.raises(ValueError):
2 // a
with pytest.raises(ValueError):
2 * a
with pytest.raises(ValueError):
2 % a
with pytest.raises(ValueError):
2 ** a
def test_add_string() -> None:
s = pl.Series(["hello", "weird"])
result = s + " world"
testing.assert_series_equal(result, pl.Series(["hello world", "weird world"]))
def test_various() -> None:
a = pl.Series("a", [1, 2])
assert a.is_null().sum() == 0
assert a.name == "a"
a.rename("b", in_place=True)
assert a.name == "b"
assert a.len() == 2
assert len(a) == 2
b = a.slice(1, 1)
assert b.len() == 1
assert b.series_equal(pl.Series("b", [2]))
a.append(b)
assert a.series_equal(pl.Series("b", [1, 2, 2]))
a = pl.Series("a", range(20))
assert a.head(5).len() == 5
assert a.tail(5).len() == 5
assert a.head(5) != a.tail(5)
a = pl.Series("a", [2, 1, 4])
a.sort(in_place=True)
assert a.series_equal(pl.Series("a", [1, 2, 4]))
a = pl.Series("a", [2, 1, 1, 4, 4, 4])
testing.assert_series_equal(a.arg_unique(), pl.Series("a", [0, 1, 3], dtype=UInt32))
assert a.take([2, 3]).series_equal(pl.Series("a", [1, 4]))
assert a.is_numeric()
a = pl.Series("bool", [True, False])
assert not a.is_numeric()
def test_filter_ops() -> None:
a = pl.Series("a", range(20))
assert a[a > 1].len() == 18
assert a[a < 1].len() == 1
assert a[a <= 1].len() == 2
assert a[a >= 1].len() == 19
assert a[a == 1].len() == 1
assert a[a != 1].len() == 19
def test_cast() -> None:
a = pl.Series("a", range(20))
assert a.cast(pl.Float32).dtype == pl.Float32
assert a.cast(pl.Float64).dtype == pl.Float64
assert a.cast(pl.Int32).dtype == pl.Int32
assert a.cast(pl.UInt32).dtype == pl.UInt32
assert a.cast(pl.Datetime).dtype == pl.Datetime
assert a.cast(pl.Date).dtype == pl.Date
def test_to_python() -> None:
a = pl.Series("a", range(20))
b = a.to_list()
assert isinstance(b, list)
assert len(b) == 20
b = a.to_list(use_pyarrow=True)
assert isinstance(b, list)
assert len(b) == 20
a = pl.Series("a", [1, None, 2])
assert a.null_count() == 1
assert a.to_list() == [1, None, 2]
def test_sort() -> None:
a = pl.Series("a", [2, 1, 3])
testing.assert_series_equal(a.sort(), pl.Series("a", [1, 2, 3]))
testing.assert_series_equal(a.sort(reverse=True), pl.Series("a", [3, 2, 1]))
def test_rechunk() -> None:
a = pl.Series("a", [1, 2, 3])
b = pl.Series("b", [4, 5, 6])
a.append(b)
assert a.n_chunks() == 2
assert a.rechunk(in_place=False).n_chunks() == 1
a.rechunk(in_place=True)
assert a.n_chunks() == 1
def test_indexing() -> None:
a = pl.Series("a", [1, 2, None])
assert a[1] == 2
assert a[2] is None
b = pl.Series("b", [True, False])
assert b[0]
assert not b[1]
a = pl.Series("a", ["a", None])
assert a[0] == "a"
assert a[1] is None
a = pl.Series("a", [0.1, None])
assert a[0] == 0.1
assert a[1] is None
def test_arrow() -> None:
a = pl.Series("a", [1, 2, 3, None])
out = a.to_arrow()
assert out == pa.array([1, 2, 3, None])
a = pa.array(["foo", "bar"], pa.dictionary(pa.int32(), pa.utf8()))
s = pl.Series("a", a)
assert s.dtype == pl.Categorical
assert (
pl.from_arrow(pa.array([["foo"], ["foo", "bar"]], pa.list_(pa.utf8()))).dtype
== pl.List
)
def test_view() -> None:
a = pl.Series("a", [1.0, 2.0, 3.0])
assert isinstance(a.view(), np.ndarray)
assert np.all(a.view() == np.array([1, 2, 3]))
def test_ufunc() -> None:
a = pl.Series("a", [1.0, 2.0, 3.0, 4.0])
b = np.multiply(a, 4)
assert isinstance(b, pl.Series)
assert b == [4, 8, 12, 16]
# test if null bitmask is preserved
a = pl.Series("a", [1.0, None, 3.0])
b = np.exp(a)
assert b.null_count() == 1
# test if it works with chunked series.
a = pl.Series("a", [1.0, None, 3.0])
b = pl.Series("b", [4.0, 5.0, None])
a.append(b)
assert a.n_chunks() == 2
c = np.multiply(a, 3)
testing.assert_series_equal(c, pl.Series("a", [3.0, None, 9.0, 12.0, 15.0, None]))
def test_get() -> None:
a = pl.Series("a", [1, 2, 3])
assert a[0] == 1
assert a[:2] == [1, 2]
    assert a[range(2)] == [1, 2]
    assert a[range(0, 4, 2)] == [1, 3]
def test_set() -> None:
a = pl.Series("a", [True, False, True])
mask = pl.Series("msk", [True, False, True])
a[mask] = False
testing.assert_series_equal(a, pl.Series("", [False] * 3))
def test_set_value_as_list_fail() -> None:
""" " it is not allowed to use a list to set values"""
s = pl.Series("a", [1, 2, 3])
with pytest.raises(ValueError):
s[[0, 1]] = [4, 5]
@pytest.mark.parametrize("key", [True, False, 1.0])
def test_set_invalid_key(key: Any) -> None:
s = pl.Series("a", [1, 2, 3])
with pytest.raises(ValueError):
s[key] = 1
@pytest.mark.parametrize(
"key",
[
pl.Series([False, True, True]),
pl.Series([1, 2], dtype=UInt32),
pl.Series([1, 2], dtype=UInt64),
],
)
def test_set_key_series(key: pl.Series) -> None:
"""only UInt32/UInt64/bool are allowed"""
s = pl.Series("a", [1, 2, 3])
s[key] = 4
testing.assert_series_equal(s, pl.Series("a", [1, 4, 4]))
def test_set_np_array_boolean_mask() -> None:
a = pl.Series("a", [1, 2, 3])
mask = np.array([True, False, True])
a[mask] = 4
testing.assert_series_equal(a, pl.Series("a", [4, 2, 4]))
@pytest.mark.parametrize("dtype", [np.int32, np.int64, np.uint32, np.uint64])
def test_set_np_array(dtype: Any) -> None:
a = pl.Series("a", [1, 2, 3])
idx = np.array([0, 2], dtype=dtype)
a[idx] = 4
testing.assert_series_equal(a, pl.Series("a", [4, 2, 4]))
@pytest.mark.parametrize("idx", [[0, 2], (0, 2)])
def test_set_list_and_tuple(idx: Union[list, tuple]) -> None:
a = pl.Series("a", [1, 2, 3])
a[idx] = 4
testing.assert_series_equal(a, pl.Series("a", [4, 2, 4]))
def test_fill_null() -> None:
a = pl.Series("a", [1, 2, None])
verify_series_and_expr_api(a, pl.Series("a", [1, 2, 2]), "fill_null", "forward")
verify_series_and_expr_api(
a, pl.Series("a", [1, 2, 14], dtype=Int64), "fill_null", 14
)
def test_apply() -> None:
a = pl.Series("a", [1, 2, None])
b = a.apply(lambda x: x ** 2)
assert b == [1, 4, None]
a = pl.Series("a", ["foo", "bar", None])
b = a.apply(lambda x: x + "py")
assert b == ["foopy", "barpy", None]
b = a.apply(lambda x: len(x), return_dtype=pl.Int32)
assert b == [3, 3, None]
b = a.apply(lambda x: len(x))
assert b == [3, 3, None]
# just check that it runs (somehow problem with conditional compilation)
a = pl.Series("a", [2, 2, 3]).cast(pl.Datetime)
a.apply(lambda x: x)
a = pl.Series("a", [2, 2, 3]).cast(pl.Date)
a.apply(lambda x: x)
def test_shift() -> None:
a = pl.Series("a", [1, 2, 3])
testing.assert_series_equal(a.shift(1), pl.Series("a", [None, 1, 2]))
testing.assert_series_equal(a.shift(-1), pl.Series("a", [2, 3, None]))
testing.assert_series_equal(a.shift(-2), pl.Series("a", [3, None, None]))
testing.assert_series_equal(a.shift_and_fill(-1, 10), pl.Series("a", [2, 3, 10]))
def test_rolling() -> None:
a = pl.Series("a", [1, 2, 3, 2, 1])
testing.assert_series_equal(a.rolling_min(2), pl.Series("a", [None, 1, 2, 2, 1]))
testing.assert_series_equal(a.rolling_max(2), pl.Series("a", [None, 2, 3, 3, 2]))
testing.assert_series_equal(a.rolling_sum(2), pl.Series("a", [None, 3, 5, 5, 3]))
testing.assert_series_equal(
a.rolling_mean(2), pl.Series("a", [None, 1.5, 2.5, 2.5, 1.5])
)
assert a.rolling_std(2).to_list()[1] == pytest.approx(0.7071067811865476)
assert a.rolling_var(2).to_list()[1] == pytest.approx(0.5)
testing.assert_series_equal(
a.rolling_median(4), pl.Series("a", [None, None, None, 2, 2], dtype=Float64)
)
testing.assert_series_equal(
a.rolling_quantile(0, "nearest", 3),
pl.Series("a", [None, None, 1, 2, 1], dtype=Float64),
)
testing.assert_series_equal(
a.rolling_quantile(0, "lower", 3),
pl.Series("a", [None, None, 1, 2, 1], dtype=Float64),
)
testing.assert_series_equal(
a.rolling_quantile(0, "higher", 3),
pl.Series("a", [None, None, 1, 2, 1], dtype=Float64),
)
assert a.rolling_skew(4).null_count() == 3
def test_object() -> None:
vals = [[12], "foo", 9]
a = pl.Series("a", vals)
assert a.dtype == pl.Object
assert a.to_list() == vals
assert a[1] == "foo"
def test_repeat() -> None:
s = pl.repeat(1, 10)
assert s.dtype == pl.Int64
assert s.len() == 10
s = pl.repeat("foo", 10)
assert s.dtype == pl.Utf8
assert s.len() == 10
s = pl.repeat(1.0, 5)
assert s.dtype == pl.Float64
assert s.len() == 5
assert s == [1.0, 1.0, 1.0, 1.0, 1.0]
s = pl.repeat(True, 5)
assert s.dtype == pl.Boolean
assert s.len() == 5
def test_median() -> None:
s = pl.Series([1, 2, 3])
assert s.median() == 2
def test_quantile() -> None:
s = pl.Series([1, 2, 3])
assert s.quantile(0.5, "nearest") == 2
assert s.quantile(0.5, "lower") == 2
assert s.quantile(0.5, "higher") == 2
def test_shape() -> None:
s = pl.Series([1, 2, 3])
assert s.shape == (3,)
@pytest.mark.parametrize("arrow_available", [True, False])
def test_create_list_series(arrow_available: bool) -> None:
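    # Toggle the pyarrow-backed construction path so both code paths are exercised.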
pl.internals.series._PYARROW_AVAILABLE = arrow_available
a = [[1, 2], None, [None, 3]]
s = pl.Series("", a)
assert s.to_list() == a
def test_iter() -> None:
s = pl.Series("", [1, 2, 3])
itr = s.__iter__()
assert itr.__next__() == 1
assert itr.__next__() == 2
assert itr.__next__() == 3
assert sum(s) == 6
def test_empty() -> None:
a = pl.Series(dtype=pl.Int8)
assert a.dtype == pl.Int8
a = pl.Series()
assert a.dtype == pl.Float32
a = pl.Series("name", [])
assert a.dtype == pl.Float32
a = pl.Series(values=(), dtype=pl.Int8)
assert a.dtype == pl.Int8
def test_describe() -> None:
num_s = pl.Series([1, 2, 3])
float_s = pl.Series([1.3, 4.6, 8.9])
str_s = pl.Series(["abc", "pqr", "xyz"])
bool_s = pl.Series([True, False, None, True, True])
date_s = pl.Series([date(2021, 1, 1), date(2021, 1, 2), date(2021, 1, 3)])
empty_s = pl.Series(np.empty(0))
assert num_s.describe().shape == (6, 2)
assert float_s.describe().shape == (6, 2)
assert str_s.describe().shape == (3, 2)
assert bool_s.describe().shape == (3, 2)
assert date_s.describe().shape == (4, 2)
with pytest.raises(ValueError):
assert empty_s.describe()
def test_is_in() -> None:
s = pl.Series([1, 2, 3])
out = s.is_in([1, 2])
assert out == [True, True, False]
df = pl.DataFrame({"a": [1.0, 2.0], "b": [1, 4]})
assert df[pl.col("a").is_in(pl.col("b")).alias("mask")]["mask"] == [True, False]
def test_str_slice() -> None:
df = pl.DataFrame({"a": ["foobar", "barfoo"]})
assert df["a"].str.slice(-3) == ["bar", "foo"]
assert df[[pl.col("a").str.slice(2, 4)]]["a"] == ["obar", "rfoo"]
def test_arange_expr() -> None:
df = pl.DataFrame({"a": ["foobar", "barfoo"]})
out = df[[pl.arange(0, pl.col("a").count() * 10)]]
assert out.shape == (20, 1)
assert out.select_at_idx(0)[-1] == 19
# eager arange
out2 = pl.arange(0, 10, 2, eager=True)
    assert out2 == [0, 2, 4, 6, 8]
out3 = pl.arange(pl.Series([0, 19]), pl.Series([3, 39]), step=2, eager=True)
assert out3.dtype == pl.List # type: ignore
assert out3[0].to_list() == [0, 2] # type: ignore
def test_round() -> None:
a = pl.Series("f", [1.003, 2.003])
b = a.round(2)
assert b == [1.00, 2.00]
def test_apply_list_out() -> None:
s = pl.Series("count", [3, 2, 2])
out = s.apply(lambda val: pl.repeat(val, val))
assert out[0] == [3, 3, 3]
assert out[1] == [2, 2]
assert out[2] == [2, 2]
def test_is_first() -> None:
s = pl.Series("", [1, 1, 2])
assert s.is_first() == [True, False, True]
def test_reinterpret() -> None:
s = pl.Series("a", [1, 1, 2], dtype=pl.UInt64)
assert s.reinterpret(signed=True).dtype == pl.Int64
df = pl.DataFrame([s])
assert df[[pl.col("a").reinterpret(signed=True)]]["a"].dtype == pl.Int64
def test_mode() -> None:
s = pl.Series("a", [1, 1, 2])
assert s.mode() == [1]
df = pl.DataFrame([s])
assert df[[pl.col("a").mode()]]["a"] == [1]
def test_jsonpath_single() -> None:
s = pl.Series(['{"a":"1"}', None, '{"a":2}', '{"a":2.1}', '{"a":true}'])
expected = pl.Series(
[
"1",
None,
"2",
"2.1",
"true",
]
)
verify_series_and_expr_api(s, expected, "str.json_path_match", "$.a")
def test_extract_regex() -> None:
s = pl.Series(
[
"http://vote.com/ballon_dor?candidate=messi&ref=polars",
"http://vote.com/ballon_dor?candidat=jorginho&ref=polars",
"http://vote.com/ballon_dor?candidate=ronaldo&ref=polars",
]
)
expected = pl.Series(
[
"messi",
None,
"ronaldo",
]
)
verify_series_and_expr_api(s, expected, "str.extract", r"candidate=(\w+)", 1)
def test_rank_dispatch() -> None:
s = pl.Series("a", [1, 2, 3, 2, 2, 3, 0])
testing.assert_series_equal(
s.rank("dense"), pl.Series("a", [2, 3, 4, 3, 3, 4, 1], dtype=UInt32)
)
df = pl.DataFrame([s])
assert df.select(pl.col("a").rank("dense"))["a"] == [2, 3, 4, 3, 3, 4, 1]
testing.assert_series_equal(
s.rank("dense", reverse=True),
pl.Series("a", [3, 2, 1, 2, 2, 1, 4], dtype=UInt32),
)
def test_diff_dispatch() -> None:
s = pl.Series("a", [1, 2, 3, 2, 2, 3, 0])
expected = pl.Series("a", [1, 1, -1, 0, 1, -3])
testing.assert_series_equal(s.diff(null_behavior="drop"), expected)
df = pl.DataFrame([s])
testing.assert_series_equal(
df.select(pl.col("a").diff())["a"], pl.Series("a", [None, 1, 1, -1, 0, 1, -3])
)
def test_pct_change_dispatch() -> None:
s = pl.Series("a", [1, 2, 4, 8, 16, 32, 64])
expected = pl.Series("a", [None, None, float("inf"), 3.0, 3.0, 3.0, 3.0])
verify_series_and_expr_api(s, expected, "pct_change", 2)
def test_skew_dispatch() -> None:
s = pl.Series("a", [1, 2, 3, 2, 2, 3, 0])
assert s.skew(True) == pytest.approx(-0.5953924651018018)
assert s.skew(False) == pytest.approx(-0.7717168360221258)
df = pl.DataFrame([s])
assert np.isclose(df.select(pl.col("a").skew(False))["a"][0], -0.7717168360221258)
def test_kurtosis_dispatch() -> None:
s = pl.Series("a", [1, 2, 3, 2, 2, 3, 0])
expected = -0.6406250000000004
assert s.kurtosis() == pytest.approx(expected)
df = pl.DataFrame([s])
assert np.isclose(df.select(pl.col("a").kurtosis())["a"][0], expected)
def test_arr_lengths_dispatch() -> None:
s = pl.Series("a", [[1, 2], [1, 2, 3]])
testing.assert_series_equal(s.arr.lengths(), pl.Series("a", [2, 3], dtype=UInt32))
df = pl.DataFrame([s])
testing.assert_series_equal(
df.select(pl.col("a").arr.lengths())["a"], pl.Series("a", [2, 3], dtype=UInt32)
)
def test_arr_arithmetic() -> None:
s = pl.Series("a", [[1, 2], [1, 2, 3]])
testing.assert_series_equal(s.arr.sum(), pl.Series("a", [3, 6]))
testing.assert_series_equal(s.arr.mean(), pl.Series("a", [1.5, 2.0]))
testing.assert_series_equal(s.arr.max(), pl.Series("a", [2, 3]))
testing.assert_series_equal(s.arr.min(), pl.Series("a", [1, 1]))
def test_arr_ordering() -> None:
s = pl.Series("a", [[2, 1], [1, 3, 2]])
testing.assert_series_equal(s.arr.sort(), pl.Series("a", [[1, 2], [1, 2, 3]]))
testing.assert_series_equal(s.arr.reverse(), pl.Series("a", [[1, 2], [2, 3, 1]]))
def test_arr_unique() -> None:
s = pl.Series("a", [[2, 1], [1, 2, 2]])
result = s.arr.unique()
assert len(result) == 2
assert sorted(result[0]) == [1, 2]
assert sorted(result[1]) == [1, 2]
def test_sqrt_dispatch() -> None:
s = pl.Series("a", [1, 2])
testing.assert_series_equal(s.sqrt(), pl.Series("a", [1.0, np.sqrt(2)]))
df = pl.DataFrame([s])
testing.assert_series_equal(
df.select(pl.col("a").sqrt())["a"], pl.Series("a", [1.0, np.sqrt(2)])
)
def test_range() -> None:
s = pl.Series("a", [1, 2, 3, 2, 2, 3, 0])
assert s[2:5].series_equal(s[range(2, 5)])
df = pl.DataFrame([s])
assert df[2:5].frame_equal(df[range(2, 5)])
def test_strict_cast() -> None:
with pytest.raises(RuntimeError):
pl.Series("a", [2 ** 16]).cast(dtype=pl.Int16, strict=True)
with pytest.raises(RuntimeError):
pl.DataFrame({"a": [2 ** 16]}).select([pl.col("a").cast(pl.Int16, strict=True)])
def test_list_concat_dispatch() -> None:
s0 = pl.Series("a", [[1, 2]])
s1 = pl.Series("b", [[3, 4, 5]])
expected = pl.Series("a", [[1, 2, 3, 4, 5]])
out = s0.arr.concat([s1])
assert out.series_equal(expected)
out = s0.arr.concat(s1)
assert out.series_equal(expected)
df = pl.DataFrame([s0, s1])
assert df.select(pl.concat_list(["a", "b"]).alias("a"))["a"].series_equal(expected)
assert df.select(pl.col("a").arr.concat("b").alias("a"))["a"].series_equal(expected)
assert df.select(pl.col("a").arr.concat(["b"]).alias("a"))["a"].series_equal(
expected
)
def test_floor_divide() -> None:
s = pl.Series("a", [1, 2, 3])
testing.assert_series_equal(s // 2, pl.Series("a", [0, 1, 1]))
testing.assert_series_equal(
pl.DataFrame([s]).select(pl.col("a") // 2)["a"], pl.Series("a", [0, 1, 1])
)
def test_true_divide() -> None:
s = pl.Series("a", [1, 2])
testing.assert_series_equal(s / 2, pl.Series("a", [0.5, 1.0]))
testing.assert_series_equal(
pl.DataFrame([s]).select(pl.col("a") / 2)["a"], pl.Series("a", [0.5, 1.0])
)
# rtruediv
testing.assert_series_equal(
pl.DataFrame([s]).select(2 / pl.col("a"))["literal"],
pl.Series("literal", [2.0, 1.0]),
)
# https://github.com/pola-rs/polars/issues/1369
vals = [3000000000, 2, 3]
foo = pl.Series(vals)
testing.assert_series_equal(foo / 1, pl.Series(vals, dtype=Float64))
testing.assert_series_equal(
pl.DataFrame({"a": vals}).select([pl.col("a") / 1])["a"],
pl.Series("a", vals, dtype=Float64),
)
def test_invalid_categorical() -> None:
s = pl.Series("cat_series", ["a", "b", "b", "c", "a"]).cast(pl.Categorical)
assert s.std() is None
assert s.var() is None
assert s.median() is None
assert s.quantile(0.5) is None
assert s.mode().to_list() == [None]
def test_bitwise() -> None:
a = pl.Series("a", [1, 2, 3])
b = pl.Series("b", [3, 4, 5])
testing.assert_series_equal(a & b, pl.Series("a", [1, 0, 1]))
testing.assert_series_equal(a | b, pl.Series("a", [3, 6, 7]))
testing.assert_series_equal(a ^ b, pl.Series("a", [2, 6, 6]))
df = pl.DataFrame([a, b])
out = df.select(
[
(pl.col("a") & pl.col("b")).alias("and"),
(pl.col("a") | pl.col("b")).alias("or"),
(pl.col("a") ^ pl.col("b")).alias("xor"),
]
)
testing.assert_series_equal(out["and"], pl.Series("and", [1, 0, 1]))
testing.assert_series_equal(out["or"], pl.Series("or", [3, 6, 7]))
testing.assert_series_equal(out["xor"], pl.Series("xor", [2, 6, 6]))
def test_to_numpy() -> None:
pl.internals.series._PYARROW_AVAILABLE = False
a = pl.Series("a", [1, 2, 3])
assert np.all(a.to_numpy() == np.array([1, 2, 3]))
a = pl.Series("a", [1, 2, None])
np.testing.assert_array_equal(a.to_numpy(), np.array([1.0, 2.0, np.nan]))
def test_from_sequences() -> None:
# test int, str, bool, flt
values = [
[[1], [None, 3]],
[["foo"], [None, "bar"]],
[[True], [None, False]],
[[1.0], [None, 3.0]],
]
for vals in values:
pl.internals.series._PYARROW_AVAILABLE = False
a = pl.Series("a", vals)
pl.internals.series._PYARROW_AVAILABLE = True
b = pl.Series("a", vals)
assert a.series_equal(b, null_equal=True)
assert a.to_list() == vals
def test_comparisons_int_series_to_float() -> None:
srs_int = pl.Series([1, 2, 3, 4])
testing.assert_series_equal(srs_int - 1.0, pl.Series([0, 1, 2, 3]))
testing.assert_series_equal(srs_int + 1.0, pl.Series([2, 3, 4, 5]))
testing.assert_series_equal(srs_int * 2.0, pl.Series([2, 4, 6, 8]))
# todo: this is inconsistent
testing.assert_series_equal(srs_int / 2.0, pl.Series([0.5, 1.0, 1.5, 2.0]))
testing.assert_series_equal(srs_int % 2.0, pl.Series([1, 0, 1, 0]))
testing.assert_series_equal(4.0 % srs_int, pl.Series([0, 0, 1, 0]))
testing.assert_series_equal(srs_int // 2.0, pl.Series([0, 1, 1, 2]))
testing.assert_series_equal(srs_int < 3.0, pl.Series([True, True, False, False]))
testing.assert_series_equal(srs_int <= 3.0, pl.Series([True, True, True, False]))
testing.assert_series_equal(srs_int > 3.0, pl.Series([False, False, False, True]))
testing.assert_series_equal(srs_int >= 3.0, pl.Series([False, False, True, True]))
testing.assert_series_equal(srs_int == 3.0, pl.Series([False, False, True, False]))
testing.assert_series_equal(srs_int - True, pl.Series([0, 1, 2, 3]))
def test_comparisons_float_series_to_int() -> None:
srs_float = pl.Series([1.0, 2.0, 3.0, 4.0])
testing.assert_series_equal(srs_float - 1, pl.Series([0.0, 1.0, 2.0, 3.0]))
testing.assert_series_equal(srs_float + 1, pl.Series([2.0, 3.0, 4.0, 5.0]))
testing.assert_series_equal(srs_float * 2, pl.Series([2.0, 4.0, 6.0, 8.0]))
testing.assert_series_equal(srs_float / 2, pl.Series([0.5, 1.0, 1.5, 2.0]))
testing.assert_series_equal(srs_float % 2, pl.Series([1.0, 0.0, 1.0, 0.0]))
testing.assert_series_equal(4 % srs_float, pl.Series([0.0, 0.0, 1.0, 0.0]))
testing.assert_series_equal(srs_float // 2, pl.Series([0.0, 1.0, 1.0, 2.0]))
testing.assert_series_equal(srs_float < 3, pl.Series([True, True, False, False]))
testing.assert_series_equal(srs_float <= 3, pl.Series([True, True, True, False]))
testing.assert_series_equal(srs_float > 3, pl.Series([False, False, False, True]))
testing.assert_series_equal(srs_float >= 3, pl.Series([False, False, True, True]))
testing.assert_series_equal(srs_float == 3, pl.Series([False, False, True, False]))
testing.assert_series_equal(srs_float - True, pl.Series([0.0, 1.0, 2.0, 3.0]))
def test_comparisons_bool_series_to_int() -> None:
srs_bool = pl.Series([True, False])
# todo: do we want this to work?
testing.assert_series_equal(srs_bool / 1, pl.Series([True, False], dtype=Float64))
with pytest.raises(TypeError, match=r"\-: 'Series' and 'int'"):
srs_bool - 1
with pytest.raises(TypeError, match=r"\+: 'Series' and 'int'"):
srs_bool + 1
with pytest.raises(TypeError, match=r"\%: 'Series' and 'int'"):
srs_bool % 2
with pytest.raises(TypeError, match=r"\*: 'Series' and 'int'"):
srs_bool * 1
with pytest.raises(
TypeError, match=r"'<' not supported between instances of 'Series' and 'int'"
):
srs_bool < 2
with pytest.raises(
TypeError, match=r"'>' not supported between instances of 'Series' and 'int'"
):
srs_bool > 2
def test_trigonometry_functions() -> None:
srs_float = pl.Series("t", [0.0, np.pi])
assert np.allclose(srs_float.sin(), np.array([0.0, 0.0]))
assert np.allclose(srs_float.cos(), np.array([1.0, -1.0]))
assert np.allclose(srs_float.tan(), np.array([0.0, -0.0]))
srs_float = pl.Series("t", [1.0, 0.0, -1])
assert np.allclose(srs_float.arcsin(), np.array([1.571, 0.0, -1.571]), atol=0.01)
assert np.allclose(srs_float.arccos(), np.array([0.0, 1.571, 3.142]), atol=0.01)
assert np.allclose(srs_float.arctan(), np.array([0.785, 0.0, -0.785]), atol=0.01)
def test_abs() -> None:
# ints
s = pl.Series([1, -2, 3, -4])
testing.assert_series_equal(s.abs(), pl.Series([1, 2, 3, 4]))
testing.assert_series_equal(np.abs(s), pl.Series([1, 2, 3, 4])) # type: ignore
# floats
s = pl.Series([1.0, -2.0, 3, -4.0])
testing.assert_series_equal(s.abs(), pl.Series([1.0, 2.0, 3.0, 4.0]))
testing.assert_series_equal(
np.abs(s), pl.Series([1.0, 2.0, 3.0, 4.0]) # type: ignore
)
testing.assert_series_equal(
pl.select(pl.lit(s).abs()).to_series(), pl.Series([1.0, 2.0, 3.0, 4.0])
)
def test_to_dummies() -> None:
s = pl.Series("a", [1, 2, 3])
result = s.to_dummies()
expected = pl.DataFrame({"a_1": [1, 0, 0], "a_2": [0, 1, 0], "a_3": [0, 0, 1]})
assert result.frame_equal(expected)
def test_value_counts() -> None:
s = pl.Series("a", [1, 2, 2, 3])
result = s.value_counts()
expected = pl.DataFrame({"a": [1, 2, 3], "counts": [1, 2, 1]})
result_sorted: pl.DataFrame = result.sort("a")
assert result_sorted.frame_equal(expected)
def test_chunk_lengths() -> None:
s = pl.Series("a", [1, 2, 2, 3])
# this is a Series with one chunk, of length 4
assert s.n_chunks() == 1
assert s.chunk_lengths() == [4]
def test_limit() -> None:
s = pl.Series("a", [1, 2, 3])
assert s.limit(2).series_equal(pl.Series("a", [1, 2]))
def test_filter() -> None:
s = pl.Series("a", [1, 2, 3])
mask = pl.Series("", [True, False, True])
assert s.filter(mask).series_equal(pl.Series("a", [1, 3]))
assert s.filter([True, False, True]).series_equal(pl.Series("a", [1, 3]))
def test_take_every() -> None:
s = pl.Series("a", [1, 2, 3, 4])
assert s.take_every(2).series_equal(pl.Series("a", [1, 3]))
def test_argsort() -> None:
s = pl.Series("a", [5, 3, 4, 1, 2])
expected = pl.Series("a", [3, 4, 1, 2, 0], dtype=UInt32)
verify_series_and_expr_api(s, expected, "argsort")
expected_reverse = pl.Series("a", [0, 2, 1, 4, 3], dtype=UInt32)
verify_series_and_expr_api(s, expected_reverse, "argsort", True)
def test_arg_min_and_arg_max() -> None:
s = pl.Series("a", [5, 3, 4, 1, 2])
assert s.arg_min() == 3
assert s.arg_max() == 0
def test_is_null_is_not_null() -> None:
s = pl.Series("a", [1.0, 2.0, 3.0, None])
assert s.is_null().series_equal(pl.Series("a", [False, False, False, True]))
assert s.is_not_null().series_equal(pl.Series("a", [True, True, True, False]))
def test_is_finite_is_infinite() -> None:
s = pl.Series("a", [1.0, 2.0, np.inf])
s.is_finite().series_equal(pl.Series("a", [True, True, False]))
s.is_infinite().series_equal(pl.Series("a", [False, False, True]))
def test_is_nan_is_not_nan() -> None:
s = pl.Series("a", [1.0, 2.0, 3.0, np.NaN])
assert s.is_nan().series_equal(pl.Series("a", [False, False, False, True]))
assert s.is_not_nan().series_equal(pl.Series("a", [True, True, True, False]))
def test_is_unique() -> None:
s = pl.Series("a", [1, 2, 2, 3])
assert s.is_unique().series_equal(pl.Series("a", [True, False, False, True]))
def test_is_duplicated() -> None:
s = pl.Series("a", [1, 2, 2, 3])
assert s.is_duplicated().series_equal(pl.Series("a", [False, True, True, False]))
def test_dot() -> None:
s = pl.Series("a", [1, 2, 3])
s2 = pl.Series("b", [4.0, 5.0, 6.0])
assert s.dot(s2) == 32
def test_sample() -> None:
s = pl.Series("a", [1, 2, 3, 4, 5])
assert len(s.sample(n=2)) == 2
assert len(s.sample(frac=0.4)) == 2
assert len(s.sample(n=2, with_replacement=True)) == 2
# on a series of length 5, you cannot sample more than 5 items
with pytest.raises(Exception):
s.sample(n=10, with_replacement=False)
# unless you use with_replacement=True
assert len(s.sample(n=10, with_replacement=True)) == 10
def test_peak_max_peak_min() -> None:
s = pl.Series("a", [4, 1, 3, 2, 5])
result = s.peak_min()
expected = pl.Series([False, True, False, True, False])
testing.assert_series_equal(result, expected)
result = s.peak_max()
expected = pl.Series([True, False, True, False, True])
testing.assert_series_equal(result, expected)
def test_shrink_to_fit() -> None:
s = pl.Series("a", [4, 1, 3, 2, 5])
assert s.shrink_to_fit(in_place=True) is None
s = pl.Series("a", [4, 1, 3, 2, 5])
assert isinstance(s.shrink_to_fit(in_place=False), pl.Series)
def test_str_concat() -> None:
s = pl.Series(["1", None, "2"])
result = s.str_concat()
expected = pl.Series(["1-null-2"])
testing.assert_series_equal(result, expected)
def test_str_lengths() -> None:
s = pl.Series(["messi", "ronaldo", None])
expected = pl.Series([5, 7, None], dtype=UInt32)
verify_series_and_expr_api(s, expected, "str.lengths")
def test_str_contains() -> None:
s = pl.Series(["messi", "ronaldo", "ibrahimovic"])
expected = pl.Series([True, False, False])
verify_series_and_expr_api(s, expected, "str.contains", "mes")
def test_str_encode() -> None:
s = pl.Series(["foo", "bar", None])
hex_encoded = pl.Series(["666f6f", "626172", None])
base64_encoded = pl.Series(["Zm9v", "YmFy", None])
verify_series_and_expr_api(s, hex_encoded, "str.encode", "hex")
verify_series_and_expr_api(s, base64_encoded, "str.encode", "base64")
with pytest.raises(ValueError):
s.str.encode("utf8")
def test_str_decode() -> None:
hex_encoded = pl.Series(["666f6f", "626172", None])
base64_encoded = pl.Series(["Zm9v", "YmFy", None])
expected = pl.Series(["foo", "bar", None])
verify_series_and_expr_api(hex_encoded, expected, "str.decode", "hex")
verify_series_and_expr_api(base64_encoded, expected, "str.decode", "base64")
def test_str_decode_exception() -> None:
s = pl.Series(["not a valid", "626172", None])
with pytest.raises(Exception):
s.str.decode(encoding="hex", strict=True)
with pytest.raises(Exception):
s.str.decode(encoding="base64", strict=True)
with pytest.raises(ValueError):
s.str.decode("utf8")
def test_str_replace_str_replace_all() -> None:
s = pl.Series(["hello", "world", "test"])
expected = pl.Series(["hell0", "w0rld", "test"])
verify_series_and_expr_api(s, expected, "str.replace", "o", "0")
s = pl.Series(["hello", "world", "test"])
expected = pl.Series(["hell0", "w0rld", "test"])
verify_series_and_expr_api(s, expected, "str.replace_all", "o", "0")
def test_str_to_lowercase() -> None:
s = pl.Series(["Hello", "WORLD"])
expected = pl.Series(["hello", "world"])
verify_series_and_expr_api(s, expected, "str.to_lowercase")
def test_str_to_uppercase() -> None:
s = pl.Series(["Hello", "WORLD"])
expected = pl.Series(["HELLO", "WORLD"])
verify_series_and_expr_api(s, expected, "str.to_uppercase")
def test_str_rstrip() -> None:
s = pl.Series([" hello ", "world\t "])
expected = pl.Series([" hello", "world"])
testing.assert_series_equal(s.str.rstrip(), expected)
def test_str_lstrip() -> None:
s = pl.Series([" hello ", "\t world"])
expected = pl.Series(["hello ", "world"])
testing.assert_series_equal(s.str.lstrip(), expected)
def test_str_strptime() -> None:
s = pl.Series(["2020-01-01", "2020-02-02"])
expected = pl.Series([date(2020, 1, 1), date(2020, 2, 2)])
verify_series_and_expr_api(s, expected, "str.strptime", pl.Date, "%Y-%m-%d")
s = pl.Series(["2020-01-01 00:00:00", "2020-02-02 03:20:10"])
expected = pl.Series(
[datetime(2020, 1, 1, 0, 0, 0), datetime(2020, 2, 2, 3, 20, 10)]
)
verify_series_and_expr_api(
s, expected, "str.strptime", pl.Datetime, "%Y-%m-%d %H:%M:%S"
)
def test_dt_strftime() -> None:
a = pl.Series("a", [10000, 20000, 30000], dtype=pl.Date)
assert a.dtype == pl.Date
expected = pl.Series("a", ["1997-05-19", "2024-10-04", "2052-02-20"])
verify_series_and_expr_api(a, expected, "dt.strftime", "%F")
def test_dt_year_month_week_day_ordinal_day() -> None:
a = pl.Series("a", [10000, 20000, 30000], dtype=pl.Date)
exp = pl.Series("a", [1997, 2024, 2052], dtype=Int32)
verify_series_and_expr_api(a, exp, "dt.year")
verify_series_and_expr_api(a, pl.Series("a", [5, 10, 2], dtype=UInt32), "dt.month")
verify_series_and_expr_api(a, pl.Series("a", [0, 4, 1], dtype=UInt32), "dt.weekday")
verify_series_and_expr_api(a, pl.Series("a", [21, 40, 8], dtype=UInt32), "dt.week")
verify_series_and_expr_api(a, pl.Series("a", [19, 4, 20], dtype=UInt32), "dt.day")
verify_series_and_expr_api(
a, pl.Series("a", [139, 278, 51], dtype=UInt32), "dt.ordinal_day"
)
assert a.dt.median() == date(2024, 10, 4)
assert a.dt.mean() == date(2024, 10, 4)
def test_dt_datetimes() -> None:
s = pl.Series(["2020-01-01 00:00:00", "2020-02-02 03:20:10"])
s = s.str.strptime(pl.Datetime, fmt="%Y-%m-%d %H:%M:%S")
# hours, minutes, seconds and nanoseconds
verify_series_and_expr_api(s, pl.Series("", [0, 3], dtype=UInt32), "dt.hour")
verify_series_and_expr_api(s, pl.Series("", [0, 20], dtype=UInt32), "dt.minute")
verify_series_and_expr_api(s, pl.Series("", [0, 10], dtype=UInt32), "dt.second")
verify_series_and_expr_api(s, pl.Series("", [0, 0], dtype=UInt32), "dt.nanosecond")
# epoch methods
verify_series_and_expr_api(
s, pl.Series("", [18262, 18294], dtype=Int32), "dt.epoch_days"
)
verify_series_and_expr_api(
s,
pl.Series("", [1_577_836_800, 1_580_613_610], dtype=Int64),
"dt.epoch_seconds",
)
verify_series_and_expr_api(
s,
pl.Series("", [1_577_836_800_000, 1_580_613_610_000], dtype=Int64),
"dt.epoch_milliseconds",
)
def test_reshape() -> None:
s = pl.Series("a", [1, 2, 3, 4])
out = s.reshape((-1, 2))
expected = pl.Series("a", [[1, 2], [3, 4]])
assert out.series_equal(expected)
out = s.reshape((2, 2))
assert out.series_equal(expected)
out = s.reshape((2, -1))
assert out.series_equal(expected)
out = s.reshape((-1, 1))
expected = pl.Series("a", [[1], [2], [3], [4]])
assert out.series_equal(expected)
# test lazy_dispatch
out = pl.select(pl.lit(s).reshape((-1, 1))).to_series()
assert out.series_equal(expected)
def test_init_categorical() -> None:
for values in [[None], ["foo", "bar"], [None, "foo", "bar"]]:
expected = pl.Series("a", values, dtype=pl.Utf8).cast(pl.Categorical)
a = pl.Series("a", values, dtype=pl.Categorical)
testing.assert_series_equal(a, expected)
def test_nested_list_types_preserved() -> None:
expected_dtype = pl.UInt32
srs1 = pl.Series([pl.Series([3, 4, 5, 6], dtype=expected_dtype) for _ in range(5)])
for srs2 in srs1:
assert srs2.dtype == expected_dtype
def test_log_exp() -> None:
a = pl.Series("a", [1, 100, 1000])
b = pl.Series("a", [0.0, 2.0, 3.0])
verify_series_and_expr_api(a, b, "log10")
expected = pl.Series("a", np.log(a.to_numpy()))
verify_series_and_expr_api(a, expected, "log")
expected = pl.Series("a", np.exp(b.to_numpy()))
verify_series_and_expr_api(b, expected, "exp")
def test_shuffle() -> None:
a = pl.Series("a", [1, 2, 3])
out = a.shuffle(2)
expected = pl.Series("a", [2, 3, 1])
testing.assert_series_equal(out, expected)
out = pl.select(pl.lit(a).shuffle(2)).to_series()
testing.assert_series_equal(out, expected)
def test_to_physical() -> None:
# casting an int result in an int
a = pl.Series("a", [1, 2, 3])
verify_series_and_expr_api(a, a, "to_physical")
# casting a date results in an Int32
a = pl.Series("a", [date(2020, 1, 1)] * 3)
expected = pl.Series("a", [18262] * 3, dtype=Int32)
verify_series_and_expr_api(a, expected, "to_physical")
def test_is_between_datetime() -> None:
s = pl.Series("a", [datetime(2020, 1, 1, 10, 0, 0), datetime(2020, 1, 1, 20, 0, 0)])
start = datetime(2020, 1, 1, 12, 0, 0)
end = datetime(2020, 1, 1, 23, 0, 0)
expected = pl.Series("a", [False, True])
# only on the expression api
result = s.to_frame().with_column(pl.col("*").is_between(start, end))["is_between"]
testing.assert_series_equal(result.rename("a"), expected)
@pytest.mark.parametrize("f", ["sin", "cos", "tan", "arcsin", "arccos", "arctan"])
def test_trigonometric(f: str) -> None:
s = pl.Series("a", [0.0])
expected = pl.Series("a", getattr(np, f)(s.to_numpy()))
verify_series_and_expr_api(s, expected, f)
def test_ewm_mean() -> None:
a = pl.Series("a", [2, 5, 3])
expected = pl.Series(
"a",
[
2.0,
4.0,
3.4285714285714284,
],
)
verify_series_and_expr_api(a, expected, "ewm_mean", alpha=0.5, adjust=True)
expected = pl.Series("a", [2.0, 3.8, 3.421053])
verify_series_and_expr_api(a, expected, "ewm_mean", com=2.0, adjust=True)
expected = pl.Series("a", [2.0, 3.5, 3.25])
verify_series_and_expr_api(a, expected, "ewm_mean", alpha=0.5, adjust=False)
a = pl.Series("a", [2, 3, 5, 7, 4])
expected = pl.Series("a", [None, 2.666667, 4.0, 5.6, 4.774194])
verify_series_and_expr_api(
a, expected, "ewm_mean", alpha=0.5, adjust=True, min_periods=2
)
expected = pl.Series("a", [None, None, 4.0, 5.6, 4.774194])
verify_series_and_expr_api(
a, expected, "ewm_mean", alpha=0.5, adjust=True, min_periods=3
)
a = pl.Series("a", [None, 1.0, 5.0, 7.0, None, 2.0, 5.0, 4])
expected = pl.Series(
"a",
[
None,
1.0,
3.6666666666666665,
5.571428571428571,
5.571428571428571,
3.6666666666666665,
4.354838709677419,
4.174603174603175,
],
)
verify_series_and_expr_api(
a, expected, "ewm_mean", alpha=0.5, adjust=True, min_periods=1
)
expected = pl.Series("a", [None, 1.0, 3.0, 5.0, 5.0, 3.5, 4.25, 4.125])
verify_series_and_expr_api(
a, expected, "ewm_mean", alpha=0.5, adjust=False, min_periods=1
)
def test_ewm_std_var() -> None:
a = pl.Series("a", [2, 5, 3])
assert (a.ewm_std(alpha=0.5) ** 2).to_list() == a.ewm_var(alpha=0.5).to_list()
def test_extend() -> None:
a = pl.Series("a", [1, 2, 3])
expected = pl.Series("a", [1, 2, 3, 1, 1, 1])
verify_series_and_expr_api(a, expected, "extend", 1, 3)
expected = pl.Series("a", [1, 2, 3, None, None, None])
verify_series_and_expr_api(a, expected, "extend", None, 3)
def test_any_all() -> None:
a = pl.Series("a", [True, False, True])
expected = pl.Series("a", [True])
verify_series_and_expr_api(a, expected, "any")
expected = pl.Series("a", [False])
verify_series_and_expr_api(a, expected, "all")
a = pl.Series("a", [True, True, True])
expected = pl.Series("a", [True])
verify_series_and_expr_api(a, expected, "any")
expected = pl.Series("a", [True])
verify_series_and_expr_api(a, expected, "all")
a = pl.Series("a", [False, False, False])
expected = pl.Series("a", [False])
verify_series_and_expr_api(a, expected, "any")
expected = pl.Series("a", [False])
verify_series_and_expr_api(a, expected, "all")
def test_product() -> None:
a = pl.Series("a", [1, 2, 3])
out = a.product()
assert out == 6
a = pl.Series("a", [1, 2, None])
out = a.product()
assert out is None
a = pl.Series("a", [None, 2, 3])
out = a.product()
assert out is None
|
the-stack_0_1484 | #!/usr/bin/env python
import sys, os
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
matplotlib.rcParams['errorbar.capsize'] = 6
matplotlib.rcParams['axes.grid'] = True
matplotlib.rcParams['font.size'] = 18
matplotlib.rcParams['figure.figsize'] = (9.75, 5.85) #(10, 6)
matplotlib.rcParams['savefig.dpi'] = 600
def main(inFile,outFile):
if not os.path.isfile(inFile):
raise ValueError('File {:} does not exist'.format(str(inFile)))
#if output exists mv to .bak
if os.path.isfile(outFile):
print('ATTENTION: {:} exists, moving to *.bak'.format(outFile))
os.rename(outFile, outFile+'.bak')
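    # RASPA PSD output: skip the 4-line header and read the pore diameter (column 0)
    # and the distribution value (column 2).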
x, y = np.loadtxt(inFile, skiprows=4, usecols=(0,2), unpack=True)
plt.xlabel("Pore Diameter [Å]")
plt.ylabel("Pore-Size Distribution")
plt.xlim([min(x),max(x)])
plt.plot(x,y)
plt.tight_layout()
plt.savefig(outFile)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Plot RASPA Pore-Size Distribution')
parser.add_argument('input', type=str, help='RASPA PSD Output File')
parser.add_argument('output', type=str, help='Filename for the PNG Output')
args = parser.parse_args()
main(args.input,args.output)
|
the-stack_0_1485 | import requests
from bs4 import BeautifulSoup as bs
def image_url_retrieval(url):
r = requests.get(url)
soup = bs(r.content, 'html.parser')
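    # The profile avatar <img> carries alt="Avatar"; grab its src attribute.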
profile_image = soup.find('img', {'alt': 'Avatar'})['src']
return profile_image
def main():
github_user = input('What github user would you like to find? ')
url = f"https://github.com/{github_user}"
print(image_url_retrieval(url))
if __name__ == '__main__':
    main()
|
the-stack_0_1486 | import os
import sys
sys.path.append(os.path.dirname(__file__))
import ConfigParser as cp
from os import path
import argparse
from transfer.base import Transfer
from walker import Walker
def main(file_path=None, all_sync=False):
inifile = cp.SafeConfigParser()
inifile.read(os.getcwd() + "/confing.ini")
""" load config file """
host = inifile.get("receiver", "host")
port = inifile.get("receiver", "port")
user = inifile.get("receiver", "user")
passwd = inifile.get("receiver", "passwd")
header_path = inifile.get("file", "header_path")
transfer = Transfer("ftp")
transfer.inst.connect(host, port, user, passwd)
if all_sync:
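        # Full sync: walk every file under syncdir and mirror it, stripping
        # header_path from the local path to build the remote path.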
syncdir = inifile.get("all_sync", "syncdir")
walker = Walker(syncdir)
w = walker.start()
while True:
try:
file_path = w.next()
remote_path = file_path.replace(header_path, "")
dirname = os.path.dirname(remote_path)
filename = os.path.basename(remote_path)
send(transfer.inst, dirname, filename, file_path)
except StopIteration:
return
if file_path:
remote_path = file_path.replace(header_path, "")
if remote_path[0] != "/":
remote_path = "/" + remote_path
dirname = os.path.dirname(remote_path)
filename = os.path.basename(remote_path)
""" Connection with remote server """
send(transfer.inst, dirname, filename, file_path)
def send(transfer, dirname, filename, file_path):
# Need space on top of directory name.
# But I don't know why this is required...
transfer.mkdir(" "+dirname)
transfer.send(dirname, filename, file_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="FTP transfer")
parser.add_argument(
"-f", "--file", dest="file_path", default=None, help="file path")
parser.add_argument(
"-a", "--all", dest="all_sync", default=None, help="all sync")
args = parser.parse_args()
main(args.file_path, args.all_sync)
|
the-stack_0_1487 | import sys
import numpy as np
from collections import Counter
from cuteSV.cuteSV_genotype import cal_GL, threshold_ref_count, count_coverage
'''
*******************************************
TO DO LIST
*******************************************
1. Identify DP with samfile pointer;
2. Add CIPOS, CILEN and/or CIEND;
3. Determine (IM)PRECISE type.
4. Filter DUP to improve INS FN rate.
*******************************************
'''
def resolution_DUP(path, chr, read_count, max_cluster_bias, sv_size,
bam_path, action, MaxSize, gt_round):
semi_dup_cluster = list()
semi_dup_cluster.append([0, 0, ''])
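    # Seed the cluster with a sentinel record; signatures are then grouped greedily
    # while both breakpoints stay within max_cluster_bias of the previous entry.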
candidate_single_SV = list()
file = open(path, 'r')
for line in file:
seq = line.strip('\n').split('\t')
if seq[1] != chr:
continue
pos_1 = int(seq[2])
pos_2 = int(seq[3])
read_id = seq[4]
if pos_1 - semi_dup_cluster[-1][0] > max_cluster_bias or pos_2 - semi_dup_cluster[-1][1] > max_cluster_bias:
if len(semi_dup_cluster) >= read_count:
if semi_dup_cluster[-1][0] == semi_dup_cluster[-1][1] == 0:
pass
else:
generate_dup_cluster(semi_dup_cluster,
chr,
read_count,
max_cluster_bias,
sv_size,
candidate_single_SV,
bam_path,
action,
MaxSize,
gt_round)
semi_dup_cluster = []
semi_dup_cluster.append([pos_1, pos_2, read_id])
else:
if semi_dup_cluster[-1][0] == semi_dup_cluster[-1][1] == 0:
semi_dup_cluster = []
semi_dup_cluster.append([pos_1, pos_2, read_id])
else:
semi_dup_cluster.append([pos_1, pos_2, read_id])
if len(semi_dup_cluster) >= read_count:
if semi_dup_cluster[-1][0] == semi_dup_cluster[-1][1] == 0:
pass
else:
generate_dup_cluster(semi_dup_cluster,
chr,
read_count,
max_cluster_bias,
sv_size,
candidate_single_SV,
bam_path,
action,
MaxSize,
gt_round)
file.close()
return candidate_single_SV
def generate_dup_cluster(semi_dup_cluster, chr, read_count, max_cluster_bias,
sv_size, candidate_single_SV, bam_path, action, MaxSize, gt_round):
# calculate support reads
support_read = list(set([i[2] for i in semi_dup_cluster]))
if len(support_read) < read_count:
return
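    # Estimate the two breakpoints from the middle 40-60% slice of the cluster,
    # which damps the influence of outlying alignments.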
low_b = int(len(semi_dup_cluster)*0.4)
up_b = int(len(semi_dup_cluster)*0.6)
if low_b == up_b:
breakpoint_1 = semi_dup_cluster[low_b][0]
breakpoint_2 = semi_dup_cluster[low_b][1]
else:
breakpoint_1 = [i[0] for i in semi_dup_cluster[low_b:up_b]]
breakpoint_2 = [i[1] for i in semi_dup_cluster[low_b:up_b]]
breakpoint_1 = int(sum(breakpoint_1)/len(semi_dup_cluster[low_b:up_b]))
breakpoint_2 = int(sum(breakpoint_2)/len(semi_dup_cluster[low_b:up_b]))
if sv_size <= breakpoint_2 - breakpoint_1 <= MaxSize or (sv_size <= breakpoint_2 - breakpoint_1 and MaxSize == -1):
if action:
import time
# time_start = time.time()
DV, DR, GT, GL, GQ, QUAL = call_gt(bam_path,
breakpoint_1,
breakpoint_2,
chr,
support_read,
min(max_cluster_bias, breakpoint_2 - breakpoint_1),
gt_round)
# print(DV, DR, GT, GL, GQ, QUAL)
# cost_time = time.time() - time_start
# print("DUP", chr, int(breakpoint_1), int(breakpoint_2), DR, DV, QUAL, "%.4f"%cost_time)
else:
DR = '.'
GT = './.'
GL = '.,.,.'
GQ = "."
QUAL = "."
candidate_single_SV.append([chr,
'DUP',
str(breakpoint_1),
str(breakpoint_2 - breakpoint_1),
str(len(support_read)),
str(DR),
str(GT),
str(GL),
str(GQ),
str(QUAL),
str(','.join(support_read))])
def run_dup(args):
return resolution_DUP(*args)
def call_gt(bam_path, pos_1, pos_2, chr, read_id_list, max_cluster_bias, gt_round):
import pysam
bamfile = pysam.AlignmentFile(bam_path)
querydata = set()
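    # Collect reads overlapping a window around breakpoint 1; reads that are not in
    # the variant-supporting list count toward the reference allele (DR) below.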
search_start = max(int(pos_1 - max_cluster_bias/2), 0)
search_end = min(int(pos_1 + max_cluster_bias/2), bamfile.get_reference_length(chr))
up_bound = threshold_ref_count(len(read_id_list))
status = count_coverage(chr,
search_start,
search_end,
bamfile,
querydata,
up_bound,
gt_round)
if status == -1:
DR = '.'
GT = "./."
GL = ".,.,."
GQ = "."
QUAL = "."
elif status == 1:
DR = 0
for query in querydata:
if query not in read_id_list:
DR += 1
GT, GL, GQ, QUAL = cal_GL(DR, len(read_id_list))
else:
search_start = max(int(pos_2 - max_cluster_bias/2), 0)
search_end = min(int(pos_2 + max_cluster_bias/2), bamfile.get_reference_length(chr))
status_2 = count_coverage(chr,
search_start,
search_end,
bamfile,
querydata,
up_bound,
gt_round)
# status_2 judgement
DR = 0
for query in querydata:
if query not in read_id_list:
DR += 1
GT, GL, GQ, QUAL = cal_GL(DR, len(read_id_list))
bamfile.close()
    return len(read_id_list), DR, GT, GL, GQ, QUAL
|
the-stack_0_1488 | import torch
import torch.nn.functional as F
import torch.optim as optim
from gym.spaces import flatten
from blobrl.agents import DQN
from blobrl.memories import ExperienceReplay
from blobrl.networks import C51Network
class CategoricalDQN(DQN):
def __init__(self, observation_space, action_space, memory=ExperienceReplay(), network=None, num_atoms=51,
r_min=-10, r_max=10, step_train=1, batch_size=32, gamma=1.0,
optimizer=None, greedy_exploration=None, device=None):
"""
:param device: torch device to run agent
:type: torch.device
:param action_space:
:param observation_space:
:param memory:
:param network:
:param num_atoms:
:param r_min:
:param r_max:
:param step_train:
:param batch_size:
:param gamma:
:param optimizer:
:param greedy_exploration:
"""
if network is None and optimizer is None:
network = C51Network(observation_space=observation_space,
action_space=action_space)
num_atoms = 51
optimizer = optim.Adam(network.parameters())
super().__init__(observation_space=observation_space, action_space=action_space, memory=memory,
network=network, step_train=step_train, batch_size=batch_size, gamma=gamma,
loss=None, optimizer=optimizer, greedy_exploration=greedy_exploration, device=device)
self.num_atoms = num_atoms
self.r_min = r_min
self.r_max = r_max
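        # Fixed support of the value distribution: num_atoms evenly spaced atoms in [r_min, r_max].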
self.delta_z = (r_max - r_min) / float(num_atoms - 1)
self.z = torch.tensor([r_min + i * self.delta_z for i in range(num_atoms)], device=self.device)
def get_action(self, observation):
""" Return action choice by the agents
:param observation: stat of environment
:type observation: gym.Space
"""
if not self.greedy_exploration.be_greedy(self.step) and self.with_exploration:
return self.action_space.sample()
observation = torch.tensor([flatten(self.observation_space, observation)], device=self.device).float()
prediction = self.network.forward(observation)
def return_values(values):
if isinstance(values, list):
return [return_values(v) for v in values]
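            # Expected Q-value per action is sum_i p(z_i) * z_i; act greedily on it.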
q_values = values * self.z
q_values = torch.sum(q_values, dim=2)
return torch.argmax(q_values).detach().item()
return return_values(prediction)
def apply_loss(self, next_prediction, prediction, actions, rewards, next_observations, dones, len_space):
if isinstance(next_prediction, list):
[self.apply_loss(n, p, a, rewards, next_observations, dones, c) for n, p, a, c in
zip(next_prediction, prediction, actions.permute(1, 0, *[i for i in range(2, len(actions.shape))]),
len_space)]
else:
q_values_next = next_prediction * self.z
q_values_next = torch.sum(q_values_next, dim=2)
actions = F.one_hot(actions.long(), num_classes=len_space)
actions_next = torch.argmax(q_values_next, dim=1)
actions_next = F.one_hot(actions_next, num_classes=len_space)
dones = dones.view(-1, 1)
tz = rewards.view(-1, 1) + self.gamma * self.z * (1 - dones)
tz = tz.clamp(self.r_min, self.r_max)
b = (tz - self.r_min) / self.delta_z
l, u = b.floor().to(torch.int64), b.ceil().to(torch.int64)
l[(u > 0) * (l == u)] -= 1
u[(l < (self.num_atoms - 1)) * (l == u)] += 1
m_prob = torch.zeros((self.batch_size, len_space, self.num_atoms), device=self.device)
predictions_next = next_prediction[actions_next == 1, :]
offset = torch.linspace(0, (self.batch_size - 1) * self.num_atoms, self.batch_size,
device=self.device).view(-1,
1)
offset = offset.expand(self.batch_size, self.num_atoms)
u_index = (u + offset).view(-1).to(torch.int64)
l_index = (l + offset).view(-1).to(torch.int64)
predictions_next = (dones + (1 - dones) * predictions_next)
m_prob_action = m_prob[actions == 1, :].view(-1)
m_prob_action.index_add_(0, u_index, (predictions_next * (u - b)).view(-1))
m_prob_action.index_add_(0, l_index, (predictions_next * (b - l)).view(-1))
m_prob[actions == 1, :] = m_prob_action.view(-1, self.num_atoms)
self.optimizer.zero_grad()
loss = - prediction.log() * m_prob
loss.sum((1, 2)).mean().backward(retain_graph=True)
def __str__(self):
return 'CategoricalDQN-' + str(self.observation_space) + "-" + str(self.action_space) + "-" + str(
self.network) + "-" + str(self.memory) + "-" + str(self.step_train) + "-" + str(
self.step) + "-" + str(self.batch_size) + "-" + str(self.gamma) + "-" + str(self.loss) + "-" + str(
self.optimizer) + "-" + str(self.greedy_exploration) + "-" + str(self.num_atoms) + "-" + str(
self.r_min) + "-" + str(self.r_max) + "-" + str(self.delta_z) + "-" + str(self.z)
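# ----------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module). It shows how
# the agent above could be driven from a Gym environment; the environment id
# is arbitrary and the exploration/step attributes are assumed to be handled
# by the DQN base class, so treat this as a sketch rather than blobrl's
# documented API.
# ----------------------------------------------------------------------------
if __name__ == "__main__":
    import gym
    env = gym.make("CartPole-v1")
    agent = CategoricalDQN(env.observation_space, env.action_space,
                           num_atoms=51, r_min=-10, r_max=10)
    observation = env.observation_space.sample()
    action = agent.get_action(observation)  # index of the chosen discrete action
    print("sampled action:", action)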
|
the-stack_0_1489 | import torch
import numpy as np
import pandas as pd
import torch.optim as optim
import torch.nn.functional as F
def load_training_data(path='./data/training_label.txt'):
if 'training_label' in path:
with open(path, 'r', encoding='UTF-8') as f:
lines = f.readlines()
lines = [line.strip('\n').split(' ') for line in lines]
x = [line[2:] for line in lines]
y = [line[0] for line in lines]
return x, y
else:
with open(path, 'r', encoding='UTF-8') as f:
lines = f.readlines()
x = [line.strip('\n').split(' ') for line in lines]
return x
def load_testing_data(path='./data/testing_data.txt'):
with open(path, 'r', encoding='UTF-8') as f:
lines = f.readlines()
X = ["".join(line.strip('\n').split(",")[1:]).strip() for line in lines[1:]]
X = [sen.split(' ') for sen in X]
return X
def evaluation(outputs, labels):
outputs[outputs >= 0.5] = 1
outputs[outputs < 0.5] = 0
correct = torch.sum(torch.eq(outputs, labels)).item()
return correct
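# ----------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original file): the paths are the
# defaults assumed by the loaders above; adjust them to your own dataset.
# ----------------------------------------------------------------------------
if __name__ == "__main__":
    train_x, train_y = load_training_data('./data/training_label.txt')
    test_x = load_testing_data('./data/testing_data.txt')
    print(len(train_x), "labelled sentences,", len(test_x), "test sentences")
    # evaluation() thresholds sigmoid outputs at 0.5 and counts matches:
    outputs = torch.tensor([0.9, 0.2, 0.7])
    labels = torch.tensor([1.0, 0.0, 0.0])
    print("correct:", evaluation(outputs, labels))  # -> 2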
|
the-stack_0_1490 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/startup.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
################################################################################
#
# CLEAN ROOM MODULE
#
# This module is classified as a "Clean Room" module and is subject to
# restrictions on what it may import.
#
# See: https://king-phisher.readthedocs.io/en/latest/development/modules.html#clean-room-modules
#
################################################################################
import collections
import gc
import logging
import os
import shutil
import subprocess
import sys
from king_phisher import its
from king_phisher import version
ProcessResults = collections.namedtuple('ProcessResults', ('stdout', 'stderr', 'status'))
"""
A named tuple for holding the results of an executed external process.
.. py:attribute:: stdout
A string containing the data the process wrote to stdout.
.. py:attribute:: stderr
A string containing the data the process wrote to stderr.
.. py:attribute:: status
An integer representing the process's exit code.
"""
def _run_pipenv(args, cwd=None):
"""
Execute Pipenv with the supplied arguments and return the
:py:class:`~.ProcessResults`. If the exit status is non-zero, then the
stdout buffer from the Pipenv execution will be written to stderr.
:param tuple args: The arguments for the Pipenv.
:param str cwd: An optional current working directory to use for the
process.
:return: The results of the execution.
:rtype: :py:class:`~.ProcessResults`
"""
path = which('pipenv')
if path is None:
		raise RuntimeError('pipenv could not be found')
args = (path,) + tuple(args)
results = run_process(args, cwd=cwd)
if results.status:
sys.stderr.write('pipenv encountered the following error:\n')
sys.stderr.write(results.stdout)
sys.stderr.flush()
return results
def pipenv_entry(parser, entry_point):
"""
Run through startup logic for a Pipenv script (see Pipenv: `Custom Script
Shortcuts`_ for more information). This sets up a basic stream logging
configuration, establishes the Pipenv environment and finally calls the
actual entry point using :py:func:`os.execve`.
.. note::
Due to the use of :py:func:`os.execve`, this function does not return.
.. note::
Due to the use of :py:func:`os.execve` and ``os.EX_*`` exit codes, this
function is not available on Windows.
:param parser: The argument parser to use. Arguments are added to it and
extracted before passing the remainder to the entry point.
:param str entry_point: The name of the entry point using Pipenv.
.. _Custom Script Shortcuts: https://pipenv.readthedocs.io/en/latest/advanced/#custom-script-shortcuts
"""
if its.on_windows:
# this is because of the os.exec call and os.EX_* status codes
raise RuntimeError('pipenv_entry is incompatible with windows')
env_group = parser.add_argument_group('environment wrapper options')
env_group.add_argument('--env-install', dest='pipenv_install', default=False, action='store_true', help='install pipenv environment and exit')
env_group.add_argument('--env-update', dest='pipenv_update', default=False, action='store_true', help='update pipenv requirements and exit')
argp_add_default_args(parser)
arguments, _ = parser.parse_known_args()
sys_argv = sys.argv
sys_argv.pop(0)
if sys.version_info < (3, 4):
print('[-] the Python version is too old (minimum required is 3.4)')
return os.EX_SOFTWARE
# initialize basic stream logging
logger = logging.getLogger('KingPhisher.wrapper')
logger.setLevel(arguments.loglvl if arguments.loglvl else 'WARNING')
console_log_handler = logging.StreamHandler()
console_log_handler.setLevel(arguments.loglvl if arguments.loglvl else 'WARNING')
console_log_handler.setFormatter(logging.Formatter('%(levelname)-8s %(message)s'))
logger.addHandler(console_log_handler)
target_directory = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
logger.debug("target diretory: {}".format(target_directory))
os.environ['PIPENV_VENV_IN_PROJECT'] = os.environ.get('PIPENV_VENV_IN_PROJECT', 'True')
os.environ['PIPENV_PIPFILE'] = os.environ.get('PIPENV_PIPFILE', os.path.join(target_directory, 'Pipfile'))
logger.info('checking for the pipenv environment')
if which('pipenv') is None:
logger.exception('pipenv not found, run tools/install.sh --update')
return os.EX_UNAVAILABLE
pipenv_path = which('pipenv')
logger.debug("pipenv path: {0!r}".format(pipenv_path))
if arguments.pipenv_install or not os.path.isdir(os.path.join(target_directory, '.venv')):
if arguments.pipenv_install:
logger.info('installing the pipenv environment')
else:
logger.warning('no pre-existing pipenv environment was found, installing it now')
results = _run_pipenv(('--site-packages', 'install'), cwd=target_directory)
if results.status:
logger.error('failed to install the pipenv environment')
logger.info('removing the incomplete .venv directory')
try:
shutil.rmtree(os.path.join(target_directory, '.venv'))
except OSError:
logger.error('failed to remove the incomplete .venv directory', exc_info=True)
return results.status
if arguments.pipenv_install:
return os.EX_OK
if arguments.pipenv_update:
logger.info('updating the pipenv environment')
results = _run_pipenv(('--site-packages', 'update'), cwd=target_directory)
if results.status:
logger.error('failed to update the pipenv environment')
return results.status
logger.info('the pipenv environment has been updated')
return os.EX_OK
logger.debug('pipenv Pipfile: {}'.format(os.environ['PIPENV_PIPFILE']))
# the blank arg being passed is required for pipenv
passing_argv = [' ', 'run', entry_point] + sys_argv
os.execve(pipenv_path, passing_argv, os.environ)
def run_process(process_args, cwd=None, encoding='utf-8'):
"""
Start a process, wait for it to complete and return a
:py:class:`~.ProcessResults` object.
:param process_args: The arguments for the processes including the binary.
:param cwd: An optional current working directory to use for the process.
:param str encoding: The encoding to use for strings.
:return: The results of the process including the status code and any text
printed to stdout or stderr.
:rtype: :py:class:`~.ProcessResults`
"""
cwd = cwd or os.getcwd()
process_handle = subprocess.Popen(process_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
process_handle.wait()
results = ProcessResults(
process_handle.stdout.read().decode(encoding),
process_handle.stderr.read().decode(encoding),
process_handle.returncode
)
return results
def which(program):
"""
Examine the ``PATH`` environment variable to determine the location for the
specified program. If it can not be found None is returned. This is
fundamentally similar to the Unix utility of the same name.
:param str program: The name of the program to search for.
:return: The absolute path to the program if found.
:rtype: str
"""
is_exe = lambda fpath: (os.path.isfile(fpath) and os.access(fpath, os.X_OK))
for path in os.environ['PATH'].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
if is_exe(program):
return os.path.abspath(program)
return None
def argp_add_default_args(parser, default_root=''):
"""
Add standard arguments to a new :py:class:`argparse.ArgumentParser`
instance. Used to add the utilities argparse options to the wrapper for
display.
:param parser: The parser to add arguments to.
:type parser: :py:class:`argparse.ArgumentParser`
:param str default_root: The default root logger to specify.
"""
parser.add_argument('-v', '--version', action='version', version=parser.prog + ' Version: ' + version.version)
parser.add_argument('-L', '--log', dest='loglvl', choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'FATAL'), help='set the logging level')
parser.add_argument('--logger', default=default_root, help='specify the root logger')
gc_group = parser.add_argument_group('garbage collector options')
gc_group.add_argument('--gc-debug-leak', action='store_const', const=gc.DEBUG_LEAK, default=0, help='set the DEBUG_LEAK flag')
gc_group.add_argument('--gc-debug-stats', action='store_const', const=gc.DEBUG_STATS, default=0, help='set the DEBUG_STATS flag')
return parser
def argp_add_client(parser):
"""
Add client-specific arguments to a new :py:class:`argparse.ArgumentParser`
instance.
:param parser: The parser to add arguments to.
:type parser: :py:class:`argparse.ArgumentParser`
"""
kpc_group = parser.add_argument_group('client specific options')
kpc_group.add_argument('-c', '--config', dest='config_file', required=False, help='specify a configuration file to use')
kpc_group.add_argument('--no-plugins', dest='use_plugins', default=True, action='store_false', help='disable all plugins')
kpc_group.add_argument('--no-style', dest='use_style', default=True, action='store_false', help='disable interface styling')
return parser
def argp_add_server(parser):
"""
Add server-specific arguments to a new :py:class:`argparse.ArgumentParser`
instance.
:param parser: The parser to add arguments to.
:type parser: :py:class:`argparse.ArgumentParser`
"""
kps_group = parser.add_argument_group('server specific options')
kps_group.add_argument('-f', '--foreground', dest='foreground', action='store_true', default=False, help='run in the foreground (do not fork)')
kps_group.add_argument('--update-geoip-db', dest='update_geoip_db', action='store_true', default=False, help='update the geoip database and exit')
kps_group.add_argument('--verify-config', dest='verify_config', action='store_true', default=False, help='verify the configuration and exit')
kps_group.add_argument('config_file', action='store', help='configuration file to use')
return parser
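# ----------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): exercises the
# stand-alone helpers which() and run_process(); the command run here is an
# arbitrary example, not something King Phisher itself executes.
# ----------------------------------------------------------------------------
if __name__ == '__main__':
	python_path = which('python3')
	print('python3 resolved to:', python_path)
	if python_path is not None:
		results = run_process((python_path, '--version'))
		print('exit status:', results.status)
		print('output:', (results.stdout or results.stderr).strip())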
|
the-stack_0_1491 | import media
import fresh_tomatoes
import httplib # Used in Python 2
conn = httplib.HTTPSConnection("api.themoviedb.org")
payload = "{}"
# Movie IDs from themoviedb.org
movie_id = ["246655", "2080", "49538", "127585", "246655", "263115"]
release_date = []
# for loop to find Release date for each movie_id
for moviedb in movie_id:
conn.request("GET", "/3/movie/" + moviedb + "/release_dates?api_key=c744541ef0c2fd5ff5ccba678f100347", payload) # noqa
res = conn.getresponse()
data = res.read()
j = data.decode("utf-8").find("US") # find position of US release date info
US = data.decode("utf-8")[j:]
i = US.find("T00")
release_date.append(US[i-10:i])
# Creates instances of the Movie class for each movie
xmen = media.Movie("X-Men",
"https://upload.wikimedia.org/wikipedia/en/8/81/X-MenfilmPoster.jpg", # noqa
"https://www.youtube.com/watch?v=nbNcULQFojc",
release_date[0])
xmen_origins_wolverine = media.Movie("X-Men Origins: Wolverine",
"https://upload.wikimedia.org/wikipedia/en/e/ec/X-Men_Origins_Wolverine.jpg", # noqa
"https://www.youtube.com/watch?v=toLpchTUYk8", # noqa
release_date[1])
xmen_first_class = media.Movie("X-Men: First Class",
"https://upload.wikimedia.org/wikipedia/en/5/55/X-MenFirstClassMoviePoster.jpg", #noqa
"https://www.youtube.com/watch?v=UrbHykKUfTM",
release_date[2])
xmen_days_of_future_past = media.Movie("X-Men: Days of Future Past",
"https://upload.wikimedia.org/wikipedia/en/0/0c/X-Men_Days_of_Future_Past_poster.jpg", # noqa
"https://www.youtube.com/watch?v=pK2zYHWDZKo", # noqa
release_date[3])
xmen_apocalypse = media.Movie("X-Men: Apocalypse",
"https://upload.wikimedia.org/wikipedia/en/0/04/X-Men_-_Apocalypse.jpg", # noqa
"https://www.youtube.com/watch?v=Jer8XjMrUB4&spfreload=10", # noqa
release_date[4])
logan = media.Movie("Logan",
"https://upload.wikimedia.org/wikipedia/en/3/37/Logan_2017_poster.jpg", # noqa
"https://www.youtube.com/watch?v=Div0iP65aZo",
release_date[5])
# Creates a movies array to be accessed from fresh_tomatoes.py
movies = [xmen, xmen_origins_wolverine, xmen_first_class,
xmen_days_of_future_past, xmen_apocalypse, logan]
# Creates a HTML page by passing in movies
fresh_tomatoes.open_movies_page(movies)
|
the-stack_0_1492 | import copy
from collections import OrderedDict
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
from ..utils import DeprecationHelper, EasyDict, classproperty
__all__ = ['Space', 'NestedSpace', 'AutoGluonObject', 'List', 'Dict',
'Categorical', 'Choice', 'Real', 'Int', 'Bool']
class Space(object):
"""Basic search space describing set of possible values for hyperparameter.
"""
pass
class SimpleSpace(Space):
"""Non-nested search space (i.e. corresponds to a single simple hyperparameter).
"""
def __repr__(self):
reprstr = self.__class__.__name__
if hasattr(self, 'lower') and hasattr(self, 'upper'):
reprstr += ': lower={}, upper={}'.format(self.lower, self.upper)
if hasattr(self, 'value'):
reprstr += ': value={}'.format(self.value)
return reprstr
def get_hp(self, name):
"""Fetch particular hyperparameter based on its name.
"""
raise NotImplementedError
@property
def hp(self):
""" Return hyperparameter corresponding to this search space.
"""
return self.get_hp(name='')
@property
def default(self):
"""Return default value of hyperparameter corresponding to this search space.
"""
default = self._default if self._default else self.hp.default_value
return default
@default.setter
def default(self, value):
"""Set default value for hyperparameter corresponding to this search space.
"""
self._default = value
@property
def rand(self):
"""Return randomly sampled (but valid) value from this search space.
"""
cs = CS.ConfigurationSpace()
cs.add_hyperparameter(self.hp)
return cs.sample_configuration().get_dictionary()['']
class NestedSpace(Space):
"""Nested hyperparameter search space, which is a search space that itself contains multiple search spaces.
"""
def sample(self, **config):
"""Sample a configuration from this search space.
"""
pass
@property
def cs(self):
""" ConfigSpace representation of this search space.
"""
raise NotImplementedError
@property
def kwspaces(self):
""" OrderedDict representation of this search space.
"""
raise NotImplementedError
@property
def default(self):
"""Return default value for hyperparameter corresponding to this search space.
"""
config = self.cs.get_default_configuration().get_dictionary()
return self.sample(**config)
@property
def rand(self):
"""Randomly sample configuration from this nested search space.
"""
config = self.cs.sample_configuration().get_dictionary()
return self.sample(**config)
class AutoGluonObject(NestedSpace):
r"""Searchable objects,
created by decorating a custom Python class or function using the
:func:`autogluon.obj` or :func:`autogluon.func` decorators.
"""
def __call__(self, *args, **kwargs):
"""Convenience method for interacting with AutoGluonObject.
"""
if not self._inited:
self._inited = True
self._instance = self.init()
return self._instance.__call__(*args, **kwargs)
def init(self):
"""Instantiate an actual instance of this `AutoGluonObject`.
In order to interact with such an `object`, you must always first call: `object.init()`.
"""
config = self.cs.get_default_configuration().get_dictionary()
return self.sample(**config)
@property
def cs(self):
""" ConfigSpace representation of this search space.
"""
cs = CS.ConfigurationSpace()
for k, v in self.kwvars.items():
if isinstance(v, NestedSpace):
_add_cs(cs, v.cs, k)
elif isinstance(v, Space):
hp = v.get_hp(name=k)
_add_hp(cs, hp)
else:
_rm_hp(cs, k)
return cs
@classproperty
def kwspaces(cls):
""" OrderedDict representation of this search space.
"""
return cls.__init__.kwspaces
def sample(self):
"""Sample a configuration from this search space.
"""
raise NotImplementedError
def __repr__(self):
return 'AutoGluonObject'
class List(NestedSpace):
r"""Nested search space corresponding to an ordered list of hyperparameters.
Parameters
----------
args : list
a list of search spaces.
Examples
--------
>>> sequence = ag.List(
>>> ag.space.Categorical('conv3x3', 'conv5x5', 'conv7x7'),
>>> ag.space.Categorical('BatchNorm', 'InstanceNorm'),
>>> ag.space.Categorical('relu', 'sigmoid'),
>>> )
"""
def __init__(self, *args):
self.data = [*args]
def __iter__(self):
for elem in self.data:
yield elem
def __getitem__(self, index):
return self.data[index]
def __setitem__(self, index, data):
self.data[index] = data
def __len__(self):
return len(self.data)
def __getstate__(self):
return self.data
def __setstate__(self, d):
self.data = d
def __getattribute__(self, s):
try:
x = super(List, self).__getattribute__(s)
except AttributeError:
pass
else:
return x
x = self.data.__getattribute__(s)
return x
def sample(self, **config):
"""Sample a configuration from this search space.
"""
ret = []
kwspaces = self.kwspaces
striped_keys = [k.split('.')[0] for k in config.keys()]
for idx, obj in enumerate(self.data):
if isinstance(obj, NestedSpace):
sub_config = _strip_config_space(config, prefix=str(idx))
ret.append(obj.sample(**sub_config))
elif isinstance(obj, SimpleSpace):
ret.append(config[str(idx)])
else:
ret.append(obj)
return ret
@property
def cs(self):
""" ConfigSpace representation of this search space.
"""
cs = CS.ConfigurationSpace()
for k, v in enumerate(self.data):
if isinstance(v, NestedSpace):
_add_cs(cs, v.cs, str(k))
elif isinstance(v, Space):
hp = v.get_hp(name=str(k))
_add_hp(cs, hp)
return cs
@property
def kwspaces(self):
""" OrderedDict representation of this search space.
"""
kw_spaces = OrderedDict()
for idx, obj in enumerate(self.data):
k = str(idx)
if isinstance(obj, NestedSpace):
kw_spaces[k] = obj
for sub_k, sub_v in obj.kwspaces.items():
new_k = '{}.{}'.format(k, sub_k)
kw_spaces[new_k] = sub_v
elif isinstance(obj, Space):
kw_spaces[k] = obj
return kw_spaces
def __repr__(self):
reprstr = self.__class__.__name__ + str(self.data)
return reprstr
class Dict(NestedSpace):
"""Nested search space for dictionary containing multiple hyperparameters.
Examples
--------
>>> g = ag.space.Dict(
>>> hyperparam1 = ag.space.Categorical('alpha', 'beta'),
>>> hyperparam2 = ag.space.Int(0, 3)
>>> )
>>> print(g)
"""
def __init__(self, **kwargs):
self.data = EasyDict(kwargs)
def __getattribute__(self, s):
try:
x = super(Dict, self).__getattribute__(s)
except AttributeError:
pass
else:
return x
x = self.data.__getattribute__(s)
return x
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, data):
self.data[key] = data
def __getstate__(self):
return self.data
def __setstate__(self, d):
self.data = d
@property
def cs(self):
""" ConfigSpace representation of this search space.
"""
cs = CS.ConfigurationSpace()
for k, v in self.data.items():
if isinstance(v, NestedSpace):
_add_cs(cs, v.cs, k)
elif isinstance(v, Space):
hp = v.get_hp(name=k)
_add_hp(cs, hp)
return cs
@property
def kwspaces(self):
""" OrderedDict representation of this search space.
"""
kw_spaces = OrderedDict()
for k, obj in self.data.items():
if isinstance(obj, NestedSpace):
kw_spaces[k] = obj
for sub_k, sub_v in obj.kwspaces.items():
new_k = '{}.{}'.format(k, sub_k)
kw_spaces[new_k] = sub_v
elif isinstance(obj, Space):
kw_spaces[k] = obj
return kw_spaces
def sample(self, **config):
"""Sample a configuration from this search space.
"""
ret = {}
ret.update(self.data)
kwspaces = self.kwspaces
kwspaces.update(config)
striped_keys = [k.split('.')[0] for k in config.keys()]
for k, v in kwspaces.items():
if k in striped_keys:
if isinstance(v, NestedSpace):
sub_config = _strip_config_space(config, prefix=k)
ret[k] = v.sample(**sub_config)
else:
ret[k] = v
return ret
def __repr__(self):
reprstr = self.__class__.__name__ + str(self.data)
return reprstr
class Categorical(NestedSpace):
"""Nested search space for hyperparameters which are categorical. Such a hyperparameter takes one value out of the discrete set of provided options.
Parameters
----------
data : Space or python built-in objects
the choice candidates
Examples
--------
a = ag.space.Categorical('a', 'b', 'c', 'd')
b = ag.space.Categorical('resnet50', autogluon_obj())
"""
def __init__(self, *data):
self.data = [*data]
def __iter__(self):
for elem in self.data:
yield elem
def __getitem__(self, index):
return self.data[index]
def __setitem__(self, index, data):
self.data[index] = data
def __len__(self):
return len(self.data)
@property
def cs(self):
""" ConfigSpace representation of this search space.
"""
cs = CS.ConfigurationSpace()
if len(self.data) == 0:
return CS.ConfigurationSpace()
hp = CSH.CategoricalHyperparameter(name='choice', choices=range(len(self.data)))
_add_hp(cs, hp)
for i, v in enumerate(self.data):
if isinstance(v, NestedSpace):
_add_cs(cs, v.cs, str(i))
return cs
def sample(self, **config):
"""Sample a configuration from this search space.
"""
choice = config.pop('choice')
if isinstance(self.data[choice], NestedSpace):
# nested space: Categorical of AutoGluonobjects
min_config = _strip_config_space(config, prefix=str(choice))
return self.data[choice].sample(**min_config)
else:
return self.data[choice]
@property
def kwspaces(self):
"""OrderedDict representation of this search space.
"""
kw_spaces = OrderedDict()
for idx, obj in enumerate(self.data):
if isinstance(obj, NestedSpace):
for sub_k, sub_v in obj.kwspaces.items():
new_k = '{}.{}'.format(idx, sub_k)
kw_spaces[new_k] = sub_v
return kw_spaces
def __repr__(self):
reprstr = self.__class__.__name__ + str(self.data)
return reprstr
Choice = DeprecationHelper(Categorical, 'Choice')
class Real(SimpleSpace):
"""Search space for numeric hyperparameter that takes continuous values.
Parameters
----------
lower : float
the lower bound of the search space
upper : float
the upper bound of the search space
default : float (optional)
default value
log : (True/False)
Whether to search the values on a logarithmic rather than linear scale.
This is useful for numeric hyperparameters (such as learning rates) whose search space spans many orders of magnitude.
Examples
--------
>>> learning_rate = ag.Real(0.01, 0.1, log=True)
"""
def __init__(self, lower, upper, default=None, log=False):
self.lower = lower
self.upper = upper
self.log = log
self._default = default
def get_hp(self, name):
return CSH.UniformFloatHyperparameter(name=name, lower=self.lower, upper=self.upper,
default_value=self._default, log=self.log)
class Int(SimpleSpace):
"""Search space for numeric hyperparameter that takes integer values.
Parameters
----------
lower : int
The lower bound of the search space
upper : int
The upper bound of the search space
default : int (optional)
Default value
Examples
--------
>>> range = ag.space.Int(0, 100)
"""
def __init__(self, lower, upper, default=None):
self.lower = lower
self.upper = upper
self._default = default
def get_hp(self, name):
return CSH.UniformIntegerHyperparameter(name=name, lower=self.lower, upper=self.upper,
default_value=self._default)
class Bool(Int):
"""Search space for hyperparameter that is either True or False.
`ag.Bool()` serves as shorthand for: `ag.space.Categorical(True, False)`
Examples
--------
pretrained = ag.space.Bool()
"""
def __init__(self):
super(Bool, self).__init__(0, 1)
def _strip_config_space(config, prefix):
# filter out the config with the corresponding prefix
new_config = {}
for k, v in config.items():
if k.startswith(prefix):
new_config[k[len(prefix)+1:]] = v
return new_config
def _add_hp(cs, hp):
if hp.name in cs._hyperparameters:
cs._hyperparameters[hp.name] = hp
else:
cs.add_hyperparameter(hp)
def _add_cs(master_cs, sub_cs, prefix, delimiter='.', parent_hp=None):
new_parameters = []
for hp in sub_cs.get_hyperparameters():
new_parameter = copy.deepcopy(hp)
# Allow for an empty top-level parameter
if new_parameter.name == '':
new_parameter.name = prefix
elif not prefix == '':
new_parameter.name = "%s%s%s" % (prefix, '.', new_parameter.name)
new_parameters.append(new_parameter)
for hp in new_parameters:
_add_hp(master_cs, hp)
def _rm_hp(cs, k):
if k in cs._hyperparameters:
cs._hyperparameters.pop(k)
for hp in cs.get_hyperparameters():
if hp.name.startswith("%s."%(k)):
cs._hyperparameters.pop(hp.name)
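# ----------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): builds a small
# nested search space and draws one random configuration; the hyperparameter
# names and ranges are arbitrary examples.
# ----------------------------------------------------------------------------
if __name__ == '__main__':
    search_space = Dict(
        learning_rate=Real(1e-4, 1e-1, log=True),
        batch_size=Categorical(32, 64, 128),
        num_layers=Int(1, 4),
    )
    print(search_space.cs)    # underlying ConfigSpace object
    print(search_space.rand)  # one randomly sampled configuration dict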
|
the-stack_0_1493 | from datetime import datetime
import pytest
from .fixtures import create_plan, get_subscription # NOQA: F401
from .test_paddle import BadPaddleDataWarning, paddle_client # NOQA: F401
def test_list_subscription_users(paddle_client, get_subscription): # NOQA: F811,E501
subscription_users = paddle_client.list_subscription_users()
for subscription in subscription_users:
assert isinstance(subscription['subscription_id'], int)
assert isinstance(subscription['plan_id'], int)
assert isinstance(subscription['user_id'], int)
assert isinstance(subscription['user_email'], str)
assert isinstance(subscription['marketing_consent'], bool)
assert isinstance(subscription['update_url'], str)
assert isinstance(subscription['update_url'], str)
assert isinstance(subscription['state'], str)
assert isinstance(subscription['cancel_url'], str)
assert isinstance(subscription['signup_date'], str)
datetime.strptime(subscription['signup_date'], '%Y-%m-%d %H:%M:%S')
assert isinstance(subscription['last_payment'], dict)
assert isinstance(subscription['payment_information'], dict)
assert isinstance(subscription['linked_subscriptions'], list)
def test_list_subscription_users_with_subscription_id(paddle_client, get_subscription): # NOQA: F811,E501
subscription_id = get_subscription['subscription_id']
subscription_users = paddle_client.list_subscription_users(
subscription_id=subscription_id,
)
for subscription in subscription_users:
assert subscription['subscription_id'] == subscription_id
def test_list_subscription_users_with_plan_id(paddle_client, get_subscription): # NOQA: F811,E501
plan_id = get_subscription['plan_id']
subscription_users = paddle_client.list_subscription_users(plan_id=plan_id)
for subscription in subscription_users:
assert subscription['plan_id'] == plan_id
def test_list_subscription_users_with_state(paddle_client, get_subscription): # NOQA: F811,E501
state = get_subscription['state']
subscription_users = paddle_client.list_subscription_users(state=state)
for subscription in subscription_users:
assert subscription['state'] == state
def test_list_subscription_users_with_page(paddle_client, get_subscription): # NOQA: F811,E501
list_one = paddle_client.list_subscription_users(
results_per_page=1, page=1,
)
list_two = paddle_client.list_subscription_users(
results_per_page=1, page=2,
)
assert list_one != list_two
def test_list_subscription_users_with_results_per_page(paddle_client, get_subscription): # NOQA: F811,E501
list_one = paddle_client.list_subscription_users(
results_per_page=1, page=1,
)
assert len(list_one) == 1
def test_list_subscription_users_invalid_state(paddle_client): # NOQA: F811
with pytest.raises(ValueError) as error:
paddle_client.list_subscription_users(state='test')
error.match('state must be one of active, past due, trialling, paused')
def test_update_subscription(paddle_client, get_subscription): # NOQA: F811
"""
If you get the error:
Unable to find subscription with id 1
You will need to manually enter a subscription_id below.
(this is why it's mocked in the first place, it's a pain sorry)
"""
subscription_id = get_subscription['subscription_id']
    # Can't update passthrough (least destructive) as 'list_subscription_users'
# does not return it in the response
started_at_paused = 'paused_at' in get_subscription
pause = not started_at_paused
response = paddle_client.update_subscription(
subscription_id=subscription_id,
pause=pause,
)
assert response['subscription_id'] == subscription_id
assert isinstance(response['user_id'], int)
assert isinstance(response['plan_id'], int)
assert isinstance(response['next_payment'], dict)
new_subscription_data = paddle_client.list_subscription_users(
subscription_id=subscription_id,
)
new_subscription_data = new_subscription_data[0]
if started_at_paused:
assert 'paused_at' not in new_subscription_data
assert 'paused_from' not in new_subscription_data
assert 'paused_reason' not in new_subscription_data
else:
assert isinstance(new_subscription_data['paused_at'], str)
datetime.strptime(new_subscription_data['paused_at'], '%Y-%m-%d %H:%M:%S') # NOQA: E501
assert isinstance(new_subscription_data['paused_from'], str)
datetime.strptime(new_subscription_data['paused_from'], '%Y-%m-%d %H:%M:%S') # NOQA: E501
assert new_subscription_data['paused_reason'] == 'voluntary'
# Set the pause state back to what is was before the test ran
paddle_client.update_subscription(
subscription_id=subscription_id,
pause=not pause,
)
# Test the change back worked
new_subscription_data = paddle_client.list_subscription_users(
subscription_id=subscription_id,
)
new_subscription_data = new_subscription_data[0]
if started_at_paused:
assert isinstance(new_subscription_data['paused_at'], str)
datetime.strptime(new_subscription_data['paused_at'], '%Y-%m-%d %H:%M:%S') # NOQA: E501
assert isinstance(new_subscription_data['paused_from'], str)
datetime.strptime(new_subscription_data['paused_from'], '%Y-%m-%d %H:%M:%S') # NOQA: E501
assert new_subscription_data['paused_reason'] == 'voluntary'
else:
assert 'paused_at' not in new_subscription_data
assert 'paused_from' not in new_subscription_data
assert 'paused_reason' not in new_subscription_data
def test_update_subscription_invalid_currency(paddle_client): # NOQA: F811
with pytest.raises(ValueError) as error:
paddle_client.update_subscription(
subscription_id=1, currency='test'
)
error.match('currency must be one of USD, GBP, EUR')
@pytest.mark.mocked
def test_cancel_subscription(mocker, paddle_client): # NOQA: F811
"""
This test is mocked as subscriptions must be created manually (see
`Creating a subscription` in CONTRIBUTING.md) as there is no API
to do so
If this test fails it means a change has been made which has affected
the cancel subscription endpoint.
The code now needs to be run directly against Paddle's API at least once to
ensure the new code is working as expected.
Please uncomment the '@pytest.mark.skip()' line for the
'cancel_subscription_no_mock' test to run the the cancel_subscription code
against the Paddle API to check the changes work.
Once the `cancel_subscription_no_mock` test passes please update
the mock below and comment out the function again.
"""
subscription_id = 123
json = {
'subscription_id': subscription_id,
'vendor_id': paddle_client.vendor_id,
'vendor_auth_code': paddle_client.api_key,
}
url = 'https://sandbox-vendors.paddle.com/api/2.0/subscription/users_cancel' # NOQA: E501
method = 'POST'
request = mocker.patch('paddle.paddle.requests.request')
paddle_client.cancel_subscription(
subscription_id=subscription_id,
)
request.assert_called_once_with(url=url, json=json, method=method)
# Comment out '@pytest.mark.skip()' to ensure the cancel_subscription
# code is working as expected
@pytest.mark.skip()
def test_cancel_subscription_no_mock(paddle_client, get_subscription): # NOQA: F811,E501
    subscription_id = get_subscription['subscription_id']
response = paddle_client.cancel_subscription(
subscription_id=subscription_id,
)
assert response is True
|
the-stack_0_1494 | import numpy as np
from scipy.linalg import logm
""" UTILITY FUNCTIONS """
def hat(w):
""" Function takes in a vector of size 3 and returns
its corresponding skew-symmetric matrix """
w1 = w[0]
w2 = w[1]
w3 = w[2]
what = np.array( [ [0,-w3,w2], [w3,0,-w1], [-w2,w1,0] ] )
return what
def unhat(what):
""" Function takes in a skew-symmetric matrix and returns
its corresponding vector """
w1 = what[2,1]
w2 = what[0,2]
w3 = what[1,0]
w = np.array( (w1,w2,w3) )
return w
def qmult(q1,q2):
""" Function takes in quaternions q1 and q2, and performs
quaternion multiplication: q3 = q1*q2 """
v1 = q1[0:3]
s1 = q1[3]
q3 = np.block([ [s1*np.identity(3) + hat(v1), v1[:,np.newaxis] ], [-v1, s1] ]) @ q2
return q3
def qconj(q):
""" Function takes in a quaternion and returns its conjugate """
v = q[0:3]
v = -v
qplus = np.concatenate((v,q[3,np.newaxis]),axis=0)
return qplus
def phi_to_quat(phi):
""" Function takes in a rotation parameterized by
Euler Axis & Angle and returns its corresponding quaternion """
if np.linalg.norm(phi) > 10*np.pi/180:
theta = np.linalg.norm(phi)
r = phi/theta
qvec = r*np.sin(theta/2)
qsca = np.array(np.cos(theta/2))
q = np.hstack((qvec,qsca))
else:
qvec = phi/2
qsca = np.array(1-1/8*np.dot(phi,phi))
q = np.hstack((qvec,qsca))
return q
def quat_to_phi(q):
""" Function takes in a rotation parameterized by
a quaternion and returns its corresponding Euler Axis & Angle """
Q = quat_to_rot(q)
phi = unhat(logm(Q))
return phi
def quat_to_rot(q):
""" Function takes in a rotation parameterized by
a quaternion and returns its corresponding rotation matrix """
v = q[0:3]
s = q[3]
A = np.identity(3) + 2*hat(v) @ (s*np.identity(3) + hat(v))
#A = np.transpose(A)
return A
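""" Worked example (illustrative, not part of the original module): a 90 degree
rotation about the z-axis written as Euler axis*angle, converted to a quaternion
and then to a rotation matrix; the expected matrix is approximately
[[0,-1,0],[1,0,0],[0,0,1]], and q multiplied by its conjugate gives the identity
quaternion [0,0,0,1]. """
if __name__ == "__main__":
    phi = np.array([0.0, 0.0, np.pi/2])
    q = phi_to_quat(phi)
    print(quat_to_rot(q))
    print(qmult(q, qconj(q)))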
""" Below is another way to convert from quaternion to rotation matrix
def quat_to_rot(q):
q1 = q[0]
q2 = q[1]
q3 = q[2]
q4 = q[3]
Q = np.array( [ [0,-q3,+q2], [+q3,0,-q1], [-q2,q1,0] ] )
A = (q4**2 - (q1**2+q2**2+q3**2))*np.identity(3) + 2*np.outer(np.array([q1,q2,q3]), np.array([q1,q2,q3])) - 2*q4 * Q
return A
""" |
the-stack_0_1495 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from job_spider.process import SpiderProcess, WriterProcess
from multiprocessing import Queue
import time
def main():
queue = Queue()
p1 = SpiderProcess(queue)
p2 = WriterProcess(queue)
p1.start()
p2.start()
while p2.is_alive():
if not p1.is_alive():
p1 = SpiderProcess(queue)
p1.start()
time.sleep(1)
p1.terminate()
p2.terminate()
if __name__ == '__main__':
main()
|
the-stack_0_1496 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.bigquery.v2",
manifest={"StandardSqlDataType", "StandardSqlField", "StandardSqlStructType",},
)
class StandardSqlDataType(proto.Message):
r"""The type of a variable, e.g., a function argument. Examples: INT64:
{type_kind="INT64"} ARRAY: {type_kind="ARRAY",
array_element_type="STRING"} STRUCT<x STRING, y ARRAY>:
{type_kind="STRUCT", struct_type={fields=[ {name="x",
type={type_kind="STRING"}}, {name="y", type={type_kind="ARRAY",
array_element_type="DATE"}} ]}}
Attributes:
type_kind (~.standard_sql.StandardSqlDataType.TypeKind):
Required. The top level type of this field.
Can be any standard SQL data type (e.g.,
"INT64", "DATE", "ARRAY").
array_element_type (~.standard_sql.StandardSqlDataType):
The type of the array's elements, if type_kind = "ARRAY".
struct_type (~.standard_sql.StandardSqlStructType):
The fields of this struct, in order, if type_kind =
"STRUCT".
"""
class TypeKind(proto.Enum):
r""""""
TYPE_KIND_UNSPECIFIED = 0
INT64 = 2
BOOL = 5
FLOAT64 = 7
STRING = 8
BYTES = 9
TIMESTAMP = 19
DATE = 10
TIME = 20
DATETIME = 21
GEOGRAPHY = 22
NUMERIC = 23
BIGNUMERIC = 24
ARRAY = 16
STRUCT = 17
type_kind = proto.Field(proto.ENUM, number=1, enum=TypeKind,)
array_element_type = proto.Field(
proto.MESSAGE, number=2, oneof="sub_type", message="StandardSqlDataType",
)
struct_type = proto.Field(
proto.MESSAGE, number=3, oneof="sub_type", message="StandardSqlStructType",
)
class StandardSqlField(proto.Message):
r"""A field or a column.
Attributes:
name (str):
Optional. The name of this field. Can be
absent for struct fields.
type (~.standard_sql.StandardSqlDataType):
Optional. The type of this parameter. Absent
if not explicitly specified (e.g., CREATE
FUNCTION statement can omit the return type; in
this case the output parameter does not have
this "type" field).
"""
name = proto.Field(proto.STRING, number=1)
type = proto.Field(proto.MESSAGE, number=2, message=StandardSqlDataType,)
class StandardSqlStructType(proto.Message):
r"""
Attributes:
fields (Sequence[~.standard_sql.StandardSqlField]):
"""
fields = proto.RepeatedField(proto.MESSAGE, number=1, message=StandardSqlField,)
__all__ = tuple(sorted(__protobuf__.manifest))
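# ----------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the generated module): building the
# ARRAY<STRING> type described in the StandardSqlDataType docstring above.
# ----------------------------------------------------------------------------
if __name__ == "__main__":
    array_of_strings = StandardSqlDataType(
        type_kind=StandardSqlDataType.TypeKind.ARRAY,
        array_element_type=StandardSqlDataType(
            type_kind=StandardSqlDataType.TypeKind.STRING
        ),
    )
    print(StandardSqlField(name="tags", type=array_of_strings))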
|
the-stack_0_1498 | # Time: O(n)
# Space: O(n)
class Solution(object):
def maxSumOfThreeSubarrays(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
n = len(nums)
accu = [0]
for num in nums:
accu.append(accu[-1]+num)
left_pos = [0] * n
total = accu[k]-accu[0]
for i in xrange(k, n):
if accu[i+1]-accu[i+1-k] > total:
left_pos[i] = i+1-k
total = accu[i+1]-accu[i+1-k]
else:
left_pos[i] = left_pos[i-1]
right_pos = [n-k] * n
total = accu[n]-accu[n-k]
for i in reversed(xrange(n-k)):
if accu[i+k]-accu[i] > total:
right_pos[i] = i
total = accu[i+k]-accu[i]
else:
right_pos[i] = right_pos[i+1]
result, max_sum = [], 0
for i in xrange(k, n-2*k+1):
left, right = left_pos[i-1], right_pos[i+k]
total = (accu[i+k]-accu[i]) + \
(accu[left+k]-accu[left]) + \
(accu[right+k]-accu[right])
if total > max_sum:
max_sum = total
result = [left, i, right]
return result
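# Usage sketch (not part of the original solution). This file targets Python 2
# (note xrange); the input below is the standard example for this problem and
# should print [0, 3, 5].
if __name__ == "__main__":
    print(Solution().maxSumOfThreeSubarrays([1, 2, 1, 2, 6, 7, 5, 1], 2))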
|
the-stack_0_1500 | # -*- coding: utf-8 -*-
# This sample demonstrates handling intents from an Alexa skill using the Alexa Skills Kit SDK for Python.
# Please visit https://alexa.design/cookbook for additional examples on implementing slots, dialog management,
# session persistence, api calls, and more.
# This sample is built using the handler classes approach in skill builder.
import logging
import ask_sdk_core.utils as ask_utils
from ask_sdk_core.skill_builder import SkillBuilder
from ask_sdk_core.dispatch_components import AbstractRequestHandler
from ask_sdk_core.dispatch_components import AbstractExceptionHandler
from ask_sdk_core.handler_input import HandlerInput
from ask_sdk_model import Response
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class LaunchRequestHandler(AbstractRequestHandler):
"""Handler for Skill Launch."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_request_type("LaunchRequest")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "Welcome to EduToken, my name is Veronica. What can I do for you? "
return (
handler_input.response_builder
.speak(speak_output)
.ask(speak_output)
.response
)
class CraftonCollegeIntentHandler(AbstractRequestHandler):
"""Handler for CraftonCollegeIntent. """
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("CraftonCollegeIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "With its dedicated professors, ample extracurricular opportunities, supportive staff, and beautiful surroundings, Crafton Hills College is a place where students thrive. "
speak_output += "CHC offers more than 50 majors in the liberal arts and sciences, vocations, and technical studies, and currently serves about 4,500 students."
speak_output += "Professors are experts in their field, and are active in their professions outside of the classroom."
return (
handler_input.response_builder
.speak(speak_output)
.response
)
class FallEnrollmentIntentHandler(AbstractRequestHandler):
"""Handler for FallEnrollmentIntent. """
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("FallEnrollmentIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> response
speak_output = "Depending on your priority level, registration for the Fall 2021 term is open from May 10th through August 15th. "
return (
handler_input.response_builder
.speak(speak_output)
.response
)
class FallStartIntentHandler(AbstractRequestHandler):
"""Handler for FallStartIntent. """
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("FallStartIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> response
speak_output = "The Fall 2021 term commences on August 16th. "
return (
handler_input.response_builder
.speak(speak_output)
.response
)
class FinancialAidAvailableIntentHandler(AbstractRequestHandler):
"""Handler for FinancialAidAvailableIntent. """
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("FinancialAidAvailableIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> response
speak_output = "For the 2021 and 2022 academic year, financial aid applications opened on October 1st, 2020. "
return (
handler_input.response_builder
.speak(speak_output)
.response
)
class FounderIntentHandler(AbstractRequestHandler):
"""Handler for FounderIntent. """
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("FounderIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> response
speak_output = "EduToken was created by Aruna Bisht, Lucas Manning, and Aaron Montano at the UC Berkeley FinTech Bootcamp in June, 2021. "
return (
handler_input.response_builder
.speak(speak_output)
.response
)
class JokeIntentHandler(AbstractRequestHandler):
"""Handler for JokeIntent. """
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("JokeIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> response
speak_output = "I don't mean to brag about my financial skills...but my bank calls me every day to tell me that my debt is outstanding. "
return (
handler_input.response_builder
.speak(speak_output)
.response
)
class OverviewIntentHandler(AbstractRequestHandler):
"""Handler for OverviewIntent. """
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("OverviewIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> response
speak_output = "EduToken is a revolutionary system of redeeming and ensuring educational costs. "
speak_output += "Our goal is to ensure that all of your educational needs are properly accounted for."
return (
handler_input.response_builder
.speak(speak_output)
.response
)
class SummerEnrollmentIntentHandler(AbstractRequestHandler):
"""Handler for SummerEnrollmentIntent. """
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("SummerEnrollmentIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> response
speak_output = "Depending on your priority level, enrollment for the 2021 Summer term begins on April 12th, and ends on May 31st. "
speak_output += "Please visit the Crafton Hills College Admissions page to determine your priority level."
return (
handler_input.response_builder
.speak(speak_output)
.response
)
class SummerSchoolStartIntentHandler(AbstractRequestHandler):
"""Handler for SummerSchoolStartIntent. """
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("SummerSchoolStartIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> response
speak_output = "Summer classes begin June 1st, June 14th, and July 6th. "
return (
handler_input.response_builder
.speak(speak_output)
.response
)
class HelpIntentHandler(AbstractRequestHandler):
"""Handler for Help Intent."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("AMAZON.HelpIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "You can say hello to me! How can I help?"
return (
handler_input.response_builder
.speak(speak_output)
.ask(speak_output)
.response
)
class CancelOrStopIntentHandler(AbstractRequestHandler):
"""Single handler for Cancel and Stop Intent."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return (ask_utils.is_intent_name("AMAZON.CancelIntent")(handler_input) or
ask_utils.is_intent_name("AMAZON.StopIntent")(handler_input))
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "Goodbye!"
return (
handler_input.response_builder
.speak(speak_output)
.response
)
class FallbackIntentHandler(AbstractRequestHandler):
"""Single handler for Fallback Intent."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("AMAZON.FallbackIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
logger.info("In FallbackIntentHandler")
speech = "Hmm, I'm not sure. You can say Hello or Help. What would you like to do?"
reprompt = "I didn't catch that. What can I help you with?"
return handler_input.response_builder.speak(speech).ask(reprompt).response
class SessionEndedRequestHandler(AbstractRequestHandler):
"""Handler for Session End."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_request_type("SessionEndedRequest")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
# Any cleanup logic goes here.
return handler_input.response_builder.response
class IntentReflectorHandler(AbstractRequestHandler):
"""The intent reflector is used for interaction model testing and debugging.
It will simply repeat the intent the user said. You can create custom handlers
for your intents by defining them above, then also adding them to the request
handler chain below.
"""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_request_type("IntentRequest")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
intent_name = ask_utils.get_intent_name(handler_input)
speak_output = "You just triggered " + intent_name + "."
return (
handler_input.response_builder
.speak(speak_output)
# .ask("add a reprompt if you want to keep the session open for the user to respond")
.response
)
class CatchAllExceptionHandler(AbstractExceptionHandler):
"""Generic error handling to capture any syntax or routing errors. If you receive an error
stating the request handler chain is not found, you have not implemented a handler for
the intent being invoked or included it in the skill builder below.
"""
def can_handle(self, handler_input, exception):
# type: (HandlerInput, Exception) -> bool
return True
def handle(self, handler_input, exception):
# type: (HandlerInput, Exception) -> Response
logger.error(exception, exc_info=True)
speak_output = "Sorry, I had trouble doing what you asked. Please try again."
return (
handler_input.response_builder
.speak(speak_output)
.ask(speak_output)
.response
)
# The SkillBuilder object acts as the entry point for your skill, routing all request and response
# payloads to the handlers above. Make sure any new handlers or interceptors you've
# defined are included below. The order matters - they're processed top to bottom.
sb = SkillBuilder()
sb.add_request_handler(LaunchRequestHandler())
sb.add_request_handler(CraftonCollegeIntentHandler())
sb.add_request_handler(FallEnrollmentIntentHandler())
sb.add_request_handler(FallStartIntentHandler())
sb.add_request_handler(FinancialAidAvailableIntentHandler())
sb.add_request_handler(FounderIntentHandler())
sb.add_request_handler(JokeIntentHandler())
sb.add_request_handler(OverviewIntentHandler())
sb.add_request_handler(SummerEnrollmentIntentHandler())
sb.add_request_handler(SummerSchoolStartIntentHandler())
sb.add_request_handler(HelpIntentHandler())
sb.add_request_handler(CancelOrStopIntentHandler())
sb.add_request_handler(FallbackIntentHandler())
sb.add_request_handler(SessionEndedRequestHandler())
sb.add_request_handler(IntentReflectorHandler()) # make sure IntentReflectorHandler is last so it doesn't override your custom intent handlers
sb.add_exception_handler(CatchAllExceptionHandler())
lambda_handler = sb.lambda_handler()
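# ----------------------------------------------------------------------------
# Sketch (not part of the original skill): a new custom intent would follow the
# same pattern as the handlers above -- define a class with can_handle/handle
# and register it with sb.add_request_handler() before IntentReflectorHandler.
# "CampusTourIntent" and its response text are hypothetical examples.
# ----------------------------------------------------------------------------
# class CampusTourIntentHandler(AbstractRequestHandler):
#     def can_handle(self, handler_input):
#         return ask_utils.is_intent_name("CampusTourIntent")(handler_input)
#     def handle(self, handler_input):
#         speak_output = "Campus tours run Monday through Friday at noon."
#         return handler_input.response_builder.speak(speak_output).response
# sb.add_request_handler(CampusTourIntentHandler())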
|
the-stack_0_1501 | import socket
import struct
# UDP socket (SOCK_DGRAM); protocol 0 lets the OS pick the default.
send_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
# Payload: four unsigned bytes packed in network (big-endian) byte order.
data_bytes = struct.pack("!BBBB", 0, 0, 255, 255)
# Header: unsigned byte, two unsigned 32-bit ints, then an unsigned short holding the payload length.
header = struct.pack("!BIIH", 0, 0, 0, len(data_bytes))
# Send header + payload as a single datagram.
message = header + data_bytes
send_sock.sendto(message, ("localhost", 42000)) |
the-stack_0_1502 | # coding: utf-8
import pprint
import re
import six
class DeleteBatchTaskFileRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'instance_id': 'str',
'file_id': 'str'
}
attribute_map = {
'instance_id': 'Instance-Id',
'file_id': 'file_id'
}
def __init__(self, instance_id=None, file_id=None):
"""DeleteBatchTaskFileRequest - a model defined in huaweicloud sdk"""
self._instance_id = None
self._file_id = None
self.discriminator = None
if instance_id is not None:
self.instance_id = instance_id
self.file_id = file_id
@property
def instance_id(self):
"""Gets the instance_id of this DeleteBatchTaskFileRequest.
:return: The instance_id of this DeleteBatchTaskFileRequest.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this DeleteBatchTaskFileRequest.
:param instance_id: The instance_id of this DeleteBatchTaskFileRequest.
:type: str
"""
self._instance_id = instance_id
@property
def file_id(self):
"""Gets the file_id of this DeleteBatchTaskFileRequest.
:return: The file_id of this DeleteBatchTaskFileRequest.
:rtype: str
"""
return self._file_id
@file_id.setter
def file_id(self, file_id):
"""Sets the file_id of this DeleteBatchTaskFileRequest.
:param file_id: The file_id of this DeleteBatchTaskFileRequest.
:type: str
"""
self._file_id = file_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeleteBatchTaskFileRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
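# ----------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the generated SDK module): the
# instance id and file id below are placeholder values, not real resources.
# ----------------------------------------------------------------------------
if __name__ == "__main__":
    request = DeleteBatchTaskFileRequest(instance_id="my-iotda-instance",
                                         file_id="my-batch-task-file-id")
    print(request.to_dict())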
|
the-stack_0_1503 | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.3'
# jupytext_version: 1.0.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
# %autosave 0
# %load_ext autoreload
# %autoreload 2
import os
import logging
import numpy as np
import pandas as pd
import pkg_resources
import seaborn as sns
import matplotlib.pyplot as plt
from math import sqrt
from GIPlot import GIPlot
from crispy.Utils import Utils
from crispy.QCPlot import QCplot
from scipy.stats import spearmanr, skew
from minlib.Utils import density_interpolate
from sklearn.metrics import mean_squared_error
from minlib.Utils import project_score_sample_map
from crispy.CRISPRData import CRISPRDataSet, Library
from crispy.LibRepresentationReport import LibraryRepresentaion
LOG = logging.getLogger("Crispy")
DPATH = pkg_resources.resource_filename("crispy", "data/")
RPATH = pkg_resources.resource_filename("notebooks", "minlib/reports/")
# MinLibCas9 library information
#
mlib = Library.load_library("MinLibCas9.csv.gz", set_index=False)
mlib.index = [f"{i}" for i in mlib["WGE_ID"]]
mlib["sgRNA"] = [s if len(s) == 19 else s[1:-3] for s in mlib["WGE_Sequence"]]
# Assemble raw counts matrix
#
SPATH = pkg_resources.resource_filename("notebooks", "minlib/minlibcas9_screens")
plasmid_counts = pd.read_csv(f"{SPATH}/Minimal_library_output_108.csv", index_col=0).rename(columns=dict(counts="MinLibCas9"))
#
#
lib_report = LibraryRepresentaion(plasmid_counts[["MinLibCas9"]])
pal = dict(MHG_library_v1=QCplot.PAL_DBGD[0], MinLibCas9=QCplot.PAL_DBGD[1])
# Lorenz curves
lib_report.lorenz_curve(palette=pal)
plt.gcf().set_size_inches(2., 2.)
plt.savefig(f"{RPATH}/librepresentation_lorenz_curve.pdf", bbox_inches="tight", dpi=600)
plt.close("all")
# Read-count distribution and skew ratio
plot_df = plasmid_counts["MinLibCas9"].sort_values().reset_index()
skew_ratio = plot_df["MinLibCas9"].quantile([.9, .1])
skew_ratio = skew_ratio[.9] / skew_ratio[.1]
fig, ax = plt.subplots(1, 1, figsize=(2.5, 1.5), dpi=600)
ax.plot(
plot_df.index,
plot_df["MinLibCas9"],
color=pal["MinLibCas9"],
# edgecolor="w",
lw=1,
# s=6,
alpha=.8,
zorder=3,
)
ax.set_xlabel("Ranked sgRNAs")
ax.set_ylabel("Number of reads")
ax.set_xticks([0, plot_df.shape[0] / 2, plot_df.shape[0]])
ax.grid(True, ls="-", lw=0.1, alpha=1.0, zorder=0, axis="y")
annot_text = f"Skew ratio = {skew_ratio:.2f}"
ax.text(
0.95,
0.05,
annot_text,
fontsize=6,
transform=ax.transAxes,
ha="right",
)
plt.savefig(f"{RPATH}/librepresentation_scatter.pdf", bbox_inches="tight", dpi=600)
plt.close("all")
|
the-stack_0_1505 | import torch
import torch.nn as nn
from mmcv.cnn.utils.weight_init import xavier_init
from mmedit.models.registry import COMPONENTS
@COMPONENTS.register_module()
class PlainRefiner(nn.Module):
"""Simple refiner from Deep Image Matting.
Args:
        conv_channels (int): Number of channels produced by the three main
            convolutional layers.
pretrained (str): Name of pretrained model. Default: None.
"""
def __init__(self, conv_channels=64, pretrained=None):
super(PlainRefiner, self).__init__()
self.refine_conv1 = nn.Conv2d(
4, conv_channels, kernel_size=3, padding=1)
self.refine_conv2 = nn.Conv2d(
conv_channels, conv_channels, kernel_size=3, padding=1)
self.refine_conv3 = nn.Conv2d(
conv_channels, conv_channels, kernel_size=3, padding=1)
self.refine_pred = nn.Conv2d(
conv_channels, 1, kernel_size=3, padding=1)
self.relu = nn.ReLU(inplace=True)
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m)
def forward(self, x, raw_alpha):
"""Forward function.
Args:
x (Tensor): The input feature map of refiner.
raw_alpha (Tensor): The raw predicted alpha matte.
Returns:
Tensor: The refined alpha matte.
"""
out = self.relu(self.refine_conv1(x))
out = self.relu(self.refine_conv2(out))
out = self.relu(self.refine_conv3(out))
raw_refine = self.refine_pred(out)
pred_refine = torch.sigmoid(raw_alpha + raw_refine)
return pred_refine
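# Minimal usage sketch (not part of the original module). The 4-channel input is
# assumed to be the merged RGB image concatenated with the coarse alpha, as in the
# usual Deep Image Matting setup; shapes below are illustrative only.
if __name__ == "__main__":
    refiner = PlainRefiner(conv_channels=64)
    refiner.init_weights()
    x = torch.rand(1, 4, 64, 64)          # merged image (3 ch) + raw alpha (1 ch)
    raw_alpha = torch.rand(1, 1, 64, 64)  # coarse alpha prediction
    refined = refiner(x, raw_alpha)       # (1, 1, 64, 64), values in (0, 1)
    print(refined.shape)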
|
the-stack_0_1506 | import tensorflow as tf
def get_loss_and_keys(which, params, ff_means_only=False):
"""
Returns the loss function.
:param which: submodel, must be one of "flux_fractions" or "histograms"
:param params: parameter dictionary
:param ff_means_only: even if aleatoric uncertainties for flux fractions are enabled, only train the means
:return: loss function, list of keys required (apart from true label, which is always assumed to be first input)
"""
if which == "flux_fractions":
loss, loss_keys = get_loss_and_keys_flux_fractions(params.train["ff_loss"],
do_var=params.nn.ff["alea_var"] and not ff_means_only,
do_covar=params.nn.ff["alea_covar"] and not ff_means_only)
elif which == "histograms":
loss, loss_keys = get_loss_and_keys_histograms(params.train["hist_loss"],
smoothing_empl=params.train["hist_pinball_smoothing"])
else:
raise NotImplementedError
return loss, loss_keys
def get_loss_and_keys_flux_fractions(ff_loss_str, do_var=False, do_covar=False):
"""
Returns the loss function for the flux fraction estimation.
:param ff_loss_str: : string specifying histogram loss
:param do_var: estimate aleatoric variances?
:param do_covar: estimate aleatoric covariance matrix?
:return: loss function, list of keys required (apart from true label, which is always assumed to be first input)
"""
assert not (do_var and do_covar), "Either 'do_var' or 'do_covar' should be chosen, not both!"
if do_var or do_covar:
assert ff_loss_str.lower() in ["l2", "mse"], "For flux fraction uncertainty estimation choose 'l2' loss!"
if ff_loss_str.lower() in ["l2", "mse"]:
if do_covar:
loss = max_llh_loss_covar
loss_keys = ["ff_mean", "ff_covar"]
elif do_var:
loss = max_llh_loss_var
loss_keys = ["ff_mean", "ff_logvar"]
else:
loss = tf.keras.losses.mse
loss_keys = ["ff_mean"]
elif ff_loss_str.lower() in ["l1", "mae"]:
loss = tf.keras.losses.mae
loss_keys = ["ff_mean"]
elif ff_loss_str.lower() in ["x-ent", "x_ent"]:
loss = tf.keras.losses.categorical_crossentropy
loss_keys = ["ff_mean"]
else:
raise NotImplementedError
return loss, loss_keys
def get_loss_and_keys_histograms(hist_loss_str, smoothing_empl=None):
"""
Returns the loss function for the SCD histogram estimation.
:param hist_loss_str: string specifying histogram loss
:param smoothing_empl: scalar determining the smoothing for Earth Mover's Pinball loss
:return: loss function, list of keys required (apart from true label, which is always assumed to be first input)
"""
loss_keys = ["hist"]
if hist_loss_str.lower() in ["l2", "mse"]:
def loss(y_true, y_pred): return tf.reduce_mean(tf.keras.losses.mse(y_true, y_pred), 1) # avg. over channels
elif hist_loss_str.lower() in ["l1", "mae"]:
def loss(y_true, y_pred): return tf.reduce_mean(tf.keras.losses.mae(y_true, y_pred), 1) # avg. over channels
elif hist_loss_str.lower() in ["x-ent", "x_ent"]:
def loss(y_true, y_pred): return tf.reduce_mean(tf.keras.losses.categorical_crossentropy(y_true, y_pred), 1)
elif hist_loss_str.lower() in ["em1", "em_1"]:
def loss(y_true, y_pred): return emd_loss(y_true, y_pred, r=1)
elif hist_loss_str.lower() in ["em2", "em_2"]:
def loss(y_true, y_pred): return emd_loss(y_true, y_pred, r=2)
elif hist_loss_str.lower() == "cjs":
loss = cjs_loss
elif hist_loss_str.lower() == "empl":
def loss(y_true, y_pred, tau): return empl(y_true, y_pred, tau, smoothing=smoothing_empl)
loss_keys += ["tau"]
else:
raise NotImplementedError
return loss, loss_keys
############################
# FLUX FRACTION LOSSES
############################
def max_llh_loss_covar(y_true, y_pred, covar, eps=None):
"""
(Neg.) maximum likelihood loss function for a full Gaussian covariance matrix.
:param y_true: label
:param y_pred: prediction
:param covar: uncertainty covariance matrix
:param eps: small number for numerical stability, defaults to tf.keras.backend.epsilon()
:return: max. likelihood loss (up to a constant)
"""
if eps is None:
eps = tf.keras.backend.epsilon()
err = tf.expand_dims(y_pred - y_true, -1)
term1 = tf.squeeze(err * tf.linalg.matmul(tf.linalg.inv(covar), err), -1)
term2 = tf.math.log(eps + tf.linalg.det(covar))
max_llh_loss = (tf.reduce_sum(term1, 1) + term2) / 2.0
return max_llh_loss
def max_llh_loss_var(y_true, y_pred, logvar):
"""
(Neg.) maximum likelihood loss function for a diagonal Gaussian covariance matrix.
:param y_true: label
:param y_pred: prediction
:param logvar: uncertainty log-variances
:return: max. likelihood loss (up to a constant)
"""
err = y_pred - y_true
precision = tf.exp(-logvar)
term1 = err ** 2 * precision
term2 = logvar
max_llh_loss = tf.reduce_sum(term1 + term2, 1) / 2.0
return max_llh_loss
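# Illustration (assumed shapes): for one sample with two flux fractions and unit
# variances (logvar = 0), the loss reduces to half the squared error summed over
# sources, e.g.
#   y_true = tf.constant([[0.2, 0.8]]); y_pred = tf.constant([[0.25, 0.75]])
#   max_llh_loss_var(y_true, y_pred, tf.zeros_like(y_pred))  # -> [0.0025]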
############################
# HISTOGRAM LOSSES
############################
def emd_loss(y_true, y_pred, r=2, weights=None, do_root=False):
"""
Computes the Earth Mover's Distance loss.
Hou, Le, Chen-Ping Yu, and Dimitris Samaras. "Squared Earth Mover's
Distance-based Loss for Training Deep Neural Networks." arXiv:1611.05916.
:param y_true: a 2-D (or 3-D) `Tensor` of the ground truth probability mass functions
:param y_pred: a 2-D (or 3-D) `Tensor` of the estimated p.m.f.-s
:param r: a constant for the r-norm.
:param weights: weight the loss differently for different samples
:param do_root: if True: raise result to the power of "1/r"
`y_true` and `y_pred` are assumed to have equal mass as
\sum^{N}_{i=1} {y_true}_i = \sum^{N}_{i=1} {y_pred}_i
    :return: A 1-D `Tensor` with the per-sample EMD loss (averaged over channels).
"""
ecdf_true = tf.math.cumsum(y_true, axis=1)
ecdf_pred = tf.math.cumsum(y_pred, axis=1)
if weights is None:
weights = tf.ones_like(ecdf_true)
if len(weights.shape) < len(y_true.shape): # if bin-dimension is missing
weights = tf.expand_dims(weights, 1)
if r == 1:
emd = tf.reduce_mean(tf.abs(ecdf_true - ecdf_pred) * weights, axis=1)
elif r == 2:
emd = tf.reduce_mean((ecdf_true - ecdf_pred) ** 2 * weights, axis=1)
if do_root:
emd = tf.sqrt(emd)
else:
emd = tf.reduce_mean(tf.pow(tf.abs(ecdf_true - ecdf_pred) * weights, r), axis=1)
if do_root:
emd = tf.pow(emd, 1 / r)
return tf.reduce_mean(emd, 1) # average over channels
def cjs_loss(y_true, y_pred, eps=1e-10):
"""
Computes the symmetrical discrete cumulative Jensen-Shannon divergence from https://arxiv.org/pdf/1708.07089.pdf
:param y_true: labels
:param y_pred: prediction
:param eps: lower cutoff for logarithm (for numerical stability)
:return CJS loss
"""
cdf_true = tf.cumsum(y_true, axis=1)
cdf_pred = tf.cumsum(y_pred, axis=1)
def accjs(p_, q_):
# if p(i) = 0 then ACCJS(p, q)(i) = 0 since xlog(x) -> 0 as x-> 0
p_ = tf.clip_by_value(p_, eps, 1.0)
return 0.5 * tf.reduce_sum(p_ * tf.math.log(p_ / (0.5 * (p_ + q_))), axis=1)
loss = accjs(cdf_pred, cdf_true) + accjs(cdf_true, cdf_pred)
return tf.reduce_mean(loss, 1) # average over channels
def empl(y_true, y_pred, tau, weights=None, smoothing=0.0):
"""
Compute the Earth Mover's Pinball Loss (arXiv:2106.02051).
:param y_true: label
:param y_pred: prediction
:param tau: quantile levels of interest
:param weights: weight the loss differently for different samples
:param smoothing: scalar >= 0 that determines smoothing of loss function around 0
:return Earth Mover's Pinball Loss
"""
ecdf_true = tf.math.cumsum(y_true, axis=1)
ecdf_pred = tf.math.cumsum(y_pred, axis=1)
delta = ecdf_pred - ecdf_true
# If there is an extra dimension for the channel: tau might need to be expanded
if len(tau.shape) == 2 and len(delta.shape) == 3:
tau = tf.expand_dims(tau, 2)
# Non-smooth C0 loss (default)
if smoothing == 0.0:
mask = tf.cast(tf.greater_equal(delta, tf.zeros_like(delta)), tf.float32) - tau
loss = mask * delta
# Smooth loss
else:
loss = -tau * delta + smoothing * tf.math.softplus(delta / smoothing)
if weights is None:
weights = tf.ones_like(ecdf_true)
if len(weights.shape) < len(y_true.shape): # if bin-dimension is missing
weights = tf.expand_dims(weights, 1)
# avg. the weighted loss over the bins (1) and channel dimension (2)
return tf.reduce_mean(loss * weights, [1, 2])
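# Minimal self-check (not part of the original module). Shapes follow the
# conventions above: histograms are (batch, bins, channels) and tau holds one
# quantile level per sample; the values are illustrative only.
if __name__ == "__main__":
    y_true_demo = tf.constant([[[0.1], [0.6], [0.3]]])   # (1, 3, 1) normalised histogram
    y_pred_demo = tf.constant([[[0.2], [0.5], [0.3]]])
    tau_demo = tf.constant([[0.5]])                       # median quantile, shape (1, 1)
    hist_loss, hist_keys = get_loss_and_keys_histograms("empl", smoothing_empl=0.0)
    print(hist_keys)                                      # ['hist', 'tau']
    print(hist_loss(y_true_demo, y_pred_demo, tau_demo))  # per-sample EMPL, shape (1,)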
|
the-stack_0_1507 | from dataclasses import dataclass
from datetime import datetime
from datetime import timezone
from typing import Any
from telliot_core.datasource import DataSource
from telliot_core.dtypes.datapoint import DataPoint
from telliot_feed_examples.utils.log import get_logger
logger = get_logger(__name__)
@dataclass
class DivaManualSource(DataSource[Any]):
"""DataSource for Diva Protocol manually-entered data."""
reference_asset: str = ""
timestamp: int = 0
def parse_user_val(self) -> float:
"""Parse historical price from user input."""
print(
"Enter price to report for reference asset "
f"{self.reference_asset} at timestamp {self.timestamp}:"
)
data = None
while data is None:
inpt = input()
try:
inpt = float(inpt) # type: ignore
except ValueError:
print("Invalid input. Enter decimal value (float).")
continue
print(f"Submitting value: {inpt}\nPress [ENTER] to confirm.")
_ = input()
data = inpt
return data
async def fetch_new_datapoint(self) -> DataPoint[float]:
"""Update current value with time-stamped value fetched from user input.
Returns:
Current time-stamped value
"""
data = self.parse_user_val()
dt = datetime.fromtimestamp(self.timestamp, tz=timezone.utc)
datapoint = (data, dt)
self.store_datapoint(datapoint)
logger.info(f"Stored price of {self.reference_asset} at {dt}: {data}")
return datapoint
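# Hypothetical usage sketch (interactive: fetch_new_datapoint prompts on stdin).
# The reference asset and timestamp below are placeholder values, not real config.
#   import asyncio
#   source = DivaManualSource(reference_asset="BTC/USD", timestamp=1650000000)
#   price, dt = asyncio.run(source.fetch_new_datapoint())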
|
the-stack_0_1509 | from flaskr.db import get_db
def test_poly(client, app):
client.post("/api/poly", data="2x+x^2-y^2")
with app.app_context():
db = get_db()
count = db.execute("SELECT COUNT(id) FROM polynomials").fetchone()[0]
assert count == 1
with app.app_context():
db = get_db()
count = db.execute("SELECT COUNT(id) FROM p_members").fetchone()[0]
assert count == 3
response = client.get("/api/poly/eval?polynomial_id=1&x=3.0&y=3.0")
assert response.data == b"6.0"
|
the-stack_0_1513 | # coding=utf-8
import ast
from asdl.lang.py3.py3_transition_system import *
from asdl.hypothesis import *
import astor
if __name__ == '__main__':
# read in the grammar specification of Python 2.7, defined in ASDL
asdl_text = open('py3_asdl.simplified.txt').read()
grammar = ASDLGrammar.from_text(asdl_text)
py_code = """pandas.read('file.csv', nrows=100)"""
# get the (domain-specific) python AST of the example Python code snippet
py_ast = ast.parse(py_code)
# convert the python AST into general-purpose ASDL AST used by tranX
asdl_ast = python_ast_to_asdl_ast(py_ast.body[0], grammar)
print('String representation of the ASDL AST: \n%s' % asdl_ast.to_string())
print('Size of the AST: %d' % asdl_ast.size)
# we can also convert the ASDL AST back into Python AST
py_ast_reconstructed = asdl_ast_to_python_ast(asdl_ast, grammar)
# initialize the Python transition parser
parser = Python3TransitionSystem(grammar)
# get the sequence of gold-standard actions to construct the ASDL AST
actions = parser.get_actions(asdl_ast)
    # a hypothesis is a (partial) ASDL AST generated using a sequence of tree-construction actions
hypothesis = Hypothesis()
for t, action in enumerate(actions, 1):
# the type of the action should belong to one of the valid continuing types
# of the transition system
assert action.__class__ in parser.get_valid_continuation_types(hypothesis)
# if it's an ApplyRule action, the production rule should belong to the
# set of rules with the same LHS type as the current rule
if isinstance(action, ApplyRuleAction) and hypothesis.frontier_node:
assert action.production in grammar[hypothesis.frontier_field.type]
p_t = hypothesis.frontier_node.created_time if hypothesis.frontier_node else -1
print('t=%d, p_t=%d, Action=%s' % (t, p_t, action))
hypothesis.apply_action(action)
# get the surface code snippets from the original Python AST,
# the reconstructed AST and the AST generated using actions
# they should be the same
src1 = astor.to_source(py_ast).strip()
src2 = astor.to_source(py_ast_reconstructed).strip()
src3 = astor.to_source(asdl_ast_to_python_ast(hypothesis.tree, grammar)).strip()
assert src1 == src2 == src3 == "pandas.read('file.csv', nrows=100)"
|
the-stack_0_1515 | #!/usr/bin/env python
import sys
from nfbuildwindows import NFBuildWindows
def main():
library_target = 'NFDecoder'
nfbuild = NFBuildWindows()
nfbuild.build_print("Installing Dependencies")
nfbuild.installDependencies(android=True)
# Make our main build artifacts
nfbuild.build_print("C++ Build Start (x86)")
nfbuild.makeBuildDirectory()
nfbuild.generateProject(android=True, android_arm=False)
targets = [library_target]
for target in targets:
nfbuild.buildTarget(target)
nfbuild.build_print("C++ Build Start (arm64)")
nfbuild.makeBuildDirectory()
nfbuild.generateProject(android=False, android_arm=True)
targets = [library_target]
for target in targets:
nfbuild.buildTarget(target)
if __name__ == "__main__":
main()
|
the-stack_0_1516 | import random
from collections import deque
from typing import List, Tuple
from IPython.display import clear_output
import matplotlib.pyplot as plt
import numpy as np
def epsilon(current_episode, num_episodes):
"""
epsilon decays as the current episode gets higher because we want the agent to
explore more in earlier episodes (when it hasn't learned anything)
explore less in later episodes (when it has learned something)
i.e. assume that episode number is directly related to learning
"""
# return 1 - (current_episode/num_episodes)
return .5 * .9**current_episode
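# Illustration of the active schedule above (note that num_episodes is unused by it):
#   epsilon(0, 100) -> 0.5, epsilon(10, 100) -> ~0.17, epsilon(50, 100) -> ~0.003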
def update_q_prime(Qprincipal, Qtarget):
for v, v_ in zip(Qprincipal.model.parameters(), Qtarget.model.parameters()):
v_.data.copy_(v.data)
def plot_episode_rewards(values, title=''):
""" Plot the reward curve and histogram of results over time."""
# Update the window after each episode
clear_output(wait=True)
# Define the figure
f, ax = plt.subplots(nrows=1, ncols=2, figsize=(12,5))
f.suptitle(title)
ax[0].plot(values, label='score per run')
ax[0].axhline(195, c='red', ls='--', label='goal')
ax[0].set_xlabel('Episodes')
ax[0].set_ylabel('Reward')
x = range(len(values))
ax[0].legend()
# Calculate the trend
try:
z = np.polyfit(x, values, 1)
p = np.poly1d(z)
ax[0].plot(x, p(x), "--", label='trend')
except:
print('')
# Plot the histogram of results
ax[1].hist(values[-50:])
ax[1].axvline(195, c='red', label='goal')
ax[1].set_xlabel('Scores per Last 50 Episodes')
ax[1].set_ylabel('Frequency')
ax[1].legend()
plt.show()
class Experience(Tuple):
"""A tuple containing (state, action, reward, done, next_state).
state (Tensor)
action (int)
reward (float)
done (bool)
next_state (Tensor)
"""
class ReplayBuffer(object):
def __init__(self, maxlength: int):
"""
maxlength: max number of tuples to store in the buffer
if there are more tuples than maxlength, pop out the oldest tuples
"""
self.buffer = deque()
self.number: int = 0
self.maxlength: int = maxlength
def __len__(self) -> int:
return self.number
def append(self, experience: Experience):
"""
this function implements appending new experience tuple
        experience: a tuple of the form (s, a, r, done, s')
"""
self.buffer.append(experience)
self.number += 1
def pop(self):
"""
pop out the oldest tuples if self.number > self.maxlength
"""
while self.number > self.maxlength:
self.buffer.popleft()
self.number -= 1
def sample(self, batchsize: int) -> List[Experience]:
"""Samples 'batchsize' experience tuples
Args:
batchsize (int)
Returns:
(List[Experience])
"""
minibatch: List[Experience] = random.sample(self.buffer, batchsize)
return minibatch
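# Minimal usage sketch (not part of the original module): dummy string states stand
# in for Tensors purely for illustration.
if __name__ == "__main__":
    replay = ReplayBuffer(maxlength=2)
    replay.append(("s0", 0, 1.0, False, "s1"))
    replay.append(("s1", 1, 0.0, False, "s2"))
    replay.append(("s2", 0, 0.5, True, "s3"))
    replay.pop()                    # trims the oldest tuple so len(replay) == 2
    print(len(replay), replay.sample(batchsize=2))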
|
the-stack_0_1517 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Unittests for monorail.tracker.issuedetailezt."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import logging
import mock
import mox
import time
import unittest
import settings
from businesslogic import work_env
from proto import features_pb2
from features import hotlist_views
from features import send_notifications
from framework import authdata
from framework import exceptions
from framework import framework_views
from framework import framework_helpers
from framework import urls
from framework import permissions
from framework import profiler
from framework import sorting
from framework import template_helpers
from proto import project_pb2
from proto import tracker_pb2
from proto import user_pb2
from services import service_manager
from services import issue_svc
from services import tracker_fulltext
from testing import fake
from testing import testing_helpers
from tracker import issuedetailezt
from tracker import tracker_constants
from tracker import tracker_helpers
class GetAdjacentIssueTest(unittest.TestCase):
def setUp(self):
self.cnxn = 'fake cnxn'
self.services = service_manager.Services(
config=fake.ConfigService(),
issue=fake.IssueService(),
user=fake.UserService(),
project=fake.ProjectService(),
issue_star=fake.IssueStarService(),
spam=fake.SpamService())
self.services.project.TestAddProject('proj', project_id=789)
self.mr = testing_helpers.MakeMonorailRequest()
self.mr.auth.user_id = 111
self.mr.auth.effective_ids = {111}
self.mr.me_user_id = 111
self.work_env = work_env.WorkEnv(
self.mr, self.services, 'Testing phase')
def testGetAdjacentIssue_PrevIssue(self):
cur_issue = fake.MakeTestIssue(789, 2, 'sum', 'New', 111, issue_id=78902)
next_issue = fake.MakeTestIssue(789, 3, 'sum', 'New', 111, issue_id=78903)
prev_issue = fake.MakeTestIssue(789, 1, 'sum', 'New', 111, issue_id=78901)
self.services.issue.TestAddIssue(cur_issue)
self.services.issue.TestAddIssue(next_issue)
self.services.issue.TestAddIssue(prev_issue)
with self.work_env as we:
we.FindIssuePositionInSearch = mock.Mock(
return_value=[78901, 1, 78903, 3])
actual_issue = issuedetailezt.GetAdjacentIssue(
self.mr, we, cur_issue)
self.assertEqual(prev_issue, actual_issue)
we.FindIssuePositionInSearch.assert_called_once_with(cur_issue)
def testGetAdjacentIssue_NextIssue(self):
cur_issue = fake.MakeTestIssue(789, 2, 'sum', 'New', 111, issue_id=78902)
next_issue = fake.MakeTestIssue(789, 3, 'sum', 'New', 111, issue_id=78903)
prev_issue = fake.MakeTestIssue(789, 1, 'sum', 'New', 111, issue_id=78901)
self.services.issue.TestAddIssue(cur_issue)
self.services.issue.TestAddIssue(next_issue)
self.services.issue.TestAddIssue(prev_issue)
with self.work_env as we:
we.FindIssuePositionInSearch = mock.Mock(
return_value=[78901, 1, 78903, 3])
actual_issue = issuedetailezt.GetAdjacentIssue(
self.mr, we, cur_issue, next_issue=True)
self.assertEqual(next_issue, actual_issue)
we.FindIssuePositionInSearch.assert_called_once_with(cur_issue)
def testGetAdjacentIssue_NotFound(self):
cur_issue = fake.MakeTestIssue(789, 2, 'sum', 'New', 111, issue_id=78902)
prev_issue = fake.MakeTestIssue(789, 1, 'sum', 'New', 111, issue_id=78901)
self.services.issue.TestAddIssue(cur_issue)
self.services.issue.TestAddIssue(prev_issue)
with self.work_env as we:
we.FindIssuePositionInSearch = mock.Mock(
return_value=[78901, 1, 78903, 3])
with self.assertRaises(exceptions.NoSuchIssueException):
issuedetailezt.GetAdjacentIssue(
self.mr, we, cur_issue, next_issue=True)
we.FindIssuePositionInSearch.assert_called_once_with(cur_issue)
def testGetAdjacentIssue_Hotlist(self):
cur_issue = fake.MakeTestIssue(789, 2, 'sum', 'New', 111, issue_id=78902)
next_issue = fake.MakeTestIssue(789, 3, 'sum', 'New', 111, issue_id=78903)
prev_issue = fake.MakeTestIssue(789, 1, 'sum', 'New', 111, issue_id=78901)
self.services.issue.TestAddIssue(cur_issue)
self.services.issue.TestAddIssue(next_issue)
self.services.issue.TestAddIssue(prev_issue)
hotlist = fake.Hotlist('name', 678, owner_ids=[111])
with self.work_env as we:
we.GetIssuePositionInHotlist = mock.Mock(
return_value=[78901, 1, 78903, 3])
actual_issue = issuedetailezt.GetAdjacentIssue(
self.mr, we, cur_issue, hotlist=hotlist, next_issue=True)
self.assertEqual(next_issue, actual_issue)
we.GetIssuePositionInHotlist.assert_called_once_with(
cur_issue, hotlist, self.mr.can, self.mr.sort_spec,
self.mr.group_by_spec)
class FlipperRedirectTest(unittest.TestCase):
def setUp(self):
self.services = service_manager.Services(
config=fake.ConfigService(),
features=fake.FeaturesService(),
issue=fake.IssueService(),
user=fake.UserService(),
project=fake.ProjectService())
self.project = self.services.project.TestAddProject(
'proj', project_id=987, committer_ids=[111])
self.next_servlet = issuedetailezt.FlipperNext(
'req', 'res', services=self.services)
self.prev_servlet = issuedetailezt.FlipperPrev(
'req', 'res', services=self.services)
self.list_servlet = issuedetailezt.FlipperList(
'req', 'res', services=self.services)
mr = testing_helpers.MakeMonorailRequest(project=self.project)
mr.local_id = 123
mr.me_user_id = 111
self.next_servlet.mr = mr
self.prev_servlet.mr = mr
self.list_servlet.mr = mr
self.fake_issue_1 = fake.MakeTestIssue(987, 123, 'summary', 'New', 111,
project_name='rutabaga')
self.services.issue.TestAddIssue(self.fake_issue_1)
self.fake_issue_2 = fake.MakeTestIssue(987, 456, 'summary', 'New', 111,
project_name='rutabaga')
self.services.issue.TestAddIssue(self.fake_issue_2)
self.fake_issue_3 = fake.MakeTestIssue(987, 789, 'summary', 'New', 111,
project_name='potato')
self.services.issue.TestAddIssue(self.fake_issue_3)
self.next_servlet.redirect = mock.Mock()
self.prev_servlet.redirect = mock.Mock()
self.list_servlet.redirect = mock.Mock()
@mock.patch('tracker.issuedetailezt.GetAdjacentIssue')
def testFlipperNext(self, patchGetAdjacentIssue):
patchGetAdjacentIssue.return_value = self.fake_issue_2
self.next_servlet.mr.GetIntParam = mock.Mock(return_value=None)
self.next_servlet.get(project_name='proj', viewed_username=None)
self.next_servlet.mr.GetIntParam.assert_called_once_with('hotlist_id')
patchGetAdjacentIssue.assert_called_once()
self.next_servlet.redirect.assert_called_once_with(
'/p/rutabaga/issues/detail?id=456')
@mock.patch('tracker.issuedetailezt.GetAdjacentIssue')
def testFlipperNext_Hotlist(self, patchGetAdjacentIssue):
patchGetAdjacentIssue.return_value = self.fake_issue_3
self.next_servlet.mr.GetIntParam = mock.Mock(return_value=123)
# TODO(jeffcarp): Mock hotlist_id param on path here.
self.next_servlet.get(project_name='proj', viewed_username=None)
self.next_servlet.mr.GetIntParam.assert_called_with('hotlist_id')
self.next_servlet.redirect.assert_called_once_with(
'/p/potato/issues/detail?id=789')
@mock.patch('tracker.issuedetailezt.GetAdjacentIssue')
def testFlipperPrev(self, patchGetAdjacentIssue):
patchGetAdjacentIssue.return_value = self.fake_issue_2
self.next_servlet.mr.GetIntParam = mock.Mock(return_value=None)
self.prev_servlet.get(project_name='proj', viewed_username=None)
self.prev_servlet.mr.GetIntParam.assert_called_with('hotlist_id')
patchGetAdjacentIssue.assert_called_once()
self.prev_servlet.redirect.assert_called_once_with(
'/p/rutabaga/issues/detail?id=456')
@mock.patch('tracker.issuedetailezt.GetAdjacentIssue')
def testFlipperPrev_Hotlist(self, patchGetAdjacentIssue):
patchGetAdjacentIssue.return_value = self.fake_issue_3
self.prev_servlet.mr.GetIntParam = mock.Mock(return_value=123)
# TODO(jeffcarp): Mock hotlist_id param on path here.
self.prev_servlet.get(project_name='proj', viewed_username=None)
self.prev_servlet.mr.GetIntParam.assert_called_with('hotlist_id')
self.prev_servlet.redirect.assert_called_once_with(
'/p/potato/issues/detail?id=789')
@mock.patch('tracker.issuedetailezt._ComputeBackToListURL')
def testFlipperList(self, patch_ComputeBackToListURL):
patch_ComputeBackToListURL.return_value = '/p/test/issues/list'
self.list_servlet.mr.GetIntParam = mock.Mock(return_value=None)
self.list_servlet.get()
self.list_servlet.mr.GetIntParam.assert_called_with('hotlist_id')
patch_ComputeBackToListURL.assert_called_once()
self.list_servlet.redirect.assert_called_once_with(
'/p/test/issues/list')
@mock.patch('tracker.issuedetailezt._ComputeBackToListURL')
def testFlipperList_Hotlist(self, patch_ComputeBackToListURL):
patch_ComputeBackToListURL.return_value = '/p/test/issues/list'
self.list_servlet.mr.GetIntParam = mock.Mock(return_value=123)
self.list_servlet.get()
self.list_servlet.mr.GetIntParam.assert_called_with('hotlist_id')
self.list_servlet.redirect.assert_called_once_with(
'/p/test/issues/list')
class ShouldShowFlipperTest(unittest.TestCase):
def setUp(self):
self.cnxn = 'fake cnxn'
def VerifyShouldShowFlipper(
self, expected, query, sort_spec, can, create_issues=0):
"""Instantiate a _Flipper and check if makes a pipeline or not."""
services = service_manager.Services(
config=fake.ConfigService(),
issue=fake.IssueService(),
project=fake.ProjectService(),
user=fake.UserService())
project = services.project.TestAddProject(
'proj', project_id=987, committer_ids=[111])
mr = testing_helpers.MakeMonorailRequest(project=project)
mr.query = query
mr.sort_spec = sort_spec
mr.can = can
mr.project_name = project.project_name
mr.project = project
for idx in range(create_issues):
_local_id, _ = services.issue.CreateIssue(
self.cnxn, services, project.project_id,
'summary_%d' % idx, 'status', 111, [], [], [], [], 111,
'description_%d' % idx)
self.assertEqual(expected, issuedetailezt._ShouldShowFlipper(mr, services))
def testShouldShowFlipper_RegularSizedProject(self):
# If the user is looking for a specific issue, no flipper.
self.VerifyShouldShowFlipper(
False, '123', '', tracker_constants.OPEN_ISSUES_CAN)
self.VerifyShouldShowFlipper(False, '123', '', 5)
self.VerifyShouldShowFlipper(
False, '123', 'priority', tracker_constants.OPEN_ISSUES_CAN)
# If the user did a search or sort or all in a small can, show flipper.
self.VerifyShouldShowFlipper(
True, 'memory leak', '', tracker_constants.OPEN_ISSUES_CAN)
self.VerifyShouldShowFlipper(
True, 'id=1,2,3', '', tracker_constants.OPEN_ISSUES_CAN)
# Any can other than 1 or 2 is doing a query and so it should have a
# failry narrow result set size. 5 is issues starred by me.
self.VerifyShouldShowFlipper(True, '', '', 5)
self.VerifyShouldShowFlipper(
True, '', 'status', tracker_constants.OPEN_ISSUES_CAN)
# In a project without a huge number of issues, still show the flipper even
# if there was no specific query.
self.VerifyShouldShowFlipper(
True, '', '', tracker_constants.OPEN_ISSUES_CAN)
def testShouldShowFlipper_LargeSizedProject(self):
settings.threshold_to_suppress_prev_next = 1
# In a project that has tons of issues, save time by not showing the
# flipper unless there was a specific query, sort, or can.
self.VerifyShouldShowFlipper(
False, '', '', tracker_constants.ALL_ISSUES_CAN, create_issues=3)
self.VerifyShouldShowFlipper(
False, '', '', tracker_constants.OPEN_ISSUES_CAN, create_issues=3)
|
the-stack_0_1518 | """ The list of words used by DPG """
words = [ "and", "ask", "ass", "ape", "ate", "axe", "air", "aim", "ana", "awe", "act", "add", "age", "all", "ant",
"bat", "ban", "bar", "bed", "bee", "bet", "bit", "bug", "bob", "bot", "boy", "bud", "but",
"cab", "can", "cap", "cat", "car", "cog", "con", "cop", "cot", "cow", "coy", "cub", "cut",
"dad", "dam", "dan", "day", "den", "did", "dig", "dip", "doc", "dog", "don", "dot", "dry", "dug",
"ear", "eat", "egg", "ego", "elf", "elk", "elm", "end", "eye", "eve",
"fad", "fan", "far", "fat", "fax", "fig", "fit", "fix", "fly", "few", "foe", "fog", "for", "fur",
"gag", "gap", "gel", "gem", "get", "god", "goo", "got", "gum", "gun", "gut", "guy", "gym",
"hot", "how", "has", "had", "ham", "hat", "him", "her", "hit", "hop",
"ice", "icy", "ill", "ink", "inn", "ion", "its", "ivy",
"jam", "jar", "jaw", "jay", "jet", "jim", "joe", "jog", "jot", "joy", "jug",
"keg", "ken", "key", "kid", "kim", "kit", "kin",
"lab", "lad", "lap", "law", "lie", "lee", "let", "lip", "lob", "log", "lot", "low", "lug",
"mac", "mag", "map", "man", "mat", "max", "meg", "men", "met", "mom", "moo", "mop", "mow", "mud", "mug", "mut",
"nab", "nag", "nap", "net", "new", "nip", "nod", "not", "now", "nun", "nut",
"oak", "oat", "oar", "off", "oil", "old", "one", "our", "out", "own",
"pan", "pal", "pam", "pat", "pea", "pen", "pet", "pig", "pit", "pot",
"rag", "ray", "run", "ram", "ran", "rap", "rat", "rig", "rip", "rob", "ron", "rot",
"sad", "sag", "sam", "sat", "say", "see", "sex", "set", "she", "shy", "sin", "sir", "sit", "sky", "soy", "sun",
"tan", "tap", "tar", "tea", "ted", "too", "the", "tim", "tip", "toe", "tom", "toy",
"wag", "was", "wax", "way", "web", "wee", "wet", "why", "wig", "win", "wow", "won",
"yak", "yam", "yap", "yen", "yep", "yes", "yet", "yew", "you", "yum",
"zag", "zig", "zit", "zap", "zip", "zoo" ]
|
the-stack_0_1519 | import logging
from typing import List, Optional, Union, Tuple
from venidium.types.blockchain_format.program import Program, SerializedProgram
from venidium.types.generator_types import BlockGenerator, GeneratorArg, GeneratorBlockCacheInterface, CompressorArg
from venidium.util.ints import uint32, uint64
from venidium.wallet.puzzles.load_clvm import load_clvm
from venidium.wallet.puzzles.rom_bootstrap_generator import get_generator
GENERATOR_MOD = get_generator()
DECOMPRESS_BLOCK = load_clvm("block_program_zero.clvm", package_or_requirement="venidium.wallet.puzzles")
DECOMPRESS_PUZZLE = load_clvm("decompress_puzzle.clvm", package_or_requirement="venidium.wallet.puzzles")
# DECOMPRESS_CSE = load_clvm("decompress_coin_spend_entry.clvm", package_or_requirement="venidium.wallet.puzzles")
DECOMPRESS_CSE_WITH_PREFIX = load_clvm(
"decompress_coin_spend_entry_with_prefix.clvm", package_or_requirement="venidium.wallet.puzzles"
)
log = logging.getLogger(__name__)
def create_block_generator(
generator: SerializedProgram, block_heights_list: List[uint32], generator_block_cache: GeneratorBlockCacheInterface
) -> Optional[BlockGenerator]:
"""`create_block_generator` will returns None if it fails to look up any referenced block"""
generator_arg_list: List[GeneratorArg] = []
for i in block_heights_list:
previous_generator = generator_block_cache.get_generator_for_block_height(i)
if previous_generator is None:
log.error(f"Failed to look up generator for block {i}. Ref List: {block_heights_list}")
return None
generator_arg_list.append(GeneratorArg(i, previous_generator))
return BlockGenerator(generator, generator_arg_list)
def create_generator_args(generator_ref_list: List[SerializedProgram]) -> Program:
"""
`create_generator_args`: The format and contents of these arguments affect consensus.
"""
gen_ref_list = [bytes(g) for g in generator_ref_list]
return Program.to([gen_ref_list])
def create_compressed_generator(
original_generator: CompressorArg,
compressed_cse_list: List[List[Union[List[uint64], List[Union[bytes, None, Program]]]]],
) -> BlockGenerator:
"""
Bind the generator block program template to a particular reference block,
template bytes offsets, and SpendBundle.
"""
start = original_generator.start
end = original_generator.end
program = DECOMPRESS_BLOCK.curry(
DECOMPRESS_PUZZLE, DECOMPRESS_CSE_WITH_PREFIX, Program.to(start), Program.to(end), compressed_cse_list
)
generator_arg = GeneratorArg(original_generator.block_height, original_generator.generator)
return BlockGenerator(program, [generator_arg])
def setup_generator_args(self: BlockGenerator) -> Tuple[SerializedProgram, Program]:
args = create_generator_args(self.generator_refs())
return self.program, args
def run_generator(self: BlockGenerator, max_cost: int) -> Tuple[int, SerializedProgram]:
program, args = setup_generator_args(self)
return GENERATOR_MOD.run_safe_with_cost(max_cost, program, args)
def run_generator_unsafe(self: BlockGenerator, max_cost: int) -> Tuple[int, SerializedProgram]:
"""This mode is meant for accepting possibly soft-forked transactions into the mempool"""
program, args = setup_generator_args(self)
return GENERATOR_MOD.run_with_cost(max_cost, program, args)
|