repo_name (stringlengths 5..100) | path (stringlengths 4..254) | copies (stringlengths 1..5) | size (stringlengths 4..7) | content (stringlengths 681..1M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000..9,223,298,349B) | line_mean (float64, 3.5..100) | line_max (int64, 15..1k) | alpha_frac (float64, 0.25..0.97) | autogenerated (bool, 1 class) | ratio (float64, 1.5..8.15) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
JasonKessler/scattertext | demo_moral_foundations.py | 1 | 1709 | import scattertext as st
convention_df = st.SampleCorpora.ConventionData2012.get_data()
moral_foundations_feats = st.FeatsFromMoralFoundationsDictionary()
corpus = st.CorpusFromPandas(convention_df,
category_col='party',
text_col='text',
nlp=st.whitespace_nlp_with_sentences,
feats_from_spacy_doc=moral_foundations_feats).build()
cohens_d_scorer = st.CohensD(corpus).use_metadata()
term_scorer = cohens_d_scorer.set_categories('democrat', ['republican'])
mfd_df = term_scorer.get_score_df()
print(mfd_df.head())
mfd_df.to_csv('demo_moral_foundations.csv')
print('See demo_moral_foundations.csv for the output.')
html = st.produce_frequency_explorer(corpus,
category='democrat',
category_name='Democratic',
not_category_name='Republican',
metadata=convention_df['speaker'],
use_non_text_features=True,
use_full_doc=True,
term_scorer=st.CohensD(corpus).use_metadata(),
grey_threshold=0,
width_in_pixels=1000,
topic_model_term_lists=moral_foundations_feats.get_top_model_term_lists(),
metadata_descriptions=moral_foundations_feats.get_definitions())
fn = 'demo_moral_foundations.html'
with open(fn, 'wb') as out:
out.write(html.encode('utf-8'))
print('Open ./%s in Chrome.' % (fn))
| apache-2.0 | 6,000,145,424,736,266,000 | 50.787879 | 111 | 0.529549 | false | 3.983683 | false | false | false |
krisaju95/NewsArticleClustering | module7_skMeansClustering.py | 1 | 7438 | import pickle
import numpy as np
import pandas as pd
import os
import math
path = "C:/Users/hp/Desktop/FINAL YEAR PROJECT/S8/"
D = set()
A = []
words = set()
dataFrame2 = pickle.load( open(os.path.join(path, 'Feature Set','dataFrame2.p'), "rb" ))
dataFrame3 = pickle.load( open(os.path.join(path, 'Feature Set','dataFrame3.p'), "rb" ))
cosineSimilarityMatrix = pickle.load( open(os.path.join(path, 'KMeansClustering','dataFrame4.p'), "rb" ))
wordSetSize = len(dataFrame3.columns)
numberOfDocuments = len(dataFrame3.index)
m = 1
centroids = pickle.load( open(os.path.join(path, 'KMeansClustering','initialCentroids.p'), "rb" ))
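# dataFrame5 holds one row per document: its assigned cluster ("ClusterID") and its
# cosine similarity to that cluster's centroid ("maxSimilarityValue").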
dataFrame5 = pd.DataFrame(np.zeros(numberOfDocuments).reshape(numberOfDocuments,1))
clusters = []
previousClusters = []
k = len(centroids.index)
centroidCosineSimilarity = pd.DataFrame(np.zeros(shape = (numberOfDocuments , k)).reshape(numberOfDocuments , k))
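# centroidCosineSimilarity[row, column] holds the cosine similarity between document `row` and centroid `column`.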
# Check if the newly found clusters are the same as the previously found clusters
def convergenceCase():
i =0
if previousClusters == []:
return False
for cluster in clusters:
if cluster != previousClusters[i]:
return False
else:
i = i + 1
return True
# Given two documents, calculate their cosine similarity
def cosineSimilarity(value1 , value2):
d1 = 0
d2 = 0
dotProduct = 0
v1 = value1.as_matrix()
v2 = value2.as_matrix()
document1 = np.square(v1)
document2 = np.square(v2)
dotProduct = np.dot(v1 , v2)
d1 = math.sqrt( document1.sum() )
d2 = math.sqrt( document2.sum() )
if d1 * d2 == 0:
return 0
cosineSimilarityValue = dotProduct/(d1*d2)
return cosineSimilarityValue
# Find the most similar centroid for each document in the dataset
def findMostSimilarCentroids():
mostSimilarValue = 0
mostSimilarCentroid = 0
for row in dataFrame5.index:
mostSimilarValue = 0
mostSimilarCentroid = 0
for column in centroidCosineSimilarity.columns:
if centroidCosineSimilarity.ix[row , column] > mostSimilarValue:
mostSimilarValue = centroidCosineSimilarity.ix[row , column]
mostSimilarCentroid = column
dataFrame5.ix[row , "ClusterID"] = mostSimilarCentroid
dataFrame5.ix[row , "maxSimilarityValue"] = mostSimilarValue
# Initialize the set D with all the documents from the dataset
def initializeSetD():
for column in cosineSimilarityMatrix.columns:
D.add(column)
# Create the initial set of clusters with k empty lists, each empty list being a cluster
def initializeClusters():
global clusters
clusters = []
for i in range(k):
clusters.append([])
# Initialize a dataframe for the centroid vectors with zero values
def initializeCentroids():
for row in centroids.index:
for word in dataFrame3.columns:
centroids.ix[row , word] = 0
# Find the new centroids for each cluster once the data has been updated
def calculateNewCentroids():
global centroids
initializeCentroids()
clusterID = 0
    clusterSizes = [0] * k  # one document counter per cluster
dataFrame3Matrix = dataFrame3.as_matrix()
centroidsMatrix = centroids.as_matrix()
centroidColumns = centroids.columns
for row in dataFrame5.index:
clusterID = dataFrame5.ix[row , "ClusterID"]
clusterSizes[int(clusterID)] = clusterSizes[int(clusterID)] + 1
centroidsMatrix[int(clusterID)] = np.add(centroidsMatrix[int(clusterID)] , dataFrame3Matrix[row])
for row in centroids.index:
centroidsMatrix[row] = np.divide(centroidsMatrix[row] , float(clusterSizes[row]))
centroids = pd.DataFrame(centroidsMatrix)
centroids.columns = centroidColumns
# Create a dataframe with cosine similarity values for all documents with each of the centroids
def calculateCosineSimilarity():
for row in range(numberOfDocuments):
document1 = dataFrame3.loc[row , :]
for column in range(k):
document2 = centroids.loc[column , :]
centroidCosineSimilarity.ix[row , column] = cosineSimilarity(document1 , document2)
# Based on the data in dataFrame5, place each document in its respective cluster
def generateClusters():
clusterID = 0
initializeClusters()
for row in dataFrame5.index:
clusterID = int(dataFrame5.ix[row , "ClusterID"])
clusters[clusterID].append(row)
# Find the centroid with maximum similarity for a given document and return the clusterID along with the similarity value
def findClosestCluster(row):
maxSimilarityValue = 0
clusterID = 0
for centroid in centroidCosineSimilarity.columns:
if centroidCosineSimilarity.ix[row , centroid] > maxSimilarityValue:
maxSimilarityValue = centroidCosineSimilarity.ix[row , centroid]
clusterID = centroid
return clusterID , maxSimilarityValue
# After the centroids move, reassign each document to its closest centroid if its current one is no longer the best match, and refresh the stored similarity value
def updateCentroidData():
clusterID = 0
newSimilarityValue = 0
for row in dataFrame5.index:
clusterID = int(dataFrame5.ix[row , "ClusterID"])
if centroidCosineSimilarity.ix[row , clusterID] < dataFrame5.ix[row , "maxSimilarityValue"]:
clusterID , newSimilarityValue = findClosestCluster(row)
dataFrame5.ix[row , "maxSimilarityValue"] = newSimilarityValue
dataFrame5.ix[row , "ClusterID"] = clusterID
else:
dataFrame5.ix[row , "maxSimilarityValue"] = centroidCosineSimilarity.ix[row , clusterID]
# Main function to perform clustering on the dataset
def skMeansClustering():
global previousClusters
print "Performing Spherical K-Means Clustering"
calculateCosineSimilarity()
findMostSimilarCentroids()
generateClusters()
for i in range(50):
calculateNewCentroids()
calculateCosineSimilarity()
updateCentroidData()
generateClusters()
#print dataFrame5
if convergenceCase():
break
else:
print "Clustering iteration " , i + 1
#print centroidCosineSimilarity
previousClusters = list(clusters)
print "Converged in ", i , " iteration(s)"
print "Clusters have been generated"
print "Saving data in DataFrame5 as a pickle package and as a CSV"
dataFrame5.to_pickle(os.path.join(path, 'KMeansClustering','dataFrame5.p'))
dataFrame5.to_csv(os.path.join(path, 'KMeansClustering','dataFrame5.csv'))
print "DataFrame5 has been saved"
skMeansClustering() | gpl-3.0 | 1,653,358,669,395,767,000 | 37.365079 | 330 | 0.607287 | false | 4.183352 | false | false | false |
rickyrem/garrulous-api | model/Database.py | 1 | 2228 | # Garrulous API
# Authors: Michael Pierre and Richard Meyers
"""
Copyright (C) 2015
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sqlite3
import os
import logging
import pprint
class Database(object):
def __init__(self):
super(Database, self).__init__()
base_dir = os.path.dirname(os.path.abspath(__file__))
db_path = os.path.join(os.path.dirname(base_dir), "database")
db_path = os.path.join(db_path, "garrulous.db")
self.conn = sqlite3.connect(db_path)
self.db_cursor = self.conn.cursor()
def write(self, sql, params=()):
"""
Use this method for queries that do not return rows.
:param sql:
:return:
"""
try:
with self.conn:
self.conn.execute(sql, params)
return True
except sqlite3.IntegrityError:
print "Could not run sql: " + sql
return False
def query(self, sql, params=()):
"""
Only use this when a query returns rows.
:param sql:
:return:
"""
try:
self.db_cursor.execute(sql, params)
return self.db_cursor.fetchall()
except sqlite3.IntegrityError:
print "Could not run sql: " + sql
return False
def queryOne(self, sql, params=()):
"""
Only use this when a query returns rows.
:param sql:
:return:
"""
try:
self.db_cursor.execute(sql, params)
return self.db_cursor.fetchone()
except sqlite3.IntegrityError:
print "Could not run sql: " + sql
return False
| apache-2.0 | 6,288,271,877,029,973,000 | 28.315789 | 69 | 0.614901 | false | 4.125926 | false | false | false |
jreese/euler | python/problem26.py | 1 | 3006 | import bigfloat
from multiprocessing import Pool
import sys
dmin = 1
dmax = 1000
precision = 8192
fuzz = 5
min_repeats = 3
workers = 1
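# NOTE: multiprocessing.Pool and the `workers` setting are not used below; this
# version runs the search in a single process.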
def is_repeating(substr, whole_string):
p = 0
repeats = 0
remaining = whole_string
while remaining:
if len(remaining) >= len(substr):
if remaining.startswith(substr):
repeats += 1
else:
return 0
else:
if substr[:len(remaining)] == remaining:
repeats += 1
else:
return 0
remaining = remaining[len(substr):]
return repeats
def longest_repeat(d):
context = bigfloat.precision(precision)
result_float = bigfloat.div(1, d, context=context)
result = str(result_float)[2:].strip('0')[:-fuzz]
result_len = len(result)
#print "d = {0}, result = {1}".format(d, result)
longest = ''
longest_len = 0
found = set()
for i in range(result_len):
remaining = result[i:]
for k in range(i+1, result_len):
substr = result[i:k]
substr_len = len(substr)
if substr == '0' * substr_len:
continue
new_substr = True
for f in found:
if substr == f:
new_substr = False
elif is_repeating(f, substr):
new_substr = False
if not new_substr:
continue
#print "new substring {0}".format(substr)
repeats = is_repeating(substr, remaining)
#print "substring {0} repeats {1} times".format(substr, repeats)
if repeats >= min_repeats:
#print "found repeating substring {0} (occurred {1} times)".format(substr, repeats, i=i, k=k)
found.add(substr)
if longest_len < substr_len:
#print "new longest substr!"
longest = substr
longest_len = substr_len
if remaining[1:] == remaining[1] * len(remaining[1:]):
#print "remaining string is all the same"
break
if found:
#print "Already found repeating substrings, short-circuiting"
break
if remaining == remaining[0] * len(remaining):
#print "remaining string is all the same"
break
if longest:
#print "longest substring for d = {0} is {1}".format(d, longest)
pass
return longest
longest_len = 0
longest_substr = ''
longest_d = 0
for d in range(dmin, dmax):
sys.stdout.write('.')
if d % 50 == 0:
sys.stdout.write("%d\n" % d)
sys.stdout.flush()
substr = longest_repeat(d)
substr_len = len(substr)
if substr_len > longest_len:
longest_len = substr_len
longest_d = d
longest_substr = substr
print ""
print ("longest substr: d = {0}, len = {1}, substr = {2}"
"".format(longest_d, longest_len, longest_substr))
| mit | 2,244,327,258,389,847,000 | 23.842975 | 109 | 0.528277 | false | 3.960474 | false | false | false |
alipsgh/tornado | streams/readers/arff_reader.py | 1 | 3091 | """
The Tornado Framework
By Ali Pesaranghader
University of Ottawa, Ontario, Canada
E-mail: apesaran -at- uottawa -dot- ca / alipsgh -at- gmail -dot- com
"""
import re
from data_structures.attribute import Attribute
from dictionary.tornado_dictionary import TornadoDic
class ARFFReader:
"""This class is used to read a .arff file."""
@staticmethod
def read(file_path):
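        # Returns a (labels, attributes, records) triple: the possible class labels taken
        # from the last attribute, the remaining Attribute objects, and the parsed data rows.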
labels = []
attributes = []
attributes_min_max = []
records = []
data_flag = False
reader = open(file_path, "r")
for line in reader:
if line.strip() == '':
continue
if line.startswith("@attribute") or line.startswith("@ATTRIBUTE"):
line = line.strip('\n\r\t')
line = line.split(' ')
attribute_name = line[1]
attribute_value_range = line[2]
attribute = Attribute()
attribute.set_name(attribute_name)
if attribute_value_range.lower() in ['numeric', 'real', 'integer']:
attribute_type = TornadoDic.NUMERIC_ATTRIBUTE
attribute_value_range = []
attributes_min_max.append([0, 0])
else:
attribute_type = TornadoDic.NOMINAL_ATTRIBUTE
attribute_value_range = attribute_value_range.strip('{}').replace("'", "")
attribute_value_range = attribute_value_range.split(',')
attributes_min_max.append([None, None])
attribute.set_type(attribute_type)
attribute.set_possible_values(attribute_value_range)
attributes.append(attribute)
elif line.startswith("@data") or line.startswith("@DATA"):
data_flag = True
labels = attributes[len(attributes) - 1].POSSIBLE_VALUES
attributes.pop(len(attributes) - 1)
continue
elif data_flag is True:
line = re.sub('\s+', '', line)
elements = line.split(',')
for i in range(0, len(elements) - 1):
if attributes[i].TYPE == TornadoDic.NUMERIC_ATTRIBUTE:
elements[i] = float(elements[i])
min_value = attributes_min_max[i][0]
max_value = attributes_min_max[i][1]
if elements[i] < min_value:
min_value = elements[i]
elif elements[i] > max_value:
max_value = elements[i]
attributes_min_max[i] = [min_value, max_value]
records.append(elements)
for i in range(0, len(attributes)):
if attributes[i].TYPE == TornadoDic.NUMERIC_ATTRIBUTE:
attributes[i].set_bounds_values(attributes_min_max[i][0], attributes_min_max[i][1])
return labels, attributes, records
| mit | -7,130,011,598,677,469,000 | 37.126582 | 99 | 0.505015 | false | 4.441092 | false | false | false |
nafitzgerald/allennlp | allennlp/models/simple_tagger.py | 1 | 7647 | from typing import Dict, Optional
import numpy
from overrides import overrides
import torch
from torch.nn.modules.linear import Linear
import torch.nn.functional as F
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits
from allennlp.training.metrics import CategoricalAccuracy
@Model.register("simple_tagger")
class SimpleTagger(Model):
"""
This ``SimpleTagger`` simply encodes a sequence of text with a stacked ``Seq2SeqEncoder``, then
predicts a tag for each token in the sequence.
Parameters
----------
vocab : ``Vocabulary``, required
A Vocabulary, required in order to compute sizes for input/output projections.
text_field_embedder : ``TextFieldEmbedder``, required
Used to embed the ``tokens`` ``TextField`` we get as input to the model.
stacked_encoder : ``Seq2SeqEncoder``
The encoder (with its own internal stacking) that we will use in between embedding tokens
and predicting output tags.
initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
Used to initialize the model parameters.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training.
"""
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
stacked_encoder: Seq2SeqEncoder,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super(SimpleTagger, self).__init__(vocab, regularizer)
self.text_field_embedder = text_field_embedder
self.num_classes = self.vocab.get_vocab_size("labels")
self.stacked_encoder = stacked_encoder
self.tag_projection_layer = TimeDistributed(Linear(self.stacked_encoder.get_output_dim(),
self.num_classes))
if text_field_embedder.get_output_dim() != stacked_encoder.get_input_dim():
raise ConfigurationError("The output dimension of the text_field_embedder must match the "
"input dimension of the phrase_encoder. Found {} and {}, "
"respectively.".format(text_field_embedder.get_output_dim(),
stacked_encoder.get_input_dim()))
self.metrics = {
"accuracy": CategoricalAccuracy(),
"accuracy3": CategoricalAccuracy(top_k=3)
}
initializer(self)
@overrides
def forward(self, # type: ignore
tokens: Dict[str, torch.LongTensor],
tags: torch.LongTensor = None) -> Dict[str, torch.Tensor]:
# pylint: disable=arguments-differ
"""
Parameters
----------
tokens : Dict[str, torch.LongTensor], required
The output of ``TextField.as_array()``, which should typically be passed directly to a
``TextFieldEmbedder``. This output is a dictionary mapping keys to ``TokenIndexer``
tensors. At its most basic, using a ``SingleIdTokenIndexer`` this is: ``{"tokens":
Tensor(batch_size, num_tokens)}``. This dictionary will have the same keys as were used
for the ``TokenIndexers`` when you created the ``TextField`` representing your
sequence. The dictionary is designed to be passed directly to a ``TextFieldEmbedder``,
which knows how to combine different word representations into a single vector per
token in your input.
tags : torch.LongTensor, optional (default = None)
A torch tensor representing the sequence of integer gold class labels of shape
``(batch_size, num_tokens)``.
Returns
-------
An output dictionary consisting of:
logits : torch.FloatTensor
A tensor of shape ``(batch_size, num_tokens, tag_vocab_size)`` representing
unnormalised log probabilities of the tag classes.
class_probabilities : torch.FloatTensor
A tensor of shape ``(batch_size, num_tokens, tag_vocab_size)`` representing
a distribution of the tag classes per word.
loss : torch.FloatTensor, optional
A scalar loss to be optimised.
"""
embedded_text_input = self.text_field_embedder(tokens)
batch_size, sequence_length, _ = embedded_text_input.size()
mask = get_text_field_mask(tokens)
encoded_text = self.stacked_encoder(embedded_text_input, mask)
logits = self.tag_projection_layer(encoded_text)
reshaped_log_probs = logits.view(-1, self.num_classes)
class_probabilities = F.softmax(reshaped_log_probs, dim=-1).view([batch_size,
sequence_length,
self.num_classes])
output_dict = {"logits": logits, "class_probabilities": class_probabilities}
if tags is not None:
loss = sequence_cross_entropy_with_logits(logits, tags, mask)
for metric in self.metrics.values():
metric(logits, tags, mask.float())
output_dict["loss"] = loss
return output_dict
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
Does a simple position-wise argmax over each token, converts indices to string labels, and
adds a ``"tags"`` key to the dictionary with the result.
"""
all_predictions = output_dict['class_probabilities']
all_predictions = all_predictions.cpu().data.numpy()
if all_predictions.ndim == 3:
predictions_list = [all_predictions[i] for i in range(all_predictions.shape[0])]
else:
predictions_list = [all_predictions]
all_tags = []
for predictions in predictions_list:
argmax_indices = numpy.argmax(predictions, axis=-1)
tags = [self.vocab.get_token_from_index(x, namespace="labels")
for x in argmax_indices]
all_tags.append(tags)
output_dict['tags'] = all_tags
return output_dict
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {metric_name: metric.get_metric(reset) for metric_name, metric in self.metrics.items()}
@classmethod
def from_params(cls, vocab: Vocabulary, params: Params) -> 'SimpleTagger':
embedder_params = params.pop("text_field_embedder")
text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
stacked_encoder = Seq2SeqEncoder.from_params(params.pop("stacked_encoder"))
initializer = InitializerApplicator.from_params(params.pop('initializer', []))
regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
return cls(vocab=vocab,
text_field_embedder=text_field_embedder,
stacked_encoder=stacked_encoder,
initializer=initializer,
regularizer=regularizer)
| apache-2.0 | 652,954,486,819,484,300 | 47.398734 | 102 | 0.629789 | false | 4.464098 | false | false | false |
caesar0301/omnilab-misc | OmniperfTools/groundtruth.py | 1 | 4311 | #!/usr/bin/env python
# Ground truth for evaluating Activity-Entity model
#
# By chenxm
#
import os
import sys
import numpy
from PyOmniMisc.traffic.http import HTTPLogReader
from PyOmniMisc.utils import stat
from PyOmniMisc.model.webtree import WebTree
def readUserClickTS(fn):
# read user click time series
ucts = []
i = 0
for line in open(fn, 'rb'):
if i != 0:
line = line.strip('\r\n ')
if len(line) == 0: continue
ucts.append(float(line.split('\t')[0]))
i+=1
return ucts
def readHttpEntries(fn):
# read http logs
etrs = []
for entry in HTTPLogReader(fn):
if entry is not None:
etrs.append(entry)
etrs = [e for e in etrs if e.rqtstart() != None] # remove entity without request times
etrs.sort(key=lambda x: x.rqtstart()) # sort entities by request times
return etrs
def modelGT(trace_folder):
print("Modeling groudtruth..")
# User click files
uc = os.path.join(trace_folder, 'userclicks2.out')
if not os.path.exists(uc):
uc = os.path.join(trace_folder, 'userclicks.out')
if not os.path.exists(uc):
raise Exception("Sry, I do not find userclicks*.out in given folder.")
# Read user clicks
ucts = readUserClickTS(uc)
if len(ucts) == 0:
print("No click times")
sys.exit(-1)
print len(ucts)
# Http log file
hl = os.path.join(trace_folder, 'http_logs')
if not os.path.exists(hl):
raise Exception("Sry, I do not find *http_logs*.out in given folder.")
# Read http logs
etrs = readHttpEntries(hl)
if len(etrs) == 0:
print("No entries")
sys.exit(-1)
# prepare data...
ua_ets = {}
for e in etrs:
ua = e.ua()
if ua not in ua_ets:
ua_ets[ua] = []
ua_ets[ua].append(e)
    # Time model: for each user agent, group consecutive requests into trees, starting a new tree when the gap between requests exceeds 3 seconds
forest = {}
for ua in ua_ets:
if ua not in forest:
forest[ua] = []
last = None
tree = []
for e in ua_ets[ua]:
if last is None:
tree.append(e)
else:
if e.rqtstart() - last.rqtstart() <= 3: # sec, request gap
tree.append(e)
elif len(tree) != 0:
forest[ua].append(tree)
tree = []
last = e
# click times
for ua in forest:
removed = []
for tree in forest[ua]:
found = False
for node in tree:
for ts in ucts:
if node.rqtstart() - ts < 2:
found = True
break
if found: break
if not found:
removed.append(tree)
for r in removed:
forest[ua].remove(r)
return forest
def overlap_portion(t1, t2): # t1 covers t2
""" We user FMeasure to measure the distance between two tree
As for t1 covering t2, t2 is treated as the true value, and
t1 is the predicted value.
"""
dup = overlap_cnt(t1, t2)
recall = dup/len(t2)
precision = dup/len(t1)
if recall == 0 and precision == 0:
return None
return stat.FMeasure(precision, recall)
def overlap_cnt(t1, t2):# t1 covers t2
if not isinstance(t1, list) or not isinstance(t2, list) or \
len(t1) == 0 or len(t2) == 0:
raise ValueError("Invalid parameters: list required")
dup = 0.0
for e1 in t1:
for e2 in t2:
if e1 == e2:
dup +=1
break
return dup
def evaluate(forest, forest_gt):
print "Evaluation result:"
uas_target = set(forest.keys())
uas_gt = set(forest_gt.keys())
uas = uas_target & uas_gt
res = []
for ua in uas:
print ua
trees_gt = forest_gt[ua]
trees_target = []
for o in forest[ua]:
# convert format
if isinstance(o, WebTree):
tree = o.fruits()
trees_target.append(tree)
elif isinstance(o, list):
trees_target.append(o)
# evaluate
print "Target: %d, GT: %d" % (len(trees_target),len(trees_gt))
        # Entity classification accuracy (in two modes):
# Trace level accuracy--------------------------
fms = []
for t1 in trees_gt:
mx = 0 # match percentage
for t2 in trees_target:
p = overlap_portion(t2, t1)
if p is not None and p > mx:
mx = p
fms.append(mx)
if len(fms) > 0:
m = numpy.mean(fms)
print m
res.append(m)
#-----------------------------------------------
# Activity level accuracy-----------------------
# fms = []
# for t1 in trees_gt:
# mx = 0
# for t2 in trees_target:
# p = overlap_portion(t2, t1)
# if p is not None and p > mx:
# mx = p
# fms.append(mx)
# print fms
# res.extend(fms)
#-----------------------------------------------
return res | gpl-2.0 | -1,255,218,300,794,877,700 | 22.955556 | 90 | 0.597309 | false | 2.790291 | false | false | false |
jeremyosborne/python | general/csv2table/csv2sql.py | 1 | 10575 | """Convert a csv file to an sqlite table."""
import sys
import csv
import sqlite3
import os
import re
# pointer to our csv file descriptor
csvFile = None
columnNames = None
columnTypes = None
columnComments = None
validDataTypes = ["string", "number", "date"]
idColumnName = "_id"
outfileName = None
outfileExtension = ".sqlite3"
tableName = None
def confirm(default=True):
"""Waits for user input, and exits on anything other than a string
that begins with "Y" or "y".
@param [default=True] {Boolean} Default response displayed to the user.
Either "[Y/n]:" (if True) for a default affirmative or "[y/N]:" (if False)
for a default negative.
@return {Boolean} True if the user typed in an affirmative response,
False if not.
"""
if default == True:
print "[Y/n]: ",
else:
print "[n/Y]: ",
response = raw_input()
if len(response) == 0:
return default
elif len(response) and (response.lower()[0] == "y"):
return True
else:
return False
def createTable():
"""Create the sqllite3 table and insert data."""
global idColumnName, columnNames, columnTypes, outfileName
print "\033[1;43m--Building data table--\033[1;m"
print "SQL statements used will be output to the screen for inspection."
print ""
conn = sqlite3.connect(outfileName)
cursor = conn.cursor()
# TODO: confirm with user (default no) before dropping the table
cursor.execute("DROP TABLE IF EXISTS "+tableName)
statement = "CREATE TABLE "+tableName+" ("+idColumnName+" INTEGER PRIMARY KEY ASC\n"
for i in range(len(columnNames)):
statement += ", "+columnNames[i]
if columnTypes[i] == "String":
statement += " TEXT\n"
elif columnTypes[i] == "Number":
statement += " NUMERIC\n"
statement += ")"
print statement
print ""
cursor.execute(statement)
conn.commit()
# Insert Data
csvFile.seek(0)
dataReader = csv.reader(csvFile)
# skip the header rows
counter = 0
for row in dataReader:
if counter < 3:
counter += 1
continue
else:
statement = "INSERT INTO "+tableName+" ("
# skip the id column, let it auto-increment
firstColumn = True
for column in columnNames:
if firstColumn == True:
statement += column
firstColumn = False
else:
statement += ", "+column
statement += ") VALUES ("
firstValue = True
for columnNum in range(len(row)):
# Need to get access to the column types to determine if we
# should quote or not
if firstValue:
firstValue = False
if columnTypes[columnNum] == "String":
statement += "'"+row[columnNum]+"'"
elif columnTypes[columnNum] == "Number":
statement += row[columnNum]
else:
if columnTypes[columnNum] == "String":
statement += ", '"+row[columnNum]+"'"
elif columnTypes[columnNum] == "Number":
statement += ", "+row[columnNum]
statement += ")"
print statement
cursor.execute(statement)
conn.commit()
# clean up
cursor.close()
conn.close()
def computeSchema():
"""Determines the table schema for our csv file."""
global csvFile, columnNames, columnTypes, columnComments
print "\033[1;43m--Computing schema--\033[1;m"
csvFile.seek(0)
schema = csv.reader(csvFile)
counter = 0
for row in schema:
if counter == 0:
columnNames = row
elif counter == 1:
columnTypes = row
elif counter == 2:
columnComments = row
break
counter += 1
print "We assume the first three rows in your csv file contain header info."
print "If the information looks incorrect, you will have an opportunity"
print "to exit and fix the csv file before creating the output table."
print "--------------------------------------------------------------------"
print "Your columns will be named (from the first row of data):"
for column in range(len(columnNames)):
print "{0:>5}: {1}".format(column, columnNames[column])
print "The data types for the columns (from the second row of data):"
for column in range(len(columnTypes)):
print "{0:>5}: {1}".format(column, columnTypes[column])
print "The descriptions of each column (from the third row of data):"
print "NOTE: Comments are ignored for sql table creation."
for column in range(len(columnComments)):
print "{0:>5}: {1}".format(column, columnComments[column])
print ""
def reportFileStats():
"""Report any stats about the csv file."""
# I think we need a new csv reader every time we want to view
# the file.
global csvFile, validDataTypes
print "\033[1;43m--Computing file stats, checking integrity--\033[1;m"
print "Number of columns in your table (determined from the first row):"
csvFile.seek(0)
columncount = 0
counter = csv.reader(csvFile)
for row in counter:
columncount = len(row)
break
print " {0}".format(columncount)
print "Number of rows in the csv file:"
csvFile.seek(0)
counter = csv.reader(csvFile)
rowcount = 0
for row in counter:
rowcount += 1
print " {0}".format(rowcount)
print "Check table integrity: expected number of columns per row?"
csvFile.seek(0)
counter = csv.reader(csvFile)
rowcount = 0
isBadTable = False
for row in counter:
if len(row) != columncount:
print "Error: row {0} has {1} columns, expected {2}".format(rowcount, len(row), columncount)
isBadTable = True
rowcount += 1
if isBadTable == False:
print "\033[1;32mTable integrity check PASS: expected dimensions.\033[1;m"
print ""
else:
print "\033[1;31mTable integrity check FAIL: unexpected dimensions.\033[1;m"
print ""
sys.exit(1)
print "Check table integrity: expected data types for each column?"
print "Valid datatypes are:"
for validType in validDataTypes:
print " {0}".format(validType)
csvFile.seek(0)
counter = csv.reader(csvFile)
rowcount = 0
isBadTable = False
for row in counter:
# data types are in the second row
if rowcount == 1:
columncount = 0
for column in row:
if column not in validDataTypes:
print "Error: column {0} has unexpected type {1}".format(columncount, column)
isBadTable = True
columncount += 1
# Only process the data type row
break
else:
rowcount += 1
if isBadTable == False:
print "\033[1;32mTable integrity check PASS: expected datatypes.\033[1;m"
print ""
else:
print "\033[1;31mTable integrity check FAIL: unexpected datatypes.\033[1;m"
print ""
sys.exit(1)
def init(filepath):
"""Kicks off the program by attempting to open the csv file."""
global csvFile, outfileName, tableName
# read stocks data, print status messages
try:
print "\033[1;43m--Opening csv file--\033[1;m"
csvFile = open(filepath, "rb")
print "\033[1;32mOpened csv file:", filepath,"\033[1;m"
# Figure out database name first
outfileMatches = re.match(r"([\w\S]*)(\.[^.]+$)", os.path.basename(filepath))
if outfileMatches == None:
# Handle the case where we don't have something that qualifies
# as an extension to the file
outfileName = filepath+outfileExtension
else:
outfileName = outfileMatches.group(1)+outfileExtension
# Figure out table name from the file name
tableName = re.match(r"([\w\S]*)(\.[^.]+$)", outfileName).group(1)
# Confirm the table and file names with the user
print "The sqlite3 table will be named:", tableName
print "NOTE: If this table already exists in the db file, the pre-existing"
print "data will be deleted (dropped) and lost."
print "Is", tableName, "the correct table name?"
if not confirm():
print "Please input a new table: "
tableName = raw_input()
print "Is", tableName, "the correct able name?"
if not confirm():
print "We must have a table name."
print ""
sys.exit()
print "The sqlite3 file will be named:", outfileName
print "Is this correct?"
if not confirm():
print "Please input the complete file and path to your sqlite3 db: "
outfileName = raw_input()
print "We will attempt to use the file at:", outfileName
print "Is this okay?"
if not confirm():
print "We need an output file."
print ""
sys.exit()
# TODO: choose a base table name, and inform the user that we will
# attempt to use this name as the table name in the database.
#
# TODO: prompt for okayness from the user, default yes
print ""
except IOError:
print "\033[1;31mFailed to open csv file:", sys.exc_info()[1],"\033[1;m"
print ""
sys.exit(1)
if __name__ == "__main__":
try:
if len(sys.argv) < 2:
print "Usage:"
print "python", sys.argv[0], "file2convert.csv"
sys.exit(1)
else:
# process the file
init(sys.argv[1])
reportFileStats()
computeSchema()
createTable()
# natural exit
sys.exit(0)
except SystemExit:
if csvFile:
# Make sure to close the file
csvFile.close()
print "Exiting program."
| mit | 2,478,496,740,230,532,000 | 33.847458 | 104 | 0.552246 | false | 4.339352 | false | false | false |
teoliphant/scipy | scipy/sparse/csgraph/tests/test_connected_components.py | 2 | 1443 | import numpy as np
from numpy.testing import assert_, assert_array_almost_equal
from scipy.sparse import csgraph
def test_weak_connections():
Xde = np.array([[0, 1, 0],
[0, 0, 0],
[0, 0, 0]])
Xsp = csgraph.csgraph_from_dense(Xde, null_value=0)
for X in Xsp, Xde:
n_components, labels =\
csgraph.connected_components(X, directed=True,
connection='weak')
assert_(n_components == 2)
assert_array_almost_equal(labels, [0, 0, 1])
def test_strong_connections():
X1de = np.array([[0, 1, 0],
[0, 0, 0],
[0, 0, 0]])
X2de = X1de + X1de.T
X1sp = csgraph.csgraph_from_dense(X1de, null_value=0)
X2sp = csgraph.csgraph_from_dense(X2de, null_value=0)
for X in X1sp, X1de:
n_components, labels =\
csgraph.connected_components(X, directed=True,
connection='strong')
assert_(n_components == 3)
labels.sort()
assert_array_almost_equal(labels, [0, 1, 2])
for X in X2sp, X2de:
n_components, labels =\
csgraph.connected_components(X, directed=True,
connection='strong')
assert_(n_components == 2)
labels.sort()
assert_array_almost_equal(labels, [0, 0, 1])
| bsd-3-clause | 2,786,668,272,811,028,500 | 29.702128 | 61 | 0.50797 | false | 3.625628 | false | false | false |
jpypi/fifar | input_data.py | 1 | 6035 | # Based on scripts at https://github.com/tensorflow/tensorflow/contrib/learn/python/learn/datasets/
'''Dataset utilities'''
import pickle
import collections
from os import path
from tensorflow.python.framework import dtypes
import numpy as np
Datasets = collections.namedtuple('Datasets', ['train', 'validation', 'test'])
def load_cifar10(data_path):
"""Load the CIFAR10 dataset.
Args:
data_path: string, path to the folder containing the cifar10 dataset
Returns:
Datasets tuple containing the train, validation, and test datasets
"""
train1 = unpickle(path.join(data_path, 'data_batch_1'))
train_data = train1[b'data']
train_target = dense_to_one_hot(train1[b'labels'], 10)
train2 = unpickle(path.join(data_path, 'data_batch_2'))
train_data = np.concatenate((train_data, train2[b'data']), axis=0)
train_target = np.concatenate((train_target, dense_to_one_hot(train2[b'labels'], 10)), axis=0)
train3 = unpickle(path.join(data_path, 'data_batch_3'))
train_data = np.concatenate((train_data, train3[b'data']), axis=0)
train_target = np.concatenate((train_target, dense_to_one_hot(train3[b'labels'], 10)), axis=0)
train_data = train_data.reshape(-1, 32*32*3)
train = DataSet(train_data, train_target)
validate1 = unpickle(path.join(data_path, 'data_batch_4'))
valid_data = validate1[b'data']
valid_target = dense_to_one_hot(validate1[b'labels'], 10)
valid_data = valid_data.reshape(-1, 32*32*3)
validation = DataSet(valid_data, valid_target)
test1 = unpickle(path.join(data_path, 'test_batch'))
test_data = test1[b'data']
test_target = dense_to_one_hot(test1[b'labels'], 10)
test_data = test_data.reshape(-1, 32*32*3)
test = DataSet(test_data, test_target)
return Datasets(train=train, validation=validation, test=test)
def dense_to_one_hot(labels_dense, num_classes):
"""Convert class labels from scalars to one-hot vectors."""
if type(labels_dense) != np.ndarray:
labels_dense = np.asarray(labels_dense)
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
def unpickle(path):
with open(path, 'rb') as f:
return pickle.load(f, encoding='bytes')
#Dataset class taken shamelessly from tensorflow's MNIST tutorial files
class DataSet(object):
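    # Simple in-memory dataset wrapper: shuffles once per epoch and serves mini-batches
    # of images/labels, with optional normalization and reshaping to (N, 32, 32, 3).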
def __init__(self,
images,
labels,
dtype=dtypes.float32,
normalize=True,
reshape=True):
"""Construct a DataSet.
'dtype' can either be 'uint8' to leave the input as '[0, 255]', or 'float32'
to rescale into '[0, 1]'.
"""
dtype = dtypes.as_dtype(dtype).base_dtype
if dtype not in (dtypes.uint8, dtypes.float32):
raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
dtype)
assert images.shape[0] == labels.shape[0], (
'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
self._num_examples = images.shape[0]
if dtype == dtypes.float32:
# Convert from [0, 255] -> [0.0, 1.0]
images = images.astype(np.float32)
images = np.multiply(images, 1.0 / 255.0)
if normalize:
images = self.preprocess(images)
# Convert shape from [num_examples, rows*columns*channels] to
# [num_examples, rows, columns, channels]
if reshape:
images = images.reshape(-1, 3, 32, 32).transpose(0,2,3,1)
self._images = images
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
def preprocess(self,images):
'''Normalize the data.'''
sub_mean = np.subtract(images, np.mean(images, axis=0))
div_std = np.divide(sub_mean, np.std(sub_mean, axis=0))
return div_std
def next_batch(self, batch_size, shuffle=True):
'''Return the next 'batch_size' examples from this data set.'''
start = self._index_in_epoch
#Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
perm0 = np.arange(self._num_examples)
np.random.shuffle(perm0)
self._images = self.images[perm0]
self._labels = self.labels[perm0]
#Go to the next epoch
if start + batch_size > self._num_examples:
#Finished Epoch
self._epochs_completed += 1
            # Get the rest of the examples in this epoch
rest_num_examples = self._num_examples - start
images_rest_part = self._images[start:self._num_examples]
labels_rest_part = self._labels[start:self._num_examples]
#Shuffle the data
if shuffle:
perm = np.arange(self._num_examples)
np.random.shuffle(perm)
self._images = self.images[perm]
self._labels = self.labels[perm]
#Start next epoch
start = 0
self._index_in_epoch = batch_size - rest_num_examples
end = self._index_in_epoch
images_new_part = self._images[start:end]
labels_new_part = self._labels[start:end]
return np.concatenate((images_rest_part, images_new_part), axis=0), \
np.concatenate((labels_rest_part, labels_new_part), axis=0)
else:
self._index_in_epoch += batch_size
end = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
| mit | 8,557,386,500,900,142,000 | 33.683908 | 99 | 0.607125 | false | 3.631167 | true | false | false |
viaict/viaduct | app/forms/util.py | 1 | 8858 | import inspect
import itertools
from wtforms import RadioField, SubmitField, SelectFieldBase
from app.forms.fields import CustomFormSelectField, \
OrderedSelectMultipleField, OrderedQuerySelectMultipleField
class FieldTabGroup:
"""Represents a group of fields divided into tabs."""
def __init__(self, tabs):
"""Tabs should be a list of FieldTabs."""
self.type = self.__class__.__name__
self.tabs = tabs
# Don't allow empty tabs
if len(tabs) == 0:
raise ValueError('Tabs are empty')
# Check if all tabs are FieldTab
if not all(isinstance(t, FieldTab) for t in tabs):
raise ValueError('Tabs should all be instances of FieldTab')
# First field is used to determine the place of the tab group
self._firstfield = tabs[0].field_names[0]
# Make a list of all fieldnames
self._fieldnames = []
for tab in self.tabs:
self._fieldnames.extend(tab.field_names)
def _set_form(self, form):
"""
Pass the form to the FieldTabGroup.
Internal method used by FormWrapper.
"""
self.form = form
# Build a list of (tabname, fieldlist) tuples,
# where fieldlist contains the field objects itself,
# which is why the form object is required
self._tab_fields = []
for tab in self.tabs:
fields = []
for field_name in tab.field_names:
fields.append(getattr(form, field_name))
self._tab_fields.append((tab.name, fields))
def __iter__(self):
if not hasattr(self, 'form'):
raise ValueError('_set_form should be called before iterating')
return iter(self._tab_fields)
@property
def hex_id(self):
"""Get the id of the object as hexadecimals. (used for rendering)."""
return hex(id(self))[2:]
class FieldTab:
"""
Represents a tab containing fields.
To be used in combination with FieldTabGroup.
"""
def __init__(self, name, field_names):
if len(field_names) == 0:
raise ValueError('Fields are empty')
self.name = name
self.field_names = field_names
def __repr__(self):
return "<{} '{}'>".format(self.__class__.__name, self.name)
class FieldVerticalSplit:
"""
Vertical field splits.
Represents a vertical split of fields,
i.e. fields next to each other.
"""
def __init__(self, field_names, large_spacing=False):
"""
field_names should be a list of list of fields to be splitted.
For example,
[['X1', 'X2'], ['Y1', 'Y2']]
will render as:
[ X1 ] [ Y1 ]
[ X2 ] [ Y2 ]
"""
self.amount_splits = len(field_names)
self.type = self.__class__.__name__
# Allowed amounts of splits which all can be divided evenly
allowed_split_amounts = [2, 3, 4]
if self.amount_splits not in allowed_split_amounts:
raise ValueError("Amount of splits should be equal to one of: {}",
", ".join(map(str, allowed_split_amounts)))
self.field_names_list = field_names
# Make a list of all fieldnames (i.e. flatten the field_names list)
self._fieldnames = []
for fields in self.field_names_list:
self._fieldnames.extend(fields)
# First field is used to determine the place of the vertical split
self._firstfield = field_names[0][0]
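        # Column and spacing widths are expressed in grid columns of a 12-column layout
        # (presumably a Bootstrap-style grid); each split gets an equal share unless
        # large_spacing asks for wider gaps between the columns.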
if large_spacing:
if self.amount_splits == 2:
self.column_sizes = [5, 5]
self.spacing_sizes = [0, 2]
elif self.amount_splits == 3:
self.column_sizes = [3, 4, 3]
self.spacing_sizes = [0, 1, 1]
elif self.amount_splits == 4:
self.column_sizes = [2, 2, 2, 2]
self.spacing_sizes = [0, 1, 2, 1]
else:
self.column_sizes = [12 // self.amount_splits] * self.amount_splits
self.spacing_sizes = [0] * self.amount_splits
def _set_form(self, form):
"""
Pass the form to the FieldVerticalSplit.
Internal method used by FormWrapper.
"""
self.form = form
self._fields = []
for field_names in self.field_names_list:
fields = []
for field_name in field_names:
fields.append(getattr(form, field_name))
self._fields.append(fields)
def __iter__(self):
if not hasattr(self, 'form'):
raise ValueError('_set_form should be called before iterating')
return iter(self._fields)
class FormWrapper:
"""Helper class for form rendering."""
def __init__(self, form):
self.form = form
self.groups = []
self.vsplits = []
self.ordered_multiselect_fields = []
self.csrf_token = form.csrf_token
self.has_ordered_multiselect_fields = False
self.has_select_fields = False
self.has_custom_form_fields = False
self.has_submit_field = False
for attrname, obj in inspect.getmembers(form):
# Collect the tab groups in the form
if isinstance(obj, FieldTabGroup):
obj.name = attrname
self.groups.append(obj)
# Collect the vertical splits in the form
elif isinstance(obj, FieldVerticalSplit):
obj.name = attrname
self.vsplits.append(obj)
# Check if the form has select fields
elif isinstance(obj, SelectFieldBase) \
and not isinstance(obj, OrderedSelectMultipleField) \
and not isinstance(obj, OrderedQuerySelectMultipleField) \
and not isinstance(obj, RadioField):
self.has_select_fields = True
# Check if the form has ordered multi-select fields
elif isinstance(obj, OrderedSelectMultipleField) \
or isinstance(obj, OrderedQuerySelectMultipleField):
self.has_ordered_multiselect_fields = True
self.ordered_multiselect_fields.append(obj)
# Check if the form has custom form select fields
elif isinstance(obj, CustomFormSelectField):
self.has_select_fields = True
self.has_custom_form_fields = True
# Check if the form has a submit field
elif isinstance(obj, SubmitField):
self.has_submit_field = True
try:
# Dictionary from first field object of a tab group
# to the group object itself
groups_firstfields = {
getattr(form, g._firstfield): g
for g in self.groups
}
# List of all fields belonging to a group
groups_fields = list(map(
lambda f: getattr(form, f), itertools.chain(
*map(lambda g: g._fieldnames, self.groups))))
except TypeError:
raise TypeError('Group field should be a string')
try:
# Dictionary from first field object of a vertial split
# to the vertical split object itself
vsplits_firstfields = {
getattr(form, v._firstfield): v
for v in self.vsplits
}
# List of all fields belonging to a vertical split
vsplit_fields = list(map(
lambda f: getattr(form, f), itertools.chain(
*map(lambda v: v._fieldnames, self.vsplits))))
except TypeError:
raise TypeError('Vertical split field should be a string')
self._fields = []
ignore_fields = []
if hasattr(form, '_RenderIgnoreFields'):
ignore_fields = form._RenderIgnoreFields
for field in form:
# Add the group when the first field occurs in the field list
if field in groups_firstfields:
self._fields.append(groups_firstfields[field])
# Add the vertical split when the first field
# occurs in the field list
elif field in vsplits_firstfields:
self._fields.append(vsplits_firstfields[field])
# Otherwise, add a field when it does not belong to a group
elif (field not in groups_fields and
field not in vsplit_fields and
field.name not in ignore_fields):
self._fields.append(field)
# Give every group and vsplit the form object to make them
# iterable over their tabs/fields
for g in self.groups + self.vsplits:
g._set_form(form)
def __iter__(self):
return iter(self._fields)
| mit | 6,605,752,003,280,129,000 | 32.80916 | 79 | 0.566606 | false | 4.409159 | false | false | false |
ilogue/niprov | niprov/pictures.py | 1 | 1378 | from niprov.format import Format
import io, os
_CACHE = {}
class PictureCache(Format):
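    # Keeps image snapshots in an in-memory dict keyed by provenance id and writes them
    # to ~/.niprov-snapshots/<id>.png when a file path is requested.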
def __init__(self, dependencies):
cachedir = os.path.expanduser('~/.niprov-snapshots')
if not os.path.isdir(cachedir):
os.mkdir(cachedir)
def new(self):
return io.BytesIO()
def keep(self, picture, for_):
imgId = for_.provenance['id']
if hasattr(picture, 'read'):
picture.seek(0)
bytes = picture.read()
else:
bytes = str(picture)
_CACHE[imgId] = bytes
def getBytes(self, for_):
imgId = for_.provenance['id']
if imgId in _CACHE:
return _CACHE[imgId]
return None
def getFilepath(self, for_):
return self.saveToDisk(for_)
def saveToDisk(self, for_):
imgId = for_.provenance['id']
fpath = os.path.expanduser('~/.niprov-snapshots/{}.png'.format(imgId))
if os.path.isfile(fpath):
return fpath
elif imgId in _CACHE:
with open(fpath, 'w') as picfile:
picfile.write(_CACHE[imgId])
return fpath
else:
return None
def serializeSingle(self, image):
"""Provides file path to picture of image.
This is part of the :class:`.Format` interface.
"""
return self.getFilepath(for_=image)
| bsd-3-clause | 6,516,384,278,792,297,000 | 25.5 | 78 | 0.555878 | false | 3.88169 | false | false | false |
jyundt/oval | migrations/versions/628b5fe65b72_rename_current_team_to_current_team_id.py | 1 | 1107 | """Rename current_team to current_team_id
Revision ID: 628b5fe65b72
Revises: a14e1ddd71e2
Create Date: 2016-04-21 11:06:27.786845
"""
# revision identifiers, used by Alembic.
revision = '628b5fe65b72'
down_revision = 'a14e1ddd71e2'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('racer', sa.Column('current_team_id', sa.Integer(), nullable=True))
op.drop_constraint(u'racer_current_team_fkey', 'racer', type_='foreignkey')
op.create_foreign_key(None, 'racer', 'team', ['current_team_id'], ['id'])
op.drop_column('racer', 'current_team')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('racer', sa.Column('current_team', sa.INTEGER(), autoincrement=False, nullable=True))
op.drop_constraint(None, 'racer', type_='foreignkey')
op.create_foreign_key(u'racer_current_team_fkey', 'racer', 'team', ['current_team'], ['id'])
op.drop_column('racer', 'current_team_id')
### end Alembic commands ###
| gpl-2.0 | -8,020,372,367,204,022,000 | 33.59375 | 103 | 0.679313 | false | 3.092179 | false | false | false |
PennNLP/SLURP | semantics/lexical_constants.py | 1 | 1815 | """Word-related constants used by semantics."""
ENTITY_ALIASES = {'me': 'Commander',
'i': 'Commander'}
# Primary verbnet senses for actions
SEARCH_ACTION = "search"
GO_ACTION = "go"
GET_ACTION = "retrieve"
FOLLOW_ACTION = "follow"
SEE_ACTION = "see"
TELL_ACTION = "tell"
BEGIN_ACTION = "begin"
ACTIVATE_ACTION = "activate"
DEACTIVATE_ACTION = "deactivate"
DEFUSE_ACTION = "defuse"
AVOID_ACTION = "avoid"
PATROL_ACTION = "patrol"
CARRY_ACTION = "carry"
STAY_ACTION = "stay"
# Mapping of other verbnet senses to the same actions.
# We include the identity entries just to make things easier on the talkback side
ACTION_ALIASES = {
'appear': GO_ACTION,
'get': GET_ACTION,
'obtain': GET_ACTION,
'meander': GO_ACTION,
'slide': GO_ACTION,
'nonvehicle': GO_ACTION,
'escape': GO_ACTION,
'rummage': SEARCH_ACTION,
'characterize': SEE_ACTION,
'chase': FOLLOW_ACTION,
'lodge': STAY_ACTION,
SEARCH_ACTION: SEARCH_ACTION,
GO_ACTION: GO_ACTION,
GET_ACTION: GET_ACTION,
FOLLOW_ACTION: FOLLOW_ACTION,
SEE_ACTION: SEE_ACTION,
TELL_ACTION: TELL_ACTION,
BEGIN_ACTION: BEGIN_ACTION,
ACTIVATE_ACTION: ACTIVATE_ACTION,
DEACTIVATE_ACTION: DEACTIVATE_ACTION,
AVOID_ACTION: AVOID_ACTION,
PATROL_ACTION: PATROL_ACTION,
CARRY_ACTION: CARRY_ACTION,
STAY_ACTION: STAY_ACTION,
DEFUSE_ACTION: DEFUSE_ACTION,
}
UNDERSTOOD_SENSES = set(ACTION_ALIASES.keys())
| gpl-3.0 | -1,162,972,557,801,783,600 | 33.903846 | 81 | 0.539394 | false | 3.681542 | false | false | false |
kpn-digital/py-timeexecution | time_execution/decorator.py | 1 | 2501 | """
Time Execution decorator
"""
import socket
import time
from fqn_decorators import Decorator
from fqn_decorators.asynchronous import AsyncDecorator
from pkgsettings import Settings
SHORT_HOSTNAME = socket.gethostname()
settings = Settings()
settings.configure(backends=[], hooks=[], duration_field="value")
def write_metric(name, **metric):
for backend in settings.backends:
backend.write(name, **metric)
def _apply_hooks(hooks, response, exception, metric, func, func_args, func_kwargs):
metadata = dict()
for hook in hooks:
hook_result = hook(
response=response,
exception=exception,
metric=metric,
func=func,
func_args=func_args,
func_kwargs=func_kwargs,
)
if hook_result:
metadata.update(hook_result)
return metadata
class time_execution(Decorator):
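    # Measures the wall-clock duration of the wrapped callable, lets registered hooks
    # attach extra metadata, and writes the resulting metric to all configured backends.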
def __init__(self, func=None, **params):
self.start_time = None
super(time_execution, self).__init__(func, **params)
def before(self):
self.start_time = time.time()
def after(self):
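        # Elapsed wall-clock time, reported in milliseconds.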
duration = round(time.time() - self.start_time, 3) * 1000
metric = {"name": self.fqn, settings.duration_field: duration, "hostname": SHORT_HOSTNAME}
origin = getattr(settings, "origin", None)
if origin:
metric["origin"] = origin
hooks = self.params.get("extra_hooks", [])
disable_default_hooks = self.params.get("disable_default_hooks", False)
if not disable_default_hooks:
hooks = settings.hooks + hooks
# Apply the registered hooks, and collect the metadata they might
# return to be stored with the metrics
metadata = _apply_hooks(
hooks=hooks,
response=self.result,
exception=self.get_exception(),
metric=metric,
func=self.func,
func_args=self.args,
func_kwargs=self.kwargs,
)
metric.update(metadata)
write_metric(**metric)
def get_exception(self):
"""Retrieve the exception"""
if self.exc_info is None:
return
exc_type, exc_value, exc_tb = self.exc_info
if exc_value is None:
exc_value = exc_type()
if exc_value.__traceback__ is not exc_tb:
return exc_value.with_traceback(exc_tb)
return exc_value
class time_execution_async(AsyncDecorator, time_execution):
pass
| apache-2.0 | -1,143,102,933,210,368,000 | 26.483516 | 98 | 0.606158 | false | 4.140728 | false | false | false |
oxnz/algorithms | leetcode/SimplifyPath.py | 1 | 1845 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ===============================================================
#
# Filename: SimplifyPath.py
#
# Author: Oxnz
# Email: [email protected]
# Created: [2014-12-01 18:54:04 CST]
# Last-update: 2014-12-01 18:54:04 CST
# Description: ANCHOR
#
# Version: 0.0.1
# Revision: [None]
# Revision history: [None]
# Date Author Remarks: [None]
#
# License:
# Copyright (c) 2013 Oxnz
#
# Distributed under terms of the [LICENSE] license.
# [license]
#
# ===============================================================
#
class Solution:
# @param path, a string
# @return a string
def simplifyPath(self, path):
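        # Split the path on '/', skip empty segments and '.', pop a segment on '..',
        # then re-join what is left into the canonical absolute path.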
parts = path.split('/')
length = len(parts)
pathv = ['']
needsep = True
i = 0
while i < len(parts):
if parts[i] == '':
i += 1
elif parts[i] == '..':
i += 1
if len(pathv):
pathv.pop()
elif parts[i] == '.':
i += 1
else:
pathv.append(parts[i])
i += 1
path = '/'.join(pathv)
if path == '':
return '/'
elif path[0] != '/':
path = '/' + path
return path
import unittest
import os.path
class TestSolution(unittest.TestCase):
def setUp(self):
self._simpath = Solution().simplifyPath
def test_case(self):
        for path, expected in {
'/': '/',
'/../': '/',
'/home//foo': '/home/foo',
'/../../../': '/',
'/././../../././': '/',
'/a/./b///../c/../././../d/..//../e/./f/./g/././//.//h///././/..///': '/e/f/g',
        }.items():
            self.assertEqual(expected, self._simpath(path))
if __name__ == '__main__':
unittest.main()
| mit | 1,549,973,420,774,528,500 | 23.276316 | 91 | 0.407588 | false | 3.527725 | true | false | false |
UKPLab/sentence-transformers | examples/training/quora_duplicate_questions/training_MultipleNegativesRankingLoss.py | 1 | 8798 | """
This script demonstrates how to train a sentence embedding model for Information Retrieval.
As dataset, we use Quora Duplicates Questions, where we have pairs of duplicate questions.
As loss function, we use MultipleNegativesRankingLoss. Here, we only need positive pairs, i.e., pairs of sentences/texts that are considered to be relevant. Our dataset looks like this: (a_1, b_1), (a_2, b_2), ..., where a_i and b_i are texts and each pair (a_i, b_i) is relevant (e.g. the two are duplicates).
MultipleNegativesRankingLoss takes a random subset of these, for example (a_1, b_1), ..., (a_n, b_n). a_i and b_i are considered to be relevant and should be close in vector space. All other b_j (for j != i) are negative examples and the distance between a_i and b_j should be maximized. Note: MultipleNegativesRankingLoss only works if a random b_j is likely not to be relevant for a_i. This is the case for our duplicate questions dataset: if we sample a random b_j, it is unlikely to be a duplicate of a_i.
The model we get works well for duplicate questions mining and for duplicate questions information retrieval. For question pair classification, other losses (like OnlineContrastiveLoss) work better.
"""
from torch.utils.data import DataLoader
from sentence_transformers import losses, util
from sentence_transformers import LoggingHandler, SentenceTransformer, evaluation
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import csv
import os
from zipfile import ZipFile
import random
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
logger = logging.getLogger(__name__)
#### /print debug information to stdout
#As base model, we use DistilBERT-base that was pre-trained on NLI and STSb data
model = SentenceTransformer('stsb-distilbert-base')
#Training for multiple epochs can be beneficial, as in each epoch a mini-batch is sampled differently
#hence, we get different negatives for each positive
num_epochs = 10
#Increasing the batch size improves the performance for MultipleNegativesRankingLoss. Choose it as large as possible
#I achieved good results with a batch size of 300-350 (requires about 30 GB of GPU memory)
train_batch_size = 64
dataset_path = 'quora-IR-dataset'
model_save_path = 'output/training_MultipleNegativesRankingLoss-'+datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
os.makedirs(model_save_path, exist_ok=True)
# Check if the dataset exists. If not, download and extract
if not os.path.exists(dataset_path):
logger.info("Dataset not found. Download")
zip_save_path = 'quora-IR-dataset.zip'
util.http_get(url='https://sbert.net/datasets/quora-IR-dataset.zip', path=zip_save_path)
with ZipFile(zip_save_path, 'r') as zip:
zip.extractall(dataset_path)
######### Read train data ##########
train_samples = []
with open(os.path.join(dataset_path, "classification/train_pairs.tsv"), encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
if row['is_duplicate'] == '1':
train_samples.append(InputExample(texts=[row['question1'], row['question2']], label=1))
train_samples.append(InputExample(texts=[row['question2'], row['question1']], label=1)) #if A is a duplicate of B, then B is a duplicate of A
# After reading the train_samples, we create a DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.MultipleNegativesRankingLoss(model)
################### Development Evaluators ##################
# We add 3 evaluators, that evaluate the model on Duplicate Questions pair classification,
# Duplicate Questions Mining, and Duplicate Questions Information Retrieval
evaluators = []
###### Classification ######
# Given (question1, question2), is this a duplicate or not?
# The evaluator will compute the embeddings for both questions and then compute
# a cosine similarity. If the similarity is above a threshold, we have a duplicate.
dev_sentences1 = []
dev_sentences2 = []
dev_labels = []
with open(os.path.join(dataset_path, "classification/dev_pairs.tsv"), encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
dev_sentences1.append(row['question1'])
dev_sentences2.append(row['question2'])
dev_labels.append(int(row['is_duplicate']))
binary_acc_evaluator = evaluation.BinaryClassificationEvaluator(dev_sentences1, dev_sentences2, dev_labels)
evaluators.append(binary_acc_evaluator)
###### Duplicate Questions Mining ######
# Given a large corpus of questions, identify all duplicates in that corpus.
# For faster processing, we limit the development corpus to only 10,000 sentences.
max_dev_samples = 10000
dev_sentences = {}
dev_duplicates = []
with open(os.path.join(dataset_path, "duplicate-mining/dev_corpus.tsv"), encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
dev_sentences[row['qid']] = row['question']
if len(dev_sentences) >= max_dev_samples:
break
with open(os.path.join(dataset_path, "duplicate-mining/dev_duplicates.tsv"), encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
if row['qid1'] in dev_sentences and row['qid2'] in dev_sentences:
dev_duplicates.append([row['qid1'], row['qid2']])
# The ParaphraseMiningEvaluator computes the cosine similarity between all sentences and
# extracts a list with the pairs that have the highest similarity. Given the duplicate
# information in dev_duplicates, it then computes an F1 score measuring how well our duplicate mining worked
paraphrase_mining_evaluator = evaluation.ParaphraseMiningEvaluator(dev_sentences, dev_duplicates, name='dev')
evaluators.append(paraphrase_mining_evaluator)
###### Duplicate Questions Information Retrieval ######
# Given a question and a large corpus of thousands of questions, find the most relevant (i.e. duplicate) question
# in that corpus.
# For faster processing, we limit the development corpus to only 10,000 sentences.
max_corpus_size = 10000
ir_queries = {} #Our queries (qid => question)
ir_needed_qids = set() #QIDs we need in the corpus
ir_corpus = {} #Our corpus (qid => question)
ir_relevant_docs = {} #Mapping of relevant documents for a given query (qid => set([relevant_question_ids])
with open(os.path.join(dataset_path, 'information-retrieval/dev-queries.tsv'), encoding='utf8') as fIn:
next(fIn) #Skip header
for line in fIn:
qid, query, duplicate_ids = line.strip().split('\t')
duplicate_ids = duplicate_ids.split(',')
ir_queries[qid] = query
ir_relevant_docs[qid] = set(duplicate_ids)
for qid in duplicate_ids:
ir_needed_qids.add(qid)
# First get all needed relevant documents (i.e., we must ensure that the relevant questions are actually in the corpus)
distraction_questions = {}
with open(os.path.join(dataset_path, 'information-retrieval/corpus.tsv'), encoding='utf8') as fIn:
next(fIn) #Skip header
for line in fIn:
qid, question = line.strip().split('\t')
if qid in ir_needed_qids:
ir_corpus[qid] = question
else:
distraction_questions[qid] = question
# Now, also add some irrelevant questions to fill our corpus
other_qid_list = list(distraction_questions.keys())
random.shuffle(other_qid_list)
for qid in other_qid_list[0:max(0, max_corpus_size-len(ir_corpus))]:
ir_corpus[qid] = distraction_questions[qid]
#Given queries, a corpus and a mapping with relevant documents, the InformationRetrievalEvaluator computes different IR
# metrics. For our use case MRR@k and Accuracy@k are relevant.
ir_evaluator = evaluation.InformationRetrievalEvaluator(ir_queries, ir_corpus, ir_relevant_docs)
evaluators.append(ir_evaluator)
# Create a SequentialEvaluator. This SequentialEvaluator runs all three evaluators in a sequential order.
# We optimize the model with respect to the score from the last evaluator (scores[-1])
seq_evaluator = evaluation.SequentialEvaluator(evaluators, main_score_function=lambda scores: scores[-1])
logger.info("Evaluate model without training")
seq_evaluator(model, epoch=0, steps=0, output_path=model_save_path)
# Train the model
model.fit(train_objectives=[(train_dataloader, train_loss)],
evaluator=seq_evaluator,
epochs=num_epochs,
warmup_steps=1000,
output_path=model_save_path
)
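# After training, the stored model could be used roughly as follows (illustrative sketch, kept as
# a comment so the behaviour of this training script is unchanged):
#   trained_model = SentenceTransformer(model_save_path)
#   emb = trained_model.encode(['How can I learn Python?', 'What is the best way to learn Python?'], convert_to_tensor=True)
#   print(util.pytorch_cos_sim(emb[0], emb[1]))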
| apache-2.0 | -5,897,717,325,137,696,000 | 45.062827 | 508 | 0.720732 | false | 3.608696 | false | false | false |
BD2KGenomics/slugflow | src/toil/test/src/autoDeploymentTest.py | 1 | 23946 | import logging
import subprocess
import time
from contextlib import contextmanager
from toil.lib.iterables import concat
from toil.test import ApplianceTestSupport, needs_local_appliance, needs_mesos, slow
from toil.version import exactPython
logger = logging.getLogger(__name__)
@needs_mesos
@needs_local_appliance
@slow
class AutoDeploymentTest(ApplianceTestSupport):
"""
Tests various auto-deployment scenarios. Using the appliance, i.e. a docker container,
for these tests allows for running worker processes on the same node as the leader process
while keeping their file systems separate from each other and the leader process. Separate
file systems are crucial to prove that auto-deployment does its job.
"""
def setUp(self):
logging.basicConfig(level=logging.INFO)
super(AutoDeploymentTest, self).setUp()
@contextmanager
def _venvApplianceCluster(self):
"""
Creates an appliance cluster with a virtualenv at './venv' on the leader and a temporary
directory on the host mounted at /data in the leader and worker containers.
"""
dataDirPath = self._createTempDir(purpose='data')
with self._applianceCluster(mounts={dataDirPath: '/data'}) as (leader, worker):
leader.runOnAppliance('virtualenv',
'--system-site-packages',
'--never-download', # prevent silent upgrades to pip etc
'--python', exactPython,
'venv')
leader.runOnAppliance('venv/bin/pip', 'list') # For diagnostic purposes
yield leader, worker
# TODO: Are we sure the python in the appliance we are testing is the same
    # as the one we are testing from? If not, how can we find out which version it is?
sitePackages = 'venv/lib/{}/site-packages'.format(exactPython)
def testRestart(self):
"""
Test whether auto-deployment works on restart.
"""
with self._venvApplianceCluster() as (leader, worker):
def userScript():
from toil.common import Toil
from toil.job import Job
# noinspection PyUnusedLocal
def job(job, disk='10M', cores=1, memory='10M'):
assert False
if __name__ == '__main__':
options = Job.Runner.getDefaultArgumentParser().parse_args()
with Toil(options) as toil:
if toil.config.restart:
toil.restart()
else:
toil.start(Job.wrapJobFn(job))
userScript = self._getScriptSource(userScript)
leader.deployScript(path=self.sitePackages,
packagePath='foo.bar',
script=userScript)
pythonArgs = ['venv/bin/python', '-m', 'foo.bar']
toilArgs = ['--logDebug',
'--batchSystem=mesos',
'--mesosMaster=localhost:5050',
'--defaultMemory=10M',
'/data/jobstore']
command = concat(pythonArgs, toilArgs)
self.assertRaises(subprocess.CalledProcessError, leader.runOnAppliance, *command)
# Deploy an updated version of the script ...
userScript = userScript.replace('assert False', 'assert True')
leader.deployScript(path=self.sitePackages,
packagePath='foo.bar',
script=userScript)
# ... and restart Toil.
command = concat(pythonArgs, '--restart', toilArgs)
leader.runOnAppliance(*command)
def testSplitRootPackages(self):
"""
Test whether auto-deployment works with a virtualenv in which jobs are defined in
completely separate branches of the package hierarchy. Initially, auto-deployment did
deploy the entire virtualenv but jobs could only be defined in one branch of the package
hierarchy. We define a branch as the maximum set of fully qualified package paths that
share the same first component. IOW, a.b and a.c are in the same branch, while a.b and
d.c are not.
"""
with self._venvApplianceCluster() as (leader, worker):
# Deploy the library module with job definitions
def libraryModule():
# noinspection PyUnusedLocal
def libraryJob(job):
open('/data/foo.txt', 'w').close()
leader.deployScript(path=self.sitePackages,
packagePath='toil_lib.foo',
script=libraryModule)
# Deploy the user script
def userScript():
# noinspection PyUnresolvedReferences
from toil_lib.foo import libraryJob
from toil.common import Toil
from toil.job import Job
# noinspection PyUnusedLocal
def job(job, disk='10M', cores=1, memory='10M'):
# Double the requirements to prevent chaining as chaining might hide problems
# in auto-deployment code.
job.addChildJobFn(libraryJob, disk='20M', cores=cores, memory=memory)
if __name__ == '__main__':
options = Job.Runner.getDefaultArgumentParser().parse_args()
with Toil(options) as toil:
if toil.config.restart:
toil.restart()
else:
toil.start(Job.wrapJobFn(job))
leader.deployScript(path=self.sitePackages,
packagePath='toil_script.bar',
script=userScript)
# Assert that output file isn't there
worker.runOnAppliance('test', '!', '-f', '/data/foo.txt')
# Just being paranoid
self.assertRaises(subprocess.CalledProcessError,
worker.runOnAppliance, 'test', '-f', '/data/foo.txt')
leader.runOnAppliance('venv/bin/python',
'-m', 'toil_script.bar',
'--logDebug',
'--batchSystem=mesos',
'--mesosMaster=localhost:5050',
'--defaultMemory=10M',
'/data/jobstore')
            # Assert that our output file is there
worker.runOnAppliance('test', '-f', '/data/foo.txt')
def testUserTypesInJobFunctionArgs(self):
"""
Test encapsulated, function-wrapping jobs where the function arguments reference
user-defined types.
Mainly written to cover https://github.com/BD2KGenomics/toil/issues/1259 but then also
revealed https://github.com/BD2KGenomics/toil/issues/1278.
"""
with self._venvApplianceCluster() as (leader, worker):
def userScript():
from toil.common import Toil
from toil.job import Job
# A user-defined type, i.e. a type defined in the user script
class X(object):
pass
# noinspection PyUnusedLocal
def job(job, x, disk='10M', cores=1, memory='10M'):
return x
if __name__ == '__main__':
options = Job.Runner.getDefaultArgumentParser().parse_args()
x = X()
with Toil(options) as toil:
r = toil.start(Job.wrapJobFn(job, x).encapsulate())
# Assert that the return value is of type X, but not X from the __main__
# module but X from foo.bar, the canonical name for the user module. The
# translation from __main__ to foo.bar is a side effect of auto-deployment.
assert r.__class__ is not X
import foo.bar
assert r.__class__ is foo.bar.X
# Assert that a copy was made. This is a side effect of pickling/unpickling.
assert x is not r
userScript = self._getScriptSource(userScript)
leader.deployScript(path=self.sitePackages,
packagePath='foo.bar',
script=userScript)
leader.runOnAppliance('venv/bin/python', '-m', 'foo.bar',
'--logDebug',
'--batchSystem=mesos',
'--mesosMaster=localhost:5050',
'--defaultMemory=10M',
'--defaultDisk=10M',
'/data/jobstore')
def testDeferralWithConcurrentEncapsulation(self):
"""
Ensure that the following DAG succeeds:
┌───────────┐
│ Root (W1) │
└───────────┘
│
┌──────────┴─────────┐
▼ ▼
┌────────────────┐ ┌────────────────────┐
│ Deferring (W2) │ │ Encapsulating (W3) │═══════════════╗
└────────────────┘ └────────────────────┘ ║
│ ║
▼ ▼
┌───────────────────┐ ┌────────────────┐
│ Encapsulated (W3) │ │ Follow-on (W6) │
└───────────────────┘ └────────────────┘
│ │
┌───────┴────────┐ │
▼ ▼ ▼
┌──────────────┐ ┌──────────────┐ ┌──────────────┐
│ Dummy 1 (W4) │ │ Dummy 2 (W5) │ │ Last (W6) │
└──────────────┘ └──────────────┘ └──────────────┘
The Wn numbers denote the worker processes that a particular job is run in. `Deferring`
adds a deferred function and then runs for a long time. The deferred function will be
present in the cache state for the duration of `Deferred`. `Follow-on` is the generic Job
instance that's added by encapsulating a job. It runs on the same worker node but in a
separate worker process, as the first job in that worker. Because …
1) it is the first job in its worker process (the user script has not been made available
on the sys.path by a previous job in that worker) and
2) it shares the cache state with the `Deferring` job and
3) it is an instance of Job (and so does not introduce the user script to sys.path itself),
        … it might cause problems with deserializing a deferred function defined in the user script.
`Encapsulated` has two children to ensure that `Follow-on` is run in a separate worker.
"""
with self._venvApplianceCluster() as (leader, worker):
def userScript():
from toil.common import Toil
from toil.job import Job
def root(rootJob):
def nullFile():
return rootJob.fileStore.jobStore.importFile('file:///dev/null')
startFile = nullFile()
endFile = nullFile()
rootJob.addChildJobFn(deferring, startFile, endFile)
encapsulatedJob = Job.wrapJobFn(encapsulated, startFile)
encapsulatedJob.addChildFn(dummy)
encapsulatedJob.addChildFn(dummy)
encapsulatingJob = encapsulatedJob.encapsulate()
rootJob.addChild(encapsulatingJob)
encapsulatingJob.addChildJobFn(last, endFile)
def dummy():
pass
def deferred():
pass
# noinspection PyUnusedLocal
def deferring(job, startFile, endFile):
job.defer(deferred)
job.fileStore.jobStore.deleteFile(startFile)
timeout = time.time() + 10
while job.fileStore.jobStore.fileExists(endFile):
assert time.time() < timeout
time.sleep(1)
def encapsulated(job, startFile):
timeout = time.time() + 10
while job.fileStore.jobStore.fileExists(startFile):
assert time.time() < timeout
time.sleep(1)
def last(job, endFile):
job.fileStore.jobStore.deleteFile(endFile)
if __name__ == '__main__':
options = Job.Runner.getDefaultArgumentParser().parse_args()
with Toil(options) as toil:
rootJob = Job.wrapJobFn(root)
toil.start(rootJob)
userScript = self._getScriptSource(userScript)
leader.deployScript(path=self.sitePackages,
packagePath='foo.bar',
script=userScript)
leader.runOnAppliance('venv/bin/python', '-m', 'foo.bar',
'--logDebug',
'--batchSystem=mesos',
'--mesosMaster=localhost:5050',
'--retryCount=0',
'--defaultMemory=10M',
'--defaultDisk=10M',
'/data/jobstore')
def testDeferralWithFailureAndEncapsulation(self):
"""
Ensure that the following DAG succeeds:
┌───────────┐
│ Root (W1) │
└───────────┘
│
┌──────────┴─────────┐
▼ ▼
┌────────────────┐ ┌────────────────────┐
│ Deferring (W2) │ │ Encapsulating (W3) │═══════════════════════╗
└────────────────┘ └────────────────────┘ ║
│ ║
▼ ▼
┌───────────────────┐ ┌────────────────┐
│ Encapsulated (W3) │════════════╗ │ Follow-on (W7) │
└───────────────────┘ ║ └────────────────┘
│ ║
┌──────┴──────┐ ║
▼ ▼ ▼
┌────────────┐┌────────────┐ ┌──────────────┐
│ Dummy (W4) ││ Dummy (W5) │ │ Trigger (W6) │
└────────────┘└────────────┘ └──────────────┘
`Trigger` causes `Deferring` to crash. `Follow-on` runs next, detects `Deferring`'s
left-overs and runs the deferred function. `Follow-on` is an instance of `Job` and the
first job in its worker process. This test ensures that despite these circumstances,
the user script is loaded before the deferred functions defined in it are being run.
`Encapsulated` has two children to ensure that `Follow-on` is run in a new worker. That's
the only way to guarantee that the user script has not been loaded yet, which would cause
the test to succeed coincidentally. We want to test that auto-deploying and loading of the
user script are done properly *before* deferred functions are being run and before any
jobs have been executed by that worker.
"""
with self._venvApplianceCluster() as (leader, worker):
def userScript():
import os
import time
from toil.common import Toil
from toil.job import Job
from toil.leader import FailedJobsException
TIMEOUT = 10
def root(rootJob):
def nullFile():
return rootJob.fileStore.jobStore.importFile('file:///dev/null')
startFile = nullFile()
endFile = nullFile()
rootJob.addChildJobFn(deferring, startFile, endFile)
encapsulatedJob = Job.wrapJobFn(encapsulated, startFile)
encapsulatedJob.addChildFn(dummy)
encapsulatedJob.addChildFn(dummy)
encapsulatedJob.addFollowOnJobFn(trigger, endFile)
encapsulatingJob = encapsulatedJob.encapsulate()
rootJob.addChild(encapsulatingJob)
def dummy():
pass
def deferredFile(config):
"""
Return path to a file at the root of the job store, exploiting the fact that
the job store is shared between leader and worker container.
"""
prefix = 'file:'
locator = config.jobStore
assert locator.startswith(prefix)
return os.path.join(locator[len(prefix):], 'testDeferredFile')
def deferred(deferredFilePath):
"""
The deferred function that is supposed to run.
"""
os.unlink(deferredFilePath)
# noinspection PyUnusedLocal
def deferring(job, startFile, endFile):
"""
A job that adds the deferred function and then crashes once the `trigger` job
tells it to.
"""
job.defer(deferred, deferredFile(job._config))
jobStore = job.fileStore.jobStore
jobStore.deleteFile(startFile)
with jobStore.updateFileStream(endFile) as fH:
fH.write(str(os.getpid()))
timeout = time.time() + TIMEOUT
while jobStore.fileExists(endFile):
assert time.time() < timeout
time.sleep(1)
os.kill(os.getpid(), 9)
def encapsulated(job, startFile):
"""
A job that waits until the `deferring` job is running and waiting to be crashed.
"""
timeout = time.time() + TIMEOUT
while job.fileStore.jobStore.fileExists(startFile):
assert time.time() < timeout
time.sleep(1)
def trigger(job, endFile):
"""
A job that determines the PID of the worker running the `deferring` job,
tells the `deferring` job to crash and then waits for the corresponding
worker process to end. By waiting we can be sure that the `follow-on` job
finds the left-overs of the `deferring` job.
"""
import errno
jobStore = job.fileStore.jobStore
with jobStore.readFileStream(endFile) as fH:
pid = int(fH.read())
os.kill(pid, 0)
jobStore.deleteFile(endFile)
timeout = time.time() + TIMEOUT
while True:
try:
os.kill(pid, 0)
except OSError as e:
if e.errno == errno.ESRCH:
break
else:
raise
else:
assert time.time() < timeout
time.sleep(1)
def tryUnlink(deferredFilePath):
try:
os.unlink(deferredFilePath)
except OSError as e:
if e.errno == errno.ENOENT:
pass
else:
raise
if __name__ == '__main__':
import errno
options = Job.Runner.getDefaultArgumentParser().parse_args()
with Toil(options) as toil:
deferredFilePath = deferredFile(toil.config)
open(deferredFilePath, 'w').close()
try:
assert os.path.exists(deferredFilePath)
try:
toil.start(Job.wrapJobFn(root))
except FailedJobsException as e:
assert e.numberOfFailedJobs == 2 # `root` and `deferring`
assert not os.path.exists(deferredFilePath), \
'Apparently, the deferred function did not run.'
else:
assert False, 'Workflow should not have succeeded.'
finally:
tryUnlink(deferredFilePath)
userScript = self._getScriptSource(userScript)
leader.deployScript(path=self.sitePackages,
packagePath='foo.bar',
script=userScript)
leader.runOnAppliance('venv/bin/python', '-m', 'foo.bar',
'--logDebug',
'--batchSystem=mesos',
'--mesosMaster=localhost:5050',
'--retryCount=0',
'--defaultMemory=10M',
'--defaultDisk=10M',
'/data/jobstore')
| apache-2.0 | 5,329,596,328,372,900,000 | 45.396694 | 100 | 0.462326 | false | 4.736553 | true | false | false |
pjgeng/Contour-Labels | create_contour_labels.py | 1 | 10693 | ##Create Contour Labels=name
##input_contours=vector
##input_label_guides=vector
##output_contours=output vector
##output_labels=output vector
##create_clipped_contours=boolean True
##smooth_contours=boolean False
##invert_labels=boolean False
##index_contour_modal=number 25
##contour_step=number 5
##start_buffer=number 20
##buffer_increment=number 10
##elevation_field_name=String elev
import math
import qgis
from qgis.core import *
from PyQt4.QtCore import *
def calcDist(p1x,p1y,p2x,p2y):
dist = math.sqrt((p2x - p1x)**2 + (p2y - p1y)**2)
return dist
version = qgis.utils.QGis.QGIS_VERSION.split('-')[0].split('.',2)
progress.setText("Running Contour Label creation for QGIS version "+str(qgis.utils.QGis.QGIS_VERSION.split('-')[0]))
if (smooth_contours):
progress.setText("Smoothing contours")
outputs_GRASSGENERALIZE_1=processing.runalg('grass7:v.generalize',input_contours,9,20,7,50,0.5,3,0,0,0,1,1,1,False,True,None,-1,0.0001,0,None)
use_contours=outputs_GRASSGENERALIZE_1['output']
else:
progress.setText("Using existing contours")
use_contours=input_contours
progress.setText("Creating contour intersections")
outputs_QGISLINEINTERSECTIONS_1=processing.runalg('qgis:lineintersections',use_contours,input_label_guides,'ID','id',None)
progress.setText("Processing elevations")
outputs_QGISJOINATTRIBUTESTABLE_1=processing.runalg('qgis:joinattributestable', outputs_QGISLINEINTERSECTIONS_1['OUTPUT'],input_contours,'ID','ID',None)
outputs_QGISFIELDCALCULATOR_10=processing.runalg('qgis:fieldcalculator', outputs_QGISJOINATTRIBUTESTABLE_1['OUTPUT_LAYER'],'elevation',1,1.0,0.0,True,'"'+str(elevation_field_name)+'"',None)
outputs_QGISDELETECOLUMN_1=processing.runalg('qgis:deletecolumn',outputs_QGISFIELDCALCULATOR_10['OUTPUT_LAYER'],str(elevation_field_name),None)
outputs_QGISFIELDCALCULATOR_11=processing.runalg('qgis:fieldcalculator', outputs_QGISDELETECOLUMN_1['OUTPUT'],'elev',1,1.0,0.0,True,'"elevation"',None)
outputs_QGISDELETECOLUMN_2=processing.runalg('qgis:deletecolumn',outputs_QGISFIELDCALCULATOR_11['OUTPUT_LAYER'],'elevation',None)
outputs_QGISDELETECOLUMN_3=processing.runalg('qgis:deletecolumn',outputs_QGISDELETECOLUMN_2['OUTPUT'],'ID_2',None)
outputs_QGISFIELDCALCULATOR_7=processing.runalg('qgis:fieldcalculator', outputs_QGISDELETECOLUMN_3['OUTPUT'],'key',2,128.0,0.0,True,'concat("id_1",\'_\',"elev")',None)
progress.setText("Determining index contours")
outputs_QGISFIELDCALCULATOR_1=processing.runalg('qgis:fieldcalculator', outputs_QGISFIELDCALCULATOR_7['OUTPUT_LAYER'],'index',1,1.0,0.0,True,'"elev" % '+str(index_contour_modal)+' = 0',None)
progress.setText("Calculating label rotation")
outputs_QGISFIELDCALCULATOR_12=processing.runalg('qgis:fieldcalculator', outputs_QGISFIELDCALCULATOR_1['OUTPUT_LAYER'],'rot',0,6.0,3.0,True,'0',None)
outputs_QGISFIXEDDISTANCEBUFFER_3=processing.runalg('qgis:fixeddistancebuffer', outputs_QGISFIELDCALCULATOR_1['OUTPUT_LAYER'],2.0,5.0,False,None)
outputs_QGISINTERSECTION_2=processing.runalg('qgis:intersection', use_contours,outputs_QGISFIXEDDISTANCEBUFFER_3['OUTPUT'],None)
outputs_QGISFIELDCALCULATOR_2=processing.runalg('qgis:fieldcalculator', outputs_QGISINTERSECTION_2['OUTPUT'],'sint',2,128.0,0.0,True,'geom_to_wkt(start_point($geometry))',None)
outputs_QGISFIELDCALCULATOR_3=processing.runalg('qgis:fieldcalculator', outputs_QGISFIELDCALCULATOR_2['OUTPUT_LAYER'],'eint',2,128.0,0.0,True,'geom_to_wkt(end_point($geometry))',None)
if (invert_labels):
deg = 270
else:
deg = 90
outputs_QGISFIELDCALCULATOR_5=processing.runalg('qgis:fieldcalculator', outputs_QGISFIELDCALCULATOR_3['OUTPUT_LAYER'],'rot',0,6.0,3.0,True,str(deg)+'-((atan((x(geom_from_wkt("sint"))-x(geom_from_wkt("eint")))/(y(geom_from_wkt("sint"))-y(geom_from_wkt("eint")))))*180/3.14159+(180*(((y(geom_from_wkt("sint"))-y(geom_from_wkt("eint")))<0)+(((x(geom_from_wkt("sint"))-x(geom_from_wkt("eint")))<0 AND (y(geom_from_wkt("sint"))-y(geom_from_wkt("eint")))>0)*2))))',None)
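# The expression above derives each label's rotation from the azimuth of the short contour segment
# between the stored start point ("sint") and end point ("eint"), with quadrant corrections, and
# offsets it by 90 or 270 degrees depending on invert_labels (reading aid for the formula above).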
progress.setText("Determining contours to label")
rlayer = QgsVectorLayer(outputs_QGISFIELDCALCULATOR_5['OUTPUT_LAYER'], 'rlayer', 'ogr')
tlayer = QgsVectorLayer(outputs_QGISFIELDCALCULATOR_12['OUTPUT_LAYER'], 'tlayer', 'ogr')
dshort = start_buffer
dmid = start_buffer*2
dlong = start_buffer*3
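# dshort/dmid/dlong (1x, 2x and 3x the configured start buffer) are the minimum spacings to
# neighbouring contour elevations used by the labelling loops below when deciding whether an
# intersection point should receive a label (interpretation of the logic that follows).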
if not tlayer.isValid():
progress.setText("Layer failed to load!")
exit(0)
if not rlayer.isValid():
progress.setText("Layer failed to load!")
exit(0)
tlayer.dataProvider().addAttributes([QgsField("label", QVariant.Int)])
tlayer.updateFields()
new_field_index = tlayer.fieldNameIndex('label')
rot_index = tlayer.fieldNameIndex('rot')
tlayer.startEditing()
for f in processing.features(tlayer):
tlayer.changeAttributeValue(f.id(), new_field_index, 0)
for t in processing.features(rlayer):
if (f['key'] == t['key']):
tlayer.changeAttributeValue(f.id(), rot_index, t['rot'])
tlayer.commitChanges()
tlayer.startEditing()
for f in processing.features(tlayer):
t = None
for t in processing.features(tlayer):
if (t['key'] == str(f['id_1'])+'_'+str(f['elev']+contour_step)):
fup = t
break
else:
fup = -99
t = None
for t in processing.features(tlayer):
if (t['key'] == str(f['id_1'])+'_'+str(f['elev']-contour_step)):
fdown = t
break
else:
fdown = -99
change = 0
if (f['index'] == 1):
change = 1
else:
if (fdown != -99):
distd = calcDist(f.geometry().asPoint().x(),f.geometry().asPoint().y(),fdown.geometry().asPoint().x(),fdown.geometry().asPoint().y())
fdl = fdown['label']
fdi = fdown['index']
else:
distd = 0
fdl = 0
fdi = 0
if (fup != -99):
distu = calcDist(f.geometry().asPoint().x(),f.geometry().asPoint().y(),fup.geometry().asPoint().x(),fup.geometry().asPoint().y())
ful = fup['label']
fui = fup['index']
else:
distu = 0
ful = 0
fui = 0
if ((distu >= dlong and distd >= dlong) or (distu >= dlong and fdown == -99) or (distd >= dlong and fup == -99)):
change = 1
elif ((distu >= dmid and fui == 0 and distd >= dmid and fdi == 0) or (distu >= dmid and fui == 0 and fdown == -99) or (distd >= dmid and fdi == 0 and fup == -99)):
change = 1
tlayer.changeAttributeValue(f.id(), new_field_index, change)
tlayer.commitChanges()
tlayer.startEditing()
for f in processing.features(tlayer):
t = None
for t in processing.features(tlayer):
if (t['key'] == str(f['id_1'])+'_'+str(f['elev']+contour_step)):
fup = t
break
else:
fup = -99
t = None
for t in processing.features(tlayer):
if (t['key'] == str(f['id_1'])+'_'+str(f['elev']-contour_step)):
fdown = t
break
else:
fdown = -99
if (f['label'] == 1):
continue
else:
change = 0
if (fdown != -99):
distd = calcDist(f.geometry().asPoint().x(),f.geometry().asPoint().y(),fdown.geometry().asPoint().x(),fdown.geometry().asPoint().y())
fdl = fdown['label']
fdi = fdown['index']
else:
distd = 0
fdl = 0
fdi = 0
if (fup != -99):
distu = calcDist(f.geometry().asPoint().x(),f.geometry().asPoint().y(),fup.geometry().asPoint().x(),fup.geometry().asPoint().y())
ful = fup['label']
fui = fup['index']
else:
distu = 0
ful = 0
fui = 0
if (distu > dshort and ful == 0 and distd > dshort and fdl == 0):
change = 1
elif (distu > dshort and ful == 0 and distd >= dlong):
change = 1
elif (distd > dshort and fdl == 0 and distu >= dlong):
change = 1
tlayer.changeAttributeValue(f.id(), new_field_index, change)
tlayer.commitChanges()
outputs_QGISFIELDCALCULATOR_8=processing.runalg('qgis:fieldcalculator', outputs_QGISFIELDCALCULATOR_12['OUTPUT_LAYER'],'buffer',1,3.0,0.0,True,'('+str(start_buffer)+' + ((length(to_string( "elev"))-1) * '+str(buffer_increment)+'))',None)
if (create_clipped_contours):
progress.setText("Creating clipped contours")
outputs_QGISEXTRACTBYATTRIBUTE_1=processing.runalg('qgis:extractbyattribute', outputs_QGISFIELDCALCULATOR_8['OUTPUT_LAYER'],'label',0,'1',None)
outputs_QGISFIXEDDISTANCEBUFFER_1=processing.runalg('qgis:fixeddistancebuffer', outputs_QGISEXTRACTBYATTRIBUTE_1['OUTPUT'],2.0,5.0,False,None)
outputs_QGISVARIABLEDISTANCEBUFFER_1=processing.runalg('qgis:variabledistancebuffer', outputs_QGISEXTRACTBYATTRIBUTE_1['OUTPUT'],'buffer',5.0,False,None)
outputs_QGISINTERSECTION_1=processing.runalg('qgis:intersection', use_contours,outputs_QGISVARIABLEDISTANCEBUFFER_1['OUTPUT'],None)
outputs_QGISMULTIPARTTOSINGLEPARTS_1=processing.runalg('qgis:multiparttosingleparts', outputs_QGISINTERSECTION_1['OUTPUT'],None)
if (int(version[0]) == 2 and int(version[1]) == 14):
outputs_QGISEXTRACTBYLOCATION_1=processing.runalg('qgis:extractbylocation', outputs_QGISMULTIPARTTOSINGLEPARTS_1['OUTPUT'],outputs_QGISFIXEDDISTANCEBUFFER_1['OUTPUT'],['intersects','crosses'],None)
elif (int(version[0]) == 2 and int(version[1]) == 16):
outputs_QGISEXTRACTBYLOCATION_1=processing.runalg('qgis:extractbylocation', outputs_QGISMULTIPARTTOSINGLEPARTS_1['OUTPUT'],outputs_QGISFIXEDDISTANCEBUFFER_1['OUTPUT'],['intersects','crosses'],1.0,None)
outputs_QGISFIXEDDISTANCEBUFFER_2=processing.runalg('qgis:fixeddistancebuffer', outputs_QGISEXTRACTBYLOCATION_1['OUTPUT'],2.0,5.0,False,None)
progress.setText("Returning final clipped contours")
if (int(version[0]) == 2 and int(version[1]) == 14):
outputs_QGISDIFFERENCE_1=processing.runalg('qgis:difference',use_contours,outputs_QGISFIXEDDISTANCEBUFFER_2['OUTPUT'],output_contours)
elif (int(version[0]) == 2 and int(version[1]) == 16):
outputs_QGISDIFFERENCE_1=processing.runalg('qgis:difference',use_contours,outputs_QGISFIXEDDISTANCEBUFFER_2['OUTPUT'],False,output_contours)
else:
output_contours = input_contours
progress.setText("Cleaning output layers.")
progress.setText("Returning labels")
outputs_QGISDELETECOLUMN_4=processing.runalg('qgis:deletecolumn',outputs_QGISFIELDCALCULATOR_8['OUTPUT_LAYER'],'buffer',None)
outputs_QGISDELETECOLUMN_5=processing.runalg('qgis:deletecolumn',outputs_QGISDELETECOLUMN_4['OUTPUT'],'ID',None)
outputs_QGISDELETECOLUMN_6=processing.runalg('qgis:deletecolumn',outputs_QGISDELETECOLUMN_5['OUTPUT'],'ID_1',output_labels)
progress.setText("All done.")
| mit | -4,487,152,315,475,190,000 | 56.181818 | 464 | 0.678668 | false | 3.145 | false | false | false |
cmr/cmr_rrt | src/tree.py | 1 | 1500 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# simple tree with parent-pointers and data associated with each node and
# edge.
class Node(object):
def __init__(self, data=None, parent=None):
self.data = data
self.children = []
        self.parent = None
        self.edge_data = None
        if parent is not None:
            # register with the supplied parent rather than silently ignoring the argument
            parent.add_child(self)
def add_child(self, child, edge_data=None):
if child not in self.children:
child.parent = self
child.edge_data = edge_data
self.children.append(child)
    def detach(self):
        if self.parent is None:
            return  # already detached (or the root node); nothing to do
        try:
            self.parent.children.remove(self)
        except ValueError as e:
            print "Help!! My parent doesn't think I am its child :("
            raise e
        self.parent = None
        self.edge_data = None
    def find_root(self):
        cur = self
        while cur.parent is not None:
            cur = cur.parent
        return cur
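# Minimal usage sketch (illustrative, based on the API above; not part of the original module):
if __name__ == '__main__':
    root = Node(data='root')
    child = Node(data='child')
    root.add_child(child, edge_data='edge-1')
    assert child.find_root() is root
    child.detach()
    assert child.parent is None and child not in root.children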
| gpl-3.0 | -3,372,609,052,861,359,000 | 33.090909 | 73 | 0.650667 | false | 4.155125 | false | false | false |
mattjhayes/nmeta2 | nmeta2/nmeta2.py | 1 | 29505 | #!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is the main module of the nmeta2 suite
running on top of the Ryu SDN controller to provide network identity
and flow (traffic classification) metadata.
.
It supports OpenFlow v1.3 switches and Data Path Auxiliary Engines
(DPAE)
.
Do not use this code for production deployments - it is proof
of concept code and carries no warranty whatsoever.
.
You have been warned.
"""
#*** Logging Imports:
import logging
#import coloredlogs
#*** General Imports:
import sys
import time
#*** mongodb Database Import:
from pymongo import MongoClient
#*** Ryu Imports:
from ryu import utils
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import HANDSHAKE_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ipv4, ipv6
from ryu.lib.packet import tcp
#*** Required for api module context:
from ryu.app.wsgi import WSGIApplication
#*** nmeta imports:
import config
import switch_abstraction
import api
import main_policy
import of_error_decode
#*** JSON imports:
import json
from json import JSONEncoder
#*** Universal Unique Identifier:
from uuid import UUID
class Nmeta(app_manager.RyuApp):
"""
This is the main class of nmeta2 and is run by Ryu
"""
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
#*** Used to call api module:
_CONTEXTS = {'wsgi': WSGIApplication}
def __init__(self, *args, **kwargs):
super(Nmeta, self).__init__(*args, **kwargs)
#*** Version number for compatibility checks:
self.version = '0.3.5'
#*** Instantiate config class which imports configuration file
#*** config.yaml and provides access to keys/values:
self.config = config.Config()
#*** Get logging config values from config class:
_logging_level_s = self.config.get_value \
('nmeta_logging_level_s')
_logging_level_c = self.config.get_value \
('nmeta_logging_level_c')
_syslog_enabled = self.config.get_value('syslog_enabled')
_loghost = self.config.get_value('loghost')
_logport = self.config.get_value('logport')
_logfacility = self.config.get_value('logfacility')
_syslog_format = self.config.get_value('syslog_format')
_console_log_enabled = self.config.get_value('console_log_enabled')
_coloredlogs_enabled = self.config.get_value('coloredlogs_enabled')
_console_format = self.config.get_value('console_format')
#*** Set up Logging:
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.DEBUG)
self.logger.propagate = False
#*** Syslog:
if _syslog_enabled:
#*** Log to syslog on host specified in config.yaml:
self.syslog_handler = logging.handlers.SysLogHandler(address=(
_loghost, _logport),
facility=_logfacility)
syslog_formatter = logging.Formatter(_syslog_format)
self.syslog_handler.setFormatter(syslog_formatter)
self.syslog_handler.setLevel(_logging_level_s)
#*** Add syslog log handler to logger:
self.logger.addHandler(self.syslog_handler)
#*** Console logging:
if _console_log_enabled:
#*** Log to the console:
self.console_handler = logging.StreamHandler()
console_formatter = logging.Formatter(_console_format)
self.console_handler.setFormatter(console_formatter)
self.console_handler.setLevel(_logging_level_c)
#*** Add console log handler to logger:
self.logger.addHandler(self.console_handler)
#*** Set a variable to indicate if either or both levels are
#*** at debug:
if _logging_level_s == 'DEBUG' or _logging_level_c == 'DEBUG':
self.debug_on = True
else:
self.debug_on = False
#*** Set up variables:
#*** Get max bytes of new flow packets to send to controller from
#*** config file:
self.miss_send_len = self.config.get_value("miss_send_len")
if self.miss_send_len < 1500:
self.logger.info("Be aware that setting "
"miss_send_len to less than a full size packet "
"may result in errors due to truncation. "
"Configured value is %s bytes",
self.miss_send_len)
#*** Load the Flow Table ID numbers:
self.ft_iig = self.config.get_value("ft_iig")
self.ft_iim = self.config.get_value("ft_iim")
self.ft_tc = self.config.get_value("ft_tc")
self.ft_tt = self.config.get_value("ft_tt")
self.ft_fwd = self.config.get_value("ft_fwd")
#*** Context Configuration:
self.context_default = self.config.get_value("context_default")
#*** DPAE Registration Parameters:
self.dpae2ctrl_mac = self.config.get_value("dpae2ctrl_mac")
self.ctrl2dpae_mac = self.config.get_value("ctrl2dpae_mac")
self.dpae_ethertype = self.config.get_value("dpae_ethertype")
#*** Tell switch how to handle fragments (see OpenFlow spec):
self.ofpc_frag = self.config.get_value("ofpc_frag")
#*** Update JSON to support UUID encoding:
JSONEncoder_olddefault = JSONEncoder.default
def JSONEncoder_newdefault(self, o):
if isinstance(o, UUID):
return str(o)
return JSONEncoder_olddefault(self, o)
JSONEncoder.default = JSONEncoder_newdefault
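        #*** Illustrative effect of the patch above (note only, not executed here): e.g.
        #*** json.dumps({'flow_uuid': uuid.uuid4()}) now yields the UUID's string form
        #*** instead of raising TypeError.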
#*** Instantiate Module Classes:
self.switches = switch_abstraction.Switches(self, self.config)
wsgi = kwargs['wsgi']
self.api = api.Api(self, self.config, wsgi)
self.main_policy = main_policy.MainPolicy(self.config)
#*** Start mongodb:
self.logger.info("Connecting to mongodb database...")
self.mongo_addr = self.config.get_value("mongo_addr")
self.mongo_port = self.config.get_value("mongo_port")
mongo_client = MongoClient(self.mongo_addr, self.mongo_port)
#*** Connect to specific databases and collections in mongodb:
#*** ID Service database:
db_svc = mongo_client.idsvc_database
self.dbidsvc = db_svc.idsvc
#*** ID Node database:
        db_node = mongo_client.idnode_database
        self.dbidnode = db_node.idnode
        #*** ID IP database:
        db_ip = mongo_client.idip_database
        self.dbidip = db_ip.idip
#*** ID MAC database (with a connection test var):
db_mac = mongo_client.mac_database
self.dbidmac = db_mac.idmac
dbtest = db_mac.cxntest
#*** DPAE database:
db_dpae = mongo_client.dpae_database
self.dbdpae = db_dpae.dpae
#*** Test a Database Connection:
try:
dbtest.delete_many({})
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.logger.critical("Fatal. Mongodb connection failed. "
"Exception %s, %s, %s. Check that database"
" is running and nmeta config file has correct mongodb "
"connection parameters",
exc_type, exc_value, exc_traceback)
sys.exit()
test_data = {"testing": "1,2,3"}
test_id = dbtest.insert_one(test_data).inserted_id
result = dbtest.find(test_data).count()
if result == 1:
self.logger.info("Success! Connected to mongodb database")
else:
self.logger.critical("Fatal. Mongodb test failed"
"database addr mongo_addr=%s mongo_port=%s. Check that database"
" is running and nmeta config file has correct mongodb "
"connection parameters", self.mongo_addr, self.mongo_port)
sys.exit()
#*** ID Service database - delete all previous entries:
result = self.dbidsvc.delete_many({})
self.logger.info("Initialising ID Service database, Deleted %s "
"previous entries from dbidsvc", result.deleted_count)
#*** ID Node database - delete all previous entries:
result = self.dbidnode.delete_many({})
self.logger.info("Initialising ID Node database, Deleted %s previous "
"entries from dbidnode", result.deleted_count)
#*** ID IP database - delete all previous entries:
result = self.dbidip.delete_many({})
self.logger.info("Initialising ID IP database, Deleted %s previous "
"entries from dbidip", result.deleted_count)
#*** ID MAC database - delete all previous entries:
result = self.dbidmac.delete_many({})
self.logger.info("Initialising ID MAC database, Deleted %s previous "
"entries from dbidmac", result.deleted_count)
#*** DPAE database - delete all previous entries:
result = self.dbdpae.delete_many({})
self.logger.info("Initialising DPAE database, Deleted %s previous "
"entries from dbdpae", result.deleted_count)
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_connection_handler(self, ev):
"""
A switch has connected to the SDN controller.
We need to do some tasks to set the switch up properly:
- Instantiate a class to represent the switch and flow tables
- Delete all existing flow entries
- Set config for fragment handling and table miss packet length
- Set up initial flow entries in flow tables
- Install non-DPAE TC flows from optimised policy to switch
- Request the switch send us its description
        Supported OpenFlow versions are controlled by the OFP_VERSIONS
        constant set on the class.
"""
datapath = ev.msg.datapath
self.logger.info("In switch_connection_handler dpid=%s", datapath.id)
#*** Add switch to our class abstraction:
self.switches.add(datapath)
switch = self.switches[datapath.id]
#*** Delete all existing flows from the switch:
switch.flowtables.delete_all_flows()
#*** Set the configuration on the switch:
switch.set_switch_config(self.ofpc_frag, self.miss_send_len)
#*** Set up switch flow table basics:
switch.flowtables.add_fe_iig_broadcast()
switch.flowtables.add_fe_iig_miss()
switch.flowtables.add_fe_iim_miss()
switch.flowtables.add_fe_tcf_accepts()
switch.flowtables.add_fe_tcf_miss()
switch.flowtables.add_fe_tc_miss()
switch.flowtables.add_fe_amf_miss()
switch.flowtables.add_fe_tt_miss()
switch.flowtables.add_fe_fwd_miss()
#*** Set flow entry for DPAE join packets:
switch.flowtables.add_fe_iim_dpae_join()
#*** Install non-DPAE static TC flows from optimised policy to switch:
switch.flowtables.add_fe_tc_static \
(self.main_policy.optimised_rules.get_rules())
        #*** Request that the switch send us its description:
switch.request_switch_desc()
@set_ev_cls(ofp_event.EventOFPDescStatsReply, MAIN_DISPATCHER)
def desc_stats_reply_handler(self, ev):
"""
Receive a reply from a switch to a description
statistics request
"""
body = ev.msg.body
datapath = ev.msg.datapath
dpid = datapath.id
self.logger.info('event=DescStats Switch dpid=%s is mfr_desc="%s" '
'hw_desc="%s" sw_desc="%s" serial_num="%s" dp_desc="%s"',
dpid, body.mfr_desc, body.hw_desc, body.sw_desc,
body.serial_num, body.dp_desc)
@set_ev_cls(ofp_event.EventOFPFlowRemoved, MAIN_DISPATCHER)
def flow_removed_handler(self, ev):
"""
A switch has sent an event to us because it has removed
a flow from a flow table
"""
msg = ev.msg
datapath = msg.datapath
ofp = datapath.ofproto
if msg.reason == ofp.OFPRR_IDLE_TIMEOUT:
reason = 'IDLE TIMEOUT'
elif msg.reason == ofp.OFPRR_HARD_TIMEOUT:
reason = 'HARD TIMEOUT'
elif msg.reason == ofp.OFPRR_DELETE:
reason = 'DELETE'
elif msg.reason == ofp.OFPRR_GROUP_DELETE:
reason = 'GROUP DELETE'
else:
reason = 'unknown'
self.logger.info('Flow removed msg '
'cookie=%d priority=%d reason=%s table_id=%d '
'duration_sec=%d '
'idle_timeout=%d hard_timeout=%d '
'packets=%d bytes=%d match=%s',
msg.cookie, msg.priority, reason, msg.table_id,
msg.duration_sec,
msg.idle_timeout, msg.hard_timeout,
msg.packet_count, msg.byte_count, msg.match)
# Is it a MAC learning suppression FE idle timeout?
if msg.table_id == self.ft_iim and \
msg.reason == ofp.OFPRR_IDLE_TIMEOUT:
switch = self.switches[datapath.id]
#*** Extract the MAC from the match:
mac = msg.match['eth_src']
in_port = msg.match['in_port']
#*** TBD, deal with context:
context = self.context_default
#*** Call method to delete FEs:
switch.mactable.delete(mac, in_port, context)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
"""
A switch has sent us a Packet In event
"""
msg = ev.msg
datapath = msg.datapath
ofproto = msg.datapath.ofproto
dpid = datapath.id
switch = self.switches[dpid]
in_port = msg.match['in_port']
pkt = packet.Packet(msg.data)
eth = pkt.get_protocol(ethernet.ethernet)
#*** TBD, deal with context:
context = self.context_default
#*** Extra debug if syslog or console logging set to DEBUG:
if self.debug_on:
self._packet_in_debug(ev, in_port)
#*** Is it a DPAE Join request? If so, call function to handle it:
if eth.src == self.ctrl2dpae_mac and eth.dst == self.dpae2ctrl_mac:
self.dpae_join(pkt, datapath, in_port)
return 1
self.logger.info("Learned mac=%s dpid=%s port=%s",
eth.src, dpid, in_port)
#*** Add to MAC/port pair to switch MAC table:
switch.mactable.add(eth.src, in_port, context)
#*** In active mode with a DPAE, we need to add an AMF flow entry:
if self.main_policy.tc_policies.mode == 'active':
#*** Look the DPID up in the database:
db_result = self.dbdpae.find_one({'dpid': dpid})
if db_result:
self.logger.info("Found DPAE for dpid=%s, adding AMF entry",
dpid)
#*** Get the dpae port for that switch:
#*** TBD, handle more than one DPAE per switch
dpae_port = db_result['switch_port']
if dpae_port:
#*** Add FE to the Active Mode Filter (ft_amf) Flow table:
self.logger.info("Adding AMF entry dpid=%s dpae_port=%s "
"mac=%s", dpid, dpae_port, eth.src)
switch.flowtables.add_fe_amf_macport_dst(dpae_port,
eth.src)
else:
self.logger.error("No DPAE switch port for dpid=%s", dpid)
else:
self.logger.debug("No DPAE found for dpid=%s", dpid)
#*** Add source MAC / in port to Forwarding table as destinations so
#*** that we don't flood them:
switch.flowtables.add_fe_fwd_macport_dst(in_port, eth.src)
#*** Add source MAC / in port to Identity Indicator (MAC) table so
#*** that we don't get further packet in events for this combo:
switch.flowtables.add_fe_iim_macport_src(in_port, eth.src)
#*** Do a packet out to avoid going through DPAE in active mode
#*** which causes bad MAC learning in adjacent switches
#*** if forwarding entry not installed:
# Send out specific port if known or flood:
out_port = switch.mactable.mac2port(eth.dst, context)
if out_port == switch_abstraction.PORT_NOT_FOUND:
out_port = ofproto.OFPP_FLOOD
#*** Packet out:
switch.packet_out(msg.data, in_port, out_port, 0, 1)
@set_ev_cls(ofp_event.EventOFPErrorMsg,
[HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER])
def error_msg_handler(self, ev):
"""
A switch has sent us an error event
"""
msg = ev.msg
datapath = msg.datapath
dpid = datapath.id
self.logger.error('event=OFPErrorMsg_received: dpid=%s '
'type=%s code=%s message=%s',
dpid, msg.type, msg.code, utils.hex_array(msg.data))
#*** Log human-friendly decodes for the error type and code:
type1, type2, code1, code2 = of_error_decode.decode(msg.type, msg.code)
self.logger.error('error_type=%s %s error_code=%s %s', type1, type2,
code1, code2)
@set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)
def _port_status_handler(self, ev):
"""
Switch Port Status event
"""
msg = ev.msg
reason = msg.reason
port = msg.desc.port_no
ofproto = msg.datapath.ofproto
if reason == ofproto.OFPPR_ADD:
self.logger.info("port added port=%s", port)
elif reason == ofproto.OFPPR_DELETE:
self.logger.info("port deleted port=%s", port)
elif reason == ofproto.OFPPR_MODIFY:
self.logger.info("port modified port=%s", port)
else:
self.logger.info("Illegal port state port=%s %s", port, reason)
def tc_start(self, datapath, dpae_port):
"""
Add a Flow Entry to switch to clone selected packets to a
DPAE so that it can perform Traffic Classification analysis
on them
"""
dpid = datapath.id
self.logger.info("Starting TC to DPAE on datapath=%s, dpae_port=%s",
dpid, dpae_port)
switch = self.switches[dpid]
#*** Check if Active or Passive TC Mode:
mode = self.main_policy.tc_policies.mode
self.logger.info("TC mode=%s", mode)
#*** TBD, deal with context:
context = self.context_default
#*** Set up group table to send to DPAE:
# NEEDS OVS 2.1 OR HIGHER SO COMMENTED OUT FOR THE MOMENT
# ALSO NEEDS CODE THAT CAN CATER FOR MULTIPLE DPAE
#switch.flowtables.add_group_dpae(out_port)
if self.main_policy.identity.lldp:
#*** Install FEs to send LLDP Identity indicators to DPAE:
switch.flowtables.add_fe_iig_lldp(dpae_port)
if self.main_policy.identity.dhcp:
#*** Install FEs to send DHCP Identity indicators to DPAE:
switch.flowtables.add_fe_iig_dhcp(dpae_port)
if self.main_policy.identity.dns:
#*** Install FEs to send DNS Identity indicators to DPAE:
switch.flowtables.add_fe_iig_dns(dpae_port)
if mode == 'active':
#*** Install AMF entries for MACs we already know dest for:
mac_list = switch.mactable.dump_macs(context)
for mac in mac_list:
self.logger.debug("Adding previously learned mac=%s dpid=%s "
"dpae_port=%s to Active Mode Filter (amf)", mac, dpid,
dpae_port)
switch.flowtables.add_fe_amf_macport_dst(dpae_port, mac)
#*** Install FE to so packets returning from DPAE in active mode
#*** bypass learning tables and go straight to treatment:
switch.flowtables.add_fe_iim_dpae_active_bypass(dpae_port)
#*** Add any general TC flows to send to DPAE if required by policy
#*** (i.e. statistical or payload):
switch.flowtables.add_fe_tc_dpae(
self.main_policy.optimised_rules.get_rules(),
dpae_port, mode)
self.logger.info("TC started to DPAE on datapath=%s, dpae_port=%s",
dpid, dpae_port)
_results = {"status": "tc_started",
"mode": mode}
return _results
def dpae_join(self, pkt, datapath, in_port):
"""
A DPAE may have sent us a join discovery packet (Phase 2)
Check the packet payload to see if it is valid
"""
_payload = str(pkt.protocols[-1])
self.logger.info("Phase 2 DPAE discovery packet received from dpid=%s "
"port=%s payload=%s",
datapath.id, in_port, _payload)
#*** Try decode of payload as JSON:
try:
dpae_discover = json.loads(_payload)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.logger.error("Phase 2 DPAE API Create exception while "
"decoding JSON body=%s Exception %s, %s, %s",
_payload, exc_type, exc_value, exc_traceback)
return 0
#*** Check to see if JSON has a uuid_controller key:
if 'uuid_controller' in dpae_discover:
uuid_controller = dpae_discover['uuid_controller']
else:
self.logger.debug("No uuid_controller field in discovery "
"packet so ignoring...")
return 0
#*** Check to see if JSON has a hostname_dpae key:
if 'hostname_dpae' in dpae_discover:
hostname_dpae = dpae_discover['hostname_dpae']
else:
self.logger.debug("No hostname_dpae field in discovery "
"packet so ignoring...")
return 0
#*** Check to see if JSON has a if_name key:
if 'if_name' in dpae_discover:
if_name = dpae_discover['if_name']
else:
self.logger.debug("No if_name field in discovery "
"packet so ignoring...")
return 0
#*** Check to see if JSON has a uuid_dpae key:
if 'uuid_dpae' in dpae_discover:
uuid_dpae = dpae_discover['uuid_dpae']
else:
self.logger.debug("No uuid_dpae field in discovery "
"packet so ignoring...")
return 0
#*** Look the key up in the database:
db_result = self.dbdpae.find_one({'_id': str(uuid_controller)})
if db_result:
#*** Check all fields match:
if not hostname_dpae == str(db_result[u'hostname_dpae']):
self.logger.error("Phase 2 hostname_dpae mismatch")
return 0
if not if_name == str(db_result[u'if_name']):
self.logger.error("Phase 2 if_name mismatch")
return 0
if not uuid_dpae == str(db_result[u'uuid_dpae']):
self.logger.error("Phase 2 uuid_dpae mismatch")
return 0
self.logger.debug("Phase 2 updating DPAE record")
db_result = self.dbdpae.update_one(
{'_id': str(uuid_controller)},
{
'$set': {
'dpid': datapath.id,
'switch_port': in_port
},
}
)
self.logger.debug("Phase 2 updated %s database record(s)",
db_result.modified_count)
else:
#*** Ignore as no uuid_controller key:
self.logger.debug("Phase 2 discovery packet uuid_controller field "
"not found in database, so ignoring...")
return 0
def tc_advice_id(self, dpid, tc_type, tc_subtype, src_mac, detail1):
"""
Process a Traffic Classification advice message from a DPAE
that relates to an identity
"""
switch = self.switches[dpid]
#*** TBD, deal with context:
context = self.context_default
#*** Look up source mac to get a port number:
port_number = switch.mactable.mac2port(src_mac, context)
#*** TBD, handle return value for port not found...
if tc_subtype == 'lldp':
#*** Check to see if we already know this identity:
db_data = {'id_type': tc_subtype,
'src_mac': src_mac, 'node_name': detail1}
db_result = self.dbidnode.find_one(db_data)
if not db_result:
#*** LLDP identity not in database so add it:
db_data = {'last_seen': time.time(), 'id_type': tc_subtype,
'src_mac': src_mac, 'node_name': detail1}
db_result = self.dbidnode.insert_one(db_data)
self.logger.info("Created new ID Node record id_type=%s "
"node_name=%s", tc_subtype, detail1)
#*** Check to see if we need to add a flow to switch:
switch.flowtables.add_fe_tc_id(tc_subtype, detail1, src_mac,
self.main_policy.optimised_rules.get_rules())
else:
                #*** Just update the last_seen field on the ID Node record:
                db_result = self.dbidnode.update_one(
{'id_type': tc_subtype,
'src_mac': src_mac, 'node_name': detail1},
{
'$set': {
'last_seen': time.time()
},
}
)
self.logger.debug("Last seen updated for %s of %s ID Node "
"record(s) id_type=%s node_name=%s",
db_result.modified_count,
db_result.matched_count,
tc_subtype, detail1)
else:
self.logger.info("Didn't action tc_subtype=%s", tc_subtype)
def _packet_in_debug(self, ev, in_port):
"""
Generate a debug message describing the packet
in event
"""
#*** Extract parameters:
msg = ev.msg
datapath = msg.datapath
dpid = datapath.id
pkt = packet.Packet(msg.data)
eth = pkt.get_protocol(ethernet.ethernet)
eth_src = eth.src
eth_dst = eth.dst
pkt_ip4 = pkt.get_protocol(ipv4.ipv4)
pkt_ip6 = pkt.get_protocol(ipv6.ipv6)
pkt_tcp = pkt.get_protocol(tcp.tcp)
#*** Some debug about the Packet In:
if pkt_ip4 and pkt_tcp:
self.logger.debug("event=pi_ipv4_tcp dpid=%s "
"in_port=%s ip_src=%s ip_dst=%s tcp_src=%s "
"tcp_dst=%s",
dpid, in_port, pkt_ip4.src, pkt_ip4.dst,
pkt_tcp.src_port, pkt_tcp.dst_port)
elif pkt_ip6 and pkt_tcp:
self.logger.debug("event=pi_ipv6_tcp dpid=%s "
"in_port=%s ip_src=%s ip_dst=%s tcp_src=%s "
"tcp_dst=%s",
dpid, in_port, pkt_ip6.src, pkt_ip6.dst,
pkt_tcp.src_port, pkt_tcp.dst_port)
elif pkt_ip4:
self.logger.debug("event=pi_ipv4 dpid="
"%s in_port=%s ip_src=%s ip_dst=%s proto=%s",
dpid, in_port,
pkt_ip4.src, pkt_ip4.dst, pkt_ip4.proto)
elif pkt_ip6:
self.logger.debug("event=pi_ipv6 dpid=%s "
"in_port=%s ip_src=%s ip_dst=%s",
dpid, in_port,
pkt_ip6.src, pkt_ip6.dst)
else:
self.logger.debug("event=pi_other dpid=%s "
"in_port=%s eth_src=%s eth_dst=%s eth_type=%s",
dpid, in_port, eth_src, eth_dst, eth.ethertype)
| apache-2.0 | 7,914,209,776,357,724,000 | 41.822932 | 79 | 0.557058 | false | 3.933476 | true | false | false |
gabriel-stan/gestion-tfg | apps/upload_files/service.py | 1 | 9782 | __author__ = 'tonima'
from openpyxl import load_workbook
from authentication.models import Profesor, Alumno
from tfgs.models import Titulacion, Tfg_Asig, Tfg
import utils
from django.db.models import Q
class Tfgs_masivos(object):
def __init__(self, fichero=None):
if fichero:
self.wb = load_workbook(fichero)
self.ws = self.wb.active
self.errores = []
self.exitos = []
def upload_file_tfg(self, u_fila, p_fila, cabeceras, titulacion):
for i in range(p_fila, u_fila+1):
try:
data_tfg = self.read_data(cabeceras, i)
self.tfg = self.check_tfg(data_tfg, i, titulacion)
resul = Tfg.objects.simular_create_tfg(**self.tfg)
if self.tfg is not False and resul is True:
self.exitos.append(dict(fila=i, tfg=self.tfg))
else:
self.errores.append(dict(fila=i, message=resul))
except Profesor.DoesNotExist:
self.errores.append(dict(fila=i, message='El profesor no existe'))
continue
except Titulacion.DoesNotExist:
self.errores.append(dict(fila=i, message='La titulacion no existe'))
continue
except Exception as e:
self.errores.append(dict(fila=i, message=e.message))
continue
return dict(status=True, exitos=self.exitos, errores=self.errores)
def read_data(self, cabeceras, i):
resul = dict(tipo=self.ws[cabeceras['tipo'] + str(i)].value,
titulo=self.ws[cabeceras['titulo'] + str(i)].value,
n_alumnos=self.ws[cabeceras['n_alumnos'] + str(i)].value,
descripcion=self.ws[cabeceras['descripcion'] + str(i)].value,
conocimientos_previos=
self.ws[cabeceras['conocimientos_previos'] + str(i)].value,
hard_soft=self.ws[cabeceras['hard_soft'] + str(i)].value,
#titulacion=self.ws[cabeceras['titulacion'] + str(i)].value,
tutor=self.ws[cabeceras['tutor'] + str(i)].value,
cotutor=self.ws[cabeceras['cotutor'] + str(i)].value)
return resul
def check_tfg(self, tfg, i, titulacion):
if not tfg.get('titulo'):
raise Exception('El TFG no tiene titulo')
tfg['tutor'] = Profesor.objects.get(email=tfg.get('tutor'))
tfg['titulacion'] = Titulacion.objects.get(codigo=titulacion)
if tfg.get('cotutor'):
tfg['cotutor'] = Profesor.objects.get(email=str(tfg.get('cotutor')))
tfg = dict(tipo=tfg['tipo'], titulo=tfg['titulo'], n_alumnos=tfg['n_alumnos'],
descripcion=tfg['descripcion'], conocimientos_previos=tfg['conocimientos_previos'],
hard_soft=tfg['hard_soft'],
tutor=tfg['tutor'].email, cotutor=tfg['cotutor'].email, titulacion=tfg['titulacion'].codigo)
else:
tfg = dict(tipo=tfg['tipo'], titulo=tfg['titulo'], n_alumnos=tfg['n_alumnos'],
descripcion=tfg['descripcion'], conocimientos_previos=tfg['conocimientos_previos'],
hard_soft=tfg['hard_soft'],
tutor=tfg['tutor'].email, titulacion=tfg['titulacion'].codigo)
return tfg
def upload_file_confirm(self, tfgs):
errores = []
for index, data_tfg in enumerate(tfgs):
try:
tfg = data_tfg.get('tfg')
res = Tfg.objects.create(**tfg)
if not res.get('status'):
errores.append(dict(fila=index, tfg=tfg))
except Exception as e:
errores.append(dict(fila=index, message=e.message))
continue
return dict(status=True, errores=errores)
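# Illustrative usage sketch (the file name and column letters are hypothetical,
# not defined by this module):
#   uploader = Tfgs_masivos(open('tfgs.xlsx', 'rb'))
#   preview = uploader.upload_file_tfg(u_fila=20, p_fila=2,
#                                      cabeceras={'titulo': 'A', 'tipo': 'B',
#                                                 'tutor': 'G', ...},
#                                      titulacion='GII')
#   uploader.upload_file_confirm(preview['exitos'])  # after the user confirms the preview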
class Tfgs_asig_masivos(Tfgs_masivos):
def __init__(self, fichero=None):
super(Tfgs_asig_masivos, self).__init__(fichero)
def upload_file_tfg(self, u_fila, p_fila, cabeceras, titulacion):
for i in range(p_fila, u_fila+1):
try:
data_tfg = self.read_data(cabeceras, i)
self.tfg = self.check_tfg(data_tfg, i, titulacion)
resul = Tfg.objects.simular_create_tfg(**self.tfg)
if self.tfg is not False and resul is True:
model_tfg = Tfg(**data_tfg)
self.check_tfg_asig(data_tfg, cabeceras, i)
tfg_asig = dict(tfg=model_tfg, alumno_1=data_tfg['alumno_1'], alumno_2=data_tfg['alumno_2'],
alumno_3=data_tfg['alumno_3'])
resul = Tfg_Asig.objects.simular_create_tfg_asig(**tfg_asig)
if resul is True:
self.exitos.append(dict(fila=i, tfg=self.tfg))
else:
self.errores.append(dict(fila=i, message=resul))
else:
self.errores.append(dict(fila=i, message=resul))
except Profesor.DoesNotExist:
self.errores.append(dict(fila=i, message='El profesor no existe'))
continue
except Alumno.DoesNotExist:
self.errores.append(dict(fila=i, message='El alumno no existe'))
continue
except Titulacion.DoesNotExist:
self.errores.append(dict(fila=i, message='La titulacion no existe'))
continue
except Exception as e:
self.errores.append(dict(fila=i, message=e.message))
continue
return dict(status=True, exitos=self.exitos, errores=self.errores)
def check_tfg_asig(self, data_tfg, cabeceras, i):
data_tfg['alumno_1'], self.tfg['alumno_1'] = utils.alumno_email_or_dni(
unicode(self.ws[cabeceras['alumno_1'] + str(i)].value) if cabeceras.get('alumno_1') and \
self.ws[cabeceras['alumno_1'] + str(i)].value \
else None)
self.tfg['nombre_alumno_1'] = unicode(self.ws[cabeceras['nombre_alumno_1'] + str(i)].value) if cabeceras.get('nombre_alumno_1') and \
self.ws[cabeceras['nombre_alumno_1'] + str(i)].value \
else None
data_tfg['alumno_2'], self.tfg['alumno_2'] = utils.alumno_email_or_dni(
unicode(self.ws[cabeceras['alumno_2'] + str(i)].value) if cabeceras.get('alumno_2') and \
self.ws[cabeceras['alumno_2'] + str(i)].value \
else None)
self.tfg['nombre_alumno_2'] = unicode(self.ws[cabeceras['nombre_alumno_2'] + str(i)].value) if cabeceras.get('nombre_alumno_2') and \
self.ws[cabeceras['nombre_alumno_2'] + str(i)].value \
else None
data_tfg['alumno_3'], self.tfg['alumno_3'] = utils.alumno_email_or_dni(
unicode(self.ws[cabeceras['alumno_3'] + str(i)].value) if cabeceras.get('alumno_3') and \
self.ws[cabeceras['alumno_3'] + str(i)].value \
else None)
self.tfg['nombre_alumno_3'] = unicode(self.ws[cabeceras['nombre_alumno_3'] + str(i)].value) if cabeceras.get('nombre_alumno_3') and \
self.ws[cabeceras['nombre_alumno_3'] + str(i)].value \
else None
def upload_file_confirm(self, tfgs):
errores = []
for index, data_tfg in enumerate(tfgs):
try:
self.alumno_1 = self.get_or_create_alumno(data_tfg['tfg'].get('alumno_1'), data_tfg['tfg'].get('nombre_alumno_1')) if data_tfg['tfg'] \
.get('alumno_1') else None
self.alumno_2 = self.get_or_create_alumno(data_tfg['tfg'].get('alumno_2'), data_tfg['tfg'].get('nombre_alumno_2')) if data_tfg['tfg'] \
.get('alumno_2') else None
self.alumno_3 = self.get_or_create_alumno(data_tfg['tfg'].get('alumno_3'), data_tfg['tfg'].get('nombre_alumno_3')) if data_tfg['tfg'] \
.get('alumno_3') else None
self.tfg = Tfg.objects.create(**data_tfg['tfg'])
res = Tfg_Asig.objects.create(tfg=self.tfg.get('data'), alumno_1=self.alumno_1, alumno_2=self.alumno_2,
alumno_3=self.alumno_3)
if not res.get('status'):
errores.append(dict(fila=index, tfg=data_tfg))
except Exception as e:
errores.append(dict(fila=index, message=e.message))
continue
return dict(status=True, errores=errores)
def get_or_create_alumno(self, alumno, nombre=None):
if utils.is_email_alumno(alumno):
if not Alumno.objects.filter(email=alumno if alumno else None).exists():
Alumno.objects.create_user(email=alumno, first_name=nombre)
try:
return Alumno.objects.get(email=alumno)
except Alumno.DoesNotExist:
raise NameError('Error en el alumno %s' % alumno)
elif utils.is_dni(alumno):
if not Alumno.objects.filter(dni=alumno if alumno else None).exists():
Alumno.objects.create_user(dni=alumno, first_name=nombre)
try:
return Alumno.objects.get(dni=alumno)
except Alumno.DoesNotExist:
raise NameError('Error en el alumno %s' % alumno)
else:
raise NameError('Error en el alumno %s' % alumno) | gpl-2.0 | 3,073,960,981,766,819,300 | 51.315508 | 151 | 0.538336 | false | 3.161603 | false | false | false |
flp9001/astrology | astro/horoscope/views.py | 1 | 3623 | #coding: utf-8
import json
import urllib
import geocoder
import pytz
from datetime import datetime
from django.shortcuts import render
from django.core.serializers.json import DjangoJSONEncoder
from django.http import HttpResponse
from django.utils import timezone
from .models import Ephemeris, Event, Houses, Location
from utils import PLANET_NAMES, SIGNS, dms
def home(request):
now = timezone.now()
params = {}
params['datenow'] = now.strftime("%d/%m/%Y")
params['timenow'] = now.strftime("%H:%M")
return render(request, 'horoscope/home.html', params)
def my_events(request):
return render(request, 'horoscope/event_list.html', {'events': Event.objects.all().select_related('ephemeris', 'location')})
def parse_date(date_str, time_str):
now = datetime.now()
if not date_str and not time_str:
return now
if not date_str:
date = now.date()
else:
date = datetime.strptime(date_str, "%d-%m-%Y").date()
if not time_str:
time = now.time()
else:
time = datetime.strptime(time_str, "%H:%M").time()
return datetime.combine(date, time)
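# Example: parse_date("01-05-2017", "09:30") -> datetime(2017, 5, 1, 9, 30);
# a missing date or time part falls back to the current date/time.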
def eph(request):
data = {}
date = request.GET.get('date', None)
time = request.GET.get('time', None)
date = parse_date(date, time)
location = request.GET.get('city', None)
if date and time and location:
l = Location.create(location)
date = l.timezone.localize(date)
date = date.astimezone(pytz.utc)
houses = Houses.create(date, l.lat, l.lng)
data['houses'] = [getattr(houses, i.name) for i in houses._meta.fields[1:]]
data['location'] = {'city': l.city, 'lat': dms(l.lat), 'lng': dms(l.lng)}
data['planets'] = get_planets(date)
data['date'] = str(date)
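    # 'planets' and 'date' are always present in the payload; 'houses' and
    # 'location' are only filled in when date, time and city were all supplied.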
return HttpResponse(
json.dumps(data, indent=4),
content_type='application/javascript; charset=utf8'
)
def get_planets(date):
e = Ephemeris.create(date)
planets = []
for index, field in enumerate(e._meta.fields[:11]):
if field.name != 'id':
v = getattr(e, field.name)
planet = {}
planet['index'] = index
planet['name'] = field.name
planet['angle'] = v
planet['sign_index'] = int(v/30)
planets.append(planet)
return planets
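# Each entry returned by get_planets() is a plain dict shaped like
# {'index': 1, 'name': 'sun', 'angle': 123.4, 'sign_index': 4}
# (values here are illustrative; the field names come from the Ephemeris model).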
def chart(request):
date_str = request.GET.get('date', None)
time_str = request.GET.get('time', None)
date = parse_date(date_str, time_str)
planets = []
for p in get_planets(date):
p['code'] = p['name']
p['angle'] = p['angle'] % 30
p['name'] = PLANET_NAMES[p['code']]
p['sign_code'], p['sign'] = SIGNS[p['sign_index']]
planets.append(p)
return render(request, 'horoscope/chart.html', {'date': date, 'planets': planets})
def save_event(request):
print 'save_event'
if request.method == 'POST':
name = request.POST.get('name')
date = request.POST.get('date')
time = request.POST.get('time')
location = request.POST.get('location')
e = Event.create(name, date, time, location)
e.save()
print request.POST
return HttpResponse(request.POST)
def geocode(request):
query = request.GET.get('query', None)
query = urllib.unquote(query).decode('utf8')
g = geocoder.google(query)
if g.ok and g.city:
data = [{'value': g.address, 'tokens': g.city.split()}]
else:
data = {}
data = json.dumps(data, cls=DjangoJSONEncoder, indent=2, separators=(',', ': '))
return HttpResponse(data, content_type="application/json")
| lgpl-3.0 | -8,005,701,420,390,494,000 | 25.639706 | 128 | 0.606404 | false | 3.38282 | false | false | false |
32bitmicro/EDA | python/eda/eda/pcb.py | 1 | 45843 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2014, Paweł Wodnicki
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the 32bitmicro nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL Paweł Wodnicki BE LIABLE FOR ANY
#DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from edautils import *
from eda import *
pcb_symbols= """
Symbol(' ' 18)
(
)
Symbol('!' 12)
(
SymbolLine(0 35 0 40 8)
SymbolLine(0 0 0 25 8)
)
Symbol('"' 12)
(
SymbolLine(0 0 0 10 8)
SymbolLine(10 0 10 10 8)
)
Symbol('#' 12)
(
SymbolLine(0 25 20 25 8)
SymbolLine(0 15 20 15 8)
SymbolLine(15 10 15 30 8)
SymbolLine(5 10 5 30 8)
)
Symbol('$' 12)
(
SymbolLine(15 5 20 10 8)
SymbolLine(5 5 15 5 8)
SymbolLine(0 10 5 5 8)
SymbolLine(0 10 0 15 8)
SymbolLine(0 15 5 20 8)
SymbolLine(5 20 15 20 8)
SymbolLine(15 20 20 25 8)
SymbolLine(20 25 20 30 8)
SymbolLine(15 35 20 30 8)
SymbolLine(5 35 15 35 8)
SymbolLine(0 30 5 35 8)
SymbolLine(10 0 10 40 8)
)
Symbol('%' 12)
(
SymbolLine(0 5 0 10 8)
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 10 0 8)
SymbolLine(10 0 15 5 8)
SymbolLine(15 5 15 10 8)
SymbolLine(10 15 15 10 8)
SymbolLine(5 15 10 15 8)
SymbolLine(0 10 5 15 8)
SymbolLine(0 40 40 0 8)
SymbolLine(35 40 40 35 8)
SymbolLine(40 30 40 35 8)
SymbolLine(35 25 40 30 8)
SymbolLine(30 25 35 25 8)
SymbolLine(25 30 30 25 8)
SymbolLine(25 30 25 35 8)
SymbolLine(25 35 30 40 8)
SymbolLine(30 40 35 40 8)
)
Symbol('&' 12)
(
SymbolLine(0 35 5 40 8)
SymbolLine(0 5 0 15 8)
SymbolLine(0 5 5 0 8)
SymbolLine(0 25 15 10 8)
SymbolLine(5 40 10 40 8)
SymbolLine(10 40 20 30 8)
SymbolLine(0 15 25 40 8)
SymbolLine(5 0 10 0 8)
SymbolLine(10 0 15 5 8)
SymbolLine(15 5 15 10 8)
SymbolLine(0 25 0 35 8)
)
Symbol(''' 12)
(
SymbolLine(0 10 10 0 8)
)
Symbol('(' 12)
(
SymbolLine(0 35 5 40 8)
SymbolLine(0 5 5 0 8)
SymbolLine(0 5 0 35 8)
)
Symbol(')' 12)
(
SymbolLine(0 0 5 5 8)
SymbolLine(5 5 5 35 8)
SymbolLine(0 40 5 35 8)
)
Symbol('*' 12)
(
SymbolLine(0 10 20 30 8)
SymbolLine(0 30 20 10 8)
SymbolLine(0 20 20 20 8)
SymbolLine(10 10 10 30 8)
)
Symbol('+' 12)
(
SymbolLine(0 20 20 20 8)
SymbolLine(10 10 10 30 8)
)
Symbol(',' 12)
(
SymbolLine(0 50 10 40 8)
)
Symbol('-' 12)
(
SymbolLine(0 20 20 20 8)
)
Symbol('.' 12)
(
SymbolLine(0 40 5 40 8)
)
Symbol('/' 12)
(
SymbolLine(0 35 30 5 8)
)
Symbol('0' 12)
(
SymbolLine(0 35 5 40 8)
SymbolLine(0 5 0 35 8)
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 15 0 8)
SymbolLine(15 0 20 5 8)
SymbolLine(20 5 20 35 8)
SymbolLine(15 40 20 35 8)
SymbolLine(5 40 15 40 8)
SymbolLine(0 30 20 10 8)
)
Symbol('1' 12)
(
SymbolLine(5 40 15 40 8)
SymbolLine(10 0 10 40 8)
SymbolLine(0 10 10 0 8)
)
Symbol('2' 12)
(
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 20 0 8)
SymbolLine(20 0 25 5 8)
SymbolLine(25 5 25 15 8)
SymbolLine(0 40 25 15 8)
SymbolLine(0 40 25 40 8)
)
Symbol('3' 12)
(
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 15 0 8)
SymbolLine(15 0 20 5 8)
SymbolLine(20 5 20 35 8)
SymbolLine(15 40 20 35 8)
SymbolLine(5 40 15 40 8)
SymbolLine(0 35 5 40 8)
SymbolLine(5 20 20 20 8)
)
Symbol('4' 12)
(
SymbolLine(0 20 20 0 8)
SymbolLine(0 20 25 20 8)
SymbolLine(20 0 20 40 8)
)
Symbol('5' 12)
(
SymbolLine(0 0 20 0 8)
SymbolLine(0 0 0 20 8)
SymbolLine(0 20 5 15 8)
SymbolLine(5 15 15 15 8)
SymbolLine(15 15 20 20 8)
SymbolLine(20 20 20 35 8)
SymbolLine(15 40 20 35 8)
SymbolLine(5 40 15 40 8)
SymbolLine(0 35 5 40 8)
)
Symbol('6' 12)
(
SymbolLine(15 0 20 5 8)
SymbolLine(5 0 15 0 8)
SymbolLine(0 5 5 0 8)
SymbolLine(0 5 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(15 20 20 25 8)
SymbolLine(0 20 15 20 8)
SymbolLine(5 40 15 40 8)
SymbolLine(15 40 20 35 8)
SymbolLine(20 25 20 35 8)
)
Symbol('7' 12)
(
SymbolLine(0 40 25 15 8)
SymbolLine(25 0 25 15 8)
SymbolLine(0 0 25 0 8)
)
Symbol('8' 12)
(
SymbolLine(0 35 5 40 8)
SymbolLine(0 25 0 35 8)
SymbolLine(0 25 5 20 8)
SymbolLine(5 20 15 20 8)
SymbolLine(15 20 20 25 8)
SymbolLine(20 25 20 35 8)
SymbolLine(15 40 20 35 8)
SymbolLine(5 40 15 40 8)
SymbolLine(0 15 5 20 8)
SymbolLine(0 5 0 15 8)
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 15 0 8)
SymbolLine(15 0 20 5 8)
SymbolLine(20 5 20 15 8)
SymbolLine(15 20 20 15 8)
)
Symbol('9' 12)
(
SymbolLine(0 40 20 20 8)
SymbolLine(20 5 20 20 8)
SymbolLine(15 0 20 5 8)
SymbolLine(5 0 15 0 8)
SymbolLine(0 5 5 0 8)
SymbolLine(0 5 0 15 8)
SymbolLine(0 15 5 20 8)
SymbolLine(5 20 20 20 8)
)
Symbol(':' 12)
(
SymbolLine(0 15 5 15 8)
SymbolLine(0 25 5 25 8)
)
Symbol(';' 12)
(
SymbolLine(0 40 10 30 8)
SymbolLine(10 15 10 20 8)
)
Symbol('<' 12)
(
SymbolLine(0 20 10 10 8)
SymbolLine(0 20 10 30 8)
)
Symbol('=' 12)
(
SymbolLine(0 15 20 15 8)
SymbolLine(0 25 20 25 8)
)
Symbol('>' 12)
(
SymbolLine(0 10 10 20 8)
SymbolLine(0 30 10 20 8)
)
Symbol('?' 12)
(
SymbolLine(10 20 10 25 8)
SymbolLine(10 35 10 40 8)
SymbolLine(0 5 0 10 8)
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 15 0 8)
SymbolLine(15 0 20 5 8)
SymbolLine(20 5 20 10 8)
SymbolLine(10 20 20 10 8)
)
Symbol('A' 12)
(
SymbolLine(0 5 0 40 8)
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 20 0 8)
SymbolLine(20 0 25 5 8)
SymbolLine(25 5 25 40 8)
SymbolLine(0 20 25 20 8)
)
Symbol('B' 12)
(
SymbolLine(0 40 20 40 8)
SymbolLine(20 40 25 35 8)
SymbolLine(25 25 25 35 8)
SymbolLine(20 20 25 25 8)
SymbolLine(5 20 20 20 8)
SymbolLine(5 0 5 40 8)
SymbolLine(0 0 20 0 8)
SymbolLine(20 0 25 5 8)
SymbolLine(25 5 25 15 8)
SymbolLine(20 20 25 15 8)
)
Symbol('C' 12)
(
SymbolLine(5 40 20 40 8)
SymbolLine(0 35 5 40 8)
SymbolLine(0 5 0 35 8)
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 20 0 8)
)
Symbol('D' 12)
(
SymbolLine(5 0 5 40 8)
SymbolLine(20 0 25 5 8)
SymbolLine(25 5 25 35 8)
SymbolLine(20 40 25 35 8)
SymbolLine(0 40 20 40 8)
SymbolLine(0 0 20 0 8)
)
Symbol('E' 12)
(
SymbolLine(0 20 15 20 8)
SymbolLine(0 40 20 40 8)
SymbolLine(0 0 0 40 8)
SymbolLine(0 0 20 0 8)
)
Symbol('F' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(0 0 20 0 8)
SymbolLine(0 20 15 20 8)
)
Symbol('G' 12)
(
SymbolLine(20 0 25 5 8)
SymbolLine(5 0 20 0 8)
SymbolLine(0 5 5 0 8)
SymbolLine(0 5 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(5 40 20 40 8)
SymbolLine(20 40 25 35 8)
SymbolLine(25 25 25 35 8)
SymbolLine(20 20 25 25 8)
SymbolLine(10 20 20 20 8)
)
Symbol('H' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(25 0 25 40 8)
SymbolLine(0 20 25 20 8)
)
Symbol('I' 12)
(
SymbolLine(0 0 10 0 8)
SymbolLine(5 0 5 40 8)
SymbolLine(0 40 10 40 8)
)
Symbol('J' 12)
(
SymbolLine(0 0 15 0 8)
SymbolLine(15 0 15 35 8)
SymbolLine(10 40 15 35 8)
SymbolLine(5 40 10 40 8)
SymbolLine(0 35 5 40 8)
)
Symbol('K' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(0 20 20 0 8)
SymbolLine(0 20 20 40 8)
)
Symbol('L' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(0 40 20 40 8)
)
Symbol('M' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(0 0 15 15 8)
SymbolLine(15 15 30 0 8)
SymbolLine(30 0 30 40 8)
)
Symbol('N' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(0 0 0 5 8)
SymbolLine(0 5 25 30 8)
SymbolLine(25 0 25 40 8)
)
Symbol('O' 12)
(
SymbolLine(0 5 0 35 8)
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 15 0 8)
SymbolLine(15 0 20 5 8)
SymbolLine(20 5 20 35 8)
SymbolLine(15 40 20 35 8)
SymbolLine(5 40 15 40 8)
SymbolLine(0 35 5 40 8)
)
Symbol('P' 12)
(
SymbolLine(5 0 5 40 8)
SymbolLine(0 0 20 0 8)
SymbolLine(20 0 25 5 8)
SymbolLine(25 5 25 15 8)
SymbolLine(20 20 25 15 8)
SymbolLine(5 20 20 20 8)
)
Symbol('Q' 12)
(
SymbolLine(0 5 0 35 8)
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 15 0 8)
SymbolLine(15 0 20 5 8)
SymbolLine(20 5 20 35 8)
SymbolLine(15 40 20 35 8)
SymbolLine(5 40 15 40 8)
SymbolLine(0 35 5 40 8)
SymbolLine(10 30 20 40 8)
)
Symbol('R' 12)
(
SymbolLine(0 0 20 0 8)
SymbolLine(20 0 25 5 8)
SymbolLine(25 5 25 15 8)
SymbolLine(20 20 25 15 8)
SymbolLine(5 20 20 20 8)
SymbolLine(5 0 5 40 8)
SymbolLine(5 20 25 40 8)
)
Symbol('S' 12)
(
SymbolLine(20 0 25 5 8)
SymbolLine(5 0 20 0 8)
SymbolLine(0 5 5 0 8)
SymbolLine(0 5 0 15 8)
SymbolLine(0 15 5 20 8)
SymbolLine(5 20 20 20 8)
SymbolLine(20 20 25 25 8)
SymbolLine(25 25 25 35 8)
SymbolLine(20 40 25 35 8)
SymbolLine(5 40 20 40 8)
SymbolLine(0 35 5 40 8)
)
Symbol('T' 12)
(
SymbolLine(0 0 20 0 8)
SymbolLine(10 0 10 40 8)
)
Symbol('U' 12)
(
SymbolLine(0 0 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(5 40 15 40 8)
SymbolLine(15 40 20 35 8)
SymbolLine(20 0 20 35 8)
)
Symbol('V' 12)
(
SymbolLine(0 0 0 30 8)
SymbolLine(0 30 10 40 8)
SymbolLine(10 40 20 30 8)
SymbolLine(20 0 20 30 8)
)
Symbol('W' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(0 40 15 25 8)
SymbolLine(15 25 30 40 8)
SymbolLine(30 0 30 40 8)
)
Symbol('X' 12)
(
SymbolLine(0 0 0 5 8)
SymbolLine(0 5 25 30 8)
SymbolLine(25 30 25 40 8)
SymbolLine(0 30 0 40 8)
SymbolLine(0 30 25 5 8)
SymbolLine(25 0 25 5 8)
)
Symbol('Y' 12)
(
SymbolLine(0 0 0 5 8)
SymbolLine(0 5 10 15 8)
SymbolLine(10 15 20 5 8)
SymbolLine(20 0 20 5 8)
SymbolLine(10 15 10 40 8)
)
Symbol('Z' 12)
(
SymbolLine(0 0 25 0 8)
SymbolLine(25 0 25 5 8)
SymbolLine(0 30 25 5 8)
SymbolLine(0 30 0 40 8)
SymbolLine(0 40 25 40 8)
)
Symbol('[' 12)
(
SymbolLine(0 0 5 0 8)
SymbolLine(0 0 0 40 8)
SymbolLine(0 40 5 40 8)
)
Symbol('\' 12)
(
SymbolLine(0 5 30 35 8)
)
Symbol(']' 12)
(
SymbolLine(0 0 5 0 8)
SymbolLine(5 0 5 40 8)
SymbolLine(0 40 5 40 8)
)
Symbol('^' 12)
(
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 10 5 8)
)
Symbol('_' 12)
(
SymbolLine(0 40 20 40 8)
)
Symbol('a' 12)
(
SymbolLine(15 20 20 25 8)
SymbolLine(5 20 15 20 8)
SymbolLine(0 25 5 20 8)
SymbolLine(0 25 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(20 20 20 35 8)
SymbolLine(20 35 25 40 8)
SymbolLine(5 40 15 40 8)
SymbolLine(15 40 20 35 8)
)
Symbol('b' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(0 35 5 40 8)
SymbolLine(5 40 15 40 8)
SymbolLine(15 40 20 35 8)
SymbolLine(20 25 20 35 8)
SymbolLine(15 20 20 25 8)
SymbolLine(5 20 15 20 8)
SymbolLine(0 25 5 20 8)
)
Symbol('c' 12)
(
SymbolLine(5 20 20 20 8)
SymbolLine(0 25 5 20 8)
SymbolLine(0 25 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(5 40 20 40 8)
)
Symbol('d' 12)
(
SymbolLine(20 0 20 40 8)
SymbolLine(15 40 20 35 8)
SymbolLine(5 40 15 40 8)
SymbolLine(0 35 5 40 8)
SymbolLine(0 25 0 35 8)
SymbolLine(0 25 5 20 8)
SymbolLine(5 20 15 20 8)
SymbolLine(15 20 20 25 8)
)
Symbol('e' 12)
(
SymbolLine(5 40 20 40 8)
SymbolLine(0 35 5 40 8)
SymbolLine(0 25 0 35 8)
SymbolLine(0 25 5 20 8)
SymbolLine(5 20 15 20 8)
SymbolLine(15 20 20 25 8)
SymbolLine(0 30 20 30 8)
SymbolLine(20 30 20 25 8)
)
Symbol('f' 10)
(
SymbolLine(5 5 5 40 8)
SymbolLine(5 5 10 0 8)
SymbolLine(10 0 15 0 8)
SymbolLine(0 20 10 20 8)
)
Symbol('g' 12)
(
SymbolLine(15 20 20 25 8)
SymbolLine(5 20 15 20 8)
SymbolLine(0 25 5 20 8)
SymbolLine(0 25 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(5 40 15 40 8)
SymbolLine(15 40 20 35 8)
SymbolLine(0 50 5 55 8)
SymbolLine(5 55 15 55 8)
SymbolLine(15 55 20 50 8)
SymbolLine(20 20 20 50 8)
)
Symbol('h' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(0 25 5 20 8)
SymbolLine(5 20 15 20 8)
SymbolLine(15 20 20 25 8)
SymbolLine(20 25 20 40 8)
)
Symbol('i' 10)
(
SymbolLine(0 10 0 15 8)
SymbolLine(0 25 0 40 8)
)
Symbol('j' 10)
(
SymbolLine(5 10 5 15 8)
SymbolLine(5 25 5 50 8)
SymbolLine(0 55 5 50 8)
)
Symbol('k' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(0 25 15 40 8)
SymbolLine(0 25 10 15 8)
)
Symbol('l' 10)
(
SymbolLine(0 0 0 35 8)
SymbolLine(0 35 5 40 8)
)
Symbol('m' 12)
(
SymbolLine(5 25 5 40 8)
SymbolLine(5 25 10 20 8)
SymbolLine(10 20 15 20 8)
SymbolLine(15 20 20 25 8)
SymbolLine(20 25 20 40 8)
SymbolLine(20 25 25 20 8)
SymbolLine(25 20 30 20 8)
SymbolLine(30 20 35 25 8)
SymbolLine(35 25 35 40 8)
SymbolLine(0 20 5 25 8)
)
Symbol('n' 12)
(
SymbolLine(5 25 5 40 8)
SymbolLine(5 25 10 20 8)
SymbolLine(10 20 15 20 8)
SymbolLine(15 20 20 25 8)
SymbolLine(20 25 20 40 8)
SymbolLine(0 20 5 25 8)
)
Symbol('o' 12)
(
SymbolLine(0 25 0 35 8)
SymbolLine(0 25 5 20 8)
SymbolLine(5 20 15 20 8)
SymbolLine(15 20 20 25 8)
SymbolLine(20 25 20 35 8)
SymbolLine(15 40 20 35 8)
SymbolLine(5 40 15 40 8)
SymbolLine(0 35 5 40 8)
)
Symbol('p' 12)
(
SymbolLine(5 25 5 55 8)
SymbolLine(0 20 5 25 8)
SymbolLine(5 25 10 20 8)
SymbolLine(10 20 20 20 8)
SymbolLine(20 20 25 25 8)
SymbolLine(25 25 25 35 8)
SymbolLine(20 40 25 35 8)
SymbolLine(10 40 20 40 8)
SymbolLine(5 35 10 40 8)
)
Symbol('q' 12)
(
SymbolLine(20 25 20 55 8)
SymbolLine(15 20 20 25 8)
SymbolLine(5 20 15 20 8)
SymbolLine(0 25 5 20 8)
SymbolLine(0 25 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(5 40 15 40 8)
SymbolLine(15 40 20 35 8)
)
Symbol('r' 12)
(
SymbolLine(5 25 5 40 8)
SymbolLine(5 25 10 20 8)
SymbolLine(10 20 20 20 8)
SymbolLine(0 20 5 25 8)
)
Symbol('s' 12)
(
SymbolLine(5 40 20 40 8)
SymbolLine(20 40 25 35 8)
SymbolLine(20 30 25 35 8)
SymbolLine(5 30 20 30 8)
SymbolLine(0 25 5 30 8)
SymbolLine(0 25 5 20 8)
SymbolLine(5 20 20 20 8)
SymbolLine(20 20 25 25 8)
SymbolLine(0 35 5 40 8)
)
Symbol('t' 10)
(
SymbolLine(5 0 5 35 8)
SymbolLine(5 35 10 40 8)
SymbolLine(0 15 10 15 8)
)
Symbol('u' 12)
(
SymbolLine(0 20 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(5 40 15 40 8)
SymbolLine(15 40 20 35 8)
SymbolLine(20 20 20 35 8)
)
Symbol('v' 12)
(
SymbolLine(0 20 0 30 8)
SymbolLine(0 30 10 40 8)
SymbolLine(10 40 20 30 8)
SymbolLine(20 20 20 30 8)
)
Symbol('w' 12)
(
SymbolLine(0 20 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(5 40 10 40 8)
SymbolLine(10 40 15 35 8)
SymbolLine(15 20 15 35 8)
SymbolLine(15 35 20 40 8)
SymbolLine(20 40 25 40 8)
SymbolLine(25 40 30 35 8)
SymbolLine(30 20 30 35 8)
)
Symbol('x' 12)
(
SymbolLine(0 20 20 40 8)
SymbolLine(0 40 20 20 8)
)
Symbol('y' 12)
(
SymbolLine(0 20 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(20 20 20 50 8)
SymbolLine(15 55 20 50 8)
SymbolLine(5 55 15 55 8)
SymbolLine(0 50 5 55 8)
SymbolLine(5 40 15 40 8)
SymbolLine(15 40 20 35 8)
)
Symbol('z' 12)
(
SymbolLine(0 20 20 20 8)
SymbolLine(0 40 20 20 8)
SymbolLine(0 40 20 40 8)
)
Symbol('{' 12)
(
SymbolLine(5 5 10 0 8)
SymbolLine(5 5 5 15 8)
SymbolLine(0 20 5 15 8)
SymbolLine(0 20 5 25 8)
SymbolLine(5 25 5 35 8)
SymbolLine(5 35 10 40 8)
)
Symbol('|' 12)
(
SymbolLine(0 0 0 40 8)
)
Symbol('}' 12)
(
SymbolLine(0 0 5 5 8)
SymbolLine(5 5 5 15 8)
SymbolLine(5 15 10 20 8)
SymbolLine(5 25 10 20 8)
SymbolLine(5 25 5 35 8)
SymbolLine(0 40 5 35 8)
)
Symbol('~' 12)
(
SymbolLine(0 25 5 20 8)
SymbolLine(5 20 10 20 8)
SymbolLine(10 20 15 25 8)
SymbolLine(15 25 20 25 8)
SymbolLine(20 25 25 20 8)
)
"""
pcb_layers = """
Layer(1 "solder")
(
)
Layer(2 "component")
(
)
Layer(3 "GND")
(
)
Layer(4 "power")
(
)
Layer(5 "signal1")
(
)
Layer(6 "signal2")
(
)
Layer(7 "unused")
(
)
Layer(8 "unused")
(
)
Layer(9 "silk")
(
)
Layer(10 "silk")
(
)
"""
class CPCB:
" PCB class "
def __init__(self, sch=None,brd=None):
self.name=""
self.sch=sch
self.brd=brd
self.script_path=""
def addLayers(self):
# These layers have to be added in the board
# self.brd.addLayer(CLayer("solder",1)) # bottom orientation
# self.brd.addLayer(CLayer("component",2))
# these layers are already added
## self.brd.addLayer(CLayer("GND",3))
## self.brd.addLayer(CLayer("VCC",4))
## self.brd.addLayer(CLayer("blksolder",5)) # bottom orientation
## self.brd.addLayer(CLayer("blkcomponent",6))
## self.brd.addLayer(CLayer("signal3",7))
## self.brd.addLayer(CLayer("signal4",8))
## self.brd.addLayer(CLayer("Vias",9))
## self.brd.addLayer(CLayer("silk",10))
pass
#Layer(1 "solder")
#(
# Line(1375 1075 1325 1025 40 30 0x00000020)
#)
    # gen sch layers scr
def genSchLayersScr(self):
ns = ''
CRLF = "\n"
ns = pcb_layers;
return ns;
#ADD 'C1' 'G$1' POLARISED_CASE_H@ipc-7351-capacitor R0.000 (-0.300 3.300);
#ADD 'Q1' 'G$1' -PNP-SOT23-EBC@transistor R0.000 (1.600 3.300);
#ADD 'Q5' 'G$1' MMBT2222ALT1-NPN-SOT23-BEC@transistor R0.000 (0.900 2.800);
#ADD 'V1' 'GND' GND@supply2 R0.000 (0.600 0.100);
#ADD 'V2' 'G$1' VCC@supply2 R0.000 (5.600 4.200);
    # gen sch add scr
def genSchAddScr(self):
ns = ''
CRLF = "\n"
ns += "GRID INCH 0.005" + CRLF
ns += "Layer (91 Nets;" + CRLF
ns += "Layer (92 Busses;" + CRLF
ns += "Layer (93 Pins;" + CRLF
ns += "Layer (94 Symbols;" + CRLF
ns += "Layer (95 Names;" + CRLF
ns += "Layer (96 Values;" + CRLF
ns += "Layer (250 Descript;" + CRLF
ns += "Layer (251 SMDround;" + CRLF
ns += "DISPLAY -PINS" + CRLF
ns += CRLF
ns += "EDIT .S1" + CRLF
ns += "SET WIRE_BEND 2;" + CRLF
ns += "CHANGE STYLE 'Continuous'" + CRLF
for dev in self.sch.devices:
ns += "ADD '" + str(dev.refid) + "' 'G$1' " + str(dev.name) + "@" + str(dev.libname) + " " + dev.orientation + "R%.3f"% (dev.rotation) +" (" + str(dev.position) + ");" + CRLF
ns += "GRID LAST" + CRLF
return ns
    # gen cmd sch net-connect
def genSchNetConnectScr(self):
ns = ''
CRLF = "\n"
runcmd="run " + self.script_path + "/sch-net-connect.ulp"
        for net in self.sch.nets.values():
prevdev=""
prevpin=""
l = ""
first = 1
for node in net.nodes:
if first:
first = 0
prevdev=str(node.dev.refid)
prevpin=str(node.pin)
else:
l = runcmd + " " + net.name + " " + prevdev + " " + prevpin + " " + str(node.dev.refid) + " " + str(node.pin) + ";" + CRLF
ns += l
prevdev=str(node.dev.refid)
prevpin=str(node.pin)
# string function
return ns
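    # Each emitted line looks like (net/refdes/pin values are illustrative):
    #   run <script_path>/sch-net-connect.ulp GND U1 1 C1 2;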
# gen sch netlist listing
def genSchNetlistLst(self):
ns = ''
CRLF = '\n'
for net in self.sch.nets.values():
name = net.name
ns += net.name + ' '
for node in net.nodes:
ns += str(node.dev.refid) + '-' + str(node.pin.num) + ' '
ns += CRLF
ns += CRLF #extra one
# string function
return ns
# gen sch netlist script
def genSchNetlistScr(self):
ns = ''
CRLF = "\n"
ns = "# Netlist script" + CRLF
ns += "# EAGLE Version 4.11" + CRLF
ns += "# Copyright Hobby-Robotics" + CRLF
ns += expandtab("#Net\tPart\tPad",12) + CRLF
ns += CRLF
for net in self.sch.nets.values():
ns += CRLF
ns += "Change Class 0;" + CRLF
l = "Signal " + " '" + net.name + "'"
first = 1
for node in net.nodes:
if first:
first = 0
l += "\t'"
else:
l += "\t\t"
l += str(node.dev.refid) + "'\t'" + str(node.pin) + "' \\" + CRLF
ns += expandtab(str(l),12)
ns += "\t\t\t;" + CRLF
# string function
return ns
# Select
# {"All objects" Select(ObjectByName) ActiveWhen(have_regex)}
# {"Elements" Select(ElementByName) ActiveWhen(have_regex)}
# {"Pads" Select(PadByName) ActiveWhen(have_regex)}
# {"Pins" Select(PinByName) ActiveWhen(have_regex)}
# {"Text" Select(TextByName) ActiveWhen(have_regex)}
# {"Vias" Select(ViaByName) ActiveWhen(have_regex)}
# Move
# {"Move selected elements to other side" Flip(SelectedElements) a={"Shift-B" "Shift<Key>b"}}
# {"Remove selected objects" RemoveSelected()}
# {Connects m=C
# {"Lookup connection to object" GetXY(Select the object) Connection(Find) a={"Ctrl-F" "Ctrl<Key>f"}}
# {"Reset scanned pads/pins/vias" Connection(ResetPinsViasAndPads) Display(Redraw)}
# {"Reset scanned lines/polygons" Connection(ResetLinesAndPolygons) Display(Redraw)}
# {"Reset all connections" Connection(Reset) Display(Redraw) a={"Shift-F" "Shift<Key>f"}}
# {"Optimize rats nest" Atomic(Save) DeleteRats(AllRats)
# Atomic(Restore) AddRats(AllRats) Atomic(Block) a={"O" "<Key>o"}}
# {"Erase rats nest" DeleteRats(AllRats) a={"E" "<Key>e"}}
# {"Erase selected rats" DeleteRats(SelectedRats) a={"Shift-E" "Shift<Key>e"}}
#
# {"Auto-route selected rats" AutoRoute(Selected)}
# {"Auto-route all rats" AutoRoute(AllRats)}
# {"Rip up all auto-routed tracks" RipUp(All)}
# {"Optimize routed tracks"
# {"Auto-Optimize" djopt(auto) a={"Shift-=" "Shift<Key>="}}
# {"Debumpify" djopt(debumpify) }
# {"Unjaggy" djopt(unjaggy) }
# {"Vianudge" djopt(vianudge) }
# {"Viatrim" djopt(viatrim) }
# {"Ortho pull" djopt(orthopull) }
# {"Simple optimization" djopt(simple) a={"=" "<Key>="}}
# {"Miter" djopt(miter) }
# {"Puller" a={"Y" "<Key>y"} Puller() }
#
# {"Only autorouted nets" OptAutoOnly() checked=optautoonly}
# }
# {"Design Rule Checker" DRC()}
# {"Apply vendor drill mapping" ApplyVendor()}
# }
def genBrdPlaceBottom(self):
ns = ''
#Select(ElementByName|ObjectByName|PadByName|PinByName)
for dev in self.brd.devices.values():
name = str(dev.refid)
if dev.bottom:
#Select(ElementByName) ActiveWhen(have_regex)
ns += 'Select(ElementByName) ActiveWhen( ' + name + ' )\n'
ns += 'Flip(SelectedElements)\n'
ns += 'Unselect(All)\n'
return ns
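    # For a device with refid Q7 placed on the bottom this emits:
    #   Select(ElementByName) ActiveWhen( Q7 )
    #   Flip(SelectedElements)
    #   Unselect(All)
    # (Q7 is an illustrative refid.)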
    # gen brd cmd scr
def genBrdCmdScr(self):
ns = ''
CRLF = "\n"
ns += "# Gen EDA generated" + CRLF
ns += "# date:" + CRLF # version
ns += "# user:" + CRLF # version
# LoadFrom(Layout|LayoutToBuffer|ElementToBuffer|Netlist|Revert,filename)
ns += 'LoadFrom( Layout, ' + self.script_path + '/' + self.brd.name + '.pcb )' + CRLF # layout
ns += 'LoadFrom( Netlist, ' + self.script_path + '/' + self.brd.name + '.net )' + CRLF # netlist
# Do not do that, do it in the placement
# ns += self.genBrdPlaceBottom()
# AddRats(AllRats|SelectedRats|Close)
ns += 'AddRats(AllRats)' + CRLF # add all rats
# AutoRoute(AllRats|SelectedRats)
ns += 'AutoRoute(AllRats)' + CRLF # route all rats
# Auto-Optimize djopt(auto)
ns += 'djopt(auto)' + CRLF # optimize all routes
# SaveTo(Layout|LayoutAs,filename)
ns += 'SaveTo( LayoutAs, ' + self.script_path + '/' + self.brd.name + '.brd )' + CRLF # board
ns += 'Quit( )' + CRLF # Quit
return ns
#####################################
## release: pcb 1.7.1.ALPHA
## date: Sun Jul 22 15:22:22 2001
## user: tp (Terry Porter,,,)
## host: gronk.porter.net
#PCB("" 6047 4000)
#
#Grid(25 0 0 0)
#Cursor(400 0 2)
#Flags(0x000000c0)
#Groups("1,s:2,c:3:4:5:6:7:8")
#Styles("Signal,10,40,20:Power,25,60,35:Fat,40,60,35:Skinny,8,36,20")
####################################
# release: pcb 1.99v
# date: Tue May 1 07:59:48 2007
# user: pawel (pawel,U-WODNICKI\pawel,S-1-5-21-1835012242-1811546175-1750076985-1007)
# host: Wodnicki
#
#FileVersion[20070407]
#
#PCB["" 350000 330000]
#
#Grid[3937.007904 1800 100 1]
#Cursor[133000 107500 2.000000]
#PolyArea[200000000.000000]
#Thermal[0.500000]
#DRC[1000 1000 1000 1000 1500 1000]
#Flags("rubberband,nameonpcb,alldirection,uniquename,snappin")
#Groups("4,5,6,c:1,2,3,s:8:7")
#Styles["Signal,1000,4000,2000,1000:Power,2500,6000,3500,1000:Fat,4000,6000,3500,1000:Skinny,800,3600,2000,1000"]
    # gen brd board scr
def genBrdBoardScr(self):
ns = ''
CRLF = "\n"
ns += "# boostEDA generated" + CRLF
ns += "# date:" + CRLF # version
ns += "# user:" + CRLF # version
# determine board size, aka outline for rectangular ones only
self.brd.outline.calcBBox()
xsize = self.brd.outline.bbox.sizeX()
ysize = self.brd.outline.bbox.sizeY()
ns += "PCB[\"" + self.brd.name + "\" "
ns += "%d "% (xsize) # x size
ns += " %d"% (ysize) # y size
ns += "]" + CRLF
ns += "Grid(25 0 0 0)" + CRLF
ns += "Cursor(400 0 2)" + CRLF
ns += "Flags(0x000000c0)" + CRLF
ns += "Groups(\"1,s:2,c:3:4:5:6:7:8\")" + CRLF
ns += "Styles(\"Signal,10,40,20:Power,25,60,35:Fat,40,60,35:Skinny,8,36,20\")" + CRLF
return ns
#Layer(1 "solder")
#(
# Line(1375 1075 1325 1025 40 30 0x00000020)
#)
def genBrdLayerFromNet(self,layer,net):
ns = ''
# Should come from board technology
### print "out net " + net.name
### print "layer num " + str(layer.num)
for line in net.route:
#print "found line on net layer num " + str(line.layernum)
if line.layernum == layer.num:
### print "out line on net " + net.name
### print "net.route length " + str(len(net.route))
### print "line.points length " + str(len(line.points))
Thickness = line.thickness
Clearance = line.thickness * 2
first = True
prev = Point()
for pt in line.points:
#print "pt " + str(pt)
if first:
first = False
else:
X1 = int(prev._x)
Y1 = int(prev._y)
X2 = int(pt._x)
Y2 = int(pt._y)
ns += 'Line [' + " %d "% X1 + " %d "% Y1 + " %d "% X2 + " %d "% Y2
ns += " %d "% Thickness
ns += " %d "% Clearance
ns += '"auto"'
ns += ']\n'
prev = pt
return ns
def genLayerBlockages(self,layer):
ns = ''
# blockages use absolute coordinates,
for rect in layer.blockages:
# order of processing is important
X1=int(rect.ll._x)
Y1=int(rect.ll._y)
X2=int(rect.ur._x)
Y2=int(rect.ur._y)
ns += ' Polygon("clearpoly")\n'
ns += '(\n'
ns += " [%d "% X1 + " %d ]"% Y1
ns += " [%d "% X1 + " %d ]"% Y2
ns += " [%d "% X2 + " %d ]"% Y2
ns += " [%d "% X2 + " %d ]"% Y1
ns += '\n'
ns += ')\n'
return ns;
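    # Each blockage rectangle becomes a four-corner Polygon("clearpoly") block
    # inside the layer body, with corners [x1 y1] [x1 y2] [x2 y2] [x2 y1]
    # taken from the rectangle's own coordinates.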
# routing
    # gen brd layers scr
def genBrdLayersScr(self):
### print "PCB! gen brd layers scr"
ns = ''
CRLF = "\n"
for l in self.brd.layers:
### print "layer " + l.name
ns += "Layer (" +str(l.num) + " \"" + l.name + "\")" + CRLF
ns += "(" + CRLF
# here go all of the layer elements
for net in self.brd.nets.values():
ns += self.genBrdLayerFromNet(l,net) # Routes
ns += self.generateNetPour(l,net) # Geometry
ns += self.genLayerBlockages(l)
ns += ")" + CRLF
return ns;
def generateRoutes(self):
return self.genBrdLayersScr()
def generateNetPour(self,layer,net):
ns = ''
CRLF = "\n"
### print " layer " + str(layer)
for geom in net.geometry:
### print " found geom in " + net.name + " type " + str(type(geom)) + " layer " + str(geom.layernum) + CRLF
if geom.layernum != layer.num :
continue
# Handle rectangle
#if type(geom) is Rectangle :
if isinstance(geom, Rectangle) :
### print " found Rectangle" + CRLF
rect = Rectangle(geom.ll._x, geom.ll._y, geom.ur._x, geom.ur._y, geom.layernum )
rect.normalize() # normalize just in case
# order of processing is important
X1=int(rect.ll._x)
Y1=int(rect.ll._y)
X2=int(rect.ur._x)
Y2=int(rect.ur._y)
ns += ' Polygon("clearpoly")\n'
ns += '(\n'
ns += " [%d "% X1 + " %d ]"% Y1
ns += " [%d "% X1 + " %d ]"% Y2
ns += " [%d "% X2 + " %d ]"% Y2
ns += " [%d "% X2 + " %d ]"% Y1
ns += '\n'
ns += ')\n'
return ns;
# Geometry on nets, aka pour
def generatePour(self):
ns = ''
CRLF = "\n"
for l in self.brd.layers:
### print "layer " + l.name
ns += "Layer (" +str(l.num) + " \"" + l.name + "\")" + CRLF
ns += "(" + CRLF
# here go through the layers
for net in self.brd.nets.values():
ns += self.generateNetPour(l,net)
ns += ")" + CRLF
return ns;
# Via[]
# Via[17000 182000 31000 3000 34000 2800 "" ""]
# Via [X Y Thickness Clearance Mask Drill "Name" SFlags]
# Via (X Y Thickness Clearance Mask Drill "Name" NFlags)
# Via (X Y Thickness Clearance Drill "Name" NFlags)
# Via (X Y Thickness Drill "Name" NFlags)
# Via (X Y Thickness "Name" NFlags)
# X Y coordinates of center
# Thickness outer diameter of copper annulus
# Clearance add to thickness to get clearance diameter
# Mask diameter of solder mask opening
# Drill diameter of drill
# Name string, name of via (vias have names?)
# SFlags symbolic or numerical flags
# NFlags numerical flags only
def generateVias(self):
ns = ''
CRLF = "\n"
### print " board vias " + str(len(self.brd.vias))
for via in self.brd.vias:
### print "via " + via.name
ns += "Via ["
ns += " %d "% int(via.pos._x) + " %d "% int(via.pos._y)
ns += ' 4000 2000 0 2000 "" "" '
ns += "]" + CRLF
return ns;
#NetList()
#(
# Net("unnamed_net1" "(unknown)")
# (
# Connect("L1-2")
# Connect("L2-1")
# Connect("C2-1")
# Connect("C1-1")
# )
#)
    # gen brd net scr
def genBrdNetlistScr(self):
ns = ''
CRLF = "\n"
ns = 'NetList()' + CRLF
ns += '(' + CRLF
for net in self.sch.nets.values():
name = net.name
ns += "Net(\"" + net.name + "\" \"(unknown)\")" + CRLF
ns += "(" + CRLF
for node in net.nodes:
ns += expandtab("\tConnect(\"") + str(node.dev.refid) + "-" + str(node.pin.num) + "\")" + CRLF
ns += ")" + CRLF
ns += ')' + CRLF
return ns
# pcb footprint file may contain any of the following commands:
# Element [element_flags, description, pcb-name, value, mark_x, mark_y, text_x, text_y, text_direction, text_scale, text_flags]
# Pad [x1 y1 x2 y2 thickness clearance mask name pad_number flags]
# Pin [x y thickness clearance mask drillholedia name number flags]
# ElementArc [x y r1 r2 startangle sweepangle thickness]
# ElementLine [x1 y1 x2 y2 thickness] > thickness != 1000 = 10 mils almost for all footprints
# Comment lines start with the #-sign
#Elements
# Element [element_flags, description, pcb-name, value, mark_x, mark_y, text_x, text_y, text_direction, text_scale, text_flags]
#   (item / allowed value / explanation)
# element_flags unsigned hex value
# description string text description of footprint written by footprint author
# pcb name string refdes used on this particular pcb xxx
# value string value of component on this particular pcb layout xxx
# mark_x 1/100th mils
# mark_y 1/100th mils
# text_x 1/100th mils
# text_y 1/100th mils
# text direction decimal integer 0=horiz; 1=ccw90; 2=180; 3=cw90
# text_scale decimal integer usu. set 100
# text_flags unsigned hex
# Pads
# Pad[x1 y1 x2 y2 thickness clearance mask name pad_number flags]
#   (item / allowed value / explanation)
# x1 1/100th mils x(1st point)
# y1 1/100th mils y(1st point)
# x2 1/100th mils x(2nd point)
# y2 1/100th mils y(2nd point)
# thickness 1/100 mils width of metal surrounding line segment see Brorson .pdf
# clearance 1/100 mils distance to any other copper on any layer actually 1/2 of this number is used!
# mask 1/100th mils width of mask relief actual width of the mask centered on pad copper
# name string name of pad (arb. string) e.g. pad_1 or positive or any other string
# pad_number string pad # used for nets. it MUST be consistent with the definitions on the netlist.
# flags hex value xxx
# Pin[x y thickness clearance mask drillholedia name number flags]
#   (item / allowed value / explanation)
# x 1/100th mils pin x coord.
# y 1/100th mils pin y coord.
# thickness 1/100th mils copper diameter
# clearance 1/100th mils 2*(cu to cu clearance) if you want a 10 mil clearance, put 2000 (20 mils) here
# mask 1/100th mils diameter of mask aperture actual dia. of hole in mask
# drillholedia 1/100th mils dia. of hole
# name string arb. pin name
# number decimal integer pin number used by nets/rats
# flags hex xxx
# Via[]
# Via[17000 182000 31000 3000 34000 2800 "" ""]
# Via [X Y Thickness Clearance Mask Drill "Name" SFlags]
# Via (X Y Thickness Clearance Mask Drill "Name" NFlags)
# Via (X Y Thickness Clearance Drill "Name" NFlags)
# Via (X Y Thickness Drill "Name" NFlags)
# Via (X Y Thickness "Name" NFlags)
# X Y coordinates of center
# Thickness outer diameter of copper annulus
# Clearance add to thickness to get clearance diameter
# Mask diameter of solder mask opening
# Drill diameter of drill
# Name string, name of via (vias have names?)
# SFlags symbolic or numerical flags
# NFlags numerical flags only
# On the Layer
# Line[]
# Line[137500 107500 132500 102500 4000 3000 "clearline"]
# Text[423000 391500 2 100 "T J PORTER ELECTRONICS" "auto"]
# Polygon("clearpoly")
# (
# [2000 198000] [47000 198000] [47000 187000] [126000 187000] [126000 198000]
# [297000 198000] [297000 1000] [2000 1000]
# )
# Notes:
# Pins - Through-hole
# Pads - SMD
# Examples for version 1.99
# TH
# Element["" "Cap" "C17" "" 215500 81500 -9000 -32900 0 150 ""]
# (
# Pin[0 0 8000 3000 11000 3500 "1" "1" ""]
# Pin[0 -20000 8000 3000 11000 3500 "2" "2" ""]
# ElementLine [-5000 5000 5000 5000 1000]
# ElementLine [5000 5000 5000 -25000 1000]
# ElementLine [5000 -25000 -5000 -25000 1000]
# ElementLine [-5000 -25000 -5000 5000 1000]
#
# )
# SMD
# Element["" "SMD 0805" "C13" "" 252500 151000 -3000 4500 0 150 ""]
# (
# Pad[0 0 0 0 6000 3000 9000 "1" "1" "square"]
# Pad[0 -9000 0 -9000 6000 3000 9000 "2" "2" "square"]
# ElementLine [-3500 -12500 -3500 3500 1000]
# ElementLine [3500 -12500 -3500 -12500 1000]
# ElementLine [3500 3500 3500 -12500 1000]
# ElementLine [-3500 3500 3500 3500 1000]
# )
#
# Original
#Element["" "SOT-23 package" "Q7" "" 66666 66666 3200 5900 0 100 ""]
#(
# Pad[0 -300 0 300 3400 3000 4000 "1" "1" "square,edge2"]
# Pad[7800 -300 7800 300 3400 3000 4000 "2" "2" "square,edge2"]
# Pad[3900 -8500 3900 -7900 3400 3000 4000 "3" "3" "square"]
# ElementLine [10300 -11000 -2500 -11000 1000]
# ElementLine [10300 2900 10300 -11000 1000]
# ElementLine [-2500 2900 10300 2900 1000]
# ElementLine [-2500 -11000 -2500 2900 1000]
#)
# Placed on the far side -> layer onsolder?
#Element["selected,onsolder" "SOT-23 package" "Q7" "" 66666 133334 3200 -5900 0 100 "selected,auto"]
#(
# Pad[0 300 0 -300 3400 3000 4000 "1" "1" "selected,onsolder,square"]
# Pad[7800 300 7800 -300 3400 3000 4000 "2" "2" "selected,onsolder,square"]
# Pad[3900 8500 3900 7900 3400 3000 4000 "3" "3" "selected,onsolder,square,edge2"]
# ElementLine [10300 11000 -2500 11000 1000]
# ElementLine [10300 -2900 10300 11000 1000]
# ElementLine [-2500 -2900 10300 -2900 1000]
# ElementLine [-2500 11000 -2500 -2900 1000]
#
# )
# VIAs
# Via[17000 182000 31000 3000 34000 2800 "" ""]
# Via[17000 17000 31000 3000 34000 2800 "" ""]
# Via[282000 17000 31000 3000 34000 2800 "" ""]
# Via[282000 182000 31000 3000 34000 2800 "" ""]
# Via[15500 382500 31000 3000 34000 2800 "" ""]
# Via[15500 217500 31000 3000 34000 2800 "" ""]
# Via[280500 217500 31000 3000 34000 2800 "" ""]
# Tracks are made of Line????
# Layer(1 "solder")
# (
# Line[137500 107500 132500 102500 4000 3000 "clearline"]
# Line[145000 107500 137500 107500 4000 3000 "clearline"]
# Line[85000 112500 85000 107500 4000 3000 "clearline"]
# Line[97500 90000 97500 147500 4000 3000 "clearline"]
#)
# Element [element_flags, description, pcb-name, value, mark_x, mark_y, text_x, text_y, text_direction, text_scale, text_flags]
def gen0805_resitor(self,refid,x,y,v):
CRLF = '\n'
s = 'Element["" "0805 chip resitor" "' + str(refid) + '" "' + str(v) + '" ' +'%i'% x + ' ' + '%i'% y + ' 3200 5900 0 100 ""]' + CRLF
s += '(' + CRLF
s += ' Pad[0 -700 0 700 4500 3000 5100 "1" "1" "square"]' + CRLF
s += ' Pad[8000 -700 8000 700 4500 3000 5100 "2" "2" "square"]' + CRLF
s += ' ElementLine [11700 -4400 -3700 -4400 800]' + CRLF
s += ' ElementLine [11700 4400 11700 -4400 800]' + CRLF
s += ' ElementLine [-3700 4400 11700 4400 800]' + CRLF
s += ' ElementLine [-3700 -4400 -3700 4400 800]' + CRLF
s += ')' + CRLF
return s
def gen0805_capacitor(self,refid,x,y,v):
CRLF = '\n'
s = 'Element["" "0805 chip cap" "' + str(refid) + '" "' + str(v) + '" ' +'%i'% x + ' ' + '%i'% y + ' 3200 5900 0 100 ""]' + CRLF
s += '(' + CRLF
s += ' Pad[0 -700 0 700 4500 3000 5100 "1" "1" "square"]' + CRLF
s += ' Pad[8000 -700 8000 700 4500 3000 5100 "2" "2" "square"]' + CRLF
s += ' ElementLine [11700 -4400 -3700 -4400 800]' + CRLF
s += ' ElementLine [11700 4400 11700 -4400 800]' + CRLF
s += ' ElementLine [-3700 4400 11700 4400 800]' + CRLF
s += ' ElementLine [-3700 -4400 -3700 4400 800]' + CRLF
s += ')' + CRLF
return s
def genSOT23(self, refid, x, y, v):
CRLF = '\n'
s = 'Element["" "SOT-23 package" "' + str(refid) + '" "' + str(v) + '" ' +'%i'% x + ' ' + '%i'% y + ' 3200 5900 0 100 ""]' + CRLF
s += '(' + CRLF
s += ' Pad[0 -300 0 300 3400 3000 4000 "1" "1" "square,edge2"]' + CRLF
s += ' Pad[7800 -300 7800 300 3400 3000 4000 "2" "2" "square,edge2"]' + CRLF
s += ' Pad[3900 -8500 3900 -7900 3400 3000 4000 "3" "3" "square"] ' + CRLF
s += ' ElementLine [10300 -11000 -2500 -11000 1000]' + CRLF
s += ' ElementLine [10300 2900 10300 -11000 1000]' + CRLF
s += ' ElementLine [-2500 2900 10300 2900 1000]' + CRLF
s += ' ElementLine [-2500 -11000 -2500 2900 1000]' + CRLF
s += ')' + CRLF
return s
def rotatePoint(self,pt,x0,y0,angle):
dX = pt._x - x0
dY = pt._y - y0
rX = pt._x
rY = pt._y
if angle == 90:
rX = x0 + dY
rY = y0 - dX
if angle == 180:
rX = x0 - dX
rY = y0 - dY
if angle == 270:
rX = x0 - dY
rY = y0 + dX
return rX,rY
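    # Worked example: rotatePoint(Point(10, 0), 0, 0, 90) -> (0, -10) and
    # rotatePoint(Point(10, 0), 0, 0, 180) -> (-10, 0); any angle other than
    # 90/180/270 returns the point unchanged.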
def genElementLine(self,line,dev):
# order of processing is important
X1=int(line.points[0]._x)
Y1=int(line.points[0]._y)
X2=int(line.points[1]._x)
Y2=int(line.points[1]._y)
if dev.bottom:
Y1 = 0 - Y1
Y2 = 0 - Y2
X1,Y1 = self.rotatePoint(Point(X1,Y1),0,0,dev.rotation)
X2,Y2 = self.rotatePoint(Point(X2,Y2),0,0,dev.rotation)
# keep horizontal, vertical Point2 > Point1
if (X1 == X2):
if (Y1 > Y2):
t = Y1
Y1 = Y2
Y2 = t
else:
if (Y1 == Y2):
if (X1 > X2):
t = X1
X1 = X2
X2 = t
ns = 'ElementLine [' + " %d "% X1 + " %d "% Y1 + " %d "% X2 + " %d "% Y2
ns += " %d "% line.thickness
ns += ']\n'
return ns
# rotation is clockwise
def genElementArc(self,arc,dev):
# Thickness, Clearance, Mask, Drill, Name, Number, SFlags
rX = int(arc._x)
rY = int(arc._y)
        # rY is mirrored for elements placed on the bottom side
if dev.bottom:
rY = 0 - rY
if dev.rotation == 90:
arc.sangle += 90
if dev.rotation == 180:
arc.sangle += 180
if dev.rotation == 270:
arc.sangle += 270
rX,rY = self.rotatePoint(arc,0,0,dev.rotation)
arc.sangle = arc.sangle % 360
ns = 'ElementArc [' + " %d "% rX + " %d "% rY
ns += " %d "% arc.width
ns += " %d "% arc.height
ns += " %d "% arc.sangle
ns += " %d "% arc.dangle
ns += " %d "% arc.thickness
ns += ']\n'
return ns
def genElementPin(self,pin,dev):
# Thickness, Clearance, Mask, Drill, Name, Number, SFlags
rX=int(pin.pos._x)
rY=int(pin.pos._y)
        # Why don't we have to do it for the pins?
        # rY is left unmirrored here:
#if dev.bottom:
# rY = 0 - rY
# Package has not been rotated and must match device pins
rX,rY = self.rotatePoint(Point(rX,rY),0,0,dev.rotation)
ns = 'Pin [' + " %d "% rX + " %d "% rY
ns += " %d "% pin.thickness
ns += " %d "% pin.clearance
ns += " %d "% pin.mask
ns += " %d "% pin.drill
ns += pin.name + ' '
ns += '"' + "%d"% pin.num + '" '
ns += pin.sflags
ns += ']\n'
return ns
def genElementPad(self,pin,dev):
# Thickness, Clearance, Mask, Name, Number, SFlags
# if package was parsed then these are set, if not I need to generate correct ones
rX1=int(pin.rX1)
rY1=int(pin.rY1)
rX2=int(pin.rX2)
rY2=int(pin.rY2)
        # Why don't we have to do it for the pads?
#if dev.bottom:
# rY1 = 0 - rY1
# rY2 = 0 - rY2
rX1,rY1 = self.rotatePoint(Point(rX1,rY1),0,0,dev.rotation)
rX2,rY2 = self.rotatePoint(Point(rX2,rY2),0,0,dev.rotation)
try:
sflags = pin.sflags
except:
# no PCB sflags then generate one
# square
# edge2
if pin.pad.type == "S":
sflags ='"square"'
else:
sflags ='""'
ns = 'Pad [' + " %d "% rX1 + " %d "% rY1 + " %d "% rX2 + " %d "% rY2
ns += " %d "% pin.thickness
ns += " %d "% pin.clearance
ns += " %d "% pin.mask
ns += pin.name + ' '
ns += '"' + "%d"% pin.num + '" '
ns += sflags
ns += ']\n'
return ns
def genElementBody(self,dev):
# print'name ' + dev.name
l = len(dev.pins)
# print ' len ' + str(l)
# print 'roation ' + str(dev.rotation)
ns = '(\n'
for num in range(1,l+1):
# print 'pin ' + str(num)
pin = dev.pins[num]
ppin = dev.package.pins[num]
        #if dev.package.smt: # even smt packages can have pins aka mounting holes
if ppin.smt:
ns += self.genElementPad(ppin,dev)
else:
ns += self.genElementPin(ppin,dev)
for geo in dev.package.geometry:
if isinstance(geo, Line):
ns += self.genElementLine(geo,dev)
if isinstance(geo, Arc):
ns += self.genElementArc(geo,dev)
if isinstance(geo, Text):
ns += self.genElementText(geo,dev)
ns += ')\n'
return ns
    # The device is on the bottom and the pad coordinates are given for the
    # bottom side. Pcb defines the package looking from the top, so mirror it
    # in X back to the top and add the "onsolder" flags.
    # For details see core.py
def genBrdPlaceDevOnSolder(self,dev):
for pad in dev.package.pins.values():
pad.pos._y = 0 - pad.pos._y
try: # quick fix TBI
pad.rY1 = 0 - pad.rY1
except:
pad.rY1 = 0
try: # quick fix TBI
pad.rY2 = 0 - pad.rY2
except:
pad.rY2 = 0
try: # quick fix TBI
newsflags = pad.sflags.strip('"')
except:
newsflags = 'square' # default to square
if newsflags != '':
newsflags = ',' + newsflags
newsflags = '"onsolder' + newsflags + '"'
pad.sflags = newsflags
for pad in dev.package.geometry:
pass
# print pad.sflags
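    # Example of the flag rewrite done by genBrdPlaceDevOnSolder: a pad carrying
    # '"square"' ends up as '"onsolder,square"', and a pad whose sflags are '""'
    # becomes '"onsolder"'.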
    # gen brd place scr
def genBrdPlaceScr(self):
ns = ''
CRLF = '\n'
devnum = 0
self.brd.outline.calcBBox()
for dev in self.brd.devices.values():
name = str(dev.refid) + CRLF
if dev.bottom:
self.genBrdPlaceDevOnSolder(dev)
                #x = (self.brd.outline.bbox.ur._x - dev.position._x) # position is in mils
                x = dev.position._x # position is in mils
                #y = (self.brd.outline.bbox.ur._y - dev.position._y) # position is in mils
                y = dev.position._y # position is in mils
                placement = '"onsolder"'
            else:
                x = dev.position._x # position is in mils
                y = dev.position._y # position is in mils
placement = '""'
# place the device
ns += 'Element[' + placement + ' "' + str(dev.package.description) + '" "' + str(dev.refid) + '" "' + str(dev.val) + '" ' +'%i'% x + ' ' + '%i'% y + ' 3200 5900 0 100 ""]' + CRLF
ns += self.genElementBody(dev)
# if name[0:1] == 'R':
# ns += self.gen0805_resitor(dev.refid,x,y,dev.val)
# if name[0:1] == 'C':
# ns += self.gen0805_capacitor(dev.refid,x,y,dev.val)
# if name[0:1] == 'Q':
# ns += self.genSOT23(dev.refid,x,y,dev.val)
# numpins = 0
# for pin in dev.pins:
# numpins += 1
# for k in dev.pins.keys():
# pin = dev.pins[k]
# dev.rotation ?
return ns
def Cmd(self,cmds):
gen = 0
sch = 0
brd = 0
cmd = 0
add = 0
layers = 0
net_connect = 0
netlist = 0
board = 0
place = 0
route = 0
scr = 0
lst = 0
# 0
if cmds[0:1] == ['gen']:
gen = 1
# 1
if cmds[1:2] == ['sch']:
sch = 1
if cmds[1:2] == ['brd']:
brd = 1
# 2
if cmds[2:3] == ['cmd']:
cmd = 1
if cmds[2:3] == ['add']:
add = 1
if cmds[2:3] == ['layers']:
layers = 1
if cmds[2:3] == ['netconnect']:
net_connect = 1
if cmds[2:3] == ['netlist']:
netlist = 1
if cmds[2:3] == ['board']:
board = 1
if cmds[2:3] == ['place']:
place = 1
if cmds[2:3] == ['route']:
route = 1
# 3
if cmds[3:4] == ['scr']:
scr = 1
if cmds[3:4] == ['lst']:
lst = 1
if gen:
if sch:
if add:
if scr:
s = self.genSchAddScr()
return s
if layers:
if scr:
s = self.genSchLayersScr()
return s
if net_connect:
pass
if netlist:
s = self.genSchNetlistLst()
return s
if brd:
if cmd:
if scr:
s = self.genBrdCmdScr() # commands to make the board
return s
if board:
if scr:
s = self.genBrdBoardScr()
return s
if layers:
if scr:
s = self.genBrdLayersScr()
return s
if place:
if scr:
s = self.genBrdPlaceScr()
return s
if netlist:
if scr:
s = self.genBrdNetlistScr()
return s
if route:
pass
return ""
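    # Illustrative dispatch: Cmd(['gen', 'brd', 'board', 'scr']) returns the
    # output of genBrdBoardScr(); any combination that matches nothing falls
    # through and returns "".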
def test(self):
ic1 = CDev("U1","","IC1")
ic1.add( CPin("GND",1) )
ic1.add( CPin("VCC",2) )
self.sch.addDev(ic1)
net1 = CNet("GND")
net1.add(CNode(ic1,"GND"))
self.sch.addNet(net1)
net2 = CNet("VCC")
net2.add(CNode(ic1,"VCC"))
self.sch.addNet(net2)
print "gen sch add scr"
s = self.genSchAddScr()
print s
print "gen sch net-connect scr"
s = self.genSchNetConnectScr()
print s
print "gen sch netlist lst"
s = self.genSchNetlistLst()
print s
print "gen sch netlist scr"
s = self.genSchNetlistScr()
print s
# Some tests
if __name__ == "__main__":
import sys
#import string
import re
schem = CSchematic()
board = CBoard(schem)
board.addFromSchematic()
mucs = CPCB(schem,board)
# open input file
if sys.argv[1:] == ['test']:
mucs.test()
| bsd-3-clause | 2,569,505,188,356,980,700 | 23.228858 | 181 | 0.616108 | false | 2.445766 | false | false | false |
ecreall/nova-ideo | novaideo/connectors/yammer/views/login.py | 1 | 4288 | # -*- coding: utf8 -*-
# Copyright (c) 2017 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPForbidden
from pyramid.renderers import get_renderer
from dace.util import getSite
from dace.processinstance.core import DEFAULTMAPPING_ACTIONS_VIEWS
from pontus.view import BasicView, ViewError
from novaideo import _
from novaideo.connectors.core import YAMMER_CONNECTOR_ID
from novaideo.connectors.yammer.content.behaviors import LogIn
from novaideo.content.novaideo_application import NovaIdeoApplication
from novaideo.utilities.util import generate_navbars
from novaideo.connectors.core import CONNECTOR_PROCESSES
@view_config(
name='yammerlogin',
context=NovaIdeoApplication,
renderer='pontus:templates/views_templates/grid.pt',
)
class LoginView(BasicView):
title = _('Log in')
name = 'login'
behaviors = [LogIn]
template = 'novaideo:views/user_management/templates/login.pt'
wrapper_template = 'pontus:templates/views_templates/simple_view_wrapper.pt'
viewid = 'login'
def update(self):
code = self.params('code')
error = self.params('error')
message = None
messages = {}
request = self.request
root = getSite()
yammer_connectors = list(root.get_connectors(YAMMER_CONNECTOR_ID))
yammer_connector = yammer_connectors[0] if yammer_connectors else None
login_url = request.resource_url(request.context, 'login')
login_url2 = request.resource_url(request.context, '@@login')
referrer = self.params('came_from')
if not referrer:
referrer = request.path_url
if '/auditstream-sse' in referrer:
return HTTPForbidden()
if login_url in referrer or login_url2 in referrer:
# never use the login form itself as came_from
referrer = request.resource_url(root)
came_from = request.session.setdefault(
'novaideo.came_from', referrer)
error_message = _("Failed login")
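        # 'code' is the value Yammer returns to this view after the user
        # authorises the app (see self.params('code') above); the connector
        # exchanges it for account (user_data) and network (source_data) details.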
if yammer_connector and code:
trusted_networks = getattr(yammer_connector, 'networks', [])
source_data, user_data = yammer_connector.extract_data(code)
if not trusted_networks or \
any(n in trusted_networks for n in source_data['network_domains']):
result = self.execute({
'source_data': source_data,
'user_data': user_data,
'came_from': came_from
})
if result[0].get('logged', False):
return result[0].get('redirect')
elif trusted_networks:
error_message = _("You don't have the right to login with this account.")
error = True
if error:
error = ViewError()
error.principalmessage = error_message
message = error.render_message(request)
messages.update({error.type: [message]})
self.finished_successfully = False
# Pass this through FBO views (e.g., forbidden) which use its macros.
template = get_renderer(
'novaideo:views/user_management/templates/login.pt').implementation()
login_bodies = []
try:
login_navbars = generate_navbars(
request, request.root,
process_id=CONNECTOR_PROCESSES,
node_id='login',
descriminators=['body-action'])
login_bodies = login_navbars['body_actions']
except Exception:
pass
values = dict(
url=request.resource_url(request.virtual_root, 'login'),
came_from=came_from,
login='',
password='',
login_template=template,
logins=login_bodies
)
body = self.content(args=values, template=self.template)['body']
item = self.adapt_item(body, self.viewid)
item['messages'] = messages
result = {}
result['coordinates'] = {self.coordinates: [item]}
return result
DEFAULTMAPPING_ACTIONS_VIEWS.update(
{LogIn: LoginView})
| agpl-3.0 | 4,142,177,128,030,823,400 | 35.338983 | 89 | 0.620336 | false | 4.014981 | false | false | false |
jjgomera/pychemqt | lib/mEoS/Ne.py | 1 | 5905 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
'''Pychemqt, Chemical Engineering Process simulator
Copyright (C) 2009-2017, Juan José Gómez Romera <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.'''
from numpy.lib.scimath import log10
from scipy.constants import pi, Avogadro
from lib import unidades
from lib.meos import MEoS
class Ne(MEoS):
"""Multiparameter equation of state for neon"""
name = "neon"
CASNumber = "7440-01-9"
formula = "Ne"
synonym = "R-720"
_refPropName = "NEON"
_coolPropName = "Neon"
rhoc = unidades.Density(481.914888)
Tc = unidades.Temperature(44.4918)
Pc = unidades.Pressure(2678.6, "kPa")
M = 20.179 # g/mol
Tt = unidades.Temperature(24.556)
Tb = unidades.Temperature(27.104)
f_acent = -0.0387
momentoDipolar = unidades.DipoleMoment(0.0, "Debye")
id = 107
CP1 = {"ao": 2.5}
katti = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for neon of Katti (1986)",
"__doi__": {"autor": "Katti, R.S., Jacobsen, R.T, Stewart, R.B., "
"Jahangiri, M.",
"title": "Thermodynamic Properties of Neon for "
"Temperatures from the Triple Point to 700 K at "
"Pressures up to 700 MPa",
"ref": "Adv. Cryo. Eng. 31 (1986) 1189-1197",
"doi": "10.1007/978-1-4613-2213-9_132"},
"R": 8.31434,
"cp": CP1,
"ref": {"Tref": 298.15, "Pref": 101.325, "ho": 6179, "so": 146.214},
"Tmin": Tt, "Tmax": 700.0, "Pmax": 700000.0, "rhomax": 90.56,
"nr1": [0.3532653449e1, -0.4513954384e1, -0.1524027959, 0.2188568609e1,
-7.44299997, 0.7755627402e1, -0.3122553128e1, 0.1014206899e1,
-0.5289214086e-1, 0.1566849239, -0.222852705, -0.1410150942e-1,
0.7036229719e-1, -0.5882048367e-1, 0.1571172741e-1,
0.1292202769e-2, 0.7902035603e-3, -0.3794403616e-3],
"d1": [1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 6, 6, 6],
"t1": [0.5, 0.75, 3.5, 0.5, 0.75, 1, 1.5, 2.5, 0.25, 0.5, 2.5, 1, 3, 4,
5, 1, 5, 6],
"nr2": [0.04652799333, 0.04524001818, -0.2383421991, 0.629359013e-2,
-0.1272313644e-2, -0.175235256e-6, 0.7188419232e-2,
-0.5403006914e-1, 0.7578222187e-1, -0.3808588254e-1,
0.6034022431e-2],
"d2": [1, 2, 2, 2, 2, 2, 4, 8, 8, 8, 8],
"t2": [4, 1, 5, 8, 12, 32, 10, 6, 7, 8, 9],
"c2": [3, 2, 2, 4, 6, 6, 2, 2, 2, 2, 2],
"gamma2": [1]*11,
"nr3": [],
"nr4": []}
eq = katti,
_surface = {"sigma": [0.012254, 0.02728, -0.025715],
"exp": [1.4136, 1.4517, 1.6567]}
_dielectric = {
"eq": 1,
"a": [0.9969, 0], "b": [-0.109, 0.0708], "c": [-2.88, -1.0],
"Au": 0, "D": 2}
_melting = {
"eq": 2,
"__doi__": {
"autor": "Santamaría-Pérez, D., Mukherjee, G.D., Schwager, B., "
"Boehler, R.",
"title": "High-pressure melting curve of helium and neon: "
"Deviations from corresponding states theory",
"ref": "Physical Review B 81 (2010) 214101",
"doi": "10.1103/PhysRevB.81.214101"},
"Tmin": 24.4, "Tmax": 700.0,
"Tref": 24.4, "Pref": 101325,
"a2": [0.17e9], "exp2": [1/0.77]}
_vapor_Pressure = {
"eq": 3,
"n": [-0.55805e1, 0.68795e-1, 0.54840e1, -0.83760e1, 0.34276e1],
"t": [1, 1.5, 2.3, 2.8, 3.4]}
_liquid_Density = {
"eq": 1,
"n": [1.0601, 120.76, -385.53, 816.55, -899.07, 354.66],
"t": [0.33, 1.4, 1.7, 2.2, 2.6, 3.0]}
_vapor_Density = {
"eq": 2,
"n": [-0.23338e1, -0.36834e1, -0.85368e2, 0.22769e3, -0.17290e3],
"t": [0.444, 0.95, 3.5, 4.1, 4.5]}
visco0 = {"__name__": "Rabinovich (1988)",
"__doi__": {
"autor": "Rabinovich, V.A., Vasserman, A.A., Nedostup, V.I.,"
" Veksler, L.S.",
"title": "Thermophysical Properties of Neon, Argon, "
"Krypton, and Xenon",
"ref": "Hemisphere Publishing Corp., 1988.",
"doi": ""},
"eq": 0,
"method": "_visco0"}
_viscosity = visco0,
def _visco0(self, rho, T, fase=None):
a = [17.67484, -2.78751, 311498.7, -48826500, 3938774000, -1.654629e11,
2.86561e12]
Tr = T/0.29944
y = 0.68321*(a[0] + a[1]*log10(Tr) + a[2]/Tr**2 + a[3]/Tr**3 +
a[4]/Tr**4 + a[5]/Tr**5 + a[6]/Tr**6)
nt = 266.93*(T*self.M)**0.5/y
om = rho/1673.0
c = [1.03010, -0.99175, 2.47127, -3.11864, 1.57066]
b = [0.48148, -1.18732, 2.80277, -5.41058, 7.04779, -3.76608]
sum1 = sum([ci*om**i for i, ci in enumerate(c)])
sum2 = sum([bi*om**i for i, bi in enumerate(b)])
sigma = 3.05e-10*(sum1-sum2*log10(T/122.1))
br = 2.0/3.0*pi*Avogadro*sigma**3
brho = rho/self.M*1000*br
d = [1, 0.27676, 0.014355, 2.6480, -1.9643, 0.89161]
nd = sum([di*brho**i for i, di in enumerate(d)])
return unidades.Viscosity(nd*nt/100, "muPas")
| gpl-3.0 | 858,815,280,121,268,900 | 37.318182 | 79 | 0.515675 | false | 2.547927 | false | false | false |
intip/aldryn-bootstrap3 | aldryn_bootstrap3/fields.py | 1 | 3306 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
import django.forms.fields
from django.utils.translation import ugettext_lazy as _
from . import widgets, constants
from .conf import settings
class Context(django.forms.fields.ChoiceField):
widget = widgets.Context
CHOICES = constants.CONTEXT_CHOICES
DEFAULT = constants.CONTEXT_DEFAULT
def __init__(self, *args, **kwargs):
if 'choices' not in kwargs:
kwargs['choices'] = self.CHOICES
if 'initial' not in kwargs:
kwargs['initial'] = self.DEFAULT
kwargs.pop('coerce', None)
kwargs.pop('max_length', None)
kwargs.pop('widget', None)
kwargs['widget'] = self.widget
super(Context, self).__init__(*args, **kwargs)
class Size(django.forms.fields.ChoiceField):
widget = widgets.Size
CHOICES = constants.SIZE_WIDGET_CHOICES
DEFAULT = constants.SIZE_WIDGET_DEFAULT
def __init__(self, *args, **kwargs):
if 'choices' not in kwargs:
kwargs['choices'] = self.CHOICES
if 'initial' not in kwargs:
kwargs['initial'] = self.DEFAULT
kwargs.pop('coerce', None)
kwargs.pop('max_length', None)
kwargs.pop('widget', None)
kwargs['widget'] = self.widget
super(Size, self).__init__(*args, **kwargs)
class Classes(django.forms.fields.CharField):
pass
class Icon(django.forms.fields.CharField):
widget = widgets.Icon
DEFAULT = ''
def __init__(self, *args, **kwargs):
if 'initial' not in kwargs:
kwargs['initial'] = self.DEFAULT
kwargs.pop('coerce', None)
kwargs.pop('max_length', None)
kwargs.pop('widget', None)
kwargs['widget'] = self.widget
super(Icon, self).__init__(*args, **kwargs)
class Integer(django.forms.fields.IntegerField):
widget = django.forms.NumberInput
def __init__(self, *args, **kwargs):
kwargs.pop('coerce', None)
kwargs.pop('max_length', None)
kwargs.pop('widget', None)
kwargs['widget'] = self.widget
super(Integer, self).__init__(*args, **kwargs)
class Classes(django.forms.fields.CharField):
widget = django.forms.widgets.Textarea
class MiniText(django.forms.fields.CharField):
widget = widgets.MiniTextarea
def __init__(self, *args, **kwargs):
kwargs.pop('coerce', None)
kwargs.pop('max_length', None)
kwargs.pop('widget', None)
kwargs['widget'] = self.widget
super(MiniText, self).__init__(*args, **kwargs)
class LinkOrButton(django.forms.fields.ChoiceField):
widget = widgets.LinkOrButton
CHOICES = (
('lnk', 'link'),
('btn', 'button'),
)
DEFAULT = 'lnk'
def __init__(self, *args, **kwargs):
if 'choices' not in kwargs:
kwargs['choices'] = self.CHOICES
if 'initial' not in kwargs:
kwargs['initial'] = self.DEFAULT
kwargs.pop('coerce', None)
kwargs.pop('max_length', None)
kwargs.pop('widget', None)
kwargs['widget'] = self.widget
super(LinkOrButton, self).__init__(*args, **kwargs)
class Responsive(MiniText):
widget = widgets.Responsive
class ResponsivePrint(MiniText):
widget = widgets.ResponsivePrint
| bsd-3-clause | -1,519,764,175,685,120,000 | 28.517857 | 59 | 0.613128 | false | 3.880282 | false | false | false |
olinlibrary/ABE | abe/auth/access_tokens.py | 1 | 2298 | import os
import time
from binascii import hexlify
import jwt
from abe import database as db
ADMIN_EMAILS = os.environ.get('ADMIN_EMAILS', '').split(',')
OAUTH_REQUIRES_CLIENT_ID = os.environ.get('OAUTH_REQUIRES_CLIENT_ID')
ACCESS_TOKEN_SECRET = (os.environ.get('ACCESS_TOKEN_SECRET') or hexlify(os.urandom(32)))
AUTHENTICATED_USER_CLAIMS = [
'create:events', 'edit:events', 'delete:events',
'create:ics',
'read:all_events',
'read:labels',
]
ADMIN_USER_CLAIMS = AUTHENTICATED_USER_CLAIMS + [
'create:protected_events', 'edit:protected_events', 'delete:protected_events',
'create:labels', 'edit:labels', 'delete:labels',
'admin:apps',
]
def create_access_token(**params):
payload = {}
payload.update(params)
payload.update({'iat': int(time.time())})
token = jwt.encode(payload, ACCESS_TOKEN_SECRET, algorithm='HS256').decode()
return token
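# Illustrative sketch (hypothetical values; assumes OAUTH_REQUIRES_CLIENT_ID is
# not set and that the email appears in ADMIN_EMAILS):
#   token = create_access_token(email='[email protected]', provider='slack')
#   get_access_token_role(token)    # -> 'admin'
#   access_token_scopes(token)      # -> ADMIN_USER_CLAIMS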
def get_access_token_provider(token):
if is_valid_token(token):
payload = jwt.decode(token.encode(), ACCESS_TOKEN_SECRET, algorithms='HS256')
return payload.get('provider')
return None
def get_access_token_role(token):
if is_valid_token(token):
payload = jwt.decode(token.encode(), ACCESS_TOKEN_SECRET, algorithms='HS256')
return 'admin' if payload.get('email') in ADMIN_EMAILS else 'user'
return None
def access_token_scopes(token):
# The scope is computed based on the token's role, so that tokens stay
# valid if the role -> scope map changes.
scope = []
if is_valid_token(token):
payload = jwt.decode(token.encode(), ACCESS_TOKEN_SECRET, algorithms='HS256')
app = None
if 'client_id' in payload:
app = db.App.objects(client_id=payload['client_id']).first()
if not app and OAUTH_REQUIRES_CLIENT_ID:
            return scope
role = get_access_token_role(token)
if app and 'admin:*' not in app.scopes:
            role = 'user'
scope = ADMIN_USER_CLAIMS if role == 'admin' else AUTHENTICATED_USER_CLAIMS
return scope
def is_valid_token(token):
if not token:
return False
try:
jwt.decode(token.encode(), ACCESS_TOKEN_SECRET, algorithms='HS256') # for effect
except Exception:
return False
return True
| agpl-3.0 | -8,400,357,089,113,608,000 | 29.64 | 89 | 0.654047 | false | 3.445277 | false | false | false |
messersm/replay | replay/__init__.py | 1 | 1758 | r"""replay - replay the results of function calls
replay is a simple package that saves the results of time-intensive
deterministic or random function calls (any kind of callable) to a file,
so that later runs can replay them through a simple API.
replay will never execute any code from a replay file. It hashes the
calls and looks for the hashes in the replay file, so there's no
security risk in changing replay files (other than that you could
get other results...).
Example:
>>> import os
>>> import random
>>> import replay
>>> import tempfile
>>> fd, tmpname = tempfile.mkstemp(suffix='.replay')
>>> r1 = replay.Replay(tmpname)
>>> random_numbers1 = [r1(random.random) for i in range(10)]
>>> r1.save()
>>> del r1
>>> r2 = replay.Replay(tmpname)
>>> random_numbers2 = [r2(random.random) for i in range(10)]
>>> random_numbers1 == random_numbers2
True
>>> os.remove(tmpname)
Replay files can have different formats. Right now only a human-readable
and -editable format is implemented. This format looks mostly like python
except that the actual values a encoded with json.
Here's an example for such a file:
-----------------------------
random() = 0.3
pow(2, 3) = 8
# This is a comment.
random() = 0.2
sorted([7, 2, 3], reverse=true) = [7, 3, 2]
-----------------------------
You can freely edit such a file and future calls to these functions
will return the results you write into them:
Example:
>>> import os
>>> import random
>>> import replay
>>> import tempfile
>>> fd, tmpname = tempfile.mkstemp(suffix='.replay')
>>> with open(tmpname, 'w') as f: n = f.write('random() = 40\n')
>>> r = replay.Replay(tmpname)
>>> r(random.random)
40
>>> os.remove(tmpname)
"""
__version__ = '0.1.2'
from .replay import Replay
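# A usage sketch with arguments (assumption: positional and keyword arguments
# are passed straight through to the wrapped callable, as the file format
# example "pow(2, 3) = 8" above suggests):
#
# >>> r = replay.Replay('calls.replay')
# >>> r(pow, 2, 3)   # recorded as "pow(2, 3) = 8" on the first run
# 8
# >>> r.save()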
| agpl-3.0 | 5,085,508,090,791,216,000 | 28.3 | 73 | 0.685438 | false | 3.508982 | false | false | false |
oracal/cppstub | tests/test_header_output.py | 1 | 11523 | #path hack.
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
import unittest
from cppstub import CppFile
from cppstub import CppNamespace
from cppstub import CppMethod
from cppstub import CppClass
class CppStubHeaderOutputTestSuite(unittest.TestCase):
def setUp(self):
self.cpp_file = CppFile("TestSuite")
def test_header_output_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
self.cpp_file.namespaces.append(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\n}\n\n", self.cpp_file.header())
def test_header_output_namespace_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_namespace1 = CppNamespace("test1", cpp_namespace)
cpp_namespace.namespaces.append(cpp_namespace1)
self.cpp_file.namespaces.append(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nnamespace test1\n{\n\n}\n\n}\n\n", self.cpp_file.header())
def test_header_output_multiple_namespaces(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_namespace1 = CppNamespace("test1", self.cpp_file)
self.cpp_file.namespaces.append(cpp_namespace)
self.cpp_file.namespaces.append(cpp_namespace1)
self.assertEquals("\n\nnamespace test\n{\n\n}\n\nnamespace test1\n{\n\n}\n\n", self.cpp_file.header())
def test_header_output_function_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_function = CppMethod("test1", [], "void", cpp_namespace)
cpp_namespace.methods.append(cpp_function)
self.cpp_file.namespaces.append(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nvoid test1();\n\n}\n\n", self.cpp_file.header())
def test_header_output_constructor_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class = CppClass("Test", parent = cpp_namespace)
cpp_method = CppMethod("Test", [], None, cpp_class)
cpp_class.methods["public"].append(cpp_method)
self.cpp_file.namespaces.append(cpp_namespace)
cpp_namespace.classes.append(cpp_class)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test\n{\n\npublic:\n\n Test();\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_private_access_method_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class = CppClass("Test", parent = cpp_namespace)
cpp_method = CppMethod("test1", [], "void", cpp_class)
cpp_class.add_method(cpp_method, "private")
cpp_namespace.add_class(cpp_class)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test\n{\n\nprivate:\n\n void test1();\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_private_access_const_return_method_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class = CppClass("Test", parent = cpp_namespace)
cpp_method = CppMethod("test1", [], "int", cpp_class)
cpp_method.const_return_type = True
cpp_class.add_method(cpp_method, "private")
cpp_namespace.add_class(cpp_class)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test\n{\n\nprivate:\n\n const int test1();\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_private_access_virtual_const_return_method_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class = CppClass("Test", parent = cpp_namespace)
cpp_method = CppMethod("test1", [], "int", cpp_class)
cpp_method.const_return_type = True
cpp_method.virtual = True
cpp_class.add_method(cpp_method, "private")
cpp_namespace.add_class(cpp_class)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test\n{\n\nprivate:\n\n virtual const int test1();\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_static_return_method_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class = CppClass("Test", parent = cpp_namespace)
cpp_method = CppMethod("test1", [], "int", cpp_class)
cpp_method.static = True
cpp_class.add_method(cpp_method, "private")
cpp_namespace.add_class(cpp_class)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test\n{\n\nprivate:\n\n static int test1();\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_public_access_method_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class = CppClass("Test", parent = cpp_namespace)
cpp_method = CppMethod("test1", [], "int", cpp_class)
cpp_class.add_method(cpp_method, "public")
cpp_namespace.add_class(cpp_class)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test\n{\n\npublic:\n\n int test1();\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_protected_access_method_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class = CppClass("Test", parent = cpp_namespace)
cpp_method = CppMethod("test1", [], "int", cpp_class)
cpp_class.add_method(cpp_method, "protected")
cpp_namespace.add_class(cpp_class)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test\n{\n\nprotected:\n\n int test1();\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_method_with_return_type_and_arguments_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class = CppClass("Test", parent = cpp_namespace)
cpp_method = CppMethod("test1", ["int argument"], "int", cpp_class)
cpp_class.add_method(cpp_method, "private")
cpp_namespace.add_class(cpp_class)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test\n{\n\nprivate:\n\n int test1(int argument);\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_method_with_different_return_type_and_multiple_arguments_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class = CppClass("Test", parent = cpp_namespace)
cpp_method = CppMethod("test1", ["int argument1", "std::string argument2"], "std::string", cpp_class)
cpp_class.add_method(cpp_method, "private")
cpp_namespace.add_class(cpp_class)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test\n{\n\nprivate:\n\n std::string test1(int argument1, std::string argument2);\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_multiple_methods_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class = CppClass("Test", parent = cpp_namespace)
cpp_method1 = CppMethod("test1", ["int argument1"], "int", cpp_class)
cpp_method2 = CppMethod("test2", ["std::string argument2"], "std::string", cpp_class)
cpp_class.add_method(cpp_method1, "private")
cpp_class.add_method(cpp_method2, "private")
cpp_namespace.add_class(cpp_class)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test\n{\n\nprivate:\n\n int test1(int argument1);\n\n std::string test2(std::string argument2);\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_multiple_access_multiple_methods_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class = CppClass("Test", parent = cpp_namespace)
cpp_method1 = CppMethod("test1", ["int argument1"], "int", cpp_class)
cpp_method2 = CppMethod("test2", ["std::string argument2"], "std::string", cpp_class)
cpp_class.add_method(cpp_method1, "public")
cpp_class.add_method(cpp_method2, "private")
cpp_namespace.add_class(cpp_class)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test\n{\n\npublic:\n\n int test1(int argument1);\n\nprivate:\n\n std::string test2(std::string argument2);\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_private_access_class_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class1 = CppClass("Test1", parent = cpp_namespace)
cpp_class2 = CppClass("Test2", parent = cpp_class1)
cpp_class1.add_class(cpp_class2, "private")
cpp_namespace.add_class(cpp_class1)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test1\n{\n\nprivate:\n\n class Test2\n {\n\n };\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_public_access_class_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class1 = CppClass("Test1", parent = cpp_namespace)
cpp_class2 = CppClass("Test2", parent = cpp_class1)
cpp_class1.add_class(cpp_class2, "public")
cpp_namespace.add_class(cpp_class1)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test1\n{\n\npublic:\n\n class Test2\n {\n\n };\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_protected_access_class_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class1 = CppClass("Test1", parent = cpp_namespace)
cpp_class2 = CppClass("Test2", parent = cpp_class1)
cpp_class1.add_class(cpp_class2, "protected")
cpp_namespace.add_class(cpp_class1)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test1\n{\n\nprotected:\n\n class Test2\n {\n\n };\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_template_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class = CppClass("Test1", parent = cpp_namespace)
cpp_class.templated = True
cpp_class.template_type = "Test"
cpp_namespace.add_class(cpp_class)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\ntemplate <class Test>\nclass Test1\n{\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_template_method_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class = CppClass("Test1", parent = cpp_namespace)
cpp_method = CppMethod("test1", [], "Test&", cpp_class)
cpp_method.templated = True
cpp_method.template_type = "Test"
cpp_class.add_method(cpp_method, "private")
cpp_namespace.add_class(cpp_class)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test1\n{\n\nprivate:\n\n template <class Test>\n Test& test1();\n\n};\n\n}\n\n", self.cpp_file.header())
if __name__ == '__main__':
unittest.main()
| mit | 646,316,628,420,251,500 | 57.19697 | 211 | 0.657121 | false | 3.147501 | true | false | false |
thegamer87/Pymbrum | hr/TimeManager.py | 1 | 7095 | import httplib, urllib, json
from urlparse import urlparse
import datetime
import pytz
import re
DATA_PROVIDER_URL = "/servlet/SQLDataProviderServer"
DATE_FORMAT = "%Y-%m-%d"
TIME_FORMAT = "%H:%M"
HUMAN_TIME_FORMAT = "%Hh %Mm %Ss"
ROWS_FIELD = "rows"
ROWS_VALUE = "10"
START_ROW_FIELD = "startrow"
START_ROW_VALUE = "0"
COUNT_FIELD = "count"
COUNT_VALUE = "true"
SQL_CMD_FIELD = "sqlcmd"
SQL_CMD_VALUE = "rows:ushp_fgettimbrus"
PDATE_FIELD = "pDATE"
TIMBR_DAY_FIELD = "DAYSTAMP"
TIMBR_TIME_FIELD = "TIMETIMBR"
TIMBR_DIRECTION_FIELD = "DIRTIMBR"
TIMBR_CAUSE_FIELD = "CAUSETIMBR"
TIMBR_TYPE_FIELD = "TYPETIMBR"
TIMBR_IP_FIELD = "IPTIMBR"
minExitTime = {"https://hr.cineca.it/HRPortal":datetime.timedelta(minutes=30), "https://saas.hrzucchetti.it/hrpmaxmara":datetime.timedelta(hours=30)}
dayWorkTime={"https://hr.cineca.it/HRPortal":datetime.timedelta(hours=7, minutes=12), "https://saas.hrzucchetti.it/hrpmaxmara":datetime.timedelta(hours=8)}
class Timbratura:
VERSO_FIELD = "verso"
VERSO_ENTRATA = "E"
VERSO_USCITA = "U"
    def __init__(self, day, time, direction, cause=None, type=None, ip=None):
self.day = day
self.time = time
self.direction = direction
self.cause = cause
self.type = type
        self.ip = ip
def switchVerso(verso):
if verso == Timbratura.VERSO_ENTRATA:
return Timbratura.VERSO_USCITA
elif verso == Timbratura.VERSO_USCITA:
return Timbratura.VERSO_ENTRATA
def getTimbrature(cookie, url, date):
parsedUrl = urlparse(url)
host = parsedUrl.netloc
path = parsedUrl.path+DATA_PROVIDER_URL
if not date:
date = datetime.date.today().strftime(DATE_FORMAT)
params = urllib.urlencode({ROWS_FIELD:ROWS_VALUE, START_ROW_FIELD:START_ROW_VALUE, COUNT_FIELD:COUNT_VALUE, SQL_CMD_FIELD:SQL_CMD_VALUE, PDATE_FIELD:date})
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain", "Cookie":cookie}
connection = httplib.HTTPSConnection(host)
connection.request("POST", path, params, headers)
response = connection.getresponse()
responseStatus = response.status
responseData = response.read()
responseDict = json.loads(responseData)
headers = responseDict["Fields"]
data = responseDict["Data"]
print "RESPONSE STATUS: ", responseStatus
timbrature = []
versoActual = Timbratura.VERSO_ENTRATA
for adata in data:
if ("t" not in adata):
day = None
time = None
dir = None
cause = None
type = None
ip = None
for index, d in enumerate(adata):
if index < len(headers):
h = headers[index]
if h == TIMBR_DAY_FIELD:
day = d
if h == TIMBR_TIME_FIELD:
time = d
if h == TIMBR_DIRECTION_FIELD:
dir = d
if h == TIMBR_CAUSE_FIELD:
cause = d
if h == TIMBR_TYPE_FIELD:
type = d
if h == TIMBR_IP_FIELD:
ip = d
timbratura = Timbratura(day, time, dir, cause, type, ip)
if (timbratura.direction != versoActual):
timbratura.direction = versoActual
versoActual = switchVerso(versoActual)
timbrature.append(timbratura)
return timbrature
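# Usage sketch (the cookie value is hypothetical; url must be one of the keys
# of minExitTime/dayWorkTime above, and date uses DATE_FORMAT, e.g. "2015-06-01"):
#   cookie = "JSESSIONID=abc123"
#   url = "https://saas.hrzucchetti.it/hrpmaxmara"
#   timbrature = getTimbrature(cookie, url, None)
#   contatori = getContatori(url, timbrature)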
def getContatori(url, timbrature):
totalWorkTime = None
totalExitTime = None
precTime = None
precDir = None
time = None
precTime = None
if (timbrature):
for timbratura in timbrature:
dir = timbratura.direction
time = datetime.datetime.strptime(timbratura.time, TIME_FORMAT)
print "DIR: ",dir," TIMBR: ",str(timbratura.time)
if not precTime:
precTime = time
if dir == Timbratura.VERSO_USCITA:
workedTime = time - precTime
print "U timbr readed ... workedTime is: ",workedTime
if (not totalWorkTime):
totalWorkTime = workedTime
else:
totalWorkTime += workedTime
print "totalWorkTime updated to ",totalWorkTime
if dir == Timbratura.VERSO_ENTRATA:
exitTime = time-precTime
print "E timbr readed ... exitTime is: ",exitTime
if (not totalExitTime):
totalExitTime = exitTime
else:
totalExitTime += exitTime
print "totalExitTime updated to ",totalExitTime
precTime = time
companyMinExitTime = minExitTime[url]
nowTime = datetime.datetime.now(pytz.timezone("Europe/Rome")).time()
nowDateTime = datetime.datetime(time.year, time.month, time.day, nowTime.hour, nowTime.minute, nowTime.second)
print "now is ",nowDateTime
workedTime = nowDateTime - time
print "worked time from last timbr to now is ",workedTime
if dir == Timbratura.VERSO_ENTRATA:
if (not totalWorkTime):
totalWorkTime = workedTime
else:
totalWorkTime += workedTime
print "last timbr readed is E ... totalWorkTime updated to ",totalWorkTime
if not totalExitTime or (totalExitTime and totalExitTime < companyMinExitTime):
if not totalExitTime:
totalWorkTime -= companyMinExitTime
else:
totalWorkTime -= (companyMinExitTime - totalExitTime)
print "exitTime < minExitTime ... totalWorkTime updated to ",totalWorkTime
print "final totalWorkTime is ",totalWorkTime
print "final totalExitTime is ",totalExitTime
companyDayWorkTime = dayWorkTime[url]
timeToExit = companyDayWorkTime - totalWorkTime
timeOfExit = nowDateTime + timeToExit
workedPercent = round(totalWorkTime.total_seconds() / companyDayWorkTime.total_seconds() * 100)
if workedPercent > 100:
workedPercent = 100
print "final work time percent is: ",workedPercent
timeOfExitString = timeOfExit.strftime(TIME_FORMAT)
if timeToExit.total_seconds() < 0:
timeOfExitString = str(timeOfExit.time())+" ... che stracacchio di uno stracacchio ci fai ancora su quella sedia !!!"
print "final timeOfExit is ",timeOfExit
h,m,s = re.split(":",str(totalWorkTime))
totalWorkTimeString = h+"h "+m+"m "+s+"s"
h,m,s = re.split(":",str(totalExitTime))
totalExitTimeString = h+"h "+m+"m "+s+"s"
workedPercentString = str(workedPercent)
else:
totalWorkTimeString = "0h 0m 0s"
totalExitTimeString = "0h 0m 0s"
timeOfExitString = ""
workedPercentString = "0"
print "no timbr readed"
return {"workedTime":totalWorkTimeString, "exitTime":totalExitTimeString, "timeOfExit":timeOfExitString, "workedPercent":workedPercentString}
| gpl-2.0 | -5,927,728,885,146,939,000 | 32.625592 | 159 | 0.60451 | false | 3.636597 | false | false | false |
fastflo/emma | emmalib/Query.py | 1 | 5944 | # -*- coding: utf-8 -*-
# emma
#
# Copyright (C) 2006 Florian Schmidt ([email protected])
# 2014 Nickolay Karnaukhov ([email protected])
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from Constants import *
from query_regular_expression import *
def read_query(query, _start=0):
r = re.compile(r"""
(?s)
(
("(?:[^\\]|\\.)*?")| # double quoted strings
('(?:[^\\]|\\.)*?')| # single quoted strings
(`(?:[^\\]|\\.)*?`)| # backtick quoted strings
(/\*.*?\*/)| # c-style comments
(\#[^\n]*)| # shell-style comments
(\--[^\n]*)| # sql-style comments
([^;]) # everything but a semicolon
)+
""", re.VERBOSE)
rw = re.compile("[ \r\n\t]+")
m = rw.match(query, _start)
if m:
_start = m.end(0)
match = r.match(query, _start)
if not match:
return None, len(query)
return match.start(0), match.end(0)
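# Example (sketch): read_query returns the span of the next statement, stopping
# at an unquoted semicolon and skipping leading whitespace:
#   read_query("select 1; select 2")      ->  (0, 8)
#   read_query("select 1; select 2", 9)   ->  (10, 19)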
def read_expression(query, _start=0, concat=True, update_function=None, update_offset=0, icount=0):
r = query_regular_expression
# print "read expr in", query
match = r.search(query, _start)
# if match: print match.groups()
if not match:
return None, None
for i in range(1, match.lastindex + 1):
if match.group(i):
t = match.group(i)
e = match.end(i)
current_token = t
if current_token[len(current_token) - 1] == "(":
while 1:
icount += 1
if update_function is not None and icount >= 10:
icount = 0
update_function(False, update_offset + e)
# print "at", [query[e:e+15]], "..."
exp, end = read_expression(
query, e, False, update_function, update_offset, icount)
# print "got inner exp:", [exp]
if not exp:
break
e = end
if concat:
t += " " + exp
if exp == ")":
break
return t, e
print "should not happen!"
return None, None
def get_order_from_query(query):
current_order = []
r = re.compile(re_src_query_order)
# get current order by clause
match = re.search(r, query)
if not match:
# print "no order found in", [query]
# print "re:", [re_src_query_order]
return current_order
before, order, after = match.groups()
order.lower()
_start = 0
ident = None
while 1:
item = []
while 1:
ident, end = read_expression(order[_start:])
if not ident:
break
if ident == ",":
break
if ident[0] == "`":
ident = ident[1:-1]
item.append(ident)
_start += end
l = len(item)
if l == 0:
break
elif l == 1:
item.append(True)
elif l == 2:
if item[1].lower() == "asc":
item[1] = True
else:
item[1] = False
else:
print "unknown order item:", item, "ignoring..."
item = None
if item:
current_order.append(tuple(item))
if not ident:
break
_start += 1 # comma
return current_order
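# Illustrative sketch (the exact tokens recognised depend on re_src_query_order
# and query_regular_expression): for a query ending in
#   "... order by `name` desc, id"
# the returned list would be [("name", False), ("id", True)]
# (the second element is True for ascending order).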
def is_query_appendable(query):
"""
@rtype: ()
@type query: str
@param query:
@return:
"""
pat = r'(?i)("(?:[^\\]|\\.)*?")|(\'(?:[^\\]|\\.)*?\')|(`(?:[^\\]|\\.)*?`)|(union)|(select[ \r\n\t]+(.*)[ \r\n\t]+from[ \r\n\t]+(.*))'
r = re.compile(pat)
_start = 0
while 1:
result = re.search(r, query[_start:])
if not result:
return False
_start += result.end()
if result.group(4):
return False # union
if result.group(5) and result.group(6) and result.group(7):
break # found select
return result
# def search_query_end(self, text, _start):
# try:
# r = self.query_end_re
# except:
# r = self.query_end_re = re.compile(r'("(?:[^\\]|\\.)*?")|(\'(?:[^\\]|\\.)*?\')|(`(?:[^\\]|\\.)*?`)|(;)')
# while 1:
# result = re.search(r, text[_start:])
# if not result:
# return None
#
# _start += result.end()
# if result.group(4):
# return _start
# def get_field_list(self, s):
# # todo USE IT!
# fields = []
# _start = 0
# ident = None
# while 1:
# item = []
# while 1:
# ident, end = self.read_expression(s[_start:])
# if not ident:
# break
# if ident == ",":
# break
# if ident[0] == "`":
# ident = ident[1:-1]
# item.append(ident)
# _start += end
# if len(item) == 1:
# fields.append(item[0])
# else:
# fields.append(item)
# if not ident:
# break
# print "found fields:", fields
# return fields
| gpl-2.0 | -3,587,987,859,716,708,400 | 29.797927 | 137 | 0.472577 | false | 3.71965 | false | false | false |
rootulp/exercism | python/twelve-days/twelve_days.py | 1 | 1769 | class TwelveDays:
CARDINALS = {
1: 'first',
2: 'second',
3: 'third',
4: 'fourth',
5: 'fifth',
6: 'sixth',
7: 'seventh',
8: 'eighth',
9: 'ninth',
10: 'tenth',
11: 'eleventh',
12: 'twelfth'
}
PHRASES = {
2: 'two Turtle Doves',
3: 'three French Hens',
4: 'four Calling Birds',
5: 'five Gold Rings',
6: 'six Geese-a-Laying',
7: 'seven Swans-a-Swimming',
8: 'eight Maids-a-Milking',
9: 'nine Ladies Dancing',
10: 'ten Lords-a-Leaping',
11: 'eleven Pipers Piping',
12: 'twelve Drummers Drumming'
}
@classmethod
def verses(cls, start, stop):
return "\n".join([cls.verse(i) for i in range(start, stop + 1)]) + "\n"
@classmethod
def verse(cls, verse_num):
return ", ".join([_f for _f in [cls.head(verse_num),
cls.mid(verse_num),
cls.tail(verse_num)] if _f])
@classmethod
def head(cls, verse_num):
return ("On the %(cardinality)s day of Christmas my true love gave to "
"me" % ({"cardinality": cls.CARDINALS[verse_num]}))
@staticmethod
def tail(verse_num):
if verse_num == 1:
return "a Partridge in a Pear Tree.\n"
return "and a Partridge in a Pear Tree.\n"
@classmethod
def mid(cls, verse_num):
if verse_num != 1:
return ", ".join([cls.PHRASES[i] for i in range(verse_num, 1, -1)])
def verse(verse_num):
return TwelveDays.verse(verse_num)
def verses(start, stop):
return TwelveDays.verses(start, stop)
def sing():
return TwelveDays.verses(1, 12)
| mit | -7,615,109,493,597,396,000 | 25.402985 | 79 | 0.508762 | false | 3.153298 | false | false | false |
bob-anderson-ok/py-ote | src/pyoteapp/iterative_logl_functions.py | 1 | 23172 | """
A collection of functions for fast MLE fits to light curves.
MLE: Maximum Likelihood Estimation
The 'fit' is to an array of intensities (y[]: float) that comprise the light
curve.
"""
import numpy as np
import sys
from math import log, pi, sqrt, exp
from typing import Tuple
from pyoteapp.solverUtils import calcNumCandidatesFromEventSize
from pyoteapp.solverUtils import calcNumCandidatesFromDandRlimits
from pyoteapp.solverUtils import model, logLikelihoodLine
from pyoteapp.likelihood_calculations import cum_loglikelihood, aicc
from numba import njit, jit
MIN_FLOAT: float = sys.float_info.min
@jit
def add_entry(ynew: float, s: float, s2: float, n: int, calc_var: bool):
"""Adds an entry to the metrics, s, s2, and n.
s: previous value of sum of y[]
s2: previous value of sum of y[]*y[]
n: previous number of entries in the metric
"""
n = n + 1
s = s + ynew
s2 = s2 + ynew * ynew
if calc_var:
var = (s2 - s * s / n) / n # This is sigma**2
else:
var = None
return s, s2, n, var
@jit
def sub_entry(ynew: float, s: float, s2: float, n: int, calc_var: bool):
"""Subtracts an entry from the metrics, s, s2, and n.
s: previous value of sum of y[]
s2: previous value of sum of y[]*y[]
n: previous number of entries in the metric
"""
n = n - 1
s = s - ynew
s2 = s2 - ynew * ynew
if calc_var:
var = (s2 - s * s / n) / n # This is sigma**2
else:
var = None
return s, s2, n, var
def calc_metric_iteratively(y: np.ndarray) -> Tuple[float, float, int, float]:
"""Calculates a metric iteratively (term by term) for test purposes only.
    This is expected to be very slow compared to simply using numpy; on an
    array of size 1000, the numpy version was 600 times faster.
y: array of floats
"""
# Set initial values for iteration
s = 0.0
s2 = 0.0
n = 0
var = None
for ynew in np.nditer(y):
s, s2, n, var = add_entry(ynew, s, s2, n, calc_var=True)
return s, s2, n, var
@jit
def calc_metric_numpy(y: np.ndarray):
"""Used for timing comparisons and initializing a metric from a large y[].
It calculates the metrics using fast numpy operations.
"""
n = y.size
s2 = np.sum(y * y)
s = y.sum()
var = (s2 - s * s / n) / n # This is sigma**2
return s, s2, n, var
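# Sanity-check sketch: the iterative metrics agree with the numpy version
# (values below are only for illustration):
#   y = np.array([1.0, 2.0, 3.0])
#   s, s2, n, var = calc_metric_numpy(y[:2])
#   s, s2, n, var = add_entry(y[2], s, s2, n, calc_var=True)
#   # (s, s2, n, var) now equals calc_metric_numpy(y)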
StdAnswer = Tuple[int, int, float, float, float, float, float]
"""StdAnswer is: d, r, b, a, sigmaB, sigmaA, metric """
@njit # cache=True did not work for this function --- gave a pickling error
def find_best_event_from_min_max_size(
y: np.ndarray, left: int, right: int, min_event: int, max_event: int):
"""Finds the best size and location for an event >= min and <= max"""
# The only time this function is called with a y containing 1 element
# is during the import of this module where the call is made to force the jit
# compiler into action --- a work-around to the pickle error problem.
if y.size == 1:
yield -1.0, 0.0, 0, 0, 0.0, 0.0, 0.0, 0.0, 0.0
max_metric = 0.0
d_best = 0
r_best = 0
b_best = 0.0
a_best = 0.0
sigma_b_best = 0.0
sigma_a_best = 0.0
sigma_a = sigma_b = 0.0 # To satisfy PEP8
not_started = True
num_candidates = calcNumCandidatesFromEventSize(eventType="DandR",
left=left, right=right, minSize=min_event, maxSize=max_event)
solution_counter = 0
for event in range(min_event, max_event + 1):
d, r, b, a, sigma_b, sigma_a, metric, sol_count = \
locate_fixed_event_position(y, left, right, event)
# Initialize the 'best' values
if not_started:
max_metric = metric
d_best = d
r_best = r
b_best = b
a_best = a
sigma_b_best = sigma_b
sigma_a_best = sigma_a
not_started = False
if metric >= max_metric and b > a:
max_metric = metric
d_best = d
r_best = r
b_best = b
a_best = a
sigma_b_best = sigma_b
sigma_a_best = sigma_a
solution_counter += sol_count
# yield 'fractionDone', solution_counter / num_candidates
yield 1.0, solution_counter / num_candidates, 0, 0, 0.0, 0.0, 0.0, 0.0, 0.0
# Here we test for solution being better than straight line
if not solution_is_better_than_straight_line(
y, left, right, d_best, r_best, b_best, a_best, sigma_b_best, sigma_a_best):
# yield 'no event present', solution_counter / num_candidates
yield -1.0, solution_counter / num_candidates, 0, 0, 0.0, 0.0, 0.0, 0.0, 0.0
# yield d_best, r_best, b_best, a_best, sigma_b_best, sigma_a_best, max_metric
yield 0.0, 1.0, d_best, r_best, b_best, a_best, sigma_b_best, sigma_a_best, max_metric
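# Sketch of consuming the generator above (names are only illustrative; the
# first two yielded values act as a status code and a fraction-done):
#   for stat, frac, d, r, b, a, sb, sa, metric in \
#           find_best_event_from_min_max_size(y, 0, y.size - 1, 5, 50):
#       if stat == 1.0:       # progress report
#           continue
#       elif stat == -1.0:    # no event better than a straight line
#           break
#       else:                 # stat == 0.0: final solution in d, r, b, a, ...
#           break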
def find_best_r_only_from_min_max_size(
y: np.ndarray, left: int, right: int, min_event: int, max_event: int):
"""Finds the best r-only location for r >= min_event and <= max_event"""
assert min_event >= 1
assert max_event <= right - left
def update_best_solution():
nonlocal max_metric, b_best, a_best, sigma_b, sigma_a
nonlocal r_best
max_metric = metric
b_best = b
a_best = a
sigma_b = sqrt(b_var)
sigma_a = sqrt(a_var)
r_best = r
def calc_metric():
nonlocal a_var, b_var
max_var = max(a_var, b_var, sys.float_info.min)
if a_var <= 0.0:
a_var = max_var
if b_var <= 0.0:
b_var = max_var
return -b_n * log(b_var) - a_n * log(a_var)
# These get changed by the first call to update_best_solution but
    # have to be set to the proper type to satisfy type checking.
metric = 0.0
max_metric = 0.0
r_best = 0
b_best = 0.0
a_best = 0.0
sigma_b = 0.0
sigma_a = 0.0
r = left + min_event
# Use numpy version of metric calculator to initialize iteration variables
b_s, b_s2, b_n, b_var = calc_metric_numpy(y[r + 1:right + 1])
a_s, a_s2, a_n, a_var = calc_metric_numpy(y[left:r])
b = b_s / b_n
a = a_s / a_n
# Calculate metric for initial position of r
metric = calc_metric()
update_best_solution()
r_final = left + max_event
while r < r_final:
# calc metric for next r position from current position
b_s, b_s2, b_n, b_var = sub_entry(y[r+1], b_s, b_s2, b_n, True)
a_s, a_s2, a_n, a_var = add_entry(y[r], a_s, a_s2, a_n, True)
r += 1
metric = calc_metric()
b = b_s / b_n
a = a_s / a_n
goodSolution = solution_is_better_than_straight_line(
y, left, right, -1, r, b, a, sqrt(b_var), sqrt(a_var), k=3)
if metric > max_metric and b > a and goodSolution:
update_best_solution()
if b_best <= a_best:
# yield 'no event present', 1.0
yield -1.0, 1.0, -1, -1, 0.0, 0.0, 0.0, 0.0, 0.0
event_size_found = r_best - left
if event_size_found == max_event or event_size_found == min_event:
# Invalid event size --- invalid limit
yield -1.0, 1.0, -1, -1, 0.0, 0.0, 0.0, 0.0, 0.0
# Here we test for the best solution being better than straight line
if not solution_is_better_than_straight_line(
y, left, right, -1, r_best, b, a, sigma_b, sigma_a, k=3):
# yield 'no event present', 1.0
yield -1.0, 1.0, -1, -1, 0.0, 0.0, 0.0, 0.0, 0.0
# yield None, r_best, b_best, a_best, sigma_b, sigma_a, max_metric
yield 0.0, 1.0, -1, r_best, b_best, a_best, sigma_b, sigma_a, max_metric
def find_best_d_only_from_min_max_size(
y: np.ndarray, left: int, right: int, min_event: int, max_event: int):
"""Finds the best d-only location for max_event >= event >= min_event """
assert min_event >= 1
assert max_event <= right - left
def update_best_solution():
nonlocal max_metric, b_best, a_best, sigma_b, sigma_a
nonlocal d_best
max_metric = metric
b_best = b
a_best = a
sigma_b = sqrt(b_var)
sigma_a = sqrt(a_var)
d_best = d
def calc_metric():
nonlocal a_var, b_var
max_var = max(a_var, b_var, sys.float_info.min)
if a_var <= 0.0:
a_var = max_var
if b_var <= 0.0:
b_var = max_var
return -b_n * log(b_var) - a_n * log(a_var)
# These get changed by the first call to update_best_solution but
    # have to be set to the proper type to satisfy type checking.
metric = 0.0
max_metric = 0.0
d_best = 0
b_best = 0.0
a_best = 0.0
sigma_b = 0.0
sigma_a = 0.0
d = right - max_event # Initial d position
# Use numpy version of metric calculator to initialize iteration variables
b_s, b_s2, b_n, b_var = calc_metric_numpy(y[left:d])
a_s, a_s2, a_n, a_var = calc_metric_numpy(y[d+1:right+1])
b = b_s / b_n
a = a_s / a_n
# print(b, a, b_n, a_n)
# Calculate metric for initial position of d
metric = calc_metric()
update_best_solution()
d_final = right - min_event
while d < d_final:
# calc metric for next d position from current position
b_s, b_s2, b_n, b_var = add_entry(y[d], b_s, b_s2, b_n, True)
a_s, a_s2, a_n, a_var = sub_entry(y[d+1], a_s, a_s2, a_n, True)
d += 1
metric = calc_metric()
b = b_s / b_n
a = a_s / a_n
goodSolution = solution_is_better_than_straight_line(
y, left, right, d, -1, b, a, sqrt(b_var), sqrt(a_var), k=3)
if metric > max_metric and b > a and goodSolution:
update_best_solution()
if b_best <= a_best:
# yield 'no event present', 1.0
yield -1.0, 1.0, -1, -1, 0.0, 0.0, 0.0, 0.0, 0.0
event_size_found = right - d_best
if event_size_found == max_event or event_size_found == min_event:
# Invalid event size --- invalid limit
yield -1.0, 1.0, -1, -1, 0.0, 0.0, 0.0, 0.0, 0.0
if not solution_is_better_than_straight_line(
y, left, right, d_best, -1, b, a, sigma_b, sigma_a, k=3):
# yield 'no event present', 1.0
yield -1.0, 1.0, -1, -1, 0.0, 0.0, 0.0, 0.0, 0.0
# yield d_best, None, b_best, a_best, sigma_b, sigma_a, max_metric
yield 0.0, 1.0, d_best, -1, b_best, a_best, sigma_b, sigma_a, max_metric
@njit(cache=True)
def locate_fixed_event_position(
y: np.ndarray, left: int, right: int,
event_size: int) -> Tuple[int, int, float, float, float, float,
float, int]:
"""Finds the best location for a fixed size event"""
d = left
r = d + event_size + 1
# assert(r < right)
# Use numpy version of metric calculator to initialize iteration variables
b_s, b_s2, b_n, b_var = calc_metric_numpy(y[r+1:right+1])
a_s, a_s2, a_n, a_var = calc_metric_numpy(y[left+1:r])
b = b_s / b_n
a = a_s / a_n
# Calculate metric for initial position of event at extreme left
# ========== calc_metric() ===========
max_var = max(a_var, b_var, MIN_FLOAT)
if a_var <= 0.0:
a_var = max_var
if b_var <= 0.0:
b_var = max_var
metric = - b_n * log(b_var) - a_n * log(a_var)
# ========== calc_metric() ===========
# ======= update_best_solution() ========
max_metric = metric
b_max = b
a_max = a
sigma_b = sqrt(b_var)
sigma_a = sqrt(a_var)
d_max = d
r_max = r
# ======= update_best_solution() ========
    # The metric used is the variable part of logL(D,R), dropping the constant
    # part and ignoring a factor of 2. The full logL(D,R) would have been:
    #
    # -0.5 * (b_n*log(b_var) + a_n*log(a_var) + (b_n + a_n) * (1 + log(2*pi)))
#
# We use the reduced form to speed the calculation yet achieve a MLE
# solution
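    # A sketch of recovering the full log-likelihood from the reduced metric
    # (with n_total = b_n + a_n):
    #     logL = 0.5 * metric - 0.5 * n_total * (1 + log(2 * pi))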
# metrics = [metric] # For use during development
solution_count = 0
while r < right - 1:
# calc metric for next event position from current position
b_s, b_s2, b_n, b_var = add_entry(y[d], b_s, b_s2, b_n, False)
b_s, b_s2, b_n, b_var = sub_entry(y[r+1], b_s, b_s2, b_n, True)
a_s, a_s2, a_n, a_var = add_entry(y[r], a_s, a_s2, a_n, False)
a_s, a_s2, a_n, a_var = sub_entry(y[d + 1], a_s, a_s2, a_n, True)
b = b_s / b_n
a = a_s / a_n
# ========== calc_metric() ===========
max_var = max(a_var, b_var, MIN_FLOAT)
if a_var <= 0.0:
a_var = max_var
if b_var <= 0.0:
b_var = max_var
metric = - b_n * log(b_var) - a_n * log(a_var)
# ========== calc_metric() ===========
# Move to next position
d += 1
r += 1
goodSolution = solution_is_better_than_straight_line(
y, left, right, d, r, b, a, sqrt(b_var), sqrt(a_var), k=4)
if metric > max_metric and b > a and goodSolution:
# ======= update_best_solution() ========
max_metric = metric
b_max = b
a_max = a
sigma_b = sqrt(b_var)
sigma_a = sqrt(a_var)
d_max = d
r_max = r
# ======= update_best_solution() ========
solution_count += 1
return d_max, r_max, b_max, a_max, sigma_b, sigma_a, max_metric, solution_count
@njit # cache=True gave pickling error
def locate_event_from_d_and_r_ranges(
y: np.ndarray, left: int, right: int, d_start: int, d_end: int,
r_start: int, r_end: int):
"""Finds the best size and location for event specified by d & r ranges"""
# The only time this function is called with a y containing 1 element
# is during the import of this module where the call is made to force the jit
# compiler into action --- a work-around to the pickle error problem.
if y.size == 1:
yield -1.0, 0.0, -1, -1, 0.0, 0.0, 0.0, 0.0, 0.0
num_candidates = calcNumCandidatesFromDandRlimits(
eventType='DandR',
d_start=d_start, d_end=d_end, r_start=r_start, r_end=r_end)
clump_size = np.ceil(num_candidates / 50)
solution_counter = 0
d = d_start
max_metric = 0.0
d_best = 0
r_best = 0
b_s_best = 0.0
a_s_best = 0.0
b_var_best = 0.0
a_var_best = 0.0
b_n_best = 0
a_n_best = 0
not_started = True
while d <= d_end:
# Use numpy version of metric calculator to initialize iteration
# variables for current d and initial r_start
r = r_start
if d > left:
b_sl, b_s2l, b_nl, b_varl = calc_metric_numpy(y[left:d])
# Lefthand wing
else:
b_sl = 0.0
b_s2l = 0.0
b_nl = 0
b_varl = 0.0
b_sr, b_s2r, b_nr, b_varr = calc_metric_numpy(y[r+1:right+1])
# Righthand wing
b_s = b_sl + b_sr
b_s2 = b_s2l + b_s2r
b_n = b_nl + b_nr
b_var = b_varl + b_varr
a_s, a_s2, a_n, a_var = calc_metric_numpy(y[d+1:r])
# ============== calc_metric() =================
max_var = max(a_var, b_var, MIN_FLOAT)
if a_var <= 0.0:
a_var = max_var
if b_var <= 0.0:
b_var = max_var
metric = - b_n * log(b_var) - a_n * log(a_var)
# ============== calc_metric() =================
if not_started:
# =========== update_best_solution() =======
max_metric = metric
d_best = d
r_best = r
b_s_best = b_s
a_s_best = a_s
b_var_best = b_var
a_var_best = a_var
b_n_best = b_n
a_n_best = a_n
# =========== update_best_solution() =======
not_started = False
b = b_s / b_n
a = a_s / a_n
if metric >= max_metric and b > a:
# =========== update_best_solution() =======
max_metric = metric
d_best = d
r_best = r
b_s_best = b_s
a_s_best = a_s
b_var_best = b_var
a_var_best = a_var
b_n_best = b_n
a_n_best = a_n
# =========== update_best_solution() =======
while r < r_end:
r += 1
b_s, b_s2, b_n, b_var = sub_entry(y[r], b_s, b_s2, b_n, True)
a_s, a_s2, a_n, a_var = add_entry(y[r-1], a_s, a_s2, a_n, True)
# ============== calc_metric() =================
max_var = max(a_var, b_var, MIN_FLOAT)
if a_var <= 0.0:
a_var = max_var
if b_var <= 0.0:
b_var = max_var
metric = - b_n * log(b_var) - a_n * log(a_var)
# ============== calc_metric() =================
b = b_s / b_n
a = a_s / a_n
goodSolution = solution_is_better_than_straight_line(
y, left, right, d, r, b, a, sqrt(b_var), sqrt(a_var), k=4)
if metric >= max_metric and b > a and goodSolution:
# =========== update_best_solution() =======
max_metric = metric
d_best = d
r_best = r
b_s_best = b_s
a_s_best = a_s
b_var_best = b_var
a_var_best = a_var
b_n_best = b_n
a_n_best = a_n
# =========== update_best_solution() =======
solution_counter += 1
if solution_counter % clump_size == 0:
# yield 'fractionDone', solution_counter / num_candidates
yield 1.0, solution_counter / num_candidates, -1, -1, 0.0, 0.0, 0.0, 0.0, 0.0
d += 1
b = b_s_best / b_n_best
a = a_s_best / a_n_best
sigma_b = sqrt(b_var_best)
sigma_a = sqrt(a_var_best)
# Here we test for solution being better than straight line
if not solution_is_better_than_straight_line(
y, left, right, d_best, r_best, b, a, sigma_b, sigma_a, k=4):
# yield 'no event present', solution_counter / num_candidates
yield -1.0, solution_counter / num_candidates, -1, -1, 0.0, 0.0, 0.0, 0.0, 0.0
yield 0.0, 1.0, d_best, r_best, b, a, sigma_b, sigma_a, max_metric
@njit(cache=True)
def solution_is_better_than_straight_line(y, left=-1, right=-1, d=-1, r=-1, b=0.0, a=0.0, sigma_b=0.0,
sigma_a=0.0, k=4):
# The only time that the result of this routine is important is for very
# low snr signals. In that case, sigma_b and sigma_a are approximately
# equal anyway. For other cases, we want to 'score' the straight line
# against the signal in as equal a manner as possible, so we will use a
# common noise value for all points. Here we calculate that value...
big_sigma = np.float64(max(sigma_b, sigma_a))
# And here we make sure it never gets too small...
if big_sigma < (b - a) / 100.0: # 100 == max snr
big_sigma = (b - a) / 100.0
# If the current snr is greater than 3, the solution is always better
# than a straight line, so we skip the calculations.
if (b - a) / big_sigma > 3.0:
return True
# If this point is reached, a valid scoring needs to be performed.
num_pts = y.size
m, sigma = model(
B=b, A=a, D=d, R=r, sigmaB=big_sigma, sigmaA=big_sigma,
numPts=num_pts)
solution_logl = cum_loglikelihood(y, m, sigma, left, right)
# lineScore = logLikelihoodLine(y, sigmaB=big_sigma, left=left, right=right)
lineScore = logLikelihoodLine(y, sigmaB=np.sqrt(np.var(y)), left=left, right=right)
aiccSol = aicc(solution_logl, right - left + 1, k)
aiccLine = aicc(lineScore, right - left + 1, 1)
if aiccSol < aiccLine:
pLine = exp(-(aiccLine - aiccSol) / 2)
else:
pLine = 1.00
if pLine > 0.001:
return False
else:
return True
def calc_logl_from_metric(s: float, s2: float, n: int) -> Tuple[float, float]:
sigma2 = (s2 / n - (s / n) * (s / n))
# -log(sqrt(2*pi)) = -0.9189385332046727
return -n * 0.9189385332046727 - n / 2.0 - n * log(sigma2) / 2.0, sigma2
def cum_loglikelihood_raw(y, m, sigma):
""" numpy accelerated sum of loglikelihoods --- for test purposes
Args:
y (ndarray): measured values
m (ndarray): associated mean values (the 'model')
sigma (ndarray): associated stdev values
"""
n = len(y)
ans = -n * np.log(np.sqrt(2*pi))
ans -= np.sum(np.log(sigma))
ans -= (np.sum((y - m) ** 2 / sigma ** 2) / 2.0)
return ans, np.var(y)
def loglikelihood(y, m, sigma):
""" calculate ln(likelihood) of a single point from a gaussian distribution
Args:
y (float): measured value
m (float): mean (expected model value)
sigma (float): stdev of measurements
Returns:
natural logarithm of un-normalized probability based on Gaussian
distribution
"""
# log(x) is natural log (base e)
# -log(sqrt(2*pi)) = -0.9189385332046727 = -log(2*pi)/2
# t1 = -log(sqrt(2*pi))
t1 = -0.9189385332046727
t2 = -log(sigma)
t3 = -(y - m) ** 2 / (2 * sigma ** 2)
return t1 + t2 + t3
@njit
def bob():
print("Hello from Bob")
num_pts = 30
m, sigma = model(
B=4.0, A=1.0, D=19, R=29, sigmaB=1.0, sigmaA=1.0,
numPts=num_pts)
# m += np.random.normal(0.0, 0.01, 30)
# print(m.size, sigma.size)
# model.inspect_types()
# ans = solution_is_better_than_straight_line(m, 0, 29, 10, 20, 4.0, 1.0, 1.0, 1.0)
# print(ans)
# solution_is_better_than_straight_line.inspect_types()
left = 1
right = num_pts
evt = 9
noise_sigma = 0.01
noise = np.random.normal(0.0, noise_sigma, num_pts)
ans = locate_fixed_event_position(m + noise, left, right, evt)
print(ans)
noise = np.random.normal(0.0, noise_sigma, num_pts)
ans = locate_fixed_event_position(m + noise, left, right, evt)
print(ans)
noise = np.random.normal(0.0, noise_sigma, num_pts)
ans = locate_fixed_event_position(m + noise, left, right, evt)
print(ans)
print(m)
# ans = locate_fixed_event_position(m, 0, 29, 10)
# print(ans)
# ans = locate_fixed_event_position(m, 0, 29, 11)
# locate_fixed_event_position.inspect_types()
# print(ans)
# ans = calc_metric_numpy(m)
# calc_metric_numpy.inspect_types()
# print(ans)
# We perform the following calls to force the njit of the functions. This hides the
# compile time from the user (extends the load time a bit) and thus eliminates
# the slightly disconcerting 1 or 2 second delay before these functions start to
# operate if we wait until the user first invokes them after starting pyote. We
# do this as a work-around to the pickle problem that keeps normal caching from working.
_ = find_best_event_from_min_max_size(np.zeros(1), 0, 0, 0, 0)
_ = locate_event_from_d_and_r_ranges(np.zeros(1), 0, 0, 0, 0, 0, 0)
if __name__ == "__main__":
bob()
# bob.inspect_types()
pass
| mit | -327,251,744,266,116,200 | 30.02008 | 113 | 0.541516 | false | 2.976876 | false | false | false |
vmonaco/rpscontest | agents/switching11.py | 1 | 2056 | # See http://overview.cc/RockPaperScissors for more information about rock, paper, scissors
# Similar to switching10 with an additional beat2 and complement function
import random
if input == "":
hist = ""
opp_played = []
beat = {'P': 'S', 'S': 'R', 'R': 'P'}
beat2 = {'PP': 'S', 'SS': 'R', 'RR':'P', 'PS': 'S', 'PR': 'P', 'RS': 'R', 'RP': 'P', 'SP': 'S', 'SR': 'R'}
complement = {'PS': 'R', 'PR': 'S', 'RS': 'P', 'RP': 'S', 'SP': 'R', 'SR': 'P'}
score = {'RR': 0, 'PP': 0, 'SS': 0, 'PR': 1, 'RS': 1, 'SP': 1, 'RP':-1, 'SR':-1, 'PS':-1, }
output = random.choice(["R", "P", "S"])
candidates1 = [output, output]
candidates2 = [output] * 5
performance1 = [0, 0]
performance2 = [(0, 0)] * 5
else:
hist += output.lower() + input
opp_played.append(input)
performance1[0] += score[candidates1[0] + input]
performance1[1] += score[candidates1[1] + input]
for i, p in enumerate(candidates2):
performance2[i] = ({1:performance2[i][0] + 1, 0: performance2[i][0], -1: 0}[score[p + input]],
performance2[i][1] + score[p + input])
index1 = performance1.index(max(performance1))
index2 = performance2.index(max(performance2, key=lambda x: x[0] ** 3 + x[1]))
candidates1[1] = beat[random.choice(opp_played)]
for length in range(min(10, len(hist) - 2), 0, -2):
search = hist[-length:]
idx = hist.rfind(search, 0, -2)
if idx != -1:
my = hist[idx + length].upper()
opp = hist[idx + length + 1]
candidates2[0] = beat[opp]
candidates2[1] = beat[beat[my]]
candidates2[2] = beat2[beat[my] + beat[beat[opp]]]
candidates2[3] = beat2[beat[opp] + beat[beat[my]]]
candidates2[4] = complement[''.join(sorted(set(candidates2[0] + candidates2[1] + candidates2[3])))]
break
else:
        candidates2 = [random.choice(['R', 'P', 'S'])] * 5
candidates1[0] = candidates2[index2]
output = candidates1[index1]
| bsd-3-clause | -7,585,800,557,904,085,000 | 39.313725 | 111 | 0.529669 | false | 2.928775 | false | false | false |
joshwatson/binaryninja-api | python/examples/kaitai/bson.py | 1 | 18953 | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
from .kaitaistruct import __version__ as ks_version, KaitaiStruct, KaitaiStream, BytesIO
import collections
from enum import Enum
if parse_version(ks_version) < parse_version('0.7'):
raise Exception("Incompatible Kaitai Struct Python API: 0.7 or later is required, but you have %s" % (ks_version))
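# Usage sketch (assumption: this debug-style generated code requires an
# explicit _read() call after construction):
#   doc = Bson(KaitaiStream(open("sample.bson", "rb")))
#   doc._read()
#   for element in doc.fields.elements:
#       ...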
class Bson(KaitaiStruct):
"""BSON, short for Binary JSON, is a binary-encoded serialization of JSON-like documents. Like JSON, BSON supports the embedding of documents and arrays within other documents and arrays. BSON also contains extensions that allow representation of data types that are not part of the JSON spec. For example, BSON has a Date type and a BinData type. BSON can be compared to binary interchange formats, like Protocol Buffers. BSON is more "schemaless" than Protocol Buffers, which can give it an advantage in flexibility but also a slight disadvantage in space efficiency (BSON has overhead for field names within the serialized data). BSON was designed to have the following three characteristics:
* Lightweight. Keeping spatial overhead to a minimum is important for any data representation format, especially when used over the network.
* Traversable. BSON is designed to be traversed easily. This is a vital property in its role as the primary data representation for MongoDB.
* Efficient. Encoding data to BSON and decoding from BSON can be performed very quickly in most languages due to the use of C data types.
"""
SEQ_FIELDS = ["len", "fields", "terminator"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['len']['start'] = self._io.pos()
self.len = self._io.read_s4le()
self._debug['len']['end'] = self._io.pos()
self._debug['fields']['start'] = self._io.pos()
self._raw_fields = self._io.read_bytes((self.len - 5))
io = KaitaiStream(BytesIO(self._raw_fields))
self.fields = self._root.ElementsList(io, self, self._root)
self.fields._read()
self._debug['fields']['end'] = self._io.pos()
self._debug['terminator']['start'] = self._io.pos()
self.terminator = self._io.ensure_fixed_contents(b"\x00")
self._debug['terminator']['end'] = self._io.pos()
class Timestamp(KaitaiStruct):
"""Special internal type used by MongoDB replication and sharding. First 4 bytes are an increment, second 4 are a timestamp."""
SEQ_FIELDS = ["increment", "timestamp"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['increment']['start'] = self._io.pos()
self.increment = self._io.read_u4le()
self._debug['increment']['end'] = self._io.pos()
self._debug['timestamp']['start'] = self._io.pos()
self.timestamp = self._io.read_u4le()
self._debug['timestamp']['end'] = self._io.pos()
class BinData(KaitaiStruct):
"""The BSON "binary" or "BinData" datatype is used to represent arrays of bytes. It is somewhat analogous to the Java notion of a ByteArray. BSON binary values have a subtype. This is used to indicate what kind of data is in the byte array. Subtypes from zero to 127 are predefined or reserved. Subtypes from 128-255 are user-defined."""
class Subtype(Enum):
generic = 0
function = 1
byte_array_deprecated = 2
uuid_deprecated = 3
uuid = 4
md5 = 5
custom = 128
SEQ_FIELDS = ["len", "subtype", "content"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['len']['start'] = self._io.pos()
self.len = self._io.read_s4le()
self._debug['len']['end'] = self._io.pos()
self._debug['subtype']['start'] = self._io.pos()
self.subtype = KaitaiStream.resolve_enum(self._root.BinData.Subtype, self._io.read_u1())
self._debug['subtype']['end'] = self._io.pos()
self._debug['content']['start'] = self._io.pos()
_on = self.subtype
if _on == self._root.BinData.Subtype.byte_array_deprecated:
self._raw_content = self._io.read_bytes(self.len)
io = KaitaiStream(BytesIO(self._raw_content))
self.content = self._root.BinData.ByteArrayDeprecated(io, self, self._root)
self.content._read()
else:
self.content = self._io.read_bytes(self.len)
self._debug['content']['end'] = self._io.pos()
class ByteArrayDeprecated(KaitaiStruct):
"""The BSON "binary" or "BinData" datatype is used to represent arrays of bytes. It is somewhat analogous to the Java notion of a ByteArray. BSON binary values have a subtype. This is used to indicate what kind of data is in the byte array. Subtypes from zero to 127 are predefined or reserved. Subtypes from 128-255 are user-defined."""
SEQ_FIELDS = ["len", "content"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['len']['start'] = self._io.pos()
self.len = self._io.read_s4le()
self._debug['len']['end'] = self._io.pos()
self._debug['content']['start'] = self._io.pos()
self.content = self._io.read_bytes(self.len)
self._debug['content']['end'] = self._io.pos()
class ElementsList(KaitaiStruct):
SEQ_FIELDS = ["elements"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['elements']['start'] = self._io.pos()
self.elements = []
i = 0
while not self._io.is_eof():
if not 'arr' in self._debug['elements']:
self._debug['elements']['arr'] = []
self._debug['elements']['arr'].append({'start': self._io.pos()})
_t_elements = self._root.Element(self._io, self, self._root)
_t_elements._read()
self.elements.append(_t_elements)
self._debug['elements']['arr'][len(self.elements) - 1]['end'] = self._io.pos()
i += 1
self._debug['elements']['end'] = self._io.pos()
class Cstring(KaitaiStruct):
SEQ_FIELDS = ["str"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['str']['start'] = self._io.pos()
self.str = (self._io.read_bytes_term(0, False, True, True)).decode(u"UTF-8")
self._debug['str']['end'] = self._io.pos()
class String(KaitaiStruct):
SEQ_FIELDS = ["len", "str", "terminator"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['len']['start'] = self._io.pos()
self.len = self._io.read_s4le()
self._debug['len']['end'] = self._io.pos()
self._debug['str']['start'] = self._io.pos()
self.str = (self._io.read_bytes((self.len - 1))).decode(u"UTF-8")
self._debug['str']['end'] = self._io.pos()
self._debug['terminator']['start'] = self._io.pos()
self.terminator = self._io.ensure_fixed_contents(b"\x00")
self._debug['terminator']['end'] = self._io.pos()
class Element(KaitaiStruct):
class BsonType(Enum):
min_key = -1
end_of_object = 0
number_double = 1
string = 2
document = 3
array = 4
bin_data = 5
undefined = 6
object_id = 7
boolean = 8
utc_datetime = 9
jst_null = 10
reg_ex = 11
db_pointer = 12
javascript = 13
symbol = 14
code_with_scope = 15
number_int = 16
timestamp = 17
number_long = 18
number_decimal = 19
max_key = 127
SEQ_FIELDS = ["type_byte", "name", "content"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['type_byte']['start'] = self._io.pos()
self.type_byte = KaitaiStream.resolve_enum(self._root.Element.BsonType, self._io.read_u1())
self._debug['type_byte']['end'] = self._io.pos()
self._debug['name']['start'] = self._io.pos()
self.name = self._root.Cstring(self._io, self, self._root)
self.name._read()
self._debug['name']['end'] = self._io.pos()
self._debug['content']['start'] = self._io.pos()
_on = self.type_byte
if _on == self._root.Element.BsonType.code_with_scope:
self.content = self._root.CodeWithScope(self._io, self, self._root)
self.content._read()
elif _on == self._root.Element.BsonType.reg_ex:
self.content = self._root.RegEx(self._io, self, self._root)
self.content._read()
elif _on == self._root.Element.BsonType.number_double:
self.content = self._io.read_f8le()
elif _on == self._root.Element.BsonType.symbol:
self.content = self._root.String(self._io, self, self._root)
self.content._read()
elif _on == self._root.Element.BsonType.timestamp:
self.content = self._root.Timestamp(self._io, self, self._root)
self.content._read()
elif _on == self._root.Element.BsonType.number_int:
self.content = self._io.read_s4le()
elif _on == self._root.Element.BsonType.document:
self.content = Bson(self._io)
self.content._read()
elif _on == self._root.Element.BsonType.object_id:
self.content = self._root.ObjectId(self._io, self, self._root)
self.content._read()
elif _on == self._root.Element.BsonType.javascript:
self.content = self._root.String(self._io, self, self._root)
self.content._read()
elif _on == self._root.Element.BsonType.utc_datetime:
self.content = self._io.read_s8le()
elif _on == self._root.Element.BsonType.boolean:
self.content = self._io.read_u1()
elif _on == self._root.Element.BsonType.number_long:
self.content = self._io.read_s8le()
elif _on == self._root.Element.BsonType.bin_data:
self.content = self._root.BinData(self._io, self, self._root)
self.content._read()
elif _on == self._root.Element.BsonType.string:
self.content = self._root.String(self._io, self, self._root)
self.content._read()
elif _on == self._root.Element.BsonType.db_pointer:
self.content = self._root.DbPointer(self._io, self, self._root)
self.content._read()
elif _on == self._root.Element.BsonType.array:
self.content = Bson(self._io)
self.content._read()
elif _on == self._root.Element.BsonType.number_decimal:
self.content = self._root.F16(self._io, self, self._root)
self.content._read()
self._debug['content']['end'] = self._io.pos()
class DbPointer(KaitaiStruct):
SEQ_FIELDS = ["namespace", "id"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['namespace']['start'] = self._io.pos()
self.namespace = self._root.String(self._io, self, self._root)
self.namespace._read()
self._debug['namespace']['end'] = self._io.pos()
self._debug['id']['start'] = self._io.pos()
self.id = self._root.ObjectId(self._io, self, self._root)
self.id._read()
self._debug['id']['end'] = self._io.pos()
class U3(KaitaiStruct):
"""Implements unsigned 24-bit (3 byte) integer.
"""
SEQ_FIELDS = ["b1", "b2", "b3"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['b1']['start'] = self._io.pos()
self.b1 = self._io.read_u1()
self._debug['b1']['end'] = self._io.pos()
self._debug['b2']['start'] = self._io.pos()
self.b2 = self._io.read_u1()
self._debug['b2']['end'] = self._io.pos()
self._debug['b3']['start'] = self._io.pos()
self.b3 = self._io.read_u1()
self._debug['b3']['end'] = self._io.pos()
@property
def value(self):
if hasattr(self, '_m_value'):
return self._m_value if hasattr(self, '_m_value') else None
self._m_value = ((self.b1 | (self.b2 << 8)) | (self.b3 << 16))
return self._m_value if hasattr(self, '_m_value') else None
class CodeWithScope(KaitaiStruct):
SEQ_FIELDS = ["id", "source", "scope"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['id']['start'] = self._io.pos()
self.id = self._io.read_s4le()
self._debug['id']['end'] = self._io.pos()
self._debug['source']['start'] = self._io.pos()
self.source = self._root.String(self._io, self, self._root)
self.source._read()
self._debug['source']['end'] = self._io.pos()
self._debug['scope']['start'] = self._io.pos()
self.scope = Bson(self._io)
self.scope._read()
self._debug['scope']['end'] = self._io.pos()
class F16(KaitaiStruct):
"""128-bit IEEE 754-2008 decimal floating point."""
SEQ_FIELDS = ["str", "exponent", "significand_hi", "significand_lo"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['str']['start'] = self._io.pos()
self.str = self._io.read_bits_int(1) != 0
self._debug['str']['end'] = self._io.pos()
self._debug['exponent']['start'] = self._io.pos()
self.exponent = self._io.read_bits_int(15)
self._debug['exponent']['end'] = self._io.pos()
self._debug['significand_hi']['start'] = self._io.pos()
self.significand_hi = self._io.read_bits_int(49)
self._debug['significand_hi']['end'] = self._io.pos()
self._io.align_to_byte()
self._debug['significand_lo']['start'] = self._io.pos()
self.significand_lo = self._io.read_u8le()
self._debug['significand_lo']['end'] = self._io.pos()
class ObjectId(KaitaiStruct):
"""https://docs.mongodb.com/manual/reference/method/ObjectId/."""
SEQ_FIELDS = ["epoch_time", "machine_id", "process_id", "counter"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['epoch_time']['start'] = self._io.pos()
self.epoch_time = self._io.read_u4le()
self._debug['epoch_time']['end'] = self._io.pos()
self._debug['machine_id']['start'] = self._io.pos()
self.machine_id = self._root.U3(self._io, self, self._root)
self.machine_id._read()
self._debug['machine_id']['end'] = self._io.pos()
self._debug['process_id']['start'] = self._io.pos()
self.process_id = self._io.read_u2le()
self._debug['process_id']['end'] = self._io.pos()
self._debug['counter']['start'] = self._io.pos()
self.counter = self._root.U3(self._io, self, self._root)
self.counter._read()
self._debug['counter']['end'] = self._io.pos()
class RegEx(KaitaiStruct):
SEQ_FIELDS = ["pattern", "options"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['pattern']['start'] = self._io.pos()
self.pattern = self._root.Cstring(self._io, self, self._root)
self.pattern._read()
self._debug['pattern']['end'] = self._io.pos()
self._debug['options']['start'] = self._io.pos()
self.options = self._root.Cstring(self._io, self, self._root)
self.options._read()
self._debug['options']['end'] = self._io.pos()
| mit | -7,375,716,033,907,808,000 | 46.029777 | 699 | 0.542289 | false | 3.751584 | false | false | false |
shopkeep/shpkpr | shpkpr/deployment/standard.py | 1 | 1338 | # stdlib imports
import logging
logger = logging.getLogger(__name__)
class StandardDeployment(object):
"""StandardDeployment implements Marathon's basic deployment workflow and
uses the primitives provided by the Marathon API to perform a standard
rolling deploy according to application settings.
This deployment strategy is best suited for non-web-facing applications or
those that can tolerate minor downtime during deployment e.g. consumer or
worker applications.
"""
def __init__(self, marathon_client, timeout, app_definitions, **kw):
self.marathon_client = marathon_client
self.timeout = timeout
self.app_definitions = app_definitions
def execute(self, force=False):
"""Execute standard Marathon deployment.
"""
app_ids = ", ".join([a["id"] for a in self.app_definitions])
logger.info("Executing standard deployment: {0}".format(app_ids))
deployment = self.marathon_client.deploy(
self.app_definitions,
force=force,
)
logger.info("Waiting for marathon deployment to complete: {0}".format(deployment.deployment_id))
result = deployment.wait(timeout=self.timeout)
logger.info("Marathon deployment complete: {0}".format(deployment.deployment_id))
return result
| mit | -2,837,895,533,692,400,000 | 35.162162 | 104 | 0.684604 | false | 4.474916 | false | false | false |
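# --- Editor's usage sketch (not part of shpkpr) ---
# Shows how StandardDeployment above is driven. The fake client below is only a
# stand-in for shpkpr's real Marathon client; all it needs to provide is the
# interface execute() relies on: deploy(app_definitions, force=...) returning an
# object with a deployment_id and a wait(timeout=...) method.
class _FakeDeployment(object):
    deployment_id = "0000-example"

    def wait(self, timeout=None):
        return True


class _FakeMarathonClient(object):
    def deploy(self, app_definitions, force=False):
        return _FakeDeployment()


if __name__ == "__main__":
    apps = [{"id": "/example-app", "cmd": "sleep 3600", "instances": 2}]
    deployment = StandardDeployment(_FakeMarathonClient(), timeout=300, app_definitions=apps)
    print(deployment.execute())  # -> True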
andreagrandi/drf3-test | drftest/shop/tests/factories.py | 1 | 1174 | import factory
from django.contrib.auth.models import User
from django.utils.timezone import now
from shop.models import (Product, Stamp, Order, OrderDetails, Voucher)
class UserFactory(factory.DjangoModelFactory):
FACTORY_FOR = User
first_name = 'DRF'
last_name = 'Test'
username = 'drftest'
password = 'drftest'
is_active = True
is_superuser = False
last_login = now()
date_joined = now()
class ProductFactory(factory.DjangoModelFactory):
FACTORY_FOR = Product
name = "Product 1"
collect_stamp = True
class StampFactory(factory.DjangoModelFactory):
FACTORY_FOR = Stamp
user = factory.SubFactory(UserFactory)
redeemed = False
class OrderFactory(factory.DjangoModelFactory):
FACTORY_FOR = Order
user = factory.SubFactory(UserFactory)
date = now()
class OrderDetailsFactory(factory.DjangoModelFactory):
FACTORY_FOR = OrderDetails
order = factory.SubFactory(OrderFactory)
product = factory.SubFactory(ProductFactory)
quantity = 4
class VoucherFactory(factory.DjangoModelFactory):
FACTORY_FOR = Voucher
user = factory.SubFactory(UserFactory)
redeemed = False
| mit | 1,264,818,017,404,281,300 | 21.150943 | 70 | 0.719761 | false | 3.750799 | false | false | false |
jinook0707/CATOS_alpha | 2016_paper/utils/Util_m_drawer.py | 1 | 10078 | '''
Util_m_drawer.py
This is for generating a JPG file per a MP4 file.
The generated JPG file shows the movement pattern
appeared thoughout the MP4 file.
(Movement summary of the movie file)
--------------------------------------------------------------------
Copyright (C) 2013 Jinook Oh ([email protected])
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os, sys
from glob import glob
from math import sqrt
from copy import copy
import cv ###
FLAG_DISPLAY_JPG = False
#------------------------------------------------------------------------------------
class M_drawer:
def __init__(self, target_path):
self.target_path = target_path
if FLAG_DISPLAY_JPG == True: cv.NamedWindow('disp', cv.CV_WINDOW_NORMAL)
if FLAG_DISPLAY_JPG == True: cv.MoveWindow('disp', 50, 50)
self.total_nFrames = 0
#---------------------------------------------------------------------------------
def get_FG_rect_value(self, inString):
# get overall foreground rect value
if inString.strip() == '(-1/ -1/ -1/ -1)': return -1
else:
b_rect = inString.replace("(","").replace(")","").split("/")
b_rect = (int(b_rect[0])*2, int(b_rect[1])*2, int(b_rect[2])*2, int(b_rect[3])*2)
return b_rect
#---------------------------------------------------------------------------------
def get_FGB_center_pt(self, fgb_pts):
# get each foreground blob's center point
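# e.g. a square blob outline [(0, 0), (10, 0), (10, 10), (0, 10)] averages to
# the centre point [5, 5] (illustrative values only)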
number_of_pts = len(fgb_pts)
x = 0; y = 0
for i in range(number_of_pts):
x += fgb_pts[i][0]
y += fgb_pts[i][1]
x = x/number_of_pts
y = y/number_of_pts
return [x,y]
#---------------------------------------------------------------------------------
def get_img_from_video(self, cap_vf):
frames = []
nFrames = int(cv.GetCaptureProperty(cap_vf, cv.CV_CAP_PROP_FRAME_COUNT))
fps = int(cv.GetCaptureProperty(cap_vf, cv.CV_CAP_PROP_FPS))
self.total_nFrames += nFrames
for i in xrange(nFrames):
frame = cv.QueryFrame(cap_vf)
if i == 0 or i == nFrames/2: # store first, middle frame
frames.append(cv.CloneImage(frame))
### try to store the last frame, but if it fails, ignore it.
if i == nFrames-1:
try: frames.append(cv.CloneImage(frame))
except: pass
### Add differences between the middle-first & last-first onto the first frame
diff_img0 = cv.CreateImage(cv.GetSize(frames[0]), 8, 3)
diff_img1 = cv.CreateImage(cv.GetSize(frames[0]), 8, 3)
cv.AbsDiff(frames[0], frames[1], diff_img0)
if len(frames) > 2: cv.AbsDiff(frames[0], frames[2], diff_img1)
cv.Add(diff_img0, frames[0], frames[0])
if len(frames) > 2: cv.Add(diff_img1, frames[0], frames[0])
return frames[0]
#---------------------------------------------------------------------------------
def run(self):
#first_img_path = os.path.join(self.target_path, '_first_color_img_cID00.jpg')
#first_img = cv.LoadImage(first_img_path)
d_color1 = (0, 0, 0)
d_color2 = (255, 255, 255)
cat_margin = 30 # due to dilate function, etc
font = cv.InitFont(cv.CV_FONT_HERSHEY_PLAIN, 1, 1, 0, 3, 8)
dr_cnt = 0 # counter for drawing
for f in glob(os.path.join(self.target_path, '*_MR.csv')): # open all the MovementRecord files
mr_f = open(f, 'r')
mr_f_lines = mr_f.readlines()
jpg_file = f.replace(".csv", ".jpg")
if not os.path.isfile(jpg_file): # jpg file doesn't exist
video_file = f.replace("_MR.csv", ".mp4")
if os.path.isfile(video_file): # video file exist
cap_vf = cv.CaptureFromFile(video_file)
img_from_video = self.get_img_from_video(cap_vf)
last_center_pt = (-1,-1)
last_center_pt_b = (-1,-1)
last_center_pt_white = (-1,-1)
last_center_pt_black = (-1,-1)
lines_cnt = len(mr_f_lines)
for i in range(2, lines_cnt):
items = mr_f_lines[i].split(",")
if len(items) > 1:
number_of_blobs = int(items[4])
#d_color_e_dec = 255-(float(i)/lines_cnt*255)
d_color_e_inc = float(i)/lines_cnt*255
d_color = (d_color_e_inc, d_color_e_inc, d_color_e_inc)
'''
### Drawing for movement rects
b_rect = items[0].replace("(","").replace(")","").split("/")
b_rect = (int(b_rect[0]), int(b_rect[1]), int(b_rect[2]), int(b_rect[3]))
d_color_e = 255-(float(i+1)/lines_cnt*255)
d_color = (d_color_e, d_color_e, d_color_e)
cv.Rectangle(img_from_video, (b_rect[0],b_rect[1]), (b_rect[0]+b_rect[2],b_rect[1]+b_rect[3]), d_color1, 1)
center_pt = items[1].split("/")
center_pt = (int(center_pt[0]), int(center_pt[1]))
cv.Circle(img_from_video, center_pt, 3, d_color, 1)
if last_center_pt != (-1,-1): cv.Line(img_from_video, last_center_pt, center_pt, d_color, 1)
last_center_pt = tuple(center_pt)
'''
'''
### rect bounding all the foreground features
b_rect = self.get_FG_rect_value(items[2])
#cv.Rectangle(img_from_video, (b_rect[0],b_rect[1]), (b_rect[0]+b_rect[2],b_rect[1]+b_rect[3]), d_color, 1)
if b_rect != -1:
### Drawing the center point of the movement whole bounding rect (and the connecting lines between the center points.)
center_pt = items[3].split("/")
center_pt = (int(center_pt[0])*2, int(center_pt[1])*2)
cv.Circle(img_from_video, center_pt, 5, d_color, 2)
if last_center_pt_b != (-1,-1): cv.Line(img_from_video, last_center_pt_b, center_pt, d_color, 1)
last_center_pt_b = tuple(center_pt)
'''
### rects for each foreground blob
fB_grouped_pts = eval(items[8].replace("/", ","))
if fB_grouped_pts != []:
for fB_idx in range(len(fB_grouped_pts)):
fgb_pts = fB_grouped_pts[fB_idx]
if fgb_pts != '[]': fgb_center_pt = self.get_FGB_center_pt(fgb_pts)
fgb_center_pt = (fgb_center_pt[0]*2, fgb_center_pt[1]*2)
cv.Circle(img_from_video, fgb_center_pt, 3, d_color, 2)
if fB_idx > 0:
cv.Line(img_from_video, last_fgb_center_pt, fgb_center_pt, d_color, 1)
last_fgb_center_pt = copy(fgb_center_pt)
### Drawing the center point of the whitish blob
if items[7].strip() == '-1/-1': wbpt = (-1,-1)
else:
wbpt = items[7].split("/") # white blob center point
wbpt = (int(wbpt[0])*2, int(wbpt[1])*2)
if wbpt != (-1, -1):
# draw a rectangle at the center of white blob(s)
cv.Rectangle(img_from_video, (wbpt[0]-2,wbpt[1]-2), (wbpt[0]+2,wbpt[1]+2), (0,0,125), 1)
if FLAG_DISPLAY_JPG == True: cv.ShowImage('disp', img_from_video)
mr_img_path = f.replace(".csv", ".jpg")
cv.SaveImage(mr_img_path, img_from_video)
dr_cnt += 1
print "An image, %s, is generated."%mr_img_path
mr_f.close()
else:
# Video file doesn't exist (means it wasn't generated due to lack of enough JPEG files)
# Usually this means a meaninglessly short recording (or no recording at all).
mr_f.close()
os.remove(f) # remove the MR-csv file
for f in glob(os.path.join(self.target_path, '*.log')):
log_f = open(f, 'a')
log_f.write("\n* Total number of frames of movie files: %i\n"%(self.total_nFrames))
log_f.close()
print "Number of images generated: %i"%(dr_cnt)
print "Image drawing process is complete."
#------------------------------------------------------------------------------------
if __name__ == '__main__':
path = os.getcwd()
input_path = ''
if len(sys.argv) > 1:
for i in range(1, len(sys.argv)): input_path += sys.argv[i] + ' '
input_path = input_path.strip()
else: input_path = 'output'
path = os.path.join(path, input_path)
mDrawer = M_drawer(path)
mDrawer.run()
| gpl-3.0 | 8,860,168,671,217,057,000 | 48.64532 | 150 | 0.465668 | false | 3.742295 | false | false | false |
shiehinms/vminspector | ui/ui_newconnectdlg.py | 1 | 2422 | # -*- coding: utf-8 -*-
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class Ui_NewConnectDlg(object):
def setupUi(self, NewConnectDlg):
NewConnectDlg.setWindowTitle(u"Connect VHD")
self.gridlayout = QGridLayout(NewConnectDlg)
self.gridlayout.setMargin(9)
self.gridlayout.setSpacing(6)
urlLabel = QLabel(u"URL*:")
self.urlLineEdit = QLineEdit()
urlLabel.setBuddy(self.urlLineEdit)
self.gridlayout.addWidget(urlLabel, 0, 0)
self.gridlayout.addWidget(self.urlLineEdit, 0, 1)
accountkeyLabel = QLabel(u"ACCOUNT_KEY:")
self.accountkeyLineEdit = QLineEdit()
accountkeyLabel.setBuddy(self.accountkeyLineEdit)
self.gridlayout.addWidget(accountkeyLabel, 1, 0)
self.gridlayout.addWidget(self.accountkeyLineEdit, 1, 1)
filenameLabel = QLabel(u"FILENAME:")
self.filenameLineEdit = QLineEdit()
filenameLabel.setBuddy(self.filenameLineEdit)
self.gridlayout.addWidget(filenameLabel, 2, 0)
self.gridlayout.addWidget(self.filenameLineEdit, 2, 1)
pathLabel = QLabel(u"PATH*:")
self.pathLineEdit = QLineEdit()
pathLabel.setBuddy(self.pathLineEdit)
self.gridlayout.addWidget(pathLabel, 3, 0)
self.gridlayout.addWidget(self.pathLineEdit, 3, 1)
extensionLabel = QLabel(u"EXTENSION:")
self.extensionLineEdit = QLineEdit()
extensionLabel.setBuddy(self.extensionLineEdit)
self.gridlayout.addWidget(extensionLabel, 4, 0)
self.gridlayout.addWidget(self.extensionLineEdit, 4, 1)
typeLabel = QLabel(u"TYPE:")
self.typeLineEdit = QLineEdit()
typeLabel.setBuddy(self.typeLineEdit)
self.gridlayout.addWidget(typeLabel, 5, 0)
self.gridlayout.addWidget(self.typeLineEdit, 5, 1)
self.buttonBox = QDialogButtonBox(NewConnectDlg)
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Cancel|
QDialogButtonBox.NoButton|QDialogButtonBox.Ok)
self.gridlayout.addWidget(self.buttonBox, 6, 1)
QObject.connect(self.buttonBox, SIGNAL("accepted()"),
NewConnectDlg.accept)
QObject.connect(self.buttonBox, SIGNAL("rejected()"),
NewConnectDlg.reject)
QMetaObject.connectSlotsByName(NewConnectDlg)
| apache-2.0 | 3,554,032,290,002,449,400 | 39.366667 | 66 | 0.669282 | false | 3.844444 | false | false | false |
PaddlePaddle/models | PaddleCV/tracking/ltr/models/bbreg/atom_iou_net.py | 1 | 12326 | """
The implementation of the ATOM IoU network.
"""
import paddle
import paddle.fluid as fluid
import paddle.fluid.dygraph.nn as nn
import numpy as np
import os.path as osp
import sys
CURRENT_DIR = osp.dirname(__file__)
sys.path.append(osp.join(CURRENT_DIR, '..', '..', '..'))
def weight_init():
init = fluid.initializer.MSRAInitializer(uniform=False)
param = fluid.ParamAttr(initializer=init)
return param
def bias_init():
init = fluid.initializer.ConstantInitializer(value=0.)
param = fluid.ParamAttr(initializer=init)
return param
def norm_weight_init():
# init = fluid.initializer.ConstantInitializer(1.0)
init = fluid.initializer.Uniform(low=0., high=1.)
param = fluid.ParamAttr(initializer=init)
return param
def norm_bias_init():
init = fluid.initializer.ConstantInitializer(value=0.)
param = fluid.ParamAttr(initializer=init)
return param
class ConvBNReluLayer(fluid.dygraph.Layer):
def __init__(self,
in_channels,
out_channels,
filter_size,
stride=1,
groups=1,
padding=1,
is_test=False):
super(ConvBNReluLayer, self).__init__()
self.conv = nn.Conv2D(
num_channels=in_channels,
filter_size=filter_size,
num_filters=out_channels,
stride=stride,
padding=padding,
groups=groups,
bias_attr=bias_init(),
param_attr=weight_init())
self.bn = nn.BatchNorm(
out_channels,
param_attr=norm_weight_init(),
bias_attr=norm_bias_init(),
act=None,
momentum=0.9,
use_global_stats=is_test)
def forward(self, inputs):
res = self.conv(inputs)
self.conv_res = res
res = self.bn(res)
res = fluid.layers.relu(res)
return res
class FCBNReluLayer(fluid.dygraph.Layer):
def __init__(self,
in_channels,
out_channels,
in_size,
is_bias=True,
is_bn=True,
is_relu=True,
is_test=False):
super(FCBNReluLayer, self).__init__()
self.is_bn = is_bn
self.is_relu = is_relu
if is_bias:
bias_init = fluid.ParamAttr(
initializer=fluid.initializer.ConstantInitializer(0.))
else:
bias_init = False
self.linear = nn.Linear(
in_channels * in_size * in_size, out_channels, bias_attr=bias_init)
self.bn = nn.BatchNorm(
out_channels,
param_attr=norm_weight_init(),
bias_attr=norm_bias_init(),
act=None,
momentum=0.9,
use_global_stats=is_test)
def forward(self, x):
x = fluid.layers.reshape(x, [x.shape[0], -1])
x = self.linear(x)
if self.is_bn:
x = self.bn(x)
if self.is_relu:
x = fluid.layers.relu(x)
return x
class AtomIouNet(fluid.dygraph.Layer):
def __init__(self,
name,
input_dim=(128, 256),
pred_input_dim=(256, 256),
pred_inter_dim=(256, 256),
is_test=False):
super(AtomIouNet, self).__init__(name)
self.name = self.full_name()
self.conv3_1r = ConvBNReluLayer(
input_dim[0], 128, filter_size=3, stride=1, is_test=is_test)
self.conv3_1t = ConvBNReluLayer(
input_dim[0], 256, filter_size=3, stride=1, is_test=is_test)
self.conv3_2t = ConvBNReluLayer(
256, pred_input_dim[0], filter_size=3, stride=1, is_test=is_test)
self.fc3_1r = ConvBNReluLayer(
128, 256, filter_size=3, stride=1, padding=0, is_test=is_test)
self.conv4_1r = ConvBNReluLayer(
input_dim[1], 256, filter_size=3, stride=1, is_test=is_test)
self.conv4_1t = ConvBNReluLayer(
input_dim[1], 256, filter_size=3, stride=1, is_test=is_test)
self.conv4_2t = ConvBNReluLayer(
256, pred_input_dim[1], filter_size=3, stride=1, is_test=is_test)
self.fc34_3r = ConvBNReluLayer(
512,
pred_input_dim[0],
filter_size=1,
stride=1,
padding=0,
is_test=is_test)
self.fc34_4r = ConvBNReluLayer(
512,
pred_input_dim[1],
filter_size=1,
stride=1,
padding=0,
is_test=is_test)
self.fc3_rt = FCBNReluLayer(
pred_input_dim[0], pred_inter_dim[0], in_size=5, is_test=is_test)
self.fc4_rt = FCBNReluLayer(
pred_input_dim[1], pred_inter_dim[1], in_size=3, is_test=is_test)
bias_init = fluid.initializer.ConstantInitializer(0.)
self.iou_predictor = nn.Linear(
pred_inter_dim[0] + pred_inter_dim[1], 1, bias_attr=bias_init)
self.outs = {}
def predict_iou(self, filter, feat2, proposals):
"""
predicts IOU for the given proposals
:param filter: modulation vectors (fc34_3_r, fc34_4_r) for the targets. Dims (batch, feature_dim).
:param feat2: IoU features (from get_iou_feat) for the test images. Dims (batch, feature_dim, H, W).
:param proposals: Proposal boxes for which the IoU will be predicted (batch, num_proposals, 4).
:return:
"""
fc34_3_r, fc34_4_r = filter
c3_t, c4_t = feat2
batch_size = c3_t.shape[0]
# Modulation
c3_t_att = c3_t * fluid.layers.reshape(fc34_3_r, [batch_size, -1, 1, 1])
c4_t_att = c4_t * fluid.layers.reshape(fc34_4_r, [batch_size, -1, 1, 1])
# add batch roi nums
num_proposals_per_batch = proposals.shape[1]
batch_roi_nums = np.array([num_proposals_per_batch] *
batch_size).astype(np.int64)
batch_roi_nums = fluid.dygraph.to_variable(batch_roi_nums)
# input proposals2 is in format xywh, convert it to x0y0x1y1 format
proposals_xyxy = fluid.layers.concat(
[
proposals[:, :, 0:2],
proposals[:, :, 0:2] + proposals[:, :, 2:4]
],
axis=2)
roi2 = fluid.layers.reshape(proposals_xyxy, [-1, 4])
roi2.stop_gradient = False
roi3t = fluid.layers.prroi_pool(
c3_t_att, roi2, 1 / 8., 5, 5, batch_roi_nums=batch_roi_nums)
roi4t = fluid.layers.prroi_pool(
c4_t_att, roi2, 1 / 16., 3, 3, batch_roi_nums=batch_roi_nums)
fc3_rt = self.fc3_rt(roi3t)
fc4_rt = self.fc4_rt(roi4t)
fc34_rt_cat = fluid.layers.concat([fc3_rt, fc4_rt], axis=1)
iou_pred = self.iou_predictor(fc34_rt_cat)
iou_pred = fluid.layers.reshape(iou_pred,
[batch_size, num_proposals_per_batch])
return iou_pred
def forward(self, feat1, feat2, bb1, proposals2):
"""Runs the ATOM IoUNet during training operation.
This forward pass is mainly used for training. Call the individual functions during tracking instead.
args:
feat1: Variable, Features from the reference frames (4 or 5 dims).
feat2: Variable, Features from the test frames (4 or 5 dims).
bb1: Target boxes (x,y,w,h) in image coords in the reference samples. Dims (images, sequences, 4).
proposals2: Proposal boxes for which the IoU will be predicted (images, sequences, num_proposals, 4)."""
assert len(feat1[0].shape) == 5, 'Expect 5 dimensional feat1'
num_test_images = feat2[0].shape[0]
batch_size = feat2[0].shape[1]
# Extract first train sample
feat1 = [f[0] for f in feat1]
bb1 = bb1[0]
# Get modulation vector
modulation = self.get_filter(feat1, bb1)
feat2 = [
fluid.layers.reshape(f,
(batch_size * num_test_images, *f.shape[-3:]))
for f in feat2
]
iou_feat = self.get_iou_feat(feat2)
new_modulation = []
for i in range(0, len(modulation)):
tmp = modulation[i]
tmp = fluid.layers.reshape(tmp, [1, batch_size, -1])
tmp = fluid.layers.expand(tmp, [num_test_images, 1, 1])
tmp = fluid.layers.reshape(tmp, [batch_size * num_test_images, -1])
new_modulation.append(tmp)
proposals2 = fluid.layers.reshape(
proposals2, [batch_size * num_test_images, -1, 4])
pred_iou = self.predict_iou(new_modulation, iou_feat, proposals2)
pred_iou = fluid.layers.reshape(pred_iou,
[num_test_images, batch_size, -1])
return pred_iou
def get_filter(self, feat1, bb1):
"""
get modulation feature [feature1, feature2] for the targets
:param feat1: variable, Backbone features from reference images. shapes (batch, feature_dim, H, W).
:param bb1: variable, Target boxes (x,y,w,h) in image coords in the reference samples. shapes (batch, 4).
:return:
"""
feat3_r, feat4_r = feat1
c3_r = self.conv3_1r(feat3_r)
# Add batch_index to rois
batch_size = bb1.shape[0]
batch_roi_nums = np.array([1] * batch_size).astype(np.int64)
batch_roi_nums = fluid.dygraph.to_variable(batch_roi_nums)
# input bb is in format xywh, convert it to x0y0x1y1 format
roi1 = fluid.layers.concat(
[bb1[:, 0:2], bb1[:, 0:2] + bb1[:, 2:4]], axis=1)
roi1.stop_gradient = False
roi3r = fluid.layers.prroi_pool(c3_r, roi1, 1 / 8., 3, 3,
batch_roi_nums)
c4_r = self.conv4_1r(feat4_r)
roi4r = fluid.layers.prroi_pool(c4_r, roi1, 1 / 16., 1, 1,
batch_roi_nums)
fc3_r = self.fc3_1r(roi3r)
# Concatenate
fc34_r = fluid.layers.concat([fc3_r, roi4r], axis=1)
fc34_3_r = self.fc34_3r(fc34_r)
fc34_4_r = self.fc34_4r(fc34_r)
return fc34_3_r, fc34_4_r
def get_iou_feat(self, feat2):
"""
Get IoU prediction features from a 4 or 5 dimensional backbone input.
:param feat2: variable, Backbone features from reference images. [feature1, feature2]
:return: features, variable
"""
feat3_t, feat4_t = feat2
c3_t = self.conv3_2t(self.conv3_1t(feat3_t))
c4_t = self.conv4_2t(self.conv4_1t(feat4_t))
return c3_t, c4_t
def atom_iounet(name,
input_dim=(128, 256),
pred_input_dim=(256, 256),
pred_inter_dim=(256, 256)):
return AtomIouNet(
name,
input_dim=input_dim,
pred_input_dim=pred_input_dim,
pred_inter_dim=pred_inter_dim)
def test_paddle_iounet():
a = np.random.uniform(-1, 1, [1, 1, 512, 18, 18]).astype(np.float32)
b = np.random.uniform(-1, 1, [1, 1, 1024, 9, 9]).astype(np.float32)
bbox = [[3, 4, 10, 11]]
proposal_bbox = [[4, 5, 11, 12] * 16]
bbox = np.reshape(np.array(bbox), [1, 1, 4]).astype(np.float32)
proposal_bbox = np.reshape(np.array(proposal_bbox),
[1, 16, 4]).astype(np.float32)
with fluid.dygraph.guard():
a_pd = fluid.dygraph.to_variable(a)
b_pd = fluid.dygraph.to_variable(b)
bbox_pd = fluid.dygraph.to_variable(bbox)
proposal_bbox_pd = fluid.dygraph.to_variable(proposal_bbox)
feat1 = [a_pd, b_pd]
feat2 = [a_pd, b_pd]
model = AtomIouNet('IOUNet', input_dim=(512, 1024))
res = model(feat1, feat2, bbox_pd, proposal_bbox_pd)
print(res.shape)
params = model.state_dict()
for v in params:
print(v, '\t', params[v].shape)
print(len(params))
if __name__ == '__main__':
test_paddle_iounet()
| apache-2.0 | -1,201,066,844,394,714,600 | 33.217143 | 117 | 0.536589 | false | 3.323268 | true | false | false |
mnori/foldatlas | foldatlas/controllers.py | 1 | 31387 | from sqlalchemy import and_
import json
import uuid
import settings
import os
from models import Feature, Transcript, NucleotideMeasurementSet, Structure, \
GeneLocation, NucleotideMeasurementRun, StructurePredictionRun, \
values_str_unpack_float, values_str_unpack_int, RawReactivities, RawReplicateCounts, Bppm, \
AlignmentEntry  # used by AlignmentView below (assumed to be defined in models like the other entities)
from utils import ensure_dir, insert_newlines, build_dot_bracket
import database
from database import db_session
# Fetches sequence annotation data from the DB and sends it to the genome
# browser front end as JSON.
class GenomeBrowser():
def get_transcripts(self, request):
chromosome_id = "Chr"+str(int(request.args.get('chr'))) # SQL-injection safe
start = int(request.args.get('start'))
end = int(request.args.get('end'))
# Retrieve features using the gene location cache table
sql = ( "SELECT feature.* "
"FROM gene_location, transcript, feature "
"WHERE gene_location.strain_id = '"+settings.reference_strain_id+"' "
"AND gene_location.chromosome_id = '"+chromosome_id+"' "
"AND gene_location.end > '"+str(start)+"' "
"AND gene_location.start < '"+str(end)+"' "
"AND gene_location.gene_id = transcript.gene_id "
"AND transcript.id = feature.transcript_id "
"AND feature.strain_id = '"+settings.reference_strain_id+"'")
results = database.engine.execute(sql)
# collect transcript data
transcripts = {}
feature_rows = []
for result in results:
if result.transcript_id not in transcripts:
transcripts[result.transcript_id] = {
"Parent": result.transcript_id,
"feature_type": "transcript", # without this, it won't draw
"direction": result.direction,
"start": None,
"end": None,
"id": result.transcript_id
}
transcript = transcripts[result.transcript_id]
# keep track of total start and end
if transcript["start"] == None or result.start < transcript["start"]:
transcript["start"] = result.start
if transcript["end"] == None or result.end > transcript["end"]:
transcript["end"] = result.end
feature_rows.append(result)
out = []
# add the transcript metadata to the output. make sure the transcripts are added
# in alphabetical order
transcript_ids = []
for transcript_id in transcripts:
transcript_ids.append(transcript_id)
transcript_ids = sorted(transcript_ids)
for transcript_id in transcript_ids:
out.append(transcripts[transcript_id])
# also add all the feature metadata to the output
for feature_row in feature_rows:
out.append({
"Parent": feature_row.transcript_id,
"feature_type": feature_row.type_id,
"direction": result.direction,
"start": feature_row.start,
"end": feature_row.end,
"id": feature_row.transcript_id+"-"+str(feature_row.id)
})
return json.dumps(out)
def get_genes(self, request):
from utils import Timeline
chromosome_id = "Chr"+str(int(request.args.get('chr'))) # SQL-injection safe
start = int(request.args.get('start'))
end = int(request.args.get('end'))
# fetch gene data from the location cache table.
sql = ( "SELECT * FROM gene_location "
"WHERE strain_id = '"+settings.reference_strain_id+"' "
"AND chromosome_id = '"+chromosome_id+"' "
"AND end > '"+str(start)+"' "
"AND start < '"+str(end)+"'")
results = database.engine.execute(sql)
out = []
for result in results:
out.append({
"feature_type": "gene", # without this, it won't draw
"direction": result.direction,
"id": result.gene_id,
"start": result.start,
"end": result.end,
})
buf = json.dumps(out)
return buf
# Fetch chromosome IDs and their lengths. Used for chromosome menu and also initialising the genome browser.
def get_chromosomes(self):
sql = ( "SELECT chromosome_id, CHAR_LENGTH(sequence) length FROM chromosome "
"WHERE strain_id = '"+settings.reference_strain_id+"' "
"ORDER BY chromosome_id ASC")
results = database.engine.execute(sql)
out = []
for result in results:
out.append({
"chromosome_id": result.chromosome_id,
"length": result.length,
"int_id": int(result.chromosome_id[3])
})
return out
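# --- Editor's note: illustrative wiring sketch (not part of FoldAtlas) ---
# get_transcripts() and get_genes() above take a Flask-style `request` (with
# chr/start/end query parameters) and return JSON strings, so a hypothetical
# view function in the web app could look roughly like this; the route path and
# `app` object are assumptions for illustration only:
#
#   @app.route("/ajax/transcripts")
#   def ajax_transcripts():
#       from flask import request
#       return GenomeBrowser().get_transcripts(request)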
class TranscriptView():
def __init__(self, transcript_id):
self.transcript_id = transcript_id
# Get the coords of the associated gene
data = db_session \
.query(Transcript, GeneLocation) \
.filter(
Transcript.id==transcript_id,
Transcript.gene_id==GeneLocation.gene_id,
GeneLocation.strain_id==settings.reference_strain_id
) \
.all()
self.gene_id = data[0][1].gene_id
self.transcript_data = json.dumps({
"gene_id": self.gene_id,
"transcript_id": transcript_id,
"chromosome_id": data[0][1].chromosome_id,
"start": data[0][1].start,
"end": data[0][1].end
})
self.structure_view = StructureView(self.transcript_id, settings.reference_strain_id)
self.nucleotide_measurement_view = NucleotideMeasurementView(self.transcript_id, settings.reference_strain_id)
self.empty = self.structure_view.empty and self.nucleotide_measurement_view.empty
# disable alignment view... revisit later with SNPstructure
# self.alignment_view = AlignmentView(self.transcript_id)
class NucleotideMeasurementView():
def __init__(self, transcript_id, strain_id):
self.transcript_id = transcript_id
self.strain_id = strain_id
self.build_entries([1])
def build_entries(self, experiment_ids):
from models import NucleotideMeasurementRun
# Load experiments
experiments = db_session \
.query(NucleotideMeasurementRun) \
.filter(NucleotideMeasurementRun.id.in_(experiment_ids)) \
.all()
# Load measurements
seq_str = str(Transcript(self.transcript_id).get_sequence(self.strain_id).seq)
measurements_data = db_session \
.query(NucleotideMeasurementSet) \
.filter(
NucleotideMeasurementSet.nucleotide_measurement_run_id.in_(experiment_ids),
NucleotideMeasurementSet.transcript_id==self.transcript_id
) \
.all()
data = {}
# Populate experiment rows
for experiment in experiments:
experiment_data = {
"id": experiment.id,
"description": experiment.description,
"data": []
}
for n in range(len(seq_str)): # initialise the array
experiment_data["data"].append({
"position": n,
"nuc": seq_str[n],
"measurement": None
})
data[experiment.id] = experiment_data
# Add measurements to each experiment json element
# Loop since we might be dealing with > 1 measurement set
for measurement_set in measurements_data:
experiment_id = measurement_set.nucleotide_measurement_run_id
measurements = values_str_unpack_float(measurement_set.values)
for pos in range(0, len(measurements)):
measurement = measurements[pos]
data[experiment_id]["data"][pos]["measurement"] = measurement
# For each experiment, check whether there is no data and set empty flags accordingly.
self.empty = True # all empty flag
for experiment_id in data:
entry = data[experiment_id]
empty = True
for pos in entry["data"]:
if pos["measurement"] != 0 and pos["measurement"] != None:
empty = False
self.empty = False
if empty:
del entry["data"]
entry["empty"] = True
else:
entry["empty"] = False
self.data_json = json.dumps(data)
class AlignmentView():
alignment_line_length = 80
def __init__(self, transcript_id):
self.transcript_id = transcript_id
self.build_alignment_entries()
def build_alignment_entries(self):
self.alignment_rows = []
# fetch the alignment rows from the DB, using the ORM
alignment_entries = db_session \
.query(AlignmentEntry) \
.filter(AlignmentEntry.transcript_id==self.transcript_id) \
.all()
if (len(alignment_entries) == 0):
return # not enough transcripts to align
aln_len = len(alignment_entries[0].sequence) # length of alignment, including gaps
row_n = 0
reached_end = False
seq_len_processed = 0
# initialise tot_nucs counters. these are for showing nuc counts at the ends of each alignment row.
nuc_counts = {}
for alignment_entry in alignment_entries:
nuc_counts[alignment_entry.strain_id] = 0
while(True): # Each iteration builds 1 row of alignment data
start = row_n * self.alignment_line_length
end = start + self.alignment_line_length
if aln_len < end:
reached_end = True
end = aln_len
self.alignment_rows.append({
"strain_data": {},
"diff": list("*" * (end - start))
})
# create diff - as "*" - then change to "." when a difference is encountered
# create alignment entries data structure, for showing the sequences
for alignment_entry in alignment_entries:
self.alignment_rows[row_n]["strain_data"][alignment_entry.strain_id] = {
"nuc_count": 0, # TODO fill this shiz out
"sequence": list(alignment_entry.sequence[start : end])
}
# Loop through each nucleotide in the sequence. Determine any differences between the
# strains at the position of interest. Store in "diff" variable
for n in range(start, end):
different = False
old_nuc = None
for alignment_entry in alignment_entries:
new_nuc = alignment_entry.sequence[n]
if new_nuc != "-": # keep track of nucleotide counts, for showing on the end
nuc_counts[alignment_entry.strain_id] += 1
if old_nuc != None and new_nuc != old_nuc:
self.alignment_rows[row_n]["diff"][n - start] = "."
old_nuc = new_nuc
# add nucleotide counts to the ends of the sequence alignment.
for alignment_entry in alignment_entries:
self.alignment_rows[row_n]["strain_data"][alignment_entry.strain_id]["nuc_count"] = nuc_counts[alignment_entry.strain_id]
if reached_end:
break
row_n += 1
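# Worked example for AlignmentView above: aligning the (hypothetical) sequences
# "ACGT" and "ACCT" produces the diff string "**.*" -- "*" marks columns where
# all strains agree, "." the single column at which they differ.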
class TranscriptSearcher():
def search(self, search_string):
from flask import abort
transcripts = db_session \
.query(Transcript) \
.filter(Transcript.id.like("%"+search_string+"%")) \
.all()
if len(transcripts) == 0: # no transcripts found
abort(404)
out = []
for transcript in transcripts:
out.append(transcript.id)
return json.dumps(out)
class CoverageSearcher():
def __init__(self):
# size of pages
self.page_size = 25
# The experiment ID to sort by. Ideally this should have a value for each
# transcript, otherwise there will be some missing transcripts...
self.nucleotide_measurement_run_id = 1
def fetch_page_count(self):
# better to do the imports closer to where they are needed
from sqlalchemy import func
from math import ceil
transcript_count = db_session \
.query(func.count('*')) \
.select_from(NucleotideMeasurementSet) \
.filter(NucleotideMeasurementSet.nucleotide_measurement_run_id==self.nucleotide_measurement_run_id) \
.scalar()
page_count = ceil(transcript_count / self.page_size)
return page_count
def fetch_transcript_data(self, page_num):
from utils import Timeline
from sqlalchemy import func, and_
from models import Structure, GeneLocation
offset = (int(page_num) - 1) * self.page_size
limit = self.page_size
sql = (
"SELECT "
" transcript.id AS transcript_id, "
" gene_location.start AS gene_start, "
" gene_location.end AS gene_end, "
" jnms.coverage AS coverage, "
" jnms.structure_transcript_id AS structure_transcript_id "
"FROM ( "
" SELECT "
" nms.*, "
" structure.transcript_id AS structure_transcript_id "
" FROM ( "
" SELECT nucleotide_measurement_set.* "
" FROM nucleotide_measurement_set "
" ORDER BY nucleotide_measurement_set.coverage DESC "
" LIMIT "+str(limit)+" OFFSET "+str(offset)+" "
" ) AS nms LEFT OUTER JOIN structure ON "
" structure.transcript_id = nms.transcript_id AND "
" structure.structure_prediction_run_id = 2 "
") AS jnms, "
" transcript, "
" gene_location "
"WHERE "
" jnms.nucleotide_measurement_run_id = 1 AND "
" transcript.id = jnms.transcript_id AND "
" transcript.gene_id = gene_location.gene_id AND "
" gene_location.strain_id = 'Col_0' "
"GROUP BY jnms.transcript_id "
"ORDER BY coverage DESC"
)
results = database.engine.execute(sql)
out = []
for row in results:
out.append({
"transcript_id": row["transcript_id"],
"gene_length": (row["gene_end"] - row["gene_start"]) + 1,
"coverage": row["coverage"],
"has_structure": False if (row["structure_transcript_id"] == None) else True
})
return out
# q = db_session \
# .query(NucleotideMeasurementSet, Transcript, GeneLocation,) \
# .filter(
# NucleotideMeasurementSet.nucleotide_measurement_run_id==self.nucleotide_measurement_run_id,
# Transcript.id==NucleotideMeasurementSet.transcript_id,
# Transcript.gene_id==GeneLocation.gene_id,
# GeneLocation.strain_id==settings.reference_strain_id # get this for gene len
# ) \
# .outerjoin(( # Left join to find in-vivo structures for structure indicator
# Structure,
# and_(
# Structure.transcript_id==NucleotideMeasurementSet.transcript_id,
# # this filters so it's only in vivo joined against
# Structure.structure_prediction_run_id==2
# )
# )) \
# .add_entity(Structure) \
# .group_by(Transcript.id) \
# .order_by(NucleotideMeasurementSet.coverage.desc()) \
# .offset((int(page_num) - 1) * self.page_size) \
# .limit(str(self.page_size)) \
# GROUP BY eliminates structures with the same transcript ID \
# results = q.all()
# tl.log("c")
# tl.dump()
# get the SQL so we can optimise the query
# from sqlalchemy.dialects import postgresql
# q_str = str(q.statement.compile(compile_kwargs={"literal_binds": True}))
# print(q_str)
# mandatory in vivo query - just for screenshot purposes
# results = db_session \
# .query(NucleotideMeasurementSet, Transcript, GeneLocation, Structure, ) \
# .filter(
# NucleotideMeasurementSet.nucleotide_measurement_run_id==self.nucleotide_measurement_run_id,
# Transcript.id==NucleotideMeasurementSet.transcript_id,
# Transcript.gene_id==GeneLocation.gene_id,
# GeneLocation.strain_id==settings.reference_strain_id, # get this for gene len
# Structure.transcript_id==NucleotideMeasurementSet.transcript_id,
# # this filters so it's only in vivo considered
# Structure.structure_prediction_run_id==2
# ) \
# .add_entity(Structure) \
# .group_by(NucleotideMeasurementSet.transcript_id) \
# .order_by(NucleotideMeasurementSet.coverage.desc()) \
# .offset((int(page_num) - 1) * self.page_size) \
# .limit(str(self.page_size)) \
# .all()
class StructureView():
def __init__(self, transcript_id, strain_id):
self.transcript_id = transcript_id
self.strain_id = strain_id
self.build_entries([1, 2])
def build_entries(self, structure_prediction_run_ids):
from models import Structure, StructurePredictionRun
# Load experiments
runs = db_session \
.query(StructurePredictionRun) \
.filter(StructurePredictionRun.id.in_(structure_prediction_run_ids)) \
.all()
data = {}
for run in runs:
run_data = {
"id": run.id,
"description": run.description,
"data": []
}
# fetch all Structure objects that match the experiment ID and the transcript ID
results = db_session \
.query(Structure) \
.filter(
Structure.structure_prediction_run_id==run.id,
Structure.transcript_id==self.transcript_id
) \
.all()
# add the structures to output json
for structure in results:
run_data["data"].append({
"id": structure.id,
"energy": structure.energy,
"pc1": structure.pc1,
"pc2": structure.pc2
})
data[run.id] = run_data
self.empty = True
for experiment_id in data:
entry = data[experiment_id]
if len(entry["data"]) > 0:
self.empty = False
if not self.empty:
self.data_json = json.dumps(data)
# Plots a single RNA structure using the RNAplot program from the ViennaRNA package.
class StructureDiagramView():
def __init__(self, structure_id):
self.structure_id = structure_id
self.build_plot()
def build_plot(self):
# convert entities to dot bracket string
data = self.build_dot_bracket()
# use ViennaRNA to get 2d plot coords
data["coords"] = self.get_vienna_layout(data)
# return the results as a json string
self.data_json = json.dumps(data)
def build_dot_bracket(self):
# get all the positions
results = db_session \
.query(Structure, Transcript) \
.filter(
Structure.id==self.structure_id,
Transcript.id==Structure.transcript_id
) \
.all()
# Get position values from Structure entity
positions = results[0][0].get_values()
seq_str = results[0][1].get_sequence_str()
dot_bracket_str = build_dot_bracket(positions)
return {
"sequence": seq_str.replace("T", "U"),
"structure": dot_bracket_str
}
# Grab 2d coords from viennaRNA
# There is a python2 wrapper for vienna RNA but not python 3 compatible
def get_vienna_layout(self, data):
temp_folder = "/tmp/"+str(uuid.uuid4())
ensure_dir(temp_folder)
dot_bracket_filepath = temp_folder+"/dotbracket.txt"
f = open(dot_bracket_filepath, "w")
f.write(data["sequence"]+"\n"+data["structure"]+"\n")
f.close()
# change to tmp folder
os.chdir(temp_folder)
# use RNAplot CLI to generate the xrna tab delimited file
os.system("RNAplot -o xrna < "+dot_bracket_filepath)
# get the coords out by parsing the file
coords = []
with open(temp_folder+"/rna.ss") as f:
for line in f:
line = line.strip()
if line == "" or line[0] == "#":
continue
bits = line.split()
x = float(bits[2])
y = float(bits[3])
coords.append([x, y])
os.system("rm -rf "+temp_folder)
return coords
# return result
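# --- Editor's note: alternative sketch for the shell-out in get_vienna_layout ---
# Not used by the class above; it only illustrates that the same RNAplot call can
# be made with subprocess and an explicit working directory instead of
# os.chdir()/os.system(). Like the original code, it assumes ViennaRNA's RNAplot
# binary is available on PATH.
import subprocess

def run_rnaplot_xrna(dot_bracket_filepath, work_dir):
    """Run `RNAplot -o xrna` on a dot-bracket file inside work_dir."""
    with open(dot_bracket_filepath) as handle:
        subprocess.run(["RNAplot", "-o", "xrna"], stdin=handle, cwd=work_dir, check=True)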
class StructureCirclePlotView():
def __init__(self, structure_id):
self.structure_id = structure_id
self.get_values()
def get_values(self):
# get all the positions
results = db_session \
.query(Structure) \
.filter(Structure.id==self.structure_id) \
.all()
result = results[0]
positions = result.get_values()
bpps = result.get_bpp_values()
# build the output. backward facing links are left blank
# results must be shifted back to array indexes, since they start at 1 in the DB.
out = [];
for curr_position in range(1, len(positions) + 1):
paired_to_position = positions[curr_position - 1]
if paired_to_position == 0 or \
paired_to_position < curr_position:
link = None
else:
link = paired_to_position - 1
if link != None:
link = int(link)
out.append({
"name": curr_position - 1,
"link": link,
"bpp": None if bpps == None else bpps[curr_position - 1]
})
self.data_json = json.dumps(out)
# Generates plaintext structure text files for download
class StructureDownloader():
def __init__(self, structure_prediction_run_ids, transcript_id):
self.structure_prediction_run_ids = structure_prediction_run_ids
self.transcript_id = transcript_id
def generate(self):
# Fetch the data
results = db_session \
.query(Structure, StructurePredictionRun, Transcript) \
.filter(
StructurePredictionRun.id==Structure.structure_prediction_run_id,
Structure.structure_prediction_run_id.in_(self.structure_prediction_run_ids),
Structure.transcript_id==self.transcript_id,
Transcript.id==self.transcript_id
) \
.order_by(
Structure.structure_prediction_run_id,
Structure.id
) \
.all()
return self.generate_txt(results)
# Generates text using a more compact file format
def generate_txt(self, results):
# first we must extract and display the sequence, using the transcript object. output
# in fasta-like format
transcript = results[0][2]
buf = ">"+self.transcript_id+"\n"
buf += insert_newlines(transcript.get_sequence_str())+"\n"
for result in results:
structure = result[0]
run = result[1]
transcript = result[2]
positions = structure.get_values()
# generate and add the header text for this structure
buf += (
">sid_"+str(structure.id)+"\t"+
"ENERGY:"+str(structure.energy)+" kcal/mol\t"+
run.description+"\n")
# generate and add dot bracket text
buf += insert_newlines(build_dot_bracket(positions))+"\n"
return buf
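# Example of the compact format produced by generate_txt() (values illustrative;
# the fields in each ">sid_..." header line are tab-separated):
#   >AT1G01010.1
#   ATGGCTTCT...          <- full sequence, wrapped by insert_newlines()
#   >sid_42   ENERGY:-12.3 kcal/mol   in vivo structure prediction
#   (((((....)))))....    <- dot-bracket string, wrapped the same way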
# Generates the older and far more cluttered txt format for structures
def generate_txt_old(self, results):
# Generate tab delimited text from the data
buf = ""
for result in results:
structure = result[0]
run = result[1]
transcript = result[2]
seq_str = transcript.get_sequence_str()
positions = structure.get_values()
for curr_position in range(1, len(positions) + 1):
paired_to_position = positions[curr_position - 1]
letter = seq_str[curr_position - 1].replace("T", "U")
buf += str(structure.id)+"\t"+ \
str(run.description)+"\t"+ \
str(structure.transcript_id)+"\t"+ \
str(structure.energy)+"\t"+ \
str(structure.pc1)+"\t"+ \
str(structure.pc2)+"\t"+ \
str(letter)+"\t"+ \
str(curr_position)+"\t"+ \
str(paired_to_position)+"\n"
return buf
# Generates plain text nucleotide measurements for user download
# Includes raw and normalised
class NucleotideMeasurementDownloader():
def __init__(self, nucleotide_measurement_run_id, transcript_id):
self.nucleotide_measurement_run_id = nucleotide_measurement_run_id
self.transcript_id = transcript_id
# Retrieves raw reactivity values and outputs as text
def get_raw(self):
seq_str = Transcript(self.transcript_id).get_sequence_str()
# Use the ORM to grab compiled counts
results = db_session \
.query(RawReactivities) \
.filter(
RawReactivities.nucleotide_measurement_run_id==self.nucleotide_measurement_run_id,
RawReactivities.transcript_id==self.transcript_id
) \
.all()
measurement_set = results[0]
# minus_unpacked =
# plus_unpacked = values_str_unpack_int(measurement_set.plus_values)
cols = [
values_str_unpack_int(measurement_set.minus_values),
values_str_unpack_int(measurement_set.plus_values)
]
# Grab the raw replicate lanes data
lanes = db_session \
.query(RawReplicateCounts) \
.filter(
RawReplicateCounts.nucleotide_measurement_run_id==self.nucleotide_measurement_run_id,
RawReplicateCounts.transcript_id==self.transcript_id
) \
.order_by(
RawReplicateCounts.minusplus_id,
RawReplicateCounts.bio_replicate_id,
RawReplicateCounts.tech_replicate_id
) \
.all()
# gather the data
tech_rep_ids = set()
for lane in lanes:
cols.append(values_str_unpack_int(lane.values))
tech_rep_ids.add(lane.tech_replicate_id)
# make headers
headers = []
for lane in lanes:
# tech replicate notation only added for experiments with > 1 tech replicate
tech_str = "" if len(tech_rep_ids) == 1 else "_T"+str(lane.tech_replicate_id)
headers.append(str(lane.minusplus_id)+"_B"+str(lane.bio_replicate_id)+tech_str)
# Build and return the output
buf = "position\tsequence\tsum_minus\tsum_plus\t"+"\t".join(headers)+"\n"
for n in range(0, len(cols[0])):
# add position and seq letter
buf += str(n + 1)+"\t"+seq_str[n]
for col in cols: # add the dynamic columns
buf += "\t"+str(int(col[n]))
buf += "\n"
return buf
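# With a single technical replicate and minusplus_id values of "minus"/"plus"
# (an assumption for illustration), the tab-separated header row built above
# would read:
#   position  sequence  sum_minus  sum_plus  minus_B1  plus_B1
# followed by one row per nucleotide position.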
# Retrieves normalised reactivities and outputs as text
def get_normalised(self):
# Grab sequence string
seq_str = Transcript(self.transcript_id).get_sequence_str()
# Use the ORM to grab all the normalised stuff
results = db_session \
.query(NucleotideMeasurementSet) \
.filter(
NucleotideMeasurementSet.nucleotide_measurement_run_id==self.nucleotide_measurement_run_id,
NucleotideMeasurementSet.transcript_id==self.transcript_id
) \
.all()
measurement_set = results[0]
# TODO detect whether float or int and use the correct unpacker.
# Needed for raw count values download option
unpacked = values_str_unpack_float(measurement_set.values)
# index measurements by pos
measurements = {}
for pos in range(0, len(unpacked)):
value = unpacked[pos]
measurements[pos + 1] = "NA" if value == None else value
# build the output string
buf = ""
n = 0
for n in range(0, len(seq_str)):
pos = n + 1
measurement = "NA" if pos not in measurements else measurements[pos]
buf += str(pos)+"\t"+ \
seq_str[n]+"\t"+ \
str(measurement)+"\n"
n += 1
return buf
# Retrieves the BPPM for this transcript_id
class BppmDownloader():
def fetch(self, transcript_id):
import os
sauce_filepath = settings.bppms_folder+"/"+transcript_id+".bppm"
if not os.path.isfile(sauce_filepath):
return "No BPPM data available for "+transcript_id
buf = ""
# Open the raw BPPM and convert to our simpler format
with open(sauce_filepath, "r") as f:
first = True
for line in f:
if first: # skip the first line, which shows the length
first = False
continue
# add the text for the bppm table
if "Probability" in line: # skip header lines
continue
# extract the data, this will be used for structure BPPMs
bits = line.strip().split("\t")
pos_a = int(bits[0])
pos_b = int(bits[1])
bpp = -float(bits[2])
buf += str(pos_a)+"\t"+str(pos_b)+"\t"+str(bpp)+"\n"
return buf
# OLD method - storing in the database is not a good way to do it
# import zlib, base64
# # fetch from database
# results = db_session \
# .query(Bppm) \
# .filter(Bppm.transcript_id==transcript_id) \
# .all()
# bppm = results[0]
# # decode and return the BPPM
# decoded = base64.b64decode(bppm.data)
# data_txt = zlib.decompress(decoded)
# return data_txt
| mit | 4,064,572,954,846,963,000 | 35.454123 | 137 | 0.550355 | false | 4.109322 | false | false | false |
kevgliss/lemur | lemur/plugins/lemur_linuxdst/remote_host.py | 1 | 2482 | #!/usr/bin/python
from lemur.certificates import service
import paramiko
import stat
def copy_cert(cert_cn, dst_user, dst_priv, dst_priv_key, dst_host, dst_port, dst_dir, dst_file, dst_data):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# include the private key password if required
if dst_priv_key is None:
priv_key = paramiko.RSAKey.from_private_key_file(dst_priv)
else:
priv_key = paramiko.RSAKey.from_private_key_file(dst_priv, dst_priv_key)
# open the sftp connection
ssh.connect(dst_host, username=dst_user, port=dst_port, pkey=priv_key)
sftp = ssh.open_sftp()
# make the directory on the destination server
# files will be in a folder based on the cert_cn
# example:
# destination folder: /etc/nginx/certs/
# files will go in: /etc/nginx/certs/your.cn.com/cert.pem
try:
sftp.mkdir(dst_dir)
except IOError:
pass
try:
dst_dir_cn = dst_dir + '/' + cert_cn
sftp.mkdir(dst_dir_cn)
except IOError:
pass
cert_out = sftp.open(dst_dir_cn + '/' + dst_file, 'w')
cert_out.write(dst_data)
cert_out.close()
sftp.chmod(dst_dir_cn + '/' + dst_file, (stat.S_IRUSR))
ssh.close()
def create_cert(name, dst_dir, export_type, dst_user, dst_priv, dst_priv_key, dst_host, dst_host_port):
lem_cert = service.get_by_name(name)
dst_file = 'cert.pem'
chain_req = False
if export_type == 'NGINX':
# This process will result in a cert.pem file with the body and chain in a single file
if lem_cert.chain is None:
dst_data = lem_cert.body
else:
dst_data = lem_cert.body + '\n' + lem_cert.chain
chain_req = False
elif export_type == '3File':
        # This process will result in three files: cert.pem, priv.key, chain.pem
dst_data = lem_cert.body
chain_req = True
else:
dst_data = lem_cert.body
copy_cert(lem_cert.cn, dst_user, dst_priv, dst_priv_key, dst_host, dst_host_port, dst_dir, dst_file, dst_data)
if chain_req is True:
dst_file = 'chain.pem'
dst_data = lem_cert.chain_req
copy_cert(lem_cert.cn, dst_user, dst_priv, dst_priv_key, dst_host, dst_host_port, dst_dir, dst_file, dst_data)
dst_file = 'priv.key'
dst_data = lem_cert.private_key
copy_cert(lem_cert.cn, dst_user, dst_priv, dst_priv_key, dst_host, dst_host_port, dst_dir, dst_file, dst_data)
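# A minimal usage sketch (every value below is a hypothetical placeholder, not a
# real host, user or path):
#   create_cert('web.example.com', '/etc/nginx/certs', 'NGINX',
#               'deploy', '/home/deploy/.ssh/id_rsa', None, '203.0.113.10', 22)
# For a certificate whose CN is web.example.com this would write
# /etc/nginx/certs/web.example.com/cert.pem on the destination host.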
| apache-2.0 | 1,856,185,568,808,146,400 | 33 | 118 | 0.631346 | false | 3.03423 | false | false | false |
ShipleyCollege/ViPteam1 | GUI/genBlank.py | 1 | 1267 | import sys
sys.path.append('../ExtractAndAnalyzeCode')
import Node
import Pin
'''
Inputs
p1 - output Folder
p2 - Build mode (compact, exploded, explodedLego)
p3 - Title
p4 - Row 1 Col 1 : pinType (exec, data)
p5 - Row 1 Col 1 : text (pin title or "")
p6 - Row 1 Col 2 : pinType
p7 - Row 1 Col 2 : text
p8 - Row 2 Col 1 : pinType
p9 - Row 2 Col 1 : text
p10- Row 2 Col 2 : pinType
p11- Row 2 Col 2 : text
etc.
'''
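# Example invocation (hypothetical values, shown only to illustrate the argument
# layout described above):
#   python genBlank.py ./output compact "My Node" exec "" data "Target" exec "" data "Result"
# This would build a node titled "My Node" with four pins, which the loop below
# alternates between the left and right sides.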
print( 'Number of arguments:', len(sys.argv), 'arguments.')
print( 'Argument List:', str(sys.argv))
print(" ")
if len(sys.argv) < 4:
print("Error, insuficient parameters. These are expected;\nP1 : Output Folder Name\nP2 : Build Mode\nP3 : Title\nP4-n : [Pin Type, Pin Name] * n")
sys.exit(0)
outputFolder = sys.argv[1]
node = Node.Node(sys.argv[3], sys.argv[2])
pins = []
for c in range(4, len(sys.argv), 2):
if c >= len(sys.argv):
continue
if sys.argv[c] == 0:
continue
if (c+1) == len(sys.argv):
continue
pin = Pin.Pin(sys.argv[c+1], sys.argv[c]) # name , type
print("Pin : " + str(pin))
pins.append(pin)
pins.reverse() # make sure the pins are the correct way round
for c in range(len(pins)):
if c % 2:
node.addPin(pins[c], "Left")
else:
node.addPin(pins[c], "Right")
print(node)
node.writeNode("0", outputFolder)
| gpl-3.0 | -3,516,996,449,689,524,700 | 20.116667 | 147 | 0.64562 | false | 2.591002 | false | false | false |
Tonyll/MyCode | EMDemo/tools/EMReplace.py | 1 | 9844 |
# -*- coding: utf-8 -*-
__author__ = "xyjxyf"
"替换枚举类型"
import os
import re
import sys
walk_path = sys.argv[1]
# Dictionary of replacements: key -> old value, value -> new value
replace_dic = {
#EMClient
'"EaseMob.h"': '"EMClient.h"',
'\[EaseMob sharedInstance\]': '[EMClient shareClient]',
'IChatManager': 'IEMChatManager',
'EMCommandMessageBody': 'EMCmdMessageBody',
'IChatManagerDelegate': 'EMChatManagerDelegate',
#Group
'"EMGroupStyleSetting.h"': '"EMGroupOptions.h"',
'EMGroupStyleSetting': 'EMGroupOptions',
'.groupSubject': '.subject',
'.groupDescription': '.description',
'.groupOccupantsCount': '.occupantsCount',
'.groupSetting': '.setting',
'.groupStyle': '.style',
'.groupMaxUsersCount': '.maxUsersCount',
'eGroupStyle_PrivateOnlyOwnerInvite': 'EMGroupStylePrivateOnlyOwnerInvite',
'eGroupStyle_PrivateMemberCanInvite': 'EMGroupStylePrivateMemberCanInvite',
'eGroupStyle_PublicJoinNeedApproval': 'EMGroupStylePublicJoinNeedApproval',
'eGroupStyle_PublicOpenJoin': 'EMGroupStylePublicOpenJoin',
'eGroupStyle_Default': 'EMGroupStylePrivateOnlyOwnerInvite',
'eGroupLeaveReason_BeRemoved': 'EMGroupLeaveReasonBeRemoved',
'eGroupLeaveReason_UserLeave': 'EMGroupLeaveReasonUserLeave',
'eGroupLeaveReason_Destroyed': 'EMGroupLeaveReasonDestroyed',
'fetchMyGroupsListWithError:': 'getMyGroupsFromServerWithError:',
'chatManager destroyGroup:': 'groupManager leaveGroup:',
'chatManager leaveGroup:': 'groupManager leaveGroup:',
'chatManager addOccupants:': 'groupManager addOccupants:',
'chatManager removeOccupants:': 'groupManager removeOccupants:',
'chatManager blockOccupants:': 'groupManager blockOccupants:',
'chatManager unblockOccupants:': 'groupManager unblockOccupants:',
'chatManager changeGroupSubject:': 'groupManager changeGroupSubject:',
'chatManager changeDescription:': 'groupManager changeDescription:',
'chatManager fetchGroupBansList:': 'groupManager fetchGroupBansList:',
'chatManager joinPublicGroup:': 'groupManager joinPublicGroup:',
'chatManager searchPublicGroupWithGroupId:': 'groupManager searchPublicGroupWithGroupId:',
#Contact
'didReceiveBuddyRequest:': 'didReceiveFriendInvitationFromUsername:',
'didAcceptedByBuddy:': 'didReceiveAgreedFromUsername:',
'didRejectedByBuddy:': 'didReceiveDeclinedFromUsername:',
'didRemovedByBuddy:': 'didReceiveDeletedFromUsernames:',
#Chat
'.messageBodyType': '.type',
'.attachmentDownloadStatus': '.downloadStatus',
'.chatter': '.conversationId',
'.conversationType': '.type',
'.conversationChatter': '.conversationId',
'.groupSenderName': '.from',
'.deliveryState': '.status',
'.messageType': '.chatType',
'.chatId': '.messageId',
'id<IEMMessageBody>': 'EMMessageBody',
'removeMessageWithId:': 'deleteMessageWithId:',
'removeAllMessages': 'deleteAllMessages',
'MessageBodyType': 'EMMessageBodyType',
'eMessageBodyType_Text': 'EMMessageBodyTypeText',
'eMessageBodyType_Image': 'EMMessageBodyTypeImage',
'eMessageBodyType_Video': 'EMMessageBodyTypeVideo',
'eMessageBodyType_Location': 'EMMessageBodyTypeLocation',
'eMessageBodyType_Voice': 'EMMessageBodyTypeVoice',
'eMessageBodyType_File': 'EMMessageBodyTypeFile',
'eMessageBodyType_Command': 'EMMessageBodyTypeCmd',
'EMAttachmentDownloadStatus': 'EMDownloadStatus',
'EMAttachmentDownloading': 'EMDownloadStatusDownloading',
'EMAttachmentDownloadSuccessed': 'EMDownloadStatusSuccessed',
'EMAttachmentDownloadFailure': 'EMDownloadStatusFailed',
'EMAttachmentNotStarted': 'EMDownloadStatusPending',
'eConversationTypeChat': 'EMConversationTypeChat',
'eConversationTypeGroupChat': 'EMConversationTypeGroupChat',
'eConversationTypeChatRoom': 'EMConversationTypeChatRoom',
'EMMessageType': 'EMChatType',
'eMessageTypeChat': 'EMChatTypeChat',
'eMessageTypeGroupChat': 'EMChatTypeGroupChat',
'eMessageTypeChatRoom': 'EMChatTypeChatRoom',
'MessageDeliveryState': 'EMMessageStatus',
'eMessageDeliveryState_Pending': 'EMMessageStatusPending',
'eMessageDeliveryState_Delivering': 'EMMessageStatusDelivering',
'eMessageDeliveryState_Delivered': 'EMMessageStatusSuccessed',
'eMessageDeliveryState_Failure': 'EMMessageStatusFailed',
#ChatRoom
'.chatroomSubject': '.subject',
'.chatroomDescription': '.description',
'.chatroomMaxOccupantsCount': '.maxOccupantsCount',
'eChatroomBeKickedReason_BeRemoved': 'EMChatroomBeKickedReasonBeRemoved',
'eChatroomBeKickedReason_Destroyed': 'EMChatroomBeKickedReasonDestroyed',
'beKickedOutFromChatroom:': 'didReceiveKickedFromChatroom:',
#Call
'.sessionChatter': '.remoteUsername',
'asyncAnswerCall:': 'answerCall:',
'asyncEndCall:': 'endCall:',
'eCallSessionStatusDisconnected': 'EMCallSessionStatusDisconnected',
'eCallSessionStatusRinging': 'EMCallSessionStatusRinging',
'eCallSessionStatusAnswering': 'EMCallSessionStatusConnecting',
'eCallSessionStatusPausing': 'EMCallSessionStatusConnecting',
'eCallSessionStatusConnecting': 'EMCallSessionStatusConnecting',
'eCallSessionStatusConnected': 'EMCallSessionStatusConnected',
'eCallSessionStatusAccepted': 'EMCallSessionStatusAccepted',
'eCallConnectTypeNone': 'EMCallConnectTypeNone',
'eCallConnectTypeDirect': 'EMCallConnectTypeDirect',
'eCallConnectTypeRelay': 'EMCallConnectTypeRelay',
'EMCallSessionType': 'EMCallType',
'eCallSessionTypeAudio': 'EMCallTypeVoice',
'eCallSessionTypeVideo': 'EMCallTypeVideo',
'eCallSessionTypeContent': 'EMCallTypeVoice',
'EMCallStatusChangedReason': 'EMCallEndReason',
'eCallReasonNull': 'EMCallEndReasonHangup',
'eCallReasonOffline': 'EMCallEndReasonNoResponse',
'eCallReasonNoResponse': 'EMCallEndReasonNoResponse',
'eCallReasonHangup': 'EMCallEndReasonHangup',
'eCallReasonReject': 'EMCallEndReasonDecline',
'eCallReasonBusy': 'EMCallEndReasonBusy',
'eCallReasonFailure': 'EMCallEndReasonFailed',
'eCallReason_Null': 'EMCallEndReasonHangup',
'eCallReason_Offline': 'EMCallEndReasonNoResponse',
'eCallReason_NoResponse': 'EMCallEndReasonNoResponse',
'eCallReason_Hangup': 'EMCallEndReasonHangup',
'eCallReason_Reject': 'EMCallEndReasonReject',
'eCallReason_Busy': 'EMCallEndReasonBusy',
'eCallReason_Failure': 'EMCallEndReasonFailed',
#Apns
'"EMPushNotificationOptions.h"': '"EMPushOptions.h"',
'EMPushNotificationOptions': 'EMPushOptions',
#Error
'.errorCode': '.code',
'.description': '.domain',
'EMErrorType': 'EMErrorCode',
'EMErrorNotFound': 'EMErrorNotExist',
# 'EMErrorServerMaxCountExceeded': '',
'EMErrorConfigInvalidAppKey': 'EMErrorInvalidAppkey',
'EMErrorServerAuthenticationFailure': 'EMErrorUserAuthenticationFailed',
'EMErrorServerAPNSRegistrationFailure': 'EMErrorApnsBindDeviceTokenFailed',
'EMErrorServerDuplicatedAccount': 'EMErrorUserAlreadyExist',
'EMErrorServerInsufficientPrivilege': 'EMErrorUserIllegalArgument',
'EMErrorServerTooManyOperations': 'EMErrorServerBusy',
'EMErrorAttachmentNotFound': 'EMErrorFileNotFound',
'EMErrorAttachmentUploadFailure': 'EMErrorFileUploadFailed',
'EMErrorIllegalURI': 'EMErrorInvalidURL',
'EMErrorMessageInvalid_NULL': 'EMErrorMessageInvalid',
'EMErrorMessageContainSensitiveWords': 'EMErrorMessageIncludeIllegalSpeech',
'EMErrorGroupInvalidID_NULL': 'EMErrorGroupInvalidId',
'EMErrorGroupJoined': 'EMErrorGroupAlreadyJoined',
'EMErrorGroupJoinNeedRequired': 'EMErrorGroupPermissionDenied',
# 'EMErrorGroupFetchInfoFailure': '',
# 'EMErrorGroupInvalidRequired': '',
# 'EMErrorGroupInvalidSubject_NULL': '',
# 'EMErrorGroupAddOccupantFailure': '',
'EMErrorInvalidUsername_NULL': 'EMErrorInvalidUsername',
'EMErrorInvalidUsername_Chinese': 'EMErrorInvalidUsername',
'EMErrorInvalidPassword_NULL': 'EMErrorInvalidPassword',
'EMErrorInvalidPassword_Chinese': 'EMErrorInvalidPassword',
# 'EMErrorApnsInvalidOption': '',
# 'EMErrorHasFetchedBuddyList': '',
# 'EMErrorBlockBuddyFailure': '',
# 'EMErrorUnblockBuddyFailure': '',
'EMErrorCallConnectFailure': 'EMErrorCallConnectFailed',
# 'EMErrorExisted': '',
# 'EMErrorInitFailure': '',
'EMErrorNetworkNotConnected': 'EMErrorNerworkUnavailable',
'EMErrorFailure': 'EMErrorGeneral',
# 'EMErrorFeatureNotImplemented': '',
# 'EMErrorRequestRefused': '',
'EMErrorChatroomInvalidID_NULL': 'EMErrorChatroomInvalidId',
'EMErrorChatroomJoined': 'EMErrorChatroomAlreadyJoined',
# 'EMErrorReachLimit': '',
# 'EMErrorOutOfRateLimited': '',
# 'EMErrorGroupOccupantsReachLimit': '',
# 'EMErrorTooManyLoginRequest': '',
# 'EMErrorTooManyLogoffRequest': '',
# 'EMErrorPermissionFailure': '',
# 'EMErrorIsExist': '',
# 'EMErrorPushNotificationInvalidOption': '',
# 'EMErrorCallChatterOffline': '',
}
def check_main(root_path):
for root, dirs, files in os.walk(root_path):
for file_path in files:
if file_path.endswith('.m') or file_path.endswith('.h') or file_path.endswith('.pch'):
full_path = os.path.join(root, file_path)
                # do not check third-party libraries under Pods/
if 'Pods/' in full_path:
break
fr = open(full_path, 'r')
content = fr.read()
fr.close()
for key in replace_dic:
match = re.search(key, content)
if match:
                        # replace the match
content = re.sub(key, replace_dic[key], content);
                        # write the file back
open(full_path,'w').write(content)
if __name__ == '__main__':
check_main(walk_path)
| mit | -8,191,097,917,055,163,000 | 44.896714 | 98 | 0.722074 | false | 3.606049 | false | false | false |
gogoprog/gengine | scripts/emscripten.py | 1 | 1275 | #!/usr/bin/python3
import platform
import os
import sys
import argparse
import multiprocessing
import os.path
import common
def emcc(appDir, outputDir, includeEmptyData):
previous_dir = os.getcwd()
os.chdir(os.environ['GENGINE']+"/build")
cmd = "emcc "
cmd += "" if common.debugMode else "-O3"
cmd += " --bind gengine" + ('d' if common.debugMode else '') + ".bc"
cmd += " -o " + outputDir + "/index.html"
cmd += " --preload-file " + common.rootPath + "/res/coreData@coreData"
if includeEmptyData:
cmd += " --preload-file " + common.rootPath + "/res/data@data "
else:
cmd += " --preload-file " + appDir + "/data@data "
cmd += " --use-preload-plugins -s TOTAL_MEMORY=134217728 -s TOTAL_STACK=1048576"
cmd += " --shell-file " + common.rootPath + "/src/shell.html"
os.system(cmd)
os.chdir(previous_dir)
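# For reference, a non-debug build roughly expands to a command of this shape
# (the directory placeholders are illustrative, taken from the variables used above):
#   emcc -O3 --bind gengine.bc -o <outputDir>/index.html
#        --preload-file <GENGINE>/res/coreData@coreData --preload-file <appDir>/data@data
#        --use-preload-plugins -s TOTAL_MEMORY=134217728 -s TOTAL_STACK=1048576
#        --shell-file <rootPath>/src/shell.html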
def build(appDir, outputDir):
common.log("Running emcc...")
current_dir = os.getcwd()
os.chdir(appDir)
os.system("rm -rf index.data index.html index.js index.html.mem")
emcc(current_dir, outputDir, True)
os.chdir(current_dir)
def runServer(targetDir):
os.chdir(targetDir)
common.log("Running HTTP server in '" + targetDir + "'...")
os.system("python -m http.server");
| mit | -973,317,280,959,787,900 | 31.692308 | 84 | 0.63451 | false | 3.148148 | false | false | false |
bijilap/NER-Tagger | netrain.py | 1 | 2159 | import sys
import subprocess
class netrain:
feature_set={}
features_fname='ne.in'
model_fname="ne.model"
def __init__(self,mname):
self.model_fname=mname
def read_training_file(self,fname):
f=open(fname,'r')
fout=open(self.features_fname,'w')
for line in f:
#print line
pword='BOS' #previous word
ppostag='BOS' #previous POS tag
nword='EOS' #next word
npostag='EOS'
pnetag='None' #previous netag
pwprefix='None'
wprefix='None'
nwprefix='None'
words_tags=line.split()
for i in range(len(words_tags)):
#print words_tags[i]+' '+str(len(words_tags[i].split('/')))
#if len(words_tags[i].split('/'))>3:
#print 'here'
#continue
word_list=words_tags[i].split('/')
postag=word_list[len(word_list)-2]
netag=word_list[len(word_list)-1]
word=words_tags[i][:len(words_tags[i])-((len(postag)+len(netag))+2)]
#(word,postag,netag)=
wprefix=word[0]
#word=word+'/'+postag
#print word+" "+tag
if i+1>=len(words_tags):
nword='EOS'
npostag='EOS'
nwprefix='None'
else:
word_list=words_tags[i+1].split('/')
npostag=word_list[len(word_list)-2]
nword=words_tags[i+1][:len(words_tags[i+1])-((len(word_list[len(word_list)-2])+len(word_list[len(word_list)-1]))+2)]
#nwprefix=nword[0]
feature=netag+" "+"pw:"+str(pword)+" w:"+str(word)+" nw:"+str(nword)+" pnetag:"+str(pnetag)+" ppostag:"+str(ppostag)+" postag:"+str(postag)+ " npostag:"+str(npostag)+'\n'
#print feature
pnetag=netag
pword=word
ppostag=postag
#pwprefix=pword[0]
fout.write(feature)
#print feature
f.close()
		fout.close()
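	# Example of a single emitted training line (the token values are hypothetical):
	# for a sentence-initial token "John/NNP/PERSON" followed by "Smith/NNP/PERSON",
	# the feature line written above would be:
	#   PERSON pw:BOS w:John nw:Smith pnetag:None ppostag:BOS postag:NNP npostag:NNP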
def learn(self):
subprocess.call('python ./perceplearn.py '+self.features_fname+' '+self.model_fname+' -i 20',shell=True)
fname=sys.argv[1]
mname=sys.argv[2]
pobj=netrain(mname)
pobj.read_training_file(fname)
pobj.learn()
pobj.read_training_file(fname)
| apache-2.0 | -3,337,249,659,875,602,000 | 30.75 | 186 | 0.557202 | false | 2.965659 | false | false | false |
sebastian-ecki-eckstein/kreuzschiene | client/shell/tcp-switch-client.py | 1 | 4797 | #!/usr/bin/env python
import socket
class kreuz_tcp_client:
def __init__(self,ip='127.0.0.1',port=4242):
		self.TCP_IP = ip
		self.TCP_PORT = port
self.BUFFER_SIZE = 1024
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
		self.sock.connect((self.TCP_IP, self.TCP_PORT))
self.length = -1
self.output = []
self.outputname = []
self.inputname = []
ergebnis = self.f_get_data()
self.output = ergebnis[0]
self.outputname = ergebnis[1]
self.inputname = ergebnis[2]
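	# Wire-format sketch, inferred from the parsing in f_get_data below and not an
	# official protocol description: the reply to "GET:DATA:" is a colon-separated
	# string of the form
	#   <x>:<y>:<n>:<out_1>..<out_n>:<outname_1>..<outname_n>:<inname_1>..<inname_n>
	# e.g. for a hypothetical 2-port switch: "ACK:DATA:2:0:1:out_0:out_1:in_0:in_1".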
def f_get_data(self):
self.sock.send("GET:DATA:".encode('UTF-8'))
data = self.sock.recv(self.BUFFER_SIZE)
datastr = data.decode(encoding='UTF-8',errors='ignore')
splitted = datastr.split(':')
output = []
outputname = []
inputname = []
if len(splitted)>3:
anzahl = int(splitted[2])
else:
return False
if self.length == -1:
self.length = anzahl
i = 0
while i < self.length:
output.append(0)
outputname.append('out_'+str(i))
inputname.append('in_'+str(i))
i = i + 1
if anzahl != self.length:
return False
if len(splitted)<((self.length*3)+3):
return False
i = 0
while i < self.length:
output[i] = splitted[3+i]
outputname[i] = splitted[(i+self.length)+3]
inputname[i] = splitted[i+(self.length*2)+3]
i = i + 1
return [output,outputname,inputname]
def f_set_output_name(self,number,name):
print("set output name")
if int(number) > self.length:
return False
sendstr = "SET:PORT:O"+str(number)+":"+name
self.sock.send(sendstr.encode('UTF-8'))
data = self.sock.recv(self.BUFFER_SIZE)
datastr = data.decode(encoding='UTF-8',errors='ignore')
splitted = datastr.split(':')
if splitted[0] == "NACK":
return False
ergebnis = self.f_get_data()
self.outputname = ergebnis[1]
return True
def f_set_input_name(self,number,name):
print("set input name")
if int(number) > self.length:
return False
sendstr = "SET:PORT:I"+str(number)+":"+name
self.sock.send(sendstr.encode('UTF-8'))
data = self.sock.recv(self.BUFFER_SIZE)
datastr = data.decode(encoding='UTF-8',errors='ignore')
splitted = datastr.split(':')
if splitted[0] == "NACK":
return False
ergebnis = self.f_get_data()
self.inputname = ergebnis[2]
return True
def f_set_output(self,outnum,innum):
print("set output input")
if int(outnum) > self.length or int(innum) > self.length:
return False
sendstr = "SET:PORT:O"+str(outnum)+":I"+str(innum)
self.sock.send(sendstr.encode('UTF-8'))
data = self.sock.recv(self.BUFFER_SIZE)
datastr = data.decode(encoding='UTF-8',errors='ignore')
splitted = datastr.split(':')
if splitted[0] == "NACK":
return False
ergebnis = self.f_get_data()
self.output = ergebnis[0]
return True
def f_update(self):
ergebnis = self.f_get_data()
self.output = ergebnis[0]
self.outputname = ergebnis[1]
self.inputname = ergebnis[2]
return True
def f_load(self,name):
print("load config")
sendstr = "SET:LOAD:"+str(name)
self.sock.send(sendstr.encode('UTF-8'))
data = self.sock.recv(self.BUFFER_SIZE)
datastr = data.decode(encoding='UTF-8',errors='ignore')
splitted = datastr.split(':')
if splitted[0] == "NACK":
return False
self.f_update()
return True
def f_save(self,name):
print("save config")
sendstr = "SET:SAVE:"+str(name)
self.sock.send(sendstr.encode('UTF-8'))
data = self.sock.recv(self.BUFFER_SIZE)
datastr = data.decode(encoding='UTF-8',errors='ignore')
splitted = datastr.split(':')
if splitted[0] == "NACK":
return False
return True
def f_get_config(self):
print("get config names")
sendstr = "GET:CONFIG:"
self.sock.send(sendstr.encode('UTF-8'))
data = self.sock.recv(self.BUFFER_SIZE)
return "test"
def f_lock(self,locker):
print("lock/unlock")
sendstr = "SET:LOCK:"+str(locker)
self.sock.send(sendstr.encode('UTF-8'))
data = self.sock.recv(self.BUFFER_SIZE)
return True
def end(self):
self.sock.close()
if __name__ == '__main__':
print("start client")
| apache-2.0 | -5,766,154,938,342,356,000 | 31.632653 | 69 | 0.543673 | false | 3.43133 | false | false | false |
MeirKriheli/statirator | statirator/core/models.py | 1 | 1691 | from __future__ import absolute_import
from django.conf import settings
from .utils import path_to_lang, LANGS_DICT
class TranslationsMixin(object):
"Helper for getting transalations"
SLUG_FIELD_FOR_TRANSLATIONS = 'slug' # Overide in models if needed
LANG_FIELD_FOR_TRANSLATIONS = 'language' # Overide in models if needed
def get_translations(self):
"Query set for the translations"
self_slug = getattr(self, self.SLUG_FIELD_FOR_TRANSLATIONS)
self_lang = getattr(self, self.LANG_FIELD_FOR_TRANSLATIONS)
slug = {self.SLUG_FIELD_FOR_TRANSLATIONS + '__exact': self_slug}
lang = {self.LANG_FIELD_FOR_TRANSLATIONS + '__exact': self_lang}
return self.__class__.objects.filter(**slug).exclude(**lang)
def get_language(self):
"Get the language display for this item's language"
attr = 'get_{0}_display'.format(self.LANG_FIELD_FOR_TRANSLATIONS)
return getattr(self, attr)()
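# A minimal usage sketch (the Post model below is hypothetical and only shows how
# the mixin is meant to be attached to a model with slug and language fields):
#
#   class Post(TranslationsMixin, models.Model):
#       slug = models.SlugField()
#       language = models.CharField(max_length=7, choices=settings.LANGUAGES)
#
# post.get_translations() then returns the Post rows that share the same slug in
# the other languages, and post.get_language() returns the language display name.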
class DummyTranslation(object):
"""Dummy translations for views to put in template context in case there's no
actual object"""
def __init__(self, request, language=None, title=None, path=None):
self.title = title
self.request = request
self.language = language or request.LANGUAGE_CODE
self.path = path or request.path
def get_translations(self):
for code, name in settings.LANGUAGES:
if code != self.language:
yield DummyTranslation(self.request, code, name, self.path)
def get_language(self):
return LANGS_DICT.get(self.language)
def get_absolute_url(self):
return path_to_lang(self.path, self.language)
| mit | 1,598,948,057,908,974,000 | 34.229167 | 81 | 0.66647 | false | 3.887356 | false | false | false |
TheRedFireFox/AnimeSubBot | src/main.py | 1 | 10335 | #!/usr/bin/env python3.4
# -*- coding: utf-8 -*-
"""
main.py is the entry point of the program's execution.
It initialises the main classes and holds (for now) the main loop; this
will be changed as soon as multiprocessing is fully implemented.
"""
# standard modules import
import os
import sys
import time
import getpass
import platform
import multiprocessing
# if only windows is supported else use the curses module on linux (-.-)
try:
import msvcrt
except ImportError:
try:
import curses
except ImportError:
raise
# personal imports
import sql
import installer
import gobjects
import language
import clogging
import parsers.commandline
import parsers.configuration
import worker
def RestartProgram():
"""
Restarts the current program.
Note: this function does not return. Any cleanup action (like
saving data) must be done before calling this function.
"""
python = sys.executable
os.execl(python, python, * sys.argv)
def Install(Configuration,
SConfiguration,
MasterLanguage,
MasterLogger):
import installer
Install = installer.Installer(
Configuration = Configuration,
SConfiguration = SConfiguration,
Language = MasterLanguage,
Logging = MasterLogger,
)
Install.Install()
def TestSql(Configuration, MasterLogger, MasterLanguage):
SqlObject = None
NoConnection = True
NrTry = 0
while NrTry < 3:
SqlObject = sql.Api(
User = Configuration["Security"]["DatabaseUser"],
Password = Configuration["Security"]["DatabasePassword"],
DatabaseName = Configuration["MySQL"]["DatabaseName"],
Host=Configuration["MySQL"]["DatabaseHost"],
Port=Configuration["MySQL"]["DatabasePort"],
ReconnectTimer=int(Configuration["MySQL"]
["ReconnectionTimer"]),
LoggingObject = MasterLogger,
LanguageObject = MasterLanguage
)
if SqlObject.DatabaseConnection is None:
NrTry += 1
else:
break
SqlObject.CloseConnection()
if NrTry == 3:
return False
else:
return True
def Main():
"""
    The main function that lets the application run.
    This function initialises all the needed objects
    and makes sure that there is always something to do.
"""
# this module is needed for the curses module for all unix distributions
CursesMasterObject = None
CursesObject = None
# if program is run not on a windows system:
if platform.system() != "Windows":
# init the curses screen
CursesMasterObject = curses.initscr()
# Use cbreak to not require a return key press
        # The system will not block waiting for a key press and can continue to work.
curses.cbreak()
curses.noecho()
CursesMasterObject.nodelay(1)
maxy, maxx = CursesMasterObject.getmaxyx()
begin_x = 0
begin_y = 0
height = maxy
width = maxx
CursesObject = curses.newwin(height, width, begin_y, begin_x)
CursesObject.nodelay(1)
curses.setsyx(-1, -1)
CursesMasterObject.refresh()
CursesObject.refresh()
CursesObject.scrollok(True)
CursesObject.idlok(True)
CursesObject.leaveok(True)
# This object in needed for the main process to interact with the
# subprocess (the worker).
# SecondQueue = multiprocessing.Queue(1)
# This object is the event to shutdown all the subprocesses
    # it starts unset (false) and is set at the end to stop the subprocesses.
ShutdownEventObject = multiprocessing.Event()
try:
# initialising the first logger and the language master object
# this object will be recreated later on
MasterLogger = clogging.Logger()
MasterLanguage = language.Language()
Language = MasterLanguage.CreateTranslationObject()
_ = Language.gettext
# Create the configuration class and read the configuration class.
Configuration = parsers.configuration.ConfigurationParser()
SConfiguration = parsers.configuration.SecureConfigurationParser(INTERNAL_KEY)
# check if default files exist if not install them
if ((Configuration.CheckIfExists() is False) or
(SConfiguration.CheckIfExists() is False)):
import installer
installer.Installer(Configuration,
SConfiguration,
MasterLanguage,
MasterLogger).Install("A")
else:
Configuration.ReadConfigurationFile()
SConfiguration.ReadConfigurationFile()
Configuration.AddSecureConfigurationParser(SConfiguration)
# deleting the object so that it will be garbage collected
del SConfiguration
Configuration = Configuration.ReturnClean()
# Create the language processor
MasterLanguage = language.Language()
Language = MasterLanguage.CreateTranslationObject(
Configuration["Telegram"]["DefaultLanguage"].split(","))
# This is the language object that will call the translation
# function.
_ = Language.gettext
# init parser
Parser = parsers.commandline.CustomParser(ConfigurationObject=Configuration,
LanguageObject=MasterLanguage
)
Parser.RunParser()
ParserArguments = Parser.GetArguments()
if ParserArguments.Installer is True:
# checking the installation
# reseting the configurations
import installer
Configuration = parsers.configuration.ConfigurationParser()
SConfiguration = parsers.configuration.SecureConfigurationParser(INTERNAL_KEY)
installer.Installer(Configuration,
SConfiguration,
MasterLanguage,
MasterLogger).Install()
# deleting the object so that it will be garbage collected
del SConfiguration
Configuration = Configuration.ReturnClean()
# Initialise the rest of the objects.
# first the multiprocess logger
MasterLogger.CloseHandlers()
MasterLogger = clogging.LoggingProcessSender(
LogToConsole = ParserArguments.PrintToConsole,
FileName = Configuration["Logging"]["LoggingFileName"],
MaxLogs = Configuration["Logging"]["MaxLogs"],
LoggingFormat = Configuration["Logging"]["LoggingFormat"],
Dateformat = Configuration["Logging"]["DateFormat"],
LoggingLevel = "debug",
CursesObject = CursesObject,
ShutdownEvent = ShutdownEventObject
)
MasterLogger.info(_("{AppName} has been started.").format(
AppName=gobjects.__AppName__
))
# test if there is a MySql connection
if TestSql(Configuration, MasterLogger, MasterLanguage) is False:
MasterLogger.critical(
_("{AppName} has been stopped, because you didn't "
"input the correct user name or password.").format(
AppName=gobjects.__AppName__)
)
time.sleep(0.5)
raise SystemExit
# starting the Worker
MainWorker = worker.MainWorker(
MaxWorker = Configuration["Telegram"]["MaxWorker"],
ShutDownEvent = ShutdownEventObject,
Configuration = Configuration,
Logging = MasterLogger,
Language = MasterLanguage,
BotName = None)
MainWorker.start()
        # Initialise the main loop (it's an endless loop, it breaks when a
# key is pressed.)
MasterLogger.info(_("Exit loop by pressing <Esc>, <q> or <Space>"))
MasterLogger.info(_("Getting updates from the telegram api."))
# Add a comment number to the telegram request, so that the old
# messages will be sorted out.
while True:
# check if a key is pressed by user and stop if pressed.
# if windows use msvcrt
if platform.system() == "Windows":
if msvcrt.kbhit():
PressedKey = ord(msvcrt.getch())
if PressedKey == 27 or PressedKey == 113 or \
PressedKey == 32:
MasterLogger.info(_("A user shutdown was requested "
"will now shutdown."))
break
# use curses
else:
PressedKey = CursesObject.getch()
if (PressedKey == 27 or PressedKey == 113 or
PressedKey == 32):
MasterLogger.info(_("A user shutdown was requested will "
"now shutdown.")
)
break
else:
pass
time.sleep(0.5)
MasterLogger.info(_("The system is shutting down, please be patient"
" until all the workload has been cleared."))
finally:
ShutdownEventObject.set()
try:
MainWorker.join()
except UnboundLocalError:
pass
except:
raise
MasterLogger.join()
if platform.system() != "Windows":
# clean after the curses module
time.sleep(1)
curses.nocbreak()
curses.echo()
curses.endwin()
# Raise the terror of the curses module for a second time.
# (It's correctly formatted now)
try:
raise
except RuntimeError:
pass
if __name__ == "__main__":
INTERNAL_KEY = r"2#<&Sd8!upX.jm(n"
multiprocessing.freeze_support()
Main()
| gpl-2.0 | 6,433,128,542,332,228,000 | 33.915541 | 90 | 0.575907 | false | 5.201309 | true | false | false |
eriksonJAguiar/TCC-UENP-Codigos | My_codes/coleta/twitter_extract_tweets.py | 1 | 3251 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from TwitterAPI import *
from datetime import *
from pymongo import MongoClient
import sys
import json
import os.path
import time
#timeout
timeout = 60*3
timeout_start = time.time()
# Twitter App access credentials
consumer_key = "NBL0CtVrn2ajbpaGEWC1GBY2c"
consumer_secret = "2F5Uz5VYg0ONu4xTYYZsWkAGfc3TYXCkXLCsXMJ1eCKOfhBTfS"
access_token = "2345718031-we2K2PETQXkz7NCexjdGuvE2L2rnd5KfouzN3Up"
access_token_secret = "aEQPKGifu1y29Wbh3u6Z0YIcjAsBC8VeD4Y75CDL2r12o"
# access OAuth
# API reference: https://dev.twitter.com/rest/reference
twitter = TwitterAPI(consumer_key, consumer_secret,auth_type='oAuth2')
##DataBase
client = MongoClient()
db = client.baseTweetsTCC
def saveTrends(tag,date):
try:
db.trends.insert_one(
{
'tag':tag,
'date':date
}
)
except Exception as inst:
pass
result_max = 10000
result_cont = 0
dh = datetime.now()
#tags = ['hiv','aids','viagra','tinder','menopausa','dst','ist','sifilis','usecamisinha','hpv','camisinha']
tags = []
#param = sys.argv[1:]
#print(param[0])
try:
trends_br = twitter.request('trends/place', {'id': 23424768})
trends_eua = twitter.request('trends/place', {'id': 23424977})
trends_eng = twitter.request('trends/place', {'id': 24554868})
#trends_esp = twitter.request('trends/place', {'id': 23424950})
#trends_ger = twitter.request('trends/place', {'id': 23424829})
n_trends = 10
i = 0
for br in trends_br.get_iterator():
tags.append(br['name'])
saveTrends(br['name'],dh.now())
i += 1
if i > n_trends: break
i = 0
for eua in trends_eua.get_iterator():
tags.append(eua['name'])
saveTrends(eua['name'],dh.now())
if i > n_trends: break
i += 1
i = 0
for eng in trends_eua.get_iterator():
tags.append(eng['name'])
saveTrends(eng['name'],dh.now())
if i > n_trends: break
i += 1
i = 0
#for esp in trends_esp.get_iterator():
# tags.append(esp['name'])
# saveTrends(esp['name'],dh.now())
# if i > n_trends: break
# i += 1
#i = 0
#for ger in trends_ger.get_iterator():
# tags.append(ger['name'])
# saveTrends(ger['name'],dh.now())
# if i > n_trends: break
# i += 1
except Exception as inst:
pass
while result_cont < result_max:
	#print('Searching...\n')
	#print('This may take a while..\n')
tag_cont = 0
while tag_cont < len(tags):
r = twitter.request('search/tweets', {'q': tags[tag_cont]})
for item in r.get_iterator():
#tweet = 'ID: %d, Usuario: %s, texto: %s, Horario: %s, Criado: %s \n'%(item['id'],item['user']['screen_name'],item['text'],dh.now(),item['created_at'])
#print(item['text'])
try:
db.tweets.insert_one(
{
'_id':item['id'],
'id_user':item['user']['id'],
'name':item['user']['screen_name'],
'text':item['text'],
'hourGet':dh.now(),
'created_at':item['created_at'],
'location':item['user']['location'],
'retweets_count':item['retweet_count']
}
)
result_cont += 1
except Exception as inst:
#print(type(inst))
pass
tag_cont += 1
#print("%d tweets capturados"%result_cont)
if time.time() >= timeout_start + timeout:
break
#print('Results = %d \n'%(result_cont))
#print('Collection completed successfully! \n')
| gpl-3.0 | 1,860,964,976,097,334,500 | 21.115646 | 154 | 0.637958 | false | 2.440691 | false | false | false |
EmanueleLM/SAAP-Sequencial-Attacks-in-Adversarial-Patrolling | solvesaap.py | 1 | 19333 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 19 14:33:27 2017
@author: Emanuele
Solve SAAP() script:
takes as input a series of graphs encoded in a XML file filled up with an adjacency matrix A, and a description of each node
encoded as type of node (simple vertex or target), its value (0 if it's not a target, between 0 and 1
if it's a target) and a deadline (0 if it's a vertex, greater than zero and integer if it's a target)
"""
import pathfinder as pf
import numpy as np
import xml.etree.ElementTree as et
from xml.dom import minidom
import graph as gr
import os.path
import time
graphs_input_path = "C:\\Users\\Ga\\Desktop\\15_5_025\\"; # path to find the graphs' description
output_path = "C:\\Users\\Ga\\Desktop\\15_5_025\\results\\"; # path to store the output in pseudo-xml format
graphs = list(); # list that contains the name of each graph file in txt format
k = 1; # number of resources we want the solver solves for each instance of the graphs specified in graphs
# complete description of each tag in our xml file (intermediate aggregate file)
#
# .. TO_DO
#
graph_tags = list(['G', 'A', 'VERTICES', 'V0', 'T0', 'NUM_V', 'NUM_T', 'DENSITY', 'TOPOLOGY']);
other_tags = list(['K', 'PATH', 'COVERED', 'LOST', 'HISTORY', 'UTILITY', 'EXEC_TIME', 'ROUTES']);
aggregate_filepath = "C:\\Users\\Ga\\Desktop\\"; # filepath to the aggregate (.dat) file
aggregate_output = "aggregate15_5_density013.dat"; # name of the aggregate file
aggregate_prefix = ['NAME', 'TOPOLOGY', 'NUM_V', 'NUM_T', 'K', 'EXEC_TIME', 'UTILITY', 'V0', 'DENSITY']; # prefix for the aggregate file: specifies each entry on that file
#==============================================================================
# function that invokes pathfinder for a given specification of the SAAP game
# and solves it for a given number of resources k
# creates a "dom"-like structure that is used to store all the salient elements of the saap solution
# takes as input
# the file, filepath, where there's the graph specification
# number of resources available to A
# returns a list of the files that contains the results of the various saap instances
#==============================================================================
def solveSAAP(filepath, k):
files = list();
G, vertices, density, topology = createGraphFromFile(filepath);
equilibrium_route = list();
equilibrium_utility = -np.inf;
equilibrium_history = list();
vertex_at_equilibrium = 0;
    start_time = time.time(); # start measuring the time of execution (we don't care about the small overhead since we don't start measuring it inside the true function; every instance will have the same (little) overhead)
for v in range(len(G.getVertices())):
partial_utility = 0;
partial_history = list();
partial_route = list();
for t in G.getTargets():
partial_time = time.time();
u, route, hist = pf.PathFinder(G, v, t, k); # solve the game for a specific instance with a given number of resources 'k' for the Attacker
print(u, route, hist);
print("Partial time for a (v,t) processing: ", (time.time() - partial_time));
            if u < partial_utility: # if a given instance is worse for the Defender, A chooses that instance
partial_history = hist;
partial_route = route;
partial_utility = u;
if partial_utility > equilibrium_utility: # among all the worst attacks, D chooses the best placement and the best response
equilibrium_history = partial_history;
equilibrium_route = partial_route;
equilibrium_utility = partial_utility;
vertex_at_equilibrium = v;
exec_time = (time.time() - start_time); # calculate execution time (little overhead introduced by returning of the function, still not important since we are facing an exponential problem)
print("Equilibrium vertex: ", vertex_at_equilibrium);
print("Equilibrium Path: ", equilibrium_route, "\nEquilibrium utility ", equilibrium_utility, "\nEquilibrium history ", equilibrium_history);
print("Execution time: ", exec_time); # write all the stuff to a file in a xml pseudo-format
g_tags = list();
o_tags = list();
root = et.Element("ROOT");
g_tags.append(et.SubElement(root, graph_tags[0])); # G (graph) is the first child node of ROOT
for j in range(1,len(graph_tags)):
g_tags.append(et.SubElement(g_tags[0], graph_tags[j])); # every element of the graph is a subelement of the graph itself
for j in range(len(other_tags)):
o_tags.append(et.SubElement(root, other_tags[j]));
# follow the order in graph_tags to see what's the content of each of the following element
g_tags[1].text = str(list(G.getAdjacencyMatrix())); # adjacency matrix
g_tags[2].text = str(vertices); # specification of each vertex
g_tags[3].text = str(vertex_at_equilibrium); # vertex at the equilibrium
g_tags[4].text = str(t); # initial target
g_tags[5].text = str(len(vertices)); # number of vertices on the graph
g_tags[6].text = str(len(G.getTargets())); # number of targets on the graph
g_tags[7].text = str(G.getDensity()); # edge density
g_tags[8].text = topology; # topology of the graph
# follow the order in other_tags to see what's the content of each of the following element
o_tags[0].text = str(k+1); # number of resources
# fill this section up with the other o_tags
# o_tags[1].text =
# o_tags[2].text =
# o_tags[3].text =
# ...
o_tags[1].text = str(equilibrium_route);
o_tags[4].text = str(equilibrium_history);
o_tags[5].text = str(equilibrium_utility); # execution time
o_tags[6].text = str(exec_time); # execution time
#o_tags[7].text = str(routes); # list of all the routes generated by the saap instance
tree = et.ElementTree(root);
files.append(output_path+"topology_"+topology+"_vertices_"+str(len(G.getVertices()))+"_density_"+str(G.getDensity())+"_resources_"+str(k+1)+"_salt_"+filepath[-5:]);
tree.write(files[-1]); # write on file
return files;
#==============================================================================
# function that invokes pathfinder for a given specification of the SAAP game
# and solves it for a given number of resources k
#
# This is the fast version of the solveSAAP function since, for each vertex, it solves the game
# for just one random initial target under attack: this obviously gives a pessimistic estimate of the Attacker's utility, so please note that before using this function
# The way it extracts the utility and equilibrium path is the same; what changes is the execution time, which is an
# estimate of the real time (it just multiplies the average of the execution time of each instance to obtain the total time)
#
# creates a "dom"-like structure that is used to store all the salient elements of the saap solution
# takes as input
# the file, filepath, where there's the graph specification
# number of resources available to A
# returns a list of the files that contains the results of the various saap instances
#==============================================================================
def fastSolveSAAP(filepath, k):
files = list();
G, vertices, density, topology = createGraphFromFile(filepath);
equilibrium_route = list();
equilibrium_utility = -np.inf;
equilibrium_history = list();
vertex_at_equilibrium = 0;
    start_time = time.time(); # start measuring the time of execution (we don't care about the small overhead since we don't start measuring it inside the true function; every instance will have the same (little) overhead)
for v in range(len(G.getVertices())):
partial_utility = 0;
partial_history = list();
partial_route = list();
partial_time = time.time();
t = G.getTargets()[np.random.randint(len(G.getTargets()))]; # random target
u, route, hist = pf.PathFinder(G, v, t, k); # solve the game for a specific instance with a given number of resources 'k' for the Attacker
print(u, route, hist);
print("Partial time for a (v,t) processing: ", (time.time() - partial_time));
        if u < partial_utility: # if a given instance is worse for the Defender, A chooses that instance
partial_history = hist;
partial_route = route;
partial_utility = u;
if partial_utility > equilibrium_utility: # among all the worst attacks, D chooses the best placement and the best response
equilibrium_history = partial_history;
equilibrium_route = partial_route;
equilibrium_utility = partial_utility;
vertex_at_equilibrium = v;
exec_time = len(G.getTargets())*(time.time() - start_time); # ESTIMATE the execution time by multiplying what we've spent so far to the number of targets
print("Equilibrium vertex: ", vertex_at_equilibrium);
print("Equilibrium Path: ", equilibrium_route, "\nEquilibrium utility ", equilibrium_utility, "\nEquilibrium history ", equilibrium_history);
print("Execution time: ", exec_time); # write all the stuff to a file in a xml pseudo-format
g_tags = list();
o_tags = list();
root = et.Element("ROOT");
g_tags.append(et.SubElement(root, graph_tags[0])); # G (graph) is the first child node of ROOT
for j in range(1,len(graph_tags)):
g_tags.append(et.SubElement(g_tags[0], graph_tags[j])); # every element of the graph is a subelement of the graph itself
for j in range(len(other_tags)):
o_tags.append(et.SubElement(root, other_tags[j]));
# follow the order in graph_tags to see what's the content of each of the following element
g_tags[1].text = str(list(G.getAdjacencyMatrix())); # adjacency matrix
g_tags[2].text = str(vertices); # specification of each vertex
g_tags[3].text = str(vertex_at_equilibrium); # vertex at the equilibrium
g_tags[4].text = str(t); # initial target
g_tags[5].text = str(len(vertices)); # number of vertices on the graph
g_tags[6].text = str(len(G.getTargets())); # number of targets on the graph
g_tags[7].text = str(G.getDensity()); # edge density
g_tags[8].text = topology; # topology of the graph
# follow the order in other_tags to see what's the content of each of the following element
o_tags[0].text = str(k+1); # number of resources
# fill this section up with the other o_tags
# o_tags[1].text =
# o_tags[2].text =
# o_tags[3].text =
# ...
o_tags[1].text = str(equilibrium_route);
o_tags[4].text = str(equilibrium_history);
o_tags[5].text = str(equilibrium_utility); # execution time
o_tags[6].text = str(exec_time); # execution time
#o_tags[7].text = str(routes); # list of all the routes generated by the saap instance
tree = et.ElementTree(root);
files.append(output_path+"topology_"+topology+"_vertices_"+str(len(G.getVertices()))+"_density_"+str(G.getDensity())+"_resources_"+str(k+1)+"_salt_"+filepath[-5:]);
tree.write(files[-1]); # write on file
return files;
#==============================================================================
# function that creates a graph G from a file that specifies, first, the adjacency matrix,
# the initial vertex v, the first target under attack t and how the graph is structured (vertices, targets, their values and deadlines..)
# the format of the file is the following and in this order:
# adjacency matrix A specified as [[1,0],[0,1]]
# a list of each vertex characteristic as [vertex/target, value, deadline] where vertex=0, target=1
# , value is a real number in [0,1](0 is for vertices)
# , deadline is a natural number (0 for vertices, any other for targets)
# , e.g. [0,0,0] --> vertex, [1, 0.5, 10] --> target with 0.5 as value, 10 as deadline
# , an example of a 3*3 vertices' specification is [[0,0,0],[1,1,4],[1,0.3,5]]
# edge density defined as density = 2|E|/|V|(|V|-1)
## topology of the graph, the possible choices are {'graph', 'line', 'star', 'crique', ..}
# all this stuff must be encoded in a pseudo-xml format (just to be a little more polite and clean)
# even if you can find an example of a pseudo-xml graph in the repo on github, here's one:
# <G>
# <A>[[1,1,1],[1,1,1],[1,1,1]]</A>
# <V>[[1,0.3,3],[0,0,0],[1,0.8,12]]</V>
# <DENSITY>0.3</DENSITY>
# <TOPOLOGY>graph</TOPOLOGY>
# </G>
# the previous example specifies a fully connected graph with 3 vertices, 2 targets (index 0 and 2) and a vertex (index 1)
# the density is set to 0.3
# the topology of the graph ('graph' if it's not a specific topology; 'crique', 'line', 'star' etc. otherwise)
# the function returns
# a graph G,
# the vertices that compose the graph (each one specifies if it's a vertex or a target, its value and its deadline)
# the density of the graph
# the topology of the graph
#==============================================================================
def createGraphFromFile(filepath):
# elements_check = ["A", "V", "DENSITY", "TOPOLOGY"]; # elements to check if all the graph's elements are present in the file
tree = et.parse(filepath);
root = tree.getroot();
# create the empty Graph and the adjacency matrix by parsing the file (using the eval function :P bad bad bad)
adj_matrix = np.array(eval(root[0].text));
vertices = np.array(eval(root[1].text));
V = list();
# for each vertex create the graph G
for v in vertices:
V = np.append(V, gr.Vertex(int(v[0]), float(v[1]), int(v[2])));
G = gr.Graph(np.array(V));
n = 0;
for v in vertices:
G.setAdjacents(V[n], np.array(adj_matrix[n]));
n += 1;
return [G, vertices, float(root[2].text), root[3].text]; # return the graph, the vertices, the density, the topology
#==============================================================================
# function that, given a xml result coming from a saap solution, prints the whole xml file on screen
# takes as input the filepath of the xml file
# returns none
# please note that if verbose is set to True it will print out all the routes generated (usually a lot)
# otherwise it does not print them
#==============================================================================
def printSaapDOM(filepath, verbose):
root = et.parse(filepath).getroot();
for j in root[0]:
print(j.tag);
print(j.text, "\n");
if verbose:
nop = len(root);
else:
nop = -1;
for i in root[1:nop]:
print(i.tag);
print(i.text, "\n");
#==============================================================================
# function that "prettifies" the output
# takes as input the element in ElementTree to be prettyfied
# returns the string prettified
#==============================================================================
def prettify(elem):
rough_string = et.tostring(elem, 'utf-8');
reparsed = minidom.parseString(rough_string);
return reparsed.toprettyxml(indent="\t");
#==============================================================================
# function that returns the root of the xml file, given the path of the xml file
# it takes as input the xml file
# it returns the root element of the file
#==============================================================================
def getRootElement(filepath):
return et.parse(filepath).getroot();
#==============================================================================
# function that turns a xml file into aggregate data, useful to plot the data
# takes as input the result of a saap instance as filepath + filename
# returns a new row for the aggregate.dat file that is composed in this way:
# filename topology num_nodes num_targets resources exec_time utility initial_vertex density
#==============================================================================
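# Example of one returned row (all values invented, for illustration only):
#   ['topology_graph_vertices_15_density_0.25_resources_2_salt_1.xml',
#    'graph', '15', '5', '2', '3600.2', '-0.35', '4', '0.25']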
def fromXml2Aggregate(filepath, filename):
data_to_find = ['TOPOLOGY', 'NUM_V', 'NUM_T', 'K', 'EXEC_TIME', 'UTILITY', 'V0', 'DENSITY'];
result = list([filename]);
root = et.parse(filepath+filename).getroot();
for i in data_to_find:
if root[0].find(str(i)) != None:
result.append(root[0].find(i).text);
else:
if root.find(i) != None:
result.append(root.find(i).text);
else:
result.append('None');
return result;
#==============================================================================
# function that creates from a graph specification a string that is used to feed the
# function that create the aggregate file from the various xml instances of saaps
# takes as input
# file, which is the filename (filepath+filename)
# returns
# the filename of the xml file to be used to feed the aggregate file
# the salt used to distinguish between graphs with same features but different topologies
#==============================================================================
def fromGraphToXmlName(file):
G, vertices, density, topology = createGraphFromFile(file);
filename = "topology_"+topology+"_vertices_"+str(len(G.getVertices()))+"_density_"+str(G.getDensity())+"_resources_"+str(2)+"_salt_"+file[-5:];
return filename;
"""
Little testing to see if the algorithms work as expected
"""
verbose = True; # this variable controls whether the output is printed
if verbose:
# extract elements from the graph file
for inputgraph in os.listdir(graphs_input_path):
if inputgraph=="results": # skipt the folder with the results
continue;
[printSaapDOM(i, True) for i in solveSAAP(graphs_input_path+inputgraph, 1)]; # solve all the SAAP instances in a given directory for a specified number of resources
if not(os.path.isfile(aggregate_filepath + aggregate_output)): # if the file does not exists, create it with the prefix
prefix = str();
for i in aggregate_prefix:
prefix += str(i)+'\t';
f = open(aggregate_filepath + aggregate_output, "w"); # create the file with the prefix
f.write(prefix + '\n');
else:
f = open(aggregate_filepath + aggregate_output, "a"); # open in appendix mode
# write all the results row by row, using the fromGraphToXmlName function as "feeder" to the fromXml2Aggregate function, plus the number of resources of a given instance
aggregatefilename = fromGraphToXmlName(graphs_input_path+inputgraph);
line = fromXml2Aggregate(output_path, aggregatefilename);
f.write(str(line)+'\n');
f.close(); # close the file | gpl-3.0 | 7,431,354,720,745,216,000 | 55.373887 | 240 | 0.605597 | false | 3.740906 | false | false | false |
Moguri/odin | src/combat/terrain.py | 1 | 3190 | import random
from panda3d.core import *
MAP_SIZE = 32
CELL_SIZE = 1
SEL_NONE = 0
SEL_CURS = 1 << 0
SEL_MOVE = 1 << 1
SEL_ATTK = 1 << 2
class Terrain(object):
# UNTESTED
# @classmethod
# def world_to_grid(cls, x, y, z):
# position = [x, y, z]
# half_size = MAP_SIZE / 2
# position[0] = int(position[0] * half_size + half_size) / CELL_SIZE
# position[1] = int(position[1] * half_size + half_size) / CELL_SIZE
#
# return position
@classmethod
def grid_to_world(cls, x, y, z):
position = [x, y, z]
position[0] = position[0] - MAP_SIZE/2 + CELL_SIZE / 2.0
position[1] = position[1] - MAP_SIZE/2 + CELL_SIZE / 2.0
return position
@classmethod
def get_random_tile(cls):
x = random.randint(0, MAP_SIZE-1)
y = random.randint(0, MAP_SIZE-1)
return [x, y, 0]
@classmethod
def _iterate_circle(cls, center, radius):
for y in range(center[1]-radius, center[1]+radius+1):
for x in range(center[0]-radius, center[0]+radius+1):
if Terrain.check_distance(radius, (x, y), center):
yield x, y
@classmethod
def check_distance(cls, range, p0, p1):
if abs(p1[0] - p0[0]) + abs(p1[1] - p0[1]) <= range:
return True
return False
@classmethod
def get_distance(cls, p0, p1):
return abs(p1[0] - p0[0]) + abs(p1[1] - p0[1])
@classmethod
def find_closest_in_range(cls, center, radius, target_pos):
closest = None
for x, y in Terrain._iterate_circle(center, radius):
if not closest:
closest = [x, y]
else:
cur_dist = Terrain.get_distance(closest, target_pos)
new_dist = Terrain.get_distance((x, y), target_pos)
if new_dist < cur_dist:
closest = [x, y]
return closest + [0]
def __init__(self):
# Load the environment model.
self.model = base.loader.loadModel("terrain")
# Reparent the model to render.
self.model.reparentTo(base.render)
# Load and set terrain shader
terrain_shader = Shader.load(Shader.SLGLSL, "shaders/basic.vs", "shaders/terrain.fs", "")
self.model.setShader(terrain_shader)
# Setup selection map
self.selection_texture = Texture()
self.selection_texture.set_compression(Texture.CMOff)
self.selection_texture.set_component_type(Texture.TUnsignedByte)
self.selection_texture.set_format(Texture.FRed)
self.model.setShaderInput("selection_map", self.selection_texture)
# Setup selection data
self.selection_image = PNMImage(MAP_SIZE, MAP_SIZE, 1)
def clear_selection(self):
self.selection_image.fill(SEL_NONE)
def set_cursor_selection(self, x, y):
self.selection_image.setXelVal(x, y, SEL_CURS)
def _display_range(self, center, radius, value):
for x, y in Terrain._iterate_circle(center, radius):
if x < 0 or x >= MAP_SIZE or y < 0 or y >= MAP_SIZE:
continue
old = self.selection_image.getGrayVal(x, y)
self.selection_image.setXelVal(x, y, old+value)
def display_move_range(self, player):
center = player.grid_position
radius = player.remaining_movement
self._display_range(center, radius, SEL_MOVE)
def display_attack_range(self, player):
center = player.grid_position
radius = player.range
self._display_range(center, radius, SEL_ATTK)
def update_selection(self):
self.selection_texture.load(self.selection_image) | apache-2.0 | 3,266,489,896,179,958,000 | 26.747826 | 91 | 0.676803 | false | 2.66055 | false | false | false |
nodakai/watchman | build/fbcode_builder/getdeps/buildopts.py | 1 | 17188 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import errno
import glob
import ntpath
import os
import subprocess
import sys
import tempfile
from .copytree import containing_repo_type
from .envfuncs import Env, add_path_entry
from .fetcher import get_fbsource_repo_data
from .manifest import ContextGenerator
from .platform import HostType, is_windows
try:
import typing # noqa: F401
except ImportError:
pass
def detect_project(path):
repo_type, repo_root = containing_repo_type(path)
if repo_type is None:
return None, None
# Look for a .projectid file. If it exists, read the project name from it.
project_id_path = os.path.join(repo_root, ".projectid")
try:
with open(project_id_path, "r") as f:
project_name = f.read().strip()
return repo_root, project_name
except EnvironmentError as ex:
if ex.errno != errno.ENOENT:
raise
return repo_root, None
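# For example (paths are illustrative): a checkout at /data/users/me/fbsource whose
# ".projectid" file contains "fbsource" yields ("/data/users/me/fbsource", "fbsource");
# a repository without a .projectid file yields (repo_root, None), and a path outside
# any repository yields (None, None).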
class BuildOptions(object):
def __init__(
self,
fbcode_builder_dir,
scratch_dir,
host_type,
install_dir=None,
num_jobs=0,
use_shipit=False,
vcvars_path=None,
allow_system_packages=False,
):
""" fbcode_builder_dir - the path to either the in-fbsource fbcode_builder dir,
or for shipit-transformed repos, the build dir that
has been mapped into that dir.
scratch_dir - a place where we can store repos and build bits.
This path should be stable across runs and ideally
should not be in the repo of the project being built,
but that is ultimately where we generally fall back
for builds outside of FB
install_dir - where the project will ultimately be installed
num_jobs - the level of concurrency to use while building
use_shipit - use real shipit instead of the simple shipit transformer
vcvars_path - Path to external VS toolchain's vsvarsall.bat
"""
if not num_jobs:
import multiprocessing
num_jobs = multiprocessing.cpu_count()
if is_windows():
# On Windows the cpu count tends to be the HT count.
# Running with that level of concurrency tends to
# swamp the system and make hard to perform other
# light work. Let's halve the number of cores here
# to win that back. The user can still specify a
# larger number if desired.
num_jobs = int(num_jobs / 2)
if not install_dir:
install_dir = os.path.join(scratch_dir, "installed")
self.project_hashes = None
for p in ["../deps/github_hashes", "../project_hashes"]:
hashes = os.path.join(fbcode_builder_dir, p)
if os.path.exists(hashes):
self.project_hashes = hashes
break
# Detect what repository and project we are being run from.
self.repo_root, self.repo_project = detect_project(os.getcwd())
# If we are running from an fbsource repository, set self.fbsource_dir
# to allow the ShipIt-based fetchers to use it.
if self.repo_project == "fbsource":
self.fbsource_dir = self.repo_root
else:
self.fbsource_dir = None
self.num_jobs = num_jobs
self.scratch_dir = scratch_dir
self.install_dir = install_dir
self.fbcode_builder_dir = fbcode_builder_dir
self.host_type = host_type
self.use_shipit = use_shipit
self.allow_system_packages = allow_system_packages
if vcvars_path is None and is_windows():
# On Windows, the compiler is not available in the PATH by
# default so we need to run the vcvarsall script to populate the
# environment. We use a glob to find some version of this script
# as deployed with Visual Studio 2017. This logic can also
# locate Visual Studio 2019 but note that at the time of writing
# the version of boost in our manifest cannot be built with
# VS 2019, so we're effectively tied to VS 2017 until we upgrade
# the boost dependency.
vcvarsall = []
for year in ["2017", "2019"]:
vcvarsall += glob.glob(
os.path.join(
os.environ["ProgramFiles(x86)"],
"Microsoft Visual Studio",
year,
"*",
"VC",
"Auxiliary",
"Build",
"vcvarsall.bat",
)
)
vcvars_path = vcvarsall[0]
self.vcvars_path = vcvars_path
@property
def manifests_dir(self):
return os.path.join(self.fbcode_builder_dir, "manifests")
def is_darwin(self):
return self.host_type.is_darwin()
def is_windows(self):
return self.host_type.is_windows()
def get_vcvars_path(self):
return self.vcvars_path
def is_linux(self):
return self.host_type.is_linux()
def get_context_generator(self, host_tuple=None, facebook_internal=None):
""" Create a manifest ContextGenerator for the specified target platform. """
if host_tuple is None:
host_type = self.host_type
elif isinstance(host_tuple, HostType):
host_type = host_tuple
else:
host_type = HostType.from_tuple_string(host_tuple)
# facebook_internal is an Optional[bool]
# If it is None, default to assuming this is a Facebook-internal build if
# we are running in an fbsource repository.
if facebook_internal is None:
facebook_internal = self.fbsource_dir is not None
return ContextGenerator(
{
"os": host_type.ostype,
"distro": host_type.distro,
"distro_vers": host_type.distrovers,
"fb": "on" if facebook_internal else "off",
"test": "off",
}
)
def compute_env_for_install_dirs(self, install_dirs, env=None, manifest=None):
if env is not None:
env = env.copy()
else:
env = Env()
env["GETDEPS_BUILD_DIR"] = os.path.join(self.scratch_dir, "build")
env["GETDEPS_INSTALL_DIR"] = self.install_dir
# On macOS we need to set `SDKROOT` when we use clang for system
# header files.
if self.is_darwin() and "SDKROOT" not in env:
sdkroot = subprocess.check_output(["xcrun", "--show-sdk-path"])
env["SDKROOT"] = sdkroot.decode().strip()
if self.fbsource_dir:
env["YARN_YARN_OFFLINE_MIRROR"] = os.path.join(
self.fbsource_dir, "xplat/third-party/yarn/offline-mirror"
)
yarn_exe = "yarn.bat" if self.is_windows() else "yarn"
env["YARN_PATH"] = os.path.join(
self.fbsource_dir, "xplat/third-party/yarn/", yarn_exe
)
node_exe = "node-win-x64.exe" if self.is_windows() else "node"
env["NODE_BIN"] = os.path.join(
self.fbsource_dir, "xplat/third-party/node/bin/", node_exe
)
env["RUST_VENDORED_CRATES_DIR"] = os.path.join(
self.fbsource_dir, "third-party/rust/vendor"
)
hash_data = get_fbsource_repo_data(self)
env["FBSOURCE_HASH"] = hash_data.hash
env["FBSOURCE_DATE"] = hash_data.date
lib_path = None
if self.is_darwin():
lib_path = "DYLD_LIBRARY_PATH"
elif self.is_linux():
lib_path = "LD_LIBRARY_PATH"
elif self.is_windows():
lib_path = "PATH"
else:
lib_path = None
for d in install_dirs:
bindir = os.path.join(d, "bin")
if not (
manifest and manifest.get("build", "disable_env_override_pkgconfig")
):
pkgconfig = os.path.join(d, "lib/pkgconfig")
if os.path.exists(pkgconfig):
add_path_entry(env, "PKG_CONFIG_PATH", pkgconfig)
pkgconfig = os.path.join(d, "lib64/pkgconfig")
if os.path.exists(pkgconfig):
add_path_entry(env, "PKG_CONFIG_PATH", pkgconfig)
if not (manifest and manifest.get("build", "disable_env_override_path")):
add_path_entry(env, "CMAKE_PREFIX_PATH", d)
# Allow resolving shared objects built earlier (eg: zstd
# doesn't include the full path to the dylib in its linkage
# so we need to give it an assist)
if lib_path:
for lib in ["lib", "lib64"]:
libdir = os.path.join(d, lib)
if os.path.exists(libdir):
add_path_entry(env, lib_path, libdir)
# Allow resolving binaries (eg: cmake, ninja) and dlls
# built by earlier steps
if os.path.exists(bindir):
add_path_entry(env, "PATH", bindir, append=False)
# If rustc is present in the `bin` directory, set RUSTC to prevent
# cargo uses the rustc installed in the system.
if self.is_windows():
cargo_path = os.path.join(bindir, "cargo.exe")
rustc_path = os.path.join(bindir, "rustc.exe")
rustdoc_path = os.path.join(bindir, "rustdoc.exe")
else:
cargo_path = os.path.join(bindir, "cargo")
rustc_path = os.path.join(bindir, "rustc")
rustdoc_path = os.path.join(bindir, "rustdoc")
if os.path.isfile(rustc_path):
env["CARGO_BIN"] = cargo_path
env["RUSTC"] = rustc_path
env["RUSTDOC"] = rustdoc_path
if self.is_windows():
libcrypto = os.path.join(d, "lib/libcrypto.lib")
else:
libcrypto = os.path.join(d, "lib/libcrypto.so")
openssl_include = os.path.join(d, "include/openssl")
if os.path.isfile(libcrypto) and os.path.isdir(openssl_include):
# This must be the openssl library, let Rust know about it
env["OPENSSL_DIR"] = d
return env
def list_win32_subst_letters():
output = subprocess.check_output(["subst"]).decode("utf-8")
# The output is a set of lines like: `F:\: => C:\open\some\where`
lines = output.strip().split("\r\n")
mapping = {}
for line in lines:
fields = line.split(": => ")
if len(fields) != 2:
continue
letter = fields[0]
path = fields[1]
mapping[letter] = path
return mapping
def find_existing_win32_subst_for_path(
path, # type: str
subst_mapping, # type: typing.Mapping[str, str]
):
# type: (...) -> typing.Optional[str]
path = ntpath.normcase(ntpath.normpath(path))
for letter, target in subst_mapping.items():
if ntpath.normcase(target) == path:
return letter
return None
def find_unused_drive_letter():
import ctypes
buffer_len = 256
blen = ctypes.c_uint(buffer_len)
rv = ctypes.c_uint()
bufs = ctypes.create_string_buffer(buffer_len)
rv = ctypes.windll.kernel32.GetLogicalDriveStringsA(blen, bufs)
if rv > buffer_len:
raise Exception("GetLogicalDriveStringsA result too large for buffer")
nul = "\x00".encode("ascii")
used = [drive.decode("ascii")[0] for drive in bufs.raw.strip(nul).split(nul)]
possible = [c for c in "ABCDEFGHIJKLMNOPQRSTUVWXYZ"]
available = sorted(list(set(possible) - set(used)))
if len(available) == 0:
return None
# Prefer to assign later letters rather than earlier letters
return available[-1]
def create_subst_path(path):
for _attempt in range(0, 24):
drive = find_existing_win32_subst_for_path(
path, subst_mapping=list_win32_subst_letters()
)
if drive:
return drive
available = find_unused_drive_letter()
if available is None:
raise Exception(
(
"unable to make shorter subst mapping for %s; "
"no available drive letters"
)
% path
)
# Try to set up a subst mapping; note that we may be racing with
# other processes on the same host, so this may not succeed.
try:
subprocess.check_call(["subst", "%s:" % available, path])
return "%s:\\" % available
except Exception:
print("Failed to map %s -> %s" % (available, path))
raise Exception("failed to set up a subst path for %s" % path)
def _check_host_type(args, host_type):
if host_type is None:
host_tuple_string = getattr(args, "host_type", None)
if host_tuple_string:
host_type = HostType.from_tuple_string(host_tuple_string)
else:
host_type = HostType()
assert isinstance(host_type, HostType)
return host_type
def setup_build_options(args, host_type=None):
""" Create a BuildOptions object based on the arguments """
fbcode_builder_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
scratch_dir = args.scratch_path
if not scratch_dir:
# TODO: `mkscratch` doesn't currently know how best to place things on
# sandcastle, so whip up something reasonable-ish
if "SANDCASTLE" in os.environ:
if "DISK_TEMP" not in os.environ:
raise Exception(
(
"I need DISK_TEMP to be set in the sandcastle environment "
"so that I can store build products somewhere sane"
)
)
scratch_dir = os.path.join(
os.environ["DISK_TEMP"], "fbcode_builder_getdeps"
)
if not scratch_dir:
try:
scratch_dir = (
subprocess.check_output(
["mkscratch", "path", "--subdir", "fbcode_builder_getdeps"]
)
.strip()
.decode("utf-8")
)
except OSError as exc:
if exc.errno != errno.ENOENT:
# A legit failure; don't fall back, surface the error
raise
# This system doesn't have mkscratch so we fall back to
# something local.
munged = fbcode_builder_dir.replace("Z", "zZ")
for s in ["/", "\\", ":"]:
munged = munged.replace(s, "Z")
if is_windows() and os.path.isdir("c:/open"):
temp = "c:/open/scratch"
else:
temp = tempfile.gettempdir()
scratch_dir = os.path.join(temp, "fbcode_builder_getdeps-%s" % munged)
if not is_windows() and os.geteuid() == 0:
# Running as root; in the case where someone runs
# sudo getdeps.py install-system-deps
# and then runs as build without privs, we want to avoid creating
# a scratch dir that the second stage cannot write to.
# So we generate a different path if we are root.
scratch_dir += "-root"
if not os.path.exists(scratch_dir):
os.makedirs(scratch_dir)
if is_windows():
subst = create_subst_path(scratch_dir)
print(
"Mapping scratch dir %s -> %s" % (scratch_dir, subst), file=sys.stderr
)
scratch_dir = subst
else:
if not os.path.exists(scratch_dir):
os.makedirs(scratch_dir)
# Make sure we normalize the scratch path. This path is used as part of the hash
# computation for detecting if projects have been updated, so we need to always
# use the exact same string to refer to a given directory.
# But! realpath in some combinations of Windows/Python3 versions can expand the
# drive substitutions on Windows, so avoid that!
if not is_windows():
scratch_dir = os.path.realpath(scratch_dir)
host_type = _check_host_type(args, host_type)
return BuildOptions(
fbcode_builder_dir,
scratch_dir,
host_type,
install_dir=args.install_prefix,
num_jobs=args.num_jobs,
use_shipit=args.use_shipit,
vcvars_path=args.vcvars_path,
allow_system_packages=args.allow_system_packages,
)
| apache-2.0 | -3,480,103,009,987,581,000 | 36.610503 | 87 | 0.556726 | false | 4.038534 | true | false | false |
shirkey/macaroons-kopdar | create_the_token.py | 1 | 1563 | #!/usr/bin/env python
# encoding: utf-8
# START 1 OMIT
import macaroons
# a basic macaroon consists of three elements
# 1) the secret key known only to the credential authority (a web service or software)
secret = 'kopdar_python_rocks' # // HL
# 2) some interesting metadata about this macaroon (can be anything)
public = 'kopdar_members_only' # // HL
# 3) a URI/URL, possibly referencing a targeted web service (again, can be anything)
location = 'http://www.python.or.id/' # // HL
# END 1 OMIT
def get_macaroon():
servis_kopdar = macaroons.create(location, secret, public)
return servis_kopdar.serialize()
def get_secret():
return secret
if __name__ == "__main__":
# START 2 OMIT
# with these three arguments, we can now create the macaroon
servis_kopdar = macaroons.create(location, secret, public) # // HL
# we now hold a reference to our newly instantiated macaroon object
print(servis_kopdar)
# we can inspect the HMAC signature of this message
print('.signature: %s' % servis_kopdar.signature) # // HL
# or the other public metadata, like identifier or location
print('.identifier: %s' % servis_kopdar.identifier) # // HL
print('.location: %s' % servis_kopdar.location) # // HL
# or all the metadata + signature in a single call
print('.inspect():')
print servis_kopdar.inspect() # // HL
# finally, we can convert the macaroon object to a serialized form for transport
print '.serialize(): %s' % servis_kopdar.serialize() # // HL
# END 2 OMIT
get_macaroon()
| mit | -5,930,907,426,016,740,000 | 29.057692 | 86 | 0.678823 | false | 3.283613 | false | false | false |
husk00/pantaliQa | libs/pyata/src/basic_classes/connection.py | 1 | 3674 | ##########################################################
##########################################################
# description: abstract class that represents any Connection between boxes
#
# autor: jeraman
# date: 15/04/2010
##########################################################
##########################################################
from box import *
from time import *
memory_connections = []
#connects two generic boxes
def connect (b1, outlet, b2, inlet):
c = Connection(b1, outlet, b2, inlet)
return c.status
#disconnect a connection
def disconnect(b1, outlet, b2, inlet):
#procura a conexao
i = search_connection(b1, outlet, b2, inlet)
#se realmente existir
if i>-1:
return memory_connections[i].delete()
else:
return False
#searchs a generic connection
def search_connection (b1, outlet, b2, inlet):
i=0
#seraching for a specific box in memory
for c in memory_connections:
if (b1==c.box_orig) & (outlet==c.outlet) & (b2==c.box_dest) & (inlet==c.inlet):
return i
i+=1
#return -1 if not
if i==len(memory_connections):
return -1
class Connection:
canvas = " "
snd = ""
#constructor
def __init__(self, box_orig, outlet, box_dest, inlet):
self.box_orig = box_orig
self.outlet = outlet
self.box_dest = box_dest
self.inlet = inlet
self.status = self.create()
#creates a connection in Pd
def create(self):
b1 = search_box(self.box_orig)
b2 = search_box(self.box_dest)
if (b1 > -1) & (b2 > -1):
#get the state before inserting the connection
Connection.snd.save_state(Connection.canvas)
t1 = self.snd.get_file()
#try to build the connection
command = Connection.canvas + "connect " + str(b1) + " " + str(self.outlet) + " " + str(b2) + " " + str(self.inlet) + " ; "
Connection.snd.send_pd(command)
#get the state after insertin the connection
Connection.snd.save_state(Connection.canvas)
t2 = self.snd.get_file()
#verifies if changed
if t1 != t2:
memory_connections.append(self)
return True
else:
return False
#creates a connection in Pd
def delete(self):
b1 = search_box(self.box_orig)
b2 = search_box(self.box_dest)
if (b1 > -1) & (b2 > -1):
#get the state before removing the connection
Connection.snd.save_state(Connection.canvas)
t1 = self.snd.get_file()
#try to remove the connection
command = Connection.canvas + "disconnect " + str(b1) + " " + str(self.outlet) + " " + str(b2) + " " + str(self.inlet) + " ; "
Connection.snd.send_pd(command)
#get the state after removing the connection
Connection.snd.save_state(Connection.canvas)
t2 = self.snd.get_file()
#verifies if changed
if t1 != t2:
i=search_connection(self.box_orig, self.outlet, self.box_dest, self.inlet)
memory_connections.pop(i)
return True
else:
return False
#method that sets the canvas
@staticmethod
def set_canvas(nc):
Connection.canvas = nc
#method that sets the sender
@staticmethod
def set_sender(s):
Connection.snd = s
| gpl-2.0 | 8,921,326,554,972,775,000 | 27.929134 | 138 | 0.504899 | false | 3.87962 | false | false | false |
virtualopensystems/nova | nova/network/model.py | 1 | 15726 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import eventlet
import netaddr
import six
from nova import exception
from nova.i18n import _
from nova.openstack.common import jsonutils
def ensure_string_keys(d):
# http://bugs.python.org/issue4978
return dict([(str(k), v) for k, v in d.iteritems()])
# Constants for the 'vif_type' field in VIF class
VIF_TYPE_OVS = 'ovs'
VIF_TYPE_IVS = 'ivs'
VIF_TYPE_DVS = 'dvs'
VIF_TYPE_IOVISOR = 'iovisor'
VIF_TYPE_BRIDGE = 'bridge'
VIF_TYPE_802_QBG = '802.1qbg'
VIF_TYPE_802_QBH = '802.1qbh'
VIF_TYPE_MLNX_DIRECT = 'mlnx_direct'
VIF_TYPE_MIDONET = 'midonet'
VIF_TYPE_VHOSTUSER = 'vhostuser'
VIF_TYPE_OTHER = 'other'
# Constants for dictionary keys in the 'vif_details' field in the VIF
# class
VIF_DETAIL_PORT_FILTER = 'port_filter'
VIF_DETAIL_OVS_HYBRID_PLUG = 'ovs_hybrid_plug'
VIF_DETAILS_PHYSICAL_NETWORK = 'physical_network'
# Constants for the 'vif_model' values
VIF_MODEL_VIRTIO = 'virtio'
VIF_MODEL_NE2K_PCI = 'ne2k_pci'
VIF_MODEL_PCNET = 'pcnet'
VIF_MODEL_RTL8139 = 'rtl8139'
VIF_MODEL_E1000 = 'e1000'
VIF_MODEL_E1000E = 'e1000e'
VIF_MODEL_NETFRONT = 'netfront'
VIF_MODEL_SPAPR_VLAN = 'spapr-vlan'
# Constant for max length of network interface names
# eg 'bridge' in the Network class or 'devname' in
# the VIF class
NIC_NAME_LEN = 14
class Model(dict):
"""Defines some necessary structures for most of the network models."""
def __repr__(self):
return self.__class__.__name__ + '(' + dict.__repr__(self) + ')'
def _set_meta(self, kwargs):
# pull meta out of kwargs if it's there
self['meta'] = kwargs.pop('meta', {})
# update meta with any additional kwargs that may exist
self['meta'].update(kwargs)
def get_meta(self, key, default=None):
"""calls get(key, default) on self['meta']."""
return self['meta'].get(key, default)
class IP(Model):
"""Represents an IP address in Nova."""
def __init__(self, address=None, type=None, **kwargs):
super(IP, self).__init__()
self['address'] = address
self['type'] = type
self['version'] = kwargs.pop('version', None)
self._set_meta(kwargs)
# determine version from address if not passed in
if self['address'] and not self['version']:
try:
self['version'] = netaddr.IPAddress(self['address']).version
except netaddr.AddrFormatError:
msg = _("Invalid IP format %s") % self['address']
raise exception.InvalidIpAddressError(msg)
def __eq__(self, other):
keys = ['address', 'type', 'version']
return all(self[k] == other[k] for k in keys)
def __ne__(self, other):
return not self.__eq__(other)
def is_in_subnet(self, subnet):
if self['address'] and subnet['cidr']:
return (netaddr.IPAddress(self['address']) in
netaddr.IPNetwork(subnet['cidr']))
else:
return False
@classmethod
def hydrate(cls, ip):
if ip:
return cls(**ensure_string_keys(ip))
return None
class FixedIP(IP):
"""Represents a Fixed IP address in Nova."""
def __init__(self, floating_ips=None, **kwargs):
super(FixedIP, self).__init__(**kwargs)
self['floating_ips'] = floating_ips or []
if not self['type']:
self['type'] = 'fixed'
def add_floating_ip(self, floating_ip):
if floating_ip not in self['floating_ips']:
self['floating_ips'].append(floating_ip)
def floating_ip_addresses(self):
return [ip['address'] for ip in self['floating_ips']]
@staticmethod
def hydrate(fixed_ip):
fixed_ip = FixedIP(**ensure_string_keys(fixed_ip))
fixed_ip['floating_ips'] = [IP.hydrate(floating_ip)
for floating_ip in fixed_ip['floating_ips']]
return fixed_ip
def __eq__(self, other):
keys = ['address', 'type', 'version', 'floating_ips']
return all(self[k] == other[k] for k in keys)
def __ne__(self, other):
return not self.__eq__(other)
class Route(Model):
"""Represents an IP Route in Nova."""
def __init__(self, cidr=None, gateway=None, interface=None, **kwargs):
super(Route, self).__init__()
self['cidr'] = cidr
self['gateway'] = gateway
self['interface'] = interface
self._set_meta(kwargs)
@classmethod
def hydrate(cls, route):
route = cls(**ensure_string_keys(route))
route['gateway'] = IP.hydrate(route['gateway'])
return route
class Subnet(Model):
"""Represents a Subnet in Nova."""
def __init__(self, cidr=None, dns=None, gateway=None, ips=None,
routes=None, **kwargs):
super(Subnet, self).__init__()
self['cidr'] = cidr
self['dns'] = dns or []
self['gateway'] = gateway
self['ips'] = ips or []
self['routes'] = routes or []
self['version'] = kwargs.pop('version', None)
self._set_meta(kwargs)
if self['cidr'] and not self['version']:
self['version'] = netaddr.IPNetwork(self['cidr']).version
def __eq__(self, other):
keys = ['cidr', 'dns', 'gateway', 'ips', 'routes', 'version']
return all(self[k] == other[k] for k in keys)
def __ne__(self, other):
return not self.__eq__(other)
def add_route(self, new_route):
if new_route not in self['routes']:
self['routes'].append(new_route)
def add_dns(self, dns):
if dns not in self['dns']:
self['dns'].append(dns)
def add_ip(self, ip):
if ip not in self['ips']:
self['ips'].append(ip)
def as_netaddr(self):
"""Convience function to get cidr as a netaddr object."""
return netaddr.IPNetwork(self['cidr'])
@classmethod
def hydrate(cls, subnet):
subnet = cls(**ensure_string_keys(subnet))
subnet['dns'] = [IP.hydrate(dns) for dns in subnet['dns']]
subnet['ips'] = [FixedIP.hydrate(ip) for ip in subnet['ips']]
subnet['routes'] = [Route.hydrate(route) for route in subnet['routes']]
subnet['gateway'] = IP.hydrate(subnet['gateway'])
return subnet
class Network(Model):
"""Represents a Network in Nova."""
def __init__(self, id=None, bridge=None, label=None,
subnets=None, **kwargs):
super(Network, self).__init__()
self['id'] = id
self['bridge'] = bridge
self['label'] = label
self['subnets'] = subnets or []
self._set_meta(kwargs)
def add_subnet(self, subnet):
if subnet not in self['subnets']:
self['subnets'].append(subnet)
@classmethod
def hydrate(cls, network):
if network:
network = cls(**ensure_string_keys(network))
network['subnets'] = [Subnet.hydrate(subnet)
for subnet in network['subnets']]
return network
def __eq__(self, other):
keys = ['id', 'bridge', 'label', 'subnets']
return all(self[k] == other[k] for k in keys)
def __ne__(self, other):
return not self.__eq__(other)
class VIF8021QbgParams(Model):
"""Represents the parameters for a 802.1qbg VIF."""
def __init__(self, managerid, typeid, typeidversion, instanceid):
self['managerid'] = managerid
self['typeid'] = typeid
self['typeidversion'] = typeidversion
self['instanceid'] = instanceid
class VIF8021QbhParams(Model):
"""Represents the parameters for a 802.1qbh VIF."""
def __init__(self, profileid):
self['profileid'] = profileid
class VIF(Model):
"""Represents a Virtual Interface in Nova."""
def __init__(self, id=None, address=None, network=None, type=None,
details=None, devname=None, ovs_interfaceid=None,
qbh_params=None, qbg_params=None, active=False,
vhostuser_mode=None, vhostuser_path=None,
**kwargs):
super(VIF, self).__init__()
self['id'] = id
self['address'] = address
self['network'] = network or None
self['type'] = type
self['details'] = details or {}
self['devname'] = devname
self['ovs_interfaceid'] = ovs_interfaceid
self['qbh_params'] = qbh_params
self['qbg_params'] = qbg_params
self['active'] = active
self['vhostuser_path'] = vhostuser_path
self['vhostuser_mode'] = vhostuser_mode
self._set_meta(kwargs)
def __eq__(self, other):
keys = ['id', 'address', 'network', 'type', 'details', 'devname',
'ovs_interfaceid', 'qbh_params', 'qbg_params',
'active', 'vhostuser_path', 'vhostuser_mode']
return all(self[k] == other[k] for k in keys)
def __ne__(self, other):
return not self.__eq__(other)
def fixed_ips(self):
return [fixed_ip for subnet in self['network']['subnets']
for fixed_ip in subnet['ips']]
def floating_ips(self):
return [floating_ip for fixed_ip in self.fixed_ips()
for floating_ip in fixed_ip['floating_ips']]
def labeled_ips(self):
"""Returns the list of all IPs
The return value looks like this flat structure::
{'network_label': 'my_network',
'network_id': 'n8v29837fn234782f08fjxk3ofhb84',
'ips': [{'address': '123.123.123.123',
'version': 4,
'type: 'fixed',
'meta': {...}},
{'address': '124.124.124.124',
'version': 4,
'type': 'floating',
'meta': {...}},
{'address': 'fe80::4',
'version': 6,
'type': 'fixed',
'meta': {...}}]
"""
if self['network']:
# remove unnecessary fields on fixed_ips
ips = [IP(**ensure_string_keys(ip)) for ip in self.fixed_ips()]
for ip in ips:
# remove floating ips from IP, since this is a flat structure
# of all IPs
del ip['meta']['floating_ips']
# add floating ips to list (if any)
ips.extend(self.floating_ips())
return {'network_label': self['network']['label'],
'network_id': self['network']['id'],
'ips': ips}
return []
def is_hybrid_plug_enabled(self):
return self['details'].get(VIF_DETAIL_OVS_HYBRID_PLUG, False)
def is_neutron_filtering_enabled(self):
return self['details'].get(VIF_DETAIL_PORT_FILTER, False)
def get_physical_network(self):
phy_network = self['network']['meta'].get('physical_network')
if not phy_network:
phy_network = self['details'].get(VIF_DETAILS_PHYSICAL_NETWORK)
return phy_network
@classmethod
def hydrate(cls, vif):
vif = cls(**ensure_string_keys(vif))
vif['network'] = Network.hydrate(vif['network'])
return vif
def get_netmask(ip, subnet):
"""Returns the netmask appropriate for injection into a guest."""
if ip['version'] == 4:
return str(subnet.as_netaddr().netmask)
return subnet.as_netaddr()._prefixlen
class NetworkInfo(list):
"""Stores and manipulates network information for a Nova instance."""
# NetworkInfo is a list of VIFs
def fixed_ips(self):
"""Returns all fixed_ips without floating_ips attached."""
return [ip for vif in self for ip in vif.fixed_ips()]
def floating_ips(self):
"""Returns all floating_ips."""
return [ip for vif in self for ip in vif.floating_ips()]
@classmethod
def hydrate(cls, network_info):
if isinstance(network_info, six.string_types):
network_info = jsonutils.loads(network_info)
return cls([VIF.hydrate(vif) for vif in network_info])
def json(self):
return jsonutils.dumps(self)
def wait(self, do_raise=True):
"""A no-op method.
This is useful to avoid type checking when NetworkInfo might be
subclassed with NetworkInfoAsyncWrapper.
"""
pass
class NetworkInfoAsyncWrapper(NetworkInfo):
"""Wrapper around NetworkInfo that allows retrieving NetworkInfo
in an async manner.
This allows one to start querying for network information before
you know you will need it. If you have a long-running
operation, this allows the network model retrieval to occur in the
background. When you need the data, it will ensure the async
operation has completed.
As an example:
def allocate_net_info(arg1, arg2)
return call_neutron_to_allocate(arg1, arg2)
network_info = NetworkInfoAsyncWrapper(allocate_net_info, arg1, arg2)
[do a long running operation -- real network_info will be retrieved
in the background]
[do something with network_info]
"""
def __init__(self, async_method, *args, **kwargs):
self._gt = eventlet.spawn(async_method, *args, **kwargs)
methods = ['json', 'fixed_ips', 'floating_ips']
for method in methods:
fn = getattr(self, method)
wrapper = functools.partial(self._sync_wrapper, fn)
functools.update_wrapper(wrapper, fn)
setattr(self, method, wrapper)
def _sync_wrapper(self, wrapped, *args, **kwargs):
"""Synchronize the model before running a method."""
self.wait()
return wrapped(*args, **kwargs)
def __getitem__(self, *args, **kwargs):
fn = super(NetworkInfoAsyncWrapper, self).__getitem__
return self._sync_wrapper(fn, *args, **kwargs)
def __iter__(self, *args, **kwargs):
fn = super(NetworkInfoAsyncWrapper, self).__iter__
return self._sync_wrapper(fn, *args, **kwargs)
def __len__(self, *args, **kwargs):
fn = super(NetworkInfoAsyncWrapper, self).__len__
return self._sync_wrapper(fn, *args, **kwargs)
def __str__(self, *args, **kwargs):
fn = super(NetworkInfoAsyncWrapper, self).__str__
return self._sync_wrapper(fn, *args, **kwargs)
def __repr__(self, *args, **kwargs):
fn = super(NetworkInfoAsyncWrapper, self).__repr__
return self._sync_wrapper(fn, *args, **kwargs)
def wait(self, do_raise=True):
"""Wait for async call to finish."""
if self._gt is not None:
try:
# NOTE(comstud): This looks funky, but this object is
# subclassed from list. In other words, 'self' is really
# just a list with a bunch of extra methods. So this
# line just replaces the current list (which should be
# empty) with the result.
self[:] = self._gt.wait()
except Exception:
if do_raise:
raise
finally:
self._gt = None
| apache-2.0 | -2,359,084,919,034,627,000 | 32.177215 | 79 | 0.581012 | false | 3.830005 | false | false | false |
atria-soft/zeus | lutinMacro_zeus.py | 1 | 40436 | #!/usr/bin/python
import realog.debug as debug
import lutin.tools as tools
import os
import copy
list_of_known_type = [
["void", "void"],
["bool", "bool"],
["string", "etk::String"],
["uri", "etk::Uri"],
["path", "etk::Path"],
["int8", "int8_t"],
["int16", "int16_t"],
["int32", "int32_t"],
["int64", "int64_t"],
["uint8", "uint8_t"],
["uint16", "uint16_t"],
["uint32", "uint32_t"],
["uint64", "uint64_t"],
["float32", "float"],
["float64", "double"],
["vector:bool", "etk::Vector<bool>"],
["vector:string", "etk::Vector<etk::String>"],
["vector:int8", "etk::Vector<int8_t>"],
["vector:int16", "etk::Vector<int16_t>"],
["vector:int32", "etk::Vector<int32_t>"],
["vector:int64", "etk::Vector<int64_t>"],
["vector:uint8", "etk::Vector<uint8_t>"],
["vector:uint16", "etk::Vector<uint16_t>"],
["vector:uint32", "etk::Vector<uint32_t>"],
["vector:uint64", "etk::Vector<uint64_t>"],
["vector:float32", "etk::Vector<float>"],
["vector:float64", "etk::Vector<double>"],
["duration", "echrono::Duration"],
["time", "echrono::Time"],
["file", "zeus::File"],
["stream", "zeus::Stream"],
["json", "ejson::Object"],
["raw", "zeus::Raw"],
["ActionNotif", "zeus::ActionNotification<etk::String>"],
]
def get_list_type():
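	"""Return the list of basic IDL type names known by the generator (first column of list_of_known_type)."""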
out = []
for elem in list_of_known_type:
out.append(elem[0])
return out
def validate_type(data):
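	"""Return True when the IDL type is a known basic type or an object type of the form 'obj:xxx'."""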
if data in get_list_type():
return True
val = data.split(":")
if val[0] == "obj":
return True
return False
def zeus_object_to_dictionary(name):
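	"""Build a dictionary with the C++ names (class, proxy, register function, macro prefix)
	and the generated file paths for an IDL object name given as a string (e.g. "aaa-bbb")
	or as a list of words; the last word gets its first letter capitalized."""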
out = {}
if type(name) == str:
name = name.split("-")
debug.debug("transform: " + str(name))
# set first capital of the class name
if len(name) != 0:
name[-1] = capital_first(name[-1])
out["namespace"] = ""
for elem in name[:-1]:
out["namespace"] += elem + "::"
out["name_class"] = out["namespace"] + name[-1]
out["name_class_short"] = name[-1]
out["name_class_proxy"] = out["namespace"] + "Proxy" + name[-1]
out["name_class_proxy_short"] = "Proxy" + name[-1]
out["name_class_register"] = out["namespace"] + "register" + name[-1]
out["name_class_register_short"] = "register" + name[-1]
out["name_class_macro"] = ""
for elem in name:
out["name_class_macro"] += elem.upper() + "_"
base_path = ""
for elem in name[:-1]:
base_path += elem + "/"
out["file_name_class_src"] = base_path + name[-1] + ".cpp";
out["file_name_class_header"] = base_path + name[-1] + ".hpp"
out["file_name_class_proxy_src"] = base_path + "Proxy" + name[-1] + ".cpp";
out["file_name_class_proxy_header"] = base_path + "Proxy" + name[-1] + ".hpp"
out["file_name_class_register_src"] = base_path + "register" + name[-1] + ".cpp";
out["file_name_class_register_header"] = base_path + "register" + name[-1] + ".hpp"
debug.debug(" class name : " + out["name_class"])
debug.debug(" class Proxy name : " + out["name_class_proxy"])
debug.debug(" path class name src : " + out["file_name_class_src"])
debug.debug(" path class name header : " + out["file_name_class_header"])
debug.debug(" path class Proxy name src : " + out["file_name_class_proxy_src"])
debug.debug(" path class Proxy name header : " + out["file_name_class_proxy_header"])
debug.debug(" path class Proxy name src : " + out["file_name_class_register_src"])
debug.debug(" path class Proxy name header : " + out["file_name_class_register_header"])
return out
def convert_type_in_cpp(data, proxy=False, argument=False):
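	"""Convert an IDL type name into the corresponding C++ type.
	For 'obj:' types the result depends on the side: return values on the proxy side and
	arguments on the service side use the Proxy class, the other combinations use an
	ememory::SharedPtr of the abstract class."""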
for elem in list_of_known_type:
if data == elem[0]:
return elem[1]
val = data.split(":")
if val[0] == "obj":
prop = zeus_object_to_dictionary(val[1])
if proxy == True:
if argument == False:
return prop["name_class_proxy"]
else:
return "ememory::SharedPtr<" + prop["name_class"] + ">"
else:
if argument == True:
return prop["name_class_proxy"]
else:
return "ememory::SharedPtr<" + prop["name_class"] + ">"
debug.error(" can not find type in IDL : '" + data + "'")
def remove_start_stop_spacer(data):
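	"""Return a copy of the string without leading and trailing spaces or tabs."""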
dataout = copy.deepcopy(data)
while len(dataout) >= 1 \
and ( dataout[0] == " " \
or dataout[0] == "\t"):
dataout = dataout[1:]
while len(dataout) >= 1 \
and ( dataout[-1] == " " \
or dataout[-1] == "\t"):
dataout = dataout[:-1]
return dataout
def capital_first(data):
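	"""Return the string with its first character converted to upper case."""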
return data[0].upper() + data[1:]
class AttributeDefinition:
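	"""One attribute (property) parsed from an IDL file: a name, a type and a documentation brief,
	with the generators for its C++ property and proxy declarations."""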
def __init__(self):
self.name = "";
self.brief = "";
self.type = "";
def set_name(self, name):
self.name = remove_start_stop_spacer(name);
def set_brief(self, desc):
self.name = "";
self.brief = remove_start_stop_spacer(desc).replace("\"", "\\\"")
self.type = "";
def set_type(self, type):
self.type = remove_start_stop_spacer(type);
def display(self):
debug.info(" BRIEF: " + self.brief)
debug.info(" " + self.type + " " + self.name + ";")
def generate_cpp(self, space):
out = "";
out += space + "eproperty::Value<" + convert_type_in_cpp(self.type) + "> " + self.name + "; //!<" + self.brief + "\n"
out += space + "//! Internal interface to call property\n"
out += space + "virtual " + convert_type_in_cpp(self.type) + " _internalWrapperProperty_get" + capital_first(self.name) + "() {\n"
out += space + " return " + self.name + ".get();\n"
out += space + "}\n"
out += space + "//! Internal interface to call property\n"
out += space + "virtual void _internalWrapperProperty_set" + capital_first(self.name) + "(" + convert_type_in_cpp(self.type) + " _value) {\n"
out += space + " " + self.name + ".set(_value);\n"
out += space + "}\n"
return out;
def generate_hpp_proxy(self, space):
out = "";
out += space + "zeus::RemoteProperty<" + convert_type_in_cpp(self.type) + "> " + self.name + "; //!<" + self.brief + "\n"
return out;
def generate_cpp_proxy(self, space, class_name):
out = "";
return out;
class FunctionDefinition:
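	"""One function parsed from an IDL file: name, documentation, return type, parameters and
	optional action notification type, with the generators for its C++ and proxy declarations."""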
def __init__(self):
self.name = ""
self.brief = ""
self.action_type = "void_tmp"
self.return_type = ""
self.return_brief = ""
self.parameters = []
self.is_action = False
def set_action(self, type):
self.action_type = remove_start_stop_spacer(type)
self.is_action = True
def set_function_name(self, name):
self.name = remove_start_stop_spacer(name);
def set_brief(self, desc):
self.name = "";
self.brief = remove_start_stop_spacer(desc).replace("\"", "\\\"")
self.return_type = "";
self.return_brief = "";
self.parameters = []
def add_param_comment(self, name, desc):
for elem in self.parameters:
if elem["name"] == "" \
and elem["brief"] == "":
elem["name"] = remove_start_stop_spacer(name)
elem["brief"] = remove_start_stop_spacer(desc).replace("\"", "\\\"")
return;
self.parameters.append({
"type":"",
"name":remove_start_stop_spacer(name),
"brief":remove_start_stop_spacer(desc).replace("\"", "\\\"")
})
def set_return_comment(self, desc):
self.return_brief = remove_start_stop_spacer(desc)
def set_return_type(self, type):
self.return_type = remove_start_stop_spacer(type)
def add_parameter_type(self, type):
for elem in self.parameters:
if elem["type"] == "":
elem["type"] = remove_start_stop_spacer(type)
return;
self.parameters.append({
"type":remove_start_stop_spacer(type),
"name":"",
"brief":""
})
def display(self):
debug.info(" BRIEF: " + self.brief)
debug.info(" BRIEF-return: " + self.return_brief)
debug.info(" " + self.return_type + " " + self.name + "(")
for elem in self.parameters:
debug.info(" " + elem["type"] + " " + elem["name"] + ", # " + elem["brief"])
		if self.is_action == False:
			debug.info("         )")
		else:
			debug.info("         ) action/event type = '" + self.action_type + "'")
def generate_doxy(self, space):
# generate doxygen comment:
out = space + "/**\n"
if self.brief != "":
out += space + " * @brief " + self.brief + "\n"
for elem in self.parameters:
if elem["name"] == "" \
and elem["brief"] == "":
continue
out += space + " * @param[in] "
if elem["name"] != "":
out += elem["name"] + " "
if elem["brief"] != "":
out += elem["brief"] + " "
out += "\n"
if self.is_action == True:
out += space + " * @note: This is an action ==> it can notify of the progression of the call\n"
if self.return_brief != "":
out += space + " * @return " + self.return_brief + "\n"
out += space + " */\n"
return out
def generate_cpp(self, space, class_name="", virtual=True, action=False):
out = "";
out += self.generate_doxy(space)
out += space
if self.return_type != "":
if virtual == True:
out += "virtual "
out += convert_type_in_cpp(self.return_type, False, False) + " "
else:
out += "static ememory::SharedPtr<" + class_name + "> "
out += self.name + "("
param_data = ""
id_parameter = 0
if self.is_action == True:
param_data += "zeus::ActionNotification<" + convert_type_in_cpp(self.action_type, False, True) + ">& _notifs"
id_parameter += 1
for elem in self.parameters:
id_parameter += 1
if len(param_data) != 0:
param_data += ", "
param_data += convert_type_in_cpp(elem["type"], False, True) + " _"
if elem["name"] == "":
param_data += "no_name_param_" + str(id_parameter)
else:
param_data += elem["name"]
out += param_data
out += ")"
if self.return_type != "" \
and virtual == True:
out += " = 0"
out += ";\n"
return out;
def generate_hpp_proxy(self, space):
out = "";
out += self.generate_doxy(space)
out += space + "virtual zeus::Future<" + convert_type_in_cpp(self.return_type, True, False)
if self.action_type != "void_tmp":
out += "," + convert_type_in_cpp(self.action_type, True, False)
out += "> " + self.name + "("
param_data = ""
id_parameter = 0
for elem in self.parameters:
id_parameter += 1
if len(param_data) != 0:
param_data += ", "
param_data += "const " + convert_type_in_cpp(elem["type"], True, True) + "& _"
if elem["name"] == "":
param_data += "no_name_param_" + str(id_parameter)
else:
param_data += elem["name"]
out += param_data
out += ");\n"
return out;
def generate_cpp_proxy(self, space, class_name):
out = "";
out += space + "zeus::Future<" + convert_type_in_cpp(self.return_type, True, False)
if self.action_type != "void_tmp":
out += "," + convert_type_in_cpp(self.action_type, True, False)
out += "> " + class_name + "::" + self.name + "("
param_data = ""
id_parameter = 0
for elem in self.parameters:
id_parameter += 1
if len(param_data) != 0:
param_data += ", "
param_data += "const " + convert_type_in_cpp(elem["type"], True, True) + "& _"
if elem["name"] == "":
param_data += "no_name_param_" + str(id_parameter)
else:
param_data += elem["name"]
out += param_data
out += ") {\n"
space += " "
if self.is_action == True:
out += space + 'return m_obj.callAction("' + self.name + '"'
else:
out += space + 'return m_obj.call("' + self.name + '"'
id_parameter = 0
for elem in self.parameters:
id_parameter += 1
out += ", "
out += "_"
if elem["name"] == "":
out += "no_name_param_" + str(id_parameter)
else:
out += elem["name"]
out += ');\n'
out += "}\n"
space = space[:-1]
return out;
class ServiceDefinition:
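	"""Complete description of an IDL object/service: documentation, attributes, functions,
	factories, tools and imports, with the generators for the class, register and proxy files."""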
def __init__(self):
self.name = [""];
self.name_prop = {}
self.brief = "";
self.version = "";
self.api = "";
self.authors = []
self.attributes = []
self.functions = []
self.factories = []
self.tools = []
self.imports = []
self.licence_header = "/** @file\n"
self.licence_header += " * @note Generated file !!! Do not modify !!!\n"
self.licence_header += " * @license MPL-2\n"
self.licence_header += " * @copyright none\n"
self.licence_header += " */\n"
def set_name(self, value):
self.name = value
# TODO : Check range ...
self.prop = zeus_object_to_dictionary(self.name)
def set_brief(self, value):
self.brief = remove_start_stop_spacer(value).replace("\"", "\\\"")
def set_version(self, value):
self.version = remove_start_stop_spacer(value).replace("\"", "\\\"")
def set_api(self, value):
self.api = remove_start_stop_spacer(value).replace("\"", "\\\"")
def add_author(self, value):
self.authors.append(remove_start_stop_spacer(value).replace("\"", "\\\""))
def add_factory(self, value):
# TODO : Check if function already exist
self.factories.append(value)
def add_tool(self, value):
# TODO : Check if function already exist
self.tools.append(value)
def add_function(self, value):
# TODO : Check if function already exist
self.functions.append(value)
def add_attribute(self, value):
# TODO : Check if attribute already exist
self.attributes.append(value)
def add_import(self, value):
self.imports.append(value)
def display(self):
debug.info("Display service definition : ")
debug.info(" name: " + str(self.name))
debug.info(" brief: '" + str(self.brief) + "'")
debug.info(" version: '" + str(self.version) + "'")
debug.info(" api: '" + str(self.api) + "'")
debug.info(" authors: '" + str(self.authors) + "'")
debug.info(" functions: ")
for elem in self.functions:
elem.display();
##
## CLASS.hpp
##
def generate_header(self):
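		"""Generate the abstract class header of the object; return [file name, file content]."""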
out = ""
# TODO: add global header:
out += self.licence_header
out += "#pragma once\n"
out += "\n"
out += "#include <etk/types.hpp>\n"
out += "#include <eproperty/Value.hpp>\n"
out += "#include <zeus/Raw.hpp>\n"
out += "#include <etk/uri/uri.hpp>\n"
out += "#include <etk/String.hpp>\n"
out += "#include <etk/Vector.hpp>\n"
out += "#include <ememory/memory.hpp>\n"
out += "#include <zeus/ActionNotification.hpp>\n"
for elem in self.imports:
prop = zeus_object_to_dictionary(elem)
out += "#include <" + prop["file_name_class_header"] + ">\n"
out += "#include <" + prop["file_name_class_proxy_header"] + ">\n"
out += "\n"
space = ""
for elem in self.name[:-1]:
out += space + "namespace " + elem + " {\n"
space += " "
out += space + "class " + self.prop["name_class_proxy_short"] + ";\n"
out += space + " /**\n"
if self.brief != "":
out += space + " * @brief " + self.brief + " \n"
if self.version != "":
out += space + " * version:" + self.version + "\n"
if self.api != "":
out += space + " * api:" + self.api + "\n"
for elem in self.authors:
out += space + " * authors:" + elem + "\n"
out += space + " */\n"
out += space + "class " + self.prop["name_class_short"] + " {\n"
space += " "
out += space + "public:\n"
space += " "
if len(self.factories) == 0:
out += space + "/**\n"
out += space + " * @brief generic factory, pay attention when set arguments...\n"
out += space + " */\n"
out += space + "template<typename ... ZEUS_OBJECT_CREATE>\n"
out += space + "static ememory::SharedPtr<" + self.prop["name_class"] + "> create(ZEUS_OBJECT_CREATE ...);\n"
else:
for elem in self.factories:
out += elem.generate_cpp(space, self.prop["name_class"])
out += space + "/**\n"
out += space + " * @brief Generic virtual destructor\n"
out += space + " */\n"
out += space + "virtual ~" + self.prop["name_class_short"] + "() = default;\n"
for elem in self.attributes:
out += elem.generate_cpp(space)
for elem in self.functions:
out += elem.generate_cpp(space)
space = space[:-2]
out += space + "};\n"
# now we simply add tools provided:
for elem in self.tools:
out += elem.generate_cpp(space, virtual=False)
for elem in self.name[:-1]:
space = space[:-1]
out += space + "}\n"
return [self.prop["file_name_class_header"], out]
##
## CLASS.cpp
##
def generate_source(self):
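		"""Generate the source file declaring the zeus parameter and future specializations for this object; return [file name, file content]."""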
out = ""
out += self.licence_header
out += "\n"
out += "#include <" + self.prop["file_name_class_register_header"] + ">\n"
out += "#include <" + self.prop["file_name_class_header"] + ">\n"
out += "#include <" + self.prop["file_name_class_proxy_header"] + ">\n"
out += "#include <etk/types.hpp>\n"
out += "#include <zeus/debug.hpp>\n"
out += "#include <zeus/message/Message.hpp>\n"
out += "#include <zeus/message/Data.hpp>\n"
out += "#include <zeus/message/ParamType.hpp>\n"
out += "#include <zeus/message/Parameter.hpp>\n"
out += "#include <zeus/Future.hpp>\n"
out += "#include <etk/stdTools.hpp>\n"
out += "#include <zeus/AbstractFunction.hpp>\n"
out += "#include <climits>\n"
out += "#include <etk/path/fileSystem.hpp>\n"
out += "#include <zeus/WebServer.hpp>\n"
out += "#include <zeus/Object.hpp>\n"
out += "\n"
		# now generate the get and set parameter object ...
out += "namespace zeus {\n"
out += " namespace message {\n"
out += " template<> const zeus::message::ParamType& createType<ememory::SharedPtr<" + self.prop["name_class"] + ">>() {\n"
out += " static zeus::message::ParamType type(\"obj:" + self.prop["name_class"] + "\", zeus::message::paramTypeObject, false, false);\n"
out += " return type;\n"
out += " }\n"
out += " \n"
out += " template<> const zeus::message::ParamType& createType<" + self.prop["name_class_proxy"] + ">() {\n"
out += " static zeus::message::ParamType type(\"obj:" + self.prop["name_class"] + "\", zeus::message::paramTypeObject, false, false);\n"
out += " return type;\n"
out += " }\n"
out += " \n"
out += " template<>\n"
out += " void Parameter::addParameter<ememory::SharedPtr<" + self.prop["name_class"] + ">>(uint16_t _paramId, const ememory::SharedPtr<" + self.prop["name_class"] + ">& _value) {\n"
out += " etk::Vector<uint8_t> data;\n"
"""
out += " addType(data, createType<" + class_name + ">());\n"
"""
out += " addTypeObject(data, \"obj:" + self.prop["name_class"] + "\");\n"
out += " int32_t currentOffset = data.size();\n"
out += " int32_t startOffset = data.size();\n"
out += " data.resize(data.size()+4);\n"
out += " uint32_t fullId = 0;\n"
		# convert the object into a real System Object ....
out += " if (m_iface != null) {\n"
out += " uint16_t id = m_iface->getAddress();\n"
out += " uint16_t idObj = m_iface->getNewObjectId();\n"
out += " ememory::SharedPtr<zeus::ObjectType<" + self.prop["name_class"] + ">> obj = ememory::makeShared<zeus::ObjectType<" + self.prop["name_class"] + ">>(m_iface, idObj, _value);\n"
out += " " + self.prop["name_class_register"] + "(*obj);\n"
out += " obj->addRemote(getDestination());\n"
out += " m_iface->addWebObj(obj);\n"
out += " ZEUS_DEBUG(\"Create object ID : \" << idObj);\n"
out += " fullId = (uint32_t(id)<<16)+idObj;\n"
out += " }\n"
		# return Object ID and interface address
out += " memcpy(&data[currentOffset], &fullId, 4);\n"
out += " m_parameter.pushBack(etk::makePair(startOffset,data));\n"
out += " }\n"
out += " \n"
out += " template<>\n"
out += " " + self.prop["name_class_proxy"] + " Parameter::getParameter<" + self.prop["name_class_proxy"] + ">(int32_t _id) const {\n"
out += " ememory::SharedPtr<zeus::ObjectRemoteBase> out;\n"
out += " out = zeus::message::Parameter::getParameter<ememory::SharedPtr<zeus::ObjectRemoteBase>>(_id);\n"
out += " return zeus::ObjectRemote(out);\n"
out += " }\n"
out += " }\n"
out += " \n"
out += " template<> " + self.prop["name_class_proxy"] + " futureGetValue<" + self.prop["name_class_proxy"] + ">(ememory::SharedPtr<zeus::Promise>& _promise) {\n"
out += " ememory::SharedPtr<zeus::ObjectRemoteBase> out;\n"
out += " if ( _promise == null\n"
out += " || _promise->getRaw() == null) {\n"
out += " return zeus::ObjectRemote(out);\n"
out += " }\n"
out += " if (_promise->getRaw()->getType() != zeus::message::type::answer) {\n"
out += " ZEUS_WARNING(\"No Return value ...\");\n"
out += " return zeus::ObjectRemote(out);\n"
out += " }\n"
out += " out = static_cast<zeus::message::Answer*>(_promise->getRaw().get())->getAnswer<ememory::SharedPtr<zeus::ObjectRemoteBase>>();\n"
out += " \n"
out += " return zeus::ObjectRemote(out);\n"
out += " }\n"
out += " \n"
out += "}\n"
return [self.prop["file_name_class_src"], out]
##
## registerClass.hpp
##
def generate_register_header(self):
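		"""Generate the register header (register function declaration and the *_DECLARE(type) helper macro); return [file name, file content]."""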
out = self.licence_header
out += "#pragma once\n"
out += "\n"
out += "#include <etk/types.hpp>\n"
out += "#include <zeus/Object.hpp>\n"
out += "#include <zeus/Client.hpp>\n"
out += "#include <" + self.prop["file_name_class_header"] + ">\n"
out += "#include <etk/String.hpp>\n"
out += "#include <etk/Vector.hpp>\n"
out += "\n"
space = ""
for elem in self.name[:-1]:
out += space + "namespace " + elem + " {\n"
space += " "
out += space + "\n"
out += space + "void " + self.prop["name_class_register_short"] + "(zeus::ObjectType<" + self.prop["name_class"] + ">& _interface);\n"
out += space + "\n"
for elem in self.name[:-1]:
space = space[:-1]
out += space + "}\n"
out += space + "\n"
out += space + "#define " + self.prop["name_class_macro"] + "DECLARE(type) \\\n"
out += space + " ETK_EXPORT_API void SERVICE_IO_instanciate(uint32_t _transactionId, ememory::SharedPtr<zeus::WebServer>& _iface, uint32_t _destination) { \\\n"
out += space + " ememory::SharedPtr<type> tmp; \\\n"
out += space + " tmp = ememory::makeShared<type>(_destination>>16); \\\n"
out += space + " ememory::SharedPtr<" + self.prop["name_class"] + "> tmp2 = tmp; \\\n"
out += space + " _iface->answerValue(_transactionId, uint32_t(_iface->getAddress())<<16, _destination, tmp2); \\\n"
out += space + " }\n"
out += space + "\n"
return [self.prop["file_name_class_register_header"], out]
##
## registerClass.cpp
##
def generate_register_code(self):
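		"""Generate the register source that advertises every attribute and function on the zeus object interface; return [file name, file content]."""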
out = self.licence_header
for elem in self.imports:
prop = zeus_object_to_dictionary(elem)
out += "#include <" + prop["file_name_class_header"] + ">\n"
out += "#include <" + prop["file_name_class_proxy_header"] + ">\n"
out += "#include <" + self.prop["file_name_class_register_header"] + ">\n"
out += "#include <zeus/debug.hpp>\n"
out += "\n"
space = ""
out += space + "void " + self.prop["name_class_register"] + "(zeus::ObjectType<" + self.prop["name_class"] + ">& _interface) {\n"
space += " "
out += space + 'ZEUS_VERBOSE("===========================================================");\n';
		out += space + 'ZEUS_VERBOSE("== Instantiate service: ' + self.prop["name_class"] + '");\n';
out += space + 'ZEUS_VERBOSE("===========================================================");\n';
#out += space + '_serviceInterface.propertyNameService.set("' + self.name[-1].lower() + '");\n'
if self.brief != "":
out += space + '_interface.setDescription("' + self.brief + '");\n';
if self.version != "":
out += space + '_interface.setVersion("' + self.version + '");\n';
if self.api != "":
out += space + '_interface.setType("' + self.api + '");\n';
for elem in self.authors:
out += space + '_interface.addAuthor("' + elem.split("<")[0] + '", "' + elem.split("<")[1].replace(">","") + '");\n';
if len(self.functions) != 0 \
or len(self.attributes) != 0:
out += space + "zeus::AbstractFunction* func = null;\n"
for elem in self.attributes:
out += space + 'func = _interface.advertise("' + elem.name + '.set", &' + self.prop["name_class"] + '::_internalWrapperProperty_set' + capital_first(elem.name) + ');\n'
out += space + 'if (func != null) {\n'
if elem.brief != "":
out += space + ' func->setDescription("Set parameter ' + elem.brief + '");\n'
out += space + '}\n'
out += space + 'func = _interface.advertise("' + elem.name + '.get", &' + self.prop["name_class"] + '::_internalWrapperProperty_get' + capital_first(elem.name) + ');\n'
out += space + 'if (func != null) {\n'
if elem.brief != "":
out += space + ' func->setDescription("Get parameter ' + elem.brief + '");\n'
out += space + '}\n'
for elem in self.functions:
out += space + 'func = _interface.advertise("' + elem.name + '", &' + self.prop["name_class"] + '::' + elem.name + ');\n'
out += space + 'if (func != null) {\n'
space += " "
if elem.brief != "":
out += space + 'func->setDescription("' + elem.brief + '");\n'
for elem_p in elem.parameters:
if elem_p["name"] == "" \
and elem_p["brief"] == "":
continue
out += space + 'func->addParam("'
if elem_p["name"] != "":
out += elem_p["name"]
out += '", "'
if elem_p["brief"] != "":
out += elem_p["brief"]
out += '");\n'
if elem.return_brief != "":
out += space + 'func->setReturn("' + elem.return_brief + '");\n'
space = space[:-1]
out += space + '}\n'
out += space + 'ZEUS_VERBOSE("===========================================================");\n';
		out += space + 'ZEUS_VERBOSE("== Instantiate service: ' + self.prop["name_class"] + ' [DONE]");\n';
out += space + 'ZEUS_VERBOSE("===========================================================");\n';
out += "}\n"
out += "\n"
return [self.prop["file_name_class_register_src"], out]
##
## ProxyClass.hpp
##
def generate_proxy_header(self):
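		"""Generate the Proxy class header (remote properties and remote call declarations); return [file name, file content]."""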
out = ""
out += self.licence_header
out += "#pragma once\n"
out += "\n"
out += "#include <zeus/ObjectRemote.hpp>\n"
out += "#include <zeus/Proxy.hpp>\n"
out += "#include <zeus/RemoteProperty.hpp>\n"
out += "#include <etk/String.hpp>\n"
out += "#include <etk/Vector.hpp>\n"
out += "#include <" + self.prop["file_name_class_header"] + ">\n"
for elem in self.imports:
prop = zeus_object_to_dictionary(elem)
#out += "#include <" + prop["file_name_class_header"] + ">\n"
out += "#include <" + prop["file_name_class_proxy_header"] + ">\n"
out += "\n"
space = ""
for elem in self.name[:-1]:
out += space + "namespace " + elem + " {\n"
space += " "
out += space + " /**\n"
if self.brief != "":
out += space + " * @brief " + self.brief + " \n"
if self.version != "":
out += space + " * version:" + self.version + "\n"
if self.api != "":
out += space + " * api:" + self.api + "\n"
for elem in self.authors:
out += space + " * authors:" + elem + "\n"
out += space + " */\n"
out += space + "class " + self.prop["name_class_proxy_short"] + " :public zeus::Proxy {\n"
space += " "
out += space + "public:\n"
out += space + " const " + self.prop["name_class_proxy_short"] + "& operator= (const zeus::ObjectRemote& _srv) {\n"
out += space + " m_obj = _srv;\n"
out += space + " return *this;\n"
out += space + " }\n"
out += space + " const " + self.prop["name_class_proxy_short"] + "& operator= (const " + self.prop["name_class_proxy_short"] + "& _obj) {\n"
out += space + " m_obj = _obj.m_obj;\n"
out += space + " return *this;\n"
out += space + " }\n"
out += space + " ~" + self.prop["name_class_proxy_short"] + "() = default;\n"
out += space + " " + self.prop["name_class_proxy_short"] + "()"
if len(self.attributes) != 0:
out += ": \n"
first = True
for elem in self.attributes:
if first == False:
out += ",\n"
out += space + " " + elem.name + "(m_obj, \"" + elem.name + "\")"
first = False
out += " {}\n"
out += space + " " + self.prop["name_class_proxy_short"] + "(const zeus::ObjectRemote& _srv) :\n"
out += space + " zeus::Proxy(_srv)"
for elem in self.attributes:
out += ",\n"
out += space + " " + elem.name + "(m_obj, \"" + elem.name + "\")"
first = False
out += " {\n"
out += space + " \n"
out += space + " }\n"
"""
out += space + " bool exist() const {\n"
out += space + " return m_obj.exist();\n"
out += space + " }\n"
"""
out += space + "public:\n"
space += " "
"""
out += space + "/**\n"
out += space + " * @brief Generic virtual destructor\n"
out += space + " */\n"
out += space + "virtual ~" + self.name[-1] + "() = default;\n"
"""
for elem in self.attributes:
out += elem.generate_hpp_proxy(space)
for elem in self.functions:
out += elem.generate_hpp_proxy(space)
space = space[:-2]
out += space + "};\n"
for elem in self.name[:-1]:
space = space[:-1]
out += space + "}\n"
return [self.prop["file_name_class_proxy_header"], out]
##
## ProxyClass.cpp
##
def generate_proxy_code(self):
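		"""Generate the Proxy class source that forwards every call to the remote object; return [file name, file content]."""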
out = ""
out += self.licence_header
out += "\n"
out += "#include <" + self.prop["file_name_class_proxy_header"] + ">\n"
out += "\n"
for elem in self.attributes:
out += elem.generate_cpp_proxy("", self.prop["name_class_proxy"])
for elem in self.functions:
out += elem.generate_cpp_proxy("", self.prop["name_class_proxy"])
return [self.prop["file_name_class_proxy_src"], out]
def tool_generate_idl(target, module, data_option):
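	"""Parse the .zeus.idl file referenced by data_option["path"] and fill a ServiceDefinition with its documentation, attributes and functions."""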
data_path = data_option["path"]
debug.debug("Parsing .zeus.idl [start] " + str(data_path))
name_file = os.path.basename(data_path)
	if len(name_file) < 9 \
	   or name_file[-9:] != ".zeus.idl":
		debug.error("IDL must have an extension ending with '.zeus.idl' and not with '" + name_file[-9:] + "'")
elem_name = ""
type_of_object = "unknow"
if len(name_file) >= 13 \
and name_file[-13:] == ".srv.zeus.idl":
elem_name = name_file[:-13]
type_of_object = "srv"
elif len(name_file) >= 16 \
and name_file[-16:] == ".struct.zeus.idl":
elem_name = name_file[:-16]
type_of_object = "struct"
elif len(name_file) >= 13 \
and name_file[-13:] == ".obj.zeus.idl":
elem_name = name_file[:-13]
type_of_object = "obj"
else:
		debug.error("IDL must have an extension ending with '(struct|obj|srv).zeus.idl' and not with '" + name_file + "'")
service_def = ServiceDefinition()
service_def.set_name(elem_name.split("-"))
data = tools.file_read_data(os.path.join(module.get_origin_path(), data_path))
if len(data) == 0:
debug.error("Can not parse zeus.idl ==> no data in the file, or no file : " + os.path.join(module.get_origin_path(), data_path))
return;
	# standardize Windows/Mac line endings to Linux line endings.
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
id_line = 0
multi_comment = False
current_def = FunctionDefinition()
current_attr = AttributeDefinition()
for line in data.split("\n"):
id_line += 1;
if len(line) == 0:
# empty line
debug.extreme_verbose("find line " + str(id_line) + " ==> empty line")
continue
if multi_comment == False:
if len(line) >= 2 \
and line[:2] == "/*":
# Comment multi-line
debug.extreme_verbose("find line " + str(id_line) + " ==> comment multi-line [START]")
if len(line) > 2:
debug.error("line " + str(id_line) + " ==> /* must be alone in the line (no text after)")
multi_comment = True
continue
if len(line) >= 2 \
and line[:2] == "*/":
				debug.error("line " + str(id_line) + " ==> find '*/' without a matching start of multi-line comment '/*'")
else:
if len(line) >= 2 \
and line[:2] == "*/":
# Comment multi-line
debug.extreme_verbose("find line " + str(id_line) + " ==> comment multi-line [STOP]")
multi_comment = False
if len(line) > 2:
					debug.error("line " + str(id_line) + " ==> find '*/' must be alone in the line (no text after)")
continue
continue
if len(line) >= 2 \
and line[:2] == "//":
# Comment line
debug.extreme_verbose("find line " + str(id_line) + " ==> comment line")
continue
if len(line) >= 1 \
and line[0] == "#":
# Documentation line
debug.extreme_verbose("find line " + str(id_line) + " ==> documentation line")
#get keyword:
list_elems = line.split(":")
if len(list_elems) < 1:
debug.error("line " + str(id_line) + " ==> Missing Keyword ... ");
doc_keyword = list_elems[0] + ":"
doc_data = line[len(doc_keyword):]
if doc_keyword == "#brief:":
debug.extreme_verbose(" BRIEF: '" + doc_data + "'")
current_def = FunctionDefinition()
current_def.set_brief(doc_data)
current_attr.set_brief(doc_data)
elif doc_keyword == "#param:":
debug.extreme_verbose(" PARAMETER: '" + doc_data + "'")
# TODO : Do it better ...
current_def.add_param_comment(doc_data.split(":")[0], doc_data.split(":")[1])
elif doc_keyword == "#return:":
debug.extreme_verbose(" RETURN: '" + doc_data + "'")
current_def.set_return_comment(doc_data)
elif doc_keyword == "#elem-brief:":
debug.extreme_verbose(" SRV-BRIEF: '" + doc_data + "'")
service_def.set_brief(doc_data)
elif doc_keyword == "#elem-version:":
debug.extreme_verbose(" SRV-VERSION: '" + doc_data + "'")
service_def.set_version(doc_data)
elif doc_keyword == "#elem-type:":
debug.extreme_verbose(" SRV-TYPE: '" + doc_data + "'")
service_def.set_api(doc_data)
elif doc_keyword == "#elem-author:":
debug.extreme_verbose(" SRV-AUTHOR: '" + doc_data + "'")
service_def.add_author(doc_data)
else:
debug.warning("line " + str(id_line) + " ==> Unknow: keyword: '" + doc_keyword + "'")
debug.error(" support only: '#brief:' '#param:' '#return:' '#elem-brief:' '#elem-version:' '#elem-type:' '#elem-author:'")
continue
debug.extreme_verbose("Need to parse the function/attribute line:")
debug.extreme_verbose(" '" + line + "'")
if line[:7] == "import ":
debug.debug("find import : " + line)
# TODO : Add check ...
service_def.add_import(line.split(" ")[1])
elif line[-1] == ")":
# Find a function ==> parse it
#debug.error("line " + str(id_line) + " Can not parse function the line dos not ended by a ')'")
#get first part (befor '('):
# get type of the function (factory, tool, action, function(default))
type_function = "function"
if line[0] == "[":
if line[:13] == "[tool-remote]":
type_function = "tool-remote"
line = line[13:]
if line[:9] == "[factory]":
type_function = "factory"
line = line[9:]
if line[:10] == "[function]":
type_function = "function"
line = line[10:]
if line[:8] == "[action ":
type_function = "action"
line = line[8:]
type_event = "";
for elem in line:
if elem == "]":
break
type_event += elem
line = line[len(type_event)+1:]
if validate_type(type_event) == False:
debug.error("line " + str(id_line) + " action type unknow : '" + type_event + "' not in " + str(get_list_type()))
# remove white space
while len(line)>0 \
and line[0] == " ":
line = line[1:]
if type_function == "factory":
line = " " + line
# parse the function
list_elems = line.split("(")
if len(list_elems) <= 1:
debug.error("line " + str(id_line) + " function parsing error missing the '(' element")
fist_part = list_elems[0].replace(" ", " ").replace(" ", " ").replace(" ", " ")
argument_list = list_elems[1].replace(" ", "").replace(" ", "").replace(" ", "")[:-1]
if len(argument_list) != 0:
argument_list = argument_list.split(",")
else:
argument_list = []
# separate the return type and the function name
list_elems = fist_part.split(" ")
if len(list_elems) <= 1:
debug.error("line " + str(id_line) + " function return and name is not parsable")
return_value = list_elems[0]
function_name = list_elems[1]
# check types:
debug.extreme_verbose(" Parse of function done :")
current_def.set_function_name(function_name)
if type_function == "tool":
current_def.set_return_type(return_value)
debug.extreme_verbose(" return:" + return_value)
if validate_type(return_value) == False:
debug.error("line " + str(id_line) + " function return type unknow : '" + return_value + "' not in " + str(get_list_type()))
elif type_function == "factory":
if function_name != "create":
debug.error("line " + str(id_line) + " factory function name must be 'create' not '" + function_name + "'")
debug.extreme_verbose(" return: --- ")
elif validate_type(return_value) == False:
debug.error("line " + str(id_line) + " function return type unknow : '" + return_value + "' not in " + str(get_list_type()))
else:
current_def.set_return_type(return_value)
debug.extreme_verbose(" return:" + return_value)
for elem in argument_list:
if validate_type(elem) == False:
debug.error("line " + str(id_line) + " function argument type unknow : '" + elem + "' not in " + str(get_list_type()))
debug.extreme_verbose(" name:" + function_name)
debug.extreme_verbose(" arguments:" + str(argument_list))
for elem in argument_list:
current_def.add_parameter_type(elem)
if type_function == "function":
service_def.add_function(current_def)
elif type_function == "action":
current_def.set_action(type_event)
service_def.add_function(current_def)
elif type_function == "factory":
service_def.add_factory(current_def)
elif type_function == "tool-remote":
service_def.add_tool(current_def)
else:
debug.error("line " + str(id_line) + " Unknow type : " + str(type_function))
else:
# remove optionnal "property " at the start
if line[:9] == "property ":
line = line[9:]
# attribute parsing ==> parameters
# it must be a simple element: a type and a name separated by a space
if len(line.split("(")) != 1:
debug.error("line " + str(id_line) + " Can not parse function the line does not ended by a ')'")
elem = line.split(" ")
if len(elem) != 2:
debug.error("line " + str(id_line) + " Can not parse attribute must be constituated with the type and the name")
if validate_type(elem[0]) == False:
debug.error("line " + str(id_line) + " Attribute type unknow : '" + elem[0] + "' not in " + str(get_list_type()))
current_attr.set_type(elem[0]);
current_attr.set_name(elem[1]);
service_def.add_attribute(current_attr)
# reset it ...
current_def = FunctionDefinition()
current_attr = AttributeDefinition()
if multi_comment == True:
debug.error("reach end of file and missing end of multi-line comment */")
debug.verbose("Parsing idl Done (no error ...)")
#service_def.display()
service_header = service_def.generate_header()
service_source = service_def.generate_source()
register_header = service_def.generate_register_header()
register_code = service_def.generate_register_code()
proxy_header = service_def.generate_proxy_header()
proxy_code = service_def.generate_proxy_code()
debug.verbose("----------------- " + service_header[0] + " -----------------")
debug.verbose("\n" + service_header[1])
debug.verbose("----------------- " + service_source[0] + " -----------------")
debug.verbose("\n" + service_source[1])
debug.verbose("----------------- " + register_header[0] + " -----------------")
debug.verbose("\n" + register_header[1])
debug.verbose("----------------- " + register_code[0] + " -----------------")
debug.verbose("\n" + register_code[1])
debug.verbose("----------------- " + proxy_header[0] + " -----------------")
debug.verbose("\n" + proxy_header[1])
debug.verbose("----------------- " + proxy_code[0] + " -----------------")
debug.verbose("\n" + proxy_code[1])
tmp_path = os.path.join(target.get_build_path_temporary_generate(module.get_name()), "idl_src")
module.add_generated_header_file(service_header[1], service_header[0], install_element=True)
module.add_generated_src_file(service_source[1], service_source[0])
module.add_generated_header_file(register_header[1], register_header[0], install_element=True)
module.add_generated_src_file(register_code[1], register_code[0])
module.add_generated_header_file(proxy_header[1], proxy_header[0], install_element=True)
module.add_generated_src_file(proxy_code[1], proxy_code[0])
# if service, we need to install a simple empty file to register the service as available ...
if type_of_object == "srv":
module.add_generated_data_file("", "zeus/" + elem_name + ".srv", install_element=True)
debug.debug("Parsing .zeus.idl [DONE]")
def parse_object_idl(module, idl_path):
module.add_action(tool_generate_idl, data={"path":idl_path, "type":"object"})
def parse_struct_idl(module, idl_path):
module.add_action(tool_generate_idl, data={"path":idl_path, "type":"struct"})
| apache-2.0 | -5,072,047,862,792,280,000 | 36.233886 | 188 | 0.571694 | false | 2.846603 | false | false | false |
rohitwaghchaure/erpnext_smart | erpnext/manufacturing/doctype/work_management/work_management.py | 1 | 3038 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import add_days, cint, cstr, date_diff, flt, getdate, nowdate, \
get_first_day, get_last_day
from frappe.model.document import Document
class WorkManagement(Document):
def get_invoice_details(self, invoice_no=None):
self.set('production_details', [])
sales_invoices = self.get_invoice(invoice_no)
if sales_invoices:
for si_no in sales_invoices:
branch = frappe.db.get_value('User',frappe.session.user,'branch')
if frappe.db.get_value('Process Log',{'branch':branch,'parent':si_no.name},'name'):
si = self.append('production_details', {})
self.create_invoice_bundle(si_no, si)
return "Done"
def get_invoice(self, invoice_no=None):
cond = "1=1"
if invoice_no and not self.services:
cond = "sales_invoice_no='%s'"%(invoice_no)
elif self.services and not invoice_no:
cond = "tailoring_service='%s'"%(self.services)
elif self.services and invoice_no:
cond = "sales_invoice_no='%s' and tailoring_service='%s'"%(invoice_no, self.services)
return frappe.db.sql("select * from `tabProduction Dashboard Details` where %s order by sales_invoice_no desc"%(cond),as_dict=1, debug=1)
def create_invoice_bundle(self, invoice_detail, si):
color = {'Completed':'green','Pending':'red', 'Trial':'#1F8C83'}
value = '<h style="color:red">Pending</h>'
si.sales_invoice = invoice_detail.sales_invoice_no
si.article_code = invoice_detail.article_code
si.article_qty = invoice_detail.article_qty
si.work_order = invoice_detail.work_order
si.stock_entry = invoice_detail.stock_entry
si.process_allotment = invoice_detail.name
si.actual_qty = invoice_detail.fabric_qty
si.fabric_code = invoice_detail.fabric_code
si.serial_no = invoice_detail.serial_no
si.size = invoice_detail.size
if invoice_detail.status == 'Completed':
value = '<h style="color:%s">%s</h>'%(color.get(invoice_detail.status), invoice_detail.status)
elif cint(invoice_detail.trial_no) > 0:
value = '<h style="color:%s">Ready For %s %s</h>'%(color.get(invoice_detail.status), invoice_detail.status, invoice_detail.trial_no)
si.process_status = value
si.cut_order_status ='<h style="color:%s">%s</h>'%(color.get(invoice_detail.cut_order_status), invoice_detail.cut_order_status)
def save_data(self, args):
for d in self.get('production_details'):
if cint(args.get('select')) ==1 and cint(d.idx)==cint(args.get('idx')):
self.save(ignore_permissions=True)
elif cint(args.get('select')) ==0 and cint(d.idx)==cint(args.get('idx')):
self.clear_data(args.get('sales_invoice'), args.get('article_code'))
def clear_data(self, inv_no=None, item_code=None):
self.get_invoice_details()
cond = "1=1"
if inv_no and item_code:
cond = "sales_invoice= '%s' and article_code='%s'"%(inv_no, item_code)
frappe.db.sql("delete from `tabProduction Details` where %s"%(cond),debug=1) | agpl-3.0 | 7,046,307,035,014,875,000 | 45.753846 | 139 | 0.703423 | false | 2.996055 | false | false | false |
bl4de/security-tools | redir_gen/redirgen.py | 1 | 1060 | #!/usr/bin/env python3
# Forked from https://gist.github.com/zPrototype/b211ae91e2b082420c350c28b6674170
import re
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--target", "-t", action="store", help="Enter the target address", required=True)
parser.add_argument("--dest", "-d", action="store", help="Enter the address where you want to redirect to",
required=True)
parser.add_argument("--output", "-o", action="store", help="Enter output file name")
args = parser.parse_args()
payloads = []
# Remove protocol from url
junk = re.compile(r"https?://")
target = junk.sub("", args.target)
dest = junk.sub("", args.dest)
with open("payloads.txt", "r") as handle:
templates = handle.readlines()
for payload in templates:
payload = payload.rstrip()
payload = re.sub("TARGET", target, payload)
payload = re.sub("DEST", dest, payload)
print(payload)
payloads.append(payload)
if args.output:
with open(args.output, "w")as handle:
[handle.write(f"{x.rstrip()}\n") for x in payloads] | mit | -8,893,788,218,060,954,000 | 31.151515 | 107 | 0.679245 | false | 3.365079 | false | false | false |
suitmyself/Physika | Documentation/Cuda_Scons_Tool/cuda.py | 1 | 5959 | """
SCons.Tool.cuda
@author: WeiChen, 07/02/2016
@brief: Tool for SCons used to support compiling of CUDA code
@usage:
1. this file is used in SConstruct script through codes like:
env.Tool('cuda', toolpath = 'documentation/Cuda_Scons_Tool/')
2. you can also put this file to PYTHON_HOME/Lib/site-packages/scons-x.x.x/SCons/Tool
@reference:
https://bitbucket.org/scons/scons/wiki/CudaTool
https://github.com/bryancatanzaro/cuda-scons/blob/master/nvcc.py
"""
import SCons.Tool
import SCons.Scanner.C
import SCons.Defaults
import os
import sys
import platform
#cuda suffix
cuda_suffix = '.cu'
# make a CUDAScanner for finding #includes
# cuda uses the c preprocessor, so we can use the CScanner
cuda_scanner = SCons.Scanner.C.CScanner()
def generate(env):
os_name = platform.system()
os_architecture = platform.architecture()[0]
#cuda path
cuda_bin_path = ''
cuda_inc_path = ''
cuda_lib_path = ''
cuda_dll_path = ''
cuda_path = None
if 'CUDA_PATH' in os.environ:
cuda_path = os.environ['CUDA_PATH']
elif 'CUDA_PATH' in env:
cuda_path = env['CUDA_PATH']
else:
guess_path = [ '/usr/local/NVIDIA_CUDA_TOOLKIT',
'/usr/local/CUDA_TOOLKIT',
'/usr/local/cuda_toolkit',
'/usr/local/CUDA',
'/usr/local/cuda'
]
for path in guess_path:
if os.path.isdir(path):
cuda_path = path
break
if cuda_path == None:
sys.exit("Cannot find the CUDA_PATH. Please install CUDA OR add CUDA_PATH in your environment variables OR explictly specify env['CUDA_PATH']!")
cuda_inc_path = cuda_path+'/include/'
cuda_bin_path = cuda_path+'/bin/'
cuda_version_str = os.path.basename(cuda_path)
cuda_version_id = filter(str.isdigit, cuda_version_str)
if os_name == 'Windows':
if os_architecture == '32bit':
cuda_lib_path = cuda_path+'/lib/Win32/'
cuda_dll_path = cuda_path+'/bin/cudart32_'+cuda_version_id+'.dll'
else:
cuda_lib_path = cuda_path+'/lib/X64/'
cuda_dll_path = cuda_path+'/bin/cudart64_'+cuda_version_id+'.dll'
elif os_name == 'Linux':
if os_architecture == '32bit':
cuda_lib_path = cuda_path+'/lib/'
else:
cuda_lib_path = cuda_path+'/lib64/'
elif os_name == 'Darwin':
cuda_lib_path = cuda_path+'/lib/'
#add include path
env.Append(CPPPATH = cuda_inc_path)
#add cuda runtime libpath and lib
env.Append(LIBPATH = cuda_lib_path)
env.Append(LIBS = 'cudart')
env.Append(LIBS = 'cudadevrt')
env.Append(LIBS = 'curand')
env['CUDA_DLL_PATH'] = cuda_dll_path
# "NVCC common command line"
if not env.has_key('_NVCCCOMCOM'):
# nvcc needs '-I' prepended before each include path, regardless of platform
env['_NVCCWRAPCPPPATH'] = '${_concat("-I ", CPPPATH, "", __env__)}'
# prepend -Xcompiler before each flag
env['_NVCCWRAPCFLAGS'] = '${_concat("-Xcompiler ", CFLAGS, "", __env__)}'
env['_NVCCWRAPSHCFLAGS'] = '${_concat("-Xcompiler ", SHCFLAGS, "", __env__)}'
#special treatment for Darwin(Mac)
#since clang could report an error if '-Xcompiler -std=gnu++11' is used
#while g++ just reports a warning
if os_name == 'Darwin':
DARWIN_CCFLAGS = env['CCFLAGS'][:] #copy
if '-std=gnu++11' in DARWIN_CCFLAGS:
DARWIN_CCFLAGS.remove('-std=gnu++11')
env['DARWIN_CCFLAGS'] = DARWIN_CCFLAGS
DARWIN_SHCCFLAGS = env['SHCCFLAGS'][:] #copy
if '-std=gnu++11' in DARWIN_SHCCFLAGS:
DARWIN_SHCCFLAGS.remove('-std=gnu++11')
env['DARWIN_SHCCFLAGS'] = DARWIN_SHCCFLAGS
env['_NVCCWRAPCCFLAGS'] = '${_concat("-Xcompiler ", DARWIN_CCFLAGS, "", __env__)}'
env['_NVCCWRAPSHCCFLAGS'] = '${_concat("-Xcompiler ", DARWIN_SHCCFLAGS, "", __env__)}'
else:
env['_NVCCWRAPCCFLAGS'] = '${_concat("-Xcompiler ", CCFLAGS, "", __env__)}'
env['_NVCCWRAPSHCCFLAGS'] = '${_concat("-Xcompiler ", SHCCFLAGS, "", __env__)}'
# assemble the common command line
env['_NVCCCOMCOM'] = '${_concat("-Xcompiler ", CPPFLAGS, "", __env__)} $_CPPDEFFLAGS $_NVCCWRAPCPPPATH'
# set the include path, and pass both c compiler flags and c++ compiler flags
env['NVCCFLAGS'] = SCons.Util.CLVar('')
env['SHNVCCFLAGS'] = SCons.Util.CLVar('') + ' -shared'
# set cuda compiler
env['NVCC'] = 'nvcc'
env['SHNVCC'] = 'nvcc'
# set cuda compute arch
env['CUDA_ARCH'] = '-arch=compute_52'
# 'NVCC Command'
env['NVCCCOM'] = '$NVCC -o $TARGET $CUDA_ARCH -dlink -c -dc -std=c++11 $NVCCFLAGS $_NVCCWRAPCFLAGS $_NVCCWRAPCCFLAGS $_NVCCCOMCOM $SOURCES'
env['SHNVCCCOM'] = '$SHNVCC -o $TARGET $CUDA_ARCH -dlink -c -dc -std=c++11 $SHNVCCFLAGS $_NVCCWRAPSHCFLAGS $_NVCCWRAPSHCCFLAGS $_NVCCCOMCOM $SOURCES'
# create builders that make static & shared objects from .cu files
static_obj_builder, shared_obj_builder = SCons.Tool.createObjBuilders(env)
# Add this suffix to the list of things buildable by Object
static_obj_builder.add_action(cuda_suffix, '$NVCCCOM')
shared_obj_builder.add_action(cuda_suffix, '$SHNVCCCOM')
static_obj_builder.add_emitter(cuda_suffix, SCons.Defaults.StaticObjectEmitter)
shared_obj_builder.add_emitter(cuda_suffix, SCons.Defaults.SharedObjectEmitter)
# Add this suffix to the list of things scannable
SCons.Tool.SourceFileScanner.add_scanner(cuda_suffix, cuda_scanner)
# Prepend cuda_bin_path
env.PrependENVPath('PATH', cuda_bin_path)
def exists(env):
return env.Detect('nvcc')
| gpl-2.0 | -7,234,352,249,998,130,000 | 37.947712 | 153 | 0.594563 | false | 3.334639 | false | false | false |
josuemontano/python_intro | tema_6/Leccion_3.py | 1 | 1116 | # -*- coding: utf-8 -*-
# Connecting to databases
# Lesson 3
# SQLAlchemy: Queries
from Leccion_1 import Libro
from Leccion_2 import session
def todos_libros():
return session.query(Libro).all()
def libros_recientes():
return session.query(Libro).filter(Libro.anio_publicacion >= 2010).all()
def libros_por_titulo(titulo):
return session.query(Libro).filter(Libro.titulo.startswith(titulo)).all()
def libros_recientes_por_titulo(titulo):
return session.query(Libro).filter(Libro.anio_publicacion >= 2010, Libro.titulo.startswith(titulo)).all()
def cantidad_libros_editorial(editorial):
return session.query(Libro).filter(Libro.editorial == editorial).count()
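# A further illustrative query (not called by main() below): combine a filter with
# ordering on the same Libro model; the publisher name 'ACME' is a made-up value.
# session.query(Libro).filter(Libro.editorial == 'ACME').order_by(Libro.anio_publicacion.desc()).all()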
def main():
print("Libros registrados:")
for libro in todos_libros():
print(libro.titulo)
print("\nLibros recientes:")
for libro in libros_recientes():
print(libro.anio_publicacion, libro.titulo)
print("\nLibros cuyo titulo empieza con 'Math':")
for libro in libros_por_titulo('Math'):
print(libro.anio_publicacion, libro.titulo)
if __name__ == '__main__':
main()
| gpl-3.0 | 690,082,055,112,073,500 | 24.318182 | 109 | 0.692998 | false | 2.91623 | false | false | false |
ColumbiaCMB/kid_readout | apps/data_taking_scripts/2016-06-jpl-hex-271/heterodyne_scan_with_source.py | 1 | 2650 | import time
import numpy as np
from kid_readout.interactive import *
from kid_readout.measurement import acquire
from kid_readout.roach import r2heterodyne, attenuator, hardware_tools
from equipment.custom import mmwave_source
from equipment.hittite import signal_generator
from equipment.srs import lockin
logger.setLevel(logging.DEBUG)
hittite = signal_generator.Hittite(ipaddr='192.168.0.200')
hittite.set_power(0)
hittite.on()
hittite.set_freq(148e9/12.)
lockin = lockin.Lockin(LOCKIN_SERIAL_PORT)
tic = time.time()
print lockin.identification
print time.time()-tic
tic = time.time()
print lockin.fast_state
print time.time()-tic
source = mmwave_source.MMWaveSource()
source.set_attenuator_turns(3.0,3.0)
source.multiplier_input = 'hittite'
source.waveguide_twist_angle = 45
source.ttl_modulation_source = 'roach'
setup = hardware.Hardware(hittite, source,lockin)
ri = hardware_tools.r2_with_mk1(1000.)
ri.iq_delay=-1
ri.set_dac_atten(20)
ri.set_fft_gain(6)
nsamp = 2**15
step = 1
nstep = 32
#f0binned = np.round(f0s * nsamp / 512.0) * 512.0 / nsamp
offset_bins = np.arange(-(nstep), (nstep)) * step
offsets = offset_bins * 512.0 / nsamp
ri.set_modulation_output('low')
ri.set_lo(1250.)
#legacy.load_heterodyne_sweep_tones(ri,(np.arange(1,129)[None,:]*7/4.+ri.lo_frequency + offsets[:,None]),
# num_tone_samples=nsamp)
state = dict(field_canceling_magnet=False,magnetic_shield=True,cryostat='starcryo')
state.update(**setup.state())
for hittite_power in np.arange(-3.0,1,.4):
logger.info("Measuring at %.1f dBm" % hittite_power)
hittite.set_power(hittite_power)
tic = time.time()
for lo in 830.+190*np.arange(0,4):
logger.info("Measuring at LO %.1f" % lo)
ri.set_lo(lo)
df = acquire.new_nc_file(suffix='scan_lo_%.1f_MHz' % lo)
ri.set_modulation_output(7)
logger.info("autogain lockin")
time.sleep(1)
lockin.auto_gain(wait_until_done=True)
time.sleep(3)
logger.info("new sensitivity: %d values %s" % (lockin.sensitivity,str(lockin.fast_state)))
state.update(**setup.state())
ri.set_modulation_output('low')
swa = acquire.run_sweep(ri, (np.arange(1, 257)[None, :] * 7 / 8. + ri.lo_frequency + offsets[:, None]),
num_tone_samples=nsamp, length_seconds=0.1, state=state, verbose=True)
df.write(swa)
df.close()
print "elapsed:", (time.time()-tic)/60.0,'minutes'
#time.sleep(60.)
# while time.time() - tic < 5*60:
# print "waiting... %.1f min remaining" % ((5*60 - (time.time() - tic))/60)
# time.sleep(60)
| bsd-2-clause | 6,223,402,488,871,364,000 | 30.176471 | 111 | 0.660377 | false | 2.774869 | false | false | false |
eoss-cloud/madxxx_catalog_api | catalog/client/services/catalog_status.py | 1 | 2495 | #-*- coding: utf-8 -*-
""" EOSS catalog system
functionality for the catalog status endpoint
"""
__author__ = "Thilo Wehrmann, Steffen Gebhardt"
__copyright__ = "Copyright 2016, EOSS GmbH"
__credits__ = ["Thilo Wehrmann", "Steffen Gebhardt"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Thilo Wehrmann"
__email__ = "[email protected]"
__status__ = "Production"
import logging
import falcon
import ujson
from api import max_body
from client.services.db_calls import Persistance
from client.services.static_maps import j2_env
from client.services.tools import can_zip_response, compress_body, make_GeoJson
from api_logging import logger
class CatalogStatus(object):
"""
EOSS catalog class from web API
"""
def __init__(self):
self.logger = logging.getLogger('eoss.' + __name__)
@falcon.before(max_body(64 * 1024)) # max 64kB request size
def on_get(self, req, resp, sensor):
logger.info('[GET] /catalog/status/count/%s' % (sensor))
results = dict()
minx, maxx, miny, maxy = -180, 180, -90, 90
if 'last_days' in req.params:
last_days = int(req.params['last_days'])
else:
last_days = 4
global_extent = [[miny, minx], [maxy, maxx]]
res = Persistance().get_observation_coverage(int(sensor), last_days=last_days)
results['geojson'] = make_GeoJson(res['geojson'], res['attr'])
content_type = 'text/html'
results = j2_env.get_template('leaflet_map.html').render(title='Reference object: %s' % sensor, center='[%f, %f]' % (21.5, -102),
zoomlevel=5, geojson=ujson.dumps(results['geojson']),
label_attribute=None,
extent=ujson.dumps(global_extent))
if can_zip_response(req.headers):
resp.set_header('Content-Type', content_type)
resp.set_header('Content-Encoding', 'gzip')
if content_type == 'application/json':
resp.body = compress_body(ujson.dumps(results))
else:
resp.body = compress_body(results)
else:
resp.set_header('Content-Type', content_type)
if content_type == 'application/json':
resp.body = ujson.dumps(results)
else:
resp.body = results
resp.status = falcon.HTTP_200 | mit | 4,771,464,230,048,616,000 | 35.173913 | 137 | 0.569539 | false | 3.658358 | false | false | false |
patois/IDACyber | cyber/prototype.py | 1 | 2866 | from PyQt5.QtGui import qRgb
from PyQt5.QtCore import Qt
from idacyber import ColorFilter
from ida_kernwin import ask_text, warning
from types import FunctionType
class Prototype(ColorFilter):
name = "Prototype"
help = "Right click: edit current filter function"
highlight_cursor = False
def __init__(self, pw):
self.pw = pw
self.func_call = None
self.func_def=(
"""
def process(base, offs, b, size, width, moffs):
# print("%x+%x: %02x (total pxls %d, width %d, mouse pos %d)" % (base, offs, b, size, width, moffs))
# return (b,b,b)
if (b == 0x70 or b == 0x47):
# detect potential thumb-mode pattern
color = (0x59, 0x7c, 0x92)
elif (b & 0xf0 == 0xe0):
# detect potential ARM pattern
color = (0x00, 0x40, 0x67)
else:
# default color
color = (0x00, 0x10, 0x1b)
# cross-hair
if offs%width == moffs%width or int(offs/width) == int(moffs/width):
color = (min(color[0]+0x00,0xff),
min(color[1]+0x04,0xff),
min(color[2]+0x04,0xff))
return color""")
self._compile(self.func_def)
def _compile(self, text):
self.func_def = text
try:
self.func_code = compile(text, "", "exec")
self.func_call = FunctionType(self.func_code.co_consts[0], globals(), "")
return (True, "")
except Exception as e:
return (False, e)
return (False, "")
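# Sketch of the contract expected from the user-editable filter (based on the
# default func_def above, not an additional API): the pasted code must define
#   def process(base, offs, b, size, width, moffs):
#       return (r, g, b)
# _compile() extracts that single function object via func_code.co_consts[0],
# so the text should contain exactly one top-level def.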
def _set_user_func(self):
while True:
func_def = ask_text(0, self.func_def, "Please define function (must return a (RR,GG,BB) tuple)")
if func_def is None:
break
res, s = self._compile(func_def)
if res:
break
warning("%s" % s)
def on_mb_click(self, event, addr, size, mouse_offs):
if event.button() == Qt.RightButton:
self._set_user_func()
def on_process_buffer(self, buffers, addr, size, mouse_offs):
colors = []
width = self.pw.get_pixel_qty_per_line()
for mapped, buf in buffers:
if mapped:
for offs in range(len(buf)):
try:
r, g, b = self.func_call(
addr,
offs,
buf[offs]&0xff,
size,
width,
mouse_offs)
colors.append((True, qRgb(r&0xFF, g&0xFF, b&0xFF)))
except:
colors.append((False, None))
else:
colors += [(False, None)]*len(buf)
return colors
def FILTER_INIT(pw):
return Prototype(pw)
def FILTER_EXIT():
return | mit | -5,844,695,650,592,457,000 | 29.866667 | 111 | 0.495115 | false | 3.632446 | false | false | false |
craws/OpenAtlas | openatlas/database/gis.py | 1 | 4367 | import ast
from typing import Any, Dict, List
from flask import g
class Gis:
@staticmethod
def add_example_geom(id_: int) -> None:
sql = """INSERT INTO gis.point (entity_id, name, description, type, geom) VALUES (
(%(location_id)s),
'',
'',
'centerpoint',
public.ST_SetSRID(public.ST_GeomFromGeoJSON('{"type":"Point","coordinates":[9,17]}'),4326));
"""
g.cursor.execute(sql, {'location_id': id_})
@staticmethod
def get_by_id(id_: int) -> List[Dict[str, Any]]:
geometries = []
for shape in ['point', 'polygon', 'linestring']:
sql = f"""
SELECT
{shape}.id,
{shape}.name,
{shape}.description,
{shape}.type,
public.ST_AsGeoJSON({shape}.geom) AS geojson
FROM model.entity place
JOIN gis.{shape} {shape} ON place.id = {shape}.entity_id
WHERE place.id = %(id_)s;"""
g.cursor.execute(sql, {'id_': id_})
for row in g.cursor.fetchall():
geometry = ast.literal_eval(row['geojson'])
geometry['title'] = row['name'].replace('"', '\"') if row['name'] else ''
geometry['description'] = \
row['description'].replace('"', '\"') if row['description'] else ''
geometries.append(geometry)
return geometries
@staticmethod
def get_by_shape(shape: str, extra_ids: List[int]) -> List[Dict[str, Any]]:
polygon_sql = '' if shape != 'polygon' else \
'public.ST_AsGeoJSON(public.ST_PointOnSurface(polygon.geom)) AS polygon_point, '
sql = f"""
SELECT
object.id AS object_id,
{shape}.id,
{shape}.name,
{shape}.description,
{shape}.type,
public.ST_AsGeoJSON({shape}.geom) AS geojson, {polygon_sql}
object.name AS object_name,
object.description AS object_desc,
string_agg(CAST(t.range_id AS text), ',') AS types
FROM model.entity place
JOIN model.link l ON place.id = l.range_id
JOIN model.entity object ON l.domain_id = object.id
JOIN gis.{shape} {shape} ON place.id = {shape}.entity_id
LEFT JOIN model.link t ON object.id = t.domain_id AND t.property_code = 'P2'
WHERE place.class_code = 'E53'
AND l.property_code = 'P53'
AND (object.system_class = 'place' OR object.id IN %(extra_ids)s)
GROUP BY object.id, {shape}.id;"""
g.cursor.execute(sql, {'extra_ids': tuple(extra_ids)})
return [dict(row) for row in g.cursor.fetchall()]
@staticmethod
def test_geom(geometry: str) -> None:
from openatlas.models.gis import InvalidGeomException
sql = "SELECT st_isvalid(public.ST_SetSRID(public.ST_GeomFromGeoJSON(%(geojson)s),4326));"
g.cursor.execute(sql, {'geojson': geometry})
if not g.cursor.fetchone()['st_isvalid']:
raise InvalidGeomException
return
@staticmethod
def insert(data: Dict[str, Any], shape: str) -> None:
sql = f"""
INSERT INTO gis.{shape} (entity_id, name, description, type, geom) VALUES (
%(entity_id)s,
%(name)s,
%(description)s,
%(type)s,
public.ST_SetSRID(public.ST_GeomFromGeoJSON(%(geojson)s),4326));"""
g.cursor.execute(sql, data)
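# Illustrative shape of the `data` dict expected by insert() (values are made up;
# the keys follow the SQL placeholders above):
# {'entity_id': 42, 'name': 'Spot A', 'description': '', 'type': 'centerpoint',
#  'geojson': '{"type":"Point","coordinates":[9,17]}'}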
@staticmethod
def insert_import(data: Dict[str, Any]) -> None:
sql = """
INSERT INTO gis.point (entity_id, name, description, type, geom) VALUES (
%(entity_id)s,
'',
%(description)s,
'centerpoint',
public.ST_SetSRID(public.ST_GeomFromGeoJSON(%(geojson)s),4326));"""
g.cursor.execute(sql, data)
@staticmethod
def delete_by_entity_id(id_: int) -> None:
g.cursor.execute('DELETE FROM gis.point WHERE entity_id = %(id)s;', {'id': id_})
g.cursor.execute('DELETE FROM gis.linestring WHERE entity_id = %(id)s;', {'id': id_})
g.cursor.execute('DELETE FROM gis.polygon WHERE entity_id = %(id)s;', {'id': id_})
| gpl-2.0 | -4,000,327,028,606,807,000 | 40.590476 | 100 | 0.530112 | false | 3.854369 | false | false | false |
cjrd/TMA | src/backend/aux/create_wiki_cooccurence.py | 1 | 5270 | #!/usr/bin/env python
import pdb
import os
import re
import cPickle as pickle
from src.backend.tma_utils import TextCleaner, ids_to_key
from lib.porter2 import stem
import sqlite3 as sqlite
from time import time
import bsddb
import random
def db_transfer(termterm_dict, termterm_db):
for t1 in termterm_dict:
for t2 in termterm_dict[t1]:
ikey = '%i,%i' % (t1,t2)
if termterm_db.has_key(ikey):
termterm_db[ikey] = str(int(termterm_db[ikey]) + termterm_dict[t1][t2])
else:
termterm_db[ikey] = str(termterm_dict[t1][t2])
if __name__ == '__main__':
# DATA
outdata_dir = '/Users/cradreed/Research/TMBrowse/develarea/'
wikfile = outdata_dir + 'enwiki_abstracts-20120307.dat' #'/Users/cradreed/Research/TMBrowse/develarea/enwiki-latest-abstract18.xml'#
# use bsd to create to cooccurence file then write to sqlite to maintain database consistency and reduce dependencies
# set up the dbase
#dbfile = '/Users/cradreed/Research/TMBrowse/develarea/wiki-terms.sqlite'
# wikivocab_file = outdata_dir + 'wikivocab_full.bdb'
# wikivocab_ct_file = outdata_dir + 'wikivocab_ct_full.bdb'
wiki_termterm_file = outdata_dir + 'wiki_termterm_full_100percent.bdb'
# os.remove(dbfile) # TESTING
# if os.path.exists(wikivocab_file):
# os.remove(wikivocab_file)
# if os.path.exists(wikivocab_ct_file):
# os.remove(wikivocab_ct_file)
# if os.path.exists(wiki_termterm_file):
# os.remove(wiki_termterm_file)
vocab_dict = pickle.load(open(outdata_dir + 'wiki_vocab_dic_full_100percent.obj','rb'))#{}#bsddb.hashopen(wikivocab_file)
vocab_ct_dict = pickle.load(open(outdata_dir + 'wiki_vocab_ct_full_100percent.obj','rb'))#{}#bsddb.hashopen(wikivocab_ct_file)
termterm_db = bsddb.btopen(wiki_termterm_file)
termterm_dict = {}
text_cleaner = TextCleaner(stopword_file='/Users/cradreed/Research/TMBrowse/trunk/src/backend/aux/stop_words.txt')
# add the cooccurence information to the table
st_time = time()
num_ab = 0
term_ct = 0
tot_ab_len = 0
dep_no = 0
print_no = 50000
transfer_no = 10*print_no
ltime = time()
with open(wikfile,'r') as wikxml:
for i, line in enumerate(wikxml):
# only sample % 20
# if random.random() > 0.20:
# continue
num_ab += 1
if num_ab <= 3500000:
continue
if num_ab % print_no == 0 and not num_ab == 0:
print 'Parsed {0:8d} of 3925809 abstracts; last {1:5d} abstracts took {2:0.1f} seconds. Average {3:4d} terms per doc.'.format(num_ab, print_no,time()-ltime, int(tot_ab_len/print_no))
ltime = time()
tot_ab_len = 0
if num_ab % transfer_no == 0 and not num_ab == 0:
print '---- Transfering %i abstracts to db -----' % transfer_no
db_transfer(termterm_dict, termterm_db)
dep_no += 1
del(termterm_dict)
termterm_dict = {}
print '---- %i transfer complete, took %0.1f seconds ----' % (dep_no, (time() - ltime))
ltime = time()
text = line.strip() # remove the abstract tags
text = text_cleaner.parse_text(text)
text = list(set(text))
tot_ab_len += len(text)
for nt1, term1 in enumerate(text):
if not vocab_dict.has_key(term1):
t1_id = term_ct
vocab_dict[term1] = t1_id
vocab_ct_dict[t1_id] = 1
term_ct += 1
else:
t1_id = vocab_dict[term1]
vocab_ct_dict[t1_id] += 1
for nt2 in xrange(nt1+1, len(text)): # 173.271281 vs 185s TODO make sure the counting is correct
term2 = text[nt2]
if not vocab_dict.has_key(term2):
t2_id = term_ct
vocab_dict[term2] = t2_id
vocab_ct_dict[t2_id] = 0 # avoid overcounting
term_ct += 1
else:
t2_id = vocab_dict[term2]
t_keys = ids_to_key(t1_id, t2_id)
if not termterm_dict.has_key(t_keys[0]):
termterm_dict[t_keys[0]] = {t_keys[1]:1}
elif termterm_dict[t_keys[0]].has_key(t_keys[1]):
termterm_dict[t_keys[0]][t_keys[1]] += 1
else:
termterm_dict[t_keys[0]][t_keys[1]] = 1
db_transfer(termterm_dict, termterm_db)
print 'Added %i terms to dic' % len(vocab_dict)
# vocab_dict.close()
# vocab_ct_dict.close()
# print termterm_db
# print vocab_dict
# print vocab_ct_dict
termterm_db.close()
pickle.dump(vocab_dict, open(outdata_dir + 'wiki_vocab_dic_full_100percent2.obj','wb'))
pickle.dump(vocab_ct_dict, open(outdata_dir + 'wiki_vocab_ct_full_100percent2.obj','wb'))
time_parse = time() - st_time
print 'Parsing %i abstracts took %f seconds' % (num_ab, time_parse) | gpl-3.0 | 1,383,182,657,101,620,200 | 40.503937 | 198 | 0.551803 | false | 3.241082 | false | false | false |
LABETE/TestYourProject | casedetails/migrations/0001_initial.py | 1 | 1298 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='CaseDetail',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),
('status', models.SmallIntegerField(choices=[(1, 'Not Started'), (2, 'In Progress'), (3, 'Passed'), (4, 'Failed'), (5, 'Not Applicable')], default=1)),
('step', models.IntegerField()),
('description', models.TextField()),
('expected', models.TextField(blank=True)),
('actual', models.TextField(blank=True)),
('input_data', models.TextField(blank=True)),
('output_data', models.TextField(blank=True)),
('defect_id', models.IntegerField(blank=True, null=True)),
('defect_id_displayed', models.IntegerField(blank=True, null=True)),
('case_id', models.IntegerField()),
],
options={
'verbose_name': 'CaseDetailModel',
'verbose_name_plural': 'CaseDetailModels',
},
),
]
| bsd-3-clause | 3,029,143,579,787,670,500 | 38.333333 | 167 | 0.539291 | false | 4.445205 | false | false | false |
chemelnucfin/tensorflow | tensorflow/python/keras/mixed_precision/experimental/keras_test.py | 1 | 43595 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests mixed precision works correctly with Keras layers and models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import layers
from tensorflow.python.keras import models
from tensorflow.python.keras import optimizers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras import saving
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers import recurrent
from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.keras.mixed_precision.experimental import test_util as mp_test_util
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training.experimental import loss_scale as loss_scale_module
from tensorflow.python.training.tracking import util as trackable_utils
from tensorflow.python.util import nest
class AssertTypeLayer(base_layer.Layer):
"""A layer which asserts it's inputs are a certain type."""
def __init__(self, assert_type=None, **kwargs):
self._assert_type = (dtypes.as_dtype(assert_type).name if assert_type
else None)
super(AssertTypeLayer, self).__init__(**kwargs)
def assert_input_types(self, inputs):
"""Asserts `inputs` are of the correct type. Should be called in call()."""
if self._assert_type:
inputs_flattened = nest.flatten(inputs)
for inp in inputs_flattened:
assert inp.dtype.base_dtype == self._assert_type, (
'Input tensor has type %s which does not match assert type %s' %
(inp.dtype.name, self._assert_type))
class AddLayer(AssertTypeLayer):
"""A layer which adds it's input to a scalar variable."""
def __init__(self,
regularizer=None,
use_operator=False,
var_name='v',
**kwargs):
"""Initializes the AddLayer.
Args:
regularizer: The regularizer on the scalar variable.
use_operator: If True, add using the + operator. If False, add using
tf.add.
var_name: The name of the variable. It can be useful to pass a name other
than 'v', to test having the attribute name (self.v) being different
from the variable name.
**kwargs: Passed to AssertTypeLayer constructor.
"""
self._regularizer = regularizer
self._use_operator = use_operator
self._var_name = var_name
super(AddLayer, self).__init__(**kwargs)
def build(self, _):
self.v = self.add_weight(
self._var_name, (), initializer='ones', regularizer=self._regularizer)
self.built = True
def call(self, inputs):
self.assert_input_types(inputs)
assert inputs.dtype == self.v.dtype
return self._add(inputs, self.v)
def _add(self, x, y):
if self._use_operator:
return x + y
else:
return math_ops.add(x, y)
def get_config(self):
config = super(AddLayer, self).get_config()
assert self._regularizer is None, (
'regularizer must be None to get config for AddLayer')
config['use_operator'] = self._use_operator
config['var_name'] = self._var_name
config['assert_type'] = self._assert_type
return config
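# Illustrative usage of AddLayer outside the tests (a sketch; the 'mixed_float16'
# policy name follows the policies exercised in the tests below): under that
# policy the layer computes in float16 while self.v stays a float32 variable.
#
#   with policy.policy_scope('mixed_float16'):
#     layer = AddLayer(assert_type='float16')
#     y = layer(constant_op.constant([1.], dtype='float16'))  # y.dtype == float16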
class AddLayerWithoutAutoCast(AddLayer):
"""Same as AddLayer, but does not use AutoCastVariables."""
def build(self, _):
dtype = self.dtype
if dtype in ('float16', 'bfloat16'):
dtype = 'float32'
self.v = self.add_weight(
'v', (),
initializer='ones',
dtype=dtype,
experimental_autocast=False,
regularizer=self._regularizer)
self.built = True
def call(self, inputs):
self.assert_input_types(inputs)
assert self.v.dtype in (dtypes.float32, dtypes.float64)
return self._add(inputs, math_ops.cast(self.v, inputs.dtype))
class AddLayerWithFunction(AddLayer):
"""Same as AddLayer, but _add is decorated with a tf.function."""
@def_function.function
def _add(self, x, y):
return super(AddLayerWithFunction, self)._add(x, y)
class IdentityRegularizer(regularizers.Regularizer):
def __call__(self, x):
assert x.dtype == dtypes.float32
return array_ops.identity(x)
# If called outside any strategy.scope() calls, this will return the default
# strategy.
default_strategy_fn = distribution_strategy_context.get_strategy
def create_mirrored_strategy():
if context.num_gpus() >= 1:
return mirrored_strategy.MirroredStrategy(['cpu:0', 'gpu:0'])
else:
return mirrored_strategy.MirroredStrategy(['cpu:0'])
TESTCASES = ({
'testcase_name': 'base',
'strategy_fn': default_strategy_fn
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy
})
class KerasLayerTest(keras_parameterized.TestCase):
"""Test mixed precision with Keras layers."""
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_infer_with_float32_vars(self, strategy_fn):
x = constant_op.constant([1.], dtype=dtypes.float16)
with strategy_fn().scope(), policy.policy_scope('infer_float32_vars'):
layer = AddLayer(assert_type=dtypes.float16)
self.assertEqual(layer.dtype, dtypes.float32)
y = layer(x)
self.assertEqual(layer.v.dtype, dtypes.float32)
self.assertEqual(y.dtype, dtypes.float16)
self.assertEqual(layer.dtype, dtypes.float32)
self.assertEqual(layer._dtype_policy._name, 'float16_with_float32_vars')
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(y), 2.)
if base_layer_utils.v2_dtype_behavior_enabled():
# Layer should now cast inputs to float16
x = constant_op.constant([1.], dtype=dtypes.float32)
y = layer(x)
self.assertEqual(y.dtype, dtypes.float16)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
@testing_utils.enable_v2_dtype_behavior
def test_floating_point_policies_with_float32_vars(self, strategy_fn):
for dtype in 'bfloat16', 'float16', 'float64':
x = constant_op.constant([1.])
policy_name = dtype + '_with_float32_vars'
with strategy_fn().scope(), policy.policy_scope(policy_name):
layer = AddLayer(assert_type=dtype)
self.assertEqual(layer.dtype, dtypes.float32)
self.assertEqual(layer._dtype_policy._name, policy_name)
y = layer(x)
self.assertEqual(layer.v.dtype, dtypes.float32)
self.assertEqual(y.dtype, dtype)
self.assertEqual(layer.dtype, dtypes.float32)
self.assertEqual(layer._dtype_policy._name, policy_name)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(y), 2.)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
@testing_utils.enable_v2_dtype_behavior
def test_int32_with_float32_vars(self, strategy_fn):
# The policy int32_with_float32_vars is not useful at all (nor is any other
# non-float policy with float32 variables), but we have it for consistency,
# and so we test it.
class IdentityLayerWithVar(base_layer.Layer):
def build(self, _):
self.v = self.add_weight('v', ())
def call(self, inputs):
# Variables are only cast to other floats, not ints
assert array_ops.identity(self.v).dtype == 'float32'
return array_ops.identity(inputs)
x = constant_op.constant([1])
with strategy_fn().scope(), policy.policy_scope('int32_with_float32_vars'):
layer = IdentityLayerWithVar()
self.assertEqual(layer.dtype, dtypes.float32)
self.assertEqual(layer._dtype_policy._name, 'int32_with_float32_vars')
y = layer(x)
self.assertEqual(layer.v.dtype, dtypes.float32)
self.assertEqual(y.dtype, dtypes.int32)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_layer_with_int_variable(self, strategy_fn):
class LayerWithIntVar(base_layer.Layer):
def build(self, _):
self.v = self.add_weight('v', dtype='int32', trainable=False)
def call(self, inputs):
# Only float variables should be autocasted. This will fail if self.v is
# autocasted to float32
return math_ops.cast(inputs, 'int32') + self.v
x = constant_op.constant([1.])
layer = LayerWithIntVar(dtype=policy.Policy('mixed_float16'))
self.assertEqual(layer(x).dtype, 'int32')
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_layer_with_non_autocast_variable(self, strategy_fn):
x = constant_op.constant([1.], dtype=dtypes.float16)
with strategy_fn().scope():
with policy.policy_scope('infer_float32_vars'):
layer = AddLayerWithoutAutoCast(assert_type=dtypes.float16)
y = layer(x)
self.assertEqual(layer.v.dtype, dtypes.float32)
self.assertEqual(y.dtype, dtypes.float16)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(y), 2.)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_layer_calling_tf_function(self, strategy_fn):
x = constant_op.constant([1.], dtype=dtypes.float16)
with strategy_fn().scope():
with policy.policy_scope('infer_float32_vars'):
layer = AddLayerWithFunction(assert_type=dtypes.float16)
y = layer(x)
self.assertEqual(layer.v.dtype, dtypes.float32)
self.assertEqual(y.dtype, dtypes.float16)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(y), 2.)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_layer_regularizer_runs_in_var_dtype(self, strategy_fn):
x = constant_op.constant([1.], dtype=dtypes.float16)
with strategy_fn().scope():
with policy.policy_scope('infer_float32_vars'):
# Test on AddLayer
layer = AddLayer(
assert_type=dtypes.float16, regularizer=IdentityRegularizer())
layer(x)
(regularizer_loss,) = layer.losses
self.assertEqual(regularizer_loss.dtype, dtypes.float32)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(regularizer_loss), 1.)
# Test on AddLayerWithoutAutoCast
layer = AddLayerWithoutAutoCast(
assert_type=dtypes.float16, regularizer=IdentityRegularizer())
layer(x)
(regularizer_loss,) = layer.losses
self.assertEqual(regularizer_loss.dtype, dtypes.float32)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(regularizer_loss), 1.)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_passing_policy_to_layer(self, strategy_fn):
x = constant_op.constant([1.], dtype=dtypes.float16)
with strategy_fn().scope():
# Passing a Policy to 'dtype' sets the policy for that layer.
layer = AddLayer(
assert_type=dtypes.float16, dtype=policy.Policy('infer_float32_vars'))
# layer.dtype refers to the variable dtype
self.assertEqual(layer.dtype, dtypes.float32)
layer(x)
self.assertEqual(layer.v.dtype, dtypes.float32)
with policy.policy_scope('infer_float32_vars'):
# Passing a Policy to dtype overrides the global Policy
layer = AddLayer(
assert_type=dtypes.float16, dtype=policy.Policy('infer'))
# layer dtype is not yet known
self.assertEqual(layer.dtype, None)
layer(x)
self.assertEqual(layer.v.dtype, dtypes.float16)
self.assertEqual(layer.dtype, dtypes.float16)
@test_util.run_in_graph_and_eager_modes
def test_error_passing_policy_string_to_layer(self):
with self.assertRaisesRegexp(
TypeError, "Cannot convert value 'float16_with_float32_vars' to a "
"TensorFlow DType"):
# This is not allowed, as otherwise a "float16_with_float32_vars" policy
# could be created without an API call that has the name "experimental" in
# it.
AddLayer(dtype='float16_with_float32_vars')
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_gradient(self, strategy_fn):
x = constant_op.constant([1.], dtype=dtypes.float16)
with strategy_fn().scope() as strategy:
with policy.policy_scope('infer_float32_vars'):
layer = AddLayer(assert_type=dtypes.float16)
def run_fn():
with backprop.GradientTape() as tape:
y = layer(x)
# Divide by num_replicas_in_sync, as the effective total loss is the
# sum of each of the replica's losses.
y /= strategy.num_replicas_in_sync
# Learning rate is small enough that if applied to a float16 variable,
# the variable will not change. So this tests that the learning rate is not
# applied to a float16 value, but instead to the float32 variable.
opt = gradient_descent.SGD(2**-14)
grad = tape.gradient(y, layer.v)
return opt.apply_gradients([(grad, layer.v)])
op = strategy.experimental_run(run_fn)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.evaluate(op)
# The gradient with respective to the variable is 1. Since the
# variable is initialized with 1 and the learning rate is 2**-14, the
# new variable value should be: init_val - gradient * learning_rate,
# which is 1 - 1 * 2**-14
self.assertEqual(self.evaluate(layer.v), 1 - 2**-14)
def _test_checkpointing_layer_weights(self, strategy_fn,
mixed_prec_when_saving,
mixed_prec_when_loading):
# In this test, we potentially save with mixed precision enabled and load
# with mixed precision disabled, or vice versa. This is possible because
# variables are float32 regardless of whether mixed precision is enabled.
save_policy = 'infer_float32_vars' if mixed_prec_when_saving else 'infer'
load_policy = 'infer_float32_vars' if mixed_prec_when_loading else 'infer'
save_input_dtype = 'float16' if mixed_prec_when_saving else 'float32'
load_input_dtype = 'float16' if mixed_prec_when_loading else 'float32'
# Create a layer and save a checkpoint.
x = constant_op.constant([1.], dtype=save_input_dtype)
with strategy_fn().scope():
with policy.policy_scope(save_policy):
layer = AddLayer(assert_type=save_input_dtype)
layer(x) # Build layer
layer.set_weights([np.array(100.)])
self.assertEqual(self.evaluate(layer(x)), 101.)
checkpoint = trackable_utils.Checkpoint(layer=layer)
prefix = os.path.join(self.get_temp_dir(), 'ckpt')
save_path = checkpoint.save(prefix)
# Create a new layer and restore the checkpoint.
x = constant_op.constant([1.], dtype=load_input_dtype)
with strategy_fn().scope():
with policy.policy_scope(load_policy):
layer = AddLayer(assert_type=load_input_dtype)
layer(x) # Build layer
layer.set_weights([np.array(200.)])
self.assertEqual(self.evaluate(layer(x)), 201.)
checkpoint = trackable_utils.Checkpoint(layer=layer)
checkpoint.restore(save_path).assert_consumed().run_restore_ops()
self.assertEqual(layer.get_weights(), [100.])
self.assertEqual(self.evaluate(layer(x)), 101.)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_checkpointing_layer_weights(self, strategy_fn):
self._test_checkpointing_layer_weights(
strategy_fn, mixed_prec_when_saving=True, mixed_prec_when_loading=True)
self._test_checkpointing_layer_weights(
strategy_fn, mixed_prec_when_saving=True, mixed_prec_when_loading=False)
self._test_checkpointing_layer_weights(
strategy_fn, mixed_prec_when_saving=False, mixed_prec_when_loading=True)
@test_util.run_in_graph_and_eager_modes
def test_delete_variable(self):
layer = base_layer.Layer(dtype=policy.Policy('mixed_float16'))
layer.x = layer.add_weight('x')
self.assertEqual(layer.trainable_weights, [layer.x])
del layer.x
self.assertEqual(layer.trainable_weights, [])
class KerasModelTest(keras_parameterized.TestCase):
"""Test mixed precision with Keras models."""
def _is_strategy_supported(self, strategy_fn, check_model_type=False):
if (strategy_fn != default_strategy_fn and
(testing_utils.should_run_eagerly() or
(check_model_type and testing_utils.get_model_type() == 'subclass'))):
# Distribution strategies do not support subclassed models or running with
# `run_eagerly=True`.
return False
else:
return True
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
{
'testcase_name': 'base',
'strategy_fn': default_strategy_fn
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy,
}, {
'testcase_name': 'operator',
'strategy_fn': create_mirrored_strategy,
'use_operator': True
}, {
'testcase_name': 'regularizer',
'strategy_fn': create_mirrored_strategy,
'use_regularizer': True
}, {
'testcase_name': 'infer',
'strategy_fn': create_mirrored_strategy,
'policy_name': 'mixed_float16'
}, {
'testcase_name': 'norun_distributed',
'strategy_fn': create_mirrored_strategy,
'experimental_run_tf_function': False
})
@testing_utils.enable_v2_dtype_behavior
def test_model(self,
strategy_fn,
use_operator=False,
use_regularizer=False,
policy_name='mixed_float16',
experimental_run_tf_function=True):
if not self._is_strategy_supported(strategy_fn, check_model_type=True):
return
regularizer = IdentityRegularizer() if use_regularizer else None
with strategy_fn().scope():
# Pass loss_scale=None, as this test will fail if the DynamicLossScale
# skips applying gradients for a step
with policy.policy_scope(policy.Policy(policy_name, loss_scale=None)):
layer_list = []
if testing_utils.get_model_type() == 'subclass':
# Subclassed models do not have an Input layer, so the model does not
# cast inputs to the Input layer's dtype. Therefore, we need to
# manually insert a float16 cast.
cast_f16_layer = layers.Lambda(
lambda x: math_ops.cast(x, 'float16'), input_shape=(1,))
layer_list.append(cast_f16_layer)
layer = AddLayer(
assert_type=dtypes.float16,
use_operator=use_operator,
regularizer=regularizer,
input_shape=(1,))
cast_f32_layer = layers.Lambda(lambda x: math_ops.cast(x, 'float32'))
layer_list += [layer, cast_f32_layer]
model = testing_utils.get_model_from_layers(
layer_list, input_shape=(1,), input_dtype=dtypes.float16)
def loss_fn(y_true, y_pred):
del y_true
return math_ops.reduce_mean(y_pred)
# Learning rate is small enough that if applied to a float16 variable,
# the variable will not change. So this tests that the learning rate is not
# applied to a float16 value, but instead to the float32 variable.
opt = gradient_descent.SGD(2**-14)
model.compile(
opt,
loss=loss_fn,
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.ones((2, 1))
y = np.ones((2, 1))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(2)
model.fit(dataset)
# Variable starts at 1, and should have gradient of 2 ** -14 subtracted
# from it.
expected = 1 - 2**-14
if use_regularizer:
# Regularizer adds another 2 ** -14 to the gradient.
expected -= 2**-14
self.assertEqual(backend.eval(layer.v), expected)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
{
'testcase_name': 'base',
'strategy_fn': default_strategy_fn
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy,
}, {
'testcase_name': 'norun_distributed',
'strategy_fn': create_mirrored_strategy,
'experimental_run_tf_function': False,
})
def test_fixed_loss_scaling(self,
strategy_fn,
experimental_run_tf_function=True):
# Note: We do not test mixed precision in this method, only loss scaling.
if not self._is_strategy_supported(strategy_fn):
return
loss_scale = 8.
batch_size = 4
with strategy_fn().scope():
x = layers.Input(shape=(1,), batch_size=batch_size)
layer = AddLayer()
y = layer(x)
# The gradient of 'y' at this point is 1. With loss scaling, the gradient
# is 'loss_scale'. We divide by the batch size since the loss is averaged
# across batch elements.
expected_gradient = loss_scale / batch_size
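# Worked example of the expectation above, using the values set in this test:
# loss_scale=8 and batch_size=4 give an expected gradient of 8 / 4 = 2 flowing
# into the gradient-checking identity op below.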
identity_with_grad_check_fn = (
mp_test_util.create_identity_with_grad_check_fn([expected_gradient]))
y = core.Lambda(identity_with_grad_check_fn)(y)
model = models.Model(inputs=x, outputs=y)
def loss_fn(y_true, y_pred):
del y_true
return math_ops.reduce_mean(y_pred)
opt = gradient_descent.SGD(1.)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
model.compile(
opt,
loss=loss_fn,
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.assertEqual(backend.eval(layer.v), 1)
x = np.ones((batch_size, 1))
y = np.ones((batch_size, 1))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(batch_size)
model.fit(dataset)
# Variable starts at 1, and should have gradient of 1 subtracted from it.
expected = 0
self.assertEqual(backend.eval(layer.v), expected)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
{
'testcase_name': 'base',
'strategy_fn': default_strategy_fn
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy,
}, {
'testcase_name': 'loss_scaling',
'strategy_fn': create_mirrored_strategy,
'use_loss_scaling': True
})
@testing_utils.enable_v2_dtype_behavior
def test_advanced_model(self, strategy_fn, use_loss_scaling=False):
# The advanced model tests mixed-precision-related features that would occur
# in a resnet50 model. It tests a model that has:
# * Multiple layers, some which use auto-cast variables and some which do
# not
# * Regularization on some variables and not others.
# * A fixed loss scale (if use_loss_scaling is True)
if not self._is_strategy_supported(strategy_fn):
return
strategy = strategy_fn()
if use_loss_scaling:
loss_scale = 8.
else:
loss_scale = None
learning_rate = 2**-14
with strategy.scope():
with policy.policy_scope(policy.Policy('mixed_float16',
loss_scale=loss_scale)):
x = layers.Input(shape=(1,), batch_size=2)
layer1 = AddLayer(
assert_type=dtypes.float16,
regularizer=IdentityRegularizer(),
use_operator=True)
layer2 = AddLayerWithoutAutoCast(
assert_type=dtypes.float16, use_operator=True)
layer3 = AddLayer(assert_type=dtypes.float16, use_operator=False)
layer4 = AddLayerWithoutAutoCast(
assert_type=dtypes.float16,
regularizer=IdentityRegularizer(),
use_operator=False)
y = layer1(x)
y = layer2(y)
y = layer3(y)
y = layer4(y)
if use_loss_scaling:
# The gradient of 'y' at this point is 1. With loss scaling, the
# gradient is 'loss_scale'. We divide by the batch size of 2 since the
# loss is averaged across batch elements.
expected_gradient = loss_scale / 2
identity_with_grad_check_fn = (
mp_test_util.create_identity_with_grad_check_fn(
expected_dtype=dtypes.float16,
expected_gradient=[expected_gradient]))
y = core.Lambda(identity_with_grad_check_fn)(y)
y = math_ops.cast(y, dtypes.float32)
model = models.Model(inputs=x, outputs=y)
def loss_fn(y_true, y_pred):
self.assertEqual(y_true.dtype, dtypes.float32)
self.assertEqual(y_pred.dtype, dtypes.float32)
return math_ops.reduce_mean(y_pred)
opt = gradient_descent.SGD(learning_rate)
model.compile(
opt,
loss=loss_fn,
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.ones((2, 1))
y = np.ones((2, 1))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(2)
model.fit(dataset)
for layer in (layer1, layer2, layer3, layer4):
if layer.losses:
# Layer has weight regularizer
self.assertEqual(backend.eval(layer.v), 1 - 2 * learning_rate)
else:
# Layer does not have weight regularizer
self.assertEqual(backend.eval(layer.v), 1 - learning_rate)
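  # Editorial note: the two expected values above follow from each variable
  # receiving a unit gradient from the mean loss plus, for the layers built
  # with an IdentityRegularizer, a second unit gradient from the regularizer,
  # hence 1 - 2 * learning_rate versus 1 - learning_rate.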
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
{
'testcase_name': 'base',
'strategy_fn': default_strategy_fn
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy,
}, {
'testcase_name': 'pass_loss_scale_to_policy',
'strategy_fn': create_mirrored_strategy,
'pass_loss_scale_to_policy': True,
}, {
'testcase_name': 'norun_distributed',
'strategy_fn': create_mirrored_strategy,
'experimental_run_tf_function': False,
})
def test_dynamic_loss_scaling(self,
strategy_fn,
pass_loss_scale_to_policy=False,
experimental_run_tf_function=True):
if not self._is_strategy_supported(strategy_fn):
return
strategy = strategy_fn()
initial_loss_scale = 2.
batch_size = 4
loss_scale = loss_scale_module.DynamicLossScale(
initial_loss_scale=initial_loss_scale, increment_period=2)
expected_gradient = backend.variable([initial_loss_scale / batch_size],
dtype=dtypes.float16)
# If this variable is set to True, the model below will have NaN gradients
have_nan_gradients = backend.variable(False, dtype=dtypes.bool)
with strategy.scope():
opt = gradient_descent.SGD(1.)
if pass_loss_scale_to_policy:
p = policy.Policy('infer_float32_vars', loss_scale=loss_scale)
else:
p = policy.Policy('infer_float32_vars')
opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
with policy.policy_scope(p):
x = layers.Input(
shape=(1,), batch_size=batch_size, dtype=dtypes.float16)
layer = AddLayer(assert_type=dtypes.float16)
y = layer(x)
identity_with_nan_grads = (
mp_test_util.create_identity_with_nan_gradients_fn(
have_nan_gradients))
y = core.Lambda(identity_with_nan_grads)(y)
identity_with_grad_check_fn = (
mp_test_util.create_identity_with_grad_check_fn(
expected_dtype=dtypes.float16,
expected_gradient=expected_gradient))
y = core.Lambda(identity_with_grad_check_fn)(y)
y = math_ops.cast(y, dtypes.float32)
model = models.Model(inputs=x, outputs=y)
def loss_fn(y_true, y_pred):
del y_true
return math_ops.reduce_mean(y_pred)
model.compile(
opt,
loss=loss_fn,
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.assertEqual(backend.eval(layer.v), 1)
x = np.ones((batch_size, 1))
y = np.ones((batch_size, 1))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(batch_size)
model.fit(dataset)
    # The variable starts with 1 and has a gradient of 1, so will go down by 1
# each step.
self.assertEqual(backend.eval(layer.v), 0)
model.fit(dataset)
self.assertEqual(backend.eval(layer.v), -1)
# There have been two steps without NaNs, so the loss scale will double
backend.set_value(expected_gradient,
backend.get_value(expected_gradient * 2))
model.fit(dataset)
self.assertEqual(backend.eval(layer.v), -2)
# Next test with NaN gradients.
backend.set_value(have_nan_gradients, True)
model.fit(dataset)
# Variable should not be updated
self.assertEqual(backend.eval(layer.v), -2)
# Test with finite gradients again
backend.set_value(have_nan_gradients, False)
# The loss scale will be halved due to the NaNs, so the gradient will also
# be halved
backend.set_value(expected_gradient,
backend.get_value(expected_gradient / 2))
model.fit(dataset)
self.assertEqual(backend.eval(layer.v), -3)
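  # Editorial sketch of the update rule the assertions above rely on. It is a
  # simplified stand-in, not the real loss_scale_module.DynamicLossScale: the
  # scale doubles after `increment_period` consecutive finite-gradient steps
  # and is halved (with the variable update skipped) on NaN/Inf gradients.
  @staticmethod
  def _dynamic_loss_scale_update_sketch(scale, num_good_steps, grads_finite,
                                        increment_period=2, multiplier=2.):
    if not grads_finite:
      return scale / multiplier, 0, False  # halve and skip this update
    num_good_steps += 1
    if num_good_steps < increment_period:
      return scale, num_good_steps, True
    return scale * multiplier, 0, True  # double after enough good steps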
@test_util.run_in_graph_and_eager_modes
@testing_utils.enable_v2_dtype_behavior
def test_loss_scale_optimizer_overrides_policy_loss_scale(self):
with policy.policy_scope(policy.Policy('float32', loss_scale=10.)):
opt = gradient_descent.SGD(1.)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale=5.)
x = layers.Input(shape=(1,))
y = AddLayer()(x)
model = models.Model(x, y)
model.compile(opt, loss='mse')
self.assertEqual(self.evaluate(model.optimizer.loss_scale()), 5.)
@test_util.run_in_graph_and_eager_modes
@testing_utils.enable_v2_dtype_behavior
def test_pass_invalid_optimizer_with_loss_scaling(self):
with policy.policy_scope(policy.Policy('float32', loss_scale=10.)):
x = layers.Input(shape=(1,))
y = AddLayer()(x)
model = models.Model(x, y)
with self.assertRaisesRegexp(ValueError,
'optimizer" must be an instance of '):
model.compile(optimizers.SGD(1.), 'mse')
@test_util.run_in_graph_and_eager_modes
@testing_utils.enable_v2_dtype_behavior
def test_functional_model_loss_dtype(self):
with policy.policy_scope('float16'):
x = layers.Input(shape=(1,))
y = AddLayer()(x)
model = models.Model(x, y)
model.add_loss(math_ops.cast(y, 'float32'))
      # The loss should not be cast to the policy's dtype.
self.assertEqual(model.losses[0].dtype, 'float32')
@parameterized.named_parameters(
{
'testcase_name': 'base',
'strategy_fn': default_strategy_fn,
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy,
}, {
'testcase_name': 'base_h5',
'strategy_fn': default_strategy_fn,
'h5': True,
}, {
'testcase_name': 'distribute_h5',
'strategy_fn': create_mirrored_strategy,
'h5': True,
})
@test_util.run_in_graph_and_eager_modes
def test_save_weights_with_autocast_vars(self, strategy_fn, h5=False):
with strategy_fn().scope():
with policy.policy_scope('infer_float32_vars'):
x = layers.Input(shape=(1,), batch_size=2, dtype=dtypes.float16)
layer = AddLayer(assert_type=dtypes.float16)
y = layer(x)
y = math_ops.cast(y, dtypes.float32)
model = models.Model(inputs=x, outputs=y)
model.set_weights([np.array(100.)])
x = np.ones((2, 1), dtype=np.float16)
self.assertAllClose(backend.get_value(model(x)), x + 100.)
suffix = '.h5' if h5 else ''
weights_file = os.path.join(self.get_temp_dir(), 'weights' + suffix)
model.save_weights(weights_file)
model.set_weights([np.array(200.)])
self.assertAllClose(backend.get_value(model(x)), x + 200.)
model.load_weights(weights_file)
self.assertAllClose(backend.get_value(model(x)), x + 100.)
self.assertEqual(model.get_weights(), [np.array(100.)])
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
{
'testcase_name': 'base',
'strategy_fn': default_strategy_fn,
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy,
}, {
'testcase_name': 'different_var_name',
'strategy_fn': default_strategy_fn,
'var_name': 'w'
}, {
'testcase_name': 'different_var_name_distribute',
'strategy_fn': create_mirrored_strategy,
'var_name': 'w'
})
def test_save_slot_variables_with_autocast_vars(self,
strategy_fn,
var_name='v'):
if not self._is_strategy_supported(strategy_fn):
return
with strategy_fn().scope(), policy.policy_scope('infer_float32_vars'):
x = layers.Input(shape=(2,), batch_size=2, dtype=dtypes.float16)
# Having a var_name other than 'v' tests that a fixed bug (b/134713714)
# does not reoccur. The bug was that a crash would occur when saving a
# checkpoint where an AutoCastVariable with a slot variable would have a
# different name than the layer attribute's name (layer.v in this case).
layer = AddLayer(assert_type=dtypes.float16, var_name=var_name)
y = layer(x)
y = math_ops.cast(y, dtypes.float32)
model = models.Model(inputs=x, outputs=y)
opt = gradient_descent.SGD(1., 1.)
model.compile(
optimizer=opt,
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(np.zeros((2, 2)), np.zeros((2, 2)), batch_size=2)
weights_file = os.path.join(self.get_temp_dir(), 'weights')
model.save_weights(weights_file)
saved_slot = backend.get_value(opt.get_slot(layer.v, 'momentum'))
model.fit(np.zeros((2, 2)), np.zeros((2, 2)), batch_size=2)
new_slot = backend.get_value(opt.get_slot(layer.v, 'momentum'))
self.assertNotEqual(new_slot, saved_slot)
model.load_weights(weights_file)
restored_slot = backend.get_value(opt.get_slot(layer.v, 'momentum'))
self.assertEqual(restored_slot, saved_slot)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(*TESTCASES)
def test_save_weights_with_dynamic_loss_scaling(self, strategy_fn):
if not self._is_strategy_supported(strategy_fn):
return
strategy = strategy_fn()
if (isinstance(strategy, mirrored_strategy.MirroredStrategy) and
not context.executing_eagerly()):
# TODO(b/121381184): Enable running the test in this case.
return
# Create and run model.
with strategy.scope():
x = layers.Input(shape=(2,), batch_size=2, dtype=dtypes.float32)
y = AddLayer(assert_type=dtypes.float32)(x)
model = models.Model(inputs=x, outputs=y)
loss_scale = loss_scale_module.DynamicLossScale(
initial_loss_scale=1., increment_period=2., multiplier=2.)
opt = gradient_descent.SGD(1.)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
model.compile(
optimizer=opt,
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
# Run for 3 steps (6 examples with a batch size of 2)
model.fit(np.zeros((6, 2)), np.zeros((6, 2)), batch_size=2)
self.assertEqual(backend.get_value(loss_scale()), 2)
self.assertEqual(backend.get_value(loss_scale._num_good_steps), 1)
# Save model weights.
save_prefix = os.path.join(self.get_temp_dir(), 'ckpt')
model.save_weights(save_prefix)
# Run model again for 1 step (2 examples with a batch size of 2)
model.fit(np.zeros((2, 2)), np.zeros((2, 2)), batch_size=2)
self.assertEqual(backend.get_value(loss_scale()), 4)
self.assertEqual(backend.get_value(loss_scale._num_good_steps), 0)
# Load model weights and ensure loss scale weights are restored.
model.load_weights(save_prefix)
self.assertEqual(backend.get_value(loss_scale()), 2)
self.assertEqual(backend.get_value(loss_scale._num_good_steps), 1)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
{
'testcase_name': 'base',
'strategy_fn': default_strategy_fn,
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy,
}, {
'testcase_name': 'base_h5',
'strategy_fn': default_strategy_fn,
'h5': True,
}, {
'testcase_name': 'distribute_h5',
'strategy_fn': create_mirrored_strategy,
'h5': True,
})
def test_save_model_with_dynamic_loss_scaling(self, strategy_fn, h5=False):
if not self._is_strategy_supported(strategy_fn):
return
strategy = strategy_fn()
if (isinstance(strategy, mirrored_strategy.MirroredStrategy) and
not context.executing_eagerly()):
# TODO(b/121381184): Enable running the test in this case.
return
# Create and run model.
with strategy.scope():
x = layers.Input(shape=(2,), batch_size=2, dtype=dtypes.float32)
y = AddLayer()(x)
model = models.Model(inputs=x, outputs=y)
loss_scale = loss_scale_module.DynamicLossScale(
initial_loss_scale=1., increment_period=2., multiplier=2.)
opt = gradient_descent.SGD(1.)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
model.compile(
optimizer=opt,
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
# Run for 3 steps (6 examples with a batch size of 2)
model.fit(np.zeros((6, 2)), np.zeros((6, 2)), batch_size=2)
self.assertEqual(backend.get_value(loss_scale()), 2)
self.assertEqual(backend.get_value(loss_scale._num_good_steps), 1)
(weight,) = model.trainable_weights
orig_weight = backend.get_value(weight)
# Save model weights.
save_path = os.path.join(self.get_temp_dir(), 'model')
model.save(save_path, save_format='h5' if h5 else 'tf')
# Run model again for 1 step (2 examples with a batch size of 2)
model.fit(np.zeros((2, 2)), np.zeros((2, 2)), batch_size=2)
new_weight = backend.get_value(weight)
self.assertNotEqual(new_weight, orig_weight)
self.assertEqual(backend.get_value(loss_scale()), 4)
self.assertEqual(backend.get_value(loss_scale._num_good_steps), 0)
# Load model weights and ensure loss scale weights are restored.
model = saving.load_model(save_path, custom_objects={'AddLayer': AddLayer})
loss_scale = model.optimizer.loss_scale
(weight,) = model.trainable_weights
loaded_weight = backend.get_value(weight)
self.assertEqual(loaded_weight, orig_weight)
# Currently the loss scale isn't always saved when the model is saved with
# Model.save(). So we assert the loss scale either has the value when it was
# saved, or the value it was initialized with.
# TODO(reedwm): Always save/restore the loss scale with Model.save().
self.assertIn(backend.get_value(loss_scale()), (1, 2))
self.assertIn(backend.get_value(loss_scale._num_good_steps), (0, 1))
class RnnTest(keras_parameterized.TestCase):
"""Test mixed precision with RNNs."""
# TODO(b/136512020): Support and test recurrent_v2.GRU.
@parameterized.named_parameters(
{
'testcase_name': 'base_simple',
'strategy_fn': default_strategy_fn,
'rnn_class': recurrent.SimpleRNN,
}, {
'testcase_name': 'distribute_simple',
'strategy_fn': create_mirrored_strategy,
'rnn_class': recurrent.SimpleRNN,
}, {
'testcase_name': 'base_gru',
'strategy_fn': default_strategy_fn,
'rnn_class': recurrent.GRU,
}, {
'testcase_name': 'distribute_gru',
'strategy_fn': create_mirrored_strategy,
'rnn_class': recurrent.GRU,
})
@test_util.run_in_graph_and_eager_modes
# RNNs do not work properly with GradientTape in graph mode when V1 control
# flow is used.
@test_util.enable_control_flow_v2
def test_rnn(self, strategy_fn, rnn_class):
x = array_ops.ones((2, 3, 4), dtype=dtypes.float16)
strategy = strategy_fn()
with strategy.scope(), policy.policy_scope('infer_float32_vars'):
layer = rnn_class(units=4)
def run_fn():
with backprop.GradientTape() as tape:
y = layer(x)
self.assertEqual(y.dtype, dtypes.float16)
opt = gradient_descent.SGD(1.)
grads = tape.gradient(y, layer.trainable_weights)
return opt.apply_gradients(zip(grads, layer.trainable_weights))
op = strategy.experimental_run(run_fn)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.evaluate(op)
for v in layer.weights:
self.assertEqual(v.dtype, dtypes.float32)
if __name__ == '__main__':
test.main()
| apache-2.0 | 3,689,826,003,535,400,000 | 39.629077 | 90 | 0.64957 | false | 3.680456 | true | false | false |
clone1612/appstore | nextcloudappstore/core/api/v1/serializers.py | 1 | 5704 | from nextcloudappstore.core.models import PhpExtensionDependency, \
DatabaseDependency, Category, AppAuthor, AppRelease, Screenshot, \
AppRating, App
from nextcloudappstore.core.validators import HttpsUrlValidator
from parler_rest.fields import TranslatedFieldsField
from parler_rest.serializers import TranslatableModelSerializer
from rest_framework import serializers
from rest_framework.fields import SerializerMethodField, DateTimeField
from django.contrib.auth import get_user_model
class PhpExtensionDependencySerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField(source='php_extension.id')
version_spec = SerializerMethodField()
raw_version_spec = SerializerMethodField()
class Meta:
model = PhpExtensionDependency
fields = ('id', 'version_spec', 'raw_version_spec')
def get_version_spec(self, obj):
return obj.version_spec.replace(',', ' ')
def get_raw_version_spec(self, obj):
return obj.raw_version_spec.replace(',', ' ')
class DatabaseDependencySerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField(source='database.id')
version_spec = SerializerMethodField()
raw_version_spec = SerializerMethodField()
class Meta:
model = DatabaseDependency
fields = ('id', 'version_spec', 'raw_version_spec')
def get_version_spec(self, obj):
return obj.version_spec.replace(',', ' ')
def get_raw_version_spec(self, obj):
return obj.raw_version_spec.replace(',', ' ')
class CategorySerializer(TranslatableModelSerializer):
translations = TranslatedFieldsField(shared_model=Category)
class Meta:
model = Category
fields = ('id', 'translations')
class AuthorSerializer(serializers.ModelSerializer):
class Meta:
model = AppAuthor
fields = ('name', 'mail', 'homepage')
class AppReleaseSerializer(serializers.ModelSerializer):
databases = DatabaseDependencySerializer(many=True, read_only=True,
source='databasedependencies')
php_extensions = \
PhpExtensionDependencySerializer(many=True, read_only=True,
source='phpextensiondependencies')
php_version_spec = SerializerMethodField()
platform_version_spec = SerializerMethodField()
raw_php_version_spec = SerializerMethodField()
raw_platform_version_spec = SerializerMethodField()
translations = TranslatedFieldsField(shared_model=AppRelease)
class Meta:
model = AppRelease
fields = (
'version', 'php_extensions', 'databases', 'shell_commands',
'php_version_spec', 'platform_version_spec', 'min_int_size',
'download', 'created', 'licenses', 'last_modified', 'is_nightly',
'raw_php_version_spec', 'raw_platform_version_spec', 'signature',
'translations',
)
def get_platform_version_spec(self, obj):
return obj.platform_version_spec.replace(',', ' ')
def get_php_version_spec(self, obj):
return obj.php_version_spec.replace(',', ' ')
def get_raw_platform_version_spec(self, obj):
return obj.raw_platform_version_spec.replace(',', ' ')
def get_raw_php_version_spec(self, obj):
return obj.raw_php_version_spec.replace(',', ' ')
class ScreenshotSerializer(serializers.ModelSerializer):
class Meta:
model = Screenshot
fields = ('url', 'small_thumbnail')
class AppSerializer(serializers.ModelSerializer):
releases = SerializerMethodField()
screenshots = ScreenshotSerializer(many=True, read_only=True)
authors = AuthorSerializer(many=True, read_only=True)
translations = TranslatedFieldsField(shared_model=App)
last_modified = DateTimeField(source='last_release')
def __init__(self, *args, **kwargs):
self.version = kwargs.pop('version')
super().__init__(*args, **kwargs)
class Meta:
model = App
fields = (
'id', 'categories', 'user_docs', 'admin_docs', 'developer_docs',
'issue_tracker', 'website', 'created', 'last_modified', 'releases',
'screenshots', 'translations', 'is_featured', 'authors',
'rating_recent', 'rating_overall', 'rating_num_recent',
'rating_num_overall', 'certificate',
)
def get_releases(self, obj):
releases = obj.releases.prefetch_related(
'translations',
'databases',
'licenses',
'phpextensiondependencies__php_extension',
'databasedependencies__database',
'shell_commands',
).all()
if self.version:
data = [r for r in releases if r.is_compatible(self.version)]
else:
data = releases
return AppReleaseSerializer(data, many=True, read_only=True).data
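# Editorial usage sketch (the queryset and version string are illustrative
# assumptions, not taken from this app's views): AppSerializer consumes the
# extra `version` kwarg in __init__ above, and when it is set only releases
# compatible with that platform version are serialized.
#
#     AppSerializer(App.objects.all(), many=True, version='9.0.3').data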
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
fields = ('id', 'first_name', 'last_name')
class AppRatingSerializer(serializers.ModelSerializer):
user = UserSerializer(many=False, read_only=True)
translations = TranslatedFieldsField(shared_model=AppRating)
class Meta:
model = AppRating
fields = ('rating', 'rated_at', 'translations', 'user', 'app')
class AppReleaseDownloadSerializer(serializers.Serializer):
download = serializers.URLField(validators=[HttpsUrlValidator()])
signature = serializers.CharField()
nightly = serializers.BooleanField(required=False, default=False)
class AppRegisterSerializer(serializers.Serializer):
certificate = serializers.CharField()
signature = serializers.CharField()
| agpl-3.0 | 219,983,444,380,250,140 | 34.874214 | 79 | 0.66655 | false | 4.244048 | false | false | false |
SgtHotshot/forge-cortex | cortex/core/views.py | 1 | 1448 | import django.shortcuts
import django.conf
import django.contrib.auth
import django.contrib.auth.forms
import django.core.urlresolvers
import django.views.generic
# pylint: disable=too-few-public-methods, too-many-ancestors, unused-argument
class RootView(django.views.generic.TemplateView):
template_name = 'root.html'
class LoginView(django.views.generic.FormView):
form_class = django.contrib.auth.forms.AuthenticationForm
redirect_arg = 'next'
template_name = 'login.html'
def get(self, *args, **kwargs):
if self.request.user.is_authenticated():
return django.shortcuts.redirect(self.get_success_url())
return super(LoginView, self).get(*args, **kwargs)
def get_success_url(self):
request_params = getattr(self.request, self.request.method)
if self.redirect_arg in request_params:
return request_params[self.redirect_arg]
elif self.redirect_arg in self.kwargs:
return self.kwargs[self.redirect_arg]
else:
return django.conf.settings.LOGIN_REDIRECT_URL
def form_valid(self, form):
django.contrib.auth.login(self.request, form.get_user())
return super(LoginView, self).form_valid(form)
class LogoutView(django.views.generic.View):
redirect_url = django.core.urlresolvers.reverse_lazy('root')
def get(self, *args, **kwargs):
return self.post(*args, **kwargs)
def post(self, *args, **kwargs):
django.contrib.auth.logout(self.request)
return django.shortcuts.redirect(self.redirect_url)
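# Editorial wiring sketch (URL regexes and names are assumptions; the
# project's real urls.py is not shown here). The 'root' name matches the
# reverse_lazy('root') used by LogoutView above.
#
#     urlpatterns = [
#         url(r'^$', RootView.as_view(), name='root'),
#         url(r'^login/$', LoginView.as_view(), name='login'),
#         url(r'^logout/$', LogoutView.as_view(), name='logout'),
#     ]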
| mit | 7,560,867,296,937,909,000 | 28.55102 | 77 | 0.755525 | false | 3.217778 | false | false | false |
gbanegas/Red_with_mult | main_thread.py | 1 | 1607 | '''
Created on 10 Sep 2014
@author: gustavo
'''
from reduction import Reduction
from polynomial import Polynomial
from threadc import ThreadCount
import os
import threading
def recoverfile(saved, readed):
if not os.path.exists(saved):
return True, []
f = open(saved,'r')
if(not os.stat(saved).st_size==0):
pols = []
pols_done = []
for line in readed:
pol = Polynomial(line)
pols.append(pol)
for line in f:
line = line.replace("[","")
line = line.replace("]","")
spl = line.split(',')
p = ""
for i in xrange(0,len(spl)-1):
p = p + " + x^" + str(spl[i].replace(" ",""))
p = p + " + 1"
p = p.replace("+","",1)
#print p
pol_ = Polynomial(p)
pols_done.append(pol_)
pols_set = set(pols)
pols_set_done = set(pols_done)
result = pols_set - pols_set_done
return False, list(result)
else:
return True, []
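# Editorial note (format inferred from the parsing above): each line of the
# saved result file lists the exponents of an already-processed polynomial,
# e.g. "[163, 7, 6, 3, 0]" is rebuilt as "x^163 + x^7 + x^6 + x^3 + 1" and
# subtracted from the set of polynomials still pending in the input file.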
if __name__ == '__main__':
lock = threading.Lock()
lockScreen = threading.Lock()
files = ["pol_163_.txt"]
#degrees = [21, 97, 139, 163, 233, 283, 571, 1021, 1163]
#degree = 571
for fileName in files:
save = 'result_pol_'+fileName
f = open(fileName,'r')
read, pols = recoverfile(save, f)
if read:
for line in f:
pol = Polynomial(line)
pols.append(pol)
print len(pols)
threads = []
i = 0
j = 30
print "starting...."
for temp in range(0, len(pols)):
if (j > len(pols)):
j = len(pols)
thread = ThreadCount(temp,lockScreen, lock, pols[i:j], save)
i = j+1
j += 1
threads.append(thread)
for thread in threads:
thread.start()
for current in threads:
current.join()
| apache-2.0 | 2,271,769,503,056,894,200 | 20.144737 | 63 | 0.605476 | false | 2.630115 | false | false | false |
opencog/ros-behavior-scripting | sensors/face_track.py | 1 | 6042 | #
# face_track.py - Registery and tracking of visible human faces
# Copyright (C) 2014,2015,2016 Hanson Robotics
# Copyright (C) 2015,2016 Linas Vepstas
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import rospy
import logging
from std_msgs.msg import Int32
# FIXME: In developer role of hrtool hr_msgs and pi_face_tracker conflict, why?
# from hr_msgs.msg import FaceEvent, Faces
from pi_face_tracker.msg import FaceEvent, Faces
from atomic_msgs import AtomicMsgs
logger = logging.getLogger('hr.eva_behavior.face_track')
# Thin python wrapper, to subscribe to face-tracking ROS messages,
# (face ID's, 3D face locations) and then re-wrap these as OpenCog
# atoms, via AtomicMsgs, and forward them on into the OpenCog
# space-time server.
#
class FaceTrack:
# Control flags. Ideally, FaceTrack should publish targets using
# ros_commo EvaControl class.
C_EYES = 16
C_FACE = 32
# Face tracking will be disabled if neither of these flags are set.
# (this allows for a manual over-ride of face-tracking by other
# control processes.)
C_FACE_TRACKING = C_FACE | C_EYES
def __init__(self):
# The OpenCog API. This is used to send face data to OpenCog.
self.atomo = AtomicMsgs()
self.atomo.create_face_octomap()
# List of currently visible faces
self.visible_faces = []
# Subscribed pi_vision topics and events
self.TOPIC_FACE_EVENT = "/camera/face_event"
self.EVENT_NEW_FACE = "new_face"
self.EVENT_LOST_FACE = "lost_face"
self.EVENT_RECOGNIZED_FACE = "recognized_face"
# Overrides current face being tracked by WebUI
self.EVENT_TRACK_FACE = "track_face"
self.TOPIC_FACE_LOCATIONS = "/camera/face_locations"
# Face appearance/disappearance from pi_vision
rospy.Subscriber(self.TOPIC_FACE_EVENT, FaceEvent, self.face_event_cb)
# Face location information from pi_vision
rospy.Subscriber(self.TOPIC_FACE_LOCATIONS, Faces, self.face_loc_cb)
rospy.Subscriber("/behavior_control", Int32, self.behavior_control_cb)
# Control Eyes and face by default
self.control_mode = 255
# ----------------------------------------------------------
# Start tracking a face
def add_face(self, faceid):
if faceid in self.visible_faces:
return
self.visible_faces.append(faceid)
logger.info("New face added to visibile faces: " +
str(self.visible_faces))
self.atomo.add_face_to_atomspace(faceid)
# Stop tracking a face
def remove_face(self, faceid):
self.atomo.remove_face_from_atomspace(faceid)
if faceid in self.visible_faces:
self.visible_faces.remove(faceid)
logger.info("Lost face; visibile faces now: " + str(self.visible_faces))
	# Force the robot to turn its attention to the given face,
	# i.e. to interact with and talk to that face.
def track_face(self, faceid):
if faceid in self.visible_faces:
logger.info("Face requested interaction: " + str(faceid))
self.atomo.add_tracked_face_to_atomspace(faceid)
# ----------------------------------------------------------
# pi_vision ROS callbacks
# pi_vision ROS callback, called when a new face is detected,
# or a face is lost. Also called for recognized faces.
#
# This callback handles recognized faces using a special message
# format, published on the `/camera/face_locations`. Note that
# there is also a different topic for recognized faces, called
# `/camera/face_recognition`. See the `face-recog.py` file for
# details. I am not sure what subsystem published which message
# type. XXX FIXME - figure out why there are two different
# face recognition subsystems, and standardize one which we
# should use.
def face_event_cb(self, data):
if not self.control_mode & self.C_FACE_TRACKING:
return
if data.face_event == self.EVENT_NEW_FACE:
self.add_face(data.face_id)
elif data.face_event == self.EVENT_LOST_FACE:
self.remove_face(data.face_id)
elif data.face_event == self.EVENT_TRACK_FACE:
self.track_face(data.face_id)
elif data.face_event == self.EVENT_RECOGNIZED_FACE:
self.atomo.face_recognition(data.face_id, data.recognized_id)
# pi_vision ROS callback, called when pi_vision has new face
# location data for us. This happens frequently (about 10x/second)
def face_loc_cb(self, data):
if not self.control_mode & self.C_FACE_TRACKING:
return
for face in data.faces:
# Update location of a face. The location is stored in the
# OpenCog space server (octomap).
if face.id in self.visible_faces:
self.atomo.update_face_octomap(face.id,
face.point.x, face.point.y, face.point.z)
# Enable/disable Opencog face-tracking. This is driven by the
# master control GUI. XXX FIXME -- why should this ever be disabled?
	# OpenCog should always know about faces; perhaps it is control of
# head and eye movements that should be disabled?
def behavior_control_cb(self, data):
# Is facetracking currently enabled?
facetracking = self.control_mode & self.C_FACE_TRACKING
self.control_mode = data.data
print("New Control mode %i" % self.control_mode )
# If face-tracking was enabled, and is now disabled ...
if facetracking > 0 and self.control_mode & self.C_FACE_TRACKING == 0:
self.atomo.update_ft_state_to_atomspace(False)
# Need to clear faces:
for face in self.visible_faces[:]:
self.remove_face(face)
elif self.control_mode & self.C_FACE_TRACKING > 0:
self.atomo.update_ft_state_to_atomspace(True)
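# Editorial start-up sketch (the node name is an assumption; the package's
# real entry point lives elsewhere):
#
#   if __name__ == '__main__':
#       rospy.init_node('face_track')
#       FaceTrack()
#       rospy.spin()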
| agpl-3.0 | -381,146,016,300,767,550 | 34.751479 | 80 | 0.719298 | false | 3.260658 | false | false | false |
abinit/abinit | tests/pymods/yaml_tools/structures/commons.py | 1 | 2155 | """
Define basic structures.
"""
from __future__ import print_function, division, unicode_literals
from ..register_tag import yaml_auto_map, yaml_implicit_scalar
@yaml_auto_map
class GenericMap(object):
"""A generic tag definition for test and example."""
@yaml_implicit_scalar
class YAMLComplex(complex):
# > [1] <
yaml_pattern = (r'[+-]?(\d+(\.\d*)?|\.\d+)([eEdD][+-]?\d+)?'
r' *[+-] *[+-]?(\d+(\.\d*)?|\.\d+)([eEdD][+-]?\d+)?i')
# > [2] <> [3] <
    # [1] and [3]: floats with optional sign and exponential notation; these
    # also match integers and forms like ".1" (Fortran does not produce those)
# [2] + or - with optional blanks around
@staticmethod
def __new__(*args, **kwargs):
return complex.__new__(*args, **kwargs)
@classmethod
def from_scalar(cls, scal):
return cls(scal
# python always uses double and only recognise E and e
.replace('d', 'e')
.replace('D', 'e')
# python uses j instead of i (as in electro magnetism)
.replace('i', 'j')
# spaces have to be stripped out around the central + or -
.replace(' ', '')
# python expects only one + or - in string form
.replace('+-', '-')
.replace('-+', '-'))
def to_scalar(self):
return repr(self)[1:-1] # remove parentheses
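# Editorial example, assuming the yaml_implicit_scalar decorator returns the
# class unchanged; the value below is made up rather than taken from an
# Abinit output file. It shows a Fortran-style scalar being normalised to a
# Python complex literal before complex() parses it.
def _yaml_complex_example():
    # '1.0 -2.0e-1i' -> '1.0-2.0e-1j' -> (1-0.2j)
    assert YAMLComplex.from_scalar('1.0 -2.0e-1i') == complex(1.0, -0.2)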
class AbinitMessage(object):
_is_abinit_message = True
@yaml_auto_map
class AbinitError(AbinitMessage):
"""Base class for Abinit messages."""
__yaml_tag = 'ERROR'
@yaml_auto_map
class AbinitWarning(AbinitMessage):
__yaml_tag = 'WARNING'
# MG: Is this used somewhere?
@yaml_auto_map
class AbinitInfo(AbinitMessage):
__yaml_tag = 'INFO'
@yaml_auto_map
class AbinitComment(AbinitMessage):
__yaml_tag = 'COMMENT'
@yaml_auto_map
class DatasetInfo(object):
__yaml_tag = 'DatasetInfo'
@yaml_auto_map
class BeginCycle(object):
__yaml_tag = 'BeginCycle'
| gpl-3.0 | -7,643,572,895,698,716,000 | 26.628205 | 77 | 0.538747 | false | 3.696398 | false | false | false |
cloudtrends/env_sh_py_scripts | comm_funcs.py | 1 | 4217 | # -*- coding: utf-8 –*-
import sys
reload(sys)
import os
sys.path.append(os.getcwd())
#sys.setdefaultencoding('utf8')
### ### ### ### ### ### ### ### ### ### ### ###
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
#following from Python cookbook, #475186
def has_colours(stream):
if not hasattr(stream, "isatty"):
return False
if not stream.isatty():
return False # auto color only on TTYs
try:
import curses
curses.setupterm()
return curses.tigetnum("colors") > 2
except:
# guess false in case of error
return False
has_colours = has_colours(sys.stdout)
def print_color(text, colour=WHITE):
if has_colours:
seq = "\x1b[1;%dm" % (30+colour) + text + "\x1b[0m"
sys.stdout.write(seq)
else:
sys.stdout.write(text)
def print_ok( text ):
print_color(text + "\n", GREEN)
def print_error(text):
print_color(text + "\n", RED)
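# Editorial usage note (strings are illustrative): print_ok/print_error wrap
# print_color with GREEN/RED and append a newline, while print_color itself
# writes without one and silently falls back to plain text when stdout is not
# a colour-capable TTY.
#
#   print_ok("environment ready")
#   print_error("GOPATH not set")
#   print_color("building... ", YELLOW)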
def helloworld():
print "helloworld"
def listdir_fullpath(d):
if not os.path.exists( d ):
print_error("listdir_full path: ERROR dir not exit :" + d )
sys.exit(1)
return [os.path.join(d, f) for f in os.listdir(d)]
def add_to_exist_file( file_name , line ):
with open( file_name , "r+U") as f:
try:
f.seek(-1, 2)
while f.read(1) == "\n":
f.seek(-2, 1) # go back two characters, since
# reading advances by one character
except IOError: # seek failed, so the file consists
f.seek(0) # exclusively of newline characters
#else:
# f.write("\n") # Add exactly one newline character
if not line.endswith("\n"):
line = line + "\n"
f.write( line ) # Add a new line
def get_file_content_as_list( file_name ):
f = open( file_name )
lines = f.readlines()
f.close()
return lines
def get_gopath():
return os.environ['GOPATH']
def get_projects_str_by_app_type(app_type=""):
#if "gobbs" == app_type:
# return get_projects_str()
gopath = get_gopath()
if 0 == len(gopath):
print_error( "ERROR GOPATH not set." )
sys.exit(1)
proj_file = gopath + "/" + app_type + "_projects.txt"
if not os.path.exists( proj_file ):
print_error("ERROR proj_file not exit :" + proj_file )
sys.exit(1)
contents = get_file_content_as_list(proj_file)
cs = ""
for one in contents:
cs = cs + " " + one
print cs
return cs
def get_projects_str():
gopath = get_gopath()
if 0 == len(gopath):
print_error( "ERROR GOPATH not set." )
sys.exit(1)
proj_file = gopath + "/gobbs_projects.txt"
if not os.path.exists( proj_file ):
print_error("ERROR proj_file not exit :" + proj_file )
sys.exit(1)
contents = get_file_content_as_list(proj_file)
cs = ""
for one in contents:
cs = cs + " " + one
return cs
def check_app_type_and_instance_name( app_type , instance_name ):
print "app type:", app_type
if app_type not in get_projects_str_by_app_type(app_type):
print_error("ERROR\n")
print_error( "not find app type:"+ app_type )
return False
print "instance name:", instance_name
# how to check instance name?
mydir = os.getcwd()
src_dir = mydir + "/src"
app_dir = src_dir + "/" + app_type
app_views_dir = src_dir + "/" + app_type +"_views"
files = listdir_fullpath( app_views_dir )
all_instance_name = ""
find_instance = False
for one in files:
if not os.path.isdir( one ):
continue
one = one.replace( app_views_dir , "" )[1:]
if one == instance_name:
find_instance = True
all_instance_name = all_instance_name + " " + one
if not find_instance :
print_error( "instance :" + instance_name + " not in instance_name list :" )
print_error( " check your app_views_dir: " + app_views_dir )
print all_instance_name
return False
return True
if __name__ == "__main__":
print os.environ['HOME']
print os.environ['GOPATH']
| gpl-2.0 | 7,682,343,216,183,510,000 | 28.068966 | 89 | 0.559668 | false | 3.326756 | false | false | false |
potatolondon/djangoappengine-1-4 | utils.py | 1 | 2473 | import os
from google.appengine.api import apiproxy_stub_map
from google.appengine.api.app_identity import get_application_id
from google.appengine.api.datastore import Entity, Put
have_appserver = bool(apiproxy_stub_map.apiproxy.GetStub('datastore_v3'))
if not have_appserver:
from .boot import PROJECT_DIR
from google.appengine.tools import old_dev_appserver as dev_appserver
appconfig = dev_appserver.LoadAppConfig(PROJECT_DIR, {},
default_partition='dev')[0]
def appid():
if have_appserver:
return get_application_id()
else:
try:
return appconfig.application.split('~', 1)[-1]
except ImportError, e:
raise Exception("Could not get appid. Is your app.yaml file missing? "
"Error was: %s" % e)
on_production_server = 'SERVER_SOFTWARE' in os.environ and not os.environ['SERVER_SOFTWARE'].startswith("Development")
def bulk_create(instances, connection=None):
"""
Uses AppEngine's bulk Put() call on a number of instances
this will NOT call save() but it will return the instances
with their primary_key populated (unlike Django's bulk_create)
"""
if connection is None:
from django.db import connection
from .fields import AncestorKey
def prepare_entity(instance):
if isinstance(instance.pk, AncestorKey):
parent = instance.pk._parent_key
else:
parent = None
result = Entity(instance._meta.db_table, parent=parent)
for field in instance._meta.fields:
if field.name == "id":
continue
value = field.pre_save(instance, True)
setattr(instance, field.name, value)
value = field.get_db_prep_save(getattr(instance, field.attname), connection)
if isinstance(value, (list, set)):
value = list(value)
if not value:
value = None
result[field.column] = value
return result
entities = [ prepare_entity(x) for x in instances ]
keys = Put(entities)
assert(len(keys) == len(entities))
for i, key in enumerate(keys):
assert(key)
if key.parent():
instances[i]._parent_key = key.parent()
instances[i].pk.key_id = key.id_or_name()
else:
instances[i].id = key.id_or_name()
return instances
| bsd-3-clause | 3,023,978,416,118,690,000 | 31.116883 | 118 | 0.606146 | false | 4.142379 | false | false | false |
ray-project/ray | python/ray/tune/trial.py | 1 | 25468 | from typing import Callable, Dict, Sequence, Union
import json
import ray
import ray.cloudpickle as cloudpickle
from collections import deque
import copy
import logging
import platform
import shutil
import uuid
import time
import os
from numbers import Number
from ray.tune import TuneError
from ray.tune.checkpoint_manager import Checkpoint, CheckpointManager
# NOTE(rkn): We import ray.tune.registry here instead of importing the names we
# need because there are cyclic imports that may cause specific names to not
# have been defined yet. See https://github.com/ray-project/ray/issues/1716.
from ray.tune.registry import get_trainable_cls, validate_trainable
from ray.tune.result import DEFAULT_RESULTS_DIR, DONE, TRAINING_ITERATION
from ray.tune.resources import Resources, \
json_to_resources, resources_to_json
from ray.tune.utils.placement_groups import PlacementGroupFactory, \
resource_dict_to_pg_factory
from ray.tune.utils.serialization import TuneFunctionEncoder
from ray.tune.utils.trainable import TrainableUtil
from ray.tune.utils import date_str, flatten_dict
from ray.util import log_once
from ray._private.utils import binary_to_hex, hex_to_binary
DEBUG_PRINT_INTERVAL = 5
logger = logging.getLogger(__name__)
class Location:
"""Describes the location at which Trial is placed to run."""
def __init__(self, hostname=None, pid=None):
self.hostname = hostname
self.pid = pid
def __str__(self):
if not self.pid:
return ""
elif self.hostname == platform.node():
return "pid={}".format(self.pid)
else:
return "{}:{}".format(self.hostname, self.pid)
class ExportFormat:
"""Describes the format to import/export the trial Trainable.
This may correspond to different file formats based on the
Trainable implementation.
"""
CHECKPOINT = "checkpoint"
MODEL = "model"
H5 = "h5"
@staticmethod
def validate(formats):
"""Validates formats.
Raises:
ValueError if the format is unknown.
"""
for i in range(len(formats)):
formats[i] = formats[i].strip().lower()
if formats[i] not in [
ExportFormat.CHECKPOINT, ExportFormat.MODEL,
ExportFormat.H5
]:
raise TuneError("Unsupported import/export format: " +
formats[i])
def checkpoint_deleter(trial_id, runner):
"""Returns a checkpoint deleter callback for a runner."""
if not runner:
return lambda checkpoint: None
def delete(checkpoint):
"""Requests checkpoint deletion asynchronously.
Args:
checkpoint (Checkpoint): Checkpoint to delete.
"""
if checkpoint.storage == Checkpoint.PERSISTENT and checkpoint.value:
logger.debug("Trial %s: Deleting checkpoint %s", trial_id,
checkpoint.value)
checkpoint_path = checkpoint.value
# Delete local copy, if any exists.
if os.path.exists(checkpoint_path):
try:
checkpoint_dir = TrainableUtil.find_checkpoint_dir(
checkpoint_path)
shutil.rmtree(checkpoint_dir)
except FileNotFoundError:
logger.warning("Checkpoint dir not found during deletion.")
# TODO(ujvl): Batch remote deletes.
runner.delete_checkpoint.remote(checkpoint.value)
return delete
class TrialInfo:
"""Serializable struct for holding information for a Trial.
Attributes:
trial_name (str): String name of the current trial.
trial_id (str): trial_id of the trial
"""
def __init__(self, trial):
self._trial_name = str(trial)
self._trial_id = trial.trial_id
@property
def trial_name(self):
return self._trial_name
@property
def trial_id(self):
return self._trial_id
def create_logdir(dirname, local_dir):
local_dir = os.path.expanduser(local_dir)
logdir = os.path.join(local_dir, dirname)
if os.path.exists(logdir):
old_dirname = dirname
dirname += "_" + uuid.uuid4().hex[:4]
logger.info(f"Creating a new dirname {dirname} because "
f"trial dirname '{old_dirname}' already exists.")
logdir = os.path.join(local_dir, dirname)
os.makedirs(logdir, exist_ok=True)
return logdir
class Trial:
"""A trial object holds the state for one model training run.
Trials are themselves managed by the TrialRunner class, which implements
the event loop for submitting trial runs to a Ray cluster.
Trials start in the PENDING state, and transition to RUNNING once started.
On error it transitions to ERROR, otherwise TERMINATED on success.
Attributes:
trainable_name (str): Name of the trainable object to be executed.
config (dict): Provided configuration dictionary with evaluated params.
trial_id (str): Unique identifier for the trial.
local_dir (str): Local_dir as passed to tune.run.
logdir (str): Directory where the trial logs are saved.
        evaluated_params (dict): Evaluated parameters by search algorithm.
experiment_tag (str): Identifying trial name to show in the console.
resources (Resources): Amount of resources that this trial will use.
        status (str): One of PENDING, RUNNING, PAUSED, TERMINATED, ERROR.
error_file (str): Path to the errors that this trial has raised.
"""
_nonjson_fields = [
"results",
"best_result",
"param_config",
"extra_arg",
]
PENDING = "PENDING"
RUNNING = "RUNNING"
PAUSED = "PAUSED"
TERMINATED = "TERMINATED"
ERROR = "ERROR"
def __init__(self,
trainable_name,
config=None,
trial_id=None,
local_dir=DEFAULT_RESULTS_DIR,
evaluated_params=None,
experiment_tag="",
resources=None,
placement_group_factory=None,
stopping_criterion=None,
remote_checkpoint_dir=None,
sync_to_cloud=None,
checkpoint_freq=0,
checkpoint_at_end=False,
sync_on_checkpoint=True,
keep_checkpoints_num=None,
checkpoint_score_attr=TRAINING_ITERATION,
export_formats=None,
restore_path=None,
trial_name_creator=None,
trial_dirname_creator=None,
log_to_file=None,
max_failures=0):
"""Initialize a new trial.
The args here take the same meaning as the command line flags defined
in ray.tune.config_parser.
"""
validate_trainable(trainable_name)
# Trial config
self.trainable_name = trainable_name
self.trial_id = Trial.generate_id() if trial_id is None else trial_id
self.config = config or {}
self.local_dir = local_dir # This remains unexpanded for syncing.
#: Parameters that Tune varies across searches.
self.evaluated_params = evaluated_params or {}
self.experiment_tag = experiment_tag
trainable_cls = self.get_trainable_cls()
if trainable_cls:
default_resources = trainable_cls.default_resource_request(
self.config)
# If Trainable returns resources, do not allow manual override via
# `resources_per_trial` by the user.
if default_resources:
if resources or placement_group_factory:
raise ValueError(
"Resources for {} have been automatically set to {} "
"by its `default_resource_request()` method. Please "
"clear the `resources_per_trial` option.".format(
trainable_cls, default_resources))
# New way: Trainable returns a PlacementGroupFactory object.
if isinstance(default_resources, PlacementGroupFactory):
placement_group_factory = default_resources
resources = None
# Set placement group factory to None for backwards
# compatibility.
else:
placement_group_factory = None
resources = default_resources
self.location = Location()
self.resources = resources or Resources(cpu=1, gpu=0)
self.placement_group_factory = placement_group_factory
self._setup_resources()
self.stopping_criterion = stopping_criterion or {}
self.log_to_file = log_to_file
# Make sure `stdout_file, stderr_file = Trial.log_to_file` works
if not self.log_to_file or not isinstance(self.log_to_file, Sequence) \
or not len(self.log_to_file) == 2:
self.log_to_file = (None, None)
self.max_failures = max_failures
# Local trial state that is updated during the run
self.last_result = {}
self.last_update_time = -float("inf")
# stores in memory max/min/avg/last-n-avg/last result for each
# metric by trial
self.metric_analysis = {}
# keep a moving average over these last n steps
self.n_steps = [5, 10]
self.metric_n_steps = {}
self.export_formats = export_formats
self.status = Trial.PENDING
self.start_time = None
self.logdir = None
self.runner = None
self.last_debug = 0
self.error_file = None
self.error_msg = None
self.trial_name_creator = trial_name_creator
self.custom_trial_name = None
self.custom_dirname = None
# Checkpointing fields
self.saving_to = None
if remote_checkpoint_dir:
self.remote_checkpoint_dir_prefix = remote_checkpoint_dir
else:
self.remote_checkpoint_dir_prefix = None
self.sync_to_cloud = sync_to_cloud
self.checkpoint_freq = checkpoint_freq
self.checkpoint_at_end = checkpoint_at_end
self.keep_checkpoints_num = keep_checkpoints_num
self.checkpoint_score_attr = checkpoint_score_attr
self.sync_on_checkpoint = sync_on_checkpoint
self.checkpoint_manager = CheckpointManager(
keep_checkpoints_num, checkpoint_score_attr,
checkpoint_deleter(self._trainable_name(), self.runner))
# Restoration fields
self.restore_path = restore_path
self.restoring_from = None
self.num_failures = 0
# AutoML fields
self.results = None
self.best_result = None
self.param_config = None
self.extra_arg = None
if trial_name_creator:
self.custom_trial_name = trial_name_creator(self)
if trial_dirname_creator:
self.custom_dirname = trial_dirname_creator(self)
if os.path.sep in self.custom_dirname:
raise ValueError("Trial dirname must not contain '/'. "
"Got {self.custom_dirname}")
self._state_json = None
self._state_valid = False
def _setup_resources(self, log_always: bool = False):
"""Set up resource and placement group requirements.
This will try to convert the resource request in ``self.resources``
to a placement group factory object. If this is unsuccessful,
placement groups will not be used.
Args:
log_always (bool): If True, this will always log a warning if
conversion from a resource dict to a placement group
definition was unsuccessful (e.g. when passing ``extra_``
requests).
"""
if not self.placement_group_factory and \
not int(os.getenv("TUNE_PLACEMENT_GROUP_AUTO_DISABLED", "0")):
try:
self.placement_group_factory = resource_dict_to_pg_factory(
self.resources)
except ValueError as exc:
if log_always or log_once("tune_pg_extra_resources"):
logger.warning(exc)
self.placement_group_factory = None
# Set placement group factory flag to True in Resources object.
if self.placement_group_factory:
resource_kwargs = self.resources._asdict()
resource_kwargs["has_placement_group"] = True
self.resources = Resources(**resource_kwargs)
@property
def node_ip(self):
return self.location.hostname
@property
def checkpoint(self):
"""Returns the most recent checkpoint.
If the trial is in ERROR state, the most recent PERSISTENT checkpoint
is returned.
"""
if self.status == Trial.ERROR:
checkpoint = self.checkpoint_manager.newest_persistent_checkpoint
else:
checkpoint = self.checkpoint_manager.newest_checkpoint
if checkpoint.value is None:
checkpoint = Checkpoint(Checkpoint.PERSISTENT, self.restore_path)
return checkpoint
@classmethod
def generate_id(cls):
return str(uuid.uuid1().hex)[:8]
@property
def remote_checkpoint_dir(self):
assert self.logdir, "Trial {}: logdir not initialized.".format(self)
if not self.remote_checkpoint_dir_prefix:
return None
logdir_name = os.path.basename(self.logdir)
return os.path.join(self.remote_checkpoint_dir_prefix, logdir_name)
@property
def uses_placement_groups(self):
return bool(self.placement_group_factory)
def reset(self):
return Trial(
self.trainable_name,
config=self.config,
trial_id=None,
local_dir=self.local_dir,
evaluated_params=self.evaluated_params,
experiment_tag=self.experiment_tag,
resources=self.resources,
placement_group_factory=self.placement_group_factory,
stopping_criterion=self.stopping_criterion,
remote_checkpoint_dir=self.remote_checkpoint_dir,
checkpoint_freq=self.checkpoint_freq,
checkpoint_at_end=self.checkpoint_at_end,
sync_on_checkpoint=self.sync_on_checkpoint,
keep_checkpoints_num=self.keep_checkpoints_num,
checkpoint_score_attr=self.checkpoint_score_attr,
export_formats=self.export_formats,
restore_path=self.restore_path,
trial_name_creator=self.trial_name_creator,
log_to_file=self.log_to_file,
max_failures=self.max_failures,
)
def init_logdir(self):
"""Init logdir."""
if not self.logdir:
self.logdir = create_logdir(self._generate_dirname(),
self.local_dir)
else:
os.makedirs(self.logdir, exist_ok=True)
self.invalidate_json_state()
def update_resources(
self, resources: Union[Dict, Callable, PlacementGroupFactory]):
"""EXPERIMENTAL: Updates the resource requirements.
Should only be called when the trial is not running.
Raises:
ValueError if trial status is running.
"""
if self.status is Trial.RUNNING:
raise ValueError("Cannot update resources while Trial is running.")
if isinstance(resources, PlacementGroupFactory):
self.placement_group_factory = resources
else:
self.resources = Resources(**resources)
self._setup_resources()
self.invalidate_json_state()
def set_runner(self, runner):
self.runner = runner
self.checkpoint_manager.delete = checkpoint_deleter(
self._trainable_name(), runner)
# No need to invalidate state cache: runner is not stored in json
# self.invalidate_json_state()
def set_location(self, location):
"""Sets the location of the trial."""
self.location = location
# No need to invalidate state cache: location is not stored in json
# self.invalidate_json_state()
def set_status(self, status):
"""Sets the status of the trial."""
self.status = status
if status == Trial.RUNNING:
if self.start_time is None:
self.start_time = time.time()
self.invalidate_json_state()
def set_config(self, config):
self.config = config
self.invalidate_json_state()
def set_experiment_tag(self, experiment_tag):
self.experiment_tag = experiment_tag
self.invalidate_json_state()
def write_error_log(self, error_msg):
if error_msg and self.logdir:
self.num_failures += 1
self.error_file = os.path.join(self.logdir, "error.txt")
with open(self.error_file, "a+") as f:
f.write("Failure # {} (occurred at {})\n".format(
self.num_failures, date_str()))
f.write(error_msg + "\n")
self.error_msg = error_msg
self.invalidate_json_state()
def should_stop(self, result):
"""Whether the given result meets this trial's stopping criteria."""
if result.get(DONE):
return True
for criteria, stop_value in self.stopping_criterion.items():
if criteria not in result:
raise TuneError(
"Stopping criteria {} not provided in result {}.".format(
criteria, result))
elif isinstance(criteria, dict):
raise ValueError(
"Stopping criteria is now flattened by default. "
"Use forward slashes to nest values `key1/key2/key3`.")
elif result[criteria] >= stop_value:
return True
return False
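    # Editorial example (numbers are illustrative): with
    # stopping_criterion={"training_iteration": 100, "eval/mean_accuracy": 0.95},
    # any result whose flattened "training_iteration" or "eval/mean_accuracy"
    # value reaches its threshold stops the trial; nested criteria dicts are
    # rejected because results are compared on flattened "key1/key2" keys.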
def should_checkpoint(self):
"""Whether this trial is due for checkpointing."""
result = self.last_result or {}
if result.get(DONE) and self.checkpoint_at_end:
return True
return (self.checkpoint_freq and
result.get(TRAINING_ITERATION, 0) % self.checkpoint_freq == 0)
def has_checkpoint(self):
return self.checkpoint.value is not None
def clear_checkpoint(self):
self.checkpoint.value = None
self.restoring_from = None
self.invalidate_json_state()
def on_checkpoint(self, checkpoint):
"""Hook for handling checkpoints taken by the Trainable.
Args:
checkpoint (Checkpoint): Checkpoint taken.
"""
self.checkpoint_manager.on_checkpoint(checkpoint)
self.invalidate_json_state()
def on_restore(self):
"""Handles restoration completion."""
assert self.is_restoring
self.last_result = self.restoring_from.result
self.restoring_from = None
self.invalidate_json_state()
def should_recover(self):
"""Returns whether the trial qualifies for retrying.
This is if the trial has not failed more than max_failures. Note this
may return true even when there is no checkpoint, either because
`self.checkpoint_freq` is `0` or because the trial failed before
a checkpoint has been made.
"""
return self.num_failures < self.max_failures or self.max_failures < 0
def update_last_result(self, result, terminate=False):
if self.experiment_tag:
result.update(experiment_tag=self.experiment_tag)
self.set_location(Location(result.get("node_ip"), result.get("pid")))
self.last_result = result
self.last_update_time = time.time()
for metric, value in flatten_dict(result).items():
if isinstance(value, Number):
if metric not in self.metric_analysis:
self.metric_analysis[metric] = {
"max": value,
"min": value,
"avg": value,
"last": value
}
self.metric_n_steps[metric] = {}
for n in self.n_steps:
key = "last-{:d}-avg".format(n)
self.metric_analysis[metric][key] = value
# Store n as string for correct restore.
self.metric_n_steps[metric][str(n)] = deque(
[value], maxlen=n)
else:
step = result["training_iteration"] or 1
self.metric_analysis[metric]["max"] = max(
value, self.metric_analysis[metric]["max"])
self.metric_analysis[metric]["min"] = min(
value, self.metric_analysis[metric]["min"])
self.metric_analysis[metric]["avg"] = 1 / step * (
value +
(step - 1) * self.metric_analysis[metric]["avg"])
self.metric_analysis[metric]["last"] = value
for n in self.n_steps:
key = "last-{:d}-avg".format(n)
self.metric_n_steps[metric][str(n)].append(value)
self.metric_analysis[metric][key] = sum(
self.metric_n_steps[metric][str(n)]) / len(
self.metric_n_steps[metric][str(n)])
self.invalidate_json_state()
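    # Editorial worked example (numbers are illustrative): if a metric "loss"
    # is reported as 2.0 at training_iteration=1 and 4.0 at
    # training_iteration=2, metric_analysis["loss"] becomes {"max": 4.0,
    # "min": 2.0, "avg": 3.0, "last": 4.0, "last-5-avg": 3.0,
    # "last-10-avg": 3.0} and metric_n_steps["loss"]["5"] is
    # deque([2.0, 4.0], maxlen=5).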
def get_trainable_cls(self):
return get_trainable_cls(self.trainable_name)
def is_finished(self):
return self.status in [Trial.ERROR, Trial.TERMINATED]
@property
def is_restoring(self):
return self.restoring_from is not None
@property
def is_saving(self):
return self.saving_to is not None
def __repr__(self):
return self._trainable_name(include_trial_id=True)
def __str__(self):
return self._trainable_name(include_trial_id=True)
def _trainable_name(self, include_trial_id=False):
"""Combines ``env`` with ``trainable_name`` and ``trial_id``.
Can be overridden with a custom string creator.
"""
if self.custom_trial_name:
return self.custom_trial_name
if "env" in self.config:
env = self.config["env"]
if isinstance(env, type):
env = env.__name__
identifier = "{}_{}".format(self.trainable_name, env)
else:
identifier = self.trainable_name
if include_trial_id:
identifier += "_" + self.trial_id
return identifier.replace("/", "_")
def _generate_dirname(self):
if self.custom_dirname:
generated_dirname = self.custom_dirname
else:
if "MAX_LEN_IDENTIFIER" in os.environ:
logger.error("The MAX_LEN_IDENTIFIER environment variable is "
"deprecated and will be removed in the future. "
"Use TUNE_MAX_LEN_IDENTIFIER instead.")
MAX_LEN_IDENTIFIER = int(
os.environ.get("TUNE_MAX_LEN_IDENTIFIER",
os.environ.get("MAX_LEN_IDENTIFIER", 130)))
generated_dirname = f"{str(self)}_{self.experiment_tag}"
generated_dirname = generated_dirname[:MAX_LEN_IDENTIFIER]
generated_dirname += f"_{date_str()}"
return generated_dirname.replace("/", "_")
def invalidate_json_state(self):
self._state_valid = False
def get_json_state(self) -> str:
if not self._state_json or not self._state_valid:
json_state = json.dumps(
self.__getstate__(), indent=2, cls=TuneFunctionEncoder)
self._state_json = json_state
self._state_valid = True
return self._state_json
def __getstate__(self):
"""Memento generator for Trial.
Sets RUNNING trials to PENDING.
Note this can only occur if the trial holds a PERSISTENT checkpoint.
"""
state = self.__dict__.copy()
state["resources"] = resources_to_json(self.resources)
for key in self._nonjson_fields:
state[key] = binary_to_hex(cloudpickle.dumps(state.get(key)))
state["runner"] = None
state["location"] = Location()
# Avoid waiting for events that will never occur on resume.
state["restoring_from"] = None
state["saving_to"] = None
state["_state_json"] = None
state["_state_valid"] = False
return copy.deepcopy(state)
def __setstate__(self, state):
state["resources"] = json_to_resources(state["resources"])
if state["status"] == Trial.RUNNING:
state["status"] = Trial.PENDING
for key in self._nonjson_fields:
state[key] = cloudpickle.loads(hex_to_binary(state[key]))
self.__dict__.update(state)
validate_trainable(self.trainable_name)
# Avoid creating logdir in client mode for returned trial results,
# since the dir might not be creatable locally. TODO(ekl) thsi is kind
# of a hack.
if not ray.util.client.ray.is_connected():
self.init_logdir() # Create logdir if it does not exist
| apache-2.0 | 8,356,033,517,362,972,000 | 36.071325 | 79 | 0.590427 | false | 4.37294 | true | false | false |
robertsj/poropy | examples/large_core_ex_1.py | 1 | 1787 | # examples/large_core_ex_1.py
#
# In this example, we investigate "by hand" the
# large reactor example.
import large_core
import time
import numpy as np
# Get the reactor from the premade script.
reactor = large_core.make_large_core()
# View all the diagnostics down the chain.
reactor.display()
# Evaluate the default pattern. We can grab the eigenvalue
# and peaking as return values.
k, p = reactor.evaluate()
print "k = ",k," p = ",p
# Alternatively, we can use print_params to display current
# values of all optimization parameters. Currently only
# keff and the max peaking are retained.
reactor.print_params()
## We can also print the power peaking.
reactor.print_peaking()
# With this, we can try optimizing by hand a bit. Peaking
# occurs at (0, 1). Printing the pattern helps visualize this.
reactor.print_pattern()
# We can also see what fuel type is where by looking, for
# example, at the burnup
reactor.print_map('burnup')
# or the enrichment
reactor.print_map('enrichment')
# Now, something that tends to work is to swap a peaking
# bundle with lower burnup with a lower peaking bundle
# with higher burnup. Let's switch the peaker with
# its 15 GWd/MTU neighbor at [0,2]. Then print and
# and evaluate.
reactor.swap([0,1],[0,2])
reactor.print_pattern()
reactor.evaluate()
reactor.print_params()
reactor.print_peaking()
# That's a slight peaking reduction with ja slight increase
# in keff. However, there is a better pattern. Try the
# "ring of fire":
pattern = np.array([48,36,5,6,19,23,17,40,3,10,15,25,32,1,44,7,9,18,33,31,8,43,11,20,26,\
24,21,16,35,27,28,29,30,12,41,34,22,13,2,45,37,14,0,4,42,47,46,38,39])
reactor.shuffle(pattern)
reactor.evaluate()
reactor.print_params()
reactor.print_peaking()
reactor.plot_peaking()
| mit | -5,657,274,151,031,554,000 | 27.822581 | 90 | 0.722999 | false | 2.998322 | false | false | false |
Manouchehri/pychdk | ptp2/camera.py | 1 | 18415 | import logging
import usb
import struct
import binascii
import time
from os import path
import ptp2.util
from ptp2.typedefs import *
from ptp2.chdk_ptp_values import *
from ptp2.ptp_values import StandardResponses
__all__ = ['PTPCamera', 'CHDKCamera']
class _CameraBase(object):
def __init__(self, usb_device=None, log_level=logging.WARNING):
self._intf = None
self._handle = None
self._ep_in = None
self._ep_out = None
self._ep_intr = None
self.logger = logging.getLogger('_CameraBase')
self.logger.setLevel(log_level)
self._transaction_id = 0
if usb_device is not None:
self.open(usb_device)
def __del__(self):
self.close()
def open(self, usb_device):
intf = ptp2.util.get_ptp_interface(usb_device)
if intf is None:
raise TypeError('USB Device %s not a PTP Camera' % (usb_device))
self._intf = intf
self._handle = usb_device
# Grab endpoints
for ep in self._intf:
ep_type = usb.util.endpoint_type(ep.bmAttributes)
ep_dir = usb.util.endpoint_direction(ep.bEndpointAddress)
if ep_type == usb.util.ENDPOINT_TYPE_BULK:
if ep_dir == usb.util.ENDPOINT_IN:
self._ep_in = ep.bEndpointAddress
elif ep_dir == usb.util.ENDPOINT_OUT:
self._ep_out = ep.bEndpointAddress
elif ep_type == usb.util.ENDPOINT_TYPE_INTR:
self._ep_intr = ep.bEndpointAddress
def close(self):
# Excplicity release usb device
if self._handle is not None:
usb.util.dispose_resources(self._handle)
# _, self._handle = self._handle, None
_, self._intf = self._intf, None
self._ep_in = None
self._ep_out = None
self._ep_intr = None
def reopen(self):
if self._handle is None:
raise ValueError('No USB Device assigned. (Did you open it first?)')
if self._intf is not None:
raise ValueError('Already open')
self.open(self._handle)
def _bulk_write(self, bytestr, timeout=0):
return self._handle.write(self._ep_out, bytestr, timeout=timeout)
def _bulk_read(self, size, timeout=0):
return self._handle.read(self._ep_in, size, timeout=timeout).tostring()
def check_event(self, size=512, timeout=5000):
buf = self._handle.read(self._ep_intr, size=size, timeout=timeout).tostring()
p = ParamContainer(buf)
self.logger.debug('Received Event ' + buf.encode('hex'))
self.logger.debug(repr(p))
if p.type != PTP_CONTAINER_TYPE.EVENT:
raise ValueError('Received non-event container of type {t} on interrupt endpoint!'.format(t=p.type))
return p
def send_ptp_message(self, bytestr, timeout=0):
self.logger.debug('Sending ' + binascii.hexlify(bytestr).decode('utf-8')) # .encode('hex'))
return self._bulk_write(bytestr, timeout)
def recv_ptp_message(self, timeout=0):
buf = self._bulk_read(size=512, timeout=timeout)
self.logger.debug('Received ' + binascii.hexlify(buf).decode('utf-8'))
msg_len = struct.unpack('<I', buf[:4])[0]
bytes_left = msg_len - 512
if bytes_left > 0:
buf += self._bulk_read(size=bytes_left, timeout=timeout)
return buf
def new_ptp_command(self, op_code, params=[]):
ptp_command = ParamContainer()
ptp_command.type = PTP_CONTAINER_TYPE.COMMAND
ptp_command.code = op_code
ptp_command.transaction_id = self._transaction_id
ptp_command.params = params
self._transaction_id += 1
return ptp_command
def ptp_transaction(self, command, params=[], tx_data=None, receiving=True, timeout=0):
recvd_data = None
recvd_response = None
ptp_request = self.new_ptp_command(command, params)
ptp_request_data = None
if tx_data is not None:
assert isinstance(tx_data, str)
ptp_request_data = DataContainer()
ptp_request_data.code = ptp_request.code
ptp_request_data.transaction_id = ptp_request.transaction_id
ptp_request_data.data = tx_data
# Send request
bytes_xfrered = self.send_ptp_message(ptp_request.pack(), timeout)
# Send data
if ptp_request_data is not None:
bytes_xfered = self.send_ptp_message(ptp_request_data.pack(), timeout)
if receiving:
# read first 512 bytes to grab total data length
buf = self.recv_ptp_message(timeout)
_, type_ = struct.unpack('<IH', buf[:6])
if type_ == PTP_CONTAINER_TYPE.DATA:
recvd_data = DataContainer(buf)
elif type_ == PTP_CONTAINER_TYPE.RESPONSE:
recvd_response = ParamContainer(buf)
elif type_ in [PTP_CONTAINER_TYPE.COMMAND, PTP_CONTAINER_TYPE.EVENT]:
recvd_data = ParamContainer(buf)
else:
raise TypeError('Unknown PTP USB container type: %d' % (type_))
# If we haven't got the response yet, try again
if recvd_response is None:
buf = self.recv_ptp_message(timeout=timeout)
_, type_ = struct.unpack('<IH', buf[:6])
if type_ == PTP_CONTAINER_TYPE.RESPONSE:
recvd_response = ParamContainer(buf)
else:
raise TypeError('Expected response container, received type: %d' % (type_))
if recvd_response is not None:
self.logger.debug('Response: ' + repr(recvd_response))
self.logger.debug('ptp_transaction end')
return recvd_response, recvd_data
class PTPCamera(_CameraBase):
"""
If the PTPCamera class is not initialized with a usb_device handle, the first
PTP device found will be used.
"""
def __init__(self, usb_device=None, log_level=logging.WARNING):
self.logger = logging.getLogger('PTPCamera')
self.logger.setLevel(log_level)
if usb_device is None:
cams = ptp2.util.list_ptp_cameras()
if not cams:
raise IOError('No PTP Devices Found')
usb_device = cams[0]
self.logger.debug('Init with PTP device ' + usb_device.product)
self.session_id = 0x1
_CameraBase.__init__(self, usb_device=usb_device, log_level=log_level)
def open_session(self):
response, data = self.ptp_transaction(PTP_OPCODE.OPEN_SESSION, params=[self.session_id])
if (response.code != PTP_RESPONSE_CODE.OK) and (response.code != PTP_RESPONSE_CODE.SESSION_ALREADY_OPENED):
raise ValueError('Could not open PTP session (got 0x{:x})'.format(response.code))
return True
def close_session(self):
response, data = self.ptp_transaction(PTP_OPCODE.CLOSE_SESSION)
return self.check_response(response)
def initiate_capture(self):
response, data = self.ptp_transaction(PTP_OPCODE.INITIATE_CAPTURE, params=[0x0, 0x0])
self.check_response(response)
return response, data
def capture(self):
self.open_session()
response, data = self.initiate_capture()
self.check_response(response)
# We should now receive an ObjectAdded event followed by a CaptureComplete event
# However, the Nikon J3 often (but not always) sends these two events out of order.
# TODO: sometimes we receive DevicePropChanged instead of ObjectAdded from the Nikon J3
obj_added_event = None
capture_complete_event = None
event1 = self.check_event()
event2 = self.check_event()
for event in [event1, event2]:
if event.code == PTP_EVENT_CODE.OBJECT_ADDED:
obj_added_event = event
elif event.code == PTP_EVENT_CODE.CAPTURE_COMPLETE:
capture_complete_event = event
if obj_added_event is None:
raise IOError('ObjectAdded event was not received')
if capture_complete_event is None:
raise IOError('CaptureComplete event was not received')
# self.close_session()
object_handle = obj_added_event.params[0]
return object_handle
def capture_and_download(self):
start_time = time.time()
object_handle = self.capture()
response, data = self.ptp_transaction(PTP_OPCODE.GET_OBJECT, params=[object_handle])
total_time = time.time() - start_time
self.logger.info('total time to capture and download: {s:0.4f} seconds'.format(s=total_time))
img_size = data.length
self.logger.debug('image size ' + str(img_size - 12))
# f = open('/tmp/foo.jpg', 'w')
# f.write(data.data)
# self.logger.debug('wrote tmp file')
def check_response(self, response):
if response.code != PTP_RESPONSE_CODE.OK:
raise ValueError('PTP response code was not OK (got 0x{:x})'.format(response.code))
return True
class CHDKCamera(_CameraBase):
"""
For use with Canon cameras using the CHDK firmware.
Available functions (see docstrings for info):
get_chdk_version
upload_file
download_file
get_live_view_data
execute_lua
read_script_message
write_script_message
"""
def __init__(self, usb_device=None):
_CameraBase.__init__(self, usb_device)
def get_chdk_version(self):
"""
Retrieves the PTP-core (MAJOR,MINOR) version tuple from the
camera.
Note: This is different than the (MAJOR,MINOR) version tuple
for the live_view PTP extensions.
"""
recvd_response, _ = self.ptp_transaction(command=PTP_OC_CHDK,
params=[CHDKOperations.Version],
tx_data=None, receiving=False, timeout=0)
major, minor = recvd_response.params
return major, minor
def check_script_status(self):
"""
:returns: CHDKScriptStatus
Check status of running scripts on camera
"""
recvd_response, _ = self.ptp_transaction(command=PTP_OC_CHDK,
params=[CHDKOperations.ScriptStatus],
tx_data=None, receiving=False, timeout=0)
status = recvd_response.params[0]
return status
def execute_lua(self, script, block=False):
"""
:param script: LUA script to execute on camera
:type script: str
:param block: Wait for script to return before continuing
:type block: bool
:returns: (script_id, script_error, [msgs])
Execute a script on the camera.
Values returned by the LUA script are passed in individual
messages.
"""
# NULL terminate script if necessary
if not script.endswith('\0'):
script += '\0'
recvd_response, _ = self.ptp_transaction(command=PTP_OC_CHDK,
params=[CHDKOperations.ExecuteScript, CHDKScriptLanguage.LUA],
tx_data=script, receiving=False, timeout=0)
script_id, script_error = recvd_response.params
if not block:
return script_id, script_error, []
else:
msgs = self._wait_for_script_return()
return script_id, script_error, msgs
def read_script_message(self):
"""
Checks camera for messages created by running scripts.
"""
recvd_response, recvd_data = self.ptp_transaction(command=PTP_OC_CHDK,
params=[CHDKOperations.ReadScriptMsg, CHDKScriptLanguage.LUA],
tx_data=None, receiving=True, timeout=0)
return recvd_response, recvd_data
def write_script_message(self, message, script_id=0):
"""
:param message: Message to send
:type message: str
:param script_id: ID of script to deliver message to.
:type script_id: int
Passes a message to a running script.
"""
recvd_response, _ = self.ptp_transaction(command=PTP_OC_CHDK,
params=[CHDKOperations.WriteScriptMsg, script_id],
tx_data=message, receiving=False, timeout=0)
msg_status = recvd_response.params[0]
return msg_status
@classmethod
def __pack_file_for_upload(cls, local_filename, remote_filename=None):
"""
Private method to create a buffer holding
filename's contents for uploading to the camera.
called in `CHDKCamera.upload_file'
"""
if remote_filename is None:
remote_filename = path.basename(remote_filename)
if not remote_filename.endswith('\0'):
remote_filename += '\0'
filename_len = len(remote_filename)
fmt = '<I%dc' % (filename_len)
filebuf = struct.pack(fmt, filename_len, remote_filename)
with open(local_filename, 'rb') as fid:
contents = fid.read(-1)
fmt = '<%dB' % (len(contents))
filebuf += struct.pack(fmt, *contents)
return filebuf
def upload_file(self, local_filename, remote_filename=None, timeout=0):
"""
:param local_filename: Name of file on computer
:type local_filename: str
:param remote_filename: Name of file on camera
:type remote_filename: str
Upload a file to the camera. If remote_filename is None, the
file is uploaded to the root folder on the SD card.
"""
filestr = self.__pack_file_for_upload(local_filename, remote_filename)
dlfile_response, dlfile_data = self.ptp_transaction(command=PTP_OC_CHDK,
params=[CHDKOperations.UploadFile],
tx_data=filestr, receiving=False, timeout=timeout)
if ret_code != CHDKResponses.OK:
raise PTPError(tempdata_response.params[0], CHDKResponses.message[ret_code])
def download_file(self, filename, timeout=0):
"""
:param filename: Full path of file to download
:type filename: str
Download a file from the camera
"""
# CHDK Download process:
# - Store desried filename on camera w/ TempData
# - Send DownloadFile command
if not filename.endswith('\0'):
filename += '\0'
tempdata_response, _ = self.ptp_transaction(command=PTP_OC_CHDK,
params=[CHDKOperations.TempData, 0],
tx_data=filename, receiving=False, timeout=timeout)
ret_code = tempdata_response.params[0]
# check response for problems
if ret_code != CHDKResponses.OK:
raise PTPError(tempdata_response.params[0], CHDKResponses.message[ret_code])
dlfile_response, dlfile_data = self.ptp_transaction(command=PTP_OC_CHDK,
params=[CHDKOperations.DownloadFile],
tx_data=None, receiving=True, timeout=timeout)
ret_code = tempdata_response.params[0]
# check response for problems
if ret_code != CHDKResponses.OK:
raise PTPError(tempdata_response.params[0], CHDKResponses.message[ret_code])
# Clear tempdata field
clear_response, _ = self.ptp_transaction(command=PTP_OC_CHDK,
params=[CHDKOperations.TempData, CHDKTempData.CLEAR],
tx_data=None, receiving=False, timeout=timeout)
# Return the raw string buffer
return dlfile_data.data
def get_live_view_data(self, liveview=True, overlay=False, palette=False):
"""
:param liveview: Return the liveview image
:type liveview: bool
:param overlay: Return the overlay image
:type overlay: bool
:param palette: Return the overlay palette
:type palette: bool
:returns: :class:`typdefs.CHDK_LV_Data`
Grabs a live view image from the camera.
"""
flags = 0
if liveview:
flags |= CHDKLVTransfer.VIEWPORT
if overlay:
flags |= CHDKLVTransfer.BITMAP
if palette:
flags |= CHDKLVTransfer.PALETTE
recvd_response, recvd_data = self.ptp_transaction(command=PTP_OC_CHDK,
params=[CHDKOperations.GetDisplayData, flags],
tx_data=None, receiving=True, timeout=0)
if recvd_data.type == PTP_CONTAINER_TYPE.DATA:
lv_data = CHDK_LV_Data(recvd_data.data)
else:
lv_data = None
return recvd_response, lv_data
def _wait_for_script_return(self, timeout=0):
"""
Polls the camera every 50ms.
Reads queued messages if present, sleeps again if
a script is currently running.
Returns read messages when no scripts are running.
"""
msg_count = 1
msgs = []
t_start = time.time()
while True:
STATUS = self.check_script_status()
if STATUS & CHDKScriptStatus.RUN:
# log.debug('Script running, sleeping 50ms')
time.sleep(50e-3)
if timeout > 0 and timeout > (time.time() - t_start):
raise PTPError(StandardResponses.TRANSACTION_CANCELLED, "Timeout waiting for script to return")
elif STATUS & CHDKScriptStatus.MSG:
msg, msg_buf = self.read_script_message()
msg_count += 1
msgs.append((msg, msg_buf))
elif STATUS == CHDKScriptStatus.NONE:
break
else:
raise PTPError(StandardResponses.UNDEFINED, "Invalid response for script status: 0x%X" % (STATUS))
return msgs
| gpl-3.0 | -3,705,542,958,739,789,000 | 34.07619 | 120 | 0.576595 | false | 4.054381 | false | false | false |
allmyservos/allmyservos | __bootstrap.py | 1 | 6408 | #!/usr/bin/python
#######################################################################
# AllMyServos - Fun with PWM
# Copyright (C) 2015 Donate BTC:14rVTppdYQzLrqay5fp2FwP3AXvn3VSZxQ
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#######################################################################
import sys, traceback, logging, os, re, time
from subprocess import Popen, PIPE
from StringIO import StringIO
## The AmsEnvironment object collects required information from the host pi
class AmsEnvironment:
patterns = {
'pid': re.compile(r'(?P<pid>\d+)')
}
info = {}
@staticmethod
def Now():
return int(round(time.time() * 1000))
@staticmethod
def AppInfo():
""" Returns environment info
"""
if (not any(AmsEnvironment.info)):
a = AmsEnvironment.info
a['app_path'] = os.path.dirname(__file__)
a['contrib_path'] = os.path.join(a['app_path'],'contrib')
a['file_path'] = os.path.join(a['app_path'],'files')
a['command_script'] = sys.argv[0]
a['command_args'] = sys.argv[1:]
try:
a['terminal'] = os.ttyname(sys.stdout.fileno())
except:
a['terminal'] = '';
return AmsEnvironment.info
@staticmethod
def AppPath():
""" Returns app path
"""
try:
AmsEnvironment.info['app_path']
except:
AmsEnvironment.AppInfo()
return AmsEnvironment.info['app_path']
@staticmethod
def ContribPath():
""" Returns contrib path
"""
try:
AmsEnvironment.info['contrib_path']
except:
AmsEnvironment.AppInfo()
return AmsEnvironment.info['contrib_path']
@staticmethod
def FilePath():
""" Returns file path
"""
try:
AmsEnvironment.info['file_path']
except:
AmsEnvironment.AppInfo()
return AmsEnvironment.info['file_path']
@staticmethod
def Terminal():
""" Returns the current terminal
"""
try:
AmsEnvironment.info['terminal']
except:
AmsEnvironment.AppInfo()
return AmsEnvironment.info['terminal']
@staticmethod
def Vendors():
""" Returns list of vendor names
"""
try:
AmsEnvironment.__vendors
except:
AmsEnvironment.__vendors = os.listdir(AmsEnvironment.ContribPath())
AmsEnvironment.__vendors = [ x for x in AmsEnvironment.__vendors if os.path.isdir(os.path.join(AmsEnvironment.ContribPath(), x)) ]
return AmsEnvironment.__vendors
@staticmethod
def IsLxdeRunning():
""" Returns whether lxde is running
"""
try:
AmsEnvironment.__lxdeRunning
except:
AmsEnvironment.__lxdeRunning = AmsEnvironment.__isLxdeRunning()
return AmsEnvironment.__lxdeRunning
@staticmethod
def Scan():
""" Adds system paths required to import modules in the contrib folder
"""
try:
AmsEnvironment.__scanned
except:
AmsEnvironment.__scanned = True
vendors = AmsEnvironment.Vendors()
if (any(vendors)):
for v in vendors:
vpath = os.path.join(AmsEnvironment.ContribPath(), v)
mods = os.listdir(vpath)
mods = [ x for x in mods if os.path.isdir(os.path.join(vpath, x)) ]
for m in mods:
sys.path.append(os.path.join(vpath, m))
@staticmethod
def EnableErrorLogging():
logpath = os.path.join(AmsEnvironment.FilePath(),'logs')
if (not os.path.exists(logpath)):
os.makedirs(logpath)
logging.basicConfig(filename=os.path.join(AmsEnvironment.FilePath(),'logs','exception.log'),filemode='a',level=logging.DEBUG, format= '%(asctime)s - %(levelname)s - %(message)s')
AmsEnvironment.logger = logging.getLogger('amslogger')
sys.excepthook = AmsEnvironment.errorHandler
@staticmethod
def EnableOutputLogging():
AmsEnvironment._old_stdout = sys.stdout
AmsEnvironment._old_stderr = sys.stderr
AmsEnvironment.outlogger = OutLogger(AmsEnvironment._old_stdout, AmsEnvironment._old_stderr, os.path.join(AmsEnvironment.FilePath(),'logs'))
sys.stdout = AmsEnvironment.outlogger
sys.stderr = AmsEnvironment.outlogger
@staticmethod
def outputHandler(value):
AmsEnvironment.logger.debug(value)
@staticmethod
def errorHandler(type, value, tb):
AmsEnvironment.logger.exception("Uncaught exception: {0}".format(str(value)))
@staticmethod
def __extract_function_name():
tb = sys.exc_info()[-1]
stk = traceback.extract_tb(tb, 1)
fname = stk[0][3]
return fname
def LogException(e):
logging.error(
"Function {function_name} raised {exception_class} ({exception_docstring}): {exception_message}".format(
function_name = AmsEnvironment.__extract_function_name(), #this is optional
exception_class = e.__class__,
exception_docstring = e.__doc__,
exception_message = e.message))
@staticmethod
def __isLxdeRunning():
""" Utility
"""
if(not 'console' in AmsEnvironment.Terminal()):
#not running from rc.local
for l in AmsEnvironment.__pgrepX().split('\n'):
match = AmsEnvironment.patterns['pid'].match(l)
if(match):
return True
return False
@staticmethod
def __pgrepX():
""" Utility
"""
p = Popen(['pgrep', 'X'], stdout=PIPE)
o = p.communicate()[0]
if(p.returncode == 0):
return o
return ''
## Custom StdOut handler to copy ouput to a log file.
class OutLogger(StringIO):
def __init__(self, old_stdout, old_stderr, logpath, useold = True):
""" Initializes the Logger object
Extends StringIO in order to capture stdout and stderr
@param parent
@param gui
@param options
"""
StringIO.__init__(self) #overriding object must implement StringIO
self.logpath = logpath
if (not os.path.exists(self.logpath)):
os.makedirs(self.logpath)
self.logfile = os.path.join(self.logpath, 'output.log')
self.useold = useold
self.old_stdout = old_stdout
self.old_stderr = old_stderr
def write(self, value):
''' capture and reverse console output
'''
try:
StringIO.write(self,value)
f = open(self.logfile, 'a')
f.write(value)
f.close()
except Exception as e:
pass
if(self.useold):
self.old_stdout.write(value) #repeat to command line
AmsEnvironment.Scan() | gpl-2.0 | 8,564,679,286,356,368,000 | 30.571429 | 180 | 0.686954 | false | 3.216867 | false | false | false |
TheJJ100100/bedrock | bedrock/releasenotes/tests/test_base.py | 1 | 16925 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.core.cache import get_cache
from django.http import Http404
from django.test.client import RequestFactory
from django.test.utils import override_settings
from bedrock.base.urlresolvers import reverse
from mock import patch, Mock
from nose.tools import eq_
from pathlib import Path
from pyquery import PyQuery as pq
from rna.models import Release
from bedrock.firefox.firefox_details import FirefoxDesktop
from bedrock.mozorg.tests import TestCase
from bedrock.releasenotes import views
from bedrock.thunderbird.details import ThunderbirdDesktop
DATA_PATH = str(Path(__file__).parent / 'data')
firefox_desktop = FirefoxDesktop(json_dir=DATA_PATH)
thunderbird_desktop = ThunderbirdDesktop(json_dir=DATA_PATH)
class TestRNAViews(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.request = self.factory.get('/')
self.render_patch = patch('bedrock.releasenotes.views.l10n_utils.render')
self.mock_render = self.render_patch.start()
self.mock_render.return_value.has_header.return_value = False
def tearDown(self):
self.render_patch.stop()
@property
def last_ctx(self):
"""
Convenient way to access the context of the last rendered
response.
"""
return self.mock_render.call_args[0][2]
@patch('bedrock.releasenotes.views.get_object_or_404')
@patch('bedrock.releasenotes.views.Q')
def test_get_release_or_404(self, Q, get_object_or_404):
eq_(views.get_release_or_404('version', 'product'),
get_object_or_404.return_value)
get_object_or_404.assert_called_with(
Release, Q.return_value, version='version')
Q.assert_called_once_with(product='product')
@patch('bedrock.releasenotes.views.get_object_or_404')
@patch('bedrock.releasenotes.views.Q')
def test_get_release_or_404_esr(self, Q, get_object_or_404):
eq_(views.get_release_or_404('24.5.0', 'Firefox'),
get_object_or_404.return_value)
Q.assert_any_call(product='Firefox')
Q.assert_any_call(product='Firefox Extended Support Release')
@patch('bedrock.releasenotes.views.get_object_or_404')
@patch('bedrock.releasenotes.views.Q')
def test_get_release_or_404_endswith_esr(self, Q, get_object_or_404):
eq_(views.get_release_or_404('45.0esr', 'Firefox'),
get_object_or_404.return_value)
Q.assert_any_call(product='Firefox')
Q.assert_any_call(product='Firefox Extended Support Release')
@override_settings(DEV=False)
@patch('bedrock.releasenotes.views.release_notes_template')
@patch('bedrock.releasenotes.views.get_release_or_404')
@patch('bedrock.releasenotes.views.equivalent_release_url')
def test_release_notes(self, mock_equiv_rel_url, get_release_or_404,
mock_release_notes_template):
"""
Should use release returned from get_release_or_404 with the
correct params and pass the correct context variables and
template to l10n_utils.render.
"""
mock_release = get_release_or_404.return_value
mock_release.major_version.return_value = '34'
mock_release.notes.return_value = ([Release(id=1), Release(id=2)],
[Release(id=3), Release(id=4)])
views.release_notes(self.request, '27.0')
get_release_or_404.assert_called_with('27.0', 'Firefox')
mock_release.notes.assert_called_with(public_only=True)
eq_(self.last_ctx['version'], '27.0')
eq_(self.last_ctx['release'], mock_release)
eq_(self.last_ctx['new_features'], [Release(id=1), Release(id=2)])
eq_(self.last_ctx['known_issues'], [Release(id=3), Release(id=4)])
eq_(self.mock_render.call_args[0][1],
mock_release_notes_template.return_value)
mock_equiv_rel_url.assert_called_with(mock_release)
mock_release_notes_template.assert_called_with(
mock_release.channel, 'Firefox', 34)
@patch('bedrock.releasenotes.views.get_release_or_404')
@patch('bedrock.releasenotes.views.releasenotes_url')
def test_release_notes_beta_redirect(self, releasenotes_url,
get_release_or_404):
"""
Should redirect to url for beta release
"""
get_release_or_404.side_effect = [Http404, 'mock release']
releasenotes_url.return_value = '/firefox/27.0beta/releasenotes/'
response = views.release_notes(self.request, '27.0')
eq_(response.status_code, 302)
eq_(response['location'], '/firefox/27.0beta/releasenotes/')
get_release_or_404.assert_called_with('27.0beta', 'Firefox')
releasenotes_url.assert_called_with('mock release')
@patch('bedrock.releasenotes.views.get_release_or_404')
def test_system_requirements(self, get_release_or_404):
"""
Should use release returned from get_release_or_404, with a
default channel of Release and default product of Firefox,
and pass the version to l10n_utils.render
"""
views.system_requirements(self.request, '27.0.1')
get_release_or_404.assert_called_with('27.0.1', 'Firefox')
eq_(self.last_ctx['release'], get_release_or_404.return_value)
eq_(self.last_ctx['version'], '27.0.1')
eq_(self.mock_render.call_args[0][1],
'firefox/releases/system_requirements.html')
def test_release_notes_template(self):
"""
Should return correct template name based on channel
and product
"""
eq_(views.release_notes_template('Nightly', 'Firefox'),
'firefox/releases/nightly-notes.html')
eq_(views.release_notes_template('Aurora', 'Firefox'),
'firefox/releases/aurora-notes.html')
eq_(views.release_notes_template('Aurora', 'Firefox', 35),
'firefox/releases/dev-browser-notes.html')
eq_(views.release_notes_template('Aurora', 'Firefox', 34),
'firefox/releases/aurora-notes.html')
eq_(views.release_notes_template('Beta', 'Firefox'),
'firefox/releases/beta-notes.html')
eq_(views.release_notes_template('Release', 'Firefox'),
'firefox/releases/release-notes.html')
eq_(views.release_notes_template('ESR', 'Firefox'),
'firefox/releases/esr-notes.html')
eq_(views.release_notes_template('Release', 'Thunderbird'),
'thunderbird/releases/release-notes.html')
eq_(views.release_notes_template('Beta', 'Thunderbird'),
'thunderbird/releases/beta-notes.html')
eq_(views.release_notes_template('', ''),
'firefox/releases/release-notes.html')
@override_settings(DEV=False)
@patch('bedrock.releasenotes.views.get_object_or_404')
def test_non_public_release(self, get_object_or_404):
"""
Should raise 404 if not release.is_public and not settings.DEV
"""
get_object_or_404.return_value = Release(is_public=False)
with self.assertRaises(Http404):
views.get_release_or_404('42', 'Firefox')
@patch('bedrock.releasenotes.views.releasenotes_url')
def test_no_equivalent_release_url(self, mock_releasenotes_url):
"""
Should return None without calling releasenotes_url
"""
release = Mock()
release.equivalent_android_release.return_value = None
release.equivalent_desktop_release.return_value = None
eq_(views.equivalent_release_url(release), None)
eq_(mock_releasenotes_url.called, 0)
@patch('bedrock.releasenotes.views.releasenotes_url')
def test_android_equivalent_release_url(self, mock_releasenotes_url):
"""
Should return the url for the equivalent android release
"""
release = Mock()
eq_(views.equivalent_release_url(release),
mock_releasenotes_url.return_value)
mock_releasenotes_url.assert_called_with(
release.equivalent_android_release.return_value)
@patch('bedrock.releasenotes.views.releasenotes_url')
def test_desktop_equivalent_release_url(self, mock_releasenotes_url):
"""
Should return the url for the equivalent desktop release
"""
release = Mock()
release.equivalent_android_release.return_value = None
eq_(views.equivalent_release_url(release),
mock_releasenotes_url.return_value)
mock_releasenotes_url.assert_called_with(
release.equivalent_desktop_release.return_value)
@patch('bedrock.releasenotes.views.android_builds')
def test_get_download_url_android(self, mock_android_builds):
"""
Shoud return the download link for the release.channel from
android_builds
"""
mock_android_builds.return_value = [{'download_link': '/download'}]
release = Mock(product='Firefox for Android')
link = views.get_download_url(release)
eq_(link, '/download')
mock_android_builds.assert_called_with(release.channel)
def test_get_download_url_thunderbird(self):
release = Mock(product='Thunderbird')
link = views.get_download_url(release)
eq_(link, '/en-US/thunderbird/')
def test_get_download_url_thunderbird_beta(self):
release = Mock(product='Thunderbird', channel='Beta')
link = views.get_download_url(release)
eq_(link, '/en-US/thunderbird/channel/')
def test_check_url(self):
eq_(views.check_url('Firefox for Android', '42.0'),
'https://support.mozilla.org/kb/will-firefox-work-my-mobile-device')
eq_(views.check_url('Firefox for iOS', '1.4'),
'/en-US/firefox/ios/1.4/system-requirements/')
eq_(views.check_url('Firefox', '42.0'),
'/en-US/firefox/42.0/system-requirements/')
class TestReleaseNotesIndex(TestCase):
pd_cache = get_cache('product-details')
def setUp(self):
self.pd_cache.clear()
@patch('bedrock.releasenotes.views.l10n_utils.render')
@patch('bedrock.releasenotes.views.firefox_desktop', firefox_desktop)
def test_relnotes_index_firefox(self, render_mock):
with self.activate('en-US'):
self.client.get(reverse('firefox.releases.index'))
releases = render_mock.call_args[0][2]['releases']
eq_(len(releases), len(firefox_desktop.firefox_history_major_releases))
eq_(releases[0][0], 36.0)
eq_(releases[0][1]['major'], '36.0')
eq_(releases[0][1]['minor'], [])
eq_(releases[3][0], 33.1)
eq_(releases[3][1]['major'], '33.1')
eq_(releases[3][1]['minor'], ['33.1.1'])
eq_(releases[4][0], 33.0)
eq_(releases[4][1]['major'], '33.0')
eq_(releases[4][1]['minor'], ['33.0.1', '33.0.2', '33.0.3'])
eq_(releases[6][0], 31.0)
eq_(releases[6][1]['major'], '31.0')
eq_(releases[6][1]['minor'],
['31.1.0', '31.1.1', '31.2.0', '31.3.0', '31.4.0', '31.5.0'])
@patch('bedrock.releasenotes.views.thunderbird_desktop', thunderbird_desktop)
def test_relnotes_index_thunderbird(self):
with self.activate('en-US'):
response = self.client.get(reverse('thunderbird.releases.index'))
doc = pq(response.content)
eq_(len(doc('a[href="0.1.html"]')), 1)
eq_(len(doc('a[href="1.5.0.2.html"]')), 1)
eq_(len(doc('a[href="../2.0.0.0/releasenotes/"]')), 1)
eq_(len(doc('a[href="../3.0.1/releasenotes/"]')), 1)
class TestNotesRedirects(TestCase):
def _test(self, url_from, url_to):
with self.activate('en-US'):
url = '/en-US' + url_from
response = self.client.get(url)
eq_(response.status_code, 302)
eq_(response['Location'], 'http://testserver/en-US' + url_to)
@patch('bedrock.releasenotes.views.firefox_desktop.latest_version',
Mock(return_value='22.0'))
def test_desktop_release_version(self):
self._test('/firefox/notes/',
'/firefox/22.0/releasenotes/')
self._test('/firefox/latest/releasenotes/',
'/firefox/22.0/releasenotes/')
@patch('bedrock.releasenotes.views.firefox_desktop.latest_version',
Mock(return_value='23.0b1'))
def test_desktop_beta_version(self):
self._test('/firefox/beta/notes/',
'/firefox/23.0beta/releasenotes/')
@patch('bedrock.releasenotes.views.firefox_desktop.latest_version',
Mock(return_value='24.0a2'))
def test_desktop_developer_version(self):
self._test('/firefox/developer/notes/',
'/firefox/24.0a2/auroranotes/')
@patch('bedrock.releasenotes.views.firefox_desktop.latest_version',
Mock(return_value='24.2.0esr'))
def test_desktop_esr_version(self):
self._test('/firefox/organizations/notes/',
'/firefox/24.2.0/releasenotes/')
@patch('bedrock.releasenotes.views.firefox_android.latest_version',
Mock(return_value='22.0'))
def test_android_release_version(self):
self._test('/firefox/android/notes/',
'/firefox/android/22.0/releasenotes/')
@patch('bedrock.releasenotes.views.firefox_android.latest_version',
Mock(return_value='23.0b1'))
def test_android_beta_version(self):
self._test('/firefox/android/beta/notes/',
'/firefox/android/23.0beta/releasenotes/')
@patch('bedrock.releasenotes.views.firefox_android.latest_version',
Mock(return_value='24.0a2'))
def test_android_aurora_version(self):
self._test('/firefox/android/aurora/notes/',
'/firefox/android/24.0a2/auroranotes/')
@patch('bedrock.releasenotes.views.firefox_ios.latest_version',
Mock(return_value='1.4'))
def test_ios_release_version(self):
self._test('/firefox/ios/notes/',
'/firefox/ios/1.4/releasenotes/')
@patch('bedrock.releasenotes.views.thunderbird_desktop.latest_version',
Mock(return_value='22.0'))
def test_thunderbird_release_version(self):
self._test('/thunderbird/notes/',
'/thunderbird/22.0/releasenotes/')
self._test('/thunderbird/latest/releasenotes/',
'/thunderbird/22.0/releasenotes/')
@patch('bedrock.releasenotes.views.thunderbird_desktop.latest_version',
Mock(return_value='41.0b1'))
def test_thunderbird_beta_version(self):
self._test('/thunderbird/beta/notes/',
'/thunderbird/41.0beta/releasenotes/')
class TestSysreqRedirect(TestCase):
def _test(self, url_from, url_to):
with self.activate('en-US'):
url = '/en-US' + url_from
response = self.client.get(url)
eq_(response.status_code, 302)
eq_(response['Location'], 'http://testserver/en-US' + url_to)
@patch('bedrock.releasenotes.views.firefox_desktop.latest_version',
Mock(return_value='22.0'))
def test_desktop_release_version(self):
self._test('/firefox/system-requirements/',
'/firefox/22.0/system-requirements/')
@patch('bedrock.releasenotes.views.firefox_desktop.latest_version',
Mock(return_value='23.0b1'))
def test_desktop_beta_version(self):
self._test('/firefox/beta/system-requirements/',
'/firefox/23.0beta/system-requirements/')
@patch('bedrock.releasenotes.views.firefox_desktop.latest_version',
Mock(return_value='24.0a2'))
def test_desktop_developer_version(self):
self._test('/firefox/developer/system-requirements/',
'/firefox/24.0a2/system-requirements/')
@patch('bedrock.releasenotes.views.firefox_desktop.latest_version',
Mock(return_value='24.2.0esr'))
def test_desktop_esr_version(self):
self._test('/firefox/organizations/system-requirements/',
'/firefox/24.0/system-requirements/')
@patch('bedrock.releasenotes.views.thunderbird_desktop.latest_version',
Mock(return_value='22.0'))
def test_thunderbird_release_version(self):
self._test('/thunderbird/system-requirements/',
'/thunderbird/22.0/system-requirements/')
self._test('/thunderbird/latest/system-requirements/',
'/thunderbird/22.0/system-requirements/')
@patch('bedrock.releasenotes.views.thunderbird_desktop.latest_version',
Mock(return_value='41.0b1'))
def test_thunderbird_beta_version(self):
self._test('/thunderbird/beta/system-requirements/',
'/thunderbird/41.0beta/system-requirements/')
| mpl-2.0 | 5,681,859,812,142,583,000 | 42.84715 | 81 | 0.633383 | false | 3.574446 | true | false | false |
mozilla/FlightDeck | apps/search/views.py | 1 | 5465 | import commonware.log
from django.core.paginator import Paginator, EmptyPage
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from jetpack.models import Package
from .helpers import package_search, get_activity_scale
from .forms import SearchForm
from pyes.urllib3.connectionpool import TimeoutError
log = commonware.log.getLogger('f.search')
SORT_MAPPING = {
'score':'_score',
'activity':'-activity',
'forked':'-copies_count',
'used':'-times_depended',
'new':'-created_at',
'size':'-size',
}
REVERSE_SORT_MAPPING = dict((v, k) for k, v in SORT_MAPPING.items())
def search(request):
form = SearchForm(request.GET)
form.is_valid()
query = form.cleaned_data
q = query.get('q').lower()
type_ = query.get('type') or None
types = {'a': 'addon', 'l': 'library'}
page = query.get('page') or 1
limit = 20
activity_map = get_activity_scale()
if q and query.get('sort') == '':
sort = '_score'
elif query.get('sort') == '':
sort = '-activity'
else:
sort = SORT_MAPPING.get(query.get('sort'), '_score')
query['sort'] = REVERSE_SORT_MAPPING.get(sort)
filters = {}
filters['user'] = request.user
author = query.get('author')
if author:
filters['author'] = author.id
if query.get('copies'):
filters['copies_count__gte'] = query['copies']
else:
query['copies'] = 0
if query.get('used') and type_ != 'a':
# Add-ons can't be depended upon, so this query would filter out
# every single Add-on
filters['times_depended__gte'] = query['used']
else:
query['used'] = 0
if query.get('example'):
filters['example'] = 'true'
if query.get('featured'):
filters['featured'] = 'true'
if query.get('activity'):
filters['activity__gte'] = activity_map.get(str(query['activity']), 0)
copies_facet = {'terms': {'field': 'copies_count'}}
times_depended_facet = {'terms': {'field': 'times_depended'}}
examples_facet = {'query': {'term': {'example': 'true' }}}
featured_facet = {'query': {'term': {'featured': 'true' }}}
facets_ = {
'copies': copies_facet,
'times_depended': times_depended_facet,
'example': examples_facet,
'featured': featured_facet
}
template = ''
results={}
facets={}
if type_:
filters['type'] = type_
qs = package_search(q, **filters).order_by(sort).facet(**facets_)
try:
results['pager'] = Paginator(qs, per_page=limit).page(page)
except EmptyPage:
results['pager'] = Paginator(qs, per_page=limit).page(1)
facets = _facets(results['pager'].object_list.facets)
facets['everyone_total'] = len(qs)
template = 'results.html'
else:
# combined view
results['addons'] = package_search(q, type='a', **filters) \
.order_by(sort)[:5]
results['libraries'] = package_search(q, type='l', **filters) \
.order_by(sort)[:5]
results['all'] = package_search(q, **filters).facet(**facets_)[:0]
facets = _facets(results['all'].facets)
facets['everyone_total'] = facets['combined_total']
template = 'aggregate.html'
ctx = {
'q': q,
'page': 'search',
'form': form,
'query': query,
'type': types.get(type_, None)
}
ctx.update(results)
ctx.update(facets)
if request.is_ajax():
template = 'ajax/' + template
return _render(request, template, ctx)
def rss_redirect(request, type_):
from base.helpers import urlparams
form = SearchForm(request.GET)
form.is_valid()
query = dict(form.cleaned_data)
if type_ != 'combined':
query['type'] = type_[0]
return redirect(urlparams(reverse('search.rss'), **query), permanent=True)
def _render(request, template, data={}):
return render_to_response(template, data, RequestContext(request))
def _facets(facets):
type_totals = dict((t['term'], t['count']) for t in facets['types'])
my_total = 0
if 'author' in facets and len(facets['author']):
my_total = facets['author'][0]['count']
max_copies = 0
if 'copies' in facets:
copies_steps = [t['term'] for t in facets['copies']]
if copies_steps:
copies_steps.sort()
max_ = copies_steps.pop()
max_copies = max(max_copies, max_)
max_times_depended = 0
if 'times_depended' in facets:
depended_steps = [t['term'] for t in facets['times_depended']]
if depended_steps:
depended_steps.sort()
max_ = depended_steps.pop()
max_times_depended = max(max_times_depended, max_)
example_count = 0
if 'example' in facets:
example_count = facets['example']
featured_count = 0
if 'featured' in facets:
featured_count = facets['featured']
return {
'addon_total': type_totals.get('a', 0),
'library_total': type_totals.get('l', 0),
'my_total': my_total,
'combined_total': type_totals.get('a', 0) + type_totals.get('l', 0),
'max_copies': max_copies,
'max_times_depended': max_times_depended,
'examples_total': example_count,
'featured_total': featured_count
}
| bsd-3-clause | 6,986,333,226,037,713,000 | 29.19337 | 78 | 0.58097 | false | 3.548701 | false | false | false |
jonfoster/pyxb1 | tests/drivers/test-wildcard.py | 1 | 9405 | import pyxb.binding.generate
import pyxb.utils.domutils
from xml.dom import Node
import os.path
schema_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
'../schemas/test-wildcard.xsd'))
code = pyxb.binding.generate.GeneratePython(schema_location=schema_path)
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb.exceptions_ import *
import unittest
def nc_not (ns_or_absent):
return ( pyxb.xmlschema.structures.Wildcard.NC_not, ns_or_absent )
class TestIntensionalSet (unittest.TestCase):
def testTest (self):
ns = 'URN:namespace'
not_nc = nc_not(ns)
self.assert_(isinstance(not_nc, tuple))
self.assertEqual(2, len(not_nc))
self.assertEqual(pyxb.xmlschema.structures.Wildcard.NC_not, not_nc[0])
self.assertEqual(ns, not_nc[1])
def testUnion_1 (self):
UNION = pyxb.xmlschema.structures.Wildcard.IntensionalUnion
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(nc_any, UNION([ nc_any, nc_any ]))
self.assertEqual(nc_not(ns1), UNION([ nc_not(ns1), nc_not(ns1) ]))
self.assertEqual(set([ns1]), UNION([ set([ns1]), set([ns1]) ]))
def testUnion_2 (self):
UNION = pyxb.xmlschema.structures.Wildcard.IntensionalUnion
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(nc_any, UNION([ nc_any, set([ns1]) ]))
self.assertEqual(nc_any, UNION([ nc_any, nc_not(ns1) ]))
self.assertEqual(nc_any, UNION([ nc_any, nc_not(None) ]))
def testUnion_3 (self):
UNION = pyxb.xmlschema.structures.Wildcard.IntensionalUnion
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(set([ns1, ns2]), UNION([set([ns1]), set([ns2])]))
self.assertEqual(set([None, ns1]), UNION([set([None]), set([ns1])]))
self.assertEqual(set([None]), UNION([set([None]), set([None])]))
def testUnion_4 (self):
UNION = pyxb.xmlschema.structures.Wildcard.IntensionalUnion
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(nc_not(None), UNION([nc_not(ns1), nc_not(ns2)]))
self.assertEqual(nc_not(None), UNION([nc_not(ns1), nc_not(None)]))
def testUnion_5 (self):
UNION = pyxb.xmlschema.structures.Wildcard.IntensionalUnion
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(nc_any, UNION([nc_not(ns1), set([ns1, None])])) # 5.1
self.assertEqual(nc_not(None), UNION([nc_not(ns1), set([ns1, ns2])])) # 5.2
self.assertRaises(SchemaValidationError, UNION, [nc_not(ns1), set([None, ns2])]) # 5.3
self.assertEqual(nc_not(ns1), UNION([nc_not(ns1), set([ns2])])) # 5.4
def testUnion_6 (self):
UNION = pyxb.xmlschema.structures.Wildcard.IntensionalUnion
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(nc_any, UNION([nc_not(None), set([ns1, ns2, None])])) # 6.1
self.assertEqual(nc_not(None), UNION([nc_not(None), set([ns1, ns2])])) # 6.2
def testIntersection_1 (self):
ISECT = pyxb.xmlschema.structures.Wildcard.IntensionalIntersection
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(nc_any, ISECT([ nc_any, nc_any ]))
self.assertEqual(nc_not(ns1), ISECT([ nc_not(ns1), nc_not(ns1) ]))
self.assertEqual(set([ns1]), ISECT([ set([ns1]), set([ns1]) ]))
def testIntersection_2 (self):
ISECT = pyxb.xmlschema.structures.Wildcard.IntensionalIntersection
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(set([ns1]), ISECT([ nc_any, set([ns1]) ]))
self.assertEqual(nc_not(ns1), ISECT([ nc_any, nc_not(ns1) ]))
self.assertEqual(nc_not(None), ISECT([ nc_any, nc_not(None) ]))
def testIntersection_3 (self):
ISECT = pyxb.xmlschema.structures.Wildcard.IntensionalIntersection
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(set([ns2]), ISECT([nc_not(ns1), set([ns1, ns2, None])]))
self.assertEqual(set([ns2]), ISECT([nc_not(ns1), set([ns1, ns2])]))
self.assertEqual(set([ns2]), ISECT([nc_not(ns1), set([ns2])]))
def testIntersection_4 (self):
ISECT = pyxb.xmlschema.structures.Wildcard.IntensionalIntersection
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(set([ns2]), ISECT([set([ns1, ns2]), set([ns2, None])]))
self.assertEqual(set([ns2, None]), ISECT([set([None, ns1, ns2]), set([ns2, None])]))
self.assertEqual(set([]), ISECT([set([ns1]), set([ns2, None])]))
self.assertEqual(set([]), ISECT([set([ns1]), set([ns2, ns1]), set([ns2, None])]))
self.assertEqual(set([ns1]), ISECT([set([ns1, None]), set([None, ns2, ns1]), set([ns1, ns2])]))
def testIntersection_5 (self):
ISECT = pyxb.xmlschema.structures.Wildcard.IntensionalIntersection
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertRaises(SchemaValidationError, ISECT, [nc_not(ns1), nc_not(ns2)])
def testIntersection_6 (self):
ISECT = pyxb.xmlschema.structures.Wildcard.IntensionalIntersection
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(nc_not(ns1), ISECT([nc_not(ns1), nc_not(None)]))
class TestWildcard (unittest.TestCase):
def testElement (self):
# NB: Test on CTD, not element
self.assert_(wrapper_._HasWildcardElement)
xmls = '<wrapper><first/><second/><third/></wrapper>'
doc = pyxb.utils.domutils.StringToDOM(xmls)
instance = wrapper.createFromDOM(doc.documentElement)
self.assert_(isinstance(instance.wildcardElements(), list))
self.assertEquals(1, len(instance.wildcardElements()))
# Alternative parser path
instance = CreateFromDocument(xmls)
self.assert_(isinstance(instance.wildcardElements(), list))
self.assertEquals(1, len(instance.wildcardElements()))
def _validateWildcardWrappingRecognized (self, instance):
self.assert_(isinstance(instance.wildcardElements(), list))
self.assertEquals(1, len(instance.wildcardElements()))
dom = instance.wildcardElements()[0]
self.assertTrue(isinstance(dom, Node))
self.assertEquals(Node.ELEMENT_NODE, dom.nodeType)
self.assertEquals('third', dom.nodeName)
self.assertEquals(1, len(dom.childNodes))
cdom = dom.firstChild
self.assertTrue(isinstance(cdom, Node))
self.assertEquals(Node.ELEMENT_NODE, cdom.nodeType)
self.assertEquals('selt', cdom.nodeName)
ccdom = cdom.firstChild
self.assertTrue(isinstance(ccdom, Node))
self.assertEquals(Node.TEXT_NODE, ccdom.nodeType)
self.assertEquals('text', ccdom.data)
def testWildcardWrappingRecognized (self):
# NB: Test on CTD, not element
self.assert_(wrapper_._HasWildcardElement)
xmls = '<wrapper><first/><second/><third><selt>text</selt></third></wrapper>'
doc = pyxb.utils.domutils.StringToDOM(xmls)
instance = wrapper.createFromDOM(doc.documentElement)
self._validateWildcardWrappingRecognized(instance)
# Alternative parser path
instance = CreateFromDocument(xmls)
self._validateWildcardWrappingRecognized(instance)
def testMultiElement (self):
tested_overmax = False
for rep in range(0, 6):
xmls = '<wrapper><first/><second/>%s</wrapper>' % (''.join(rep * ['<third/>']),)
doc = pyxb.utils.domutils.StringToDOM(xmls)
if 3 >= rep:
instance = wrapper.createFromDOM(doc.documentElement)
self.assert_(isinstance(instance.wildcardElements(), list))
self.assertEquals(rep, len(instance.wildcardElements()))
for i in range(0, rep):
self.assertEquals('third', instance.wildcardElements()[i].nodeName)
else:
tested_overmax = True
self.assertRaises(ExtraContentError, wrapper.createFromDOM, doc.documentElement)
self.assert_(tested_overmax)
def testAttribute (self):
# NB: Test on CTD, not element
self.assert_(isinstance(wrapper_._AttributeWildcard, pyxb.binding.content.Wildcard))
xmls = '<wrapper myattr="true" auxattr="somevalue"/>'
doc = pyxb.utils.domutils.StringToDOM(xmls)
instance = wrapper.createFromDOM(doc.documentElement)
self.assert_(isinstance(instance.wildcardAttributeMap(), dict))
self.assertEquals(1, len(instance.wildcardAttributeMap()))
self.assertEquals('somevalue', instance.wildcardAttributeMap()['auxattr'])
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 3,425,045,343,297,419,000 | 44.216346 | 103 | 0.632749 | false | 3.373386 | true | false | false |
jkyeung/XlsxWriter | xlsxwriter/test/worksheet/test_merge_range02.py | 1 | 5265 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, [email protected]
#
import unittest
from ...compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...worksheet import Worksheet
from ...format import Format
from ...sharedstrings import SharedStringTable
class TestAssembleWorksheet(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test merged cell range"""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.str_table = SharedStringTable()
worksheet.select()
cell_format1 = Format({'xf_index': 1})
cell_format2 = Format({'xf_index': 2})
worksheet.merge_range('B3:C3', 'Foo', cell_format1)
worksheet.merge_range('A2:D2', '', cell_format2)
worksheet.select()
worksheet._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="A2:D3"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="2" spans="1:4">
<c r="A2" s="2"/>
<c r="B2" s="2"/>
<c r="C2" s="2"/>
<c r="D2" s="2"/>
</row>
<row r="3" spans="1:4">
<c r="B3" s="1" t="s">
<v>0</v>
</c>
<c r="C3" s="1"/>
</row>
</sheetData>
<mergeCells count="2">
<mergeCell ref="B3:C3"/>
<mergeCell ref="A2:D2"/>
</mergeCells>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
def test_assemble_xml_file_write(self):
"""Test writing a worksheet with a blank cell with write() method."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
cell_format = Format({'xf_index': 1})
# No format. Should be ignored.
worksheet.write(0, 0, None)
worksheet.write(1, 2, None, cell_format)
worksheet.select()
worksheet._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="C2"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="2" spans="3:3">
<c r="C2" s="1"/>
</row>
</sheetData>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
def test_assemble_xml_file_A1(self):
"""Test writing a worksheet with a blank cell with A1 notation."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
cell_format = Format({'xf_index': 1})
# No format. Should be ignored.
worksheet.write_blank('A1', None)
worksheet.write_blank('C2', None, cell_format)
worksheet.select()
worksheet._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="C2"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="2" spans="3:3">
<c r="C2" s="1"/>
</row>
</sheetData>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
| bsd-2-clause | -8,920,905,395,351,431,000 | 34.816327 | 171 | 0.491738 | false | 3.94382 | true | false | false |
llvm/llvm-zorg | llvmbisect/llvmlab/shell.py | 1 | 1123 | """
shell like utilities
"""
import os
def execute(args):
import subprocess
"""execute(command) - Run the given command (or argv list) in a shell and
return the exit code."""
return subprocess.Popen(args).wait()
def capture(args, include_stderr=False):
import subprocess
"""capture(command) - Run the given command (or argv list) in a shell and
return the standard output."""
stderr = subprocess.PIPE
if include_stderr:
stderr = subprocess.STDOUT
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=stderr)
out, _ = p.communicate()
return p.wait(), out
def mkdir_p(path):
"""mkdir_p(path) - Make the "path" directory, if it does not exist; this
will also make directories for any missing parent directories."""
import errno
if not path or os.path.exists(path):
return
parent = os.path.dirname(path)
if parent != path:
mkdir_p(parent)
try:
os.mkdir(path)
except OSError as e:
# Ignore EEXIST, which may occur during a race condition.
if e.errno != errno.EEXIST:
raise
| apache-2.0 | 3,373,758,457,936,435,000 | 24.522727 | 77 | 0.643811 | false | 4.010714 | false | false | false |
cqcarrow/tarrow | Client/Core/controller.py | 1 | 8459 | """ Create an set of stocks (input by symbol) and open communications
between them and the server via the Gateway.
Copyright (c) Cambridge Quantum Computing ltd. All rights reserved.
Licensed under the MIT License. See LICENSE file
in the project root for full license information.
"""
import sys
import datetime
import smtplib
import copy
import importlib
from .gateway import Gateway
from .pricebar import PriceBar
class Controller:
""" Responsible for managing a set of stocks in one process.
"""
def __init__(self, version, environment, global_settings, client_id, symbols):
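        """ Connect to the gateway, load each requested stock with its
        per-version settings and initialise the reporter for this client.
        `symbols` is an iterable of ticker symbols managed by this process.
        """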
self.version = version
self.environment = environment
self.gateway = Gateway()
self.account = self.gateway.getAccounts()[0]
self.stocks = {}
        self.new_bars = {}
        # Initialised here so the listen loop can reference it even before
        # the first complete set of live bars has arrived.
        self.current_time = None
self.global_settings = global_settings
self.loadStocks(symbols)
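        # Fall back to a null (no-op) reporter when the settings do not supply one.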
if "reporter" not in global_settings:
global_settings["reporter"] = NullReporter()
self.reporter = global_settings["reporter"]
self.reporter.initiate(version, environment, client_id)
def loadStock(self, symbol, exchange, currency):
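        """ Fetch a single stock object from the gateway for this account. """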
self.report("Getting stock {:s} from gateway".format(symbol))
return self.gateway.getStock(
self.account,
symbol,
exchange,
currency
)
def loadStocks(self, symbols):
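        """ For every requested symbol: fetch the stock from the gateway,
        apply its settings, initialise its signaller and register it in
        self.stocks keyed by symbol.
        """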
for symbol in symbols:
stock_settings = self.getStockSettings(symbol)
stock = self.loadStock(
symbol,
stock_settings['exchange'],
stock_settings['currency']
)
for option_name in stock_settings:
if not hasattr(stock, option_name):
self.report(
"Warning: attribute {:s} hasn't got a default value".format(
option_name
)
)
setattr(stock, option_name, stock_settings[option_name])
self.report("Initiating signallers")
stock.signaller.initialise()
# now we store the stock object in self.stocks, referenced by its
# symbol.
self.stocks[symbol] = stock
def getStockSettings(self, symbol):
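        """ Merge the global settings with the stock's own settings from
        Versions/<version>/Stocks/<symbol>/index.py and the
        environment-specific file, when those files exist. Raises
        ValueError if no exchange or currency is found for the stock.
        """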
self.report("getting settings for {:s}".format(symbol))
# Create a shallow copy of settings so that settings can be overwritten by each stock.
stock_settings = copy.copy(self.global_settings)
self.report("after copy")
self.report("copied settings for {:s}".format(symbol))
self.report("loading version file for {:s}".format(symbol))
try:
# try to load the generic index file for the stock
version_file = importlib.import_module(
"Versions.{:s}.Stocks.{:s}.index".format(
self.version,
symbol
)
)
self.report("loaded version file for {:s}".format(symbol))
stock_settings.update(version_file.settings)
except Exception as error:
self.report("Exception type: ", error)
self.report("Stock file {:s} has no index.py file, continuing anyway".format(
symbol
))
self.report("loading version file for {:s}".format(symbol))
try:
# try to load the environment-specific file for the stock
environment_file = importlib.import_module(
"Versions.{:s}.Stocks.{:s}.{:s}".format(
self.version,
symbol,
self.environment
)
)
self.report("loaded environment file for {:s}".format(symbol))
stock_settings.update(environment_file.settings)
except Exception as error:
self.report("Exception type: ", error)
self.report("Stock file {:s} has no {:s}.py file, continuing anyway".format(
symbol,
self.environment
))
# verify that we have the exchange and currency. If not, then
# we don't have enough information to launch the client.
if "exchange" not in stock_settings:
raise ValueError(
"Stock {:s}'s exchange should be in it's index file or {:s} file".format(
symbol,
self.environment
)
)
elif "currency" not in stock_settings:
raise ValueError(
"Stock {:s}'s currency should be in it's index file or {:s} file".format(
symbol,
self.environment
)
)
self.report("loaded all settings for {:s}".format(symbol))
return stock_settings
def goLive(self):
# these are done in blocks rather than all-in-one, to allow separate stocks
# to get to the same stage before moving on
for symbol in self.stocks:
# load the stock data from 2012 to the current year
# (this also performs detection, classification, learning)
self.report("Loading Stock Data", symbol)
self.stocks[symbol].load()
for symbol in self.stocks:
self.stocks[symbol].analyse()
for symbol in self.stocks:
# Grab historical data from the stock. This is just in case
# this client starts up after the market opens or misses the previous
# day, etc.
self.report(
"Requesting historical data for stock '" + symbol + "'")
self.stocks[symbol].addHistoricalPriceBars(
self.gateway.getHistory(self.stocks[symbol])
)
for symbol in self.stocks:
# Subscribe to live market data
self.report("Requesting live data for stock '" + symbol + "'")
self.gateway.subscribeToMarketData(self.stocks[symbol])
self.gateway.finalise()
# Run the listening loop.
self.listen()
def getLogTag(self):
return "Controller"
def listen(self):
""" Run the main listening loop, handling responses from the gateway. """
while True:
            # wait for the gateway to send us something
listen_input = self.gateway.listen()
if listen_input["Type"] == "Prepare for Live Bars":
# PriceBars are going to come in - store them all and process in bulk afterwards,
# so that the message queue isn't blocked
self.new_bars = {}
# We have received a live bar, pass it to the stock.
elif listen_input["Type"] == "Live Bar":
# Find the stock based on the returned live data request's ID
symbol = self.gateway.request_to_stock[listen_input['RequestID']]
stock = self.stocks[symbol]
# Pass the new bar to the stock
self.new_bars[symbol] = PriceBar(listen_input['Bar'])
elif listen_input["Type"] == "End of Live Bars":
self.report("All bars are in. Adding them to the stock...")
for symbol in self.new_bars:
self.stocks[symbol].addLivePriceBar(self.new_bars[symbol])
self.report("Ready to process!")
for symbol in self.new_bars:
self.stocks[symbol].processNewBar()
self.current_time = self.stocks[symbol].current_time
self.report("Done. Flushing signallers.")
for symbol in self.stocks:
self.stocks[symbol].signaller.flush()
self.reporter.newBars(self, self.current_time)
# now tell the Arrow Server that we are done processing, for bookkeeping purposes.
self.gateway.finalise()
elif listen_input["Type"] == "Server Exit":
self.report("Server has closed.")
self.report("Generating complete report.")
self.report("Trades:", sum(len(self.stocks[symbol].closed_trades) for symbol in self.stocks))
self.reporter.endOfDay(self)
sys.exit(0) | mit | 902,031,004,339,659,300 | 42.067708 | 109 | 0.554084 | false | 4.733632 | false | false | false |
rickerbh/tictactoe_py | tests/ai_strategy_factory_tests.py | 1 | 1186 | from nose.tools import *
from tictactoe.ai_strategy_factory import AIStrategyFactory
from tictactoe.ai_strategies.hard import Hard
from tictactoe.ai_strategies.easy import Easy
def factory_returns_hard_strategy_test():
factory = AIStrategyFactory()
strategy = factory.strategy("Hard", "X", "O")
assert isinstance(strategy, Hard)
def factory_returns_easy_strategy_test():
factory = AIStrategyFactory()
strategy = factory.strategy("Easy", "X", "O")
assert isinstance(strategy, Easy)
def factory_returns_easy_strategy_case_insensitive_test():
factory = AIStrategyFactory()
strategy = factory.strategy("eAsY", "X", "O")
assert isinstance(strategy, Easy)
def factory_handles_bad_strategy_test():
factory = AIStrategyFactory()
ex = None
try:
strategy = factory.strategy("NoStrategyHereSorry", None, None)
except Exception as e:
ex = e
assert isinstance(ex, ValueError)
def factory_handles_no_strategy_test():
factory = AIStrategyFactory()
ex = None
try:
strategy = factory.strategy(None, None, None)
except Exception as e:
ex = e
assert isinstance(ex, ValueError)
| mit | 1,108,111,440,951,364,200 | 27.926829 | 70 | 0.689713 | false | 3.717868 | false | false | false |
stevearc/python-pike | pike/env.py | 1 | 15498 | """ Environments for running groups of graphs. """
import os
import time
from datetime import datetime
import copy
import logging
import six
import tempfile
import threading
from six.moves import cPickle as pickle # pylint: disable=F0401
from .exceptions import StopProcessing
from .items import FileMeta
from .nodes import (ChangeListenerNode, ChangeEnforcerNode, CacheNode, Edge,
NoopNode)
from .sqlitedict import SqliteDict
from .util import resource_spec
LOG = logging.getLogger(__name__)
def commit(cache):
""" Commit if SqliteDict, else do nothing. """
try:
cache.commit()
except AttributeError:
pass
def watch_graph(graph, partial=False, cache=None, fingerprint='md5'):
"""
Construct a copy of a graph that will watch source nodes for changes.
Parameters
----------
graph : :class:`~pike.Graph`
partial : bool, optional
If True, the :class:`~pike.ChangeListenerNode` will only propagate
changed files and the graph will rely on a :class:`~pike.CacheNode` to
produce the total output.
cache : str, optional
If present, cache the file fingerprints and other data in this file.
fingerprint : str or callable, optional
The method to use for fingerprinting files when ``watch=True``. See
:class:`~pike.nodes.watch.ChangeListenerNode` for details. (default
'md5')
"""
new_graph = copy.deepcopy(graph)
with new_graph:
# If we only pass through the changed files, we'll need a CacheNode at
# the end
if partial:
sink = CacheNode(cache, new_graph.name + '_cache')
new_graph.sink.connect(sink, '*', '*')
enforcer = ChangeEnforcerNode()
for i, node in enumerate(new_graph.source_nodes()):
# Find the outbound edge of the source
if node.eout:
edge = node.eout[0]
edge_index = edge.n2.ein.index(edge)
edge.remove()
else:
# If source has no outbound edge, make one.
edge = Edge(n2=NoopNode())
edge_index = 0
# Funnel files through a change listener
key = new_graph.name + '_listen_' + str(i)
listener = ChangeListenerNode(stop=False, cache=cache, key=key,
fingerprint=fingerprint)
node.connect(listener)
# Create a fan-in, fan-out with the changed files that goes through
# a ChangeEnforcer. That way processing will continue even if only
# one of the sources has changed files.
listener.connect(enforcer, input_name=str(i))
if not partial:
listener.connect(enforcer, output_name='all', input_name=str(i)
+ '_all')
if edge.input_name == '*':
edge.input_name = None
input_name = edge.input_name
new_edge = enforcer.connect(edge.n2, str(i), input_name)
# Preserve edge ordering to preserve argument ordering
edge.n2.ein.remove(new_edge)
edge.n2.ein.insert(edge_index, new_edge)
return new_graph
class IExceptionHandler(object):
"""
Interface for exception handlers.
This class can intercept exceptions raised while running a graph in an
environment and perform some processing.
"""
def handle_exception(self, graph, exc, node):
"""
Handle an exception.
Parameters
----------
graph : :class:`~pike.graph.Graph`
exc : :class:`Exception`
node : :class:`~pike.nodes.base.Node`
Returns
-------
handled : bool
If True, the Environment will not raise the exception
"""
raise NotImplementedError
def apply_error_style(node, error_style):
"""
Apply error styles to a graph.
Parameters
----------
node : :class:`~pike.nodes.base.Node`
The node that threw the exception
error_style : dict
The styles to apply to nodes and edges involved in the traceback.
Returns
-------
style : dict
Style dict for passing to :meth:`pike.graph.Graph.dot`.
"""
styles = {}
for node in node.walk_up(True):
styles[node] = error_style
for edge in node.ein:
styles[edge] = error_style
return styles
class RenderException(IExceptionHandler):
"""
Render traceback as a png in a directory.
Parameters
----------
output_dir : str, optional
Directory to render exception into (defaults to temporary directory)
error_style : dict, optional
Dict of attributes to apply to nodes and edges involved in the
traceback (default {'color': 'red'}).
"""
def __init__(self, output_dir=None, error_style=None):
super(RenderException, self).__init__()
self.error_style = error_style or {'color': 'red'}
if output_dir is None:
self.output_dir = tempfile.gettempdir()
else:
self.output_dir = output_dir
def handle_exception(self, graph, exc, node):
filename = 'exc_%s.png' % datetime.now().isoformat()
fullpath = os.path.join(self.output_dir, filename)
styles = apply_error_style(node, self.error_style)
graph.render(fullpath, style=styles)
LOG.error("Exception rendered as %s", fullpath)
class ShowException(IExceptionHandler):
"""
When an exception occurs, this will auto-open the visual traceback.
Parameters
----------
error_style : dict, optional
Dict of attributes to apply to nodes and edges involved in the
traceback (default {'color': 'red'}).
**kwargs : dict, optional
These will be passed to :meth:`~pike.graph.Graph.show`
"""
def __init__(self, error_style=None, show_kwargs=None):
super(ShowException, self).__init__()
self.error_style = error_style or {'color': 'red'}
self.show_kwargs = show_kwargs or {}
def handle_exception(self, graph, exc, node):
styles = apply_error_style(node, self.error_style)
graph.show(style=styles, **self.show_kwargs)
class Environment(object):
"""
Environment for running multiple Graphs and caching the results.
Parameters
----------
watch : bool, optional
If True, watch all graphs for changes in the source files and rerun
them if changes are detected (default False)
cache : str, optional
The sqlite file to use as a persistent cache (defaults to in-memory
dict)
fingerprint : str or callable, optional
The method to use for fingerprinting files when ``watch=True``. See
:class:`~pike.nodes.watch.ChangeListenerNode` for details. (default
'md5')
exception_handler : :class:`~.IExceptionHandler`, optional
When running a graph throws an exception, this handler will do
something useful, like rendering a graph that visually shows you where
the error happened.
Notes
-----
"""
def __init__(self,
watch=False,
cache=None,
fingerprint='md5',
exception_handler=None,
):
self._fingerprint = fingerprint
self._graphs = {}
self._cache_file = cache
if cache is not None:
self._cache = SqliteDict(cache, 'processed', autocommit=False,
synchronous=0)
self._gen_files = SqliteDict(cache, 'file_paths', autocommit=False,
synchronous=0)
else:
self._cache = {}
self._gen_files = {}
self.default_output = None
self.watch = watch
self._exc_handler = exception_handler
def add(self, graph, ignore_default_output=False, partial=False):
"""
Add a graph to the Environment.
Parameters
----------
graph : :class:`~pike.Graph`
The graph to add
ignore_default_output : bool, optional
If True, will *not* run the ``default_output`` graph on the output
of this graph (default False)
partial : bool, optional
This argument will be passed to :meth:`~.watch_graph`
"""
name = graph.name
if name in self._graphs:
raise KeyError("Graph '%s' already exists in environment!" %
graph.name)
if self.default_output is not None and not ignore_default_output:
wrapper = copy.deepcopy(graph)
wrapper.name += '-wrapper'
with wrapper:
edge = wrapper.sink.connect(self.default_output, '*', '*')
graph = wrapper
if self.watch:
graph = watch_graph(graph, partial, self._cache_file,
self._fingerprint)
self._graphs[name] = graph
def set_default_output(self, graph):
"""
Set a default operation to be run after every graph.
By default, every time you :meth:`~.add` a Graph, that Graph will have
this process tacked on to the end. This can be used to do common
operations, such as writing files or generating urls.
Parameters
----------
graph : :class:`~pike.Graph` or :class:`~pike.Node`
The graph to run after other graphs.
"""
self.default_output = graph
def get(self, name):
""" Get the cached results of a graph. """
return self._cache.get(name)
def save(self, filename):
""" Saved the cached asset metadata to a file """
self.run_all(True)
with open(filename, 'wb') as ofile:
pickle.dump(dict(self._cache), ofile)
def load(self, filename):
""" Load cached asset metadata from a file """
with open(filename, 'rb') as ifile:
self._cache = pickle.load(ifile)
def run(self, name, bust=False):
"""
Run a graph and cache the result.
Returns the cached result if one exists.
Parameters
----------
name : str
Name of the graph to run
bust : bool, optional
If True, will ignore the cache and rerun (default False)
Returns
-------
results : dict
Same output as the graph
"""
if bust or self.watch or name not in self._cache:
LOG.debug("Running %s", name)
try:
start = time.time() * 1000
results = self._graphs[name].run()
elapsed = int(time.time() * 1000 - start)
LOG.info("Ran %s in %d ms", name, elapsed)
for items in six.itervalues(results):
for item in items:
if isinstance(item, FileMeta):
# Remove data to save memory
if hasattr(item, 'data'):
del item.data
self._gen_files[item.filename] = item.fullpath
commit(self._gen_files)
self._cache[name] = results
commit(self._cache)
except StopProcessing:
LOG.debug("No changes for %s", name)
except Exception as e:
if hasattr(e, 'node') and self._exc_handler is not None:
LOG.error("Exception at node %s", e.node)
graph = self._graphs[name]
ret = False
try:
ret = self._exc_handler.handle_exception(graph, e,
e.node)
except Exception:
LOG.exception("Error while handling exception")
if not ret:
raise e
else:
raise
return self._cache.get(name)
def run_all(self, bust=False):
""" Run all graphs. """
for name in self._graphs:
self.run(name, bust)
def clean(self, directory, dry_run=False):
"""
Remove all files in a directory that were not generated by the env
Parameters
----------
directory : str
The location to look for unnecessary files
dry_run : bool, optional
If True, will not actually delete the files (default False)
Returns
-------
removed : list
List of file paths that were deleted by the operation
Raises
------
exc : :class:`~ValueError`
If there are no known generated files. That would delete all files
in the directory, which is probably not the intended behavior.
"""
if not self._gen_files:
raise ValueError("No generated files found. Have you run "
"`run_all()`?")
directory = resource_spec(directory)
all_files = set()
for fullpath in six.itervalues(self._gen_files):
all_files.add(os.path.abspath(fullpath))
removed = []
for root, _, files in os.walk(directory):
for filename in files:
fullpath = os.path.abspath(os.path.join(root, filename))
if fullpath not in all_files:
removed.append(fullpath)
if not dry_run:
LOG.info("Removing %s", fullpath)
os.remove(fullpath)
return removed
def run_forever(self, sleep=2, daemon=False, daemon_proc=False):
"""
Rerun graphs forever, busting the env cache each time.
This is generally only useful if ``watch=True``.
Parameters
----------
sleep : int, optional
How long to sleep between runs. Default 2 seconds.
daemon : bool, optional
If True, will run in a background thread (default False)
daemon_proc : bool, optional
If True, will run in a child process (default False)
"""
if daemon and daemon_proc:
raise TypeError("daemon and daemon_proc cannot both be True")
if daemon:
thread = threading.Thread(target=self.run_forever,
kwargs={'sleep': sleep})
thread.daemon = True
thread.start()
return thread
elif daemon_proc:
pid = os.fork()
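            # The parent returns the child's PID immediately; the child falls through to the run loop below.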
if pid != 0:
return pid
while True:
try:
self.run_all(bust=True)
except KeyboardInterrupt:
break
except Exception:
LOG.exception("Error while running forever!")
time.sleep(sleep)
def lookup(self, path):
"""
Get a generated asset path
Parameters
----------
path : str
Relative path of the asset
Returns
-------
path : str or None
Absolute path of the generated asset (if it exists). If the path is
known to be invalid, this value will be None.
"""
if path not in self._gen_files:
return None
fullpath = self._gen_files[path]
return fullpath
| mit | -6,384,054,855,440,314,000 | 32.115385 | 79 | 0.552975 | false | 4.617998 | false | false | false |
HRODEV/Frequency | Frequency/Board/ActionPanel/ActionPanel.py | 1 | 16633 | import Game
from Board.ActionPanel.ArrowItem import *
from Board.ActionPanel.BuyUnitItems import *
from GameLogic.Map import Tile
from GameLogic.Unit import Soldier
from GameLogic.UnitFactory import getUnitPrice
from Helpers import Colors
from Helpers.EventHelpers import EventExist
from Vector2 import Vector2
class ActionPanel:
def __init__(self, game: Game, tile: Tile = None, endturnButtonRect=None, newSelection=None):
self.Size = Vector2((game.Settings.Resolution.X - game.Settings.GetMapSize().X) // 2,
game.Settings.Resolution.Y)
self.Position = Vector2(0, 0)
self.Tile = tile
self.EndturnButtonRect = endturnButtonRect
self.NewSelection = newSelection
self.Map = None
self.EndTurnButtonImage = pygame.transform.scale(
pygame.image.load('images/buttons/endturnButton.png').convert_alpha(), [150, 25])
game.Settings.SetMenuLeftSize(self.Size)
def Update(self, game: Game) -> 'ActionPanel':
# End turn
if self.EndturnButtonIsClickedByMouse(game):
game.Logic.EndTurn()
return DefaultActionPanel(game)
return ActionPanel(game, self.Tile, self.EndturnButtonRect)
def Draw(self, game: Game):
font = pygame.font.Font(None, 30)
font.set_bold(True)
# Draw the background
pygame.draw.rect(game.Settings.GetScreen(), Colors.WHITE,
(self.Position.X, self.Position.Y, self.Size.X, self.Size.Y))
game.Settings.GetScreen().blit(font.render("Action panel", True, Colors.BLACK), (10, 10))
# Draw end turn button
self.EndturnButtonRect = game.Settings.GetScreen().blit(self.EndTurnButtonImage,
(10, game.Settings.Resolution.Y - 50))
def EndturnButtonIsHoverdByMouse(self):
return self.EndturnButtonRect is not None and self.EndturnButtonRect.collidepoint(pygame.mouse.get_pos())
def EndturnButtonIsClickedByMouse(self, game):
return self.EndturnButtonIsHoverdByMouse() and EventExist(game.Events, pygame.MOUSEBUTTONUP)
class DefaultActionPanel(ActionPanel):
def Update(self, game: Game):
nself = super().Update(game)
return DefaultActionPanel(game, self.Tile, nself.EndturnButtonRect)
def Draw(self, game: Game):
super().Draw(game)
font = pygame.font.Font(None, 20)
game.Settings.GetScreen().blit(font.render("Default", True, Colors.BLACK), (10, 35))
game.Settings.GetScreen().blit(font.render("Choose an tile or end the turn",
True, Colors.BLACK), (10, 55))
class SimpleTextButton:
def __init__(self, text, position):
self._text = text
self._position = position
self.clicked = False
self.rect = None
def Draw(self, screen):
font = pygame.font.Font(None, 20)
textColor = Colors.RED if self.clicked else Colors.BLACK
if self.IsHoverdByMouse():
self.rect = screen.blit(font.render(self._text, True, textColor, Colors.DIMGREY), self._position)
else:
self.rect = screen.blit(font.render(self._text, True, textColor), self._position)
def IsHoverdByMouse(self):
return self.rect is not None and self.rect.collidepoint(pygame.mouse.get_pos())
def IsClickedByMouse(self, game):
return self.IsHoverdByMouse() and EventExist(game.Events, pygame.MOUSEBUTTONUP)
class UnitActionPanel(ActionPanel):
def __init__(self, game: Game, tile: Tile = None, endturnButtonRect=None, buttons=None, newSelection=None,
_barackButton=None, _moveButton=None, _moveUnitFromBoatButton=None):
super().__init__(game, tile, endturnButtonRect, newSelection)
self._barrackButton = _barackButton if _barackButton is not None else SimpleTextButton("Buy Barrack", (10, 100))
if _moveButton is not None:
self._moveButton = _moveButton
else:
self._moveButton = SimpleTextButton("Move Unit", (10, 130))
self._moveButton.clicked = True
self._moveUnitFromBoatButton = _moveUnitFromBoatButton if _moveUnitFromBoatButton is not None \
else SimpleTextButton("move Unit to land", (10, 160))
if buttons is not None:
self.Buttons = buttons
else:
import GameLogic.MapHelpers
self.Buttons = []
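            # Create one arrow button per tile adjacent to the selected tile, offset in the direction of that neighbour.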
for pos in GameLogic.MapHelpers.getAroundingTiles(tile, game.Logic.Map):
if pos.Position.X == tile.Position.X + 1 and pos.Position.Y == tile.Position.Y:
self.Buttons.append(ArrowButtonRight(Vector2(40, 0)))
elif pos.Position.X == tile.Position.X + 1 and pos.Position.Y == tile.Position.Y + 1:
self.Buttons.append(ArrowButtonDownRight(Vector2(40, 40)))
elif pos.Position.X == tile.Position.X and pos.Position.Y == tile.Position.Y + 1:
self.Buttons.append(ArrowButtonDown(Vector2(0, 40)))
elif pos.Position.X == tile.Position.X - 1 and pos.Position.Y == tile.Position.Y + 1:
self.Buttons.append(ArrowButtonDownLeft(Vector2(-40, 40)))
elif pos.Position.X == tile.Position.X - 1 and pos.Position.Y == tile.Position.Y:
self.Buttons.append(ArrowButtonLeft(Vector2(-40, 0)))
elif pos.Position.X == tile.Position.X - 1 and pos.Position.Y == tile.Position.Y - 1:
self.Buttons.append(ArrowButtonUpLeft(Vector2(-40, -40)))
elif pos.Position.X == tile.Position.X and pos.Position.Y == tile.Position.Y - 1:
self.Buttons.append(ArrowButtonUp(Vector2(0, -40)))
elif pos.Position.X == tile.Position.X + 1 and pos.Position.Y == tile.Position.Y - 1:
self.Buttons.append(ArrowButtonUpRight(Vector2(40, -40)))
def Update(self, game: Game):
nself = super().Update(game)
if type(nself) is DefaultActionPanel:
return nself
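        # Only one action mode may be active at a time: move the unit, buy a barrack, or move a unit off a boat.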
if self._barrackButton.IsClickedByMouse(game) or self._moveButton.IsClickedByMouse(game) \
or self._moveUnitFromBoatButton.IsClickedByMouse(game):
if self._moveButton.IsClickedByMouse(game):
self._moveButton.clicked = True
self._barrackButton.clicked = False
self._moveUnitFromBoatButton.clicked = False
elif self._barrackButton.IsClickedByMouse(game):
self._barrackButton.clicked = True
self._moveButton.clicked = False
self._moveUnitFromBoatButton.clicked = False
else:
self._moveUnitFromBoatButton.clicked = True
self._barrackButton.clicked = False
self._moveButton.clicked = False
clickedButton = next((btn for btn in self.Buttons if btn.IsClickedByMouse(game)), None)
if self._moveButton.clicked:
if clickedButton is not None:
self.Tile.Unit.MoveTo(game.Logic.Map.GetTile(clickedButton.GetDestinationPosition(self.Tile.Position)))
return UnitActionPanel(game, self.Tile, nself.EndturnButtonRect, None,
clickedButton.GetDestinationPosition(self.Tile.Position))
elif self._barrackButton.clicked:
if clickedButton is not None:
barrack = game.Logic.BuyBarrack(
game.Logic.Map.GetTile(clickedButton.GetDestinationPosition(self.Tile.Position)))
if barrack is not None:
return BarrackActionPanel(game, game.Logic.Map.GetTile(
clickedButton.GetDestinationPosition(self.Tile.Position)))
else:
if clickedButton is not None:
self.Tile.Unit.Unit.MoveTo(
game.Logic.Map.GetTile(clickedButton.GetDestinationPosition(self.Tile.Position)))
self.Tile.Unit.Unit = None
return UnitActionPanel(game, self.Tile, nself.EndturnButtonRect, None,
clickedButton.GetDestinationPosition(self.Tile.Position))
return UnitActionPanel(game, self.Tile, nself.EndturnButtonRect, self.Buttons, None, self._barrackButton,
self._moveButton, self._moveUnitFromBoatButton)
def Draw(self, game: Game):
super().Draw(game)
screen = game.Settings.GetScreen()
font = pygame.font.Font(None, 20)
game.Settings.GetScreen().blit(font.render("Unit actions", True, Colors.BLACK), (10, 35))
screen.blit(font.render("Choose you actions with the unit",
True, Colors.BLACK), (10, 55))
screen.blit(font.render("Attack points: %i" % self.Tile.Unit.AttackPoints, True, Colors.BLACK), (10, 190))
screen.blit(font.render("Defense points: %i" % self.Tile.Unit.DefencePoints, True, Colors.BLACK), (10, 210))
# choose between buy a barrack or move the unit
self._barrackButton.Draw(screen)
self._moveButton.Draw(screen)
if type(self.Tile.Unit) is Boat:
self._moveUnitFromBoatButton.Draw(screen)
# Draw the Arrow Buttons
for arrowButton in self.Buttons:
arrowButton.Draw(game)
class BarrackActionPanel(ActionPanel):
def __init__(self, game: Game, tile: Tile = None, endturnButtonRect=None, buttons=None, buyUnits=None):
super().__init__(game, tile, endturnButtonRect)
if buttons is not None:
self.Buttons = buttons
else:
import GameLogic.MapHelpers
self.Buttons = []
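            # As in UnitActionPanel: one arrow button per tile adjacent to the barrack's tile.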
for pos in GameLogic.MapHelpers.getAroundingTiles(tile, game.Logic.Map):
if pos.Position.X == tile.Position.X + 1 and pos.Position.Y == tile.Position.Y:
self.Buttons.append(ArrowButtonRight(Vector2(40, 0)))
elif pos.Position.X == tile.Position.X + 1 and pos.Position.Y == tile.Position.Y + 1:
self.Buttons.append(ArrowButtonDownRight(Vector2(40, 40)))
elif pos.Position.X == tile.Position.X and pos.Position.Y == tile.Position.Y + 1:
self.Buttons.append(ArrowButtonDown(Vector2(0, 40)))
elif pos.Position.X == tile.Position.X - 1 and pos.Position.Y == tile.Position.Y + 1:
self.Buttons.append(ArrowButtonDownLeft(Vector2(-40, 40)))
elif pos.Position.X == tile.Position.X - 1 and pos.Position.Y == tile.Position.Y:
self.Buttons.append(ArrowButtonLeft(Vector2(-40, 0)))
elif pos.Position.X == tile.Position.X - 1 and pos.Position.Y == tile.Position.Y - 1:
self.Buttons.append(ArrowButtonUpLeft(Vector2(-40, -40)))
elif pos.Position.X == tile.Position.X and pos.Position.Y == tile.Position.Y - 1:
self.Buttons.append(ArrowButtonUp(Vector2(0, -40)))
elif pos.Position.X == tile.Position.X + 1 and pos.Position.Y == tile.Position.Y - 1:
self.Buttons.append(ArrowButtonUpRight(Vector2(40, -40)))
if buyUnits is not None:
self.BuyUnits = buyUnits
else:
self.BuyUnits = []
self.BuyUnits.append(SoldierButton(Vector2(0, 100), game.Logic.PlayingPlayer.Character.Id))
self.BuyUnits.append(RobotButton(Vector2(1, 100), game.Logic.PlayingPlayer.Character.Id))
self.BuyUnits.append(TankButton(Vector2(2, 100), game.Logic.PlayingPlayer.Character.Id))
self.BuyUnits.append(BoatButton(Vector2(3, 100), game.Logic.PlayingPlayer.Character.Id))
def Update(self, game: Game):
nself = super().Update(game)
if type(nself) is DefaultActionPanel:
return nself
clickedArrowButton = next((btn for btn in self.Buttons if btn.IsClickedByMouse(game)), None)
clickedUnitButton = next((btn for btn in self.BuyUnits if btn.IsClickedByMouse(game)), None)
if clickedUnitButton is not None:
for btn in self.BuyUnits:
btn.clicked = False
clickedUnitButton.clicked = True
clickedUnitButton = next((btn for btn in self.BuyUnits if btn.clicked), None)
if clickedUnitButton is not None and clickedArrowButton is not None:
game.Logic.BuyUnit(
clickedUnitButton.GetUnitType(),
game.Logic.Map.GetTile(clickedArrowButton.GetDestinationPosition(self.Tile.Position))
)
return BarrackActionPanel(game, self.Tile, nself.EndturnButtonRect, self.Buttons, self.BuyUnits)
def Draw(self, game: Game):
super().Draw(game)
screen = game.Settings.GetScreen()
font = pygame.font.Font(None, 20)
screen.blit(font.render("Barrack actions", True, Colors.BLACK), (10, 35))
screen.blit(font.render("Choose you actions with the Barrack",
True, Colors.BLACK), (10, 55))
screen.blit(font.render("Defence Points: %i" % self.Tile.Building.DefencePoints, True, Colors.BLACK), (10, 75))
# Draw the Arrow Buttons
for arrowButton in self.Buttons:
arrowButton.Draw(game)
# Draw the Buy Unit Buttons
for unitBuyButton in self.BuyUnits:
unitBuyButton.Draw(game)
current_money = game.Logic.PlayingPlayer.Money
# Draw the price of the units and check if the user can buy the unit
if current_money >= getUnitPrice(Soldier, self.Tile.Building.Owner.Character):
# Soldier
screen.blit(font.render('ƒ ' + str(getUnitPrice(Soldier, self.Tile.Building.Owner.Character)),
True, Colors.BLACK), (15, 150))
else:
screen.blit(font.render('ƒ ' + str(getUnitPrice(Soldier, self.Tile.Building.Owner.Character)),
True, Colors.RED), (15, 150))
if current_money >= getUnitPrice(Robot, self.Tile.Building.Owner.Character):
# Robot
screen.blit(font.render('ƒ ' + str(getUnitPrice(Robot, self.Tile.Building.Owner.Character)),
True, Colors.BLACK), (73, 150))
else:
screen.blit(font.render('ƒ ' + str(getUnitPrice(Robot, self.Tile.Building.Owner.Character)),
True, Colors.RED), (73, 150))
if current_money >= getUnitPrice(Tank, self.Tile.Building.Owner.Character):
# Tank
screen.blit(font.render('ƒ ' + str(getUnitPrice(Tank, self.Tile.Building.Owner.Character)),
True, Colors.BLACK), (131, 150))
else:
screen.blit(font.render('ƒ ' + str(getUnitPrice(Tank, self.Tile.Building.Owner.Character)),
True, Colors.RED), (131, 150))
if current_money >= getUnitPrice(Boat, self.Tile.Building.Owner.Character):
# Boat
screen.blit(font.render('ƒ ' + str(getUnitPrice(Boat, self.Tile.Building.Owner.Character)),
True, Colors.BLACK), (189, 150))
else:
screen.blit(font.render('ƒ ' + str(getUnitPrice(Boat, self.Tile.Building.Owner.Character)),
True, Colors.RED), (189, 150))
class InfoActionTile(ActionPanel):
def Update(self, game: Game):
nself = super().Update(game)
if type(nself) is DefaultActionPanel:
return nself
return InfoActionTile(game, self.Tile, nself.EndturnButtonRect)
def Draw(self, game: Game):
super().Draw(game)
font = pygame.font.Font(None, 20)
game.Settings.GetScreen().blit(font.render("Tile Info", True, Colors.BLACK), (10, 35))
game.Settings.GetScreen().blit(font.render("Here you can find info about the tile",
True, Colors.BLACK), (10, 55))
if self.Tile.Building is not None:
game.Settings.GetScreen().blit(
font.render("Defence Points: %i" % self.Tile.Building.DefencePoints, True, Colors.BLACK), (10, 75))
if self.Tile.Unit is not None:
game.Settings.GetScreen().blit(
font.render("Attack points: %i" % self.Tile.Unit.AttackPoints, True, Colors.BLACK), (10, 190))
game.Settings.GetScreen().blit(
font.render("Defense points: %i" % self.Tile.Unit.DefencePoints, True, Colors.BLACK), (10, 210)) | mit | 4,173,592,840,113,008,000 | 48.183432 | 120 | 0.613307 | false | 3.854162 | false | false | false |
aabdulwahed/cloudpots | agent/client/container/services.py | 1 | 1693 | import simplejson as json
from docker import Client
_DOCKER_BASEURL_= 'unix://var/run/docker.sock'
_PORTS_ = [2200]
class ContainerEngine():
def __init__(self):
self.ports = [22]
def newClient(self,base_url=_DOCKER_BASEURL_):
return Client(base_url)
def createContainer(self,client,
image_id,
command=None,
mem_limit=None,
cpu_shares=None,
private_ports = []):
"""Initiate and Create Container"""
return client.create_container(image_id,
command,
detach=True,
ports = private_ports ,
mem_limit=mem_limit,
cpu_shares=cpu_shares)
def startContainer(self,client, container_id, container_endpoints={}):
"""Start Container"""
return client.start(container_id, port_bindings = container_endpoints)
def searchImage(self, client, image_id):
"""Search Public Image Repo -- docker hub"""
try:
return client.search(image_id)[0]
except:
return {'ERROR':'Image is not found'}
def pullImage(self, client, image_id):
"""Pull Image from Public Repo"""
try:
for line in client.pull(image_id, stream=True):
print(json.dumps(json.loads(line), indent=4))
			return self.list_images(client,image=image_id)
except:
return {'ERROR':'Unable to pull image with a record id "%s" from docker hub!!'%(image_id)}
def list_images(self, client, image=None):
"""list local repo images"""
if image != None:
return client.images(image=image)
return client.images()
def removeContainer(self, client, container):
"""stop container then remove the stopped one"""
try:
client.stop(container)
except:
pass
try:
client.remove_container(container)
except:
return False
return True
| apache-2.0 | -1,505,548,805,681,197,800 | 22.513889 | 93 | 0.677496 | false | 3.255769 | false | false | false |
MuckRock/muckrock | muckrock/foiamachine/urls.py | 1 | 3617 | """
FOIA Machine urls
"""
# Django
from django.conf import settings
from django.conf.urls import include, url
from django.views.defaults import page_not_found, server_error
from django.views.generic import RedirectView, TemplateView
from django.views.static import serve
# Third Party
import debug_toolbar
# MuckRock
from muckrock.accounts import views as account_views
from muckrock.agency.urls import agency_url
from muckrock.agency.views import AgencyAutocomplete
from muckrock.foiamachine import views
from muckrock.jurisdiction.urls import jur_url
from muckrock.jurisdiction.views import JurisdictionAutocomplete
def handler404(request, exception):
"""404 handler"""
return page_not_found(request, exception, template_name="foiamachine/404.html")
def handler500(request):
"""500 handler"""
return server_error(request, template_name="foiamachine/500.html")
urlpatterns = [
url(r"^$", views.Homepage.as_view(), name="index"),
url(
r"^accounts/signup/$",
RedirectView.as_view(
url=settings.SQUARELET_URL + "/accounts/signup/?intent=foiamachine"
),
name="signup",
),
url(r"^accounts/login/$", views.LoginView.as_view(), name="login"),
url(r"^accounts/logout/$", views.account_logout, name="acct-logout"),
url(r"^accounts/profile/$", views.Profile.as_view(), name="profile"),
url(
r"^foi/create/$",
views.FoiaMachineRequestCreateView.as_view(),
name="foi-create",
),
url(
r"^foi/(?P<slug>[\w-]+)-(?P<pk>\d+)/$",
views.FoiaMachineRequestDetailView.as_view(),
name="foi-detail",
),
url(
r"^foi/(?P<slug>[\w-]+)-(?P<pk>\d+)/update/$",
views.FoiaMachineRequestUpdateView.as_view(),
name="foi-update",
),
url(
r"^foi/(?P<slug>[\w-]+)-(?P<pk>\d+)/delete/$",
views.FoiaMachineRequestDeleteView.as_view(),
name="foi-delete",
),
url(
r"^foi/(?P<slug>[\w-]+)-(?P<pk>\d+)/share/$",
views.FoiaMachineRequestShareView.as_view(),
name="foi-share",
),
url(
r"^foi/(?P<foi_slug>[\w-]+)-(?P<foi_pk>\d+)/comms/create/$",
views.FoiaMachineCommunicationCreateView.as_view(),
name="comm-create",
),
url(
r"^foi/(?P<foi_slug>[\w-]+)-(?P<foi_pk>\d+)/comms/(?P<pk>\d+)/update/$",
views.FoiaMachineCommunicationUpdateView.as_view(),
name="comm-update",
),
url(
r"^foi/(?P<foi_slug>[\w-]+)-(?P<foi_pk>\d+)/comms/(?P<pk>\d+)/delete/$",
views.FoiaMachineCommunicationDeleteView.as_view(),
name="comm-delete",
),
url(r"^agency/%s/$" % agency_url, views.agency_detail, name="agency-detail"),
url(
r"^jurisdiction/%s/$" % jur_url,
views.jurisdiction_detail,
name="jurisdiction-detail",
),
url(
r"^agency-autocomplete/$",
AgencyAutocomplete.as_view(),
name="agency-autocomplete",
),
url(
r"^jurisdiction-autocomplete/$",
JurisdictionAutocomplete.as_view(),
name="jurisdiction-autocomplete",
),
url(r"^__debug__/", include(debug_toolbar.urls)),
url(r"^accounts/", include("social_django.urls", namespace="social")),
url(r"^rp_iframe/$", account_views.rp_iframe, name="acct-rp-iframe"),
]
if settings.DEBUG:
urlpatterns += [
url(r"^media/(?P<path>.*)$", serve, {"document_root": settings.MEDIA_ROOT}),
url(r"^500/$", TemplateView.as_view(template_name="foiamachine/500.html")),
url(r"^404/$", TemplateView.as_view(template_name="foiamachine/404.html")),
]
| agpl-3.0 | -3,732,254,894,878,999,000 | 31.294643 | 84 | 0.614598 | false | 3.273303 | false | false | false |
epeios-q37/epeios | other/exercises/Hangman/fr/k.py | 1 | 1586 | # coding: utf-8
import sys
sys.path.append(".")
from workshop.fr.k import *
DIVULGUER_MOT_SECRET = VRAI
def choisirMot(*args):
return workshop.rfPickWord(*args)
def lettreEstDansMot(*args):
return workshop.rfIsLetterInWord(*args)
def donnerMasque(*args):
return workshop.rfGetMask(*args)
def majCorps(*args):
return workshop.rfUpdateBody(*args)
"""
Adds handling of the secret word to guess ('motSecret').
"""
class Pendu:
def raz(self,suggestion,motAuHasard):
self.motSecret = choisirMot(suggestion,motAuHasard)
self.bonnesPioches = ""
self.nombreErreurs = 0
def __init__(self):
self.motSecret = ""
self.bonnesPioches = ""
self.nombreErreurs = 0
def traiterEtTesterPioche(self,pioche):
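		# Returns VRAI (True) if the guessed letter is in the secret word; otherwise counts an error and returns FAUX (False).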
if lettreEstDansMot(pioche,self.motSecret):
if not lettreEstDansMot(pioche,self.bonnesPioches):
self.bonnesPioches += pioche
return VRAI
else:
self.nombreErreurs += 1
return FAUX
"""
Uses the secret word to guess stored in 'pendu' ('pendu.motSecret').
Reveals the secret word or not, depending on configuration.
"""
def raz(pendu,suggestion,motAuHasard):
pendu.raz(suggestion,motAuHasard)
print(pendu.motSecret)
afficher(donnerMasque(pendu.motSecret,""))
if DIVULGUER_MOT_SECRET:
divulguerMotSecret( pendu.motSecret )
def traiterPioche(pendu,pioche):
if pendu.traiterEtTesterPioche(pioche):
afficher(donnerMasque(pendu.motSecret,pendu.bonnesPioches))
else:
majCorps(pendu.nombreErreurs)
go(globals())
| agpl-3.0 | 6,059,636,515,371,799,000 | 20.927536 | 69 | 0.684576 | false | 2.775439 | false | false | false |
gorantornqvist/nagios-plugins | check_freenas.py | 1 | 5941 | #!/usr/bin/env python
# The MIT License (MIT)
# Copyright (c) 2015 Goran Tornqvist
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ******************************************************************************
# check_freenas.py - Simple script for monitoring freenas status and replication
# PS: you may want to check out this script as well:
# https://github.com/Doctor-love/check_freenas_api/blob/master/check_freenas_api.py
#
#
# Tip: To ignore capacity warnings which are set quite low, change these rows in check_alerts():
# if alert['level'] != 'OK':
# if alert['message'].find('capacity for the volume') == -1:
# errors = errors + 1
#
# Troubleshooting: If you receive an error from the script, make sure you can access the api of your freenas using a web browser.
# Example: http://freenas/api/v1.0/system/alert/?format=json (login: root)
import argparse
import json
import sys
import string
import requests
class Startup(object):
def __init__(self, hostname, user, secret):
self._hostname = hostname
self._user = user
self._secret = secret
self._ep = 'http://%s/api/v1.0' % hostname
def request(self, resource, method='GET', data=None):
if data is None:
data = ''
try:
r = requests.request(
method,
'%s/%s/' % (self._ep, resource),
data=json.dumps(data),
headers={'Content-Type': "application/json"},
auth=(self._user, self._secret),
)
except:
print 'UNKNOWN - Error when contacting freenas server: ' + str(sys.exc_info())
sys.exit(3)
if r.ok:
try:
return r.json()
except:
print 'UNKNOWN - Error when contacting freenas server: ' + str(sys.exc_info())
sys.exit(3)
def check_repl(self):
repls = self.request('storage/replication')
errors=0
msg=''
try:
for repl in repls:
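                # Treat anything other than Succeeded, 'Sending...', 'Up to date' or None as a failed replication task.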
if repl['repl_status'] != 'Succeeded' and repl['repl_status'] != None and repl['repl_status'][:7] != 'Sending' and repl['repl_status'] != 'Up to date':
errors = errors + 1
msg = msg + repl['repl_zfs'] + ' [' + repl['repl_status'] + '] ' ;
except:
print 'UNKNOWN - Error when contacting freenas server: ' + str(sys.exc_info())
sys.exit(3)
if errors > 0:
print 'WARNING - ' + msg.strip() + '. Go to Storage > Replication Tasks > View Replication Tasks in FreeNAS for more details.'
sys.exit(1)
else:
print 'OK - No replication errors'
sys.exit(0)
def check_alerts(self):
alerts = self.request('system/alert')
warn=0
crit=0
msg=''
try:
for alert in alerts:
if alert['dismissed'] == False:
if alert['level'] == 'CRIT':
crit = crit + 1
msg = msg + '- (C) ' + string.replace(alert['message'], '\n', '. ') + ' '
elif alert['level'] == 'WARN':
warn = warn + 1
msg = msg + '- (W) ' + string.replace(alert['message'], '\n', '. ') + ' '
except:
print 'UNKNOWN - Error when contacting freenas server: ' + str(sys.exc_info())
sys.exit(3)
if crit > 0:
print 'CRITICAL ' + msg
sys.exit(2)
elif warn > 0:
print 'WARNING ' + msg
sys.exit(1)
else:
print 'OK - No problem alerts'
sys.exit(0)
def check_updates(self):
updates = self.request('system/update/check')
if not updates:
print 'OK - No pending updates.'
sys.exit(0)
else:
print 'WARNING - There are pending updates. Go to System > Update to apply pending updates.'
sys.exit(1)
def main():
parser = argparse.ArgumentParser(description='Checks a freenas server using the API')
parser.add_argument('-H', '--hostname', required=True, type=str, help='Hostname or IP address')
parser.add_argument('-u', '--user', required=True, type=str, help='Normally only root works')
parser.add_argument('-p', '--passwd', required=True, type=str, help='Password')
parser.add_argument('-t', '--type', required=True, type=str, help='Type of check, either repl, alerts or updates')
args = parser.parse_args(sys.argv[1:])
startup = Startup(args.hostname, args.user, args.passwd)
if args.type == 'alerts':
startup.check_alerts()
elif args.type == 'repl':
startup.check_repl()
elif args.type == 'updates':
startup.check_updates()
else:
print "Unknown type: " + args.type
sys.exit(3)
if __name__ == '__main__':
main()
| mit | 2,421,966,631,558,977,000 | 37.083333 | 167 | 0.577176 | false | 3.979236 | false | false | false |
CityGrid/arsenal | server/arsenalweb/models/nodes.py | 1 | 7921 | '''Arsenal nodes DB Model'''
# Copyright 2015 CityGrid Media, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from sqlalchemy import (
Column,
ForeignKey,
Integer,
TIMESTAMP,
Text,
)
from sqlalchemy.orm import relationship
from sqlalchemy.orm import backref
from arsenalweb.models.common import (
Base,
BaseAudit,
check_null_dict,
check_null_string,
get_name_id_dict,
get_name_id_list,
hypervisor_vm_assignments,
jsonify,
)
LOG = logging.getLogger(__name__)
class Node(Base):
'''Arsenal Node object.'''
__tablename__ = 'nodes'
id = Column(Integer, primary_key=True, nullable=False)
name = Column(Text, nullable=False)
unique_id = Column(Text, nullable=False)
status_id = Column(Integer, ForeignKey('statuses.id'), nullable=False)
hardware_profile_id = Column(Integer, ForeignKey('hardware_profiles.id'), nullable=False)
operating_system_id = Column(Integer, ForeignKey('operating_systems.id'), nullable=False)
ec2_id = Column(Integer, ForeignKey('ec2_instances.id'))
data_center_id = Column(Integer, ForeignKey('data_centers.id'))
uptime = Column(Text, nullable=False)
serial_number = Column(Text, ForeignKey('physical_devices.serial_number'))
processor_count = Column(Integer)
last_registered = Column(TIMESTAMP)
created = Column(TIMESTAMP, nullable=False)
updated = Column(TIMESTAMP, nullable=False)
updated_by = Column(Text, nullable=False)
status = relationship('Status', backref='nodes', lazy='joined')
hardware_profile = relationship('HardwareProfile', backref=backref('nodes'), lazy='joined')
operating_system = relationship('OperatingSystem', backref=backref('nodes'), lazy='joined')
ec2_instance = relationship('Ec2Instance', backref=backref('nodes'), lazy='joined')
data_center = relationship('DataCenter', backref=backref('nodes'), lazy='joined')
physical_device = relationship('PhysicalDevice',
backref=backref('nodes'),
lazy='joined',
foreign_keys=[serial_number])
node_groups = relationship('NodeGroup',
secondary='node_group_assignments',
backref='nodes',
lazy='dynamic')
tags = relationship('Tag',
secondary='tag_node_assignments',
backref='nodes',
lazy='dynamic')
network_interfaces = relationship('NetworkInterface',
secondary='network_interface_assignments',
backref='nodes',
lazy='dynamic')
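    # Self-referential many-to-many: links guest VM nodes to the hypervisor node they run on (exposed as 'guest_vms' on the hypervisor side).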
hypervisor = relationship('Node',
secondary='hypervisor_vm_assignments',
primaryjoin=hypervisor_vm_assignments.c.hypervisor_id == id,
secondaryjoin=hypervisor_vm_assignments.c.guest_vm_id == id,
backref='guest_vms',
lazy='dynamic')
def __json__(self, request):
try:
fields = request.params['fields']
if fields == 'all':
# Everything.
all_fields = dict(
id=self.id,
name=self.name,
unique_id=self.unique_id,
status=get_name_id_dict([self.status]),
hardware_profile=get_name_id_dict([self.hardware_profile]),
operating_system=get_name_id_dict([self.operating_system]),
ec2_instance=check_null_dict(self.ec2_instance),
data_center=get_name_id_dict([self.data_center]),
uptime=check_null_string(self.uptime),
serial_number=check_null_string(self.serial_number),
processor_count=check_null_string(self.processor_count),
node_groups=get_name_id_list(self.node_groups),
tags=get_name_id_list(self.tags, extra_keys=['value']),
network_interfaces=get_name_id_list(self.network_interfaces,
extra_keys=[
'unique_id',
]),
guest_vms=get_name_id_list(self.guest_vms),
hypervisor=get_name_id_list(self.hypervisor),
physical_device=self.physical_device,
last_registered=self.last_registered,
created=self.created,
updated=self.updated,
updated_by=self.updated_by,
)
return jsonify(all_fields)
else:
# Always return name id and unique_id, then return whatever additional fields
# are asked for.
resp = get_name_id_dict([self], extra_keys=['unique_id'])
my_fields = fields.split(',')
# Backrefs are not in the instance dict, so we handle them here.
if 'node_groups' in my_fields:
resp['node_groups'] = get_name_id_list(self.node_groups)
if 'hypervisor' in my_fields:
resp['hypervisor'] = get_name_id_list(self.hypervisor)
if 'guest_vms' in my_fields:
my_guest_vms = get_name_id_list(self.guest_vms)
if my_guest_vms:
resp['guest_vms'] = my_guest_vms
# Need this so we don't return an empty list of guest_vms
# for each guest vm.
else:
del resp['guest_vms']
if 'tags' in my_fields:
resp['tags'] = get_name_id_list(self.tags,
extra_keys=['value'])
if 'network_interfaces' in my_fields:
resp['network_interfaces'] = get_name_id_list(self.network_interfaces,
extra_keys=[
'unique_id',
'ip_address',
'bond_master',
'port_description',
'port_number',
'port_switch',
'port_vlan',
])
resp.update((key, getattr(self, key)) for key in my_fields if
key in self.__dict__)
return jsonify(resp)
# Default to returning only name, id, and unique_id.
except KeyError:
resp = get_name_id_dict([self], extra_keys=['unique_id'])
return resp
class NodeAudit(BaseAudit):
'''Arsenal NodeAudit object.'''
__tablename__ = 'nodes_audit'
| apache-2.0 | -1,277,032,278,955,777,500 | 45.052326 | 95 | 0.505239 | false | 4.871464 | false | false | false |
shashankasharma/commons-csv | analysis/sec_analysis.py | 1 | 2470 | from os import walk
import sys, os, fnmatch, re
mypath = ''
if len(sys.argv)==2 and os.path.exists(sys.argv[1]):
mypath = sys.argv[1]
else:
mypath = os.getcwd()
filelist = []
keyfilelist = []
opbufferinit = '\nRunning security analysis:'
opbuffer = ''
for (dirpath, dirnames, filenames) in walk(mypath):
for filename in fnmatch.filter(filenames, '*.c'):
filelist.append(os.path.join(dirpath,filename))
for filename in fnmatch.filter(filenames, '*.cpp'):
filelist.append(os.path.join(dirpath,filename))
for filename in fnmatch.filter(filenames, '*.java'):
filelist.append(os.path.join(dirpath,filename))
for filename in fnmatch.filter(filenames, '*.json'):
filelist.append(os.path.join(dirpath,filename))
for filename in fnmatch.filter(filenames, '*.key'):
keyfilelist.append(os.path.join(dirpath,filename))
for filename in fnmatch.filter(filenames, '*.pem'):
keyfilelist.append(os.path.join(dirpath,filename))
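# Matches 64-character upper-case alphanumeric tokens; judging by the variable name, this targets DigitalOcean-style API keys.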
doregex = re.compile('([A-Z0-9]{64})[\s\'\"\;\)\]\}]*$')
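# Matches general alphanumeric candidate strings; hits are filtered below to the 40-character length of an AWS secret access key.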
awsregex = re.compile('([A-Za-z\/]*[0-9][a-zA-Z0-9\/]+)[\s\'\"\;\)\]\}]*$')
for filename in filelist:
filetype = filename.split('.')[-1]
linenum = 0
with open(filename) as f:
for eachline in f:
linenum+=1
eachline = eachline.lstrip().rstrip()
if len(doregex.findall(eachline)):
opbuffer+='\n\n' + 'Filename: {}\nLine number: {}'.format(filename, linenum)
break
elif len(awsregex.findall(eachline)):
flag = False
for eachtoken in awsregex.findall(eachline):
if len(eachtoken) == 40:
opbuffer+='\n\n' + 'Filename: {}\nLine number: {}'.format(filename, linenum)
flag = True
break
if flag:
break
if len(keyfilelist):
opbuffer+="\n\nFound files with security keys."
for eachfile in keyfilelist:
opbuffer+='\n' + 'Filename: {}'.format(eachfile)
opbuffer+="\n\nPlease remove these files before pushing changes."
with open(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])),'secanalysis.result'),'w') as opfile:
opfile.write(opbufferinit)
if len(opbuffer) or len(keyfilelist):
opbuffer+='\n\nSTATUS: FAILURE'
else:
opbuffer+='\n\nSTATUS: SUCCESS'
opfile.write(opbuffer)
print opbufferinit + opbuffer + '\n'
| apache-2.0 | -3,758,016,491,021,055,500 | 38.83871 | 106 | 0.601215 | false | 3.57453 | false | false | false |
IQSS/gentb-site | R/Neural_Network/program/Load_Data_EMB.py | 2 | 1128 | # -*- coding: utf-8 -*-
# Read raw data
# Author: Jimmy Royer
# [email protected]
# June 20, 2016
import pandas as pd
# Training Sample -- All the Mutations
data = pd.read_csv("./input/emb.csv")
# Create target variable
data['y'] = (data['dr'] == "r") * 1
data.drop('dr', axis=1, inplace=True)
# List of Features to Keep in the Analysis
features = [var for var in data.columns if var != "y"]
# List subset of Features
features_small = ["SNP_CN_4247429_A916G_M306V_embB", "SNP_CN_4247431_G918A_M306I_embB", "SNP_CN_4247431_G918C_M306I_embB", "SNP_CN_4247730_G1217C_G406A_embB",
"SNP_CN_4248003_A1490G_Q497R_embB", "SNP_CN_4249518_A3005G_H1002R_embB", "SNP_CN_409569_G208A_A70T_iniB", "SNP_CN_4247729_G1216A_G406S_embB",
"SNP_CN_4247431_G918T_M306I_embB", "SNP_CN_4247429_A916C_M306L_embB", "SNP_P_4243222_C11A_promoter_embA.embB", "SNP_CN_4247574_A1061C_D354A_embB",
"SNP_CN_4247495_G982T_D328Y_embB", "SNP_CN_4249583_G3070A_D1024N_embB", "SNP_CN_4243392_A160G_N54D_embA", "SNP_P_4243225_C8T_promoter_embA.embB",
"SNP_CN_4242182_G2320T_A774S_embC", "SNP_CN_4247729_G1216T_G406C_embB"] | agpl-3.0 | -4,881,976,249,242,843,000 | 46.041667 | 159 | 0.697695 | false | 2.017889 | false | false | false |
radiocosmology/alpenhorn | alpenhorn/auto_import.py | 1 | 12008 | """Routines for the importing of new files on a node."""
import logging
import os
import time
import peewee as pw
from watchdog.events import FileSystemEventHandler
from watchdog.observers.polling import PollingObserver
from . import acquisition as ac
from . import archive as ar
from . import config, db, util
log = logging.getLogger(__name__)
def import_file(node, file_path):
done = False
while not done:
try:
_import_file(node, file_path)
done = True
except pw.OperationalError as e:
log.exception(e)
log.error(
"MySQL connexion dropped. Will attempt to reconnect in " "five seconds."
)
time.sleep(5)
# TODO: handle reconnection
db.database_proxy.connect()
def in_directory(file, directory):
"""Test if file is contained within the directory. Does not check existence."""
directory = os.path.join(directory, "")
# return true, if the common prefix of both is equal to directory
# e.g. /a/b/c/d.rst and directory is /a/b, the common prefix is /a/b
return os.path.commonprefix([file, directory]) == directory
def _import_file(node, file_path):
"""Import a file into the DB.
This routine adds the following to the database, if they do not already exist
(or might be corrupted).
- The acquisition that the file is a part of.
- Information on the acquisition, if it is of type "corr".
- The file.
- Information on the file, if it is of type "corr".
- Indicates that the file exists on this node.
Parameters
----------
node : storage.StorageNode
The node we are processing.
file_path : string
        Path of file on the node to import. If it is an absolute path it must
        be within the node root, otherwise it is assumed to be relative to
the node root.
"""
log.debug('Considering "%s" for import.', file_path)
# Occasionally the watchdog sends events on the node root directory itself. Skip these.
if file_path == node.root:
log.debug('Skipping import request on the node root itself "%s"', node.root)
return
# Ensure the path is an absolute path within the node
if os.path.isabs(file_path):
if not in_directory(file_path, node.root):
log.error(
'File "%s" was not an absolute path within the node "%s"',
file_path,
node.root,
)
return
else:
file_path = os.path.join(node.root, file_path)
abspath = os.path.normpath(file_path)
# Skip requests to import a directory. Again these are occasionally sent by the watchdog
if os.path.isdir(file_path):
log.debug('Path to import "%s" is a directory. Skipping...', file_path)
return
relpath = os.path.relpath(abspath, node.root)
# Skip the file if there is still a lock on it.
dir_name, base_name = os.path.split(abspath)
if os.path.isfile(os.path.join(dir_name, ".%s.lock" % base_name)):
log.debug('Skipping "%s", which is locked by ch_master.py.', file_path)
return
# Check if we can handle this acquisition, and skip if we can't
acq_type_name = ac.AcqType.detect(relpath, node)
if acq_type_name is None:
log.info('Skipping non-acquisition path "%s".', file_path)
return
# Figure out which acquisition this is; add if necessary.
acq_type, acq_name = acq_type_name
try:
acq = ac.ArchiveAcq.get(ac.ArchiveAcq.name == acq_name)
log.debug('Acquisition "%s" already in DB. Skipping.', acq_name)
except pw.DoesNotExist:
acq = add_acq(acq_type, acq_name, node)
log.info('Acquisition "%s" added to DB.', acq_name)
# What kind of file do we have?
file_name = os.path.relpath(relpath, acq_name)
ftype = ac.FileType.detect(file_name, acq, node)
if ftype is None:
log.info('Skipping unrecognised file "%s/%s".', acq_name, file_name)
return
# Add the file, if necessary.
try:
file_ = ac.ArchiveFile.get(
ac.ArchiveFile.name == file_name, ac.ArchiveFile.acq == acq
)
log.debug('File "%s/%s" already in DB. Skipping.', acq_name, file_name)
except pw.DoesNotExist:
log.debug('Computing md5sum of "%s".', file_name)
md5sum = util.md5sum_file(abspath, cmd_line=False)
size_b = os.path.getsize(abspath)
done = False
while not done:
try:
with db.database_proxy.atomic():
file_ = ac.ArchiveFile.create(
acq=acq,
type=ftype,
name=file_name,
size_b=size_b,
md5sum=md5sum,
)
ftype.file_info.new(file_, node)
done = True
except pw.OperationalError as e:
log.exception(e)
log.error(
"MySQL connexion dropped. Will attempt to reconnect in "
"five seconds."
)
time.sleep(5)
# TODO: re-implement
# di.connect_database(True)
log.info('File "%s/%s" added to DB.', acq_name, file_name)
    # Register the copy of the file here on the collection server if (1) it
    # does not exist, or (2) there has previously been a copy here, in which
    # case ensure it is checksummed to protect the archive's integrity.
if not file_.copies.where(ar.ArchiveFileCopy.node == node).count():
copy_size_b = os.stat(abspath).st_blocks * 512
copy = ar.ArchiveFileCopy.create(
file=file_, node=node, has_file="Y", wants_file="Y", size_b=copy_size_b
)
log.info('Registered file copy "%s/%s" to DB.', acq_name, file_name)
else:
# Mark any previous copies as not being present...
query = ar.ArchiveFileCopy.update(has_file="N").where(
ar.ArchiveFileCopy.file == file_, ar.ArchiveFileCopy.node == node
)
query.execute()
# ... then take the latest and mark it with has_file=M to force it to be
# checked.
copy = (
ar.ArchiveFileCopy.select()
.where(ar.ArchiveFileCopy.file == file_, ar.ArchiveFileCopy.node == node)
.order_by(ar.ArchiveFileCopy.id)
.get()
)
copy.has_file = "M"
copy.wants_file = "Y"
copy.save()
# TODO: imported files caching
# if import_done is not None:
# bisect.insort_left(import_done, file_path)
# with open(LOCAL_IMPORT_RECORD, "w") as fp:
# fp.write("\n".join(import_done))
# Routines for registering files, acquisitions, copies and info in the DB.
# ========================================================================
def add_acq(acq_type, name, node, comment=""):
"""Add an aquisition to the database.
This looks for an appropriate acquisition type, and if successful creates
the ArchiveAcq and AcqInfo entries for the acquisition.
Parameters
----------
acq_type : AcqType
Type of the acquisition
name : string
Name of the acquisition directory.
node : StorageNode
Node that the acquisition is on.
comment : string, optional
An optional comment.
Returns
-------
acq : ArchiveAcq
The ArchiveAcq entry.
acqinfo : AcqInfoBase
The AcqInfo entry.
"""
# Is the acquisition already in the database?
if ac.ArchiveAcq.select(ac.ArchiveAcq.id).where(ac.ArchiveAcq.name == name).count():
raise AlreadyExists('Acquisition "%s" already exists in DB.' % name)
# Create the ArchiveAcq entry and the AcqInfo entry for the acquisition. Run
# in a transaction so we don't end up with inconsistency.
with db.database_proxy.atomic():
# Insert the archive record
acq = ac.ArchiveAcq.create(name=name, type=acq_type, comment=comment)
# Generate the metadata table
acq_type.acq_info.new(acq, node)
return acq
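# Illustrative sketch (not part of the original file): add_acq() raises
# AlreadyExists (defined below) when the acquisition is already registered, so
# a caller that may race with other importers would typically guard it:
#
#   # try:
#   #     acq = add_acq(acq_type, acq_name, node)
#   # except AlreadyExists:
#   #     acq = ac.ArchiveAcq.get(ac.ArchiveAcq.name == acq_name)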
# Exceptions
# ==========
class Validation(Exception):
"""Raise when validation of a name or field fails."""
class DataBaseError(Exception):
"""Raise when there is some internal inconsistency with the database."""
class AlreadyExists(Exception):
"""Raise when a record already exists in the database."""
class DataFlagged(Exception):
"""Raised when data is affected by a global flag."""
# Watchdog stuff
# ==============
class RegisterFile(FileSystemEventHandler):
def __init__(self, node):
log.info('Registering node "%s" for auto_import watchdog.', node.name)
self.node = node
self.root = node.root
if self.root[-1] == "/":
self.root = self.root[0:-1]
super(RegisterFile, self).__init__()
def on_created(self, event):
import_file(self.node, event.src_path)
return
def on_modified(self, event):
import_file(self.node, event.src_path)
return
def on_moved(self, event):
import_file(self.node, event.src_path)
return
def on_deleted(self, event):
# For lockfiles: ensure that the file that was locked is added: it is
# possible that the watchdog notices that a file has been closed before the
# lockfile is deleted.
dirname, basename = os.path.split(event.src_path)
if basename[0] == "." and basename[-5:] == ".lock":
basename = basename[1:-5]
import_file(self.node, os.path.join(dirname, basename))
# Routines to control the filesystem watchdogs.
# =============================================
obs_list = None
def setup_observers(node_list):
"""Setup the watchdogs to look for new files in the nodes."""
global obs_list
# If any node has auto_import set, look for new files and add them to the
# DB. Then set up a watchdog for it.
obs_list = []
for node in node_list:
if node.auto_import:
# TODO: Normal observers don't work via NFS so we use the polling
# observer, however, we could try and detect this and switch back
obs_list.append(
PollingObserver(
timeout=config.config["service"]["auto_import_interval"]
)
)
obs_list[-1].schedule(RegisterFile(node), node.root, recursive=True)
else:
obs_list.append(None)
# Start up the watchdog threads
for obs in obs_list:
if obs:
obs.start()
def catchup(node_list):
"""Traverse the node directory for new files and importem"""
for node in node_list:
if node.auto_import:
# Get list of all files that exist on the node
q = (
ar.ArchiveFileCopy.select(ac.ArchiveFile.name, ac.ArchiveAcq.name)
.where(
ar.ArchiveFileCopy.node == node, ar.ArchiveFileCopy.has_file == "Y"
)
.join(ac.ArchiveFile)
.join(ac.ArchiveAcq)
)
# q yields (ArchiveFile.name, ArchiveAcq.name) tuples; build node-relative "acq/file" paths.
already_imported_files = [os.path.join(acq, f) for f, acq in q.tuples()]
log.info('Crawling base directory "%s" for new files.', node.root)
for dirpath, d, f_list in os.walk(node.root):
log.info('Crawling "%s".', dirpath)
for file_name in sorted(f_list):
# Compare the node-relative "acq/file" path, not the bare file name,
# since already_imported_files stores joined paths.
rel_name = os.path.relpath(os.path.join(dirpath, file_name), node.root)
if rel_name in already_imported_files:
log.debug('Skipping already-registered file "%s".', rel_name)
else:
import_file(node, os.path.join(dirpath, file_name))
def stop_observers():
"""Stop watchidog threads."""
for obs in obs_list:
if obs:
obs.stop()
def join_observers():
"""Wait for watchdog threads to terminate."""
for obs in obs_list:
if obs:
obs.join()
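# Illustrative lifecycle sketch (not part of the original file): a service using
# these helpers would typically start the watchdogs, catch up on files that
# appeared while it was down, and stop cleanly on shutdown. node_list is assumed
# to be an iterable of StorageNode rows for this host.
#
#   # setup_observers(node_list)
#   # catchup(node_list)
#   # ...                      # main service loop
#   # stop_observers()
#   # join_observers()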
| mit | 535,321,243,543,484,740 | 31.719346 | 92 | 0.587692 | false | 3.879806 | false | false | false |
yukezhu/visual7w-qa-models | prepare_dataset.py | 1 | 15804 | import os
import sys
import json
import argparse
from random import shuffle, seed
import string
# non-standard dependencies:
import h5py
import numpy as np
import skimage.io
def prepro_question_answer(imgs):
'''
tokenize all questions, answers and multiple choices
in the dataset. all punctuations are removed.
'''
# preprocess all the questions and answers
print 'example processed tokens:'
for i,img in enumerate(imgs):
img['processed_question_tokens'] = []
img['processed_answer_tokens'] = []
img['processed_mc_tokens'] = []
for j, qa_pair in enumerate(img['qa_pairs']):
question_txt = str(qa_pair['question']).lower().translate(None, string.punctuation).strip().split()
img['processed_question_tokens'].append(question_txt)
answer_txt = str(qa_pair['answer']).lower().translate(None, string.punctuation).strip().split()
img['processed_answer_tokens'].append(answer_txt)
processed_mc_tokens = []
if 'multiple_choices' in qa_pair:
for mc in qa_pair['multiple_choices']:
mc_txt = str(mc).lower().translate(None, string.punctuation).strip().split()
processed_mc_tokens.append(mc_txt)
img['processed_mc_tokens'].append(processed_mc_tokens)
if i < 10 and j == 0: print question_txt, answer_txt
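# Illustrative sketch (not part of the original file): prepro_question_answer()
# mutates each image record in place. The minimal input shape it assumes looks
# roughly like this (field values are hypothetical):
#
#   # imgs = [{'image_id': 123, 'split': 'train',
#   #          'qa_pairs': [{'qa_id': 1,
#   #                        'question': 'What is shown?',
#   #                        'answer': 'A dog.',
#   #                        'multiple_choices': ['A cat.', 'A car.', 'A tree.']}]}]
#   # prepro_question_answer(imgs)
#   # each record gains 'processed_question_tokens', 'processed_answer_tokens'
#   # and 'processed_mc_tokens'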
def build_vocab(imgs, params):
'''
we build a word vocabulary from the questions and answers.
rare words with frequency lower than a threshold are replaced
by a special token UNK (last token in the vocabulary).
'''
count_thr = params['word_count_threshold']
# count up the number of words
counts = {}
for img in imgs:
if img['split'] in ['train', 'val']: # test set shouldn't be used for building vocab
for txt in img['processed_question_tokens']:
for w in txt: counts[w] = counts.get(w, 0) + 1
for txt in img['processed_answer_tokens']:
for w in txt: counts[w] = counts.get(w, 0) + 1
cw = sorted([(count,w) for w,count in counts.iteritems()], reverse=True)
print 'top words and their counts:'
print '\n'.join(map(str,cw[:20]))
# print some stats
total_words = sum(counts.itervalues())
print 'total words:', total_words
bad_words = [w for w,n in counts.iteritems() if n <= count_thr]
vocab = [w for w,n in counts.iteritems() if n > count_thr]
bad_count = sum(counts[w] for w in bad_words)
print 'number of bad words: %d/%d = %.2f%%' % (len(bad_words), len(counts), len(bad_words)*100.0/len(counts))
print 'number of words in vocab would be %d' % (len(vocab), )
print 'number of UNKs: %d/%d = %.2f%%' % (bad_count, total_words, bad_count*100.0/total_words)
# lets look at the distribution of lengths as well
sent_lengths = {}
for img in imgs:
for txt in img['processed_question_tokens']:
nw = len(txt)
sent_lengths[nw] = sent_lengths.get(nw, 0) + 1
for txt in img['processed_answer_tokens']:
nw = len(txt)
sent_lengths[nw] = sent_lengths.get(nw, 0) + 1
max_len = max(sent_lengths.keys())
print 'max length sentence in raw data: ', max_len
print 'sentence length distribution (count, number of words):'
sum_len = sum(sent_lengths.values())
for i in xrange(max_len+1):
print '%2d: %10d %f%%' % (i, sent_lengths.get(i,0), sent_lengths.get(i,0)*100.0/sum_len)
# lets now produce the final annotations
# additional special UNK token we will use below to map infrequent words to
print 'inserting the special UNK token'
vocab.append('UNK')
for img in imgs:
img['final_questions'] = []
for txt in img['processed_question_tokens']:
question = [w if counts.get(w,0) > count_thr else 'UNK' for w in txt]
img['final_questions'].append(question)
img['final_answers'] = []
for txt in img['processed_answer_tokens']:
answer = [w if counts.get(w,0) > count_thr else 'UNK' for w in txt]
img['final_answers'].append(answer)
img['final_mcs'] = []
for mc in img['processed_mc_tokens']:
mcs = []
for txt in mc:
mc = [w if counts.get(w,0) > count_thr else 'UNK' for w in txt]
mcs.append(mc)
img['final_mcs'].append(mcs)
return vocab
def encode_question_answer(imgs, params, wtoi):
'''
encode all questions and answers into one large array, which will be 1-indexed.
also produces label_start_ix and label_end_ix which store 1-indexed
and inclusive (Lua-style) pointers to the first and last caption for
each image in the dataset.
'''
max_question_length = params['max_question_length']
max_answer_length = params['max_answer_length']
MC = params['num_multiple_choice']
N = len(imgs) # total number of images
M = sum(len(img['final_answers']) for img in imgs) # total number of QA pairs
assert M == sum(len(img['final_questions']) for img in imgs), \
'error: total numbers of questions and answers don\'t match'
question_label_arrays = []
answer_label_arrays = []
mc_label_arrays = []
question_label_length = np.zeros(M, dtype='uint32')
answer_label_length = np.zeros(M, dtype='uint32')
mc_label_length = np.zeros([M, MC], dtype='uint32')
label_start_ix = np.zeros(N, dtype='uint32') # note: these will be one-indexed
label_end_ix = np.zeros(N, dtype='uint32')
label_id = np.zeros(M, dtype='uint32') # id of the QA pair
question_counter = 0
answer_counter = 0
mc_counter = 0
counter = 1
for i,img in enumerate(imgs):
n = len(img['final_questions'])
assert n > 0, 'error: some image has no QA pairs'
# getting the labels for questions
Li = np.zeros((n, max_question_length), dtype='uint32')
for j,s in enumerate(img['final_questions']):
question_label_length[question_counter] = min(max_question_length, len(s)) # record the length of this sequence
label_id[question_counter] = img['qa_pairs'][j]['qa_id']
question_counter += 1
for k,w in enumerate(s):
if k < max_question_length:
Li[j,k] = wtoi[w]
# note: word indices are 1-indexed, and captions are padded with zeros
question_label_arrays.append(Li)
# getting the labels for answers
Li = np.zeros((n, max_answer_length), dtype='uint32')
for j,s in enumerate(img['final_answers']):
answer_label_length[answer_counter] = min(max_answer_length, len(s)) # record the length of this sequence
assert label_id[answer_counter] == img['qa_pairs'][j]['qa_id'], 'order of answers doesn\'t match order of questions'
answer_counter += 1
for k,w in enumerate(s):
if k < max_answer_length:
Li[j,k] = wtoi[w]
# note: word indices are 1-indexed, and QAs are padded with zeros
answer_label_arrays.append(Li)
# getting the labels for multiple choices
Li = np.zeros((n, MC, max_answer_length), dtype='uint32')
for h,m in enumerate(img['final_mcs']):
# assert len(m) == MC, 'question has %d multiple choices (expected %d)' % (len(m), MC)
for j,s in enumerate(m):
mc_label_length[mc_counter,j] = min(max_answer_length, len(s)) # record the length of this sequence
for k,w in enumerate(s):
if k < max_answer_length:
Li[h,j,k] = wtoi[w]
mc_counter += 1
# note: word indices are 1-indexed, and QAs are padded with zeros
mc_label_arrays.append(Li)
label_start_ix[i] = counter
label_end_ix[i] = counter + n - 1
counter += n
Lq = np.concatenate(question_label_arrays, axis=0)
La = np.concatenate(answer_label_arrays, axis=0)
Lmc = np.concatenate(mc_label_arrays, axis=0) # put all the labels together
assert La.shape[0] == M, 'error: La dimension not matched.'
assert Lq.shape[0] == M, 'error: Lq dimension not matched.'
assert Lmc.shape[0] == M, 'error: Lmc dimension not matched.'
#assert np.all(question_label_length > 0), 'error: some question had no words?'
#assert np.all(answer_label_length > 0), 'error: some answer had no words?'
#assert np.all(mc_label_length > 0), 'error: some multiple choices had no words?'
print 'encoded questions to array of size ', `Lq.shape`
print 'encoded answers to array of size ', `La.shape`
print 'encoded multiple choices to array of size ', `Lmc.shape`
return Lq, La, Lmc, label_start_ix, label_end_ix, question_label_length, answer_label_length, label_id
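# Illustrative sketch (not part of the original file): rows of Lq/La hold
# 1-indexed word ids padded with zeros, so a row can be decoded back to words
# with the itow table built in __main__ below:
#
#   # question_words = [itow[ix] for ix in Lq[0] if ix > 0]
#   # answer_words   = [itow[ix] for ix in La[0] if ix > 0]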
def load_image(filename, color=True):
'''
Load image from file into a numpy array
-color is the flag for whether to load rgb or grayscale image
return img as a 3d tensor (HxWx3)
'''
img_data = skimage.io.imread(filename, as_grey=not color)
img = skimage.img_as_float(img_data).astype(np.float32)
if img.ndim == 2:
img = img[:, :, np.newaxis]
if color: img = np.tile(img, (1, 1, 3))
elif img.shape[2] == 4:
img = img[:, :, :3]
return img
def reduce_along_dim(img, dim, weights, indicies):
'''
Resample the image along dimension dim using the supplied interpolation kernel
-weights are the kernel weights
-indicies are the corresponding pixel index locations
return img resized along dimension dim
'''
other_dim = abs(dim-1)
if other_dim == 0: #resizing image width
weights = np.tile(weights[np.newaxis,:,:,np.newaxis],(img.shape[other_dim],1,1,3))
out_img = img[:,indicies,:]*weights
out_img = np.sum(out_img,axis=2)
else: # resize image height
weights = np.tile(weights[:,:,np.newaxis,np.newaxis],(1,1,img.shape[other_dim],3))
out_img = img[indicies,:,:]*weights
out_img = np.sum(out_img,axis=1)
return out_img
def cubic_spline(x):
'''
Compute the kernel weights
See Keys, "Cubic Convolution Interpolation for Digital Image
Processing," IEEE Transactions on Acoustics, Speech, and Signal
Processing, Vol. ASSP-29, No. 6, December 1981, p. 1155.
'''
absx = np.abs(x)
absx2 = absx**2
absx3 = absx**3
kernel_weight = (1.5*absx3 - 2.5*absx2 + 1) * (absx<=1) + (-0.5*absx3 + 2.5* absx2 - 4*absx + 2) * ((1<absx) & (absx<=2))
return kernel_weight
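# Sanity check (illustrative, not part of the original file): the kernel equals
# 1 at x = 0 and 0 at integer offsets, e.g.
#   # cubic_spline(np.array([0.0, 1.0, 2.0]))  ->  array([1., 0., 0.])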
def contribution(in_dim_len , out_dim_len, scale):
'''
Compute the weights and indices of the input pixels that contribute to the cubic interpolation along one dimension.
output:
weights: array of size OUT_DIM_LEN*kernel_width holding the interpolation weights
indicies: array of size OUT_DIM_LEN*kernel_width holding the contributing pixel indices
note that if an entire column of weights is zero, it gets deleted since those pixels don't contribute to anything
'''
kernel_width = 4
if scale < 1:
kernel_width = 4 / scale
x_out = np.array(range(1,out_dim_len+1))
#project to the input space dimension
u = x_out/scale + 0.5*(1-1/scale)
#position of the left most pixel in each calculation
l = np.floor( u - kernel_width/2)
#maximum number of pixels in each computation
p = int(np.ceil(kernel_width) + 2)
indicies = np.zeros((l.shape[0],p) , dtype = int)
indicies[:,0] = l
for i in range(1,p):
indicies[:,i] = indicies[:,i-1]+1
#compute the weights of the vectors
u = u.reshape((u.shape[0],1))
u = np.repeat(u,p,axis=1)
if scale < 1:
weights = scale*cubic_spline(scale*(indicies-u ))
else:
weights = cubic_spline((indicies-u))
weights_sums = np.sum(weights,1)
weights = weights/ weights_sums[:, np.newaxis]
indicies = indicies - 1
indicies[indicies<0] = 0
indicies[indicies>in_dim_len-1] = in_dim_len-1 #clamping the indicies at the ends
valid_cols = np.all( weights==0 , axis = 0 ) == False #find columns that are not all zeros
indicies = indicies[:,valid_cols]
weights = weights[:,valid_cols]
return weights , indicies
def imresize(img , cropped_width , cropped_height):
'''
Function implementing the default behaviour of MATLAB's imresize:
cubic spline interpolation with antialiasing correction when scaling down the image.
'''
width_scale = float(cropped_width) / img.shape[1]
height_scale = float(cropped_height) / img.shape[0]
order = np.argsort([height_scale , width_scale])
scale = [height_scale , width_scale]
out_dim = [cropped_height , cropped_width]
weights = [0,0]
indicies = [0,0]
for i in range(0, 2):
weights[i] , indicies[i] = contribution(img.shape[ i ],out_dim[i], scale[i])
for i in range(0, len(order)):
img = reduce_along_dim(img , order[i] , weights[order[i]] , indicies[order[i]])
return img
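# Illustrative usage sketch (not part of the original file), mirroring how the
# __main__ block below prepares images; the image path is hypothetical:
#
#   # img = load_image('images/v7w_12345.jpg')   # float HxWx3 in [0, 1]
#   # img = imresize(img, 224, 224)              # -> 224x224x3
#   # img = img.transpose(2, 0, 1)               # -> 3x224x224 for the HDF5 dataset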
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# default arguments work fine with Visual7W
parser.add_argument('--dataset_json', default='visual7w-toolkit/datasets/visual7w-telling/dataset.json', help='input dataset json file')
parser.add_argument('--output_json', default='data/qa_data.json', help='output json file')
parser.add_argument('--output_h5', default='data/qa_data.h5', help='output h5 file')
parser.add_argument('--num_multiple_choice', default=3, type=int, help='number of multiple choices of each question.')
parser.add_argument('--max_question_length', default=15, type=int, help='max length of a question, in number of words. questions longer than this get clipped.')
parser.add_argument('--max_answer_length', default=5, type=int, help='max length of an answer, in number of words. answers longer than this get clipped.')
parser.add_argument('--word_count_threshold', default=5, type=int, help='only words that occur more than this number of times will be put in vocab')
parser.add_argument('--image_dim', default=224, type=int, help='dimension of image after rescale (224 is the input image dimension for VGGNet-16)')
parser.add_argument('--image_path', default='images/v7w_%s.jpg', help='path template based on image id')
args = parser.parse_args()
params = vars(args) # convert to ordinary dict
print 'parsed input parameters:'
print json.dumps(params, indent=2)
dataset = json.load(open(params['dataset_json'], 'r'))
prepro_question_answer(dataset['images'])
# create the vocab
vocab = build_vocab(dataset['images'], params)
itow = {i+1:w for i,w in enumerate(vocab)} # a 1-indexed vocab translation table
wtoi = {w:i+1 for i,w in enumerate(vocab)} # inverse table
image_id = list(set([x['image_id'] for x in dataset['images']]))
# create output json file
out = {}
out['ix_to_word'] = itow # encode the (1-indexed) vocab
out['word_to_ix'] = wtoi
json.dump(out, open(params['output_json'], 'w'))
print 'wrote ', params['output_json']
# encode answers in large arrays, ready to ship to hdf5 file
Lq, La, Lmc, label_start_ix, label_end_ix, question_label_length, answer_label_length, label_id = encode_question_answer(dataset['images'], params, wtoi)
# create output h5 file
f = h5py.File(params['output_h5'], "w")
f.create_dataset("question_label", dtype='uint32', data=Lq)
f.create_dataset("answer_label", dtype='uint32', data=La)
f.create_dataset("mc_label", dtype='uint32', data=Lmc)
f.create_dataset("qa_start_ix", dtype='uint32', data=label_start_ix)
f.create_dataset("qa_end_ix", dtype='uint32', data=label_end_ix)
f.create_dataset("question_label_length", dtype='uint32', data=question_label_length)
f.create_dataset("answer_label_length", dtype='uint32', data=answer_label_length)
f.create_dataset("qa_id", dtype='uint32', data=label_id)
# loading image dataset
print 'start to process images into hdf5'
f.create_dataset("image_id", dtype='uint32', data=image_id)
img_num = len(image_id)
img_dim = params['image_dim']
img_data = f.create_dataset("image_data", (img_num, 3, img_dim, img_dim))
for k, img_id in enumerate(image_id):
img_path = params['image_path'] % str(img_id)
img = load_image(img_path)
img = imresize(img, img_dim, img_dim)
img_data[k] = img.transpose(2, 0, 1)
if k % 500 == 0:
print 'processed %d / %d images' % (k, img_num)
f.close()
print 'wrote ', params['output_h5']
| mit | 2,191,950,445,973,915,600 | 40.699208 | 162 | 0.666477 | false | 3.241181 | false | false | false |
torshid/foodnow | jinja.py | 1 | 4189 | def foodnow():
return "Food — Now !"
def isMobile():
from flask import request
from common import phones
agent = request.headers.get('User-Agent')
return any(phone in agent.lower() for phone in phones)
def fileExists(name):
import os
if name[:1] == '/':
name = name[1:]
return os.path.isfile(os.path.dirname(os.path.abspath(__file__)) + '/' + name)
def checkSessions():
from flask import request, session
if 'mail' in request.cookies and 'password' in request.cookies:
session['mail'] = request.cookies['mail']
session['password'] = request.cookies['password']
if 'mail' in session and 'password' in session:
from tables import users
user = users.getUser(session['mail'], session['password'])
if user:
session['user'] = user
else :
if 'mail' in session: del session['mail']
if 'password' in session: del session['password']
if 'user' in session: del session['user']
return
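# Illustrative sketch (not part of the original file): checkSessions() relies on
# Flask's request/session objects, so it is presumably invoked once per request,
# e.g. from a before_request hook (the app object is assumed):
#
#   # @app.before_request
#   # def load_user():
#   #     checkSessions()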
def dishImageExists(dishid):
from config import dishesthumbspath
return fileExists(dishesthumbspath + str(dishid) + '.png')
def nl2br(value):
import re
from jinja2 import evalcontextfilter, Markup, escape
_paragraph_re = re.compile(r'(?:\r\n|\r|\n){2,}')
result = u'\n\n'.join(u'<p>%s</p>' % p.replace('\n', '<br/>\n') \
for p in _paragraph_re.split(escape(value)))
return Markup(result)
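# Illustrative sketch (not part of the original file): these helpers appear to be
# meant for use inside Jinja templates. One way to expose them, assuming a Flask
# app object, would be:
#
#   # app.jinja_env.globals.update(isLogged=isLogged, getUser=getUser, isMobile=isMobile)
#   # app.jinja_env.filters['nl2br'] = nl2br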
def random(min, max):
from random import randint
return randint(min, max)
def istrue(s):
return s == '1' or s == 1
def isfalse(s):
return not istrue(s)
def isLogged():
from flask import session
return 'user' in session
def getUser():
from flask import session
return session['user']
def getUserEmployments():
from tables import employees
return employees.getUserEmployments(getUser()[0])
def isManager(employee):
from tables import employees
return employees.isManager(employee)
def isWorker(employee):
from tables import employees
return employees.isWorker(employee)
def isDriver(employee):
from tables import employees
return employees.isDriver(employee)
def getRoles():
from common import roles
return roles
def getThumbWidth():
from common import dishthumbsize
return dishthumbsize[0]
def getMenuDishes(menuid):
from tables import dishes
return dishes.getMenuDishes(menuid)
def getRoleTitle(role):
from common import roles
for rol in roles:
if rol[0] == role:
return rol[1]
return 'Unknown'
def panel_for(entity, **data):
from flask.helpers import url_for
return '/'.join(url_for(entity, **data).split('/')[3:]).replace('/', '-')
def getResto(id = None, pseudo = None):
from tables import restos
if id:
return restos.getRestoFromId(id)
else:
return restos.getResto(pseudo)
def getLikedRestos(userId):
from tables import restolikes
return restolikes.getLikedRestos(userId)
def updateProfile(userId, name = None, email = None, password = None):
from entities import user
user.updateProfile(name, email, password)
return
def getMostLikedRestos():
from tables import restos
return restos.getMostLikedRestos()
def getMostLikedDishes():
from tables import dishes
return dishes.getMostLikedDishes()
def getUserFromId(id):
from tables import users
return users.getUserFromId(id)
def getLikedDishes(userId):
from tables import dishlikes
return dishlikes.getLikedDishes(userId)
def getUserLikedDishesId(userId):
from tables import dishlikes
return dishlikes.getUserLikedDishesId(userId)
def getLikedRestos(userId):
from tables import restolikes
return restolikes.getLikedRestos(userId)
def getUserLikedRestosId(userId):
from tables import restolikes
return restolikes.getUserLikedRestosId(userId)
def addReview(userId, restoId, dishId, content):
from entities import reviews
inserted = reviews.addReview(userId, restoId, dishId, content)
return inserted
def getAllReviews(userId):
from tables import reviews
list = reviews.getAllReviews(userId)
return list
| gpl-3.0 | 4,369,039,640,209,388,000 | 26.366013 | 82 | 0.685455 | false | 3.460331 | false | false | false |
joshwalawender/KeckUtilities | telescopeSchedule/queryTelSched.py | 1 | 5013 | #!/usr/bin/env python
'''
Name: queryTelSched.py
Purpose:
Query the telescope database and return the value of `field` for the given
`date` and `tel`. Try to replicate functionality of the old queryTelSched
which was located at: ~kics/instr/bin/queryTelSched (on a summit machine).
This program tries to be backward compatible with the old telescope
schedule database and programs which called it. Some field names have
changed with the new database, so a translation step is included in the
queryTelSched function below. To add additional translations, just add to
the translations dictionary
Example Use:
python queryTelSched.py 2018-12-18 1 Instrument
Arguments:
date: The date for the query in a string with YYYY-MM-DD format.
tel: An int (1 or 2) indicating the telescope.
field: A string with the field to return. For more information on the API
and on what fields are returnable, see the web links below.
Additional Information on the Telescope Schedule API can be found here:
https://www.keck.hawaii.edu/software/db_api/telSchedule.php
Details on the getSchedule command and what it returns can be found here:
https://www.keck.hawaii.edu/software/db_api/telSchedule.php?cmd=getSchedule
Modification history:
2018-12-18 jwalawender Original version (adapted from old version for
old database API).
'''
## Import General Tools
import argparse
import logging
import requests
import json
##-------------------------------------------------------------------------
## Parse Command Line Arguments
##-------------------------------------------------------------------------
## create a parser object for understanding command-line arguments
p = argparse.ArgumentParser(description='''
''')
## add arguments
p.add_argument('date', type=str,
help="Date (HST) in YYYY-MM-DD format.")
p.add_argument('tel', type=int,
help="Telescope number as int (i.e. 1 or 2).")
p.add_argument('field', type=str,
help="Field to query (e.g. Instrument).")
## add flags
p.add_argument("-v", "--verbose", dest="verbose",
default=False, action="store_true",
help="Be verbose! (default = False)")
args = p.parse_args()
##-------------------------------------------------------------------------
## Create logger object
##-------------------------------------------------------------------------
log = logging.getLogger('queryTelSched')
log.setLevel(logging.DEBUG)
LogConsoleHandler = logging.StreamHandler()
if args.verbose is True:
LogConsoleHandler.setLevel(logging.DEBUG)
else:
LogConsoleHandler.setLevel(logging.INFO)
LogFormat = logging.Formatter('%(levelname)9s: %(message)s')
LogConsoleHandler.setFormatter(LogFormat)
log.addHandler(LogConsoleHandler)
##-------------------------------------------------------------------------
## Define some useful functions
##-------------------------------------------------------------------------
def querydb(req):
'''A simple wrapper to form a generic API level query to the telescope
schedule web API. Returns a JSON object with the result of the query.
'''
log.debug('Querying telescope schedule')
url = f"https://www.keck.hawaii.edu/software/db_api/telSchedule.php?{req}"
r = requests.get(url)
return json.loads(r.text)
def get_schedule(date, tel):
'''Use the querydb function and getSchedule of the telescope schedule web
API with arguments for date and telescope number. Returns a JSON object
with the schedule result.
'''
if tel not in [1,2]:
log.error("Telescope number must be 1 or 2.")
return
req = f"cmd=getSchedule&date={date}&telnr={tel}"
result = querydb(req)
log.debug('Got result from schedule database')
return result
##-------------------------------------------------------------------------
## Main Program: queryTelSched
##-------------------------------------------------------------------------
def queryTelSched(date, tel, field):
result = get_schedule(date, tel)
log.debug(f"Found {len(result)} programs")
translations = {'InstrAcc': 'Account',
}
output_list = []
for i,entry in enumerate(sorted(result, key=lambda x: x['StartTime'])):
log.debug(f"Entry {i+1}:")
for key in entry.keys():
log.debug(f" {key:>15s}: {entry[key]}")
try:
output_list.append(entry[field])
except KeyError:
log.error(f'Field "{field}" not found')
if field in translations.keys():
log.debug(f'Trying tranlated key "{translations[field]}"')
output_list.append(entry[translations[field]])
log.warning(f'Please update the script calling for "{field}" '
f'to use "{translations[field]}" instead.')
print('/'.join(output_list))
return output_list
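# Illustrative sketch (not part of the original file): queryTelSched can also be
# called as a library function, mirroring the command-line example in the module
# docstring (assuming this file is importable from the Python path):
#
#   # from queryTelSched import queryTelSched
#   # instruments = queryTelSched('2018-12-18', 1, 'Instrument')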
if __name__ == '__main__':
queryTelSched(args.date, args.tel, args.field)
| bsd-2-clause | 2,792,296,178,803,762,000 | 35.591241 | 78 | 0.595053 | false | 4.198492 | false | false | false |