Dataset schema (column name, type, and observed range):

    filename          stringlengths, 4–198
    content           stringlengths, 25–939k
    environment       sequence
    variablearg       sequence
    constarg          sequence
    variableargjson   stringclasses, 1 value
    constargjson      stringlengths, 2–3.9k
    lang              stringclasses, 3 values
    constargcount     float64, 0–129
    variableargcount  float64, 0–0
    sentence          stringclasses, 1 value
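To make the schema concrete, below is a minimal sketch of one record as a Python dict, reconstructed from the first row that follows (reseval/server/heroku.py). The `row` variable name, the truncated `content` string, and the treatment of the *json columns as JSON-encoded strings are illustrative assumptions; the `sentence` column (a single repeated value not displayed in the rows) is omitted.

```python
import json

# One record, reconstructed from the first row below; the content field
# stores the whole source file as a single string (truncated here).
row = {
    "filename": "reseval/server/heroku.py",
    "content": "import http.client import json import os ...",  # truncated
    "environment": [],
    "variablearg": [],
    "constarg": ["HerokuAccessKey"],
    "variableargjson": "[]",                # assumed JSON-encoded string
    "constargjson": '["HerokuAccessKey"]',  # assumed JSON-encoded string
    "lang": "python",
    "constargcount": 1.0,
    "variableargcount": 0.0,
}

# In the rows shown here, constargjson is simply the JSON form of constarg,
# which names the environment variables the file reads with a literal key
# (e.g. os.environ["HerokuAccessKey"]).
assert json.loads(row["constargjson"]) == row["constarg"]
```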
reseval/server/heroku.py
import http.client import json import os import tarfile import tempfile import time from pathlib import Path import reseval ############################################################################### # Heroku server management ############################################################################### def create(config): """Create a Heroku server""" # Connect to Heroku connection = http.client.HTTPSConnection('api.heroku.com') # Maybe install client client_directory = reseval.CACHE / 'client' if not (client_directory / 'node_modules').exists(): with reseval.chdir(client_directory): reseval.npm.install().wait() # Build client with reseval.chdir(reseval.CACHE / 'client'): reseval.npm.build().wait() # Create a tarball of all files needed by the Heroku server with tempfile.TemporaryDirectory() as directory: tarball = Path(directory) / 'reseval.tar.gz' files = [ 'client/build', 'server', 'package-lock.json', 'package.json', 'Procfile', 'server.ts', 'tsconfig.json'] with tarfile.open(tarball, 'w:gz') as tar: for file in files: tar.add(reseval.CACHE / file) # Upload tarball and get a URL tarball_url = reseval.storage.upload(config['name'], tarball) # Server configuration data = { 'source_blob': {'url': tarball_url}, 'buildpacks': [{ 'url': 'https://github.com/heroku/heroku-buildpack-nodejs', 'name': 'heroku/nodejs'}]} headers = { 'Accept': 'application/vnd.heroku+json; version=3', 'Authorization': f'Bearer {os.environ["HerokuAccessKey"]}'} # Create server name = reseval.load.credentials_by_name(config['name'], 'app')['name'] connection.request( 'POST', f'/apps/{name}/builds', json.dumps(data), headers=headers) # Get response from server response = json.loads(connection.getresponse().read().decode()) # if app doesn't exist, raise error if response['id'] == 'not_found': raise ValueError(f'app name: {config["name"]} does not exist') # Close the connection connection.close() # Wait until server is setup while status(config['name']) == 'pending': time.sleep(3) if status(config['name']) == 'failure': raise ValueError('Heroku server failed to start') # Return application URL return {'URL': f'http://{name}.herokuapp.com/'} def destroy(config, credentials): """Destroy a Heroku server""" reseval.app.heroku.destroy(config) ############################################################################### # Utilities ############################################################################### def status(name): """Get current build status. One of ['succeeded', 'failed', 'pending']""" # Connect to Heroku connection = http.client.HTTPSConnection('api.heroku.com') # Send request reseval.load.api_keys() headers = { 'Accept': 'application/vnd.heroku+json; version=3', 'Authorization': f'Bearer {os.environ["HerokuAccessKey"]}'} unique_name = reseval.load.credentials_by_name(name, 'app')['name'] connection.request('GET', f'/apps/{unique_name}/builds', headers=headers) # Get response data = json.loads(connection.getresponse().read().decode()) # Close connection connection.close() # Get most recent build status from response status = list(map(lambda x: x['status'], data)) return status[-1]
[]
[]
[ "HerokuAccessKey" ]
[]
["HerokuAccessKey"]
python
1
0
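The constarg annotation above corresponds to the literal os.environ lookups in this row's flattened content: both the build request in create() and the status query in status() send the same bearer token. A minimal extract of that pattern, copied from the row's content with the surrounding code removed (it requires HerokuAccessKey to be set in the environment):

```python
import os

# Heroku API requests in create() and status() authenticate with a token
# read from the environment under a constant key, which is what the
# constarg column records for this row.
headers = {
    'Accept': 'application/vnd.heroku+json; version=3',
    'Authorization': f'Bearer {os.environ["HerokuAccessKey"]}'}
```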
python/dglke/models/general_models.py
# -*- coding: utf-8 -*- # # general_models.py # # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Graph Embedding Model 1. TransE 2. TransR 3. RESCAL 4. DistMult 5. ComplEx 6. RotatE 7. SimplE 8. ConvE """ import os import numpy as np import math import dgl.backend as F backend = os.environ.get('DGLBACKEND', 'pytorch') if backend.lower() == 'mxnet': from .mxnet.tensor_models import masked_select from .mxnet.tensor_models import logsigmoid from .mxnet.tensor_models import abs from .mxnet.tensor_models import get_device, get_dev from .mxnet.tensor_models import norm from .mxnet.tensor_models import get_scalar from .mxnet.tensor_models import reshape from .mxnet.tensor_models import cuda from .mxnet.tensor_models import ExternalEmbedding from .mxnet.tensor_models import InferEmbedding from .mxnet.score_fun import * DEFAULT_INFER_BATCHSIZE = 1024 else: from .pytorch.tensor_models import logsigmoid from .pytorch.tensor_models import abs from .pytorch.tensor_models import masked_select from .pytorch.tensor_models import get_device, get_dev from .pytorch.tensor_models import norm from .pytorch.tensor_models import get_scalar from .pytorch.tensor_models import reshape from .pytorch.tensor_models import cuda from .pytorch.tensor_models import ExternalEmbedding from .pytorch.tensor_models import InferEmbedding from .pytorch.score_fun import * from .pytorch.loss import LossGenerator DEFAULT_INFER_BATCHSIZE = 2048 EMB_INIT_EPS = 2.0 class InferModel(object): def __init__(self, device, model_name, hidden_dim, double_entity_emb=False, double_relation_emb=False, gamma=0., batch_size=DEFAULT_INFER_BATCHSIZE): super(InferModel, self).__init__() self.device = device self.model_name = model_name entity_dim = 2 * hidden_dim if double_entity_emb else hidden_dim relation_dim = 2 * hidden_dim if double_relation_emb else hidden_dim self.entity_emb = InferEmbedding(device) self.relation_emb = InferEmbedding(device) self.batch_size = batch_size if model_name == 'TransE' or model_name == 'TransE_l2': self.score_func = TransEScore(gamma, 'l2') elif model_name == 'TransE_l1': self.score_func = TransEScore(gamma, 'l1') elif model_name == 'TransR': assert False, 'Do not support inference of TransR model now.' elif model_name == 'DistMult': self.score_func = DistMultScore() elif model_name == 'ComplEx': self.score_func = ComplExScore() elif model_name == 'RESCAL': self.score_func = RESCALScore(relation_dim, entity_dim) elif model_name == 'RotatE': emb_init = (gamma + EMB_INIT_EPS) / hidden_dim self.score_func = RotatEScore(gamma, emb_init) elif model_name == 'SimplE': self.score_func = SimplEScore() elif model_name == 'ConvE': self.score_func = ConvEScore(entity_dim) def load_emb(self, path, dataset): """Load the model. Parameters ---------- path : str Directory to load the model. dataset : str Dataset name as prefix to the saved embeddings. 
""" self.entity_emb.load(path, dataset+'_'+self.model_name+'_entity') self.relation_emb.load(path, dataset+'_'+self.model_name+'_relation') self.score_func.load(path, dataset+'_'+self.model_name) def score(self, head, rel, tail, triplet_wise=False): head_emb = self.entity_emb(head) rel_emb = self.relation_emb(rel) tail_emb = self.entity_emb(tail) num_head = F.shape(head)[0] num_rel = F.shape(rel)[0] num_tail = F.shape(tail)[0] batch_size = self.batch_size score = [] if triplet_wise: class FakeEdge(object): def __init__(self, head_emb, rel_emb, tail_emb): self._hobj = {} self._robj = {} self._tobj = {} self._hobj['emb'] = head_emb self._robj['emb'] = rel_emb self._tobj['emb'] = tail_emb @property def src(self): return self._hobj @property def dst(self): return self._tobj @property def data(self): return self._robj for i in range((num_head + batch_size - 1) // batch_size): sh_emb = head_emb[i * batch_size : (i + 1) * batch_size \ if (i + 1) * batch_size < num_head \ else num_head] sr_emb = rel_emb[i * batch_size : (i + 1) * batch_size \ if (i + 1) * batch_size < num_head \ else num_head] st_emb = tail_emb[i * batch_size : (i + 1) * batch_size \ if (i + 1) * batch_size < num_head \ else num_head] edata = FakeEdge(sh_emb, sr_emb, st_emb) score.append(F.copy_to(self.score_func.edge_func(edata)['score'], F.cpu())) score = F.cat(score, dim=0) return score else: for i in range((num_head + batch_size - 1) // batch_size): sh_emb = head_emb[i * batch_size : (i + 1) * batch_size \ if (i + 1) * batch_size < num_head \ else num_head] s_score = [] for j in range((num_tail + batch_size - 1) // batch_size): st_emb = tail_emb[j * batch_size : (j + 1) * batch_size \ if (j + 1) * batch_size < num_tail \ else num_tail] s_score.append(F.copy_to(self.score_func.infer(sh_emb, rel_emb, st_emb), F.cpu())) score.append(F.cat(s_score, dim=2)) score = F.cat(score, dim=0) return F.reshape(score, (num_head * num_rel * num_tail,)) @property def num_entity(self): return self.entity_emb.emb.shape[0] @property def num_rel(self): return self.relation_emb.emb.shape[0] class KEModel(object): """ DGL Knowledge Embedding Model. Parameters ---------- args: Global configs. model_name : str Which KG model to use, including 'TransE_l1', 'TransE_l2', 'TransR', 'RESCAL', 'DistMult', 'ComplEx', 'RotatE', 'SimplE', 'ConvE n_entities : int Num of entities. n_relations : int Num of relations. hidden_dim : int Dimension size of embedding. gamma : float Gamma for score function. double_entity_emb : bool If True, entity embedding size will be 2 * hidden_dim. Default: False double_relation_emb : bool If True, relation embedding size will be 2 * hidden_dim. 
Default: False """ def __init__(self, args, model_name, n_entities, n_relations, hidden_dim, gamma, double_entity_emb=False, double_relation_emb=False): super(KEModel, self).__init__() self.args = args self.has_edge_importance = args.has_edge_importance self.n_entities = n_entities self.n_relations = n_relations self.model_name = model_name self.hidden_dim = hidden_dim self.eps = EMB_INIT_EPS self.emb_init = (gamma + self.eps) / hidden_dim entity_dim = 2 * hidden_dim if double_entity_emb else hidden_dim relation_dim = 2 * hidden_dim if double_relation_emb else hidden_dim device = get_device(args) self.loss_gen = LossGenerator(args, args.loss_genre, args.neg_adversarial_sampling, args.adversarial_temperature, args.pairwise) self.entity_emb = ExternalEmbedding(args, n_entities, entity_dim, F.cpu() if args.mix_cpu_gpu else device) # For RESCAL, relation_emb = relation_dim * entity_dim if model_name == 'RESCAL': rel_dim = relation_dim * entity_dim else: rel_dim = relation_dim self.rel_dim = rel_dim self.entity_dim = entity_dim self.strict_rel_part = args.strict_rel_part self.soft_rel_part = args.soft_rel_part if not self.strict_rel_part and not self.soft_rel_part: self.relation_emb = ExternalEmbedding(args, n_relations, rel_dim, F.cpu() if args.mix_cpu_gpu else device) else: self.global_relation_emb = ExternalEmbedding(args, n_relations, rel_dim, F.cpu()) if model_name == 'TransE' or model_name == 'TransE_l2': self.score_func = TransEScore(gamma, 'l2') elif model_name == 'TransE_l1': self.score_func = TransEScore(gamma, 'l1') elif model_name == 'TransR': projection_emb = ExternalEmbedding(args, n_relations, entity_dim * relation_dim, F.cpu() if args.mix_cpu_gpu else device) self.score_func = TransRScore(gamma, projection_emb, relation_dim, entity_dim) elif model_name == 'DistMult': self.score_func = DistMultScore() elif model_name == 'ComplEx': self.score_func = ComplExScore() elif model_name == 'RESCAL': self.score_func = RESCALScore(relation_dim, entity_dim) elif model_name == 'RotatE': self.score_func = RotatEScore(gamma, self.emb_init) elif model_name == 'SimplE': self.score_func = SimplEScore() elif model_name == 'ConvE': self.score_func = ConvEScore(entity_dim) self.model_name = model_name self.head_neg_score = self.score_func.create_neg(True) self.tail_neg_score = self.score_func.create_neg(False) self.head_neg_prepare = self.score_func.create_neg_prepare(True) self.tail_neg_prepare = self.score_func.create_neg_prepare(False) self.reset_parameters() def share_memory(self): """Use torch.tensor.share_memory_() to allow cross process embeddings access. """ self.entity_emb.share_memory() if self.strict_rel_part or self.soft_rel_part: self.global_relation_emb.share_memory() else: self.relation_emb.share_memory() if self.model_name == 'TransR': self.score_func.share_memory() def save_emb(self, path, dataset): """Save the model. Parameters ---------- path : str Directory to save the model. dataset : str Dataset name as prefix to the saved embeddings. """ self.entity_emb.save(path, dataset+'_'+self.model_name+'_entity') if self.strict_rel_part or self.soft_rel_part: self.global_relation_emb.save(path, dataset+'_'+self.model_name+'_relation') else: self.relation_emb.save(path, dataset+'_'+self.model_name+'_relation') self.score_func.save(path, dataset+'_'+self.model_name) def load_emb(self, path, dataset): """Load the model. Parameters ---------- path : str Directory to load the model. dataset : str Dataset name as prefix to the saved embeddings. 
""" self.entity_emb.load(path, dataset+'_'+self.model_name+'_entity') self.relation_emb.load(path, dataset+'_'+self.model_name+'_relation') self.score_func.load(path, dataset+'_'+self.model_name) def reset_parameters(self): """Re-initialize the model. """ self.entity_emb.init(self.emb_init) self.score_func.reset_parameters() if (not self.strict_rel_part) and (not self.soft_rel_part): self.relation_emb.init(self.emb_init) else: self.global_relation_emb.init(self.emb_init) def predict_score(self, g): """Predict the positive score. Parameters ---------- g : DGLGraph Graph holding positive edges. Returns ------- tensor The positive score """ self.score_func(g) return g.edata['score'] def predict_neg_score(self, pos_g, neg_g, to_device=None, gpu_id=-1, trace=False, neg_deg_sample=False): """Calculate the negative score. Parameters ---------- pos_g : DGLGraph Graph holding positive edges. neg_g : DGLGraph Graph holding negative edges. to_device : func Function to move data into device. gpu_id : int Which gpu to move data to. trace : bool If True, trace the computation. This is required in training. If False, do not trace the computation. Default: False neg_deg_sample : bool If True, we use the head and tail nodes of the positive edges to construct negative edges. Default: False Returns ------- tensor The negative score """ num_chunks = neg_g.num_chunks chunk_size = neg_g.chunk_size neg_sample_size = neg_g.neg_sample_size mask = F.ones((num_chunks, chunk_size * (neg_sample_size + chunk_size)), dtype=F.float32, ctx=F.context(pos_g.ndata['emb'])) if neg_g.neg_head: neg_head_ids = neg_g.ndata['id'][neg_g.head_nid] neg_head = self.entity_emb(neg_head_ids, gpu_id, trace) head_ids, tail_ids = pos_g.all_edges(order='eid') if to_device is not None and gpu_id >= 0: tail_ids = to_device(tail_ids, gpu_id) tail = pos_g.ndata['emb'][tail_ids] rel = pos_g.edata['emb'] # When we train a batch, we could use the head nodes of the positive edges to # construct negative edges. We construct a negative edge between a positive head # node and every positive tail node. # When we construct negative edges like this, we know there is one positive # edge for a positive head node among the negative edges. We need to mask # them. if neg_deg_sample: head = pos_g.ndata['emb'][head_ids] head = head.reshape(num_chunks, chunk_size, -1) neg_head = neg_head.reshape(num_chunks, neg_sample_size, -1) neg_head = F.cat([head, neg_head], 1) neg_sample_size = chunk_size + neg_sample_size mask[:,0::(neg_sample_size + 1)] = 0 neg_head = neg_head.reshape(num_chunks * neg_sample_size, -1) neg_head, tail = self.head_neg_prepare(pos_g.edata['id'], num_chunks, neg_head, tail, gpu_id, trace) neg_score = self.head_neg_score(neg_head, rel, tail, num_chunks, chunk_size, neg_sample_size) else: neg_tail_ids = neg_g.ndata['id'][neg_g.tail_nid] neg_tail = self.entity_emb(neg_tail_ids, gpu_id, trace) head_ids, tail_ids = pos_g.all_edges(order='eid') if to_device is not None and gpu_id >= 0: head_ids = to_device(head_ids, gpu_id) head = pos_g.ndata['emb'][head_ids] rel = pos_g.edata['emb'] # This is negative edge construction similar to the above. 
if neg_deg_sample: tail = pos_g.ndata['emb'][tail_ids] tail = tail.reshape(num_chunks, chunk_size, -1) neg_tail = neg_tail.reshape(num_chunks, neg_sample_size, -1) neg_tail = F.cat([tail, neg_tail], 1) neg_sample_size = chunk_size + neg_sample_size mask[:,0::(neg_sample_size + 1)] = 0 neg_tail = neg_tail.reshape(num_chunks * neg_sample_size, -1) head, neg_tail = self.tail_neg_prepare(pos_g.edata['id'], num_chunks, head, neg_tail, gpu_id, trace) neg_score = self.tail_neg_score(head, rel, neg_tail, num_chunks, chunk_size, neg_sample_size) if neg_deg_sample: neg_g.neg_sample_size = neg_sample_size mask = mask.reshape(num_chunks, chunk_size, neg_sample_size) return neg_score * mask else: return neg_score def forward_test(self, pos_g, neg_g, logs, gpu_id=-1): """Do the forward and generate ranking results. Parameters ---------- pos_g : DGLGraph Graph holding positive edges. neg_g : DGLGraph Graph holding negative edges. logs : List Where to put results in. gpu_id : int Which gpu to accelerate the calculation. if -1 is provided, cpu is used. """ pos_g.ndata['emb'] = self.entity_emb(pos_g.ndata['id'], gpu_id, False) pos_g.edata['emb'] = self.relation_emb(pos_g.edata['id'], gpu_id, False) self.score_func.prepare(pos_g, gpu_id, False) batch_size = pos_g.number_of_edges() pos_scores = self.predict_score(pos_g) pos_scores = reshape(pos_scores, batch_size, -1) neg_scores = self.predict_neg_score(pos_g, neg_g, to_device=cuda, gpu_id=gpu_id, trace=False, neg_deg_sample=self.args.neg_deg_sample_eval) neg_scores = reshape(neg_scores, batch_size, -1) # We need to filter the positive edges in the negative graph. if self.args.eval_filter: filter_bias = reshape(neg_g.edata['bias'], batch_size, -1) if gpu_id >= 0: filter_bias = cuda(filter_bias, gpu_id) # find all indices where it is not false negative sample mask = filter_bias != -1 # To compute the rank of a positive edge among all negative edges, # we need to know how many negative edges have higher scores than # the positive edge. for i in range(batch_size): if self.args.eval_filter: # select all the true negative samples where its score >= positive sample ranking = F.asnumpy(F.sum(masked_select(neg_scores[i] >= pos_scores[i], mask[i]), dim=0) + 1) else: ranking = F.asnumpy(F.sum(neg_scores[i] >= pos_scores[i], dim=0) + 1) logs.append({ 'MRR': 1.0 / ranking, 'MR': float(ranking), 'HITS@1': 1.0 if ranking <= 1 else 0.0, 'HITS@3': 1.0 if ranking <= 3 else 0.0, 'HITS@10': 1.0 if ranking <= 10 else 0.0 }) def forward_test_wikikg(self, query, ans, candidate, mode, logs, gpu_id=-1): """Do the forward and generate ranking results. 
Parameters ---------- query : Tensor input head and relation for test or valid ans : Tenseor the correct tail entity index cadidate : Tensor negative sampled tail entity """ scores = self.predict_score_wikikg(query, candidate, mode, to_device=cuda, gpu_id=gpu_id, trace=False) if mode == "Valid": batch_size = query.shape[0] neg_scores = reshape(scores, batch_size, -1) for i in range(batch_size): ranking = F.asnumpy(F.sum(neg_scores[i] >= neg_scores[i][ans[i]], dim=0) + 1) logs.append({ 'MRR': 1.0 / ranking, 'MR': float(ranking), 'HITS@1': 1.0 if ranking <= 1 else 0.0, 'HITS@3': 1.0 if ranking <= 3 else 0.0, 'HITS@10': 1.0 if ranking <= 10 else 0.0 }) else: argsort = F.argsort(scores, dim=1, descending=True) logs.append(argsort[:,:10]) def predict_score_wikikg(self, query, candidate, mode, to_device=None, gpu_id=-1, trace=False): num_chunks = len(query) chunk_size = 1 neg_sample_size = candidate.shape[1] neg_tail = self.entity_emb(candidate.view(-1), gpu_id, False) head = self.entity_emb(query[:,0], gpu_id, False) rel = self.relation_emb(query[:,1], gpu_id, False) neg_score = self.tail_neg_score(head, rel, neg_tail, num_chunks, chunk_size, neg_sample_size) return neg_score.squeeze() # @profile def forward(self, pos_g, neg_g, gpu_id=-1): """Do the forward. Parameters ---------- pos_g : DGLGraph Graph holding positive edges. neg_g : DGLGraph Graph holding negative edges. gpu_id : int Which gpu to accelerate the calculation. if -1 is provided, cpu is used. Returns ------- tensor loss value dict loss info """ pos_g.ndata['emb'] = self.entity_emb(pos_g.ndata['id'], gpu_id, True) pos_g.edata['emb'] = self.relation_emb(pos_g.edata['id'], gpu_id, True) self.score_func.prepare(pos_g, gpu_id, True) pos_score = self.predict_score(pos_g) if gpu_id >= 0: neg_score = self.predict_neg_score(pos_g, neg_g, to_device=cuda, gpu_id=gpu_id, trace=True, neg_deg_sample=self.args.neg_deg_sample) else: neg_score = self.predict_neg_score(pos_g, neg_g, trace=True, neg_deg_sample=self.args.neg_deg_sample) neg_score = reshape(neg_score, -1, neg_g.neg_sample_size) # subsampling weight # TODO: add subsampling to new sampler #if self.args.non_uni_weight: # subsampling_weight = pos_g.edata['weight'] # pos_score = (pos_score * subsampling_weight).sum() / subsampling_weight.sum() # neg_score = (neg_score * subsampling_weight).sum() / subsampling_weight.sum() #else: edge_weight = F.copy_to(pos_g.edata['impts'], get_dev(gpu_id)) if self.has_edge_importance else None loss, log = self.loss_gen.get_total_loss(pos_score, neg_score, edge_weight) # regularization: TODO(zihao) #TODO: only reg ent&rel embeddings. other params to be added. if self.args.regularization_coef > 0.0 and self.args.regularization_norm > 0: coef, nm = self.args.regularization_coef, self.args.regularization_norm reg = coef * (norm(self.entity_emb.curr_emb(), nm) + norm(self.relation_emb.curr_emb(), nm)) log['regularization'] = get_scalar(reg) loss = loss + reg return loss, log def update(self, gpu_id=-1): """ Update the embeddings in the model gpu_id : int Which gpu to accelerate the calculation. if -1 is provided, cpu is used. """ self.entity_emb.update(gpu_id) self.relation_emb.update(gpu_id) self.score_func.update(gpu_id) def prepare_relation(self, device=None): """ Prepare relation embeddings in multi-process multi-gpu training model. device : th.device Which device (GPU) to put relation embeddings in. 
""" self.relation_emb = ExternalEmbedding(self.args, self.n_relations, self.rel_dim, device) self.relation_emb.init(self.emb_init) if self.model_name == 'TransR': local_projection_emb = ExternalEmbedding(self.args, self.n_relations, self.entity_dim * self.rel_dim, device) self.score_func.prepare_local_emb(local_projection_emb) self.score_func.reset_parameters() def prepare_cross_rels(self, cross_rels): self.relation_emb.setup_cross_rels(cross_rels, self.global_relation_emb) if self.model_name == 'TransR': self.score_func.prepare_cross_rels(cross_rels) def writeback_relation(self, rank=0, rel_parts=None): """ Writeback relation embeddings in a specific process to global relation embedding. Used in multi-process multi-gpu training model. rank : int Process id. rel_parts : List of tensor List of tensor stroing edge types of each partition. """ idx = rel_parts[rank] if self.soft_rel_part: idx = self.relation_emb.get_noncross_idx(idx) self.global_relation_emb.emb[idx] = F.copy_to(self.relation_emb.emb, F.cpu())[idx] if self.model_name == 'TransR': self.score_func.writeback_local_emb(idx) def load_relation(self, device=None): """ Sync global relation embeddings into local relation embeddings. Used in multi-process multi-gpu training model. device : th.device Which device (GPU) to put relation embeddings in. """ self.relation_emb = ExternalEmbedding(self.args, self.n_relations, self.rel_dim, device) self.relation_emb.emb = F.copy_to(self.global_relation_emb.emb, device) if self.model_name == 'TransR': local_projection_emb = ExternalEmbedding(self.args, self.n_relations, self.entity_dim * self.rel_dim, device) self.score_func.load_local_emb(local_projection_emb) def create_async_update(self): """Set up the async update for entity embedding. """ self.entity_emb.create_async_update() def finish_async_update(self): """Terminate the async update for entity embedding. """ self.entity_emb.finish_async_update() def pull_model(self, client, pos_g, neg_g): with th.no_grad(): entity_id = F.cat(seq=[pos_g.ndata['id'], neg_g.ndata['id']], dim=0) relation_id = pos_g.edata['id'] entity_id = F.tensor(np.unique(F.asnumpy(entity_id))) relation_id = F.tensor(np.unique(F.asnumpy(relation_id))) l2g = client.get_local2global() global_entity_id = l2g[entity_id] entity_data = client.pull(name='entity_emb', id_tensor=global_entity_id) relation_data = client.pull(name='relation_emb', id_tensor=relation_id) self.entity_emb.emb[entity_id] = entity_data self.relation_emb.emb[relation_id] = relation_data def push_gradient(self, client): with th.no_grad(): l2g = client.get_local2global() for entity_id, entity_data in self.entity_emb.trace: grad = entity_data.grad.data global_entity_id =l2g[entity_id] client.push(name='entity_emb', id_tensor=global_entity_id, data_tensor=grad) for relation_id, relation_data in self.relation_emb.trace: grad = relation_data.grad.data client.push(name='relation_emb', id_tensor=relation_id, data_tensor=grad) self.entity_emb.trace = [] self.relation_emb.trace = []
[]
[]
[ "DGLBACKEND" ]
[]
["DGLBACKEND"]
python
1
0
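For this row, constarg records the single environment variable that general_models.py reads at import time to choose between the MXNet and PyTorch backends. A minimal, self-contained sketch of that selection follows; the print is illustrative, since the real module performs backend-specific imports instead:

```python
import os

# DGLBACKEND is read with a literal key and defaults to 'pytorch', which
# is why it appears in this row's constarg column with constargcount 1.
backend = os.environ.get('DGLBACKEND', 'pytorch')
print('MXNet backend' if backend.lower() == 'mxnet' else 'PyTorch backend')
```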
sdk/identity/azure-identity/tests/test_client_secret_credential_async.py
# ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ import time from unittest.mock import Mock, patch from urllib.parse import urlparse from azure.core.credentials import AccessToken from azure.core.exceptions import ClientAuthenticationError from azure.core.pipeline.policies import ContentDecodePolicy, SansIOHTTPPolicy from azure.identity import TokenCachePersistenceOptions from azure.identity._constants import EnvironmentVariables from azure.identity._internal.user_agent import USER_AGENT from azure.identity.aio import ClientSecretCredential from msal import TokenCache import pytest from helpers import build_aad_response, mock_response, Request from helpers_async import async_validating_transport, AsyncMockTransport, wrap_in_future def test_tenant_id_validation(): """The credential should raise ValueError when given an invalid tenant_id""" valid_ids = {"c878a2ab-8ef4-413b-83a0-199afb84d7fb", "contoso.onmicrosoft.com", "organizations", "common"} for tenant in valid_ids: ClientSecretCredential(tenant, "client-id", "secret") invalid_ids = {"", "my tenant", "my_tenant", "/", "\\", '"my-tenant"', "'my-tenant'"} for tenant in invalid_ids: with pytest.raises(ValueError): ClientSecretCredential(tenant, "client-id", "secret") @pytest.mark.asyncio async def test_no_scopes(): """The credential should raise ValueError when get_token is called with no scopes""" credential = ClientSecretCredential("tenant-id", "client-id", "client-secret") with pytest.raises(ValueError): await credential.get_token() @pytest.mark.asyncio async def test_close(): transport = AsyncMockTransport() credential = ClientSecretCredential("tenant-id", "client-id", "client-secret", transport=transport) await credential.close() assert transport.__aexit__.call_count == 1 @pytest.mark.asyncio async def test_context_manager(): transport = AsyncMockTransport() credential = ClientSecretCredential("tenant-id", "client-id", "client-secret", transport=transport) async with credential: assert transport.__aenter__.call_count == 1 assert transport.__aenter__.call_count == 1 assert transport.__aexit__.call_count == 1 @pytest.mark.asyncio async def test_policies_configurable(): policy = Mock(spec_set=SansIOHTTPPolicy, on_request=Mock()) async def send(*_, **__): return mock_response(json_payload=build_aad_response(access_token="**")) credential = ClientSecretCredential( "tenant-id", "client-id", "client-secret", policies=[ContentDecodePolicy(), policy], transport=Mock(send=send) ) await credential.get_token("scope") assert policy.on_request.called @pytest.mark.asyncio async def test_user_agent(): transport = async_validating_transport( requests=[Request(required_headers={"User-Agent": USER_AGENT})], responses=[mock_response(json_payload=build_aad_response(access_token="**"))], ) credential = ClientSecretCredential("tenant-id", "client-id", "client-secret", transport=transport) await credential.get_token("scope") @pytest.mark.asyncio async def test_client_secret_credential(): client_id = "fake-client-id" secret = "fake-client-secret" tenant_id = "fake-tenant-id" access_token = "***" transport = async_validating_transport( requests=[Request(url_substring=tenant_id, required_data={"client_id": client_id, "client_secret": secret})], responses=[ mock_response( json_payload={ "token_type": "Bearer", "expires_in": 42, "ext_expires_in": 42, "access_token": access_token, } ) ], ) token = await ClientSecretCredential( tenant_id=tenant_id, 
client_id=client_id, client_secret=secret, transport=transport ).get_token("scope") # not validating expires_on because doing so requires monkeypatching time, and this is tested elsewhere assert token.token == access_token @pytest.mark.asyncio @pytest.mark.parametrize("authority", ("localhost", "https://localhost")) async def test_request_url(authority): """the credential should accept an authority, with or without scheme, as an argument or environment variable""" tenant_id = "expected-tenant" access_token = "***" parsed_authority = urlparse(authority) expected_netloc = parsed_authority.netloc or authority # "localhost" parses to netloc "", path "localhost" async def mock_send(request, **kwargs): actual = urlparse(request.url) assert actual.scheme == "https" assert actual.netloc == expected_netloc assert actual.path.startswith("/" + tenant_id) return mock_response(json_payload={"token_type": "Bearer", "expires_in": 42, "access_token": access_token}) credential = ClientSecretCredential( tenant_id, "client-id", "secret", transport=Mock(send=mock_send), authority=authority ) token = await credential.get_token("scope") assert token.token == access_token # authority can be configured via environment variable with patch.dict("os.environ", {EnvironmentVariables.AZURE_AUTHORITY_HOST: authority}, clear=True): credential = ClientSecretCredential(tenant_id, "client-id", "secret", transport=Mock(send=mock_send)) await credential.get_token("scope") assert token.token == access_token @pytest.mark.asyncio async def test_cache(): expired = "this token's expired" now = int(time.time()) expired_on = now - 3600 expired_token = AccessToken(expired, expired_on) token_payload = { "access_token": expired, "expires_in": 0, "ext_expires_in": 0, "expires_on": expired_on, "not_before": now, "token_type": "Bearer", } mock_send = Mock(return_value=mock_response(json_payload=token_payload)) transport = Mock(send=wrap_in_future(mock_send)) scope = "scope" credential = ClientSecretCredential("tenant-id", "client-id", "secret", transport=transport) # get_token initially returns the expired token because the credential # doesn't check whether tokens it receives from the service have expired token = await credential.get_token(scope) assert token == expired_token access_token = "new token" token_payload["access_token"] = access_token token_payload["expires_on"] = now + 3600 valid_token = AccessToken(access_token, now + 3600) # second call should observe the cached token has expired, and request another token = await credential.get_token(scope) assert token == valid_token assert mock_send.call_count == 2 def test_token_cache(): """the credential should default to an in memory cache, and optionally use a persistent cache""" with patch(ClientSecretCredential.__module__ + "._load_persistent_cache") as load_persistent_cache: with patch(ClientSecretCredential.__module__ + ".msal") as mock_msal: ClientSecretCredential("tenant", "client-id", "secret") assert mock_msal.TokenCache.call_count == 1 assert not load_persistent_cache.called ClientSecretCredential( "tenant", "client-id", "secret", cache_persistence_options=TokenCachePersistenceOptions() ) assert load_persistent_cache.call_count == 1 @pytest.mark.asyncio async def test_cache_multiple_clients(): """the credential shouldn't use tokens issued to other service principals""" access_token_a = "token a" access_token_b = "not " + access_token_a transport_a = async_validating_transport( requests=[Request()], 
responses=[mock_response(json_payload=build_aad_response(access_token=access_token_a))] ) transport_b = async_validating_transport( requests=[Request()], responses=[mock_response(json_payload=build_aad_response(access_token=access_token_b))] ) cache = TokenCache() with patch(ClientSecretCredential.__module__ + "._load_persistent_cache") as mock_cache_loader: mock_cache_loader.return_value = Mock(wraps=cache) credential_a = ClientSecretCredential( "tenant", "client-a", "secret", transport=transport_a, cache_persistence_options=TokenCachePersistenceOptions(), ) assert mock_cache_loader.call_count == 1, "credential should load the persistent cache" credential_b = ClientSecretCredential( "tenant", "client-b", "secret", transport=transport_b, cache_persistence_options=TokenCachePersistenceOptions(), ) assert mock_cache_loader.call_count == 2, "credential should load the persistent cache" # A caches a token scope = "scope" token_a = await credential_a.get_token(scope) assert token_a.token == access_token_a assert transport_a.send.call_count == 1 # B should get a different token for the same scope token_b = await credential_b.get_token(scope) assert token_b.token == access_token_b assert transport_b.send.call_count == 1 assert len(cache.find(TokenCache.CredentialType.ACCESS_TOKEN)) == 2 @pytest.mark.asyncio async def test_multitenant_authentication(): first_tenant = "first-tenant" first_token = "***" second_tenant = "second-tenant" second_token = first_token * 2 async def send(request, **_): parsed = urlparse(request.url) tenant = parsed.path.split("/")[1] assert tenant in (first_tenant, second_tenant), 'unexpected tenant "{}"'.format(tenant) token = first_token if tenant == first_tenant else second_token return mock_response(json_payload=build_aad_response(access_token=token)) credential = ClientSecretCredential( first_tenant, "client-id", "secret", transport=Mock(send=send) ) token = await credential.get_token("scope") assert token.token == first_token token = await credential.get_token("scope", tenant_id=first_tenant) assert token.token == first_token token = await credential.get_token("scope", tenant_id=second_tenant) assert token.token == second_token # should still default to the first tenant token = await credential.get_token("scope") assert token.token == first_token @pytest.mark.asyncio async def test_multitenant_authentication_not_allowed(): expected_tenant = "expected-tenant" expected_token = "***" async def send(request, **_): parsed = urlparse(request.url) tenant = parsed.path.split("/")[1] token = expected_token if tenant == expected_tenant else expected_token * 2 return mock_response(json_payload=build_aad_response(access_token=token)) credential = ClientSecretCredential(expected_tenant, "client-id", "secret", transport=Mock(send=send)) token = await credential.get_token("scope") assert token.token == expected_token token = await credential.get_token("scope", tenant_id=expected_tenant) assert token.token == expected_token token = await credential.get_token("scope", tenant_id="un" + expected_tenant) assert token.token == expected_token * 2 with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH: "true"}): token = await credential.get_token("scope", tenant_id="un" + expected_tenant) assert token.token == expected_token
[]
[]
[]
[]
[]
python
0
0
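This row is the empty case: all annotation columns are empty and both counts are 0. The test module only manipulates os.environ through unittest.mock.patch.dict, keyed by EnvironmentVariables constants rather than string literals, which is presumably why no constant argument is extracted. A minimal, self-contained sketch of that access pattern; the variable name below is an illustrative stand-in, not taken from the dataset:

```python
import os
from unittest.mock import patch

# patch.dict installs a temporary environment for the duration of the
# block and restores the original one afterwards.
with patch.dict("os.environ", {"EXAMPLE_AUTHORITY_HOST": "localhost"}, clear=True):
    print(os.environ["EXAMPLE_AUTHORITY_HOST"])   # "localhost"
print(os.environ.get("EXAMPLE_AUTHORITY_HOST"))   # None again (assuming it was unset before)
```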
wgengine/magicsock/magicsock_test.go
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package magicsock import ( "bytes" "context" crand "crypto/rand" "crypto/tls" "encoding/binary" "encoding/json" "errors" "fmt" "io/ioutil" "net" "net/http" "net/http/httptest" "os" "runtime" "strconv" "strings" "sync" "sync/atomic" "testing" "time" "unsafe" "github.com/google/go-cmp/cmp" "github.com/tailscale/wireguard-go/device" "github.com/tailscale/wireguard-go/tun/tuntest" "golang.org/x/crypto/nacl/box" "inet.af/netaddr" "tailscale.com/derp" "tailscale.com/derp/derphttp" "tailscale.com/derp/derpmap" "tailscale.com/ipn/ipnstate" "tailscale.com/net/stun/stuntest" "tailscale.com/tailcfg" "tailscale.com/tstest" "tailscale.com/tstest/natlab" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/types/nettype" "tailscale.com/types/wgkey" "tailscale.com/util/cibuild" "tailscale.com/wgengine/filter" "tailscale.com/wgengine/tstun" "tailscale.com/wgengine/wgcfg" "tailscale.com/wgengine/wgcfg/nmcfg" "tailscale.com/wgengine/wglog" ) func init() { os.Setenv("IN_TS_TEST", "1") } // WaitReady waits until the magicsock is entirely initialized and connected // to its home DERP server. This is normally not necessary, since magicsock // is intended to be entirely asynchronous, but it helps eliminate race // conditions in tests. In particular, you can't expect two test magicsocks // to be able to connect to each other through a test DERP unless they are // both fully initialized before you try. func (c *Conn) WaitReady(t testing.TB) { t.Helper() timer := time.NewTimer(10 * time.Second) defer timer.Stop() select { case <-c.derpStarted: return case <-c.connCtx.Done(): t.Fatalf("magicsock.Conn closed while waiting for readiness") case <-timer.C: t.Fatalf("timeout waiting for readiness") } } func runDERPAndStun(t *testing.T, logf logger.Logf, l nettype.PacketListener, stunIP netaddr.IP) (derpMap *tailcfg.DERPMap, cleanup func()) { var serverPrivateKey key.Private if _, err := crand.Read(serverPrivateKey[:]); err != nil { t.Fatal(err) } d := derp.NewServer(serverPrivateKey, logf) httpsrv := httptest.NewUnstartedServer(derphttp.Handler(d)) httpsrv.Config.ErrorLog = logger.StdLogger(logf) httpsrv.Config.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler)) httpsrv.StartTLS() stunAddr, stunCleanup := stuntest.ServeWithPacketListener(t, l) m := &tailcfg.DERPMap{ Regions: map[int]*tailcfg.DERPRegion{ 1: &tailcfg.DERPRegion{ RegionID: 1, RegionCode: "test", Nodes: []*tailcfg.DERPNode{ { Name: "t1", RegionID: 1, HostName: "test-node.unused", IPv4: "127.0.0.1", IPv6: "none", STUNPort: stunAddr.Port, DERPTestPort: httpsrv.Listener.Addr().(*net.TCPAddr).Port, STUNTestIP: stunIP.String(), }, }, }, }, } cleanup = func() { httpsrv.CloseClientConnections() httpsrv.Close() d.Close() stunCleanup() } return m, cleanup } // magicStack is a magicsock, plus all the stuff around it that's // necessary to send and receive packets to test e2e wireguard // happiness. 
type magicStack struct { privateKey wgkey.Private epCh chan []string // endpoint updates produced by this peer conn *Conn // the magicsock itself tun *tuntest.ChannelTUN // TUN device to send/receive packets tsTun *tstun.TUN // wrapped tun that implements filtering and wgengine hooks dev *device.Device // the wireguard-go Device that connects the previous things wgLogger *wglog.Logger // wireguard-go log wrapper } // newMagicStack builds and initializes an idle magicsock and // friends. You need to call conn.SetNetworkMap and dev.Reconfig // before anything interesting happens. func newMagicStack(t testing.TB, logf logger.Logf, l nettype.PacketListener, derpMap *tailcfg.DERPMap, disableLegacy bool) *magicStack { t.Helper() privateKey, err := wgkey.NewPrivate() if err != nil { t.Fatalf("generating private key: %v", err) } epCh := make(chan []string, 100) // arbitrary conn, err := NewConn(Options{ Logf: logf, PacketListener: l, EndpointsFunc: func(eps []string) { epCh <- eps }, SimulatedNetwork: l != nettype.Std{}, DisableLegacyNetworking: disableLegacy, }) if err != nil { t.Fatalf("constructing magicsock: %v", err) } conn.Start() conn.SetDERPMap(derpMap) if err := conn.SetPrivateKey(privateKey); err != nil { t.Fatalf("setting private key in magicsock: %v", err) } tun := tuntest.NewChannelTUN() tsTun := tstun.WrapTUN(logf, tun.TUN()) tsTun.SetFilter(filter.NewAllowAllForTest(logf)) wgLogger := wglog.NewLogger(logf) dev := device.NewDevice(tsTun, &device.DeviceOptions{ Logger: wgLogger.DeviceLogger, CreateEndpoint: conn.CreateEndpoint, CreateBind: conn.CreateBind, SkipBindUpdate: true, }) dev.Up() // Wait for magicsock to connect up to DERP. conn.WaitReady(t) // Wait for first endpoint update to be available deadline := time.Now().Add(2 * time.Second) for len(epCh) == 0 && time.Now().Before(deadline) { time.Sleep(100 * time.Millisecond) } return &magicStack{ privateKey: privateKey, epCh: epCh, conn: conn, tun: tun, tsTun: tsTun, dev: dev, wgLogger: wgLogger, } } func (s *magicStack) Reconfig(cfg *wgcfg.Config) error { s.wgLogger.SetPeers(cfg.Peers) return wgcfg.ReconfigDevice(s.dev, cfg, s.conn.logf) } func (s *magicStack) String() string { pub := s.Public() return pub.ShortString() } func (s *magicStack) Close() { s.dev.Close() s.conn.Close() } func (s *magicStack) Public() key.Public { return key.Public(s.privateKey.Public()) } func (s *magicStack) Status() *ipnstate.Status { var sb ipnstate.StatusBuilder s.conn.UpdateStatus(&sb) return sb.Status() } // IP returns the Tailscale IP address assigned to this magicStack. // // Something external needs to provide a NetworkMap and WireGuard // configs to the magicStack in order for it to acquire an IP // address. See meshStacks for one possible source of netmaps and IPs. func (s *magicStack) IP(t *testing.T) netaddr.IP { for deadline := time.Now().Add(5 * time.Second); time.Now().Before(deadline); time.Sleep(10 * time.Millisecond) { st := s.Status() if len(st.TailscaleIPs) > 0 { return st.TailscaleIPs[0] } } t.Fatal("timed out waiting for magicstack to get an IP assigned") panic("unreachable") // compiler doesn't know t.Fatal panics } // meshStacks monitors epCh on all given ms, and plumbs network maps // and WireGuard configs into everyone to form a full mesh that has up // to date endpoint info. Think of it as an extremely stripped down // and purpose-built Tailscale control plane. // // meshStacks only supports disco connections, not legacy logic. 
func meshStacks(logf logger.Logf, ms []*magicStack) (cleanup func()) { ctx, cancel := context.WithCancel(context.Background()) // Serialize all reconfigurations globally, just to keep things // simpler. var ( mu sync.Mutex eps = make([][]string, len(ms)) ) buildNetmapLocked := func(myIdx int) *netmap.NetworkMap { me := ms[myIdx] nm := &netmap.NetworkMap{ PrivateKey: me.privateKey, NodeKey: tailcfg.NodeKey(me.privateKey.Public()), Addresses: []netaddr.IPPrefix{{IP: netaddr.IPv4(1, 0, 0, byte(myIdx+1)), Bits: 32}}, } for i, peer := range ms { if i == myIdx { continue } addrs := []netaddr.IPPrefix{{IP: netaddr.IPv4(1, 0, 0, byte(i+1)), Bits: 32}} peer := &tailcfg.Node{ ID: tailcfg.NodeID(i + 1), Name: fmt.Sprintf("node%d", i+1), Key: tailcfg.NodeKey(peer.privateKey.Public()), DiscoKey: peer.conn.DiscoPublicKey(), Addresses: addrs, AllowedIPs: addrs, Endpoints: eps[i], DERP: "127.3.3.40:1", } nm.Peers = append(nm.Peers, peer) } return nm } updateEps := func(idx int, newEps []string) { mu.Lock() defer mu.Unlock() eps[idx] = newEps for i, m := range ms { nm := buildNetmapLocked(i) m.conn.SetNetworkMap(nm) peerSet := make(map[key.Public]struct{}, len(nm.Peers)) for _, peer := range nm.Peers { peerSet[key.Public(peer.Key)] = struct{}{} } m.conn.UpdatePeers(peerSet) wg, err := nmcfg.WGCfg(nm, logf, netmap.AllowSingleHosts, "") if err != nil { // We're too far from the *testing.T to be graceful, // blow up. Shouldn't happen anyway. panic(fmt.Sprintf("failed to construct wgcfg from netmap: %v", err)) } if err := m.Reconfig(wg); err != nil { panic(fmt.Sprintf("device reconfig failed: %v", err)) } } } var wg sync.WaitGroup wg.Add(len(ms)) for i := range ms { go func(myIdx int) { defer wg.Done() for { select { case <-ctx.Done(): return case eps := <-ms[myIdx].epCh: logf("conn%d endpoints update", myIdx+1) updateEps(myIdx, eps) } } }(i) } return func() { cancel() wg.Wait() } } func TestNewConn(t *testing.T) { tstest.PanicOnLog() tstest.ResourceCheck(t) epCh := make(chan string, 16) epFunc := func(endpoints []string) { for _, ep := range endpoints { epCh <- ep } } stunAddr, stunCleanupFn := stuntest.Serve(t) defer stunCleanupFn() port := pickPort(t) conn, err := NewConn(Options{ Port: port, EndpointsFunc: epFunc, Logf: t.Logf, DisableLegacyNetworking: true, }) if err != nil { t.Fatal(err) } defer conn.Close() conn.SetDERPMap(stuntest.DERPMapOf(stunAddr.String())) conn.SetPrivateKey(wgkey.Private(key.NewPrivate())) conn.Start() go func() { var pkt [64 << 10]byte for { _, _, err := conn.ReceiveIPv4(pkt[:]) if err != nil { return } } }() timeout := time.After(10 * time.Second) var endpoints []string suffix := fmt.Sprintf(":%d", port) collectEndpoints: for { select { case ep := <-epCh: endpoints = append(endpoints, ep) if strings.HasSuffix(ep, suffix) { break collectEndpoints } case <-timeout: t.Fatalf("timeout with endpoints: %v", endpoints) } } } func pickPort(t testing.TB) uint16 { t.Helper() conn, err := net.ListenPacket("udp4", "127.0.0.1:0") if err != nil { t.Fatal(err) } defer conn.Close() return uint16(conn.LocalAddr().(*net.UDPAddr).Port) } func TestPickDERPFallback(t *testing.T) { tstest.PanicOnLog() tstest.ResourceCheck(t) c := newConn() c.derpMap = derpmap.Prod() a := c.pickDERPFallback() if a == 0 { t.Fatalf("pickDERPFallback returned 0") } // Test that it's consistent. for i := 0; i < 50; i++ { b := c.pickDERPFallback() if a != b { t.Fatalf("got inconsistent %d vs %d values", a, b) } } // Test that that the pointer value of c is blended in and // distribution over nodes works. 
got := map[int]int{} for i := 0; i < 50; i++ { c = newConn() c.derpMap = derpmap.Prod() got[c.pickDERPFallback()]++ } t.Logf("distribution: %v", got) if len(got) < 2 { t.Errorf("expected more than 1 node; got %v", got) } // Test that stickiness works. const someNode = 123456 c.myDerp = someNode if got := c.pickDERPFallback(); got != someNode { t.Errorf("not sticky: got %v; want %v", got, someNode) } // But move if peers are elsewhere. const otherNode = 789 c.addrsByKey = map[key.Public]*addrSet{ key.Public{1}: &addrSet{ipPorts: []netaddr.IPPort{{IP: derpMagicIPAddr, Port: otherNode}}}, } if got := c.pickDERPFallback(); got != otherNode { t.Errorf("didn't join peers: got %v; want %v", got, someNode) } } func makeConfigs(t *testing.T, addrs []netaddr.IPPort) []wgcfg.Config { t.Helper() var privKeys []wgcfg.PrivateKey var addresses [][]netaddr.IPPrefix for i := range addrs { privKey, err := wgkey.NewPrivate() if err != nil { t.Fatal(err) } privKeys = append(privKeys, wgcfg.PrivateKey(privKey)) addresses = append(addresses, []netaddr.IPPrefix{ parseCIDR(t, fmt.Sprintf("1.0.0.%d/32", i+1)), }) } var cfgs []wgcfg.Config for i, addr := range addrs { cfg := wgcfg.Config{ Name: fmt.Sprintf("peer%d", i+1), PrivateKey: privKeys[i], Addresses: addresses[i], ListenPort: addr.Port, } for peerNum, addr := range addrs { if peerNum == i { continue } peer := wgcfg.Peer{ PublicKey: privKeys[peerNum].Public(), AllowedIPs: addresses[peerNum], Endpoints: addr.String(), PersistentKeepalive: 25, } cfg.Peers = append(cfg.Peers, peer) } cfgs = append(cfgs, cfg) } return cfgs } func parseCIDR(t *testing.T, addr string) netaddr.IPPrefix { t.Helper() cidr, err := netaddr.ParseIPPrefix(addr) if err != nil { t.Fatal(err) } return cidr } // TestDeviceStartStop exercises the startup and shutdown logic of // wireguard-go, which is intimately intertwined with magicsock's own // lifecycle. We seem to be good at generating deadlocks here, so if // this test fails you should suspect a deadlock somewhere in startup // or shutdown. It may be an infrequent flake, so run with // -count=10000 to be sure. func TestDeviceStartStop(t *testing.T) { tstest.PanicOnLog() tstest.ResourceCheck(t) conn, err := NewConn(Options{ EndpointsFunc: func(eps []string) {}, Logf: t.Logf, DisableLegacyNetworking: true, }) if err != nil { t.Fatal(err) } conn.Start() defer conn.Close() tun := tuntest.NewChannelTUN() dev := device.NewDevice(tun.TUN(), &device.DeviceOptions{ Logger: wglog.NewLogger(t.Logf).DeviceLogger, CreateEndpoint: conn.CreateEndpoint, CreateBind: conn.CreateBind, SkipBindUpdate: true, }) dev.Up() dev.Close() } // Exercise a code path in sendDiscoMessage if the connection has been closed. 
func TestConnClosed(t *testing.T) { mstun := &natlab.Machine{Name: "stun"} m1 := &natlab.Machine{Name: "m1"} m2 := &natlab.Machine{Name: "m2"} inet := natlab.NewInternet() sif := mstun.Attach("eth0", inet) m1if := m1.Attach("eth0", inet) m2if := m2.Attach("eth0", inet) d := &devices{ m1: m1, m1IP: m1if.V4(), m2: m2, m2IP: m2if.V4(), stun: mstun, stunIP: sif.V4(), } logf, closeLogf := logger.LogfCloser(t.Logf) defer closeLogf() derpMap, cleanup := runDERPAndStun(t, logf, d.stun, d.stunIP) defer cleanup() ms1 := newMagicStack(t, logger.WithPrefix(logf, "conn1: "), d.m1, derpMap, true) defer ms1.Close() ms2 := newMagicStack(t, logger.WithPrefix(logf, "conn2: "), d.m2, derpMap, true) defer ms2.Close() cleanup = meshStacks(t.Logf, []*magicStack{ms1, ms2}) defer cleanup() pkt := tuntest.Ping(ms2.IP(t).IPAddr().IP, ms1.IP(t).IPAddr().IP) if len(ms1.conn.activeDerp) == 0 { t.Errorf("unexpected DERP empty got: %v want: >0", len(ms1.conn.activeDerp)) } ms1.conn.Close() ms2.conn.Close() // This should hit a c.closed conditional in sendDiscoMessage() and return immediately. ms1.tun.Outbound <- pkt select { case <-ms2.tun.Inbound: t.Error("unexpected response with connection closed") case <-time.After(100 * time.Millisecond): } if len(ms1.conn.activeDerp) > 0 { t.Errorf("unexpected DERP active got: %v want:0", len(ms1.conn.activeDerp)) } } func makeNestable(t *testing.T) (logf logger.Logf, setT func(t *testing.T)) { var mu sync.RWMutex cur := t setT = func(t *testing.T) { mu.Lock() cur = t mu.Unlock() } logf = func(s string, args ...interface{}) { mu.RLock() t := cur t.Helper() t.Logf(s, args...) mu.RUnlock() } return logf, setT } func TestTwoDevicePing(t *testing.T) { l, ip := nettype.Std{}, netaddr.IPv4(127, 0, 0, 1) n := &devices{ m1: l, m1IP: ip, m2: l, m2IP: ip, stun: l, stunIP: ip, } testTwoDevicePing(t, n) } func TestActiveDiscovery(t *testing.T) { t.Run("simple_internet", func(t *testing.T) { t.Parallel() mstun := &natlab.Machine{Name: "stun"} m1 := &natlab.Machine{Name: "m1"} m2 := &natlab.Machine{Name: "m2"} inet := natlab.NewInternet() sif := mstun.Attach("eth0", inet) m1if := m1.Attach("eth0", inet) m2if := m2.Attach("eth0", inet) n := &devices{ m1: m1, m1IP: m1if.V4(), m2: m2, m2IP: m2if.V4(), stun: mstun, stunIP: sif.V4(), } testActiveDiscovery(t, n) }) t.Run("facing_easy_firewalls", func(t *testing.T) { mstun := &natlab.Machine{Name: "stun"} m1 := &natlab.Machine{ Name: "m1", PacketHandler: &natlab.Firewall{}, } m2 := &natlab.Machine{ Name: "m2", PacketHandler: &natlab.Firewall{}, } inet := natlab.NewInternet() sif := mstun.Attach("eth0", inet) m1if := m1.Attach("eth0", inet) m2if := m2.Attach("eth0", inet) n := &devices{ m1: m1, m1IP: m1if.V4(), m2: m2, m2IP: m2if.V4(), stun: mstun, stunIP: sif.V4(), } testActiveDiscovery(t, n) }) t.Run("facing_nats", func(t *testing.T) { mstun := &natlab.Machine{Name: "stun"} m1 := &natlab.Machine{ Name: "m1", PacketHandler: &natlab.Firewall{}, } nat1 := &natlab.Machine{ Name: "nat1", } m2 := &natlab.Machine{ Name: "m2", PacketHandler: &natlab.Firewall{}, } nat2 := &natlab.Machine{ Name: "nat2", } inet := natlab.NewInternet() lan1 := &natlab.Network{ Name: "lan1", Prefix4: mustPrefix("192.168.0.0/24"), } lan2 := &natlab.Network{ Name: "lan2", Prefix4: mustPrefix("192.168.1.0/24"), } sif := mstun.Attach("eth0", inet) nat1WAN := nat1.Attach("wan", inet) nat1LAN := nat1.Attach("lan1", lan1) nat2WAN := nat2.Attach("wan", inet) nat2LAN := nat2.Attach("lan2", lan2) m1if := m1.Attach("eth0", lan1) m2if := m2.Attach("eth0", lan2) 
lan1.SetDefaultGateway(nat1LAN) lan2.SetDefaultGateway(nat2LAN) nat1.PacketHandler = &natlab.SNAT44{ Machine: nat1, ExternalInterface: nat1WAN, Firewall: &natlab.Firewall{ TrustedInterface: nat1LAN, }, } nat2.PacketHandler = &natlab.SNAT44{ Machine: nat2, ExternalInterface: nat2WAN, Firewall: &natlab.Firewall{ TrustedInterface: nat2LAN, }, } n := &devices{ m1: m1, m1IP: m1if.V4(), m2: m2, m2IP: m2if.V4(), stun: mstun, stunIP: sif.V4(), } testActiveDiscovery(t, n) }) } func mustPrefix(s string) netaddr.IPPrefix { pfx, err := netaddr.ParseIPPrefix(s) if err != nil { panic(err) } return pfx } type devices struct { m1 nettype.PacketListener m1IP netaddr.IP m2 nettype.PacketListener m2IP netaddr.IP stun nettype.PacketListener stunIP netaddr.IP } // newPinger starts continuously sending test packets from srcM to // dstM, until cleanup is invoked to stop it. Each ping has 1 second // to transit the network. It is a test failure to lose a ping. func newPinger(t *testing.T, logf logger.Logf, src, dst *magicStack) (cleanup func()) { ctx, cancel := context.WithCancel(context.Background()) done := make(chan struct{}) one := func() bool { // TODO(danderson): requiring exactly zero packet loss // will probably be too strict for some tests we'd like to // run (e.g. discovery switching to a new path on // failure). Figure out what kind of thing would be // acceptable to test instead of "every ping must // transit". pkt := tuntest.Ping(dst.IP(t).IPAddr().IP, src.IP(t).IPAddr().IP) select { case src.tun.Outbound <- pkt: case <-ctx.Done(): return false } select { case <-dst.tun.Inbound: return true case <-time.After(10 * time.Second): // Very generous timeout here because depending on // magicsock setup races, the first handshake might get // eaten by the receiving end (if wireguard-go hasn't been // configured quite yet), so we have to wait for at least // the first retransmit from wireguard before we declare // failure. t.Errorf("timed out waiting for ping to transit") return true case <-ctx.Done(): // Try a little bit longer to consume the packet we're // waiting for. This is to deal with shutdown races, where // natlab may still be delivering a packet to us from a // goroutine. select { case <-dst.tun.Inbound: case <-time.After(time.Second): } return false } } cleanup = func() { cancel() <-done } // Synchronously transit one ping to get things started. This is // nice because it means that newPinger returning means we've // worked through initial connectivity. if !one() { cleanup() return } go func() { logf("sending ping stream from %s (%s) to %s (%s)", src, src.IP(t), dst, dst.IP(t)) defer close(done) for one() { } }() return cleanup } // testActiveDiscovery verifies that two magicStacks tied to the given // devices can establish a direct p2p connection with each other. See // TestActiveDiscovery for the various configurations of devices that // get exercised. func testActiveDiscovery(t *testing.T, d *devices) { tstest.PanicOnLog() tstest.ResourceCheck(t) tlogf, setT := makeNestable(t) setT(t) start := time.Now() wlogf := func(msg string, args ...interface{}) { t.Helper() msg = fmt.Sprintf("%s: %s", time.Since(start).Truncate(time.Microsecond), msg) tlogf(msg, args...) 
} logf, closeLogf := logger.LogfCloser(wlogf) defer closeLogf() derpMap, cleanup := runDERPAndStun(t, logf, d.stun, d.stunIP) defer cleanup() m1 := newMagicStack(t, logger.WithPrefix(logf, "conn1: "), d.m1, derpMap, true) defer m1.Close() m2 := newMagicStack(t, logger.WithPrefix(logf, "conn2: "), d.m2, derpMap, true) defer m2.Close() cleanup = meshStacks(logf, []*magicStack{m1, m2}) defer cleanup() m1IP := m1.IP(t) m2IP := m2.IP(t) logf("IPs: %s %s", m1IP, m2IP) cleanup = newPinger(t, logf, m1, m2) defer cleanup() // Everything is now up and running, active discovery should find // a direct path between our peers. Wait for it to switch away // from DERP. mustDirect := func(m1, m2 *magicStack) { lastLog := time.Now().Add(-time.Minute) // See https://github.com/tailscale/tailscale/issues/654 for a discussion of this deadline. for deadline := time.Now().Add(10 * time.Second); time.Now().Before(deadline); time.Sleep(10 * time.Millisecond) { pst := m1.Status().Peer[m2.Public()] if pst.CurAddr != "" { logf("direct link %s->%s found with addr %s", m1, m2, pst.CurAddr) return } if now := time.Now(); now.Sub(lastLog) > time.Second { logf("no direct path %s->%s yet, addrs %v", m1, m2, pst.Addrs) lastLog = now } } t.Errorf("magicsock did not find a direct path from %s to %s", m1, m2) } mustDirect(m1, m2) mustDirect(m2, m1) logf("starting cleanup") } func testTwoDevicePing(t *testing.T, d *devices) { tstest.PanicOnLog() tstest.ResourceCheck(t) // This gets reassigned inside every test, so that the connections // all log using the "current" t.Logf function. Sigh. nestedLogf, setT := makeNestable(t) logf, closeLogf := logger.LogfCloser(nestedLogf) defer closeLogf() derpMap, cleanup := runDERPAndStun(t, logf, d.stun, d.stunIP) defer cleanup() m1 := newMagicStack(t, logf, d.m1, derpMap, false) defer m1.Close() m2 := newMagicStack(t, logf, d.m2, derpMap, false) defer m2.Close() addrs := []netaddr.IPPort{ {IP: d.m1IP, Port: m1.conn.LocalPort()}, {IP: d.m2IP, Port: m2.conn.LocalPort()}, } cfgs := makeConfigs(t, addrs) if err := m1.Reconfig(&cfgs[0]); err != nil { t.Fatal(err) } if err := m2.Reconfig(&cfgs[1]); err != nil { t.Fatal(err) } // In the normal case, pings succeed immediately. // However, in the case of a handshake race, we need to retry. // With very bad luck, we can need to retry multiple times. allowedRetries := 3 if cibuild.On() { // Allow extra retries on small/flaky/loaded CI machines. allowedRetries *= 2 } // Retries take 5s each. Add 1s for some processing time. pingTimeout := 5*time.Second*time.Duration(allowedRetries) + time.Second // sendWithTimeout sends msg using send, checking that it is received unchanged from in. // It resends once per second until the send succeeds, or pingTimeout time has elapsed. 
sendWithTimeout := func(msg []byte, in chan []byte, send func()) error { start := time.Now() for time.Since(start) < pingTimeout { send() select { case recv := <-in: if !bytes.Equal(msg, recv) { return errors.New("ping did not transit correctly") } return nil case <-time.After(time.Second): // try again } } return errors.New("ping timed out") } ping1 := func(t *testing.T) { msg2to1 := tuntest.Ping(net.ParseIP("1.0.0.1"), net.ParseIP("1.0.0.2")) send := func() { m2.tun.Outbound <- msg2to1 t.Log("ping1 sent") } in := m1.tun.Inbound if err := sendWithTimeout(msg2to1, in, send); err != nil { t.Error(err) } } ping2 := func(t *testing.T) { msg1to2 := tuntest.Ping(net.ParseIP("1.0.0.2"), net.ParseIP("1.0.0.1")) send := func() { m1.tun.Outbound <- msg1to2 t.Log("ping2 sent") } in := m2.tun.Inbound if err := sendWithTimeout(msg1to2, in, send); err != nil { t.Error(err) } } outerT := t t.Run("ping 1.0.0.1", func(t *testing.T) { setT(t) defer setT(outerT) ping1(t) }) t.Run("ping 1.0.0.2", func(t *testing.T) { setT(t) defer setT(outerT) ping2(t) }) t.Run("ping 1.0.0.2 via SendPacket", func(t *testing.T) { setT(t) defer setT(outerT) msg1to2 := tuntest.Ping(net.ParseIP("1.0.0.2"), net.ParseIP("1.0.0.1")) send := func() { if err := m1.tsTun.InjectOutbound(msg1to2); err != nil { t.Fatal(err) } t.Log("SendPacket sent") } in := m2.tun.Inbound if err := sendWithTimeout(msg1to2, in, send); err != nil { t.Error(err) } }) t.Run("no-op dev1 reconfig", func(t *testing.T) { setT(t) defer setT(outerT) if err := m1.Reconfig(&cfgs[0]); err != nil { t.Fatal(err) } ping1(t) ping2(t) }) // TODO: Remove this once the following tests are reliable. if run, _ := strconv.ParseBool(os.Getenv("RUN_CURSED_TESTS")); !run { t.Skip("skipping following tests because RUN_CURSED_TESTS is not set.") } pingSeq := func(t *testing.T, count int, totalTime time.Duration, strict bool) { msg := func(i int) []byte { b := tuntest.Ping(net.ParseIP("1.0.0.2"), net.ParseIP("1.0.0.1")) b[len(b)-1] = byte(i) // set seq num return b } // Space out ping transmissions so that the overall // transmission happens in totalTime. // // We do this because the packet spray logic in magicsock is // time-based to allow for reliable NAT traversal. However, // for the packet spraying test further down, there needs to // be at least 1 sprayed packet that is not the handshake, in // case the handshake gets eaten by the race resolution logic. // // This is an inherent "race by design" in our current // magicsock+wireguard-go codebase: sometimes, racing // handshakes will result in a sub-optimal path for a few // hundred milliseconds, until a subsequent spray corrects the // issue. In order for the test to reflect that magicsock // works as designed, we have to space out packet transmission // here. interPacketGap := totalTime / time.Duration(count) if interPacketGap < 1*time.Millisecond { interPacketGap = 0 } for i := 0; i < count; i++ { b := msg(i) m1.tun.Outbound <- b time.Sleep(interPacketGap) } for i := 0; i < count; i++ { b := msg(i) select { case msgRecv := <-m2.tun.Inbound: if !bytes.Equal(b, msgRecv) { if strict { t.Errorf("return ping %d did not transit correctly: %s", i, cmp.Diff(b, msgRecv)) } } case <-time.After(pingTimeout): if strict { t.Errorf("return ping %d did not transit", i) } } } } t.Run("ping 1.0.0.1 x50", func(t *testing.T) { setT(t) defer setT(outerT) pingSeq(t, 50, 0, true) }) // Add DERP relay. 
derpEp := "127.3.3.40:1" ep0 := cfgs[0].Peers[0].Endpoints ep0 = derpEp + "," + ep0 cfgs[0].Peers[0].Endpoints = ep0 ep1 := cfgs[1].Peers[0].Endpoints ep1 = derpEp + "," + ep1 cfgs[1].Peers[0].Endpoints = ep1 if err := m1.Reconfig(&cfgs[0]); err != nil { t.Fatal(err) } if err := m2.Reconfig(&cfgs[1]); err != nil { t.Fatal(err) } t.Run("add DERP", func(t *testing.T) { setT(t) defer setT(outerT) pingSeq(t, 20, 0, true) }) // Disable real route. cfgs[0].Peers[0].Endpoints = derpEp cfgs[1].Peers[0].Endpoints = derpEp if err := m1.Reconfig(&cfgs[0]); err != nil { t.Fatal(err) } if err := m2.Reconfig(&cfgs[1]); err != nil { t.Fatal(err) } time.Sleep(250 * time.Millisecond) // TODO remove t.Run("all traffic over DERP", func(t *testing.T) { setT(t) defer setT(outerT) defer func() { if t.Failed() || true { logf("cfg0: %v", stringifyConfig(cfgs[0])) logf("cfg1: %v", stringifyConfig(cfgs[1])) } }() pingSeq(t, 20, 0, true) }) m1.dev.RemoveAllPeers() m2.dev.RemoveAllPeers() // Give one peer a non-DERP endpoint. We expect the other to // accept it via roamAddr. cfgs[0].Peers[0].Endpoints = ep0 if ep2 := cfgs[1].Peers[0].Endpoints; len(ep2) != 1 { t.Errorf("unexpected peer endpoints in dev2: %v", ep2) } if err := m2.Reconfig(&cfgs[1]); err != nil { t.Fatal(err) } if err := m1.Reconfig(&cfgs[0]); err != nil { t.Fatal(err) } // Dear future human debugging a test failure here: this test is // flaky, and very infrequently will drop 1-2 of the 50 ping // packets. This does not affect normal operation of tailscaled, // but makes this test fail. // // TODO(danderson): finish root-causing and de-flake this test. t.Run("one real route is enough thanks to spray", func(t *testing.T) { setT(t) defer setT(outerT) pingSeq(t, 50, 700*time.Millisecond, false) cfg, err := wgcfg.DeviceConfig(m2.dev) if err != nil { t.Fatal(err) } ep2 := cfg.Peers[0].Endpoints if len(ep2) != 2 { t.Error("handshake spray failed to find real route") } }) } // TestAddrSet tests addrSet appendDests and updateDst. func TestAddrSet(t *testing.T) { tstest.PanicOnLog() tstest.ResourceCheck(t) mustIPPortPtr := func(s string) *netaddr.IPPort { ipp := netaddr.MustParseIPPort(s) return &ipp } ipps := func(ss ...string) (ret []netaddr.IPPort) { t.Helper() for _, s := range ss { ret = append(ret, netaddr.MustParseIPPort(s)) } return ret } joinUDPs := func(in []netaddr.IPPort) string { var sb strings.Builder for i, ua := range in { if i > 0 { sb.WriteByte(',') } sb.WriteString(ua.String()) } return sb.String() } var ( regPacket = []byte("some regular packet") sprayPacket = []byte("0000") ) binary.LittleEndian.PutUint32(sprayPacket[:4], device.MessageInitiationType) if !shouldSprayPacket(sprayPacket) { t.Fatal("sprayPacket should be classified as a spray packet for testing") } // A step is either a b+want appendDests tests, or an // UpdateDst call, depending on which fields are set. type step struct { // advance is the time to advance the fake clock // before the step. advance time.Duration // updateDst, if set, does an UpdateDst call and // b+want are ignored. 
updateDst *netaddr.IPPort b []byte want string // comma-separated } tests := []struct { name string as *addrSet steps []step logCheck func(t *testing.T, logged []byte) }{ { name: "reg_packet_no_curaddr", as: &addrSet{ ipPorts: ipps("127.3.3.40:1", "123.45.67.89:123", "10.0.0.1:123"), curAddr: -1, // unknown roamAddr: nil, }, steps: []step{ {b: regPacket, want: "127.3.3.40:1"}, }, }, { name: "reg_packet_have_curaddr", as: &addrSet{ ipPorts: ipps("127.3.3.40:1", "123.45.67.89:123", "10.0.0.1:123"), curAddr: 1, // global IP roamAddr: nil, }, steps: []step{ {b: regPacket, want: "123.45.67.89:123"}, }, }, { name: "reg_packet_have_roamaddr", as: &addrSet{ ipPorts: ipps("127.3.3.40:1", "123.45.67.89:123", "10.0.0.1:123"), curAddr: 2, // should be ignored roamAddr: mustIPPortPtr("5.6.7.8:123"), }, steps: []step{ {b: regPacket, want: "5.6.7.8:123"}, {updateDst: mustIPPortPtr("10.0.0.1:123")}, // no more roaming {b: regPacket, want: "10.0.0.1:123"}, }, }, { name: "start_roaming", as: &addrSet{ ipPorts: ipps("127.3.3.40:1", "123.45.67.89:123", "10.0.0.1:123"), curAddr: 2, }, steps: []step{ {b: regPacket, want: "10.0.0.1:123"}, {updateDst: mustIPPortPtr("4.5.6.7:123")}, {b: regPacket, want: "4.5.6.7:123"}, {updateDst: mustIPPortPtr("5.6.7.8:123")}, {b: regPacket, want: "5.6.7.8:123"}, {updateDst: mustIPPortPtr("123.45.67.89:123")}, // end roaming {b: regPacket, want: "123.45.67.89:123"}, }, }, { name: "spray_packet", as: &addrSet{ ipPorts: ipps("127.3.3.40:1", "123.45.67.89:123", "10.0.0.1:123"), curAddr: 2, // should be ignored roamAddr: mustIPPortPtr("5.6.7.8:123"), }, steps: []step{ {b: sprayPacket, want: "127.3.3.40:1,123.45.67.89:123,10.0.0.1:123,5.6.7.8:123"}, {advance: 300 * time.Millisecond, b: regPacket, want: "127.3.3.40:1,123.45.67.89:123,10.0.0.1:123,5.6.7.8:123"}, {advance: 300 * time.Millisecond, b: regPacket, want: "127.3.3.40:1,123.45.67.89:123,10.0.0.1:123,5.6.7.8:123"}, {advance: 3, b: regPacket, want: "5.6.7.8:123"}, {advance: 2 * time.Millisecond, updateDst: mustIPPortPtr("10.0.0.1:123")}, {advance: 3, b: regPacket, want: "10.0.0.1:123"}, }, }, { name: "low_pri", as: &addrSet{ ipPorts: ipps("127.3.3.40:1", "123.45.67.89:123", "10.0.0.1:123"), curAddr: 2, }, steps: []step{ {updateDst: mustIPPortPtr("123.45.67.89:123")}, {updateDst: mustIPPortPtr("123.45.67.89:123")}, }, logCheck: func(t *testing.T, logged []byte) { if n := bytes.Count(logged, []byte(", keeping current ")); n != 1 { t.Errorf("low-prio keeping current logged %d times; want 1", n) } }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { faket := time.Unix(0, 0) var logBuf bytes.Buffer tt.as.Logf = func(format string, args ...interface{}) { fmt.Fprintf(&logBuf, format, args...) t.Logf(format, args...) } tt.as.clock = func() time.Time { return faket } for i, st := range tt.steps { faket = faket.Add(st.advance) if st.updateDst != nil { if err := tt.as.updateDst(*st.updateDst); err != nil { t.Fatal(err) } continue } got, _ := tt.as.appendDests(nil, st.b) if gotStr := joinUDPs(got); gotStr != st.want { t.Errorf("step %d: got %v; want %v", i, gotStr, st.want) } } if tt.logCheck != nil { tt.logCheck(t, logBuf.Bytes()) } }) } } func TestDiscoMessage(t *testing.T) { c := newConn() c.logf = t.Logf c.privateKey = key.NewPrivate() peer1Pub := c.DiscoPublicKey() peer1Priv := c.discoPrivate c.endpointOfDisco = map[tailcfg.DiscoKey]*discoEndpoint{ tailcfg.DiscoKey(peer1Pub): &discoEndpoint{ // ... 
(enough for this test) }, } c.nodeOfDisco = map[tailcfg.DiscoKey]*tailcfg.Node{ tailcfg.DiscoKey(peer1Pub): &tailcfg.Node{ // ... (enough for this test) }, } const payload = "why hello" var nonce [24]byte crand.Read(nonce[:]) pkt := append([]byte("TS💬"), peer1Pub[:]...) pkt = append(pkt, nonce[:]...) pkt = box.Seal(pkt, []byte(payload), &nonce, c.discoPrivate.Public().B32(), peer1Priv.B32()) got := c.handleDiscoMessage(pkt, netaddr.IPPort{}) if !got { t.Error("failed to open it") } } // tests that having a discoEndpoint.String prevents wireguard-go's // log.Printf("%v") of its conn.Endpoint values from using reflect to // walk into read mutex while they're being used and then causing data // races. func TestDiscoStringLogRace(t *testing.T) { de := new(discoEndpoint) var wg sync.WaitGroup wg.Add(2) go func() { defer wg.Done() fmt.Fprintf(ioutil.Discard, "%v", de) }() go func() { defer wg.Done() de.mu.Lock() }() wg.Wait() } func stringifyConfig(cfg wgcfg.Config) string { j, err := json.Marshal(cfg) if err != nil { panic(err) } return string(j) } func Test32bitAlignment(t *testing.T) { var de discoEndpoint var c Conn if off := unsafe.Offsetof(de.lastRecvUnixAtomic); off%8 != 0 { t.Fatalf("discoEndpoint.lastRecvUnixAtomic is not 8-byte aligned") } if off := unsafe.Offsetof(c.derpRecvCountAtomic); off%8 != 0 { t.Fatalf("Conn.derpRecvCountAtomic is not 8-byte aligned") } if !de.isFirstRecvActivityInAwhile() { // verify this doesn't panic on 32-bit t.Error("expected true") } if de.isFirstRecvActivityInAwhile() { t.Error("expected false on second call") } atomic.AddInt64(&c.derpRecvCountAtomic, 1) } // newNonLegacyTestConn returns a new Conn with DisableLegacyNetworking set true. func newNonLegacyTestConn(t testing.TB) *Conn { t.Helper() port := pickPort(t) conn, err := NewConn(Options{ Logf: t.Logf, Port: port, EndpointsFunc: func(eps []string) { t.Logf("endpoints: %q", eps) }, DisableLegacyNetworking: true, }) if err != nil { t.Fatal(err) } return conn } // Tests concurrent DERP readers pushing DERP data into ReceiveIPv4 // (which should blend all DERP reads into UDP reads). func TestDerpReceiveFromIPv4(t *testing.T) { conn := newNonLegacyTestConn(t) defer conn.Close() sendConn, err := net.ListenPacket("udp4", "127.0.0.1:0") if err != nil { t.Fatal(err) } defer sendConn.Close() nodeKey, _ := addTestEndpoint(conn, sendConn) var sends int = 250e3 // takes about a second if testing.Short() { sends /= 10 } senders := runtime.NumCPU() sends -= (sends % senders) var wg sync.WaitGroup defer wg.Wait() t.Logf("doing %v sends over %d senders", sends, senders) ctx, cancel := context.WithCancel(context.Background()) defer conn.Close() defer cancel() doneCtx, cancelDoneCtx := context.WithCancel(context.Background()) cancelDoneCtx() for i := 0; i < senders; i++ { wg.Add(1) regionID := i + 1 go func() { defer wg.Done() for i := 0; i < sends/senders; i++ { res := derpReadResult{ regionID: regionID, n: 123, src: key.Public(nodeKey), copyBuf: func(dst []byte) int { return 123 }, } // First send with the closed context. ~50% of // these should end up going through the // send-a-zero-derpReadResult path, returning // true, in which case we don't want to send again. // We test later that we hit the other path. 
if conn.sendDerpReadResult(doneCtx, res) { continue } if !conn.sendDerpReadResult(ctx, res) { t.Error("unexpected false") return } } }() } zeroSendsStart := testCounterZeroDerpReadResultSend.Value() buf := make([]byte, 1500) for i := 0; i < sends; i++ { n, ep, err := conn.ReceiveIPv4(buf) if err != nil { t.Fatal(err) } _ = n _ = ep } t.Logf("did %d ReceiveIPv4 calls", sends) zeroSends, zeroRecv := testCounterZeroDerpReadResultSend.Value(), testCounterZeroDerpReadResultRecv.Value() if zeroSends != zeroRecv { t.Errorf("did %d zero sends != %d corresponding receives", zeroSends, zeroRecv) } zeroSendDelta := zeroSends - zeroSendsStart if zeroSendDelta == 0 { t.Errorf("didn't see any sends of derpReadResult zero value") } if zeroSendDelta == int64(sends) { t.Errorf("saw %v sends of the derpReadResult zero value which was unexpectedly high (100%% of our %v sends)", zeroSendDelta, sends) } } // addTestEndpoint sets conn's network map to a single peer expected // to receive packets from sendConn (or DERP), and returns that peer's // nodekey and discokey. func addTestEndpoint(conn *Conn, sendConn net.PacketConn) (tailcfg.NodeKey, tailcfg.DiscoKey) { // Give conn just enough state that it'll recognize sendConn as a // valid peer and not fall through to the legacy magicsock // codepath. discoKey := tailcfg.DiscoKey{31: 1} nodeKey := tailcfg.NodeKey{0: 'N', 1: 'K'} conn.SetNetworkMap(&netmap.NetworkMap{ Peers: []*tailcfg.Node{ { Key: nodeKey, DiscoKey: discoKey, Endpoints: []string{sendConn.LocalAddr().String()}, }, }, }) conn.SetPrivateKey(wgkey.Private{0: 1}) conn.CreateEndpoint([32]byte(nodeKey), "0000000000000000000000000000000000000000000000000000000000000001.disco.tailscale:12345") conn.addValidDiscoPathForTest(discoKey, netaddr.MustParseIPPort(sendConn.LocalAddr().String())) return nodeKey, discoKey } func setUpReceiveFrom(tb testing.TB) (roundTrip func()) { conn := newNonLegacyTestConn(tb) tb.Cleanup(func() { conn.Close() }) conn.logf = logger.Discard sendConn, err := net.ListenPacket("udp4", "127.0.0.1:0") if err != nil { tb.Fatal(err) } tb.Cleanup(func() { sendConn.Close() }) addTestEndpoint(conn, sendConn) var dstAddr net.Addr = conn.pconn4.LocalAddr() sendBuf := make([]byte, 1<<10) for i := range sendBuf { sendBuf[i] = 'x' } buf := make([]byte, 2<<10) return func() { if _, err := sendConn.WriteTo(sendBuf, dstAddr); err != nil { tb.Fatalf("WriteTo: %v", err) } n, ep, err := conn.ReceiveIPv4(buf) if err != nil { tb.Fatal(err) } _ = n _ = ep } } // goMajorVersion reports the major Go version and whether it is a Tailscale fork. // If parsing fails, goMajorVersion returns 0, false. 
func goMajorVersion(s string) (version int, isTS bool) { if !strings.HasPrefix(s, "go1.") { return 0, false } mm := s[len("go1."):] var major, rest string for _, sep := range []string{".", "rc", "beta"} { i := strings.Index(mm, sep) if i > 0 { major, rest = mm[:i], mm[i:] break } } if major == "" { major = mm } n, err := strconv.Atoi(major) if err != nil { return 0, false } return n, strings.Contains(rest, "ts") } func TestGoMajorVersion(t *testing.T) { tests := []struct { version string wantN int wantTS bool }{ {"go1.15.8", 15, false}, {"go1.16rc1", 16, false}, {"go1.16rc1", 16, false}, {"go1.15.5-ts3bd89195a3", 15, true}, {"go1.15", 15, false}, } for _, tt := range tests { n, ts := goMajorVersion(tt.version) if tt.wantN != n || tt.wantTS != ts { t.Errorf("goMajorVersion(%s) = %v, %v, want %v, %v", tt.version, n, ts, tt.wantN, tt.wantTS) } } } func TestReceiveFromAllocs(t *testing.T) { // Go 1.16 and before: allow 3 allocs. // Go Tailscale fork, Go 1.17+: only allow 2 allocs. major, ts := goMajorVersion(runtime.Version()) maxAllocs := 3 if major >= 17 || ts { maxAllocs = 2 } t.Logf("allowing %d allocs for Go version %q", maxAllocs, runtime.Version()) roundTrip := setUpReceiveFrom(t) avg := int(testing.AllocsPerRun(100, roundTrip)) if avg > maxAllocs { t.Fatalf("expected %d allocs in ReceiveFrom, got %v", maxAllocs, avg) } } func BenchmarkReceiveFrom(b *testing.B) { roundTrip := setUpReceiveFrom(b) for i := 0; i < b.N; i++ { roundTrip() } } func BenchmarkReceiveFrom_Native(b *testing.B) { recvConn, err := net.ListenPacket("udp4", "127.0.0.1:0") if err != nil { b.Fatal(err) } defer recvConn.Close() recvConnUDP := recvConn.(*net.UDPConn) sendConn, err := net.ListenPacket("udp4", "127.0.0.1:0") if err != nil { b.Fatal(err) } defer sendConn.Close() var dstAddr net.Addr = recvConn.LocalAddr() sendBuf := make([]byte, 1<<10) for i := range sendBuf { sendBuf[i] = 'x' } buf := make([]byte, 2<<10) for i := 0; i < b.N; i++ { if _, err := sendConn.WriteTo(sendBuf, dstAddr); err != nil { b.Fatalf("WriteTo: %v", err) } if _, _, err := recvConnUDP.ReadFromUDP(buf); err != nil { b.Fatalf("ReadFromUDP: %v", err) } } } // Test that a netmap update where node changes its node key but // doesn't change its disco key doesn't result in a broken state. // // https://github.com/tailscale/tailscale/issues/1391 func TestSetNetworkMapChangingNodeKey(t *testing.T) { conn := newNonLegacyTestConn(t) t.Cleanup(func() { conn.Close() }) var logBuf bytes.Buffer conn.logf = func(format string, a ...interface{}) { fmt.Fprintf(&logBuf, format, a...) 
if !bytes.HasSuffix(logBuf.Bytes(), []byte("\n")) { logBuf.WriteByte('\n') } } conn.SetPrivateKey(wgkey.Private{0: 1}) discoKey := tailcfg.DiscoKey{31: 1} nodeKey1 := tailcfg.NodeKey{0: 'N', 1: 'K', 2: '1'} nodeKey2 := tailcfg.NodeKey{0: 'N', 1: 'K', 2: '2'} conn.SetNetworkMap(&netmap.NetworkMap{ Peers: []*tailcfg.Node{ { Key: nodeKey1, DiscoKey: discoKey, Endpoints: []string{"192.168.1.2:345"}, }, }, }) _, err := conn.CreateEndpoint([32]byte(nodeKey1), "0000000000000000000000000000000000000000000000000000000000000001.disco.tailscale:12345") if err != nil { t.Fatal(err) } for i := 0; i < 3; i++ { conn.SetNetworkMap(&netmap.NetworkMap{ Peers: []*tailcfg.Node{ { Key: nodeKey2, DiscoKey: discoKey, Endpoints: []string{"192.168.1.2:345"}, }, }, }) } de := conn.endpointOfDisco[discoKey] if de != nil && de.publicKey != nodeKey2 { t.Fatalf("discoEndpoint public key = %q; want %q", de.publicKey[:], nodeKey2[:]) } log := logBuf.String() wantSub := map[string]int{ "magicsock: got updated network map; 1 peers (1 with discokey)": 2, "magicsock: disco key discokey:0000000000000000000000000000000000000000000000000000000000000001 changed from node key [TksxA] to [TksyA]": 1, } for sub, want := range wantSub { got := strings.Count(log, sub) if got != want { t.Errorf("in log, count of substring %q = %v; want %v", sub, got, want) } } if t.Failed() { t.Logf("log output: %s", log) } }
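// A small illustration (hypothetical call sites, mirroring the cases already
// listed in TestGoMajorVersion above): plain releases and prereleases report
// only the major number, while the Tailscale fork suffix additionally sets the
// second return value.
//
//	n, ts := goMajorVersion("go1.15.8")             // n == 15, ts == false
//	n, ts = goMajorVersion("go1.16rc1")             // n == 16, ts == false
//	n, ts = goMajorVersion("go1.15.5-ts3bd89195a3") // n == 15, ts == true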
[ "\"RUN_CURSED_TESTS\"" ]
[]
[ "RUN_CURSED_TESTS" ]
[]
["RUN_CURSED_TESTS"]
go
1
0
run_summarization.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # Modifications Copyright 2017 Abigail See # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """This is the top-level file to train, evaluate or test your summarization model""" # coding=utf-8 import sys import time import os os.environ["CUDA_VISIBLE_DEVICES"] = "0" import tensorflow as tf import numpy as np from collections import namedtuple from data import Vocab, get_stop_word_ids from batcher import Batcher from model import SummarizationModel from decode import BeamSearchDecoder import util from tensorflow.python import debug as tf_debug FLAGS = tf.app.flags.FLAGS # Where to find data tf.app.flags.DEFINE_string('data_path', '/tmp/tmp_input.txt', 'Path expression to tf.Example datafiles. Can include wildcards to access multiple datafiles.') tf.app.flags.DEFINE_string('vocab_path', '/data/nssd_data/finished_files/vocab', 'Path expression to text vocabulary file.') tf.app.flags.DEFINE_string('stop_words_path', '/data/nssd_data/stopword/stopword_cn.txt', 'Path expression to stop words file') tf.app.flags.DEFINE_string('ref_dir', '/data/__DATASET__/val_reference/', 'Path to reference words') # Important settings tf.app.flags.DEFINE_string('mode', 'decode', 'must be one of train/eval/decode') tf.app.flags.DEFINE_boolean('single_pass', True, 'For decode mode only. If True, run eval on the full dataset using a fixed checkpoint, i.e. take the current checkpoint, and use it to produce one summary for each example in the dataset, write the summaries to file and then get ROUGE scores for the whole dataset. If False (default), run concurrent decoding, i.e. repeatedly load latest checkpoint, use it to produce summaries for randomly-chosen examples and log the results to screen, indefinitely.') tf.app.flags.DEFINE_boolean('decode_only', True, 'If True, only decode, do not calculate f1 score. only for chinese, only for special format data_path') # Where to save output tf.app.flags.DEFINE_string('log_root', '/data/nssd_data/model/', 'Root directory for all logging.') tf.app.flags.DEFINE_string('exp_name', 'nssd_COPM_experiment', 'Name for experiment. 
Logs will be saved in a directory with this name, under log_root.') tf.app.flags.DEFINE_string('language', 'chinese', 'language') # Encoder and decoder settings tf.app.flags.DEFINE_string('cell_type', 'LSTM', 'LSTM or GRU') tf.app.flags.DEFINE_float('dropout', 0.0, 'for dropout') # Hyperparameters tf.app.flags.DEFINE_integer('hidden_dim', 256, 'dimension of RNN hidden states') tf.app.flags.DEFINE_integer('emb_dim', 128, 'dimension of word embeddings') tf.app.flags.DEFINE_integer('batch_size', 16, 'minibatch size') tf.app.flags.DEFINE_integer('max_enc_steps', 400, 'max timesteps of encoder (max source text tokens)') tf.app.flags.DEFINE_integer('max_dec_steps', 6, 'max timesteps of decoder (max summary tokens)') tf.app.flags.DEFINE_integer('beam_size', 50, 'beam size for beam search decoding.') tf.app.flags.DEFINE_integer('beam_depth', 6, 'beam depth for beam search decoding') tf.app.flags.DEFINE_integer('min_dec_steps', 1, 'Minimum sequence length of generated summary. Applies only for beam search decoding mode') tf.app.flags.DEFINE_integer('vocab_size', 50000, 'Size of vocabulary. These will be read from the vocabulary file in order. If the vocabulary file contains fewer words than this number, or if this number is set to 0, will take all words in the vocabulary file.') tf.app.flags.DEFINE_integer('max_keyphrase_num', 10, 'max keyphrase number') tf.app.flags.DEFINE_integer('max_title_len', 20, 'max title length') tf.app.flags.DEFINE_integer('occurrence_window_size', 3, 'window size while calculating occurrence matrix') tf.app.flags.DEFINE_float('lr', 0.15, 'learning rate') tf.app.flags.DEFINE_float('adagrad_init_acc', 0.1, 'initial accumulator value for Adagrad') tf.app.flags.DEFINE_float('rand_unif_init_mag', 0.02, 'magnitude for lstm cells random uniform inititalization') tf.app.flags.DEFINE_float('trunc_norm_init_std', 1e-4, 'std of trunc norm init, used for initializing everything else') tf.app.flags.DEFINE_float('max_grad_norm', 2.0, 'for gradient clipping') tf.app.flags.DEFINE_string('optimizer', 'Adagrad', 'Adagrad or Adam') # Pointer-generator or baseline model tf.app.flags.DEFINE_boolean('pointer_gen', True, 'If True, use pointer-generator model. If False, use baseline model.') # Coverage hyperparameters tf.app.flags.DEFINE_boolean('coverage', True, 'Use coverage mechanism. Note, the experiments reported in the ACL paper train WITHOUT coverage until converged, and then train for a short phase WITH coverage afterwards. i.e. to reproduce the results in the ACL paper, turn this off for most of training then turn on for a short phase at the end.') tf.app.flags.DEFINE_float('cov_loss_wt', 1.0, 'Weight of coverage loss (lambda in the paper). 
If zero, then no incentive to minimize coverage loss.') # VieZhong Improvement Hyperparameters tf.app.flags.DEFINE_boolean('attention_weighted', False, 'Whether attention mechanism is weighted.') tf.app.flags.DEFINE_boolean('coverage_weighted', False, 'Whether coverage mechanism is weighted.') tf.app.flags.DEFINE_boolean('coverage_weighted_expansion', False, 'Whether coverage mechanism is weighted.') tf.app.flags.DEFINE_boolean('co_occurrence', False, 'Whether to use co_occurrence factor.') tf.app.flags.DEFINE_boolean('co_occurrence_h', True, 'Whether to use co_occurrence_h factor.') tf.app.flags.DEFINE_boolean('co_occurrence_i', False, 'Whether to concat co_occurrence matrix to encoder embeddings.') tf.app.flags.DEFINE_boolean('prev_relation', False, 'Whether to use the previous output word to predict the next output word.') tf.app.flags.DEFINE_boolean('source_siding_bridge', False, 'Whether to use source siding bridging model.') tf.app.flags.DEFINE_boolean('target_siding_bridge', False, 'Whether to use target siding bridging model.') tf.app.flags.DEFINE_boolean('markov_attention', False, 'Whether to use markov attention mechanism.') tf.app.flags.DEFINE_boolean('markov_attention_contribution', False, 'Whether to use markov attention contribution mechanism.') tf.app.flags.DEFINE_boolean('markov_attention_contribution_used_x', False, 'Whether to use markov attention contribution mechanism.') tf.app.flags.DEFINE_boolean('tagger_attention', False, 'Whether to use tagger_attention factor') tf.app.flags.DEFINE_boolean('tagger_encoding', False, 'Whether to use tagger_attention factor') tf.app.flags.DEFINE_boolean('title_engaged', False, 'Whether to use title_engaged factor') tf.app.flags.DEFINE_boolean('title_guided', False, 'Whether to use title_guided factor') tf.app.flags.DEFINE_boolean('top_ten_kept', False, 'Whether to use top_ten_kept factor') tf.app.flags.DEFINE_boolean('generation_only', False, 'Whether in generation mode only') tf.app.flags.DEFINE_boolean('copy_only', False, 'Whether in copy mode only') # Utility flags, for restoring and changing checkpoints tf.app.flags.DEFINE_boolean('convert_to_coverage_model', False, 'Convert a non-coverage model to a coverage model. Turn this on and run in train mode. Your current training model will be copied to a new version (same name with _cov_init appended) that will be ready to run with coverage flag turned on, for the coverage training stage.') tf.app.flags.DEFINE_boolean('restore_best_model', False, 'Restore the best model in the eval/ dir and save it in the train/ dir, ready to be used for further training. Useful for early stopping, or if your training checkpoint has become corrupted with e.g. NaN values.') # Debugging. See https://www.tensorflow.org/programmers_guide/debugger tf.app.flags.DEFINE_boolean('debug', False, "Run in tensorflow's debug mode (watches for NaN/inf values)") def calc_running_avg_loss(loss, running_avg_loss, summary_writer, step, decay=0.99): """Calculate the running average loss via exponential decay. This is used to implement early stopping w.r.t. a more smooth loss curve than the raw loss curve. Args: loss: loss on the most recent eval step running_avg_loss: running_avg_loss so far summary_writer: FileWriter object to write for tensorboard step: training iteration step decay: rate of exponential decay, a float between 0 and 1. Larger is smoother. 
Returns: running_avg_loss: new running average loss """ if running_avg_loss == 0: # on the first iteration just take the loss running_avg_loss = loss else: running_avg_loss = running_avg_loss * decay + (1 - decay) * loss running_avg_loss = min(running_avg_loss, 12) # clip loss_sum = tf.Summary() tag_name = 'running_avg_loss/decay=%f' % (decay) loss_sum.value.add(tag=tag_name, simple_value=running_avg_loss) summary_writer.add_summary(loss_sum, step) tf.logging.info('running_avg_loss: %f', running_avg_loss) return running_avg_loss def restore_best_model(): """Load bestmodel file from eval directory, add variables for adagrad, and save to train directory""" tf.logging.info("Restoring bestmodel for training...") # Initialize all vars in the model sess = tf.Session(config=util.get_config()) print("Initializing all variables...") sess.run(tf.initialize_all_variables()) # Restore the best model from eval dir saver = tf.train.Saver([v for v in tf.all_variables() if "Adagrad" not in v.name]) print("Restoring all non-adagrad variables from best model in eval dir...") curr_ckpt = util.load_ckpt(saver, sess, "eval") print ("Restored %s." % curr_ckpt) # Save this model to train dir and quit new_model_name = curr_ckpt.split("/")[-1].replace("bestmodel", "model") new_fname = os.path.join(FLAGS.log_root, "train", new_model_name) print ("Saving model to %s..." % (new_fname)) new_saver = tf.train.Saver() # this saver saves all variables that now exist, including Adagrad variables new_saver.save(sess, new_fname) print ("Saved.") exit() def convert_to_coverage_model(): """Load non-coverage checkpoint, add initialized extra variables for coverage, and save as new checkpoint""" tf.logging.info("converting non-coverage model to coverage model..") # initialize an entire coverage model from scratch sess = tf.Session(config=util.get_config()) print("initializing everything...") sess.run(tf.global_variables_initializer()) # load all non-coverage weights from checkpoint saver = tf.train.Saver([v for v in tf.global_variables() if "coverage" not in v.name and "Adagrad" not in v.name]) print("restoring non-coverage variables...") curr_ckpt = util.load_ckpt(saver, sess) print("restored.") # save this model and quit new_fname = curr_ckpt + '_cov_init' print("saving model to %s..." 
% (new_fname)) new_saver = tf.train.Saver() # this one will save all variables that now exist new_saver.save(sess, new_fname) print("saved.") exit() def setup_training(model, batcher): """Does setup before starting training (run_training)""" train_dir = os.path.join(FLAGS.log_root, "train") if not os.path.exists(train_dir): os.makedirs(train_dir) model.build_graph() # build the graph if FLAGS.convert_to_coverage_model: assert FLAGS.coverage, "To convert your non-coverage model to a coverage model, run with convert_to_coverage_model=True and coverage=True" convert_to_coverage_model() if FLAGS.restore_best_model: restore_best_model() saver = tf.train.Saver(max_to_keep=3, keep_checkpoint_every_n_hours=6) # keep 3 checkpoints at a time sv = tf.train.Supervisor(logdir=train_dir, is_chief=True, saver=saver, summary_op=None, save_summaries_secs=60, # save summaries for tensorboard every 60 secs save_model_secs=60, # checkpoint every 60 secs global_step=model.global_step) summary_writer = sv.summary_writer tf.logging.info("Preparing or waiting for session...") sess_context_manager = sv.prepare_or_wait_for_session(config=util.get_config()) tf.logging.info("Created session.") try: run_training(model, batcher, sess_context_manager, sv, summary_writer) # this is an infinite loop until interrupted except KeyboardInterrupt: tf.logging.info("Caught keyboard interrupt on worker. Stopping supervisor...") sv.stop() def run_training(model, batcher, sess_context_manager, sv, summary_writer): """Repeatedly runs training iterations, logging loss to screen and writing summaries""" tf.logging.info("starting run_training") with sess_context_manager as sess: if FLAGS.debug: # start the tensorflow debugger sess = tf_debug.LocalCLIDebugWrapperSession(sess) sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan) while True: # repeats until interrupted batch = batcher.next_batch() tf.logging.info('running training step...') t0=time.time() results = model.run_train_step(sess, batch) t1=time.time() tf.logging.info('seconds for training step: %.3f', t1-t0) loss = results['loss'] tf.logging.info('loss: %f', loss) # print the loss to screen if not np.isfinite(loss): raise Exception("Loss is not finite. Stopping.") # continue if FLAGS.coverage: coverage_loss = results['coverage_loss'] tf.logging.info("coverage_loss: %f", coverage_loss) # print the coverage loss to screen # get the summaries and iteration number so we can write summaries to tensorboard summaries = results['summaries'] # we will write these summaries to tensorboard using summary_writer train_step = results['global_step'] # we need this to update our running average loss summary_writer.add_summary(summaries, train_step) # write the summaries if train_step % 100 == 0: # flush the summary writer every so often summary_writer.flush() def run_eval(model, batcher, vocab): """Repeatedly runs eval iterations, logging to screen and writing summaries. 
Saves the model with the best loss seen so far.""" model.build_graph() # build the graph saver = tf.train.Saver(max_to_keep=3) # we will keep 3 best checkpoints at a time sess = tf.Session(config=util.get_config()) eval_dir = os.path.join(FLAGS.log_root, "eval") # make a subdir of the root dir for eval data bestmodel_save_path = os.path.join(eval_dir, 'bestmodel') # this is where checkpoints of best models are saved summary_writer = tf.summary.FileWriter(eval_dir) running_avg_loss = 0 # the eval job keeps a smoother, running average loss to tell it when to implement early stopping best_loss = None # will hold the best loss achieved so far while True: _ = util.load_ckpt(saver, sess) # load a new checkpoint batch = batcher.next_batch() # get the next batch # run eval on the batch t0=time.time() results = model.run_eval_step(sess, batch) t1=time.time() tf.logging.info('seconds for batch: %.2f', t1-t0) # print the loss and coverage loss to screen loss = results['loss'] tf.logging.info('loss: %f', loss) if FLAGS.coverage: coverage_loss = results['coverage_loss'] tf.logging.info("coverage_loss: %f", coverage_loss) # add summaries summaries = results['summaries'] train_step = results['global_step'] summary_writer.add_summary(summaries, train_step) # calculate running avg loss running_avg_loss = calc_running_avg_loss(np.asscalar(loss), running_avg_loss, summary_writer, train_step) # If running_avg_loss is best so far, save this checkpoint (early stopping). # These checkpoints will appear as bestmodel-<iteration_number> in the eval dir if best_loss is None or running_avg_loss < best_loss: tf.logging.info('Found new best model with %.3f running_avg_loss. Saving to %s', running_avg_loss, bestmodel_save_path) saver.save(sess, bestmodel_save_path, global_step=train_step, latest_filename='checkpoint_best') best_loss = running_avg_loss # flush the summary writer every so often if train_step % 100 == 0: summary_writer.flush() def main(unused_argv): if len(unused_argv) != 1: # prints a message if you've entered flags incorrectly raise Exception("Problem with flags: %s" % unused_argv) tf.logging.set_verbosity(tf.logging.INFO) # choose what level of logging you want tf.logging.info('Starting seq2seq_attention in %s mode...', (FLAGS.mode)) # Change log_root to FLAGS.log_root/FLAGS.exp_name and create the dir if necessary FLAGS.log_root = os.path.join(FLAGS.log_root, FLAGS.exp_name) if not os.path.exists(FLAGS.log_root): if FLAGS.mode=="train": os.makedirs(FLAGS.log_root) else: raise Exception("Logdir %s doesn't exist. Run in train mode to create it." % (FLAGS.log_root)) vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size) # create a vocabulary stop_word_ids = get_stop_word_ids(FLAGS.stop_words_path, vocab) if FLAGS.pointer_gen and (FLAGS.co_occurrence or FLAGS.prev_relation or FLAGS.co_occurrence_h or FLAGS.co_occurrence_i or (FLAGS.coverage and FLAGS.coverage_weighted)) or FLAGS.attention_weighted or FLAGS.markov_attention or FLAGS.markov_attention_contribution else None # If in decode mode, set batch_size = beam_size # Reason: in decode mode, we decode one example at a time. # On each step, we have beam_size-many hypotheses in the beam, so we need to make a batch of these hypotheses. 
if FLAGS.mode == 'decode': FLAGS.batch_size = FLAGS.beam_size # If single_pass=True, check we're in decode mode if FLAGS.single_pass and FLAGS.mode!='decode': raise Exception("The single_pass flag should only be True in decode mode") # if FLAGS.prev_relation and not FLAGS.co_occurrence: # raise Exception("The co_occurrence flag should be True when the prev_relation flag is True") # Make a namedtuple hps, containing the values of the hyperparameters that the model needs hparam_list = ['top_ten_kept', 'decode_only', 'generation_only', 'copy_only', 'occurrence_window_size', 'max_title_len', 'title_engaged', 'title_guided', 'ref_dir', 'tagger_encoding', 'tagger_attention', 'source_siding_bridge', 'target_siding_bridge', 'language', 'dropout', 'optimizer', 'mode', 'lr', 'adagrad_init_acc', 'rand_unif_init_mag', 'trunc_norm_init_std', 'max_grad_norm', 'hidden_dim', 'emb_dim', 'batch_size', 'beam_depth', 'max_dec_steps', 'max_enc_steps', 'max_keyphrase_num', 'attention_weighted', 'coverage', 'coverage_weighted', 'coverage_weighted_expansion', 'co_occurrence', 'prev_relation', 'co_occurrence_h', 'co_occurrence_i', 'cov_loss_wt', 'pointer_gen', 'cell_type', 'markov_attention', 'markov_attention_contribution', 'markov_attention_contribution_used_x'] hps_dict = {} for key,val in FLAGS.__flags.items(): # for each flag if key in hparam_list: # if it's in the list hps_dict[key] = val # add it to the dict hps = namedtuple("HParams", hps_dict.keys())(**hps_dict) # Create a batcher object that will create minibatches of data batcher = Batcher(FLAGS.data_path, vocab, hps, single_pass=FLAGS.single_pass, stop_words=stop_word_ids) tf.set_random_seed(111) # a seed value for randomness if hps.mode == 'train': print("creating model...") model = SummarizationModel(hps, vocab) setup_training(model, batcher) elif hps.mode == 'eval': model = SummarizationModel(hps, vocab) run_eval(model, batcher, vocab) elif hps.mode == 'decode': decode_model_hps = hps # This will be the hyperparameters for the decoder model decode_model_hps = hps._replace(max_dec_steps=1) # The model is configured with max_dec_steps=1 because we only ever run one step of the decoder at a time (to do beam search). Note that the batcher is initialized with max_dec_steps equal to e.g. 100 because the batches need to contain the full summaries model = SummarizationModel(decode_model_hps, vocab) decoder = BeamSearchDecoder(model, batcher, vocab) decoder.decode() # decode indefinitely (unless single_pass=True, in which case deocde the dataset exactly once) else: raise ValueError("The 'mode' flag must be one of train/eval/decode") if __name__ == '__main__': tf.app.run()
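# A minimal invocation sketch (paths below are placeholders, not values from the
# project; the flag names are the ones defined with tf.app.flags above):
#
#   python run_summarization.py \
#       --mode=train \
#       --data_path=/path/to/train_data \
#       --vocab_path=/path/to/vocab \
#       --stop_words_path=/path/to/stopwords.txt \
#       --log_root=/path/to/log_root \
#       --exp_name=my_experiment
#
# For beam-search decoding, pass --mode=decode (optionally with --single_pass=True);
# main() then forces batch_size to beam_size so each batch holds the beam hypotheses.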
[]
[]
[ "CUDA_VISIBLE_DEVICES" ]
[]
["CUDA_VISIBLE_DEVICES"]
python
1
0
pkg/api/testapi/testapi.go
/* Copyright 2014 Google Inc. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package testapi provides a helper for retrieving the KUBE_API_VERSION environment variable. package testapi import ( "fmt" "os" "strings" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" ) // Version returns the API version to test against, as set by the KUBE_API_VERSION env var. func Version() string { version := os.Getenv("KUBE_API_VERSION") if version == "" { version = latest.Version } return version } // Codec returns the codec for the API version to test against, as set by the // KUBE_API_VERSION env var. func Codec() runtime.Codec { interfaces, err := latest.InterfacesFor(Version()) if err != nil { panic(err) } return interfaces.Codec } // Converter returns the api.Scheme for the API version to test against, as set by the // KUBE_API_VERSION env var. func Converter() runtime.ObjectConvertor { interfaces, err := latest.InterfacesFor(Version()) if err != nil { panic(err) } return interfaces.ObjectConvertor } // MetadataAccessor returns the MetadataAccessor for the API version to test against, // as set by the KUBE_API_VERSION env var. func MetadataAccessor() meta.MetadataAccessor { interfaces, err := latest.InterfacesFor(Version()) if err != nil { panic(err) } return interfaces.MetadataAccessor } // SelfLink returns a self link that will appear to be for the version Version(). // 'resource' should be the resource path, e.g. "pods" for the Pod type. 'name' should be // empty for lists. func SelfLink(resource, name string) string { if name == "" { return fmt.Sprintf("/api/%s/%s", Version(), resource) } return fmt.Sprintf("/api/%s/%s/%s", Version(), resource, name) } // Returns the appropriate path for the given prefix (watch, proxy, redirect, etc), resource, namespace and name. // For ex, this is of the form: // /api/v1beta1/watch/pods/pod0 for v1beta1 and // /api/v1beta3/watch/namespaces/foo/pods/pod0 for v1beta3. func ResourcePathWithPrefix(prefix, resource, namespace, name string) string { path := "/api/" + Version() if prefix != "" { path = path + "/" + prefix } if !api.PreV1Beta3(Version()) { if namespace != "" { path = path + "/namespaces/" + namespace } // Resource names in v1beta3 are lower case. resource = strings.ToLower(resource) } if resource != "" { path = path + "/" + resource } if name != "" { path = path + "/" + name } return path } // Returns the appropriate path for the given resource, namespace and name. // For ex, this is of the form: // /api/v1beta1/pods/pod0 for v1beta1 and // /api/v1beta3/namespaces/foo/pods/pod0 for v1beta3. func ResourcePath(resource, namespace, name string) string { return ResourcePathWithPrefix("", resource, namespace, name) } // Returns the appropriate path along with the query params for the given resource, namespace and name. 
// For ex, this is of the form: // /api/v1beta1/pods/pod0?namespace=foo for v1beta1 and // /api/v1beta3/namespaces/foo/pods/pod0 for v1beta3. func ResourcePathWithQueryParams(resource, namespace, name string) string { path := ResourcePath(resource, namespace, name) // Add namespace as query param for pre v1beta3. if api.PreV1Beta3(Version()) && namespace != "" { path = path + "?namespace=" + namespace } return path }
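// Worked examples (hypothetical, but consistent with the comments above; the
// version comes from the KUBE_API_VERSION environment variable):
//
//	// KUBE_API_VERSION=v1beta1
//	ResourcePathWithQueryParams("pods", "foo", "pod0") // "/api/v1beta1/pods/pod0?namespace=foo"
//
//	// KUBE_API_VERSION=v1beta3
//	ResourcePathWithQueryParams("pods", "foo", "pod0") // "/api/v1beta3/namespaces/foo/pods/pod0"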
[ "\"KUBE_API_VERSION\"" ]
[]
[ "KUBE_API_VERSION" ]
[]
["KUBE_API_VERSION"]
go
1
0
commons/facts.go
package commons import ( log "github.com/Sirupsen/logrus" "net" "os/exec" "os" ) //Facts store useful info about the node type Facts struct { Addresses map[string]string HasChanged bool `json:"-"` Container string City string CountryCode string CountryName string RegionCode string RegionName string ZipCode string TimeZone string MetroCode int Latitude float32 Longitude float32 ContinentCode string PublicKey string Hostname string } //Dumps contain the results of tinc dump commands type Dumps struct { Nodes string Edges string Subnets string Connections string Graph string Invitations string } //Get the dump commands output func (d *Dumps) Get(c Config) { if d == nil { d = &Dumps{} } out, err := exec.Command("/usr/sbin/tinc", "-n", c.Vpn.Name, "dump", "nodes").Output() if err != nil { log.Error(err) } d.Nodes = string(out) out, err = exec.Command("/usr/sbin/tinc", "-n", c.Vpn.Name, "dump", "edges").Output() if err != nil { log.Error(err) } d.Edges = string(out) out, err = exec.Command("/usr/sbin/tinc", "-n", c.Vpn.Name, "dump", "subnets"). Output() if err != nil { log.Error(err) } d.Subnets = string(out) out, err = exec.Command("/usr/sbin/tinc", "-n", c.Vpn.Name, "dump", "connections"). Output() if err != nil { log.Error(err) } d.Connections = string(out) out, err = exec.Command("/usr/sbin/tinc", "-n", c.Vpn.Name, "dump", "graph").Output() if err != nil { log.Error(err) } d.Graph = string(out) out, err = exec.Command("/usr/sbin/tinc", "-n", c.Vpn.Name, "dump", "invitations"). Output() if err != nil { log.Error(err) } d.Invitations = string(out) } //AddAddress add ipv4 or ipv6 address to the Fact map //can only add global unicast address func (f *Facts) AddAddress(addr string) { ip := net.ParseIP(addr) if ip == nil { log.Errorf("%s is not a valid Address\n", addr) } if ip.IsGlobalUnicast() { if f.Addresses == nil { f.Addresses = make(map[string]string) } if _, ok := f.Addresses[addr]; !ok { f.Addresses[addr] = "" f.HasChanged = true } } } //GetContainerStatus ... func (f *Facts) GetContainerStatus() { if f.Container != os.Getenv("container") { f.HasChanged = true } f.Container = os.Getenv("container") }
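// A brief usage sketch (hypothetical addresses): AddAddress stores only global
// unicast IPs and sets HasChanged the first time a new address is recorded.
//
//	f := &Facts{}
//	f.AddAddress("10.1.2.3")  // global unicast: stored, f.HasChanged == true
//	f.AddAddress("10.1.2.3")  // already present: no change
//	f.AddAddress("127.0.0.1") // loopback, not global unicast: ignored
//	f.AddAddress("not-an-ip") // fails net.ParseIP and is only logged as an error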
[ "\"container\"", "\"container\"" ]
[]
[ "container" ]
[]
["container"]
go
1
0
update_pic.py
import os
import django

# Point Django at the project settings before importing anything that touches models.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "movie.settings")
django.setup()

base_dir = '../media/movie_cover'
files = os.listdir(base_dir)

from populate_data.populate_movies import replace_special_char

# Rename each cover file to its normalized name (special characters replaced).
for file in files:
    os.rename(os.path.join(base_dir, file), os.path.join(base_dir, replace_special_char(file)))

# from user.models import Movie
#
# movies = Movie.objects.all()
# for movie in movies:
#     movie.image_link = replace_special_char(str(movie.image_link))
#     # movie.pic.file.name=movie.pic.file.name.replac
#     # movie.pic=movie.pic.replace(' ','_')
#     movie.save()
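# A hedged variant of the rename loop (illustrative only; it assumes the same
# replace_special_char helper). It skips directories and refuses to overwrite a
# file whose normalized name already exists:
#
#   for name in os.listdir(base_dir):
#       src = os.path.join(base_dir, name)
#       dst = os.path.join(base_dir, replace_special_char(name))
#       if os.path.isfile(src) and src != dst and not os.path.exists(dst):
#           os.rename(src, dst)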
[]
[]
[]
[]
[]
python
0
0
setup.py
#!/usr/bin/env python # # Copyright 2017 Pixar Animation Studios # # Licensed under the Apache License, Version 2.0 (the "Apache License") # with the following modification; you may not use this file except in # compliance with the Apache License and the following modification to it: # Section 6. Trademarks. is deleted and replaced with: # # 6. Trademarks. This License does not grant permission to use the trade # names, trademarks, service marks, or product names of the Licensor # and its affiliates, except as required to comply with Section 4(c) of # the License and to reproduce the content of the NOTICE file. # # You may obtain a copy of the Apache License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the Apache License with the above modification is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the Apache License for the specific # language governing permissions and limitations under the Apache License. # """ Configuration file for the OpenTimelineIO Python Package. """ import os import sys import unittest from setuptools import setup import setuptools.command.build_py import distutils.version import pip # Make sure the environment contains an up to date enough version of pip. PIP_VERSION = pip.__version__ REQUIRED_PIP_VERSION = "6.0.0" if ( distutils.version.StrictVersion(PIP_VERSION) <= distutils.version.StrictVersion(REQUIRED_PIP_VERSION) ): sys.stderr.write( "Your pip version is: '{}', OpenTimelineIO requires at least " "version '{}'. Please update pip by running:\n" "pip install -U pip\n".format( PIP_VERSION, REQUIRED_PIP_VERSION, ) ) sys.exit(1) # Make sure the environment contains an up to date enough version of setuptools. try: import setuptools.version SETUPTOOLS_VERSION = setuptools.version.__version__ except ImportError: SETUPTOOLS_VERSION = setuptools.__version__ REQUIRED_SETUPTOOLS_VERSION = '20.5.0' if ( distutils.version.StrictVersion(SETUPTOOLS_VERSION) <= distutils.version.StrictVersion(REQUIRED_SETUPTOOLS_VERSION) ): sys.stderr.write( "Your setuptools version is: '{}', OpenTimelineIO requires at least " "version '{}'. Please update setuptools by running:\n" "pip install -U setuptools\n".format( SETUPTOOLS_VERSION, REQUIRED_SETUPTOOLS_VERSION, ) ) sys.exit(1) # check the python version first if ( sys.version_info[0] < 2 or (sys.version_info[0] == 2 and sys.version_info[1] < 7) ): sys.exit( 'OpenTimelineIO requires python2.7 or greater, detected version:' ' {}.{}'.format( sys.version_info[0], sys.version_info[1] ) ) # Metadata that gets stamped into the __init__ files during the build phase. 
PROJECT_METADATA = { "version": "0.11.0.dev1", "author": 'Pixar Animation Studios', "author_email": '[email protected]', "license": 'Modified Apache 2.0 License', } METADATA_TEMPLATE = """ __version__ = "{version}" __author__ = "{author}" __author_email__ = "{author_email}" __license__ = "{license}" """ def _append_version_info_to_init_scripts(build_lib): """Stamp PROJECT_METADATA into __init__ files.""" for module in [ "opentimelineio", "opentimelineio_contrib", "opentimelineview", ]: target_file = os.path.join(build_lib, module, "__init__.py") source_file = os.path.join( os.path.dirname(__file__), module, "__init__.py" ) # get the base data from the original file with open(source_file, 'r') as fi: src_data = fi.read() # write that + the suffix to the target file with open(target_file, 'w') as fo: fo.write(src_data) fo.write(METADATA_TEMPLATE.format(**PROJECT_METADATA)) class AddMetadataToInits(setuptools.command.build_py.build_py): """Stamps PROJECT_METADATA into __init__ files.""" def run(self): setuptools.command.build_py.build_py.run(self) if not self.dry_run: _append_version_info_to_init_scripts(self.build_lib) def test_otio(): """Discovers and runs tests""" try: # Clear the environment of a preset media linker del os.environ['OTIO_DEFAULT_MEDIA_LINKER'] except KeyError: pass return unittest.TestLoader().discover('tests') # copied from first paragraph of README.md LONG_DESCRIPTION = """OpenTimelineIO is an interchange format and API for editorial cut information. OTIO is not a container format for media, rather it contains information about the order and length of cuts and references to external media. OTIO includes both a file format and an API for manipulating that format. It also includes a plugin architecture for writing adapters to convert from/to existing editorial timeline formats. It also implements a dependency- less library for dealing strictly with time, opentime. You can provide adapters for your video editing tool or pipeline as needed. 
Each adapter allows for import/export between that proprietary tool and the OpenTimelineIO format.""" setup( name='OpenTimelineIO', description='Editorial interchange format and API', long_description=LONG_DESCRIPTION, url='http://opentimeline.io', project_urls={ 'Source': 'https://github.com/PixarAnimationStudios/OpenTimelineIO', 'Documentation': 'https://opentimelineio.readthedocs.io/', 'Issues': 'https://github.com/PixarAnimationStudios/OpenTimelineIO/issues', }, classifiers=[ 'Development Status :: 4 - Beta', 'Topic :: Multimedia :: Graphics', 'Topic :: Multimedia :: Video', 'Topic :: Multimedia :: Video :: Display', 'Topic :: Multimedia :: Video :: Non-Linear Editor', 'Topic :: Software Development :: Libraries :: Python Modules', 'License :: Other/Proprietary License', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Operating System :: OS Independent', 'Natural Language :: English', ], keywords='film tv editing editorial edit non-linear edl time', platforms='any', packages=[ 'opentimelineio', 'opentimelineio.adapters', 'opentimelineio.algorithms', 'opentimelineio.core', 'opentimelineio.schema', 'opentimelineio.schemadef', 'opentimelineio.plugins', 'opentimelineio.console', 'opentimelineio_contrib', 'opentimelineio_contrib.adapters', 'opentimelineio_contrib.adapters.aaf_adapter', 'opentimelineview', ], package_data={ 'opentimelineio': [ 'adapters/builtin_adapters.plugin_manifest.json', ], 'opentimelineio_contrib': [ 'adapters/contrib_adapters.plugin_manifest.json', ] }, install_requires=[ 'pyaaf2==1.2.0' ], entry_points={ 'console_scripts': [ 'otioview = opentimelineview.console:main', 'otiocat = opentimelineio.console.otiocat:main', 'otioconvert = opentimelineio.console.otioconvert:main', 'otiostat = opentimelineio.console.otiostat:main', ], }, extras_require={ 'dev': [ 'flake8>=3.5', 'coverage>=4.5', 'tox>=3.0', 'urllib3>=1.24.3' ], 'view': [ 'PySide2~=5.11' ] }, test_suite='setup.test_otio', tests_require=[ 'mock;python_version<"3.3"', ], # because we need to open() the adapters manifest, we aren't zip-safe zip_safe=False, # Use the code that wires the PROJECT_METADATA into the __init__ files. cmdclass={'build_py': AddMetadataToInits}, # expand the project metadata dictionary to fill in those values **PROJECT_METADATA )
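# Illustrative: during build_py, _append_version_info_to_init_scripts appends
# METADATA_TEMPLATE rendered with PROJECT_METADATA to each copied __init__.py,
# i.e. roughly the following lines (values taken from PROJECT_METADATA above):
#
#   __version__ = "0.11.0.dev1"
#   __author__ = "Pixar Animation Studios"
#   __license__ = "Modified Apache 2.0 License"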
[]
[]
[ "OTIO_DEFAULT_MEDIA_LINKER" ]
[]
["OTIO_DEFAULT_MEDIA_LINKER"]
python
1
0
nodeup/pkg/model/containerd_test.go
/* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package model import ( "crypto/sha1" "encoding/hex" "io" "net/http" "os" "path" "strings" "testing" "k8s.io/kops/pkg/apis/kops" "k8s.io/kops/pkg/flagbuilder" "k8s.io/kops/pkg/testutils" "k8s.io/kops/upup/pkg/fi" ) func TestContainerdPackageNames(t *testing.T) { for _, containerdVersion := range containerdVersions { if containerdVersion.PlainBinary { continue } sanityCheckContainerdPackageName(t, containerdVersion.Source, containerdVersion.Version, containerdVersion.Name) for k, p := range containerdVersion.ExtraPackages { sanityCheckContainerdPackageName(t, p.Source, p.Version, k) } } } func sanityCheckContainerdPackageName(t *testing.T, u string, version string, name string) { filename := u lastSlash := strings.LastIndex(filename, "/") if lastSlash != -1 { filename = filename[lastSlash+1:] } expectedNames := []string{} // Match known RPM formats for _, v := range []string{"-1.", "-2.", "-3.", "-3.2."} { for _, d := range []string{"el7", "el7.centos", "el7_6"} { for _, a := range []string{"noarch", "x86_64"} { expectedNames = append(expectedNames, name+"-"+version+v+d+"."+a+".rpm") } } } // Match known DEB formats for _, a := range []string{"amd64", "armhf"} { expectedNames = append(expectedNames, name+"_"+version+"_"+a+".deb") } found := false for _, s := range expectedNames { if s == filename { found = true } } if !found { t.Errorf("unexpected name=%q, version=%q for %s", name, version, u) } } func TestContainerdPackageHashes(t *testing.T) { if os.Getenv("VERIFY_HASHES") == "" { t.Skip("VERIFY_HASHES not set, won't download & verify docker hashes") } for _, containerdVersion := range containerdVersions { t.Run(containerdVersion.Source, func(t *testing.T) { verifyContainerdPackageHash(t, containerdVersion.Source, containerdVersion.Hash) for _, p := range containerdVersion.ExtraPackages { verifyContainerdPackageHash(t, p.Source, p.Hash) } }) } } func verifyContainerdPackageHash(t *testing.T, u string, hash string) { resp, err := http.Get(u) if err != nil { t.Errorf("%s: error fetching: %v", u, err) return } defer resp.Body.Close() hasher := sha1.New() if _, err := io.Copy(hasher, resp.Body); err != nil { t.Errorf("%s: error reading: %v", u, err) return } actualHash := hex.EncodeToString(hasher.Sum(nil)) if hash != actualHash { t.Errorf("%s: hash was %q", u, actualHash) return } } func TestContainerdBuilder_Simple(t *testing.T) { runContainerdBuilderTest(t, "simple") } func TestContainerdBuilder_SkipInstall(t *testing.T) { runDockerBuilderTest(t, "skipinstall") } func TestContainerdBuilder_BuildFlags(t *testing.T) { grid := []struct { config kops.ContainerdConfig expected string }{ { kops.ContainerdConfig{}, "", }, { kops.ContainerdConfig{ SkipInstall: false, ConfigOverride: fi.String("test"), Version: fi.String("test"), }, "", }, { kops.ContainerdConfig{ Address: fi.String("/run/containerd/containerd.sock"), }, "--address=/run/containerd/containerd.sock", }, { kops.ContainerdConfig{ LogLevel: fi.String("info"), }, "--log-level=info", }, 
{ kops.ContainerdConfig{ Root: fi.String("/var/lib/containerd"), }, "--root=/var/lib/containerd", }, { kops.ContainerdConfig{ State: fi.String("/run/containerd"), }, "--state=/run/containerd", }, { kops.ContainerdConfig{ SkipInstall: false, Address: fi.String("/run/containerd/containerd.sock"), ConfigOverride: fi.String("test"), LogLevel: fi.String("info"), Root: fi.String("/var/lib/containerd"), State: fi.String("/run/containerd"), Version: fi.String("test"), }, "--address=/run/containerd/containerd.sock --log-level=info --root=/var/lib/containerd --state=/run/containerd", }, { kops.ContainerdConfig{ SkipInstall: true, Address: fi.String("/run/containerd/containerd.sock"), ConfigOverride: fi.String("test"), LogLevel: fi.String("info"), Root: fi.String("/var/lib/containerd"), State: fi.String("/run/containerd"), Version: fi.String("test"), }, "--address=/run/containerd/containerd.sock --log-level=info --root=/var/lib/containerd --state=/run/containerd", }, } for _, g := range grid { actual, err := flagbuilder.BuildFlags(&g.config) if err != nil { t.Errorf("error building flags for %v: %v", g.config, err) continue } if actual != g.expected { t.Errorf("flags did not match. actual=%q expected=%q", actual, g.expected) } } } func runContainerdBuilderTest(t *testing.T, key string) { basedir := path.Join("tests/containerdbuilder/", key) nodeUpModelContext, err := BuildNodeupModelContext(basedir) if err != nil { t.Fatalf("error parsing cluster yaml %q: %v", basedir, err) return } context := &fi.ModelBuilderContext{ Tasks: make(map[string]fi.Task), } builder := ContainerdBuilder{NodeupModelContext: nodeUpModelContext} err = builder.Build(context) if err != nil { t.Fatalf("error from ContainerdBuilder Build: %v", err) return } testutils.ValidateTasks(t, basedir, context) }
[ "\"VERIFY_HASHES\"" ]
[]
[ "VERIFY_HASHES" ]
[]
["VERIFY_HASHES"]
go
1
0
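The containerd test above only downloads and SHA-1-checks packages when the VERIFY_HASHES environment variable is set. A minimal Python sketch of the same opt-in gating, assuming a placeholder URL and digest that are not taken from the record:

import hashlib
import os
import unittest
import urllib.request


class PackageHashTest(unittest.TestCase):
    # Placeholder values, not taken from the record above.
    URL = "https://example.com/containerd.deb"
    EXPECTED_SHA1 = "0000000000000000000000000000000000000000"

    @unittest.skipUnless(os.environ.get("VERIFY_HASHES"),
                         "VERIFY_HASHES not set, won't download & verify hashes")
    def test_package_hash(self):
        hasher = hashlib.sha1()
        with urllib.request.urlopen(self.URL) as resp:
            for chunk in iter(lambda: resp.read(8192), b""):
                hasher.update(chunk)
        self.assertEqual(self.EXPECTED_SHA1, hasher.hexdigest())


if __name__ == "__main__":
    unittest.main()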
plugins/inputs/eii_msgbus/config.go
/* Copyright (c) 2021 Intel Corporation Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ package eii_msgbus import ( json "encoding/json" "fmt" "os" "strconv" "strings" ) // Returns the config object for a given topic func (pluginConfigObj *eiiMsgbusInputPluginConfig) getPrefixConfigForTopic(tpName string) *topicPrefixConfig { pxfLen := 0 var config *topicPrefixConfig = nil // Match the longest prefix for the topic name for tpPrefix, tempConfigObj := range pluginConfigObj.mapOfPrefixToConfig { if strings.HasPrefix(tpName, tpPrefix) { if len(tpPrefix) > pxfLen { pxfLen = len(tpPrefix) config = tempConfigObj } } } return config } func (pluginConfigObj *eiiMsgbusInputPluginConfig) getDefaultPrefix(tpName string) (*topicPrefixConfig, error) { tempArray := []string{"", ""} tempArray[0] = tpName tempArray[1] = tpName tempObj := getTopicPrefixConfig(tempArray) if tempObj == nil { return nil, fmt.Errorf("wrong prefix configuration:%v", tempArray) } return tempObj, nil } // Converts the plugin configuration into a plugin configuration object of type eiiMsgbusInputPluginConfig. 
func (pluginConfigObj *eiiMsgbusInputPluginConfig) initConfig(emb *EiiMsgbus) error { appConfig, err := emb.confMgr.GetAppConfig() if err != nil { return fmt.Errorf("Error in getting config: %v", err) } var instanceConfig map[string]interface{} var found bool if instanceConfig, found = appConfig[emb.Instance_name].(map[string]interface{}); found == false { return fmt.Errorf("Could not get the configuration for %v: %v", emb.Instance_name, err) } pluginConfigObj.mapOfPrefixToConfig = make(map[string]*topicPrefixConfig) for _, tpPfxConfLine := range instanceConfig["topics_info"].([]interface{}) { // Parse each prefix config line and create an prefix config object tempArray := strings.Split(tpPfxConfLine.(string), ":") tempObj := getTopicPrefixConfig(tempArray) if tempObj == nil { return fmt.Errorf("wrong prefix configuration:%v", tpPfxConfLine) } pluginConfigObj.mapOfPrefixToConfig[tempObj.tpPrefix] = tempObj } numInt, err := instanceConfig["queue_len"].(json.Number).Int64() if err != nil { return fmt.Errorf("json number conversion failed %v: %v", instanceConfig["queue_len"], err) } pluginConfigObj.globalQueueLen = int(numInt) numInt, err = instanceConfig["num_worker"].(json.Number).Int64() if err != nil { return fmt.Errorf("json number conversion failed %v: %v", instanceConfig["num_worker"], err) } pluginConfigObj.globalPoolSize = int(numInt) value := instanceConfig["profiling"].(string) pluginConfigObj.profiling, err = strconv.ParseBool(value) if err != nil { return fmt.Errorf("Parsing profiling mode failed: %v", err) } value = os.Getenv("DEV_MODE") pluginConfigObj.devmode, err = strconv.ParseBool(value) if err != nil { return fmt.Errorf("Parsing dev mode failed: %v", err) } pluginConfigObj.instanceName = emb.Instance_name return nil } // Converts the topic-prefix specific configuration into an object of type topicPrefixConfig. func getTopicPrefixConfig(tempArray []string) *topicPrefixConfig { // The topic prefix configuration can be in three different forms // Option 1. ${eii-msg-topic-prefix}:${measurement-name}:${queue_len}:${num_of_workers_in_pool} // Option 2. ${eii-msg-topic-name}:${measurement-name}:: // Option 3. ${eii-msg-topic-name}:${measurement-name} // All three parsing scenarios has been addressed in this function. if len(tempArray) < 2 || len(tempArray) > 4 { return nil } tpPrefix := strings.TrimSpace(tempArray[0]) mName := strings.TrimSpace(tempArray[1]) if len(tpPrefix) <= 0 || len(mName) <= 0 { return nil } obj := new(topicPrefixConfig) obj.isSyncProc = false obj.tpPrefix = tpPrefix obj.mName = mName if len(tempArray) == 2 { // Option 3. (*obj).isSyncProc = true return obj } queueLenInStr := strings.TrimSpace(tempArray[2]) poolSizeInStr := strings.TrimSpace(tempArray[3]) if len(queueLenInStr) > 0 && len(poolSizeInStr) > 0 { // Option 1. if queueLen, err := strconv.Atoi(queueLenInStr); err == nil { obj.queueLen = queueLen } else { return nil } if poolSize, err := strconv.Atoi(poolSizeInStr); err == nil { obj.poolSize = poolSize } else { return nil } } // Option 2. in case of above if block skipped return obj }
[ "\"DEV_MODE\"" ]
[]
[ "DEV_MODE" ]
[]
["DEV_MODE"]
go
1
0
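The plugin above reads DEV_MODE and passes it straight to strconv.ParseBool, so an unset or misspelled value becomes a hard error. A hedged Python sketch of the same idea with an explicit default; the helper name and the accepted spellings are my own, not part of the plugin:

import os


def env_bool(name, default=False):
    """Read a boolean environment variable, accepting common spellings."""
    raw = os.environ.get(name)
    if raw is None or raw.strip() == "":
        return default
    value = raw.strip().lower()
    if value in ("1", "true", "yes", "on"):
        return True
    if value in ("0", "false", "no", "off"):
        return False
    raise ValueError("unrecognised boolean value for %s: %r" % (name, raw))


dev_mode = env_bool("DEV_MODE", default=False)
print(dev_mode)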
test/TEX/multi-run.py
#!/usr/bin/env python # # __COPYRIGHT__ # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__" """ Validate that both .tex and .ltx files can handle a LaTeX-style bibliography (by calling $BIBTEX to generate a .bbl file) and correctly re-run to resolve undefined references. Also verifies that package warnings are caught and re-run as needed. """ import TestSCons test = TestSCons.TestSCons() tex = test.where_is('tex') latex = test.where_is('latex') if not latex: test.skip_test("Could not find latex; skipping test(s).\n") if not tex and not latex: test.skip_test("Could not find tex or latex; skipping test(s).\n") test.subdir('work1', 'work2', 'work3', 'work4') input_file = r""" \documentclass{article} \begin{document} As stated in \cite{X}, this is a bug-a-boo. \bibliography{fooref} \bibliographystyle{plain} \end{document} """ input_file2 = r""" \documentclass{article} \begin{document} Hello world. % \bibliography{fooref} % \bibliographystyle{plain} \end{document} """ input_file3 = r""" \documentclass{article} \usepackage{longtable} \begin{document} As stated in the last paper, this is a bug-a-boo. 
here is some more junk and another table here is some more junk and another table \begin{longtable}[l]{rlll} Isotope &\multicolumn{1}{c}{Abar} &Name\\ \\ 1001 &1.0078 &Proton &$p$\\ 1002 &2.0141 &Deuterium &$d$\\ 1003 &3.0170 &Tritium &$t$\\ 2003 &3.0160 &Helium 3 &He$^3$\\ 2004 &4.0026 &Helium 4 &He$^{4}$\\ \end{longtable} and a closing comment These parameters and arrays are filled in when the parameter \textbf{iftnrates} is set to 1: \begin{longtable}[l]{ll} \\ \textbf{nxxxx} &Total number of particles made by xxxx reaction\\ \textbf{pxxxx} &Total number of particles made by xxxx reaction\\ \textbf{nxxxxx} &Total number of particles made by xxxxx reaction\\ \textbf{nxxxx} &Total number of particles made by xxxx reaction\\ \textbf{pxxxx} &Total number of particles made by xxxx reaction\\ \textbf{nxxxx} &Total number of particles made by xxxx reaction\\ \textbf{pxxxx} &Total number of particles made by xxxx reaction\\ \textbf{nxxxxx} &Total number of particles made by xxxxx reaction\\ \textbf{nxxxx} &Total number of particles made by xxxx reaction\\ \textbf{pxxxx} &Total number of particles made by xxxx reaction\\ \\ \textbf{rnxxxx} &Regional total of particles made by xxxx reaction\\ \textbf{rpxxxx} &Regional total of particles made by xxxx reaction\\ \textbf{rnxxxxx} &Regional total of particles made by xxxxx reaction\\ \textbf{rnxxxx} &Regional total of particles made by xxxx reaction\\ \textbf{rpxxxx} &Regional total of particles made by xxxx reaction\\ \textbf{rnxxxx} &Regional total of particles made by xxxx reaction\\ \textbf{rpxxxx} &Regional total of particles made by xxxx reaction\\ \textbf{rnxxxxx} &Regional total of particles made by xxxxx reaction\\ \textbf{rnxxxx} &Regional total of particles made by xxxx reaction\\ \textbf{rpxxxx} &Regional total of particles made by xxxx reaction\\ \\ \textbf{reactot}(r) &Total number of reactions for reaction r\\ \textbf{reacreg}(r,ir) &Total number of reactions for reaction r in region ir\\ \end{longtable} \end{document} """ bibfile = r""" @Article{X, author = "Mr. 
X", title = "A determination of bug-a-boo-ness", journal = "Journal of B.a.B.", year = 1920, volume = 62, pages = 291 } """ if tex: test.write(['work1', 'SConstruct'], """\ import os env = Environment(tools = ['pdftex', 'dvipdf', 'tex', 'latex']) env.DVI( "foo.tex" ) env.PDF( "foo.tex" ) """) test.write(['work1', 'foo.tex'], input_file) test.write(['work1', 'fooref.bib'], bibfile) test.run(chdir = 'work1', arguments = '.') test.must_exist(['work1', 'foo.bbl']) foo_log = test.read(['work1', 'foo.log'], mode='r') test.must_not_contain_any_line(foo_log, ['undefined references'], 'foo.log') test.write(['work3', 'SConstruct'], """\ import os env = Environment(tools = ['tex', 'latex'], ENV = {'PATH' : os.environ['PATH']}) env.DVI( "foo3.tex" ) """) test.write(['work3', 'foo3.tex'], input_file3) test.run(chdir = 'work3', arguments = '.') foo_log = test.read(['work3', 'foo3.log'], mode='r') test.must_not_contain_any_line(foo_log, ['Rerun LaTeX'], 'foo3.log') if latex: test.write(['work2', 'SConstruct'], """\ import os env = Environment(tools = ['dvi', 'pdf', 'pdftex', 'dvipdf', 'pdflatex', 'tex', 'latex'], ENV = {'PATH' : os.environ['PATH']}) env.DVI( "foo.ltx" ) env.PDF( "foo.ltx" ) """) test.write(['work2', 'foo.ltx'], input_file) test.write(['work2', 'fooref.bib'], bibfile) test.run(chdir = 'work2', arguments = '.') test.must_exist(['work2', 'foo.bbl']) foo_log = test.read(['work2', 'foo.log'], mode='r') test.must_not_contain_any_line(foo_log, ['undefined references'], 'foo.log') test.write(['work3', 'SConstruct'], """\ import os env = Environment(tools = ['pdftex', 'dvipdf', 'tex', 'latex'], ENV = {'PATH' : os.environ['PATH']}) env.DVI( "foo3.tex" ) env.PDF( "foo3.tex" ) """) test.write(['work3', 'foo3.tex'], input_file3) test.run(chdir = 'work3', arguments = '.') foo_log = test.read(['work3', 'foo3.log'], mode='r') test.must_not_contain_any_line(foo_log, ['Rerun LaTeX'], 'foo3.log') test.write(['work4', 'SConstruct'], """\ import os env = Environment(tools = ['tex', 'latex'], ENV = {'PATH' : os.environ['PATH']}) env.DVI( "foo.ltx" ) """) test.write(['work4', 'foo.ltx'], input_file2) test.run(chdir = 'work4', arguments = '.') test.up_to_date(chdir = 'work4', arguments = '.') test.pass_test() # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
[]
[]
[ "PATH" ]
[]
["PATH"]
python
1
0
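The SCons test above builds its construction environments with ENV = {'PATH': os.environ['PATH']} so that only PATH leaks from the calling shell into the build. A small Python sketch of the same minimal-environment idea for a plain subprocess call; the command is only an illustration:

import os
import subprocess
import sys

# Pass only PATH through so the child sees none of the caller's other variables.
minimal_env = {"PATH": os.environ.get("PATH", "")}
result = subprocess.run([sys.executable, "--version"], env=minimal_env,
                        capture_output=True, text=True, check=False)
print(result.stdout.strip() or result.stderr.strip())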
SimpleCV/Features/HaarLikeFeature.py
from SimpleCV.base import * from SimpleCV.ImageClass import Image class HaarLikeFeature(): """ Create a single Haar feature and optionally set the regions that define the Haar feature and its name. The format is [[[TL],[BR],SIGN],[[TL],[BR],SIGN].....] Where TL and BR are the unit coordinates for the top left and bottom right corners. For example [[[0,0],[0.5,0.5],1],[[0.5,0],[1.0,1.0],-1]] Takes the right side of the image and subtracts from the left hand side of the image. """ mName = None mRegions = None def __init__(self, name=None,regions=None): self.mName = name; self.mRegions = regions; def setRegions(self,regions): """ Set the list of regions. The regions are square coordinates on a unit sized image followed by the sign of a region. The format is [[[TL],[BR],SIGN],[[TL],[BR],SIGN].....] Where TL and BR are the unit coordinates for the top left and bottom right corners. For example [[[0,0],[0.5,0.5],1],[[0.5,0],[1.0,1.0],-1]] Takes the right side of the image and subtracts from the left hand side of the image. """ self.mRegions = regions def setName(self,name): """ Set the name of this feature, the name must be unique. """ self.mName = name def apply(self, intImg ): """ This method takes in an integral image, applies the Haar-like feature to it, and returns the result. Each region is indexed below as a flat (left, top, right, bottom, sign) tuple in unit coordinates. """ w = intImg.shape[0]-1 h = intImg.shape[1]-1 accumulator = 0 for i in range(len(self.mRegions)): # using the integral image # A = Lower Right Hand Corner # B = upper right hand corner # C = lower left hand corner # D = upper left hand corner # sum = A - B - C + D # regions are in # (p,q,r,s,t) format p = self.mRegions[i][0] # p = left (all are unit length) q = self.mRegions[i][1] # q = top r = self.mRegions[i][2] # r = right s = self.mRegions[i][3] # s = bottom sign = self.mRegions[i][4] # t = sign xA = int(w*r) yA = int(h*s) xB = int(w*r) yB = int(h*q) xC = int(w*p) yC = int(h*s) xD = int(w*p) yD = int(h*q) accumulator += sign*(intImg[xA,yA]-intImg[xB,yB]-intImg[xC,yC]+intImg[xD,yD]) return accumulator def writeToFile(self,file): """ Write the Haar cascade to a human readable file. file is an open file pointer. """ file.write(self.mName) file.write(" "+str(len(self.mRegions))+"\n") for i in range(len(self.mRegions)): temp = self.mRegions[i] for j in range(len(temp)): file.write(str(temp[j])+' ') file.write('\n') file.write('\n')
[]
[]
[]
[]
[]
python
null
null
null
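HaarLikeFeature.apply assumes an integral image, where the sum over any rectangle is recovered from four corner lookups as A - B - C + D. A short NumPy sketch of that identity and of a simple left-minus-right two-region feature, independent of SimpleCV; the array size and regions are illustrative:

import numpy as np

# Integral image: entry [i, j] holds the sum of img[:i, :j], with a zero border.
img = np.arange(36, dtype=np.float64).reshape(6, 6)
int_img = np.zeros((7, 7))
int_img[1:, 1:] = img.cumsum(axis=0).cumsum(axis=1)


def rect_sum(ii, top, left, bottom, right):
    """Sum of img[top:bottom, left:right] from four integral-image lookups."""
    return ii[bottom, right] - ii[top, right] - ii[bottom, left] + ii[top, left]


# Left half minus right half: the simplest two-region Haar-like feature.
left = rect_sum(int_img, 0, 0, 6, 3)
right = rect_sum(int_img, 0, 3, 6, 6)
print(left - right, img[:, :3].sum() - img[:, 3:].sum())  # the two values match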
trypy.py
from flask import Flask, render_template, request, redirect, url_for import base64 import re import numpy as np from io import BytesIO from tkinter import * from PIL import Image, ImageTk import time import threading import cv2 import os from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import tensorflow as tf from keras import backend as k from keras.models import load_model app = Flask(__name__) img_w = 75 img_h = 75 weight_dir = os.path.join(os.getcwd(), 'weights/') model_name = 'face_model.h5' model_dir = os.path.join(os.getcwd(), 'models/') predictedAge='NA' graph = tf.get_default_graph() @app.route('/result' , methods=['GET']) def result(): global predictedAge print(predictedAge) return render_template('result.html',predictedAge=predictedAge) @app.route('/', methods=['POST','GET']) def index(): print(request.method) if request.method == 'POST': with graph.as_default(): global predictedAge print("INSIDE POST") print(request.form['number']) image_b64 = request.form['image'] image_b64 = image_b64.split(',')[1] print(image_b64[:100]) sbuf = BytesIO() sbuf.write(base64.b64decode(image_b64)) pimg = Image.open(sbuf) image = cv2.cvtColor(np.array(pimg), cv2.COLOR_RGB2BGR) print('image produced') print(image.shape) #cv2.imread('captured image', (image)) #cv2.waitKey(0) global weight_dir, img_w, img_h img = image gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) face_cascade = cv2.CascadeClassifier('C:/Python35/Scripts/env/haarcascade_frontalface_default.xml') faces = face_cascade.detectMultiScale(gray, 1.3, 5) print('displaying image') roi = None for (x,y,w,h) in faces: roi = gray[y:y+h, x:x+w] try: print('using face only') gray_img = cv2.resize(roi, (img_w,img_h)) gray_img = np.expand_dims(gray_img, axis=2) gray_img = np.array([gray_img])/255.0 #cv2.imshow('face', (gray_img)) except: print('Unable to find face') print('using whole picture') gray = cv2.resize(gray, (img_w,img_h)) gray = np.expand_dims(gray, axis=2) gray = np.array([gray])/255.0 print(gray.shape) #cv2.imshow('face', (gray)) predicted_age = 0 sum=0.0 counter=0.0 try: for wt in os.listdir(weight_dir): counter+=1.0 model.load_weights(weight_dir+wt) print("wt: ",wt) try: ynew = model.predict_classes(gray_img) except: ynew = model.predict_classes(gray) sum+=ynew[0] except Exception as e: print('line 217 ',e) predicted_age = sum/counter predictedAge = predicted_age # predictedAge = 22 print('predict_age=', predictedAge) return redirect(url_for('result')) else: return render_template('index.html') if __name__ =="__main__": os.environ['TF_CPP_MIN_LOG_LEVEL']='3' model = load_model(model_dir+model_name) print('model prepared') app.run(debug=True,port=10080)
[]
[]
[ "TF_CPP_MIN_LOG_LEVEL" ]
[]
["TF_CPP_MIN_LOG_LEVEL"]
python
1
0
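trypy.py sets TF_CPP_MIN_LOG_LEVEL only inside the __main__ block, after tensorflow and keras have already been imported, so the C++ log filter cannot affect import-time messages. A minimal sketch of the usual ordering, assuming TensorFlow is installed; the value '3' silences all but fatal messages:

import os

# Must be set before TensorFlow is imported for the C++ log filter to apply.
os.environ.setdefault("TF_CPP_MIN_LOG_LEVEL", "3")

import tensorflow as tf  # noqa: E402  (deliberately imported after the env tweak)

print(tf.__version__)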
src/server/oasisapi/wsgi.py
""" WSGI config for oasisapi project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "src.server.oasisapi.settings") application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
conanfile.py
# -*- coding: utf-8 -*- # # Copyright 2012-2019 CNRS-UM LIRMM, CNRS-AIST JRL # from conans import ConanFile, CMake, tools from conans.tools import os_info, SystemPackageTool import os import shutil import subprocess import sys def get_python_version(cmd = 'python'): # Get the version of the `cmd` command assumed to be a python interpreter try: return '.'.join(subprocess.check_output('{} -V'.format(cmd).split(), stderr = subprocess.STDOUT).strip().split()[1].decode().split('.')[0:2]) except OSError: return None def get_default_options(): if os_info.is_windows: return { "python2_version": None, "python3_version": get_python_version() } else: return { "python2_version": get_python_version('python2'), "python3_version": get_python_version('python3') } def enable_python2_and_python3(options): return options['python2_version'] is not None and options['python3_version'] is not None and not os_info.is_windows class Eigen3ToPythonConan(ConanFile): name = "Eigen3ToPython" version = "1.0.2" description = "Python bindings for the Eigen library" # topics can get used for searches, GitHub topics, Bintray tags etc. Add here keywords about the library topics = ("eigen", "python") url = "https://github.com/jrl-umi3218/Eigen3ToPython" homepage = "https://github.com/jrl-umi3218/Eigen3ToPython" author = "Pierre Gergondet <[email protected]>" license = "BSD-2-Clause" # Indicates license type of the packaged library; please use SPDX Identifiers https://spdx.org/licenses/ exports = ["LICENSE"] # Packages the license for the conanfile.py exports_sources = ["CMakeLists.txt", "requirements.txt", "setup.in.py", "conan/CMakeLists.txt", "eigen/*", "include/*", "tests/*", "utils/*"] generators = "cmake" options = { "python2_version": [None, "2.7"], "python3_version": [None, "3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9"] } default_options = get_default_options() settings = "os", "arch", "compiler" requires = ( "eigen/3.3.4@conan/stable" ) def system_requirements(self): if os_info.is_linux: installer = SystemPackageTool() packages = '' if self.default_options['python2_version'] is not None: packages = 'cython python-coverage python-nose python-numpy ' if self.default_options['python3_version'] is not None: packages += 'cython3 python3-coverage python3-nose python3-numpy' if len(packages): installer.install(packages) else: if enable_python2_and_python3(self.default_options): subprocess.run("pip2 install --user Cython>=0.2 coverage nose numpy>=1.8.2".split()) subprocess.run("pip3 install --user Cython>=0.2 coverage nose numpy>=1.8.2".split()) else: subprocess.run("pip install --user Cython>=0.2 coverage nose numpy>=1.8.2".split()) def source(self): # Wrap the original CMake file to call conan_basic_setup shutil.move("CMakeLists.txt", "CMakeListsOriginal.txt") shutil.move(os.path.join("conan", "CMakeLists.txt"), "CMakeLists.txt") def _extra_path(self): return os.path.join(self.package_folder, 'bin') def _extra_python_path(self): return os.path.join(self.package_folder, 'lib', 'python{}'.format(get_python_version()), 'site-packages') def _configure_cmake(self): os.environ['PATH'] = self._extra_path() + os.pathsep + os.environ.get('PATH', '') os.environ['PYTHONPATH'] = self._extra_python_path() + os.pathsep + os.environ.get('PYTHONPATH', '') cmake = CMake(self) cmake.definitions['DISABLE_TESTS'] = True cmake.definitions['CMAKE_BUILD_TYPE'] = self.settings.get_safe("build_type", "Release") cmake.definitions['PIP_INSTALL_PREFIX'] = self.package_folder cmake.definitions['PYTHON_BINDING_BUILD_PYTHON2_AND_PYTHON3'] = 
enable_python2_and_python3(self.options) cmake.configure() return cmake def build(self): cmake = self._configure_cmake() cmake.build() def package(self): cmake = self._configure_cmake() cmake.install() def deploy(self): self.copy("*") self.copy_deps("*") def package_info(self): self.env_info.PATH.append(self._extra_path()) self.env_info.PYTHONPATH.append(self._extra_python_path()) def package_id(self): del self.info.settings.compiler.runtime if self.options.python2_version == "None": self.info.options.python2_version = "2.7" if self.options.python3_version == "None": for v3 in ["3.9", "3.8", "3.7", "3.6", "3.5", "3.4", "3.3"]: compatible_pkg = self.info.clone() compatible_pkg.options.python3_version = v3 self.compatible_packages.append(compatible_pkg)
[]
[]
[ "PATH", "PYTHONPATH" ]
[]
["PATH", "PYTHONPATH"]
python
2
0
owner/manager.go
// Copyright 2017 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package owner import ( "fmt" "math" "os" "strconv" "sync/atomic" "time" "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/clientv3/concurrency" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" "github.com/coreos/etcd/mvcc/mvccpb" "github.com/juju/errors" "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/terror" "github.com/pingcap/tidb/util" log "github.com/sirupsen/logrus" "golang.org/x/net/context" "google.golang.org/grpc" ) const ( newSessionRetryInterval = 200 * time.Millisecond logIntervalCnt = int(3 * time.Second / newSessionRetryInterval) ) // Manager is used to campaign the owner and manage the owner information. type Manager interface { // ID returns the ID of the manager. ID() string // IsOwner returns whether the ownerManager is the owner. IsOwner() bool // SetOwner sets whether the ownerManager is the owner. SetOwner(isOwner bool) // GetOwnerID gets the owner ID. GetOwnerID(ctx context.Context) (string, error) // CampaignOwner campaigns the owner. CampaignOwner(ctx context.Context) error // Cancel cancels this etcd ownerManager campaign. Cancel() } const ( // NewSessionDefaultRetryCnt is the default retry times when create new session. NewSessionDefaultRetryCnt = 3 // NewSessionRetryUnlimited is the unlimited retry times when create new session. NewSessionRetryUnlimited = math.MaxInt64 ) // ownerManager represents the structure which is used for electing owner. type ownerManager struct { owner int32 id string // id is the ID of the manager. key string prompt string etcdCli *clientv3.Client cancel context.CancelFunc } // NewOwnerManager creates a new Manager. func NewOwnerManager(etcdCli *clientv3.Client, prompt, id, key string, cancel context.CancelFunc) Manager { return &ownerManager{ etcdCli: etcdCli, id: id, key: key, prompt: prompt, cancel: cancel, } } // ID implements Manager.ID interface. func (m *ownerManager) ID() string { return m.id } // IsOwner implements Manager.IsOwner interface. func (m *ownerManager) IsOwner() bool { return atomic.LoadInt32(&m.owner) == 1 } // SetOwner implements Manager.SetOwner interface. func (m *ownerManager) SetOwner(isOwner bool) { if isOwner { atomic.StoreInt32(&m.owner, 1) } else { atomic.StoreInt32(&m.owner, 0) } } // Cancel implements Manager.Cancel interface. func (m *ownerManager) Cancel() { m.cancel() } // ManagerSessionTTL is the etcd session's TTL in seconds. It's exported for testing. var ManagerSessionTTL = 60 // setManagerSessionTTL sets the ManagerSessionTTL value, it's used for testing. func setManagerSessionTTL() error { ttlStr := os.Getenv("tidb_manager_ttl") if len(ttlStr) == 0 { return nil } ttl, err := strconv.Atoi(ttlStr) if err != nil { return errors.Trace(err) } ManagerSessionTTL = ttl return nil } // NewSession creates a new etcd session. 
func NewSession(ctx context.Context, logPrefix string, etcdCli *clientv3.Client, retryCnt, ttl int) (*concurrency.Session, error) { var err error var etcdSession *concurrency.Session failedCnt := 0 for i := 0; i < retryCnt; i++ { if err = contextDone(ctx, err); err != nil { return etcdSession, errors.Trace(err) } // gofail: var closeClient bool // if closeClient { // etcdCli.Close() // } // gofail: var closeGrpc bool // if closeGrpc { // etcdCli.ActiveConnection().Close() // } startTime := time.Now() etcdSession, err = concurrency.NewSession(etcdCli, concurrency.WithTTL(ttl), concurrency.WithContext(ctx)) metrics.NewSessionHistogram.WithLabelValues(logPrefix, metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds()) if err == nil { break } if failedCnt%logIntervalCnt == 0 { log.Warnf("%s failed to new session to etcd, err %v", logPrefix, err) } time.Sleep(newSessionRetryInterval) failedCnt++ } return etcdSession, errors.Trace(err) } // CampaignOwner implements Manager.CampaignOwner interface. func (m *ownerManager) CampaignOwner(ctx context.Context) error { logPrefix := fmt.Sprintf("[%s] %s", m.prompt, m.key) session, err := NewSession(ctx, logPrefix, m.etcdCli, NewSessionDefaultRetryCnt, ManagerSessionTTL) if err != nil { return errors.Trace(err) } cancelCtx, _ := context.WithCancel(ctx) go m.campaignLoop(cancelCtx, session) return nil } func (m *ownerManager) campaignLoop(ctx context.Context, etcdSession *concurrency.Session) { defer func() { if r := recover(); r != nil { buf := util.GetStack() log.Errorf("[%s] recover panic:%v, %s", m.prompt, r, buf) metrics.PanicCounter.WithLabelValues(metrics.LabelDDLOwner).Inc() } }() logPrefix := fmt.Sprintf("[%s] %s ownerManager %s", m.prompt, m.key, m.id) var err error for { if err != nil { metrics.CampaignOwnerCounter.WithLabelValues(m.prompt, err.Error()).Inc() } select { case <-etcdSession.Done(): log.Infof("%s etcd session is done, creates a new one", logPrefix) leaseID := etcdSession.Lease() etcdSession, err = NewSession(ctx, logPrefix, m.etcdCli, NewSessionRetryUnlimited, ManagerSessionTTL) if err != nil { log.Infof("%s break campaign loop, NewSession err %v", logPrefix, err) m.revokeSession(logPrefix, leaseID) return } case <-ctx.Done(): m.revokeSession(logPrefix, etcdSession.Lease()) return default: } // If the etcd server turns clocks forward,the following case may occur. // The etcd server deletes this session's lease ID, but etcd session doesn't find it. // In this time if we do the campaign operation, the etcd server will return ErrLeaseNotFound. if terror.ErrorEqual(err, rpctypes.ErrLeaseNotFound) { if etcdSession != nil { err = etcdSession.Close() log.Infof("%s etcd session encounters the error of lease not found, closes it err %s", logPrefix, err) } continue } elec := concurrency.NewElection(etcdSession, m.key) err = elec.Campaign(ctx, m.id) if err != nil { log.Infof("%s failed to campaign, err %v", logPrefix, err) continue } ownerKey, err := GetOwnerInfo(ctx, elec, logPrefix, m.id) if err != nil { continue } m.SetOwner(true) m.watchOwner(ctx, etcdSession, ownerKey) m.SetOwner(false) metrics.CampaignOwnerCounter.WithLabelValues(m.prompt, metrics.NoLongerOwner).Inc() log.Warnf("%s isn't the owner", logPrefix) } } func (m *ownerManager) revokeSession(logPrefix string, leaseID clientv3.LeaseID) { // Revoke the session lease. // If revoke takes longer than the ttl, lease is expired anyway. 
cancelCtx, cancel := context.WithTimeout(context.Background(), time.Duration(ManagerSessionTTL)*time.Second) _, err := m.etcdCli.Revoke(cancelCtx, leaseID) cancel() log.Infof("%s break campaign loop, revoke err %v", logPrefix, err) } // GetOwnerID implements Manager.GetOwnerID interface. func (m *ownerManager) GetOwnerID(ctx context.Context) (string, error) { resp, err := m.etcdCli.Get(ctx, m.key, clientv3.WithFirstCreate()...) if err != nil { return "", errors.Trace(err) } if len(resp.Kvs) == 0 { return "", concurrency.ErrElectionNoLeader } return string(resp.Kvs[0].Value), nil } // GetOwnerInfo gets the owner information. func GetOwnerInfo(ctx context.Context, elec *concurrency.Election, logPrefix, id string) (string, error) { resp, err := elec.Leader(ctx) if err != nil { // If no leader elected currently, it returns ErrElectionNoLeader. log.Infof("%s failed to get leader, err %v", logPrefix, err) return "", errors.Trace(err) } ownerID := string(resp.Kvs[0].Value) log.Infof("%s, owner is %v", logPrefix, ownerID) if ownerID != id { log.Warnf("%s isn't the owner", logPrefix) return "", errors.New("ownerInfoNotMatch") } return string(resp.Kvs[0].Key), nil } func (m *ownerManager) watchOwner(ctx context.Context, etcdSession *concurrency.Session, key string) { logPrefix := fmt.Sprintf("[%s] ownerManager %s watch owner key %v", m.prompt, m.id, key) log.Debugf("%s", logPrefix) watchCh := m.etcdCli.Watch(ctx, key) for { select { case resp, ok := <-watchCh: if !ok { metrics.WatchOwnerCounter.WithLabelValues(m.prompt, metrics.WatcherClosed).Inc() log.Infof("%s watcher is closed, no owner", logPrefix) return } if resp.Canceled { metrics.WatchOwnerCounter.WithLabelValues(m.prompt, metrics.Cancelled).Inc() log.Infof("%s canceled, no owner", logPrefix) return } for _, ev := range resp.Events { if ev.Type == mvccpb.DELETE { metrics.WatchOwnerCounter.WithLabelValues(m.prompt, metrics.Deleted).Inc() log.Infof("%s failed, owner is deleted", logPrefix) return } } case <-etcdSession.Done(): metrics.WatchOwnerCounter.WithLabelValues(m.prompt, metrics.SessionDone).Inc() return case <-ctx.Done(): metrics.WatchOwnerCounter.WithLabelValues(m.prompt, metrics.CtxDone).Inc() return } } } func init() { err := setManagerSessionTTL() if err != nil { log.Warnf("set manager session TTL failed %v", err) } } func contextDone(ctx context.Context, err error) error { select { case <-ctx.Done(): return errors.Trace(ctx.Err()) default: } // Sometime the ctx isn't closed, but the etcd client is closed, // we need to treat it as if context is done. // TODO: Make sure ctx is closed with etcd client. if terror.ErrorEqual(err, context.Canceled) || terror.ErrorEqual(err, context.DeadlineExceeded) || terror.ErrorEqual(err, grpc.ErrClientConnClosing) { return errors.Trace(err) } return nil }
[ "\"tidb_manager_ttl\"" ]
[]
[ "tidb_manager_ttl" ]
[]
["tidb_manager_ttl"]
go
1
0
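setManagerSessionTTL above reads tidb_manager_ttl, keeps the 60-second default when the variable is unset, and errors only on a value that fails to parse as an integer. The same pattern sketched in Python; the names are illustrative:

import os

DEFAULT_SESSION_TTL = 60


def session_ttl(env_name="tidb_manager_ttl"):
    """Return the TTL from the environment, falling back to the default."""
    raw = os.environ.get(env_name, "")
    if not raw:
        return DEFAULT_SESSION_TTL
    return int(raw)  # raises ValueError on a malformed value, as the Go code errors


print(session_ttl())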
bot.py
''' MIT License Copyright (c) 2017 Kyb3r Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ''' GUILD_ID = 0 # your guild id here import discord from discord.ext import commands from urllib.parse import urlparse import asyncio import textwrap import datetime import time import json import sys import os import re import string import traceback import io import inspect from contextlib import redirect_stdout class Modmail(commands.Bot): def __init__(self): super().__init__(command_prefix=self.get_pre) self.uptime = datetime.datetime.utcnow() self._add_commands() def _add_commands(self): '''Adds commands automatically''' for attr in dir(self): cmd = getattr(self, attr) if isinstance(cmd, commands.Command): self.add_command(cmd) @property def token(self): '''Returns your token wherever it is''' try: with open('config.json') as f: config = json.load(f) if config.get('TOKEN') == "your_token_here": if not os.environ.get('TOKEN'): self.run_wizard() else: token = config.get('TOKEN').strip('\"') except FileNotFoundError: token = None return os.environ.get('TOKEN') or token @staticmethod async def get_pre(bot, message): '''Returns the prefix.''' with open('config.json') as f: prefix = json.load(f).get('PREFIX') return os.environ.get('PREFIX') or prefix or 'm.' @staticmethod def run_wizard(): '''Wizard for first start''' print('------------------------------------------') token = input('Enter your token:\n> ') print('------------------------------------------') data = { "TOKEN" : token, } with open('config.json','w') as f: f.write(json.dumps(data, indent=4)) print('------------------------------------------') print('Restarting...') print('------------------------------------------') os.execv(sys.executable, ['python'] + sys.argv) @classmethod def init(cls, token=None): '''Starts the actual bot''' bot = cls() if token: to_use = token.strip('"') else: to_use = bot.token.strip('"') try: bot.run(to_use, activity=discord.Game(os.getenv('STATUS')), reconnect=True) except Exception as e: raise e async def on_connect(self): print('---------------') print('Modmail connected!') status = os.getenv('STATUS') if status: print(f'Setting Status to {status}') else: print('No status set.') @property def guild_id(self): from_heroku = os.environ.get('GUILD_ID') return int(from_heroku) if from_heroku else GUILD_ID async def on_ready(self): '''Bot startup, sets uptime.''' self.guild = discord.utils.get(self.guilds, id=self.guild_id) print(textwrap.dedent(f''' --------------- Client is ready! 
--------------- Author: Kyb3r#7220 --------------- Logged in as: {self.user} User ID: {self.user.id} --------------- ''')) def overwrites(self, ctx, modrole=None): '''Permision overwrites for the guild.''' overwrites = { ctx.guild.default_role: discord.PermissionOverwrite(read_messages=False) } if modrole: overwrites[modrole] = discord.PermissionOverwrite(read_messages=True) else: for role in self.guess_modroles(ctx): overwrites[role] = discord.PermissionOverwrite(read_messages=True) return overwrites def help_embed(self, prefix): em = discord.Embed(color=0x00FFFF) em.set_author(name='Mod Mail - Help', icon_url=self.user.avatar_url) em.description = 'This bot is a python implementation of a stateless "Mod Mail" bot. ' \ 'Made by Kyb3r and improved by the suggestions of others. This bot ' \ 'saves no data and utilises channel topics for storage and syncing.' cmds = f'`{prefix}setup [modrole] <- (optional)` - Command that sets up the bot.\n' \ f'`{prefix}reply <message...>` - Sends a message to the current thread\'s recipient.\n' \ f'`{prefix}close` - Closes the current thread and deletes the channel.\n' \ f'`{prefix}disable` - Closes all threads and disables modmail for the server.\n' \ f'`{prefix}customstatus` - Sets the Bot status to whatever you want.' \ f'`{prefix}block` - Blocks a user from using modmail!' \ f'`{prefix}unblock` - Unblocks a user from using modmail!' warn = 'Do not manually delete the category or channels as it will break the system. ' \ 'Modifying the channel topic will also break the system.' em.add_field(name='Commands', value=cmds) em.add_field(name='Warning', value=warn) em.add_field(name='Github', value='https://github.com/verixx/modmail') em.set_footer(text='Star the repository to unlock hidden features!') return em @commands.command() @commands.has_permissions(administrator=True) async def setup(self, ctx, *, modrole: discord.Role=None): '''Sets up a server for modmail''' if discord.utils.get(ctx.guild.categories, name='Mod Mail'): return await ctx.send('This server is already set up.') categ = await ctx.guild.create_category( name='Mod Mail', overwrites=self.overwrites(ctx, modrole=modrole) ) await categ.edit(position=0) c = await ctx.guild.create_text_channel(name='bot-info', category=categ) await c.edit(topic='Manually add user id\'s to block users.\n\n' 'Blocked\n-------\n\n') await c.send(embed=self.help_embed(ctx.prefix)) await ctx.send('Successfully set up server.') @commands.command() @commands.has_permissions(administrator=True) async def disable(self, ctx): '''Close all threads and disable modmail.''' categ = discord.utils.get(ctx.guild.categories, name='Mod Mail') if not categ: return await ctx.send('This server is not set up.') for category, channels in ctx.guild.by_category(): if category == categ: for chan in channels: if 'User ID:' in str(chan.topic): user_id = int(chan.topic.split(': ')[1]) user = self.get_user(user_id) await user.send(f'**{ctx.author}** has closed this modmail session.') await chan.delete() await categ.delete() await ctx.send('Disabled modmail.') @commands.command(name='close') @commands.has_permissions(manage_channels=True) async def _close(self, ctx): '''Close the current thread.''' if 'User ID:' not in str(ctx.channel.topic): return await ctx.send('This is not a modmail thread.') user_id = int(ctx.channel.topic.split(': ')[1]) user = self.get_user(user_id) em = discord.Embed(title='Thread Closed') em.description = f'**{ctx.author}** has closed this modmail session.' 
em.color = discord.Color.red() try: await user.send(embed=em) except: pass await ctx.channel.delete() @commands.command() async def ping(self, ctx): """Pong! Returns your websocket latency.""" em = discord.Embed() em.title ='Pong! Websocket Latency:' em.description = f'{self.ws.latency * 1000:.4f} ms' em.color = 0x00FF00 await ctx.send(embed=em) def guess_modroles(self, ctx): '''Finds roles if it has the manage_guild perm''' for role in ctx.guild.roles: if role.permissions.manage_guild: yield role def format_info(self, message): '''Get information about a member of a server supports users from the guild or not.''' user = message.author server = self.guild member = self.guild.get_member(user.id) avi = user.avatar_url time = datetime.datetime.utcnow() desc = 'Modmail thread started.' color = 0 if member: roles = sorted(member.roles, key=lambda c: c.position) rolenames = ', '.join([r.name for r in roles if r.name != "@everyone"]) or 'None' member_number = sorted(server.members, key=lambda m: m.joined_at).index(member) + 1 for role in roles: if str(role.color) != "#000000": color = role.color em = discord.Embed(colour=color, description=desc, timestamp=time) em.add_field(name='Account Created', value=str((time - user.created_at).days)+' days ago.') em.set_footer(text='User ID: '+str(user.id)) em.set_thumbnail(url=avi) em.set_author(name=user, icon_url=server.icon_url) if member: em.add_field(name='Joined', value=str((time - member.joined_at).days)+' days ago.') em.add_field(name='Member No.',value=str(member_number),inline = True) em.add_field(name='Nick', value=member.nick, inline=True) em.add_field(name='Roles', value=rolenames, inline=True) em.add_field(name='Message', value=message.content, inline=False) return em async def send_mail(self, message, channel, mod): author = message.author fmt = discord.Embed() fmt.description = message.content fmt.timestamp = message.created_at urls = re.findall(r'(https?://[^\s]+)', message.content) types = ['.png', '.jpg', '.gif', '.jpeg', '.webp'] for u in urls: if any(urlparse(u).path.endswith(x) for x in types): fmt.set_image(url=u) break if mod: fmt.color=discord.Color.green() fmt.set_author(name=str(author), icon_url=author.avatar_url) fmt.set_footer(text='Moderator') else: fmt.color=discord.Color.gold() fmt.set_author(name=str(author), icon_url=author.avatar_url) fmt.set_footer(text='User') embed = None if message.attachments: fmt.set_image(url=message.attachments[0].url) await channel.send(embed=fmt) async def process_reply(self, message): try: await message.delete() except discord.errors.NotFound: pass await self.send_mail(message, message.channel, mod=True) user_id = int(message.channel.topic.split(': ')[1]) user = self.get_user(user_id) await self.send_mail(message, user, mod=True) def format_name(self, author): name = author.name new_name = '' for letter in name: if letter in string.ascii_letters + string.digits: new_name += letter if not new_name: new_name = 'null' new_name += f'-{author.discriminator}' return new_name @property def blocked_em(self): em = discord.Embed(title='Message not sent!', color=discord.Color.red()) em.description = 'You have been blocked from using modmail.' 
return em async def process_modmail(self, message): '''Processes messages sent to the bot.''' try: await message.add_reaction('✅') except: pass guild = self.guild author = message.author topic = f'User ID: {author.id}' channel = discord.utils.get(guild.text_channels, topic=topic) categ = discord.utils.get(guild.categories, name='Mod Mail') top_chan = categ.channels[0] #bot-info blocked = top_chan.topic.split('Blocked\n-------')[1].strip().split('\n') blocked = [x.strip() for x in blocked] if str(message.author.id) in blocked: return await message.author.send(embed=self.blocked_em) em = discord.Embed(title='Thanks for the message!') em.description = 'The moderation team will get back to you as soon as possible!' em.color = discord.Color.green() if channel is not None: await self.send_mail(message, channel, mod=False) else: await message.author.send(embed=em) channel = await guild.create_text_channel( name=self.format_name(author), category=categ ) await channel.edit(topic=topic) await channel.send('@here', embed=self.format_info(message)) async def on_message(self, message): if message.author.bot: return await self.process_commands(message) if isinstance(message.channel, discord.DMChannel): await self.process_modmail(message) @commands.command() async def reply(self, ctx, *, msg): '''Reply to users using this command.''' categ = discord.utils.get(ctx.guild.categories, id=ctx.channel.category_id) if categ is not None: if categ.name == 'Mod Mail': if 'User ID:' in ctx.channel.topic: ctx.message.content = msg await self.process_reply(ctx.message) @commands.command(name="customstatus", aliases=['status', 'presence']) @commands.has_permissions(administrator=True) async def _status(self, ctx, *, message): '''Set a custom playing status for the bot.''' if message == 'clear': return await self.change_presence(activity=None) await self.change_presence(activity=discord.Game(message)) await ctx.send(f"Changed status to **{message}**") @commands.command() @commands.has_permissions(manage_channels=True) async def block(self, ctx, id=None): '''Block a user from using modmail.''' if id is None: if 'User ID:' in str(ctx.channel.topic): id = ctx.channel.topic.split('User ID: ')[1].strip() else: return await ctx.send('No User ID provided.') categ = discord.utils.get(ctx.guild.categories, name='Mod Mail') top_chan = categ.channels[0] #bot-info topic = str(top_chan.topic) topic += id + '\n' if id not in top_chan.topic: await top_chan.edit(topic=topic) await ctx.send('User successfully blocked!') else: await ctx.send('User is already blocked.') @commands.command() @commands.has_permissions(manage_channels=True) async def unblock(self, ctx, id=None): '''Unblocks a user from using modmail.''' if id is None: if 'User ID:' in str(ctx.channel.topic): id = ctx.channel.topic.split('User ID: ')[1].strip() else: return await ctx.send('No User ID provided.') categ = discord.utils.get(ctx.guild.categories, name='Mod Mail') top_chan = categ.channels[0] #bot-info topic = str(top_chan.topic) topic = topic.replace(id+'\n', '') if id in top_chan.topic: await top_chan.edit(topic=topic) await ctx.send('User successfully unblocked!') else: await ctx.send('User is not already blocked.') @commands.command(hidden=True, name='eval') async def _eval(self, ctx, *, body: str): """Evaluates python code""" allowed = [int(x) for x in os.getenv('OWNERS', '').split(',')] if ctx.author.id not in allowed: return env = { 'bot': self, 'ctx': ctx, 'channel': ctx.channel, 'author': ctx.author, 'guild': ctx.guild, 'message': ctx.message, 'source': 
inspect.getsource } env.update(globals()) body = self.cleanup_code(body) stdout = io.StringIO() err = out = None to_compile = f'async def func():\n{textwrap.indent(body, " ")}' try: exec(to_compile, env) except Exception as e: err = await ctx.send(f'```py\n{e.__class__.__name__}: {e}\n```') return await err.add_reaction('\u2049') func = env['func'] try: with redirect_stdout(stdout): ret = await func() except Exception as e: value = stdout.getvalue() err = await ctx.send(f'```py\n{value}{traceback.format_exc()}\n```') else: value = stdout.getvalue() if ret is None: if value: try: out = await ctx.send(f'```py\n{value}\n```') except: await ctx.send('```Result is too long to send.```') else: self._last_result = ret try: out = await ctx.send(f'```py\n{value}{ret}\n```') except: await ctx.send('```Result is too long to send.```') if out: await ctx.message.add_reaction('\u2705') #tick if err: await ctx.message.add_reaction('\u2049') #x else: await ctx.message.add_reaction('\u2705') def cleanup_code(self, content): """Automatically removes code blocks from the code.""" # remove ```py\n``` if content.startswith('```') and content.endswith('```'): return '\n'.join(content.split('\n')[1:-1]) # remove `foo` return content.strip('` \n') if __name__ == '__main__': Modmail.init()
[]
[]
[ "OWNERS", "STATUS", "TOKEN", "PREFIX", "GUILD_ID" ]
[]
["OWNERS", "STATUS", "TOKEN", "PREFIX", "GUILD_ID"]
python
5
0
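The bot above resolves its token by preferring the TOKEN environment variable and falling back to config.json only afterwards, which keeps the secret out of the repository. A compact sketch of that lookup order; the error handling is my own addition:

import json
import os


def resolve_token(path="config.json"):
    """Environment variable wins; the config file is only a fallback."""
    token = os.environ.get("TOKEN")
    if token:
        return token.strip('"')
    try:
        with open(path) as fh:
            return json.load(fh).get("TOKEN")
    except (FileNotFoundError, json.JSONDecodeError):
        return None


print(resolve_token())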
locallibrary/settings.py
""" Django settings for locallibrary project. Generated by 'django-admin startproject' using Django 1.10. For more information on this file, see https://docs.djangoproject.com/en/1.10/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.10/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! # SECRET_KEY = 'cg#p$g+j9tax!#a3cup@1$8obt2_+&k3q+pmu)5%asj6yjpkag' import os SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'cg#p$g+j9tax!#a3cup@1$8obt2_+&k3q+pmu)5%asj6yjpkag') # SECURITY WARNING: don't run with debug turned on in production! # DEBUG = True DEBUG = bool(os.environ.get('DJANGO_DEBUG', True)) # Set hosts to allow any app on Heroku and the local testing URL ALLOWED_HOSTS = ['.herokuapp.com', '127.0.0.1', 'localhost'] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', # Add our new application 'catalog.apps.CatalogConfig', # This object was created for us in /catalog/apps.py 'django_extensions'] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'locallibrary.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': ['./templates', ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'locallibrary.wsgi.application' # Database # https://docs.djangoproject.com/en/1.10/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.10/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Redirect to home URL after login (Default redirects to /accounts/profile/) LOGIN_REDIRECT_URL = '/' # Add to test email: EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' # Heroku: Update database configuration from $DATABASE_URL. 
import dj_database_url db_from_env = dj_database_url.config(conn_max_age=500) DATABASES['default'].update(db_from_env) # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.10/howto/static-files/ # The absolute path to the directory where collectstatic will collect static files for deployment. STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles') # The URL to use when referring to static files (where they will be served from) STATIC_URL = '/static/' # Simplified static file serving. # https://warehouse.python.org/project/whitenoise/ # STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage' TEST_RUNNER = 'xmlrunner.extra.djangotestrunner.XMLTestRunner' TEST_OUTPUT_VERBOSE = 2 TEST_OUTPUT_DIR = 'test-results'
[]
[]
[ "DJANGO_DEBUG", "DJANGO_SECRET_KEY" ]
[]
["DJANGO_DEBUG", "DJANGO_SECRET_KEY"]
python
2
0
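The settings above compute DEBUG = bool(os.environ.get('DJANGO_DEBUG', True)), and because bool() of any non-empty string is True, exporting DJANGO_DEBUG=False still leaves DEBUG on; only an empty string disables it. A hedged sketch of a stricter reading; the helper name is my own:

import os


def debug_flag(default=False):
    """Treat only explicit true-ish spellings of DJANGO_DEBUG as True."""
    raw = os.environ.get("DJANGO_DEBUG", str(default))
    return raw.strip().lower() in ("1", "true", "yes", "on")


# bool("False") is True, which is the trap the stricter helper avoids.
print(bool("False"), debug_flag())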
data/raw/hbase/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/HttpDoAsClient.java
/** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hbase.thrift; import java.io.UnsupportedEncodingException; import java.nio.ByteBuffer; import java.security.Principal; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Base64; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import javax.security.auth.Subject; import javax.security.auth.login.AppConfigurationEntry; import javax.security.auth.login.Configuration; import javax.security.auth.login.LoginContext; import org.apache.hadoop.hbase.thrift.generated.AlreadyExists; import org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor; import org.apache.hadoop.hbase.thrift.generated.Hbase; import org.apache.hadoop.hbase.thrift.generated.TCell; import org.apache.hadoop.hbase.thrift.generated.TRowResult; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClientUtils; import org.apache.thrift.protocol.TBinaryProtocol; import org.apache.thrift.protocol.TProtocol; import org.apache.thrift.transport.THttpClient; import org.apache.thrift.transport.TSocket; import org.apache.thrift.transport.TTransport; import org.apache.yetus.audience.InterfaceAudience; import org.ietf.jgss.GSSContext; import org.ietf.jgss.GSSCredential; import org.ietf.jgss.GSSException; import org.ietf.jgss.GSSManager; import org.ietf.jgss.GSSName; import org.ietf.jgss.Oid; /** * See the instructions under hbase-examples/README.txt */ @InterfaceAudience.Private public class HttpDoAsClient { static protected int port; static protected String host; private static boolean secure = false; static protected String doAsUser = null; static protected String principal = null; public static void main(String[] args) throws Exception { if (args.length < 3 || args.length > 4) { System.out.println("Invalid arguments!"); System.out.println("Usage: HttpDoAsClient host port doAsUserName [security=true]"); System.exit(-1); } host = args[0]; port = Integer.parseInt(args[1]); doAsUser = args[2]; if (args.length > 3) { secure = Boolean.parseBoolean(args[3]); principal = getSubject().getPrincipals().iterator().next().getName(); } final HttpDoAsClient client = new HttpDoAsClient(); Subject.doAs(getSubject(), new PrivilegedExceptionAction<Void>() { @Override public Void run() throws Exception { client.run(); return null; } }); } HttpDoAsClient() { } // Helper to translate strings to UTF8 bytes private byte[] bytes(String s) { try { return s.getBytes("UTF-8"); } catch (UnsupportedEncodingException e) { e.printStackTrace(); return null; } } private void run() throws Exception { TTransport transport = new TSocket(host, port); transport.open(); String url = "http://" + host + ":" + port; THttpClient httpClient = new THttpClient(url); 
httpClient.open(); TProtocol protocol = new TBinaryProtocol(httpClient); Hbase.Client client = new Hbase.Client(protocol); byte[] t = bytes("demo_table"); // // Scan all tables, look for the demo table and delete it. // System.out.println("scanning tables..."); for (ByteBuffer name : refresh(client, httpClient).getTableNames()) { System.out.println(" found: " + ClientUtils.utf8(name.array())); if (ClientUtils.utf8(name.array()).equals(ClientUtils.utf8(t))) { if (refresh(client, httpClient).isTableEnabled(name)) { System.out.println(" disabling table: " + ClientUtils.utf8(name.array())); refresh(client, httpClient).disableTable(name); } System.out.println(" deleting table: " + ClientUtils.utf8(name.array())); refresh(client, httpClient).deleteTable(name); } } // // Create the demo table with two column families, entry: and unused: // ArrayList<ColumnDescriptor> columns = new ArrayList<>(2); ColumnDescriptor col; col = new ColumnDescriptor(); col.name = ByteBuffer.wrap(bytes("entry:")); col.timeToLive = Integer.MAX_VALUE; col.maxVersions = 10; columns.add(col); col = new ColumnDescriptor(); col.name = ByteBuffer.wrap(bytes("unused:")); col.timeToLive = Integer.MAX_VALUE; columns.add(col); System.out.println("creating table: " + ClientUtils.utf8(t)); try { refresh(client, httpClient).createTable(ByteBuffer.wrap(t), columns); } catch (AlreadyExists ae) { System.out.println("WARN: " + ae.message); } System.out.println("column families in " + ClientUtils.utf8(t) + ": "); Map<ByteBuffer, ColumnDescriptor> columnMap = refresh(client, httpClient) .getColumnDescriptors(ByteBuffer.wrap(t)); for (ColumnDescriptor col2 : columnMap.values()) { System.out.println(" column: " + ClientUtils.utf8(col2.name.array()) + ", maxVer: " + col2.maxVersions); } transport.close(); httpClient.close(); } private Hbase.Client refresh(Hbase.Client client, THttpClient httpClient) { httpClient.setCustomHeader("doAs", doAsUser); if(secure) { try { httpClient.setCustomHeader("Authorization", generateTicket()); } catch (GSSException e) { e.printStackTrace(); } } return client; } private String generateTicket() throws GSSException { final GSSManager manager = GSSManager.getInstance(); // Oid for kerberos principal name Oid krb5PrincipalOid = new Oid("1.2.840.113554.1.2.2.1"); Oid KERB_V5_OID = new Oid("1.2.840.113554.1.2.2"); final GSSName clientName = manager.createName(principal, krb5PrincipalOid); final GSSCredential clientCred = manager.createCredential(clientName, 8 * 3600, KERB_V5_OID, GSSCredential.INITIATE_ONLY); final GSSName serverName = manager.createName(principal, krb5PrincipalOid); final GSSContext context = manager.createContext(serverName, KERB_V5_OID, clientCred, GSSContext.DEFAULT_LIFETIME); context.requestMutualAuth(true); context.requestConf(false); context.requestInteg(true); final byte[] outToken = context.initSecContext(new byte[0], 0, 0); StringBuffer outputBuffer = new StringBuffer(); outputBuffer.append("Negotiate "); outputBuffer.append(Bytes.toString(Base64.getEncoder().encode(outToken))); System.out.print("Ticket is: " + outputBuffer); return outputBuffer.toString(); } private void printVersions(ByteBuffer row, List<TCell> versions) { StringBuilder rowStr = new StringBuilder(); for (TCell cell : versions) { rowStr.append(ClientUtils.utf8(cell.value.array())); rowStr.append("; "); } System.out.println("row: " + ClientUtils.utf8(row.array()) + ", values: " + rowStr); } private void printRow(TRowResult rowResult) { ClientUtils.printRow(rowResult); } static Subject getSubject() throws Exception { if 
(!secure) return new Subject(); /* * To authenticate the DemoClient, kinit should be invoked ahead. * Here we try to get the Kerberos credential from the ticket cache. */ LoginContext context = new LoginContext("", new Subject(), null, new Configuration() { @Override public AppConfigurationEntry[] getAppConfigurationEntry(String name) { Map<String, String> options = new HashMap<>(); options.put("useKeyTab", "false"); options.put("storeKey", "false"); options.put("doNotPrompt", "true"); options.put("useTicketCache", "true"); options.put("renewTGT", "true"); options.put("refreshKrb5Config", "true"); options.put("isInitiator", "true"); String ticketCache = System.getenv("KRB5CCNAME"); if (ticketCache != null) { options.put("ticketCache", ticketCache); } options.put("debug", "true"); return new AppConfigurationEntry[]{ new AppConfigurationEntry("com.sun.security.auth.module.Krb5LoginModule", AppConfigurationEntry.LoginModuleControlFlag.REQUIRED, options)}; } }); context.login(); return context.getSubject(); } }
[ "\"KRB5CCNAME\"" ]
[]
[ "KRB5CCNAME" ]
[]
["KRB5CCNAME"]
java
1
0
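The row above pairs the HBase HttpDoAsClient.java source with a single constant environment lookup, System.getenv("KRB5CCNAME"), hence constargcount 1 and variableargcount 0. The pipeline that produced these rows is not described here, so the regex and helper name below are assumptions; this is only a minimal Python sketch of how a constant string argument can be told apart from a variable one, which is the distinction the constarg and variablearg columns encode.

import re

# Hypothetical classifier mirroring the constarg/variablearg columns:
# string-literal arguments to getenv-style calls count as constants,
# anything else counts as a variable argument.
GETENV_CALL = re.compile(r'(?:System\.getenv|os\.getenv|os\.Getenv)\(\s*([^)]+?)\s*\)')

def classify_getenv_args(source: str):
    const_args, variable_args = [], []
    for raw_arg in GETENV_CALL.findall(source):
        literal = re.fullmatch(r'"([^"]*)"', raw_arg)
        if literal:
            const_args.append(literal.group(1))
        else:
            variable_args.append(raw_arg)
    return const_args, variable_args

# Applied to the Java line captured above, this yields one constant and no variables.
consts, variables = classify_getenv_args('String ticketCache = System.getenv("KRB5CCNAME");')
assert consts == ["KRB5CCNAME"] and variables == []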
torch/testing/_internal/common_distributed.py
from contextlib import contextmanager from datetime import timedelta from enum import Enum import faulthandler import multiprocessing from io import StringIO import os import sys import tempfile import threading import time import unittest import logging import traceback import types from typing import NamedTuple, Optional, Union from functools import wraps import torch import torch.distributed as c10d import torch.cuda.nccl from functools import partial, reduce from torch.testing._internal.common_utils import ( TestCase, TEST_WITH_ROCM, FILE_SCHEMA, find_free_port, retry_on_connect_failures, IS_SANDCASTLE ) logger = logging.getLogger(__name__) class TestSkip(NamedTuple): exit_code: int message: str TEST_SKIPS = { "backend_unavailable": TestSkip(72, "Skipped because distributed backend is not available."), "small_worldsize": TestSkip(73, "Skipped due to small world size."), "no_cuda": TestSkip(74, "CUDA is not available."), "multi-gpu-1": TestSkip(75, "Need at least 1 CUDA device"), "multi-gpu-2": TestSkip(77, "Need at least 2 CUDA devices"), "multi-gpu-3": TestSkip(80, "Need at least 3 CUDA devices"), "multi-gpu-4": TestSkip(81, "Need at least 4 CUDA devices"), "multi-gpu-5": TestSkip(82, "Need at least 5 CUDA devices"), "multi-gpu-6": TestSkip(83, "Need at least 6 CUDA devices"), "multi-gpu-7": TestSkip(84, "Need at least 7 CUDA devices"), "multi-gpu-8": TestSkip(85, "Need at least 8 CUDA devices"), "nccl": TestSkip(76, "c10d not compiled with NCCL support"), "skipIfRocm": TestSkip(78, "Test skipped for ROCm"), "no_peer_access": TestSkip(79, "Test skipped because no GPU peer access"), "generic": TestSkip(86, "Test skipped at subprocess level, look at subprocess log for skip reason"), } def skip_if_no_gpu(func): """ Nccl multigpu tests require at least 2 GPUS. Skip if this is not met""" @wraps(func) def wrapper(*args, **kwargs): if not torch.cuda.is_available(): sys.exit(TEST_SKIPS["no_cuda"].exit_code) world_size = int(os.environ["WORLD_SIZE"]) if torch.cuda.device_count() < world_size: sys.exit(TEST_SKIPS[f"multi-gpu-{world_size}"].exit_code) return func(*args, **kwargs) return wrapper def skip_if_small_worldsize(func): @wraps(func) def wrapper(*args, **kwargs): if (os.environ["BACKEND"] != "mpi") and int(os.environ["WORLD_SIZE"]) <= 2: sys.exit(TEST_SKIPS["small_worldsize"].exit_code) return func(*args, **kwargs) return wrapper def require_n_gpus_for_nccl_backend(n, backend): def decorator(func): @wraps(func) def wrapper(*args, **kwargs): if backend == "nccl" and torch.cuda.device_count() < n: sys.exit(TEST_SKIPS[f'multi-gpu-{n}'].exit_code) else: return func(*args, **kwargs) return wrapper return decorator def skip_if_lt_x_gpu(x): def decorator(func): @wraps(func) def wrapper(*args, **kwargs): if torch.cuda.is_available() and torch.cuda.device_count() >= x: return func(*args, **kwargs) sys.exit(TEST_SKIPS[f'multi-gpu-{x}'].exit_code) return wrapper return decorator # This decorator helps avoiding initializing cuda while testing other backends def nccl_skip_if_lt_x_gpu(backend, x): def decorator(func): @wraps(func) def wrapper(*args, **kwargs): if backend != "nccl": return func(*args, **kwargs) if torch.cuda.is_available() and torch.cuda.device_count() >= x: return func(*args, **kwargs) sys.exit(TEST_SKIPS[f'multi-gpu-{x}'].exit_code) return wrapper return decorator def verify_ddp_error_logged(model_DDP, err_substr): # Verify error was logged in ddp_logging_data. 
ddp_logging_data = model_DDP._get_ddp_logging_data() assert "has_error" in ddp_logging_data assert "error" in ddp_logging_data assert err_substr in ddp_logging_data["error"] def with_nccl_blocking_wait(func): """ Convenience decorator to set/unset NCCL_BLOCKING_WAIT flag. Note that use of this decorator will override the setting of NCCL_ASYNC_ERROR_HANDLING for the particular test. After the test, both NCCL_BLOCKING_WAIT and NCCL_ASYNC_ERROR_HANDLING will be restored to their original values. """ @wraps(func) def wrapper(*args, **kwargs): # Save and unset NCCL_ASYNC_ERROR_HANDLING try: cached_nccl_async_error_handling: Union[str, None] = os.environ[ "NCCL_ASYNC_ERROR_HANDLING" ] del os.environ["NCCL_ASYNC_ERROR_HANDLING"] except KeyError: # NCCL_ASYNC_ERROR_HANDLING was unset cached_nccl_async_error_handling = None # Save val of NCCL_BLOCKING_WAIT and set it. try: cached_nccl_blocking_wait: Union[str, None] = os.environ[ "NCCL_BLOCKING_WAIT" ] except KeyError: cached_nccl_blocking_wait = None finally: os.environ["NCCL_BLOCKING_WAIT"] = "1" try: ret = func(*args, **kwargs) return ret finally: # restore old values. if cached_nccl_async_error_handling is not None: os.environ[ "NCCL_ASYNC_ERROR_HANDLING" ] = cached_nccl_async_error_handling if cached_nccl_blocking_wait is not None: os.environ["NCCL_BLOCKING_WAIT"] = cached_nccl_blocking_wait return wrapper def with_dist_debug_levels(levels): """ Runs a test for each distributed debug level specified in levels. """ def decorator(func): @wraps(func) def wrapper(*args, **kwargs): old_level = os.environ.get("TORCH_DISTRIBUTED_DEBUG", None) for level in levels: os.environ["TORCH_DISTRIBUTED_DEBUG"] = level ret = func(*args, **kwargs) if old_level is not None: os.environ["TORCH_DISTRIBUTED_DEBUG"] = old_level # Only returns test return for last test, but since these are # unittests the return value is not really used and earlier tests # would've raised had they failed. return ret return wrapper return decorator def requires_gloo(): return unittest.skipUnless( c10d.is_gloo_available(), "c10d was not compiled with the Gloo backend", ) def requires_nccl_version(version, msg): if not c10d.is_nccl_available(): return unittest.skip( "c10d was not compiled with the NCCL backend", ) else: return unittest.skipIf( torch.cuda.nccl.version() < version, "Requires NCCL version greater than or equal to: {}, found: {}, reason: {}".format( version, torch.cuda.nccl.version(), msg), ) def requires_nccl(): return unittest.skipUnless( c10d.is_nccl_available(), "c10d was not compiled with the NCCL backend", ) def requires_mpi(): return unittest.skipUnless( c10d.is_mpi_available(), "c10d was not compiled with the MPI backend", ) def skip_if_rocm_single_process(func): """Skips a test for ROCm in a single process environment""" func.skip_if_rocm = True @wraps(func) def wrapper(*args, **kwargs): if not TEST_WITH_ROCM: return func(*args, **kwargs) raise unittest.SkipTest("Test skipped for ROCm") return wrapper def skip_if_rocm(func): """Skips a test for ROCm""" func.skip_if_rocm = True @wraps(func) def wrapper(*args, **kwargs): if not TEST_WITH_ROCM: return func(*args, **kwargs) sys.exit(TEST_SKIPS['skipIfRocm'].exit_code) return wrapper def skip_if_win32(): return unittest.skipIf( sys.platform == 'win32', "This unit test case is not supportted on Windows platform", ) @retry_on_connect_failures def create_tcp_store(addr="localhost", world_size=1, is_master=True, timeout=timedelta(minutes=5), wait_for_workers=True, jit_class=False): """ Creates a TCP store. 
Retries if the chosen port is already in use. """ port = find_free_port() if jit_class: timeout_millisecond = int(timeout / timedelta(milliseconds=1)) return torch.classes.dist_c10d.TCPStore(addr, port, world_size, is_master, timeout_millisecond) else: return c10d.TCPStore(addr, port, world_size, is_master, wait_for_workers=wait_for_workers) TIMEOUT_DEFAULT = 100 TIMEOUT_OVERRIDE = {"test_ddp_uneven_inputs": 400} def create_device(interface=None): if sys.platform == 'win32' or interface is None: return c10d.ProcessGroupGloo.create_device(hostname="127.0.0.1") else: return c10d.ProcessGroupGloo.create_device(interface=interface) def get_timeout(test_id) -> int: return TIMEOUT_OVERRIDE.get(test_id.split('.')[-1], TIMEOUT_DEFAULT) @contextmanager def captured_output(): new_out, new_err = StringIO(), StringIO() old_out, old_err = sys.stdout, sys.stderr try: sys.stdout, sys.stderr = new_out, new_err yield sys.stdout, sys.stderr finally: sys.stdout, sys.stderr = old_out, old_err def simple_sparse_reduce_tests(rank: int, world_size: int, num_inputs: int = 1): """ Generate a number of basic test cases for sparse reduction. These cover tensors with a varying number of sparse dimensions and a varying number of dense dimensions. The only reduction operation we support is sum. """ def generate(rank: int, world_size: int, sparse_dims: int = 1, dense_dims: int = 0): # First sparse dimension is [0..rank]. # Subsequent dimensions are always 0, so we know there is # a non-empty intersection between any two sparse tensors. indices = torch.reshape(torch.arange(rank + 1), (1, rank + 1)) shape = [world_size] + [2 for _ in range(dense_dims)] for _ in range(sparse_dims - 1): indices = torch.cat((indices, torch.zeros(1, rank + 1))) shape.append(world_size) values = torch.ones([rank + 1] + [2 for _ in range(dense_dims)]) return torch.sparse_coo_tensor(indices, values, shape) def compute_sum(fn, world_size: int): return reduce(lambda a, b: a + b, [fn(rank, world_size) for rank in range(world_size)]) return [ ( [ fn(num_inputs * rank + i, num_inputs * world_size) for i in range(num_inputs) ], [ compute_sum(fn, num_inputs * world_size) for i in range(num_inputs) ], ) for fn in [ partial(generate, sparse_dims=1), partial(generate, sparse_dims=2), partial(generate, sparse_dims=3), partial(generate, dense_dims=1), partial(generate, dense_dims=2), partial(generate, dense_dims=3), ] ] tmp_dir: Optional[tempfile.TemporaryDirectory] = None def initialize_temp_directories(init_method: Optional[str] = None) -> None: global tmp_dir tmp_dir = tempfile.TemporaryDirectory() os.environ["TEMP_DIR"] = tmp_dir.name os.mkdir(os.path.join(tmp_dir.name, "barrier")) os.mkdir(os.path.join(tmp_dir.name, "test_dir")) init_dir_path = os.path.join(tmp_dir.name, "init_dir") os.mkdir(init_dir_path) # Set init method if specified. if init_method is not None: os.environ["INIT_METHOD"] = init_method else: os.environ["INIT_METHOD"] = FILE_SCHEMA + os.path.join( init_dir_path, "shared_init_file" ) def cleanup_temp_dir() -> None: if tmp_dir is not None: tmp_dir.cleanup() # [How does MultiProcessTestCase work?] # Each MultiProcessTestCase instance uses 1 + `world_size()` processes, by # default `world_size()` returns 4. Let's take `test_rpc_spawn.py` as an # example which inherits from this class. Its `Setup()` methods calls into # `MultiProcessTestCase._spawn_processes()` which spawns `world_size()` # subprocesses. During the spawn, the main process passes the test name to # subprocesses, and the name is acquired from self.id(). 
The subprocesses # then use the provided test function name to retrieve the function attribute # from the test instance and run it. The main process simply waits for all # subprocesses to join. class MultiProcessTestCase(TestCase): MAIN_PROCESS_RANK = -1 # This exit code is used to indicate that the test code had an error and # exited abnormally. There are certain tests that might use sys.exit() to # simulate failures and in those cases, we can't have an exit code of 0, # but we still want to ensure we didn't run into any other errors. TEST_ERROR_EXIT_CODE = 10 # do not early terminate for distributed tests. def _should_stop_test_suite(self) -> bool: return False @property def world_size(self) -> int: return 4 def join_or_run(self, fn): @wraps(fn) def wrapper(self): if self.rank == self.MAIN_PROCESS_RANK: self._join_processes(fn) else: fn() return types.MethodType(wrapper, self) # The main process spawns N subprocesses that run the test. # Constructor patches current instance test method to # assume the role of the main process and join its subprocesses, # or run the underlying test function. def __init__(self, method_name: str = 'runTest') -> None: super().__init__(method_name) fn = getattr(self, method_name) setattr(self, method_name, self.join_or_run(fn)) def setUp(self) -> None: super().setUp() self.skip_return_code_checks = [] # type: ignore[var-annotated] self.processes = [] # type: ignore[var-annotated] self.rank = self.MAIN_PROCESS_RANK self.file_name = tempfile.NamedTemporaryFile(delete=False).name # pid to pipe consisting of error message from process. self.pid_to_pipe = {} # type: ignore[var-annotated] def tearDown(self) -> None: super().tearDown() for p in self.processes: p.terminate() # Each Process instance holds a few open file descriptors. The unittest # runner creates a new TestCase instance for each test method and keeps # it alive until the end of the entire suite. We must thus reset the # processes to prevent an effective file descriptor leak. self.processes = [] def _current_test_name(self) -> str: # self.id() == e.g. '__main__.TestDistributed.TestAdditive.test_get_rank' return self.id().split(".")[-1] def _start_processes(self, proc) -> None: self.processes = [] for rank in range(int(self.world_size)): parent_conn, child_conn = torch.multiprocessing.Pipe() process = proc( target=self.__class__._run, name='process ' + str(rank), args=(rank, self._current_test_name(), self.file_name, child_conn)) process.start() logger.info(f'Started process {rank} with pid {process.pid}') self.pid_to_pipe[process.pid] = parent_conn self.processes.append(process) def _fork_processes(self) -> None: proc = torch.multiprocessing.get_context("fork").Process self._start_processes(proc) def _spawn_processes(self) -> None: proc = torch.multiprocessing.get_context("spawn").Process self._start_processes(proc) class Event(Enum): GET_TRACEBACK = 1 @staticmethod def _event_listener(parent_pipe, signal_pipe, rank: int): logger.info(f'Starting event listener thread for {rank}') while True: ready_pipes = multiprocessing.connection.wait([parent_pipe, signal_pipe]) if parent_pipe in ready_pipes: if parent_pipe.closed: logger.info(f'Pipe closed for process {rank}, stopping event listener thread') return event = parent_pipe.recv() logger.info(f'Received event {event} on process {rank}') if event == MultiProcessTestCase.Event.GET_TRACEBACK: # Return traceback to the parent process. 
with tempfile.NamedTemporaryFile(mode='r+') as tmp_file: faulthandler.dump_traceback(tmp_file) # Flush buffers and seek to read from the beginning tmp_file.flush() tmp_file.seek(0) parent_pipe.send(tmp_file.read()) logger.info(f'Process {rank} sent traceback') if signal_pipe in ready_pipes: return @classmethod def _run(cls, rank: int, test_name: str, file_name: str, parent_pipe) -> None: self = cls(test_name) # Start event listener thread. signal_recv_pipe, signal_send_pipe = torch.multiprocessing.Pipe(duplex=False) event_listener_thread = threading.Thread( target=MultiProcessTestCase._event_listener, args=(parent_pipe, signal_recv_pipe, rank), daemon=True) event_listener_thread.start() self.rank = rank self.file_name = file_name self.run_test(test_name, parent_pipe, signal_send_pipe, event_listener_thread) # exit to avoid run teardown() for fork processes sys.exit(0) def run_test(self, test_name: str, parent_pipe, signal_pipe=None, event_listener_thread=None) -> None: if sys.platform != 'win32' and sys.platform != 'darwin': # Register signal handler to dump stack traces on FATALs. # Windows and MacOS do not support the signal handlers. torch._C._set_print_stack_traces_on_fatal_signal(True) # self.id() == e.g. '__main__.TestDistributed.test_get_rank' # We're retrieving a corresponding test and executing it. try: getattr(self, test_name)() except unittest.SkipTest as se: logger.info(f'Process {self.rank} skipping test {test_name} for following reason: {str(se)}') sys.exit(TEST_SKIPS["generic"].exit_code) except Exception as e: logger.error( f'Caught exception: \n{traceback.format_exc()} exiting ' f'process {self.rank} with exit code: {MultiProcessTestCase.TEST_ERROR_EXIT_CODE}') # Send error to parent process. parent_pipe.send(traceback.format_exc()) sys.exit(MultiProcessTestCase.TEST_ERROR_EXIT_CODE) finally: if signal_pipe is not None: signal_pipe.send(None) if event_listener_thread is not None: event_listener_thread.join() # Close pipe after done with test. parent_pipe.close() def _get_timedout_process_traceback(self) -> None: pipes = [] for i, process in enumerate(self.processes): if process.exitcode is None: pipe = self.pid_to_pipe[process.pid] try: pipe.send(MultiProcessTestCase.Event.GET_TRACEBACK) pipes.append((i, pipe)) except ConnectionError as e: logger.error(f'Encountered error while trying to get traceback for process {i}: {e}') # Wait for results. for rank, pipe in pipes: try: # Wait for traceback if pipe.poll(5): if pipe.closed: logger.info(f'Pipe closed for process {rank}, cannot retrieve traceback') continue traceback = pipe.recv() logger.error(f'Process {rank} timed out with traceback: \n\n{traceback}') else: logger.error(f'Could not retrieve traceback for timed out process: {rank}') except ConnectionError as e: logger.error(f'Encountered error while trying to get traceback for process {rank}: {e}') def _join_processes(self, fn) -> None: timeout = get_timeout(self.id()) start_time = time.time() subprocess_error = False try: while True: # check to see if any subprocess exited with an error early. for (i, p) in enumerate(self.processes): # This is the exit code processes exit with if they # encountered an exception. 
if p.exitcode == MultiProcessTestCase.TEST_ERROR_EXIT_CODE: print(f'Process {i} terminated with exit code {p.exitcode}, terminating remaining processes.') active_children = torch.multiprocessing.active_children() for ac in active_children: ac.terminate() subprocess_error = True break if subprocess_error: break # All processes have joined cleanly if they all a valid exitcode if all([p.exitcode is not None for p in self.processes]): break # Check if we should time out the test. If so, we terminate each process. elapsed = time.time() - start_time if elapsed > timeout: self._get_timedout_process_traceback() print(f'Timing out after {timeout} seconds and killing subprocesses.') for p in self.processes: p.terminate() break # Sleep to avoid excessive busy polling. time.sleep(0.1) elapsed_time = time.time() - start_time if fn in self.skip_return_code_checks: self._check_no_test_errors(elapsed_time) else: self._check_return_codes(elapsed_time) finally: # Close all pipes for pid, pipe in self.pid_to_pipe.items(): pipe.close() def _check_no_test_errors(self, elapsed_time) -> None: """ Checks that we didn't have any errors thrown in the child processes. """ for i, p in enumerate(self.processes): if p.exitcode is None: raise RuntimeError('Process {} timed out after {} seconds'.format(i, elapsed_time)) self.assertNotEqual(self.TEST_ERROR_EXIT_CODE, p.exitcode) def _check_return_codes(self, elapsed_time) -> None: """ Checks that the return codes of all spawned processes match, and skips tests if they returned a return code indicating a skipping condition. """ first_process = self.processes[0] # first, we check if there are errors in actual processes # (via TEST_ERROR_EXIT CODE), and raise an exception for those. # the reason we do this is to attempt to raise a more helpful error # message than "Process x terminated/timed out" # TODO: we should pipe the exception of the failed subprocess here. # Currently, the actual exception is displayed as a logging output. errored_processes = [ (i, p) for i, p in enumerate(self.processes) if p.exitcode == MultiProcessTestCase.TEST_ERROR_EXIT_CODE ] if errored_processes: error = "" for i, process in errored_processes: # Get error from pipe. error_message = self.pid_to_pipe[process.pid].recv() error += "Process {} exited with error code {} and exception:\n{}\n".format( i, MultiProcessTestCase.TEST_ERROR_EXIT_CODE, error_message) raise RuntimeError(error) # If no process exited uncleanly, we check for timeouts, and then ensure # each process exited cleanly. for i, p in enumerate(self.processes): if p.exitcode is None: raise RuntimeError('Process {} terminated or timed out after {} seconds'.format(i, elapsed_time)) self.assertEqual( p.exitcode, first_process.exitcode, msg="Expect process {} exit code to match Process 0 exit code of {}, but got {}".format( i, first_process.exitcode, p.exitcode ), ) for skip in TEST_SKIPS.values(): if first_process.exitcode == skip.exit_code: if IS_SANDCASTLE: # Don't use unittest.skip to skip the test on sandcastle # since it creates tasks for skipped tests assuming there # is some follow-up needed. Instead just "pass" the test # with an appropriate message. logger.info(f'Skipping {self.id()} on sandcastle for the following reason: {skip.message}') return else: raise unittest.SkipTest(skip.message) self.assertEqual( first_process.exitcode, 0, msg="Expected zero exit code but got {}".format(first_process.exitcode) ) @property def is_master(self) -> bool: return self.rank == 0
[]
[]
[ "TEMP_DIR", "INIT_METHOD", "NCCL_ASYNC_ERROR_HANDLING\"\n ", "NCCL_ASYNC_ERROR_HANDLING", "BACKEND", "NCCL_BLOCKING_WAIT", "TORCH_DISTRIBUTED_DEBUG", "NCCL_ASYNC_ERROR_HANDLING\"\n ", "NCCL_BLOCKING_WAIT\"\n ", "WORLD_SIZE" ]
[]
["TEMP_DIR", "INIT_METHOD", "NCCL_ASYNC_ERROR_HANDLING\"\n ", "NCCL_ASYNC_ERROR_HANDLING", "BACKEND", "NCCL_BLOCKING_WAIT", "TORCH_DISTRIBUTED_DEBUG", "NCCL_ASYNC_ERROR_HANDLING\"\n ", "NCCL_BLOCKING_WAIT\"\n ", "WORLD_SIZE"]
python
10
0
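Several helpers in the row above, with_nccl_blocking_wait and with_dist_debug_levels among them, follow the same save, override, restore discipline around NCCL_BLOCKING_WAIT, NCCL_ASYNC_ERROR_HANDLING and TORCH_DISTRIBUTED_DEBUG. The snippet below is not taken from the row; it is a hedged, generic restatement of that pattern as a context manager, with the usage line reusing one of the variable names for illustration.

import os
from contextlib import contextmanager

@contextmanager
def env_override(**overrides):
    """Temporarily set environment variables and restore the previous values on exit."""
    saved = {name: os.environ.get(name) for name in overrides}
    try:
        os.environ.update({name: str(value) for name, value in overrides.items()})
        yield
    finally:
        for name, previous in saved.items():
            if previous is None:
                os.environ.pop(name, None)   # was unset before, unset it again
            else:
                os.environ[name] = previous  # put the original value back

# Usage, echoing the NCCL_BLOCKING_WAIT handling above:
with env_override(NCCL_BLOCKING_WAIT="1"):
    pass  # run the test body here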
testrunner.py
#!/usr/bin/env python # -*- coding: UTF-8 -*- # Copyright (c) 2017, wradlib developers. # Distributed under the MIT License. See LICENSE.txt for more info. import sys import os import io import getopt import unittest import doctest import inspect from multiprocessing import Process, Queue import nbformat from nbconvert.preprocessors import ExecutePreprocessor from nbconvert.preprocessors.execute import CellExecutionError import coverage VERBOSE = 2 def create_examples_testsuite(): # gather information on examples # all functions inside the examples starting with 'ex_' or 'recipe_' # are considered as tests # find example files in examples directory root_dir = 'examples/' files = [] skip = ['__init__.py'] for root, _, filenames in os.walk(root_dir): for filename in filenames: if filename in skip or filename[-3:] != '.py': continue if 'examples/data' in root: continue f = os.path.join(root, filename) f = f.replace('/', '.') f = f[:-3] files.append(f) # create empty testsuite suite = unittest.TestSuite() # find matching functions in for idx, module in enumerate(files): module1, func = module.split('.') module = __import__(module) func = getattr(module, func) funcs = inspect.getmembers(func, inspect.isfunction) [suite.addTest(unittest.FunctionTestCase(v)) for k, v in funcs if k.startswith(("ex_", "recipe_"))] return suite class NotebookTest(unittest.TestCase): def __init__(self, nbfile, cov): super(NotebookTest, self).__init__() self.nbfile = nbfile self.cov = cov def id(self): return self.nbfile def runTest(self): print(self.id()) kernel = 'python%d' % sys.version_info[0] cur_dir = os.path.dirname(self.nbfile) with open(self.nbfile) as f: nb = nbformat.read(f, as_version=4) if self.cov: covdict = {'cell_type': 'code', 'execution_count': 1, 'metadata': {'collapsed': True}, 'outputs': [], 'nbsphinx': 'hidden', 'source': 'import coverage\n' 'coverage.process_startup()\n' 'import sys\n' 'sys.path.append("{0}")\n'.format(cur_dir) } nb['cells'].insert(0, nbformat.from_dict(covdict)) exproc = ExecutePreprocessor(kernel_name=kernel, timeout=500) try: run_dir = os.getenv('WRADLIB_BUILD_DIR', cur_dir) exproc.preprocess(nb, {'metadata': {'path': run_dir}}) except CellExecutionError as e: raise e if self.cov: nb['cells'].pop(0) with io.open(self.nbfile, 'wt') as f: nbformat.write(nb, f) self.assertTrue(True) def create_notebooks_testsuite(**kwargs): # gather information on notebooks # all notebooks in the notebooks folder # are considered as tests # find notebook files in notebooks directory cov = kwargs.pop('cov') root_dir = os.getenv('WRADLIB_NOTEBOOKS', 'notebooks') files = [] skip = [] for root, _, filenames in os.walk(root_dir): for filename in filenames: if filename in skip or filename[-6:] != '.ipynb': continue # skip checkpoints if '/.' 
in root: continue f = os.path.join(root, filename) files.append(f) # create one TestSuite per Notebook to treat testrunners # memory overconsumption on travis-ci suites = [] for file in files: suite = unittest.TestSuite() suite.addTest(NotebookTest(file, cov)) suites.append(suite) return suites def create_doctest_testsuite(): # gather information on doctests, search in only wradlib folder root_dir = 'wradlib/' files = [] skip = ['__init__.py', 'version.py', 'bufr.py', 'test_'] for root, _, filenames in os.walk(root_dir): for filename in filenames: if filename in skip or filename[-3:] != '.py': continue if 'wradlib/tests' in root: continue f = os.path.join(root, filename) f = f.replace('/', '.') f = f[:-3] files.append(f) # put modules in doctest suite suite = unittest.TestSuite() for module in files: suite.addTest(doctest.DocTestSuite(module)) return suite def create_unittest_testsuite(): # gather information on tests (unittest etc) root_dir = 'wradlib/tests/' return unittest.defaultTestLoader.discover(root_dir) def single_suite_process(queue, test, verbosity, **kwargs): test_cov = kwargs.pop('coverage', 0) test_nb = kwargs.pop('notebooks', 0) if test_cov and not test_nb: cov = coverage.coverage() cov.start() all_success = 1 for ts in test: if ts.countTestCases() != 0: res = unittest.TextTestRunner(verbosity=verbosity).run(ts) all_success = all_success & res.wasSuccessful() if test_cov and not test_nb: cov.stop() cov.save() queue.put(all_success) def keep_tests(suite, arg): newsuite = unittest.TestSuite() try: for tc in suite: try: if tc.id().find(arg) != -1: newsuite.addTest(tc) except AttributeError: new = keep_tests(tc, arg) if new.countTestCases() != 0: newsuite.addTest(new) except TypeError: pass return newsuite def main(args): usage_message = """Usage: python testrunner.py options arg If run without options, testrunner displays the usage message. If all tests suites should be run,, use the -a option. If arg is given, only tests containing arg are run. options: -a --all Run all tests (examples, test, doctest, notebooks) -m Run all tests within a single testsuite [default] -M Run each suite as separate instance -e --example Run only examples tests -d --doc Run only doctests -u --unit Run only unit test -n --notebook Run only notebook test -s --use-subprocess Run every testsuite in a subprocess. -c --coverage Run notebook tests with code coverage -v level Set the level of verbosity. 0 - Silent 1 - Quiet (produces a dot for each succesful test) 2 - Verbose (default - produces a line of output for each test) -h Display usage information. 
""" test_all = 0 test_examples = 0 test_docs = 0 test_notebooks = 0 test_units = 0 test_subprocess = 0 test_cov = 0 verbosity = VERBOSE try: options, arg = getopt.getopt(args, 'aednuschv:', ['all', 'example', 'doc', 'notebook', 'unit', 'use-subprocess', 'coverage', 'help']) except getopt.GetoptError as e: err_exit(e.msg) if not options: err_exit(usage_message) for name, value in options: if name in ('-a', '--all'): test_all = 1 elif name in ('-e', '--example'): test_examples = 1 elif name in ('-d', '--doc'): test_docs = 1 elif name in ('-n', '--notebook'): test_notebooks = 1 elif name in ('-u', '--unit'): test_units = 1 elif name in ('-s', '--use-subprocess'): test_subprocess = 1 elif name in ('-c', '--coverage'): test_cov = 1 elif name in ('-h', '--help'): err_exit(usage_message, 0) elif name == '-v': verbosity = int(value) else: err_exit(usage_message) if not (test_all or test_examples or test_docs or test_notebooks or test_units): err_exit('must specify one of: -a -e -d -n -u') # change to main package path, where testrunner.py lives path = os.path.dirname(__file__) if path: os.chdir(path) testSuite = [] if test_all: testSuite.append(create_examples_testsuite()) testSuite.append(create_notebooks_testsuite(cov=test_cov)) testSuite.append(create_doctest_testsuite()) testSuite.append(create_unittest_testsuite()) elif test_examples: testSuite.append(create_examples_testsuite()) elif test_notebooks: testSuite.append(create_notebooks_testsuite(cov=test_cov)) elif test_docs: testSuite.append(unittest.TestSuite(create_doctest_testsuite())) elif test_units: testSuite.append(create_unittest_testsuite()) all_success = 1 if test_subprocess: for test in testSuite: if arg: test = keep_tests(test, arg[0]) queue = Queue() keywords = {'coverage': test_cov, 'notebooks': test_notebooks} proc = Process(target=single_suite_process, args=(queue, test, verbosity), kwargs=keywords) proc.start() result = queue.get() proc.join() # all_success should be 0 in the end all_success = all_success & result else: if test_cov and not test_notebooks: cov = coverage.coverage() cov.start() for ts in testSuite: if arg: ts = keep_tests(ts, arg[0]) for test in ts: if test.countTestCases() != 0: result = unittest.TextTestRunner(verbosity=verbosity).\ run(test) # all_success should be 0 in the end all_success = all_success & result.wasSuccessful() if test_cov and not test_notebooks: cov.stop() cov.save() if all_success: sys.exit(0) else: # This will return exit code 1 sys.exit("At least one test has failed. " "Please see test report for details.") def err_exit(message, rc=2): sys.stderr.write("\n%s\n" % message) sys.exit(rc) if __name__ == '__main__': main(sys.argv[1:])
[]
[]
[ "WRADLIB_NOTEBOOKS", "WRADLIB_BUILD_DIR" ]
[]
["WRADLIB_NOTEBOOKS", "WRADLIB_BUILD_DIR"]
python
2
0
bot_tg.py
import logging import os import random import redis import telegram from dotenv import load_dotenv from functools import partial from bot_utils import get_arguments from bot_utils import get_quiz_qa from enum import Enum from telegram.ext import ConversationHandler from telegram.ext import CommandHandler from telegram.ext import Filters from telegram.ext import MessageHandler from telegram.ext import Updater QUIZ = Enum('Quiz', 'Question Answer') def start(update, _): custom_keyboard = [['Новый вопрос', 'Сдаться'], ['Мой счёт']] reply_markup = telegram.ReplyKeyboardMarkup(custom_keyboard) update.message.reply_text( 'Привет. Готов к викторине? Начнем!', reply_markup=reply_markup ) return QUIZ.Question def cancel(update, _): update.message.reply_text( 'Пока-пока!', reply_markup=telegram.ReplyKeyboardRemove() ) return ConversationHandler.END def handle_new_question_request(update, _, quiz_qa, redis_connection): question = random.choice([*quiz_qa]) redis_connection.set(f"tg-{update.message.from_user['id']}", question) update.message.reply_text(f'Вопрос: {question}') return QUIZ.Answer def handle_solution_attempt(update, _, quiz_qa, redis_connection): quiz_question = redis_connection.get( f"tg-{update.message.from_user['id']}" ).decode('utf-8') message = 'Неправильно… Попробуешь ещё раз?' if update.message.text.lower() in quiz_qa[quiz_question].lower(): update.message.reply_text( '''Правильно! Поздравляю! Для следующего вопроса нажми «Новый вопрос»''') return QUIZ.Question update.message.reply_text(message) def handle_give_up(update, context, quiz_qa, redis_connection): quiz_question = redis_connection.get( f"tg-{update.message.from_user['id']}" ).decode('utf-8') answer = f'Ответ: {quiz_qa[quiz_question]}' update.message.reply_text(answer) handle_new_question_request(update, context, quiz_qa, redis_connection) if __name__ == '__main__': arguments = get_arguments() level = logging.DEBUG if arguments.debug else logging.INFO logging.basicConfig(level=level) load_dotenv() telegram_token = os.environ['TELEGRAM-TOKEN'] redis_host = os.environ['REDIS-BASE'] redis_port = os.environ['REDIS-PORT'] redis_password = os.environ['REDIS-PASSWORD'] logging.debug('Open Redis connection') redis_connection = redis.Redis( host=redis_host, port=redis_port, password=redis_password ) logging.debug( 'Read questions and answers from files & make QA dictionary' ) quiz_qa = get_quiz_qa('questions') logging.debug('Prepare telegram bot') updater = Updater(token=telegram_token) dispatcher = updater.dispatcher partial_handle_new_question_request = partial( handle_new_question_request, quiz_qa=quiz_qa, redis_connection=redis_connection, ) partial_handle_solution_attempt = partial( handle_solution_attempt, quiz_qa=quiz_qa, redis_connection=redis_connection, ) partial_handle_give_up = partial( handle_give_up, quiz_qa=quiz_qa, redis_connection=redis_connection, ) conversation_handler = ConversationHandler( entry_points=[CommandHandler('start', start)], states={ QUIZ.Question: [ MessageHandler( Filters.regex('^(Новый вопрос)$'), partial_handle_new_question_request ) ], QUIZ.Answer: [ MessageHandler( Filters.regex('^(Сдаться)$'), partial_handle_give_up ), MessageHandler( Filters.text & ~Filters.command, partial_handle_solution_attempt ), ] }, fallbacks=[CommandHandler('cancel', cancel)] ) dispatcher.add_handler(conversation_handler) logging.debug('Run telegram bot') updater.start_polling() updater.idle()
[]
[]
[ "REDIS-BASE", "TELEGRAM-TOKEN", "REDIS-PASSWORD", "REDIS-PORT" ]
[]
["REDIS-BASE", "TELEGRAM-TOKEN", "REDIS-PASSWORD", "REDIS-PORT"]
python
4
0
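This row reads all of its configuration through the strict mapping form, for example os.environ['TELEGRAM-TOKEN'], so a missing variable stops the bot at startup with a KeyError rather than letting it continue with None. The sketch below only contrasts the strict and lenient lookups; it reuses the variable name from the row for illustration and is not part of the bot code.

import os

os.environ.pop("TELEGRAM-TOKEN", None)  # ensure the variable is absent for the demo

# Lenient lookup: returns None (or a supplied default) when the variable is missing.
assert os.getenv("TELEGRAM-TOKEN") is None

# Strict lookup: fails loudly, which is what the bot above relies on at startup.
try:
    os.environ["TELEGRAM-TOKEN"]
except KeyError:
    print("TELEGRAM-TOKEN must be set before the bot can start")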
hackpack/app.py
import re from flask import Flask from flask import render_template from flask import url_for from flask import request from flask import session import random import json import os import redis from twilio import twiml from twilio.util import TwilioCapability # Declare and configure application app = Flask(__name__, static_url_path='/static') app.secret_key = '2F34255D3EC24192CABC88752C88A2AED9825B9C2C49C7E013644E647596CC73' app.config.from_pyfile('local_settings.py') redis_url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379') redis = redis.from_url(redis_url) numbers = [] restList = [] @app.before_first_request def startup(): PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__)) restListFile = os.path.join(PROJECT_ROOT, 'restaurants.json') global numbers global restList if redis.get('nums') is not None: numbers_json = redis.get('nums') numbers = json.loads(numbers_json) # attempt to pull from redis redis_dat = redis.get('restList') if redis_dat is None: data = json.load(open(restListFile)) restList = data["list"] json_data = json.dumps(restList) redis.set('restList', json_data) else: restList = json.loads(redis_dat) # Voice Request URL @app.route('/voice', methods=['GET', 'POST']) def voice(): response = twiml.Response() response.say("Welcome to RootRec. We do not support voice at this time. Sorry.") return str(response) # SMS Request URL @app.route('/sms1', methods=['GET']) def sms(): response = twiml.Response() response.sms("Nothing here") return str(response) # SMS Request URL @app.route('/sms', methods=['GET']) def smsGet(): response = twiml.Response() response.sms('Welcome to RootRec! Your number has been added to the list. Reply with "Stop" at any time to be removed from this service!') return str(response) # SMS Request URL @app.route('/sms', methods=['POST']) def smsPost(): global numbers global restList # setup response response = twiml.Response() # pull basic data from every message body = request.form['Body'].lower() num = request.form['From'] # cookie data lastRecIndex = session.get('lastrec', -1) if num not in numbers: response.sms('Welcome to RootRec! Your number has been added to the list. Reply with "Stop" at any time to be removed from this service') numbers.append(num) json_data = json.dumps(numbers) redis.set("nums", json_data) elif "yes" in body: # handle follow up response if lastRecIndex == -1: response.sms("Sorry, something went wrong") else: rest = restList[lastRecIndex] response.sms("Great choice! {} is at {}, you can call them at {}".format(rest["name"], rest["addr"], rest["phone"])) else: rest = random.choice(restList) index = restList.index(rest) session['lastrec'] = index randNum = random.randrange(1, 3) opt = "opt" + str(randNum) optPrice = "opt" + str(randNum) + "price" response.sms('Hello, here is a healthy option nearby: you could go to "{}" and get "{}" for {}. 
Reply with "next" for another option, or "yes" to get the address.'.format(rest["name"], rest[opt], rest[optPrice])) return str(response) # Twilio Client demo template @app.route('/client') def client(): configuration_error = None for key in ('TWILIO_ACCOUNT_SID', 'TWILIO_AUTH_TOKEN', 'TWILIO_APP_SID', 'TWILIO_CALLER_ID'): if not app.config.get(key, None): configuration_error = "Missing from local_settings.py: " \ "{0}".format(key) token = None if not configuration_error: capability = TwilioCapability(app.config['TWILIO_ACCOUNT_SID'], app.config['TWILIO_AUTH_TOKEN']) capability.allow_client_incoming("joey_ramone") capability.allow_client_outgoing(app.config['TWILIO_APP_SID']) token = capability.generate() params = {'token': token} return render_template('client.html', params=params, configuration_error=configuration_error) @app.route('/client/incoming', methods=['POST']) def client_incoming(): try: from_number = request.values.get('PhoneNumber', None) resp = twiml.Response() if not from_number: resp.say("Your app is missing a Phone Number. " "Make a request with a Phone Number to make outgoing " "calls with the Twilio hack pack.") return str(resp) if 'TWILIO_CALLER_ID' not in app.config: resp.say( "Your app is missing a Caller ID parameter. " "Please add a Caller ID to make outgoing calls with Twilio " "Client") return str(resp) with resp.dial(callerId=app.config['TWILIO_CALLER_ID']) as r: # If we have a number, and it looks like a phone number: if from_number and re.search('^[\d\(\)\- \+]+$', from_number): r.number(from_number) else: r.say("We couldn't find a phone number to dial. Make sure " "you are sending a Phone Number when you make a " "request with Twilio Client") return str(resp) except: resp = twiml.Response() resp.say("An error occurred. Check your debugger at twilio dot com " "for more information.") return str(resp) # Installation success page @app.route('/') def index(): params = { 'Voice Request URL': url_for('.voice', _external=True), 'SMS Request URL': url_for('.sms', _external=True), 'Client URL': url_for('.client', _external=True)} return render_template('index.html', params=params, configuration_error=None)
[]
[]
[ "REDISTOGO_URL" ]
[]
["REDISTOGO_URL"]
python
1
0
recipes_exam/wsgi.py
""" WSGI config for recipes_exam project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'recipes_exam.settings') application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
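The environment columns of this row are empty even though the file touches os.environ, apparently because the extraction tracks getenv-style reads and os.environ.setdefault supplies a fallback value rather than reading one. The short sketch below illustrates the setdefault semantics the Django template relies on; the 'recipes_exam.production' value is invented for the example.

import os

os.environ.pop("DJANGO_SETTINGS_MODULE", None)

# With nothing exported, the in-code fallback is used.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "recipes_exam.settings")
assert os.environ["DJANGO_SETTINGS_MODULE"] == "recipes_exam.settings"

# An externally exported value wins: setdefault leaves it untouched.
os.environ["DJANGO_SETTINGS_MODULE"] = "recipes_exam.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "recipes_exam.settings")
assert os.environ["DJANGO_SETTINGS_MODULE"] == "recipes_exam.production"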
pkg/terminal/terminal.go
package terminal import ( "fmt" "io" "net/rpc" "os" "os/signal" "runtime" "strings" "sync" "syscall" "github.com/peterh/liner" "github.com/go-delve/delve/pkg/config" "github.com/go-delve/delve/pkg/terminal/starbind" "github.com/go-delve/delve/service" "github.com/go-delve/delve/service/api" ) const ( historyFile string = ".dbg_history" terminalHighlightEscapeCode string = "\033[%2dm" terminalResetEscapeCode string = "\033[0m" ) const ( ansiBlack = 30 ansiRed = 31 ansiGreen = 32 ansiYellow = 33 ansiBlue = 34 ansiMagenta = 35 ansiCyan = 36 ansiWhite = 37 ansiBrBlack = 90 ansiBrRed = 91 ansiBrGreen = 92 ansiBrYellow = 93 ansiBrBlue = 94 ansiBrMagenta = 95 ansiBrCyan = 96 ansiBrWhite = 97 ) // Term represents the terminal running dlv. type Term struct { client service.Client conf *config.Config prompt string line *liner.State cmds *Commands dumb bool stdout io.Writer InitFile string displays []string historyFile *os.File starlarkEnv *starbind.Env // quitContinue is set to true by exitCommand to signal that the process // should be resumed before quitting. quitContinue bool quittingMutex sync.Mutex quitting bool } // New returns a new Term. func New(client service.Client, conf *config.Config) *Term { cmds := DebugCommands(client) if conf != nil && conf.Aliases != nil { cmds.Merge(conf.Aliases) } if conf == nil { conf = &config.Config{} } var w io.Writer dumb := strings.ToLower(os.Getenv("TERM")) == "dumb" if dumb { w = os.Stdout } else { w = getColorableWriter() } if (conf.SourceListLineColor > ansiWhite && conf.SourceListLineColor < ansiBrBlack) || conf.SourceListLineColor < ansiBlack || conf.SourceListLineColor > ansiBrWhite { conf.SourceListLineColor = ansiBlue } t := &Term{ client: client, conf: conf, prompt: "(dlv) ", line: liner.NewLiner(), cmds: cmds, dumb: dumb, stdout: w, } if client != nil { lcfg := t.loadConfig() client.SetReturnValuesLoadConfig(&lcfg) } t.starlarkEnv = starbind.New(starlarkContext{t}) return t } // Close returns the terminal to its previous mode. func (t *Term) Close() { t.line.Close() } func (t *Term) sigintGuard(ch <-chan os.Signal, multiClient bool) { for range ch { t.starlarkEnv.Cancel() state, err := t.client.GetStateNonBlocking() if err == nil && state.Recording { fmt.Printf("received SIGINT, stopping recording (will not forward signal)\n") err := t.client.StopRecording() if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) } continue } if multiClient { answer, err := t.line.Prompt("Would you like to [s]top the target or [q]uit this client, leaving the target running [s/q]? ") if err != nil { fmt.Fprintf(os.Stderr, "%v", err) continue } answer = strings.TrimSpace(answer) switch answer { case "s": _, err := t.client.Halt() if err != nil { fmt.Fprintf(os.Stderr, "%v", err) } case "q": t.quittingMutex.Lock() t.quitting = true t.quittingMutex.Unlock() err := t.client.Disconnect(false) if err != nil { fmt.Fprintf(os.Stderr, "%v", err) } else { t.Close() } default: fmt.Println("only s or q allowed") } } else { fmt.Printf("received SIGINT, stopping process (will not forward signal)\n") _, err := t.client.Halt() if err != nil { fmt.Fprintf(os.Stderr, "%v", err) } } } } // Run begins running dlv in the terminal. 
func (t *Term) Run() (int, error) { defer t.Close() multiClient := t.client.IsMulticlient() // Send the debugger a halt command on SIGINT ch := make(chan os.Signal, 1) signal.Notify(ch, syscall.SIGINT) go t.sigintGuard(ch, multiClient) t.line.SetCompleter(func(line string) (c []string) { if strings.HasPrefix(line, "break ") || strings.HasPrefix(line, "b ") { filter := line[strings.Index(line, " ")+1:] funcs, _ := t.client.ListFunctions(filter) for _, f := range funcs { c = append(c, "break "+f) } return } for _, cmd := range t.cmds.cmds { for _, alias := range cmd.aliases { if strings.HasPrefix(alias, strings.ToLower(line)) { c = append(c, alias) } } } return }) fullHistoryFile, err := config.GetConfigFilePath(historyFile) if err != nil { fmt.Printf("Unable to load history file: %v.", err) } t.historyFile, err = os.OpenFile(fullHistoryFile, os.O_RDWR|os.O_CREATE, 0600) if err != nil { fmt.Printf("Unable to open history file: %v. History will not be saved for this session.", err) } if _, err := t.line.ReadHistory(t.historyFile); err != nil { fmt.Printf("Unable to read history file: %v", err) } fmt.Println("Type 'help' for list of commands.") if t.InitFile != "" { err := t.cmds.executeFile(t, t.InitFile) if err != nil { if _, ok := err.(ExitRequestError); ok { return t.handleExit() } fmt.Fprintf(os.Stderr, "Error executing init file: %s\n", err) } } var lastCmd string // Ensure that the target process is neither running nor recording by // making a blocking call. _, _ = t.client.GetState() for { cmdstr, err := t.promptForInput() if err != nil { if err == io.EOF { fmt.Println("exit") return t.handleExit() } return 1, fmt.Errorf("Prompt for input failed.\n") } if strings.TrimSpace(cmdstr) == "" { cmdstr = lastCmd } lastCmd = cmdstr if err := t.cmds.Call(cmdstr, t); err != nil { if _, ok := err.(ExitRequestError); ok { return t.handleExit() } // The type information gets lost in serialization / de-serialization, // so we do a string compare on the error message to see if the process // has exited, or if the command actually failed. if strings.Contains(err.Error(), "exited") { fmt.Fprintln(os.Stderr, err.Error()) } else { t.quittingMutex.Lock() quitting := t.quitting t.quittingMutex.Unlock() if quitting { return t.handleExit() } fmt.Fprintf(os.Stderr, "Command failed: %s\n", err) } } } } // Println prints a line to the terminal. func (t *Term) Println(prefix, str string) { if !t.dumb { terminalColorEscapeCode := fmt.Sprintf(terminalHighlightEscapeCode, t.conf.SourceListLineColor) prefix = fmt.Sprintf("%s%s%s", terminalColorEscapeCode, prefix, terminalResetEscapeCode) } fmt.Fprintf(t.stdout, "%s%s\n", prefix, str) } // Substitutes directory to source file. // // Ensures that only directory is substituted, for example: // substitute from `/dir/subdir`, substitute to `/new` // for file path `/dir/subdir/file` will return file path `/new/file`. // for file path `/dir/subdir-2/file` substitution will not be applied. // // If more than one substitution rule is defined, the rules are applied // in the order they are defined, first rule that matches is used for // substitution. 
func (t *Term) substitutePath(path string) string { path = crossPlatformPath(path) if t.conf == nil { return path } // On windows paths returned from headless server are as c:/dir/dir // though os.PathSeparator is '\\' separator := "/" //make it default if strings.Index(path, "\\") != -1 { //dependent on the path separator = "\\" } for _, r := range t.conf.SubstitutePath { from := crossPlatformPath(r.From) to := r.To if !strings.HasSuffix(from, separator) { from = from + separator } if !strings.HasSuffix(to, separator) { to = to + separator } if strings.HasPrefix(path, from) { return strings.Replace(path, from, to, 1) } } return path } func crossPlatformPath(path string) string { if runtime.GOOS == "windows" { return strings.ToLower(path) } return path } func (t *Term) promptForInput() (string, error) { l, err := t.line.Prompt(t.prompt) if err != nil { return "", err } l = strings.TrimSuffix(l, "\n") if l != "" { t.line.AppendHistory(l) } return l, nil } func yesno(line *liner.State, question string) (bool, error) { for { answer, err := line.Prompt(question) if err != nil { return false, err } answer = strings.ToLower(strings.TrimSpace(answer)) switch answer { case "n", "no": return false, nil case "y", "yes": return true, nil } } } func (t *Term) handleExit() (int, error) { if t.historyFile != nil { if _, err := t.line.WriteHistory(t.historyFile); err != nil { fmt.Println("readline history error:", err) } if err := t.historyFile.Close(); err != nil { fmt.Printf("error closing history file: %s\n", err) } } t.quittingMutex.Lock() quitting := t.quitting t.quittingMutex.Unlock() if quitting { return 0, nil } s, err := t.client.GetState() if err != nil { if isErrProcessExited(err) { if t.client.IsMulticlient() { answer, err := yesno(t.line, "Remote process has exited. Would you like to kill the headless instance? [Y/n] ") if err != nil { return 2, io.EOF } if answer { if err := t.client.Detach(true); err != nil { return 1, err } } return 0, err } return 0, nil } return 1, err } if !s.Exited { if t.quitContinue { err := t.client.Disconnect(true) if err != nil { return 2, err } return 0, nil } doDetach := true if t.client.IsMulticlient() { answer, err := yesno(t.line, "Would you like to kill the headless instance? [Y/n] ") if err != nil { return 2, io.EOF } doDetach = answer } if doDetach { kill := true if t.client.AttachedToExistingProcess() { answer, err := yesno(t.line, "Would you like to kill the process? [Y/n] ") if err != nil { return 2, io.EOF } kill = answer } if err := t.client.Detach(kill); err != nil { return 1, err } } } return 0, nil } // loadConfig returns an api.LoadConfig with the parameterss specified in // the configuration file. 
func (t *Term) loadConfig() api.LoadConfig { r := api.LoadConfig{FollowPointers: true, MaxVariableRecurse: 1, MaxStringLen: 64, MaxArrayValues: 64, MaxStructFields: -1} if t.conf != nil && t.conf.MaxStringLen != nil { r.MaxStringLen = *t.conf.MaxStringLen } if t.conf != nil && t.conf.MaxArrayValues != nil { r.MaxArrayValues = *t.conf.MaxArrayValues } if t.conf != nil && t.conf.MaxVariableRecurse != nil { r.MaxVariableRecurse = *t.conf.MaxVariableRecurse } return r } func (t *Term) removeDisplay(n int) error { if n < 0 || n >= len(t.displays) { return fmt.Errorf("%d is out of range", n) } t.displays[n] = "" for i := len(t.displays) - 1; i >= 0; i-- { if t.displays[i] != "" { t.displays = t.displays[:i+1] return nil } } t.displays = t.displays[:0] return nil } func (t *Term) addDisplay(expr string) { t.displays = append(t.displays, expr) } func (t *Term) printDisplay(i int) { expr := t.displays[i] val, err := t.client.EvalVariable(api.EvalScope{GoroutineID: -1}, expr, ShortLoadConfig) if err != nil { if isErrProcessExited(err) { return } fmt.Printf("%d: %s = error %v\n", i, expr, err) return } fmt.Printf("%d: %s = %s\n", i, val.Name, val.SinglelineString()) } func (t *Term) printDisplays() { for i := range t.displays { if t.displays[i] != "" { t.printDisplay(i) } } } func (t *Term) onStop() { t.printDisplays() } // isErrProcessExited returns true if `err` is an RPC error equivalent of proc.ErrProcessExited func isErrProcessExited(err error) bool { rpcError, ok := err.(rpc.ServerError) return ok && strings.Contains(rpcError.Error(), "has exited with status") }
[ "\"TERM\"" ]
[]
[ "TERM" ]
[]
["TERM"]
go
1
0
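The only environment read in this Go row is the TERM check that decides whether dlv colours its output; a "dumb" terminal gets plain text. The function below is a hedged Python restatement of that check, not part of the Go source; the isatty() guard and the blue escape code are additions for the example (34 matches the ansiBlue constant in the row).

import os
import sys

def use_color(stream=sys.stdout) -> bool:
    # Same idea as the TERM == "dumb" test above, plus an isatty() guard
    # so redirected output also stays free of escape codes.
    return stream.isatty() and os.environ.get("TERM", "").lower() != "dumb"

prefix = "\033[34m" if use_color() else ""
reset = "\033[0m" if use_color() else ""
print(f"{prefix}(dlv){reset} ready")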
mongo/change_stream_test.go
package mongo import ( "context" "os" "testing" "time" "github.com/Vincent20101/mongo-go-driver/bson" "github.com/Vincent20101/mongo-go-driver/core/command" "github.com/Vincent20101/mongo-go-driver/core/option" "github.com/stretchr/testify/require" ) func isServerError(err error) bool { _, ok := err.(command.Error) return ok } // TODO(GODRIVER-251): Replace manual check with functionality of improved testing framework. func skipIfBelow36(t *testing.T) { serverVersion, err := getServerVersion(createTestDatabase(t, nil)) require.NoError(t, err) if compareVersions(t, serverVersion, "3.6") < 0 { t.Skip() } } func getNextChange(changes Cursor) { for !changes.Next(context.Background()) { } } func TestChangeStream_firstStage(t *testing.T) { t.Parallel() if testing.Short() { t.Skip() } skipIfBelow36(t) if os.Getenv("TOPOLOGY") != "replica_set" { t.Skip() } coll := createTestCollection(t, nil, nil) // Ensure the database is created. _, err := coll.InsertOne(context.Background(), bson.NewDocument(bson.EC.Int32("x", 1))) require.NoError(t, err) changes, err := coll.Watch(context.Background(), nil) require.NoError(t, err) elem, err := changes.(*changeStream).pipeline.Lookup(0) require.NoError(t, err) doc := elem.MutableDocument() require.Equal(t, 1, doc.Len()) _, err = doc.LookupErr("$changeStream") require.NoError(t, err) } func TestChangeStream_noCustomStandaloneError(t *testing.T) { t.Parallel() if testing.Short() { t.Skip() } skipIfBelow36(t) topology := os.Getenv("TOPOLOGY") if topology == "replica_set" || topology == "sharded_cluster" { t.Skip() } coll := createTestCollection(t, nil, nil) // Ensure the database is created. _, err := coll.InsertOne(context.Background(), bson.NewDocument(bson.EC.Int32("x", 1))) require.NoError(t, err) _, err = coll.Watch(context.Background(), nil) require.Error(t, err) if _, ok := err.(command.Error); !ok { t.Errorf("Should have returned command error, but got %T", err) } } func TestChangeStream_trackResumeToken(t *testing.T) { t.Parallel() if testing.Short() { t.Skip() } skipIfBelow36(t) if os.Getenv("TOPOLOGY") != "replica_set" { t.Skip() } coll := createTestCollection(t, nil, nil) // Ensure the database is created. _, err := coll.InsertOne(context.Background(), bson.NewDocument(bson.EC.Int32("y", 1))) require.NoError(t, err) changes, err := coll.Watch(context.Background(), nil) require.NoError(t, err) for i := 1; i <= 4; i++ { _, err = coll.InsertOne(context.Background(), bson.NewDocument(bson.EC.Interface("x", i))) require.NoError(t, err) } for i := 1; i <= 4; i++ { getNextChange(changes) doc := bson.NewDocument() err := changes.Decode(doc) require.NoError(t, err) id, err := doc.LookupErr("_id") require.NoError(t, err) require.Equal(t, id.MutableDocument(), changes.(*changeStream).resumeToken) } } func TestChangeStream_errorMissingResponseToken(t *testing.T) { t.Parallel() if testing.Short() { t.Skip() } skipIfBelow36(t) if os.Getenv("TOPOLOGY") != "replica_set" { t.Skip() } coll := createTestCollection(t, nil, nil) // Ensure the database is created. 
_, err := coll.InsertOne(context.Background(), bson.NewDocument(bson.EC.Int32("y", 1))) require.NoError(t, err) // Project out the response token changes, err := coll.Watch(context.Background(), []*bson.Document{ bson.NewDocument( bson.EC.SubDocumentFromElements("$project", bson.EC.Int32("_id", 0))), }) require.NoError(t, err) _, err = coll.InsertOne(context.Background(), bson.NewDocument(bson.EC.Int32("x", 1))) require.NoError(t, err) getNextChange(changes) require.Error(t, changes.Decode(bson.NewDocument())) } func TestChangeStream_resumableError(t *testing.T) { t.Parallel() if testing.Short() { t.Skip() } skipIfBelow36(t) if os.Getenv("TOPOLOGY") != "replica_set" { t.Skip() } coll := createTestCollection(t, nil, nil) // Ensure the database is created. _, err := coll.InsertOne(context.Background(), bson.NewDocument(bson.EC.Int32("y", 1))) require.NoError(t, err) changes, err := coll.Watch(context.Background(), nil) require.NoError(t, err) // Create a context that will expire before the operation can finish. ctx, cancel := context.WithTimeout(context.Background(), 100*time.Nanosecond) // "Use" the cancel function, which go vet complains if we throw away. func(context.CancelFunc) {}(cancel) require.False(t, changes.Next(ctx)) err = changes.Err() require.Error(t, err) require.False(t, isServerError(err)) // If the ResumeAfter option is present, the the operation attempted to resume. hasResume := false for _, opt := range changes.(*changeStream).options { if _, ok := opt.(option.OptResumeAfter); ok { hasResume = true break } } require.True(t, hasResume) } // TODO: GODRIVER-247 Test that a change stream does not attempt to resume after a server error. func TestChangeStream_resumeAfterKillCursors(t *testing.T) { t.Parallel() if testing.Short() { t.Skip() } skipIfBelow36(t) if os.Getenv("TOPOLOGY") != "replica_set" { t.Skip() } coll := createTestCollection(t, nil, nil) // Ensure the database is created. _, err := coll.InsertOne(context.Background(), bson.NewDocument(bson.EC.Int32("y", 1))) require.NoError(t, err) changes, err := coll.Watch(context.Background(), nil) require.NoError(t, err) oldns := coll.namespace() killCursors := command.KillCursors{ NS: command.Namespace{DB: oldns.DB, Collection: oldns.Collection}, IDs: []int64{changes.ID()}, } ss, err := coll.client.topology.SelectServer(context.Background(), coll.readSelector) require.NoError(t, err) conn, err := ss.Connection(context.Background()) require.NoError(t, err) defer conn.Close() _, err = killCursors.RoundTrip(context.Background(), ss.Description(), conn) require.NoError(t, err) require.False(t, changes.Next(context.Background())) require.NoError(t, changes.Err()) _, err = coll.InsertOne(context.Background(), bson.NewDocument(bson.EC.Int32("x", 1))) require.NoError(t, err) getNextChange(changes) require.NoError(t, changes.Decode(bson.NewDocument())) }
[ "\"TOPOLOGY\"", "\"TOPOLOGY\"", "\"TOPOLOGY\"", "\"TOPOLOGY\"", "\"TOPOLOGY\"", "\"TOPOLOGY\"" ]
[]
[ "TOPOLOGY" ]
[]
["TOPOLOGY"]
go
1
0
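Every replica-set test in this row gates itself on os.Getenv("TOPOLOGY") and calls t.Skip() when the value is not "replica_set". The snippet below is a hedged Python analogue of that gating idea using unittest.skipUnless; the test class and message are invented for the example.

import os
import unittest

# Decorator equivalent of the Go guard: if os.Getenv("TOPOLOGY") != "replica_set" { t.Skip() }
requires_replica_set = unittest.skipUnless(
    os.getenv("TOPOLOGY") == "replica_set",
    "change stream tests require a replica_set TOPOLOGY",
)

@requires_replica_set
class ChangeStreamSmokeTest(unittest.TestCase):
    def test_placeholder(self):
        self.assertTrue(True)  # real assertions live in the Go tests above

if __name__ == "__main__":
    unittest.main()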
src/main/java/com/github/quartz/config/QuartzAutoConfiguration.java
package com.github.quartz.config; import com.github.quartz.PropertyPlaceholder; import com.github.quartz.http.ScheduleExecutor; import com.github.quartz.http.ScheduleExecutorImpl; import com.github.quartz.http.ScheduleServlet; import com.github.quartz.jdbc.QuartzRepository; import com.github.quartz.model.assist.STATUS; import com.github.quartz.model.entity.QrtzTimedTask; import com.github.quartz.schedule.ScheduleRefresh; import com.github.quartz.schedule.SqlScriptExecute; import com.github.quartz.schedule.util.QuartzUtil; import com.github.quartz.schedule.util.ScheduleUtil; import org.quartz.Scheduler; import org.quartz.Trigger; import org.quartz.TriggerKey; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.BeansException; import org.springframework.boot.autoconfigure.AutoConfigureAfter; import org.springframework.boot.autoconfigure.condition.ConditionalOnBean; import org.springframework.boot.context.properties.EnableConfigurationProperties; import org.springframework.boot.web.servlet.ServletRegistrationBean; import org.springframework.context.ApplicationContext; import org.springframework.context.ApplicationContextAware; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import org.springframework.scheduling.annotation.EnableScheduling; import org.springframework.scheduling.quartz.SchedulerFactoryBean; import javax.annotation.Resource; import javax.sql.DataSource; import java.io.File; import java.io.FileInputStream; import java.util.ArrayList; import java.util.List; import java.util.Properties; import static com.github.quartz.model.constant.HttpConstant.QUARTZ_API; /** * @author 陈敏 * Create date :2017/10/19. * My blog: http://artislong.github.io */ @Configuration @EnableScheduling @ConditionalOnBean(QuartzRepository.class) @AutoConfigureAfter({QuartzDataBaseConfiguration.class}) @EnableConfigurationProperties(QuartzProperties.class) public class QuartzAutoConfiguration implements ApplicationContextAware { private static final Logger logger = LoggerFactory.getLogger(QuartzAutoConfiguration.class); private ApplicationContext applicationContext; private List<QrtzTimedTask> qrtzTimedTaskList = new ArrayList<QrtzTimedTask>(); public static boolean isStart; public QuartzAutoConfiguration(SqlScriptExecute sqlScriptExecute, QuartzProperties quartzProperties, QuartzRepository quartzRepository, QuartzUtil quartzUtil) { // 当前quartz节点是否启动 isStart = quartzUtil.quartzIsStart(quartzProperties); if (isStart) { // 当前集群节点可以启动时,加载任务 qrtzTimedTaskList.addAll(getTaskExecutors(quartzRepository)); // 启动节点之前,清楚数据库已存留的任务信息 Boolean durability = quartzProperties.getDurability(); // 任务完成后,不允许保留在数据库,需要清理 if (!durability) { sqlScriptExecute.execute("cleanTask.sql"); } } } /** * 注册任务 * @param dataSource * @param quartzProperties * @param quartzUtil * @return */ @Bean @Resource public SchedulerFactoryBean schedulerFactoryBean(DataSource dataSource, QuartzProperties quartzProperties, QuartzUtil quartzUtil) { if (isStart) { // 动态创建JobDetail并注册到Spring中 quartzUtil.createJobDetailBeans(qrtzTimedTaskList); // 动态创建CronTrigger并注册到Spring中 quartzUtil.createCronTriggerBeans(qrtzTimedTaskList); } else { return null; } List<Trigger> triggers = new ArrayList<Trigger>(); for (QrtzTimedTask qrtzTimedTask : qrtzTimedTaskList) { Trigger trigger = (Trigger) applicationContext.getBean(qrtzTimedTask.getTaskName() + "Trigger"); triggers.add(trigger); } // 创建Quartz任务调度工厂,并将任务加入到工厂中 SchedulerFactoryBean schedulerFactoryBean = new 
SchedulerFactoryBean(); schedulerFactoryBean.setTriggers(triggers.toArray(new Trigger[0])); // 加载Quartz配置(分为单机配置或集群配置,并支持Quartz配置分离) Properties properties = quartzProperties(); PropertyPlaceholder quartzPlaceholder; if (properties.isEmpty()) { if (quartzProperties.getCluster()) { quartzPlaceholder = (PropertyPlaceholder) applicationContext.getBean("quartzClusterPlaceholder"); } else { quartzPlaceholder = (PropertyPlaceholder) applicationContext.getBean("quartzPlaceholder"); } properties.putAll(quartzPlaceholder.getProperties()); } schedulerFactoryBean.setQuartzProperties(properties); // 用于quartz集群,QuartzScheduler 启动时更新己存在的Job,这样就不用每次修改targetObject后删除qrtz_job_details表对应记录了 if (quartzProperties.getCluster()) { schedulerFactoryBean.setOverwriteExistingJobs(quartzProperties.getOverwriteExistingJobs()); schedulerFactoryBean.setDataSource(dataSource); } // QuartzScheduler 延时启动,应用启动完n秒后 QuartzScheduler 再启动 schedulerFactoryBean.setStartupDelay(quartzProperties.getStartupDelay()); schedulerFactoryBean.setAutoStartup(quartzProperties.getAutoStartup()); return schedulerFactoryBean; } /** * 任务控制工具类 * @param scheduler * @param quartzProperties * @return */ @Bean public ScheduleUtil scheduleUtil(Scheduler scheduler, QuartzProperties quartzProperties) { return new ScheduleUtil() // .setQuartzProperties(quartzProperties) // .setScheduler(scheduler); } /** * 注册任务动态刷新器 * @param scheduler * @param quartzRepository * @param quartzUtil * @return */ @Bean public ScheduleRefresh scheduleRefresh(Scheduler scheduler, QuartzRepository quartzRepository, QuartzUtil quartzUtil) { return new ScheduleRefresh() // .setScheduler(scheduler) // .setQuartzRepository(quartzRepository) // .setQuartzUtil(quartzUtil); } /** * 注册定时任务调度器 * @param scheduler * @param quartzUtil * @return */ @Bean public ScheduleExecutor scheduleExecutor(Scheduler scheduler, QuartzUtil quartzUtil) { return new ScheduleExecutorImpl() .setApplicationContext(applicationContext) .setScheduler(scheduler) .setQuartzUtil(quartzUtil); } @Bean public ScheduleServlet scheduleServlet(ScheduleExecutor scheduleExecutor) { ScheduleServlet scheduleServlet = new ScheduleServlet(); scheduleServlet.setScheduleExecutor(scheduleExecutor); return scheduleServlet; } /** * 注册Quartz提供对外访问接口 * @param scheduleServlet * @return */ @Bean public ServletRegistrationBean servletRegistrationBean(ScheduleServlet scheduleServlet) { ServletRegistrationBean servletRegister = new ServletRegistrationBean(scheduleServlet, QUARTZ_API); servletRegister.setAsyncSupported(true); return servletRegister; } @Override public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { this.applicationContext = applicationContext; } /** * 获取所有任务的配置信息 * @param quartzRepository * @return */ private List<QrtzTimedTask> getTaskExecutors(QuartzRepository quartzRepository) { List<QrtzTimedTask> qrtzTimedTasks = quartzRepository.queryValidTaskAndParam(STATUS.U); if (qrtzTimedTasks.isEmpty()) { qrtzTimedTasks.addAll(getDefaultTask()); } return qrtzTimedTasks; } private static Properties quartzProperties() { Properties properties = new Properties(); String path = System.getProperty("quartz.config.location"); if (path == null) { logger.info("jvm的quartz.config.location参数未配置,读取QUARTZ_CONFIG_LOCATION环境变量"); path = System.getenv("QUARTZ_CONFIG_LOCATION"); } if (path == null) { logger.info("QUARTZ_CONFIG_LOCATION环境变量未配置,将使用默认配置"); return properties; } File application = new File(path + "quartz.properties"); if (!application.exists()) { logger.error(path + 
"quartz.properties配置文件不存在,将使用默认配置"); return properties; } try { Properties applicationProperties = new Properties(); applicationProperties.load(new FileInputStream(application)); properties.putAll(applicationProperties); logger.info("加载" + path + "quartz.properties" + "配置文件完成"); } catch (Exception e) { logger.error("加载" + path + "quartz.properties" + "配置文件失败,错误信息: {}", e); return properties; } return properties; } /** * 构建默认的quartz任务 * @return */ private static List<QrtzTimedTask> getDefaultTask() { List<QrtzTimedTask> defaultTask = new ArrayList<QrtzTimedTask>(); String defaultScheduler = "defaultScheduler"; defaultTask.add(new QrtzTimedTask() .setTaskName(defaultScheduler) .setTaskClass("com.quartz.config.QuartzAutoConfiguration.DefaultScheduler") .setTaskExpres("0 0 4 ? * *") .setTaskMethod("execute") .setTaskDesc("default scheduler") .setTaskGroup(TriggerKey.DEFAULT_GROUP)); return defaultTask; } @Bean public DefaultScheduler defaultScheduler() { return new DefaultScheduler(); } public static class DefaultScheduler { public void execute() { } } }
[ "\"QUARTZ_CONFIG_LOCATION\"" ]
[]
[ "QUARTZ_CONFIG_LOCATION" ]
[]
["QUARTZ_CONFIG_LOCATION"]
java
1
0
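Note on the record above: it resolves the Quartz configuration directory in three steps, first the quartz.config.location JVM property, then the QUARTZ_CONFIG_LOCATION environment variable listed in its constarg field, then built-in defaults. Below is a minimal, self-contained sketch of that lookup order; Go is used for all sketches added in this section, so the flag name, function names, and file check are illustrative assumptions rather than part of the record.

    package main

    import (
        "flag"
        "fmt"
        "os"
        "path/filepath"
    )

    // configDir resolves the config directory the way the record above does:
    // an explicit flag (standing in for the JVM property), then the
    // QUARTZ_CONFIG_LOCATION environment variable, then empty, which means
    // "use built-in defaults".
    func configDir(flagValue string) string {
        if flagValue != "" {
            return flagValue
        }
        if env := os.Getenv("QUARTZ_CONFIG_LOCATION"); env != "" {
            return env
        }
        return ""
    }

    func main() {
        loc := flag.String("quartz.config.location", "", "directory containing quartz.properties")
        flag.Parse()

        dir := configDir(*loc)
        if dir == "" {
            fmt.Println("no config location set, using built-in defaults")
            return
        }
        path := filepath.Join(dir, "quartz.properties")
        if _, err := os.Stat(path); err != nil {
            fmt.Printf("%s not found, using built-in defaults\n", path)
            return
        }
        fmt.Println("loading", path)
    }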
scripts/mixexpDemoOneToMany.py
import pyprobml_utils as pml import matplotlib.pyplot as plt import numpy as np import pandas as pd from scipy.special import logsumexp from sklearn.linear_model import LinearRegression from scipy.stats import multivariate_normal import pyprobml_utils as pml n = 200 np.random.seed(1) y = np.random.rand(n, 1) eta = np.random.randn(n,1)*0.05 x = y + 0.3*np.sin(2*3.1415*y) + eta data = np.concatenate((x, y), axis=1) K = 3 X = x.reshape(-1, 1) y = y.reshape(-1, 1) xtest = (x) ytest = (y) plt.figure() plt.scatter(x, y, edgecolors='blue', color="none") plt.title('Inverse problem') pml.savefig('mixexp_inverse.pdf') plt.show() def normalizelogspace(x): L = logsumexp(x, axis=1).reshape(-1, 1) Lnew = np.repeat(L, 3, axis=1) y = x - Lnew return y, Lnew def is_pos_def(x): return np.all(np.linalg.eigvals(x) > 0) K = 3 #nmix D = np.size(X, axis=1) N = np.size(X, axis=0) norm = 50 max_iter = 39 iteration = 0 r = np.zeros((N, K)) while iteration < max_iter: #E-step : np.random.seed(iteration) Wy = 0.1*np.random.randn(D, K) bias = 0.3*np.random.randn(D, K) mixweights = np.random.rand(1, K) normmw = np.linalg.norm(mixweights) mixweights = mixweights/normmw sigma2 = 0.1*np.random.randn(1, K) q = np.log(mixweights) logprior = np.repeat(q, N, axis=0) loglik = np.zeros((N, K)) for k in range(K): vecM = X*Wy[:, k] + bias[:, k] vecM = vecM.reshape(200, ) cov = sigma2[0, k] cov = np.abs(cov) vecX = y x = multivariate_normal.logpdf(vecX, mean=vecM, cov=cov) x = x /norm loglik[:, k] = x logpost = loglik + logprior logpost, logZ = normalizelogspace(logpost) ll = np.sum(logZ) post = np.exp(logpost) #M-step: r = post mixweights = np.sum(r, axis=0)/N mixweights = mixweights.reshape(1, -1) for k in range(K): reg = LinearRegression() model = reg.fit(X, y, r[:, k]) Wy[:, k] = model.coef_ bias[:, k] = model.intercept_ yhat_ = np.multiply(X, Wy[:, k]) + bias[:, k] sigma2[:, k] = np.sum(np.multiply(r[:, k], np.square(y-yhat_))) / sum(r[:, k]) iteration = iteration + 1 N = np.size(X, axis=0) D = np.size(X, axis=1) K = 3 weights = np.repeat(mixweights, N, axis=0) muk = np.zeros((N, K)) vk = np.zeros((N, K)) mu = np.zeros((N, )) v = np.zeros((N, 1)) b = 0.3*np.random.randn(D, K) for k in range(K): w = X*Wy[:, k] + bias[:, k] w = w.reshape(-1, ) muk[:, k] = w q = np.multiply(weights[:, k], muk[:, k]) mu = mu + q vk[:, k] = sigma2[:, k] v = v + np.multiply(weights[:, k], (vk[:, k] + np.square(muk[:, k]))).reshape(-1, 1) v = v - np.square(mu).reshape(-1, 1) plt.figure() plt.scatter(xtest, y, edgecolors='blue', color="none") plt.plot(xtest, muk[:, 0]) plt.plot(xtest, muk[:, 1]) plt.plot(xtest, muk[:, 2]) plt.title('Expert-predictions') pml.savefig('mixexp_expert_predictions.pdf') plt.show() plt.figure() for i in range(K): plt.scatter(y, post[:, i]) plt.title('Gating functions') pml.savefig('mixexp_gating_functions.pdf') plt.show() map = np.empty((K, 1)) map = np.argmax(post, axis=1) map = map.reshape(-1, 1) yhat = np.empty((N, 1)) for i in range(N): yhat[i, 0] = muk[i, map[i, 0]] plt.figure() plt.scatter(xtest, yhat, marker=6, color='black') plt.scatter(xtest, mu, marker='X', color='red') plt.scatter(xtest, y, edgecolors='blue', color="none") plt.title('prediction') plt.legend(['mode', 'mean']) pml.savefig('mixexp_predictions.pdf') plt.show()
[]
[]
[]
[]
[]
python
null
null
null
ddtrace/tracer/sampler.go
// Unless explicitly stated otherwise all files in this repository are licensed // under the Apache License Version 2.0. // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016 Datadog, Inc. package tracer import ( "encoding/json" "fmt" "io" "math" "os" "regexp" "strconv" "strings" "sync" "time" "gopkg.in/DataDog/dd-trace-go.v1/ddtrace" "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext" "gopkg.in/DataDog/dd-trace-go.v1/internal/log" "gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames" "golang.org/x/time/rate" ) // Sampler is the generic interface of any sampler. It must be safe for concurrent use. type Sampler interface { // Sample returns true if the given span should be sampled. Sample(span Span) bool } // RateSampler is a sampler implementation which randomly selects spans using a // provided rate. For example, a rate of 0.75 will permit 75% of the spans. // RateSampler implementations should be safe for concurrent use. type RateSampler interface { Sampler // Rate returns the current sample rate. Rate() float64 // SetRate sets a new sample rate. SetRate(rate float64) } // rateSampler samples from a sample rate. type rateSampler struct { sync.RWMutex rate float64 } // NewAllSampler is a short-hand for NewRateSampler(1). It is all-permissive. func NewAllSampler() RateSampler { return NewRateSampler(1) } // NewRateSampler returns an initialized RateSampler with a given sample rate. func NewRateSampler(rate float64) RateSampler { return &rateSampler{rate: rate} } // Rate returns the current rate of the sampler. func (r *rateSampler) Rate() float64 { r.RLock() defer r.RUnlock() return r.rate } // SetRate sets a new sampling rate. func (r *rateSampler) SetRate(rate float64) { r.Lock() r.rate = rate r.Unlock() } // constants used for the Knuth hashing, same as agent. const knuthFactor = uint64(1111111111111111111) // Sample returns true if the given span should be sampled. func (r *rateSampler) Sample(spn ddtrace.Span) bool { if r.rate == 1 { // fast path return true } s, ok := spn.(*span) if !ok { return false } r.RLock() defer r.RUnlock() return sampledByRate(s.TraceID, r.rate) } // sampledByRate verifies if the number n should be sampled at the specified // rate. func sampledByRate(n uint64, rate float64) bool { if rate < 1 { return n*knuthFactor < uint64(rate*math.MaxUint64) } return true } // prioritySampler holds a set of per-service sampling rates and applies // them to spans. type prioritySampler struct { mu sync.RWMutex rates map[string]float64 defaultRate float64 } func newPrioritySampler() *prioritySampler { return &prioritySampler{ rates: make(map[string]float64), defaultRate: 1., } } // readRatesJSON will try to read the rates as JSON from the given io.ReadCloser. func (ps *prioritySampler) readRatesJSON(rc io.ReadCloser) error { var payload struct { Rates map[string]float64 `json:"rate_by_service"` } if err := json.NewDecoder(rc).Decode(&payload); err != nil { return err } rc.Close() const defaultRateKey = "service:,env:" ps.mu.Lock() defer ps.mu.Unlock() ps.rates = payload.Rates if v, ok := ps.rates[defaultRateKey]; ok { ps.defaultRate = v delete(ps.rates, defaultRateKey) } return nil } // getRate returns the sampling rate to be used for the given span. Callers must // guard the span. 
func (ps *prioritySampler) getRate(spn *span) float64 { key := "service:" + spn.Service + ",env:" + spn.Meta[ext.Environment] ps.mu.RLock() defer ps.mu.RUnlock() if rate, ok := ps.rates[key]; ok { return rate } return ps.defaultRate } // apply applies sampling priority to the given span. Caller must ensure it is safe // to modify the span. func (ps *prioritySampler) apply(spn *span) { rate := ps.getRate(spn) if sampledByRate(spn.TraceID, rate) { spn.setSamplingPriority(ext.PriorityAutoKeep, samplernames.AgentRate, rate) } else { spn.setSamplingPriority(ext.PriorityAutoReject, samplernames.AgentRate, rate) } spn.SetTag(keySamplingPriorityRate, rate) } // rulesSampler allows a user-defined list of rules to apply to spans. // These rules can match based on the span's Service, Name or both. // When making a sampling decision, the rules are checked in order until // a match is found. // If a match is found, the rate from that rule is used. // If no match is found, and the DD_TRACE_SAMPLE_RATE environment variable // was set to a valid rate, that value is used. // Otherwise, the rules sampler didn't apply to the span, and the decision // is passed to the priority sampler. // // The rate is used to determine if the span should be sampled, but an upper // limit can be defined using the DD_TRACE_RATE_LIMIT environment variable. // Its value is the number of spans to sample per second. // Spans that matched the rules but exceeded the rate limit are not sampled. type rulesSampler struct { rules []SamplingRule // the rules to match spans with globalRate float64 // a rate to apply when no rules match a span limiter *rateLimiter // used to limit the volume of spans sampled } // newRulesSampler configures a *rulesSampler instance using the given set of rules. // Invalid rules or environment variable values are tolerated, by logging warnings and then ignoring them. func newRulesSampler(rules []SamplingRule) *rulesSampler { return &rulesSampler{ rules: rules, globalRate: globalSampleRate(), limiter: newRateLimiter(), } } // samplingRulesFromEnv parses sampling rules from the DD_TRACE_SAMPLING_RULES // environment variable. func samplingRulesFromEnv() ([]SamplingRule, error) { rulesFromEnv := os.Getenv("DD_TRACE_SAMPLING_RULES") if rulesFromEnv == "" { return nil, nil } jsonRules := []struct { Service string `json:"service"` Name string `json:"name"` Rate json.Number `json:"sample_rate"` }{} err := json.Unmarshal([]byte(rulesFromEnv), &jsonRules) if err != nil { return nil, fmt.Errorf("error unmarshalling JSON: %v", err) } rules := make([]SamplingRule, 0, len(jsonRules)) var errs []string for i, v := range jsonRules { if v.Rate == "" { errs = append(errs, fmt.Sprintf("at index %d: rate not provided", i)) continue } rate, err := v.Rate.Float64() if err != nil { errs = append(errs, fmt.Sprintf("at index %d: %v", i, err)) continue } if !(rate >= 0.0 && rate <= 1.0) { log.Warn("at index %d: ignoring rule %+v: rate is out of [0.0, 1.0] range", i, v) continue } switch { case v.Service != "" && v.Name != "": rules = append(rules, NameServiceRule(v.Name, v.Service, rate)) case v.Service != "": rules = append(rules, ServiceRule(v.Service, rate)) case v.Name != "": rules = append(rules, NameRule(v.Name, rate)) } } if len(errs) != 0 { return rules, fmt.Errorf("found errors:\n\t%s", strings.Join(errs, "\n\t")) } return rules, nil } // globalSampleRate returns the sampling rate found in the DD_TRACE_SAMPLE_RATE environment variable. // If it is invalid or not within the 0-1 range, NaN is returned. 
func globalSampleRate() float64 { defaultRate := math.NaN() v := os.Getenv("DD_TRACE_SAMPLE_RATE") if v == "" { return defaultRate } r, err := strconv.ParseFloat(v, 64) if err != nil { log.Warn("ignoring DD_TRACE_SAMPLE_RATE: error: %v", err) return defaultRate } if r >= 0.0 && r <= 1.0 { return r } log.Warn("ignoring DD_TRACE_SAMPLE_RATE: out of range %f", r) return defaultRate } // defaultRateLimit specifies the default trace rate limit used when DD_TRACE_RATE_LIMIT is not set. const defaultRateLimit = 100.0 // newRateLimiter returns a rate limiter which restricts the number of traces sampled per second. // This defaults to 100.0. The DD_TRACE_RATE_LIMIT environment variable may override the default. func newRateLimiter() *rateLimiter { limit := defaultRateLimit v := os.Getenv("DD_TRACE_RATE_LIMIT") if v != "" { l, err := strconv.ParseFloat(v, 64) if err != nil { log.Warn("using default rate limit because DD_TRACE_RATE_LIMIT is invalid: %v", err) } else if l < 0.0 { log.Warn("using default rate limit because DD_TRACE_RATE_LIMIT is negative: %f", l) } else { // override the default limit limit = l } } return &rateLimiter{ limiter: rate.NewLimiter(rate.Limit(limit), int(math.Ceil(limit))), prevTime: time.Now(), } } // apply uses the sampling rules to determine the sampling rate for the // provided span. If the rules don't match, and a default rate hasn't been // set using DD_TRACE_SAMPLE_RATE, then it returns false and the span is not // modified. func (rs *rulesSampler) apply(span *span) bool { if len(rs.rules) == 0 && math.IsNaN(rs.globalRate) { // short path when disabled return false } var matched bool rate := rs.globalRate for _, rule := range rs.rules { if rule.match(span) { matched = true rate = rule.Rate break } } if !matched && math.IsNaN(rate) { // no matching rule or global rate, so we want to fall back // to priority sampling return false } rs.applyRate(span, rate, time.Now()) return true } func (rs *rulesSampler) applyRate(span *span, rate float64, now time.Time) { span.SetTag(keyRulesSamplerAppliedRate, rate) if !sampledByRate(span.TraceID, rate) { span.setSamplingPriority(ext.PriorityUserReject, samplernames.RuleRate, rate) return } sampled, rate := rs.limiter.allowOne(now) if sampled { span.setSamplingPriority(ext.PriorityUserKeep, samplernames.RuleRate, rate) } else { span.setSamplingPriority(ext.PriorityUserReject, samplernames.RuleRate, rate) } span.SetTag(keyRulesSamplerLimiterRate, rate) } // SamplingRule is used for applying sampling rates to spans that match // the service name, operation name or both. // For basic usage, consider using the helper functions ServiceRule, NameRule, etc. type SamplingRule struct { Service *regexp.Regexp Name *regexp.Regexp Rate float64 exactService string exactName string } // ServiceRule returns a SamplingRule that applies the provided sampling rate // to spans that match the service name provided. func ServiceRule(service string, rate float64) SamplingRule { return SamplingRule{ exactService: service, Rate: rate, } } // NameRule returns a SamplingRule that applies the provided sampling rate // to spans that match the operation name provided. func NameRule(name string, rate float64) SamplingRule { return SamplingRule{ exactName: name, Rate: rate, } } // NameServiceRule returns a SamplingRule that applies the provided sampling rate // to spans matching both the operation and service names provided. 
func NameServiceRule(name string, service string, rate float64) SamplingRule { return SamplingRule{ exactService: service, exactName: name, Rate: rate, } } // RateRule returns a SamplingRule that applies the provided sampling rate to all spans. func RateRule(rate float64) SamplingRule { return SamplingRule{ Rate: rate, } } // match returns true when the span's details match all the expected values in the rule. func (sr *SamplingRule) match(s *span) bool { if sr.Service != nil && !sr.Service.MatchString(s.Service) { return false } else if sr.exactService != "" && sr.exactService != s.Service { return false } if sr.Name != nil && !sr.Name.MatchString(s.Name) { return false } else if sr.exactName != "" && sr.exactName != s.Name { return false } return true } // MarshalJSON implements the json.Marshaler interface. func (sr *SamplingRule) MarshalJSON() ([]byte, error) { s := struct { Service string `json:"service"` Name string `json:"name"` Rate float64 `json:"sample_rate"` }{} if sr.exactService != "" { s.Service = sr.exactService } else if sr.Service != nil { s.Service = fmt.Sprintf("%s", sr.Service) } if sr.exactName != "" { s.Name = sr.exactName } else if sr.Name != nil { s.Name = fmt.Sprintf("%s", sr.Name) } s.Rate = sr.Rate return json.Marshal(&s) } // rateLimiter is a wrapper on top of golang.org/x/time/rate which implements a rate limiter but also // returns the effective rate of allowance. type rateLimiter struct { limiter *rate.Limiter mu sync.Mutex // guards below fields prevTime time.Time // time at which prevAllowed and prevSeen were set allowed float64 // number of spans allowed in the current period seen float64 // number of spans seen in the current period prevAllowed float64 // number of spans allowed in the previous period prevSeen float64 // number of spans seen in the previous period } // allowOne returns the rate limiter's decision to allow the span to be sampled, and the // effective rate at the time it is called. The effective rate is computed by averaging the rate // for the previous second with the current rate func (r *rateLimiter) allowOne(now time.Time) (bool, float64) { r.mu.Lock() defer r.mu.Unlock() if d := now.Sub(r.prevTime); d >= time.Second { // enough time has passed to reset the counters if d.Truncate(time.Second) == time.Second && r.seen > 0 { // exactly one second, so update prev r.prevAllowed = r.allowed r.prevSeen = r.seen } else { // more than one second, so reset previous rate r.prevAllowed = 0 r.prevSeen = 0 } r.prevTime = now r.allowed = 0 r.seen = 0 } r.seen++ var sampled bool if r.limiter.AllowN(now, 1) { r.allowed++ sampled = true } er := (r.prevAllowed + r.allowed) / (r.prevSeen + r.seen) return sampled, er }
[ "\"DD_TRACE_SAMPLING_RULES\"", "\"DD_TRACE_SAMPLE_RATE\"", "\"DD_TRACE_RATE_LIMIT\"" ]
[]
[ "DD_TRACE_SAMPLE_RATE", "DD_TRACE_RATE_LIMIT", "DD_TRACE_SAMPLING_RULES" ]
[]
["DD_TRACE_SAMPLE_RATE", "DD_TRACE_RATE_LIMIT", "DD_TRACE_SAMPLING_RULES"]
go
3
0
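Note on the record above: its constarg entries (DD_TRACE_SAMPLE_RATE, DD_TRACE_RATE_LIMIT, DD_TRACE_SAMPLING_RULES) are all read with os.Getenv and validated before use. The sketch below isolates that read-and-validate pattern for the sample-rate case as a standalone program; it is not the library code itself, and the value set in main is a demo assumption only.

    package main

    import (
        "fmt"
        "math"
        "os"
        "strconv"
    )

    // rateFromEnv mirrors the validation pattern annotated above: read a rate
    // from an environment variable and fall back (here to NaN) when it is
    // unset, unparsable, or outside [0, 1].
    func rateFromEnv(name string) float64 {
        v := os.Getenv(name)
        if v == "" {
            return math.NaN()
        }
        r, err := strconv.ParseFloat(v, 64)
        if err != nil || r < 0 || r > 1 {
            return math.NaN()
        }
        return r
    }

    func main() {
        os.Setenv("DD_TRACE_SAMPLE_RATE", "0.25") // example value for this demo only
        fmt.Println(rateFromEnv("DD_TRACE_SAMPLE_RATE")) // 0.25
        fmt.Println(rateFromEnv("DD_TRACE_RATE_LIMIT"))  // NaN: unset in this demo
    }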
tests/test___main__.py
import os import random import pytest from click.testing import CliRunner from finance.__main__ import ( create_all, drop_all, fetch_stock_values, import_fund, import_sp500_records, import_stock_records, import_stock_values, insert_stock_assets, insert_test_data, ) from finance.exceptions import AssetNotFoundException from finance.models import StockAsset, deposit from finance.utils import load_stock_codes @pytest.fixture(autouse=True) def monkeypatch_db_url(monkeypatch): monkeypatch.setitem(os.environ, "SBF_DB_URL", os.environ["SBF_TEST_DB_URL"]) def test_drop_all(): runner = CliRunner() result = runner.invoke(drop_all) assert result.exit_code == 0 def test_create_all(): runner = CliRunner() result = runner.invoke(create_all) assert result.exit_code == 0 @pytest.mark.skip def test_insert_test_data_all(): runner = CliRunner() result = runner.invoke(insert_test_data) assert result.exit_code == 0 @pytest.mark.skip def test_import_sp500_records(): runner = CliRunner() result = runner.invoke(import_sp500_records) assert result.exit_code == 0 def test_import_fund(asset_sp500): runner = CliRunner() result = runner.invoke(import_fund, ["KR5223941018", "2016-01-01", "2016-01-31"]) assert result.exit_code == 0 def test_import_non_existing_fund(): runner = CliRunner() result = runner.invoke(import_fund, ["???", "2016-01-01", "2016-01-31"]) assert isinstance(result.exception, AssetNotFoundException) def test_fetch_stock_values(): runner = CliRunner() result = runner.invoke( fetch_stock_values, ["NVDA", "-s", "2017-01-01", "-e", "2017-01-15"] ) assert result.exit_code == 0 # NOTE: This test case may intermittently fail as some of the stock codes # is not available for download in Google Finance def test_import_stock_values(): with open("stock_codes.csv", "r") as fin: codes = list(load_stock_codes(fin)) code, name = random.choice(codes) StockAsset.create(code=code) runner = CliRunner() result = runner.invoke( import_stock_values, [code], input="2017-08-28, 31100.0, 31150.0, 30400.0, 31000.0, 856210, test", catch_exceptions=False, ) assert result.exit_code == 0 asset = StockAsset.get_by_symbol(code) asset_value = asset.asset_values[0] assert asset_value.open == 31100 assert asset_value.high == 31150 assert asset_value.low == 30400 assert asset_value.close == 31000 assert asset_value.volume == 856210 def test_import_stock_records(session, asset_krw, account_stock, account_checking): for _ in insert_stock_assets(): pass runner = CliRunner() result = runner.invoke( import_stock_records, ["tests/samples/shinhan_stock_records.csv"], catch_exceptions=False, ) assert result.exit_code == 0
[]
[]
[ "SBF_TEST_DB_URL" ]
[]
["SBF_TEST_DB_URL"]
python
1
0
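Note on the record above: its only annotated read is SBF_TEST_DB_URL, which a fixture copies into SBF_DB_URL so the code under test talks to the test database. The sketch below renders that override-and-restore pattern in Go, the language used for the other sketches here; the helper name and the demo URL in main are assumptions, only the two variable names come from the record.

    package main

    import (
        "fmt"
        "os"
    )

    // pointAtTestDB copies the test database URL over the main one, the same
    // override the record above performs in a pytest fixture. The returned
    // function restores the previous value.
    func pointAtTestDB() (restore func(), err error) {
        testURL, ok := os.LookupEnv("SBF_TEST_DB_URL")
        if !ok {
            return nil, fmt.Errorf("SBF_TEST_DB_URL is not set")
        }
        prev, had := os.LookupEnv("SBF_DB_URL")
        if err := os.Setenv("SBF_DB_URL", testURL); err != nil {
            return nil, err
        }
        return func() {
            if had {
                os.Setenv("SBF_DB_URL", prev)
            } else {
                os.Unsetenv("SBF_DB_URL")
            }
        }, nil
    }

    func main() {
        os.Setenv("SBF_TEST_DB_URL", "postgres://localhost/test") // demo value only
        restore, err := pointAtTestDB()
        if err != nil {
            fmt.Println("skipping:", err)
            return
        }
        defer restore()
        fmt.Println("SBF_DB_URL =", os.Getenv("SBF_DB_URL"))
    }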
client/lcd/lcd_test.go
package lcd import ( "encoding/base64" "encoding/hex" "fmt" "net/http" "os" "regexp" "strings" "testing" "time" "github.com/stretchr/testify/require" "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/client/keys" clienttx "github.com/cosmos/cosmos-sdk/client/tx" "github.com/cosmos/cosmos-sdk/cmd/gaia/app" "github.com/cosmos/cosmos-sdk/crypto/keys/mintkey" "github.com/cosmos/cosmos-sdk/tests" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/rest" "github.com/cosmos/cosmos-sdk/version" "github.com/cosmos/cosmos-sdk/x/auth" "github.com/cosmos/cosmos-sdk/x/bank" dclcommon "github.com/cosmos/cosmos-sdk/x/distribution/client/common" distrrest "github.com/cosmos/cosmos-sdk/x/distribution/client/rest" "github.com/cosmos/cosmos-sdk/x/gov" "github.com/cosmos/cosmos-sdk/x/slashing" "github.com/cosmos/cosmos-sdk/x/staking" ) const ( name1 = "test1" name2 = "test2" name3 = "test3" memo = "LCD test tx" pw = app.DefaultKeyPass altPw = "12345678901" ) var fees = sdk.Coins{sdk.NewInt64Coin(sdk.DefaultBondDenom, 5)} func init() { mintkey.BcryptSecurityParameter = 1 version.Version = os.Getenv("VERSION") } func TestVersion(t *testing.T) { // skip the test if the VERSION environment variable has not been set if version.Version == "" { t.SkipNow() } cleanup, _, _, port := InitializeTestLCD(t, 1, []sdk.AccAddress{}, true) defer cleanup() // node info res, body := Request(t, port, "GET", "/version", nil) require.Equal(t, http.StatusOK, res.StatusCode, body) reg, err := regexp.Compile(`\d+\.\d+\.\d+.*`) require.Nil(t, err) match := reg.MatchString(body) require.True(t, match, body, body) // node info res, body = Request(t, port, "GET", "/node_version", nil) require.Equal(t, http.StatusOK, res.StatusCode, body) reg, err = regexp.Compile(`\d+\.\d+\.\d+.*`) require.Nil(t, err) match = reg.MatchString(body) require.True(t, match, body) } func TestNodeStatus(t *testing.T) { cleanup, _, _, port := InitializeTestLCD(t, 1, []sdk.AccAddress{}, true) defer cleanup() getNodeInfo(t, port) getSyncStatus(t, port, false) } func TestBlock(t *testing.T) { cleanup, _, _, port := InitializeTestLCD(t, 1, []sdk.AccAddress{}, true) defer cleanup() getBlock(t, port, -1, false) getBlock(t, port, 2, false) getBlock(t, port, 100000000, true) } func TestValidators(t *testing.T) { cleanup, _, _, port := InitializeTestLCD(t, 1, []sdk.AccAddress{}, true) defer cleanup() resultVals := getValidatorSets(t, port, -1, false) require.Contains(t, resultVals.Validators[0].Address.String(), "cosmosvalcons") require.Contains(t, resultVals.Validators[0].PubKey, "cosmosvalconspub") getValidatorSets(t, port, 2, false) getValidatorSets(t, port, 10000000, true) } func TestCoinSend(t *testing.T) { kb, err := keys.NewKeyBaseFromDir(InitClientHome(t, "")) require.NoError(t, err) addr, seed := CreateAddr(t, name1, pw, kb) cleanup, _, _, port := InitializeTestLCD(t, 1, []sdk.AccAddress{addr}, true) defer cleanup() bz, err := hex.DecodeString("8FA6AB57AD6870F6B5B2E57735F38F2F30E73CB6") require.NoError(t, err) someFakeAddr := sdk.AccAddress(bz) // query empty res, body := Request(t, port, "GET", fmt.Sprintf("/auth/accounts/%s", someFakeAddr), nil) require.Equal(t, http.StatusNoContent, res.StatusCode, body) acc := getAccount(t, port, addr) initialBalance := acc.GetCoins() // create TX receiveAddr, resultTx := doTransfer(t, port, seed, name1, memo, pw, addr, fees) tests.WaitForHeight(resultTx.Height+1, port) // check if tx was committed require.Equal(t, uint32(0), resultTx.Code) // query sender acc = getAccount(t, port, 
addr) coins := acc.GetCoins() expectedBalance := initialBalance[0].Sub(fees[0]) require.Equal(t, sdk.DefaultBondDenom, coins[0].Denom) require.Equal(t, expectedBalance.Amount.SubRaw(1), coins[0].Amount) expectedBalance = coins[0] // query receiver acc2 := getAccount(t, port, receiveAddr) coins2 := acc2.GetCoins() require.Equal(t, sdk.DefaultBondDenom, coins2[0].Denom) require.Equal(t, int64(1), coins2[0].Amount.Int64()) // test failure with too little gas res, body, _ = doTransferWithGas(t, port, seed, name1, memo, pw, addr, "100", 0, false, true, fees) require.Equal(t, http.StatusInternalServerError, res.StatusCode, body) require.Nil(t, err) // test failure with negative gas res, body, _ = doTransferWithGas(t, port, seed, name1, memo, pw, addr, "-200", 0, false, false, fees) require.Equal(t, http.StatusBadRequest, res.StatusCode, body) // test failure with negative adjustment res, body, _ = doTransferWithGas(t, port, seed, name1, memo, pw, addr, "10000", -0.1, true, false, fees) require.Equal(t, http.StatusBadRequest, res.StatusCode, body) // test failure with 0 gas res, body, _ = doTransferWithGas(t, port, seed, name1, memo, pw, addr, "0", 0, false, true, fees) require.Equal(t, http.StatusInternalServerError, res.StatusCode, body) // test failure with wrong adjustment res, body, _ = doTransferWithGas(t, port, seed, name1, memo, pw, addr, client.GasFlagAuto, 0.1, false, true, fees) require.Equal(t, http.StatusInternalServerError, res.StatusCode, body) // run simulation and test success with estimated gas res, body, _ = doTransferWithGas( t, port, seed, name1, memo, pw, addr, "10000", 1.0, true, false, fees, ) require.Equal(t, http.StatusOK, res.StatusCode, body) var gasEstResp rest.GasEstimateResponse require.Nil(t, cdc.UnmarshalJSON([]byte(body), &gasEstResp)) require.NotZero(t, gasEstResp.GasEstimate) acc = getAccount(t, port, addr) require.Equal(t, expectedBalance.Amount, acc.GetCoins().AmountOf(sdk.DefaultBondDenom)) // run successful tx gas := fmt.Sprintf("%d", gasEstResp.GasEstimate) res, body, _ = doTransferWithGas(t, port, seed, name1, memo, pw, addr, gas, 1.0, false, true, fees) require.Equal(t, http.StatusOK, res.StatusCode, body) err = cdc.UnmarshalJSON([]byte(body), &resultTx) require.Nil(t, err) tests.WaitForHeight(resultTx.Height+1, port) require.Equal(t, uint32(0), resultTx.Code) acc = getAccount(t, port, addr) expectedBalance = expectedBalance.Sub(fees[0]) require.Equal(t, expectedBalance.Amount.SubRaw(1), acc.GetCoins().AmountOf(sdk.DefaultBondDenom)) } func TestCoinSendAccAuto(t *testing.T) { kb, err := keys.NewKeyBaseFromDir(InitClientHome(t, "")) require.NoError(t, err) addr, seed := CreateAddr(t, name1, pw, kb) cleanup, _, _, port := InitializeTestLCD(t, 1, []sdk.AccAddress{addr}, true) defer cleanup() acc := getAccount(t, port, addr) initialBalance := acc.GetCoins() // send a transfer tx without specifying account number and sequence res, body, _ := doTransferWithGasAccAuto( t, port, seed, name1, memo, pw, addr, "200000", 1.0, false, true, fees, ) require.Equal(t, http.StatusOK, res.StatusCode, body) // query sender acc = getAccount(t, port, addr) coins := acc.GetCoins() expectedBalance := initialBalance[0].Sub(fees[0]) require.Equal(t, sdk.DefaultBondDenom, coins[0].Denom) require.Equal(t, expectedBalance.Amount.SubRaw(1), coins[0].Amount) } func TestCoinMultiSendGenerateOnly(t *testing.T) { kb, err := keys.NewKeyBaseFromDir(InitClientHome(t, "")) require.NoError(t, err) addr, seed := CreateAddr(t, name1, pw, kb) cleanup, _, _, port := InitializeTestLCD(t, 1, 
[]sdk.AccAddress{addr}, true) defer cleanup() // generate only res, body, _ := doTransferWithGas(t, port, seed, "", memo, "", addr, "200000", 1, false, false, fees) require.Equal(t, http.StatusOK, res.StatusCode, body) var stdTx auth.StdTx require.Nil(t, cdc.UnmarshalJSON([]byte(body), &stdTx)) require.Equal(t, len(stdTx.Msgs), 1) require.Equal(t, stdTx.GetMsgs()[0].Route(), "bank") require.Equal(t, stdTx.GetMsgs()[0].GetSigners(), []sdk.AccAddress{addr}) require.Equal(t, 0, len(stdTx.Signatures)) require.Equal(t, memo, stdTx.Memo) require.NotZero(t, stdTx.Fee.Gas) require.IsType(t, stdTx.GetMsgs()[0], bank.MsgSend{}) require.Equal(t, addr, stdTx.GetMsgs()[0].(bank.MsgSend).FromAddress) } func TestCoinSendGenerateSignAndBroadcast(t *testing.T) { kb, err := keys.NewKeyBaseFromDir(InitClientHome(t, "")) require.NoError(t, err) addr, seed := CreateAddr(t, name1, pw, kb) cleanup, _, _, port := InitializeTestLCD(t, 1, []sdk.AccAddress{addr}, true) defer cleanup() acc := getAccount(t, port, addr) // simulate tx res, body, _ := doTransferWithGas( t, port, seed, name1, memo, "", addr, client.GasFlagAuto, 1.0, true, false, fees, ) require.Equal(t, http.StatusOK, res.StatusCode, body) var gasEstResp rest.GasEstimateResponse require.Nil(t, cdc.UnmarshalJSON([]byte(body), &gasEstResp)) require.NotZero(t, gasEstResp.GasEstimate) // generate tx gas := fmt.Sprintf("%d", gasEstResp.GasEstimate) res, body, _ = doTransferWithGas(t, port, seed, name1, memo, "", addr, gas, 1, false, false, fees) require.Equal(t, http.StatusOK, res.StatusCode, body) var tx auth.StdTx require.Nil(t, cdc.UnmarshalJSON([]byte(body), &tx)) require.Equal(t, len(tx.Msgs), 1) require.Equal(t, tx.Msgs[0].Route(), "bank") require.Equal(t, tx.Msgs[0].GetSigners(), []sdk.AccAddress{addr}) require.Equal(t, 0, len(tx.Signatures)) require.Equal(t, memo, tx.Memo) require.NotZero(t, tx.Fee.Gas) gasEstimate := int64(tx.Fee.Gas) _, body = signAndBroadcastGenTx(t, port, name1, pw, body, acc, 1.0, false) // check if tx was committed var txResp sdk.TxResponse require.Nil(t, cdc.UnmarshalJSON([]byte(body), &txResp)) require.Equal(t, uint32(0), txResp.Code) require.Equal(t, gasEstimate, txResp.GasWanted) } func TestEncodeTx(t *testing.T) { kb, err := keys.NewKeyBaseFromDir(InitClientHome(t, "")) require.NoError(t, err) addr, seed := CreateAddr(t, name1, pw, kb) cleanup, _, _, port := InitializeTestLCD(t, 1, []sdk.AccAddress{addr}, true) defer cleanup() res, body, _ := doTransferWithGas(t, port, seed, name1, memo, "", addr, "2", 1, false, false, fees) var tx auth.StdTx cdc.UnmarshalJSON([]byte(body), &tx) req := clienttx.EncodeReq{Tx: tx} encodedJSON, _ := cdc.MarshalJSON(req) res, body = Request(t, port, "POST", "/txs/encode", encodedJSON) // Make sure it came back ok, and that we can decode it back to the transaction // 200 response. 
require.Equal(t, http.StatusOK, res.StatusCode, body) encodeResp := struct { Tx string `json:"tx"` }{} require.Nil(t, cdc.UnmarshalJSON([]byte(body), &encodeResp)) // verify that the base64 decodes decodedBytes, err := base64.StdEncoding.DecodeString(encodeResp.Tx) require.Nil(t, err) // check that the transaction decodes as expected var decodedTx auth.StdTx require.Nil(t, cdc.UnmarshalBinaryLengthPrefixed(decodedBytes, &decodedTx)) require.Equal(t, memo, decodedTx.Memo) } func TestTxs(t *testing.T) { kb, err := keys.NewKeyBaseFromDir(InitClientHome(t, "")) require.NoError(t, err) addr, seed := CreateAddr(t, name1, pw, kb) cleanup, _, _, port := InitializeTestLCD(t, 1, []sdk.AccAddress{addr}, true) defer cleanup() var emptyTxs []sdk.TxResponse txs := getTransactions(t, port) require.Equal(t, emptyTxs, txs) // query empty txs = getTransactions(t, port, fmt.Sprintf("sender=%s", addr.String())) require.Equal(t, emptyTxs, txs) // also tests url decoding txs = getTransactions(t, port, fmt.Sprintf("sender=%s", addr.String())) require.Equal(t, emptyTxs, txs) txs = getTransactions(t, port, fmt.Sprintf("action=submit%%20proposal&proposer=%s", addr.String())) require.Equal(t, emptyTxs, txs) // create tx receiveAddr, resultTx := doTransfer(t, port, seed, name1, memo, pw, addr, fees) tests.WaitForHeight(resultTx.Height+1, port) // check if tx is queryable tx := getTransaction(t, port, resultTx.TxHash) require.Equal(t, resultTx.TxHash, tx.TxHash) // query sender txs = getTransactions(t, port, fmt.Sprintf("sender=%s", addr.String())) require.Len(t, txs, 1) require.Equal(t, resultTx.Height, txs[0].Height) // query recipient txs = getTransactions(t, port, fmt.Sprintf("recipient=%s", receiveAddr.String())) require.Len(t, txs, 1) require.Equal(t, resultTx.Height, txs[0].Height) // query transaction that doesn't exist validTxHash := "9ADBECAAD8DACBEC3F4F535704E7CF715C765BDCEDBEF086AFEAD31BA664FB0B" res, body := getTransactionRequest(t, port, validTxHash) require.True(t, strings.Contains(body, validTxHash)) require.Equal(t, http.StatusNotFound, res.StatusCode) // bad query string res, body = getTransactionRequest(t, port, "badtxhash") require.True(t, strings.Contains(body, "encoding/hex")) require.Equal(t, http.StatusInternalServerError, res.StatusCode) } func TestPoolParamsQuery(t *testing.T) { kb, err := keys.NewKeyBaseFromDir(InitClientHome(t, "")) require.NoError(t, err) addr, _ := CreateAddr(t, name1, pw, kb) cleanup, _, _, port := InitializeTestLCD(t, 1, []sdk.AccAddress{addr}, true) defer cleanup() defaultParams := staking.DefaultParams() params := getStakingParams(t, port) require.True(t, defaultParams.Equal(params)) pool := getStakingPool(t, port) initialPool := staking.InitialPool() tokens := sdk.TokensFromTendermintPower(100) freeTokens := sdk.TokensFromTendermintPower(50) initialPool.NotBondedTokens = initialPool.NotBondedTokens.Add(tokens) initialPool.BondedTokens = initialPool.BondedTokens.Add(tokens) // Delegate tx on GaiaAppGenState initialPool.NotBondedTokens = initialPool.NotBondedTokens.Add(freeTokens) // freeTokensPerAcc = 50 on GaiaAppGenState require.Equal(t, initialPool.BondedTokens, pool.BondedTokens) //TODO include this test once REST for distribution is online, need to include distribution tokens from inflation // for this equality to make sense //require.Equal(t, initialPool.NotBondedTokens, pool.NotBondedTokens) } func TestValidatorsQuery(t *testing.T) { cleanup, valPubKeys, operAddrs, port := InitializeTestLCD(t, 1, []sdk.AccAddress{}, true) defer cleanup() require.Equal(t, 1, 
len(valPubKeys)) require.Equal(t, 1, len(operAddrs)) validators := getValidators(t, port) require.Equal(t, 1, len(validators), fmt.Sprintf("%+v", validators)) // make sure all the validators were found (order unknown because sorted by operator addr) foundVal := false if validators[0].ConsPubKey == valPubKeys[0] { foundVal = true } require.True(t, foundVal, "pk %v, operator %v", operAddrs[0], validators[0].OperatorAddress) } func TestValidatorQuery(t *testing.T) { cleanup, valPubKeys, operAddrs, port := InitializeTestLCD(t, 1, []sdk.AccAddress{}, true) defer cleanup() require.Equal(t, 1, len(valPubKeys)) require.Equal(t, 1, len(operAddrs)) validator := getValidator(t, port, operAddrs[0]) require.Equal(t, validator.OperatorAddress, operAddrs[0], "The returned validator does not hold the correct data") } func TestBonding(t *testing.T) { kb, err := keys.NewKeyBaseFromDir(InitClientHome(t, "")) require.NoError(t, err) addr, _ := CreateAddr(t, name1, pw, kb) cleanup, valPubKeys, operAddrs, port := InitializeTestLCD(t, 2, []sdk.AccAddress{addr}, false) tests.WaitForHeight(1, port) defer cleanup() require.Equal(t, 2, len(valPubKeys)) require.Equal(t, 2, len(operAddrs)) amt := sdk.TokensFromTendermintPower(60) amtDec := amt.ToDec() validator := getValidator(t, port, operAddrs[0]) acc := getAccount(t, port, addr) initialBalance := acc.GetCoins() // create bond TX delTokens := sdk.TokensFromTendermintPower(60) resultTx := doDelegate(t, port, name1, pw, addr, operAddrs[0], delTokens, fees) tests.WaitForHeight(resultTx.Height+1, port) require.Equal(t, uint32(0), resultTx.Code) // query tx txs := getTransactions(t, port, fmt.Sprintf("action=delegate&delegator=%s", addr), fmt.Sprintf("destination-validator=%s", operAddrs[0]), ) require.Len(t, txs, 1) require.Equal(t, resultTx.Height, txs[0].Height) // verify balance acc = getAccount(t, port, addr) coins := acc.GetCoins() expectedBalance := initialBalance[0].Sub(fees[0]) require.Equal(t, expectedBalance.Amount.Sub(delTokens), coins.AmountOf(sdk.DefaultBondDenom)) expectedBalance = coins[0] // query delegation bond := getDelegation(t, port, addr, operAddrs[0]) require.Equal(t, amtDec, bond.Shares) delegatorDels := getDelegatorDelegations(t, port, addr) require.Len(t, delegatorDels, 1) require.Equal(t, amtDec, delegatorDels[0].Shares) // query all delegations to validator bonds := getValidatorDelegations(t, port, operAddrs[0]) require.Len(t, bonds, 2) bondedValidators := getDelegatorValidators(t, port, addr) require.Len(t, bondedValidators, 1) require.Equal(t, operAddrs[0], bondedValidators[0].OperatorAddress) require.Equal(t, validator.DelegatorShares.Add(amtDec).String(), bondedValidators[0].DelegatorShares.String()) bondedValidator := getDelegatorValidator(t, port, addr, operAddrs[0]) require.Equal(t, operAddrs[0], bondedValidator.OperatorAddress) // testing unbonding unbondingTokens := sdk.TokensFromTendermintPower(30) resultTx = doUndelegate(t, port, name1, pw, addr, operAddrs[0], unbondingTokens, fees) tests.WaitForHeight(resultTx.Height+1, port) require.Equal(t, uint32(0), resultTx.Code) // sender should have not received any coins as the unbonding has only just begun acc = getAccount(t, port, addr) coins = acc.GetCoins() expectedBalance = expectedBalance.Sub(fees[0]) require.True(t, expectedBalance.Amount.LT(coins.AmountOf(sdk.DefaultBondDenom)) || expectedBalance.Amount.Equal(coins.AmountOf(sdk.DefaultBondDenom)), "should get tokens back from automatic withdrawal after an unbonding delegation", ) expectedBalance = coins[0] // query tx txs = 
getTransactions(t, port, fmt.Sprintf("action=begin_unbonding&delegator=%s", addr), fmt.Sprintf("source-validator=%s", operAddrs[0]), ) require.Len(t, txs, 1) require.Equal(t, resultTx.Height, txs[0].Height) ubd := getUnbondingDelegation(t, port, addr, operAddrs[0]) require.Len(t, ubd.Entries, 1) require.Equal(t, delTokens.QuoRaw(2), ubd.Entries[0].Balance) // test redelegation rdTokens := sdk.TokensFromTendermintPower(30) resultTx = doBeginRedelegation(t, port, name1, pw, addr, operAddrs[0], operAddrs[1], rdTokens, fees) require.Equal(t, uint32(0), resultTx.Code) tests.WaitForHeight(resultTx.Height+1, port) validator2 := getValidator(t, port, operAddrs[1]) // query delegations, unbondings and redelegations from validator and delegator delegatorDels = getDelegatorDelegations(t, port, addr) require.Len(t, delegatorDels, 1) require.Equal(t, operAddrs[1], delegatorDels[0].ValidatorAddress) // because the second validator never signs during these tests, if this // this test takes a long time to run, eventually this second validator // will get slashed, meaning that it's exchange rate is no-longer 1-to-1, // hence we utilize the exchange rate in the following test delTokensAfterRedelegation := validator2.ShareTokens(delegatorDels[0].GetShares()) require.Equal(t, rdTokens.ToDec(), delTokensAfterRedelegation) // verify balance after paying fees acc = getAccount(t, port, addr) expectedBalance = expectedBalance.Sub(fees[0]) require.True(t, expectedBalance.Amount.LT(coins.AmountOf(sdk.DefaultBondDenom)) || expectedBalance.Amount.Equal(coins.AmountOf(sdk.DefaultBondDenom)), "should get tokens back from automatic withdrawal after an unbonding delegation", ) // query tx txs = getTransactions(t, port, fmt.Sprintf("action=begin_redelegate&delegator=%s", addr), fmt.Sprintf("source-validator=%s", operAddrs[0]), fmt.Sprintf("destination-validator=%s", operAddrs[1]), ) require.Len(t, txs, 1) require.Equal(t, resultTx.Height, txs[0].Height) redelegation := getRedelegations(t, port, addr, operAddrs[0], operAddrs[1]) require.Len(t, redelegation, 1) require.Len(t, redelegation[0].Entries, 1) delegatorUbds := getDelegatorUnbondingDelegations(t, port, addr) require.Len(t, delegatorUbds, 1) require.Len(t, delegatorUbds[0].Entries, 1) require.Equal(t, rdTokens, delegatorUbds[0].Entries[0].Balance) delegatorReds := getRedelegations(t, port, addr, nil, nil) require.Len(t, delegatorReds, 1) require.Len(t, delegatorReds[0].Entries, 1) validatorUbds := getValidatorUnbondingDelegations(t, port, operAddrs[0]) require.Len(t, validatorUbds, 1) require.Len(t, validatorUbds[0].Entries, 1) require.Equal(t, rdTokens, validatorUbds[0].Entries[0].Balance) validatorReds := getRedelegations(t, port, nil, operAddrs[0], nil) require.Len(t, validatorReds, 1) require.Len(t, validatorReds[0].Entries, 1) // TODO Undonding status not currently implemented // require.Equal(t, sdk.Unbonding, bondedValidators[0].Status) // query txs txs = getBondingTxs(t, port, addr, "") require.Len(t, txs, 3, "All Txs found") txs = getBondingTxs(t, port, addr, "bond") require.Len(t, txs, 1, "All bonding txs found") txs = getBondingTxs(t, port, addr, "unbond") require.Len(t, txs, 1, "All unbonding txs found") txs = getBondingTxs(t, port, addr, "redelegate") require.Len(t, txs, 1, "All redelegation txs found") } func TestSubmitProposal(t *testing.T) { kb, err := keys.NewKeyBaseFromDir(InitClientHome(t, "")) require.NoError(t, err) addr, seed := CreateAddr(t, name1, pw, kb) cleanup, _, _, port := InitializeTestLCD(t, 1, []sdk.AccAddress{addr}, true) defer 
cleanup() acc := getAccount(t, port, addr) initialBalance := acc.GetCoins() // create SubmitProposal TX proposalTokens := sdk.TokensFromTendermintPower(5) resultTx := doSubmitProposal(t, port, seed, name1, pw, addr, proposalTokens, fees) tests.WaitForHeight(resultTx.Height+1, port) // check if tx was committed require.Equal(t, uint32(0), resultTx.Code) var proposalID uint64 cdc.MustUnmarshalBinaryLengthPrefixed(resultTx.Data, &proposalID) // verify balance acc = getAccount(t, port, addr) expectedBalance := initialBalance[0].Sub(fees[0]) require.Equal(t, expectedBalance.Amount.Sub(proposalTokens), acc.GetCoins().AmountOf(sdk.DefaultBondDenom)) // query proposal proposal := getProposal(t, port, proposalID) require.Equal(t, "Test", proposal.GetTitle()) proposer := getProposer(t, port, proposalID) require.Equal(t, addr.String(), proposer.Proposer) require.Equal(t, proposalID, proposer.ProposalID) } func TestDeposit(t *testing.T) { kb, err := keys.NewKeyBaseFromDir(InitClientHome(t, "")) require.NoError(t, err) addr, seed := CreateAddr(t, name1, pw, kb) cleanup, _, _, port := InitializeTestLCD(t, 1, []sdk.AccAddress{addr}, true) defer cleanup() acc := getAccount(t, port, addr) initialBalance := acc.GetCoins() // create SubmitProposal TX proposalTokens := sdk.TokensFromTendermintPower(5) resultTx := doSubmitProposal(t, port, seed, name1, pw, addr, proposalTokens, fees) tests.WaitForHeight(resultTx.Height+1, port) // check if tx was committed require.Equal(t, uint32(0), resultTx.Code) var proposalID uint64 cdc.MustUnmarshalBinaryLengthPrefixed(resultTx.Data, &proposalID) // verify balance acc = getAccount(t, port, addr) coins := acc.GetCoins() expectedBalance := initialBalance[0].Sub(fees[0]) require.Equal(t, expectedBalance.Amount.Sub(proposalTokens), coins.AmountOf(sdk.DefaultBondDenom)) expectedBalance = coins[0] // query proposal proposal := getProposal(t, port, proposalID) require.Equal(t, "Test", proposal.GetTitle()) // create SubmitProposal TX depositTokens := sdk.TokensFromTendermintPower(5) resultTx = doDeposit(t, port, seed, name1, pw, addr, proposalID, depositTokens, fees) tests.WaitForHeight(resultTx.Height+1, port) // verify balance after deposit and fee acc = getAccount(t, port, addr) expectedBalance = expectedBalance.Sub(fees[0]) require.Equal(t, expectedBalance.Amount.Sub(depositTokens), acc.GetCoins().AmountOf(sdk.DefaultBondDenom)) // query tx txs := getTransactions(t, port, fmt.Sprintf("action=deposit&depositor=%s", addr)) require.Len(t, txs, 1) require.Equal(t, resultTx.Height, txs[0].Height) // query proposal totalCoins := sdk.Coins{sdk.NewCoin(sdk.DefaultBondDenom, sdk.TokensFromTendermintPower(10))} proposal = getProposal(t, port, proposalID) require.True(t, proposal.TotalDeposit.IsEqual(totalCoins)) // query deposit deposit := getDeposit(t, port, proposalID, addr) require.True(t, deposit.Amount.IsEqual(totalCoins)) } func TestVote(t *testing.T) { kb, err := keys.NewKeyBaseFromDir(InitClientHome(t, "")) require.NoError(t, err) addr, seed := CreateAddr(t, name1, pw, kb) cleanup, _, operAddrs, port := InitializeTestLCD(t, 1, []sdk.AccAddress{addr}, true) defer cleanup() acc := getAccount(t, port, addr) initialBalance := acc.GetCoins() // create SubmitProposal TX proposalTokens := sdk.TokensFromTendermintPower(10) resultTx := doSubmitProposal(t, port, seed, name1, pw, addr, proposalTokens, fees) tests.WaitForHeight(resultTx.Height+1, port) // check if tx was committed require.Equal(t, uint32(0), resultTx.Code) var proposalID uint64 
cdc.MustUnmarshalBinaryLengthPrefixed(resultTx.Data, &proposalID) // verify balance acc = getAccount(t, port, addr) coins := acc.GetCoins() expectedBalance := initialBalance[0].Sub(fees[0]) require.Equal(t, expectedBalance.Amount.Sub(proposalTokens), coins.AmountOf(sdk.DefaultBondDenom)) expectedBalance = coins[0] // query proposal proposal := getProposal(t, port, proposalID) require.Equal(t, "Test", proposal.GetTitle()) require.Equal(t, gov.StatusVotingPeriod, proposal.Status) // vote resultTx = doVote(t, port, seed, name1, pw, addr, proposalID, "Yes", fees) tests.WaitForHeight(resultTx.Height+1, port) // verify balance after vote and fee acc = getAccount(t, port, addr) coins = acc.GetCoins() expectedBalance = expectedBalance.Sub(fees[0]) require.Equal(t, expectedBalance.Amount, coins.AmountOf(sdk.DefaultBondDenom)) expectedBalance = coins[0] // query tx txs := getTransactions(t, port, fmt.Sprintf("action=vote&voter=%s", addr)) require.Len(t, txs, 1) require.Equal(t, resultTx.Height, txs[0].Height) vote := getVote(t, port, proposalID, addr) require.Equal(t, proposalID, vote.ProposalID) require.Equal(t, gov.OptionYes, vote.Option) tally := getTally(t, port, proposalID) require.Equal(t, sdk.ZeroInt(), tally.Yes, "tally should be 0 as the address is not bonded") // create bond TX delTokens := sdk.TokensFromTendermintPower(60) resultTx = doDelegate(t, port, name1, pw, addr, operAddrs[0], delTokens, fees) tests.WaitForHeight(resultTx.Height+1, port) // verify balance acc = getAccount(t, port, addr) coins = acc.GetCoins() expectedBalance = expectedBalance.Sub(fees[0]) require.Equal(t, expectedBalance.Amount.Sub(delTokens), coins.AmountOf(sdk.DefaultBondDenom)) expectedBalance = coins[0] tally = getTally(t, port, proposalID) require.Equal(t, delTokens, tally.Yes, "tally should be equal to the amount delegated") // change vote option resultTx = doVote(t, port, seed, name1, pw, addr, proposalID, "No", fees) tests.WaitForHeight(resultTx.Height+1, port) // verify balance acc = getAccount(t, port, addr) expectedBalance = expectedBalance.Sub(fees[0]) require.Equal(t, expectedBalance.Amount, acc.GetCoins().AmountOf(sdk.DefaultBondDenom)) tally = getTally(t, port, proposalID) require.Equal(t, sdk.ZeroInt(), tally.Yes, "tally should be 0 the user changed the option") require.Equal(t, delTokens, tally.No, "tally should be equal to the amount delegated") } func TestUnjail(t *testing.T) { kb, err := keys.NewKeyBaseFromDir(InitClientHome(t, "")) require.NoError(t, err) addr, _ := CreateAddr(t, name1, pw, kb) cleanup, valPubKeys, _, port := InitializeTestLCD(t, 1, []sdk.AccAddress{addr}, true) defer cleanup() // XXX: any less than this and it fails tests.WaitForHeight(3, port) pkString, _ := sdk.Bech32ifyConsPub(valPubKeys[0]) signingInfo := getSigningInfo(t, port, pkString) tests.WaitForHeight(4, port) require.Equal(t, true, signingInfo.IndexOffset > 0) require.Equal(t, time.Unix(0, 0).UTC(), signingInfo.JailedUntil) require.Equal(t, true, signingInfo.MissedBlocksCounter == 0) } func TestProposalsQuery(t *testing.T) { kb, err := keys.NewKeyBaseFromDir(InitClientHome(t, "")) require.NoError(t, err) addrs, seeds, names, passwords := CreateAddrs(t, kb, 2) cleanup, _, _, port := InitializeTestLCD(t, 1, []sdk.AccAddress{addrs[0], addrs[1]}, true) defer cleanup() depositParam := getDepositParam(t, port) halfMinDeposit := depositParam.MinDeposit.AmountOf(sdk.DefaultBondDenom).QuoRaw(2) getVotingParam(t, port) getTallyingParam(t, port) // Addr1 proposes (and deposits) proposals #1 and #2 resultTx := 
doSubmitProposal(t, port, seeds[0], names[0], passwords[0], addrs[0], halfMinDeposit, fees) var proposalID1 uint64 cdc.MustUnmarshalBinaryLengthPrefixed(resultTx.Data, &proposalID1) tests.WaitForHeight(resultTx.Height+1, port) resultTx = doSubmitProposal(t, port, seeds[0], names[0], passwords[0], addrs[0], halfMinDeposit, fees) var proposalID2 uint64 cdc.MustUnmarshalBinaryLengthPrefixed(resultTx.Data, &proposalID2) tests.WaitForHeight(resultTx.Height+1, port) // Addr2 proposes (and deposits) proposals #3 resultTx = doSubmitProposal(t, port, seeds[1], names[1], passwords[1], addrs[1], halfMinDeposit, fees) var proposalID3 uint64 cdc.MustUnmarshalBinaryLengthPrefixed(resultTx.Data, &proposalID3) tests.WaitForHeight(resultTx.Height+1, port) // Addr2 deposits on proposals #2 & #3 resultTx = doDeposit(t, port, seeds[1], names[1], passwords[1], addrs[1], proposalID2, halfMinDeposit, fees) tests.WaitForHeight(resultTx.Height+1, port) resultTx = doDeposit(t, port, seeds[1], names[1], passwords[1], addrs[1], proposalID3, halfMinDeposit, fees) tests.WaitForHeight(resultTx.Height+1, port) // check deposits match proposal and individual deposits deposits := getDeposits(t, port, proposalID1) require.Len(t, deposits, 1) deposit := getDeposit(t, port, proposalID1, addrs[0]) require.Equal(t, deposit, deposits[0]) deposits = getDeposits(t, port, proposalID2) require.Len(t, deposits, 2) deposit = getDeposit(t, port, proposalID2, addrs[0]) require.True(t, deposit.Equals(deposits[0])) deposit = getDeposit(t, port, proposalID2, addrs[1]) require.True(t, deposit.Equals(deposits[1])) deposits = getDeposits(t, port, proposalID3) require.Len(t, deposits, 1) deposit = getDeposit(t, port, proposalID3, addrs[1]) require.Equal(t, deposit, deposits[0]) // increasing the amount of the deposit should update the existing one depositTokens := sdk.TokensFromTendermintPower(1) resultTx = doDeposit(t, port, seeds[0], names[0], passwords[0], addrs[0], proposalID1, depositTokens, fees) tests.WaitForHeight(resultTx.Height+1, port) deposits = getDeposits(t, port, proposalID1) require.Len(t, deposits, 1) // Only proposals #1 should be in Deposit Period proposals := getProposalsFilterStatus(t, port, gov.StatusDepositPeriod) require.Len(t, proposals, 1) require.Equal(t, proposalID1, proposals[0].ProposalID) // Only proposals #2 and #3 should be in Voting Period proposals = getProposalsFilterStatus(t, port, gov.StatusVotingPeriod) require.Len(t, proposals, 2) require.Equal(t, proposalID2, proposals[0].ProposalID) require.Equal(t, proposalID3, proposals[1].ProposalID) // Addr1 votes on proposals #2 & #3 resultTx = doVote(t, port, seeds[0], names[0], passwords[0], addrs[0], proposalID2, "Yes", fees) tests.WaitForHeight(resultTx.Height+1, port) resultTx = doVote(t, port, seeds[0], names[0], passwords[0], addrs[0], proposalID3, "Yes", fees) tests.WaitForHeight(resultTx.Height+1, port) // Addr2 votes on proposal #3 resultTx = doVote(t, port, seeds[1], names[1], passwords[1], addrs[1], proposalID3, "Yes", fees) tests.WaitForHeight(resultTx.Height+1, port) // Test query all proposals proposals = getProposalsAll(t, port) require.Equal(t, proposalID1, (proposals[0]).ProposalID) require.Equal(t, proposalID2, (proposals[1]).ProposalID) require.Equal(t, proposalID3, (proposals[2]).ProposalID) // Test query deposited by addr1 proposals = getProposalsFilterDepositor(t, port, addrs[0]) require.Equal(t, proposalID1, (proposals[0]).ProposalID) // Test query deposited by addr2 proposals = getProposalsFilterDepositor(t, port, addrs[1]) 
require.Equal(t, proposalID2, (proposals[0]).ProposalID) require.Equal(t, proposalID3, (proposals[1]).ProposalID) // Test query voted by addr1 proposals = getProposalsFilterVoter(t, port, addrs[0]) require.Equal(t, proposalID2, (proposals[0]).ProposalID) require.Equal(t, proposalID3, (proposals[1]).ProposalID) // Test query voted by addr2 proposals = getProposalsFilterVoter(t, port, addrs[1]) require.Equal(t, proposalID3, (proposals[0]).ProposalID) // Test query voted and deposited by addr1 proposals = getProposalsFilterVoterDepositor(t, port, addrs[0], addrs[0]) require.Equal(t, proposalID2, (proposals[0]).ProposalID) // Test query votes on Proposal 2 votes := getVotes(t, port, proposalID2) require.Len(t, votes, 1) require.Equal(t, addrs[0], votes[0].Voter) // Test query votes on Proposal 3 votes = getVotes(t, port, proposalID3) require.Len(t, votes, 2) require.True(t, addrs[0].String() == votes[0].Voter.String() || addrs[0].String() == votes[1].Voter.String()) require.True(t, addrs[1].String() == votes[0].Voter.String() || addrs[1].String() == votes[1].Voter.String()) } func TestSlashingGetParams(t *testing.T) { cleanup, _, _, port := InitializeTestLCD(t, 1, []sdk.AccAddress{}, true) defer cleanup() res, body := Request(t, port, "GET", "/slashing/parameters", nil) require.Equal(t, http.StatusOK, res.StatusCode, body) var params slashing.Params err := cdc.UnmarshalJSON([]byte(body), &params) require.NoError(t, err) } func TestDistributionGetParams(t *testing.T) { cleanup, _, _, port := InitializeTestLCD(t, 1, []sdk.AccAddress{}, true) defer cleanup() res, body := Request(t, port, "GET", "/distribution/parameters", nil) require.Equal(t, http.StatusOK, res.StatusCode, body) require.NoError(t, cdc.UnmarshalJSON([]byte(body), &dclcommon.PrettyParams{})) } func TestDistributionFlow(t *testing.T) { kb, err := keys.NewKeyBaseFromDir(InitClientHome(t, "")) require.NoError(t, err) addr, seed := CreateAddr(t, name1, pw, kb) cleanup, _, valAddrs, port := InitializeTestLCD(t, 1, []sdk.AccAddress{addr}, true) defer cleanup() valAddr := valAddrs[0] operAddr := sdk.AccAddress(valAddr) var rewards sdk.DecCoins res, body := Request(t, port, "GET", fmt.Sprintf("/distribution/validators/%s/outstanding_rewards", valAddr), nil) require.Equal(t, http.StatusOK, res.StatusCode, body) require.NoError(t, cdc.UnmarshalJSON([]byte(body), &rewards)) var valDistInfo distrrest.ValidatorDistInfo res, body = Request(t, port, "GET", "/distribution/validators/"+valAddr.String(), nil) require.Equal(t, http.StatusOK, res.StatusCode, body) require.NoError(t, cdc.UnmarshalJSON([]byte(body), &valDistInfo)) require.Equal(t, valDistInfo.OperatorAddress.String(), sdk.AccAddress(valAddr).String()) // Delegate some coins delTokens := sdk.TokensFromTendermintPower(60) resultTx := doDelegate(t, port, name1, pw, addr, valAddr, delTokens, fees) tests.WaitForHeight(resultTx.Height+1, port) require.Equal(t, uint32(0), resultTx.Code) // send some coins _, resultTx = doTransfer(t, port, seed, name1, memo, pw, addr, fees) tests.WaitForHeight(resultTx.Height+5, port) require.Equal(t, uint32(0), resultTx.Code) // Query outstanding rewards changed res, body = Request(t, port, "GET", fmt.Sprintf("/distribution/validators/%s/outstanding_rewards", valAddr), nil) require.Equal(t, http.StatusOK, res.StatusCode, body) require.NoError(t, cdc.UnmarshalJSON([]byte(body), &rewards)) // Query validator distribution info res, body = Request(t, port, "GET", "/distribution/validators/"+valAddr.String(), nil) require.Equal(t, http.StatusOK, res.StatusCode, 
body) require.NoError(t, cdc.UnmarshalJSON([]byte(body), &valDistInfo)) // Query validator's rewards res, body = Request(t, port, "GET", fmt.Sprintf("/distribution/validators/%s/rewards", valAddr), nil) require.Equal(t, http.StatusOK, res.StatusCode, body) require.NoError(t, cdc.UnmarshalJSON([]byte(body), &rewards)) // Query self-delegation res, body = Request(t, port, "GET", fmt.Sprintf("/distribution/delegators/%s/rewards/%s", operAddr, valAddr), nil) require.Equal(t, http.StatusOK, res.StatusCode, body) require.NoError(t, cdc.UnmarshalJSON([]byte(body), &rewards)) // Query delegation res, body = Request(t, port, "GET", fmt.Sprintf("/distribution/delegators/%s/rewards/%s", addr, valAddr), nil) require.Equal(t, http.StatusOK, res.StatusCode, body) require.NoError(t, cdc.UnmarshalJSON([]byte(body), &rewards)) // Query delegator's rewards total res, body = Request(t, port, "GET", fmt.Sprintf("/distribution/delegators/%s/rewards", operAddr), nil) require.Equal(t, http.StatusOK, res.StatusCode, body) require.NoError(t, cdc.UnmarshalJSON([]byte(body), &rewards)) // Query delegator's withdrawal address var withdrawAddr string res, body = Request(t, port, "GET", fmt.Sprintf("/distribution/delegators/%s/withdraw_address", operAddr), nil) require.Equal(t, http.StatusOK, res.StatusCode, body) require.NoError(t, cdc.UnmarshalJSON([]byte(body), &withdrawAddr)) require.Equal(t, operAddr.String(), withdrawAddr) // Withdraw delegator's rewards resultTx = doWithdrawDelegatorAllRewards(t, port, seed, name1, pw, addr, fees) require.Equal(t, uint32(0), resultTx.Code) }
[ "\"VERSION\"" ]
[]
[ "VERSION" ]
[]
["VERSION"]
go
1
0
pkg/controller/jmeter_integration_test.go
package controller import ( "context" "log" "os" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" batchV1 "k8s.io/api/batch/v1" coreV1 "k8s.io/api/core/v1" metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" _ "github.com/hellofresh/kangal/pkg/backends/jmeter" "github.com/hellofresh/kangal/pkg/core/waitfor" loadTestV1 "github.com/hellofresh/kangal/pkg/kubernetes/apis/loadtest/v1" clientSetV "github.com/hellofresh/kangal/pkg/kubernetes/generated/clientset/versioned" ) var ( clientSet clientSetV.Clientset ) func TestMain(m *testing.M) { clientSet = kubeTestClient() res := m.Run() os.Exit(res) } func TestIntegrationJMeter(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode") } t.Log() distributedPods := int32(2) loadtestType := loadTestV1.LoadTestTypeJMeter expectedLoadtestName := "loadtest-jmeter-integration" testFile := "testdata/valid/loadtest.jmx" envVars := map[string]string{"foo": "bar", "foo2": "bar2"} testData := "testdata/valid/testdata.csv" client := kubeClient(t) err := CreateLoadTest(clientSet, distributedPods, expectedLoadtestName, testFile, testData, envVars, loadtestType) require.NoError(t, err) err = WaitLoadTest(clientSet, expectedLoadtestName) require.NoError(t, err) t.Cleanup(func() { err := DeleteLoadTest(clientSet, expectedLoadtestName, t.Name()) assert.NoError(t, err) }) t.Run("Checking the name of created loadtest", func(t *testing.T) { createdName, err := GetLoadTest(clientSet, expectedLoadtestName) require.NoError(t, err) assert.Equal(t, expectedLoadtestName, createdName) }) var jmeterNamespace *coreV1.Namespace t.Run("Checking namespace is created", func(t *testing.T) { for i := 0; i < 5; i++ { jmeterNamespace, _ = client.CoreV1().Namespaces().Get(context.Background(), expectedLoadtestName, metaV1.GetOptions{}) if jmeterNamespace != nil { break } } // loadtest namespace name is equal to loadtest name require.Equal(t, expectedLoadtestName, jmeterNamespace.Name) }) var cm *coreV1.ConfigMapList t.Run("Checking JMeter configmap is created", func(t *testing.T) { for i := 0; i < 5; i++ { cm, _ = client.CoreV1().ConfigMaps(jmeterNamespace.Name).List(context.Background(), metaV1.ListOptions{LabelSelector: "app=hf-jmeter"}) if len(cm.Items) != 0 { break } } assert.NotEmpty(t, cm.Items) }) t.Run("Checking env vars secret is created and not empty", func(t *testing.T) { var secretsCount int var secretItem coreV1.Secret for i := 0; i < 5; i++ { secrets, err := GetSecret(client.CoreV1(), jmeterNamespace.Name) require.NoError(t, err, "Could not get namespace secrets") if len(secrets.Items) == 1 { secretsCount = len(secrets.Items) secretItem = secrets.Items[0] break } } assert.Equal(t, 1, secretsCount) assert.NotEmpty(t, secretItem) }) t.Run("Checking all worker pods are created", func(t *testing.T) { var podsCount int for i := 0; i < 5; i++ { pods, _ := GetDistributedPods(client.CoreV1(), jmeterNamespace.Name) if len(pods.Items) == int(distributedPods) { podsCount = len(pods.Items) break } } assert.Equal(t, distributedPods, int32(podsCount)) }) t.Run("Checking master pod is created", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) defer cancel() watchObj, _ := client.CoreV1().Pods(expectedLoadtestName).Watch(context.Background(), metaV1.ListOptions{ LabelSelector: "app=loadtest-master", }) watchEvent, err := waitfor.ResourceWithContext(ctx, watchObj, (waitfor.Condition{}).PodRunning) require.NoError(t, err) pod := watchEvent.Object.(*coreV1.Pod) 
assert.Equal(t, coreV1.PodRunning, pod.Status.Phase) }) t.Run("Checking Job is created", func(t *testing.T) { var job *batchV1.Job for i := 0; i < 5; i++ { job, err = client.BatchV1().Jobs(jmeterNamespace.Name).Get(context.Background(), "loadtest-master", metaV1.GetOptions{}) require.NoError(t, err, "Could not get job") if job.Name == "loadtest-master" { break } } assert.Equal(t, "loadtest-master", job.Name) }) t.Run("Checking loadtest is in Running state", func(t *testing.T) { var phase string phase, err = GetLoadTestPhase(clientSet, expectedLoadtestName) require.NoError(t, err) assert.Equal(t, string(loadTestV1.LoadTestRunning), phase) }) } func kubeTestClient() clientSetV.Clientset { if len(os.Getenv("KUBECONFIG")) == 0 { log.Println("Skipping kube config builder, KUBECONFIG is missed") return clientSetV.Clientset{} } config, err := BuildConfig() if err != nil { log.Fatal(err) } clientSet, err := clientSetV.NewForConfig(config) if err != nil { log.Fatal(err) } return *clientSet } func kubeClient(t *testing.T) *kubernetes.Clientset { t.Helper() config, err := BuildConfig() require.NoError(t, err) cSet, err := kubernetes.NewForConfig(config) require.NoError(t, err) return cSet }
[ "\"KUBECONFIG\"" ]
[]
[ "KUBECONFIG" ]
[]
["KUBECONFIG"]
go
1
0
test/integration/test_pulsar_embedded_containers.py
import os

from galaxy_test.base.populators import (
    DatasetPopulator,
)
from galaxy_test.driver import integration_util

from .test_containerized_jobs import (
    disable_dependency_resolution,
    MulledJobTestCases,
    skip_if_container_type_unavailable,
)

SCRIPT_DIRECTORY = os.path.abspath(os.path.dirname(__file__))
EMBEDDED_PULSAR_JOB_CONFIG_FILE_SINGULARITY = os.path.join(SCRIPT_DIRECTORY, "embedded_pulsar_singularity_job_conf.yml")
EMBEDDED_PULSAR_JOB_CONFIG_FILE_DOCKER = os.path.join(SCRIPT_DIRECTORY, "embedded_pulsar_docker_job_conf.yml")


class BaseEmbeddedPulsarContainerIntegrationTestCase(integration_util.IntegrationTestCase):
    framework_tool_and_types = True

    @classmethod
    def handle_galaxy_config_kwds(cls, config):
        cls.jobs_directory = cls._test_driver.mkdtemp()
        config["jobs_directory"] = cls.jobs_directory
        config["job_config_file"] = cls.job_config_file
        disable_dependency_resolution(config)

    def setUp(self):
        super(BaseEmbeddedPulsarContainerIntegrationTestCase, self).setUp()
        self.dataset_populator = DatasetPopulator(self.galaxy_interactor)
        self.history_id = self.dataset_populator.new_history()

    @classmethod
    def setUpClass(cls):
        skip_if_container_type_unavailable(cls)
        super(BaseEmbeddedPulsarContainerIntegrationTestCase, cls).setUpClass()


class EmbeddedSingularityPulsarIntegrationTestCase(BaseEmbeddedPulsarContainerIntegrationTestCase, MulledJobTestCases):
    # singularity passes $HOME by default
    default_container_home_dir = os.environ.get('HOME', '/')
    job_config_file = EMBEDDED_PULSAR_JOB_CONFIG_FILE_SINGULARITY
    container_type = 'singularity'


class EmbeddedDockerPulsarIntegrationTestCase(BaseEmbeddedPulsarContainerIntegrationTestCase, MulledJobTestCases):
    job_config_file = EMBEDDED_PULSAR_JOB_CONFIG_FILE_DOCKER
    container_type = 'docker'
[]
[]
[ "HOME" ]
[]
["HOME"]
python
1
0
djpsilobus/bin/delete_item.py
# -*- coding: utf-8 -*-

import os, sys

# env
sys.path.append('/usr/lib/python2.7/dist-packages/')
sys.path.append('/usr/lib/python2.7/')
sys.path.append('/usr/local/lib/python2.7/dist-packages/')
sys.path.append('/data2/django_1.9/')
sys.path.append('/data2/django_projects/')
sys.path.append('/data2/django_third/')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djpsilobus.settings")

from django.conf import settings
from djpsilobus.core.dspace import Manager

import argparse

# set up command-line options
desc = """
Accepts as input an ID of Item that will be deleted
"""

parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
    "-i", "--item", required=True, help="Item ID", dest="item"
)


def main():
    """
    Using the DSpace REST API, delete an item based on ID
    """

    manager = Manager()
    jason = manager.request(
        "items/{}".format(item), "delete"
    )
    print(jason)


######################
# shell command line
######################

if __name__ == "__main__":
    args = parser.parse_args()
    item = args.item
    sys.exit(main())
[]
[]
[]
[]
[]
python
0
0
current-service/svc/server/cli/cli.go
// Code generated by truss. DO NOT EDIT.
// Rerunning truss will overwrite this file.
// Version: d5b3153b9f
// Version Date: Thu Jul 27 18:20:46 UTC 2017

package cli

import (
    "flag"
    "fmt"
    "os"

    "github.com/zaquestion/current/current-service/svc/server"
)

// Config will be populated by ENV vars on initialization
// flags will overwrite ENV vars in Config on flag.Parse()
var Config server.Config

func init() {
    flag.StringVar(&Config.DebugAddr, "debug.addr", ":5060", "Debug and metrics listen address")
    flag.StringVar(&Config.HTTPAddr, "http.addr", ":5050", "HTTP listen address")
    flag.StringVar(&Config.GRPCAddr, "grpc.addr", ":5040", "gRPC (HTTP) listen address")

    // Use environment variables, if set. Flags have priority over Env vars.
    if addr := os.Getenv("DEBUG_ADDR"); addr != "" {
        Config.DebugAddr = addr
    }
    if port := os.Getenv("PORT"); port != "" {
        Config.HTTPAddr = fmt.Sprintf(":%s", port)
    }
    if addr := os.Getenv("HTTP_ADDR"); addr != "" {
        Config.HTTPAddr = addr
    }
    if addr := os.Getenv("GRPC_ADDR"); addr != "" {
        Config.GRPCAddr = addr
    }
}
[ "\"DEBUG_ADDR\"", "\"PORT\"", "\"HTTP_ADDR\"", "\"GRPC_ADDR\"" ]
[]
[ "PORT", "DEBUG_ADDR", "HTTP_ADDR", "GRPC_ADDR" ]
[]
["PORT", "DEBUG_ADDR", "HTTP_ADDR", "GRPC_ADDR"]
go
4
0
rrs/tools/common.py
# Common functionality for RRS tools. # # Copyright (C) 2015 Intel Corporation # Author: Anibal Limon <[email protected]> # # Licensed under the MIT license, see COPYING.MIT for details import sys import os import logging class DryRunRollbackException(Exception): pass def common_setup(): sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '../../'))) sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '../../layerindex'))) # We don't want git to prompt for any passwords (e.g. when accessing renamed/hidden github repos) os.environ['SSH_ASKPASS'] = '' os.environ['GIT_ASKPASS'] = '' os.environ['GIT_TERMINAL_PROMPT'] = '0' def get_logger(name, settings): from logging.handlers import RotatingFileHandler logger = logging.getLogger(name) formatter = logging.Formatter("%(asctime)s: %(levelname)s: %(message)s") handler = logging.StreamHandler() handler.setFormatter(formatter) logger.addHandler(handler) filename = os.path.join(settings.TOOLS_LOG_DIR, name) maxBytes = 8388608 # 8MB handler = RotatingFileHandler(filename, maxBytes=maxBytes) handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(logging.INFO) return logger def get_pv_type(pv): pv_type = '' if '+git' in pv: pv_type = 'git' elif '+svn' in pv: pv_type = 'svn' elif '+hg' in pv: pv_type = 'hg' return pv_type def get_recipe_files(layerdir): from layerindex import recipeparse sublayer_dirs = [] for root, dirs, files in os.walk(layerdir): for d in dirs: if os.path.exists(os.path.join(root, d, 'conf', 'layer.conf')): sublayer_dirs.append(os.path.join(root, d) + os.sep) recipe_files = [] for root, dirs, files in os.walk(layerdir): if '.git' in dirs: dirs.remove('.git') # remove sublayer dirs for d in dirs[:]: fullpath = os.path.join(root, d) + os.sep if fullpath in sublayer_dirs: dirs.remove(d) for f in files: fullpath = os.path.join(root, f) (typename, _, filename) = recipeparse.detect_file_type(fullpath, layerdir + os.sep) if typename == 'recipe': recipe_files.append(fullpath) return recipe_files def load_recipes(layerbranch, bitbakepath, fetchdir, settings, logger, recipe_files=None, nocheckout=False): from layerindex import recipeparse from bb.fetch import FetchError try: (tinfoil, tempdir) = recipeparse.init_parser(settings, layerbranch.branch, bitbakepath, nocheckout=nocheckout, logger=logger) except recipeparse.RecipeParseError as e: logger.error(str(e)) sys.exit(1) layer = layerbranch.layer urldir = str(layer.get_fetch_dir()) repodir = os.path.join(fetchdir, urldir) layerdir = os.path.join(repodir, str(layerbranch.vcs_subdir)) d = recipeparse.setup_layer(tinfoil.config_data, fetchdir, layerdir, layer, layerbranch, logger) if recipe_files is None: recipe_files = get_recipe_files(layerdir) recipes = [] for fn in recipe_files: try: logger.debug('Parsing %s' % fn) if hasattr(tinfoil, 'parse_recipe_file'): data = tinfoil.parse_recipe_file(fn, appends=False, config_data=d) else: data = bb.cache.Cache.loadDataFull(str(fn), [], d) try: pv = data.getVar('PV', True) except FetchError: data.setVar('SRCPV', '') recipes.append(data) except Exception as e: logger.error("%s: branch %s couldn't be parsed, %s" \ % (layerbranch, fn, str(e))) continue return (tinfoil, d, recipes, tempdir) # XXX: Copied from oe-core recipeutils to avoid import errors. def get_recipe_pv_without_srcpv(pv, uri_type): """ Get PV without SRCPV common in SCM's for now only support git. Returns tuple with pv, prefix and suffix. 
""" import re pfx = '' sfx = '' if uri_type == 'git': git_regex = re.compile("(?P<pfx>(v|r|))(?P<ver>((\d+[\.\-_]*)+))(?P<sfx>(\+|)(git|)(r|)(AUTOINC|)(\+|))(?P<rev>.*)") m = git_regex.match(pv) if m: pv = m.group('ver') pfx = m.group('pfx') sfx = m.group('sfx') else: regex = re.compile("(?P<pfx>(v|r|))(?P<ver>((\d+[\.\-_]*)+))") m = regex.match(pv) if m: pv = m.group('ver') pfx = m.group('pfx') return (pv, pfx, sfx)
[]
[]
[ "SSH_ASKPASS", "GIT_ASKPASS", "GIT_TERMINAL_PROMPT" ]
[]
["SSH_ASKPASS", "GIT_ASKPASS", "GIT_TERMINAL_PROMPT"]
python
3
0
scripts/deta-micros/quotes-api/main.py
import json
import os
import random

from deta import App, Deta
from dotenv import load_dotenv
from fastapi import FastAPI

load_dotenv('env')

app = App(FastAPI())
deta = Deta(os.environ.get('PROJECT_KEY'))
db = deta.Base('quotes')

db_appends = {
    'posted_facebook_at': 'NULL',
    'posted_telegram_at': 'NULL',
    'posted_twitter_at': 'NULL',
}


def db_get_quotes(query):
    return list(db.fetch(query))[0]


def db_cleanup_quotes(quotes):
    for quote in quotes:
        for k in db_appends.keys():
            quote.pop(k, None)
    return quotes


@app.get('/')
def http_index():
    quotes = db_get_quotes({})
    return db_cleanup_quotes(quotes)


@app.get('/random')
def http_random():
    quotes = db_get_quotes({})
    quotes = db_cleanup_quotes(quotes)
    return random.choice(quotes)


@app.get('/random-ready')
def http_random_ready():
    query = db_appends
    quotes = db_get_quotes(query)
    quotes = db_cleanup_quotes(quotes)
    return random.choice(quotes)


@app.lib.run(action='refresh-database')
def run_refresh_database(event):
    # delete old data
    # TODO: find a method drop database
    for quote in db_get_quotes({}):
        db.delete(quote.get('key'))

    # insert new ones
    with open('quotes.min.json') as file:
        quotes = json.load(file)
        for quote in quotes:
            quote.update(db_appends)
            db.put(quote)

    return '✔ success'
[]
[]
[ "PROJECT_KEY" ]
[]
["PROJECT_KEY"]
python
1
0
runtime/bots/irc/main.py
import socket
import random
import os
import requests
import re
import github
import minecraft
import string
import sys

HOST = "irc.libera.chat"
PORT = 6667
NICK = "DoveBot"
#PASSWORD = os.getenv("PASSWORD")
CHANNEL = "#dovegaming"
SERVER = ""
readbuffer = ""


def send(message):
    s.send(message)
    print(message)


s = socket.socket()
s.connect((HOST, PORT))
send(bytes("NICK %s\r\n" % NICK, "UTF-8"))
send(bytes("USER %s %s %s :%s\r\n" % (NICK, NICK, NICK, NICK), "UTF-8"))
#s.send(bytes("PRIVMSG NickServ regain {} {}\r\n".format(NICK, PASSWORD), "UTF-8"))
#s.send(bytes("PRIVMSG NickServ identify {} {}\r\n".format(NICK, PASSWORD), "UTF-8"))
send(bytes("JOIN {}\r\n".format(CHANNEL), "UTF-8"))
#s.send(bytes("PRIVMSG NickServ :identify {}\r\n".format(PASSWORD), "UTF-8"))

readbuffer = readbuffer + s.recv(1024).decode("UTF-8")
temp = str.split(readbuffer, "\n")
readbuffer = temp.pop()
for line in temp:
    SERVER = str.rstrip(line)[1:].split()[0]
    print(str.rstrip(line))

while 1:
    readbuffer = readbuffer + s.recv(1024).decode("UTF-8")
    temp = str.split(readbuffer, "\n")
    readbuffer = temp.pop()
    for line in temp:
        print(str.rstrip(line))
        message = str.rstrip(line).split(" PRIVMSG {} :".format(CHANNEL))
        if "PING" in line: send("PONG :{}\r\n".format(SERVER).encode("utf-8"))
        msg = message[-1]
        tokens = msg.split()
        if msg == "$hello": send("PRIVMSG {} :Hello!\r\n".format(CHANNEL).encode("utf-8"))
        if msg == "$ping": send("PRIVMSG {} :Pong!\r\n".format(CHANNEL).encode("utf-8"))
        if msg == "$random": send("PRIVMSG {} :{}\r\n".format(CHANNEL, random.randint(0, 100)).encode("utf-8"))
        if msg.startswith("$youtube "):
            html = requests.get("https://www.youtube.com/results?search_query=" + " ".join(msg.split()[1:])).content
            video_ids = re.findall(r"watch\?v=(\S{11})", html.decode())
            send("PRIVMSG {} :https://www.youtube.com/watch?v={}\r\n".format(CHANNEL, video_ids[0]).encode("utf-8"))
        #if msg.startswith("$google "): send("PRIVMSG {} :{}\r\n".format(CHANNEL, googlesearch.search(" ".join(msg.split()[1:]))[0]).encode("utf-8"))
        #if msg.startswith("$wolfram "): send("PRIVMSG {} :{}\r\n".format(CHANNEL, wolfram.get(" ".join(msg.split()[1:]))).encode("utf-8"))
        if msg.startswith("$github "):
            if tokens[1] == "url": send("PRIVMSG {} :https://github.com/{}/{}\r\n".format(CHANNEL, tokens[2], tokens[3]).encode("utf-8"))
            if tokens[1] == "issues": send("PRIVMSG {} :#{}: {}\r\n".format(CHANNEL, tokens[4], github.get_issue_title(tokens[2], tokens[3], tokens[4])).encode("utf-8"))
        if msg == "$server": send("PRIVMSG {} :{}\r\n".format(CHANNEL, minecraft.get()).encode("utf-8"))
        if msg == "$help": send("PRIVMSG {} :Avalible commands: $hello, $ping, $youtube, $google, $github, $wolfram.\r\n".format(CHANNEL).encode("utf-8"))
        if msg.startswith("$help "):
            if tokens[1] == "hello": send("PRIVMSG {} :Syntax: $hello Action: Says \"Hello!\".\r\n".format(CHANNEL).encode("utf-8"))
            if tokens[1] == "ping": send("PRIVMSG {} :Syntax: $ping Action: Says \"Ping!\".\r\n".format(CHANNEL).encode("utf-8"))
            if tokens[1] == "youtube": send("PRIVMSG {} :Syntax: $youtube <keyword> Action: Sends the URL of a YouTube video matching the keyword given.\r\n".format(CHANNEL).encode("utf-8"))
            #if tokens[1] == "google": send("PRIVMSG {} :Syntax: $google <keyword> Action: Sends the URL of a google search with the keyword given\r\n".format(CHANNEL).encode("utf-8"))
            if tokens[1] == "github": send("PRIVMSG {} :Syntax: $github <topic> <user> <repo> <number> Action: Returns data about a github repo.\r\n".format(CHANNEL).encode("utf-8"))
            #if tokens[1] == "wolfram": send("PRIVMSG {} :Syntax: $wolfram <query> Action: Asks Wolfram|Alpha the query given.\r\n".format(CHANNEL).encode("utf-8"))
[]
[]
[ "PASSWORD" ]
[]
["PASSWORD"]
python
1
0
pkg/csi/service/wcpguest/controller.go
/* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package wcpguest import ( "fmt" "net/http" "path/filepath" "strconv" "strings" "sync" "time" "github.com/container-storage-interface/spec/lib/go/csi" "github.com/davecgh/go-spew/spew" "github.com/fsnotify/fsnotify" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/common/log" vmoperatortypes "github.com/vmware-tanzu/vm-operator-api/api/v1alpha1" "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" "sigs.k8s.io/controller-runtime/pkg/client" cnsoperatorv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v2/pkg/apis/cnsoperator" cnsfileaccessconfigv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v2/pkg/apis/cnsoperator/cnsfileaccessconfig/v1alpha1" cnsconfig "sigs.k8s.io/vsphere-csi-driver/v2/pkg/common/config" csifault "sigs.k8s.io/vsphere-csi-driver/v2/pkg/common/fault" "sigs.k8s.io/vsphere-csi-driver/v2/pkg/common/prometheus" "sigs.k8s.io/vsphere-csi-driver/v2/pkg/csi/service/common" "sigs.k8s.io/vsphere-csi-driver/v2/pkg/csi/service/common/commonco" "sigs.k8s.io/vsphere-csi-driver/v2/pkg/csi/service/logger" csitypes "sigs.k8s.io/vsphere-csi-driver/v2/pkg/csi/types" k8s "sigs.k8s.io/vsphere-csi-driver/v2/pkg/kubernetes" ) var ( // controllerCaps represents the capability of controller service controllerCaps = []csi.ControllerServiceCapability_RPC_Type{ csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME, csi.ControllerServiceCapability_RPC_EXPAND_VOLUME, } // virtualMachineLock is used for handling race conditions during concurrent Attach/Detach calls virtualMachineLock = &sync.Mutex{} ) type controller struct { supervisorClient clientset.Interface restClientConfig *rest.Config vmOperatorClient client.Client cnsOperatorClient client.Client vmWatcher *cache.ListWatch supervisorNamespace string tanzukubernetesClusterUID string } // New creates a CNS controller func New() csitypes.CnsController { return &controller{} } // Init is initializing controller struct func (c *controller) Init(config *cnsconfig.Config, version string) error { ctx, log := logger.GetNewContextWithLogger() log.Infof("Initializing WCPGC CSI controller") var err error // connect to the CSI controller in supervisor cluster c.supervisorNamespace, err = cnsconfig.GetSupervisorNamespace(ctx) if err != nil { return err } c.tanzukubernetesClusterUID = config.GC.TanzuKubernetesClusterUID c.restClientConfig = k8s.GetRestClientConfigForSupervisor(ctx, config.GC.Endpoint, config.GC.Port) c.supervisorClient, err = k8s.NewSupervisorClient(ctx, c.restClientConfig) if err != nil { log.Errorf("failed to create supervisorClient. 
Error: %+v", err) return err } c.vmOperatorClient, err = k8s.NewClientForGroup(ctx, c.restClientConfig, vmoperatortypes.GroupName) if err != nil { log.Errorf("failed to create vmOperatorClient. Error: %+v", err) return err } c.cnsOperatorClient, err = k8s.NewClientForGroup(ctx, c.restClientConfig, cnsoperatorv1alpha1.GroupName) if err != nil { log.Errorf("failed to create cnsOperatorClient. Error: %+v", err) return err } c.vmWatcher, err = k8s.NewVirtualMachineWatcher(ctx, c.restClientConfig, c.supervisorNamespace) if err != nil { log.Errorf("failed to create vmWatcher. Error: %+v", err) return err } pvcsiConfigPath := common.GetConfigPath(ctx) watcher, err := fsnotify.NewWatcher() if err != nil { log.Errorf("failed to create fsnotify watcher. err=%v", err) return err } go func() { for { log.Debugf("Waiting for event on fsnotify watcher") select { case event, ok := <-watcher.Events: if !ok { return } log.Debugf("fsnotify event: %q", event.String()) if event.Op&fsnotify.Remove == fsnotify.Remove { for { reloadConfigErr := c.ReloadConfiguration() if reloadConfigErr == nil { log.Infof("Successfully reloaded configuration from: %q", pvcsiConfigPath) break } log.Errorf("failed to reload configuration. will retry again in 5 seconds. err: %+v", reloadConfigErr) time.Sleep(5 * time.Second) } } case err, ok := <-watcher.Errors: if !ok { return } log.Errorf("fsnotify error: %+v", err) } log.Debugf("fsnotify event processed") } }() cfgDirPath := filepath.Dir(pvcsiConfigPath) log.Infof("Adding watch on path: %q", cfgDirPath) err = watcher.Add(cfgDirPath) if err != nil { log.Errorf("failed to watch on path: %q. err=%v", cfgDirPath, err) return err } log.Infof("Adding watch on path: %q", cnsconfig.DefaultpvCSIProviderPath) err = watcher.Add(cnsconfig.DefaultpvCSIProviderPath) if err != nil { log.Errorf("failed to watch on path: %q. err=%v", cnsconfig.DefaultpvCSIProviderPath, err) return err } // Go module to keep the metrics http server running all the time. go func() { prometheus.CsiInfo.WithLabelValues(version).Set(1) for { log.Info("Starting the http server to expose Prometheus metrics..") http.Handle("/metrics", promhttp.Handler()) err = http.ListenAndServe(":2112", nil) if err != nil { log.Warnf("Http server that exposes the Prometheus exited with err: %+v", err) } log.Info("Restarting http server to expose Prometheus metrics..") } }() return nil } // ReloadConfiguration reloads configuration from the secret, and reset restClientConfig, supervisorClient // and re-create vmOperatorClient using new config func (c *controller) ReloadConfiguration() error { ctx, log := logger.GetNewContextWithLogger() log.Info("Reloading Configuration") cfg, err := common.GetConfig(ctx) if err != nil { log.Errorf("failed to read config. Error: %+v", err) return err } if cfg != nil { c.restClientConfig = k8s.GetRestClientConfigForSupervisor(ctx, cfg.GC.Endpoint, cfg.GC.Port) c.supervisorClient, err = k8s.NewSupervisorClient(ctx, c.restClientConfig) if err != nil { log.Errorf("failed to create supervisorClient. Error: %+v", err) return err } log.Infof("successfully re-created supervisorClient using updated configuration") c.vmOperatorClient, err = k8s.NewClientForGroup(ctx, c.restClientConfig, vmoperatortypes.GroupName) if err != nil { log.Errorf("failed to create vmOperatorClient. Error: %+v", err) return err } c.vmWatcher, err = k8s.NewVirtualMachineWatcher(ctx, c.restClientConfig, c.supervisorNamespace) if err != nil { log.Errorf("failed to create vmWatcher. 
Error: %+v", err) return err } log.Infof("successfully re-created vmOperatorClient using updated configuration") c.cnsOperatorClient, err = k8s.NewClientForGroup(ctx, c.restClientConfig, cnsoperatorv1alpha1.GroupName) if err != nil { log.Errorf("failed to create cnsOperatorClient. Error: %+v", err) return err } } return nil } // CreateVolume is creating CNS Volume using volume request specified // in CreateVolumeRequest func (c *controller) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) ( *csi.CreateVolumeResponse, error) { start := time.Now() volumeType := prometheus.PrometheusUnknownVolumeType createVolumeInternal := func() ( *csi.CreateVolumeResponse, string, error) { ctx = logger.NewContextWithLogger(ctx) log := logger.GetLogger(ctx) log.Infof("CreateVolume: called with args %+v", *req) //TODO: If the err is returned by invoking CNS API, then faultType should be // populated by the underlying layer. // If the request failed due to validate the request, "csi.fault.InvalidArgument" will be return. // If thr reqeust failed due to object not found, "csi.fault.NotFound" will be return. // For all other cases, the faultType will be set to "csi.fault.Internal" for now. // Later we may need to define different csi faults. err := validateGuestClusterCreateVolumeRequest(ctx, req) if err != nil { msg := fmt.Sprintf("Validation for CreateVolume Request: %+v has failed. Error: %+v", *req, err) log.Error(msg) return nil, csifault.CSIInvalidArgumentFault, err } isFileVolumeRequest := common.IsFileVolumeRequest(ctx, req.GetVolumeCapabilities()) if isFileVolumeRequest { volumeType = prometheus.PrometheusFileVolumeType } else { volumeType = prometheus.PrometheusBlockVolumeType } // Get PVC name and disk size for the supervisor cluster // We use default prefix 'pvc-' for pvc created in the guest cluster, it is mandatory. supervisorPVCName := c.tanzukubernetesClusterUID + "-" + req.Name[4:] // Volume Size - Default is 10 GiB volSizeBytes := int64(common.DefaultGbDiskSize * common.GbInBytes) if req.GetCapacityRange() != nil && req.GetCapacityRange().RequiredBytes != 0 { volSizeBytes = int64(req.GetCapacityRange().GetRequiredBytes()) } volSizeMB := int64(common.RoundUpSize(volSizeBytes, common.MbInBytes)) // Get supervisorStorageClass and accessMode var supervisorStorageClass string for param := range req.Parameters { paramName := strings.ToLower(param) if paramName == common.AttributeSupervisorStorageClass { supervisorStorageClass = req.Parameters[param] } } accessMode := req.GetVolumeCapabilities()[0].GetAccessMode().GetMode() pvc, err := c.supervisorClient.CoreV1().PersistentVolumeClaims(c.supervisorNamespace).Get( ctx, supervisorPVCName, metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { diskSize := strconv.FormatInt(volSizeMB, 10) + "Mi" claim := getPersistentVolumeClaimSpecWithStorageClass(supervisorPVCName, c.supervisorNamespace, diskSize, supervisorStorageClass, getAccessMode(accessMode)) log.Debugf("PVC claim spec is %+v", spew.Sdump(claim)) pvc, err = c.supervisorClient.CoreV1().PersistentVolumeClaims(c.supervisorNamespace).Create( ctx, claim, metav1.CreateOptions{}) if err != nil { msg := fmt.Sprintf("failed to create pvc with name: %s on namespace: %s in supervisorCluster. Error: %+v", supervisorPVCName, c.supervisorNamespace, err) log.Error(msg) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } } else { msg := fmt.Sprintf("failed to get pvc with name: %s on namespace: %s from supervisorCluster. 
Error: %+v", supervisorPVCName, c.supervisorNamespace, err) log.Error(msg) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } } isBound, err := isPVCInSupervisorClusterBound(ctx, c.supervisorClient, pvc, time.Duration(getProvisionTimeoutInMin(ctx))*time.Minute) if !isBound { msg := fmt.Sprintf("failed to create volume on namespace: %s in supervisor cluster. Error: %+v", c.supervisorNamespace, err) log.Error(msg) eventList, err := c.supervisorClient.CoreV1().Events(c.supervisorNamespace).List(ctx, metav1.ListOptions{FieldSelector: "involvedObject.name=" + pvc.Name}) if err != nil { log.Errorf("Unable to fetch events for pvc %q/%q from supervisor cluster with err: %+v", c.supervisorNamespace, pvc.Name, err) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } log.Errorf("Last observed events on the pvc %q/%q in supervisor cluster: %+v", c.supervisorNamespace, pvc.Name, spew.Sdump(eventList.Items)) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } attributes := make(map[string]string) if commonco.ContainerOrchestratorUtility.IsFSSEnabled(ctx, common.FileVolume) && isFileVolumeRequest { attributes[common.AttributeDiskType] = common.DiskTypeFileVolume } else { attributes[common.AttributeDiskType] = common.DiskTypeBlockVolume } resp := &csi.CreateVolumeResponse{ Volume: &csi.Volume{ VolumeId: supervisorPVCName, CapacityBytes: int64(volSizeMB * common.MbInBytes), VolumeContext: attributes, }, } return resp, "", nil } resp, faultType, err := createVolumeInternal() log := logger.GetLogger(ctx) log.Debugf("createVolumeInternal: returns fault %q", faultType) if err != nil { prometheus.CsiControlOpsHistVec.WithLabelValues(volumeType, prometheus.PrometheusCreateVolumeOpType, prometheus.PrometheusFailStatus).Observe(time.Since(start).Seconds()) } else { prometheus.CsiControlOpsHistVec.WithLabelValues(volumeType, prometheus.PrometheusCreateVolumeOpType, prometheus.PrometheusPassStatus).Observe(time.Since(start).Seconds()) } return resp, err } // DeleteVolume is deleting CNS Volume specified in DeleteVolumeRequest func (c *controller) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) ( *csi.DeleteVolumeResponse, error) { start := time.Now() volumeType := prometheus.PrometheusUnknownVolumeType deleteVolumeInternal := func() ( *csi.DeleteVolumeResponse, string, error) { ctx = logger.NewContextWithLogger(ctx) log := logger.GetLogger(ctx) log.Infof("DeleteVolume: called with args: %+v", *req) //TODO: If the err is returned by invoking CNS API, then faultType should be // populated by the underlying layer. // If the request failed due to validate the request, "csi.fault.InvalidArgument" will be return. // If thr reqeust failed due to object not found, "csi.fault.NotFound" will be return. // For all other cases, the faultType will be set to "csi.fault.Internal" for now. // Later we may need to define different csi faults. var err error err = validateGuestClusterDeleteVolumeRequest(ctx, req) if err != nil { msg := fmt.Sprintf("Validation for Delete Volume Request: %+v has failed. Error: %+v", *req, err) log.Error(msg) return nil, csifault.CSIInvalidArgumentFault, err } // Retrieve Supervisor PVC svPVC, err := c.supervisorClient.CoreV1().PersistentVolumeClaims(c.supervisorNamespace).Get( ctx, req.VolumeId, metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { log.Debugf("PVC: %q not found in the Supervisor cluster. 
Assuming the volume is already deleted.", req.VolumeId) return &csi.DeleteVolumeResponse{}, "", nil } msg := fmt.Sprintf("failed to retrieve supervisor PVC %q in %q namespace. Error: %+v", req.VolumeId, c.supervisorNamespace, err) log.Error(msg) return nil, csifault.CSIInternalFault, status.Error(codes.Internal, msg) } volumeType = prometheus.PrometheusBlockVolumeType for _, accessMode := range svPVC.Spec.AccessModes { if accessMode == corev1.ReadWriteMany || accessMode == corev1.ReadOnlyMany { volumeType = prometheus.PrometheusFileVolumeType } } err = c.supervisorClient.CoreV1().PersistentVolumeClaims(c.supervisorNamespace).Delete( ctx, req.VolumeId, *metav1.NewDeleteOptions(0)) if err != nil { if errors.IsNotFound(err) { log.Debugf("PVC: %q not found in the Supervisor cluster. Assuming this volume to be deleted.", req.VolumeId) return &csi.DeleteVolumeResponse{}, "", nil } msg := fmt.Sprintf("DeleteVolume Request: %+v has failed. Error: %+v", *req, err) log.Error(msg) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } log.Infof("DeleteVolume: Volume deleted successfully. VolumeID: %q", req.VolumeId) return &csi.DeleteVolumeResponse{}, "", nil } resp, faultType, err := deleteVolumeInternal() log := logger.GetLogger(ctx) log.Debugf("deleteVolumeInternal: returns fault %q for volume %q", faultType, req.VolumeId) if err != nil { prometheus.CsiControlOpsHistVec.WithLabelValues(volumeType, prometheus.PrometheusDeleteVolumeOpType, prometheus.PrometheusFailStatus).Observe(time.Since(start).Seconds()) } else { prometheus.CsiControlOpsHistVec.WithLabelValues(volumeType, prometheus.PrometheusDeleteVolumeOpType, prometheus.PrometheusPassStatus).Observe(time.Since(start).Seconds()) } return resp, err } // ControllerPublishVolume attaches a volume to the Node VM. // volume id and node name is retrieved from ControllerPublishVolumeRequest func (c *controller) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) ( *csi.ControllerPublishVolumeResponse, error) { start := time.Now() volumeType := prometheus.PrometheusUnknownVolumeType controllerPublishVolumeInternal := func() ( *csi.ControllerPublishVolumeResponse, string, error) { ctx = logger.NewContextWithLogger(ctx) log := logger.GetLogger(ctx) log.Infof("ControllerPublishVolume: called with args %+v", *req) //TODO: If the err is returned by invoking CNS API, then faultType should be // populated by the underlying layer. // If the request failed due to validate the request, "csi.fault.InvalidArgument" will be return. // If thr reqeust failed due to object not found, "csi.fault.NotFound" will be return. // For all other cases, the faultType will be set to "csi.fault.Internal" for now. // Later we may need to define different csi faults. // Check whether the request is for a block or file volume isFileVolumeRequest := common.IsFileVolumeRequest(ctx, []*csi.VolumeCapability{req.GetVolumeCapability()}) err := validateGuestClusterControllerPublishVolumeRequest(ctx, req) if err != nil { msg := fmt.Sprintf("Validation for PublishVolume Request: %+v has failed. 
Error: %v", *req, err) log.Error(msg) return nil, csifault.CSIInvalidArgumentFault, status.Errorf(codes.Internal, msg) } // File volumes support if isFileVolumeRequest { volumeType = prometheus.PrometheusFileVolumeType // Check the feature state for file volume support if !commonco.ContainerOrchestratorUtility.IsFSSEnabled(ctx, common.FileVolume) { // Feature is disabled on the cluster return nil, csifault.CSIInternalFault, status.Error(codes.InvalidArgument, "File volume not supported.") } return controllerPublishForFileVolume(ctx, req, c) } volumeType = prometheus.PrometheusBlockVolumeType // Block volumes support return controllerPublishForBlockVolume(ctx, req, c) } resp, faultType, err := controllerPublishVolumeInternal() if err != nil { log.Debugf("controllerPublishVolumeInternal: returns fault %q for volume %q", faultType, req.VolumeId) prometheus.CsiControlOpsHistVec.WithLabelValues(volumeType, prometheus.PrometheusAttachVolumeOpType, prometheus.PrometheusFailStatus).Observe(time.Since(start).Seconds()) } else { prometheus.CsiControlOpsHistVec.WithLabelValues(volumeType, prometheus.PrometheusAttachVolumeOpType, prometheus.PrometheusPassStatus).Observe(time.Since(start).Seconds()) } return resp, err } // controllerPublishForBlockVolume is a helper mthod for handling ControllerPublishVolume request for Block volumes func controllerPublishForBlockVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest, c *controller) ( *csi.ControllerPublishVolumeResponse, string, error) { log := logger.GetLogger(ctx) virtualMachine := &vmoperatortypes.VirtualMachine{} vmKey := types.NamespacedName{ Namespace: c.supervisorNamespace, Name: req.NodeId, } var err error if err = c.vmOperatorClient.Get(ctx, vmKey, virtualMachine); err != nil { msg := fmt.Sprintf("failed to get VirtualMachines for the node: %q. Error: %+v", req.NodeId, err) log.Error(msg) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } // Check if volume is already present in the virtualMachine.Spec.Volumes var isVolumePresentInSpec, isVolumeAttached bool var diskUUID string for _, volume := range virtualMachine.Spec.Volumes { if volume.PersistentVolumeClaim != nil && volume.Name == req.VolumeId { log.Infof("Volume %q is already present in the virtualMachine.Spec.Volumes", volume.Name) isVolumePresentInSpec = true break } } timeoutSeconds := int64(getAttacherTimeoutInMin(ctx) * 60) // if volume is present in the virtualMachine.Spec.Volumes check if volume's status is attached and DiskUuid is set if isVolumePresentInSpec { for _, volume := range virtualMachine.Status.Volumes { if volume.Name == req.VolumeId && volume.Attached && volume.DiskUuid != "" { diskUUID = volume.DiskUuid isVolumeAttached = true log.Infof("Volume %q is already attached in the virtualMachine.Spec.Volumes. Disk UUID: %q", volume.Name, volume.DiskUuid) break } } } else { timeout := time.Now().Add(time.Duration(timeoutSeconds) * time.Second) for { // Volume is not present in the virtualMachine.Spec.Volumes, so adding // volume in the spec and patching virtualMachine instance. 
vmvolumes := vmoperatortypes.VirtualMachineVolume{ Name: req.VolumeId, PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ ClaimName: req.VolumeId, }, } virtualMachineLock.Lock() virtualMachine.Spec.Volumes = append(virtualMachine.Spec.Volumes, vmvolumes) err := c.vmOperatorClient.Update(ctx, virtualMachine) virtualMachineLock.Unlock() if err == nil || time.Now().After(timeout) { break } if err := c.vmOperatorClient.Get(ctx, vmKey, virtualMachine); err != nil { msg := fmt.Sprintf("failed to get VirtualMachines for the node: %q. Error: %+v", req.NodeId, err) log.Error(msg) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } log.Debugf("Found virtualMachine instance for node: %q", req.NodeId) } if err != nil { msg := fmt.Sprintf("Time out to update VirtualMachines %q with Error: %+v", virtualMachine.Name, err) log.Error(msg) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } } // volume is not attached, so wait until volume is attached and DiskUuid is set if !isVolumeAttached { watchVirtualMachine, err := c.vmWatcher.Watch(metav1.ListOptions{ FieldSelector: fields.SelectorFromSet(fields.Set{"metadata.name": string(virtualMachine.Name)}).String(), ResourceVersion: virtualMachine.ResourceVersion, TimeoutSeconds: &timeoutSeconds, }) if err != nil { msg := fmt.Sprintf("failed to watch virtualMachine %q with Error: %v", virtualMachine.Name, err) log.Error(msg) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } defer watchVirtualMachine.Stop() // Watch all update events made on VirtualMachine instance until volume.DiskUuid is set for diskUUID == "" { // blocking wait for update event log.Debugf("waiting for update on virtualmachine: %q", virtualMachine.Name) event := <-watchVirtualMachine.ResultChan() vm, ok := event.Object.(*vmoperatortypes.VirtualMachine) if !ok { msg := fmt.Sprintf("Watch on virtualmachine %q timed out", virtualMachine.Name) log.Error(msg) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } if vm.Name != virtualMachine.Name { log.Debugf("Observed vm name: %q, expecting vm name: %q, volumeID: %q", vm.Name, virtualMachine.Name, req.VolumeId) continue } log.Debugf("observed update on virtualmachine: %q. checking if disk UUID is set for volume: %q ", virtualMachine.Name, req.VolumeId) for _, volume := range vm.Status.Volumes { if volume.Name == req.VolumeId { if volume.Attached && volume.DiskUuid != "" && volume.Error == "" { diskUUID = volume.DiskUuid log.Infof("observed disk UUID %q is set for the volume %q on virtualmachine %q", volume.DiskUuid, volume.Name, vm.Name) } else { if volume.Error != "" { msg := fmt.Sprintf("observed Error: %q is set on the volume %q on virtualmachine %q", volume.Error, volume.Name, vm.Name) log.Error(msg) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } } break } } if diskUUID == "" { log.Debugf("disk UUID is not set for volume: %q ", req.VolumeId) } } log.Debugf("disk UUID %v is set for the volume: %q ", diskUUID, req.VolumeId) } //return PublishContext with diskUUID of the volume attached to node. 
publishInfo := make(map[string]string) publishInfo[common.AttributeDiskType] = common.DiskTypeBlockVolume publishInfo[common.AttributeFirstClassDiskUUID] = common.FormatDiskUUID(diskUUID) resp := &csi.ControllerPublishVolumeResponse{ PublishContext: publishInfo, } log.Infof("ControllerPublishVolume: Volume attached successfully %q", req.VolumeId) return resp, "", nil } // controllerPublishForFileVolume is a helper mthod for handling ControllerPublishVolume request for File volumes func controllerPublishForFileVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest, c *controller) ( *csi.ControllerPublishVolumeResponse, string, error) { log := logger.GetLogger(ctx) // Build the CnsFileAccessConfig instance name and namespace cnsFileAccessConfigInstance := &cnsfileaccessconfigv1alpha1.CnsFileAccessConfig{} cnsFileAccessConfigInstanceName := req.NodeId + "-" + req.VolumeId cnsFileAccessConfigInstanceKey := types.NamespacedName{ Namespace: c.supervisorNamespace, Name: cnsFileAccessConfigInstanceName, } // Check whether the CnsFileAccessConfig instance exist in the supervisor cluster if err := c.cnsOperatorClient.Get(ctx, cnsFileAccessConfigInstanceKey, cnsFileAccessConfigInstance); err != nil { if !errors.IsNotFound(err) { // Get() on the CnsFileAccessConfig instance failed with different error msg := fmt.Sprintf("failed to get CnsFileAccessConfig instance: %q/%q. Error: %+v", c.supervisorNamespace, cnsFileAccessConfigInstance.Name, err) log.Error(msg) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } // Create the CnsFileAccessConfig instance since it is not found cnsFileAccessConfigInstance = &cnsfileaccessconfigv1alpha1.CnsFileAccessConfig{ ObjectMeta: metav1.ObjectMeta{ Name: cnsFileAccessConfigInstanceName, Namespace: c.supervisorNamespace}, Spec: cnsfileaccessconfigv1alpha1.CnsFileAccessConfigSpec{ VMName: req.NodeId, PvcName: req.VolumeId, }, } log.Debugf("Creating CnsFileAccessConfig instance: %+v", cnsFileAccessConfigInstance) log.Infof("Creating CnsFileAccessConfig instance with name: %q", cnsFileAccessConfigInstance.Name) if err := c.cnsOperatorClient.Create(ctx, cnsFileAccessConfigInstance); err != nil { msg := fmt.Sprintf("failed to create cnsFileAccessConfig: %q/%q. Error: %v", c.supervisorNamespace, cnsFileAccessConfigInstance.Name, err) log.Error(msg) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } } log.Debugf("Found CnsFileAccessConfig: %q/%q", c.supervisorNamespace, cnsFileAccessConfigInstance.Name) if cnsFileAccessConfigInstance.DeletionTimestamp != nil { // When deletionTimestamp is set, CnsOperator is in the process of // removing access for this IP. When that operation is successful, the // instance will be deleted. In a subsequent retry, a new instance will // be created. msg := fmt.Sprintf("cnsFileAccessConfigInstance %q/%q is getting deleted. 
"+ "A new instance will be created in the subsequent ControllerPublishVolume request", c.supervisorNamespace, cnsFileAccessConfigInstance.Name) log.Error(msg) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } publishInfo := make(map[string]string) // Verify if the CnsFileAccessConfig instance has status with done set to true and error is empty if cnsFileAccessConfigInstance.Status.Done && cnsFileAccessConfigInstance.Status.Error == "" { for key, value := range cnsFileAccessConfigInstance.Status.AccessPoints { if key == common.Nfsv4AccessPointKey { publishInfo[common.Nfsv4AccessPoint] = value break } } publishInfo[common.AttributeDiskType] = common.DiskTypeFileVolume resp := &csi.ControllerPublishVolumeResponse{ PublishContext: publishInfo, } log.Infof("ControllerPublishVolume: Volume %q attached successfully on the node: %q", req.VolumeId, req.NodeId) return resp, "", nil } cnsFileAccessConfigWatcher, err := k8s.NewCnsFileAccessConfigWatcher(ctx, c.restClientConfig, c.supervisorNamespace) if err != nil { msg := fmt.Sprintf("failed to create cnsFileAccessConfigWatcher. Error: %+v", err) log.Error(msg) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } // Attacher timeout, default is set to 4 minutes timeoutSeconds := int64(getAttacherTimeoutInMin(ctx) * 60) // Adding watch on the CnsFileAccessConfig instance to register for updates watchCnsFileAccessConfig, err := cnsFileAccessConfigWatcher.Watch(metav1.ListOptions{ FieldSelector: fields.SelectorFromSet(fields.Set{"metadata.name": cnsFileAccessConfigInstance.Name}).String(), ResourceVersion: cnsFileAccessConfigInstance.ResourceVersion, TimeoutSeconds: &timeoutSeconds, }) if err != nil { msg := fmt.Sprintf("failed to watch cnsfileaccessconfig %q with Error: %v", cnsFileAccessConfigInstance.Name, err) log.Error(msg) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } defer watchCnsFileAccessConfig.Stop() var cnsFileAccessConfigInstanceErr string // Watch all update events made on CnsFileAccessConfig instance until accessPoints is set for { log.Debugf("Waiting for update on cnsfileaccessconfigs: %q", cnsFileAccessConfigInstance.Name) event := <-watchCnsFileAccessConfig.ResultChan() cnsfileaccessconfig, ok := event.Object.(*cnsfileaccessconfigv1alpha1.CnsFileAccessConfig) if !ok { msg := fmt.Sprintf("Watch on cnsfileaccessconfig instance %q timed out. 
Last seen error on the instance=%q", cnsFileAccessConfigInstance.Name, cnsFileAccessConfigInstanceErr) log.Error(msg) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } if cnsfileaccessconfig.Name != cnsFileAccessConfigInstanceName { log.Debugf("Observed cnsFileAccessConfig instance name: %q, expecting cnsFileAccessConfig instance name: %q", cnsfileaccessconfig.Name, cnsFileAccessConfigInstanceName) continue } // Check if SV PVC Name match with VolumeId from the request if cnsfileaccessconfig.Spec.PvcName != req.VolumeId { log.Debugf("Observed SV PVC Name: %q, expecting SV PVC Name: %q", cnsfileaccessconfig.Spec.PvcName, req.VolumeId) continue } // Check if VM name in the cnsfileaccessconfig instance match with NodeId from the request if cnsfileaccessconfig.Spec.VMName != req.NodeId { log.Debugf("Observed vm name: %q, expecting vm name: %q", cnsfileaccessconfig.Spec.VMName, req.NodeId) continue } log.Debugf("Observed an update on cnsfileaccessconfig: %+v", cnsfileaccessconfig) if cnsfileaccessconfig.Status.Done && cnsfileaccessconfig.Status.Error == "" && cnsfileaccessconfig.DeletionTimestamp == nil { // Check if the updated instance has the AccessPoints for key, value := range cnsfileaccessconfig.Status.AccessPoints { if key == common.Nfsv4AccessPointKey { publishInfo[common.AttributeDiskType] = common.DiskTypeFileVolume publishInfo[common.Nfsv4AccessPoint] = value break } } if _, ok := publishInfo[common.Nfsv4AccessPoint]; ok { log.Debugf("Found Nfsv4AccessPoint in publishInfo. publishInfo=%+v", publishInfo) break } } cnsFileAccessConfigInstanceErr = cnsfileaccessconfig.Status.Error } resp := &csi.ControllerPublishVolumeResponse{ PublishContext: publishInfo, } log.Infof("ControllerPublishVolume: Volume %q attached successfully on the node: %q", req.VolumeId, req.NodeId) return resp, "", nil } // ControllerUnpublishVolume detaches a volume from the Node VM. // volume id and node name is retrieved from ControllerUnpublishVolumeRequest func (c *controller) ControllerUnpublishVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) ( *csi.ControllerUnpublishVolumeResponse, error) { start := time.Now() volumeType := prometheus.PrometheusUnknownVolumeType controllerUnpublishVolumeInternal := func() ( *csi.ControllerUnpublishVolumeResponse, string, error) { ctx = logger.NewContextWithLogger(ctx) log := logger.GetLogger(ctx) log.Infof("ControllerUnpublishVolume: called with args %+v", *req) //TODO: If the err is returned by invoking CNS API, then faultType should be // populated by the underlying layer. // If the request failed due to validate the request, "csi.fault.InvalidArgument" will be return. // If thr reqeust failed due to object not found, "csi.fault.NotFound" will be return. // For all other cases, the faultType will be set to "csi.fault.Internal" for now. // Later we may need to define different csi faults. err := validateGuestClusterControllerUnpublishVolumeRequest(ctx, req) if err != nil { msg := fmt.Sprintf("Validation for UnpublishVolume Request: %+v has failed. Error: %v", *req, err) log.Error(msg) return nil, csifault.CSIInvalidArgumentFault, err } // Retrieve Supervisor PVC svPVC, err := c.supervisorClient.CoreV1().PersistentVolumeClaims(c.supervisorNamespace).Get( ctx, req.VolumeId, metav1.GetOptions{}) if err != nil { msg := fmt.Sprintf("failed to retrieve supervisor PVC %q in %q namespace. 
Error: %+v", req.VolumeId, c.supervisorNamespace, err) log.Error(msg) return nil, csifault.CSIInternalFault, status.Error(codes.Internal, msg) } var isFileVolume bool for _, accessMode := range svPVC.Spec.AccessModes { if accessMode == corev1.ReadWriteMany || accessMode == corev1.ReadOnlyMany { isFileVolume = true } } if isFileVolume { volumeType = prometheus.PrometheusFileVolumeType if commonco.ContainerOrchestratorUtility.IsFSSEnabled(ctx, common.FileVolume) { return controllerUnpublishForFileVolume(ctx, req, c) } // Feature is disabled on the cluster return nil, csifault.CSIInvalidArgumentFault, status.Error(codes.InvalidArgument, "File volume not supported.") } volumeType = prometheus.PrometheusBlockVolumeType return controllerUnpublishForBlockVolume(ctx, req, c) } resp, faultType, err := controllerUnpublishVolumeInternal() log := logger.GetLogger(ctx) log.Debugf("controllerUnpublishVolumeInternal: returns fault %q for volume %q", faultType, req.VolumeId) if err != nil { prometheus.CsiControlOpsHistVec.WithLabelValues(volumeType, prometheus.PrometheusDetachVolumeOpType, prometheus.PrometheusFailStatus).Observe(time.Since(start).Seconds()) } else { prometheus.CsiControlOpsHistVec.WithLabelValues(volumeType, prometheus.PrometheusDetachVolumeOpType, prometheus.PrometheusPassStatus).Observe(time.Since(start).Seconds()) } return resp, err } // controllerUnpublishForBlockVolume is helper method to handle ControllerPublishVolume for Block volumes func controllerUnpublishForBlockVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest, c *controller) ( *csi.ControllerUnpublishVolumeResponse, string, error) { log := logger.GetLogger(ctx) // TODO: Investigate if a race condition can exist here between multiple detach calls to the same volume. // If yes, implement some locking mechanism virtualMachine := &vmoperatortypes.VirtualMachine{} vmKey := types.NamespacedName{ Namespace: c.supervisorNamespace, Name: req.NodeId, } var err error if err := c.vmOperatorClient.Get(ctx, vmKey, virtualMachine); err != nil { if errors.IsNotFound(err) { log.Infof("VirtualMachine %s/%s not found. Assuming volume %s was detached.", c.supervisorNamespace, req.NodeId, req.VolumeId) return &csi.ControllerUnpublishVolumeResponse{}, "", nil } msg := fmt.Sprintf("failed to get VirtualMachines for node: %q. Error: %+v", req.NodeId, err) log.Error(msg) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } log.Debugf("Found VirtualMachine for node: %q.", req.NodeId) timeoutSeconds := int64(getAttacherTimeoutInMin(ctx) * 60) timeout := time.Now().Add(time.Duration(timeoutSeconds) * time.Second) for { for index, volume := range virtualMachine.Spec.Volumes { if volume.Name == req.VolumeId { log.Debugf("Removing volume %q from VirtualMachine %q", volume.Name, virtualMachine.Name) virtualMachineLock.Lock() virtualMachine.Spec.Volumes = append(virtualMachine.Spec.Volumes[:index], virtualMachine.Spec.Volumes[index+1:]...) err = c.vmOperatorClient.Update(ctx, virtualMachine) virtualMachineLock.Unlock() break } } if err == nil || time.Now().After(timeout) { break } if err := c.vmOperatorClient.Get(ctx, vmKey, virtualMachine); err != nil { if errors.IsNotFound(err) { log.Infof("VirtualMachine %s/%s not found. Assuming volume %s was detached.", c.supervisorNamespace, req.NodeId, req.VolumeId) return &csi.ControllerUnpublishVolumeResponse{}, "", nil } msg := fmt.Sprintf("failed to get VirtualMachines for node: %q. 
Error: %+v", req.NodeId, err) log.Error(msg) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } log.Debugf("Found VirtualMachine for node: %q.", req.NodeId) } if err != nil { msg := fmt.Sprintf("Time out to update VirtualMachines %q with Error: %+v", virtualMachine.Name, err) log.Error(msg) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } // Watch virtual machine object and wait for volume name to be removed from the status field. watchVirtualMachine, err := c.vmWatcher.Watch(metav1.ListOptions{ FieldSelector: fields.SelectorFromSet(fields.Set{"metadata.name": string(virtualMachine.Name)}).String(), ResourceVersion: virtualMachine.ResourceVersion, TimeoutSeconds: &timeoutSeconds, }) if err != nil { msg := fmt.Sprintf("failed to watch VirtualMachine %q with Error: %v", virtualMachine.Name, err) log.Error(msg) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } if watchVirtualMachine == nil { msg := fmt.Sprintf("watchVirtualMachine for %q is nil", virtualMachine.Name) log.Error(msg) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } defer watchVirtualMachine.Stop() // Loop until the volume is removed from virtualmachine status isVolumeDetached := false for !isVolumeDetached { log.Debugf("Waiting for update on VirtualMachine: %q", virtualMachine.Name) // Block on update events event := <-watchVirtualMachine.ResultChan() vm, ok := event.Object.(*vmoperatortypes.VirtualMachine) if !ok { msg := fmt.Sprintf("Watch on virtualmachine %q timed out", virtualMachine.Name) log.Error(msg) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } if vm.Name != virtualMachine.Name { log.Debugf("Observed vm name: %q, expecting vm name: %q, volumeID: %q", vm.Name, virtualMachine.Name, req.VolumeId) continue } switch event.Type { case watch.Added, watch.Modified: isVolumeDetached = true for _, volume := range vm.Status.Volumes { if volume.Name == req.VolumeId { log.Debugf("Volume %q still exists in VirtualMachine %q status", volume.Name, virtualMachine.Name) isVolumeDetached = false if volume.Attached && volume.Error != "" { msg := fmt.Sprintf("failed to detach volume %q from VirtualMachine %q with Error: %v", volume.Name, virtualMachine.Name, volume.Error) log.Error(msg) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } break } } case watch.Deleted: log.Infof("VirtualMachine %s/%s deleted. Assuming volume %s was detached.", c.supervisorNamespace, req.NodeId, req.VolumeId) isVolumeDetached = true } } log.Infof("ControllerUnpublishVolume: Volume detached successfully %q", req.VolumeId) return &csi.ControllerUnpublishVolumeResponse{}, "", nil } // controllerUnpublishForFileVolume is helper method to handle ControllerPublishVolume for File volumes func controllerUnpublishForFileVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest, c *controller) ( *csi.ControllerUnpublishVolumeResponse, string, error) { log := logger.GetLogger(ctx) // Adding watch on the CnsFileAccessConfig instance to register for updates cnsFileAccessConfigWatcher, err := k8s.NewCnsFileAccessConfigWatcher(ctx, c.restClientConfig, c.supervisorNamespace) if err != nil { msg := fmt.Sprintf("failed to create cnsFileAccessConfigWatcher. 
Error: %+v", err) log.Error(msg) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } cnsFileAccessConfigInstance := &cnsfileaccessconfigv1alpha1.CnsFileAccessConfig{} cnsFileAccessConfigInstanceName := req.NodeId + "-" + req.VolumeId cnsFileAccessConfigInstanceKey := types.NamespacedName{ Namespace: c.supervisorNamespace, Name: cnsFileAccessConfigInstanceName, } if err := c.cnsOperatorClient.Get(ctx, cnsFileAccessConfigInstanceKey, cnsFileAccessConfigInstance); err != nil { if errors.IsNotFound(err) { log.Infof("ControllerUnpublishVolume: CnsFileAccessConfig instance %q/%q not found in supervisor cluster. "+ "Returning success for the detach operation", c.supervisorNamespace, cnsFileAccessConfigInstanceName) return &csi.ControllerUnpublishVolumeResponse{}, "", nil } msg := fmt.Sprintf("failed to get CnsFileAccessConfig instance: %q/%q. Error: %+v", c.supervisorNamespace, cnsFileAccessConfigInstanceName, err) log.Error(msg) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } // Attach/Detach timeout, default is set to 4 minutes timeoutSeconds := int64(getAttacherTimeoutInMin(ctx) * 60) watchCnsFileAccessConfig, err := cnsFileAccessConfigWatcher.Watch(metav1.ListOptions{ FieldSelector: fields.SelectorFromSet(fields.Set{"metadata.name": cnsFileAccessConfigInstanceName}).String(), ResourceVersion: cnsFileAccessConfigInstance.ResourceVersion, TimeoutSeconds: &timeoutSeconds, }) if err != nil { msg := fmt.Sprintf("failed to watch cnsFileAccessConfig instance %q/%q with Error: %v", c.supervisorNamespace, cnsFileAccessConfigInstanceName, err) log.Error(msg) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } if err := c.cnsOperatorClient.Delete(ctx, &cnsfileaccessconfigv1alpha1.CnsFileAccessConfig{ ObjectMeta: metav1.ObjectMeta{ Name: cnsFileAccessConfigInstanceName, Namespace: c.supervisorNamespace, }, }); err != nil { if errors.IsNotFound(err) { log.Infof("ControllerUnpublishVolume: CnsFileAccessConfig instance %q/%q already deleted. "+ "Returning success for the detach operation", c.supervisorNamespace, cnsFileAccessConfigInstanceName) return &csi.ControllerUnpublishVolumeResponse{}, "", nil } msg := fmt.Sprintf("failed to delete CnsFileAccessConfig instance: %q/%q. Error: %+v", c.supervisorNamespace, cnsFileAccessConfigInstanceName, err) log.Error(msg) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } defer watchCnsFileAccessConfig.Stop() var cnsFileAccessConfigInstanceErr string isCnsFileAccessConfigInstanceDeleted := false // Watch all update events made on CnsFileAccessConfig instance until Deleted // event or a timeout occurs on the cnsfileaccessconfig instance. for !isCnsFileAccessConfigInstanceDeleted { log.Debugf("waiting for update on cnsfileaccessconfigs: %q", cnsFileAccessConfigInstanceName) event := <-watchCnsFileAccessConfig.ResultChan() cnsfileaccessconfig, ok := event.Object.(*cnsfileaccessconfigv1alpha1.CnsFileAccessConfig) if !ok { msg := fmt.Sprintf("Watch on cnsfileaccessconfig instance %q/%q timed out. 
Last seen error on the instance=%q", c.supervisorNamespace, cnsFileAccessConfigInstanceName, cnsFileAccessConfigInstanceErr) log.Error(msg) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } if cnsfileaccessconfig.Name != cnsFileAccessConfigInstanceName { log.Debugf("Observed CnsFileAccessConfig instance name: %q, expecting CnsFileAccessConfig instance name: %q", cnsfileaccessconfig.Name, cnsFileAccessConfigInstanceName) continue } // Check if SV PVC Name ain the cnsfileaccessconfig instance match with VolumeId from the request if cnsfileaccessconfig.Spec.PvcName != req.VolumeId { log.Debugf("Observed SV PVC Name: %q, expecting SV PVC Name: %q", cnsfileaccessconfig.Spec.PvcName, req.VolumeId) continue } // Check if VM name in the cnsfileaccessconfig instance match with NodeId from the request if cnsfileaccessconfig.Spec.VMName != req.NodeId { log.Debugf("Observed vm name: %q, expecting vm name: %q", cnsfileaccessconfig.Spec.VMName, req.NodeId) continue } if event.Type == "DELETED" { isCnsFileAccessConfigInstanceDeleted = true } cnsFileAccessConfigInstanceErr = cnsfileaccessconfig.Status.Error } log.Infof("ControllerUnpublishVolume: Volume detached successfully %q", req.VolumeId) return &csi.ControllerUnpublishVolumeResponse{}, "", nil } // ControllerExpandVolume expands a volume. // volume id and size is retrieved from ControllerExpandVolumeRequest func (c *controller) ControllerExpandVolume(ctx context.Context, req *csi.ControllerExpandVolumeRequest) ( *csi.ControllerExpandVolumeResponse, error) { start := time.Now() volumeType := prometheus.PrometheusUnknownVolumeType controllerExpandVolumeInternal := func() ( *csi.ControllerExpandVolumeResponse, string, error) { ctx = logger.NewContextWithLogger(ctx) log := logger.GetLogger(ctx) if !commonco.ContainerOrchestratorUtility.IsFSSEnabled(ctx, common.VolumeExtend) { msg := "ExpandVolume feature is disabled on the cluster." log.Warn(msg) return nil, csifault.CSIUnimplementedFault, status.Error(codes.Unimplemented, msg) } log.Infof("ControllerExpandVolume: called with args %+v", *req) //TODO: If the err is returned by invoking CNS API, then faultType should be // populated by the underlying layer. // If the request failed due to validate the request, "csi.fault.InvalidArgument" will be return. // If thr reqeust failed due to object not found, "csi.fault.NotFound" will be return. // For all other cases, the faultType will be set to "csi.fault.Internal" for now. // Later we may need to define different csi faults. err := validateGuestClusterControllerExpandVolumeRequest(ctx, req) if err != nil { return nil, csifault.CSIInvalidArgumentFault, err } // Only block volume expand is allowed. Update this when file volume expand is also supported. volumeType = prometheus.PrometheusBlockVolumeType volumeID := req.GetVolumeId() volSizeBytes := int64(req.GetCapacityRange().GetRequiredBytes()) if !commonco.ContainerOrchestratorUtility.IsFSSEnabled(ctx, common.OnlineVolumeExtend) { vmList := &vmoperatortypes.VirtualMachineList{} err = c.vmOperatorClient.List(ctx, vmList, client.InNamespace(c.supervisorNamespace)) if err != nil { msg := fmt.Sprintf("failed to list virtualmachines with error: %+v", err) log.Error(msg) return nil, csifault.CSIInternalFault, status.Error(codes.Internal, msg) } for _, vmInstance := range vmList.Items { for _, vmVolume := range vmInstance.Status.Volumes { if vmVolume.Name == volumeID && vmVolume.Attached { msg := fmt.Sprintf("failed to expand volume: %q. Volume is attached to pod. 
"+ "Only offline volume expansion is supported", volumeID) log.Error(msg) return nil, csifault.CSIInvalidArgumentFault, status.Error(codes.FailedPrecondition, msg) } } } } // Retrieve Supervisor PVC svPVC, err := c.supervisorClient.CoreV1().PersistentVolumeClaims(c.supervisorNamespace).Get( ctx, volumeID, metav1.GetOptions{}) if err != nil { msg := fmt.Sprintf("failed to retrieve supervisor PVC %q in %q namespace. Error: %+v", volumeID, c.supervisorNamespace, err) log.Error(msg) return nil, csifault.CSIInternalFault, status.Error(codes.Internal, msg) } waitForSvPvcCondition := true gcPvcRequestSize := resource.NewQuantity(volSizeBytes, resource.Format(resource.BinarySI)) svPvcRequestSize := svPVC.Spec.Resources.Requests[corev1.ResourceName(corev1.ResourceStorage)] // Check if GC PVC request size is greater than SV PVC request size switch (gcPvcRequestSize).Cmp(svPvcRequestSize) { case 1: // Update requested storage in SV PVC spec svPvcClone := svPVC.DeepCopy() svPvcClone.Spec.Resources.Requests[corev1.ResourceName(corev1.ResourceStorage)] = *gcPvcRequestSize // Make an update call to SV API server log.Infof("Increasing the size of supervisor PVC %s in namespace %s to %s", volumeID, c.supervisorNamespace, gcPvcRequestSize.String()) svPVC, err = c.supervisorClient.CoreV1().PersistentVolumeClaims(c.supervisorNamespace).Update( ctx, svPvcClone, metav1.UpdateOptions{}) if err != nil { msg := fmt.Sprintf("failed to update supervisor PVC %q in %q namespace. Error: %+v", volumeID, c.supervisorNamespace, err) log.Error(msg) return nil, csifault.CSIInternalFault, status.Error(codes.Internal, msg) } case 0: // GC PVC request size is equal to SV PVC request size log.Infof("Skipping resize call for supervisor PVC %s in namespace %s as it is already at the requested size", volumeID, c.supervisorNamespace) // SV PVC is already in FileSystemResizePending condition indicates // that SV PV has already been expanded to required size. if checkPVCCondition(ctx, svPVC, corev1.PersistentVolumeClaimFileSystemResizePending) { waitForSvPvcCondition = false } else { // SV PVC is not in FileSystemResizePending condition and GC PVC request size is equal to SV PVC capacity // indicates that SV PVC is already at required size if (gcPvcRequestSize).Cmp(svPVC.Status.Capacity[corev1.ResourceName(corev1.ResourceStorage)]) == 0 { waitForSvPvcCondition = false } } default: // GC PVC request size is lesser than SV PVC request size msg := fmt.Sprintf("the requested size of the Supervisor PVC %s in namespace %s is %s "+ "which is greater than the requested size of %s", volumeID, c.supervisorNamespace, svPvcRequestSize.String(), gcPvcRequestSize.String()) log.Error(msg) return nil, csifault.CSIInternalFault, status.Error(codes.InvalidArgument, msg) } if waitForSvPvcCondition { // Wait for Supervisor PVC to change status to FilesystemResizePending err = checkForSupervisorPVCCondition(ctx, c.supervisorClient, svPVC, corev1.PersistentVolumeClaimFileSystemResizePending, time.Duration(getResizeTimeoutInMin(ctx))*time.Minute) if err != nil { msg := fmt.Sprintf("failed to expand volume %s in namespace %s of supervisor cluster. 
Error: %+v", volumeID, c.supervisorNamespace, err) log.Error(msg) return nil, csifault.CSIInternalFault, status.Error(codes.Internal, msg) } } nodeExpansionRequired := true // Set NodeExpansionRequired to false for raw block volumes if _, ok := req.GetVolumeCapability().GetAccessType().(*csi.VolumeCapability_Block); ok { log.Infof("Node Expansion not supported for raw block volume ID %q in namespace %s of supervisor", volumeID, c.supervisorNamespace) nodeExpansionRequired = false } resp := &csi.ControllerExpandVolumeResponse{ CapacityBytes: volSizeBytes, NodeExpansionRequired: nodeExpansionRequired, } return resp, "", nil } resp, faultType, err := controllerExpandVolumeInternal() log := logger.GetLogger(ctx) log.Debugf("controllerExpandVolumeInternal: returns fault %q for volume %q", faultType, req.VolumeId) if err != nil { prometheus.CsiControlOpsHistVec.WithLabelValues(volumeType, prometheus.PrometheusExpandVolumeOpType, prometheus.PrometheusFailStatus).Observe(time.Since(start).Seconds()) } else { prometheus.CsiControlOpsHistVec.WithLabelValues(volumeType, prometheus.PrometheusExpandVolumeOpType, prometheus.PrometheusPassStatus).Observe(time.Since(start).Seconds()) } return resp, err } // ValidateVolumeCapabilities returns the capabilities of the volume. func (c *controller) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) ( *csi.ValidateVolumeCapabilitiesResponse, error) { log := logger.GetLogger(ctx) log.Infof("ValidateVolumeCapabilities: called with args %+v", *req) volCaps := req.GetVolumeCapabilities() var confirmed *csi.ValidateVolumeCapabilitiesResponse_Confirmed if err := common.IsValidVolumeCapabilities(ctx, volCaps); err == nil { confirmed = &csi.ValidateVolumeCapabilitiesResponse_Confirmed{VolumeCapabilities: volCaps} } return &csi.ValidateVolumeCapabilitiesResponse{ Confirmed: confirmed, }, nil } func (c *controller) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) ( *csi.ListVolumesResponse, error) { ctx = logger.NewContextWithLogger(ctx) log := logger.GetLogger(ctx) log.Infof("ListVolumes: called with args %+v", *req) return nil, status.Error(codes.Unimplemented, "") } func (c *controller) GetCapacity(ctx context.Context, req *csi.GetCapacityRequest) ( *csi.GetCapacityResponse, error) { ctx = logger.NewContextWithLogger(ctx) log := logger.GetLogger(ctx) log.Infof("GetCapacity: called with args %+v", *req) return nil, status.Error(codes.Unimplemented, "") } func (c *controller) ControllerGetCapabilities(ctx context.Context, req *csi.ControllerGetCapabilitiesRequest) ( *csi.ControllerGetCapabilitiesResponse, error) { ctx = logger.NewContextWithLogger(ctx) log := logger.GetLogger(ctx) log.Infof("ControllerGetCapabilities: called with args %+v", *req) var caps []*csi.ControllerServiceCapability for _, cap := range controllerCaps { c := &csi.ControllerServiceCapability{ Type: &csi.ControllerServiceCapability_Rpc{ Rpc: &csi.ControllerServiceCapability_RPC{ Type: cap, }, }, } caps = append(caps, c) } return &csi.ControllerGetCapabilitiesResponse{Capabilities: caps}, nil } func (c *controller) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) ( *csi.CreateSnapshotResponse, error) { ctx = logger.NewContextWithLogger(ctx) log := logger.GetLogger(ctx) log.Infof("CreateSnapshot: called with args %+v", *req) return nil, status.Error(codes.Unimplemented, "") } func (c *controller) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequest) ( *csi.DeleteSnapshotResponse, error) { ctx = 
logger.NewContextWithLogger(ctx) log := logger.GetLogger(ctx) log.Infof("DeleteSnapshot: called with args %+v", *req) return nil, status.Error(codes.Unimplemented, "") } func (c *controller) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsRequest) ( *csi.ListSnapshotsResponse, error) { ctx = logger.NewContextWithLogger(ctx) log := logger.GetLogger(ctx) log.Infof("ListSnapshots: called with args %+v", *req) return nil, status.Error(codes.Unimplemented, "") } func (c *controller) ControllerGetVolume(ctx context.Context, req *csi.ControllerGetVolumeRequest) ( *csi.ControllerGetVolumeResponse, error) { return nil, status.Error(codes.Unimplemented, "") }
[]
[]
[]
[]
[]
go
null
null
null
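The guest-cluster controller in the record above spends most of its detach and expand paths blocking on a Kubernetes watch until a condition is met or a timeout fires. The following is a minimal, generic sketch of that wait-until-condition loop, assuming only the standard k8s.io/apimachinery watch.Interface; the package and function names are illustrative and are not part of the driver.

```go
package watchutil

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/watch"
)

// waitForCondition drains events from w until done reports success, the
// watch channel closes, or the timeout elapses. The caller supplies the
// predicate, e.g. "the volume is no longer listed in the VM status".
func waitForCondition(w watch.Interface, timeout time.Duration, done func(watch.Event) bool) error {
	defer w.Stop()
	deadline := time.After(timeout)
	for {
		select {
		case ev, ok := <-w.ResultChan():
			if !ok {
				return fmt.Errorf("watch channel closed before the condition was met")
			}
			if done(ev) {
				return nil
			}
		case <-deadline:
			return fmt.Errorf("timed out after %s waiting for the condition", timeout)
		}
	}
}
```

A single deadline channel created before the loop keeps the overall wait bounded, rather than restarting a timer on every event.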
internal/geo/resolver_test.go
package geo import ( "os" "strings" "testing" "github.com/stretchr/testify/require" ) func Test_GeoLiteImpl(t *testing.T) { geoResolver, err := NewGeoResolver(&MaxMindGeoLiteConfig{"../../deploy/GeoLite2-City.mmdb"}) if err != nil && strings.Contains(err.Error(), "no such file") { t.Skip(err.Error()) return } require.NoError(t, err) entry, err := geoResolver.Resolve("70.20.56.211") require.NoError(t, err) require.Equal(t, "United States", entry.Country) } func Test_IPStackImpl(t *testing.T) { resolver, err := NewGeoResolver(&IPStackConfig{ APIKey: os.Getenv("IPSTACK_ACCESS_KEY"), }) if err != nil && strings.Contains(err.Error(), "no api key supplied") { t.Skip(err.Error()) return } require.NoError(t, err) entry, err := resolver.Resolve("70.20.56.211") require.NoError(t, err) require.Equal(t, "United States", entry.Country) }
[ "\"IPSTACK_ACCESS_KEY\"" ]
[]
[ "IPSTACK_ACCESS_KEY" ]
[]
["IPSTACK_ACCESS_KEY"]
go
1
0
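The test in the record above gates an external geolocation API call on the IPSTACK_ACCESS_KEY environment variable and skips cleanly when it is absent, which matches the constant-argument usage captured in the metadata fields. A minimal sketch of that skip-if-unset pattern follows; the helper name is illustrative and not part of the package.

```go
package geo_test

import (
	"os"
	"testing"
)

// requireAPIKey returns the configured key, or skips the test when the
// environment variable is not set so credential-less CI runs stay green.
func requireAPIKey(t *testing.T) string {
	t.Helper()
	key := os.Getenv("IPSTACK_ACCESS_KEY")
	if key == "" {
		t.Skip("IPSTACK_ACCESS_KEY not set; skipping external API test")
	}
	return key
}
```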
pkg/api/api.go
// Copyright (c) The Thanos Authors. // Licensed under the Apache License 2.0. // Copyright 2016 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // This package is a modified copy from // github.com/prometheus/prometheus/web/api/v1@2121b4628baa7d9d9406aa468712a6a332e77aff. package api import ( "encoding/json" "fmt" "net/http" "os" "runtime" "time" "github.com/NYTimes/gziphandler" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/opentracing/opentracing-go" "github.com/prometheus/common/route" "github.com/prometheus/common/version" extpromhttp "github.com/thanos-io/thanos/pkg/extprom/http" "github.com/thanos-io/thanos/pkg/logging" "github.com/thanos-io/thanos/pkg/server/http/middleware" "github.com/thanos-io/thanos/pkg/tracing" ) type status string const ( StatusSuccess status = "success" StatusError status = "error" ) type ErrorType string const ( ErrorNone ErrorType = "" ErrorTimeout ErrorType = "timeout" ErrorCanceled ErrorType = "canceled" ErrorExec ErrorType = "execution" ErrorBadData ErrorType = "bad_data" ErrorInternal ErrorType = "internal" ) var corsHeaders = map[string]string{ "Access-Control-Allow-Headers": "Accept, Accept-Encoding, Authorization, Content-Type, Origin", "Access-Control-Allow-Methods": "GET, OPTIONS", "Access-Control-Allow-Origin": "*", "Access-Control-Expose-Headers": "Date", } // ThanosVersion contains build information about Thanos. type ThanosVersion struct { Version string `json:"version"` Revision string `json:"revision"` Branch string `json:"branch"` BuildUser string `json:"buildUser"` BuildDate string `json:"buildDate"` GoVersion string `json:"goVersion"` } var BuildInfo = &ThanosVersion{ Version: version.Version, Revision: version.Revision, Branch: version.Branch, BuildUser: version.BuildUser, BuildDate: version.BuildDate, GoVersion: version.GoVersion, } type ApiError struct { Typ ErrorType Err error } func (e *ApiError) Error() string { return fmt.Sprintf("%s: %s", e.Typ, e.Err) } // RuntimeInfo contains runtime information about Thanos. type RuntimeInfo struct { StartTime time.Time `json:"startTime"` CWD string `json:"CWD"` GoroutineCount int `json:"goroutineCount"` GOMAXPROCS int `json:"GOMAXPROCS"` GOGC string `json:"GOGC"` GODEBUG string `json:"GODEBUG"` } // RuntimeInfoFn returns updated runtime information about Thanos. type RuntimeInfoFn func() RuntimeInfo type response struct { Status status `json:"status"` Data interface{} `json:"data,omitempty"` ErrorType ErrorType `json:"errorType,omitempty"` Error string `json:"error,omitempty"` Warnings []string `json:"warnings,omitempty"` } // SetCORS enables cross-site script calls. func SetCORS(w http.ResponseWriter) { for h, v := range corsHeaders { w.Header().Set(h, v) } } type ApiFunc func(r *http.Request) (interface{}, []error, *ApiError) type BaseAPI struct { logger log.Logger flagsMap map[string]string runtimeInfo RuntimeInfoFn buildInfo *ThanosVersion Now func() time.Time disableCORS bool } // NewBaseAPI returns a new initialized BaseAPI type. 
func NewBaseAPI(logger log.Logger, disableCORS bool, flagsMap map[string]string) *BaseAPI { return &BaseAPI{ logger: logger, flagsMap: flagsMap, runtimeInfo: GetRuntimeInfoFunc(logger), buildInfo: BuildInfo, disableCORS: disableCORS, Now: time.Now, } } // Register registers the common API endpoints. func (api *BaseAPI) Register(r *route.Router, tracer opentracing.Tracer, logger log.Logger, ins extpromhttp.InstrumentationMiddleware, logMiddleware *logging.HTTPServerMiddleware) { instr := GetInstr(tracer, logger, ins, logMiddleware, api.disableCORS) r.Options("/*path", instr("options", api.options)) r.Get("/status/flags", instr("status_flags", api.flags)) r.Get("/status/runtimeinfo", instr("status_runtime", api.serveRuntimeInfo)) r.Get("/status/buildinfo", instr("status_build", api.serveBuildInfo)) } func (api *BaseAPI) options(r *http.Request) (interface{}, []error, *ApiError) { return nil, nil, nil } func (api *BaseAPI) flags(r *http.Request) (interface{}, []error, *ApiError) { return api.flagsMap, nil, nil } func (api *BaseAPI) serveRuntimeInfo(r *http.Request) (interface{}, []error, *ApiError) { return api.runtimeInfo(), nil, nil } func (api *BaseAPI) serveBuildInfo(r *http.Request) (interface{}, []error, *ApiError) { return api.buildInfo, nil, nil } func GetRuntimeInfoFunc(logger log.Logger) RuntimeInfoFn { CWD, err := os.Getwd() if err != nil { CWD = "<error retrieving current working directory>" level.Warn(logger).Log("msg", "failed to retrieve current working directory", "err", err) } birth := time.Now() return func() RuntimeInfo { return RuntimeInfo{ StartTime: birth, CWD: CWD, GoroutineCount: runtime.NumGoroutine(), GOMAXPROCS: runtime.GOMAXPROCS(0), GOGC: os.Getenv("GOGC"), GODEBUG: os.Getenv("GODEBUG"), } } } type InstrFunc func(name string, f ApiFunc) http.HandlerFunc // Instr returns a http HandlerFunc with the instrumentation middleware. 
func GetInstr( tracer opentracing.Tracer, logger log.Logger, ins extpromhttp.InstrumentationMiddleware, logMiddleware *logging.HTTPServerMiddleware, disableCORS bool, ) InstrFunc { instr := func(name string, f ApiFunc) http.HandlerFunc { hf := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if !disableCORS { SetCORS(w) } if data, warnings, err := f(r); err != nil { RespondError(w, err, data) } else if data != nil { Respond(w, data, warnings) } else { w.WriteHeader(http.StatusNoContent) } }) return ins.NewHandler(name, logMiddleware.HTTPMiddleware(name, tracing.HTTPMiddleware(tracer, name, logger, gziphandler.GzipHandler(middleware.RequestID(hf))))) } return instr } func Respond(w http.ResponseWriter, data interface{}, warnings []error) { w.Header().Set("Content-Type", "application/json") if len(warnings) > 0 { w.Header().Set("Cache-Control", "no-store") } w.WriteHeader(http.StatusOK) resp := &response{ Status: StatusSuccess, Data: data, } for _, warn := range warnings { resp.Warnings = append(resp.Warnings, warn.Error()) } _ = json.NewEncoder(w).Encode(resp) } func RespondError(w http.ResponseWriter, apiErr *ApiError, data interface{}) { w.Header().Set("Content-Type", "application/json") w.Header().Set("Cache-Control", "no-store") var code int switch apiErr.Typ { case ErrorBadData: code = http.StatusBadRequest case ErrorExec: code = 422 case ErrorCanceled, ErrorTimeout: code = http.StatusServiceUnavailable case ErrorInternal: code = http.StatusInternalServerError default: code = http.StatusInternalServerError } w.WriteHeader(code) _ = json.NewEncoder(w).Encode(&response{ Status: StatusError, ErrorType: apiErr.Typ, Error: apiErr.Err.Error(), Data: data, }) }
[ "\"GOGC\"", "\"GODEBUG\"" ]
[]
[ "GOGC", "GODEBUG" ]
[]
["GOGC", "GODEBUG"]
go
2
0
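The Thanos BaseAPI in the record above surfaces GOGC and GODEBUG, read via os.Getenv, through its runtime-info endpoint, which is why both variables appear in this record's environment metadata. Below is a small standalone sketch of that snapshot using only the standard library; the fallback to "100" reflects Go's documented default when GOGC is unset.

```go
package main

import (
	"fmt"
	"os"
	"runtime"
)

// Print a snapshot of process-level runtime settings, similar in spirit to
// the runtime-info payload above.
func main() {
	gogc := os.Getenv("GOGC")
	if gogc == "" {
		gogc = "100" // Go's documented default when GOGC is not set
	}
	fmt.Printf("GOGC=%s GODEBUG=%q goroutines=%d GOMAXPROCS=%d\n",
		gogc, os.Getenv("GODEBUG"), runtime.NumGoroutine(), runtime.GOMAXPROCS(0))
}
```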
imcsdk/mometa/mgmt/MgmtBackup.py
"""This module contains the general information for MgmtBackup ManagedObject.""" from ...imcmo import ManagedObject from ...imccoremeta import MoPropertyMeta, MoMeta from ...imcmeta import VersionMeta class MgmtBackupConsts: ADMIN_STATE_DISABLED = "disabled" ADMIN_STATE_ENABLED = "enabled" FSM_RMT_INV_ERR_CODE_ = "" FSM_RMT_INV_ERR_CODE_ABORTED = "Aborted" FSM_RMT_INV_ERR_CODE_ERROR_COLLECTING_CONFIGURATION_DATA = "Error collecting configuration data" FSM_RMT_INV_ERR_CODE_ERROR_IMPORTING_CONFIGURATION = "Error importing configuration" FSM_RMT_INV_ERR_CODE_PARTIALLY_IMPORTED = "Partially Imported" FSM_RMT_INV_ERR_CODE_TFTP_ERROR = "TFTP Error" FSM_RMT_INV_ERR_CODE_UNKNOWN_ERROR = "Unknown error" FSM_RMT_INV_ERR_CODE_NONE = "none" PROTO_FTP = "ftp" PROTO_HTTP = "http" PROTO_NONE = "none" PROTO_SCP = "scp" PROTO_SFTP = "sftp" PROTO_TFTP = "tftp" SOURCE_REMOTE = "remote" SOURCE_USB = "usb" class MgmtBackup(ManagedObject): """This is MgmtBackup class.""" consts = MgmtBackupConsts() naming_props = set([]) mo_meta = { "classic": MoMeta("MgmtBackup", "mgmtBackup", "export-config", VersionMeta.Version151f, "InputOutput", 0x1fff, [], ["admin", "read-only", "user"], [u'topSystem'], [], [None]), "modular": MoMeta("MgmtBackup", "mgmtBackup", "export-config", VersionMeta.Version2013e, "InputOutput", 0xfff, [], ["admin", "read-only", "user"], [u'equipmentChassis'], [], [None]) } prop_meta = { "classic": { "admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["Disabled", "Enabled", "disabled", "enabled"], []), "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version151f, MoPropertyMeta.INTERNAL, None, None, None, None, [], []), "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []), "fsm_descr": MoPropertyMeta("fsm_descr", "fsmDescr", "string", VersionMeta.Version151f, MoPropertyMeta.INTERNAL, None, None, None, None, [], []), "fsm_rmt_inv_err_code": MoPropertyMeta("fsm_rmt_inv_err_code", "fsmRmtInvErrCode", "string", VersionMeta.Version151f, MoPropertyMeta.INTERNAL, None, None, None, None, ["", "Aborted", "Error collecting configuration data", "Error importing configuration", "Partially Imported", "TFTP Error", "Unknown error", "none"], ["0-4294967295"]), "fsm_rmt_inv_err_descr": MoPropertyMeta("fsm_rmt_inv_err_descr", "fsmRmtInvErrDescr", "string", VersionMeta.Version151f, MoPropertyMeta.INTERNAL, None, 0, 510, None, [], []), "fsm_stage_descr": MoPropertyMeta("fsm_stage_descr", "fsmStageDescr", "string", VersionMeta.Version151f, MoPropertyMeta.INTERNAL, None, None, None, None, [], []), "hostname": MoPropertyMeta("hostname", "hostname", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x8, 0, 255, r"""(([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:) 
|((([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,6})|(([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?)+)|([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]))""", [], []), "passphrase": MoPropertyMeta("passphrase", "passphrase", "string", VersionMeta.Version201a, MoPropertyMeta.READ_WRITE, 0x10, 0, 127, r"""[^\(\)\^%!#$&<;>`~""?\\|'\s]{6,127}""", [], []), "proto": MoPropertyMeta("proto", "proto", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["ftp", "http", "none", "scp", "sftp", "tftp"], []), "pwd": MoPropertyMeta("pwd", "pwd", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x40, 0, 255, None, [], []), "remote_file": MoPropertyMeta("remote_file", "remoteFile", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x80, 0, 255, r"""[^\(\)~`'\?\\"";<>\|&\*\^$%]{1,255}""", [], []), "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x100, 0, 255, None, [], []), "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x200, None, None, None, ["", "created", "deleted", "modified", "removed"], []), "user": MoPropertyMeta("user", "user", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x400, 0, 255, None, [], []), "source": MoPropertyMeta("source", "source", "string", VersionMeta.Version311d, MoPropertyMeta.READ_WRITE, 0x800, None, None, None, ["remote", "usb"], []), "usb_path": MoPropertyMeta("usb_path", "usbPath", "string", VersionMeta.Version311d, MoPropertyMeta.READ_WRITE, 0x1000, 1, 128, None, [], []), }, "modular": { "admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["Disabled", "Enabled", "disabled", "enabled"], []), "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []), "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []), "fsm_descr": MoPropertyMeta("fsm_descr", "fsmDescr", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []), "fsm_rmt_inv_err_code": MoPropertyMeta("fsm_rmt_inv_err_code", "fsmRmtInvErrCode", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, ["", "Aborted", "Error collecting configuration data", "Error importing configuration", "Partially Imported", "TFTP Error", "Unknown error", "none"], ["0-4294967295"]), "fsm_rmt_inv_err_descr": MoPropertyMeta("fsm_rmt_inv_err_descr", "fsmRmtInvErrDescr", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, 0, 510, None, [], []), "fsm_stage_descr": MoPropertyMeta("fsm_stage_descr", "fsmStageDescr", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []), "hostname": MoPropertyMeta("hostname", "hostname", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, 0, 255, 
r"""([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:""", [], []), "passphrase": MoPropertyMeta("passphrase", "passphrase", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, 0, 127, r"""[^\(\)\^%!#$&<;>`~""?\\|'\s]{6,127}""", [], []), "proto": MoPropertyMeta("proto", "proto", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["ftp", "http", "none", "scp", "sftp", "tftp"], []), "pwd": MoPropertyMeta("pwd", "pwd", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x40, 0, 255, None, [], []), "remote_file": MoPropertyMeta("remote_file", "remoteFile", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x80, 0, 255, r"""[^\(\)~`'\?\\"";<>\|&\*\^$%]{1,255}""", [], []), "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x100, 0, 255, None, [], []), "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x200, None, None, None, ["", "created", "deleted", "modified", "removed"], []), "user": MoPropertyMeta("user", "user", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x400, 0, 255, None, [], []), "entity": MoPropertyMeta("entity", "entity", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x800, 0, 256, None, [], []), }, } prop_map = { "classic": { "adminState": "admin_state", "childAction": "child_action", "dn": "dn", "fsmDescr": "fsm_descr", "fsmRmtInvErrCode": "fsm_rmt_inv_err_code", "fsmRmtInvErrDescr": "fsm_rmt_inv_err_descr", "fsmStageDescr": "fsm_stage_descr", "hostname": "hostname", "passphrase": "passphrase", "proto": "proto", "pwd": "pwd", "remoteFile": "remote_file", "rn": "rn", "status": "status", "user": "user", "source": "source", "usbPath": "usb_path", }, "modular": { "adminState": "admin_state", "childAction": "child_action", "dn": "dn", "fsmDescr": "fsm_descr", "fsmRmtInvErrCode": "fsm_rmt_inv_err_code", "fsmRmtInvErrDescr": "fsm_rmt_inv_err_descr", "fsmStageDescr": "fsm_stage_descr", "hostname": "hostname", "passphrase": "passphrase", "proto": "proto", "pwd": "pwd", "remoteFile": "remote_file", "rn": "rn", "status": "status", "user": "user", "entity": "entity", }, } def __init__(self, parent_mo_or_dn, **kwargs): self._dirty_mask = 0 self.admin_state = None self.child_action = None self.fsm_descr = None self.fsm_rmt_inv_err_code = None self.fsm_rmt_inv_err_descr = None self.fsm_stage_descr = None self.hostname = None self.passphrase = None self.proto = None self.pwd = None self.remote_file = None self.status = None self.user = None self.source = None self.usb_path = None self.entity = None ManagedObject.__init__(self, "MgmtBackup", parent_mo_or_dn, **kwargs)
[]
[]
[]
[]
[]
python
null
null
null
pkg/server/etcd.go
package server import ( "context" "io/ioutil" "os" "path/filepath" "time" "github.com/rancher/k3s/pkg/etcd" "github.com/sirupsen/logrus" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // setETCDLabelsAndAnnotations will set the etcd role label if not exists also it // sets special annotaitons on the node object which are etcd node id and etcd node // address, the function will also remove the controlplane and master role labels if // they exist on the node func setETCDLabelsAndAnnotations(ctx context.Context, config *Config) error { t := time.NewTicker(5 * time.Second) defer t.Stop() for range t.C { controlConfig := &config.ControlConfig sc, err := newContext(ctx, controlConfig.Runtime.KubeConfigAdmin) if err != nil { logrus.Infof("Failed to set etcd role label: %v", err) continue } if err := sc.Start(ctx); err != nil { logrus.Infof("Failed to set etcd role label: %v", err) continue } controlConfig.Runtime.Core = sc.Core nodes := sc.Core.Core().V1().Node() nodeName := os.Getenv("NODE_NAME") if nodeName == "" { logrus.Info("Failed to set etcd role label: node name not set") continue } node, err := nodes.Get(nodeName, metav1.GetOptions{}) if err != nil { logrus.Infof("Failed to set etcd role label: %v", err) continue } if node.Labels == nil { node.Labels = make(map[string]string) } // remove controlplane label if role label exists var controlRoleLabelExists bool if _, ok := node.Labels[MasterRoleLabelKey]; ok { delete(node.Labels, MasterRoleLabelKey) controlRoleLabelExists = true } if _, ok := node.Labels[ControlPlaneRoleLabelKey]; ok { delete(node.Labels, ControlPlaneRoleLabelKey) controlRoleLabelExists = true } if v, ok := node.Labels[ETCDRoleLabelKey]; ok && v == "true" && !controlRoleLabelExists { break } node.Labels[ETCDRoleLabelKey] = "true" // this is replacement to the etcd controller handleself function if node.Annotations == nil { node.Annotations = map[string]string{} } fileName := filepath.Join(controlConfig.DataDir, "db", "etcd", "name") data, err := ioutil.ReadFile(fileName) if err != nil { logrus.Infof("Waiting for etcd node name file to be available: %v", err) continue } etcdNodeName := string(data) node.Annotations[etcd.NodeID] = etcdNodeName address, err := etcd.GetAdvertiseAddress(controlConfig.PrivateIP) if err != nil { logrus.Infof("Waiting for etcd node address to be available: %v", err) continue } node.Annotations[etcd.NodeAddress] = address _, err = nodes.Update(node) if err == nil { logrus.Infof("Successfully set etcd role label and annotations on node %s", nodeName) break } select { case <-ctx.Done(): return ctx.Err() } } return nil }
[ "\"NODE_NAME\"" ]
[]
[ "NODE_NAME" ]
[]
["NODE_NAME"]
go
1
0
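The k3s helper in the record above resolves the node it should label from the NODE_NAME environment variable (normally injected into the process environment) and simply retries when the variable is empty. The sketch below shows the lookup with an explicit hostname fallback; the fallback is an assumption added for illustration, not behaviour of the recorded code.

```go
package nodeutil

import (
	"fmt"
	"os"
)

// nodeName prefers the NODE_NAME environment variable and, as an assumed
// fallback, uses the OS hostname; the original code instead retries until
// NODE_NAME becomes available.
func nodeName() (string, error) {
	if name := os.Getenv("NODE_NAME"); name != "" {
		return name, nil
	}
	host, err := os.Hostname()
	if err != nil {
		return "", fmt.Errorf("NODE_NAME unset and hostname lookup failed: %w", err)
	}
	return host, nil
}
```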
cmd/sqlflow/main_test.go
// Copyright 2020 The SQLFlow Authors. All rights reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "bufio" "bytes" "encoding/base64" "flag" "fmt" "io" "log" "net" "os" "os/exec" "regexp" "strings" "testing" "time" "github.com/stretchr/testify/assert" "google.golang.org/grpc" "google.golang.org/grpc/reflection" "github.com/c-bata/go-prompt" "sqlflow.org/sqlflow/pkg/database" "sqlflow.org/sqlflow/pkg/proto" srv "sqlflow.org/sqlflow/pkg/server" "sqlflow.org/sqlflow/pkg/sql" "sqlflow.org/sqlflow/pkg/sql/codegen/attribute" "sqlflow.org/sqlflow/pkg/sql/testdata" "sqlflow.org/sqlflow/pkg/step" ) var space = regexp.MustCompile(`\s+`) var dbConnStr = database.GetTestingMySQLURL() var testDBDriver = os.Getenv("SQLFLOW_TEST_DB") var serverAddr = "localhost:50051" func startServer() func() { var s *grpc.Server go func() { s = grpc.NewServer() proto.RegisterSQLFlowServer(s, srv.NewServer(sql.RunSQLProgram, "")) listenString := fmt.Sprintf(":%d", 50051) lis, err := net.Listen("tcp", listenString) if err != nil { log.Fatalf("failed to listen: %v", err) } // Register reflection service on gRPC server. reflection.Register(s) log.Printf("Server Started at %s", listenString) if err := s.Serve(lis); err != nil { log.Fatalf("failed to serve: %v", err) } }() return func() { s.GracefulStop() } } func serverIsReady(addr string, timeout time.Duration) bool { conn, err := net.DialTimeout("tcp", addr, timeout) if err != nil { return false } err = conn.Close() return err == nil } func waitForServer() { for i := 0; i < 10; i++ { if serverIsReady(serverAddr, 5*time.Second) { return } time.Sleep(1 * time.Second) } log.Fatal("Can't connect to sqlflow server.") } func prepareTestDataOrSkip(t *testing.T) error { // disable sixel it2Check = false assertConnectable(serverAddr, dbConnStr) testDB, _ := database.OpenAndConnectDB(dbConnStr) if testDBDriver == "mysql" { _, e := testDB.Exec("CREATE DATABASE IF NOT EXISTS sqlflow_models;") if e != nil { return e } return testdata.Popularize(testDB.DB, testdata.IrisSQL) } t.Skip("Skipping mysql tests") return nil } func TestRunStmt(t *testing.T) { stopServer := startServer() defer stopServer() waitForServer() a := assert.New(t) a.NoError(prepareTestDataOrSkip(t)) os.Setenv("SQLFLOW_log_dir", "/tmp/") currentDB = "" // TODO(yancey1989): assert should not panics in repl output, err := step.GetStdout(func() error { return runStmt(serverAddr, "show tables", true, dbConnStr) }) a.Error(err) a.Contains(output, "Error 1046: No database selected") output, err = step.GetStdout(func() error { return runStmt(serverAddr, "use iris", true, dbConnStr) }) a.NoError(err) a.Contains(output, "Database changed to iris") output, err = step.GetStdout(func() error { return runStmt(serverAddr, "show tables", true, dbConnStr) }) a.NoError(err) a.Contains(output, "| TABLES IN IRIS |") output, err = step.GetStdout(func() error { return runStmt(serverAddr, "select * from train to train DNNClassifier WITH model.hidden_units=[10,10], model.n_classes=3, validation.select=\"select * from 
test\" label class INTO sqlflow_models.repl_dnn_model;", true, dbConnStr) }) a.NoError(err) a.Contains(output, "'global_step': 110") output, err = step.GetStdout(func() error { return runStmt(serverAddr, "select * from train to train xgboost.gbtree WITH objective=reg:squarederror, validation.select=\"select * from test\" label class INTO sqlflow_models.repl_xgb_model;", true, dbConnStr) }) a.NoError(err) a.Contains(output, "Evaluation result: ") output, err = step.GetStdout(func() error { return runStmt(serverAddr, "select * from train to explain sqlflow_models.repl_xgb_model;", true, dbConnStr) }) a.NoError(err) a.Contains(output, "data:text/html, <div align='center'><img src='data:image/png;base64") // not supported now // a.Contains(output, "⣿") //non sixel with ascii art } func TestRepl(t *testing.T) { stopServer := startServer() defer stopServer() waitForServer() a := assert.New(t) a.Nil(prepareTestDataOrSkip(t)) sql := ` -- use iris; -- -- 1 show tables; -- 2 select * from train to train DNNClassifier WITH model.hidden_units=[10,10], model.n_classes=3, validation.select="select * from test" label class INTO sqlflow_models.repl_dnn_model; use sqlflow_models; show tables` scanner := bufio.NewScanner(strings.NewReader(sql)) output, err := step.GetStdout(func() error { repl(serverAddr, scanner, dbConnStr); return nil }) a.Nil(err) a.Contains(output, "Database changed to iris") a.Contains(output, ` +----------------+ | TABLES IN IRIS | +----------------+ | iris_empty | | test | | test_dense | | train | | train_dense | +----------------+`) a.Contains(output, ` select * from train to train DNNClassifier WITH model.hidden_units=[10,10], model.n_classes=3, validation.select="select * from test" label class INTO sqlflow_models.repl_dnn_model;`) a.Contains(output, "'global_step': 110") a.Contains(output, "Database changed to sqlflow_models") a.Contains(output, "| TABLES IN SQLFLOW MODELS |") a.Contains(output, "| repl_dnn_model |") } func TestReplWithoutSemicolon(t *testing.T) { stopServer := startServer() defer stopServer() waitForServer() a := assert.New(t) a.NoError(prepareTestDataOrSkip(t)) sql := ` select * from iris.train to train DNNClassifier WITH model.hidden_units=[10,10], model.n_classes=3, validation.select="select * from iris.test" label class INTO sqlflow_models.repl_dnn_model` scanner := bufio.NewScanner(strings.NewReader(sql)) output, err := step.GetStdout(func() error { repl(serverAddr, scanner, dbConnStr); return nil }) a.NoError(err) a.Contains(output, ` select * from iris.train to train DNNClassifier WITH model.hidden_units=[10,10], model.n_classes=3, validation.select="select * from iris.test" label class INTO sqlflow_models.repl_dnn_model;`) a.Contains(output, "'global_step': 110") } func TestMain(t *testing.T) { stopServer := startServer() defer stopServer() waitForServer() a := assert.New(t) a.Nil(prepareTestDataOrSkip(t)) os.Args = append(os.Args, "-datasource", dbConnStr, "-e", "use iris; show tables", "-sqlflow_server", serverAddr) output, _ := step.GetStdout(func() error { main(); return nil }) a.Contains(output, ` +----------------+ | TABLES IN IRIS | +----------------+ | iris_empty | | test | | test_dense | | train | | train_dense | +----------------+`) } func testGetDataSource(t *testing.T, dataSource, databaseName string) { a := assert.New(t) a.Equal(dataSource, getDataSource(dataSource, databaseName)) db, err := database.GetDatabaseName(dataSource) a.NoError(err) a.Equal(databaseName, db) a.NotEqual(dataSource, getDataSource(dataSource, databaseName+"test")) db, 
err = database.GetDatabaseName(getDataSource(dataSource, databaseName+"test")) a.NoError(err) a.Equal(databaseName+"test", db) } func TestGetDataSource(t *testing.T) { testGetDataSource(t, "maxcompute://test:[email protected]/api?curr_project=iris&scheme=https", "iris") testGetDataSource(t, "maxcompute://test:[email protected]/api?curr_project=&scheme=https", "") testGetDataSource(t, "mysql://root:root@tcp(127.0.0.1:3306)/", "") testGetDataSource(t, "mysql://root:root@tcp(127.0.0.1:3306)/?maxAllowedPacket=0", "") testGetDataSource(t, "mysql://root:root@tcp(127.0.0.1:3306)/iris", "iris") testGetDataSource(t, "mysql://root:root@tcp(127.0.0.1:3306)/iris?maxAllowedPacket=0", "iris") testGetDataSource(t, "hive://root:root@localhost:10000/", "") testGetDataSource(t, "hive://root:[email protected]:10000/?auth=NOSASL", "") testGetDataSource(t, "hive://root:root@localhost:10000/churn", "churn") testGetDataSource(t, "hive://root:[email protected]:10000/iris?auth=NOSASL", "iris") b64v := base64.RawURLEncoding.EncodeToString([]byte("{\"a\":\"b\"}")) testGetDataSource(t, fmt.Sprintf("alisa://admin:[email protected]?curr_project=iris&env=%s&schema=http&with=%s", b64v, b64v), "iris") } func testMainFastFail(t *testing.T, interactive bool) { a := assert.New(t) // Run the crashing code when FLAG is set if os.Getenv("SQLFLOW_TEST_REPL_FAST_FAIL_INTERACTIVE_OR_NOT") == "false" { os.Args = []string{os.Args[0], "--datasource", "database://in?imagination", "-e", ";"} main() } else if os.Getenv("SQLFLOW_TEST_REPL_FAST_FAIL_INTERACTIVE_OR_NOT") == "true" { os.Args = []string{os.Args[0], "--datasource", "database://in?imagination"} main() } // Run the test in a subprocess cmd := exec.Command(os.Args[0], "-test.run=TestMainFastFail") cmd.Env = append(os.Environ(), fmt.Sprintf("SQLFLOW_TEST_REPL_FAST_FAIL_INTERACTIVE_OR_NOT=%v", interactive)) cmd.Start() done := make(chan error) go func() { done <- cmd.Wait() }() timeout := time.After(4 * time.Second) // 4s are enough for **fast** fail select { case <-timeout: cmd.Process.Kill() assert.FailNowf(t, "subprocess main timed out", "interactive: %v", interactive) case err := <-done: a.Error(err) // Cast the error as *exec.ExitError and compare the result e, ok := err.(*exec.ExitError) expectedErrorString := "exit status 1" assert.Equal(t, true, ok) assert.Equal(t, expectedErrorString, e.Error()) } } func TestMainFastFail(t *testing.T) { stopServer := startServer() defer stopServer() waitForServer() testMainFastFail(t, true) testMainFastFail(t, false) } func TestReadStmt(t *testing.T) { a := assert.New(t) sql := `SELECT * FROM iris.train TO TRAIN DNNClassifier WITH model.hidden_units=[10,20], model.n_classes=3 LABEL class INTO sqlflow_models.my_model;` scanner := bufio.NewScanner(strings.NewReader(sql)) stmt, err := readStmt(scanner) a.Nil(err) a.Equal(1, len(stmt)) a.Equal(space.ReplaceAllString(stmt[0], " "), space.ReplaceAllString(sql, " ")) sql2 := `-- 1. test SELECT * FROM iris.train TO TRAIN DNNClassifier WITH model.hidden_units=[10,20], model.n_classes=3 LABEL class INTO sqlflow_models.my_model;` scanner = bufio.NewScanner(strings.NewReader(sql2)) stmt, err = readStmt(scanner) a.Nil(err) a.Equal(0, len(stmt)) // The leading one-line comment is considered an empty statement stmt, err = readStmt(scanner) a.Nil(err) a.Equal(1, len(stmt)) a.Equal(space.ReplaceAllString(stmt[0], " "), space.ReplaceAllString(sql, " ")) sql2 = `-- 1. 
test` scanner = bufio.NewScanner(strings.NewReader(sql2)) stmt, err = readStmt(scanner) a.Nil(err) a.Equal(0, len(stmt)) sql2 = `--` scanner = bufio.NewScanner(strings.NewReader(sql2)) stmt, err = readStmt(scanner) a.Nil(err) a.Equal(0, len(stmt)) sql2 = `--1. test` scanner = bufio.NewScanner(strings.NewReader(sql2)) stmt, err = readStmt(scanner) a.Equal(io.EOF, err) // Don't support standard comment a.Equal(1, len(stmt)) a.Equal(sql2, stmt[0]) sql2 = `SHOW databases;` scanner = bufio.NewScanner(strings.NewReader(sql2)) stmt, err = readStmt(scanner) a.Nil(err) a.Equal(1, len(stmt)) sql2 = `SHOW databases` scanner = bufio.NewScanner(strings.NewReader(sql2)) stmt, err = readStmt(scanner) a.Equal(err, io.EOF) // EOF is considered the same as ';' a.Equal(1, len(stmt)) sql3 := `SELECT * FROM iris.train TO TRAIN DNNClassifier WITH model.hidden_units=[10,20], model.n_classes=3 LABEL class INTO sqlflow_models.my_model;` scanner = bufio.NewScanner(strings.NewReader(sql3)) stmt, err = readStmt(scanner) a.Nil(err) a.Equal(1, len(stmt)) a.Equal(space.ReplaceAllString(stmt[0], " "), space.ReplaceAllString(sql, " ")) sql3 = `SELECT -- * -- comment FROM -- comment; iris.train -- comment ; TO -- comment ; TRAIN TRAIN DNNClassifier WITH model.hidden_units=[10,20], model.n_classes=3 LABEL class INTO sqlflow_models.my_model;` scanner = bufio.NewScanner(strings.NewReader(sql3)) stmt, err = readStmt(scanner) a.Equal(1, len(stmt)) a.Equal(space.ReplaceAllString(stmt[0], " "), space.ReplaceAllString(sql, " ")) sql3 = `SELECT * FROM tbl WHERE a==";";` scanner = bufio.NewScanner(strings.NewReader(sql3)) stmt, err = readStmt(scanner) a.Nil(err) a.Equal(1, len(stmt)) a.Equal(stmt[0], sql3) sql3 = `SELECT * FROM tbl WHERE a==";\"';` // Test unclosed quote scanner = bufio.NewScanner(strings.NewReader(sql3)) stmt, err = readStmt(scanner) a.Equal(io.EOF, err) a.Equal(1, len(stmt)) a.Equal(stmt[0], sql3) sql3 = `SELECT * FROM tbl WHERE a=="; ";` // Test cross-line quoted string scanner = bufio.NewScanner(strings.NewReader(sql3)) stmt, err = readStmt(scanner) a.Nil(err) a.Equal(1, len(stmt)) a.Equal(stmt[0], sql3) sql3 = `SELECT * FROM tbl WHERE a=="\"; ";` // Test Escaping scanner = bufio.NewScanner(strings.NewReader(sql3)) stmt, err = readStmt(scanner) a.Nil(err) a.Equal(1, len(stmt)) a.Equal(stmt[0], sql3) sql3 = `SELECT * FROM tbl WHERE a=="'; ";` // Test single quote in double-quoted string scanner = bufio.NewScanner(strings.NewReader(sql3)) stmt, err = readStmt(scanner) a.Nil(err) a.Equal(1, len(stmt)) a.Equal(stmt[0], sql3) sql3 = `SELECT * FROM tbl WHERE a=='"; ';` // Test double quote in single-quoted string scanner = bufio.NewScanner(strings.NewReader(sql3)) stmt, err = readStmt(scanner) a.Nil(err) a.Equal(1, len(stmt)) a.Equal(stmt[0], sql3) sql3 = `SELECT * FROM tbl WHERE a=="-- \"; ";` // Test double dash in quoted string scanner = bufio.NewScanner(strings.NewReader(sql3)) stmt, err = readStmt(scanner) a.Nil(err) a.Equal(1, len(stmt)) a.Equal(stmt[0], sql3) sql3 = `SELECT * FROM tbl WHERE a==--" \"; '";` // Test quoted string in standard comment (not comment actually ) scanner = bufio.NewScanner(strings.NewReader(sql3)) stmt, err = readStmt(scanner) a.Nil(err) a.Equal(1, len(stmt)) a.Equal(stmt[0], sql3) sql3 = `SELECT * FROM tbl WHERE a==-- " \"; '";` // Test quoted string in comment, note that the quoted string is unclosed scanner = bufio.NewScanner(strings.NewReader(sql3)) stmt, err = readStmt(scanner) a.Equal(io.EOF, err) a.Equal(1, len(stmt)) a.Equal(space.ReplaceAllString(stmt[0], " "), `SELECT * FROM 
tbl WHERE a== '";`) sql3 = `-- -- 1. test use iris; show tables; -- select * from tbl where a not like '-- %' ;` // Test multiple statements in multiple lines scanner = bufio.NewScanner(strings.NewReader(sql3)) stmt, err = readStmt(scanner) a.Nil(err) a.Equal(0, len(stmt)) stmt, err = readStmt(scanner) a.Nil(err) a.Equal(0, len(stmt)) stmt, err = readStmt(scanner) a.Nil(err) a.Equal(3, len(stmt)) a.Equal("use iris;", stmt[0]) a.Equal("show tables;", space.ReplaceAllString(stmt[1], " ")) a.Equal(" select * from tbl where a not like '-- %' ;", space.ReplaceAllString(stmt[2], " ")) sql3 = `use iris; show tables;` // Test multiple statements in single line scanner = bufio.NewScanner(strings.NewReader(sql3)) stmt, err = readStmt(scanner) a.Nil(err) a.Equal(2, len(stmt)) a.Equal("use iris;", stmt[0]) a.Equal("show tables;", space.ReplaceAllString(stmt[1], " ")) sql4 := `SELECT\t\n1;\n\n` scanner = bufio.NewScanner(strings.NewReader(sql4)) stmt, err = readStmt(scanner) a.Nil(err) a.Equal(1, len(stmt)) a.Equal("SELECT\t\n1;", stmt[0]) sql5 := `CREATE TABLE a(\na int, b int\n);` scanner = bufio.NewScanner(strings.NewReader(sql5)) stmt, err = readStmt(scanner) a.Nil(err) a.Equal(1, len(stmt)) a.Equal("CREATE TABLE a(\na int, b int\n);", stmt[0]) sql6 := `CREATE TABLE a(\na int, b int\n); ;` scanner = bufio.NewScanner(strings.NewReader(sql6)) stmt, err = readStmt(scanner) a.Nil(err) a.Equal(2, len(stmt)) a.Equal("CREATE TABLE a(\na int, b int\n);", stmt[0]) sql7 := `CREATE TABLE a(\na int, b int\n); ;; ; \n; \n ;` scanner = bufio.NewScanner(strings.NewReader(sql7)) stmt, err = readStmt(scanner) a.Nil(err) a.Equal(2, len(stmt)) a.Equal("CREATE TABLE a(\na int, b int\n);", stmt[0]) } func TestPromptState(t *testing.T) { a := assert.New(t) s := newPromptState() a.Equal(len(s.keywords), 15) sql := `SELECT * FROM iris.train TO TRAIN DNNClassifier WITH model.hidden_units=[10,20], model.n_classes=3 LABEL class INTO sqlflow_models.my_model;` words := strings.Fields(sql) keyword, ahead, last := s.lookaheadKeyword(words) a.Equal("INTO", keyword) a.Equal("class", ahead) a.Equal("sqlflow_models.my_model;", last) keyword, ahead, last = s.lookaheadKeyword(words[0 : len(words)-1]) a.Equal("INTO", keyword) a.Equal("class", ahead) a.Equal("INTO", last) keyword, ahead, last = s.lookaheadKeyword(words[0 : len(words)-2]) a.Equal("LABEL", keyword) a.Equal("model.n_classes=3", ahead) a.Equal("class", last) keyword, ahead, last = s.lookaheadKeyword(words[0 : len(words)-4]) a.Equal("WITH", keyword) a.Equal("DNNClassifier", ahead) a.Equal("model.n_classes=3", last) var stmt []string a.Equal(0, len(s.statements)) scanner := bufio.NewScanner(strings.NewReader(sql)) for scanner.Scan() { s.execute(scanner.Text(), func(s string) { stmt = append(stmt, s) }) } a.Equal(1, len(stmt)) a.Equal(space.ReplaceAllString(stmt[0], " "), space.ReplaceAllString(sql, " ")) a.Equal(0, len(s.statements)) stmt = []string{} sql2 := `-- 1. 
test SELECT * FROM iris.train TO TRAIN DNNClassifier WITH model.hidden_units=[10,20], model.n_classes=3 LABEL class INTO sqlflow_models.my_model;` scanner = bufio.NewScanner(strings.NewReader(sql2)) for scanner.Scan() { s.execute(scanner.Text(), func(s string) { stmt = append(stmt, s) }) } a.Equal(1, len(stmt)) a.Equal(space.ReplaceAllString(stmt[0], " "), space.ReplaceAllString(sql, " ")) } func TestInputNavigation(t *testing.T) { attribute.ExtractDocStringsOnce() a := assert.New(t) s := newPromptState() his1 := "history 1" his2 := "history 2" his3 := "SELECT * FROM iris.tran WHERE class like '%中文';" s.history = []prompt.Suggest{{his3, ""}, {his2, ""}, {his1, ""}} p := prompt.NewBuffer() // put something on input buffer p.InsertText(his3, false, true) // go backward s.navigateHistory("", true, p) a.Equal(his3, p.Text()) s.navigateHistory("", true, p) a.Equal(his2, p.Text()) s.navigateHistory("", true, p) a.Equal(his1, p.Text()) // should stop at last history s.navigateHistory("", true, p) a.Equal(his1, p.Text()) s.navigateHistory("", true, p) a.Equal(his1, p.Text()) // go forward s.navigateHistory("", false, p) a.Equal(his2, p.Text()) s.navigateHistory("", false, p) a.Equal(his3, p.Text()) s.navigateHistory("", false, p) a.Equal("", p.Text()) } func TestComplete(t *testing.T) { attribute.ExtractDocStringsOnce() a := assert.New(t) s := newPromptState() p := prompt.NewBuffer() // Imitating the `input from console` process p.InsertText(`SELECT * FROM iris.train T`, false, true) c := s.completer(*p.Document()) a.Equal(1, len(c)) a.Equal("TO", c[0].Text) p.InsertText(`O T`, false, true) c = s.completer(*p.Document()) a.Equal(1, len(c)) a.Equal("TRAIN", c[0].Text) p.InsertText(`RAIN `, false, true) c = s.completer(*p.Document()) a.Equal(18, len(c)) a.Equal("BoostedTreesClassifier", c[0].Text) p.InsertText(`DNN`, false, true) c = s.completer(*p.Document()) a.Equal(4, len(c)) p.InsertText(`c`, false, true) c = s.completer(*p.Document()) a.Equal(1, len(c)) a.Equal("DNNClassifier", c[0].Text) p.DeleteBeforeCursor(1) // TODO(shendiaomo): It's sort of case sensitive at the moment p.InsertText(`C`, false, true) c = s.completer(*p.Document()) a.Equal(1, len(c)) a.Equal("DNNClassifier", c[0].Text) p.InsertText(`lassifier w`, false, true) c = s.completer(*p.Document()) a.Equal(1, len(c)) a.Equal("WITH", c[0].Text) p.InsertText(`ith `, false, true) c = s.completer(*p.Document()) a.Equal(20, len(c)) p.InsertText(`model.f`, false, true) c = s.completer(*p.Document()) a.Equal(0, len(c)) // model.feature_columns removed by codegen/attribute.go p.DeleteBeforeCursor(1) p.InsertText(`h`, false, true) c = s.completer(*p.Document()) a.Equal(1, len(c)) a.Equal("model.hidden_units", c[0].Text) p.InsertText(`idden_units=[400,300], `, false, true) c = s.completer(*p.Document()) a.Equal(20, len(c)) p.InsertText(`o`, false, true) c = s.completer(*p.Document()) a.Equal(5, len(c)) // Adagrad has 5 parameters p.DeleteBeforeCursor(1) p.InsertText(`model.optimizer=`, false, true) c = s.completer(*p.Document()) a.Equal(8, len(c)) a.Equal("Adadelta", c[0].Text) p.InsertText(`R`, false, true) c = s.completer(*p.Document()) a.Equal(1, len(c)) a.Equal("RMSprop", c[0].Text) p.InsertText(`MSprop,`, false, true) c = s.completer(*p.Document()) p.InsertText(` o`, false, true) // FIXME(shendiaomo): copy-n-paste doesn't work here c = s.completer(*p.Document()) a.Equal(7, len(c)) // RMSprop has 7 parameters a.Equal("optimizer", c[0].Text) p.InsertText(`ptimizer.learning_rate=0.02, model.n`, false, true) c = s.completer(*p.Document()) 
a.Equal(1, len(c)) a.Equal("model.n_classes", c[0].Text) p.InsertText(`_classes=3 l`, false, true) c = s.completer(*p.Document()) a.Equal(1, len(c)) a.Equal("LABEL", c[0].Text) p.InsertText(`abel class i`, false, true) c = s.completer(*p.Document()) a.Equal(1, len(c)) a.Equal("INTO", c[0].Text) p.InsertText(`nto `, false, true) c = s.completer(*p.Document()) a.Equal(0, len(c)) p.InsertText(`nto sqlflow_models.my_awesome_model;`, false, true) c = s.completer(*p.Document()) a.Equal(0, len(c)) // Test cross line completion s = newPromptState() s.statements = []string{"TO"} p = prompt.NewBuffer() p.InsertText("t", false, true) c = s.completer(*p.Document()) a.Equal(1, len(c)) a.Equal("TRAIN", c[0].Text) // Test XGBoost objective parameter completion s = newPromptState() p = prompt.NewBuffer() p.InsertText("SELECT * FROM train TO TRAIN xgboost.gbtree WITH objective=", false, true) c = s.completer(*p.Document()) a.Equal(14, len(c)) p.InsertText("r", false, true) c = s.completer(*p.Document()) a.Equal(8, len(c)) p.InsertText("eg:s", false, true) c = s.completer(*p.Document()) a.Equal(2, len(c)) p.InsertText("quarederror", false, true) c = s.completer(*p.Document()) a.Equal(1, len(c)) p.InsertText("x", false, true) c = s.completer(*p.Document()) a.Equal(0, len(c)) s = newPromptState() p = prompt.NewBuffer() p.InsertText("SELECT * FROM train TO TRAIN notxgboost.gbtree WITH objective=", false, true) c = s.completer(*p.Document()) a.Equal(0, len(c)) } func TestGetTerminalColumnSize(t *testing.T) { a := assert.New(t) a.Equal(1024, getTerminalColumnSize()) oldConsoleParser := consoleParser consoleParser = newTestConsoleParser() a.Equal(238, getTerminalColumnSize()) consoleParser = oldConsoleParser } func applyEmacsMetaKeyBinding(buf *prompt.Buffer, key []byte) { for _, binding := range emacsMetaKeyBindings { if bytes.Compare(binding.ASCIICode, key) == 0 { binding.Fn(buf) } } } func applyEmacsControlKeyBinding(buf *prompt.Buffer, key prompt.Key) { for _, binding := range emacsCtrlKeyBindings { if binding.Key == key { binding.Fn(buf) } } } func TestEmacsKeyBindings(t *testing.T) { a := assert.New(t) buf := prompt.NewBuffer() buf.InsertText("USE iris", false, true) a.Equal(8, buf.DisplayCursorPosition()) applyEmacsControlKeyBinding(buf, prompt.ControlA) a.Equal(0, buf.DisplayCursorPosition()) applyEmacsControlKeyBinding(buf, prompt.ControlE) a.Equal(8, buf.DisplayCursorPosition()) applyEmacsControlKeyBinding(buf, prompt.ControlB) a.Equal(7, buf.DisplayCursorPosition()) a.Equal("iri", buf.Document().GetWordBeforeCursorWithSpace()) a.Equal("s", buf.Document().GetWordAfterCursorWithSpace()) applyEmacsControlKeyBinding(buf, prompt.ControlF) a.Equal(8, buf.DisplayCursorPosition()) applyEmacsControlKeyBinding(buf, prompt.ControlH) // Delete the character before cursor ('s') a.Equal(7, buf.DisplayCursorPosition()) a.Equal("iri", buf.Document().GetWordBeforeCursorWithSpace()) a.Equal("", buf.Document().GetWordAfterCursorWithSpace()) applyEmacsControlKeyBinding(buf, prompt.ControlW) // Cut the word before cursor ('iri') to the clipboard a.Equal(4, buf.DisplayCursorPosition()) a.Equal("USE ", buf.Document().GetWordBeforeCursorWithSpace()) a.Equal("", buf.Document().GetWordAfterCursorWithSpace()) applyEmacsControlKeyBinding(buf, prompt.ControlY) // Paste ('iri') back a.Equal(7, buf.DisplayCursorPosition()) a.Equal("iri", buf.Document().GetWordBeforeCursorWithSpace()) a.Equal("", buf.Document().GetWordAfterCursorWithSpace()) applyEmacsControlKeyBinding(buf, prompt.ControlB) // Move back a character (between 'ir' and 
'i') applyEmacsControlKeyBinding(buf, prompt.ControlK) // Cut the line after the cursor to the clipboard ('i') a.Equal(6, buf.DisplayCursorPosition()) a.Equal("ir", buf.Document().GetWordBeforeCursorWithSpace()) a.Equal("", buf.Document().GetWordAfterCursorWithSpace()) applyEmacsControlKeyBinding(buf, prompt.ControlY) // Paste ('i') back a.Equal(7, buf.DisplayCursorPosition()) a.Equal("iri", buf.Document().GetWordBeforeCursorWithSpace()) a.Equal("", buf.Document().GetWordAfterCursorWithSpace()) applyEmacsControlKeyBinding(buf, prompt.ControlU) // Cut the line before the cursor to the clipboard ('USE iri') a.Equal(0, buf.DisplayCursorPosition()) a.Equal("", buf.Document().GetWordBeforeCursorWithSpace()) a.Equal("", buf.Document().GetWordAfterCursorWithSpace()) applyEmacsControlKeyBinding(buf, prompt.ControlY) // Paste ('USE iri') back a.Equal(7, buf.DisplayCursorPosition()) a.Equal("iri", buf.Document().GetWordBeforeCursorWithSpace()) a.Equal("", buf.Document().GetWordAfterCursorWithSpace()) applyEmacsControlKeyBinding(buf, prompt.ControlB) // Move back a character (between 'ir' and 'i') applyEmacsControlKeyBinding(buf, prompt.ControlD) // Delete the word under the cursor ('i') a.Equal(6, buf.DisplayCursorPosition()) a.Equal("ir", buf.Document().GetWordBeforeCursorWithSpace()) a.Equal("", buf.Document().GetWordAfterCursorWithSpace()) applyEmacsMetaKeyBinding(buf, []byte{0x1b, 'b'}) // Move cursor left by a word (at 'i') a.Equal(4, buf.DisplayCursorPosition()) a.Equal("USE ", buf.Document().GetWordBeforeCursorWithSpace()) a.Equal("ir", buf.Document().GetWordAfterCursorWithSpace()) applyEmacsMetaKeyBinding(buf, []byte{0x1b, 'f'}) // Move back a.Equal(6, buf.DisplayCursorPosition()) a.Equal("ir", buf.Document().GetWordBeforeCursorWithSpace()) a.Equal("", buf.Document().GetWordAfterCursorWithSpace()) applyEmacsMetaKeyBinding(buf, []byte{0x1b, 'B'}) // Meta B/F a.Equal(4, buf.DisplayCursorPosition()) a.Equal("USE ", buf.Document().GetWordBeforeCursorWithSpace()) a.Equal("ir", buf.Document().GetWordAfterCursorWithSpace()) applyEmacsMetaKeyBinding(buf, []byte{0x1b, 'F'}) a.Equal(6, buf.DisplayCursorPosition()) a.Equal("ir", buf.Document().GetWordBeforeCursorWithSpace()) a.Equal("", buf.Document().GetWordAfterCursorWithSpace()) applyEmacsMetaKeyBinding(buf, []byte{0x1b, 0x1b, 0x5b, 0x44}) // Meta <-/-> a.Equal(4, buf.DisplayCursorPosition()) a.Equal("USE ", buf.Document().GetWordBeforeCursorWithSpace()) a.Equal("ir", buf.Document().GetWordAfterCursorWithSpace()) applyEmacsMetaKeyBinding(buf, []byte{0x1b, 0x1b, 0x5b, 0x43}) a.Equal(6, buf.DisplayCursorPosition()) a.Equal("ir", buf.Document().GetWordBeforeCursorWithSpace()) a.Equal("", buf.Document().GetWordAfterCursorWithSpace()) applyEmacsMetaKeyBinding(buf, []byte{0x1b, 0x7f}) // Cut the word before cursor ('ir') to the clipboard a.Equal(4, buf.DisplayCursorPosition()) a.Equal("USE ", buf.Document().GetWordBeforeCursorWithSpace()) a.Equal("", buf.Document().GetWordAfterCursorWithSpace()) applyEmacsControlKeyBinding(buf, prompt.ControlY) // Paste ('ir') back a.Equal(6, buf.DisplayCursorPosition()) a.Equal("ir", buf.Document().GetWordBeforeCursorWithSpace()) a.Equal("", buf.Document().GetWordAfterCursorWithSpace()) applyEmacsMetaKeyBinding(buf, []byte{0x1b, 'b'}) // Move cursor left by a word (at 'i') applyEmacsMetaKeyBinding(buf, []byte{0x1b, 'd'}) // Cut the word after cursor ('ir') to the clipboard a.Equal(4, buf.DisplayCursorPosition()) a.Equal("USE ", buf.Document().GetWordBeforeCursorWithSpace()) a.Equal("", 
buf.Document().GetWordAfterCursorWithSpace()) applyEmacsControlKeyBinding(buf, prompt.ControlY) // Paste ('ir') back a.Equal(6, buf.DisplayCursorPosition()) a.Equal("ir", buf.Document().GetWordBeforeCursorWithSpace()) a.Equal("", buf.Document().GetWordAfterCursorWithSpace()) applyEmacsMetaKeyBinding(buf, []byte{0x1b, 'b'}) // Move cursor left by a word (at 'i') applyEmacsMetaKeyBinding(buf, []byte{0x1b, 'D'}) // Cut the word after cursor ('ir') to the clipboard a.Equal(4, buf.DisplayCursorPosition()) a.Equal("USE ", buf.Document().GetWordBeforeCursorWithSpace()) a.Equal("", buf.Document().GetWordAfterCursorWithSpace()) applyEmacsControlKeyBinding(buf, prompt.ControlY) // Paste ('ir') back a.Equal(6, buf.DisplayCursorPosition()) a.Equal("ir", buf.Document().GetWordBeforeCursorWithSpace()) a.Equal("", buf.Document().GetWordAfterCursorWithSpace()) applyEmacsControlKeyBinding(buf, prompt.ControlL) // cls a.Equal(6, buf.DisplayCursorPosition()) a.Equal("ir", buf.Document().GetWordBeforeCursorWithSpace()) a.Equal("", buf.Document().GetWordAfterCursorWithSpace()) } func TestDotEnv(t *testing.T) { a := assert.New(t) f, e := os.OpenFile(".test_sqlflow_env", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644) a.NoError(e) defer f.Close() w := bufio.NewWriter(f) fmt.Fprintln(w, `# This is a .env config file SQLFLOW_TEST_DOT_ENV="Alien" # Alien is a famous movie`) w.Flush() a.Equal("", os.Getenv("SQLFLOW_TEST_DOT_ENV")) // Make sure repl does not fail when the .env file is not present initEnvFromFile("not_exist") a.Equal("", os.Getenv("SQLFLOW_TEST_DOT_ENV")) initEnvFromFile(".test_sqlflow_env") a.Equal("Alien", os.Getenv("SQLFLOW_TEST_DOT_ENV")) // Make sure the existing environment variables aren't affected when the .env file is not present initEnvFromFile("not_exist") a.Equal("Alien", os.Getenv("SQLFLOW_TEST_DOT_ENV")) } func TestStdinParser(t *testing.T) { a := assert.New(t) p := newTestConsoleParser() buf, e := p.Read() a.Nil(e) a.Equal("test multiple", string(buf)) buf, e = p.Read() a.Nil(e) a.Equal(prompt.Enter, prompt.GetKey(buf)) buf, e = p.Read() a.Nil(e) a.Equal("line paste", strings.TrimSpace(string(buf))) buf, e = p.Read() a.Nil(e) a.Equal("test multiple", string(buf)) } func TestGetServerAddrFromEnv(t *testing.T) { os.Setenv("SQLFLOW_SERVER", serverAddr) os.Setenv("SQLFLOW_DATASOURCE", dbConnStr) defer os.Unsetenv("SQLFLOW_SERVER") defer os.Unsetenv("SQLFLOW_DATASOURCE") stopServer := startServer() defer stopServer() waitForServer() a := assert.New(t) a.Nil(prepareTestDataOrSkip(t)) flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError) os.Args = []string{"", "-e", "use iris; show tables"} output, _ := step.GetStdout(func() error { main(); return nil }) a.Contains(output, ` +----------------+ | TABLES IN IRIS | +----------------+ | iris_empty | | test | | test_dense | | train | | train_dense | +----------------+`) } func TestIsExitStmt(t *testing.T) { a := assert.New(t) a.True(isExitStmt("exit")) a.True(isExitStmt("quit")) a.True(isExitStmt(" ExiT ; SELECT 1")) a.False(isExitStmt("SELECT 1; EXIT")) a.False(isExitStmt("QUIT SELECT 1")) } type testConsoleParser struct{} func (p *testConsoleParser) Read() ([]byte, error) { input := `test multiple line paste` return []byte(input), nil } // newStdinParser returns ConsoleParser object to read from stdin. 
func newTestConsoleParser() *stdinParser { return &stdinParser{ ConsoleParser: &testConsoleParser{}, } } func (p *testConsoleParser) Setup() error { return nil } func (p *testConsoleParser) TearDown() error { return nil } func (p *testConsoleParser) GetWinSize() *prompt.WinSize { return &prompt.WinSize{73, 238} }
[ "\"SQLFLOW_TEST_DB\"", "\"SQLFLOW_TEST_REPL_FAST_FAIL_INTERACTIVE_OR_NOT\"", "\"SQLFLOW_TEST_REPL_FAST_FAIL_INTERACTIVE_OR_NOT\"", "\"SQLFLOW_TEST_DOT_ENV\"", "\"SQLFLOW_TEST_DOT_ENV\"", "\"SQLFLOW_TEST_DOT_ENV\"", "\"SQLFLOW_TEST_DOT_ENV\"" ]
[]
[ "SQLFLOW_TEST_DB", "SQLFLOW_TEST_DOT_ENV", "SQLFLOW_TEST_REPL_FAST_FAIL_INTERACTIVE_OR_NOT" ]
[]
["SQLFLOW_TEST_DB", "SQLFLOW_TEST_DOT_ENV", "SQLFLOW_TEST_REPL_FAST_FAIL_INTERACTIVE_OR_NOT"]
go
3
0
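A minimal Python sketch (the record itself is Go) of the .env behaviour that TestDotEnv above exercises: KEY="VALUE" pairs are loaded, comments are skipped, and variables that are already set are never overridden. The file name and the parsing rules here are assumptions, not part of the record.

import os

def init_env_from_file(path):
    # A missing file is not an error, mirroring the "not_exist" case in the test.
    if not os.path.exists(path):
        return
    with open(path) as f:
        for line in f:
            line = line.split("#", 1)[0].strip()  # drop trailing comments (assumed syntax)
            if not line or "=" not in line:
                continue
            key, value = line.split("=", 1)
            os.environ.setdefault(key.strip(), value.strip().strip('"'))  # existing values win

init_env_from_file(".test_sqlflow_env")
print(os.environ.get("SQLFLOW_TEST_DOT_ENV"))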
audioset/dataset.py
import io import os import random import av from torch.utils.data import Dataset as TorchDataset, ConcatDataset, DistributedSampler, WeightedRandomSampler, RandomSampler import torch import numpy as np import sys sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) from audioset.audiodatasets import PreprocessDataset import h5py import augly.audio as audaugs LMODE = os.environ.get("LMODE", False) if LMODE: def LMODE_default_config(): cache_root_path = "/system/user/publicdata/CP/DCASE/cached_datasets/" def decode_mp3(mp3_arr): """ decodes an array if uint8 representing an mp3 file :rtype: np.array """ container = av.open(io.BytesIO(mp3_arr.tobytes())) stream = next(s for s in container.streams if s.type == 'audio') # print(stream) a = [] for i, packet in enumerate(container.demux(stream)): for frame in packet.decode(): a.append(frame.to_ndarray().reshape(-1)) waveform = np.concatenate(a) if waveform.dtype != 'float32': raise RuntimeError("Unexpected wave type") return waveform def pad_or_truncate(x, audio_length): """Pad all audio to specific length.""" if len(x) <= audio_length: return np.concatenate((x, np.zeros(audio_length - len(x), dtype=np.float32)), axis=0) else: return x[0: audio_length] def pydub_augment(waveform, gain_augment=7): if gain_augment: gain = torch.randint(gain_augment * 2, (1,)).item() - gain_augment amp = 10 ** (gain / 20) waveform = waveform * amp return waveform class MixupDataset(TorchDataset): """ Mixing Up wave forms """ def __init__(self, dataset, beta=2, rate=0.5): self.beta = beta self.rate = rate self.dataset = dataset print(f"Mixing up waveforms from dataset of len {len(dataset)}") def __getitem__(self, index): if torch.rand(1) < self.rate: x1, y1 = self.dataset[index] idx2 = torch.randint(len(self.dataset), (1,)).item() x2, y2 = self.dataset[idx2] l = np.random.beta(self.beta, self.beta) l = max(l, 1. - l) x1 = x1-x1.mean() x2 = x2-x2.mean() x = (x1 * l + x2 * (1. - l)) x = x - x.mean() return x, (y1 * l + y2 * (1. - l)) return self.dataset[index] def __len__(self): return len(self.dataset) class AudioSetDataset(TorchDataset): def __init__(self, hdf5_file, sample_rate=32000, classes_num=527, clip_length=10, augment=False, in_mem=False, extra_augment=False): """ Reads the mp3 bytes from HDF file decodes using av and returns a fixed length audio wav """ self.sample_rate = sample_rate self.hdf5_file = hdf5_file if in_mem: print("\nPreloading in memory\n") with open(hdf5_file, 'rb') as f: self.hdf5_file = io.BytesIO(f.read()) with h5py.File(hdf5_file, 'r') as f: self.length = len(f['audio_name']) print(f"Dataset from {hdf5_file} with length {self.length}.") self.dataset_file = None # lazy init self.clip_length = clip_length * sample_rate self.classes_num = classes_num self.augment = augment self.extra_augment = extra_augment if augment: print(f"Will agument data from {hdf5_file}") def open_hdf5(self): self.dataset_file = h5py.File(self.hdf5_file, 'r') def __len__(self): return self.length def __del__(self): if self.dataset_file is not None: self.dataset_file.close() self.dataset_file = None def __getitem__(self, index): """Load waveform and target of an audio clip. 
Args: meta: { 'hdf5_path': str, 'index_in_hdf5': int} Returns: data_dict: { 'audio_name': str, 'waveform': (clip_samples,), 'target': (classes_num,)} """ if self.dataset_file is None: self.open_hdf5() audio_name = self.dataset_file['audio_name'][index].decode() try: waveform = decode_mp3(self.dataset_file['mp3'][index]) except: print("Read Error:" + audio_name) index = random.randint(1,self.length-1) audio_name = self.dataset_file['audio_name'][index].decode() waveform = decode_mp3(self.dataset_file['mp3'][index]) #else: # waveform = decode_mp3(self.dataset_file['mp3'][index]) #waveform = decode_mp3(self.dataset_file['mp3'][index]) if self.augment: waveform = pydub_augment(waveform) waveform = self.resample(waveform) if self.extra_augment: Transforms = audaugs.Compose([ audaugs.AddBackgroundNoise(snr_level_db=random.uniform(0.0, 15.0), p=random.random()), audaugs.ChangeVolume(volume_db=random.uniform(-2.0, 2.0), p=random.random()), audaugs.HighPassFilter(cutoff_hz=random.sample([5000.0, 6000.0, 7000.0, 8000.0, 9000.0, 10000.0, 11000.0, 12000.0], 1)[0], p=random.random()), audaugs.LowPassFilter(cutoff_hz=random.sample([1000.0, 2000.0, 3000.0, 4000.0, 5000.0], 1)[0], p=random.random()), audaugs.Speed(factor=random.uniform(0.8, 1.2), p=random.random()), ]) waveform, _ = Transforms(waveform, self.sample_rate) if waveform.ndim > 1: waveform = waveform[0, :] waveform = pad_or_truncate(waveform, self.clip_length) if 'target' in self.dataset_file.keys(): target = self.dataset_file['target'][index] target = np.unpackbits(target, axis=-1, count=self.classes_num).astype(np.float32) else: target = None return waveform.reshape(1, -1), target def resample(self, waveform): """Resample. Args: waveform: (clip_samples,) Returns: (resampled_clip_samples,) """ if self.sample_rate == 32000: return waveform elif self.sample_rate == 16000: return waveform[0:: 2] elif self.sample_rate == 8000: return waveform[0:: 4] else: raise Exception('Incorrect sample rate!') def preload_mp3(balanced_train_hdf5, unbalanced_train_hdf5, num_of_classes): for hdf5_file in [balanced_train_hdf5, unbalanced_train_hdf5]: print(f"\n \n will now preload {hdf5_file} \n\n ") with h5py.File(hdf5_file, 'r') as dataset_file: target = dataset_file['mp3'][:] print(len(target)) print(f"\n \n done with {hdf5_file} \n\n ") return target[1000] def get_ft_cls_balanced_sample_weights(balanced_train_hdf5, unbalanced_train_hdf5, num_of_classes, sample_weight_offset=100, sample_weight_sum=True): """ :return: float tenosr of shape len(full_training_set) representing the weights of each sample. """ # the order of balanced_train_hdf5,unbalanced_train_hdf5 is important. # should match get_full_training_set all_y = [] for hdf5_file in [balanced_train_hdf5, unbalanced_train_hdf5]: with h5py.File(hdf5_file, 'r') as dataset_file: target = dataset_file['target'] target = np.unpackbits(target, axis=-1, count=num_of_classes) all_y.append(target) all_y = np.concatenate(all_y, axis=0) all_y = torch.as_tensor(all_y) per_class = all_y.long().sum(0).float().reshape(1, -1) # frequencies per class per_class = sample_weight_offset + per_class # offset low freq classes if sample_weight_offset > 0: print(f"Warning: sample_weight_offset={sample_weight_offset} minnow={per_class.min()}") per_class_weights = 1000. 
/ per_class all_weight = all_y * per_class_weights if sample_weight_sum: print("\nsample_weight_sum\n") all_weight = all_weight.sum(dim=1) else: all_weight, _ = all_weight.max(dim=1) return all_weight def get_ft_weighted_sampler(balanced_train_hdf5, unbalanced_train_hdf5, num_of_classes, epoch_len=100000, sampler_replace=False): samples_weights=get_ft_cls_balanced_sample_weights(balanced_train_hdf5, unbalanced_train_hdf5, num_of_classes) num_nodes = int(os.environ.get('num_nodes', 1)) ddp = int(os.environ.get('DDP', 1)) num_nodes = max(ddp, num_nodes) print("num_nodes= ", num_nodes) rank = int(os.environ.get('NODE_RANK', 0)) return DistributedSamplerWrapper(sampler=WeightedRandomSampler(samples_weights, num_samples=epoch_len, replacement=sampler_replace), dataset=range(epoch_len), num_replicas=num_nodes, rank=rank, ) def get_random_sampler(dataset, epoch_len=100000, sampler_replace=True): num_nodes = int(os.environ.get('num_nodes', 1)) ddp = int(os.environ.get('DDP', 1)) num_nodes = max(ddp, num_nodes) print("num_nodes= ", num_nodes) rank = int(os.environ.get('NODE_RANK', 0)) return DistributedSamplerWrapper(sampler=RandomSampler(data_source=dataset, num_samples=epoch_len, replacement=sampler_replace), dataset=range(epoch_len), num_replicas=num_nodes, rank=rank, ) def get_roll_func(axis=1, shift=None, shift_range=50): print("rolling...") def roll_func(b): x, y = b x = torch.as_tensor(x) sf = shift if shift is None: sf = int(np.random.random_integers(-shift_range, shift_range)) global FirstTime return x.roll(sf, axis), y return roll_func def get_base_training_set(balanced_train_hdf5, sample_rate=32000, classes_num=527, clip_length=10, augment=False, in_mem=False, extra_augment=True, roll=True, wavmix=True): ds = AudioSetDataset( hdf5_file=balanced_train_hdf5, sample_rate=sample_rate, classes_num=classes_num, clip_length=clip_length, augment=augment, in_mem=in_mem, extra_augment=extra_augment) if roll: ds = PreprocessDataset(ds, get_roll_func()) if wavmix: ds = MixupDataset(ds) return ds def get_full_training_set(balanced_train_hdf5, unbalanced_train_hdf5, sample_rate=32000, classes_num=527, clip_length=10, augment=False, in_mem=False, extra_augment=True, roll=True, wavmix=True): sets = [ AudioSetDataset( hdf5_file=balanced_train_hdf5, sample_rate=sample_rate, classes_num=classes_num, clip_length=clip_length, augment=augment, in_mem=in_mem, extra_augment=extra_augment ), AudioSetDataset( hdf5_file=unbalanced_train_hdf5, sample_rate=sample_rate, classes_num=classes_num, clip_length=clip_length, augment=augment, in_mem=in_mem, extra_augment=extra_augment )] ds = ConcatDataset(sets) if roll: ds = PreprocessDataset(ds, get_roll_func()) if wavmix: ds = MixupDataset(ds) return ds def get_test_set(eval_hdf5, sample_rate=32000, classes_num=527, clip_length=10): ds = AudioSetDataset( hdf5_file=eval_hdf5, sample_rate=sample_rate, classes_num=classes_num, clip_length=clip_length, augment=False, in_mem=False, extra_augment=False) return ds def get_other_sets(others_hdf5_path, use_audioset, balanced_train_hdf5, unbalanced_train_hdf5, sample_rate=32000, classes_num=527, clip_length=10, augment=False, in_mem=False, extra_augment=True, roll=True, wavmix=True): sets = [] for root, dirs, files in os.walk(others_hdf5_path, topdown=False): for name in files: if name[-3:] == 'hdf': sets.append(AudioSetDataset( hdf5_file=os.path.join(root, name), sample_rate=sample_rate, classes_num=classes_num, clip_length=clip_length, augment=augment, in_mem=in_mem, extra_augment=extra_augment)) if use_audioset: 
sets.append(AudioSetDataset( hdf5_file=balanced_train_hdf5, sample_rate=sample_rate, classes_num=classes_num, clip_length=clip_length, augment=augment, in_mem=in_mem, extra_augment=extra_augment)) sets.append(AudioSetDataset( hdf5_file=unbalanced_train_hdf5, sample_rate=sample_rate, classes_num=classes_num, clip_length=clip_length, augment=augment, in_mem=in_mem, extra_augment=extra_augment)) sets.append(AudioSetDataset( hdf5_file=eval_hdf5, sample_rate=sample_rate, classes_num=classes_num, clip_length=clip_length, augment=augment, in_mem=in_mem, extra_augment=extra_augment)) ds = ConcatDataset(sets) if roll: ds = PreprocessDataset(ds, get_roll_func()) if wavmix: ds = MixupDataset(ds) return ds class DistributedSamplerWrapper(DistributedSampler): def __init__( self, sampler, dataset, num_replicas=None, rank=None, shuffle: bool = True): super(DistributedSamplerWrapper, self).__init__( dataset, num_replicas, rank, shuffle) # source: @awaelchli https://github.com/PyTorchLightning/pytorch-lightning/issues/3238 self.sampler = sampler def __iter__(self): if self.sampler.generator is None: self.sampler.generator = torch.Generator() self.sampler.generator.manual_seed(self.seed + self.epoch) #print(self.sampler) indices = list(self.sampler) if self.epoch == 0: print(f"\n DistributedSamplerWrapper : {indices[:10]} \n\n") indices = indices[self.rank:self.total_size:self.num_replicas] return iter(indices) if __name__ == "__main__": name = 'audioset' # dataset name roll = True # apply roll augmentation wavmix = True # apply wave-level mixup base_dir = "/data/dean/whl/audioset_Kong/" # base directory of the dataset, change it or make a link if LMODE: base_dir = "/system/user/publicdata/CP/audioset/audioset_hdf5s/" balanced_train_hdf5 = base_dir + "mp3/balanced_train_segments_mp3.hdf" eval_hdf5 = base_dir + "mp3/eval_segments_mp3.hdf" unbalanced_train_hdf5 = base_dir + "mp3/unbalanced_train_segments_mp3.hdf" if LMODE: balanced_train_hdf5 = balanced_train_hdf5.replace(base_dir, os.environ.get("TMPDIR", base_dir)+"/") unbalanced_train_hdf5 = unbalanced_train_hdf5.replace(base_dir, os.environ.get("TMPDIR", base_dir)+"/") eval_hdf5 = eval_hdf5.replace(base_dir, os.environ.get("TMPDIR", base_dir)+"/") num_of_classes = 527 print("get_base_test_set", len(get_test_set(eval_hdf5))) print("get_full_training_set", len(get_full_training_set(balanced_train_hdf5, unbalanced_train_hdf5)))
[]
[]
[ "num_nodes", "NODE_RANK", "DDP", "LMODE", "TMPDIR" ]
[]
["num_nodes", "NODE_RANK", "DDP", "LMODE", "TMPDIR"]
python
5
0
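A minimal sketch mirroring how the record above resolves the distributed-sampler world size and rank from num_nodes, DDP and NODE_RANK, with the same defaults; the helper name is an assumption.

import os

def resolve_world():
    num_nodes = int(os.environ.get("num_nodes", 1))
    ddp = int(os.environ.get("DDP", 1))
    rank = int(os.environ.get("NODE_RANK", 0))
    return max(ddp, num_nodes), rank  # number of replicas, this node's rank

print(resolve_world())  # (1, 0) unless the variables are set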
electrumsv_sdk/builtin_components/node/node.py
import logging import os from argparse import ArgumentParser from pathlib import Path from typing import Optional, Tuple, List, Set from electrumsv_sdk.sdk_types import AbstractPlugin from electrumsv_sdk.config import CLIInputs, Config from electrumsv_sdk.components import Component, ComponentTypedDict, ComponentMetadata from electrumsv_sdk.utils import get_directory_name from electrumsv_sdk.plugin_tools import PluginTools from electrumsv_node import electrumsv_node from .local_tools import LocalTools def extend_start_cli(start_parser: ArgumentParser) -> Tuple[ArgumentParser, List[str]]: """if this method is present it allows extension of the start argparser only. This occurs dynamically and adds the new cli options as attributes of the CLIInputs object""" start_parser.add_argument("--regtest", action="store_true", help="run on regtest") start_parser.add_argument("--testnet", action="store_true", help="run on testnet") # variable names to be pulled from the start_parser new_options = ['regtest', 'testnet'] return start_parser, new_options class Plugin(AbstractPlugin): BITCOIN_NETWORK = os.getenv("BITCOIN_NETWORK", "regtest") DEFAULT_PORT = 18332 DEFAULT_P2P_PORT = 18444 DEFAULT_ZMQ_PORT = 28332 # if ports == None -> set by deterministic port allocation NODE_PORT: int = int(os.environ.get("NODE_PORT") or DEFAULT_PORT) NODE_P2P_PORT: int = int(os.environ.get("NODE_P2P_PORT") or DEFAULT_P2P_PORT) NODE_ZMQ_PORT: int = int(os.environ.get("NODE_ZMQ_PORT") or DEFAULT_ZMQ_PORT) NODE_RPCALLOWIP = os.environ.get("NODE_RPCALLOWIP") # else 127.0.0.1 NODE_RPCBIND = os.environ.get("NODE_RPCBIND") RESERVED_PORTS: Set[int] = {DEFAULT_PORT, DEFAULT_P2P_PORT} COMPONENT_NAME = get_directory_name(__file__) def __init__(self, cli_inputs: CLIInputs): self.cli_inputs = cli_inputs self.config = Config() self.plugin_tools = PluginTools(self, self.cli_inputs) self.tools = LocalTools(self) self.logger = logging.getLogger(self.COMPONENT_NAME) self.src = Path(electrumsv_node.FILE_PATH).parent self.datadir: Optional[Path] = None # dynamically allocated self.id: Optional[str] = None # dynamically allocated self.port: Optional[int] = None # dynamically allocated self.p2p_port: Optional[int] = None # dynamically allocated self.zmq_port: Optional[int] = None # dynamically allocated self.component_info: Optional[Component] = None self.network = self.BITCOIN_NETWORK def install(self) -> None: """The node component has a pip installer at https://pypi.org/project/electrumsv-node/ and only official releases from pypi are supported""" self.plugin_tools.modify_pythonpath_for_portability(self.src) self.tools.fetch_node() self.logger.debug(f"Installed {self.COMPONENT_NAME}") def start(self) -> None: self.plugin_tools.modify_pythonpath_for_portability(self.src) # env vars take precedence for port and dbdir self.datadir, self.id = self.plugin_tools.allocate_datadir_and_id() self.tools.process_cli_args() # cli args may override network in env vars if self.NODE_PORT: self.port = self.NODE_PORT else: self.port = self.plugin_tools.allocate_port() if self.NODE_P2P_PORT: self.p2p_port = self.NODE_P2P_PORT else: self.p2p_port = self.plugin_tools.get_component_port(self.DEFAULT_P2P_PORT, self.COMPONENT_NAME, self.id) if self.NODE_ZMQ_PORT: self.zmq_port = self.NODE_ZMQ_PORT else: self.zmq_port = self.plugin_tools.get_component_port(self.DEFAULT_ZMQ_PORT, self.COMPONENT_NAME, self.id) extra_params = [] if self.NODE_RPCALLOWIP: extra_params.append(f"-rpcallowip={self.NODE_RPCALLOWIP}") if self.NODE_RPCBIND: 
extra_params.append(f"-rpcbind={self.NODE_RPCBIND}") shell_command = electrumsv_node.shell_command(data_path=str(self.datadir), rpcport=self.port, p2p_port=self.p2p_port, zmq_port=self.zmq_port, network=self.network, print_to_console=True, extra_params=extra_params) command = " ".join(shell_command) logfile = self.plugin_tools.get_logfile_path(self.id) self.plugin_tools.spawn_process(command, env_vars=os.environ.copy(), id=self.id, component_name=self.COMPONENT_NAME, src=self.src, logfile=logfile, status_endpoint=f"http://rpcuser:[email protected]:{self.port}", metadata=ComponentMetadata( datadir=str(self.datadir), rpcport=self.port, p2p_port=self.p2p_port ) ) if electrumsv_node.is_node_running(): return else: self.logger.exception("node failed to start") def stop(self) -> None: """The bitcoin node requires graceful shutdown via the RPC API - a good example of why this entrypoint is provided for user customizations (rather than always killing the process).""" def stop_node(component_dict: ComponentTypedDict) -> None: metadata = component_dict.get("metadata", {}) assert metadata is not None # typing bug rpcport = metadata.get("rpcport") if not rpcport: raise Exception("rpcport data not found") electrumsv_node.stop(rpcport=rpcport) self.plugin_tools.call_for_component_id_or_type(self.COMPONENT_NAME, callable=stop_node) self.logger.info(f"stopped selected {self.COMPONENT_NAME} instance (if running)") def reset(self) -> None: def reset_node(component_dict: ComponentTypedDict) -> None: metadata = component_dict.get("metadata", {}) assert metadata is not None # typing bug rpcport = metadata.get('rpcport') datadir = metadata.get("datadir") if not rpcport: raise Exception("rpcport data not found") electrumsv_node.reset(data_path=datadir, rpcport=rpcport) self.plugin_tools.call_for_component_id_or_type(self.COMPONENT_NAME, callable=reset_node) self.logger.info("Reset of RegTest bitcoin daemon completed successfully.")
[]
[]
[ "NODE_ZMQ_PORT", "BITCOIN_NETWORK", "NODE_P2P_PORT", "NODE_RPCALLOWIP", "NODE_RPCBIND", "NODE_PORT" ]
[]
["NODE_ZMQ_PORT", "BITCOIN_NETWORK", "NODE_P2P_PORT", "NODE_RPCALLOWIP", "NODE_RPCBIND", "NODE_PORT"]
python
6
0
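Illustrative only: the env-var-overrides-default port pattern used in the record above. The default ports (18332, 18444, 28332) come from the record; the helper function is an assumption.

import os

def port_from_env(name, default):
    value = os.environ.get(name)
    return int(value) if value else default

NODE_PORT = port_from_env("NODE_PORT", 18332)
NODE_P2P_PORT = port_from_env("NODE_P2P_PORT", 18444)
NODE_ZMQ_PORT = port_from_env("NODE_ZMQ_PORT", 28332)
print(NODE_PORT, NODE_P2P_PORT, NODE_ZMQ_PORT)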
src/robusta/runner/log_init.py
import logging
import os
import os.path

import colorlog


def init_logging():
    logging_level = os.environ.get("LOG_LEVEL", "INFO")
    logging_format = "%(log_color)s%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s"
    logging_datefmt = "%Y-%m-%d %H:%M:%S"

    if os.environ.get("ENABLE_COLORED_LOGS", "false").lower() == "true":
        print("setting up colored logging")
        colorlog.basicConfig(
            format=logging_format, level=logging_level, datefmt=logging_datefmt
        )
    else:
        print("setting up regular logging")
        logging.basicConfig(
            format=logging_format, level=logging_level, datefmt=logging_datefmt
        )

    logging.getLogger().setLevel(logging_level)

    for logger_name in ["werkzeug", "telethon"]:
        log = logging.getLogger(logger_name)
        log.setLevel(logging.ERROR)

    logging.info(f"logger initialized using {logging_level} log level")
[]
[]
[ "ENABLE_COLORED_LOGS", "LOG_LEVEL" ]
[]
["ENABLE_COLORED_LOGS", "LOG_LEVEL"]
python
2
0
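A minimal sketch, not the record's code: the same two environment variables drive the log level and the colored/plain choice; colorlog is omitted here so the snippet has no extra dependency.

import logging
import os

level = os.environ.get("LOG_LEVEL", "INFO")
colored = os.environ.get("ENABLE_COLORED_LOGS", "false").lower() == "true"
logging.basicConfig(level=level, format="%(asctime)s %(levelname)-8s %(message)s")
logging.info("colored logging requested: %s", colored)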
commands/qotd/qotd.go
package qotd

import (
	"errors"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
	"time"

	"gopkg.in/yaml.v2"

	"github.com/forestgiant/go-simpletime"
	"github.com/jesselucas/slackcmd/slack"
	"github.com/jesselucas/validator"
)

// Command struct is only defined to add the Request method
type Command struct {
}

// Request is used to send back to slackcmd
func (cmd *Command) Request(sc *slack.SlashCommand) (*slack.CommandPayload, error) {
	// read credentials from environment variables
	slackAPIKey := os.Getenv("SLACK_KEY_QOTD")

	// Verify the request is coming from Slack
	if sc.Token != slackAPIKey {
		err := errors.New("Unauthorized Slack")
		return nil, err
	}

	// create payload
	cp := &slack.CommandPayload{
		Channel:       fmt.Sprintf("@%v", sc.UserName),
		Username:      "QOTD",
		Emoji:         ":question:",
		SlashResponse: true,
		SendPayload:   false,
	}

	// url for QOTD YAML
	url := os.Getenv("QOTD_URL")
	if !validator.IsURL(url) {
		return nil, errors.New("QOTD_URL is not a valid URL")
	}

	// Check the request error before touching the response body
	res, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()

	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, err
	}

	// Unmarshal YAML
	var questions []string
	err = yaml.Unmarshal(body, &questions)
	if err != nil {
		return nil, err
	}

	// Get todays index
	index := getTodaysIndex(uint(len(questions)))

	cp.Text = "QOTD: " + questions[index]

	return cp, nil
}

// getTodaysIndex subtracts the startDate from today's date to get the
// difference in days and then takes that offset modulo the number
// of questions
func getTodaysIndex(length uint) uint {
	startDate := time.Date(2016, 3, 16, 0, 0, 0, 0, time.UTC)

	// Find out day offset of today from the startDate
	offsetDuration := simpletime.NewSimpleTime(startDate).Since(time.Now())
	offsetDays := offsetDuration.Days()
	fmt.Println(offsetDays)

	return uint(offsetDays) % length
}
[ "\"SLACK_KEY_QOTD\"", "\"QOTD_URL\"" ]
[]
[ "QOTD_URL", "SLACK_KEY_QOTD" ]
[]
["QOTD_URL", "SLACK_KEY_QOTD"]
go
2
0
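A Python sketch (the record is Go) of getTodaysIndex above: days elapsed since the fixed start date, modulo the number of questions. The start date is taken from the record; the rest is illustrative.

import os
from datetime import datetime, timezone

def todays_index(num_questions, start=datetime(2016, 3, 16, tzinfo=timezone.utc)):
    offset_days = (datetime.now(timezone.utc) - start).days
    return offset_days % num_questions

slack_key = os.environ.get("SLACK_KEY_QOTD")  # token check in the record
qotd_url = os.environ.get("QOTD_URL")         # URL of the YAML question list
print(todays_index(30))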
reinforcement_learning/rl_deepracer_robomaker_coach_gazebo/src/markov/evaluation_worker.py
'''This module is responsible for launching evaluation jobs''' import argparse import json import logging import os import time import rospy from rl_coach.base_parameters import TaskParameters from rl_coach.core_types import EnvironmentSteps from rl_coach.data_stores.data_store import SyncFiles from markov import utils from markov.log_handler.logger import Logger from markov.log_handler.exception_handler import log_and_exit from markov.log_handler.constants import (SIMAPP_SIMULATION_WORKER_EXCEPTION, SIMAPP_EVENT_ERROR_CODE_500) from markov.constants import SIMAPP_VERSION_2, DEFAULT_PARK_POSITION, ROLLOUT_WORKER_PROFILER_PATH from markov.agent_ctrl.constants import ConfigParams from markov.agents.rollout_agent_factory import create_rollout_agent, create_obstacles_agent, create_bot_cars_agent from markov.agents.utils import RunPhaseSubject from markov.defaults import reward_function from markov.log_handler.deepracer_exceptions import GenericRolloutError, GenericRolloutException from markov.environments.constants import VELOCITY_TOPICS, STEERING_TOPICS, LINK_NAMES from markov.metrics.s3_metrics import EvalMetrics from markov.metrics.iteration_data import IterationData from markov.metrics.constants import MetricsS3Keys from markov.s3_boto_data_store import S3BotoDataStore, S3BotoDataStoreParameters from markov.sagemaker_graph_manager import get_graph_manager from markov.rollout_utils import (PhaseObserver, signal_robomaker_markov_package_ready, configure_environment_randomizer, get_robomaker_profiler_env) from markov.rospy_wrappers import ServiceProxyWrapper from markov.camera_utils import configure_camera from markov.track_geom.track_data import TrackData from markov.track_geom.utils import get_start_positions from markov.s3.constants import (MODEL_METADATA_LOCAL_PATH_FORMAT, MODEL_METADATA_S3_POSTFIX, SIMTRACE_EVAL_LOCAL_PATH_FORMAT, CAMERA_PIP_MP4_LOCAL_PATH_FORMAT, CAMERA_45DEGREE_LOCAL_PATH_FORMAT, CAMERA_TOPVIEW_LOCAL_PATH_FORMAT, SimtraceVideoNames) from markov.s3.files.model_metadata import ModelMetadata from markov.s3.files.simtrace_video import SimtraceVideo from markov.s3.files.checkpoint import Checkpoint from markov.s3.utils import get_s3_key from std_srvs.srv import Empty, EmptyRequest logger = Logger(__name__, logging.INFO).get_logger() MIN_RESET_COUNT = 10000 #TODO: change when console passes float("inf") IS_PROFILER_ON, PROFILER_S3_BUCKET, PROFILER_S3_PREFIX = get_robomaker_profiler_env() def evaluation_worker(graph_manager, number_of_trials, task_parameters, simtrace_video_s3_writers, is_continuous, park_positions): """ Evaluation worker function Arguments: graph_manager(MultiAgentGraphManager): Graph manager of multiagent graph manager number_of_trials(int): Number of trails you want to run the evaluation task_parameters(TaskParameters): Information of the checkpoint, gpu/cpu, framework etc of rlcoach simtrace_video_s3_writers(list): Information to upload to the S3 bucket all the simtrace and mp4 is_continuous(bool): The termination condition for the car park_positions(list of tuple): list of (x, y) for cars to park at """ # Collect profiler information only IS_PROFILER_ON is true with utils.Profiler(s3_bucket=PROFILER_S3_BUCKET, s3_prefix=PROFILER_S3_PREFIX, output_local_path=ROLLOUT_WORKER_PROFILER_PATH, enable_profiling=IS_PROFILER_ON): subscribe_to_save_mp4_topic, unsubscribe_from_save_mp4_topic = list(), list() subscribe_to_save_mp4, unsubscribe_from_save_mp4 = list(), list() for agent_param in graph_manager.agents_params: racecar_name = 'racecar' if 
len(agent_param.name.split("_")) == 1 \ else "racecar_{}".format(agent_param.name.split("_")[1]) subscribe_to_save_mp4_topic.append("/{}/save_mp4/subscribe_to_save_mp4".format(racecar_name)) unsubscribe_from_save_mp4_topic.append("/{}/save_mp4/unsubscribe_from_save_mp4".format(racecar_name)) graph_manager.data_store.wait_for_checkpoints() graph_manager.data_store.modify_checkpoint_variables() # Make the clients that will allow us to pause and unpause the physics rospy.wait_for_service('/gazebo/pause_physics_dr') rospy.wait_for_service('/gazebo/unpause_physics_dr') pause_physics = ServiceProxyWrapper('/gazebo/pause_physics_dr', Empty) unpause_physics = ServiceProxyWrapper('/gazebo/unpause_physics_dr', Empty) for mp4_sub, mp4_unsub in zip(subscribe_to_save_mp4_topic, unsubscribe_from_save_mp4_topic): rospy.wait_for_service(mp4_sub) rospy.wait_for_service(mp4_unsub) for mp4_sub, mp4_unsub in zip(subscribe_to_save_mp4_topic, unsubscribe_from_save_mp4_topic): subscribe_to_save_mp4.append(ServiceProxyWrapper(mp4_sub, Empty)) unsubscribe_from_save_mp4.append(ServiceProxyWrapper(mp4_unsub, Empty)) graph_manager.create_graph(task_parameters=task_parameters, stop_physics=pause_physics, start_physics=unpause_physics, empty_service_call=EmptyRequest) logger.info("Graph manager successfully created the graph: Unpausing physics") unpause_physics(EmptyRequest()) is_save_mp4_enabled = rospy.get_param('MP4_S3_BUCKET', None) if is_save_mp4_enabled: for subscribe_mp4 in subscribe_to_save_mp4: subscribe_mp4(EmptyRequest()) configure_environment_randomizer() track_data = TrackData.get_instance() # Before each evaluation episode (single lap for non-continuous race and complete race for # continuous race), a new copy of park_positions needs to be loaded into track_data because # a park position will be pop from park_positions when a racer car need to be parked. 
if is_continuous: track_data.park_positions = park_positions graph_manager.evaluate(EnvironmentSteps(1)) else: for _ in range(number_of_trials): track_data.park_positions = park_positions graph_manager.evaluate(EnvironmentSteps(1)) if is_save_mp4_enabled: for unsubscribe_mp4 in unsubscribe_from_save_mp4: unsubscribe_mp4(EmptyRequest()) # upload simtrace and mp4 into s3 bucket for s3_writer in simtrace_video_s3_writers: s3_writer.persist(utils.get_s3_kms_extra_args()) time.sleep(1) pause_physics(EmptyRequest()) # Close the down the job utils.cancel_simulation_job(os.environ.get('AWS_ROBOMAKER_SIMULATION_JOB_ARN'), rospy.get_param('AWS_REGION')) def main(): """ Main function for evaluation worker """ parser = argparse.ArgumentParser() parser.add_argument('-p', '--preset', help="(string) Name of a preset to run \ (class name from the 'presets' directory.)", type=str, required=False) parser.add_argument('--s3_bucket', help='list(string) S3 bucket', type=str, nargs='+', default=rospy.get_param("MODEL_S3_BUCKET", ["gsaur-test"])) parser.add_argument('--s3_prefix', help='list(string) S3 prefix', type=str, nargs='+', default=rospy.get_param("MODEL_S3_PREFIX", ["sagemaker"])) parser.add_argument('--aws_region', help='(string) AWS region', type=str, default=rospy.get_param("AWS_REGION", "us-east-1")) parser.add_argument('--number_of_trials', help='(integer) Number of trials', type=int, default=int(rospy.get_param("NUMBER_OF_TRIALS", 10))) parser.add_argument('-c', '--local_model_directory', help='(string) Path to a folder containing a checkpoint \ to restore the model from.', type=str, default='./checkpoint') parser.add_argument('--number_of_resets', help='(integer) Number of resets', type=int, default=int(rospy.get_param("NUMBER_OF_RESETS", 0))) parser.add_argument('--penalty_seconds', help='(float) penalty second', type=float, default=float(rospy.get_param("PENALTY_SECONDS", 2.0))) parser.add_argument('--job_type', help='(string) job type', type=str, default=rospy.get_param("JOB_TYPE", "EVALUATION")) parser.add_argument('--is_continuous', help='(boolean) is continous after lap completion', type=bool, default=utils.str2bool(rospy.get_param("IS_CONTINUOUS", False))) parser.add_argument('--race_type', help='(string) Race type', type=str, default=rospy.get_param("RACE_TYPE", "TIME_TRIAL")) parser.add_argument('--off_track_penalty', help='(float) off track penalty second', type=float, default=float(rospy.get_param("OFF_TRACK_PENALTY", 2.0))) parser.add_argument('--collision_penalty', help='(float) collision penalty second', type=float, default=float(rospy.get_param("COLLISION_PENALTY", 5.0))) args = parser.parse_args() arg_s3_bucket = args.s3_bucket arg_s3_prefix = args.s3_prefix logger.info("S3 bucket: %s \n S3 prefix: %s", arg_s3_bucket, arg_s3_prefix) metrics_s3_buckets = rospy.get_param('METRICS_S3_BUCKET') metrics_s3_object_keys = rospy.get_param('METRICS_S3_OBJECT_KEY') arg_s3_bucket, arg_s3_prefix = utils.force_list(arg_s3_bucket), utils.force_list(arg_s3_prefix) metrics_s3_buckets = utils.force_list(metrics_s3_buckets) metrics_s3_object_keys = utils.force_list(metrics_s3_object_keys) validate_list = [arg_s3_bucket, arg_s3_prefix, metrics_s3_buckets, metrics_s3_object_keys] simtrace_s3_bucket = rospy.get_param('SIMTRACE_S3_BUCKET', None) mp4_s3_bucket = rospy.get_param('MP4_S3_BUCKET', None) if simtrace_s3_bucket: simtrace_s3_object_prefix = rospy.get_param('SIMTRACE_S3_PREFIX') simtrace_s3_bucket = utils.force_list(simtrace_s3_bucket) simtrace_s3_object_prefix = 
utils.force_list(simtrace_s3_object_prefix) validate_list.extend([simtrace_s3_bucket, simtrace_s3_object_prefix]) if mp4_s3_bucket: mp4_s3_object_prefix = rospy.get_param('MP4_S3_OBJECT_PREFIX') mp4_s3_bucket = utils.force_list(mp4_s3_bucket) mp4_s3_object_prefix = utils.force_list(mp4_s3_object_prefix) validate_list.extend([mp4_s3_bucket, mp4_s3_object_prefix]) if not all([lambda x: len(x) == len(validate_list[0]), validate_list]): log_and_exit("Eval worker error: Incorrect arguments passed: {}" .format(validate_list), SIMAPP_SIMULATION_WORKER_EXCEPTION, SIMAPP_EVENT_ERROR_CODE_500) if args.number_of_resets != 0 and args.number_of_resets < MIN_RESET_COUNT: raise GenericRolloutException("number of resets is less than {}".format(MIN_RESET_COUNT)) # Instantiate Cameras if len(arg_s3_bucket) == 1: configure_camera(namespaces=['racecar']) else: configure_camera(namespaces=[ 'racecar_{}'.format(str(agent_index)) for agent_index in range(len(arg_s3_bucket))]) agent_list = list() s3_bucket_dict = dict() s3_prefix_dict = dict() checkpoint_dict = dict() simtrace_video_s3_writers = [] start_positions = get_start_positions(len(arg_s3_bucket)) done_condition = utils.str_to_done_condition(rospy.get_param("DONE_CONDITION", any)) park_positions = utils.pos_2d_str_to_list(rospy.get_param("PARK_POSITIONS", [])) # if not pass in park positions for all done condition case, use default if not park_positions: park_positions = [DEFAULT_PARK_POSITION for _ in arg_s3_bucket] for agent_index, _ in enumerate(arg_s3_bucket): agent_name = 'agent' if len(arg_s3_bucket) == 1 else 'agent_{}'.format(str(agent_index)) racecar_name = 'racecar' if len(arg_s3_bucket) == 1 else 'racecar_{}'.format(str(agent_index)) s3_bucket_dict[agent_name] = arg_s3_bucket[agent_index] s3_prefix_dict[agent_name] = arg_s3_prefix[agent_index] # download model metadata model_metadata = ModelMetadata(bucket=arg_s3_bucket[agent_index], s3_key=get_s3_key(arg_s3_prefix[agent_index], MODEL_METADATA_S3_POSTFIX), region_name=args.aws_region, local_path=MODEL_METADATA_LOCAL_PATH_FORMAT.format(agent_name)) _, _, version = model_metadata.get_model_metadata_info() # checkpoint s3 instance checkpoint = Checkpoint(bucket=arg_s3_bucket[agent_index], s3_prefix=arg_s3_prefix[agent_index], region_name=args.aws_region, agent_name=agent_name, checkpoint_dir=args.local_model_directory) # make coach checkpoint compatible if version < SIMAPP_VERSION_2 and not checkpoint.rl_coach_checkpoint.is_compatible(): checkpoint.rl_coach_checkpoint.make_compatible(checkpoint.syncfile_ready) # get best model checkpoint string model_checkpoint_name = checkpoint.deepracer_checkpoint_json.get_deepracer_best_checkpoint() # Select the best checkpoint model by uploading rl coach .coach_checkpoint file checkpoint.rl_coach_checkpoint.update( model_checkpoint_name=model_checkpoint_name, s3_kms_extra_args=utils.get_s3_kms_extra_args()) checkpoint_dict[agent_name] = checkpoint agent_config = { 'model_metadata': model_metadata, ConfigParams.CAR_CTRL_CONFIG.value: { ConfigParams.LINK_NAME_LIST.value: [ link_name.replace('racecar', racecar_name) for link_name in LINK_NAMES], ConfigParams.VELOCITY_LIST.value: [ velocity_topic.replace('racecar', racecar_name) for velocity_topic in VELOCITY_TOPICS], ConfigParams.STEERING_LIST.value: [ steering_topic.replace('racecar', racecar_name) for steering_topic in STEERING_TOPICS], ConfigParams.CHANGE_START.value: utils.str2bool(rospy.get_param('CHANGE_START_POSITION', False)), ConfigParams.ALT_DIR.value: 
utils.str2bool(rospy.get_param('ALTERNATE_DRIVING_DIRECTION', False)), ConfigParams.ACTION_SPACE_PATH.value: model_metadata.local_path, ConfigParams.REWARD.value: reward_function, ConfigParams.AGENT_NAME.value: racecar_name, ConfigParams.VERSION.value: version, ConfigParams.NUMBER_OF_RESETS.value: args.number_of_resets, ConfigParams.PENALTY_SECONDS.value: args.penalty_seconds, ConfigParams.NUMBER_OF_TRIALS.value: args.number_of_trials, ConfigParams.IS_CONTINUOUS.value: args.is_continuous, ConfigParams.RACE_TYPE.value: args.race_type, ConfigParams.COLLISION_PENALTY.value: args.collision_penalty, ConfigParams.OFF_TRACK_PENALTY.value: args.off_track_penalty, ConfigParams.START_POSITION.value: start_positions[agent_index], ConfigParams.DONE_CONDITION.value: done_condition}} metrics_s3_config = {MetricsS3Keys.METRICS_BUCKET.value: metrics_s3_buckets[agent_index], MetricsS3Keys.METRICS_KEY.value: metrics_s3_object_keys[agent_index], # Replaced rospy.get_param('AWS_REGION') to be equal to the argument being passed # or default argument set MetricsS3Keys.REGION.value: args.aws_region} aws_region = rospy.get_param('AWS_REGION', args.aws_region) if simtrace_s3_bucket: simtrace_video_s3_writers.append( SimtraceVideo(upload_type=SimtraceVideoNames.SIMTRACE_EVAL.value, bucket=simtrace_s3_bucket[agent_index], s3_prefix=simtrace_s3_object_prefix[agent_index], region_name=aws_region, local_path=SIMTRACE_EVAL_LOCAL_PATH_FORMAT.format(agent_name))) if mp4_s3_bucket: simtrace_video_s3_writers.extend([ SimtraceVideo(upload_type=SimtraceVideoNames.PIP.value, bucket=mp4_s3_bucket[agent_index], s3_prefix=mp4_s3_object_prefix[agent_index], region_name=aws_region, local_path=CAMERA_PIP_MP4_LOCAL_PATH_FORMAT.format(agent_name)), SimtraceVideo(upload_type=SimtraceVideoNames.DEGREE45.value, bucket=mp4_s3_bucket[agent_index], s3_prefix=mp4_s3_object_prefix[agent_index], region_name=aws_region, local_path=CAMERA_45DEGREE_LOCAL_PATH_FORMAT.format(agent_name)), SimtraceVideo(upload_type=SimtraceVideoNames.TOPVIEW.value, bucket=mp4_s3_bucket[agent_index], s3_prefix=mp4_s3_object_prefix[agent_index], region_name=aws_region, local_path=CAMERA_TOPVIEW_LOCAL_PATH_FORMAT.format(agent_name))]) run_phase_subject = RunPhaseSubject() agent_list.append(create_rollout_agent(agent_config, EvalMetrics(agent_name, metrics_s3_config, args.is_continuous), run_phase_subject)) agent_list.append(create_obstacles_agent()) agent_list.append(create_bot_cars_agent()) # ROS service to indicate all the robomaker markov packages are ready for consumption signal_robomaker_markov_package_ready() PhaseObserver('/agent/training_phase', run_phase_subject) enable_domain_randomization = utils.str2bool(rospy.get_param('ENABLE_DOMAIN_RANDOMIZATION', False)) sm_hyperparams_dict = {} graph_manager, _ = get_graph_manager(hp_dict=sm_hyperparams_dict, agent_list=agent_list, run_phase_subject=run_phase_subject, enable_domain_randomization=enable_domain_randomization, done_condition=done_condition) ds_params_instance = S3BotoDataStoreParameters(checkpoint_dict=checkpoint_dict) graph_manager.data_store = S3BotoDataStore(params=ds_params_instance, graph_manager=graph_manager, ignore_lock=True) graph_manager.env_params.seed = 0 task_parameters = TaskParameters() task_parameters.checkpoint_restore_path = args.local_model_directory evaluation_worker( graph_manager=graph_manager, number_of_trials=args.number_of_trials, task_parameters=task_parameters, simtrace_video_s3_writers=simtrace_video_s3_writers, is_continuous=args.is_continuous, park_positions=park_positions ) 
if __name__ == '__main__': try: rospy.init_node('rl_coach', anonymous=True) main() except ValueError as err: if utils.is_user_error(err): log_and_exit("User modified model/model_metadata: {}".format(err), SIMAPP_SIMULATION_WORKER_EXCEPTION, SIMAPP_EVENT_ERROR_CODE_500) else: log_and_exit("Eval worker value error: {}".format(err), SIMAPP_SIMULATION_WORKER_EXCEPTION, SIMAPP_EVENT_ERROR_CODE_500) except GenericRolloutError as ex: ex.log_except_and_exit() except GenericRolloutException as ex: ex.log_except_and_exit() except Exception as ex: log_and_exit("Eval worker error: {}".format(ex), SIMAPP_SIMULATION_WORKER_EXCEPTION, SIMAPP_EVENT_ERROR_CODE_500)
[]
[]
[ "AWS_ROBOMAKER_SIMULATION_JOB_ARN" ]
[]
["AWS_ROBOMAKER_SIMULATION_JOB_ARN"]
python
1
0
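A hedged sketch of the single environment variable captured above: the simulation job ARN is read before a cancel call. The boto3 client and the region fallback are assumptions, not code from the record.

import os

job_arn = os.environ.get("AWS_ROBOMAKER_SIMULATION_JOB_ARN")
if job_arn:
    import boto3  # assumed to be installed
    client = boto3.client("robomaker", region_name=os.environ.get("AWS_REGION", "us-east-1"))
    client.cancel_simulation_job(job=job_arn)
else:
    print("AWS_ROBOMAKER_SIMULATION_JOB_ARN is not set; nothing to cancel")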
lspy/src/bot.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Bottle is a fast and simple micro-framework for small web applications. It offers request dispatching (Routes) with url parameter support, templates, a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and template engines - all in a single file and with no dependencies other than the Python Standard Library. Homepage and documentation: http://bottlepy.org/ Copyright (c) 2016, Marcel Hellkamp. License: MIT (see LICENSE for details) """ from __future__ import with_statement __author__ = 'Marcel Hellkamp' __version__ = '0.12.17' __license__ = 'MIT' # The gevent server adapter needs to patch some modules before they are imported # This is why we parse the commandline parameters here but handle them later if __name__ == '__main__': from optparse import OptionParser _cmd_parser = OptionParser(usage="usage: %prog [options] package.module:app") _opt = _cmd_parser.add_option _opt("--version", action="store_true", help="show version number.") _opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.") _opt("-s", "--server", default='wsgiref', help="use SERVER as backend.") _opt("-p", "--plugin", action="append", help="install additional plugin/s.") _opt("--debug", action="store_true", help="start server in debug mode.") _opt("--reload", action="store_true", help="auto-reload on file changes.") _cmd_options, _cmd_args = _cmd_parser.parse_args() if _cmd_options.server and _cmd_options.server.startswith('gevent'): import gevent.monkey; gevent.monkey.patch_all() import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes, \ os, re, subprocess, sys, tempfile, threading, time, warnings from datetime import date as datedate, datetime, timedelta from tempfile import TemporaryFile from traceback import format_exc, print_exc from inspect import getargspec from unicodedata import normalize try: from simplejson import dumps as json_dumps, loads as json_lds except ImportError: # pragma: no cover try: from json import dumps as json_dumps, loads as json_lds except ImportError: try: from django.utils.simplejson import dumps as json_dumps, loads as json_lds except ImportError: def json_dumps(data): raise ImportError("JSON support requires Python 2.6 or simplejson.") json_lds = json_dumps # We now try to fix 2.5/2.6/3.1/3.2 incompatibilities. # It ain't pretty but it works... Sorry for the mess. py = sys.version_info py3k = py >= (3, 0, 0) py25 = py < (2, 6, 0) py31 = (3, 1, 0) <= py < (3, 2, 0) # Workaround for the missing "as" keyword in py3k. def _e(): return sys.exc_info()[1] # Workaround for the "print is a keyword/function" Python 2/3 dilemma # and a fallback for mod_wsgi (resticts stdout/err attribute access) try: _stdout, _stderr = sys.stdout.write, sys.stderr.write except IOError: _stdout = lambda x: sys.stdout.write(x) _stderr = lambda x: sys.stderr.write(x) # Lots of stdlib and builtin differences. 
if py3k: import http.client as httplib import _thread as thread from urllib.parse import urljoin, SplitResult as UrlSplitResult from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote urlunquote = functools.partial(urlunquote, encoding='latin1') from http.cookies import SimpleCookie from collections import MutableMapping as DictMixin import pickle from io import BytesIO from configparser import ConfigParser basestring = str unicode = str json_loads = lambda s: json_lds(touni(s)) callable = lambda x: hasattr(x, '__call__') imap = map def _raise(*a): raise a[0](a[1]).with_traceback(a[2]) else: # 2.x import httplib import thread from urlparse import urljoin, SplitResult as UrlSplitResult from urllib import urlencode, quote as urlquote, unquote as urlunquote from Cookie import SimpleCookie from itertools import imap import cPickle as pickle from StringIO import StringIO as BytesIO from ConfigParser import SafeConfigParser as ConfigParser if py25: msg = "Python 2.5 support may be dropped in future versions of Bottle." warnings.warn(msg, DeprecationWarning) from UserDict import DictMixin def next(it): return it.next() bytes = str else: # 2.6, 2.7 from collections import MutableMapping as DictMixin unicode = unicode json_loads = json_lds eval(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec')) # Some helpers for string/byte handling def tob(s, enc='utf8'): return s.encode(enc) if isinstance(s, unicode) else bytes(s) def touni(s, enc='utf8', err='strict'): return s.decode(enc, err) if isinstance(s, bytes) else unicode(s) tonat = touni if py3k else tob # 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense). # 3.1 needs a workaround. if py31: from io import TextIOWrapper class NCTextIOWrapper(TextIOWrapper): def close(self): pass # Keep wrapped buffer open. # A bug in functools causes it to break if the wrapper is an instance method def update_wrapper(wrapper, wrapped, *a, **ka): try: functools.update_wrapper(wrapper, wrapped, *a, **ka) except AttributeError: pass # These helpers are used at module level and need to be defined first. # And yes, I know PEP-8, but sometimes a lower-case classname makes more sense. def depr(message, hard=False): warnings.warn(message, DeprecationWarning, stacklevel=3) def makelist(data): # This is just to handy if isinstance(data, (tuple, list, set, dict)): return list(data) elif data: return [data] else: return [] class DictProperty(object): ''' Property that maps to a key in a local dict-like attribute. ''' def __init__(self, attr, key=None, read_only=False): self.attr, self.key, self.read_only = attr, key, read_only def __call__(self, func): functools.update_wrapper(self, func, updated=[]) self.getter, self.key = func, self.key or func.__name__ return self def __get__(self, obj, cls): if obj is None: return self key, storage = self.key, getattr(obj, self.attr) if key not in storage: storage[key] = self.getter(obj) return storage[key] def __set__(self, obj, value): if self.read_only: raise AttributeError("Read-Only property.") getattr(obj, self.attr)[self.key] = value def __delete__(self, obj): if self.read_only: raise AttributeError("Read-Only property.") del getattr(obj, self.attr)[self.key] class cached_property(object): ''' A property that is only computed once per instance and then replaces itself with an ordinary attribute. Deleting the attribute resets the property. 
''' def __init__(self, func): self.__doc__ = getattr(func, '__doc__') self.func = func def __get__(self, obj, cls): if obj is None: return self value = obj.__dict__[self.func.__name__] = self.func(obj) return value class lazy_attribute(object): ''' A property that caches itself to the class object. ''' def __init__(self, func): functools.update_wrapper(self, func, updated=[]) self.getter = func def __get__(self, obj, cls): value = self.getter(cls) setattr(cls, self.__name__, value) return value ############################################################################### # Exceptions and Events ######################################################## ############################################################################### class BottleException(Exception): """ A base class for exceptions used by bottle. """ pass ############################################################################### # Routing ###################################################################### ############################################################################### class RouteError(BottleException): """ This is a base class for all routing related exceptions """ class RouteReset(BottleException): """ If raised by a plugin or request handler, the route is reset and all plugins are re-applied. """ class RouterUnknownModeError(RouteError): pass class RouteSyntaxError(RouteError): """ The route parser found something not supported by this router. """ class RouteBuildError(RouteError): """ The route could not be built. """ def _re_flatten(p): ''' Turn all capturing groups in a regular expression pattern into non-capturing groups. ''' if '(' not in p: return p return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))', lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:', p) class Router(object): ''' A Router is an ordered collection of route->target pairs. It is used to efficiently match WSGI requests against a number of routes and return the first target that satisfies the request. The target may be anything, usually a string, ID or callable object. A route consists of a path-rule and a HTTP method. The path-rule is either a static path (e.g. `/contact`) or a dynamic path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax and details on the matching order are described in docs:`routing`. ''' default_pattern = '[^/]+' default_filter = 're' #: The current CPython regexp implementation does not allow more #: than 99 matching groups per regular expression. _MAX_GROUPS_PER_PATTERN = 99 def __init__(self, strict=False): self.rules = [] # All rules in order self._groups = {} # index of regexes to find them in dyna_routes self.builder = {} # Data structure for the url builder self.static = {} # Search structure for static routes self.dyna_routes = {} self.dyna_regexes = {} # Search structure for dynamic routes #: If true, static routes are no longer checked first. self.strict_order = strict self.filters = { 're': lambda conf: (_re_flatten(conf or self.default_pattern), None, None), 'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))), 'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))), 'path': lambda conf: (r'.+?', None, None)} def add_filter(self, name, func): ''' Add a filter. The provided function is called with the configuration string as parameter and must return a (regexp, to_python, to_url) tuple. The first element is a string, the last two are callables or None. 
''' self.filters[name] = func rule_syntax = re.compile('(\\\\*)' \ '(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)' \ '|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)' \ '(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))') def _itertokens(self, rule): offset, prefix = 0, '' for match in self.rule_syntax.finditer(rule): prefix += rule[offset:match.start()] g = match.groups() if len(g[0]) % 2: # Escaped wildcard prefix += match.group(0)[len(g[0]):] offset = match.end() continue if prefix: yield prefix, None, None name, filtr, conf = g[4:7] if g[2] is None else g[1:4] yield name, filtr or 'default', conf or None offset, prefix = match.end(), '' if offset <= len(rule) or prefix: yield prefix + rule[offset:], None, None def add(self, rule, method, target, name=None): ''' Add a new rule or replace the target for an existing rule. ''' anons = 0 # Number of anonymous wildcards found keys = [] # Names of keys pattern = '' # Regular expression pattern with named groups filters = [] # Lists of wildcard input filters builder = [] # Data structure for the URL builder is_static = True for key, mode, conf in self._itertokens(rule): if mode: is_static = False if mode == 'default': mode = self.default_filter mask, in_filter, out_filter = self.filters[mode](conf) if not key: pattern += '(?:%s)' % mask key = 'anon%d' % anons anons += 1 else: pattern += '(?P<%s>%s)' % (key, mask) keys.append(key) if in_filter: filters.append((key, in_filter)) builder.append((key, out_filter or str)) elif key: pattern += re.escape(key) builder.append((None, key)) self.builder[rule] = builder if name: self.builder[name] = builder if is_static and not self.strict_order: self.static.setdefault(method, {}) self.static[method][self.build(rule)] = (target, None) return try: re_pattern = re.compile('^(%s)$' % pattern) re_match = re_pattern.match except re.error: raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, _e())) if filters: def getargs(path): url_args = re_match(path).groupdict() for name, wildcard_filter in filters: try: url_args[name] = wildcard_filter(url_args[name]) except ValueError: raise HTTPError(400, 'Path has wrong format.') return url_args elif re_pattern.groupindex: def getargs(path): return re_match(path).groupdict() else: getargs = None flatpat = _re_flatten(pattern) whole_rule = (rule, flatpat, target, getargs) if (flatpat, method) in self._groups: if DEBUG: msg = 'Route <%s %s> overwrites a previously defined route' warnings.warn(msg % (method, rule), RuntimeWarning) self.dyna_routes[method][self._groups[flatpat, method]] = whole_rule else: self.dyna_routes.setdefault(method, []).append(whole_rule) self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1 self._compile(method) def _compile(self, method): all_rules = self.dyna_routes[method] comborules = self.dyna_regexes[method] = [] maxgroups = self._MAX_GROUPS_PER_PATTERN for x in range(0, len(all_rules), maxgroups): some = all_rules[x:x + maxgroups] combined = (flatpat for (_, flatpat, _, _) in some) combined = '|'.join('(^%s$)' % flatpat for flatpat in combined) combined = re.compile(combined).match rules = [(target, getargs) for (_, _, target, getargs) in some] comborules.append((combined, rules)) def build(self, _name, *anons, **query): ''' Build an URL by filling the wildcards in a rule. 
''' builder = self.builder.get(_name) if not builder: raise RouteBuildError("No route with that name.", _name) try: for i, value in enumerate(anons): query['anon%d' % i] = value url = ''.join([f(query.pop(n)) if n else f for (n, f) in builder]) return url if not query else url + '?' + urlencode(query) except KeyError: raise RouteBuildError('Missing URL argument: %r' % _e().args[0]) def match(self, environ): ''' Return a (target, url_agrs) tuple or raise HTTPError(400/404/405). ''' verb = environ['REQUEST_METHOD'].upper() path = environ['PATH_INFO'] or '/' target = None if verb == 'HEAD': methods = ['PROXY', verb, 'GET', 'ANY'] else: methods = ['PROXY', verb, 'ANY'] for method in methods: if method in self.static and path in self.static[method]: target, getargs = self.static[method][path] return target, getargs(path) if getargs else {} elif method in self.dyna_regexes: for combined, rules in self.dyna_regexes[method]: match = combined(path) if match: target, getargs = rules[match.lastindex - 1] return target, getargs(path) if getargs else {} # No matching route found. Collect alternative methods for 405 response allowed = set([]) nocheck = set(methods) for method in set(self.static) - nocheck: if path in self.static[method]: allowed.add(verb) for method in set(self.dyna_regexes) - allowed - nocheck: for combined, rules in self.dyna_regexes[method]: match = combined(path) if match: allowed.add(method) if allowed: allow_header = ",".join(sorted(allowed)) raise HTTPError(405, "Method not allowed.", Allow=allow_header) # No matching route and no alternative method found. We give up raise HTTPError(404, "Not found: " + repr(path)) class Route(object): ''' This class wraps a route callback along with route specific metadata and configuration and applies Plugins on demand. It is also responsible for turing an URL path rule into a regular expression usable by the Router. ''' def __init__(self, app, rule, method, callback, name=None, plugins=None, skiplist=None, **config): #: The application this route is installed to. self.app = app #: The path-rule string (e.g. ``/wiki/:page``). self.rule = rule #: The HTTP method as a string (e.g. ``GET``). self.method = method #: The original callback with no plugins applied. Useful for introspection. self.callback = callback #: The name of the route (if specified) or ``None``. self.name = name or None #: A list of route-specific plugins (see :meth:`Bottle.route`). self.plugins = plugins or [] #: A list of plugins to not apply to this route (see :meth:`Bottle.route`). self.skiplist = skiplist or [] #: Additional keyword arguments passed to the :meth:`Bottle.route` #: decorator are stored in this dictionary. Used for route-specific #: plugin configuration and meta-data. self.config = ConfigDict().load_dict(config, make_namespaces=True) def __call__(self, *a, **ka): depr("Some APIs changed to return Route() instances instead of" \ " callables. Make sure to use the Route.call method and not to" \ " call Route instances directly.") # 0.12 return self.call(*a, **ka) @cached_property def call(self): ''' The route callback with all plugins applied. This property is created on demand and then cached to speed up subsequent requests.''' return self._make_callback() def reset(self): ''' Forget any cached values. The next time :attr:`call` is accessed, all plugins are re-applied. 
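            Illustrative use, e.g. after changing route configuration at
            runtime::

                route.config['template'] = 'other_view'
                route.reset()   # the cached callback is rebuilt on next access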
'''
        self.__dict__.pop('call', None)

    def prepare(self):
        ''' Do all on-demand work immediately (useful for debugging).'''
        self.call

    @property
    def _context(self):
        depr('Switch to Plugin API v2 and access the Route object directly.') # 0.12
        return dict(rule=self.rule, method=self.method, callback=self.callback,
                    name=self.name, app=self.app, config=self.config,
                    apply=self.plugins, skip=self.skiplist)

    def all_plugins(self):
        ''' Yield all Plugins affecting this route. '''
        unique = set()
        for p in reversed(self.app.plugins + self.plugins):
            if True in self.skiplist: break
            name = getattr(p, 'name', False)
            if name and (name in self.skiplist or name in unique): continue
            if p in self.skiplist or type(p) in self.skiplist: continue
            if name: unique.add(name)
            yield p

    def _make_callback(self):
        callback = self.callback
        for plugin in self.all_plugins():
            try:
                if hasattr(plugin, 'apply'):
                    api = getattr(plugin, 'api', 1)
                    context = self if api > 1 else self._context
                    callback = plugin.apply(callback, context)
                else:
                    callback = plugin(callback)
            except RouteReset: # Try again with changed configuration.
                return self._make_callback()
            if not callback is self.callback:
                update_wrapper(callback, self.callback)
        return callback

    def get_undecorated_callback(self):
        ''' Return the callback. If the callback is a decorated function, try to
            recover the original function. '''
        func = self.callback
        func = getattr(func, '__func__' if py3k else 'im_func', func)
        closure_attr = '__closure__' if py3k else 'func_closure'
        while hasattr(func, closure_attr) and getattr(func, closure_attr):
            func = getattr(func, closure_attr)[0].cell_contents
        return func

    def get_callback_args(self):
        ''' Return a list of argument names the callback (most likely) accepts
            as keyword arguments. If the callback is a decorated function, try
            to recover the original function before inspection. '''
        return getargspec(self.get_undecorated_callback())[0]

    def get_config(self, key, default=None):
        ''' Lookup a config field and return its value, first checking the
            route.config, then route.app.config.'''
        for conf in (self.config, self.app.config):
            if key in conf: return conf[key]
        return default

    def __repr__(self):
        cb = self.get_undecorated_callback()
        return '<%s %r %r>' % (self.method, self.rule, cb)


###############################################################################
# Application Object ###########################################################
###############################################################################


class Bottle(object):
    """ Each Bottle object represents a single, distinct web application and
        consists of routes, callbacks, plugins, resources and configuration.
        Instances are callable WSGI applications.

        :param catchall: If true (default), handle all exceptions. Turn off to
                         let debugging middleware handle exceptions.
    """

    def __init__(self, catchall=True, autojson=True):

        #: A :class:`ConfigDict` for app specific configuration.
        self.config = ConfigDict()
        self.config._on_change = functools.partial(self.trigger_hook, 'config')
        self.config.meta_set('autojson', 'validate', bool)
        self.config.meta_set('catchall', 'validate', bool)
        self.config['catchall'] = catchall
        self.config['autojson'] = autojson

        #: A :class:`ResourceManager` for application files
        self.resources = ResourceManager()

        self.routes = [] # List of installed :class:`Route` instances.
        self.router = Router() # Maps requests to :class:`Route` instances.
        self.error_handler = {}

        # Core plugins
        self.plugins = [] # List of installed plugins.
if self.config['autojson']: self.install(JSONPlugin()) self.install(TemplatePlugin()) #: If true, most exceptions are caught and returned as :exc:`HTTPError` catchall = DictProperty('config', 'catchall') __hook_names = 'before_request', 'after_request', 'app_reset', 'config' __hook_reversed = 'after_request' @cached_property def _hooks(self): return dict((name, []) for name in self.__hook_names) def add_hook(self, name, func): ''' Attach a callback to a hook. Three hooks are currently implemented: before_request Executed once before each request. The request context is available, but no routing has happened yet. after_request Executed once after each request regardless of its outcome. app_reset Called whenever :meth:`Bottle.reset` is called. ''' if name in self.__hook_reversed: self._hooks[name].insert(0, func) else: self._hooks[name].append(func) def remove_hook(self, name, func): ''' Remove a callback from a hook. ''' if name in self._hooks and func in self._hooks[name]: self._hooks[name].remove(func) return True def trigger_hook(self, __name, *args, **kwargs): ''' Trigger a hook and return a list of results. ''' return [hook(*args, **kwargs) for hook in self._hooks[__name][:]] def hook(self, name): """ Return a decorator that attaches a callback to a hook. See :meth:`add_hook` for details.""" def decorator(func): self.add_hook(name, func) return func return decorator def mount(self, prefix, app, **options): ''' Mount an application (:class:`Bottle` or plain WSGI) to a specific URL prefix. Example:: root_app.mount('/admin/', admin_app) :param prefix: path prefix or `mount-point`. If it ends in a slash, that slash is mandatory. :param app: an instance of :class:`Bottle` or a WSGI application. All other parameters are passed to the underlying :meth:`route` call. ''' if isinstance(app, basestring): depr('Parameter order of Bottle.mount() changed.', True) # 0.10 segments = [p for p in prefix.split('/') if p] if not segments: raise ValueError('Empty path prefix.') path_depth = len(segments) def mountpoint_wrapper(): try: request.path_shift(path_depth) rs = HTTPResponse([]) def start_response(status, headerlist, exc_info=None): if exc_info: try: _raise(*exc_info) finally: exc_info = None rs.status = status for name, value in headerlist: rs.add_header(name, value) return rs.body.append body = app(request.environ, start_response) if body and rs.body: body = itertools.chain(rs.body, body) rs.body = body or rs.body return rs finally: request.path_shift(-path_depth) options.setdefault('skip', True) options.setdefault('method', 'PROXY') options.setdefault('mountpoint', {'prefix': prefix, 'target': app}) options['callback'] = mountpoint_wrapper self.route('/%s/<:re:.*>' % '/'.join(segments), **options) if not prefix.endswith('/'): self.route('/' + '/'.join(segments), **options) def merge(self, routes): ''' Merge the routes of another :class:`Bottle` application or a list of :class:`Route` objects into this application. The routes keep their 'owner', meaning that the :data:`Route.app` attribute is not changed. ''' if isinstance(routes, Bottle): routes = routes.routes for route in routes: self.add_route(route) def install(self, plugin): ''' Add a plugin to the list of plugins and prepare it for being applied to all routes of this application. A plugin may be a simple decorator or an object that implements the :class:`Plugin` API. 
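            A minimal decorator-style plugin as a sketch (the header name and
            the timing logic are illustrative)::

                def stopwatch(callback):
                    def wrapper(*args, **kwargs):
                        start = time.time()
                        body = callback(*args, **kwargs)
                        response.set_header('X-Exec-Time', str(time.time() - start))
                        return body
                    return wrapper

                app.install(stopwatch)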
''' if hasattr(plugin, 'setup'): plugin.setup(self) if not callable(plugin) and not hasattr(plugin, 'apply'): raise TypeError("Plugins must be callable or implement .apply()") self.plugins.append(plugin) self.reset() return plugin def uninstall(self, plugin): ''' Uninstall plugins. Pass an instance to remove a specific plugin, a type object to remove all plugins that match that type, a string to remove all plugins with a matching ``name`` attribute or ``True`` to remove all plugins. Return the list of removed plugins. ''' removed, remove = [], plugin for i, plugin in list(enumerate(self.plugins))[::-1]: if remove is True or remove is plugin or remove is type(plugin) \ or getattr(plugin, 'name', True) == remove: removed.append(plugin) del self.plugins[i] if hasattr(plugin, 'close'): plugin.close() if removed: self.reset() return removed def reset(self, route=None): ''' Reset all routes (force plugins to be re-applied) and clear all caches. If an ID or route object is given, only that specific route is affected. ''' if route is None: routes = self.routes elif isinstance(route, Route): routes = [route] else: routes = [self.routes[route]] for route in routes: route.reset() if DEBUG: for route in routes: route.prepare() self.trigger_hook('app_reset') def close(self): ''' Close the application and all installed plugins. ''' for plugin in self.plugins: if hasattr(plugin, 'close'): plugin.close() self.stopped = True def run(self, **kwargs): ''' Calls :func:`run` with the same parameters. ''' run(self, **kwargs) def match(self, environ): """ Search for a matching route and return a (:class:`Route` , urlargs) tuple. The second value is a dictionary with parameters extracted from the URL. Raise :exc:`HTTPError` (404/405) on a non-match.""" return self.router.match(environ) def get_url(self, routename, **kargs): """ Return a string that matches a named route """ scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/' location = self.router.build(routename, **kargs).lstrip('/') return urljoin(urljoin('/', scriptname), location) def add_route(self, route): ''' Add a route object, but do not change the :data:`Route.app` attribute.''' self.routes.append(route) self.router.add(route.rule, route.method, route, name=route.name) if DEBUG: route.prepare() def route(self, path=None, method='GET', callback=None, name=None, apply=None, skip=None, **config): """ A decorator to bind a function to a request URL. Example:: @app.route('/hello/:name') def hello(name): return 'Hello %s' % name The ``:name`` part is a wildcard. See :class:`Router` for syntax details. :param path: Request path or a list of paths to listen to. If no path is specified, it is automatically generated from the signature of the function. :param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of methods to listen to. (default: `GET`) :param callback: An optional shortcut to avoid the decorator syntax. ``route(..., callback=func)`` equals ``route(...)(func)`` :param name: The name for this route. (default: None) :param apply: A decorator or plugin or a list of plugins. These are applied to the route callback in addition to installed plugins. :param skip: A list of plugins, plugin classes or names. Matching plugins are not installed to this route. ``True`` skips all. Any additional keyword arguments are stored as route-specific configuration and passed to plugins (see :meth:`Plugin.apply`). 
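            A slightly larger sketch (paths, names and callbacks are
            illustrative)::

                app = Bottle()

                @app.route('/upload', method=['GET', 'POST'], name='do_upload')
                def upload():
                    return 'OK'

                @app.route('/form')
                def form():
                    # Named routes can be resolved while handling a request.
                    return 'post to %s' % app.get_url('do_upload')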
""" if callable(path): path, callback = None, path plugins = makelist(apply) skiplist = makelist(skip) def decorator(callback): # TODO: Documentation and tests if isinstance(callback, basestring): callback = load(callback) for rule in makelist(path) or yieldroutes(callback): for verb in makelist(method): verb = verb.upper() route = Route(self, rule, verb, callback, name=name, plugins=plugins, skiplist=skiplist, **config) self.add_route(route) return callback return decorator(callback) if callback else decorator def get(self, path=None, method='GET', **options): """ Equals :meth:`route`. """ return self.route(path, method, **options) def post(self, path=None, method='POST', **options): """ Equals :meth:`route` with a ``POST`` method parameter. """ return self.route(path, method, **options) def put(self, path=None, method='PUT', **options): """ Equals :meth:`route` with a ``PUT`` method parameter. """ return self.route(path, method, **options) def delete(self, path=None, method='DELETE', **options): """ Equals :meth:`route` with a ``DELETE`` method parameter. """ return self.route(path, method, **options) def error(self, code=500): """ Decorator: Register an output handler for a HTTP error code""" def wrapper(handler): self.error_handler[int(code)] = handler return handler return wrapper def default_error_handler(self, res): return tob(template(ERROR_PAGE_TEMPLATE, e=res)) def _handle(self, environ): path = environ['bottle.raw_path'] = environ['PATH_INFO'] if py3k: try: environ['PATH_INFO'] = path.encode('latin1').decode('utf8') except UnicodeError: return HTTPError(400, 'Invalid path string. Expected UTF-8') try: environ['bottle.app'] = self request.bind(environ) response.bind() try: self.trigger_hook('before_request') route, args = self.router.match(environ) environ['route.handle'] = route environ['bottle.route'] = route environ['route.url_args'] = args return route.call(**args) finally: self.trigger_hook('after_request') except HTTPResponse: return _e() except RouteReset: route.reset() return self._handle(environ) except (KeyboardInterrupt, SystemExit, MemoryError): raise except Exception: if not self.catchall: raise stacktrace = format_exc() environ['wsgi.errors'].write(stacktrace) return HTTPError(500, "Internal Server Error", _e(), stacktrace) def _cast(self, out, peek=None): """ Try to convert the parameter into something WSGI compatible and set correct HTTP headers when possible. Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like, iterable of strings and iterable of unicodes """ # Empty output is done here if not out: if 'Content-Length' not in response: response['Content-Length'] = 0 return [] # Join lists of byte or unicode strings. Mixed lists are NOT supported if isinstance(out, (tuple, list)) \ and isinstance(out[0], (bytes, unicode)): out = out[0][0:0].join(out) # b'abc'[0:0] -> b'' # Encode unicode strings if isinstance(out, unicode): out = out.encode(response.charset) # Byte Strings are just returned if isinstance(out, bytes): if 'Content-Length' not in response: response['Content-Length'] = len(out) return [out] # HTTPError or HTTPException (recursive, because they may wrap anything) # TODO: Handle these explicitly in handle() or make them iterable. if isinstance(out, HTTPError): out.apply(response) out = self.error_handler.get(out.status_code, self.default_error_handler)(out) return self._cast(out) if isinstance(out, HTTPResponse): out.apply(response) return self._cast(out.body) # File-like objects. 
if hasattr(out, 'read'): if 'wsgi.file_wrapper' in request.environ: return request.environ['wsgi.file_wrapper'](out) elif hasattr(out, 'close') or not hasattr(out, '__iter__'): return WSGIFileWrapper(out) # Handle Iterables. We peek into them to detect their inner type. try: iout = iter(out) first = next(iout) while not first: first = next(iout) except StopIteration: return self._cast('') except HTTPResponse: first = _e() except (KeyboardInterrupt, SystemExit, MemoryError): raise except Exception: if not self.catchall: raise first = HTTPError(500, 'Unhandled exception', _e(), format_exc()) # These are the inner types allowed in iterator or generator objects. if isinstance(first, HTTPResponse): return self._cast(first) elif isinstance(first, bytes): new_iter = itertools.chain([first], iout) elif isinstance(first, unicode): encoder = lambda x: x.encode(response.charset) new_iter = imap(encoder, itertools.chain([first], iout)) else: msg = 'Unsupported response type: %s' % type(first) return self._cast(HTTPError(500, msg)) if hasattr(out, 'close'): new_iter = _closeiter(new_iter, out.close) return new_iter def wsgi(self, environ, start_response): """ The bottle WSGI-interface. """ try: out = self._cast(self._handle(environ)) # rfc2616 section 4.3 if response._status_code in (100, 101, 204, 304) \ or environ['REQUEST_METHOD'] == 'HEAD': if hasattr(out, 'close'): out.close() out = [] start_response(response._status_line, response.headerlist) return out except (KeyboardInterrupt, SystemExit, MemoryError): raise except Exception: if not self.catchall: raise err = '<h1>Critical error while processing request: %s</h1>' \ % html_escape(environ.get('PATH_INFO', '/')) if DEBUG: err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \ '<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \ % (html_escape(repr(_e())), html_escape(format_exc())) environ['wsgi.errors'].write(err) headers = [('Content-Type', 'text/html; charset=UTF-8')] start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info()) return [tob(err)] def __call__(self, environ, start_response): ''' Each instance of :class:'Bottle' is a WSGI application. ''' return self.wsgi(environ, start_response) ############################################################################### # HTTP and WSGI Tools ########################################################## ############################################################################### class BaseRequest(object): """ A wrapper for WSGI environment dictionaries that adds a lot of convenient access methods and properties. Most of them are read-only. Adding new attributes to a request actually adds them to the environ dictionary (as 'bottle.request.ext.<name>'). This is the recommended way to store and access request-specific data. """ __slots__ = ('environ') #: Maximum size of memory buffer for :attr:`body` in bytes. MEMFILE_MAX = 102400 def __init__(self, environ=None): """ Wrap a WSGI environ dictionary. """ #: The wrapped WSGI environ dictionary. This is the only real attribute. #: All other attributes actually are read-only properties. self.environ = {} if environ is None else environ self.environ['bottle.request'] = self @DictProperty('environ', 'bottle.app', read_only=True) def app(self): ''' Bottle application handling this request. ''' raise RuntimeError('This request is not connected to an application.') @DictProperty('environ', 'bottle.route', read_only=True) def route(self): """ The bottle :class:`Route` object that matches this request. 
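            Illustrative use inside a request handler::

                @app.route('/wiki/<page>')
                def wiki(page):
                    return 'matched rule %s with %r' % (
                        request.route.rule, request.url_args)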
""" raise RuntimeError('This request is not connected to a route.') @DictProperty('environ', 'route.url_args', read_only=True) def url_args(self): """ The arguments extracted from the URL. """ raise RuntimeError('This request is not connected to a route.') @property def path(self): ''' The value of ``PATH_INFO`` with exactly one prefixed slash (to fix broken clients and avoid the "empty path" edge case). ''' return '/' + self.environ.get('PATH_INFO', '').lstrip('/') @property def method(self): ''' The ``REQUEST_METHOD`` value as an uppercase string. ''' return self.environ.get('REQUEST_METHOD', 'GET').upper() @DictProperty('environ', 'bottle.request.headers', read_only=True) def headers(self): ''' A :class:`WSGIHeaderDict` that provides case-insensitive access to HTTP request headers. ''' return WSGIHeaderDict(self.environ) def get_header(self, name, default=None): ''' Return the value of a request header, or a given default value. ''' return self.headers.get(name, default) @DictProperty('environ', 'bottle.request.cookies', read_only=True) def cookies(self): """ Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT decoded. Use :meth:`get_cookie` if you expect signed cookies. """ cookies = SimpleCookie(self.environ.get('HTTP_COOKIE', '')).values() return FormsDict((c.key, c.value) for c in cookies) def get_cookie(self, key, default=None, secret=None): """ Return the content of a cookie. To read a `Signed Cookie`, the `secret` must match the one used to create the cookie (see :meth:`BaseResponse.set_cookie`). If anything goes wrong (missing cookie or wrong signature), return a default value. """ value = self.cookies.get(key) if secret and value: dec = cookie_decode(value, secret) # (key, value) tuple or None return dec[1] if dec and dec[0] == key else default return value or default @DictProperty('environ', 'bottle.request.query', read_only=True) def query(self): ''' The :attr:`query_string` parsed into a :class:`FormsDict`. These values are sometimes called "URL arguments" or "GET parameters", but not to be confused with "URL wildcards" as they are provided by the :class:`Router`. ''' get = self.environ['bottle.get'] = FormsDict() pairs = _parse_qsl(self.environ.get('QUERY_STRING', '')) for key, value in pairs: get[key] = value return get @DictProperty('environ', 'bottle.request.forms', read_only=True) def forms(self): """ Form values parsed from an `url-encoded` or `multipart/form-data` encoded POST or PUT request body. The result is returned as a :class:`FormsDict`. All keys and values are strings. File uploads are stored separately in :attr:`files`. """ forms = FormsDict() for name, item in self.POST.allitems(): if not isinstance(item, FileUpload): forms[name] = item return forms @DictProperty('environ', 'bottle.request.params', read_only=True) def params(self): """ A :class:`FormsDict` with the combined values of :attr:`query` and :attr:`forms`. File uploads are stored in :attr:`files`. """ params = FormsDict() for key, value in self.query.allitems(): params[key] = value for key, value in self.forms.allitems(): params[key] = value return params @DictProperty('environ', 'bottle.request.files', read_only=True) def files(self): """ File uploads parsed from `multipart/form-data` encoded POST or PUT request body. The values are instances of :class:`FileUpload`. 
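            A sketch of a typical upload handler (the form field name is
            illustrative)::

                @app.route('/upload', method='POST')
                def do_upload():
                    upload = request.files.get('datafile')
                    if upload is None:
                        return 'No file was uploaded.'
                    data = upload.file.read()
                    return 'Received %d bytes for %s' % (len(data), upload.filename)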
""" files = FormsDict() for name, item in self.POST.allitems(): if isinstance(item, FileUpload): files[name] = item return files @DictProperty('environ', 'bottle.request.json', read_only=True) def json(self): ''' If the ``Content-Type`` header is ``application/json``, this property holds the parsed content of the request body. Only requests smaller than :attr:`MEMFILE_MAX` are processed to avoid memory exhaustion. ''' ctype = self.environ.get('CONTENT_TYPE', '').lower().split(';')[0] if ctype == 'application/json': b = self._get_body_string() if not b: return None return json_loads(b) return None def _iter_body(self, read, bufsize): maxread = max(0, self.content_length) while maxread: part = read(min(maxread, bufsize)) if not part: break yield part maxread -= len(part) def _iter_chunked(self, read, bufsize): err = HTTPError(400, 'Error while parsing chunked transfer body.') rn, sem, bs = tob('\r\n'), tob(';'), tob('') while True: header = read(1) while header[-2:] != rn: c = read(1) header += c if not c: raise err if len(header) > bufsize: raise err size, _, _ = header.partition(sem) try: maxread = int(tonat(size.strip()), 16) except ValueError: raise err if maxread == 0: break buff = bs while maxread > 0: if not buff: buff = read(min(maxread, bufsize)) part, buff = buff[:maxread], buff[maxread:] if not part: raise err yield part maxread -= len(part) if read(2) != rn: raise err @DictProperty('environ', 'bottle.request.body', read_only=True) def _body(self): body_iter = self._iter_chunked if self.chunked else self._iter_body read_func = self.environ['wsgi.input'].read body, body_size, is_temp_file = BytesIO(), 0, False for part in body_iter(read_func, self.MEMFILE_MAX): body.write(part) body_size += len(part) if not is_temp_file and body_size > self.MEMFILE_MAX: body, tmp = TemporaryFile(mode='w+b'), body body.write(tmp.getvalue()) del tmp is_temp_file = True self.environ['wsgi.input'] = body body.seek(0) return body def _get_body_string(self): ''' read body until content-length or MEMFILE_MAX into a string. Raise HTTPError(413) on requests that are to large. ''' clen = self.content_length if clen > self.MEMFILE_MAX: raise HTTPError(413, 'Request to large') if clen < 0: clen = self.MEMFILE_MAX + 1 data = self.body.read(clen) if len(data) > self.MEMFILE_MAX: # Fail fast raise HTTPError(413, 'Request to large') return data @property def body(self): """ The HTTP request body as a seek-able file-like object. Depending on :attr:`MEMFILE_MAX`, this is either a temporary file or a :class:`io.BytesIO` instance. Accessing this property for the first time reads and replaces the ``wsgi.input`` environ variable. Subsequent accesses just do a `seek(0)` on the file object. """ self._body.seek(0) return self._body @property def chunked(self): ''' True if Chunked transfer encoding was. ''' return 'chunked' in self.environ.get('HTTP_TRANSFER_ENCODING', '').lower() #: An alias for :attr:`query`. GET = query @DictProperty('environ', 'bottle.request.post', read_only=True) def POST(self): """ The values of :attr:`forms` and :attr:`files` combined into a single :class:`FormsDict`. Values are either strings (form values) or instances of :class:`cgi.FieldStorage` (file uploads). 
""" post = FormsDict() # We default to application/x-www-form-urlencoded for everything that # is not multipart and take the fast path (also: 3.1 workaround) if not self.content_type.startswith('multipart/'): pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1')) for key, value in pairs: post[key] = value return post safe_env = {'QUERY_STRING': ''} # Build a safe environment for cgi for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'): if key in self.environ: safe_env[key] = self.environ[key] args = dict(fp=self.body, environ=safe_env, keep_blank_values=True) if py31: args['fp'] = NCTextIOWrapper(args['fp'], encoding='utf8', newline='\n') elif py3k: args['encoding'] = 'utf8' data = cgi.FieldStorage(**args) self['_cgi.FieldStorage'] = data # http://bugs.python.org/issue18394#msg207958 data = data.list or [] for item in data: if item.filename: post[item.name] = FileUpload(item.file, item.name, item.filename, item.headers) else: post[item.name] = item.value return post @property def url(self): """ The full request URI including hostname and scheme. If your app lives behind a reverse proxy or load balancer and you get confusing results, make sure that the ``X-Forwarded-Host`` header is set correctly. """ return self.urlparts.geturl() @DictProperty('environ', 'bottle.request.urlparts', read_only=True) def urlparts(self): ''' The :attr:`url` string as an :class:`urlparse.SplitResult` tuple. The tuple contains (scheme, host, path, query_string and fragment), but the fragment is always empty because it is not visible to the server. ''' env = self.environ http = env.get('HTTP_X_FORWARDED_PROTO') or env.get('wsgi.url_scheme', 'http') host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST') if not host: # HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients. host = env.get('SERVER_NAME', '127.0.0.1') port = env.get('SERVER_PORT') if port and port != ('80' if http == 'http' else '443'): host += ':' + port path = urlquote(self.fullpath) return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '') @property def fullpath(self): """ Request path including :attr:`script_name` (if present). """ return urljoin(self.script_name, self.path.lstrip('/')) @property def query_string(self): """ The raw :attr:`query` part of the URL (everything in between ``?`` and ``#``) as a string. """ return self.environ.get('QUERY_STRING', '') @property def script_name(self): ''' The initial portion of the URL's `path` that was removed by a higher level (server or routing middleware) before the application was called. This script path is returned with leading and tailing slashes. ''' script_name = self.environ.get('SCRIPT_NAME', '').strip('/') return '/' + script_name + '/' if script_name else '/' def path_shift(self, shift=1): ''' Shift path segments from :attr:`path` to :attr:`script_name` and vice versa. :param shift: The number of path segments to shift. May be negative to change the shift direction. (default: 1) ''' script = self.environ.get('SCRIPT_NAME', '/') self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift) @property def content_length(self): ''' The request body length as an integer. The client is responsible to set this header. Otherwise, the real length of the body is unknown and -1 is returned. In this case, :attr:`body` will be empty. ''' return int(self.environ.get('CONTENT_LENGTH') or -1) @property def content_type(self): ''' The Content-Type header as a lowercase-string (default: empty). 
''' return self.environ.get('CONTENT_TYPE', '').lower() @property def is_xhr(self): ''' True if the request was triggered by a XMLHttpRequest. This only works with JavaScript libraries that support the `X-Requested-With` header (most of the popular libraries do). ''' requested_with = self.environ.get('HTTP_X_REQUESTED_WITH', '') return requested_with.lower() == 'xmlhttprequest' @property def is_ajax(self): ''' Alias for :attr:`is_xhr`. "Ajax" is not the right term. ''' return self.is_xhr @property def auth(self): """ HTTP authentication data as a (user, password) tuple. This implementation currently supports basic (not digest) authentication only. If the authentication happened at a higher level (e.g. in the front web-server or a middleware), the password field is None, but the user field is looked up from the ``REMOTE_USER`` environ variable. On any errors, None is returned. """ basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION', '')) if basic: return basic ruser = self.environ.get('REMOTE_USER') if ruser: return (ruser, None) return None @property def remote_route(self): """ A list of all IPs that were involved in this request, starting with the client IP and followed by zero or more proxies. This does only work if all proxies support the ```X-Forwarded-For`` header. Note that this information can be forged by malicious clients. """ proxy = self.environ.get('HTTP_X_FORWARDED_FOR') if proxy: return [ip.strip() for ip in proxy.split(',')] remote = self.environ.get('REMOTE_ADDR') return [remote] if remote else [] @property def remote_addr(self): """ The client IP as a string. Note that this information can be forged by malicious clients. """ route = self.remote_route return route[0] if route else None def copy(self): """ Return a new :class:`Request` with a shallow :attr:`environ` copy. """ return Request(self.environ.copy()) def get(self, value, default=None): return self.environ.get(value, default) def __getitem__(self, key): return self.environ[key] def __delitem__(self, key): self[key] = ""; del (self.environ[key]) def __iter__(self): return iter(self.environ) def __len__(self): return len(self.environ) def keys(self): return self.environ.keys() def __setitem__(self, key, value): """ Change an environ value and clear all caches that depend on it. """ if self.environ.get('bottle.request.readonly'): raise KeyError('The environ dictionary is read-only.') self.environ[key] = value todelete = () if key == 'wsgi.input': todelete = ('body', 'forms', 'files', 'params', 'post', 'json') elif key == 'QUERY_STRING': todelete = ('query', 'params') elif key.startswith('HTTP_'): todelete = ('headers', 'cookies') for key in todelete: self.environ.pop('bottle.request.' + key, None) def __repr__(self): return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url) def __getattr__(self, name): ''' Search in self.environ for additional user defined attributes. ''' try: var = self.environ['bottle.request.ext.%s' % name] return var.__get__(self) if hasattr(var, '__get__') else var except KeyError: raise AttributeError('Attribute %r not defined.' 
% name) def __setattr__(self, name, value): if name == 'environ': return object.__setattr__(self, name, value) self.environ['bottle.request.ext.%s' % name] = value def _hkey(key): if '\n' in key or '\r' in key or '\0' in key: raise ValueError("Header names must not contain control characters: %r" % key) return key.title().replace('_', '-') def _hval(value): value = tonat(value) if '\n' in value or '\r' in value or '\0' in value: raise ValueError("Header value must not contain control characters: %r" % value) return value class HeaderProperty(object): def __init__(self, name, reader=None, writer=None, default=''): self.name, self.default = name, default self.reader, self.writer = reader, writer self.__doc__ = 'Current value of the %r header.' % name.title() def __get__(self, obj, cls): if obj is None: return self value = obj.get_header(self.name, self.default) return self.reader(value) if self.reader else value def __set__(self, obj, value): obj[self.name] = self.writer(value) if self.writer else value def __delete__(self, obj): del obj[self.name] class BaseResponse(object): """ Storage class for a response body as well as headers and cookies. This class does support dict-like case-insensitive item-access to headers, but is NOT a dict. Most notably, iterating over a response yields parts of the body and not the headers. :param body: The response body as one of the supported types. :param status: Either an HTTP status code (e.g. 200) or a status line including the reason phrase (e.g. '200 OK'). :param headers: A dictionary or a list of name-value pairs. Additional keyword arguments are added to the list of headers. Underscores in the header name are replaced with dashes. """ default_status = 200 default_content_type = 'text/html; charset=UTF-8' # Header blacklist for specific response codes # (rfc2616 section 10.2.3 and 10.3.5) bad_headers = { 204: set(('Content-Type',)), 304: set(('Allow', 'Content-Encoding', 'Content-Language', 'Content-Length', 'Content-Range', 'Content-Type', 'Content-Md5', 'Last-Modified'))} def __init__(self, body='', status=None, headers=None, **more_headers): self._cookies = None self._headers = {} self.body = body self.status = status or self.default_status if headers: if isinstance(headers, dict): headers = headers.items() for name, value in headers: self.add_header(name, value) if more_headers: for name, value in more_headers.items(): self.add_header(name, value) def copy(self, cls=None): ''' Returns a copy of self. ''' cls = cls or BaseResponse assert issubclass(cls, BaseResponse) copy = cls() copy.status = self.status copy._headers = dict((k, v[:]) for (k, v) in self._headers.items()) if self._cookies: copy._cookies = SimpleCookie() copy._cookies.load(self._cookies.output(header='')) return copy def __iter__(self): return iter(self.body) def close(self): if hasattr(self.body, 'close'): self.body.close() @property def status_line(self): ''' The HTTP status line as a string (e.g. ``404 Not Found``).''' return self._status_line @property def status_code(self): ''' The HTTP status code as an integer (e.g. 
404).''' return self._status_code def _set_status(self, status): if isinstance(status, int): code, status = status, _HTTP_STATUS_LINES.get(status) elif ' ' in status: status = status.strip() code = int(status.split()[0]) else: raise ValueError('String status line without a reason phrase.') if not 100 <= code <= 999: raise ValueError('Status code out of range.') self._status_code = code self._status_line = str(status or ('%d Unknown' % code)) def _get_status(self): return self._status_line status = property(_get_status, _set_status, None, ''' A writeable property to change the HTTP response status. It accepts either a numeric code (100-999) or a string with a custom reason phrase (e.g. "404 Brain not found"). Both :data:`status_line` and :data:`status_code` are updated accordingly. The return value is always a status string. ''') del _get_status, _set_status @property def headers(self): ''' An instance of :class:`HeaderDict`, a case-insensitive dict-like view on the response headers. ''' hdict = HeaderDict() hdict.dict = self._headers return hdict def __contains__(self, name): return _hkey(name) in self._headers def __delitem__(self, name): del self._headers[_hkey(name)] def __getitem__(self, name): return self._headers[_hkey(name)][-1] def __setitem__(self, name, value): self._headers[_hkey(name)] = [_hval(value)] def get_header(self, name, default=None): ''' Return the value of a previously defined header. If there is no header with that name, return a default value. ''' return self._headers.get(_hkey(name), [default])[-1] def set_header(self, name, value): ''' Create a new response header, replacing any previously defined headers with the same name. ''' self._headers[_hkey(name)] = [_hval(value)] def add_header(self, name, value): ''' Add an additional response header, not removing duplicates. ''' self._headers.setdefault(_hkey(name), []).append(_hval(value)) def iter_headers(self): ''' Yield (header, value) tuples, skipping headers that are not allowed with the current response status code. ''' return self.headerlist @property def headerlist(self): """ WSGI conform list of (header, value) tuples. """ out = [] headers = list(self._headers.items()) if 'Content-Type' not in self._headers: headers.append(('Content-Type', [self.default_content_type])) if self._status_code in self.bad_headers: bad_headers = self.bad_headers[self._status_code] headers = [h for h in headers if h[0] not in bad_headers] out += [(name, val) for (name, vals) in headers for val in vals] if self._cookies: for c in self._cookies.values(): out.append(('Set-Cookie', _hval(c.OutputString()))) if py3k: out = [(k, v.encode('utf8').decode('latin1')) for (k, v) in out] return out content_type = HeaderProperty('Content-Type') content_length = HeaderProperty('Content-Length', reader=int) expires = HeaderProperty('Expires', reader=lambda x: datetime.utcfromtimestamp(parse_date(x)), writer=lambda x: http_date(x)) @property def charset(self, default='UTF-8'): """ Return the charset specified in the content-type header (default: utf8). """ if 'charset=' in self.content_type: return self.content_type.split('charset=')[-1].split(';')[0].strip() return default def set_cookie(self, name, value, secret=None, **options): ''' Create a new cookie or replace an old one. If the `secret` parameter is set, create a `Signed Cookie` (described below). :param name: the name of the cookie. :param value: the value of the cookie. :param secret: a signature key required for signed cookies. 
Additionally, this method accepts all RFC 2109 attributes that are supported by :class:`cookie.Morsel`, including: :param max_age: maximum age in seconds. (default: None) :param expires: a datetime object or UNIX timestamp. (default: None) :param domain: the domain that is allowed to read the cookie. (default: current domain) :param path: limits the cookie to a given path (default: current path) :param secure: limit the cookie to HTTPS connections (default: off). :param httponly: prevents client-side javascript to read this cookie (default: off, requires Python 2.6 or newer). If neither `expires` nor `max_age` is set (default), the cookie will expire at the end of the browser session (as soon as the browser window is closed). Signed cookies may store any pickle-able object and are cryptographically signed to prevent manipulation. Keep in mind that cookies are limited to 4kb in most browsers. Warning: Signed cookies are not encrypted (the client can still see the content) and not copy-protected (the client can restore an old cookie). The main intention is to make pickling and unpickling save, not to store secret information at client side. ''' if not self._cookies: self._cookies = SimpleCookie() if secret: value = touni(cookie_encode((name, value), secret)) elif not isinstance(value, basestring): raise TypeError('Secret key missing for non-string Cookie.') if len(value) > 4096: raise ValueError('Cookie value to long.') self._cookies[name] = value for key, value in options.items(): if key == 'max_age': if isinstance(value, timedelta): value = value.seconds + value.days * 24 * 3600 if key == 'expires': if isinstance(value, (datedate, datetime)): value = value.timetuple() elif isinstance(value, (int, float)): value = time.gmtime(value) value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value) self._cookies[name][key.replace('_', '-')] = value def delete_cookie(self, key, **kwargs): ''' Delete a cookie. Be sure to use the same `domain` and `path` settings as used to create the cookie. ''' kwargs['max_age'] = -1 kwargs['expires'] = 0 self.set_cookie(key, '', **kwargs) def __repr__(self): out = '' for name, value in self.headerlist: out += '%s: %s\n' % (name.title(), value.strip()) return out def local_property(name=None): if name: depr('local_property() is deprecated and will be removed.') # 0.12 ls = threading.local() def fget(self): try: return ls.var except AttributeError: raise RuntimeError("Request context not initialized.") def fset(self, value): ls.var = value def fdel(self): del ls.var return property(fget, fset, fdel, 'Thread-local property') class LocalRequest(BaseRequest): ''' A thread-local subclass of :class:`BaseRequest` with a different set of attributes for each thread. There is usually only one global instance of this class (:data:`request`). If accessed during a request/response cycle, this instance always refers to the *current* request (even on a multithreaded server). ''' bind = BaseRequest.__init__ environ = local_property() class LocalResponse(BaseResponse): ''' A thread-local subclass of :class:`BaseResponse` with a different set of attributes for each thread. There is usually only one global instance of this class (:data:`response`). Its attributes are used to build the HTTP response at the end of the request/response cycle. 
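        Illustrative use inside a request handler::

            @app.route('/no-cache')
            def no_cache():
                response.set_header('Cache-Control', 'no-cache')
                response.status = 200
                return 'fresh content'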
''' bind = BaseResponse.__init__ _status_line = local_property() _status_code = local_property() _cookies = local_property() _headers = local_property() body = local_property() Request = BaseRequest Response = BaseResponse class HTTPResponse(Response, BottleException): def __init__(self, body='', status=None, headers=None, **more_headers): super(HTTPResponse, self).__init__(body, status, headers, **more_headers) def apply(self, response): response._status_code = self._status_code response._status_line = self._status_line response._headers = self._headers response._cookies = self._cookies response.body = self.body class HTTPError(HTTPResponse): default_status = 500 def __init__(self, status=None, body=None, exception=None, traceback=None, **options): self.exception = exception self.traceback = traceback super(HTTPError, self).__init__(body, status, **options) ############################################################################### # Plugins ###################################################################### ############################################################################### class PluginError(BottleException): pass class JSONPlugin(object): name = 'json' api = 2 def __init__(self, json_dumps=json_dumps): self.json_dumps = json_dumps def apply(self, callback, route): dumps = self.json_dumps if not dumps: return callback def wrapper(*a, **ka): try: rv = callback(*a, **ka) except HTTPError: rv = _e() if isinstance(rv, dict): # Attempt to serialize, raises exception on failure json_response = dumps(rv) # Set content type only if serialization succesful response.content_type = 'application/json' return json_response elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict): rv.body = dumps(rv.body) rv.content_type = 'application/json' return rv return wrapper class TemplatePlugin(object): ''' This plugin applies the :func:`view` decorator to all routes with a `template` config parameter. If the parameter is a tuple, the second element must be a dict with additional options (e.g. `template_engine`) or default variables for the template. ''' name = 'template' api = 2 def apply(self, callback, route): conf = route.config.get('template') if isinstance(conf, (tuple, list)) and len(conf) == 2: return view(conf[0], **conf[1])(callback) elif isinstance(conf, str): return view(conf)(callback) else: return callback #: Not a plugin, but part of the plugin API. TODO: Find a better place. class _ImportRedirect(object): def __init__(self, name, impmask): ''' Create a virtual package that redirects imports (see PEP 302). ''' self.name = name self.impmask = impmask self.module = sys.modules.setdefault(name, imp.new_module(name)) self.module.__dict__.update({'__file__': __file__, '__path__': [], '__all__': [], '__loader__': self}) sys.meta_path.append(self) def find_module(self, fullname, path=None): if '.' 
not in fullname: return packname = fullname.rsplit('.', 1)[0] if packname != self.name: return return self def load_module(self, fullname): if fullname in sys.modules: return sys.modules[fullname] modname = fullname.rsplit('.', 1)[1] realname = self.impmask % modname __import__(realname) module = sys.modules[fullname] = sys.modules[realname] setattr(self.module, modname, module) module.__loader__ = self return module ############################################################################### # Common Utilities ############################################################# ############################################################################### class MultiDict(DictMixin): """ This dict stores multiple values per key, but behaves exactly like a normal dict in that it returns only the newest value for any given key. There are special methods available to access the full list of values. """ def __init__(self, *a, **k): self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items()) def __len__(self): return len(self.dict) def __iter__(self): return iter(self.dict) def __contains__(self, key): return key in self.dict def __delitem__(self, key): del self.dict[key] def __getitem__(self, key): return self.dict[key][-1] def __setitem__(self, key, value): self.append(key, value) def keys(self): return self.dict.keys() if py3k: def values(self): return (v[-1] for v in self.dict.values()) def items(self): return ((k, v[-1]) for k, v in self.dict.items()) def allitems(self): return ((k, v) for k, vl in self.dict.items() for v in vl) iterkeys = keys itervalues = values iteritems = items iterallitems = allitems else: def values(self): return [v[-1] for v in self.dict.values()] def items(self): return [(k, v[-1]) for k, v in self.dict.items()] def iterkeys(self): return self.dict.iterkeys() def itervalues(self): return (v[-1] for v in self.dict.itervalues()) def iteritems(self): return ((k, v[-1]) for k, v in self.dict.iteritems()) def iterallitems(self): return ((k, v) for k, vl in self.dict.iteritems() for v in vl) def allitems(self): return [(k, v) for k, vl in self.dict.iteritems() for v in vl] def get(self, key, default=None, index=-1, type=None): ''' Return the most recent value for a key. :param default: The default value to be returned if the key is not present or the type conversion fails. :param index: An index for the list of available values. :param type: If defined, this callable is used to cast the value into a specific type. Exception are suppressed and result in the default value to be returned. ''' try: val = self.dict[key][index] return type(val) if type else val except Exception: pass return default def append(self, key, value): ''' Add a new value to the list of values for this key. ''' self.dict.setdefault(key, []).append(value) def replace(self, key, value): ''' Replace the list of values with a single value. ''' self.dict[key] = [value] def getall(self, key): ''' Return a (possibly empty) list of values for a key. ''' return self.dict.get(key) or [] #: Aliases for WTForms to mimic other multi-dict APIs (Django) getone = get getlist = getall class FormsDict(MultiDict): ''' This :class:`MultiDict` subclass is used to store request form data. Additionally to the normal dict-like item access methods (which return unmodified data as native strings), this container also supports attribute-like access to its values. Attributes are automatically de- or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing attributes default to an empty string. 
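        Illustrative access patterns (the field name is made up)::

            name = request.forms.name                 # missing keys -> u''
            name = request.forms.getunicode('name', default='anonymous')
            raw  = request.forms.get('name')          # native string, not re-coded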
''' #: Encoding used for attribute values. input_encoding = 'utf8' #: If true (default), unicode strings are first encoded with `latin1` #: and then decoded to match :attr:`input_encoding`. recode_unicode = True def _fix(self, s, encoding=None): if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI return s.encode('latin1').decode(encoding or self.input_encoding) elif isinstance(s, bytes): # Python 2 WSGI return s.decode(encoding or self.input_encoding) else: return s def decode(self, encoding=None): ''' Returns a copy with all keys and values de- or recoded to match :attr:`input_encoding`. Some libraries (e.g. WTForms) want a unicode dictionary. ''' copy = FormsDict() enc = copy.input_encoding = encoding or self.input_encoding copy.recode_unicode = False for key, value in self.allitems(): copy.append(self._fix(key, enc), self._fix(value, enc)) return copy def getunicode(self, name, default=None, encoding=None): ''' Return the value as a unicode string, or the default. ''' try: return self._fix(self[name], encoding) except (UnicodeError, KeyError): return default def __getattr__(self, name, default=unicode()): # Without this guard, pickle generates a cryptic TypeError: if name.startswith('__') and name.endswith('__'): return super(FormsDict, self).__getattr__(name) return self.getunicode(name, default=default) class HeaderDict(MultiDict): """ A case-insensitive version of :class:`MultiDict` that defaults to replace the old value instead of appending it. """ def __init__(self, *a, **ka): self.dict = {} if a or ka: self.update(*a, **ka) def __contains__(self, key): return _hkey(key) in self.dict def __delitem__(self, key): del self.dict[_hkey(key)] def __getitem__(self, key): return self.dict[_hkey(key)][-1] def __setitem__(self, key, value): self.dict[_hkey(key)] = [_hval(value)] def append(self, key, value): self.dict.setdefault(_hkey(key), []).append(_hval(value)) def replace(self, key, value): self.dict[_hkey(key)] = [_hval(value)] def getall(self, key): return self.dict.get(_hkey(key)) or [] def get(self, key, default=None, index=-1): return MultiDict.get(self, _hkey(key), default, index) def filter(self, names): for name in (_hkey(n) for n in names): if name in self.dict: del self.dict[name] class WSGIHeaderDict(DictMixin): ''' This dict-like class wraps a WSGI environ dict and provides convenient access to HTTP_* fields. Keys and values are native strings (2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI environment contains non-native string values, these are de- or encoded using a lossless 'latin1' character set. The API will remain stable even on changes to the relevant PEPs. Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one that uses non-native strings.) ''' #: List of keys that do not have a ``HTTP_`` prefix. cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH') def __init__(self, environ): self.environ = environ def _ekey(self, key): ''' Translate header field name to CGI/WSGI environ key. ''' key = key.replace('-', '_').upper() if key in self.cgikeys: return key return 'HTTP_' + key def raw(self, key, default=None): ''' Return the header value as is (may be bytes or unicode). ''' return self.environ.get(self._ekey(key), default) def __getitem__(self, key): return tonat(self.environ[self._ekey(key)], 'latin1') def __setitem__(self, key, value): raise TypeError("%s is read-only." % self.__class__) def __delitem__(self, key): raise TypeError("%s is read-only." 
% self.__class__) def __iter__(self): for key in self.environ: if key[:5] == 'HTTP_': yield key[5:].replace('_', '-').title() elif key in self.cgikeys: yield key.replace('_', '-').title() def keys(self): return [x for x in self] def __len__(self): return len(self.keys()) def __contains__(self, key): return self._ekey(key) in self.environ class ConfigDict(dict): ''' A dict-like configuration storage with additional support for namespaces, validators, meta-data, on_change listeners and more. This storage is optimized for fast read access. Retrieving a key or using non-altering dict methods (e.g. `dict.get()`) has no overhead compared to a native dict. ''' __slots__ = ('_meta', '_on_change') class Namespace(DictMixin): def __init__(self, config, namespace): self._config = config self._prefix = namespace def __getitem__(self, key): depr('Accessing namespaces as dicts is discouraged. ' 'Only use flat item access: ' 'cfg["names"]["pace"]["key"] -> cfg["name.space.key"]') # 0.12 return self._config[self._prefix + '.' + key] def __setitem__(self, key, value): self._config[self._prefix + '.' + key] = value def __delitem__(self, key): del self._config[self._prefix + '.' + key] def __iter__(self): ns_prefix = self._prefix + '.' for key in self._config: ns, dot, name = key.rpartition('.') if ns == self._prefix and name: yield name def keys(self): return [x for x in self] def __len__(self): return len(self.keys()) def __contains__(self, key): return self._prefix + '.' + key in self._config def __repr__(self): return '<Config.Namespace %s.*>' % self._prefix def __str__(self): return '<Config.Namespace %s.*>' % self._prefix # Deprecated ConfigDict features def __getattr__(self, key): depr('Attribute access is deprecated.') # 0.12 if key not in self and key[0].isupper(): self[key] = ConfigDict.Namespace(self._config, self._prefix + '.' + key) if key not in self and key.startswith('__'): raise AttributeError(key) return self.get(key) def __setattr__(self, key, value): if key in ('_config', '_prefix'): self.__dict__[key] = value return depr('Attribute assignment is deprecated.') # 0.12 if hasattr(DictMixin, key): raise AttributeError('Read-only attribute.') if key in self and self[key] and isinstance(self[key], self.__class__): raise AttributeError('Non-empty namespace attribute.') self[key] = value def __delattr__(self, key): if key in self: val = self.pop(key) if isinstance(val, self.__class__): prefix = key + '.' for key in self: if key.startswith(prefix): del self[prefix + key] def __call__(self, *a, **ka): depr('Calling ConfDict is deprecated. Use the update() method.') # 0.12 self.update(*a, **ka) return self def __init__(self, *a, **ka): self._meta = {} self._on_change = lambda name, value: None if a or ka: depr('Constructor does no longer accept parameters.') # 0.12 self.update(*a, **ka) def load_config(self, filename): ''' Load values from an *.ini style config file. If the config file contains sections, their names are used as namespaces for the values within. The two special sections ``DEFAULT`` and ``bottle`` refer to the root namespace (no prefix). ''' conf = ConfigParser() conf.read(filename) for section in conf.sections(): for key, value in conf.items(section): if section not in ('DEFAULT', 'bottle'): key = section + '.' + key self[key] = value return self def load_dict(self, source, namespace='', make_namespaces=False): ''' Import values from a dictionary structure. Nesting can be used to represent namespaces. 
>>> ConfigDict().load_dict({'name': {'space': {'key': 'value'}}}) {'name.space.key': 'value'} ''' stack = [(namespace, source)] while stack: prefix, source = stack.pop() if not isinstance(source, dict): raise TypeError('Source is not a dict (r)' % type(key)) for key, value in source.items(): if not isinstance(key, basestring): raise TypeError('Key is not a string (%r)' % type(key)) full_key = prefix + '.' + key if prefix else key if isinstance(value, dict): stack.append((full_key, value)) if make_namespaces: self[full_key] = self.Namespace(self, full_key) else: self[full_key] = value return self def update(self, *a, **ka): ''' If the first parameter is a string, all keys are prefixed with this namespace. Apart from that it works just as the usual dict.update(). Example: ``update('some.namespace', key='value')`` ''' prefix = '' if a and isinstance(a[0], basestring): prefix = a[0].strip('.') + '.' a = a[1:] for key, value in dict(*a, **ka).items(): self[prefix + key] = value def setdefault(self, key, value): if key not in self: self[key] = value return self[key] def __setitem__(self, key, value): if not isinstance(key, basestring): raise TypeError('Key has type %r (not a string)' % type(key)) value = self.meta_get(key, 'filter', lambda x: x)(value) if key in self and self[key] is value: return self._on_change(key, value) dict.__setitem__(self, key, value) def __delitem__(self, key): dict.__delitem__(self, key) def clear(self): for key in self: del self[key] def meta_get(self, key, metafield, default=None): ''' Return the value of a meta field for a key. ''' return self._meta.get(key, {}).get(metafield, default) def meta_set(self, key, metafield, value): ''' Set the meta field for a key to a new value. This triggers the on-change handler for existing keys. ''' self._meta.setdefault(key, {})[metafield] = value if key in self: self[key] = self[key] def meta_list(self, key): ''' Return an iterable of meta field names defined for a key. ''' return self._meta.get(key, {}).keys() # Deprecated ConfigDict features def __getattr__(self, key): depr('Attribute access is deprecated.') # 0.12 if key not in self and key[0].isupper(): self[key] = self.Namespace(self, key) if key not in self and key.startswith('__'): raise AttributeError(key) return self.get(key) def __setattr__(self, key, value): if key in self.__slots__: return dict.__setattr__(self, key, value) depr('Attribute assignment is deprecated.') # 0.12 if hasattr(dict, key): raise AttributeError('Read-only attribute.') if key in self and self[key] and isinstance(self[key], self.Namespace): raise AttributeError('Non-empty namespace attribute.') self[key] = value def __delattr__(self, key): if key in self: val = self.pop(key) if isinstance(val, self.Namespace): prefix = key + '.' for key in self: if key.startswith(prefix): del self[prefix + key] def __call__(self, *a, **ka): depr('Calling ConfDict is deprecated. Use the update() method.') # 0.12 self.update(*a, **ka) return self class AppStack(list): """ A stack-like list. Calling it returns the head of the stack. """ def __call__(self): """ Return the current default application. 
""" return self[-1] def push(self, value=None): """ Add a new :class:`Bottle` instance to the stack """ if not isinstance(value, Bottle): value = Bottle() self.append(value) return value class WSGIFileWrapper(object): def __init__(self, fp, buffer_size=1024 * 64): self.fp, self.buffer_size = fp, buffer_size for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'): if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr)) def __iter__(self): buff, read = self.buffer_size, self.read while True: part = read(buff) if not part: return yield part class _closeiter(object): ''' This only exists to be able to attach a .close method to iterators that do not support attribute assignment (most of itertools). ''' def __init__(self, iterator, close=None): self.iterator = iterator self.close_callbacks = makelist(close) def __iter__(self): return iter(self.iterator) def close(self): for func in self.close_callbacks: func() class ResourceManager(object): ''' This class manages a list of search paths and helps to find and open application-bound resources (files). :param base: default value for :meth:`add_path` calls. :param opener: callable used to open resources. :param cachemode: controls which lookups are cached. One of 'all', 'found' or 'none'. ''' def __init__(self, base='./', opener=open, cachemode='all'): self.opener = open self.base = base self.cachemode = cachemode #: A list of search paths. See :meth:`add_path` for details. self.path = [] #: A cache for resolved paths. ``res.cache.clear()`` clears the cache. self.cache = {} def add_path(self, path, base=None, index=None, create=False): ''' Add a new path to the list of search paths. Return False if the path does not exist. :param path: The new search path. Relative paths are turned into an absolute and normalized form. If the path looks like a file (not ending in `/`), the filename is stripped off. :param base: Path used to absolutize relative search paths. Defaults to :attr:`base` which defaults to ``os.getcwd()``. :param index: Position within the list of search paths. Defaults to last index (appends to the list). The `base` parameter makes it easy to reference files installed along with a python module or package:: res.add_path('./resources/', __file__) ''' base = os.path.abspath(os.path.dirname(base or self.base)) path = os.path.abspath(os.path.join(base, os.path.dirname(path))) path += os.sep if path in self.path: self.path.remove(path) if create and not os.path.isdir(path): os.makedirs(path) if index is None: self.path.append(path) else: self.path.insert(index, path) self.cache.clear() return os.path.exists(path) def __iter__(self): ''' Iterate over all existing files in all registered paths. ''' search = self.path[:] while search: path = search.pop() if not os.path.isdir(path): continue for name in os.listdir(path): full = os.path.join(path, name) if os.path.isdir(full): search.append(full) else: yield full def lookup(self, name): ''' Search for a resource and return an absolute file path, or `None`. The :attr:`path` list is searched in order. The first match is returend. Symlinks are followed. The result is cached to speed up future lookups. ''' if name not in self.cache or DEBUG: for path in self.path: fpath = os.path.join(path, name) if os.path.isfile(fpath): if self.cachemode in ('all', 'found'): self.cache[name] = fpath return fpath if self.cachemode == 'all': self.cache[name] = None return self.cache[name] def open(self, name, mode='r', *args, **kwargs): ''' Find a resource and return a file object, or raise IOError. 
''' fname = self.lookup(name) if not fname: raise IOError("Resource %r not found." % name) return self.opener(fname, mode=mode, *args, **kwargs) class FileUpload(object): def __init__(self, fileobj, name, filename, headers=None): ''' Wrapper for file uploads. ''' #: Open file(-like) object (BytesIO buffer or temporary file) self.file = fileobj #: Name of the upload form field self.name = name #: Raw filename as sent by the client (may contain unsafe characters) self.raw_filename = filename #: A :class:`HeaderDict` with additional headers (e.g. content-type) self.headers = HeaderDict(headers) if headers else HeaderDict() content_type = HeaderProperty('Content-Type') content_length = HeaderProperty('Content-Length', reader=int, default=-1) def get_header(self, name, default=None): """ Return the value of a header within the mulripart part. """ return self.headers.get(name, default) @cached_property def filename(self): ''' Name of the file on the client file system, but normalized to ensure file system compatibility. An empty filename is returned as 'empty'. Only ASCII letters, digits, dashes, underscores and dots are allowed in the final filename. Accents are removed, if possible. Whitespace is replaced by a single dash. Leading or tailing dots or dashes are removed. The filename is limited to 255 characters. ''' fname = self.raw_filename if not isinstance(fname, unicode): fname = fname.decode('utf8', 'ignore') fname = normalize('NFKD', fname).encode('ASCII', 'ignore').decode('ASCII') fname = os.path.basename(fname.replace('\\', os.path.sep)) fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip() fname = re.sub(r'[-\s]+', '-', fname).strip('.-') return fname[:255] or 'empty' def _copy_file(self, fp, chunk_size=2 ** 16): read, write, offset = self.file.read, fp.write, self.file.tell() while 1: buf = read(chunk_size) if not buf: break write(buf) self.file.seek(offset) def save(self, destination, overwrite=False, chunk_size=2 ** 16): ''' Save file to disk or copy its content to an open file(-like) object. If *destination* is a directory, :attr:`filename` is added to the path. Existing files are not overwritten by default (IOError). :param destination: File path, directory or file(-like) object. :param overwrite: If True, replace existing files. (default: False) :param chunk_size: Bytes to read at a time. (default: 64kb) ''' if isinstance(destination, basestring): # Except file-likes here if os.path.isdir(destination): destination = os.path.join(destination, self.filename) if not overwrite and os.path.exists(destination): raise IOError('File exists.') with open(destination, 'wb') as fp: self._copy_file(fp, chunk_size) else: self._copy_file(destination, chunk_size) ############################################################################### # Application Helper ########################################################### ############################################################################### def abort(code=500, text='Unknown Error.'): """ Aborts execution and causes a HTTP error. """ raise HTTPError(code, text) def redirect(url, code=None): """ Aborts execution and causes a 303 or 302 redirect, depending on the HTTP protocol version. """ if not code: code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302 res = response.copy(cls=HTTPResponse) res.status = code res.body = "" res.set_header('Location', urljoin(request.url, url)) raise res def _file_iter_range(fp, offset, bytes, maxread=1024 * 1024): ''' Yield chunks from a range in a file. 
No chunk is bigger than maxread.''' fp.seek(offset) while bytes > 0: part = fp.read(min(bytes, maxread)) if not part: break bytes -= len(part) yield part def static_file(filename, root, mimetype='auto', download=False, charset='UTF-8'): """ Open a file in a safe way and return :exc:`HTTPResponse` with status code 200, 305, 403 or 404. The ``Content-Type``, ``Content-Encoding``, ``Content-Length`` and ``Last-Modified`` headers are set if possible. Special support for ``If-Modified-Since``, ``Range`` and ``HEAD`` requests. :param filename: Name or path of the file to send. :param root: Root path for file lookups. Should be an absolute directory path. :param mimetype: Defines the content-type header (default: guess from file extension) :param download: If True, ask the browser to open a `Save as...` dialog instead of opening the file with the associated program. You can specify a custom filename as a string. If not specified, the original filename is used (default: False). :param charset: The charset to use for files with a ``text/*`` mime-type. (default: UTF-8) """ root = os.path.abspath(root) + os.sep filename = os.path.abspath(os.path.join(root, filename.strip('/\\'))) headers = dict() if not filename.startswith(root): return HTTPError(403, "Access denied.") if not os.path.exists(filename) or not os.path.isfile(filename): return HTTPError(404, "File does not exist.") if not os.access(filename, os.R_OK): return HTTPError(403, "You do not have permission to access this file.") if mimetype == 'auto': mimetype, encoding = mimetypes.guess_type(filename) if encoding: headers['Content-Encoding'] = encoding if mimetype: if mimetype[:5] == 'text/' and charset and 'charset' not in mimetype: mimetype += '; charset=%s' % charset headers['Content-Type'] = mimetype if download: download = os.path.basename(filename if download == True else download) headers['Content-Disposition'] = 'attachment; filename="%s"' % download stats = os.stat(filename) headers['Content-Length'] = clen = stats.st_size lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime)) headers['Last-Modified'] = lm ims = request.environ.get('HTTP_IF_MODIFIED_SINCE') if ims: ims = parse_date(ims.split(";")[0].strip()) if ims is not None and ims >= int(stats.st_mtime): headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) return HTTPResponse(status=304, **headers) body = '' if request.method == 'HEAD' else open(filename, 'rb') headers["Accept-Ranges"] = "bytes" ranges = request.environ.get('HTTP_RANGE') if 'HTTP_RANGE' in request.environ: ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen)) if not ranges: return HTTPError(416, "Requested Range Not Satisfiable") offset, end = ranges[0] headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end - 1, clen) headers["Content-Length"] = str(end - offset) if body: body = _file_iter_range(body, offset, end - offset) return HTTPResponse(body, status=206, **headers) return HTTPResponse(body, **headers) ############################################################################### # HTTP Utilities and MISC (TODO) ############################################### ############################################################################### def debug(mode=True): """ Change the debug level. 
There is only one debug level supported at the moment.""" global DEBUG if mode: warnings.simplefilter('default') DEBUG = bool(mode) def http_date(value): if isinstance(value, (datedate, datetime)): value = value.utctimetuple() elif isinstance(value, (int, float)): value = time.gmtime(value) if not isinstance(value, basestring): value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value) return value def parse_date(ims): """ Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """ try: ts = email.utils.parsedate_tz(ims) return time.mktime(ts[:8] + (0,)) - (ts[9] or 0) - time.timezone except (TypeError, ValueError, IndexError, OverflowError): return None def parse_auth(header): """ Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None""" try: method, data = header.split(None, 1) if method.lower() == 'basic': user, pwd = touni(base64.b64decode(tob(data))).split(':', 1) return user, pwd except (KeyError, ValueError): return None def parse_range_header(header, maxlen=0): ''' Yield (start, end) ranges parsed from a HTTP Range header. Skip unsatisfiable ranges. The end index is non-inclusive.''' if not header or header[:6] != 'bytes=': return ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r] for start, end in ranges: try: if not start: # bytes=-100 -> last 100 bytes start, end = max(0, maxlen - int(end)), maxlen elif not end: # bytes=100- -> all but the first 99 bytes start, end = int(start), maxlen else: # bytes=100-200 -> bytes 100-200 (inclusive) start, end = int(start), min(int(end) + 1, maxlen) if 0 <= start < end <= maxlen: yield start, end except ValueError: pass def _parse_qsl(qs): r = [] for pair in qs.replace(';', '&').split('&'): if not pair: continue nv = pair.split('=', 1) if len(nv) != 2: nv.append('') key = urlunquote(nv[0].replace('+', ' ')) value = urlunquote(nv[1].replace('+', ' ')) r.append((key, value)) return r def _lscmp(a, b): ''' Compares two strings in a cryptographically safe way: Runtime is not affected by length of common prefix. ''' return not sum(0 if x == y else 1 for x, y in zip(a, b)) and len(a) == len(b) def cookie_encode(data, key): ''' Encode and sign a pickle-able object. Return a (byte) string ''' msg = base64.b64encode(pickle.dumps(data, -1)) sig = base64.b64encode(hmac.new(tob(key), msg).digest()) return tob('!') + sig + tob('?') + msg def cookie_decode(data, key): ''' Verify and decode an encoded string. Return an object or None.''' data = tob(data) if cookie_is_encoded(data): sig, msg = data.split(tob('?'), 1) if _lscmp(sig[1:], base64.b64encode(hmac.new(tob(key), msg).digest())): return pickle.loads(base64.b64decode(msg)) return None def cookie_is_encoded(data): ''' Return True if the argument looks like a encoded cookie.''' return bool(data.startswith(tob('!')) and tob('?') in data) def html_escape(string): ''' Escape HTML special characters ``&<>`` and quotes ``'"``. ''' return string.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;') \ .replace('"', '&quot;').replace("'", '&#039;') def html_quote(string): ''' Escape and quote a string to be used as an HTTP attribute.''' return '"%s"' % html_escape(string).replace('\n', '&#10;') \ .replace('\r', '&#13;').replace('\t', '&#9;') def yieldroutes(func): """ Return a generator for routes that match the signature (name, args) of the func parameter. This may yield more than one route if the function takes optional keyword arguments. 
The output is best described by example:: a() -> '/a' b(x, y) -> '/b/<x>/<y>' c(x, y=5) -> '/c/<x>' and '/c/<x>/<y>' d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>' """ path = '/' + func.__name__.replace('__', '/').lstrip('/') spec = getargspec(func) argc = len(spec[0]) - len(spec[3] or []) path += ('/<%s>' * argc) % tuple(spec[0][:argc]) yield path for arg in spec[0][argc:]: path += '/<%s>' % arg yield path def path_shift(script_name, path_info, shift=1): ''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa. :return: The modified paths. :param script_name: The SCRIPT_NAME path. :param script_name: The PATH_INFO path. :param shift: The number of path fragments to shift. May be negative to change the shift direction. (default: 1) ''' if shift == 0: return script_name, path_info pathlist = path_info.strip('/').split('/') scriptlist = script_name.strip('/').split('/') if pathlist and pathlist[0] == '': pathlist = [] if scriptlist and scriptlist[0] == '': scriptlist = [] if shift > 0 and shift <= len(pathlist): moved = pathlist[:shift] scriptlist = scriptlist + moved pathlist = pathlist[shift:] elif shift < 0 and shift >= -len(scriptlist): moved = scriptlist[shift:] pathlist = moved + pathlist scriptlist = scriptlist[:shift] else: empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO' raise AssertionError("Cannot shift. Nothing left from %s" % empty) new_script_name = '/' + '/'.join(scriptlist) new_path_info = '/' + '/'.join(pathlist) if path_info.endswith('/') and pathlist: new_path_info += '/' return new_script_name, new_path_info def auth_basic(check, realm="private", text="Access denied"): ''' Callback decorator to require HTTP auth (basic). TODO: Add route(check_auth=...) parameter. ''' def decorator(func): def wrapper(*a, **ka): user, password = request.auth or (None, None) if user is None or not check(user, password): err = HTTPError(401, text) err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm) return err return func(*a, **ka) return wrapper return decorator # Shortcuts for common Bottle methods. # They all refer to the current default application. def make_default_app_wrapper(name): ''' Return a callable that relays calls to the current default app. 
''' @functools.wraps(getattr(Bottle, name)) def wrapper(*a, **ka): return getattr(app(), name)(*a, **ka) return wrapper route = make_default_app_wrapper('route') get = make_default_app_wrapper('get') post = make_default_app_wrapper('post') put = make_default_app_wrapper('put') delete = make_default_app_wrapper('delete') error = make_default_app_wrapper('error') mount = make_default_app_wrapper('mount') hook = make_default_app_wrapper('hook') install = make_default_app_wrapper('install') uninstall = make_default_app_wrapper('uninstall') url = make_default_app_wrapper('get_url') ############################################################################### # Server Adapter ############################################################### ############################################################################### class ServerAdapter(object): quiet = False def __init__(self, host='127.0.0.1', port=8080, **options): self.options = options self.host = host self.port = int(port) def run(self, handler): # pragma: no cover pass def __repr__(self): args = ', '.join(['%s=%s' % (k, repr(v)) for k, v in self.options.items()]) return "%s(%s)" % (self.__class__.__name__, args) class CGIServer(ServerAdapter): quiet = True def run(self, handler): # pragma: no cover from wsgiref.handlers import CGIHandler def fixed_environ(environ, start_response): environ.setdefault('PATH_INFO', '') return handler(environ, start_response) CGIHandler().run(fixed_environ) class FlupFCGIServer(ServerAdapter): def run(self, handler): # pragma: no cover import flup.server.fcgi self.options.setdefault('bindAddress', (self.host, self.port)) flup.server.fcgi.WSGIServer(handler, **self.options).run() class WSGIRefServer(ServerAdapter): def run(self, app): # pragma: no cover from wsgiref.simple_server import WSGIRequestHandler, WSGIServer from wsgiref.simple_server import make_server import socket class FixedHandler(WSGIRequestHandler): def address_string(self): # Prevent reverse DNS lookups please. return self.client_address[0] def log_request(*args, **kw): if not self.quiet: return WSGIRequestHandler.log_request(*args, **kw) handler_cls = self.options.get('handler_class', FixedHandler) server_cls = self.options.get('server_class', WSGIServer) if ':' in self.host: # Fix wsgiref for IPv6 addresses. 
if getattr(server_cls, 'address_family') == socket.AF_INET: class server_cls(server_cls): address_family = socket.AF_INET6 srv = make_server(self.host, self.port, app, server_cls, handler_cls) srv.serve_forever() class CherryPyServer(ServerAdapter): def run(self, handler): # pragma: no cover from cherrypy import wsgiserver self.options['bind_addr'] = (self.host, self.port) self.options['wsgi_app'] = handler certfile = self.options.get('certfile') if certfile: del self.options['certfile'] keyfile = self.options.get('keyfile') if keyfile: del self.options['keyfile'] server = wsgiserver.CherryPyWSGIServer(**self.options) if certfile: server.ssl_certificate = certfile if keyfile: server.ssl_private_key = keyfile try: server.start() finally: server.stop() class WaitressServer(ServerAdapter): def run(self, handler): from waitress import serve serve(handler, host=self.host, port=self.port) class PasteServer(ServerAdapter): def run(self, handler): # pragma: no cover from paste import httpserver from paste.translogger import TransLogger handler = TransLogger(handler, setup_console_handler=(not self.quiet)) httpserver.serve(handler, host=self.host, port=str(self.port), **self.options) class MeinheldServer(ServerAdapter): def run(self, handler): from meinheld import server server.listen((self.host, self.port)) server.run(handler) class FapwsServer(ServerAdapter): """ Extremely fast webserver using libev. See http://www.fapws.org/ """ def run(self, handler): # pragma: no cover import fapws._evwsgi as evwsgi from fapws import base, config port = self.port if float(config.SERVER_IDENT[-2:]) > 0.4: # fapws3 silently changed its API in 0.5 port = str(port) evwsgi.start(self.host, port) # fapws3 never releases the GIL. Complain upstream. I tried. No luck. if 'BOTTLE_CHILD' in os.environ and not self.quiet: _stderr("WARNING: Auto-reloading does not work with Fapws3.\n") _stderr(" (Fapws3 breaks python thread support)\n") evwsgi.set_base_module(base) def app(environ, start_response): environ['wsgi.multiprocess'] = False return handler(environ, start_response) evwsgi.wsgi_cb(('', app)) evwsgi.run() class TornadoServer(ServerAdapter): """ The super hyped asynchronous server by facebook. Untested. """ def run(self, handler): # pragma: no cover import tornado.wsgi, tornado.httpserver, tornado.ioloop container = tornado.wsgi.WSGIContainer(handler) server = tornado.httpserver.HTTPServer(container) server.listen(port=self.port, address=self.host) tornado.ioloop.IOLoop.instance().start() class AppEngineServer(ServerAdapter): """ Adapter for Google App Engine. """ quiet = True def run(self, handler): from google.appengine.ext.webapp import util # A main() function in the handler script enables 'App Caching'. # Lets makes sure it is there. This _really_ improves performance. module = sys.modules.get('__main__') if module and not hasattr(module, 'main'): module.main = lambda: util.run_wsgi_app(handler) util.run_wsgi_app(handler) class TwistedServer(ServerAdapter): """ Untested. """ def run(self, handler): from twisted.web import server, wsgi from twisted.python.threadpool import ThreadPool from twisted.internet import reactor thread_pool = ThreadPool() thread_pool.start() reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop) factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler)) reactor.listenTCP(self.port, factory, interface=self.host) reactor.run() class DieselServer(ServerAdapter): """ Untested. 
""" def run(self, handler): from diesel.protocols.wsgi import WSGIApplication app = WSGIApplication(handler, port=self.port) app.run() class GeventServer(ServerAdapter): """ Untested. Options: * `fast` (default: False) uses libevent's http server, but has some issues: No streaming, no pipelining, no SSL. * See gevent.wsgi.WSGIServer() documentation for more options. """ def run(self, handler): from gevent import pywsgi, local if not isinstance(threading.local(), local.local): msg = "Bottle requires gevent.monkey.patch_all() (before import)" raise RuntimeError(msg) if self.options.pop('fast', None): depr('The "fast" option has been deprecated and removed by Gevent.') if self.quiet: self.options['log'] = None address = (self.host, self.port) server = pywsgi.WSGIServer(address, handler, **self.options) if 'BOTTLE_CHILD' in os.environ: import signal signal.signal(signal.SIGINT, lambda s, f: server.stop()) server.serve_forever() class GeventSocketIOServer(ServerAdapter): def run(self, handler): from socketio import server address = (self.host, self.port) server.SocketIOServer(address, handler, **self.options).serve_forever() class GunicornServer(ServerAdapter): """ Untested. See http://gunicorn.org/configure.html for options. """ def run(self, handler): from gunicorn.app.base import Application config = {'bind': "%s:%d" % (self.host, int(self.port))} config.update(self.options) class GunicornApplication(Application): def init(self, parser, opts, args): return config def load(self): return handler GunicornApplication().run() class EventletServer(ServerAdapter): """ Untested """ def run(self, handler): from eventlet import wsgi, listen try: wsgi.server(listen((self.host, self.port)), handler, log_output=(not self.quiet)) except TypeError: # Fallback, if we have old version of eventlet wsgi.server(listen((self.host, self.port)), handler) class RocketServer(ServerAdapter): """ Untested. """ def run(self, handler): from rocket import Rocket server = Rocket((self.host, self.port), 'wsgi', {'wsgi_app': handler}) server.start() class BjoernServer(ServerAdapter): """ Fast server written in C: https://github.com/jonashaag/bjoern """ def run(self, handler): from bjoern import run run(handler, self.host, self.port) class AutoServer(ServerAdapter): """ Untested. """ adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer, WSGIRefServer] def run(self, handler): for sa in self.adapters: try: return sa(self.host, self.port, **self.options).run(handler) except ImportError: pass server_names = { 'cgi': CGIServer, 'flup': FlupFCGIServer, 'wsgiref': WSGIRefServer, 'waitress': WaitressServer, 'cherrypy': CherryPyServer, 'paste': PasteServer, 'fapws3': FapwsServer, 'tornado': TornadoServer, 'gae': AppEngineServer, 'twisted': TwistedServer, 'diesel': DieselServer, 'meinheld': MeinheldServer, 'gunicorn': GunicornServer, 'eventlet': EventletServer, 'gevent': GeventServer, 'geventSocketIO': GeventSocketIOServer, 'rocket': RocketServer, 'bjoern': BjoernServer, 'auto': AutoServer, } ############################################################################### # Application Control ########################################################## ############################################################################### def load(target, **namespace): """ Import a module or fetch an object from a module. * ``package.module`` returns `module` as a module object. * ``pack.mod:name`` returns the module variable `name` from `pack.mod`. * ``pack.mod:func()`` calls `pack.mod.func()` and returns the result. 
The last form accepts not only function calls, but any type of expression. Keyword arguments passed to this function are available as local variables. Example: ``import_string('re:compile(x)', x='[a-z]')`` """ module, target = target.split(":", 1) if ':' in target else (target, None) if module not in sys.modules: __import__(module) if not target: return sys.modules[module] if target.isalnum(): return getattr(sys.modules[module], target) package_name = module.split('.')[0] namespace[package_name] = sys.modules[package_name] return eval('%s.%s' % (module, target), namespace) def load_app(target): """ Load a bottle application from a module and make sure that the import does not affect the current default application, but returns a separate application object. See :func:`load` for the target parameter. """ global NORUN; NORUN, nr_old = True, NORUN try: tmp = default_app.push() # Create a new "default application" rv = load(target) # Import the target module return rv if callable(rv) else tmp finally: default_app.remove(tmp) # Remove the temporary added default application NORUN = nr_old _debug = debug def run(app=None, server='wsgiref', host='127.0.0.1', port=8080, interval=1, reloader=False, quiet=False, plugins=None, debug=None, **kargs): """ Start a server instance. This method blocks until the server terminates. :param app: WSGI application or target string supported by :func:`load_app`. (default: :func:`default_app`) :param server: Server adapter to use. See :data:`server_names` keys for valid names or pass a :class:`ServerAdapter` subclass. (default: `wsgiref`) :param host: Server address to bind to. Pass ``0.0.0.0`` to listens on all interfaces including the external one. (default: 127.0.0.1) :param port: Server port to bind to. Values below 1024 require root privileges. (default: 8080) :param reloader: Start auto-reloading server? (default: False) :param interval: Auto-reloader interval in seconds (default: 1) :param quiet: Suppress output to stdout and stderr? (default: False) :param options: Options passed to the server adapter. """ if NORUN: return if reloader and not os.environ.get('BOTTLE_CHILD'): try: lockfile = None fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock') os.close(fd) # We only need this file to exist. We never write to it while os.path.exists(lockfile): args = [sys.executable] + sys.argv environ = os.environ.copy() environ['BOTTLE_CHILD'] = 'true' environ['BOTTLE_LOCKFILE'] = lockfile p = subprocess.Popen(args, env=environ) while p.poll() is None: # Busy wait... os.utime(lockfile, None) # I am alive! 
time.sleep(interval) if p.poll() != 3: if os.path.exists(lockfile): os.unlink(lockfile) sys.exit(p.poll()) except KeyboardInterrupt: pass finally: if os.path.exists(lockfile): os.unlink(lockfile) return try: if debug is not None: _debug(debug) app = app or default_app() if isinstance(app, basestring): app = load_app(app) if not callable(app): raise ValueError("Application is not callable: %r" % app) for plugin in plugins or []: app.install(plugin) if server in server_names: server = server_names.get(server) if isinstance(server, basestring): server = load(server) if isinstance(server, type): server = server(host=host, port=port, **kargs) if not isinstance(server, ServerAdapter): raise ValueError("Unknown or unsupported server: %r" % server) server.quiet = server.quiet or quiet if not server.quiet: _stderr("Server starting up\n") _stderr("Open http://%s:%d/ in your browser\n" % (server.host, server.port)) _stderr("Press Ctrl-C to quit\n\n") if reloader: lockfile = os.environ.get('BOTTLE_LOCKFILE') bgcheck = FileCheckerThread(lockfile, interval) with bgcheck: server.run(app) if bgcheck.status == 'reload': sys.exit(3) else: server.run(app) except KeyboardInterrupt: pass except (SystemExit, MemoryError): raise except: if not reloader: raise if not getattr(server, 'quiet', quiet): print_exc() time.sleep(interval) sys.exit(3) class FileCheckerThread(threading.Thread): ''' Interrupt main-thread as soon as a changed module file is detected, the lockfile gets deleted or gets too old. ''' def __init__(self, lockfile, interval): threading.Thread.__init__(self) self.lockfile, self.interval = lockfile, interval #: Is one of 'reload', 'error' or 'exit' self.status = None def run(self): exists = os.path.exists mtime = lambda path: os.stat(path).st_mtime files = dict() for module in list(sys.modules.values()): path = getattr(module, '__file__', '') or '' if path[-4:] in ('.pyo', '.pyc'): path = path[:-1] if path and exists(path): files[path] = mtime(path) while not self.status: if not exists(self.lockfile) \ or mtime(self.lockfile) < time.time() - self.interval - 5: self.status = 'error' thread.interrupt_main() for path, lmtime in list(files.items()): if not exists(path) or mtime(path) > lmtime: self.status = 'reload' thread.interrupt_main() break time.sleep(self.interval) def __enter__(self): self.start() def __exit__(self, exc_type, exc_val, exc_tb): if not self.status: self.status = 'exit' # silent exit self.join() return exc_type is not None and issubclass(exc_type, KeyboardInterrupt) ############################################################################### # Template Adapters ############################################################ ############################################################################### class TemplateError(HTTPError): def __init__(self, message): HTTPError.__init__(self, 500, message) class BaseTemplate(object): """ Base class and minimal API for template adapters """ extensions = ['tpl', 'html', 'thtml', 'stpl'] settings = {} # used in prepare() defaults = {} # used in render() def __init__(self, source=None, name=None, lookup=[], encoding='utf8', **settings): """ Create a new template. If the source parameter (str or buffer) is missing, the name argument is used to guess a template filename. Subclasses can assume that self.source and/or self.filename are set. Both are strings. The lookup, encoding and settings parameters are stored as instance variables. The lookup parameter stores a list containing directory paths. The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings. """ self.name = name self.source = source.read() if hasattr(source, 'read') else source self.filename = source.filename if hasattr(source, 'filename') else None self.lookup = [os.path.abspath(x) for x in lookup] self.encoding = encoding self.settings = self.settings.copy() # Copy from class variable self.settings.update(settings) # Apply if not self.source and self.name: self.filename = self.search(self.name, self.lookup) if not self.filename: raise TemplateError('Template %s not found.' % repr(name)) if not self.source and not self.filename: raise TemplateError('No template specified.') self.prepare(**self.settings) @classmethod def search(cls, name, lookup=[]): """ Search name in all directories specified in lookup. First without, then with common extensions. Return first hit. """ if not lookup: depr('The template lookup path list should not be empty.') # 0.12 lookup = ['.'] if os.path.isabs(name) and os.path.isfile(name): depr('Absolute template path names are deprecated.') # 0.12 return os.path.abspath(name) for spath in lookup: spath = os.path.abspath(spath) + os.sep fname = os.path.abspath(os.path.join(spath, name)) if not fname.startswith(spath): continue if os.path.isfile(fname): return fname for ext in cls.extensions: if os.path.isfile('%s.%s' % (fname, ext)): return '%s.%s' % (fname, ext) @classmethod def global_config(cls, key, *args): ''' This reads or sets the global settings stored in class.settings. ''' if args: cls.settings = cls.settings.copy() # Make settings local to class cls.settings[key] = args[0] else: return cls.settings[key] def prepare(self, **options): """ Run preparations (parsing, caching, ...). It should be possible to call this again to refresh a template or to update settings. """ raise NotImplementedError def render(self, *args, **kwargs): """ Render the template with the specified local variables and return a single byte or unicode string. If it is a byte string, the encoding must match self.encoding. This method must be thread-safe! Local variables may be provided in dictionaries (args) or directly, as keywords (kwargs). 
""" raise NotImplementedError class MakoTemplate(BaseTemplate): def prepare(self, **options): from mako.template import Template from mako.lookup import TemplateLookup options.update({'input_encoding': self.encoding}) options.setdefault('format_exceptions', bool(DEBUG)) lookup = TemplateLookup(directories=self.lookup, **options) if self.source: self.tpl = Template(self.source, lookup=lookup, **options) else: self.tpl = Template(uri=self.name, filename=self.filename, lookup=lookup, **options) def render(self, *args, **kwargs): for dictarg in args: kwargs.update(dictarg) _defaults = self.defaults.copy() _defaults.update(kwargs) return self.tpl.render(**_defaults) class CheetahTemplate(BaseTemplate): def prepare(self, **options): from Cheetah.Template import Template self.context = threading.local() self.context.vars = {} options['searchList'] = [self.context.vars] if self.source: self.tpl = Template(source=self.source, **options) else: self.tpl = Template(file=self.filename, **options) def render(self, *args, **kwargs): for dictarg in args: kwargs.update(dictarg) self.context.vars.update(self.defaults) self.context.vars.update(kwargs) out = str(self.tpl) self.context.vars.clear() return out class Jinja2Template(BaseTemplate): def prepare(self, filters=None, tests=None, globals={}, **kwargs): from jinja2 import Environment, FunctionLoader if 'prefix' in kwargs: # TODO: to be removed after a while raise RuntimeError('The keyword argument `prefix` has been removed. ' 'Use the full jinja2 environment name line_statement_prefix instead.') self.env = Environment(loader=FunctionLoader(self.loader), **kwargs) if filters: self.env.filters.update(filters) if tests: self.env.tests.update(tests) if globals: self.env.globals.update(globals) if self.source: self.tpl = self.env.from_string(self.source) else: self.tpl = self.env.get_template(self.filename) def render(self, *args, **kwargs): for dictarg in args: kwargs.update(dictarg) _defaults = self.defaults.copy() _defaults.update(kwargs) return self.tpl.render(**_defaults) def loader(self, name): fname = self.search(name, self.lookup) if not fname: return with open(fname, "rb") as f: return f.read().decode(self.encoding) class SimpleTemplate(BaseTemplate): def prepare(self, escape_func=html_escape, noescape=False, syntax=None, **ka): self.cache = {} enc = self.encoding self._str = lambda x: touni(x, enc) self._escape = lambda x: escape_func(touni(x, enc)) self.syntax = syntax if noescape: self._str, self._escape = self._escape, self._str @cached_property def co(self): return compile(self.code, self.filename or '<string>', 'exec') @cached_property def code(self): source = self.source if not source: with open(self.filename, 'rb') as f: source = f.read() try: source, encoding = touni(source), 'utf8' except UnicodeError: depr('Template encodings other than utf8 are no longer supported.') # 0.11 source, encoding = touni(source, 'latin1'), 'latin1' parser = StplParser(source, encoding=encoding, syntax=self.syntax) code = parser.translate() self.encoding = parser.encoding return code def _rebase(self, _env, _name=None, **kwargs): if _name is None: depr('Rebase function called without arguments.' ' You were probably looking for {{base}}?', True) # 0.12 _env['_rebase'] = (_name, kwargs) def _include(self, _env, _name=None, **kwargs): if _name is None: depr('Rebase function called without arguments.' 
' You were probably looking for {{base}}?', True) # 0.12 env = _env.copy() env.update(kwargs) if _name not in self.cache: self.cache[_name] = self.__class__(name=_name, lookup=self.lookup) return self.cache[_name].execute(env['_stdout'], env) def execute(self, _stdout, kwargs): env = self.defaults.copy() env.update(kwargs) env.update({'_stdout': _stdout, '_printlist': _stdout.extend, 'include': functools.partial(self._include, env), 'rebase': functools.partial(self._rebase, env), '_rebase': None, '_str': self._str, '_escape': self._escape, 'get': env.get, 'setdefault': env.setdefault, 'defined': env.__contains__}) eval(self.co, env) if env.get('_rebase'): subtpl, rargs = env.pop('_rebase') rargs['base'] = ''.join(_stdout) # copy stdout del _stdout[:] # clear stdout return self._include(env, subtpl, **rargs) return env def render(self, *args, **kwargs): """ Render the template using keyword arguments as local variables. """ env = {}; stdout = [] for dictarg in args: env.update(dictarg) env.update(kwargs) self.execute(stdout, env) return ''.join(stdout) class StplSyntaxError(TemplateError): pass class StplParser(object): ''' Parser for stpl templates. ''' _re_cache = {} #: Cache for compiled re patterns # This huge pile of voodoo magic splits python code into 8 different tokens. # 1: All kinds of python strings (trust me, it works) _re_tok = '([urbURB]?(?:\'\'(?!\')|""(?!")|\'{6}|"{6}' \ '|\'(?:[^\\\\\']|\\\\.)+?\'|"(?:[^\\\\"]|\\\\.)+?"' \ '|\'{3}(?:[^\\\\]|\\\\.|\\n)+?\'{3}' \ '|"{3}(?:[^\\\\]|\\\\.|\\n)+?"{3}))' _re_inl = _re_tok.replace('|\\n', '') # We re-use this string pattern later # 2: Comments (until end of line, but not the newline itself) _re_tok += '|(#.*)' # 3,4: Open and close grouping tokens _re_tok += '|([\[\{\(])' _re_tok += '|([\]\}\)])' # 5,6: Keywords that start or continue a python block (only start of line) _re_tok += '|^([ \\t]*(?:if|for|while|with|try|def|class)\\b)' \ '|^([ \\t]*(?:elif|else|except|finally)\\b)' # 7: Our special 'end' keyword (but only if it stands alone) _re_tok += '|((?:^|;)[ \\t]*end[ \\t]*(?=(?:%(block_close)s[ \\t]*)?\\r?$|;|#))' # 8: A customizable end-of-code-block template token (only end of line) _re_tok += '|(%(block_close)s[ \\t]*(?=\\r?$))' # 9: And finally, a single newline. 
The 10th token is 'everything else' _re_tok += '|(\\r?\\n)' # Match the start tokens of code areas in a template _re_split = '(?m)^[ \t]*(\\\\?)((%(line_start)s)|(%(block_start)s))(%%?)' # Match inline statements (may contain python strings) _re_inl = '(?m)%%(inline_start)s((?:%s|[^\'"\n]*?)+)%%(inline_end)s' % _re_inl _re_tok = '(?m)' + _re_tok default_syntax = '<% %> % {{ }}' def __init__(self, source, syntax=None, encoding='utf8'): self.source, self.encoding = touni(source, encoding), encoding self.set_syntax(syntax or self.default_syntax) self.code_buffer, self.text_buffer = [], [] self.lineno, self.offset = 1, 0 self.indent, self.indent_mod = 0, 0 self.paren_depth = 0 def get_syntax(self): ''' Tokens as a space separated string (default: <% %> % {{ }}) ''' return self._syntax def set_syntax(self, syntax): self._syntax = syntax self._tokens = syntax.split() if not syntax in self._re_cache: names = 'block_start block_close line_start inline_start inline_end' etokens = map(re.escape, self._tokens) pattern_vars = dict(zip(names.split(), etokens)) patterns = (self._re_split, self._re_tok, self._re_inl) patterns = [re.compile(p % pattern_vars) for p in patterns] self._re_cache[syntax] = patterns self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax] syntax = property(get_syntax, set_syntax) def translate(self): if self.offset: raise RuntimeError('Parser is a one time instance.') while True: m = self.re_split.search(self.source[self.offset:]) if m: text = self.source[self.offset:self.offset + m.start()] self.text_buffer.append(text) self.offset += m.end() if m.group(1): # New escape syntax line, sep, _ = self.source[self.offset:].partition('\n') self.text_buffer.append(m.group(2) + m.group(5) + line + sep) self.offset += len(line + sep) + 1 continue elif m.group(5): # Old escape syntax depr('Escape code lines with a backslash.') # 0.12 line, sep, _ = self.source[self.offset:].partition('\n') self.text_buffer.append(m.group(2) + line + sep) self.offset += len(line + sep) + 1 continue self.flush_text() self.read_code(multiline=bool(m.group(4))) else: break self.text_buffer.append(self.source[self.offset:]) self.flush_text() return ''.join(self.code_buffer) def read_code(self, multiline): code_line, comment = '', '' while True: m = self.re_tok.search(self.source[self.offset:]) if not m: code_line += self.source[self.offset:] self.offset = len(self.source) self.write_code(code_line.strip(), comment) return code_line += self.source[self.offset:self.offset + m.start()] self.offset += m.end() _str, _com, _po, _pc, _blk1, _blk2, _end, _cend, _nl = m.groups() if (code_line or self.paren_depth > 0) and (_blk1 or _blk2): # a if b else c code_line += _blk1 or _blk2 continue if _str: # Python string code_line += _str elif _com: # Python comment (up to EOL) comment = _com if multiline and _com.strip().endswith(self._tokens[1]): multiline = False # Allow end-of-block in comments elif _po: # open parenthesis self.paren_depth += 1 code_line += _po elif _pc: # close parenthesis if self.paren_depth > 0: # we could check for matching parentheses here, but it's # easier to leave that to python - just check counts self.paren_depth -= 1 code_line += _pc elif _blk1: # Start-block keyword (if/for/while/def/try/...) code_line, self.indent_mod = _blk1, -1 self.indent += 1 elif _blk2: # Continue-block keyword (else/elif/except/...) 
code_line, self.indent_mod = _blk2, -1 elif _end: # The non-standard 'end'-keyword (ends a block) self.indent -= 1 elif _cend: # The end-code-block template token (usually '%>') if multiline: multiline = False else: code_line += _cend else: # \n self.write_code(code_line.strip(), comment) self.lineno += 1 code_line, comment, self.indent_mod = '', '', 0 if not multiline: break def flush_text(self): text = ''.join(self.text_buffer) del self.text_buffer[:] if not text: return parts, pos, nl = [], 0, '\\\n' + ' ' * self.indent for m in self.re_inl.finditer(text): prefix, pos = text[pos:m.start()], m.end() if prefix: parts.append(nl.join(map(repr, prefix.splitlines(True)))) if prefix.endswith('\n'): parts[-1] += nl parts.append(self.process_inline(m.group(1).strip())) if pos < len(text): prefix = text[pos:] lines = prefix.splitlines(True) if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3] elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4] parts.append(nl.join(map(repr, lines))) code = '_printlist((%s,))' % ', '.join(parts) self.lineno += code.count('\n') + 1 self.write_code(code) def process_inline(self, chunk): if chunk[0] == '!': return '_str(%s)' % chunk[1:] return '_escape(%s)' % chunk def write_code(self, line, comment=''): line, comment = self.fix_backward_compatibility(line, comment) code = ' ' * (self.indent + self.indent_mod) code += line.lstrip() + comment + '\n' self.code_buffer.append(code) def fix_backward_compatibility(self, line, comment): parts = line.strip().split(None, 2) if parts and parts[0] in ('include', 'rebase'): depr('The include and rebase keywords are functions now.') # 0.12 if len(parts) == 1: return "_printlist([base])", comment elif len(parts) == 2: return "_=%s(%r)" % tuple(parts), comment else: return "_=%s(%r, %s)" % tuple(parts), comment if self.lineno <= 2 and not line.strip() and 'coding' in comment: m = re.match(r"#.*coding[:=]\s*([-\w.]+)", comment) if m: depr('PEP263 encoding strings in templates are deprecated.') # 0.12 enc = m.group(1) self.source = self.source.encode(self.encoding).decode(enc) self.encoding = enc return line, comment.replace('coding', 'coding*') return line, comment def template(*args, **kwargs): ''' Get a rendered template as a string iterator. You can use a name, a filename or a template string as first parameter. Template rendering arguments can be passed as dictionaries or directly (as keyword arguments). ''' tpl = args[0] if args else None adapter = kwargs.pop('template_adapter', SimpleTemplate) lookup = kwargs.pop('template_lookup', TEMPLATE_PATH) tplid = (id(lookup), tpl) if tplid not in TEMPLATES or DEBUG: settings = kwargs.pop('template_settings', {}) if isinstance(tpl, adapter): TEMPLATES[tplid] = tpl if settings: TEMPLATES[tplid].prepare(**settings) elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl: TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings) else: TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings) if not TEMPLATES[tplid]: abort(500, 'Template (%s) not found' % tpl) for dictarg in args[1:]: kwargs.update(dictarg) return TEMPLATES[tplid].render(kwargs) mako_template = functools.partial(template, template_adapter=MakoTemplate) cheetah_template = functools.partial(template, template_adapter=CheetahTemplate) jinja2_template = functools.partial(template, template_adapter=Jinja2Template) def view(tpl_name, **defaults): ''' Decorator: renders a template for a handler. 
The handler can control its behavior like that: - return a dict of template vars to fill out the template - return something other than a dict and the view decorator will not process the template, but return the handler result as is. This includes returning a HTTPResponse(dict) to get, for instance, JSON with autojson or other castfilters. ''' def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): result = func(*args, **kwargs) if isinstance(result, (dict, DictMixin)): tplvars = defaults.copy() tplvars.update(result) return template(tpl_name, **tplvars) elif result is None: return template(tpl_name, defaults) return result return wrapper return decorator mako_view = functools.partial(view, template_adapter=MakoTemplate) cheetah_view = functools.partial(view, template_adapter=CheetahTemplate) jinja2_view = functools.partial(view, template_adapter=Jinja2Template) ############################################################################### # Constants and Globals ######################################################## ############################################################################### TEMPLATE_PATH = ['./', './views/'] TEMPLATES = {} DEBUG = False NORUN = False # If set, run() does nothing. Used by load_app() #: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found') HTTP_CODES = httplib.responses HTTP_CODES[418] = "I'm a teapot" # RFC 2324 HTTP_CODES[422] = "Unprocessable Entity" # RFC 4918 HTTP_CODES[428] = "Precondition Required" HTTP_CODES[429] = "Too Many Requests" HTTP_CODES[431] = "Request Header Fields Too Large" HTTP_CODES[511] = "Network Authentication Required" _HTTP_STATUS_LINES = dict((k, '%d %s' % (k, v)) for (k, v) in HTTP_CODES.items()) #: The default template used for error pages. Override with @error() ERROR_PAGE_TEMPLATE = """ %%try: %%from %s import DEBUG, HTTP_CODES, request, touni <!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN"> <html> <head> <title>Error: {{e.status}}</title> <style type="text/css"> html {background-color: #eee; font-family: sans;} body {background-color: #fff; border: 1px solid #ddd; padding: 15px; margin: 15px;} pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;} </style> </head> <body> <h1>Error: {{e.status}}</h1> <p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt> caused an error:</p> <pre>{{e.body}}</pre> %%if DEBUG and e.exception: <h2>Exception:</h2> <pre>{{repr(e.exception)}}</pre> %%end %%if DEBUG and e.traceback: <h2>Traceback:</h2> <pre>{{e.traceback}}</pre> %%end </body> </html> %%except ImportError: <b>ImportError:</b> Could not generate the error page. Please add bottle to the import path. %%end """ % __name__ #: A thread-safe instance of :class:`LocalRequest`. If accessed from within a #: request callback, this instance always refers to the *current* request #: (even on a multithreaded server). request = LocalRequest() #: A thread-safe instance of :class:`LocalResponse`. It is used to change the #: HTTP response for the *current* request. response = LocalResponse() #: A thread-safe namespace. Not used by Bottle. local = threading.local() # Initialize app stack (create first empty Bottle app) # BC: 0.6.4 and needed for run() app = default_app = AppStack() app.push() #: A virtual package that redirects import statements. #: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`. 
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else __name__ + ".ext", 'bottle_%s').module if __name__ == '__main__': opt, args, parser = _cmd_options, _cmd_args, _cmd_parser if opt.version: _stdout('Bottle %s\n' % __version__) sys.exit(0) if not args: parser.print_help() _stderr('\nError: No application specified.\n') sys.exit(1) sys.path.insert(0, '.') sys.modules.setdefault('bottle', sys.modules['__main__']) host, port = (opt.bind or 'localhost'), 8080 if ':' in host and host.rfind(']') < host.rfind(':'): host, port = host.rsplit(':', 1) host = host.strip('[]') run(args[0], host=host, port=int(port), server=opt.server, reloader=opt.reload, plugins=opt.plugin, debug=opt.debug) # THE END
[]
[]
[ "BOTTLE_LOCKFILE", "BOTTLE_CHILD" ]
[]
["BOTTLE_LOCKFILE", "BOTTLE_CHILD"]
python
2
0
example/exclient/db/db.go
package db

import (
	"database/sql"
	"flag"
	"os"
	"testing"

	_ "github.com/go-sql-driver/mysql"
	"github.com/luno/reflex/example/internal/db"
	"github.com/luno/reflex/rsql"
	"github.com/stretchr/testify/require"
)

var dbURI = flag.String("db_example_uri", getDefaultURI(), "URI of reflex example client DB")

var Cursors = rsql.NewCursorsTable("client_cursors")

func Connect() (*sql.DB, error) {
	return db.Connect(*dbURI)
}

func ConnectForTesting(t *testing.T) *sql.DB {
	dbc, err := db.ConnectForTesting(t, *dbURI+"parseTime=true", "schema.sql")
	require.NoError(t, err, "uri: %s\nerr: %v", *dbURI, err)
	return dbc
}

func getDefaultURI() string {
	uri := os.Getenv("DB_EXAMPLE_CLIENT_URI")
	if uri != "" {
		return uri
	}
	return "root@tcp(localhost:3306)/test?"
}
[ "\"DB_EXAMPLE_CLIENT_URI\"" ]
[]
[ "DB_EXAMPLE_CLIENT_URI" ]
[]
["DB_EXAMPLE_CLIENT_URI"]
go
1
0
gopls/internal/regtest/diagnostics/diagnostics_test.go
// Copyright 2020 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package diagnostics import ( "context" "fmt" "os/exec" "testing" "golang.org/x/tools/gopls/internal/hooks" . "golang.org/x/tools/internal/lsp/regtest" "golang.org/x/tools/internal/lsp" "golang.org/x/tools/internal/lsp/fake" "golang.org/x/tools/internal/lsp/protocol" "golang.org/x/tools/internal/testenv" ) func TestMain(m *testing.M) { Main(m, hooks.Options) } // Use mod.com for all go.mod files due to golang/go#35230. const exampleProgram = ` -- go.mod -- module mod.com go 1.12 -- main.go -- package main import "fmt" func main() { fmt.Println("Hello World.") }` func TestDiagnosticErrorInEditedFile(t *testing.T) { // This test is very basic: start with a clean Go program, make an error, and // get a diagnostic for that error. However, it also demonstrates how to // combine Expectations to await more complex state in the editor. Run(t, exampleProgram, func(t *testing.T, env *Env) { // Deleting the 'n' at the end of Println should generate a single error // diagnostic. env.OpenFile("main.go") env.RegexpReplace("main.go", "Printl(n)", "") env.Await( // Once we have gotten diagnostics for the change above, we should // satisfy the DiagnosticAtRegexp assertion. OnceMet( env.DoneWithChange(), env.DiagnosticAtRegexp("main.go", "Printl"), ), // Assert that this test has sent no error logs to the client. This is not // strictly necessary for testing this regression, but is included here // as an example of using the NoErrorLogs() expectation. Feel free to // delete. NoErrorLogs(), ) }) } func TestMissingImportDiagsClearOnFirstFile(t *testing.T) { const onlyMod = ` -- go.mod -- module mod.com go 1.12 ` Run(t, onlyMod, func(t *testing.T, env *Env) { env.CreateBuffer("main.go", `package main func m() { log.Println() } `) env.Await( env.DiagnosticAtRegexp("main.go", "log"), ) env.SaveBuffer("main.go") env.Await( EmptyDiagnostics("main.go"), ) }) } func TestDiagnosticErrorInNewFile(t *testing.T) { const brokenFile = `package main const Foo = "abc ` Run(t, brokenFile, func(t *testing.T, env *Env) { env.CreateBuffer("broken.go", brokenFile) env.Await(env.DiagnosticAtRegexp("broken.go", "\"abc")) }) } // badPackage contains a duplicate definition of the 'a' const. const badPackage = ` -- go.mod -- module mod.com go 1.12 -- a.go -- package consts const a = 1 -- b.go -- package consts const a = 2 ` func TestDiagnosticClearingOnEdit(t *testing.T) { Run(t, badPackage, func(t *testing.T, env *Env) { env.OpenFile("b.go") env.Await(env.DiagnosticAtRegexp("a.go", "a = 1"), env.DiagnosticAtRegexp("b.go", "a = 2")) // Fix the error by editing the const name in b.go to `b`. 
env.RegexpReplace("b.go", "(a) = 2", "b") env.Await( EmptyDiagnostics("a.go"), EmptyDiagnostics("b.go"), ) }) } func TestDiagnosticClearingOnDelete_Issue37049(t *testing.T) { Run(t, badPackage, func(t *testing.T, env *Env) { env.OpenFile("a.go") env.Await(env.DiagnosticAtRegexp("a.go", "a = 1"), env.DiagnosticAtRegexp("b.go", "a = 2")) env.RemoveWorkspaceFile("b.go") env.Await(EmptyDiagnostics("a.go"), EmptyDiagnostics("b.go")) }) } func TestDiagnosticClearingOnClose(t *testing.T) { Run(t, badPackage, func(t *testing.T, env *Env) { env.CreateBuffer("c.go", `package consts const a = 3`) env.Await( env.DiagnosticAtRegexp("a.go", "a = 1"), env.DiagnosticAtRegexp("b.go", "a = 2"), env.DiagnosticAtRegexp("c.go", "a = 3"), ) env.CloseBuffer("c.go") env.Await( env.DiagnosticAtRegexp("a.go", "a = 1"), env.DiagnosticAtRegexp("b.go", "a = 2"), EmptyDiagnostics("c.go"), ) }) } // Tests golang/go#37978. func TestIssue37978(t *testing.T) { Run(t, exampleProgram, func(t *testing.T, env *Env) { // Create a new workspace-level directory and empty file. env.CreateBuffer("c/c.go", "") // Write the file contents with a missing import. env.EditBuffer("c/c.go", fake.Edit{ Text: `package c const a = http.MethodGet `, }) env.Await( env.DiagnosticAtRegexp("c/c.go", "http.MethodGet"), ) // Save file, which will organize imports, adding the expected import. // Expect the diagnostics to clear. env.SaveBuffer("c/c.go") env.Await( EmptyDiagnostics("c/c.go"), ) }) } // Tests golang/go#38878: good a.go, bad a_test.go, remove a_test.go but its errors remain // If the file is open in the editor, this is working as intended // If the file is not open in the editor, the errors go away const test38878 = ` -- go.mod -- module foo go 1.12 -- a.go -- package x // import "fmt" func f() {} -- a_test.go -- package x import "testing" func TestA(t *testing.T) { f(3) } ` // Tests golang/go#38878: deleting a test file should clear its errors, and // not break the workspace. func TestDeleteTestVariant(t *testing.T) { Run(t, test38878, func(t *testing.T, env *Env) { env.Await(env.DiagnosticAtRegexp("a_test.go", `f\((3)\)`)) env.RemoveWorkspaceFile("a_test.go") env.Await(EmptyDiagnostics("a_test.go")) // Make sure the test variant has been removed from the workspace by // triggering a metadata load. env.OpenFile("a.go") env.RegexpReplace("a.go", `// import`, "import") env.Await(env.DiagnosticAtRegexp("a.go", `"fmt"`)) }) } // Tests golang/go#38878: deleting a test file on disk while it's still open // should not clear its errors. func TestDeleteTestVariant_DiskOnly(t *testing.T) { Run(t, test38878, func(t *testing.T, env *Env) { env.OpenFile("a_test.go") env.Await(DiagnosticAt("a_test.go", 5, 3)) env.Sandbox.Workdir.RemoveFile(context.Background(), "a_test.go") env.Await(OnceMet( env.DoneWithChangeWatchedFiles(), DiagnosticAt("a_test.go", 5, 3))) }) } // TestNoMod confirms that gopls continues to work when a user adds a go.mod // file to their workspace. 
func TestNoMod(t *testing.T) { const noMod = ` -- main.go -- package main import "mod.com/bob" func main() { bob.Hello() } -- bob/bob.go -- package bob func Hello() { var x int } ` t.Run("manual", func(t *testing.T) { Run(t, noMod, func(t *testing.T, env *Env) { env.Await( env.DiagnosticAtRegexp("main.go", `"mod.com/bob"`), ) env.CreateBuffer("go.mod", `module mod.com go 1.12 `) env.SaveBuffer("go.mod") env.Await( EmptyDiagnostics("main.go"), ) var d protocol.PublishDiagnosticsParams env.Await( OnceMet( env.DiagnosticAtRegexp("bob/bob.go", "x"), ReadDiagnostics("bob/bob.go", &d), ), ) if len(d.Diagnostics) != 1 { t.Fatalf("expected 1 diagnostic, got %v", len(d.Diagnostics)) } }) }) t.Run("initialized", func(t *testing.T) { Run(t, noMod, func(t *testing.T, env *Env) { env.Await( env.DiagnosticAtRegexp("main.go", `"mod.com/bob"`), ) env.RunGoCommand("mod", "init", "mod.com") env.Await( EmptyDiagnostics("main.go"), env.DiagnosticAtRegexp("bob/bob.go", "x"), ) }) }) t.Run("without workspace module", func(t *testing.T) { WithOptions( Modes(Singleton), ).Run(t, noMod, func(t *testing.T, env *Env) { env.Await( env.DiagnosticAtRegexp("main.go", `"mod.com/bob"`), ) if err := env.Sandbox.RunGoCommand(env.Ctx, "", "mod", []string{"init", "mod.com"}, true); err != nil { t.Fatal(err) } env.Await( EmptyDiagnostics("main.go"), env.DiagnosticAtRegexp("bob/bob.go", "x"), ) }) }) } // Tests golang/go#38267. func TestIssue38267(t *testing.T) { const testPackage = ` -- go.mod -- module mod.com go 1.12 -- lib.go -- package lib func Hello(x string) { _ = x } -- lib_test.go -- package lib import "testing" type testStruct struct{ name string } func TestHello(t *testing.T) { testStructs := []*testStruct{ &testStruct{"hello"}, &testStruct{"goodbye"}, } for y := range testStructs { _ = y } } ` Run(t, testPackage, func(t *testing.T, env *Env) { env.OpenFile("lib_test.go") env.Await( DiagnosticAt("lib_test.go", 10, 2), DiagnosticAt("lib_test.go", 11, 2), ) env.OpenFile("lib.go") env.RegexpReplace("lib.go", "_ = x", "var y int") env.Await( env.DiagnosticAtRegexp("lib.go", "y int"), EmptyDiagnostics("lib_test.go"), ) }) } // Tests golang/go#38328. func TestPackageChange_Issue38328(t *testing.T) { const packageChange = ` -- go.mod -- module fake go 1.12 -- a.go -- package foo func main() {} ` Run(t, packageChange, func(t *testing.T, env *Env) { env.OpenFile("a.go") env.RegexpReplace("a.go", "foo", "foox") env.Await( // When the bug reported in #38328 was present, we didn't get erroneous // file diagnostics until after the didChange message generated by the // package renaming was fully processed. Therefore, in order for this // test to actually exercise the bug, we must wait until that work has // completed. 
OnceMet( env.DoneWithChange(), NoDiagnostics("a.go"), ), ) }) } const testPackageWithRequire = ` -- go.mod -- module mod.com go 1.12 require foo.test v1.2.3 -- go.sum -- foo.test v1.2.3 h1:TMA+lyd1ck0TqjSFpNe4T6cf/K6TYkoHwOOcMBMjaEw= foo.test v1.2.3/go.mod h1:Ij3kyLIe5lzjycjh13NL8I2gX0quZuTdW0MnmlwGBL4= -- print.go -- package lib import ( "fmt" "foo.test/bar" ) func PrintAnswer() { fmt.Printf("answer: %s", bar.Answer) } ` const testPackageWithRequireProxy = ` -- [email protected]/go.mod -- module foo.test go 1.12 -- [email protected]/bar/const.go -- package bar const Answer = 42 ` func TestResolveDiagnosticWithDownload(t *testing.T) { WithOptions( ProxyFiles(testPackageWithRequireProxy), ).Run(t, testPackageWithRequire, func(t *testing.T, env *Env) { env.OpenFile("print.go") // Check that gopackages correctly loaded this dependency. We should get a // diagnostic for the wrong formatting type. // TODO: we should be able to easily also match the diagnostic message. env.Await(env.DiagnosticAtRegexp("print.go", "fmt.Printf")) }) } func TestMissingDependency(t *testing.T) { Run(t, testPackageWithRequire, func(t *testing.T, env *Env) { env.OpenFile("print.go") env.Await(LogMatching(protocol.Error, "initial workspace load failed", 1, false)) }) } // Tests golang/go#36951. func TestAdHocPackages_Issue36951(t *testing.T) { const adHoc = ` -- b/b.go -- package b func Hello() { var x int } ` Run(t, adHoc, func(t *testing.T, env *Env) { env.OpenFile("b/b.go") env.Await(env.DiagnosticAtRegexp("b/b.go", "x")) }) } // Tests golang/go#37984: GOPATH should be read from the go command. func TestNoGOPATH_Issue37984(t *testing.T) { const files = ` -- main.go -- package main func _() { fmt.Println("Hello World") } ` WithOptions( EditorConfig{ Env: map[string]string{ "GOPATH": "", "GO111MODULE": "off", }, }).Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("main.go") env.Await(env.DiagnosticAtRegexp("main.go", "fmt")) env.SaveBuffer("main.go") env.Await(EmptyDiagnostics("main.go")) }) } // Tests golang/go#38669. func TestEqualInEnv_Issue38669(t *testing.T) { const files = ` -- go.mod -- module mod.com go 1.12 -- main.go -- package main var _ = x.X -- x/x.go -- package x var X = 0 ` editorConfig := EditorConfig{Env: map[string]string{"GOFLAGS": "-tags=foo"}} WithOptions(editorConfig).Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("main.go") env.OrganizeImports("main.go") env.Await(EmptyDiagnostics("main.go")) }) } // Tests golang/go#38467. func TestNoSuggestedFixesForGeneratedFiles_Issue38467(t *testing.T) { const generated = ` -- go.mod -- module mod.com go 1.12 -- main.go -- package main // Code generated by generator.go. DO NOT EDIT. func _() { for i, _ := range []string{} { _ = i } } ` Run(t, generated, func(t *testing.T, env *Env) { env.OpenFile("main.go") var d protocol.PublishDiagnosticsParams env.Await( OnceMet( DiagnosticAt("main.go", 5, 8), ReadDiagnostics("main.go", &d), ), ) if fixes := env.GetQuickFixes("main.go", d.Diagnostics); len(fixes) != 0 { t.Errorf("got quick fixes %v, wanted none", fixes) } }) } // Expect a module/GOPATH error if there is an error in the file at startup. // Tests golang/go#37279. 
func TestShowCriticalError_Issue37279(t *testing.T) { const noModule = ` -- a.go -- package foo import "mod.com/hello" func f() { hello.Goodbye() } ` Run(t, noModule, func(t *testing.T, env *Env) { env.OpenFile("a.go") env.Await( OutstandingWork(lsp.WorkspaceLoadFailure, "outside of a module"), ) env.RegexpReplace("a.go", `import "mod.com/hello"`, "") env.Await( NoOutstandingWork(), ) }) } func TestNonGoFolder(t *testing.T) { const files = ` -- hello.txt -- hi mom ` for _, go111module := range []string{"on", "off", ""} { t.Run(fmt.Sprintf("GO111MODULE_%v", go111module), func(t *testing.T) { WithOptions(EditorConfig{ Env: map[string]string{"GO111MODULE": go111module}, }).Run(t, files, func(t *testing.T, env *Env) { env.Await( NoOutstandingWork(), ) }) }) } } // Tests the repro case from golang/go#38602. Diagnostics are now handled properly, // which blocks type checking. func TestConflictingMainPackageErrors(t *testing.T) { const collision = ` -- x/x.go -- package x import "x/hello" func Hello() { hello.HiThere() } -- x/main.go -- package main func main() { fmt.Println("") } ` WithOptions( InGOPATH(), EditorConfig{ Env: map[string]string{ "GO111MODULE": "off", }, }, ).Run(t, collision, func(t *testing.T, env *Env) { env.OpenFile("x/x.go") env.Await( env.DiagnosticAtRegexpWithMessage("x/x.go", `^`, "found packages main (main.go) and x (x.go)"), env.DiagnosticAtRegexpWithMessage("x/main.go", `^`, "found packages main (main.go) and x (x.go)"), ) // We don't recover cleanly from the errors without good overlay support. if testenv.Go1Point() >= 16 { env.RegexpReplace("x/x.go", `package x`, `package main`) env.Await(OnceMet( env.DoneWithChange(), env.DiagnosticAtRegexpWithMessage("x/main.go", `fmt`, "undeclared name"))) } }) } const ardanLabsProxy = ` -- github.com/ardanlabs/[email protected]/go.mod -- module github.com/ardanlabs/conf go 1.12 -- github.com/ardanlabs/[email protected]/conf.go -- package conf var ErrHelpWanted error ` // Test for golang/go#38211. func Test_Issue38211(t *testing.T) { testenv.NeedsGo1Point(t, 14) const ardanLabs = ` -- go.mod -- module mod.com go 1.14 -- main.go -- package main import "github.com/ardanlabs/conf" func main() { _ = conf.ErrHelpWanted } ` WithOptions( ProxyFiles(ardanLabsProxy), ).Run(t, ardanLabs, func(t *testing.T, env *Env) { // Expect a diagnostic with a suggested fix to add // "github.com/ardanlabs/conf" to the go.mod file. env.OpenFile("go.mod") env.OpenFile("main.go") var d protocol.PublishDiagnosticsParams env.Await( OnceMet( env.DiagnosticAtRegexp("main.go", `"github.com/ardanlabs/conf"`), ReadDiagnostics("main.go", &d), ), ) env.ApplyQuickFixes("main.go", d.Diagnostics) env.SaveBuffer("go.mod") env.Await( EmptyDiagnostics("main.go"), ) // Comment out the line that depends on conf and expect a // diagnostic and a fix to remove the import. env.RegexpReplace("main.go", "_ = conf.ErrHelpWanted", "//_ = conf.ErrHelpWanted") env.Await( env.DiagnosticAtRegexp("main.go", `"github.com/ardanlabs/conf"`), ) env.SaveBuffer("main.go") // Expect a diagnostic and fix to remove the dependency in the go.mod. env.Await(EmptyDiagnostics("main.go")) env.Await( OnceMet( env.DiagnosticAtRegexpWithMessage("go.mod", "require github.com/ardanlabs/conf", "not used in this module"), ReadDiagnostics("go.mod", &d), ), ) env.ApplyQuickFixes("go.mod", d.Diagnostics) env.SaveBuffer("go.mod") env.Await( EmptyDiagnostics("go.mod"), ) // Uncomment the lines and expect a new diagnostic for the import. 
env.RegexpReplace("main.go", "//_ = conf.ErrHelpWanted", "_ = conf.ErrHelpWanted") env.SaveBuffer("main.go") env.Await( env.DiagnosticAtRegexp("main.go", `"github.com/ardanlabs/conf"`), ) }) } // Test for golang/go#38207. func TestNewModule_Issue38207(t *testing.T) { testenv.NeedsGo1Point(t, 14) const emptyFile = ` -- go.mod -- module mod.com go 1.12 -- main.go -- ` WithOptions( ProxyFiles(ardanLabsProxy), ).Run(t, emptyFile, func(t *testing.T, env *Env) { env.CreateBuffer("main.go", `package main import "github.com/ardanlabs/conf" func main() { _ = conf.ErrHelpWanted } `) env.SaveBuffer("main.go") var d protocol.PublishDiagnosticsParams env.Await( OnceMet( env.DiagnosticAtRegexpWithMessage("main.go", `"github.com/ardanlabs/conf"`, "no required module"), ReadDiagnostics("main.go", &d), ), ) env.ApplyQuickFixes("main.go", d.Diagnostics) env.Await( EmptyDiagnostics("main.go"), ) }) } // Test for golang/go#36960. func TestNewFileBadImports_Issue36960(t *testing.T) { testenv.NeedsGo1Point(t, 14) const simplePackage = ` -- go.mod -- module mod.com go 1.14 -- a/a1.go -- package a import "fmt" func _() { fmt.Println("hi") } ` Run(t, simplePackage, func(t *testing.T, env *Env) { env.OpenFile("a/a1.go") env.CreateBuffer("a/a2.go", ``) env.SaveBufferWithoutActions("a/a2.go") env.Await( OnceMet( env.DoneWithSave(), NoDiagnostics("a/a1.go"), ), ) env.EditBuffer("a/a2.go", fake.NewEdit(0, 0, 0, 0, `package a`)) env.Await( OnceMet(env.DoneWithChange(), NoDiagnostics("a/a1.go")), ) }) } // This test tries to replicate the workflow of a user creating a new x test. // It also tests golang/go#39315. func TestManuallyCreatingXTest(t *testing.T) { // Only for 1.15 because of golang/go#37971. testenv.NeedsGo1Point(t, 15) // Create a package that already has a test variant (in-package test). const testVariant = ` -- go.mod -- module mod.com go 1.15 -- hello/hello.go -- package hello func Hello() { var x int } -- hello/hello_test.go -- package hello import "testing" func TestHello(t *testing.T) { var x int Hello() } ` Run(t, testVariant, func(t *testing.T, env *Env) { // Open the file, triggering the workspace load. // There are errors in the code to ensure all is working as expected. env.OpenFile("hello/hello.go") env.Await( env.DiagnosticAtRegexp("hello/hello.go", "x"), env.DiagnosticAtRegexp("hello/hello_test.go", "x"), ) // Create an empty file with the intention of making it an x test. // This resembles a typical flow in an editor like VS Code, in which // a user would create an empty file and add content, saving // intermittently. // TODO(rstambler): There might be more edge cases here, as file // content can be added incrementally. env.CreateBuffer("hello/hello_x_test.go", ``) // Save the empty file (no actions since formatting will fail). env.SaveBufferWithoutActions("hello/hello_x_test.go") // Add the content. The missing import is for the package under test. env.EditBuffer("hello/hello_x_test.go", fake.NewEdit(0, 0, 0, 0, `package hello_test import ( "testing" ) func TestHello(t *testing.T) { hello.Hello() } `)) // Expect a diagnostic for the missing import. Save, which should // trigger import organization. The diagnostic should clear. env.Await( env.DiagnosticAtRegexp("hello/hello_x_test.go", "hello.Hello"), ) env.SaveBuffer("hello/hello_x_test.go") env.Await( EmptyDiagnostics("hello/hello_x_test.go"), ) }) } // Reproduce golang/go#40690. 
func TestCreateOnlyXTest(t *testing.T) { testenv.NeedsGo1Point(t, 13) const mod = ` -- go.mod -- module mod.com go 1.12 -- foo/foo.go -- package foo -- foo/bar_test.go -- ` Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("foo/bar_test.go") env.EditBuffer("foo/bar_test.go", fake.NewEdit(0, 0, 0, 0, "package foo")) env.Await(env.DoneWithChange()) env.RegexpReplace("foo/bar_test.go", "package foo", `package foo_test import "testing" func TestX(t *testing.T) { var x int } `) env.Await( env.DiagnosticAtRegexp("foo/bar_test.go", "x"), ) }) } func TestChangePackageName(t *testing.T) { t.Skip("This issue hasn't been fixed yet. See golang.org/issue/41061.") const mod = ` -- go.mod -- module mod.com go 1.12 -- foo/foo.go -- package foo -- foo/bar_test.go -- package foo_ ` Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("foo/bar_test.go") env.RegexpReplace("foo/bar_test.go", "package foo_", "package foo_test") env.SaveBuffer("foo/bar_test.go") env.Await( OnceMet( env.DoneWithSave(), NoDiagnostics("foo/bar_test.go"), ), OnceMet( env.DoneWithSave(), NoDiagnostics("foo/foo.go"), ), ) }) } func TestIgnoredFiles(t *testing.T) { const ws = ` -- go.mod -- module mod.com go 1.12 -- _foo/x.go -- package x var _ = foo.Bar ` Run(t, ws, func(t *testing.T, env *Env) { env.OpenFile("_foo/x.go") env.Await( OnceMet( env.DoneWithOpen(), NoDiagnostics("_foo/x.go"), )) }) } // Partially reproduces golang/go#38977, moving a file between packages. // It also gets hit by some go command bug fixed in 1.15, but we don't // care about that so much here. func TestDeletePackage(t *testing.T) { const ws = ` -- go.mod -- module mod.com go 1.15 -- a/a.go -- package a const A = 1 -- b/b.go -- package b import "mod.com/a" const B = a.A -- c/c.go -- package c import "mod.com/a" const C = a.A ` Run(t, ws, func(t *testing.T, env *Env) { env.OpenFile("b/b.go") env.Await(env.DoneWithOpen()) // Delete c/c.go, the only file in package c. env.RemoveWorkspaceFile("c/c.go") // We should still get diagnostics for files that exist. env.RegexpReplace("b/b.go", `a.A`, "a.Nonexistant") env.Await(env.DiagnosticAtRegexp("b/b.go", `Nonexistant`)) }) } // This is a copy of the scenario_default/quickfix_empty_files.txt test from // govim. Reproduces golang/go#39646. func TestQuickFixEmptyFiles(t *testing.T) { testenv.NeedsGo1Point(t, 15) const mod = ` -- go.mod -- module mod.com go 1.12 ` // To fully recreate the govim tests, we create files by inserting // a newline, adding to the file, and then deleting the newline. // Wait for each event to process to avoid cancellations and force // package loads. writeGoVim := func(env *Env, name, content string) { env.WriteWorkspaceFile(name, "") env.Await(env.DoneWithChangeWatchedFiles()) env.CreateBuffer(name, "\n") env.Await(env.DoneWithOpen()) env.EditBuffer(name, fake.NewEdit(1, 0, 1, 0, content)) env.Await(env.DoneWithChange()) env.EditBuffer(name, fake.NewEdit(0, 0, 1, 0, "")) env.Await(env.DoneWithChange()) } const p = `package p; func DoIt(s string) {};` const main = `package main import "mod.com/p" func main() { p.DoIt(5) } ` // A simple version of the test that reproduces most of the problems it // exposes. t.Run("short", func(t *testing.T) { Run(t, mod, func(t *testing.T, env *Env) { writeGoVim(env, "p/p.go", p) writeGoVim(env, "main.go", main) env.Await(env.DiagnosticAtRegexp("main.go", "5")) }) }) // A full version that replicates the whole flow of the test. 
t.Run("full", func(t *testing.T) { Run(t, mod, func(t *testing.T, env *Env) { writeGoVim(env, "p/p.go", p) writeGoVim(env, "main.go", main) writeGoVim(env, "p/p_test.go", `package p import "testing" func TestDoIt(t *testing.T) { DoIt(5) } `) writeGoVim(env, "p/x_test.go", `package p_test import ( "testing" "mod.com/p" ) func TestDoIt(t *testing.T) { p.DoIt(5) } `) env.Await( env.DiagnosticAtRegexp("main.go", "5"), env.DiagnosticAtRegexp("p/p_test.go", "5"), env.DiagnosticAtRegexp("p/x_test.go", "5"), ) env.RegexpReplace("p/p.go", "s string", "i int") env.Await( EmptyDiagnostics("main.go"), EmptyDiagnostics("p/p_test.go"), EmptyDiagnostics("p/x_test.go"), ) }) }) } func TestSingleFile(t *testing.T) { const mod = ` -- go.mod -- module mod.com go 1.13 -- a/a.go -- package a func _() { var x int } ` WithOptions( // Empty workspace folders. WorkspaceFolders(), ).Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("a/a.go") env.Await( env.DiagnosticAtRegexp("a/a.go", "x"), ) }) } // Reproduces the case described in // https://github.com/golang/go/issues/39296#issuecomment-652058883. func TestPkgm(t *testing.T) { const basic = ` -- go.mod -- module mod.com go 1.15 -- foo/foo.go -- package foo import "fmt" func Foo() { fmt.Println("") } ` Run(t, basic, func(t *testing.T, env *Env) { testenv.NeedsGo1Point(t, 16) // We can't recover cleanly from this case without good overlay support. env.WriteWorkspaceFile("foo/foo_test.go", `package main func main() { }`) env.OpenFile("foo/foo_test.go") env.RegexpReplace("foo/foo_test.go", `package main`, `package foo`) env.Await( OnceMet( env.DoneWithChange(), NoDiagnostics("foo/foo.go"), ), ) }) } func TestClosingBuffer(t *testing.T) { const basic = ` -- go.mod -- module mod.com go 1.14 -- main.go -- package main func main() {} ` Run(t, basic, func(t *testing.T, env *Env) { env.Editor.CreateBuffer(env.Ctx, "foo.go", `package main`) env.Await( env.DoneWithOpen(), ) env.CloseBuffer("foo.go") env.Await( OnceMet( env.DoneWithClose(), NoLogMatching(protocol.Info, "packages=0"), ), ) }) } // Reproduces golang/go#38424. func TestCutAndPaste(t *testing.T) { const basic = ` -- go.mod -- module mod.com go 1.14 -- main2.go -- package main ` Run(t, basic, func(t *testing.T, env *Env) { env.CreateBuffer("main.go", "") env.Await(env.DoneWithOpen()) env.SaveBufferWithoutActions("main.go") env.Await(env.DoneWithSave(), env.DoneWithChangeWatchedFiles()) env.EditBuffer("main.go", fake.NewEdit(0, 0, 0, 0, `package main func main() { } `)) env.Await(env.DoneWithChange()) env.SaveBuffer("main.go") env.Await(env.DoneWithSave(), env.DoneWithChangeWatchedFiles()) env.EditBuffer("main.go", fake.NewEdit(0, 0, 4, 0, "")) env.Await(env.DoneWithChange()) env.EditBuffer("main.go", fake.NewEdit(0, 0, 0, 0, `package main func main() { var x int } `)) env.Await( env.DiagnosticAtRegexp("main.go", "x"), ) }) } // Reproduces golang/go#39763. 
func TestInvalidPackageName(t *testing.T) { testenv.NeedsGo1Point(t, 15) const pkgDefault = ` -- go.mod -- module mod.com go 1.12 -- main.go -- package default func main() {} ` Run(t, pkgDefault, func(t *testing.T, env *Env) { env.OpenFile("main.go") env.Await( env.DiagnosticAtRegexpWithMessage("main.go", "default", "expected 'IDENT'"), ) }) } // This tests the functionality of the "limitWorkspaceScope" func TestLimitWorkspaceScope(t *testing.T) { const mod = ` -- go.mod -- module mod.com go 1.12 -- a/main.go -- package main func main() {} -- main.go -- package main func main() { var x int } ` WithOptions( WorkspaceFolders("a"), ).Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("a/main.go") env.Await( env.DiagnosticAtRegexp("main.go", "x"), ) }) WithOptions( WorkspaceFolders("a"), LimitWorkspaceScope(), ).Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("a/main.go") env.Await( NoDiagnostics("main.go"), ) }) } func TestSimplifyCompositeLitDiagnostic(t *testing.T) { const files = ` -- go.mod -- module mod.com go 1.12 -- main.go -- package main import "fmt" type t struct { msg string } func main() { x := []t{t{"msg"}} fmt.Println(x) } ` WithOptions( EditorConfig{EnableStaticcheck: true}, ).Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("main.go") var d protocol.PublishDiagnosticsParams env.Await(OnceMet( env.DiagnosticAtRegexpWithMessage("main.go", `t{"msg"}`, "redundant type"), ReadDiagnostics("main.go", &d), )) if tags := d.Diagnostics[0].Tags; len(tags) == 0 || tags[0] != protocol.Unnecessary { t.Errorf("wanted Unnecessary tag on diagnostic, got %v", tags) } env.ApplyQuickFixes("main.go", d.Diagnostics) env.Await(EmptyDiagnostics("main.go")) }) } // Test some secondary diagnostics func TestSecondaryDiagnostics(t *testing.T) { const dir = ` -- go.mod -- module mod.com go 1.12 -- main.go -- package main func main() { panic("not here") } -- other.go -- package main func main() {} ` Run(t, dir, func(t *testing.T, env *Env) { env.OpenFile("main.go") env.OpenFile("other.go") x := env.DiagnosticsFor("main.go") if x == nil { t.Fatalf("expected 1 diagnostic, got none") } if len(x.Diagnostics) != 1 { t.Fatalf("main.go, got %d diagnostics, expected 1", len(x.Diagnostics)) } keep := x.Diagnostics[0] y := env.DiagnosticsFor("other.go") if len(y.Diagnostics) != 1 { t.Fatalf("other.go: got %d diagnostics, expected 1", len(y.Diagnostics)) } if len(y.Diagnostics[0].RelatedInformation) != 1 { t.Fatalf("got %d RelatedInformations, expected 1", len(y.Diagnostics[0].RelatedInformation)) } // check that the RelatedInformation matches the error from main.go c := y.Diagnostics[0].RelatedInformation[0] if c.Location.Range != keep.Range { t.Errorf("locations don't match. Got %v expected %v", c.Location.Range, keep.Range) } }) } func TestNotifyOrphanedFiles(t *testing.T) { // Need GO111MODULE=on for this test to work with Go 1.12. 
testenv.NeedsGo1Point(t, 13) const files = ` -- go.mod -- module mod.com go 1.12 -- a/a.go -- package a func main() { var x int } -- a/a_ignore.go -- // +build ignore package a func _() { var x int } ` Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("a/a.go") env.Await( env.DiagnosticAtRegexp("a/a.go", "x"), ) env.OpenFile("a/a_ignore.go") env.Await( DiagnosticAt("a/a_ignore.go", 2, 8), ) }) } func TestEnableAllExperiments(t *testing.T) { const mod = ` -- go.mod -- module mod.com go 1.12 -- main.go -- package main import "bytes" func b(c bytes.Buffer) { _ = 1 } ` WithOptions( EditorConfig{ AllExperiments: true, }, ).Run(t, mod, func(t *testing.T, env *Env) { // Confirm that the setting doesn't cause any warnings. env.Await(NoShowMessage()) }) } func TestSwig(t *testing.T) { // This is fixed in Go 1.17, but not earlier. testenv.NeedsGo1Point(t, 17) if _, err := exec.LookPath("swig"); err != nil { t.Skip("skipping test: swig not available") } if _, err := exec.LookPath("g++"); err != nil { t.Skip("skipping test: g++ not available") } const mod = ` -- go.mod -- module mod.com go 1.12 -- pkg/simple/export_swig.go -- package simple func ExportSimple(x, y int) int { return Gcd(x, y) } -- pkg/simple/simple.swigcxx -- %module simple %inline %{ extern int gcd(int x, int y) { int g; g = y; while (x > 0) { g = x; x = y % x; y = g; } return g; } %} -- main.go -- package a func main() { var x int } ` Run(t, mod, func(t *testing.T, env *Env) { env.Await( OnceMet( InitialWorkspaceLoad, NoDiagnosticWithMessage("", "illegal character U+0023 '#'"), ), ) }) } // When foo_test.go is opened, gopls will object to the borked package name. // This test asserts that when the package name is fixed, gopls will soon after // have no more complaints about it. // https://github.com/golang/go/issues/41061 func TestRenamePackage(t *testing.T) { testenv.NeedsGo1Point(t, 16) const proxy = ` -- [email protected]/go.mod -- module example.com go 1.12 -- [email protected]/blah/blah.go -- package blah const Name = "Blah" -- [email protected]/go.mod -- module random.org go 1.12 -- [email protected]/blah/blah.go -- package hello const Name = "Hello" ` const contents = ` -- go.mod -- module mod.com go 1.12 -- main.go -- package main import "example.com/blah" func main() { blah.Hello() } -- bob.go -- package main -- foo/foo.go -- package foo -- foo/foo_test.go -- package foo_ ` WithOptions( ProxyFiles(proxy), InGOPATH(), EditorConfig{ Env: map[string]string{ "GO111MODULE": "off", }, }, ).Run(t, contents, func(t *testing.T, env *Env) { // Simulate typing character by character. env.OpenFile("foo/foo_test.go") env.Await(env.DoneWithOpen()) env.RegexpReplace("foo/foo_test.go", "_", "_t") env.Await(env.DoneWithChange()) env.RegexpReplace("foo/foo_test.go", "_t", "_test") env.Await(env.DoneWithChange()) env.Await( EmptyDiagnostics("foo/foo_test.go"), NoOutstandingWork(), ) }) } // TestProgressBarErrors confirms that critical workspace load errors are shown // and updated via progress reports. func TestProgressBarErrors(t *testing.T) { testenv.NeedsGo1Point(t, 14) const pkg = ` -- go.mod -- modul mod.com go 1.12 -- main.go -- package main ` Run(t, pkg, func(t *testing.T, env *Env) { env.OpenFile("go.mod") env.Await( OutstandingWork(lsp.WorkspaceLoadFailure, "unknown directive"), ) env.EditBuffer("go.mod", fake.NewEdit(0, 0, 3, 0, `module mod.com go 1.hello `)) // As of golang/go#42529, go.mod changes do not reload the workspace until // they are saved. 
env.SaveBufferWithoutActions("go.mod") env.Await( OutstandingWork(lsp.WorkspaceLoadFailure, "invalid go version"), ) env.RegexpReplace("go.mod", "go 1.hello", "go 1.12") env.SaveBufferWithoutActions("go.mod") env.Await( NoOutstandingWork(), ) }) } func TestDeleteDirectory(t *testing.T) { testenv.NeedsGo1Point(t, 14) const mod = ` -- bob/bob.go -- package bob func Hello() { var x int } -- go.mod -- module mod.com -- main.go -- package main import "mod.com/bob" func main() { bob.Hello() } ` Run(t, mod, func(t *testing.T, env *Env) { env.RemoveWorkspaceFile("bob") env.Await( env.DiagnosticAtRegexp("main.go", `"mod.com/bob"`), EmptyDiagnostics("bob/bob.go"), RegistrationMatching("didChangeWatchedFiles"), ) }) } // Confirms that circular imports are tested and reported. func TestCircularImports(t *testing.T) { const mod = ` -- go.mod -- module mod.com go 1.12 -- self/self.go -- package self import _ "mod.com/self" func Hello() {} -- double/a/a.go -- package a import _ "mod.com/double/b" -- double/b/b.go -- package b import _ "mod.com/double/a" -- triple/a/a.go -- package a import _ "mod.com/triple/b" -- triple/b/b.go -- package b import _ "mod.com/triple/c" -- triple/c/c.go -- package c import _ "mod.com/triple/a" ` Run(t, mod, func(t *testing.T, env *Env) { env.Await( env.DiagnosticAtRegexpWithMessage("self/self.go", `_ "mod.com/self"`, "import cycle not allowed"), env.DiagnosticAtRegexpWithMessage("double/a/a.go", `_ "mod.com/double/b"`, "import cycle not allowed"), env.DiagnosticAtRegexpWithMessage("triple/a/a.go", `_ "mod.com/triple/b"`, "import cycle not allowed"), ) }) } // Tests golang/go#46667: deleting a problematic import path should resolve // import cycle errors. func TestResolveImportCycle(t *testing.T) { t.Skip("flaky test: see golang/go#46773") const mod = ` -- go.mod -- module mod.test go 1.16 -- a/a.go -- package a import "mod.test/b" const A = b.A const B = 2 -- b/b.go -- package b import "mod.test/a" const A = 1 const B = a.B ` Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("a/a.go") env.OpenFile("b/b.go") env.Await(env.DiagnosticAtRegexp("a/a.go", `"mod.test/b"`)) env.RegexpReplace("b/b.go", `const B = a\.B`, "") env.SaveBuffer("b/b.go") env.Await( EmptyDiagnostics("a/a.go"), EmptyDiagnostics("b/b.go"), ) }) } func TestBadImport(t *testing.T) { testenv.NeedsGo1Point(t, 14) const mod = ` -- go.mod -- module mod.com go 1.12 -- main.go -- package main import ( _ "nosuchpkg" ) ` t.Run("module", func(t *testing.T) { Run(t, mod, func(t *testing.T, env *Env) { env.Await( env.DiagnosticAtRegexpWithMessage("main.go", `"nosuchpkg"`, `could not import nosuchpkg (no required module provides package "nosuchpkg"`), ) }) }) t.Run("GOPATH", func(t *testing.T) { WithOptions( InGOPATH(), EditorConfig{ Env: map[string]string{"GO111MODULE": "off"}, }, Modes(Singleton), ).Run(t, mod, func(t *testing.T, env *Env) { env.Await( env.DiagnosticAtRegexpWithMessage("main.go", `"nosuchpkg"`, `cannot find package "nosuchpkg" in any of`), ) }) }) } func TestMultipleModules_Warning(t *testing.T) { const modules = ` -- a/go.mod -- module a.com go 1.12 -- a/a.go -- package a -- b/go.mod -- module b.com go 1.12 -- b/b.go -- package b ` for _, go111module := range []string{"on", "auto"} { t.Run("GO111MODULE="+go111module, func(t *testing.T) { WithOptions( Modes(Singleton), EditorConfig{ Env: map[string]string{ "GO111MODULE": go111module, }, }, ).Run(t, modules, func(t *testing.T, env *Env) { env.OpenFile("a/a.go") env.OpenFile("b/go.mod") env.Await( env.DiagnosticAtRegexp("a/a.go", "package a"), 
env.DiagnosticAtRegexp("b/go.mod", "module b.com"), OutstandingWork(lsp.WorkspaceLoadFailure, "gopls requires a module at the root of your workspace."), ) }) }) } // Expect no warning if GO111MODULE=auto in a directory in GOPATH. t.Run("GOPATH_GO111MODULE_auto", func(t *testing.T) { WithOptions( Modes(Singleton), EditorConfig{ Env: map[string]string{ "GO111MODULE": "auto", }, }, InGOPATH(), ).Run(t, modules, func(t *testing.T, env *Env) { env.OpenFile("a/a.go") env.Await( OnceMet( env.DoneWithOpen(), NoDiagnostics("a/a.go"), ), NoOutstandingWork(), ) }) }) } func TestNestedModules(t *testing.T) { const proxy = ` -- [email protected]/go.mod -- module nested.com go 1.12 -- [email protected]/hello/hello.go -- package hello func Hello() {} ` const nested = ` -- go.mod -- module mod.com go 1.12 require nested.com v1.0.0 -- go.sum -- nested.com v1.0.0 h1:I6spLE4CgFqMdBPc+wTV2asDO2QJ3tU0YAT+jkLeN1I= nested.com v1.0.0/go.mod h1:ly53UzXQgVjSlV7wicdBB4p8BxfytuGT1Xcyv0ReJfI= -- main.go -- package main import "nested.com/hello" func main() { hello.Hello() } -- nested/go.mod -- module nested.com -- nested/hello/hello.go -- package hello func Hello() { helloHelper() } -- nested/hello/hello_helper.go -- package hello func helloHelper() {} ` WithOptions( ProxyFiles(proxy), Modes(Singleton), ).Run(t, nested, func(t *testing.T, env *Env) { // Expect a diagnostic in a nested module. env.OpenFile("nested/hello/hello.go") didOpen := env.DoneWithOpen() env.Await( OnceMet( didOpen, env.DiagnosticAtRegexp("nested/hello/hello.go", "helloHelper"), ), OnceMet( didOpen, env.DiagnosticAtRegexpWithMessage("nested/hello/hello.go", "package hello", "nested module"), ), OnceMet( didOpen, OutstandingWork(lsp.WorkspaceLoadFailure, "nested module"), ), ) }) } func TestAdHocPackagesReloading(t *testing.T) { const nomod = ` -- main.go -- package main func main() {} ` Run(t, nomod, func(t *testing.T, env *Env) { env.OpenFile("main.go") env.RegexpReplace("main.go", "{}", "{ var x int; }") // simulate typing env.Await( OnceMet( env.DoneWithChange(), NoLogMatching(protocol.Info, "packages=1"), ), ) }) } func TestBuildTagChange(t *testing.T) { const files = ` -- go.mod -- module mod.com go 1.12 -- foo.go -- // decoy comment // +build hidden // decoy comment package foo var Foo = 1 -- bar.go -- package foo var Bar = Foo ` Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("foo.go") env.Await(env.DiagnosticAtRegexpWithMessage("bar.go", `Foo`, "undeclared name")) env.RegexpReplace("foo.go", `\+build`, "") env.Await(EmptyDiagnostics("bar.go")) }) } func TestIssue44736(t *testing.T) { const files = ` -- go.mod -- module blah.com go 1.16 -- main.go -- package main import "fmt" func main() { asdf fmt.Printf("This is a test %v") fdas } -- other.go -- package main ` Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("main.go") env.OpenFile("other.go") env.Await( env.DiagnosticAtRegexpWithMessage("main.go", "asdf", "undeclared name"), env.DiagnosticAtRegexpWithMessage("main.go", "fdas", "undeclared name"), ) env.SetBufferContent("other.go", "package main\n\nasdf") // The new diagnostic in other.go should not suppress diagnostics in main.go. 
env.Await( OnceMet( env.DiagnosticAtRegexpWithMessage("other.go", "asdf", "expected declaration"), env.DiagnosticAtRegexpWithMessage("main.go", "asdf", "undeclared name"), ), ) }) } func TestInitialization(t *testing.T) { const files = ` -- go.mod -- module mod.com go 1.16 -- main.go -- package main ` Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("go.mod") env.Await(env.DoneWithOpen()) env.RegexpReplace("go.mod", "module", "modul") env.SaveBufferWithoutActions("go.mod") env.Await( OnceMet( env.DoneWithSave(), NoLogMatching(protocol.Error, "initial workspace load failed"), ), ) }) } // Tests golang/go#45075: A panic in fillreturns broke diagnostics. // Expect an error log indicating that fillreturns panicked, as well type // errors for the broken code. func TestFillReturnsPanic(t *testing.T) { // At tip, the panic no longer reproduces. testenv.SkipAfterGo1Point(t, 16) const files = ` -- go.mod -- module mod.com go 1.15 -- main.go -- package main func foo() int { return x, nil } ` Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("main.go") env.Await( OnceMet( env.DoneWithOpen(), LogMatching(protocol.Error, `.*analysis fillreturns.*panicked.*`, 1, true), env.DiagnosticAtRegexpWithMessage("main.go", `return x`, "wrong number of return values"), ), ) }) } // This test confirms that the view does not reinitialize when a go.mod file is // opened. func TestNoReinitialize(t *testing.T) { const files = ` -- go.mod -- module mod.com go 1.12 -- main.go -- package main func main() {} ` Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("go.mod") env.Await( OnceMet( env.DoneWithOpen(), LogMatching(protocol.Info, `.*query=\[builtin mod.com/...\].*`, 1, false), ), ) }) } func TestUseOfInvalidMetadata(t *testing.T) { testenv.NeedsGo1Point(t, 13) const mod = ` -- go.mod -- module mod.com go 1.12 -- main.go -- package main import ( "mod.com/a" //"os" ) func _() { a.Hello() os.Getenv("") //var x int } -- a/a.go -- package a func Hello() {} ` WithOptions( EditorConfig{ ExperimentalUseInvalidMetadata: true, }, Modes(Singleton), ).Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("go.mod") env.RegexpReplace("go.mod", "module mod.com", "modul mod.com") // break the go.mod file env.SaveBufferWithoutActions("go.mod") env.Await( env.DiagnosticAtRegexp("go.mod", "modul"), ) // Confirm that language features work with invalid metadata. env.OpenFile("main.go") file, pos := env.GoToDefinition("main.go", env.RegexpSearch("main.go", "Hello")) wantPos := env.RegexpSearch("a/a.go", "Hello") if file != "a/a.go" && pos != wantPos { t.Fatalf("expected a/a.go:%s, got %s:%s", wantPos, file, pos) } // Confirm that new diagnostics appear with invalid metadata by adding // an unused variable to the body of the function. env.RegexpReplace("main.go", "//var x int", "var x int") env.Await( env.DiagnosticAtRegexp("main.go", "x"), ) // Add an import and confirm that we get a diagnostic for it, since the // metadata will not have been updated. env.RegexpReplace("main.go", "//\"os\"", "\"os\"") env.Await( env.DiagnosticAtRegexp("main.go", `"os"`), ) // Fix the go.mod file and expect the diagnostic to resolve itself. env.RegexpReplace("go.mod", "modul mod.com", "module mod.com") env.SaveBuffer("go.mod") env.Await( env.DiagnosticAtRegexp("main.go", "x"), env.NoDiagnosticAtRegexp("main.go", `"os"`), EmptyDiagnostics("go.mod"), ) }) } func TestReloadInvalidMetadata(t *testing.T) { // We only use invalid metadata for Go versions > 1.12. 
testenv.NeedsGo1Point(t, 13) const mod = ` -- go.mod -- module mod.com go 1.12 -- main.go -- package main func _() {} ` WithOptions( EditorConfig{ ExperimentalUseInvalidMetadata: true, }, // ExperimentalWorkspaceModule has a different failure mode for this // case. Modes(Singleton), ).Run(t, mod, func(t *testing.T, env *Env) { env.Await( OnceMet( InitialWorkspaceLoad, CompletedWork("Load", 1, false), ), ) // Break the go.mod file on disk, expecting a reload. env.WriteWorkspaceFile("go.mod", `modul mod.com go 1.12 `) env.Await( OnceMet( env.DoneWithChangeWatchedFiles(), env.DiagnosticAtRegexp("go.mod", "modul"), CompletedWork("Load", 1, false), ), ) env.OpenFile("main.go") env.Await(env.DoneWithOpen()) // The first edit after the go.mod file invalidation should cause a reload. // Any subsequent simple edits should not. content := `package main func main() { _ = 1 } ` env.EditBuffer("main.go", fake.NewEdit(0, 0, 3, 0, content)) env.Await( OnceMet( env.DoneWithChange(), CompletedWork("Load", 2, false), NoLogMatching(protocol.Error, "error loading file"), ), ) env.RegexpReplace("main.go", "_ = 1", "_ = 2") env.Await( OnceMet( env.DoneWithChange(), CompletedWork("Load", 2, false), NoLogMatching(protocol.Error, "error loading file"), ), ) // Add an import to the main.go file and confirm that it does get // reloaded, but the reload fails, so we see a diagnostic on the new // "fmt" import. env.EditBuffer("main.go", fake.NewEdit(0, 0, 5, 0, `package main import "fmt" func main() { fmt.Println("") } `)) env.Await( OnceMet( env.DoneWithChange(), env.DiagnosticAtRegexp("main.go", `"fmt"`), CompletedWork("Load", 3, false), ), ) }) }
[ "\"\"" ]
[]
[ "" ]
[]
[""]
go
1
0
main.go
package main

import (
    "context"
    "errors"
    "fmt"
    "log"
    "os"
    "time"

    dialogflow "cloud.google.com/go/dialogflow/apiv2"
    dialogflowpb "google.golang.org/genproto/googleapis/cloud/dialogflow/v2"

    "github.com/go-telegram-bot-api/telegram-bot-api"
)

const (
    ruLanguage   = "ru"
    errorMessage = "Повторите, пожалуйста, вопрос."
)

func DetectIntentText(ctx context.Context, projectID, sessionID, text, languageCode string) (string, error) {
    sessionClient, err := dialogflow.NewSessionsClient(ctx)
    if err != nil {
        return "", err
    }
    defer sessionClient.Close()

    if projectID == "" || sessionID == "" {
        return "", errors.New(fmt.Sprintf("Received empty project (%s) or session (%s)", projectID, sessionID))
    }

    sessionPath := fmt.Sprintf("projects/%s/agent/sessions/%s", projectID, sessionID)
    textInput := dialogflowpb.TextInput{Text: text, LanguageCode: languageCode}
    queryTextInput := dialogflowpb.QueryInput_Text{Text: &textInput}
    queryInput := dialogflowpb.QueryInput{Input: &queryTextInput}
    request := dialogflowpb.DetectIntentRequest{Session: sessionPath, QueryInput: &queryInput}

    response, err := sessionClient.DetectIntent(ctx, &request)
    if err != nil {
        return "", err
    }

    queryResult := response.GetQueryResult()
    fulfillmentText := queryResult.GetFulfillmentText()
    return fulfillmentText, nil
}

func main() {
    tgApiToken := os.Getenv("TELEGRAM_API_TOKEN")
    if len(tgApiToken) == 0 {
        panic("Telegram API token not set!")
    }

    projectID := os.Getenv("PROJECT_ID")
    if len(projectID) == 0 {
        panic("Project ID not set!")
    }

    sessionID := os.Getenv("SESSION_ID")
    if len(sessionID) == 0 {
        panic("Session ID not set!")
    }

    bot, err := tgbotapi.NewBotAPI(tgApiToken)
    if err != nil {
        panic(err)
    }

    bot.Debug = true
    log.Printf("Authorized on account %s", bot.Self.UserName)

    ctx, _ := context.WithCancel(context.Background())

    u := tgbotapi.NewUpdate(0)
    u.Timeout = 60

    updates, err := bot.GetUpdatesChan(u)

    // Optional: wait for updates and clear them if you don't want to handle
    // a large backlog of old messages
    time.Sleep(time.Millisecond * 500)
    updates.Clear()

    for update := range updates {
        if update.Message == nil {
            continue
        }

        log.Printf("[%s] %s", update.Message.From.UserName, update.Message.Text)

        text, err := DetectIntentText(ctx, projectID, sessionID, update.Message.Text, ruLanguage)
        if err != nil {
            text = errorMessage
            continue
        }

        msg := tgbotapi.NewMessage(update.Message.Chat.ID, text)
        msg.ReplyToMessageID = update.Message.MessageID
        bot.Send(msg)
    }
}
[ "\"TELEGRAM_API_TOKEN\"", "\"PROJECT_ID\"", "\"SESSION_ID\"" ]
[]
[ "SESSION_ID", "TELEGRAM_API_TOKEN", "PROJECT_ID" ]
[]
["SESSION_ID", "TELEGRAM_API_TOKEN", "PROJECT_ID"]
go
3
0
training/utils.py
import numpy as np
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"  # disable warning when using smart batching
import pandas as pd
from torch.utils.data import Dataset
from sklearn.model_selection import StratifiedKFold
from torch.utils.data import Dataset
from pytorch_lightning.callbacks import ModelCheckpoint
import pytorch_lightning as pl
import torch
from transformers import AutoTokenizer
import yaml
from argparse import ArgumentParser

from config import CFG


def prepare_args():
    parser = ArgumentParser()
    parser.add_argument(
        "--config",
        action="store",
        dest="config",
        help="Configuration scheme",
        default=None,
    )
    args = parser.parse_args()
    print(f'[INFO] Using configuration for {args.config}')
    with open(CFG.finetune_config_path) as f:
        cfg = yaml.load(f, Loader=yaml.FullLoader)
        for k, v in cfg[args.config].items():
            setattr(CFG, k, v)


# reference: https://www.kaggle.com/abhishek/step-1-create-folds
def create_folds(df, num_splits, random_seed):
    # we create a new column called kfold and fill it with -1
    df["kfold"] = -1

    # calculate number of bins by Sturge's rule
    # I take the floor of the value, you can also
    # just round it
    num_bins = int(np.floor(1 + np.log2(len(df))))

    # Bin values into discrete intervals.
    df.loc[:, "bins"] = pd.cut(
        df["target"], bins=num_bins, labels=False
    )

    # initiate the kfold class from model_selection module
    kf = StratifiedKFold(n_splits=num_splits, shuffle=True, random_state=random_seed)

    # fill the new kfold column
    # note that, instead of targets, we use bins!
    for f, (t_, v_) in enumerate(kf.split(X=df, y=df.bins.values)):
        df.loc[v_, 'kfold'] = f

    # drop the bins column
    # df = df.drop("bins", axis=1)

    # return dfframe with folds
    return df


def get_tokenizer(model_name_or_path):
    try:
        print('[INFO] Using cached tokenizer...')
        return AutoTokenizer.from_pretrained(model_name_or_path, local_files_only=True)
    except:
        print('[INFO] Downloading tokenizer...')
        return AutoTokenizer.from_pretrained(model_name_or_path)


class CommonLitDataset(Dataset):
    def __init__(self, df, tokenizer, shuffle=False):
        self.df = df
        if shuffle:
            self.df = self.df.sample(frac=1, random_state=CFG.train_seed).reset_index(drop=True)
        self.labeled = 'target' in df.columns
        self.tokenizer = tokenizer

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        item = self.df.iloc[idx]
        text = item['excerpt']
        token = self.tokenizer(text, return_tensors='pt', truncation=True,
                               padding='max_length', max_length=CFG.max_len)

        if self.labeled:
            target = item['target']
            target = torch.tensor(target, dtype=torch.float)
            return token['input_ids'].squeeze(), token['attention_mask'].squeeze(), target
        else:
            return token['input_ids'].squeeze(), token['attention_mask'].squeeze()


def log_message(msg, exp_id):
    dir_path = [CFG.output_dir, CFG.params_dir] if CFG.env == 'colab' else [CFG.output_dir]
    for path in dir_path:
        log_file = os.path.join(path, f'exp_{str(exp_id).zfill(3)}', f'{CFG.model_name}.txt')
        with open(log_file, 'a') as f:
            f.write(msg + '\n')


if CFG.env == 'local':
    '''
    Custom checkpoint class wrappers.
    '''
    from typing import Any, Optional, Union
    from pathlib import Path
    from pytorch_lightning.utilities import rank_zero_deprecation  # import error in kaggle
    from pytorch_lightning.utilities.types import STEP_OUTPUT

    class CustomModelCheckpointDelayedEval(ModelCheckpoint):
        def __init__(
            self,
            dirpath: Optional[Union[str, Path]] = None,
            filename: Optional[str] = None,
            monitor: Optional[str] = None,
            verbose: bool = False,
            save_last: Optional[bool] = None,
            save_top_k: Optional[int] = None,
            save_weights_only: bool = False,
            mode: str = "min",
            auto_insert_metric_name: bool = True,
            every_n_train_steps: Optional[int] = None,
            every_n_val_epochs: Optional[int] = None,
            period: Optional[int] = None,
            train_steps = 0
        ):
            super().__init__(dirpath=dirpath, filename=filename, monitor=monitor, verbose=verbose,
                             save_last=save_last, save_top_k=save_top_k, save_weights_only=save_weights_only,
                             mode=mode, auto_insert_metric_name=auto_insert_metric_name,
                             every_n_train_steps=every_n_train_steps, every_n_val_epochs=every_n_val_epochs,
                             period=period)
            # self.eval_schedule = [(0.50, 16), (0.49, 8), (0.48, 4), (0.47, 2), (-1., 1)]
            self.eval_interval = CFG.delayed_val_check_interval
            self.last_eval_step = 0
            # make sure the result is consistant with different `delayed_val_check_ep`
            self.delayed_steps = (int(CFG.delayed_val_check_ep * train_steps) // self.eval_interval) * self.eval_interval
            print(f'[INFO] Delayed steps before evaluation: {self.delayed_steps}')
            self.val_check_mode = False

        def on_train_batch_end(
            self,
            trainer: 'pl.Trainer',
            pl_module: 'pl.LightningModule',
            outputs: STEP_OUTPUT,
            batch: Any,
            batch_idx: int,
            dataloader_idx: int,
        ) -> None:
            """ Save checkpoint on train batch end if we meet the criteria for `every_n_train_steps` """
            if self._should_skip_saving_checkpoint(trainer):
                return
            step = trainer.global_step
            if step == self.delayed_steps:
                self.val_check_mode = True
                self.last_eval_step = step
                print('[INFO] The val check mode is turned on!')

            if self.val_check_mode and step == self.last_eval_step + self.eval_interval:
                self.last_eval_step = step
                trainer.run_evaluation()
[]
[]
[ "TOKENIZERS_PARALLELISM" ]
[]
["TOKENIZERS_PARALLELISM"]
python
1
0
api/config.py
import os from dotenv import load_dotenv, find_dotenv #this will load all the envars from a .env file located in the project root (api) load_dotenv(find_dotenv()) CONFIGURATION = { "development": "config.DevConfig", "testing": "config.TestConfig", "production": "config.Config", "default": "config.Config" } class Config(object): PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__)) SECRET_KEY = 'a secret' SQLALCHEMY_TRACK_MODIFICATIONS = False NRO_SERVICE_ACCOUNT = os.getenv('NRO_SERVICE_ACCOUNT', 'nro_service_account') SOLR_BASE_URL = os.getenv('SOLR_BASE_URL', None) SOLR_SYNONYMS_API_URL = os.getenv('SOLR_SYNONYMS_API_URL', None) NRO_EXTRACTOR_URI = os.getenv('NRO_EXTRACTOR_URI', None) ALEMBIC_INI='migrations/alembic.ini' # POSTGRESQL DB_USER = os.getenv('DATABASE_USERNAME', '') DB_PASSWORD = os.getenv('DATABASE_PASSWORD','') DB_NAME = os.getenv('DATABASE_NAME','') DB_HOST = os.getenv('DATABASE_HOST','') DB_PORT = os.getenv('DATABASE_PORT','5432') SQLALCHEMY_DATABASE_URI = 'postgresql://{user}:{password}@{host}:{port}/{name}'.format( user=DB_USER, password=DB_PASSWORD, host=DB_HOST, port=int(DB_PORT), name=DB_NAME, ) ## ORACLE - LEGACY NRO NAMESDB NRO_USER = os.getenv('NRO_USER', '') NRO_SCHEMA = os.getenv('NRO_SCHEMA', None) NRO_PASSWORD = os.getenv('NRO_PASSWORD', '') NRO_DB_NAME = os.getenv('NRO_DB_NAME', '') NRO_HOST = os.getenv('NRO_HOST', '') NRO_PORT = int(os.getenv('NRO_PORT', '1521')) # JWT_OIDC Settings JWT_OIDC_WELL_KNOWN_CONFIG = os.getenv('JWT_OIDC_WELL_KNOWN_CONFIG') JWT_OIDC_ALGORITHMS = os.getenv('JWT_OIDC_ALGORITHMS') JWT_OIDC_JWKS_URI = os.getenv('JWT_OIDC_JWKS_URI') JWT_OIDC_ISSUER = os.getenv('JWT_OIDC_ISSUER') JWT_OIDC_AUDIENCE = os.getenv('JWT_OIDC_AUDIENCE') JWT_OIDC_CLIENT_SECRET = os.getenv('JWT_OIDC_CLIENT_SECRET') JWT_OIDC_CACHING_ENABLED = os.getenv('JWT_OIDC_CACHING_ENABLED') try: JWT_OIDC_JWKS_CACHE_TIMEOUT = int(os.getenv('JWT_OIDC_JWKS_CACHE_TIMEOUT')) except: JWT_OIDC_JWKS_CACHE_TIMEOUT = 300 TESTING = False, DEBUG = False class DevConfig(Config): TESTING = False, DEBUG = True class TestConfig(Config): DEBUG = True TESTING = True # POSTGRESQL DB_USER = os.getenv('DATABASE_TEST_USERNAME', '') DB_PASSWORD = os.getenv('DATABASE_TEST_PASSWORD','') DB_NAME = os.getenv('DATABASE_TEST_NAME','') DB_HOST = os.getenv('DATABASE_TEST_HOST','') DB_PORT = os.getenv('DATABASE_TEST_PORT','5432') SQLALCHEMY_DATABASE_URI = 'postgresql://{user}:{password}@{host}:{port}/{name}'.format( user=DB_USER, password=DB_PASSWORD, host=DB_HOST, port=int(DB_PORT), name=DB_NAME, ) # JWT OIDC settings ## JWT_OIDC_TEST_MODE will set jwt_manager to use JWT_OIDC_TEST_MODE = True JWT_OIDC_TEST_AUDIENCE = os.getenv('JWT_OIDC_AUDIENCE') JWT_OIDC_TEST_CLIENT_SECRET = os.getenv('JWT_OIDC_CLIENT_SECRET') JWT_OIDC_TEST_ISSUER = 'https://sso-dev.pathfinder.gov.bc.ca/auth/realms/sbc' JWT_OIDC_TEST_KEYS = { "keys": [ { "kid": "flask-jwt-oidc-test-client", "kty": "RSA", "alg": "RS256", "use": "sig", "n": "AN-fWcpCyE5KPzHDjigLaSUVZI0uYrcGcc40InVtl-rQRDmAh-C2W8H4_Hxhr5VLc6crsJ2LiJTV_E72S03pzpOOaaYV6-TzAjCou2GYJIXev7f6Hh512PuG5wyxda_TlBSsI-gvphRTPsKCnPutrbiukCYrnPuWxX5_cES9eStR", "e": "AQAB" } ] } JWT_OIDC_TEST_PRIVATE_KEY_JWKS = { "keys": [ { "kid": "flask-jwt-oidc-test-client", "kty": "RSA", "alg": "RS256", "use": "sig", "kty": "RSA", "n": "AN-fWcpCyE5KPzHDjigLaSUVZI0uYrcGcc40InVtl-rQRDmAh-C2W8H4_Hxhr5VLc6crsJ2LiJTV_E72S03pzpOOaaYV6-TzAjCou2GYJIXev7f6Hh512PuG5wyxda_TlBSsI-gvphRTPsKCnPutrbiukCYrnPuWxX5_cES9eStR", "e": "AQAB", "d": 
"C0G3QGI6OQ6tvbCNYGCqq043YI_8MiBl7C5dqbGZmx1ewdJBhMNJPStuckhskURaDwk4-8VBW9SlvcfSJJrnZhgFMjOYSSsBtPGBIMIdM5eSKbenCCjO8Tg0BUh_xa3CHST1W4RQ5rFXadZ9AeNtaGcWj2acmXNO3DVETXAX3x0", "p": "APXcusFMQNHjh6KVD_hOUIw87lvK13WkDEeeuqAydai9Ig9JKEAAfV94W6Aftka7tGgE7ulg1vo3eJoLWJ1zvKM", "q": "AOjX3OnPJnk0ZFUQBwhduCweRi37I6DAdLTnhDvcPTrrNWuKPg9uGwHjzFCJgKd8KBaDQ0X1rZTZLTqi3peT43s", "dp": "AN9kBoA5o6_Rl9zeqdsIdWFmv4DB5lEqlEnC7HlAP-3oo3jWFO9KQqArQL1V8w2D4aCd0uJULiC9pCP7aTHvBhc", "dq": "ANtbSY6njfpPploQsF9sU26U0s7MsuLljM1E8uml8bVJE1mNsiu9MgpUvg39jEu9BtM2tDD7Y51AAIEmIQex1nM", "qi": "XLE5O360x-MhsdFXx8Vwz4304-MJg-oGSJXCK_ZWYOB_FGXFRTfebxCsSYi0YwJo-oNu96bvZCuMplzRI1liZw" } ] } JWT_OIDC_TEST_PRIVATE_KEY_PEM = """ -----BEGIN RSA PRIVATE KEY----- MIICXQIBAAKBgQDfn1nKQshOSj8xw44oC2klFWSNLmK3BnHONCJ1bZfq0EQ5gIfg tlvB+Px8Ya+VS3OnK7Cdi4iU1fxO9ktN6c6TjmmmFevk8wIwqLthmCSF3r+3+h4e ddj7hucMsXWv05QUrCPoL6YUUz7Cgpz7ra24rpAmK5z7lsV+f3BEvXkrUQIDAQAB AoGAC0G3QGI6OQ6tvbCNYGCqq043YI/8MiBl7C5dqbGZmx1ewdJBhMNJPStuckhs kURaDwk4+8VBW9SlvcfSJJrnZhgFMjOYSSsBtPGBIMIdM5eSKbenCCjO8Tg0BUh/ xa3CHST1W4RQ5rFXadZ9AeNtaGcWj2acmXNO3DVETXAX3x0CQQD13LrBTEDR44ei lQ/4TlCMPO5bytd1pAxHnrqgMnWovSIPSShAAH1feFugH7ZGu7RoBO7pYNb6N3ia C1idc7yjAkEA6Nfc6c8meTRkVRAHCF24LB5GLfsjoMB0tOeEO9w9Ous1a4o+D24b AePMUImAp3woFoNDRfWtlNktOqLel5PjewJBAN9kBoA5o6/Rl9zeqdsIdWFmv4DB 5lEqlEnC7HlAP+3oo3jWFO9KQqArQL1V8w2D4aCd0uJULiC9pCP7aTHvBhcCQQDb W0mOp436T6ZaELBfbFNulNLOzLLi5YzNRPLppfG1SRNZjbIrvTIKVL4N/YxLvQbT NrQw+2OdQACBJiEHsdZzAkBcsTk7frTH4yGx0VfHxXDPjfTj4wmD6gZIlcIr9lZg 4H8UZcVFN95vEKxJiLRjAmj6g273pu9kK4ymXNEjWWJn -----END RSA PRIVATE KEY-----"""
[]
[]
[ "DATABASE_TEST_HOST", "DATABASE_HOST", "SOLR_BASE_URL", "NRO_EXTRACTOR_URI", "JWT_OIDC_CLIENT_SECRET", "DATABASE_PORT", "JWT_OIDC_ISSUER", "JWT_OIDC_JWKS_URI", "JWT_OIDC_AUDIENCE", "DATABASE_TEST_NAME", "DATABASE_NAME", "DATABASE_TEST_PASSWORD", "DATABASE_TEST_PORT", "NRO_DB_NAME", "NRO_PORT", "DATABASE_TEST_USERNAME", "NRO_SCHEMA", "DATABASE_PASSWORD", "NRO_PASSWORD", "JWT_OIDC_WELL_KNOWN_CONFIG", "DATABASE_USERNAME", "NRO_SERVICE_ACCOUNT", "NRO_HOST", "SOLR_SYNONYMS_API_URL", "NRO_USER", "JWT_OIDC_ALGORITHMS", "JWT_OIDC_JWKS_CACHE_TIMEOUT", "JWT_OIDC_CACHING_ENABLED" ]
[]
["DATABASE_TEST_HOST", "DATABASE_HOST", "SOLR_BASE_URL", "NRO_EXTRACTOR_URI", "JWT_OIDC_CLIENT_SECRET", "DATABASE_PORT", "JWT_OIDC_ISSUER", "JWT_OIDC_JWKS_URI", "JWT_OIDC_AUDIENCE", "DATABASE_TEST_NAME", "DATABASE_NAME", "DATABASE_TEST_PASSWORD", "DATABASE_TEST_PORT", "NRO_DB_NAME", "NRO_PORT", "DATABASE_TEST_USERNAME", "NRO_SCHEMA", "DATABASE_PASSWORD", "NRO_PASSWORD", "JWT_OIDC_WELL_KNOWN_CONFIG", "DATABASE_USERNAME", "NRO_SERVICE_ACCOUNT", "NRO_HOST", "SOLR_SYNONYMS_API_URL", "NRO_USER", "JWT_OIDC_ALGORITHMS", "JWT_OIDC_JWKS_CACHE_TIMEOUT", "JWT_OIDC_CACHING_ENABLED"]
python
28
0
bindings/python/setup.py
from setuptools import setup, Extension
from codecs import open
import os

cmdclass = {}
long_description = ""

# Build directly from cython source file(s) if user wants so (probably for some experiments).
# Otherwise, pre-generated c source file(s) are used.
# User has to set environment variable EDLIB_USE_CYTHON.
# e.g.: EDLIB_USE_CYTHON=1 python setup.py install
USE_CYTHON = os.getenv('EDLIB_USE_CYTHON', False)
if USE_CYTHON:
    from Cython.Build import build_ext
    edlib_module_src = "edlib.pyx"
    cmdclass['build_ext'] = build_ext
else:
    edlib_module_src = "edlib.bycython.cpp"

# Load README.rst into long description.
# User can skip using README.rst as long description: EDLIB_OMIT_README_RST=1 python setup.py install
OMIT_README_RST = os.getenv('EDLIB_OMIT_README_RST', False)
if not OMIT_README_RST:
    here = os.path.abspath(os.path.dirname(__file__))
    with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
        long_description = f.read()

setup(
    # Information
    name = "edlib",
    description = "Lightweight, super fast library for sequence alignment using edit (Levenshtein) distance.",
    long_description = long_description,
    version = "1.3.9",
    url = "https://github.com/Martinsos/edlib",
    author = "Martin Sosic",
    author_email = "[email protected]",
    license = "MIT",
    keywords = "edit distance levenshtein align sequence bioinformatics",

    # Build instructions
    ext_modules = [Extension("edlib",
                             [edlib_module_src, "edlib/src/edlib.cpp"],
                             include_dirs=["edlib/include"],
                             depends=["edlib/include/edlib.h"],
                             language="c++",
                             extra_compile_args=["-O3", "-std=c++11"])],
    cmdclass = cmdclass
)
[]
[]
[ "EDLIB_USE_CYTHON", "EDLIB_OMIT_README_RST" ]
[]
["EDLIB_USE_CYTHON", "EDLIB_OMIT_README_RST"]
python
2
0
gonder_rc.py
# -*- coding: utf-8 -*- # Resource object code # # Created by: The Resource Compiler for PyQt5 (Qt v5.13.0) # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore qt_resource_data = b"\ \x00\x00\x04\x1a\ \x00\ \x01\x08\x3e\x78\x9c\xed\x9c\x4d\x6e\xd3\x50\x14\x85\x1d\x65\x10\ \x66\x61\xc4\x2c\x75\x87\xdd\x85\x59\x4a\x77\x92\xcc\xd2\x59\x97\ \xc2\x12\x40\x62\x01\x2c\xa1\x48\xb4\x62\x58\x86\x0c\x10\xc1\x76\ \x52\xc7\x3e\x21\x8e\xe3\xdf\x77\xdf\xfb\xbe\xea\x22\x1d\x17\x61\ \xc7\xe7\xbe\x73\x18\x54\x8d\xa2\x59\xfa\xb5\xd9\x44\xe9\x9f\xb7\ \xd1\xdd\xbb\x59\xf4\x21\x8a\xa2\xbb\x74\xd2\x4b\xd9\xc5\xfc\x7a\ \x4e\xfa\x3d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x08\x97\x97\xed\x6a\xf3\xfc\xb0\x7a\x7a\xde\xae\xee\x9f\ \x1e\xe3\xf7\x53\x3f\x0f\x8c\x4b\xe6\xff\xcb\xc3\xcd\x2e\x9f\xed\ \xea\xf5\x79\x7b\xf3\xf8\x73\x1b\xdf\x4e\xfd\x5c\x30\x0e\x15\xff\ \x4b\x93\xee\xc1\xa7\x1f\xdb\xf8\xe3\xd4\xcf\x07\xc3\x72\xce\xff\ \x62\x0f\xe8\x06\xaf\xb9\xe4\x3f\xdd\xe0\x37\x8d\xfd\xa7\x1b\xbc\ \xa4\x8d\xff\x74\x83\x3f\x74\xf1\x9f\x6e\xb0\x4f\x2f\xfe\xd3\x0d\ \x66\xe9\xdb\x7f\xba\xc1\x16\x43\xf9\x4f\x37\xd8\x60\x70\xff\xe9\ \x06\xa7\x19\xd3\x7f\xba\xc1\x3d\xa6\xf0\x9f\x6e\x70\x87\x49\xfd\ \xa7\x1b\x26\xc7\x15\xff\xe9\x86\x69\x70\xcd\x7f\xba\x61\x5c\x9c\ \xf5\x9f\x6e\x18\x05\x0b\xfe\xd3\x0d\xc3\x61\xc9\x7f\xba\xa1\x7f\ \x4c\xfa\x5f\xce\x04\xba\xa1\x13\xd6\xfd\x2f\xf6\x80\x6e\x68\x85\ \x2f\xfe\x17\x43\x37\x5c\x85\x77\xfe\x97\x33\x81\x6e\xb8\x88\xcf\ \xfe\x17\x7b\x40\x37\x9c\x25\x04\xff\x8b\xa1\x1b\x4e\x08\xca\xff\ \x72\x26\xd0\x0d\x39\xa1\xfa\x5f\xec\x41\xe0\xdd\x10\xba\xff\xc5\ \x04\xda\x0d\xf8\x7f\x3a\x21\x75\x03\xfe\xd7\xec\x41\x00\xdd\x80\ \xff\x0d\xc6\xe3\x6e\xc0\xff\xeb\xc6\xb7\x6e\xc0\xff\x96\x7b\xe0\ \x49\x37\xe0\x7f\xc7\x31\xde\x0d\xf8\xdf\xdf\x58\xec\x06\xfc\x1f\ \x60\x0f\x0c\x75\x43\xb6\xaf\x59\x7e\xbd\x3c\xac\x3e\xa7\xbb\xf0\ \x6d\xea\x77\xe7\xd5\x18\xed\x86\xec\x79\xf7\x7b\xb1\xba\xcf\x7f\ \x3f\x58\x9a\x6b\x87\xfd\x78\x9d\xfc\x9d\x1a\x1d\x8b\xdd\x70\x8e\ \xec\x73\x64\x73\xd8\x0d\xb2\xe3\x9a\x3d\x30\xd4\x0d\x6d\x20\x3b\ \x1a\x8e\xd1\x6e\xe8\x0a\xd9\x71\x3a\x3e\x75\x43\x17\x42\xcf\x0e\ \xdf\xbb\xa1\x2b\xc1\x64\x47\xa0\xdd\xd0\x05\x5f\xb3\x83\x6e\xe8\ \x07\xeb\xd9\x41\x37\x0c\x87\xa9\xec\xa0\x1b\x46\xc7\xd5\xec\xa0\ \x1b\xc6\x01\xff\xfd\x86\xfc\xf7\x1f\x57\xcf\x70\xe3\xb3\xce\xff\ \xff\x6a\x31\x75\x86\xc9\xf8\x56\x58\x3f\xc3\x8d\x27\xd0\x8c\xf7\ \xf5\x0c\x37\x3e\xeb\x01\x64\x7c\x30\x67\xf8\x1a\xdf\x3d\xca\xf8\ \xd0\xcf\x70\xe3\x31\x9c\xf1\x9c\xe1\xf6\x63\x21\xe3\x39\xc3\x03\ \xf8\xee\x68\xc6\xf3\xf3\x9f\x03\x8e\x81\x8c\xe7\xe7\xbf\xfb\x1f\ \x0b\x19\xff\x06\xfe\xf7\xe8\xbb\xa3\x19\x5f\x07\xfe\x77\x1c\x03\ \x19\x5f\x07\xfe\xb7\x1b\x4b\x19\x5f\x07\xfe\x5f\xe9\xbb\xc1\x8c\ \xaf\x03\xff\x1b\x8c\xf1\x8c\xaf\x03\xff\xcf\x8f\x2f\x19\x5f\x07\ \xfe\xff\xc7\x77\xcf\x32\xbe\x0e\xfc\x3f\x8c\xc7\x19\x5f\x47\xe8\ \xfe\x87\x90\xf1\x75\x84\xea\x7f\x48\x19\x5f\x47\x50\xfe\x07\x9a\ \xf1\x75\x84\xe0\x7f\xe8\x19\x5f\x87\xcf\xfe\x93\xf1\x97\xf1\xce\ \x7f\x32\xfe\x2a\x7c\xf1\x9f\x8c\x6f\x87\x75\xff\xc9\xf8\x6e\x98\ \xf4\x9f\x8c\xef\x0d\x4b\xfe\x93\xf1\xfd\x63\xc1\x7f\x32\x7e\x38\ \x9c\xf5\x9f\x8c\x1f\x05\xd7\xfc\x27\xe3\xc7\xc5\x15\xff\xc9\xf8\ \x69\x98\xd4\x7f\x32\x7e\x72\xa6\xf0\x9f\x8c\x77\x87\x31\xfd\x27\ \xe3\xdd\x63\x70\xff\xc9\x78\xa7\x19\xca\x7f\x32\xde\x06\x7d\xfb\ \x4f\xc6\xdb\xa2\x17\xff\xc9\x78\xb3\x74\xf1\x9f\x8c\xb7\x4f\x1b\ \xff\xc9\x78\x7f\x68\xec\x3f\x19\xef\x25\x97\xfc\x27\xe3\xfd\xe6\ \x9c\xff\x64\x7c\x18\x54\xfc\x27\xe3\x83\x23\xff\xfd\x5e\x64\x3c\ 
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\ \x73\xec\x42\xe7\xab\xe8\x2f\xaa\x13\xd1\x0b\xd1\x33\xd1\xd1\x5a\ \xf4\x52\xf4\x5c\x74\xa4\x3a\x16\xbd\x10\x3d\x13\x1d\x25\xa2\x97\ \xa2\xe7\xa2\xcb\x8f\x98\xeb\x58\xf4\x42\x74\xa4\x3a\x11\xbd\x14\ \x3d\x13\x7d\xbc\xe3\x41\xc7\xa2\xe7\xa2\x23\xd5\x89\xe8\x85\xe8\ \x99\xe8\xb7\x3b\x16\x7a\x29\x7a\x2e\x3a\x52\x1d\x8b\x5e\x88\x9e\ \x89\xde\x3f\x62\x49\xe7\x77\xfc\x7b\xd4\xfb\x3b\x96\x2e\xec\x1f\ \xf1\x8f\xdc\xf1\x78\xe1\xed\x33\xfe\xae\x3e\xe2\xf1\x42\x61\xc3\ \xaf\xca\x67\x2c\x2e\x94\x36\xe5\x7b\xf9\xa5\x14\x17\xe2\xb3\x5a\ \xfe\xbe\xfc\x7b\x72\x3f\x79\x1e\x79\x5e\xf9\x3c\xf2\x79\xe5\x7d\ \xe8\xfb\xd2\xf7\x59\xd2\xf2\xbe\xd5\x0f\xf5\x2b\x16\xbd\xab\x6a\ \xdd\x07\xdd\x97\x75\x55\xeb\xbe\xe9\x3e\x26\xa2\x77\x55\xad\xfb\ \x1e\x8b\x5e\x57\xb5\x9e\x27\x3d\x6f\x89\xe8\x5d\x55\xeb\x79\x8e\ \x45\xaf\xab\x5a\xf3\x42\xf3\x24\xa9\x6a\xcd\x23\xcd\xab\x58\xf4\ \xae\xaa\x35\x0f\x35\x2f\xd7\x55\xad\x79\xab\x79\x9c\x88\xae\xca\ \x93\xbc\x0f\x9c\x7f\x31\x73\xbc\x32\ " qt_resource_name = b"\ \x00\x09\ \x0c\x78\x54\x88\ \x00\x6e\ \x00\x65\x00\x77\x00\x50\x00\x72\x00\x65\x00\x66\x00\x69\x00\x78\ \x00\x0a\ \x0a\xc8\x83\x1f\ \x00\x67\ \x00\x6f\x00\x6e\x00\x64\x00\x65\x00\x72\x00\x2e\x00\x69\x00\x63\x00\x6f\ " qt_resource_struct_v1 = b"\ \x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\ \x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\ \x00\x00\x00\x18\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\ " qt_resource_struct_v2 = b"\ \x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\ \x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\ \x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x18\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\ \x00\x00\x01\x6e\xda\x88\xff\x59\ " qt_version = [int(v) for v in QtCore.qVersion().split('.')] if qt_version < [5, 8, 0]: rcc_version = 1 qt_resource_struct = qt_resource_struct_v1 else: rcc_version = 2 qt_resource_struct = qt_resource_struct_v2 def qInitResources(): QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data) def qCleanupResources(): QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data) qInitResources()
[]
[]
[]
[]
[]
python
null
null
null
python/pyarrow/tests/test_parquet.py
# -*- coding: utf-8 -*- # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from collections import OrderedDict import datetime import decimal import io import json import os import six import pickle import pytest import numpy as np import pyarrow as pa from pyarrow.compat import guid, u, BytesIO, unichar, PY2 from pyarrow.pandas_compat import _pandas_api from pyarrow.tests import util from pyarrow.filesystem import LocalFileSystem, FileSystem try: import pyarrow.parquet as pq except ImportError: pq = None try: import pandas as pd import pandas.util.testing as tm from .pandas_examples import dataframe_with_arrays, dataframe_with_lists except ImportError: pd = tm = None # Marks all of the tests in this module # Ignore these with pytest ... -m 'not parquet' pytestmark = pytest.mark.parquet @pytest.fixture(scope='module') def datadir(datadir): return datadir / 'parquet' def _write_table(table, path, **kwargs): # So we see the ImportError somewhere import pyarrow.parquet as pq if _pandas_api.is_data_frame(table): table = pa.Table.from_pandas(table) pq.write_table(table, path, **kwargs) return table def _read_table(*args, **kwargs): return pq.read_table(*args, **kwargs) def _roundtrip_table(table, read_table_kwargs=None, write_table_kwargs=None): read_table_kwargs = read_table_kwargs or {} write_table_kwargs = write_table_kwargs or {} buf = io.BytesIO() _write_table(table, buf, **write_table_kwargs) buf.seek(0) return _read_table(buf, **read_table_kwargs) def _check_roundtrip(table, expected=None, read_table_kwargs=None, **write_table_kwargs): if expected is None: expected = table read_table_kwargs = read_table_kwargs or {} # intentionally check twice result = _roundtrip_table(table, read_table_kwargs=read_table_kwargs, write_table_kwargs=write_table_kwargs) assert result.equals(expected) result = _roundtrip_table(result, read_table_kwargs=read_table_kwargs, write_table_kwargs=write_table_kwargs) assert result.equals(expected) def _roundtrip_pandas_dataframe(df, write_kwargs): table = pa.Table.from_pandas(df) buf = io.BytesIO() _write_table(table, buf, **write_kwargs) buf.seek(0) table1 = _read_table(buf) return table1.to_pandas() @pytest.mark.parametrize('dtype', [int, float]) def test_single_pylist_column_roundtrip(tempdir, dtype): filename = tempdir / 'single_{}_column.parquet'.format(dtype.__name__) data = [pa.array(list(map(dtype, range(5))))] table = pa.Table.from_arrays(data, names=['a']) _write_table(table, filename) table_read = _read_table(filename) for i in range(table.num_columns): col_written = table[i] col_read = table_read[i] assert table.field(i).name == table_read.field(i).name assert col_read.num_chunks == 1 data_written = col_written.chunk(0) data_read = col_read.chunk(0) assert data_written.equals(data_read) def alltypes_sample(size=10000, seed=0, 
categorical=False): np.random.seed(seed) arrays = { 'uint8': np.arange(size, dtype=np.uint8), 'uint16': np.arange(size, dtype=np.uint16), 'uint32': np.arange(size, dtype=np.uint32), 'uint64': np.arange(size, dtype=np.uint64), 'int8': np.arange(size, dtype=np.int16), 'int16': np.arange(size, dtype=np.int16), 'int32': np.arange(size, dtype=np.int32), 'int64': np.arange(size, dtype=np.int64), 'float32': np.arange(size, dtype=np.float32), 'float64': np.arange(size, dtype=np.float64), 'bool': np.random.randn(size) > 0, # TODO(wesm): Test other timestamp resolutions now that arrow supports # them 'datetime': np.arange("2016-01-01T00:00:00.001", size, dtype='datetime64[ms]'), 'str': pd.Series([str(x) for x in range(size)]), 'empty_str': [''] * size, 'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None], 'null': [None] * size, 'null_list': [None] * 2 + [[None] * (x % 4) for x in range(size - 2)], } if categorical: arrays['str_category'] = arrays['str'].astype('category') return pd.DataFrame(arrays) @pytest.mark.pandas @pytest.mark.parametrize('chunk_size', [None, 1000]) def test_pandas_parquet_2_0_roundtrip(tempdir, chunk_size): df = alltypes_sample(size=10000, categorical=True) filename = tempdir / 'pandas_roundtrip.parquet' arrow_table = pa.Table.from_pandas(df) assert arrow_table.schema.pandas_metadata is not None _write_table(arrow_table, filename, version="2.0", coerce_timestamps='ms', chunk_size=chunk_size) table_read = pq.read_pandas(filename) assert table_read.schema.pandas_metadata is not None assert arrow_table.schema.metadata == table_read.schema.metadata df_read = table_read.to_pandas() tm.assert_frame_equal(df, df_read) def test_set_data_page_size(): arr = pa.array([1, 2, 3] * 1000000) t = pa.Table.from_arrays([arr], names=['f0']) # 128K, 256K, 512K page_sizes = [2 << 16, 2 << 17, 2 << 18] for target_page_size in page_sizes: _check_roundtrip(t, data_page_size=target_page_size) @pytest.mark.pandas def test_chunked_table_write(): # ARROW-232 df = alltypes_sample(size=10) batch = pa.RecordBatch.from_pandas(df) table = pa.Table.from_batches([batch] * 3) _check_roundtrip(table, version='2.0') df, _ = dataframe_with_lists() batch = pa.RecordBatch.from_pandas(df) table = pa.Table.from_batches([batch] * 3) _check_roundtrip(table, version='2.0') @pytest.mark.pandas def test_no_memory_map(tempdir): df = alltypes_sample(size=10) table = pa.Table.from_pandas(df) _check_roundtrip(table, read_table_kwargs={'memory_map': False}, version='2.0') filename = str(tempdir / 'tmp_file') with open(filename, 'wb') as f: _write_table(table, f, version='2.0') table_read = pq.read_pandas(filename, memory_map=False) assert table_read.equals(table) def test_special_chars_filename(tempdir): table = pa.Table.from_arrays([pa.array([42])], ["ints"]) filename = "foo # bar" path = tempdir / filename assert not path.exists() _write_table(table, str(path)) assert path.exists() table_read = _read_table(str(path)) assert table_read.equals(table) @pytest.mark.pandas def test_empty_table_roundtrip(): df = alltypes_sample(size=10) # Create a non-empty table to infer the types correctly, then slice to 0 table = pa.Table.from_pandas(df) table = pa.Table.from_arrays( [col.chunk(0)[:0] for col in table.itercolumns()], names=table.schema.names) assert table.schema.field_by_name('null').type == pa.null() assert table.schema.field_by_name('null_list').type == pa.list_(pa.null()) _check_roundtrip(table, version='2.0') @pytest.mark.pandas def test_empty_table_no_columns(): df = pd.DataFrame() empty = 
pa.Table.from_pandas(df, preserve_index=False) _check_roundtrip(empty) def test_empty_lists_table_roundtrip(): # ARROW-2744: Shouldn't crash when writing an array of empty lists arr = pa.array([[], []], type=pa.list_(pa.int32())) table = pa.Table.from_arrays([arr], ["A"]) _check_roundtrip(table) @pytest.mark.pandas def test_pandas_parquet_datetime_tz(): s = pd.Series([datetime.datetime(2017, 9, 6)]) s = s.dt.tz_localize('utc') s.index = s # Both a column and an index to hit both use cases df = pd.DataFrame({'tz_aware': s, 'tz_eastern': s.dt.tz_convert('US/Eastern')}, index=s) f = BytesIO() arrow_table = pa.Table.from_pandas(df) _write_table(arrow_table, f, coerce_timestamps='ms') f.seek(0) table_read = pq.read_pandas(f) df_read = table_read.to_pandas() tm.assert_frame_equal(df, df_read) @pytest.mark.pandas @pytest.mark.skipif(six.PY2, reason='datetime.timezone is available since ' 'python version 3.2') def test_datetime_timezone_tzinfo(): value = datetime.datetime(2018, 1, 1, 1, 23, 45, tzinfo=datetime.timezone.utc) df = pd.DataFrame({'foo': [value]}) _roundtrip_pandas_dataframe(df, write_kwargs={}) @pytest.mark.pandas def test_pandas_parquet_custom_metadata(tempdir): df = alltypes_sample(size=10000) filename = tempdir / 'pandas_roundtrip.parquet' arrow_table = pa.Table.from_pandas(df) assert b'pandas' in arrow_table.schema.metadata _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms') metadata = pq.read_metadata(filename).metadata assert b'pandas' in metadata js = json.loads(metadata[b'pandas'].decode('utf8')) assert js['index_columns'] == [{'kind': 'range', 'name': None, 'start': 0, 'stop': 10000, 'step': 1}] @pytest.mark.pandas def test_pandas_parquet_column_multiindex(tempdir): df = alltypes_sample(size=10) df.columns = pd.MultiIndex.from_tuples( list(zip(df.columns, df.columns[::-1])), names=['level_1', 'level_2'] ) filename = tempdir / 'pandas_roundtrip.parquet' arrow_table = pa.Table.from_pandas(df) assert arrow_table.schema.pandas_metadata is not None _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms') table_read = pq.read_pandas(filename) df_read = table_read.to_pandas() tm.assert_frame_equal(df, df_read) @pytest.mark.pandas def test_pandas_parquet_2_0_roundtrip_read_pandas_no_index_written(tempdir): df = alltypes_sample(size=10000) filename = tempdir / 'pandas_roundtrip.parquet' arrow_table = pa.Table.from_pandas(df, preserve_index=False) js = arrow_table.schema.pandas_metadata assert not js['index_columns'] # ARROW-2170 # While index_columns should be empty, columns needs to be filled still. 
assert js['columns'] _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms') table_read = pq.read_pandas(filename) js = table_read.schema.pandas_metadata assert not js['index_columns'] assert arrow_table.schema.metadata == table_read.schema.metadata df_read = table_read.to_pandas() tm.assert_frame_equal(df, df_read) @pytest.mark.pandas def test_pandas_parquet_1_0_roundtrip(tempdir): size = 10000 np.random.seed(0) df = pd.DataFrame({ 'uint8': np.arange(size, dtype=np.uint8), 'uint16': np.arange(size, dtype=np.uint16), 'uint32': np.arange(size, dtype=np.uint32), 'uint64': np.arange(size, dtype=np.uint64), 'int8': np.arange(size, dtype=np.int16), 'int16': np.arange(size, dtype=np.int16), 'int32': np.arange(size, dtype=np.int32), 'int64': np.arange(size, dtype=np.int64), 'float32': np.arange(size, dtype=np.float32), 'float64': np.arange(size, dtype=np.float64), 'bool': np.random.randn(size) > 0, 'str': [str(x) for x in range(size)], 'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None], 'empty_str': [''] * size }) filename = tempdir / 'pandas_roundtrip.parquet' arrow_table = pa.Table.from_pandas(df) _write_table(arrow_table, filename, version='1.0') table_read = _read_table(filename) df_read = table_read.to_pandas() # We pass uint32_t as int64_t if we write Parquet version 1.0 df['uint32'] = df['uint32'].values.astype(np.int64) tm.assert_frame_equal(df, df_read) @pytest.mark.pandas def test_multiple_path_types(tempdir): # Test compatibility with PEP 519 path-like objects path = tempdir / 'zzz.parquet' df = pd.DataFrame({'x': np.arange(10, dtype=np.int64)}) _write_table(df, path) table_read = _read_table(path) df_read = table_read.to_pandas() tm.assert_frame_equal(df, df_read) # Test compatibility with plain string paths path = str(tempdir) + 'zzz.parquet' df = pd.DataFrame({'x': np.arange(10, dtype=np.int64)}) _write_table(df, path) table_read = _read_table(path) df_read = table_read.to_pandas() tm.assert_frame_equal(df, df_read) @pytest.mark.pandas def test_pandas_column_selection(tempdir): size = 10000 np.random.seed(0) df = pd.DataFrame({ 'uint8': np.arange(size, dtype=np.uint8), 'uint16': np.arange(size, dtype=np.uint16) }) filename = tempdir / 'pandas_roundtrip.parquet' arrow_table = pa.Table.from_pandas(df) _write_table(arrow_table, filename) table_read = _read_table(filename, columns=['uint8']) df_read = table_read.to_pandas() tm.assert_frame_equal(df[['uint8']], df_read) # ARROW-4267: Selection of duplicate columns still leads to these columns # being read uniquely. 
table_read = _read_table(filename, columns=['uint8', 'uint8']) df_read = table_read.to_pandas() tm.assert_frame_equal(df[['uint8']], df_read) def _random_integers(size, dtype): # We do not generate integers outside the int64 range platform_int_info = np.iinfo('int_') iinfo = np.iinfo(dtype) return np.random.randint(max(iinfo.min, platform_int_info.min), min(iinfo.max, platform_int_info.max), size=size).astype(dtype) def _test_dataframe(size=10000, seed=0): np.random.seed(seed) df = pd.DataFrame({ 'uint8': _random_integers(size, np.uint8), 'uint16': _random_integers(size, np.uint16), 'uint32': _random_integers(size, np.uint32), 'uint64': _random_integers(size, np.uint64), 'int8': _random_integers(size, np.int8), 'int16': _random_integers(size, np.int16), 'int32': _random_integers(size, np.int32), 'int64': _random_integers(size, np.int64), 'float32': np.random.randn(size).astype(np.float32), 'float64': np.arange(size, dtype=np.float64), 'bool': np.random.randn(size) > 0, 'strings': [tm.rands(10) for i in range(size)], 'all_none': [None] * size, 'all_none_category': [None] * size }) # TODO(PARQUET-1015) # df['all_none_category'] = df['all_none_category'].astype('category') return df @pytest.mark.pandas def test_pandas_parquet_native_file_roundtrip(tempdir): df = _test_dataframe(10000) arrow_table = pa.Table.from_pandas(df) imos = pa.BufferOutputStream() _write_table(arrow_table, imos, version="2.0") buf = imos.getvalue() reader = pa.BufferReader(buf) df_read = _read_table(reader).to_pandas() tm.assert_frame_equal(df, df_read) @pytest.mark.pandas def test_parquet_incremental_file_build(tempdir): df = _test_dataframe(100) df['unique_id'] = 0 arrow_table = pa.Table.from_pandas(df, preserve_index=False) out = pa.BufferOutputStream() writer = pq.ParquetWriter(out, arrow_table.schema, version='2.0') frames = [] for i in range(10): df['unique_id'] = i arrow_table = pa.Table.from_pandas(df, preserve_index=False) writer.write_table(arrow_table) frames.append(df.copy()) writer.close() buf = out.getvalue() result = _read_table(pa.BufferReader(buf)) expected = pd.concat(frames, ignore_index=True) tm.assert_frame_equal(result.to_pandas(), expected) @pytest.mark.pandas def test_read_pandas_column_subset(tempdir): df = _test_dataframe(10000) arrow_table = pa.Table.from_pandas(df) imos = pa.BufferOutputStream() _write_table(arrow_table, imos, version="2.0") buf = imos.getvalue() reader = pa.BufferReader(buf) df_read = pq.read_pandas(reader, columns=['strings', 'uint8']).to_pandas() tm.assert_frame_equal(df[['strings', 'uint8']], df_read) @pytest.mark.pandas def test_pandas_parquet_empty_roundtrip(tempdir): df = _test_dataframe(0) arrow_table = pa.Table.from_pandas(df) imos = pa.BufferOutputStream() _write_table(arrow_table, imos, version="2.0") buf = imos.getvalue() reader = pa.BufferReader(buf) df_read = _read_table(reader).to_pandas() tm.assert_frame_equal(df, df_read) @pytest.mark.pandas def test_pandas_parquet_pyfile_roundtrip(tempdir): filename = tempdir / 'pandas_pyfile_roundtrip.parquet' size = 5 df = pd.DataFrame({ 'int64': np.arange(size, dtype=np.int64), 'float32': np.arange(size, dtype=np.float32), 'float64': np.arange(size, dtype=np.float64), 'bool': np.random.randn(size) > 0, 'strings': ['foo', 'bar', None, 'baz', 'qux'] }) arrow_table = pa.Table.from_pandas(df) with filename.open('wb') as f: _write_table(arrow_table, f, version="1.0") data = io.BytesIO(filename.read_bytes()) table_read = _read_table(data) df_read = table_read.to_pandas() tm.assert_frame_equal(df, df_read) @pytest.mark.pandas 
def test_pandas_parquet_configuration_options(tempdir): size = 10000 np.random.seed(0) df = pd.DataFrame({ 'uint8': np.arange(size, dtype=np.uint8), 'uint16': np.arange(size, dtype=np.uint16), 'uint32': np.arange(size, dtype=np.uint32), 'uint64': np.arange(size, dtype=np.uint64), 'int8': np.arange(size, dtype=np.int16), 'int16': np.arange(size, dtype=np.int16), 'int32': np.arange(size, dtype=np.int32), 'int64': np.arange(size, dtype=np.int64), 'float32': np.arange(size, dtype=np.float32), 'float64': np.arange(size, dtype=np.float64), 'bool': np.random.randn(size) > 0 }) filename = tempdir / 'pandas_roundtrip.parquet' arrow_table = pa.Table.from_pandas(df) for use_dictionary in [True, False]: _write_table(arrow_table, filename, version='2.0', use_dictionary=use_dictionary) table_read = _read_table(filename) df_read = table_read.to_pandas() tm.assert_frame_equal(df, df_read) for write_statistics in [True, False]: _write_table(arrow_table, filename, version='2.0', write_statistics=write_statistics) table_read = _read_table(filename) df_read = table_read.to_pandas() tm.assert_frame_equal(df, df_read) for compression in ['NONE', 'SNAPPY', 'GZIP', 'LZ4', 'ZSTD']: _write_table(arrow_table, filename, version='2.0', compression=compression) table_read = _read_table(filename) df_read = table_read.to_pandas() tm.assert_frame_equal(df, df_read) def make_sample_file(table_or_df): if isinstance(table_or_df, pa.Table): a_table = table_or_df else: a_table = pa.Table.from_pandas(table_or_df) buf = io.BytesIO() _write_table(a_table, buf, compression='SNAPPY', version='2.0', coerce_timestamps='ms') buf.seek(0) return pq.ParquetFile(buf) @pytest.mark.pandas def test_parquet_metadata_api(): df = alltypes_sample(size=10000) df = df.reindex(columns=sorted(df.columns)) df.index = np.random.randint(0, 1000000, size=len(df)) fileh = make_sample_file(df) ncols = len(df.columns) # Series of sniff tests meta = fileh.metadata repr(meta) assert meta.num_rows == len(df) assert meta.num_columns == ncols + 1 # +1 for index assert meta.num_row_groups == 1 assert meta.format_version == '2.0' assert 'parquet-cpp' in meta.created_by assert isinstance(meta.serialized_size, int) assert isinstance(meta.metadata, dict) # Schema schema = fileh.schema assert meta.schema is schema assert len(schema) == ncols + 1 # +1 for index repr(schema) col = schema[0] repr(col) assert col.name == df.columns[0] assert col.max_definition_level == 1 assert col.max_repetition_level == 0 assert col.max_repetition_level == 0 assert col.physical_type == 'BOOLEAN' assert col.converted_type == 'NONE' with pytest.raises(IndexError): schema[ncols + 1] # +1 for index with pytest.raises(IndexError): schema[-1] # Row group for rg in range(meta.num_row_groups): rg_meta = meta.row_group(rg) assert isinstance(rg_meta, pq.RowGroupMetaData) repr(rg_meta) for col in range(rg_meta.num_columns): col_meta = rg_meta.column(col) assert isinstance(col_meta, pq.ColumnChunkMetaData) repr(col_meta) with pytest.raises(IndexError): meta.row_group(-1) with pytest.raises(IndexError): meta.row_group(meta.num_row_groups + 1) rg_meta = meta.row_group(0) assert rg_meta.num_rows == len(df) assert rg_meta.num_columns == ncols + 1 # +1 for index assert rg_meta.total_byte_size > 0 with pytest.raises(IndexError): col_meta = rg_meta.column(-1) with pytest.raises(IndexError): col_meta = rg_meta.column(ncols + 2) col_meta = rg_meta.column(0) assert col_meta.file_offset > 0 assert col_meta.file_path == '' # created from BytesIO assert col_meta.physical_type == 'BOOLEAN' assert 
col_meta.num_values == 10000 assert col_meta.path_in_schema == 'bool' assert col_meta.is_stats_set is True assert isinstance(col_meta.statistics, pq.Statistics) assert col_meta.compression == 'SNAPPY' assert col_meta.encodings == ('PLAIN', 'RLE') assert col_meta.has_dictionary_page is False assert col_meta.dictionary_page_offset is None assert col_meta.data_page_offset > 0 assert col_meta.total_compressed_size > 0 assert col_meta.total_uncompressed_size > 0 with pytest.raises(NotImplementedError): col_meta.has_index_page with pytest.raises(NotImplementedError): col_meta.index_page_offset @pytest.mark.pandas @pytest.mark.parametrize( ( 'data', 'type', 'physical_type', 'min_value', 'max_value', 'null_count', 'num_values', 'distinct_count' ), [ ([1, 2, 2, None, 4], pa.uint8(), 'INT32', 1, 4, 1, 4, 0), ([1, 2, 2, None, 4], pa.uint16(), 'INT32', 1, 4, 1, 4, 0), ([1, 2, 2, None, 4], pa.uint32(), 'INT32', 1, 4, 1, 4, 0), ([1, 2, 2, None, 4], pa.uint64(), 'INT64', 1, 4, 1, 4, 0), ([-1, 2, 2, None, 4], pa.int8(), 'INT32', -1, 4, 1, 4, 0), ([-1, 2, 2, None, 4], pa.int16(), 'INT32', -1, 4, 1, 4, 0), ([-1, 2, 2, None, 4], pa.int32(), 'INT32', -1, 4, 1, 4, 0), ([-1, 2, 2, None, 4], pa.int64(), 'INT64', -1, 4, 1, 4, 0), ( [-1.1, 2.2, 2.3, None, 4.4], pa.float32(), 'FLOAT', -1.1, 4.4, 1, 4, 0 ), ( [-1.1, 2.2, 2.3, None, 4.4], pa.float64(), 'DOUBLE', -1.1, 4.4, 1, 4, 0 ), ( [u'', u'b', unichar(1000), None, u'aaa'], pa.binary(), 'BYTE_ARRAY', b'', unichar(1000).encode('utf-8'), 1, 4, 0 ), ( [True, False, False, True, True], pa.bool_(), 'BOOLEAN', False, True, 0, 5, 0 ), ( [b'\x00', b'b', b'12', None, b'aaa'], pa.binary(), 'BYTE_ARRAY', b'\x00', b'b', 1, 4, 0 ), ] ) def test_parquet_column_statistics_api(data, type, physical_type, min_value, max_value, null_count, num_values, distinct_count): df = pd.DataFrame({'data': data}) schema = pa.schema([pa.field('data', type)]) table = pa.Table.from_pandas(df, schema=schema, safe=False) fileh = make_sample_file(table) meta = fileh.metadata rg_meta = meta.row_group(0) col_meta = rg_meta.column(0) stat = col_meta.statistics assert stat.has_min_max assert _close(type, stat.min, min_value) assert _close(type, stat.max, max_value) assert stat.null_count == null_count assert stat.num_values == num_values # TODO(kszucs) until parquet-cpp API doesn't expose HasDistinctCount # method, missing distinct_count is represented as zero instead of None assert stat.distinct_count == distinct_count assert stat.physical_type == physical_type def _close(type, left, right): if type == pa.float32(): return abs(left - right) < 1E-7 elif type == pa.float64(): return abs(left - right) < 1E-13 else: return left == right def test_statistics_convert_logical_types(tempdir): # ARROW-5166, ARROW-4139 # (min, max, type) cases = [(10, 11164359321221007157, pa.uint64()), (10, 4294967295, pa.uint32()), (u"ähnlich", u"öffentlich", pa.utf8()), (datetime.time(10, 30, 0, 1000), datetime.time(15, 30, 0, 1000), pa.time32('ms')), (datetime.time(10, 30, 0, 1000), datetime.time(15, 30, 0, 1000), pa.time64('us')), (datetime.datetime(2019, 6, 24, 0, 0, 0, 1000), datetime.datetime(2019, 6, 25, 0, 0, 0, 1000), pa.timestamp('ms')), (datetime.datetime(2019, 6, 24, 0, 0, 0, 1000), datetime.datetime(2019, 6, 25, 0, 0, 0, 1000), pa.timestamp('us'))] for i, (min_val, max_val, typ) in enumerate(cases): t = pa.Table.from_arrays([pa.array([min_val, max_val], type=typ)], ['col']) path = str(tempdir / ('example{}.parquet'.format(i))) pq.write_table(t, path, version='2.0') pf = pq.ParquetFile(path) stats = 
pf.metadata.row_group(0).column(0).statistics assert stats.min == min_val assert stats.max == max_val def test_parquet_write_disable_statistics(tempdir): table = pa.Table.from_pydict( {'a': pa.array([1, 2, 3]), 'b': pa.array(['a', 'b', 'c'])}) _write_table(table, tempdir / 'data.parquet') meta = pq.read_metadata(tempdir / 'data.parquet') for col in [0, 1]: cc = meta.row_group(0).column(col) assert cc.is_stats_set is True assert cc.statistics is not None _write_table(table, tempdir / 'data2.parquet', write_statistics=False) meta = pq.read_metadata(tempdir / 'data2.parquet') for col in [0, 1]: cc = meta.row_group(0).column(col) assert cc.is_stats_set is False assert cc.statistics is None _write_table(table, tempdir / 'data3.parquet', write_statistics=['a']) meta = pq.read_metadata(tempdir / 'data3.parquet') cc_a = meta.row_group(0).column(0) assert cc_a.is_stats_set is True assert cc_a.statistics is not None cc_b = meta.row_group(0).column(1) assert cc_b.is_stats_set is False assert cc_b.statistics is None @pytest.mark.pandas def test_compare_schemas(): df = alltypes_sample(size=10000) fileh = make_sample_file(df) fileh2 = make_sample_file(df) fileh3 = make_sample_file(df[df.columns[::2]]) # ParquetSchema assert isinstance(fileh.schema, pq.ParquetSchema) assert fileh.schema.equals(fileh.schema) assert fileh.schema == fileh.schema assert fileh.schema.equals(fileh2.schema) assert fileh.schema == fileh2.schema assert fileh.schema != 'arbitrary object' assert not fileh.schema.equals(fileh3.schema) assert fileh.schema != fileh3.schema # ColumnSchema assert isinstance(fileh.schema[0], pq.ColumnSchema) assert fileh.schema[0].equals(fileh.schema[0]) assert fileh.schema[0] == fileh.schema[0] assert not fileh.schema[0].equals(fileh.schema[1]) assert fileh.schema[0] != fileh.schema[1] assert fileh.schema[0] != 'arbitrary object' def test_validate_schema_write_table(tempdir): # ARROW-2926 simple_fields = [ pa.field('POS', pa.uint32()), pa.field('desc', pa.string()) ] simple_schema = pa.schema(simple_fields) # simple_table schema does not match simple_schema simple_from_array = [pa.array([1]), pa.array(['bla'])] simple_table = pa.Table.from_arrays(simple_from_array, ['POS', 'desc']) path = tempdir / 'simple_validate_schema.parquet' with pq.ParquetWriter(path, simple_schema, version='2.0', compression='snappy', flavor='spark') as w: with pytest.raises(ValueError): w.write_table(simple_table) @pytest.mark.pandas def test_column_of_arrays(tempdir): df, schema = dataframe_with_arrays() filename = tempdir / 'pandas_roundtrip.parquet' arrow_table = pa.Table.from_pandas(df, schema=schema) _write_table(arrow_table, filename, version="2.0", coerce_timestamps='ms') table_read = _read_table(filename) df_read = table_read.to_pandas() tm.assert_frame_equal(df, df_read) @pytest.mark.pandas def test_coerce_timestamps(tempdir): from collections import OrderedDict # ARROW-622 arrays = OrderedDict() fields = [pa.field('datetime64', pa.list_(pa.timestamp('ms')))] arrays['datetime64'] = [ np.array(['2007-07-13T01:23:34.123456789', None, '2010-08-13T05:46:57.437699912'], dtype='datetime64[ms]'), None, None, np.array(['2007-07-13T02', None, '2010-08-13T05:46:57.437699912'], dtype='datetime64[ms]'), ] df = pd.DataFrame(arrays) schema = pa.schema(fields) filename = tempdir / 'pandas_roundtrip.parquet' arrow_table = pa.Table.from_pandas(df, schema=schema) _write_table(arrow_table, filename, version="2.0", coerce_timestamps='us') table_read = _read_table(filename) df_read = table_read.to_pandas() df_expected = df.copy() for i, x 
in enumerate(df_expected['datetime64']): if isinstance(x, np.ndarray): df_expected['datetime64'][i] = x.astype('M8[us]') tm.assert_frame_equal(df_expected, df_read) with pytest.raises(ValueError): _write_table(arrow_table, filename, version='2.0', coerce_timestamps='unknown') @pytest.mark.pandas def test_coerce_timestamps_truncated(tempdir): """ ARROW-2555: Test that we can truncate timestamps when coercing if explicitly allowed. """ dt_us = datetime.datetime(year=2017, month=1, day=1, hour=1, minute=1, second=1, microsecond=1) dt_ms = datetime.datetime(year=2017, month=1, day=1, hour=1, minute=1, second=1) fields_us = [pa.field('datetime64', pa.timestamp('us'))] arrays_us = {'datetime64': [dt_us, dt_ms]} df_us = pd.DataFrame(arrays_us) schema_us = pa.schema(fields_us) filename = tempdir / 'pandas_truncated.parquet' table_us = pa.Table.from_pandas(df_us, schema=schema_us) _write_table(table_us, filename, version="2.0", coerce_timestamps='ms', allow_truncated_timestamps=True) table_ms = _read_table(filename) df_ms = table_ms.to_pandas() arrays_expected = {'datetime64': [dt_ms, dt_ms]} df_expected = pd.DataFrame(arrays_expected) tm.assert_frame_equal(df_expected, df_ms) @pytest.mark.pandas def test_column_of_lists(tempdir): df, schema = dataframe_with_lists(parquet_compatible=True) filename = tempdir / 'pandas_roundtrip.parquet' arrow_table = pa.Table.from_pandas(df, schema=schema) _write_table(arrow_table, filename, version='2.0') table_read = _read_table(filename) df_read = table_read.to_pandas() if PY2: # assert_frame_equal fails when comparing datetime.date and # np.datetime64, even with check_datetimelike_compat=True so # convert the values to np.datetime64 instead for col in ['date32[day]_list', 'date64[ms]_list']: df[col] = df[col].apply( lambda x: list(map(np.datetime64, x)) if x else x ) tm.assert_frame_equal(df, df_read) @pytest.mark.pandas def test_date_time_types(tempdir): t1 = pa.date32() data1 = np.array([17259, 17260, 17261], dtype='int32') a1 = pa.array(data1, type=t1) t2 = pa.date64() data2 = data1.astype('int64') * 86400000 a2 = pa.array(data2, type=t2) t3 = pa.timestamp('us') start = pd.Timestamp('2001-01-01').value / 1000 data3 = np.array([start, start + 1, start + 2], dtype='int64') a3 = pa.array(data3, type=t3) t4 = pa.time32('ms') data4 = np.arange(3, dtype='i4') a4 = pa.array(data4, type=t4) t5 = pa.time64('us') a5 = pa.array(data4.astype('int64'), type=t5) t6 = pa.time32('s') a6 = pa.array(data4, type=t6) ex_t6 = pa.time32('ms') ex_a6 = pa.array(data4 * 1000, type=ex_t6) t7 = pa.timestamp('ns') start = pd.Timestamp('2001-01-01').value data7 = np.array([start, start + 1000, start + 2000], dtype='int64') a7 = pa.array(data7, type=t7) table = pa.Table.from_arrays([a1, a2, a3, a4, a5, a6, a7], ['date32', 'date64', 'timestamp[us]', 'time32[s]', 'time64[us]', 'time32_from64[s]', 'timestamp[ns]']) # date64 as date32 # time32[s] to time32[ms] expected = pa.Table.from_arrays([a1, a1, a3, a4, a5, ex_a6, a7], ['date32', 'date64', 'timestamp[us]', 'time32[s]', 'time64[us]', 'time32_from64[s]', 'timestamp[ns]']) _check_roundtrip(table, expected=expected, version='2.0') t0 = pa.timestamp('ms') data0 = np.arange(4, dtype='int64') a0 = pa.array(data0, type=t0) t1 = pa.timestamp('us') data1 = np.arange(4, dtype='int64') a1 = pa.array(data1, type=t1) t2 = pa.timestamp('ns') data2 = np.arange(4, dtype='int64') a2 = pa.array(data2, type=t2) table = pa.Table.from_arrays([a0, a1, a2], ['ts[ms]', 'ts[us]', 'ts[ns]']) expected = pa.Table.from_arrays([a0, a1, a2], ['ts[ms]', 'ts[us]', 
'ts[ns]']) # int64 for all timestamps supported by default filename = tempdir / 'int64_timestamps.parquet' _write_table(table, filename, version='2.0') parquet_schema = pq.ParquetFile(filename).schema for i in range(3): assert parquet_schema.column(i).physical_type == 'INT64' read_table = _read_table(filename) assert read_table.equals(expected) t0_ns = pa.timestamp('ns') data0_ns = np.array(data0 * 1000000, dtype='int64') a0_ns = pa.array(data0_ns, type=t0_ns) t1_ns = pa.timestamp('ns') data1_ns = np.array(data1 * 1000, dtype='int64') a1_ns = pa.array(data1_ns, type=t1_ns) expected = pa.Table.from_arrays([a0_ns, a1_ns, a2], ['ts[ms]', 'ts[us]', 'ts[ns]']) # int96 nanosecond timestamps produced upon request filename = tempdir / 'explicit_int96_timestamps.parquet' _write_table(table, filename, version='2.0', use_deprecated_int96_timestamps=True) parquet_schema = pq.ParquetFile(filename).schema for i in range(3): assert parquet_schema.column(i).physical_type == 'INT96' read_table = _read_table(filename) assert read_table.equals(expected) # int96 nanosecond timestamps implied by flavor 'spark' filename = tempdir / 'spark_int96_timestamps.parquet' _write_table(table, filename, version='2.0', flavor='spark') parquet_schema = pq.ParquetFile(filename).schema for i in range(3): assert parquet_schema.column(i).physical_type == 'INT96' read_table = _read_table(filename) assert read_table.equals(expected) def test_timestamp_restore_timezone(): # ARROW-5888, restore timezone from serialized metadata ty = pa.timestamp('ms', tz='America/New_York') arr = pa.array([1, 2, 3], type=ty) t = pa.table([arr], names=['f0']) _check_roundtrip(t) @pytest.mark.pandas def test_list_of_datetime_time_roundtrip(): # ARROW-4135 times = pd.to_datetime(['09:00', '09:30', '10:00', '10:30', '11:00', '11:30', '12:00']) df = pd.DataFrame({'time': [times.time]}) _roundtrip_pandas_dataframe(df, write_kwargs={}) @pytest.mark.pandas def test_parquet_version_timestamp_differences(): i_s = pd.Timestamp('2010-01-01').value / 1000000000 # := 1262304000 d_s = np.arange(i_s, i_s + 10, 1, dtype='int64') d_ms = d_s * 1000 d_us = d_ms * 1000 d_ns = d_us * 1000 a_s = pa.array(d_s, type=pa.timestamp('s')) a_ms = pa.array(d_ms, type=pa.timestamp('ms')) a_us = pa.array(d_us, type=pa.timestamp('us')) a_ns = pa.array(d_ns, type=pa.timestamp('ns')) names = ['ts:s', 'ts:ms', 'ts:us', 'ts:ns'] table = pa.Table.from_arrays([a_s, a_ms, a_us, a_ns], names) # Using Parquet version 1.0, seconds should be coerced to milliseconds # and nanoseconds should be coerced to microseconds by default expected = pa.Table.from_arrays([a_ms, a_ms, a_us, a_us], names) _check_roundtrip(table, expected) # Using Parquet version 2.0, seconds should be coerced to milliseconds # and nanoseconds should be retained by default expected = pa.Table.from_arrays([a_ms, a_ms, a_us, a_ns], names) _check_roundtrip(table, expected, version='2.0') # Using Parquet version 1.0, coercing to milliseconds or microseconds # is allowed expected = pa.Table.from_arrays([a_ms, a_ms, a_ms, a_ms], names) _check_roundtrip(table, expected, coerce_timestamps='ms') # Using Parquet version 2.0, coercing to milliseconds or microseconds # is allowed expected = pa.Table.from_arrays([a_us, a_us, a_us, a_us], names) _check_roundtrip(table, expected, version='2.0', coerce_timestamps='us') # TODO: after pyarrow allows coerce_timestamps='ns', tests like the # following should pass ... 
# Using Parquet version 1.0, coercing to nanoseconds is not allowed # expected = None # with pytest.raises(NotImplementedError): # _roundtrip_table(table, coerce_timestamps='ns') # Using Parquet version 2.0, coercing to nanoseconds is allowed # expected = pa.Table.from_arrays([a_ns, a_ns, a_ns, a_ns], names) # _check_roundtrip(table, expected, version='2.0', coerce_timestamps='ns') # For either Parquet version, coercing to nanoseconds is allowed # if Int96 storage is used expected = pa.Table.from_arrays([a_ns, a_ns, a_ns, a_ns], names) _check_roundtrip(table, expected, use_deprecated_int96_timestamps=True) _check_roundtrip(table, expected, version='2.0', use_deprecated_int96_timestamps=True) def test_large_list_records(): # This was fixed in PARQUET-1100 list_lengths = np.random.randint(0, 500, size=50) list_lengths[::10] = 0 list_values = [list(map(int, np.random.randint(0, 100, size=x))) if i % 8 else None for i, x in enumerate(list_lengths)] a1 = pa.array(list_values) table = pa.Table.from_arrays([a1], ['int_lists']) _check_roundtrip(table) def test_sanitized_spark_field_names(): a0 = pa.array([0, 1, 2, 3, 4]) name = 'prohib; ,\t{}' table = pa.Table.from_arrays([a0], [name]) result = _roundtrip_table(table, write_table_kwargs={'flavor': 'spark'}) expected_name = 'prohib______' assert result.schema[0].name == expected_name @pytest.mark.pandas def test_spark_flavor_preserves_pandas_metadata(): df = _test_dataframe(size=100) df.index = np.arange(0, 10 * len(df), 10) df.index.name = 'foo' result = _roundtrip_pandas_dataframe(df, {'version': '2.0', 'flavor': 'spark'}) tm.assert_frame_equal(result, df) def test_fixed_size_binary(): t0 = pa.binary(10) data = [b'fooooooooo', None, b'barooooooo', b'quxooooooo'] a0 = pa.array(data, type=t0) table = pa.Table.from_arrays([a0], ['binary[10]']) _check_roundtrip(table) @pytest.mark.pandas def test_multithreaded_read(): df = alltypes_sample(size=10000) table = pa.Table.from_pandas(df) buf = io.BytesIO() _write_table(table, buf, compression='SNAPPY', version='2.0') buf.seek(0) table1 = _read_table(buf, use_threads=True) buf.seek(0) table2 = _read_table(buf, use_threads=False) assert table1.equals(table2) @pytest.mark.pandas def test_min_chunksize(): data = pd.DataFrame([np.arange(4)], columns=['A', 'B', 'C', 'D']) table = pa.Table.from_pandas(data.reset_index()) buf = io.BytesIO() _write_table(table, buf, chunk_size=-1) buf.seek(0) result = _read_table(buf) assert result.equals(table) with pytest.raises(ValueError): _write_table(table, buf, chunk_size=0) @pytest.mark.pandas def test_pass_separate_metadata(): # ARROW-471 df = alltypes_sample(size=10000) a_table = pa.Table.from_pandas(df) buf = io.BytesIO() _write_table(a_table, buf, compression='snappy', version='2.0') buf.seek(0) metadata = pq.read_metadata(buf) buf.seek(0) fileh = pq.ParquetFile(buf, metadata=metadata) tm.assert_frame_equal(df, fileh.read().to_pandas()) @pytest.mark.pandas def test_read_single_row_group(): # ARROW-471 N, K = 10000, 4 df = alltypes_sample(size=N) a_table = pa.Table.from_pandas(df) buf = io.BytesIO() _write_table(a_table, buf, row_group_size=N / K, compression='snappy', version='2.0') buf.seek(0) pf = pq.ParquetFile(buf) assert pf.num_row_groups == K row_groups = [pf.read_row_group(i) for i in range(K)] result = pa.concat_tables(row_groups) tm.assert_frame_equal(df, result.to_pandas()) @pytest.mark.pandas def test_read_single_row_group_with_column_subset(): N, K = 10000, 4 df = alltypes_sample(size=N) a_table = pa.Table.from_pandas(df) buf = io.BytesIO() 
_write_table(a_table, buf, row_group_size=N / K, compression='snappy', version='2.0') buf.seek(0) pf = pq.ParquetFile(buf) cols = list(df.columns[:2]) row_groups = [pf.read_row_group(i, columns=cols) for i in range(K)] result = pa.concat_tables(row_groups) tm.assert_frame_equal(df[cols], result.to_pandas()) # ARROW-4267: Selection of duplicate columns still leads to these columns # being read uniquely. row_groups = [pf.read_row_group(i, columns=cols + cols) for i in range(K)] result = pa.concat_tables(row_groups) tm.assert_frame_equal(df[cols], result.to_pandas()) @pytest.mark.pandas def test_scan_contents(): N, K = 10000, 4 df = alltypes_sample(size=N) a_table = pa.Table.from_pandas(df) buf = io.BytesIO() _write_table(a_table, buf, row_group_size=N / K, compression='snappy', version='2.0') buf.seek(0) pf = pq.ParquetFile(buf) assert pf.scan_contents() == 10000 assert pf.scan_contents(df.columns[:4]) == 10000 @pytest.mark.pandas def test_parquet_piece_read(tempdir): df = _test_dataframe(1000) table = pa.Table.from_pandas(df) path = tempdir / 'parquet_piece_read.parquet' _write_table(table, path, version='2.0') piece1 = pq.ParquetDatasetPiece(path) result = piece1.read() assert result.equals(table) @pytest.mark.pandas def test_parquet_piece_open_and_get_metadata(tempdir): df = _test_dataframe(100) table = pa.Table.from_pandas(df) path = tempdir / 'parquet_piece_read.parquet' _write_table(table, path, version='2.0') piece = pq.ParquetDatasetPiece(path) table1 = piece.read() assert isinstance(table1, pa.Table) meta1 = piece.get_metadata() assert isinstance(meta1, pq.FileMetaData) assert table == table1 def test_parquet_piece_basics(): path = '/baz.parq' piece1 = pq.ParquetDatasetPiece(path) piece2 = pq.ParquetDatasetPiece(path, row_group=1) piece3 = pq.ParquetDatasetPiece( path, row_group=1, partition_keys=[('foo', 0), ('bar', 1)]) assert str(piece1) == path assert str(piece2) == '/baz.parq | row_group=1' assert str(piece3) == 'partition[foo=0, bar=1] /baz.parq | row_group=1' assert piece1 == piece1 assert piece2 == piece2 assert piece3 == piece3 assert piece1 != piece3 def test_partition_set_dictionary_type(): set1 = pq.PartitionSet('key1', [u('foo'), u('bar'), u('baz')]) set2 = pq.PartitionSet('key2', [2007, 2008, 2009]) assert isinstance(set1.dictionary, pa.StringArray) assert isinstance(set2.dictionary, pa.IntegerArray) set3 = pq.PartitionSet('key2', [datetime.datetime(2007, 1, 1)]) with pytest.raises(TypeError): set3.dictionary @pytest.mark.pandas def test_read_partitioned_directory(tempdir): fs = LocalFileSystem.get_instance() _partition_test_for_filesystem(fs, tempdir) @pytest.mark.pandas def test_create_parquet_dataset_multi_threaded(tempdir): fs = LocalFileSystem.get_instance() base_path = tempdir _partition_test_for_filesystem(fs, base_path) manifest = pq.ParquetManifest(base_path, filesystem=fs, metadata_nthreads=1) dataset = pq.ParquetDataset(base_path, filesystem=fs, metadata_nthreads=16) assert len(dataset.pieces) > 0 partitions = dataset.partitions assert len(partitions.partition_names) > 0 assert partitions.partition_names == manifest.partitions.partition_names assert len(partitions.levels) == len(manifest.partitions.levels) @pytest.mark.pandas def test_equivalency(tempdir): fs = LocalFileSystem.get_instance() base_path = tempdir integer_keys = [0, 1] string_keys = ['a', 'b', 'c'] boolean_keys = [True, False] partition_spec = [ ['integer', integer_keys], ['string', string_keys], ['boolean', boolean_keys] ] df = pd.DataFrame({ 'integer': np.array(integer_keys, 
dtype='i4').repeat(15), 'string': np.tile(np.tile(np.array(string_keys, dtype=object), 5), 2), 'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5), 3), }, columns=['integer', 'string', 'boolean']) _generate_partition_directories(fs, base_path, partition_spec, df) # Old filters syntax: # integer == 1 AND string != b AND boolean == True dataset = pq.ParquetDataset( base_path, filesystem=fs, filters=[('integer', '=', 1), ('string', '!=', 'b'), ('boolean', '==', True)] ) table = dataset.read() result_df = (table.to_pandas().reset_index(drop=True)) assert 0 not in result_df['integer'].values assert 'b' not in result_df['string'].values assert False not in result_df['boolean'].values # filters in disjunctive normal form: # (integer == 1 AND string != b AND boolean == True) OR # (integer == 2 AND boolean == False) # TODO(ARROW-3388): boolean columns are reconstructed as string filters = [ [ ('integer', '=', 1), ('string', '!=', 'b'), ('boolean', '==', 'True') ], [('integer', '=', 0), ('boolean', '==', 'False')] ] dataset = pq.ParquetDataset(base_path, filesystem=fs, filters=filters) table = dataset.read() result_df = table.to_pandas().reset_index(drop=True) # Check that all rows in the DF fulfill the filter # Pandas 0.23.x has problems with indexing constant memoryviews in # categoricals. Thus we need to make an explicity copy here with np.array. df_filter_1 = (np.array(result_df['integer']) == 1) \ & (np.array(result_df['string']) != 'b') \ & (np.array(result_df['boolean']) == 'True') df_filter_2 = (np.array(result_df['integer']) == 0) \ & (np.array(result_df['boolean']) == 'False') assert df_filter_1.sum() > 0 assert df_filter_2.sum() > 0 assert result_df.shape[0] == (df_filter_1.sum() + df_filter_2.sum()) # Check for \0 in predicate values. Until they are correctly implemented # in ARROW-3391, they would otherwise lead to weird results with the # current code. with pytest.raises(NotImplementedError): filters = [[('string', '==', b'1\0a')]] pq.ParquetDataset(base_path, filesystem=fs, filters=filters) with pytest.raises(NotImplementedError): filters = [[('string', '==', u'1\0a')]] pq.ParquetDataset(base_path, filesystem=fs, filters=filters) @pytest.mark.pandas def test_cutoff_exclusive_integer(tempdir): fs = LocalFileSystem.get_instance() base_path = tempdir integer_keys = [0, 1, 2, 3, 4] partition_spec = [ ['integers', integer_keys], ] N = 5 df = pd.DataFrame({ 'index': np.arange(N), 'integers': np.array(integer_keys, dtype='i4'), }, columns=['index', 'integers']) _generate_partition_directories(fs, base_path, partition_spec, df) dataset = pq.ParquetDataset( base_path, filesystem=fs, filters=[ ('integers', '<', 4), ('integers', '>', 1), ] ) table = dataset.read() result_df = (table.to_pandas() .sort_values(by='index') .reset_index(drop=True)) result_list = [x for x in map(int, result_df['integers'].values)] assert result_list == [2, 3] @pytest.mark.pandas @pytest.mark.xfail( raises=TypeError, reason='Loss of type information in creation of categoricals.' 
) def test_cutoff_exclusive_datetime(tempdir): fs = LocalFileSystem.get_instance() base_path = tempdir date_keys = [ datetime.date(2018, 4, 9), datetime.date(2018, 4, 10), datetime.date(2018, 4, 11), datetime.date(2018, 4, 12), datetime.date(2018, 4, 13) ] partition_spec = [ ['dates', date_keys] ] N = 5 df = pd.DataFrame({ 'index': np.arange(N), 'dates': np.array(date_keys, dtype='datetime64'), }, columns=['index', 'dates']) _generate_partition_directories(fs, base_path, partition_spec, df) dataset = pq.ParquetDataset( base_path, filesystem=fs, filters=[ ('dates', '<', "2018-04-12"), ('dates', '>', "2018-04-10") ] ) table = dataset.read() result_df = (table.to_pandas() .sort_values(by='index') .reset_index(drop=True)) expected = pd.Categorical( np.array([datetime.date(2018, 4, 11)], dtype='datetime64'), categories=np.array(date_keys, dtype='datetime64')) assert result_df['dates'].values == expected @pytest.mark.pandas def test_inclusive_integer(tempdir): fs = LocalFileSystem.get_instance() base_path = tempdir integer_keys = [0, 1, 2, 3, 4] partition_spec = [ ['integers', integer_keys], ] N = 5 df = pd.DataFrame({ 'index': np.arange(N), 'integers': np.array(integer_keys, dtype='i4'), }, columns=['index', 'integers']) _generate_partition_directories(fs, base_path, partition_spec, df) dataset = pq.ParquetDataset( base_path, filesystem=fs, filters=[ ('integers', '<=', 3), ('integers', '>=', 2), ] ) table = dataset.read() result_df = (table.to_pandas() .sort_values(by='index') .reset_index(drop=True)) result_list = [int(x) for x in map(int, result_df['integers'].values)] assert result_list == [2, 3] @pytest.mark.pandas def test_inclusive_set(tempdir): fs = LocalFileSystem.get_instance() base_path = tempdir integer_keys = [0, 1] string_keys = ['a', 'b', 'c'] boolean_keys = [True, False] partition_spec = [ ['integer', integer_keys], ['string', string_keys], ['boolean', boolean_keys] ] df = pd.DataFrame({ 'integer': np.array(integer_keys, dtype='i4').repeat(15), 'string': np.tile(np.tile(np.array(string_keys, dtype=object), 5), 2), 'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5), 3), }, columns=['integer', 'string', 'boolean']) _generate_partition_directories(fs, base_path, partition_spec, df) dataset = pq.ParquetDataset( base_path, filesystem=fs, filters=[('integer', 'in', {1}), ('string', 'in', {'a', 'b'}), ('boolean', 'in', {True})] ) table = dataset.read() result_df = (table.to_pandas().reset_index(drop=True)) assert 0 not in result_df['integer'].values assert 'c' not in result_df['string'].values assert False not in result_df['boolean'].values @pytest.mark.pandas def test_invalid_pred_op(tempdir): fs = LocalFileSystem.get_instance() base_path = tempdir integer_keys = [0, 1, 2, 3, 4] partition_spec = [ ['integers', integer_keys], ] N = 5 df = pd.DataFrame({ 'index': np.arange(N), 'integers': np.array(integer_keys, dtype='i4'), }, columns=['index', 'integers']) _generate_partition_directories(fs, base_path, partition_spec, df) with pytest.raises(ValueError): pq.ParquetDataset(base_path, filesystem=fs, filters=[ ('integers', '=<', 3), ]) with pytest.raises(ValueError): pq.ParquetDataset(base_path, filesystem=fs, filters=[ ('integers', 'in', set()), ]) with pytest.raises(ValueError): pq.ParquetDataset(base_path, filesystem=fs, filters=[ ('integers', '!=', {3}), ]) @pytest.mark.pandas def test_filters_read_table(tempdir): # test that filters keyword is passed through in read_table fs = LocalFileSystem.get_instance() base_path = tempdir integer_keys = [0, 1, 2, 3, 4] 
partition_spec = [ ['integers', integer_keys], ] N = 5 df = pd.DataFrame({ 'index': np.arange(N), 'integers': np.array(integer_keys, dtype='i4'), }, columns=['index', 'integers']) _generate_partition_directories(fs, base_path, partition_spec, df) table = pq.read_table( base_path, filesystem=fs, filters=[('integers', '<', 3)]) assert table.num_rows == 3 table = pq.read_table( base_path, filesystem=fs, filters=[[('integers', '<', 3)]]) assert table.num_rows == 3 table = pq.read_pandas( base_path, filters=[('integers', '<', 3)]) assert table.num_rows == 3 @pytest.yield_fixture def s3_example(): access_key = os.environ['PYARROW_TEST_S3_ACCESS_KEY'] secret_key = os.environ['PYARROW_TEST_S3_SECRET_KEY'] bucket_name = os.environ['PYARROW_TEST_S3_BUCKET'] import s3fs fs = s3fs.S3FileSystem(key=access_key, secret=secret_key) test_dir = guid() bucket_uri = 's3://{0}/{1}'.format(bucket_name, test_dir) fs.mkdir(bucket_uri) yield fs, bucket_uri fs.rm(bucket_uri, recursive=True) @pytest.mark.pandas @pytest.mark.s3 def test_read_partitioned_directory_s3fs(s3_example): from pyarrow.filesystem import S3FSWrapper fs, bucket_uri = s3_example wrapper = S3FSWrapper(fs) _partition_test_for_filesystem(wrapper, bucket_uri) # Check that we can auto-wrap dataset = pq.ParquetDataset(bucket_uri, filesystem=fs) dataset.read() def _partition_test_for_filesystem(fs, base_path): foo_keys = [0, 1] bar_keys = ['a', 'b', 'c'] partition_spec = [ ['foo', foo_keys], ['bar', bar_keys] ] N = 30 df = pd.DataFrame({ 'index': np.arange(N), 'foo': np.array(foo_keys, dtype='i4').repeat(15), 'bar': np.tile(np.tile(np.array(bar_keys, dtype=object), 5), 2), 'values': np.random.randn(N) }, columns=['index', 'foo', 'bar', 'values']) _generate_partition_directories(fs, base_path, partition_spec, df) dataset = pq.ParquetDataset(base_path, filesystem=fs) table = dataset.read() result_df = (table.to_pandas() .sort_values(by='index') .reset_index(drop=True)) expected_df = (df.sort_values(by='index') .reset_index(drop=True) .reindex(columns=result_df.columns)) expected_df['foo'] = pd.Categorical(df['foo'], categories=foo_keys) expected_df['bar'] = pd.Categorical(df['bar'], categories=bar_keys) assert (result_df.columns == ['index', 'values', 'foo', 'bar']).all() tm.assert_frame_equal(result_df, expected_df) def _generate_partition_directories(fs, base_dir, partition_spec, df): # partition_spec : list of lists, e.g. 
[['foo', [0, 1, 2], # ['bar', ['a', 'b', 'c']] # part_table : a pyarrow.Table to write to each partition DEPTH = len(partition_spec) def _visit_level(base_dir, level, part_keys): name, values = partition_spec[level] for value in values: this_part_keys = part_keys + [(name, value)] level_dir = base_dir / '{0}={1}'.format(name, value) fs.mkdir(level_dir) if level == DEPTH - 1: # Generate example data file_path = level_dir / guid() filtered_df = _filter_partition(df, this_part_keys) part_table = pa.Table.from_pandas(filtered_df) with fs.open(file_path, 'wb') as f: _write_table(part_table, f) assert fs.exists(file_path) (level_dir / '_SUCCESS').touch() else: _visit_level(level_dir, level + 1, this_part_keys) (level_dir / '_SUCCESS').touch() _visit_level(base_dir, 0, []) def _test_read_common_metadata_files(fs, base_path): N = 100 df = pd.DataFrame({ 'index': np.arange(N), 'values': np.random.randn(N) }, columns=['index', 'values']) base_path = str(base_path) data_path = os.path.join(base_path, 'data.parquet') table = pa.Table.from_pandas(df) with fs.open(data_path, 'wb') as f: _write_table(table, f) metadata_path = os.path.join(base_path, '_common_metadata') with fs.open(metadata_path, 'wb') as f: pq.write_metadata(table.schema, f) dataset = pq.ParquetDataset(base_path, filesystem=fs) assert dataset.common_metadata_path == str(metadata_path) with fs.open(data_path) as f: common_schema = pq.read_metadata(f).schema assert dataset.schema.equals(common_schema) # handle list of one directory dataset2 = pq.ParquetDataset([base_path], filesystem=fs) assert dataset2.schema.equals(dataset.schema) @pytest.mark.pandas def test_read_common_metadata_files(tempdir): fs = LocalFileSystem.get_instance() _test_read_common_metadata_files(fs, tempdir) @pytest.mark.pandas def test_read_metadata_files(tempdir): fs = LocalFileSystem.get_instance() N = 100 df = pd.DataFrame({ 'index': np.arange(N), 'values': np.random.randn(N) }, columns=['index', 'values']) data_path = tempdir / 'data.parquet' table = pa.Table.from_pandas(df) with fs.open(data_path, 'wb') as f: _write_table(table, f) metadata_path = tempdir / '_metadata' with fs.open(metadata_path, 'wb') as f: pq.write_metadata(table.schema, f) dataset = pq.ParquetDataset(tempdir, filesystem=fs) assert dataset.metadata_path == str(metadata_path) with fs.open(data_path) as f: metadata_schema = pq.read_metadata(f).schema assert dataset.schema.equals(metadata_schema) @pytest.mark.pandas def test_read_schema(tempdir): N = 100 df = pd.DataFrame({ 'index': np.arange(N), 'values': np.random.randn(N) }, columns=['index', 'values']) data_path = tempdir / 'test.parquet' table = pa.Table.from_pandas(df) _write_table(table, data_path) read1 = pq.read_schema(data_path) read2 = pq.read_schema(data_path, memory_map=True) assert table.schema.equals(read1, check_metadata=False) assert table.schema.equals(read2, check_metadata=False) assert table.schema.metadata[b'pandas'] == read1.metadata[b'pandas'] def _filter_partition(df, part_keys): predicate = np.ones(len(df), dtype=bool) to_drop = [] for name, value in part_keys: to_drop.append(name) # to avoid pandas warning if isinstance(value, (datetime.date, datetime.datetime)): value = pd.Timestamp(value) predicate &= df[name] == value return df[predicate].drop(to_drop, axis=1) @pytest.mark.pandas def test_read_multiple_files(tempdir): nfiles = 10 size = 5 dirpath = tempdir / guid() dirpath.mkdir() test_data = [] paths = [] for i in range(nfiles): df = _test_dataframe(size, seed=i) # Hack so that we don't have a dtype cast in v1 files 
df['uint32'] = df['uint32'].astype(np.int64) path = dirpath / '{}.parquet'.format(i) table = pa.Table.from_pandas(df) _write_table(table, path) test_data.append(table) paths.append(path) # Write a _SUCCESS.crc file (dirpath / '_SUCCESS.crc').touch() def read_multiple_files(paths, columns=None, use_threads=True, **kwargs): dataset = pq.ParquetDataset(paths, **kwargs) return dataset.read(columns=columns, use_threads=use_threads) result = read_multiple_files(paths) expected = pa.concat_tables(test_data) assert result.equals(expected) # Read with provided metadata metadata = pq.read_metadata(paths[0]) result2 = read_multiple_files(paths, metadata=metadata) assert result2.equals(expected) result3 = pa.localfs.read_parquet(dirpath, schema=metadata.schema) assert result3.equals(expected) # Read column subset to_read = [0, 2, 6, result.num_columns - 1] col_names = [result.field(i).name for i in to_read] out = pa.localfs.read_parquet(dirpath, columns=col_names) expected = pa.Table.from_arrays([result.column(i) for i in to_read], names=col_names, metadata=result.schema.metadata) assert out.equals(expected) # Read with multiple threads pa.localfs.read_parquet(dirpath, use_threads=True) # Test failure modes with non-uniform metadata bad_apple = _test_dataframe(size, seed=i).iloc[:, :4] bad_apple_path = tempdir / '{}.parquet'.format(guid()) t = pa.Table.from_pandas(bad_apple) _write_table(t, bad_apple_path) bad_meta = pq.read_metadata(bad_apple_path) with pytest.raises(ValueError): read_multiple_files(paths + [bad_apple_path]) with pytest.raises(ValueError): read_multiple_files(paths, metadata=bad_meta) mixed_paths = [bad_apple_path, paths[0]] with pytest.raises(ValueError): read_multiple_files(mixed_paths, schema=bad_meta.schema) with pytest.raises(ValueError): read_multiple_files(mixed_paths) @pytest.mark.pandas def test_dataset_read_pandas(tempdir): nfiles = 5 size = 5 dirpath = tempdir / guid() dirpath.mkdir() test_data = [] frames = [] paths = [] for i in range(nfiles): df = _test_dataframe(size, seed=i) df.index = np.arange(i * size, (i + 1) * size) df.index.name = 'index' path = dirpath / '{}.parquet'.format(i) table = pa.Table.from_pandas(df) _write_table(table, path) test_data.append(table) frames.append(df) paths.append(path) dataset = pq.ParquetDataset(dirpath) columns = ['uint8', 'strings'] result = dataset.read_pandas(columns=columns).to_pandas() expected = pd.concat([x[columns] for x in frames]) tm.assert_frame_equal(result, expected) @pytest.mark.pandas def test_dataset_no_memory_map(tempdir): # ARROW-2627: Check that we can use ParquetDataset without memory-mapping dirpath = tempdir / guid() dirpath.mkdir() df = _test_dataframe(10, seed=0) path = dirpath / '{}.parquet'.format(0) table = pa.Table.from_pandas(df) _write_table(table, path, version='2.0') # TODO(wesm): Not sure how to easily check that memory mapping is _not_ # used. 
Mocking is not especially easy for pa.memory_map dataset = pq.ParquetDataset(dirpath, memory_map=False) assert dataset.pieces[0].read().equals(table) @pytest.mark.pandas @pytest.mark.parametrize('preserve_index', [True, False, None]) def test_dataset_read_pandas_common_metadata(tempdir, preserve_index): # ARROW-1103 nfiles = 5 size = 5 dirpath = tempdir / guid() dirpath.mkdir() test_data = [] frames = [] paths = [] for i in range(nfiles): df = _test_dataframe(size, seed=i) df.index = pd.Index(np.arange(i * size, (i + 1) * size), name='index') path = dirpath / '{}.parquet'.format(i) table = pa.Table.from_pandas(df, preserve_index=preserve_index) # Obliterate metadata table = table.replace_schema_metadata(None) assert table.schema.metadata is None _write_table(table, path) test_data.append(table) frames.append(df) paths.append(path) # Write _metadata common file table_for_metadata = pa.Table.from_pandas( df, preserve_index=preserve_index ) pq.write_metadata(table_for_metadata.schema, dirpath / '_metadata') dataset = pq.ParquetDataset(dirpath) columns = ['uint8', 'strings'] result = dataset.read_pandas(columns=columns).to_pandas() expected = pd.concat([x[columns] for x in frames]) expected.index.name = ( df.index.name if preserve_index is not False else None) tm.assert_frame_equal(result, expected) def _make_example_multifile_dataset(base_path, nfiles=10, file_nrows=5): test_data = [] paths = [] for i in range(nfiles): df = _test_dataframe(file_nrows, seed=i) path = base_path / '{}.parquet'.format(i) test_data.append(_write_table(df, path)) paths.append(path) return paths @pytest.mark.pandas def test_ignore_private_directories(tempdir): dirpath = tempdir / guid() dirpath.mkdir() paths = _make_example_multifile_dataset(dirpath, nfiles=10, file_nrows=5) # private directory (dirpath / '_impala_staging').mkdir() dataset = pq.ParquetDataset(dirpath) assert set(map(str, paths)) == set(x.path for x in dataset.pieces) @pytest.mark.pandas def test_ignore_hidden_files_dot(tempdir): dirpath = tempdir / guid() dirpath.mkdir() paths = _make_example_multifile_dataset(dirpath, nfiles=10, file_nrows=5) with (dirpath / '.DS_Store').open('wb') as f: f.write(b'gibberish') with (dirpath / '.private').open('wb') as f: f.write(b'gibberish') dataset = pq.ParquetDataset(dirpath) assert set(map(str, paths)) == set(x.path for x in dataset.pieces) @pytest.mark.pandas def test_ignore_hidden_files_underscore(tempdir): dirpath = tempdir / guid() dirpath.mkdir() paths = _make_example_multifile_dataset(dirpath, nfiles=10, file_nrows=5) with (dirpath / '_committed_123').open('wb') as f: f.write(b'abcd') with (dirpath / '_started_321').open('wb') as f: f.write(b'abcd') dataset = pq.ParquetDataset(dirpath) assert set(map(str, paths)) == set(x.path for x in dataset.pieces) @pytest.mark.pandas def test_multiindex_duplicate_values(tempdir): num_rows = 3 numbers = list(range(num_rows)) index = pd.MultiIndex.from_arrays( [['foo', 'foo', 'bar'], numbers], names=['foobar', 'some_numbers'], ) df = pd.DataFrame({'numbers': numbers}, index=index) table = pa.Table.from_pandas(df) filename = tempdir / 'dup_multi_index_levels.parquet' _write_table(table, filename) result_table = _read_table(filename) assert table.equals(result_table) result_df = result_table.to_pandas() tm.assert_frame_equal(result_df, df) @pytest.mark.pandas def test_write_error_deletes_incomplete_file(tempdir): # ARROW-1285 df = pd.DataFrame({'a': list('abc'), 'b': list(range(1, 4)), 'c': np.arange(3, 6).astype('u1'), 'd': np.arange(4.0, 7.0, dtype='float64'), 'e': 
[True, False, True], 'f': pd.Categorical(list('abc')), 'g': pd.date_range('20130101', periods=3), 'h': pd.date_range('20130101', periods=3, tz='US/Eastern'), 'i': pd.date_range('20130101', periods=3, freq='ns')}) pdf = pa.Table.from_pandas(df) filename = tempdir / 'tmp_file' try: _write_table(pdf, filename) except pa.ArrowException: pass assert not filename.exists() @pytest.mark.pandas def test_noncoerced_nanoseconds_written_without_exception(tempdir): # ARROW-1957: the Parquet version 2.0 writer preserves Arrow # nanosecond timestamps by default n = 9 df = pd.DataFrame({'x': range(n)}, index=pd.DatetimeIndex(start='2017-01-01', freq='1n', periods=n)) tb = pa.Table.from_pandas(df) filename = tempdir / 'written.parquet' try: pq.write_table(tb, filename, version='2.0') except Exception: pass assert filename.exists() recovered_table = pq.read_table(filename) assert tb.equals(recovered_table) # Loss of data thru coercion (without explicit override) still an error filename = tempdir / 'not_written.parquet' with pytest.raises(ValueError): pq.write_table(tb, filename, coerce_timestamps='ms', version='2.0') def test_read_non_existent_file(tempdir): path = 'non-existent-file.parquet' try: pq.read_table(path) except Exception as e: assert path in e.args[0] def test_read_table_doesnt_warn(datadir): with pytest.warns(None) as record: pq.read_table(datadir / 'v0.7.1.parquet') assert len(record) == 0 def _test_write_to_dataset_with_partitions(base_path, filesystem=None, schema=None, index_name=None): # ARROW-1400 output_df = pd.DataFrame({'group1': list('aaabbbbccc'), 'group2': list('eefeffgeee'), 'num': list(range(10)), 'nan': [pd.np.nan] * 10, 'date': np.arange('2017-01-01', '2017-01-11', dtype='datetime64[D]')}) cols = output_df.columns.tolist() partition_by = ['group1', 'group2'] output_table = pa.Table.from_pandas(output_df, schema=schema, safe=False, preserve_index=False) pq.write_to_dataset(output_table, base_path, partition_by, filesystem=filesystem) metadata_path = os.path.join(base_path, '_common_metadata') if filesystem is not None: with filesystem.open(metadata_path, 'wb') as f: pq.write_metadata(output_table.schema, f) else: pq.write_metadata(output_table.schema, metadata_path) # ARROW-2891: Ensure the output_schema is preserved when writing a # partitioned dataset dataset = pq.ParquetDataset(base_path, filesystem=filesystem, validate_schema=True) # ARROW-2209: Ensure the dataset schema also includes the partition columns dataset_cols = set(dataset.schema.to_arrow_schema().names) assert dataset_cols == set(output_table.schema.names) input_table = dataset.read() input_df = input_table.to_pandas() # Read data back in and compare with original DataFrame # Partitioned columns added to the end of the DataFrame when read input_df_cols = input_df.columns.tolist() assert partition_by == input_df_cols[-1 * len(partition_by):] # Partitioned columns become 'categorical' dtypes input_df = input_df[cols] for col in partition_by: output_df[col] = output_df[col].astype('category') assert output_df.equals(input_df) def _test_write_to_dataset_no_partitions(base_path, filesystem=None): # ARROW-1400 output_df = pd.DataFrame({'group1': list('aaabbbbccc'), 'group2': list('eefeffgeee'), 'num': list(range(10)), 'date': np.arange('2017-01-01', '2017-01-11', dtype='datetime64[D]')}) cols = output_df.columns.tolist() output_table = pa.Table.from_pandas(output_df) if filesystem is None: filesystem = LocalFileSystem.get_instance() # Without partitions, append files to root_path n = 5 for i in range(n): 
pq.write_to_dataset(output_table, base_path, filesystem=filesystem) output_files = [file for file in filesystem.ls(base_path) if file.endswith(".parquet")] assert len(output_files) == n # Deduplicated incoming DataFrame should match # original outgoing Dataframe input_table = pq.ParquetDataset(base_path, filesystem=filesystem).read() input_df = input_table.to_pandas() input_df = input_df.drop_duplicates() input_df = input_df[cols] assert output_df.equals(input_df) @pytest.mark.pandas def test_write_to_dataset_with_partitions(tempdir): _test_write_to_dataset_with_partitions(str(tempdir)) @pytest.mark.pandas def test_write_to_dataset_with_partitions_and_schema(tempdir): schema = pa.schema([pa.field('group1', type=pa.string()), pa.field('group2', type=pa.string()), pa.field('num', type=pa.int64()), pa.field('nan', type=pa.int32()), pa.field('date', type=pa.timestamp(unit='us'))]) _test_write_to_dataset_with_partitions(str(tempdir), schema=schema) @pytest.mark.pandas def test_write_to_dataset_with_partitions_and_index_name(tempdir): _test_write_to_dataset_with_partitions(str(tempdir), index_name='index_name') @pytest.mark.pandas def test_write_to_dataset_no_partitions(tempdir): _test_write_to_dataset_no_partitions(str(tempdir)) @pytest.mark.pandas def test_write_to_dataset_with_partitions_and_custom_filenames(tempdir): output_df = pd.DataFrame({'group1': list('aaabbbbccc'), 'group2': list('eefeffgeee'), 'num': list(range(10)), 'nan': [pd.np.nan] * 10, 'date': np.arange('2017-01-01', '2017-01-11', dtype='datetime64[D]')}) partition_by = ['group1', 'group2'] output_table = pa.Table.from_pandas(output_df) path = str(tempdir) def partition_filename_callback(keys): return "{0}-{1}.parquet".format(*keys) pq.write_to_dataset(output_table, path, partition_by, partition_filename_callback) dataset = pq.ParquetDataset(path) # ARROW-3538: Ensure partition filenames match the given pattern # defined in the local function partition_filename_callback expected_basenames = [ 'a-e.parquet', 'a-f.parquet', 'b-e.parquet', 'b-f.parquet', 'b-g.parquet', 'c-e.parquet' ] output_basenames = [os.path.basename(p.path) for p in dataset.pieces] assert sorted(expected_basenames) == sorted(output_basenames) @pytest.mark.large_memory def test_large_table_int32_overflow(): size = np.iinfo('int32').max + 1 arr = np.ones(size, dtype='uint8') parr = pa.array(arr, type=pa.uint8()) table = pa.Table.from_arrays([parr], names=['one']) f = io.BytesIO() _write_table(table, f) def _simple_table_roundtrip(table): stream = pa.BufferOutputStream() _write_table(table, stream) buf = stream.getvalue() return _read_table(buf) @pytest.mark.pandas @pytest.mark.large_memory def test_binary_array_overflow_to_chunked(): # ARROW-3762 # 2^31 + 1 bytes values = [b'x'] + [ b'x' * (1 << 20) ] * 2 * (1 << 10) df = pd.DataFrame({'byte_col': values}) tbl = pa.Table.from_pandas(df, preserve_index=False) read_tbl = _simple_table_roundtrip(tbl) col0_data = read_tbl[0] assert isinstance(col0_data, pa.ChunkedArray) # Split up into 2GB chunks assert col0_data.num_chunks == 2 assert tbl.equals(read_tbl) @pytest.mark.pandas @pytest.mark.large_memory def test_list_of_binary_large_cell(): # ARROW-4688 data = [] # TODO(wesm): handle chunked children # 2^31 - 1 bytes in a single cell # data.append([b'x' * (1 << 20)] * 2047 + [b'x' * ((1 << 20) - 1)]) # A little under 2GB in cell each containing approximately 10MB each data.extend([[b'x' * 1000000] * 10] * 214) arr = pa.array(data) table = pa.Table.from_arrays([arr], ['chunky_cells']) read_table = 
_simple_table_roundtrip(table) assert table.equals(read_table) @pytest.mark.pandas def test_index_column_name_duplicate(tempdir): data = { 'close': { pd.Timestamp('2017-06-30 01:31:00'): 154.99958999999998, pd.Timestamp('2017-06-30 01:32:00'): 154.99958999999998, }, 'time': { pd.Timestamp('2017-06-30 01:31:00'): pd.Timestamp( '2017-06-30 01:31:00' ), pd.Timestamp('2017-06-30 01:32:00'): pd.Timestamp( '2017-06-30 01:32:00' ), } } path = str(tempdir / 'data.parquet') dfx = pd.DataFrame(data).set_index('time', drop=False) tdfx = pa.Table.from_pandas(dfx) _write_table(tdfx, path) arrow_table = _read_table(path) result_df = arrow_table.to_pandas() tm.assert_frame_equal(result_df, dfx) @pytest.mark.pandas def test_parquet_nested_convenience(tempdir): # ARROW-1684 df = pd.DataFrame({ 'a': [[1, 2, 3], None, [4, 5], []], 'b': [[1.], None, None, [6., 7.]], }) path = str(tempdir / 'nested_convenience.parquet') table = pa.Table.from_pandas(df, preserve_index=False) _write_table(table, path) read = pq.read_table(path, columns=['a']) tm.assert_frame_equal(read.to_pandas(), df[['a']]) read = pq.read_table(path, columns=['a', 'b']) tm.assert_frame_equal(read.to_pandas(), df) @pytest.mark.pandas def test_backwards_compatible_index_naming(datadir): expected_string = b"""\ carat cut color clarity depth table price x y z 0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43 0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31 0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31 0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63 0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75 0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48 0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47 0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53 0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49 0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39""" expected = pd.read_csv(io.BytesIO(expected_string), sep=r'\s{2,}', index_col=None, header=0, engine='python') table = _read_table(datadir / 'v0.7.1.parquet') result = table.to_pandas() tm.assert_frame_equal(result, expected) @pytest.mark.pandas def test_backwards_compatible_index_multi_level_named(datadir): expected_string = b"""\ carat cut color clarity depth table price x y z 0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43 0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31 0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31 0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63 0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75 0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48 0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47 0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53 0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49 0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39""" expected = pd.read_csv( io.BytesIO(expected_string), sep=r'\s{2,}', index_col=['cut', 'color', 'clarity'], header=0, engine='python' ).sort_index() table = _read_table(datadir / 'v0.7.1.all-named-index.parquet') result = table.to_pandas() tm.assert_frame_equal(result, expected) @pytest.mark.pandas def test_backwards_compatible_index_multi_level_some_named(datadir): expected_string = b"""\ carat cut color clarity depth table price x y z 0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43 0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31 0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31 0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63 0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75 0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48 0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47 0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53 0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49 0.23 Very Good H 
VS1 59.4 61.0 338 4.00 4.05 2.39""" expected = pd.read_csv( io.BytesIO(expected_string), sep=r'\s{2,}', index_col=['cut', 'color', 'clarity'], header=0, engine='python' ).sort_index() expected.index = expected.index.set_names(['cut', None, 'clarity']) table = _read_table(datadir / 'v0.7.1.some-named-index.parquet') result = table.to_pandas() tm.assert_frame_equal(result, expected) @pytest.mark.pandas def test_backwards_compatible_column_metadata_handling(datadir): expected = pd.DataFrame( {'a': [1, 2, 3], 'b': [.1, .2, .3], 'c': pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')}) expected.index = pd.MultiIndex.from_arrays( [['a', 'b', 'c'], pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')], names=['index', None]) path = datadir / 'v0.7.1.column-metadata-handling.parquet' table = _read_table(path) result = table.to_pandas() tm.assert_frame_equal(result, expected) table = _read_table(path, columns=['a']) result = table.to_pandas() tm.assert_frame_equal(result, expected[['a']].reset_index(drop=True)) def _make_dataset_for_pickling(tempdir, N=100): path = tempdir / 'data.parquet' fs = LocalFileSystem.get_instance() df = pd.DataFrame({ 'index': np.arange(N), 'values': np.random.randn(N) }, columns=['index', 'values']) table = pa.Table.from_pandas(df) num_groups = 3 with pq.ParquetWriter(path, table.schema) as writer: for i in range(num_groups): writer.write_table(table) reader = pq.ParquetFile(path) assert reader.metadata.num_row_groups == num_groups metadata_path = tempdir / '_metadata' with fs.open(metadata_path, 'wb') as f: pq.write_metadata(table.schema, f) dataset = pq.ParquetDataset(tempdir, filesystem=fs) assert dataset.metadata_path == str(metadata_path) return dataset @pytest.mark.pandas @pytest.mark.parametrize('pickler', [ pytest.param(pickle, id='builtin'), pytest.param(pytest.importorskip('cloudpickle'), id='cloudpickle') ]) def test_pickle_dataset(tempdir, datadir, pickler): def is_pickleable(obj): return obj == pickler.loads(pickler.dumps(obj)) dataset = _make_dataset_for_pickling(tempdir) assert is_pickleable(dataset) assert is_pickleable(dataset.metadata) assert is_pickleable(dataset.metadata.schema) assert len(dataset.metadata.schema) for column in dataset.metadata.schema: assert is_pickleable(column) for piece in dataset.pieces: assert is_pickleable(piece) metadata = piece.get_metadata() assert metadata.num_row_groups for i in range(metadata.num_row_groups): assert is_pickleable(metadata.row_group(i)) @pytest.mark.pandas def test_decimal_roundtrip(tempdir): num_values = 10 columns = {} for precision in range(1, 39): for scale in range(0, precision + 1): with util.random_seed(0): random_decimal_values = [ util.randdecimal(precision, scale) for _ in range(num_values) ] column_name = ('dec_precision_{:d}_scale_{:d}' .format(precision, scale)) columns[column_name] = random_decimal_values expected = pd.DataFrame(columns) filename = tempdir / 'decimals.parquet' string_filename = str(filename) table = pa.Table.from_pandas(expected) _write_table(table, string_filename) result_table = _read_table(string_filename) result = result_table.to_pandas() tm.assert_frame_equal(result, expected) @pytest.mark.pandas @pytest.mark.xfail( raises=pa.ArrowException, reason='Parquet does not support negative scale' ) def test_decimal_roundtrip_negative_scale(tempdir): expected = pd.DataFrame({'decimal_num': [decimal.Decimal('1.23E4')]}) filename = tempdir / 'decimals.parquet' string_filename = str(filename) t = pa.Table.from_pandas(expected) _write_table(t, string_filename) 
result_table = _read_table(string_filename) result = result_table.to_pandas() tm.assert_frame_equal(result, expected) @pytest.mark.pandas def test_parquet_writer_context_obj(tempdir): df = _test_dataframe(100) df['unique_id'] = 0 arrow_table = pa.Table.from_pandas(df, preserve_index=False) out = pa.BufferOutputStream() with pq.ParquetWriter(out, arrow_table.schema, version='2.0') as writer: frames = [] for i in range(10): df['unique_id'] = i arrow_table = pa.Table.from_pandas(df, preserve_index=False) writer.write_table(arrow_table) frames.append(df.copy()) buf = out.getvalue() result = _read_table(pa.BufferReader(buf)) expected = pd.concat(frames, ignore_index=True) tm.assert_frame_equal(result.to_pandas(), expected) @pytest.mark.pandas def test_parquet_writer_context_obj_with_exception(tempdir): df = _test_dataframe(100) df['unique_id'] = 0 arrow_table = pa.Table.from_pandas(df, preserve_index=False) out = pa.BufferOutputStream() error_text = 'Artificial Error' try: with pq.ParquetWriter(out, arrow_table.schema, version='2.0') as writer: frames = [] for i in range(10): df['unique_id'] = i arrow_table = pa.Table.from_pandas(df, preserve_index=False) writer.write_table(arrow_table) frames.append(df.copy()) if i == 5: raise ValueError(error_text) except Exception as e: assert str(e) == error_text buf = out.getvalue() result = _read_table(pa.BufferReader(buf)) expected = pd.concat(frames, ignore_index=True) tm.assert_frame_equal(result.to_pandas(), expected) @pytest.mark.pandas def test_zlib_compression_bug(): # ARROW-3514: "zlib deflate failed, output buffer too small" table = pa.Table.from_arrays([pa.array(['abc', 'def'])], ['some_col']) f = io.BytesIO() pq.write_table(table, f, compression='gzip') f.seek(0) roundtrip = pq.read_table(f) tm.assert_frame_equal(roundtrip.to_pandas(), table.to_pandas()) @pytest.mark.pandas def test_merging_parquet_tables_with_different_pandas_metadata(tempdir): # ARROW-3728: Merging Parquet Files - Pandas Meta in Schema Mismatch schema = pa.schema([ pa.field('int', pa.int16()), pa.field('float', pa.float32()), pa.field('string', pa.string()) ]) df1 = pd.DataFrame({ 'int': np.arange(3, dtype=np.uint8), 'float': np.arange(3, dtype=np.float32), 'string': ['ABBA', 'EDDA', 'ACDC'] }) df2 = pd.DataFrame({ 'int': [4, 5], 'float': [1.1, None], 'string': [None, None] }) table1 = pa.Table.from_pandas(df1, schema=schema, preserve_index=False) table2 = pa.Table.from_pandas(df2, schema=schema, preserve_index=False) assert not table1.schema.equals(table2.schema) assert table1.schema.equals(table2.schema, check_metadata=False) writer = pq.ParquetWriter(tempdir / 'merged.parquet', schema=schema) writer.write_table(table1) writer.write_table(table2) def test_empty_row_groups(tempdir): # ARROW-3020 table = pa.Table.from_arrays([pa.array([], type='int32')], ['f0']) path = tempdir / 'empty_row_groups.parquet' num_groups = 3 with pq.ParquetWriter(path, table.schema) as writer: for i in range(num_groups): writer.write_table(table) reader = pq.ParquetFile(path) assert reader.metadata.num_row_groups == num_groups for i in range(num_groups): assert reader.read_row_group(i).equals(table) @pytest.mark.pandas def test_parquet_writer_with_caller_provided_filesystem(): out = pa.BufferOutputStream() class CustomFS(FileSystem): def __init__(self): self.path = None self.mode = None def open(self, path, mode='rb'): self.path = path self.mode = mode return out fs = CustomFS() fname = 'expected_fname.parquet' df = _test_dataframe(100) table = pa.Table.from_pandas(df, preserve_index=False) with 
pq.ParquetWriter(fname, table.schema, filesystem=fs, version='2.0') \ as writer: writer.write_table(table) assert fs.path == fname assert fs.mode == 'wb' assert out.closed buf = out.getvalue() table_read = _read_table(pa.BufferReader(buf)) df_read = table_read.to_pandas() tm.assert_frame_equal(df_read, df) # Should raise ValueError when filesystem is passed with file-like object with pytest.raises(ValueError) as err_info: pq.ParquetWriter(pa.BufferOutputStream(), table.schema, filesystem=fs) expected_msg = ("filesystem passed but where is file-like, so" " there is nothing to open with filesystem.") assert str(err_info) == expected_msg def test_writing_empty_lists(): # ARROW-2591: [Python] Segmentation fault issue in pq.write_table arr1 = pa.array([[], []], pa.list_(pa.int32())) table = pa.Table.from_arrays([arr1], ['list(int32)']) _check_roundtrip(table) def test_write_nested_zero_length_array_chunk_failure(): # Bug report in ARROW-3792 cols = OrderedDict( int32=pa.int32(), list_string=pa.list_(pa.string()) ) data = [[], [OrderedDict(int32=1, list_string=('G',)), ]] # This produces a table with a column like # <Column name='list_string' type=ListType(list<item: string>)> # [ # [], # [ # [ # "G" # ] # ] # ] # # Each column is a ChunkedArray with 2 elements my_arrays = [pa.array(batch, type=pa.struct(cols)).flatten() for batch in data] my_batches = [pa.RecordBatch.from_arrays(batch, pa.schema(cols)) for batch in my_arrays] tbl = pa.Table.from_batches(my_batches, pa.schema(cols)) _check_roundtrip(tbl) @pytest.mark.pandas def test_partitioned_dataset(tempdir): # ARROW-3208: Segmentation fault when reading a Parquet partitioned dataset # to a Parquet file path = tempdir / "ARROW-3208" df = pd.DataFrame({ 'one': [-1, 10, 2.5, 100, 1000, 1, 29.2], 'two': [-1, 10, 2, 100, 1000, 1, 11], 'three': [0, 0, 0, 0, 0, 0, 0] }) table = pa.Table.from_pandas(df) pq.write_to_dataset(table, root_path=str(path), partition_cols=['one', 'two']) table = pq.ParquetDataset(path).read() pq.write_table(table, path / "output.parquet") def test_read_column_invalid_index(): table = pa.table([pa.array([4, 5]), pa.array(["foo", "bar"])], names=['ints', 'strs']) bio = pa.BufferOutputStream() pq.write_table(table, bio) f = pq.ParquetFile(bio.getvalue()) assert f.reader.read_column(0).to_pylist() == [4, 5] assert f.reader.read_column(1).to_pylist() == ["foo", "bar"] for index in (-1, 2): with pytest.raises((ValueError, IndexError)): f.reader.read_column(index) def test_direct_read_dictionary(): # ARROW-3325 repeats = 10 nunique = 5 data = [ [tm.rands(10) for i in range(nunique)] * repeats, ] table = pa.table(data, names=['f0']) bio = pa.BufferOutputStream() pq.write_table(table, bio) contents = bio.getvalue() result = pq.read_table(pa.BufferReader(contents), read_dictionary=['f0']) # Compute dictionary-encoded subfield expected = pa.table([table[0].dictionary_encode()], names=['f0']) assert result.equals(expected) def test_dataset_read_dictionary(tempdir): path = tempdir / "ARROW-3325-dataset" t1 = pa.table([[tm.rands(10) for i in range(5)] * 10], names=['f0']) t2 = pa.table([[tm.rands(10) for i in range(5)] * 10], names=['f0']) pq.write_to_dataset(t1, root_path=str(path)) pq.write_to_dataset(t2, root_path=str(path)) result = pq.ParquetDataset(path, read_dictionary=['f0']).read() # The order of the chunks is non-deterministic ex_chunks = [t1[0].chunk(0).dictionary_encode(), t2[0].chunk(0).dictionary_encode()] assert result[0].num_chunks == 2 c0, c1 = result[0].chunk(0), result[0].chunk(1) if c0.equals(ex_chunks[0]): assert 
c1.equals(ex_chunks[1]) else: assert c0.equals(ex_chunks[1]) assert c1.equals(ex_chunks[0]) def test_direct_read_dictionary_subfield(): repeats = 10 nunique = 5 data = [ [[tm.rands(10)] for i in range(nunique)] * repeats, ] table = pa.table(data, names=['f0']) bio = pa.BufferOutputStream() pq.write_table(table, bio) contents = bio.getvalue() result = pq.read_table(pa.BufferReader(contents), read_dictionary=['f0.list.item']) arr = pa.array(data[0]) values_as_dict = arr.values.dictionary_encode() inner_indices = values_as_dict.indices.cast('int32') new_values = pa.DictionaryArray.from_arrays(inner_indices, values_as_dict.dictionary) offsets = pa.array(range(51), type='int32') expected_arr = pa.ListArray.from_arrays(offsets, new_values) expected = pa.table([expected_arr], names=['f0']) assert result.equals(expected) assert result[0].num_chunks == 1 @pytest.mark.pandas def test_dataset_metadata(tempdir): path = tempdir / "ARROW-1983-dataset" # create and write a test dataset df = pd.DataFrame({ 'one': [1, 2, 3], 'two': [-1, -2, -3], 'three': [[1, 2], [2, 3], [3, 4]], }) table = pa.Table.from_pandas(df) metadata_list = [] pq.write_to_dataset(table, root_path=str(path), partition_cols=['one', 'two'], metadata_collector=metadata_list) # open the dataset and collect metadata from pieces: dataset = pq.ParquetDataset(path) metadata_list2 = [p.get_metadata() for p in dataset.pieces] # compare metadata list content: assert len(metadata_list) == len(metadata_list2) for md, md2 in zip(metadata_list, metadata_list2): d = md.to_dict() d2 = md2.to_dict() # serialized_size is initialized in the reader: assert d.pop('serialized_size') == 0 assert d2.pop('serialized_size') > 0 assert d == d2 def test_parquet_file_too_small(tempdir): path = str(tempdir / "test.parquet") with pytest.raises(pa.ArrowIOError, match='size is 0 bytes'): with open(path, 'wb') as f: pass pq.read_table(path) with pytest.raises(pa.ArrowIOError, match='size is 4 bytes'): with open(path, 'wb') as f: f.write(b'ffff') pq.read_table(path) @pytest.mark.pandas def test_categorical_index_survives_roundtrip(): # ARROW-3652, addressed by ARROW-3246 df = pd.DataFrame([['a', 'b'], ['c', 'd']], columns=['c1', 'c2']) df['c1'] = df['c1'].astype('category') df = df.set_index(['c1']) table = pa.Table.from_pandas(df) bos = pa.BufferOutputStream() pq.write_table(table, bos) ref_df = pq.read_pandas(bos.getvalue()).to_pandas() assert isinstance(ref_df.index, pd.CategoricalIndex) assert ref_df.index.equals(df.index) def test_dictionary_array_automatically_read(): # ARROW-3246 # Make a large dictionary, a little over 4MB of data dict_length = 4000 dict_values = pa.array([('x' * 1000 + '_{}'.format(i)) for i in range(dict_length)]) num_chunks = 10 chunk_size = 100 chunks = [] for i in range(num_chunks): indices = np.random.randint(0, dict_length, size=chunk_size).astype(np.int32) chunks.append(pa.DictionaryArray.from_arrays(pa.array(indices), dict_values)) table = pa.table([pa.chunked_array(chunks)], names=['f0']) bio = pa.BufferOutputStream() pq.write_table(table, bio) contents = bio.getvalue() result = pq.read_table(pa.BufferReader(contents)) assert result.equals(table) # The only key in the metadata was the Arrow schema key assert result.schema.metadata is None @pytest.mark.pandas def test_pandas_categorical_na_type_row_groups(): # ARROW-5085 df = pd.DataFrame({"col": [None] * 100, "int": [1.0] * 100}) df_category = df.astype({"col": "category", "int": "category"}) table = pa.Table.from_pandas(df) table_cat = pa.Table.from_pandas(df_category) buf = 
pa.BufferOutputStream() # it works pq.write_table(table_cat, buf, version="2.0", chunk_size=10) result = pq.read_table(buf.getvalue()) # Result is non-categorical assert result[0].equals(table[0]) assert result[1].equals(table[1]) @pytest.mark.pandas def test_pandas_categorical_roundtrip(): # ARROW-5480, this was enabled by ARROW-3246 # Have one of the categories unobserved and include a null (-1) codes = np.array([2, 0, 0, 2, 0, -1, 2], dtype='int32') categories = ['foo', 'bar', 'baz'] df = pd.DataFrame({'x': pd.Categorical.from_codes( codes, categories=categories)}) buf = pa.BufferOutputStream() pq.write_table(pa.table(df), buf) result = pq.read_table(buf.getvalue()).to_pandas() assert result.x.dtype == 'category' assert (result.x.cat.categories == categories).all() tm.assert_frame_equal(result, df) @pytest.mark.pandas def test_multi_dataset_metadata(tempdir): filenames = ["ARROW-1983-dataset.0", "ARROW-1983-dataset.1"] metapath = str(tempdir / "_metadata") # create a test dataset df = pd.DataFrame({ 'one': [1, 2, 3], 'two': [-1, -2, -3], 'three': [[1, 2], [2, 3], [3, 4]], }) table = pa.Table.from_pandas(df) # write dataset twice and collect/merge metadata _meta = None for filename in filenames: meta = [] pq.write_table(table, str(tempdir / filename), metadata_collector=meta) meta[0].set_file_path(filename) if _meta is None: _meta = meta[0] else: _meta.append_row_groups(meta[0]) # Write merged metadata-only file with open(metapath, "wb") as f: _meta.write_metadata_file(f) # Read back the metadata meta = pq.read_metadata(metapath) md = meta.to_dict() _md = _meta.to_dict() for key in _md: if key != 'serialized_size': assert _md[key] == md[key] assert _md['num_columns'] == 3 assert _md['num_rows'] == 6 assert _md['num_row_groups'] == 2 assert _md['serialized_size'] == 0 assert md['serialized_size'] > 0 @pytest.mark.pandas def test_filter_before_validate_schema(tempdir): # ARROW-4076 apply filter before schema validation # to avoid checking unneeded schemas # create partitioned dataset with mismatching schemas which would # otherwise raise if first validation all schemas dir1 = tempdir / 'A=0' dir1.mkdir() table1 = pa.Table.from_pandas(pd.DataFrame({'B': [1, 2, 3]})) pq.write_table(table1, dir1 / 'data.parquet') dir2 = tempdir / 'A=1' dir2.mkdir() table2 = pa.Table.from_pandas(pd.DataFrame({'B': ['a', 'b', 'c']})) pq.write_table(table2, dir2 / 'data.parquet') # read single file using filter table = pq.read_table(tempdir, filters=[[('A', '==', 0)]]) assert table.column('B').equals(pa.chunked_array([[1, 2, 3]]))
[]
[]
[ "PYARROW_TEST_S3_BUCKET", "PYARROW_TEST_S3_SECRET_KEY", "PYARROW_TEST_S3_ACCESS_KEY" ]
[]
["PYARROW_TEST_S3_BUCKET", "PYARROW_TEST_S3_SECRET_KEY", "PYARROW_TEST_S3_ACCESS_KEY"]
python
3
0
octavia/certificates/common/local.py
# Copyright (c) 2014 Rackspace US, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Common classes for local filesystem certificate handling """ import os from oslo_config import cfg from octavia.certificates.common import cert TLS_CERT_DEFAULT = os.environ.get( 'OS_OCTAVIA_TLS_CA_CERT', '/etc/ssl/certs/ssl-cert-snakeoil.pem' ) TLS_KEY_DEFAULT = os.environ.get( 'OS_OCTAVIA_TLS_CA_KEY', '/etc/ssl/private/ssl-cert-snakeoil.key' ) TLS_PKP_DEFAULT = os.environ.get('OS_OCTAVIA_CA_KEY_PASS') TLS_PASS_AMPS_DEFAULT = os.environ.get('TLS_PASS_AMPS_DEFAULT', 'insecure-key-do-not-use-this-key') TLS_DIGEST_DEFAULT = os.environ.get('OS_OCTAVIA_CA_SIGNING_DIGEST', 'sha256') TLS_STORAGE_DEFAULT = os.environ.get( 'OS_OCTAVIA_TLS_STORAGE', '/var/lib/octavia/certificates/' ) certgen_opts = [ cfg.StrOpt('ca_certificate', default=TLS_CERT_DEFAULT, help='Absolute path to the CA Certificate for signing. Defaults' ' to env[OS_OCTAVIA_TLS_CA_CERT].'), cfg.StrOpt('ca_private_key', default=TLS_KEY_DEFAULT, help='Absolute path to the Private Key for signing. Defaults' ' to env[OS_OCTAVIA_TLS_CA_KEY].'), cfg.StrOpt('ca_private_key_passphrase', default=TLS_PKP_DEFAULT, help='Passphrase for the Private Key. Defaults' ' to env[OS_OCTAVIA_CA_KEY_PASS] or None.'), cfg.StrOpt('server_certs_key_passphrase', default=TLS_PASS_AMPS_DEFAULT, help='Passphrase for encrypting Amphora Certificates and ' 'Private Keys. Must be 32, base64(url) compatible, ' 'characters long. Defaults to env[TLS_PASS_AMPS_DEFAULT] ' 'or insecure-key-do-not-use-this-key', regex=r'^[A-Za-z0-9\-_=]{32}$', required=True), cfg.StrOpt('signing_digest', default=TLS_DIGEST_DEFAULT, help='Certificate signing digest. Defaults' ' to env[OS_OCTAVIA_CA_SIGNING_DIGEST] or "sha256".'), cfg.IntOpt('cert_validity_time', default=30 * 24 * 60 * 60, help="The validity time for the Amphora Certificates " "(in seconds)."), ] certmgr_opts = [ cfg.StrOpt('storage_path', default=TLS_STORAGE_DEFAULT, help='Absolute path to the certificate storage directory. ' 'Defaults to env[OS_OCTAVIA_TLS_STORAGE].') ] class LocalCert(cert.Cert): """Representation of a Cert for local storage.""" def __init__(self, certificate, private_key, intermediates=None, private_key_passphrase=None): self.certificate = certificate self.intermediates = intermediates self.private_key = private_key self.private_key_passphrase = private_key_passphrase def get_certificate(self): return self.certificate def get_intermediates(self): return self.intermediates def get_private_key(self): return self.private_key def get_private_key_passphrase(self): return self.private_key_passphrase
[]
[]
[ "TLS_PASS_AMPS_DEFAULT", "OS_OCTAVIA_TLS_CA_CERT", "OS_OCTAVIA_CA_SIGNING_DIGEST", "OS_OCTAVIA_CA_KEY_PASS", "OS_OCTAVIA_TLS_CA_KEY", "OS_OCTAVIA_TLS_STORAGE" ]
[]
["TLS_PASS_AMPS_DEFAULT", "OS_OCTAVIA_TLS_CA_CERT", "OS_OCTAVIA_CA_SIGNING_DIGEST", "OS_OCTAVIA_CA_KEY_PASS", "OS_OCTAVIA_TLS_CA_KEY", "OS_OCTAVIA_TLS_STORAGE"]
python
6
0
examples/replicator/replicator_test.go
package main import ( "context" "fmt" "log" "os" "reflect" "regexp" "testing" "time" "github.com/gocql/gocql" scyllacdc "github.com/scylladb/scylla-cdc-go" ) const ( sourceAddress = "127.0.0.1" destinationAddress = "127.0.0.2" ) type schema struct { tableName string createQuery string } var udts = []string{ "CREATE TYPE ks.udt_simple (a int, b int, c text)", } var ( schemaSimple = schema{ "ks.tbl_simple", "CREATE TABLE ks.tbl_simple (pk text, ck int, v1 int, v2 text, PRIMARY KEY (pk, ck))", } schemaMultipleClusteringKeys = schema{ "ks.tbl_multiple_clustering_keys", "CREATE TABLE ks.tbl_multiple_clustering_keys (pk text, ck1 int, ck2 int, v int, PRIMARY KEY (pk, ck1, ck2))", } schemaBlobs = schema{ "ks.tbl_blobs", "CREATE TABLE ks.tbl_blobs (pk text, ck int, v blob, PRIMARY KEY (pk, ck))", } schemaLists = schema{ "ks.tbl_lists", "CREATE TABLE ks.tbl_lists (pk text, ck int, v list<int>, PRIMARY KEY(pk, ck))", } schemaSets = schema{ "ks.tbl_sets", "CREATE TABLE ks.tbl_sets (pk text, ck int, v set<int>, PRIMARY KEY (pk, ck))", } schemaMaps = schema{ "ks.tbl_maps", "CREATE TABLE ks.tbl_maps (pk text, ck int, v map<int, int>, PRIMARY KEY (pk, ck))", } schemaTuples = schema{ "ks.tbl_tuples", "CREATE TABLE ks.tbl_tuples (pk text, ck int, v tuple<int, text>, PRIMARY KEY (pk, ck))", } schemaTuplesInTuples = schema{ "ks.tbl_tuples_in_tuples", "CREATE TABLE ks.tbl_tuples_in_tuples (pk text, ck int, v tuple<tuple<int, text>, int>, PRIMARY KEY (pk, ck))", } schemaTuplesInTuplesInTuples = schema{ "ks.tbl_tuples_in_tuples_in_tuples", "CREATE TABLE ks.tbl_tuples_in_tuples_in_tuples (pk text, ck int, v tuple<tuple<tuple<int, int>, text>, int>, PRIMARY KEY (pk, ck))", } schemaUDTs = schema{ "ks.tbl_udts", "CREATE TABLE ks.tbl_udts (pk text, ck int, v ks.udt_simple, PRIMARY KEY (pk, ck))", } ) var testCases = []struct { schema schema pk string queries []string }{ // Operations test cases { schemaSimple, "simpleInserts", []string{ "INSERT INTO %s (pk, ck, v1, v2) VALUES ('simpleInserts', 1, 2, 'abc')", "INSERT INTO %s (pk, ck, v1) VALUES ('simpleInserts', 2, 3)", "INSERT INTO %s (pk, ck, v2) VALUES ('simpleInserts', 2, 'def')", }, }, { schemaSimple, "simpleUpdates", []string{ "UPDATE %s SET v1 = 1 WHERE pk = 'simpleUpdates' AND ck = 1", "UPDATE %s SET v2 = 'abc' WHERE pk = 'simpleUpdates' AND ck = 2", "UPDATE %s SET v1 = 5, v2 = 'def' WHERE pk = 'simpleUpdates' AND ck = 3", }, }, { schemaSimple, "rowDeletes", []string{ "INSERT INTO %s (pk, ck, v1, v2) VALUES ('rowDeletes', 1, 2, 'abc')", "INSERT INTO %s (pk, ck, v1, v2) VALUES ('rowDeletes', 2, 3, 'def')", "DELETE FROM %s WHERE pk = 'rowDeletes' AND ck = 1", }, }, { schemaSimple, "partitionDeletes", []string{ "INSERT INTO %s (pk, ck, v1, v2) VALUES ('partitionDeletes', 1, 2, 'abc')", "INSERT INTO %s (pk, ck, v1, v2) VALUES ('partitionDeletes', 2, 3, 'def')", "DELETE FROM %s WHERE pk = 'partitionDeletes'", // Insert one more row, just to check if replication works at all "INSERT INTO %s (pk, ck, v1, v2) VALUES ('partitionDeletes', 4, 5, 'def')", }, }, { schemaMultipleClusteringKeys, "rangeDeletes", []string{ "INSERT INTO %s (pk, ck1, ck2, v) VALUES ('rangeDeletes', 1, 1, 0)", "INSERT INTO %s (pk, ck1, ck2, v) VALUES ('rangeDeletes', 1, 2, 0)", "INSERT INTO %s (pk, ck1, ck2, v) VALUES ('rangeDeletes', 1, 3, 0)", "INSERT INTO %s (pk, ck1, ck2, v) VALUES ('rangeDeletes', 1, 4, 0)", "INSERT INTO %s (pk, ck1, ck2, v) VALUES ('rangeDeletes', 2, 1, 0)", "INSERT INTO %s (pk, ck1, ck2, v) VALUES ('rangeDeletes', 2, 2, 0)", "INSERT INTO %s (pk, ck1, ck2, v) VALUES 
('rangeDeletes', 2, 3, 0)", "INSERT INTO %s (pk, ck1, ck2, v) VALUES ('rangeDeletes', 2, 4, 0)", "INSERT INTO %s (pk, ck1, ck2, v) VALUES ('rangeDeletes', 3, 1, 0)", "INSERT INTO %s (pk, ck1, ck2, v) VALUES ('rangeDeletes', 3, 2, 0)", "INSERT INTO %s (pk, ck1, ck2, v) VALUES ('rangeDeletes', 3, 3, 0)", "INSERT INTO %s (pk, ck1, ck2, v) VALUES ('rangeDeletes', 3, 4, 0)", "INSERT INTO %s (pk, ck1, ck2, v) VALUES ('rangeDeletes', 4, 1, 0)", "INSERT INTO %s (pk, ck1, ck2, v) VALUES ('rangeDeletes', 4, 2, 0)", "INSERT INTO %s (pk, ck1, ck2, v) VALUES ('rangeDeletes', 4, 3, 0)", "INSERT INTO %s (pk, ck1, ck2, v) VALUES ('rangeDeletes', 4, 4, 0)", "DELETE FROM %s WHERE pk = 'rangeDeletes' AND ck1 > 3", "DELETE FROM %s WHERE pk = 'rangeDeletes' AND ck1 <= 1", "DELETE FROM %s WHERE pk = 'rangeDeletes' AND ck1 = 2 AND ck2 > 1 AND ck2 < 4", }, }, // Blob test cases { schemaBlobs, "blobs", []string{ "INSERT INTO %s (pk, ck, v) VALUES ('blobs', 1, 0x1234)", "INSERT INTO %s (pk, ck, v) VALUES ('blobs', 2, 0x)", "INSERT INTO %s (pk, ck, v) VALUES ('blobs', 3, null)", "INSERT INTO %s (pk, ck, v) VALUES ('blobs', 4, 0x4321)", "INSERT INTO %s (pk, ck, v) VALUES ('blobs', 5, 0x00)", "UPDATE %s SET v = null WHERE pk = 'blobs' AND ck = 4", "UPDATE %s SET v = 0x WHERE pk = 'blobs' AND ck = 5", }, }, // Lists test cases { schemaLists, "listOverwrites", []string{ "INSERT INTO %s (pk, ck, v) VALUES ('listOverwrites', 1, [1, 2, 3])", "INSERT INTO %s (pk, ck, v) VALUES ('listOverwrites', 1, [4, 5, 6, 7])", "INSERT INTO %s (pk, ck, v) VALUES ('listOverwrites', 2, [6, 5, 4, 3, 2, 1])", "INSERT INTO %s (pk, ck, v) VALUES ('listOverwrites', 2, null)", "INSERT INTO %s (pk, ck, v) VALUES ('listOverwrites', 3, [1, 11, 111])", "UPDATE %s SET v = [2, 22, 222] WHERE pk = 'listOverwrites' AND ck = 3", }, }, { schemaLists, "listAppends", []string{ "INSERT INTO %s (pk, ck, v) VALUES ('listAppends', 1, [1, 2, 3])", "UPDATE %s SET v = v + [4, 5, 6] WHERE pk = 'listAppends' AND ck = 1", "UPDATE %s SET v = [-2, -1, 0] + v WHERE pk = 'listAppends' AND ck = 1", }, }, { schemaLists, "listRemoves", []string{ "INSERT INTO %s (pk, ck, v) VALUES ('listRemoves', 1, [1, 2, 3])", "UPDATE %s SET v = v + [4, 5, 6] WHERE pk = 'listRemoves' AND ck = 1", "UPDATE %s SET v = v - [1, 2, 3] WHERE pk = 'listRemoves' AND ck = 1", }, }, // Set test cases { schemaSets, "setOverwrites", []string{ "INSERT INTO %s (pk, ck, v) VALUES ('setOverwrites', 1, {1, 2, 3, 4})", "INSERT INTO %s (pk, ck, v) VALUES ('setOverwrites', 1, {4, 5, 6, 7})", "INSERT INTO %s (pk, ck, v) VALUES ('setOverwrites', 2, {8, 9, 10, 11})", "INSERT INTO %s (pk, ck, v) VALUES ('setOverwrites', 2, null)", "INSERT INTO %s (pk, ck, v) VALUES ('setOverwrites', 3, {12, 13, 14, 15})", "UPDATE %s SET v = null WHERE pk = 'setOverwrites' AND ck = 3", }, }, { schemaSets, "setAppends", []string{ "INSERT INTO %s (pk, ck, v) VALUES ('setAppends', 1, {1, 2, 3, 4})", "UPDATE %s SET v = v + {5, 6} WHERE pk = 'setAppends' AND ck = 1", "UPDATE %s SET v = v + {5, 6} WHERE pk = 'setAppends' AND ck = 2", }, }, { schemaSets, "setRemovals", []string{ "INSERT INTO %s (pk, ck, v) VALUES ('setRemovals', 1, {1, 2, 3, 4})", "UPDATE %s SET v = v - {1, 3} WHERE pk = 'setRemovals' AND ck = 1", "UPDATE %s SET v = v - {1138} WHERE pk = 'setRemovals' AND ck = 2", }, }, // Map test cases { schemaMaps, "mapOverwrites", []string{ "INSERT INTO %s (pk, ck, v) VALUES ('mapOverwrites', 1, {1: 2, 3: 4})", "INSERT INTO %s (pk, ck, v) VALUES ('mapOverwrites', 1, {5: 6, 7: 8})", "INSERT INTO %s (pk, ck, v) VALUES ('mapOverwrites', 
2, {9: 10, 11: 12})", "INSERT INTO %s (pk, ck, v) VALUES ('mapOverwrites', 2, null)", "INSERT INTO %s (pk, ck, v) VALUES ('mapOverwrites', 3, {13: 14, 15: 16})", "UPDATE %s SET v = null WHERE pk = 'mapOverwrites' AND ck = 3", }, }, { schemaMaps, "mapSets", []string{ "INSERT INTO %s (pk, ck, v) VALUES ('mapSets', 1, {1: 2, 3: 4, 5: 6})", "UPDATE %s SET v[1] = 42 WHERE pk = 'mapSets' AND ck = 1", "UPDATE %s SET v[3] = null WHERE pk = 'mapSets' AND ck = 1", "UPDATE %s SET v[3] = 123 WHERE pk = 'mapSets' AND ck = 1", "UPDATE %s SET v[5] = 321 WHERE pk = 'mapSets' AND ck = 2", }, }, { schemaMaps, "mapAppends", []string{ "INSERT INTO %s (pk, ck, v) VALUES ('mapAppends', 1, {1: 2, 3: 4})", "UPDATE %s SET v = v + {5: 6} WHERE pk = 'mapAppends' AND ck = 1", "UPDATE %s SET v = v + {5: 6} WHERE pk = 'mapAppends' AND ck = 2", }, }, { schemaMaps, "mapRemovals", []string{ "INSERT INTO %s (pk, ck, v) VALUES ('mapRemovals', 1, {1: 2, 3: 4})", "UPDATE %s SET v = v - {1} WHERE pk = 'mapRemovals' AND ck = 1", "UPDATE %s SET v = v - {1138} WHERE pk = 'mapRemovals' AND ck = 2", }, }, // Tuple test cases { schemaTuples, "tupleInserts", []string{ "INSERT INTO %s (pk, ck, v) VALUES ('tupleInserts', 1, (7, 'abc'))", "INSERT INTO %s (pk, ck, v) VALUES ('tupleInserts', 2, (9, 'def'))", "INSERT INTO %s (pk, ck, v) VALUES ('tupleInserts', 2, null)", }, }, { schemaTuples, "tupleUpdates", []string{ "INSERT INTO %s (pk, ck, v) VALUES ('tupleUpdates', 1, (7, 'abc'))", "INSERT INTO %s (pk, ck, v) VALUES ('tupleUpdates', 2, (9, 'def'))", "INSERT INTO %s (pk, ck, v) VALUES ('tupleUpdates', 3, (11, 'ghi'))", "INSERT INTO %s (pk, ck, v) VALUES ('tupleUpdates', 4, (13, 'jkl'))", "INSERT INTO %s (pk, ck, v) VALUES ('tupleUpdates', 5, (15, 'mno'))", "INSERT INTO %s (pk, ck, v) VALUES ('tupleUpdates', 6, (17, 'pqr'))", "INSERT INTO %s (pk, ck, v) VALUES ('tupleUpdates', 7, (19, 'stu'))", "UPDATE %s SET v = (111, 'zyx') WHERE pk = 'tupleUpdates' AND ck = 1", "UPDATE %s SET v = null WHERE pk = 'tupleUpdates' AND ck = 2", "INSERT INTO %s (pk, ck) VALUES ('tupleUpdates', 3)", "UPDATE %s SET v = (null, null) WHERE pk = 'tupleUpdates' AND ck = 4", "UPDATE %s SET v = (null, 'asdf') WHERE pk = 'tupleUpdates' AND ck = 5", "UPDATE %s SET v = (123, null) WHERE pk = 'tupleUpdates' AND ck = 6", "UPDATE %s SET v = (null, '') WHERE pk = 'tupleUpdates' AND ck = 7", }, }, { schemaTuplesInTuples, "tuplesInTuples", []string{ "INSERT INTO %s (pk, ck, v) VALUES ('tuplesInTuples', 1, ((1, 'abc'), 7))", "INSERT INTO %s (pk, ck, v) VALUES ('tuplesInTuples', 2, ((3, 'def'), 9))", "INSERT INTO %s (pk, ck, v) VALUES ('tuplesInTuples', 3, ((3, 'ghi'), 9))", "INSERT INTO %s (pk, ck, v) VALUES ('tuplesInTuples', 4, ((3, 'jkl'), 9))", "UPDATE %s SET v = ((100, 'zyx'), 111) WHERE pk = 'tuplesInTuples' AND ck = 1", "UPDATE %s SET v = null WHERE pk = 'tuplesInTuples' AND ck = 2", "UPDATE %s SET v = ((200, null), 999) WHERE pk = 'tuplesInTuples' AND ck = 3", "UPDATE %s SET v = ((300, ''), 333) WHERE pk = 'tuplesInTuples' AND ck = 4", }, }, { schemaTuplesInTuplesInTuples, "tuplesInTuplesInTuples", []string{ "INSERT INTO %s (pk, ck, v) VALUES ('tuplesInTuplesInTuples', 1, (((1, 9), 'abc'), 7))", "INSERT INTO %s (pk, ck, v) VALUES ('tuplesInTuplesInTuples', 2, (((3, 8), 'def'), 9))", "UPDATE %s SET v = (((100, 200), 'zyx'), 111) WHERE pk = 'tuplesInTuplesInTuples' AND ck = 1", "UPDATE %s SET v = null WHERE pk = 'tuplesInTuplesInTuples' AND ck = 2", "UPDATE %s SET v = (null, 123) WHERE pk = 'tuplesInTuplesInTuples' AND ck = 3", "UPDATE %s SET v = ((null, 'xyz'), 321) 
WHERE pk = 'tuplesInTuplesInTuples' AND ck = 4", }, }, // UDT test cases { schemaUDTs, "udt", []string{ "INSERT INTO %s (pk, ck, v) VALUES ('udt', 1, (2, 3, 'abc'))", "INSERT INTO %s (pk, ck, v) VALUES ('udt', 2, {a: 6, c: 'zxcv'})", "INSERT INTO %s (pk, ck, v) VALUES ('udt', 3, (9, 4, 'def'))", "INSERT INTO %s (pk, ck, v) VALUES ('udt', 4, (123, 321, 'ghi'))", "INSERT INTO %s (pk, ck, v) VALUES ('udt', 5, (333, 222, 'jkl'))", "INSERT INTO %s (pk, ck, v) VALUES ('udt', 6, (432, 678, 'mno'))", "INSERT INTO %s (pk, ck, v) VALUES ('udt', 7, (765, 345, 'pqr'))", "UPDATE %s SET v.b = 41414 WHERE pk = 'udt' AND ck = 2", "UPDATE %s SET v = null WHERE pk = 'udt' AND ck = 3", "UPDATE %s SET v = {b: 123456, c: 'tyu'} WHERE pk = 'udt' AND ck = 4", "INSERT INTO %s (pk, ck, v) VALUES ('udt', 5, (999, 888, 'zxc'))", "UPDATE %s SET v.c = null WHERE pk = 'udt' AND ck = 6", "UPDATE %s SET v = {a: 923, b: 123456, c: ''} WHERE pk = 'udt' AND ck = 7", }, }, } func TestReplicator(t *testing.T) { filter := os.Getenv("REPLICATOR_TEST_FILTER") if filter == "" { filter = ".*" } re := regexp.MustCompile(filter) // Collect all schemas schemas := make(map[string]string) for _, tc := range testCases { schemas[tc.schema.tableName] = tc.schema.createQuery } // TODO: Provide IPs from the env sourceSession := createSessionAndSetupSchema(t, sourceAddress, true, schemas) defer sourceSession.Close() destinationSession := createSessionAndSetupSchema(t, destinationAddress, false, schemas) defer destinationSession.Close() // Execute all of the queries for _, tc := range testCases { if !re.MatchString(tc.pk) { continue } for _, qStr := range tc.queries { execQuery(t, sourceSession, fmt.Sprintf(qStr, tc.schema.tableName)) } } t.Log("running replicators") adv := scyllacdc.AdvancedReaderConfig{ ChangeAgeLimit: time.Minute, PostQueryDelay: 3 * time.Second, PostFailedQueryDelay: 3 * time.Second, QueryTimeWindowSize: 5 * time.Minute, ConfidenceWindowSize: time.Millisecond, } schemaNames := make([]string, 0) for tbl := range schemas { schemaNames = append(schemaNames, tbl) } logger := log.New(os.Stderr, "", log.Ldate|log.Lmicroseconds|log.Lshortfile) replicator, err := newReplicator( context.Background(), sourceAddress, destinationAddress, schemaNames, &adv, gocql.Quorum, gocql.Quorum, "", logger, ) if err != nil { t.Fatal(err) } ctx := context.Background() errC := make(chan error) go func() { errC <- replicator.Run(ctx) }() time.Sleep(time.Second) replicator.StopAt(time.Now().Add(time.Second)) if err := <-errC; err != nil { t.Fatal(err) } t.Log("validating results") // Compare sourceSet := fetchFullSet(t, sourceSession, schemas) destinationSet := fetchFullSet(t, destinationSession, schemas) failedCount := 0 for _, tc := range testCases { sourceData := sourceSet[tc.pk] destinationData := destinationSet[tc.pk] if len(sourceData) != len(destinationData) { t.Logf( "%s: source len %d, destination len %d\n", tc.pk, len(sourceData), len(destinationData), ) t.Log(" source:") for _, row := range sourceData { t.Logf(" %v", row) } t.Log(" dest:") for _, row := range destinationData { t.Logf(" %v", row) } t.Fail() failedCount++ continue } failed := false for i := 0; i < len(sourceData); i++ { if !reflect.DeepEqual(sourceData[i], destinationData[i]) { t.Logf("%s: mismatch", tc.pk) t.Logf(" source: %v", sourceData[i]) t.Logf(" dest: %v", destinationData[i]) failed = true } } if failed { t.Fail() failedCount++ } else { t.Logf("%s: OK", tc.pk) } } if failedCount > 0 { t.Logf("failed %d/%d test cases", failedCount, len(testCases)) } } func 
createSessionAndSetupSchema(t *testing.T, addr string, withCdc bool, schemas map[string]string) *gocql.Session { cfg := gocql.NewCluster(addr) session, err := cfg.CreateSession() if err != nil { t.Fatal(err) } execQuery(t, session, "DROP KEYSPACE IF EXISTS ks") execQuery(t, session, "CREATE KEYSPACE ks WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}") for _, udt := range udts { execQuery(t, session, udt) } for _, tbl := range schemas { tblQuery := tbl if withCdc { tblQuery += " WITH cdc = {'enabled': true, 'preimage': true, 'postimage': true}" } execQuery(t, session, tblQuery) } err = session.AwaitSchemaAgreement(context.Background()) if err != nil { t.Fatal(err) } return session } func execQuery(t *testing.T, session *gocql.Session, query string) { t.Logf("executing query %s", query) err := session.Query(query).Exec() if err != nil { t.Fatal(err) } } func fetchFullSet(t *testing.T, session *gocql.Session, schemas map[string]string) map[string][]map[string]interface{} { groups := make(map[string][]map[string]interface{}) for tbl := range schemas { data, err := session.Query("SELECT * FROM " + tbl).Iter().SliceMap() if err != nil { t.Fatal(err) } for _, row := range data { pk := row["pk"].(string) groups[pk] = append(groups[pk], row) } } return groups }
[ "\"REPLICATOR_TEST_FILTER\"" ]
[]
[ "REPLICATOR_TEST_FILTER" ]
[]
["REPLICATOR_TEST_FILTER"]
go
1
0
cubanoshaciamiami/settings.py
import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) try: import pymysql pymysql.install_as_MySQLdb() except ImportError: pass import dotenv env_file = os.path.join(BASE_DIR, '.env') # dotenv.read_dotenv(env_file) DEBUG = bool(int(os.environ.get('DEBUG', '0'))) DEBUG=True TEMPLATE_DEBUG = DEBUG IN_DEV = bool(int(os.environ.get('IN_DEV', '0'))) ADMINS = ( ('edilio', '[email protected]'), ) MANAGERS = ADMINS DATABASES = { 'default': { 'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'. 'NAME': os.environ.get('NAME'), # Or path to database file if using sqlite3. 'USER': os.environ.get('USER_NAME'), # Not used with sqlite3. 'PASSWORD': os.environ.get('PASSWORD'), # Not used with sqlite3. 'HOST': os.environ.get('HOST', 'localhost'), # Set to empty string for localhost. Not used with sqlite3. 'PORT': os.environ.get('PORT', '3306'), # Set to empty string for default. Not used with sqlite3. } } # Local time zone for this installation. Choices can be found here: # http://www.postgresql.org/docs/8.1/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE # although not all variations may be possible on all operating systems. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'America/Chicago' # Language code for this installation. All choices can be found here: # http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes # http://blogs.law.harvard.edu/tech/stories/storyReader$15 LANGUAGE_CODE = 'es' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a # trailing slash. # Examples: "http://foo.com/media/", "/media/". ADMIN_MEDIA_PREFIX = '/adminmedia/' # Make this unique, and don't share it with anybody. SECRET_KEY = '!enoiltu93o6%ier+y^*zf9z#ie12x0+s5ozkhdf3)qwg%-*r)' # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', # 'django.template.loaders.eggs.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', # Uncomment the next line for simple clickjacking protection: # 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'cubanoshaciamiami.urls' TEMPLATE_DIRS = ( # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. 
os.path.join(BASE_DIR, "apps/services/templates/"), os.path.join(BASE_DIR, "apps/services/templates/adaptacion/"), os.path.join(BASE_DIR, "apps/services/templates/estabilidad/"), os.path.join(BASE_DIR, "apps/services/templates/presupuesto/"), ) print(TEMPLATE_DIRS) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.admin', 'django.contrib.staticfiles', 'apps.services', 'apps.ayudamemoria', 'apps.presupuesto', 'apps.django_monetize', ) MONETIZE_DEFAULT = ( 'django_monetize/amazon_search.html', ('amazon_search_terms','Django book'), ('amazon_search_title','Search for Django books!') ) MONETIZE_TARGET = { 'django':'django_monetize/paypal_donate.html', 'Author (Will Larson)':'django_monetize/amazon_honor.html', 'Author (Joe Somebody)':( 'django_monetize/amazon_honor.html', ('amazon_paypage','Joe Somebodys Amazon Honor Paypage url'), ), 'tutorial':{ 'header':'django_monetize/paypal_donate.html', 'footer':'django_monetize/amazon_omakase.html', None:( 'django_monetize/amazon_search.html', ('amazon_search_terms','JQuery'), ('amazon_search_title','Buy books on JQuery!'), ), }, } MONETIZE_CONTEXT = { 'amazon_affiliates_id':'your affiliates tracking id', 'amazon_paypage':'default amazon paypages url', 'paypal_business':'[email protected]', 'paypal_item_name':'www.cubanoshaciamiami.com', 'paypal_currency_code':'USD', 'paypal_tax':'0', 'paypal_lc':'US', 'paypal_bn':'PP-DonationsBF', 'paypal_image':'http://www.paypal.com/en_US/i/btn/btn_donate_LG.gif', } MEDIA_ROOT = os.path.join(BASE_DIR, "media") MEDIA_URL = '/media/' # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/var/www/example.com/static/" STATIC_ROOT = '/var/www/html/wmedia/cubanos/static/' # URL prefix for static files. # Example: "http://example.com/static/", "http://static.example.com/" STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) TEST_RUNNER = 'django.test.runner.DiscoverRunner'
[]
[]
[ "PORT", "HOST", "PASSWORD", "IN_DEV", "DEBUG", "USER_NAME", "NAME" ]
[]
["PORT", "HOST", "PASSWORD", "IN_DEV", "DEBUG", "USER_NAME", "NAME"]
python
7
0
spatial_exps/mnist_eval.py
""" Evaluation of a given checkpoint in the standard and adversarial sense. Can be called as an infinite loop going through the checkpoints in the model directory as they appear and evaluating them. Accuracy and average loss are printed and added as tensorboard summaries. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse from datetime import datetime import json import math import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' import sys import time import numpy as np import tensorflow as tf from tqdm import trange import utils.mnist_input as mnist_input import models.resnet as resnet from attacks.spatial_attack import SpatialAttack import utils.utilities as utilities import models.small_cnn as small_cnn # A function for evaluating a single checkpoint def evaluate(model, attack, sess, config, summary_writer=None): num_eval_examples = config.eval.num_eval_examples # num_eval_examples = config.eval.batch_size eval_batch_size = config.eval.batch_size data_path = config.data.data_path model_dir = config.model.output_dir # Setting up the Tensorboard and checkpoint outputs if not os.path.exists(model_dir): os.makedirs(model_dir) cifar = mnist_input.MNISTData(data_path) global_step = tf.train.get_or_create_global_step() # Iterate over the samples batch-by-batch num_batches = int(math.ceil(num_eval_examples / eval_batch_size)) total_xent_nat = 0. total_xent_adv = 0. total_corr_nat = 0 total_corr_adv = 0 for ibatch in trange(num_batches): bstart = ibatch * eval_batch_size bend = min(bstart + eval_batch_size, num_eval_examples) x_batch = cifar.eval_data.xs[bstart:bend, :] y_batch = cifar.eval_data.ys[bstart:bend] noop_trans = np.zeros([len(x_batch), 3]) if config.eval.adversarial_eval: x_batch_adv, adv_trans = attack.perturb(x_batch, y_batch, sess) else: x_batch_adv, adv_trans = x_batch, noop_trans if config.eval.st_adv: dict_nat = {model.x_input: x_batch, model.y_input: y_batch, model.transform: noop_trans, model.weights: [1. for i in range(len(x_batch))], model.is_training: False, model.flows: np.zeros((len(x_batch), 2, 32, 32))} dict_adv = {model.x_input: x_batch_adv, model.y_input: y_batch, model.transform: adv_trans, model.weights: [1. for i in range(len(x_batch_adv))], model.is_training: False, model.flows: np.zeros((len(x_batch), 2, 32, 32))} else: dict_nat = {model.x_input: x_batch, model.y_input: y_batch, model.transform: noop_trans, model.weights: [1. for i in range(len(x_batch))], model.is_training: False} dict_adv = {model.x_input: x_batch_adv, model.y_input: y_batch, model.transform: adv_trans, model.weights: [1. 
for i in range(len(x_batch_adv))], model.is_training: False} cur_corr_nat, cur_xent_nat = sess.run([model.num_correct, model.xent], feed_dict = dict_nat) cur_corr_adv, cur_xent_adv = sess.run([model.num_correct, model.xent], feed_dict = dict_adv) total_xent_nat += cur_xent_nat total_xent_adv += cur_xent_adv total_corr_nat += cur_corr_nat total_corr_adv += cur_corr_adv avg_xent_nat = total_xent_nat / num_eval_examples avg_xent_adv = total_xent_adv / num_eval_examples acc_nat = total_corr_nat / num_eval_examples acc_adv = total_corr_adv / num_eval_examples if summary_writer: summary = tf.Summary(value=[ tf.Summary.Value(tag='xent_adv_eval', simple_value= avg_xent_adv), tf.Summary.Value(tag='xent_nat_eval', simple_value= avg_xent_nat), tf.Summary.Value(tag='xent_adv', simple_value= avg_xent_adv), tf.Summary.Value(tag='xent_nat', simple_value= avg_xent_nat), tf.Summary.Value(tag='accuracy_adv_eval', simple_value= acc_adv), tf.Summary.Value(tag='accuracy_nat_eval', simple_value= acc_nat), tf.Summary.Value(tag='accuracy_adv', simple_value= acc_adv), tf.Summary.Value(tag='accuracy_nat', simple_value= acc_nat)]) summary_writer.add_summary(summary, global_step.eval(sess)) step = global_step.eval(sess) print('Eval at step: {}'.format(step)) print(' natural: {:.2f}%'.format(100 * acc_nat)) print(' adversarial: {:.2f}%'.format(100 * acc_adv)) print(' avg nat xent: {:.4f}'.format(avg_xent_nat)) print(' avg adv xent: {:.4f}'.format(avg_xent_adv)) result = {'nat': '{:.2f}%'.format(100 * acc_nat), 'adv': '{:.2f}%'.format(100 * acc_adv)} with open('job_result.json', 'w') as result_file: json.dump(result, result_file, sort_keys=True, indent=4) def loop(model, attack, config, summary_writer=None): last_checkpoint_filename = '' already_seen_state = False model_dir = config.model.output_dir saver = tf.train.Saver() while True: cur_checkpoint = tf.train.latest_checkpoint(model_dir) # Case 1: No checkpoint yet if cur_checkpoint is None: if not already_seen_state: print('No checkpoint yet, waiting ...', end='') already_seen_state = True else: print('.', end='') sys.stdout.flush() time.sleep(10) # Case 2: Previously unseen checkpoint elif cur_checkpoint != last_checkpoint_filename: print('\nCheckpoint {}, evaluating ... ({})'.format(cur_checkpoint, datetime.now())) sys.stdout.flush() last_checkpoint_filename = cur_checkpoint already_seen_state = False with tf.Session() as sess: # Restore the checkpoint saver.restore(sess, cur_checkpoint) evaluate(model, attack, sess, config, summary_writer) # Case 3: Previously evaluated checkpoint else: if not already_seen_state: print('Waiting for the next checkpoint ... 
({}) '.format( datetime.now()), end='') already_seen_state = True else: print('.', end='') sys.stdout.flush() time.sleep(10) if __name__ == "__main__": parser = argparse.ArgumentParser( description='Eval script options', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('-c', '--config', type=str, help='path to config file', default="config.json", required=False) parser.add_argument('--loop', help='continuously monitor model_dir' 'evaluating new ckpt', action="store_true") args = parser.parse_args() config_dict = utilities.get_config(args.config) config = utilities.config_to_namedtuple(config_dict) model = small_cnn.Model(config.model,config.eval.st_adv) model_dir = config.model.output_dir global_step = tf.train.get_or_create_global_step() if config.eval.st_adv: attack = StadvAttack(model, config) else: attack = SpatialAttack(model, config.attack) if args.loop: eval_dir = os.path.join(model_dir, 'eval') if not os.path.exists(eval_dir): os.makedirs(eval_dir) summary_writer = tf.summary.FileWriter(eval_dir) loop(model, attack, config, summary_writer) else: saver = tf.train.Saver() cur_checkpoint = tf.train.latest_checkpoint(model_dir) if cur_checkpoint is None: print('No checkpoint found.') else: with tf.Session() as sess: # Restore the checkpoint attack.limits = config.attack.spatial_limits; t1, t2, r = attack.limits; attack.granularity = config.attack.grid_granularity; gt1, gt2, gr = attack.granularity print('Evaluating checkpoint {}'.format(cur_checkpoint)) saver.restore(sess, cur_checkpoint) print('############## Evaluating RAND ##############') evaluate(model, attack, sess, config) print('############## Evaluating GRID ##############') attack.method = 'grid' evaluate(model, attack, sess, config) print('############## Evaluating RAND.T ##############') attack.method = 'random'; attack.limits = np.array([t1, t2, 0]); attack.granularity = np.array([gt1, gt2, 1]) evaluate(model, attack, sess, config) print('############## Evaluating GRID.T ##############') attack.method = 'grid' evaluate(model, attack, sess,config) print('############## Evaluating RAND.R ##############') attack.method = 'random'; attack.limits = np.array([0, 0, r]); attack.granularity = np.array([1, 1, gr]) evaluate(model, attack, sess,config) print('############## Evaluating GRID.R ##############') attack.method = 'grid'; evaluate(model, attack, sess,config)
[]
[]
[ "TF_CPP_MIN_LOG_LEVEL" ]
[]
["TF_CPP_MIN_LOG_LEVEL"]
python
1
0
mi/platform/rsn/simulator/process_util.py
#!/usr/bin/env python """ @package ion.agents.platform.rsn.simulator.process_util @file ion/agents/platform/rsn/simulator/process_util.py @author Carlos Rueda @brief Utility to launch/shutdown the RSN OMS simulator as an external process. (elements adapted/simplified from driver_process.py) simple test: bin/python ion/agents/platform/rsn/simulator/process_util.py """ __author__ = 'Carlos Rueda' __license__ = 'Apache 2.0' from mi.platform.rsn.oms_client_factory import CIOMSClientFactory from ooi.logging import log import os import subprocess import signal from gevent import sleep _PYTHON_PATH = 'python' _PROGRAM = "mi/platform/rsn/simulator/oms_simulator_server.py" _COMMAND = [_PYTHON_PATH, _PROGRAM, "--port", "0"] # note: "--port 0" to bind the simulator to a newly generated port. class ProcessUtil(object): """ Utility to launch/shutdown the RSN OMS simulator as an external process. """ def __init__(self): self._process = None self._rsn_oms = None def launch(self): """ Launches the simulator process as indicated by _COMMAND. @return (rsn_oms, uri) A pair with the CIOMSSimulator instance and the associated URI to establish connection with it. """ log.debug("[OMSim] Launching: %s", _COMMAND) self._process = self._spawn(_COMMAND) if not self._process or not self.poll(): msg = "[OMSim] Failed to launch simulator: %s" % _COMMAND log.error(msg) raise Exception(msg) log.debug("[OMSim] process started, pid: %s", self.getpid()) # give it some time to start up sleep(5) # get URI: uri = None with open("logs/rsn_oms_simulator.yml", buffering=1) as f: # we expect one of the first few lines to be of the form: # rsn_oms_simulator_uri=xxxx # where xxxx is the uri -- see oms_simulator_server. while uri is None: line = f.readline() if line.index("rsn_oms_simulator_uri=") == 0: uri = line[len("rsn_oms_simulator_uri="):].strip() self._rsn_oms = CIOMSClientFactory.create_instance(uri) return self._rsn_oms, uri def stop(self): """ Stop the process. """ if self._rsn_oms is not None: log.debug("[OMSim] x_exit_simulator -> %r", self._rsn_oms.x_exit_simulator()) if self._process: try: log.debug("[OMSim] terminating process %s", self._process.pid) self._process.send_signal(signal.SIGINT) log.debug("[OMSim] waiting process %s", self._process.pid) self._process.wait() log.debug("[OMSim] process killed") except OSError: log.warn("[OMSim] Could not stop process, pid: %s" % self._process.pid) sleep(4) self._process = None self._rsn_oms = None def poll(self): """ Check to see if the process is alive. @return true if process is running, false otherwise """ # The Popen.poll() doesn't seem to be returning reliable results. # Sending a signal 0 to the process might be more reliable. if not self._process: return False try: os.kill(self._process.pid, 0) except OSError: log.warn("[OMSim] Could not send a signal to the process, pid: %s" % self._process.pid) return False return True def getpid(self): """ Get the pid of the current running process and ensure that it is running. @returns the pid of the driver process if it is running, otherwise None """ if self._process: if self.poll(): return self._process.pid else: log.warn("[OMSim] process found, but poll failed for pid %s", self._process.pid) else: return None def _spawn(self, spawnargs): """ Launch a process using popen @param spawnargs a list of arguments for the Popen command line. The first argument must be a path to a program and arguments much be in additional list elements. 
@return subprocess.Popen object """ log.info('spawnargs: %s', spawnargs) return subprocess.Popen(spawnargs, env=os.environ, close_fds=True) def _test(): # pragma: no cover sim_process = ProcessUtil() for _ in range(2): rsn_oms, uri = sim_process.launch() print("rsn_oms_simulator_uri = %s" % uri) print("ping -> %r" % rsn_oms.ping()) print("get_platform_map -> %r" % rsn_oms.get_platform_map()) sim_process.stop() # test using nosetest: # bin/nosetests -s ion/agents/platform/rsn/simulator/process_util.py # commented out; this was for preliminary testing. # # from pyon.util.int_test import IonIntegrationTestCase # class BaseIntTestPlatform(IonIntegrationTestCase): # pragma: no cover # def test(self): # _test() # Main program if __name__ == "__main__": # pragma: no cover _test()
[]
[]
[]
[]
[]
python
0
0
PythonVirtEnv/Lib/site-packages/win32/lib/regcheck.py
# This module is very old and useless in this day and age! It will be # removed in a few years (ie, 2009 or so...) import warnings warnings.warn("The regcheck module has been pending deprecation since build 210", category=PendingDeprecationWarning) import win32con import regutil import win32api import os import sys def CheckRegisteredExe(exename): try: os.stat(win32api.RegQueryValue(regutil.GetRootKey() , regutil.GetAppPathsKey() + "\\" + exename)) # except SystemError: except (os.error,win32api.error): print("Registration of %s - Not registered correctly" % exename) def CheckPathString(pathString): for path in pathString.split(";"): if not os.path.isdir(path): return "'%s' is not a valid directory!" % path return None def CheckPythonPaths(verbose): if verbose: print("Python Paths:") # Check the core path if verbose: print("\tCore Path:", end=' ') try: appPath = win32api.RegQueryValue(regutil.GetRootKey(), regutil.BuildDefaultPythonKey() + "\\PythonPath") except win32api.error as exc: print("** does not exist - ", exc.strerror) problem = CheckPathString(appPath) if problem: print(problem) else: if verbose: print(appPath) key = win32api.RegOpenKey(regutil.GetRootKey(), regutil.BuildDefaultPythonKey() + "\\PythonPath", 0, win32con.KEY_READ) try: keyNo = 0 while 1: try: appName = win32api.RegEnumKey(key, keyNo) appPath = win32api.RegQueryValue(key, appName) if verbose: print("\t"+appName+":", end=' ') if appPath: problem = CheckPathString(appPath) if problem: print(problem) else: if verbose: print(appPath) else: if verbose: print("(empty)") keyNo = keyNo + 1 except win32api.error: break finally: win32api.RegCloseKey(key) def CheckHelpFiles(verbose): if verbose: print("Help Files:") try: key = win32api.RegOpenKey(regutil.GetRootKey(), regutil.BuildDefaultPythonKey() + "\\Help", 0, win32con.KEY_READ) except win32api.error as exc: import winerror if exc.winerror!=winerror.ERROR_FILE_NOT_FOUND: raise return try: keyNo = 0 while 1: try: helpDesc = win32api.RegEnumKey(key, keyNo) helpFile = win32api.RegQueryValue(key, helpDesc) if verbose: print("\t"+helpDesc+":", end=' ') # query the os section. try: os.stat(helpFile ) if verbose: print(helpFile) except os.error: print("** Help file %s does not exist" % helpFile) keyNo = keyNo + 1 except win32api.error as exc: import winerror if exc.winerror!=winerror.ERROR_NO_MORE_ITEMS: raise break finally: win32api.RegCloseKey(key) def CheckRegisteredModules(verbose): # Check out all registered modules. k=regutil.BuildDefaultPythonKey() + "\\Modules" try: keyhandle = win32api.RegOpenKey(regutil.GetRootKey(), k) print("WARNING: 'Modules' registry entry is deprectated and evil!") except win32api.error as exc: import winerror if exc.winerror!=winerror.ERROR_FILE_NOT_FOUND: raise return def CheckRegistry(verbose=0): # check the registered modules if verbose and 'pythonpath' in os.environ: print("Warning - PythonPath in environment - please check it!") # Check out all paths on sys.path CheckPythonPaths(verbose) CheckHelpFiles(verbose) CheckRegisteredModules(verbose) CheckRegisteredExe("Python.exe") if __name__=='__main__': if len(sys.argv)>1 and sys.argv[1]=='-q': verbose = 0 else: verbose = 1 CheckRegistry(verbose)
[]
[]
[]
[]
[]
python
0
0
pkg/firestore/transactional_test.go
package firestore_test import ( "context" "os" "testing" "time" stdFirestore "cloud.google.com/go/firestore" "github.com/ThreeDotsLabs/watermill" "github.com/ThreeDotsLabs/watermill-firestore/pkg/firestore" "github.com/ThreeDotsLabs/watermill/message" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestTransactionalPublisher(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() projectID := os.Getenv("FIRESTORE_PROJECT_ID") client, err := stdFirestore.NewClient(ctx, projectID) require.NoError(t, err) logger := watermill.NewStdLogger(true, true) topic := "transactional_publisher_test_" + watermill.NewShortUUID() subscriber, err := firestore.NewSubscriber(firestore.SubscriberConfig{ ProjectID: projectID, }, logger) require.NoError(t, err) msgs, err := subscriber.Subscribe(ctx, topic) require.NoError(t, err) msg := message.NewMessage(watermill.NewUUID(), message.Payload("payload")) msg.Metadata = message.Metadata{"key": "value"} err = client.RunTransaction(ctx, func(ctx context.Context, tx *stdFirestore.Transaction) error { publisher, err := firestore.NewTransactionalPublisher(firestore.PublisherConfig{ ProjectID: projectID, }, tx, logger) require.NoError(t, err) err = publisher.Publish(topic, msg) require.NoError(t, err) return nil }) require.NoError(t, err) receivedMsg := <-msgs require.NotNil(t, receivedMsg) assert.Equal(t, msg.UUID, receivedMsg.UUID) assert.Equal(t, msg.Payload, receivedMsg.Payload) assert.Equal(t, msg.Metadata, receivedMsg.Metadata) }
[ "\"FIRESTORE_PROJECT_ID\"" ]
[]
[ "FIRESTORE_PROJECT_ID" ]
[]
["FIRESTORE_PROJECT_ID"]
go
1
0
main.go
package main import ( "encoding/json" "errors" "fmt" "io/ioutil" "net/url" "os" "os/exec" "path/filepath" "strconv" "sync" "time" log "github.com/Sirupsen/logrus" "github.com/daneshih1125/docker-volume-freenas/freenas" "github.com/daneshih1125/docker-volume-freenas/utils" "github.com/docker/go-plugins-helpers/volume" ) const socketAddress = "/run/docker/plugins/freenas.sock" const iscsiService = "iscsitarget" type FreeNASISCSIVolume struct { Size int Name string Mountpoint string TargetID int ExtentID int TargetGroupID int TargetToExtentID int PoolName string connections int } type FreeNASISCSIDriver struct { sync.RWMutex root string statePath string url string hostname string username string password string volumes map[string]*FreeNASISCSIVolume freenas *freenas.FreeNAS freenasPortal int } func newFreeNASISCSIDriver(root, furl, username, password string) (*FreeNASISCSIDriver, error) { log.WithField("method", "new driver").Debug(root) fi, err := os.Lstat(root) if os.IsNotExist(err) { return nil, errors.New(fmt.Sprintf("%s is not exist", root)) } else if err != nil { return nil, err } if fi != nil && !fi.IsDir() { return nil, errors.New(fmt.Sprintf("%s already exist and it's not a directory", root)) } d := &FreeNASISCSIDriver{ root: filepath.Join(root, "volumes"), url: furl, username: username, password: password, statePath: filepath.Join(root, "freenas-state.json"), volumes: map[string]*FreeNASISCSIVolume{}, } u, err := url.Parse(d.url) if err != nil { return nil, err } d.hostname = u.Hostname() d.freenas = freenas.NewFreeNAS(d.url, d.username, d.password) iscsiSrv, err := d.freenas.ServicStatus(iscsiService) if iscsiSrv.Status == false { _, err = d.freenas.UpdateService(iscsiService, true) if err != nil { return nil, err } } portals, err := d.freenas.GetISCSIPortalList() if err != nil { return nil, err } allowAny := false for _, p := range portals { for _, ip := range p.IPs { if ip == "0.0.0.0:3260" { allowAny = true break } } if allowAny == true { d.freenasPortal = p.ID break } } if allowAny == false { p, err := d.freenas.CreateISCSIPortal([]string{"0.0.0.0:3260"}) if err != nil { return nil, err } d.freenasPortal = p.ID } log.WithField("freenasPortal", d.freenasPortal).Info("0.0.0.0:3260 portal ID") data, err := ioutil.ReadFile(d.statePath) if err != nil { if os.IsNotExist(err) { log.WithField("statePath", d.statePath).Debug("no state found") } else { return nil, err } } else { if err := json.Unmarshal(data, &d.volumes); err != nil { return nil, err } } return d, nil } func (d *FreeNASISCSIDriver) saveState() { data, err := json.Marshal(d.volumes) if err != nil { log.WithField("statePath", d.statePath).Error(err) return } if err := ioutil.WriteFile(d.statePath, data, 0644); err != nil { log.WithField("savestate", d.statePath).Error(err) } } func (d *FreeNASISCSIDriver) Create(r *volume.CreateRequest) error { log.WithField("method", "create").Debugf("%#v", r) d.Lock() defer d.Unlock() v := &FreeNASISCSIVolume{} for key, val := range r.Options { if key == "size" { v.Size, _ = strconv.Atoi(val) } } if v.Size == 0 { return errors.New("Invalid size value") } // find the volume that has maximum available size volume := freenas.Volume{} freeVols, err := d.freenas.GetVolumeList() for _, vol := range freeVols { if vol.Avail > volume.Avail { volume = vol } } if volume.Avail < 1024*1024*1024*v.Size { return errors.New("Insufficient volume size") } // FreeNAS iscsi volume name v.Name = "docker-" + r.Name v.PoolName = volume.Name // Create ZVOL _, err = d.freenas.CreateZFSVolume(volume.Name, v.Name, 
v.Size) if err != nil { return err } // Create iSCSI target target, err := d.freenas.CreateISCSITarget(v.Name) if err != nil { return err } v.TargetID = target.ID // Create iSCSI target group tgroup, err := d.freenas.CreateISCSITargetGroup(target.ID, d.freenasPortal) if err != nil { return err } v.TargetGroupID = tgroup.ID // Create iSCSI extent extent, err := d.freenas.CreateISCSIExtent(v.Name, volume.Name, v.Name) if err != nil { return err } v.ExtentID = extent.ID // Create iSCSI target to extent targettoextent, err := d.freenas.CreateISCSITargetToExtent(target.ID, extent.ID) if err != nil { return err } v.TargetToExtentID = targettoextent.ID v.Mountpoint = filepath.Join(d.root, r.Name) if err != nil { return err } d.volumes[r.Name] = v d.saveState() return nil } func (d *FreeNASISCSIDriver) List() (*volume.ListResponse, error) { log.WithField("method", "list").Debugf("") d.Lock() defer d.Unlock() var vols []*volume.Volume for name, v := range d.volumes { vols = append(vols, &volume.Volume{Name: name, Mountpoint: v.Mountpoint}) } return &volume.ListResponse{Volumes: vols}, nil } func (d *FreeNASISCSIDriver) Get(r *volume.GetRequest) (*volume.GetResponse, error) { log.WithField("method", "get").Debugf("%#v", r) d.Lock() defer d.Unlock() v, ok := d.volumes[r.Name] if !ok { return &volume.GetResponse{}, errors.New("volume not found") } return &volume.GetResponse{Volume: &volume.Volume{Name: r.Name, Mountpoint: v.Mountpoint}}, nil } func (d *FreeNASISCSIDriver) Remove(r *volume.RemoveRequest) error { log.WithField("method", "remove").Debugf("%#v", r) d.Lock() defer d.Unlock() v, ok := d.volumes[r.Name] if !ok { return errors.New("Volume not found") } if v.connections != 0 { return errors.New(fmt.Sprintf("volume %s is currently used by a container", r.Name)) } d.freenas.DeleteISCSITargetToExtent(v.TargetToExtentID) d.freenas.DeleteISCSIExtent(v.ExtentID) d.freenas.DeleteISCSITargetGroup(v.TargetGroupID) d.freenas.DeleteISCSITarget(v.TargetID) d.freenas.DeleteZFSVolume(v.PoolName, v.Name) delete(d.volumes, r.Name) d.saveState() return nil } func (d *FreeNASISCSIDriver) Path(r *volume.PathRequest) (*volume.PathResponse, error) { log.WithField("method", "path").Debugf("%#v", r) d.RLock() defer d.RUnlock() v, ok := d.volumes[r.Name] if !ok { return &volume.PathResponse{}, errors.New("volume not found") } return &volume.PathResponse{Mountpoint: v.Mountpoint}, nil } func (d *FreeNASISCSIDriver) mountVolume(v *FreeNASISCSIVolume) error { iqn, err := utils.FindISCSIIQN(d.hostname, v.Name) if err != nil { return err } utils.LoginISCSITarget(iqn) diskpath, err := utils.GetISCSIDiskPath(d.hostname, v.Name) if err != nil { return err } for i := 0; i < 5; i++ { if _, err := os.Stat(diskpath); os.IsNotExist(err) { time.Sleep(time.Second) continue } else { break } } if utils.GetBlkDevType(diskpath) != "xfs" { err = utils.FormatXFS(diskpath) } if err != nil { return err } cmd := fmt.Sprintf("mount %s %s", diskpath, v.Mountpoint) return exec.Command("sh", "-c", cmd).Run() } func (d *FreeNASISCSIDriver) Mount(r *volume.MountRequest) (*volume.MountResponse, error) { log.WithField("method", "mount").Debugf("%#v", r) d.Lock() defer d.Unlock() v, ok := d.volumes[r.Name] if !ok { log.Fatal("Volume not fount") return &volume.MountResponse{}, errors.New("Volume not found") } if v.connections == 0 { fi, err := os.Lstat(v.Mountpoint) if os.IsNotExist(err) { if err := os.MkdirAll(v.Mountpoint, 0755); err != nil { log.Fatal("Failed to mkdir") return &volume.MountResponse{}, err } } else if err != nil { log.Fatal("other 
error") return &volume.MountResponse{}, err } if fi != nil && !fi.IsDir() { log.Fatal("not dir") return &volume.MountResponse{}, errors.New("already exist and it's not a directory") } if err := d.mountVolume(v); err != nil { return &volume.MountResponse{}, err } } v.connections++ return &volume.MountResponse{Mountpoint: v.Mountpoint}, nil } func (d *FreeNASISCSIDriver) unmountVolume(v *FreeNASISCSIVolume) error { iqn, err := utils.FindISCSIIQN(d.hostname, v.Name) cmd := fmt.Sprintf("umount %s", v.Mountpoint) err = exec.Command("sh", "-c", cmd).Run() if err != nil { return err } return utils.LogoutISCSITarget(iqn) } func (d *FreeNASISCSIDriver) Unmount(r *volume.UnmountRequest) error { log.WithField("method", "unmount").Debugf("%#v", r) d.Lock() defer d.Unlock() v, ok := d.volumes[r.Name] if !ok { return errors.New("volume not found") } v.connections-- if v.connections <= 0 { if err := d.unmountVolume(v); err != nil { return err } v.connections = 0 } return nil } func (d *FreeNASISCSIDriver) Capabilities() *volume.CapabilitiesResponse { log.WithField("method", "capabilities").Debugf("") return &volume.CapabilitiesResponse{ Capabilities: volume.Capability{Scope: "local"}, } } func main() { apiURL := os.Getenv("FREENAS_API_URL") apiUsername := os.Getenv("FREENAS_API_USER") apiPassword := os.Getenv("FREENAS_API_PASSWORD") if apiURL == "" || apiUsername == "" || apiPassword == "" { log.Fatal("Invalid environment variables: FREENAS_API_URL, FREENAS_API_USER, FREENAS_API_PASSWORD") } d, err := newFreeNASISCSIDriver("/mnt/freenas", apiURL, apiUsername, apiPassword) if err != nil { log.Fatal(err) } h := volume.NewHandler(d) log.SetLevel(log.DebugLevel) log.Infof("listening on %s", socketAddress) log.Error(h.ServeUnix(socketAddress, 0)) }
[ "\"FREENAS_API_URL\"", "\"FREENAS_API_USER\"", "\"FREENAS_API_PASSWORD\"" ]
[]
[ "FREENAS_API_USER", "FREENAS_API_PASSWORD", "FREENAS_API_URL" ]
[]
["FREENAS_API_USER", "FREENAS_API_PASSWORD", "FREENAS_API_URL"]
go
3
0
concepts/concepts_service_test.go
//go:build integration // +build integration package concepts import ( "encoding/json" "errors" "fmt" "io/ioutil" "os" "path/filepath" "reflect" "sort" "strconv" "strings" "testing" "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/jmcvetta/neoism" "github.com/mitchellh/hashstructure" "github.com/stretchr/testify/assert" logger "github.com/Financial-Times/go-logger" "github.com/Financial-Times/neo-utils-go/neoutils" "github.com/Financial-Times/concepts-rw-neo4j/ontology" ) //all uuids to be cleaned from DB const ( basicConceptUUID = "bbc4f575-edb3-4f51-92f0-5ce6c708d1ea" anotherBasicConceptUUID = "4c41f314-4548-4fb6-ac48-4618fcbfa84c" yetAnotherBasicConceptUUID = "f7e3fe2d-7496-4d42-b19f-378094efd263" simpleSmartlogicTopicUUID = "abd38d90-2152-11e8-9ac1-da24cd01f044" parentUUID = "2ef39c2a-da9c-4263-8209-ebfd490d3101" boardRoleUUID = "aa9ef631-c025-43b2-b0ce-d78d394cc6e6" membershipRoleUUID = "f807193d-337b-412f-b32c-afa14b385819" organisationUUID = "7f40d291-b3cb-47c4-9bce-18413e9350cf" personUUID = "35946807-0205-4fc1-8516-bb1ae141659b" financialInstrumentUUID = "475b7b59-66d5-47e2-a273-adc3d1ba8286" financialInstrumentSameIssuerUUID = "08c6066c-9356-4e96-abd5-9a4f3726724a" financialOrgUUID = "4290f028-05e9-4c2d-9f11-61ec59ba081a" anotherFinancialOrgUUID = "230e3a74-694a-4d94-8294-6a45ec1ced26" membershipUUID = "cbadd9a7-5da9-407a-a5ec-e379460991f2" anotherOrganisationUUID = "7ccf2673-2ec0-4b42-b69e-9a2460b945c6" anotherPersonUUID = "69a8e241-2bfb-4aed-a441-8489d813c5f7" testOrgUUID = "c28fa0b4-4245-11e8-842f-0ed5f89f718b" parentOrgUUID = "c001ee9c-94c5-11e8-8f42-da24cd01f044" locationUUID = "82cba3ce-329b-3010-b29d-4282a215889f" anotherLocationUUID = "6b683eff-56c3-43d9-acfc-7511d974fc01" organisationWithNAICSUUID = "b4ddd5a5-0b6c-4dc2-bb75-3eb40c1b05ed" naicsIndustryClassificationUUID = "38ee195d-ebdd-48a9-af4b-c8a322e7b04d" naicsIndustryClassificationAnotherUUID = "49da878c-67ce-4343-9a09-a4a767e584a2" supersededByUUID = "1a96ee7a-a4af-3a56-852c-60420b0b8da6" sourceID1 = "74c94c35-e16b-4527-8ef1-c8bcdcc8f05b" sourceID2 = "de3bcb30-992c-424e-8891-73f5bd9a7d3a" sourceID3 = "5b1d8c31-dfe4-4326-b6a9-6227cb59af1f" unknownThingUUID = "b5d7c6b5-db7d-4bce-9d6a-f62195571f92" anotherUnknownThingUUID = "a4fe339d-664f-4609-9fe0-dd3ec6efe87e" brandUUID = "cce1bc63-3717-4ae6-9399-88dab5966815" anotherBrandUUID = "21b4bdb5-25ca-4705-af5f-519b279f4764" yetAnotherBrandUUID = "2d3e16e0-61cb-4322-8aff-3b01c59f4dab" topicUUID = "740c604b-8d97-443e-be70-33de6f1d6e67" anotherTopicUUID = "2e7429bd-7a84-41cb-a619-2c702893e359" conceptHasFocusUUID = "a39a4558-f562-4dca-8774-000246e6eebe" anotherConceptHasFocusUUID = "2abff0bd-544d-31c3-899b-fba2f60d53dd" ) var ( membershipRole = ontology.MembershipRole{ RoleUUID: "f807193d-337b-412f-b32c-afa14b385819", InceptionDate: "2016-01-01", TerminationDate: "2017-02-02", } anotherMembershipRole = ontology.MembershipRole{ RoleUUID: "fe94adc6-ca44-438f-ad8f-0188d4a74987", InceptionDate: "2011-06-27", } ) //Reusable Neo4J connection var db neoutils.NeoConnection //Concept Service under test var conceptsDriver ConceptService var emptyList []string func helperLoadBytes(t *testing.T, name string) []byte { path := filepath.Join("testdata", name) bytes, err := ioutil.ReadFile(path) if err != nil { t.Fatal(err) } return bytes } // A lone concept should always have matching pref labels and uuid at the src system level and the top level - We are // currently missing validation around this func getAggregatedConcept(t *testing.T, name string) 
ontology.AggregatedConcept { ac := ontology.AggregatedConcept{} err := json.Unmarshal(helperLoadBytes(t, name), &ac) if err != nil { t.Fatal(err) } return ac } func getOrganisationWithAllCountries() ontology.AggregatedConcept { return ontology.AggregatedConcept{ PrefUUID: testOrgUUID, Type: "PublicCompany", ProperName: "Strix Group Plc", PrefLabel: "Strix Group Plc", ShortName: "Strix Group", TradeNames: []string{ "STRIX GROUP PLC", }, FormerNames: []string{ "Castletown Thermostats", "Steam Plc", }, Aliases: []string{ "Strix Group Plc", "STRIX GROUP PLC", "Strix Group", "Castletown Thermostats", "Steam Plc", }, CountryCode: "BG", CountryOfIncorporation: "GB", CountryOfOperations: "FR", CountryOfRisk: "BG", PostalCode: "IM9 2RG", YearFounded: 1951, EmailAddress: "[email protected]", LeiCode: "213800KZEW5W6BZMNT62", SourceRepresentations: []ontology.Concept{ { UUID: testOrgUUID, Type: "PublicCompany", Authority: "FACTSET", AuthorityValue: "B000BB-S", ProperName: "Strix Group Plc", PrefLabel: "Strix Group Plc", ShortName: "Strix Group", TradeNames: []string{ "STRIX GROUP PLC", }, FormerNames: []string{ "Castletown Thermostats", "Steam Plc", }, Aliases: []string{ "Strix Group Plc", "STRIX GROUP PLC", "Strix Group", "Castletown Thermostats", "Steam Plc", }, CountryCode: "BG", CountryOfIncorporation: "GB", CountryOfOperations: "FR", CountryOfRisk: "BG", CountryOfIncorporationUUID: locationUUID, CountryOfOperationsUUID: locationUUID, CountryOfRiskUUID: anotherLocationUUID, PostalCode: "IM9 2RG", YearFounded: 1951, EmailAddress: "[email protected]", LeiCode: "213800KZEW5W6BZMNT62", ParentOrganisation: parentOrgUUID, }, }, } } func getConcept(t *testing.T, name string) ontology.Concept { c := ontology.Concept{} err := json.Unmarshal(helperLoadBytes(t, name), &c) if err != nil { t.Fatal(err) } return c } func getLocation() ontology.AggregatedConcept { return ontology.AggregatedConcept{ PrefUUID: locationUUID, PrefLabel: "Location Pref Label", Type: "Location", SourceRepresentations: []ontology.Concept{{ UUID: locationUUID, PrefLabel: "Location Pref Label", Type: "Location", Authority: "ManagedLocation", AuthorityValue: locationUUID, }}, } } func getLocationWithISO31661() ontology.AggregatedConcept { return ontology.AggregatedConcept{ PrefUUID: locationUUID, PrefLabel: "Location Pref Label 2", Type: "Location", Aliases: []string{ "Bulgaria", "Bulgarie", "Bulgarien", }, ISO31661: "BG", SourceRepresentations: []ontology.Concept{{ UUID: locationUUID, PrefLabel: "Location Pref Label 2", Type: "Location", Authority: "ManagedLocation", AuthorityValue: locationUUID, Aliases: []string{ "Bulgaria", "Bulgarie", "Bulgarien", }, ISO31661: "BG", }}, } } func getLocationWithISO31661AndConcordance() ontology.AggregatedConcept { return ontology.AggregatedConcept{ PrefUUID: anotherLocationUUID, PrefLabel: "Location Pref Label 2", Type: "Location", Aliases: []string{ "Bulgaria", "Bulgarie", "Bulgarien", }, ISO31661: "BG", SourceRepresentations: []ontology.Concept{ { UUID: locationUUID, PrefLabel: "Location Pref Label 2", Type: "Location", Authority: "ManagedLocation", AuthorityValue: locationUUID, Aliases: []string{ "Bulgaria", "Bulgarie", "Bulgarien", }, ISO31661: "BG", }, { UUID: anotherLocationUUID, PrefLabel: "Location Pref Label 2", Type: "Location", Authority: "Smartlogic", AuthorityValue: anotherLocationUUID, Aliases: []string{ "Bulgaria", "Bulgarie", "Bulgarien", }, }, }, } } func init() { // We are initialising a lot of constraints on an empty database therefore we need the database to be fit before // we run 
tests so initialising the service will create the constraints first logger.InitLogger("test-concepts-rw-neo4j", "panic") conf := neoutils.DefaultConnectionConfig() conf.Transactional = false db, _ = neoutils.Connect(newURL(), conf) if db == nil { panic("Cannot connect to Neo4J") } conceptsDriver = NewConceptService(db) conceptsDriver.Initialise() duration := 5 * time.Second time.Sleep(duration) } func TestWriteService(t *testing.T) { defer cleanDB(t) tests := []struct { testName string aggregatedConcept ontology.AggregatedConcept otherRelatedConcepts []ontology.AggregatedConcept writtenNotReadFields []string errStr string updatedConcepts ConceptChanges }{ { testName: "Throws validation error for invalid concept", aggregatedConcept: ontology.AggregatedConcept{PrefUUID: basicConceptUUID}, errStr: "invalid request, no prefLabel has been supplied", updatedConcepts: ConceptChanges{ UpdatedIds: []string{}, }, }, { testName: "Creates All Values Present for a Lone Concept", aggregatedConcept: getAggregatedConcept(t, "full-lone-aggregated-concept.json"), updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Section", ConceptUUID: basicConceptUUID, AggregateHash: "11962703960608256906", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ basicConceptUUID, }, }, }, { testName: "Creates All Values Present for a MembershipRole", aggregatedConcept: getAggregatedConcept(t, "membership-role.json"), updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "MembershipRole", ConceptUUID: membershipRoleUUID, AggregateHash: "10926600137775579722", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ membershipRoleUUID, }, }, }, { testName: "Creates All Values Present for a BoardRole", aggregatedConcept: getAggregatedConcept(t, "board-role.json"), updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "BoardRole", ConceptUUID: boardRoleUUID, AggregateHash: "632127633281490148", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ boardRoleUUID, }, }, }, { testName: "Creates All Values Present for a Membership", aggregatedConcept: getAggregatedConcept(t, "membership.json"), updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Membership", ConceptUUID: membershipUUID, AggregateHash: "2583709379931978683", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ membershipUUID, }, }, }, { testName: "Creates All Values Present for a FinancialInstrument", aggregatedConcept: getAggregatedConcept(t, "financial-instrument.json"), updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "FinancialInstrument", ConceptUUID: financialInstrumentUUID, AggregateHash: "740867886434218715", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ financialInstrumentUUID, }, }, }, { testName: "Creates All Values Present for a Concept with a IS_RELATED_TO relationship", aggregatedConcept: getAggregatedConcept(t, "concept-with-related-to.json"), otherRelatedConcepts: []ontology.AggregatedConcept{ getAggregatedConcept(t, "yet-another-full-lone-aggregated-concept.json"), }, updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Section", ConceptUUID: basicConceptUUID, AggregateHash: "15778472151266496724", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ basicConceptUUID, }, }, }, { testName: "Creates All Values Present for a Concept with a IS_RELATED_TO relationship to an 
unknown thing", aggregatedConcept: getAggregatedConcept(t, "concept-with-related-to-unknown-thing.json"), updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Section", ConceptUUID: basicConceptUUID, AggregateHash: "16664714450548061902", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ basicConceptUUID, }, }, }, { testName: "Creates All Values correctly for a Concept with multiple IS_RELATED_TO relationships", aggregatedConcept: getAggregatedConcept(t, "concept-with-multiple-related-to.json"), otherRelatedConcepts: []ontology.AggregatedConcept{ getAggregatedConcept(t, "yet-another-full-lone-aggregated-concept.json"), }, updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Section", ConceptUUID: basicConceptUUID, AggregateHash: "16267515993296956365", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ basicConceptUUID, }, }, }, { testName: "Creates All Values Present for a Concept with a HAS_BROADER relationship", aggregatedConcept: getAggregatedConcept(t, "concept-with-has-broader.json"), otherRelatedConcepts: []ontology.AggregatedConcept{ getAggregatedConcept(t, "yet-another-full-lone-aggregated-concept.json"), }, updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Section", ConceptUUID: basicConceptUUID, AggregateHash: "10136463773554381892", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ basicConceptUUID, }, }, }, { testName: "Creates All Values Present for a Concept with a HAS_BROADER relationship to an unknown thing", aggregatedConcept: getAggregatedConcept(t, "concept-with-has-broader-to-unknown-thing.json"), updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Section", ConceptUUID: basicConceptUUID, AggregateHash: "16881221654944969347", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ basicConceptUUID, }, }, }, { testName: "Creates All Values correctly for a Concept with multiple HAS_BROADER relationships", aggregatedConcept: getAggregatedConcept(t, "concept-with-multiple-has-broader.json"), otherRelatedConcepts: []ontology.AggregatedConcept{ getAggregatedConcept(t, "yet-another-full-lone-aggregated-concept.json"), }, updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Section", ConceptUUID: basicConceptUUID, AggregateHash: "10611495773105789085", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ basicConceptUUID, }, }, }, { testName: "Creates All Values Present for a Brand with an IMPLIED_BY relationship", aggregatedConcept: getAggregatedConcept(t, "brand-with-implied-by.json"), otherRelatedConcepts: []ontology.AggregatedConcept{ getAggregatedConcept(t, "topic.json"), }, updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Brand", ConceptUUID: brandUUID, AggregateHash: "11685880447608683841", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ brandUUID, }, }, }, { testName: "Creates All Values Present for a Brand with an IMPLIED_BY relationship to an unknown thing", aggregatedConcept: getAggregatedConcept(t, "brand-with-implied-by-unknown-thing.json"), updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Brand", ConceptUUID: brandUUID, AggregateHash: "14718680089606136873", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ brandUUID, }, }, }, { testName: "Creates All Values correctly for a Brand with multiple IMPLIED_BY 
relationships", aggregatedConcept: getAggregatedConcept(t, "brand-with-multiple-implied-by.json"), otherRelatedConcepts: []ontology.AggregatedConcept{ getAggregatedConcept(t, "topic.json"), }, updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Brand", ConceptUUID: brandUUID, AggregateHash: "11718320835668332357", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ brandUUID, }, }, }, { testName: "Creates All Values correctly for multiple Brand sources with common IMPLIED_BY relationships", aggregatedConcept: getAggregatedConcept(t, "concorded-brand-with-multiple-implied-by.json"), otherRelatedConcepts: []ontology.AggregatedConcept{ getAggregatedConcept(t, "topic.json"), }, updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Brand", ConceptUUID: brandUUID, AggregateHash: "13280667139926404744", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, { ConceptType: "Brand", ConceptUUID: anotherBrandUUID, AggregateHash: "13280667139926404744", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, { ConceptType: "Brand", ConceptUUID: anotherBrandUUID, AggregateHash: "13280667139926404744", EventDetails: ConcordanceEvent{ Type: AddedEvent, OldID: anotherBrandUUID, NewID: brandUUID, }, }, }, UpdatedIds: []string{ brandUUID, anotherBrandUUID, }, }, }, { testName: "Creates All Values Present for a Concept with a HAS_FOCUS relationship", aggregatedConcept: getAggregatedConcept(t, "concept-with-has-focus.json"), otherRelatedConcepts: []ontology.AggregatedConcept{ getAggregatedConcept(t, "another-topic.json"), }, updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Organisation", ConceptUUID: conceptHasFocusUUID, AggregateHash: "13449440537497481455", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ conceptHasFocusUUID, }, }, }, { testName: "Creates All Values Present for a Brand with a HAS_FOCUS relationship", aggregatedConcept: getAggregatedConcept(t, "brand-with-has-focus.json"), otherRelatedConcepts: []ontology.AggregatedConcept{ getAggregatedConcept(t, "another-topic.json"), getAggregatedConcept(t, "organisation.json"), }, updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Brand", ConceptUUID: yetAnotherBrandUUID, AggregateHash: "9392858139411790333", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ yetAnotherBrandUUID, }, }, }, { testName: "Creates All Values Present for a Concept with a HAS_FOCUS relationship to an unknown thing", aggregatedConcept: getAggregatedConcept(t, "concept-with-has-focus-unknown-thing.json"), updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Organisation", ConceptUUID: conceptHasFocusUUID, AggregateHash: "16540497880121135813", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ conceptHasFocusUUID, }, }, }, { testName: "Creates All Values correctly for a Concept with multiple HAS_FOCUS relationships", aggregatedConcept: getAggregatedConcept(t, "concept-with-multiple-has-focus.json"), otherRelatedConcepts: []ontology.AggregatedConcept{ getAggregatedConcept(t, "another-topic.json"), getAggregatedConcept(t, "organisation.json"), }, updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Organisation", ConceptUUID: conceptHasFocusUUID, AggregateHash: "3410796614082946092", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ conceptHasFocusUUID, }, }, }, { testName: "Creates All Values correctly for 
multiple Concept sources with common HAS_FOCUS relationships", aggregatedConcept: getAggregatedConcept(t, "concorded-concept-with-multiple-has-focus.json"), otherRelatedConcepts: []ontology.AggregatedConcept{ getAggregatedConcept(t, "topic.json"), getAggregatedConcept(t, "another-topic.json"), getAggregatedConcept(t, "organisation.json"), }, updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Organisation", ConceptUUID: conceptHasFocusUUID, AggregateHash: "12703582309208260040", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, { ConceptType: "Organisation", ConceptUUID: anotherConceptHasFocusUUID, AggregateHash: "12703582309208260040", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, { ConceptType: "Organisation", ConceptUUID: anotherConceptHasFocusUUID, AggregateHash: "12703582309208260040", EventDetails: ConcordanceEvent{ Type: AddedEvent, OldID: anotherConceptHasFocusUUID, NewID: conceptHasFocusUUID, }, }, }, UpdatedIds: []string{ conceptHasFocusUUID, anotherConceptHasFocusUUID, }, }, }, { testName: "Creates All Values correctly for a Concept with multiple SUPERSEDED_BY relationships", aggregatedConcept: getAggregatedConcept(t, "concept-with-multiple-superseded-by.json"), updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Section", ConceptUUID: basicConceptUUID, AggregateHash: "4024699536717513094", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ basicConceptUUID, }, }, }, { testName: "Creates All Values Present for a Concorded Concept", aggregatedConcept: getAggregatedConcept(t, "full-concorded-aggregated-concept.json"), updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Section", ConceptUUID: anotherBasicConceptUUID, AggregateHash: "15832747680085628960", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, { ConceptType: "Section", ConceptUUID: anotherBasicConceptUUID, AggregateHash: "15832747680085628960", EventDetails: ConcordanceEvent{ Type: AddedEvent, OldID: anotherBasicConceptUUID, NewID: basicConceptUUID, }, }, { ConceptType: "Section", ConceptUUID: basicConceptUUID, AggregateHash: "15832747680085628960", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ anotherBasicConceptUUID, basicConceptUUID, }, }, }, { testName: "Creates Handles Special Characters", aggregatedConcept: getAggregatedConcept(t, "lone-source-system-pref-label.json"), updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Section", ConceptUUID: basicConceptUUID, AggregateHash: "3185186027352954335", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ basicConceptUUID, }, }, }, { testName: "Adding Organisation with all related locations in place works", aggregatedConcept: getOrganisationWithAllCountries(), otherRelatedConcepts: []ontology.AggregatedConcept{ getLocationWithISO31661(), }, updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "PublicCompany", ConceptUUID: testOrgUUID, AggregateHash: "1083384572460927160", TransactionID: "", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ testOrgUUID, }, }, }, { testName: "Unknown Authority Should Fail", aggregatedConcept: getAggregatedConcept(t, "unknown-authority.json"), errStr: "unknown authority", updatedConcepts: ConceptChanges{ UpdatedIds: []string{}, }, }, { testName: "Concord a ManagedLocation concept with ISO code to a Smartlogic concept", aggregatedConcept: getLocationWithISO31661AndConcordance(), otherRelatedConcepts: 
[]ontology.AggregatedConcept{ getLocationWithISO31661(), }, updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Location", ConceptUUID: locationUUID, AggregateHash: "14673293395653141343", EventDetails: ConcordanceEvent{ Type: AddedEvent, OldID: locationUUID, NewID: anotherLocationUUID, }, }, { ConceptType: "Location", ConceptUUID: anotherLocationUUID, AggregateHash: "14673293395653141343", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ locationUUID, anotherLocationUUID, }, }, }, { testName: "Creates All Values Present for a NAICSIndustryClassification", aggregatedConcept: getAggregatedConcept(t, "naics-industry-classification.json"), updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "NAICSIndustryClassification", ConceptUUID: naicsIndustryClassificationUUID, AggregateHash: "1773173587993451366", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ naicsIndustryClassificationUUID, }, }, }, { testName: "Creates All Values correctly for Organisation with HAS_INDUSTRY_CLASSIFICATION relationships", aggregatedConcept: getAggregatedConcept(t, "organisation-with-naics.json"), otherRelatedConcepts: []ontology.AggregatedConcept{ getAggregatedConcept(t, "naics-industry-classification.json"), getAggregatedConcept(t, "naics-industry-classification-internet.json"), }, updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "PublicCompany", ConceptUUID: organisationWithNAICSUUID, AggregateHash: "12721802568035065567", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ organisationWithNAICSUUID, }, }, }, { testName: "Creates All Values correctly for Organisation with HAS_INDUSTRY_CLASSIFICATION relationships to unknown", aggregatedConcept: getAggregatedConcept(t, "organisation-with-naics-unknown.json"), writtenNotReadFields: []string{"NAICSIndustryClassifications"}, updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "PublicCompany", ConceptUUID: organisationWithNAICSUUID, AggregateHash: "13749833964494005", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ organisationWithNAICSUUID, }, }, }, } for _, test := range tests { t.Run(test.testName, func(t *testing.T) { defer cleanDB(t) // Create the related, broader than and impliedBy on concepts for _, relatedConcept := range test.otherRelatedConcepts { _, err := conceptsDriver.Write(relatedConcept, "") assert.NoError(t, err, "Failed to write related/broader/impliedBy concept") } updatedConcepts, err := conceptsDriver.Write(test.aggregatedConcept, "") if test.errStr == "" { assert.NoError(t, err, "Failed to write concept") readConceptAndCompare(t, test.aggregatedConcept, test.testName, test.writtenNotReadFields...) 
sort.Slice(test.updatedConcepts.ChangedRecords, func(i, j int) bool { l, _ := json.Marshal(test.updatedConcepts.ChangedRecords[i]) r, _ := json.Marshal(test.updatedConcepts.ChangedRecords[j]) c := strings.Compare(string(l), string(r)) return c >= 0 }) updatedConcepts := updatedConcepts.(ConceptChanges) sort.Slice(updatedConcepts.ChangedRecords, func(i, j int) bool { l, _ := json.Marshal(updatedConcepts.ChangedRecords[i]) r, _ := json.Marshal(updatedConcepts.ChangedRecords[j]) c := strings.Compare(string(l), string(r)) return c >= 0 }) sort.Strings(test.updatedConcepts.UpdatedIds) sort.Strings(updatedConcepts.UpdatedIds) cmpOpts := cmpopts.IgnoreFields(Event{}, "AggregateHash") if !cmp.Equal(test.updatedConcepts, updatedConcepts, cmpOpts) { t.Errorf("Test %s failed: Updated uuid list differs from expected:\n%s", test.testName, cmp.Diff(test.updatedConcepts, updatedConcepts, cmpOpts)) } } else { assert.Error(t, err, "Error was expected") assert.Contains(t, err.Error(), test.errStr, "Error message is not correct") } }) } } func TestWriteMemberships_Organisation(t *testing.T) { defer cleanDB(t) org := getAggregatedConcept(t, "organisation.json") _, err := conceptsDriver.Write(org, "test_tid") assert.NoError(t, err, "Failed to write concept") readConceptAndCompare(t, org, "TestWriteMemberships_Organisation") upOrg := getAggregatedConcept(t, "updated-organisation.json") _, err = conceptsDriver.Write(upOrg, "test_tid") assert.NoError(t, err, "Failed to write concept") readConceptAndCompare(t, upOrg, "TestWriteMemberships_Organisation.Updated") } func TestWriteMemberships_CleansUpExisting(t *testing.T) { defer cleanDB(t) _, err := conceptsDriver.Write(getAggregatedConcept(t, "membership.json"), "test_tid") assert.NoError(t, err, "Failed to write membership") result, _, err := conceptsDriver.Read(membershipUUID, "test_tid") assert.NoError(t, err, "Failed to read membership") ab, _ := json.Marshal(cleanHash(result.(ontology.AggregatedConcept))) originalMembership := ontology.AggregatedConcept{} json.Unmarshal(ab, &originalMembership) originalMembership = cleanConcept(originalMembership) assert.Equal(t, len(originalMembership.MembershipRoles), 2) assert.True(t, reflect.DeepEqual([]ontology.MembershipRole{membershipRole, anotherMembershipRole}, originalMembership.MembershipRoles)) assert.Equal(t, organisationUUID, originalMembership.OrganisationUUID) assert.Equal(t, personUUID, originalMembership.PersonUUID) assert.Equal(t, "Mr", originalMembership.Salutation) assert.Equal(t, 2018, originalMembership.BirthYear) _, err = conceptsDriver.Write(getAggregatedConcept(t, "updated-membership.json"), "test_tid") assert.NoError(t, err, "Failed to write membership") updatedResult, _, err := conceptsDriver.Read(membershipUUID, "test_tid") assert.NoError(t, err, "Failed to read membership") cd, _ := json.Marshal(cleanHash(updatedResult.(ontology.AggregatedConcept))) updatedMemebership := ontology.AggregatedConcept{} json.Unmarshal(cd, &updatedMemebership) assert.Equal(t, len(updatedMemebership.MembershipRoles), 1) assert.Equal(t, []ontology.MembershipRole{anotherMembershipRole}, updatedMemebership.MembershipRoles) assert.Equal(t, anotherOrganisationUUID, updatedMemebership.OrganisationUUID) assert.Equal(t, anotherPersonUUID, updatedMemebership.PersonUUID) } func TestWriteMemberships_FixOldData(t *testing.T) { defer cleanDB(t) oldConcept := getConcept(t, "old-membership.json") newConcept := ontology.TransformToNewSourceConcept(oldConcept) queries := createNodeQueries(newConcept, membershipUUID) err := 
db.CypherBatch(queries) assert.NoError(t, err, "Failed to write source") _, err = conceptsDriver.Write(getAggregatedConcept(t, "membership.json"), "test_tid") assert.NoError(t, err, "Failed to write membership") result, _, err := conceptsDriver.Read(membershipUUID, "test_tid") assert.NoError(t, err, "Failed to read membership") ab, _ := json.Marshal(cleanHash(result.(ontology.AggregatedConcept))) originalMembership := ontology.AggregatedConcept{} json.Unmarshal(ab, &originalMembership) originalMembership = cleanConcept(originalMembership) assert.Equal(t, len(originalMembership.MembershipRoles), 2) assert.True(t, reflect.DeepEqual([]ontology.MembershipRole{membershipRole, anotherMembershipRole}, originalMembership.MembershipRoles)) assert.Equal(t, organisationUUID, originalMembership.OrganisationUUID) assert.Equal(t, personUUID, originalMembership.PersonUUID) } func TestFinancialInstrumentExistingIssuedByRemoved(t *testing.T) { defer cleanDB(t) _, err := conceptsDriver.Write(getAggregatedConcept(t, "financial-instrument.json"), "test_tid") assert.NoError(t, err, "Failed to write financial instrument") _, err = conceptsDriver.Write(getAggregatedConcept(t, "financial-instrument.json"), "test_tid") assert.NoError(t, err, "Failed to write financial instrument") readConceptAndCompare(t, getAggregatedConcept(t, "financial-instrument.json"), "TestFinancialInstrumentExistingIssuedByRemoved") _, err = conceptsDriver.Write(getAggregatedConcept(t, "updated-financial-instrument.json"), "test_tid") assert.NoError(t, err, "Failed to write financial instrument") _, err = conceptsDriver.Write(getAggregatedConcept(t, "financial-instrument.json"), "test_tid") assert.NoError(t, err, "Failed to write financial instrument") readConceptAndCompare(t, getAggregatedConcept(t, "financial-instrument.json"), "TestFinancialInstrumentExistingIssuedByRemoved") } func TestFinancialInstrumentIssuerOrgRelationRemoved(t *testing.T) { defer cleanDB(t) _, err := conceptsDriver.Write(getAggregatedConcept(t, "financial-instrument.json"), "test_tid") assert.NoError(t, err, "Failed to write financial instrument") readConceptAndCompare(t, getAggregatedConcept(t, "financial-instrument.json"), "TestFinancialInstrumentExistingIssuedByRemoved") _, err = conceptsDriver.Write(getAggregatedConcept(t, "financial-instrument-with-same-issuer.json"), "test_tid") assert.NoError(t, err, "Failed to write financial instrument") readConceptAndCompare(t, getAggregatedConcept(t, "financial-instrument-with-same-issuer.json"), "TestFinancialInstrumentExistingIssuedByRemoved") } func TestWriteService_HandlingConcordance(t *testing.T) { tid := "test_tid" type testStruct struct { testName string setUpConcept ontology.AggregatedConcept testConcept ontology.AggregatedConcept uuidsToCheck []string returnedError string updatedConcepts ConceptChanges customAssertion func(t *testing.T, concept ontology.AggregatedConcept) } singleConcordanceNoChangesNoUpdates := testStruct{ testName: "singleConcordanceNoChangesNoUpdates", setUpConcept: getAggregatedConcept(t, "single-concordance.json"), testConcept: getAggregatedConcept(t, "single-concordance.json"), uuidsToCheck: []string{ basicConceptUUID, }, updatedConcepts: ConceptChanges{ UpdatedIds: emptyList, }, } dualConcordanceNoChangesNoUpdates := testStruct{ testName: "dualConcordanceNoChangesNoUpdates", setUpConcept: getAggregatedConcept(t, "dual-concordance.json"), testConcept: getAggregatedConcept(t, "dual-concordance.json"), uuidsToCheck: []string{ basicConceptUUID, sourceID1, }, updatedConcepts: ConceptChanges{ 
UpdatedIds: emptyList, }, } singleConcordanceToDualConcordanceUpdatesBoth := testStruct{ testName: "singleConcordanceToDualConcordanceUpdatesBoth", setUpConcept: getAggregatedConcept(t, "single-concordance.json"), testConcept: getAggregatedConcept(t, "dual-concordance.json"), uuidsToCheck: []string{ basicConceptUUID, sourceID1, }, updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Brand", ConceptUUID: sourceID1, AggregateHash: "13050067908998386737", TransactionID: "test_tid", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, { ConceptType: "Brand", ConceptUUID: sourceID1, AggregateHash: "13050067908998386737", TransactionID: "test_tid", EventDetails: ConcordanceEvent{ Type: AddedEvent, OldID: sourceID1, NewID: basicConceptUUID, }, }, { ConceptType: "Brand", ConceptUUID: basicConceptUUID, AggregateHash: "13050067908998386737", TransactionID: "test_tid", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ basicConceptUUID, sourceID1, }, }, } dualConcordanceToSingleConcordanceUpdatesBoth := testStruct{ testName: "dualConcordanceToSingleConcordanceUpdatesBoth", setUpConcept: getAggregatedConcept(t, "dual-concordance.json"), testConcept: getAggregatedConcept(t, "single-concordance.json"), uuidsToCheck: []string{ basicConceptUUID, sourceID1, }, updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Brand", ConceptUUID: sourceID1, AggregateHash: "2137764349277562661", TransactionID: "test_tid", EventDetails: ConcordanceEvent{ Type: RemovedEvent, OldID: basicConceptUUID, NewID: sourceID1, }, }, { ConceptType: "Brand", ConceptUUID: basicConceptUUID, AggregateHash: "2137764349277562661", TransactionID: "test_tid", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ basicConceptUUID, sourceID1, }, }, } errorsOnAddingConcordanceOfCanonicalNode := testStruct{ testName: "errorsOnAddingConcordanceOfCanonicalNode", setUpConcept: getAggregatedConcept(t, "dual-concordance.json"), testConcept: getAggregatedConcept(t, "pref-uuid-as-source.json"), returnedError: "Cannot currently process this record as it will break an existing concordance with prefUuid: bbc4f575-edb3-4f51-92f0-5ce6c708d1ea", } oldCanonicalRemovedWhenSingleConcordancebecomesSource := testStruct{ testName: "oldCanonicalRemovedWhenSingleConcordancebecomesSource", setUpConcept: getAggregatedConcept(t, "single-concordance.json"), testConcept: getAggregatedConcept(t, "pref-uuid-as-source.json"), uuidsToCheck: []string{ anotherBasicConceptUUID, basicConceptUUID, sourceID2, }, updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Brand", ConceptUUID: basicConceptUUID, AggregateHash: "5757717515788965658", TransactionID: "test_tid", EventDetails: ConcordanceEvent{ Type: AddedEvent, OldID: basicConceptUUID, NewID: anotherBasicConceptUUID, }, }, { ConceptType: "Brand", ConceptUUID: sourceID2, AggregateHash: "5757717515788965658", TransactionID: "test_tid", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, { ConceptType: "Brand", ConceptUUID: sourceID2, AggregateHash: "5757717515788965658", TransactionID: "test_tid", EventDetails: ConcordanceEvent{ Type: AddedEvent, OldID: sourceID2, NewID: anotherBasicConceptUUID, }, }, { ConceptType: "Brand", ConceptUUID: anotherBasicConceptUUID, AggregateHash: "5757717515788965658", TransactionID: "test_tid", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ anotherBasicConceptUUID, basicConceptUUID, sourceID2, }, }, } transferSourceFromOneConcordanceToAnother := 
testStruct{ testName: "transferSourceFromOneConcordanceToAnother", setUpConcept: getAggregatedConcept(t, "dual-concordance.json"), testConcept: getAggregatedConcept(t, "transfer-source-concordance.json"), uuidsToCheck: []string{ anotherBasicConceptUUID, sourceID1, basicConceptUUID, }, updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Brand", ConceptUUID: sourceID1, AggregateHash: "7725347417335166648", TransactionID: "test_tid", EventDetails: ConcordanceEvent{ Type: RemovedEvent, OldID: basicConceptUUID, NewID: sourceID1, }, }, { ConceptType: "Brand", ConceptUUID: sourceID1, AggregateHash: "7725347417335166648", TransactionID: "test_tid", EventDetails: ConcordanceEvent{ Type: AddedEvent, OldID: sourceID1, NewID: anotherBasicConceptUUID, }, }, { ConceptType: "Brand", ConceptUUID: anotherBasicConceptUUID, AggregateHash: "7725347417335166648", TransactionID: "test_tid", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ anotherBasicConceptUUID, sourceID1, }, }, } addThirdSourceToDualConcordanceUpdateAll := testStruct{ testName: "addThirdSourceToDualConcordanceUpdateAll", setUpConcept: getAggregatedConcept(t, "dual-concordance.json"), testConcept: getAggregatedConcept(t, "tri-concordance.json"), uuidsToCheck: []string{ basicConceptUUID, sourceID1, sourceID2, }, updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Brand", ConceptUUID: sourceID2, AggregateHash: "1825428118302879667", TransactionID: "test_tid", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, { ConceptType: "Brand", ConceptUUID: sourceID2, AggregateHash: "1825428118302879667", TransactionID: "test_tid", EventDetails: ConcordanceEvent{ Type: AddedEvent, OldID: sourceID2, NewID: basicConceptUUID, }, }, { ConceptType: "Brand", ConceptUUID: basicConceptUUID, AggregateHash: "1825428118302879667", TransactionID: "test_tid", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ basicConceptUUID, sourceID1, sourceID2, }, }, } triConcordanceToDualConcordanceUpdatesAll := testStruct{ testName: "triConcordanceToDualConcordanceUpdatesAll", setUpConcept: getAggregatedConcept(t, "tri-concordance.json"), testConcept: getAggregatedConcept(t, "dual-concordance.json"), uuidsToCheck: []string{ basicConceptUUID, sourceID1, sourceID2, }, updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Brand", ConceptUUID: sourceID2, AggregateHash: "13050067908998386737", TransactionID: "test_tid", EventDetails: ConcordanceEvent{ Type: RemovedEvent, OldID: basicConceptUUID, NewID: sourceID2, }, }, { ConceptType: "Brand", ConceptUUID: basicConceptUUID, AggregateHash: "13050067908998386737", TransactionID: "test_tid", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ basicConceptUUID, sourceID1, sourceID2, }, }, } dataChangesOnCanonicalUpdateBoth := testStruct{ testName: "dataChangesOnCanonicalUpdateBoth", setUpConcept: getAggregatedConcept(t, "dual-concordance.json"), testConcept: getAggregatedConcept(t, "updated-dual-concordance.json"), uuidsToCheck: []string{ basicConceptUUID, sourceID1, }, updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Brand", ConceptUUID: basicConceptUUID, AggregateHash: "411480971478777011", TransactionID: "test_tid", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ basicConceptUUID, sourceID1, }, }, } singleConcordanceDeprecationChangesUpdates := testStruct{ testName: "singleConcordanceDeprecationChangesUpdates", setUpConcept: 
getAggregatedConcept(t, "single-concordance.json"), testConcept: func() ontology.AggregatedConcept { concept := getAggregatedConcept(t, "single-concordance.json") concept.IsDeprecated = true concept.SourceRepresentations[0].IsDeprecated = true return concept }(), uuidsToCheck: []string{ basicConceptUUID, }, updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Brand", ConceptUUID: basicConceptUUID, AggregateHash: "17026098453454367869", TransactionID: "test_tid", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ basicConceptUUID, }, }, } singleConcordanceSupersededByAddRelationship := testStruct{ testName: "singleConcordanceSupersededByAddRelationship", setUpConcept: getAggregatedConcept(t, "single-concordance.json"), testConcept: func() ontology.AggregatedConcept { concept := getAggregatedConcept(t, "single-concordance.json") concept.SourceRepresentations[0].SupersededByUUIDs = []string{supersededByUUID} return concept }(), uuidsToCheck: []string{ basicConceptUUID, }, updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Brand", ConceptUUID: basicConceptUUID, AggregateHash: "13590089407881813689", TransactionID: "test_tid", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ basicConceptUUID, }, }, customAssertion: func(t *testing.T, concept ontology.AggregatedConcept) { assert.Lenf(t, concept.SourceRepresentations, 1, "Test %s failed. Different number of sourceRepresentation items than expected", "singleConcordanceSupersededByRemoveRelationship") assert.Lenf(t, concept.SourceRepresentations[0].SupersededByUUIDs, 1, "Test %s failed. Different number of supersededByUUIDs items than expected", "singleConcordanceSupersededByRemoveRelationship") assert.Equalf(t, supersededByUUID, concept.SourceRepresentations[0].SupersededByUUIDs[0], "Test %s failed. Different supersededByUUID than expected", "singleConcordanceSupersededByRemoveRelationship") }, } singleConcordanceSupersededByRemoveRelationship := testStruct{ testName: "singleConcordanceSupersededByRemoveRelationship", setUpConcept: getAggregatedConcept(t, "concept-with-superseded-by-uuids.json"), testConcept: getAggregatedConcept(t, "single-concordance.json"), uuidsToCheck: []string{ basicConceptUUID, }, updatedConcepts: ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Brand", ConceptUUID: basicConceptUUID, AggregateHash: "2137764349277562661", TransactionID: "test_tid", EventDetails: ConceptEvent{ Type: UpdatedEvent, }, }, }, UpdatedIds: []string{ basicConceptUUID, }, }, customAssertion: func(t *testing.T, concept ontology.AggregatedConcept) { assert.Lenf(t, concept.SourceRepresentations, 1, "Test %s failed. Different number of sourceRepresentation items than expected", "singleConcordanceSupersededByRemoveRelationship") assert.Emptyf(t, concept.SourceRepresentations[0].SupersededByUUIDs, "Test %s failed. 
No supersededByUUIDs content expected", "singleConcordanceSupersededByRemoveRelationship") }, } scenarios := []testStruct{ singleConcordanceNoChangesNoUpdates, dualConcordanceNoChangesNoUpdates, singleConcordanceToDualConcordanceUpdatesBoth, dualConcordanceToSingleConcordanceUpdatesBoth, errorsOnAddingConcordanceOfCanonicalNode, oldCanonicalRemovedWhenSingleConcordancebecomesSource, transferSourceFromOneConcordanceToAnother, addThirdSourceToDualConcordanceUpdateAll, triConcordanceToDualConcordanceUpdatesAll, dataChangesOnCanonicalUpdateBoth, singleConcordanceDeprecationChangesUpdates, singleConcordanceSupersededByAddRelationship, singleConcordanceSupersededByRemoveRelationship, } cleanDB(t) for _, scenario := range scenarios { //Write data into db, to set up test scenario _, err := conceptsDriver.Write(scenario.setUpConcept, tid) assert.NoError(t, err, "Scenario "+scenario.testName+" failed; returned unexpected error") verifyAggregateHashIsCorrect(t, scenario.setUpConcept, scenario.testName) //Overwrite data with update output, err := conceptsDriver.Write(scenario.testConcept, tid) if scenario.returnedError != "" { if assert.Error(t, err, "Scenario "+scenario.testName+" failed; should return an error") { assert.Contains(t, err.Error(), scenario.returnedError, "Scenario "+scenario.testName+" failed; returned unknown error") } // Do not check the output on error because it sometimes causes test errors continue } if !assert.NoError(t, err, "Scenario "+scenario.testName+" failed; returned unexpected error") { continue } actualChanges := output.(ConceptChanges) sort.Slice(actualChanges.ChangedRecords, func(i, j int) bool { l, _ := json.Marshal(actualChanges.ChangedRecords[i]) r, _ := json.Marshal(actualChanges.ChangedRecords[j]) c := strings.Compare(string(l), string(r)) if c >= 0 { return true } return false }) sort.Slice(scenario.updatedConcepts.ChangedRecords, func(i, j int) bool { l, _ := json.Marshal(scenario.updatedConcepts.ChangedRecords[i]) r, _ := json.Marshal(scenario.updatedConcepts.ChangedRecords[j]) c := strings.Compare(string(l), string(r)) if c >= 0 { return true } return false }) sort.Strings(scenario.updatedConcepts.UpdatedIds) sort.Strings(actualChanges.UpdatedIds) cmpOpts := cmpopts.IgnoreFields(Event{}, "AggregateHash") if !cmp.Equal(scenario.updatedConcepts, actualChanges, cmpOpts) { t.Errorf("Scenario %s failed: Updated uuid list differs from expected:\n%s", scenario.testName, cmp.Diff(scenario.updatedConcepts, actualChanges, cmpOpts)) } for _, id := range scenario.uuidsToCheck { conceptIf, found, err := conceptsDriver.Read(id, tid) concept := cleanHash(conceptIf.(ontology.AggregatedConcept)) if found { assert.NotNil(t, concept, "Scenario "+scenario.testName+" failed; id: "+id+" should return a valid concept") assert.True(t, found, "Scenario "+scenario.testName+" failed; id: "+id+" should return a valid concept") assert.NoError(t, err, "Scenario "+scenario.testName+" failed; returned unexpected error") verifyAggregateHashIsCorrect(t, scenario.testConcept, scenario.testName) } else { assert.Equal(t, ontology.AggregatedConcept{}, concept, "Scenario "+scenario.testName+" failed; id: "+id+" should return a valid concept") assert.NoError(t, err, "Scenario "+scenario.testName+" failed; returned unexpected error") } if scenario.customAssertion != nil { scenario.customAssertion(t, concept) } } cleanDB(t) } } func TestMultipleConcordancesAreHandled(t *testing.T) { defer cleanDB(t) _, err := conceptsDriver.Write(getAggregatedConcept(t, "full-lone-aggregated-concept.json"), 
"test_tid") assert.NoError(t, err, "Test TestMultipleConcordancesAreHandled failed; returned unexpected error") _, err = conceptsDriver.Write(getAggregatedConcept(t, "lone-tme-section.json"), "test_tid") assert.NoError(t, err, "Test TestMultipleConcordancesAreHandled failed; returned unexpected error") _, err = conceptsDriver.Write(getAggregatedConcept(t, "transfer-multiple-source-concordance.json"), "test_tid") assert.NoError(t, err, "Test TestMultipleConcordancesAreHandled failed; returned unexpected error") conceptIf, found, err := conceptsDriver.Read(simpleSmartlogicTopicUUID, "test_tid") concept := cleanHash(conceptIf.(ontology.AggregatedConcept)) assert.NoError(t, err, "Should be able to read concept with no problems") assert.True(t, found, "Concept should exist") assert.NotNil(t, concept, "Concept should be populated") readConceptAndCompare(t, getAggregatedConcept(t, "transfer-multiple-source-concordance.json"), "TestMultipleConcordancesAreHandled") } // Test case is a concept with multiple sources, one of which has multiple Industry classifications. // From bug, https://financialtimes.atlassian.net/browse/UPPSF-2773 on Write (property update) // the concept in question was returning unexpected CONCORDANCE_ADDED/CONCORDANCE_REMOVED where only CONCEPT_UPDATED was expected. func TestWriteShouldReturnCorrectConceptChanges(t *testing.T) { const mainConceptUUID = "13465cc7-204f-48b9-a8d6-b901d5d86c48" var aggregate ontology.AggregatedConcept concepts, canonicalUUIDs, sourceUUIDs := readTestSetup(t, "testdata/bug/13465cc7-204f-48b9-a8d6-b901d5d86c48.json") for _, concept := range concepts { _, err := conceptsDriver.Write(concept, "tid_init") if err != nil { t.Fatal(err) } if concept.PrefUUID == mainConceptUUID { aggregate = concept } } defer func() { deleteSourceNodes(t, sourceUUIDs...) deleteConcordedNodes(t, canonicalUUIDs...) }() expectedEvents := ConceptChanges{ ChangedRecords: []Event{ { ConceptType: "Organisation", ConceptUUID: "13465cc7-204f-48b9-a8d6-b901d5d86c48", TransactionID: "tid_second", EventDetails: ConceptEvent{Type: UpdatedEvent}, }, }, UpdatedIds: []string{ "0eb54dff-fbe3-330e-b755-7435c4aad411", "374fdcea-062f-3281-81ca-7851323bcf98", "6259ebad-ed4c-3b13-ae66-9117fa591328", "13465cc7-204f-48b9-a8d6-b901d5d86c48", }, } // force concept update aggregate.DescriptionXML += "testing" data, err := conceptsDriver.Write(aggregate, "tid_second") if err != nil { t.Fatal(err) } events, ok := data.(ConceptChanges) if !ok { t.Fatal("concept write did not return 'ConceptChanges'") } if !cmp.Equal(expectedEvents, events, cmpopts.IgnoreFields(Event{}, "AggregateHash")) { t.Error(cmp.Diff(expectedEvents, events, cmpopts.IgnoreFields(Event{}, "AggregateHash"))) } } func TestReadReturnsErrorOnMultipleResults(t *testing.T) { // note the test data that this is explicitly broken setup, where multiple source concepts have HAS_ORGANISATION relationship // this is unsupported behaviour and will produce multiple results when reading from neo4j const mainConceptUUID = "13465cc7-204f-48b9-a8d6-b901d5d86c48" concepts, canonicalUUIDs, sourceUUIDs := readTestSetup(t, "testdata/bug/concorded-multiple-has-organisation.json") for _, concept := range concepts { _, err := conceptsDriver.Write(concept, "tid_init") if err != nil { t.Fatal(err) } } defer func() { deleteSourceNodes(t, sourceUUIDs...) deleteConcordedNodes(t, canonicalUUIDs...) 
}() _, _, err := conceptsDriver.Read(mainConceptUUID, "tid_test") if !errors.Is(err, ErrUnexpectedReadResult) { t.Fatalf("expected read result error, but got '%v'", err) } } func TestInvalidTypesThrowError(t *testing.T) { invalidPrefConceptType := `MERGE (t:Thing{prefUUID:"bbc4f575-edb3-4f51-92f0-5ce6c708d1ea"}) SET t={prefUUID:"bbc4f575-edb3-4f51-92f0-5ce6c708d1ea", prefLabel:"The Best Label"} SET t:Concept:Brand:Unknown MERGE (s:Thing{uuid:"bbc4f575-edb3-4f51-92f0-5ce6c708d1ea"}) SET s={uuid:"bbc4f575-edb3-4f51-92f0-5ce6c708d1ea"} SET t:Concept:Brand MERGE (t)<-[:EQUIVALENT_TO]-(s)` invalidSourceConceptType := `MERGE (t:Thing{prefUUID:"4c41f314-4548-4fb6-ac48-4618fcbfa84c"}) SET t={prefUUID:"4c41f314-4548-4fb6-ac48-4618fcbfa84c", prefLabel:"The Best Label"} SET t:Concept:Brand MERGE (s:Thing{uuid:"4c41f314-4548-4fb6-ac48-4618fcbfa84c"}) SET s={uuid:"4c41f314-4548-4fb6-ac48-4618fcbfa84c"} SET t:Concept:Brand:Unknown MERGE (t)<-[:EQUIVALENT_TO]-(s)` type testStruct struct { testName string prefUUID string statementToWrite string returnedError error } invalidPrefConceptTypeTest := testStruct{ testName: "invalidPrefConceptTypeTest", prefUUID: basicConceptUUID, statementToWrite: invalidPrefConceptType, returnedError: nil, } invalidSourceConceptTypeTest := testStruct{ testName: "invalidSourceConceptTypeTest", prefUUID: anotherBasicConceptUUID, statementToWrite: invalidSourceConceptType, returnedError: nil, } scenarios := []testStruct{invalidPrefConceptTypeTest, invalidSourceConceptTypeTest} for _, scenario := range scenarios { db.CypherBatch([]*neoism.CypherQuery{{Statement: scenario.statementToWrite}}) aggConcept, found, err := conceptsDriver.Read(scenario.prefUUID, "") assert.Equal(t, ontology.AggregatedConcept{}, aggConcept, "Scenario "+scenario.testName+" failed; aggregate concept should be empty") assert.Equal(t, false, found, "Scenario "+scenario.testName+" failed; aggregate concept should not be returned from read") assert.Error(t, err, "Scenario "+scenario.testName+" failed; read of concept should return error") assert.Contains(t, err.Error(), "provided types are not a consistent hierarchy", "Scenario "+scenario.testName+" failed; should throw error from mapper.MostSpecificType function") } defer cleanDB(t) } func TestFilteringOfUniqueIds(t *testing.T) { type testStruct struct { testName string firstList map[string]string secondList map[string]string filteredList map[string]string } emptyWhenBothListsAreEmpty := testStruct{ testName: "emptyWhenBothListsAreEmpty", firstList: make(map[string]string), secondList: make(map[string]string), filteredList: make(map[string]string), } emptyWhenListsAreTheIdentical := testStruct{ testName: "emptyWhenListsAreTheIdentical", firstList: map[string]string{ "1": "", "2": "", "3": "", }, secondList: map[string]string{ "1": "", "2": "", "3": "", }, filteredList: make(map[string]string), } emptyWhenListsHaveSameIdsInDifferentOrder := testStruct{ testName: "emptyWhenListsHaveSameIdsInDifferentOrder", firstList: map[string]string{ "1": "", "2": "", "3": "", }, secondList: map[string]string{ "2": "", "3": "", "1": "", }, filteredList: make(map[string]string), } hasCompleteFirstListWhenSecondListIsEmpty := testStruct{ testName: "hasCompleteSecondListWhenFirstListIsEmpty", firstList: map[string]string{ "1": "", "2": "", "3": "", }, secondList: make(map[string]string), filteredList: map[string]string{ "1": "", "2": "", "3": "", }, } properlyFiltersWhen1IdIsUnique := testStruct{ testName: "properlyFiltersWhen1IdIsUnique", firstList: map[string]string{ "1": "", 
"2": "", "3": "", }, secondList: map[string]string{ "1": "", "2": "", }, filteredList: map[string]string{ "3": "", }, } properlyFiltersWhen2IdsAreUnique := testStruct{ testName: "properlyFiltersWhen2IdsAreUnique", firstList: map[string]string{ "1": "", "2": "", "3": "", }, secondList: map[string]string{ "2": "", }, filteredList: map[string]string{ "1": "", "3": "", }, } Scenarios := []testStruct{ emptyWhenBothListsAreEmpty, emptyWhenListsAreTheIdentical, emptyWhenListsHaveSameIdsInDifferentOrder, hasCompleteFirstListWhenSecondListIsEmpty, properlyFiltersWhen1IdIsUnique, properlyFiltersWhen2IdsAreUnique, } for _, scenario := range Scenarios { returnedList := filterIdsThatAreUniqueToFirstMap(scenario.firstList, scenario.secondList) assert.Equal(t, scenario.filteredList, returnedList, "Scenario: "+scenario.testName+" returned unexpected results") } } func TestTransferConcordance(t *testing.T) { statement := `MERGE (a:Thing{prefUUID:"1"}) MERGE (b:Thing{uuid:"1"}) MERGE (c:Thing{uuid:"2"}) MERGE (d:Thing{uuid:"3"}) MERGE (w:Thing{prefUUID:"4"}) MERGE (y:Thing{uuid:"5"}) MERGE (j:Thing{prefUUID:"6"}) MERGE (k:Thing{uuid:"6"}) MERGE (c)-[:EQUIVALENT_TO]->(a)<-[:EQUIVALENT_TO]-(b) MERGE (w)<-[:EQUIVALENT_TO]-(d) MERGE (j)<-[:EQUIVALENT_TO]-(k)` db.CypherBatch([]*neoism.CypherQuery{{Statement: statement}}) var emptyQuery []*neoism.CypherQuery var updatedConcept ConceptChanges type testStruct struct { testName string updatedSourceIds map[string]string returnResult bool returnedError error } nodeHasNoConconcordance := testStruct{ testName: "nodeHasNoConconcordance", updatedSourceIds: map[string]string{ "5": "Brand"}, returnedError: nil, } nodeHasExistingConcordanceWhichWouldCauseDataIssues := testStruct{ testName: "nodeHasExistingConcordanceWhichNeedsToBeReWritten", updatedSourceIds: map[string]string{ "1": "Brand"}, returnedError: errors.New("Cannot currently process this record as it will break an existing concordance with prefUuid: 1"), } nodeHasExistingConcordanceWhichNeedsToBeReWritten := testStruct{ testName: "nodeHasExistingConcordanceWhichNeedsToBeReWritten", updatedSourceIds: map[string]string{ "2": "Brand"}, returnedError: nil, } nodeHasInvalidConcordance := testStruct{ testName: "nodeHasInvalidConcordance", updatedSourceIds: map[string]string{ "3": "Brand"}, returnedError: errors.New("This source id: 3 the only concordance to a non-matching node with prefUuid: 4"), } nodeIsPrefUUIDForExistingConcordance := testStruct{ testName: "nodeIsPrefUuidForExistingConcordance", updatedSourceIds: map[string]string{ "1": "Brand"}, returnedError: errors.New("Cannot currently process this record as it will break an existing concordance with prefUuid: 1"), } nodeHasConcordanceToItselfPrefNodeNeedsToBeDeleted := testStruct{ testName: "nodeHasConcordanceToItselfPrefNodeNeedsToBeDeleted", updatedSourceIds: map[string]string{ "6": "Brand"}, returnResult: true, returnedError: nil, } scenarios := []testStruct{ nodeHasNoConconcordance, nodeHasExistingConcordanceWhichWouldCauseDataIssues, nodeHasExistingConcordanceWhichNeedsToBeReWritten, nodeHasInvalidConcordance, nodeIsPrefUUIDForExistingConcordance, nodeHasConcordanceToItselfPrefNodeNeedsToBeDeleted, } for _, scenario := range scenarios { returnedQueryList, err := conceptsDriver.handleTransferConcordance(scenario.updatedSourceIds, &updatedConcept, "1234", ontology.NewAggregatedConcept{}, "") assert.Equal(t, scenario.returnedError, err, "Scenario "+scenario.testName+" returned unexpected error") if scenario.returnResult == true { assert.NotEqual(t, emptyQuery, 
returnedQueryList, "Scenario "+scenario.testName+" results do not match") break } assert.Equal(t, emptyQuery, returnedQueryList, "Scenario "+scenario.testName+" results do not match") } defer deleteSourceNodes(t, "1", "2", "3", "5", "6") defer deleteConcordedNodes(t, "1", "4", "6") } func TestTransferCanonicalMultipleConcordance(t *testing.T) { statement := ` MERGE (editorialCanonical:Thing{prefUUID:"1"}) MERGE (editorial:Thing{uuid:"1"}) SET editorial.authority="Smartlogic" MERGE (mlCanonical:Thing{prefUUID:"2"}) MERGE (ml:Thing{uuid:"2"}) SET ml.authority="ManagedLocation" MERGE (geonames:Thing{uuid:"3"}) SET geonames.authority="Geonames" MERGE (factset:Thing{uuid:"4"}) SET factset.authority="FACTSET" MERGE (tme:Thing{uuid:"5"}) SET tme.authority="TME" MERGE (editorial)-[:EQUIVALENT_TO]->(editorialCanonical)<-[:EQUIVALENT_TO]-(factset) MERGE (ml)-[:EQUIVALENT_TO]->(mlCanonical)<-[:EQUIVALENT_TO]-(tme)` db.CypherBatch([]*neoism.CypherQuery{{Statement: statement}}) var emptyQuery []*neoism.CypherQuery var updatedConcept ConceptChanges type testStruct struct { testName string updatedSourceIds map[string]string returnResult bool returnedError error targetConcordance ontology.AggregatedConcept } mergeManagedLocationCanonicalWithTwoSources := testStruct{ testName: "mergeManagedLocationCanonicalWithTwoSources", updatedSourceIds: map[string]string{ "2": "Brand"}, returnedError: nil, returnResult: true, targetConcordance: ontology.AggregatedConcept{ PrefUUID: "1", SourceRepresentations: []ontology.Concept{ {UUID: "1", Authority: "Smartlogic"}, {UUID: "4", Authority: "FACTSET"}, {UUID: "2", Authority: "ManagedLocation"}, }, }, } mergeManagedLocationCanonicalWithTwoSourcesAndGeonames := testStruct{ testName: "mergeManagedLocationCanonicalWithTwoSourcesAndGeonames", updatedSourceIds: map[string]string{ "3": "Brand", "2": "Brand"}, returnedError: nil, returnResult: true, targetConcordance: ontology.AggregatedConcept{ PrefUUID: "1", SourceRepresentations: []ontology.Concept{ {UUID: "1", Authority: "Smartlogic"}, {UUID: "4", Authority: "FACTSET"}, {UUID: "2", Authority: "ManagedLocation"}, {UUID: "5", Authority: "TME"}, }, }, } mergeJustASourceConcordance := testStruct{ testName: "mergeJustASourceConcordance", updatedSourceIds: map[string]string{ "4": "Brand"}, returnedError: nil, } scenarios := []testStruct{ mergeManagedLocationCanonicalWithTwoSources, mergeManagedLocationCanonicalWithTwoSourcesAndGeonames, mergeJustASourceConcordance, } for _, scenario := range scenarios { newConcordance := ontology.TransformToNewAggregateConcept(scenario.targetConcordance) returnedQueryList, err := conceptsDriver.handleTransferConcordance(scenario.updatedSourceIds, &updatedConcept, "1234", newConcordance, "") assert.Equal(t, scenario.returnedError, err, "Scenario "+scenario.testName+" returned unexpected error") if scenario.returnResult == true { assert.NotEqual(t, emptyQuery, returnedQueryList, "Scenario "+scenario.testName+" results do not match") continue } assert.Equal(t, emptyQuery, returnedQueryList, "Scenario "+scenario.testName+" results do not match") } defer deleteSourceNodes(t, "1", "2", "3", "5") defer deleteConcordedNodes(t, "1", "2") } func TestValidateObject(t *testing.T) { tests := []struct { name string aggConcept ontology.AggregatedConcept returnedError string }{ { name: "aggregate concept without prefLabel should be invalid", aggConcept: ontology.AggregatedConcept{ PrefUUID: basicConceptUUID, Type: "Brand", SourceRepresentations: []ontology.Concept{ { UUID: basicConceptUUID, PrefLabel: "The Best 
Label", Type: "Brand", AuthorityValue: "123456-UPP", }, }, }, returnedError: "invalid request, no prefLabel has been supplied", }, { name: "aggregate concept without type should be invalid", aggConcept: ontology.AggregatedConcept{ PrefUUID: basicConceptUUID, PrefLabel: "The Best Label", SourceRepresentations: []ontology.Concept{ { UUID: basicConceptUUID, PrefLabel: "The Best Label", Type: "Brand", AuthorityValue: "123456-UPP", }, }, }, returnedError: "invalid request, no type has been supplied", }, { name: "aggregate concept without source representations should be invalid", aggConcept: ontology.AggregatedConcept{ PrefUUID: basicConceptUUID, PrefLabel: "The Best Label", Type: "Brand", }, returnedError: "invalid request, no sourceRepresentation has been supplied", }, { name: "source representation without prefLabel should be valid", aggConcept: ontology.AggregatedConcept{ PrefUUID: basicConceptUUID, PrefLabel: "The Best Label", Type: "Brand", SourceRepresentations: []ontology.Concept{ { UUID: basicConceptUUID, Type: "Brand", AuthorityValue: "123456-UPP", Authority: "UPP", }, }, }, }, { name: "source representation without type should be invalid", aggConcept: ontology.AggregatedConcept{ PrefUUID: basicConceptUUID, PrefLabel: "The Best Label", Type: "Brand", SourceRepresentations: []ontology.Concept{ { UUID: basicConceptUUID, PrefLabel: "The Best Label", Authority: "UPP", AuthorityValue: "123456-UPP", }, }, }, returnedError: "invalid request, no sourceRepresentation.type has been supplied", }, { name: "source representation without authorityValue should be invalid", aggConcept: ontology.AggregatedConcept{ PrefUUID: basicConceptUUID, PrefLabel: "The Best Label", Type: "Brand", SourceRepresentations: []ontology.Concept{ { UUID: basicConceptUUID, PrefLabel: "The Best Label", Type: "Brand", Authority: "UPP", }, }, }, returnedError: "invalid request, no sourceRepresentation.authorityValue has been supplied", }, { name: "source representation without authority should be invalid", aggConcept: ontology.AggregatedConcept{ PrefUUID: basicConceptUUID, PrefLabel: "The Best Label", Type: "Brand", SourceRepresentations: []ontology.Concept{ { UUID: basicConceptUUID, PrefLabel: "The Best Label", Type: "Brand", AuthorityValue: "123456-UPP", }, }, }, returnedError: "invalid request, no sourceRepresentation.authority has been supplied", }, { name: "valid concept", aggConcept: ontology.AggregatedConcept{ PrefUUID: basicConceptUUID, PrefLabel: "The Best Label", Type: "Brand", Aliases: []string{"alias1", "alias2"}, Strapline: "strapline", YearFounded: 2000, SourceRepresentations: []ontology.Concept{ { UUID: basicConceptUUID, PrefLabel: "The Best Label", Type: "Brand", Authority: "UPP", AuthorityValue: "123456-UPP", }, }, }, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { newAggConcept := ontology.TransformToNewAggregateConcept(test.aggConcept) err := validateObject(newAggConcept, "transaction_id") if err != nil { assert.NotEmpty(t, test.returnedError, "test.returnedError should not be empty when there is an error") assert.Contains(t, err.Error(), test.returnedError, test.name) } else { assert.Empty(t, test.returnedError, "test.returnedError should be empty when there is no error") assert.NoError(t, err, test.name) } }) } } func TestWriteLocation(t *testing.T) { defer cleanDB(t) location := getLocation() _, err := conceptsDriver.Write(location, "test_tid") assert.NoError(t, err, "Failed to write concept") readConceptAndCompare(t, location, "TestWriteLocation") locationISO31661 := 
getLocationWithISO31661() _, err = conceptsDriver.Write(locationISO31661, "test_tid") assert.NoError(t, err, "Failed to write concept") readConceptAndCompare(t, locationISO31661, "TestWriteLocationISO31661") } func TestSetCanonicalProps(t *testing.T) { tests := []struct { name string concept ontology.NewAggregatedConcept prefUUID string expected map[string]interface{} }{ { name: "Concept with default values and no prefUUID should return default props", concept: ontology.NewAggregatedConcept{}, expected: map[string]interface{}{ "prefUUID": "", "aggregateHash": "", }, }, { name: "Concept with default values with prefUUID should return props with prefUUID", concept: ontology.NewAggregatedConcept{}, prefUUID: "6649aeda-0cd0-4a65-a310-77f28e88b620", expected: map[string]interface{}{ "prefUUID": "6649aeda-0cd0-4a65-a310-77f28e88b620", "aggregateHash": "", }, }, { name: "Concept with empty values for properties should return default props", concept: ontology.NewAggregatedConcept{ Properties: map[string]interface{}{ "strapline": "", "descriptionXML": "", "imageUrl": "", "emailAddress": "", "facebookPage": "", "twitterHandle": "", "scopeNote": "", "shortLabel": "", "properName": "", "shortName": "", "countryCode": "", "countryOfRisk": "", "countryOfIncorporation": "", "countryOfOperations": "", "postalCode": "", "leiCode": "", "iso31661": "", "salutation": "", "industryIdentifier": "", "aliases": []string{}, "formerNames": []string{}, "tradeNames": []string{}, "yearFounded": 0, "birthYear": 0, }, }, prefUUID: "bbc4f575-edb3-4f51-92f0-5ce6c708d1ea", expected: map[string]interface{}{ "prefUUID": "bbc4f575-edb3-4f51-92f0-5ce6c708d1ea", "aggregateHash": "", }, }, { name: "Concept with non-empty valid values should return valid props", concept: ontology.NewAggregatedConcept{ PrefLabel: "prefLabel value", AggregatedHash: "aggregateHash value", InceptionDate: "inceptionDate value", TerminationDate: "terminationDate value", InceptionDateEpoch: 1, TerminationDateEpoch: 2, FigiCode: "figiCode value", IsDeprecated: true, Properties: map[string]interface{}{ "strapline": "strapline value", "descriptionXML": "descriptionXML value", "_imageUrl": "imageUrl value", "emailAddress": "emailAddress value", "facebookPage": "facebookPage value", "twitterHandle": "twitterHandle value", "scopeNote": "scopeNote value", "shortLabel": "shortLabel value", "properName": "properName value", "shortName": "shortName value", "countryCode": "countryCode value", "countryOfRisk": "countryOfRisk value", "countryOfIncorporation": "countryOfIncorporation value", "countryOfOperations": "countryOfOperations value", "postalCode": "postalCode value", "leiCode": "leiCode value", "iso31661": "iso31661 value", "salutation": "salutation value", "industryIdentifier": "industryIdentifier value", "aliases": []interface{}{"alias1", "alias2"}, "formerNames": []interface{}{"former name 1", "former name 2"}, "tradeNames": []interface{}{"trade name 1", "trade name 2"}, "yearFounded": float64(1), "birthYear": float64(2), }, }, prefUUID: "bbc4f575-edb3-4f51-92f0-5ce6c708d1ea", expected: map[string]interface{}{ "prefUUID": "bbc4f575-edb3-4f51-92f0-5ce6c708d1ea", "prefLabel": "prefLabel value", "aggregateHash": "aggregateHash value", "inceptionDate": "inceptionDate value", "terminationDate": "terminationDate value", "inceptionDateEpoch": int64(1), "terminationDateEpoch": int64(2), "figiCode": "figiCode value", "isDeprecated": true, "strapline": "strapline value", "descriptionXML": "descriptionXML value", "imageUrl": "imageUrl value", "emailAddress": 
"emailAddress value", "facebookPage": "facebookPage value", "twitterHandle": "twitterHandle value", "scopeNote": "scopeNote value", "shortLabel": "shortLabel value", "properName": "properName value", "shortName": "shortName value", "countryCode": "countryCode value", "countryOfRisk": "countryOfRisk value", "countryOfIncorporation": "countryOfIncorporation value", "countryOfOperations": "countryOfOperations value", "postalCode": "postalCode value", "leiCode": "leiCode value", "iso31661": "iso31661 value", "salutation": "salutation value", "industryIdentifier": "industryIdentifier value", "aliases": []interface{}{"alias1", "alias2"}, "formerNames": []interface{}{"former name 1", "former name 2"}, "tradeNames": []interface{}{"trade name 1", "trade name 2"}, "yearFounded": float64(1), "birthYear": float64(2), }, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { got := setCanonicalProps(test.concept, test.prefUUID) // check that "lastModifiedEpoch" is always set and ignore it _, ok := got["lastModifiedEpoch"] assert.True(t, ok, "expected lastModifiedEpoch to be set") delete(got, "lastModifiedEpoch") if !cmp.Equal(got, test.expected) { t.Errorf("Node props differ from expected:\n%s", cmp.Diff(got, test.expected)) } }) } } func TestPopulateConceptQueries(t *testing.T) { tests := []struct { name string concept ontology.NewAggregatedConcept goldenFileName string }{ { name: "Aggregate concept with default values", concept: ontology.NewAggregatedConcept{}, goldenFileName: "testdata/concept-queries-default.golden", }, { name: "Aggregate concept with default values and single default source", concept: ontology.NewAggregatedConcept{ SourceRepresentations: []ontology.NewConcept{ {}, }, }, goldenFileName: "testdata/concept-queries-default-source.golden", }, { name: "Aggregate concept with HAS_PARENT relationship", concept: ontology.TransformToNewAggregateConcept(getAggregatedConcept(t, "full-concorded-aggregated-concept.json")), goldenFileName: "testdata/concept-queries-has-parent-rel.golden", }, { name: "Aggregate concept with HAS_BROADER relationship", concept: ontology.TransformToNewAggregateConcept(getAggregatedConcept(t, "concept-with-multiple-has-broader.json")), goldenFileName: "testdata/concept-queries-has-broader-rel.golden", }, { name: "Aggregate concept with IS_RELATED_TO relationship", concept: ontology.TransformToNewAggregateConcept(getAggregatedConcept(t, "concept-with-multiple-related-to.json")), goldenFileName: "testdata/concept-queries-is-related-to-rel.golden", }, { name: "Aggregate concept with SUPERSEDED_BY relationship", concept: ontology.TransformToNewAggregateConcept(getAggregatedConcept(t, "concept-with-multiple-superseded-by.json")), goldenFileName: "testdata/concept-queries-superseded-by-rel.golden", }, { name: "Aggregate concept with IMPLIED_BY relationship", concept: ontology.TransformToNewAggregateConcept(getAggregatedConcept(t, "brand-with-multiple-implied-by.json")), goldenFileName: "testdata/concept-queries-implied-by-rel.golden", }, { name: "Aggregate concept with HAS_FOCUS relationship", concept: ontology.TransformToNewAggregateConcept(getAggregatedConcept(t, "concept-with-multiple-has-focus.json")), goldenFileName: "testdata/concept-queries-has-focus-rel.golden", }, { name: "Aggregate concept with HAS_MEMBER, HAS_ORGANISATION & HAS_ROLE relationships", concept: ontology.TransformToNewAggregateConcept(getAggregatedConcept(t, "updated-membership.json")), goldenFileName: "testdata/concept-queries-membership-rels.golden", }, { name: "Aggregate concept 
with COUNTRY_OF & NAICS relationships", concept: ontology.TransformToNewAggregateConcept(getAggregatedConcept(t, "organisation-with-naics.json")), goldenFileName: "testdata/concept-queries-country-of-naics-rels.golden", }, { name: "Aggregate concept with SUB_ORGANISATION_OF relationship", concept: ontology.TransformToNewAggregateConcept(getAggregatedConcept(t, "organisation.json")), goldenFileName: "testdata/concept-queries-sub-organisation-of-rel.golden", }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { var queryBatch []*neoism.CypherQuery queries := populateConceptQueries(queryBatch, test.concept) got := cypherBatchToString(queries) expectedStatement := getFromGoldenFile(t, test.goldenFileName, got, *update) if !cmp.Equal(expectedStatement, got) { t.Errorf("Got unexpected Cypher query batch:\n%s", cmp.Diff(expectedStatement, got)) } }) } } func cypherBatchToString(queryBatch []*neoism.CypherQuery) string { var queries []string for _, query := range queryBatch { // ignore lastModifiedEpoch from allprops if _, ok := query.Parameters["allprops"]; ok { props := query.Parameters["allprops"].(map[string]interface{}) delete(props, "lastModifiedEpoch") query.Parameters["allprops"] = props } params, _ := json.MarshalIndent(query.Parameters, "", " ") queries = append(queries, fmt.Sprintf("Statement: %v,\nParemeters: %v", query.Statement, string(params))) } return strings.Join(queries, "\n==============================================================================\n") } func TestProcessMembershipRoles(t *testing.T) { defer cleanDB(t) oldAggregatedConcept := getAggregatedConcept(t, "membership.json") aggregateConcept := ontology.TransformToNewAggregateConcept(oldAggregatedConcept) processMembershipRoles(&aggregateConcept) expected := membWithProcessedMembRoles() if !cmp.Equal(expected, aggregateConcept) { t.Errorf("Test %s failed: Concepts were not equal:\n%s", "TestProcessMembershipRoles", cmp.Diff(expected, aggregateConcept)) } } func membWithProcessedMembRoles() ontology.NewAggregatedConcept { return ontology.NewAggregatedConcept{ Properties: map[string]interface{}{ "salutation": "Mr", "birthYear": float64(2018), }, PrefUUID: "cbadd9a7-5da9-407a-a5ec-e379460991f2", PrefLabel: "Membership Pref Label", Type: "Membership", OrganisationUUID: "7f40d291-b3cb-47c4-9bce-18413e9350cf", PersonUUID: "35946807-0205-4fc1-8516-bb1ae141659b", InceptionDate: "2016-01-01", TerminationDate: "2017-02-02", SourceRepresentations: []ontology.NewConcept{ { Relationships: []ontology.Relationship{ { UUID: "35946807-0205-4fc1-8516-bb1ae141659b", Label: "HAS_MEMBER", }, { UUID: "7f40d291-b3cb-47c4-9bce-18413e9350cf", Label: "HAS_ORGANISATION", }, }, UUID: "cbadd9a7-5da9-407a-a5ec-e379460991f2", PrefLabel: "Membership Pref Label", Type: "Membership", Authority: "Smartlogic", AuthorityValue: "746464", InceptionDate: "2016-01-01", TerminationDate: "2017-02-02", MembershipRoles: []ontology.MembershipRole{ { RoleUUID: "f807193d-337b-412f-b32c-afa14b385819", InceptionDate: "2016-01-01", TerminationDate: "2017-02-02", InceptionDateEpoch: 1451606400, TerminationDateEpoch: 1485993600, }, { RoleUUID: "fe94adc6-ca44-438f-ad8f-0188d4a74987", InceptionDate: "2011-06-27", InceptionDateEpoch: 1309132800, }, }, }, }, } } func readConceptAndCompare(t *testing.T, payload ontology.AggregatedConcept, testName string, ignoredFields ...string) { actualIf, found, err := conceptsDriver.Read(payload.PrefUUID, "") actual := actualIf.(ontology.AggregatedConcept) newPayload := ontology.TransformToNewAggregateConcept(payload) 
clean := cleanSourceProperties(newPayload) newClean := ontology.TransformToOldAggregateConcept(clean) expected := cleanHash(cleanConcept(newClean)) actual = cleanHash(cleanConcept(actual)) cmpOptions := cmpopts.IgnoreFields(ontology.Concept{}, ignoredFields...) if !cmp.Equal(expected, actual, cmpOptions) { t.Errorf("Test %s failed: Concepts were not equal:\n%s", testName, cmp.Diff(expected, actual, cmpOptions)) } assert.NoError(t, err, fmt.Sprintf("Test %s failed: Unexpected Error occurred", testName)) assert.True(t, found, fmt.Sprintf("Test %s failed: Concept has not been found", testName)) } func readTestSetup(t *testing.T, filename string) ([]ontology.AggregatedConcept, []string, []string) { t.Helper() f, err := os.Open(filename) if err != nil { t.Fatal(err) } defer f.Close() result := []ontology.AggregatedConcept{} err = json.NewDecoder(f).Decode(&result) if err != nil { t.Fatal(err) } var canonicalUUIDs []string var sourceUUIDs []string for _, concept := range result { canonicalUUIDs = append(canonicalUUIDs, concept.PrefUUID) sourceUUIDs = append(sourceUUIDs, collectRelatedUUIDs(concept)...) } return result, canonicalUUIDs, sourceUUIDs } func collectRelatedUUIDs(concept ontology.AggregatedConcept) []string { var result []string for _, src := range concept.SourceRepresentations { result = append(result, src.UUID) result = append(result, src.ParentUUIDs...) result = append(result, src.BroaderUUIDs...) result = append(result, src.RelatedUUIDs...) result = append(result, src.SupersededByUUIDs...) result = append(result, src.ImpliedByUUIDs...) result = append(result, src.HasFocusUUIDs...) result = append(result, src.OrganisationUUID) result = append(result, src.PersonUUID) for _, memb := range src.MembershipRoles { result = append(result, memb.RoleUUID) } result = append(result, src.IssuedBy) result = append(result, src.CountryOfRiskUUID) result = append(result, src.CountryOfIncorporationUUID) result = append(result, src.CountryOfOperationsUUID) result = append(result, src.ParentOrganisation) for _, naics := range src.NAICSIndustryClassifications { result = append(result, naics.UUID) } } set := map[string]bool{} for _, uuid := range result { if uuid != "" { set[uuid] = true } } result = []string{} for uuid := range set { result = append(result, uuid) } return result } func newURL() string { url := os.Getenv("NEO4J_TEST_URL") if url == "" { url = "http://localhost:7474/db/data" } return url } func cleanDB(t *testing.T) { cleanSourceNodes(t, parentUUID, anotherBasicConceptUUID, basicConceptUUID, sourceID1, sourceID2, sourceID3, unknownThingUUID, anotherUnknownThingUUID, yetAnotherBasicConceptUUID, membershipRole.RoleUUID, personUUID, organisationUUID, membershipUUID, anotherMembershipRole.RoleUUID, anotherOrganisationUUID, anotherPersonUUID, simpleSmartlogicTopicUUID, boardRoleUUID, financialInstrumentSameIssuerUUID, financialInstrumentUUID, financialOrgUUID, anotherFinancialOrgUUID, parentOrgUUID, supersededByUUID, testOrgUUID, locationUUID, anotherLocationUUID, brandUUID, anotherBrandUUID, yetAnotherBrandUUID, topicUUID, anotherTopicUUID, conceptHasFocusUUID, anotherConceptHasFocusUUID, naicsIndustryClassificationUUID, naicsIndustryClassificationAnotherUUID, organisationWithNAICSUUID, ) deleteSourceNodes(t, parentUUID, anotherBasicConceptUUID, basicConceptUUID, sourceID1, sourceID2, sourceID3, unknownThingUUID, anotherUnknownThingUUID, yetAnotherBasicConceptUUID, membershipRole.RoleUUID, personUUID, organisationUUID, membershipUUID, anotherMembershipRole.RoleUUID, anotherOrganisationUUID, 
anotherPersonUUID, simpleSmartlogicTopicUUID, boardRoleUUID, financialInstrumentSameIssuerUUID, financialInstrumentUUID, financialOrgUUID, anotherFinancialOrgUUID, parentOrgUUID, supersededByUUID, testOrgUUID, locationUUID, anotherLocationUUID, brandUUID, anotherBrandUUID, yetAnotherBrandUUID, topicUUID, anotherTopicUUID, conceptHasFocusUUID, anotherConceptHasFocusUUID, naicsIndustryClassificationUUID, naicsIndustryClassificationAnotherUUID, organisationWithNAICSUUID, ) deleteConcordedNodes(t, parentUUID, basicConceptUUID, anotherBasicConceptUUID, sourceID1, sourceID2, sourceID3, unknownThingUUID, anotherUnknownThingUUID, yetAnotherBasicConceptUUID, membershipRole.RoleUUID, personUUID, organisationUUID, membershipUUID, anotherMembershipRole.RoleUUID, anotherOrganisationUUID, anotherPersonUUID, simpleSmartlogicTopicUUID, boardRoleUUID, financialInstrumentSameIssuerUUID, financialInstrumentUUID, financialOrgUUID, anotherFinancialOrgUUID, parentOrgUUID, supersededByUUID, testOrgUUID, locationUUID, anotherLocationUUID, brandUUID, anotherBrandUUID, yetAnotherBrandUUID, topicUUID, anotherTopicUUID, conceptHasFocusUUID, anotherConceptHasFocusUUID, naicsIndustryClassificationUUID, naicsIndustryClassificationAnotherUUID, organisationWithNAICSUUID, ) } func deleteSourceNodes(t *testing.T, uuids ...string) { qs := make([]*neoism.CypherQuery, len(uuids)) for i, uuid := range uuids { qs[i] = &neoism.CypherQuery{ Statement: fmt.Sprintf(` MATCH (a:Thing {uuid: "%s"}) DETACH DELETE a`, uuid)} } err := db.CypherBatch(qs) assert.NoError(t, err, "Error executing clean up cypher") } func cleanSourceNodes(t *testing.T, uuids ...string) { qs := make([]*neoism.CypherQuery, len(uuids)) for i, uuid := range uuids { qs[i] = &neoism.CypherQuery{ Statement: fmt.Sprintf(` MATCH (a:Thing {uuid: "%s"}) OPTIONAL MATCH (a)-[hp:HAS_PARENT]-(p) DELETE hp`, uuid)} } err := db.CypherBatch(qs) assert.NoError(t, err, "Error executing clean up cypher") } func deleteConcordedNodes(t *testing.T, uuids ...string) { qs := make([]*neoism.CypherQuery, len(uuids)) for i, uuid := range uuids { qs[i] = &neoism.CypherQuery{ Statement: fmt.Sprintf(` MATCH (a:Thing {prefUUID: "%s"}) OPTIONAL MATCH (a)-[rel]-(i) DELETE rel, i, a`, uuid)} } err := db.CypherBatch(qs) assert.NoError(t, err, "Error executing clean up cypher") } func verifyAggregateHashIsCorrect(t *testing.T, concept ontology.AggregatedConcept, testName string) { var results []struct { Hash string `json:"a.aggregateHash"` } query := &neoism.CypherQuery{ Statement: ` MATCH (a:Thing {prefUUID: {uuid}}) RETURN a.aggregateHash`, Parameters: map[string]interface{}{ "uuid": concept.PrefUUID, }, Result: &results, } err := db.CypherBatch([]*neoism.CypherQuery{query}) assert.NoError(t, err, fmt.Sprintf("Error while retrieving concept hash")) newConcept := ontology.TransformToNewAggregateConcept(concept) conceptHash, _ := hashstructure.Hash(cleanSourceProperties(newConcept), nil) hashAsString := strconv.FormatUint(conceptHash, 10) assert.Equal(t, hashAsString, results[0].Hash, fmt.Sprintf("Test %s failed: Concept hash %s and stored record %s are not equal!", testName, hashAsString, results[0].Hash)) } func cleanConcept(c ontology.AggregatedConcept) ontology.AggregatedConcept { for j := range c.SourceRepresentations { c.SourceRepresentations[j].LastModifiedEpoch = 0 for i := range c.SourceRepresentations[j].MembershipRoles { c.SourceRepresentations[j].MembershipRoles[i].InceptionDateEpoch = 0 c.SourceRepresentations[j].MembershipRoles[i].TerminationDateEpoch = 0 } 
sort.SliceStable(c.SourceRepresentations[j].MembershipRoles, func(k, l int) bool { return c.SourceRepresentations[j].MembershipRoles[k].RoleUUID < c.SourceRepresentations[j].MembershipRoles[l].RoleUUID }) sort.SliceStable(c.SourceRepresentations[j].BroaderUUIDs, func(k, l int) bool { return c.SourceRepresentations[j].BroaderUUIDs[k] < c.SourceRepresentations[j].BroaderUUIDs[l] }) sort.SliceStable(c.SourceRepresentations[j].RelatedUUIDs, func(k, l int) bool { return c.SourceRepresentations[j].RelatedUUIDs[k] < c.SourceRepresentations[j].RelatedUUIDs[l] }) sort.SliceStable(c.SourceRepresentations[j].SupersededByUUIDs, func(k, l int) bool { return c.SourceRepresentations[j].SupersededByUUIDs[k] < c.SourceRepresentations[j].SupersededByUUIDs[l] }) sort.SliceStable(c.SourceRepresentations[j].ImpliedByUUIDs, func(k, l int) bool { return c.SourceRepresentations[j].ImpliedByUUIDs[k] < c.SourceRepresentations[j].ImpliedByUUIDs[l] }) sort.SliceStable(c.SourceRepresentations[j].HasFocusUUIDs, func(k, l int) bool { return c.SourceRepresentations[j].HasFocusUUIDs[k] < c.SourceRepresentations[j].HasFocusUUIDs[l] }) sort.SliceStable(c.SourceRepresentations[j].NAICSIndustryClassifications, func(k, l int) bool { return c.SourceRepresentations[j].NAICSIndustryClassifications[k].Rank < c.SourceRepresentations[j].NAICSIndustryClassifications[l].Rank }) } for i := range c.MembershipRoles { c.MembershipRoles[i].InceptionDateEpoch = 0 c.MembershipRoles[i].TerminationDateEpoch = 0 } sort.SliceStable(c.SourceRepresentations, func(k, l int) bool { return c.SourceRepresentations[k].UUID < c.SourceRepresentations[l].UUID }) return c } func cleanHash(c ontology.AggregatedConcept) ontology.AggregatedConcept { c.AggregatedHash = "" return c }
[ "\"NEO4J_TEST_URL\"" ]
[]
[ "NEO4J_TEST_URL" ]
[]
["NEO4J_TEST_URL"]
go
1
0
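The record above reads a single environment variable, NEO4J_TEST_URL, inside its newURL helper and falls back to a local Neo4j endpoint when the variable is unset. Below is a minimal standalone sketch of that same lookup-with-fallback pattern; it is not part of the original file. The variable name and default URL are taken from the record, while the package, function, and variable names are illustrative only.

package main

import (
	"fmt"
	"os"
)

// neoTestURL mirrors the newURL helper in the record above: prefer the
// NEO4J_TEST_URL environment variable, fall back to a local default.
func neoTestURL() string {
	url := os.Getenv("NEO4J_TEST_URL")
	if url == "" {
		url = "http://localhost:7474/db/data"
	}
	return url
}

func main() {
	// e.g. NEO4J_TEST_URL=http://neo4j.test:7474/db/data go run .
	fmt.Println("connecting to", neoTestURL())
}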
api-examples/rbacv1/main.py
# !/usr/bin/env python3 import csv import json import os import urllib import requests try: SHIFTLEFT_ORG_ID = os.environ["SHIFTLEFT_ORG_ID"] SHIFTLEFT_ACCESS_TOKEN = os.environ["SHIFTLEFT_ACCESS_TOKEN"] except KeyError: raise SystemExit("Oops! Do not forget to set both SHIFTLEFT_ORG_ID and SHIFTLEFT_ACCESS_TOKEN!") API_V4_BASE_URL = "https://www.shiftleft.io/api/v4/" API_V4_ORG_PATH = "orgs/{organization_id}/" class SLAPIError: """ SLAPIError represents API error details returned by SL API v4 """ def __init__(self, ok=False, code=0, message="", validation_errors=()): self.ok = ok self.code = code self.message = message self.validation_errors = validation_errors def as_string(self): """ as_string composes the most descriptive error it can with the available data. :return: string containing a descriptive error. """ if len(self.validation_errors) != 0: return "found the following validation errors in the request: {}".format(", ".join(self.validation_errors)) if len(self.message) != 0: return "server responded: {}".format(self.message) return "server returned {} code without further information".format(self.code) def handle_success(resp): """ We discovered a few cases where response body can be empty so we handle the case """ response = "" try: response = resp.json()["response"] except (json.JSONDecodeError, json.decoder.JSONDecodeError): response = resp.text return response def handle_status_code(resp=None): """ handle_status_code intercepts the response and raises an appropriate error if it's not a 200 :param resp: an http response as returned from requests library :return: None in case of success or raises an exception with details otherwise """ if resp is None: return if resp.status_code == 200: return try: json_decoded_body = resp.json() except (json.JSONDecodeError, json.decoder.JSONDecodeError): json_decoded_body = resp.text except Exception: raise Exception(resp.status_code) e = SLAPIError(**json_decoded_body) raise Exception(e.as_string()) class SLResponse: """ Is an implementation of the base 200 response provided by all ShiftLeft API v4 endpoints. """ def __init__(self, ok=True, response=None): if response is None: response = {} self.ok = ok self.response = response class SLTeamMembership: """ SLTeamMembership contains the membership details for a user in a team. 
""" def __init__(self, team_name="", team_id="", role="", role_name="", role_aliases=[], **kwargs): self.team_name = team_name self.team_id = team_id self.role = role self.role_name = role_name self.role_aliases = role_aliases class SLUser: """ SLUser holds the information for one user as returned from ListUsers endpoint in ShiftLeft APIv4 https://docs.shiftleft.io/api/#operation/ListOrgRBACUsers """ def __init__(self, name="", email="", id_v2="", team_membership=()): self.name = name self.email = email self.id_v2 = id_v2 self.team_membership = [SLTeamMembership(**t) for t in team_membership] def is_member(self, team=""): for tm in self.team_membership: if tm.team_name == team: return True return False class SLListUsersResponse: """ SLListUsersResponse represents the response for the ListUsers endpoint in ShiftLeft API v4 https://docs.shiftleft.io/api/#operation/ListOrgRBACUsers """ def __init__(self, users=()): self.users = [SLUser(**u) for u in users] def id_for_email(self, user_email=""): user_email = user_email.lower() for u in self.users: if u.email.lower() == user_email: return u.id_v2 def user_for_id(self, user_id=""): for u in self.users: if u.id_v2 == user_id: return u class SLTeamInfo: """ SLTeamInfo represents the information returned in one item of ListTeams endpoint in ShiftLeft API v4 https://docs.shiftleft.io/api/#operation/ListTeams """ def __init__(self, team_id="", team_name="", team_version=""): self.team_id = team_id self.team_name = team_name self.team_version = team_version class SLTeams: """ SLTeams represents a group of teams, typically of a same organization. """ def __init__(self, teams=()): self.teams = [SLTeamInfo(**team) for team in teams] def __contains__(self, item): for tm in self.teams: if tm.team_name == item: return True def get_id(self, item): for tm in self.teams: if tm.team_name == item: return tm.team_id def get_team_name(self, item): for tm in self.teams: if tm.team_id == item: return tm.team_name def append(self, team): self.teams.append(team) class SLAPIClient: """ SLAPIClient handles communications with ShiftLeft API v4 for the purposes of this script. It is very limited and bound to be obsoleted of Schema changes. 
""" def __init__(self, access_token="", organization_id=""): self.__access_header = {'Authorization': 'Bearer {}'.format(access_token), 'Content-Type': 'application/json'} self.__organization_id = organization_id def _do_get(self, api_path): u = API_V4_BASE_URL + API_V4_ORG_PATH.format(organization_id=self.__organization_id) + api_path resp = requests.get(u, headers=self.__access_header) handle_status_code(resp) return handle_success(resp) def _do_post(self, api_path, payload=None): u = API_V4_BASE_URL + API_V4_ORG_PATH.format(organization_id=self.__organization_id) + api_path resp = requests.post(u, headers=self.__access_header, data=json.dumps(payload)) handle_status_code(resp) return handle_success(resp) def _do_put(self, api_path, payload=None): u = API_V4_BASE_URL + API_V4_ORG_PATH.format(organization_id=self.__organization_id) + api_path resp = requests.put(u, headers=self.__access_header, data=json.dumps(payload)) handle_status_code(resp) return handle_success(resp) def list_users(self): """ list_users implements a GET request to https://docs.shiftleft.io/api/#operation/ListOrgRBACUsers :return: """ return SLListUsersResponse(self._do_get("rbac/users")) def list_teams(self): """ list_teams implements a GET request to https://docs.shiftleft.io/api/#operation/ListTeams :return: """ resp = self._do_get("rbac/teams") return SLTeams(resp) def list_roles(self): """ list_roles implements a GET request to https://docs.shiftleft.io/api/#operation/ListTeams :return: """ return self._do_get("rbac/roles") def create_team(self, name=""): """ create_team implements a POST request to https://docs.shiftleft.io/api/#operation/CreateTeam :param name: the name of the team to be created, must be unique :return: """ team_payload = { "name": name } resp = self._do_post("rbac/teams", team_payload) return SLTeamInfo(team_id=resp["team_id"], team_name=name) def assign_user_organization_role(self, user_id="", role=""): """ assign_user_organization_role will assign the role passed to the user at an organization level :param user_id: the id v2 of the user :param role: the role id or alias the user will have at an organization level :return: a dictionary of the json response from the call. """ user_org_role_payload = {"org_role": role} self._do_put("rbac/users/{user_id}".format(user_id=user_id), user_org_role_payload) def assign_user_team_role(self, user_id="", team="", role=""): """ assign_user_team_role will assign a single user to a team :param user_id: the id v2 of the user to add to the team :param team: the team where we want to add the user :param role: the role that user will have on that team :return: a dictionary of the json response from the call. """ version = self.current_team_version(team) payload = { "version": version, "add_team_membership": [ { "user_id_v2": user_id, "team_role": role } ] } self._do_put("rbac/teams/{team}".format(team=team), payload) def current_team_version(self, team=""): """ current_team_version returns the version of the passed team on the server :param team: the name of the team whose version we want :return: an integer representing the current team version """ r = self._do_get("rbac/teams/{team}".format(team=team)) return r["version"] def assign_users_to_teams(self, team="", user_role_pairs=[]): """ assign_users_to_teams will assign the passed users to the passed team taking care of fetching new version. 
:param team: the name of the team where the users will be added :param user_role_pairs: a list of (user_id_v2, role_id_or_alias) to know users and capacity to add to team. :return: a dictionary of the json response from the call. """ add_to_team = [] #import pdb; pdb.set_trace() for user_id, role in user_role_pairs: add_to_team.append({"user_id_v2": user_id, "team_role": role}) version = self.current_team_version(team) payload = { "version": version, "add_team_membership": add_to_team } self._do_put("rbac/teams/{team}".format(team=team), payload) class CSVUser: """ CSVUser represents user information as present in each row of the sample csv. """ def __init__(self, email="", team="", orgrole="", teamrole=""): self.email = email self.team = team self.organization_role = orgrole # NOTE: These values are not yet stable and might change. self.team_role = teamrole # NOTE: These values are not yet stable and might change. def main(): api_v4 = SLAPIClient(SHIFTLEFT_ACCESS_TOKEN, SHIFTLEFT_ORG_ID) teams = api_v4.list_teams() users = api_v4.list_users() add_to_teams = {} with open("rbac.csv", "r") as csv_file: csv_reader = csv.DictReader(csv_file) for row in csv_reader: # Read one user from CSV user = CSVUser(**row) # Create the team this user should belong to if it doesn't exist if user.team in teams: print("Team {} exists for this organization.".format(user.team)) else: print("Team '{}' does not exist for this organization;" " creating it and assigning '{}' to it.".format(user.team, user.email)) teams.append(api_v4.create_team(user.team)) # Assign the user organization wide role. user_id = users.id_for_email(user.email) if user.organization_role.strip() != '': api_v4.assign_user_organization_role(user_id, user.organization_role) print("Updated organization role for {email} to {org_role}.".format(email=user.email, org_role=user.organization_role)) # Queue the users to add for each team to economize requests team_id = teams.get_id(user.team) if user.team not in add_to_teams: add_to_teams[team_id] = [] add_to_teams[team_id].append((user_id, user.team_role)) # Process team membership changes for team, info in add_to_teams.items(): api_v4.assign_users_to_teams(team, info) print("Updated team membership for '{}'".format(teams.get_team_name(team))) for user_id, team_role in info: u = users.user_for_id(user_id) # is_member works because users info was obtained before making any changes so it depicts initial state. action = "Updated team membership of" if u.is_member(team) else "Added membership of" print('* {action} {email} with role {teamrole}.'.format(action=action, email=u.email, teamrole=team_role)) if __name__ == "__main__": """ This is executed when run from the command line """ main()
[]
[]
[ "SHIFTLEFT_ACCESS_TOKEN", "SHIFTLEFT_ORG_ID" ]
[]
["SHIFTLEFT_ACCESS_TOKEN", "SHIFTLEFT_ORG_ID"]
python
2
0
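The record above reads its two secrets, SHIFTLEFT_ACCESS_TOKEN and SHIFTLEFT_ORG_ID, as module-level values and annotates them in the constargjson column. As a minimal sketch of how a consumer of this dataset might sanity-check one row, the snippet below (written in Go, the language of most records here, with hard-coded stand-ins for the content and constargjson fields) verifies that every annotated name actually occurs in the content string.

```go
// Sketch only: cross-check one record of the dataset. The field values
// below are illustrative stand-ins, not real dataset I/O.
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	// Stand-ins for a single record's fields.
	content := `api_v4 = SLAPIClient(SHIFTLEFT_ACCESS_TOKEN, SHIFTLEFT_ORG_ID)`
	constargjson := `["SHIFTLEFT_ACCESS_TOKEN", "SHIFTLEFT_ORG_ID"]`

	var names []string
	if err := json.Unmarshal([]byte(constargjson), &names); err != nil {
		fmt.Println("bad constargjson:", err)
		return
	}
	// Every annotated environment-variable name should appear in the content.
	for _, name := range names {
		if strings.Contains(content, name) {
			fmt.Printf("%s: found in content\n", name)
		} else {
			fmt.Printf("%s: MISSING from content\n", name)
		}
	}
}
```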
enterprise/dev/ci/internal/buildkite/buildkite.go
// Package buildkite defines data types that reflect Buildkite's YAML pipeline format. // // Usage: // // pipeline := buildkite.Pipeline{} // pipeline.AddStep("check_mark", buildkite.Cmd("./dev/check/all.sh")) package buildkite import ( "encoding/json" "fmt" "io" "math/rand" "os" "strconv" "strings" "time" "github.com/ghodss/yaml" "github.com/grafana/regexp" "github.com/sourcegraph/sourcegraph/lib/errors" ) type featureFlags struct { // StatelessBuild triggers a stateless build by overriding the default queue to send the build on the stateles // agents and forces a MainDryRun type build to avoid impacting normal builds. // // It is meant to test the stateless builds without any side effects. StatelessBuild bool } // FeatureFlags are for experimenting with CI pipeline features. Use sparingly! var FeatureFlags = featureFlags{ StatelessBuild: os.Getenv("CI_FEATURE_FLAG_STATELESS") == "true" || // Always process retries on stateless agents. // TODO: remove when we switch over entirely to stateless agents os.Getenv("BUILDKITE_REBUILT_FROM_BUILD_NUMBER") != "" || // Roll out to 75% of builds rand.NewSource(time.Now().UnixNano()).Int63()%100 < 75, } type Pipeline struct { Env map[string]string `json:"env,omitempty"` Steps []interface{} `json:"steps"` Notify []slackNotifier `json:"notify,omitempty"` // Group, if provided, indicates this Pipeline is actually a group of steps. // See: https://buildkite.com/docs/pipelines/group-step Group // BeforeEveryStepOpts are e.g. commands that are run before every AddStep, similar to // Plugins. BeforeEveryStepOpts []StepOpt `json:"-"` // AfterEveryStepOpts are e.g. that are run at the end of every AddStep, helpful for // post-processing AfterEveryStepOpts []StepOpt `json:"-"` } var nonAlphaNumeric = regexp.MustCompile("[^a-zA-Z0-9]+") func (p *Pipeline) EnsureUniqueKeys() error { occurences := map[string]int{} for _, step := range p.Steps { if s, ok := step.(*Step); ok { if s.Key == "" { s.Key = nonAlphaNumeric.ReplaceAllString(s.Label, "") } occurences[s.Key] += 1 } } for k, count := range occurences { if count > 1 { return errors.Newf("non unique key on step with key %q", k) } } return nil } type Group struct { Group string `json:"group,omitempty"` Key string `json:"key,omitempty"` } type BuildOptions struct { Message string `json:"message,omitempty"` Commit string `json:"commit,omitempty"` Branch string `json:"branch,omitempty"` MetaData map[string]interface{} `json:"meta_data,omitempty"` Env map[string]string `json:"env,omitempty"` } func (bo BuildOptions) MarshalJSON() ([]byte, error) { type buildOptions BuildOptions boCopy := buildOptions(bo) // Buildkite pipeline upload command will interpolate if it sees a $var // which can cause the pipeline generation to fail because that // variable do not exists. // By replacing $ into $$ in the commit messages we can prevent those // failures to happen. // // https://buildkite.com/docs/agent/v3/cli-pipeline#environment-variable-substitution boCopy.Message = strings.ReplaceAll(boCopy.Message, "$", `$$`) return json.Marshal(boCopy) } func (bo BuildOptions) MarshalYAML() ([]byte, error) { type buildOptions BuildOptions boCopy := buildOptions(bo) // Buildkite pipeline upload command will interpolate if it sees a $var // which can cause the pipeline generation to fail because that // variable do not exists. // By replacing $ into $$ in the commit messages we can prevent those // failures to happen. 
// // https://buildkite.com/docs/agent/v3/cli-pipeline#environment-variable-substitution boCopy.Message = strings.ReplaceAll(boCopy.Message, "$", `$$`) return yaml.Marshal(boCopy) } // Matches Buildkite pipeline JSON schema: // https://github.com/buildkite/pipeline-schema/blob/master/schema.json type Step struct { Label string `json:"label"` Key string `json:"key,omitempty"` Command []string `json:"command,omitempty"` DependsOn []string `json:"depends_on,omitempty"` AllowDependencyFailure bool `json:"allow_dependency_failure,omitempty"` TimeoutInMinutes string `json:"timeout_in_minutes,omitempty"` Trigger string `json:"trigger,omitempty"` Async bool `json:"async,omitempty"` Build *BuildOptions `json:"build,omitempty"` Env map[string]string `json:"env,omitempty"` Plugins []map[string]interface{} `json:"plugins,omitempty"` ArtifactPaths string `json:"artifact_paths,omitempty"` ConcurrencyGroup string `json:"concurrency_group,omitempty"` Concurrency int `json:"concurrency,omitempty"` Parallelism int `json:"parallelism,omitempty"` Skip string `json:"skip,omitempty"` SoftFail []softFailExitStatus `json:"soft_fail,omitempty"` Retry *RetryOptions `json:"retry,omitempty"` Agents map[string]string `json:"agents,omitempty"` If string `json:"if,omitempty"` } type RetryOptions struct { Automatic *AutomaticRetryOptions `json:"automatic,omitempty"` Manual *ManualRetryOptions `json:"manual,omitempty"` } type AutomaticRetryOptions struct { Limit int `json:"limit,omitempty"` } type ManualRetryOptions struct { Allowed bool `json:"allowed"` Reason string `json:"reason,omitempty"` } func (p *Pipeline) AddStep(label string, opts ...StepOpt) { step := &Step{ Label: label, Env: make(map[string]string), Agents: make(map[string]string), Plugins: make([]map[string]interface{}, 0), } for _, opt := range p.BeforeEveryStepOpts { opt(step) } for _, opt := range opts { opt(step) } for _, opt := range p.AfterEveryStepOpts { opt(step) } p.Steps = append(p.Steps, step) } func (p *Pipeline) AddTrigger(label string, pipeline string, opts ...StepOpt) { step := &Step{ Label: label, Trigger: pipeline, } for _, opt := range opts { opt(step) } p.Steps = append(p.Steps, step) } type slackNotifier struct { Slack slackChannelsNotification `json:"slack"` If string `json:"if"` } type slackChannelsNotification struct { Channels []string `json:"channels"` Message string `json:"message"` } // AddFailureSlackNotify configures a notify block that updates the given channel if the // build fails. func (p *Pipeline) AddFailureSlackNotify(channel string, mentionUserID string, err error) { n := slackChannelsNotification{ Channels: []string{channel}, } if mentionUserID != "" { n.Message = fmt.Sprintf("cc <@%s>", mentionUserID) } else if err != nil { n.Message = err.Error() } p.Notify = append(p.Notify, slackNotifier{ Slack: n, If: `build.state == "failed"`, }) } func (p *Pipeline) WriteJSONTo(w io.Writer) (int64, error) { output, err := json.MarshalIndent(p, "", " ") if err != nil { return 0, err } n, err := w.Write(output) return int64(n), err } func (p *Pipeline) WriteYAMLTo(w io.Writer) (int64, error) { output, err := yaml.Marshal(p) if err != nil { return 0, err } n, err := w.Write(output) return int64(n), err } type StepOpt func(step *Step) // RawCmd adds a command step without any instrumentation. This is useful to // test the instrumentation itself. 
func RawCmd(command string) StepOpt { return func(step *Step) { step.Command = append(step.Command, command) } } func tracedCmd(command string) string { // ./tr is a symbolic link created by the .buildkite/hooks/post-checkout hook. // Its purpose is to keep the command excerpt in the buildkite UI clear enough to // see the underlying command even if prefixed by the tracing script. return fmt.Sprintf("./tr %s", command) } // Cmd adds a command step with added instrumentation for testing purposes. func Cmd(command string) StepOpt { return RawCmd(tracedCmd(command)) } type AnnotationType string const ( // We opt not to allow 'success' and 'info' type annotations for now to encourage // steps to only provide annotations that help debug failure cases. In the future // we can revisit this if there is a need. // AnnotationTypeSuccess AnnotationType = "success" // AnnotationTypeInfo AnnotationType = "info" AnnotationTypeWarning AnnotationType = "warning" AnnotationTypeError AnnotationType = "error" ) type AnnotationOpts struct { // Type indicates the type annotations from this command should be uploaded as. // Commands that upload annotations of different levels will create separate // annotations. // // If no annotation type is provided, the annotation is created as an error annotation. Type AnnotationType // IncludeNames indicates whether the file names of found annotations should be // included in the Buildkite annotation as section titles. For example, if enabled the // contents of the following files: // // - './annotations/Job log.md' // - './annotations/shfmt' // // Will be included in the annotation with section titles 'Job log' and 'shfmt'. IncludeNames bool // MultiJobContext indicates that this annotation will accept input from multiple jobs // under this context name. MultiJobContext string } type TestReportOpts struct { // TestSuiteKeyVariableName is the name of the variable in gcloud secrets that holds // the test suite key to upload to. // // TODO: This is not finalized, see https://github.com/sourcegraph/sourcegraph/issues/31971 TestSuiteKeyVariableName string } // AnnotatedCmdOpts declares options for AnnotatedCmd. type AnnotatedCmdOpts struct { // AnnotationOpts configures how AnnotatedCmd picks up files left in the // `./annotations` directory and appends them to a shared annotation for this job. // If nil, AnnotatedCmd will not look for annotations. // // To get started, generate an annotation file when you want to publish an annotation, // typically on error, in the './annotations' directory: // // if [ $EXIT_CODE -ne 0 ]; then // echo -e "$OUT" >./annotations/shfmt // echo "^^^ +++" // fi // // Make sure it has a sufficiently unique name, so as to avoid conflicts if multiple // annotations are generated in a single job. // // Annotations can be formatted based on file extensions, for example: // // - './annotations/Job log.md' will have its contents appended as markdown // - './annotations/shfmt' will have its contents formatted as terminal output // // Please be considerate about what generating annotations, since they can cause a lot // of visual clutter in the Buildkite UI. When creating annotations: // // - keep them concise and short, to minimze the space they take up // - ensure they are actionable: an annotation should enable you, the CI user, to // know where to go and what to do next. // // DO NOT use 'buildkite-agent annotate' or 'annotate.sh' directly in scripts. 
Annotations *AnnotationOpts // TestReports configures how AnnotatedCmd picks up files left in the `./test-reports` // directory and uploads them to Buildkite Analytics. If nil, AnnotatedCmd will not // look for test reports. // // To get started, generate a JUnit XML report for your tests in the './test-reports' // directory. Make sure it has a sufficiently unique name, so as to avoid conflicts if // multiple reports are generated in a single job. Consult your language's test // tooling for more details. // // Use TestReportOpts to configure where to publish reports too. For more details, // see https://buildkite.com/organizations/sourcegraph/analytics. // // DO NOT post directly to the Buildkite API or use 'upload-test-report.sh' directly // in scripts. TestReports *TestReportOpts } // AnnotatedCmd runs the given command and picks up annotations generated by the command: // // - annotations in `./annotations` // - test reports in `./test-reports` // // To learn more, see the AnnotatedCmdOpts docstrings. func AnnotatedCmd(command string, opts AnnotatedCmdOpts) StepOpt { // Options for annotations var annotateOpts string if opts.Annotations != nil { if opts.Annotations.Type == "" { annotateOpts += fmt.Sprintf(" -t %s", AnnotationTypeError) } else { annotateOpts += fmt.Sprintf(" -t %s", opts.Annotations.Type) } if opts.Annotations.MultiJobContext != "" { annotateOpts += fmt.Sprintf(" -c %q", opts.Annotations.MultiJobContext) } annotateOpts = fmt.Sprintf("%v %s", opts.Annotations.IncludeNames, strings.TrimSpace(annotateOpts)) } // Options for test reports var testReportOpts string if opts.TestReports != nil { testReportOpts += opts.TestReports.TestSuiteKeyVariableName } // ./an is a symbolic link created by the .buildkite/hooks/post-checkout hook. // Its purpose is to keep the command excerpt in the buildkite UI clear enough to // see the underlying command even if prefixed by the annotation scraper. annotatedCmd := fmt.Sprintf("./an %q", tracedCmd(command)) return flattenStepOpts(RawCmd(annotatedCmd), Env("ANNOTATE_OPTS", annotateOpts), Env("TEST_REPORT_OPTS", testReportOpts)) } func Async(async bool) StepOpt { return func(step *Step) { step.Async = async } } func Build(buildOptions BuildOptions) StepOpt { return func(step *Step) { step.Build = &buildOptions } } func ConcurrencyGroup(group string) StepOpt { return func(step *Step) { step.ConcurrencyGroup = group } } func Concurrency(limit int) StepOpt { return func(step *Step) { step.Concurrency = limit } } // Parallelism tells Buildkite to run this job multiple time in parallel, // which is very useful to QA a flake fix. func Parallelism(count int) StepOpt { return func(step *Step) { step.Parallelism = count } } func Env(name, value string) StepOpt { return func(step *Step) { step.Env[name] = value } } func Skip(reason string) StepOpt { return func(step *Step) { step.Skip = reason } } type softFailExitStatus struct { ExitStatus int `json:"exit_status"` } // SoftFail indicates the specified exit codes should trigger a soft fail. // https://buildkite.com/docs/pipelines/command-step#command-step-attributes // This function also adds a specific env var named SOFT_FAIL_EXIT_CODES, enabling // to get exit codes from the scripts until https://github.com/sourcegraph/sourcegraph/issues/27264 // is fixed. 
func SoftFail(exitCodes ...int) StepOpt { return func(step *Step) { var codes []string for _, code := range exitCodes { codes = append(codes, strconv.Itoa(code)) step.SoftFail = append(step.SoftFail, softFailExitStatus{ ExitStatus: code, }) } // https://github.com/sourcegraph/sourcegraph/issues/27264 step.Env["SOFT_FAIL_EXIT_CODES"] = strings.Join(codes, " ") } } // AutomaticRetry enables automatic retry for the step with the number of times this job can be retried. // The maximum value this can be set to is 10. // Docs: https://buildkite.com/docs/pipelines/command-step#automatic-retry-attributes func AutomaticRetry(limit int) StepOpt { return func(step *Step) { step.Retry = &RetryOptions{ Automatic: &AutomaticRetryOptions{ Limit: limit, }, } } } // DisableManualRetry disables manual retry for the step. The reason string passed // will be displayed in a tooltip on the Retry button in the Buildkite interface. // Docs: https://buildkite.com/docs/pipelines/command-step#manual-retry-attributes func DisableManualRetry(reason string) StepOpt { return func(step *Step) { step.Retry = &RetryOptions{ Manual: &ManualRetryOptions{ Allowed: false, Reason: reason, }, } } } func ArtifactPaths(paths ...string) StepOpt { return func(step *Step) { step.ArtifactPaths = strings.Join(paths, ";") } } func Agent(key, value string) StepOpt { return func(step *Step) { step.Agents[key] = value } } func (p *Pipeline) AddWait() { p.Steps = append(p.Steps, "wait") } func Key(key string) StepOpt { return func(step *Step) { step.Key = key } } func Plugin(name string, plugin interface{}) StepOpt { return func(step *Step) { wrapper := map[string]interface{}{} wrapper[name] = plugin step.Plugins = append(step.Plugins, wrapper) } } func DependsOn(dependency ...string) StepOpt { return func(step *Step) { step.DependsOn = append(step.DependsOn, dependency...) } } // IfReadyForReview causes this step to only be added if this build is associated with a // pull request that is also ready for review. func IfReadyForReview() StepOpt { return func(step *Step) { step.If = "build.pull_request.id != null && !build.pull_request.draft" } } // AllowDependencyFailure enables `allow_dependency_failure` attribute on the step. // Such a step will run when the depended-on jobs complete, fail or even did not run. // See extended docs here: https://buildkite.com/docs/pipelines/dependencies#allowing-dependency-failures func AllowDependencyFailure() StepOpt { return func(step *Step) { step.AllowDependencyFailure = true } } // flattenStepOpts conveniently turns a list of StepOpt into a single StepOpt. // It is useful to build helpers that can then be used when defining operations, // when the helper wraps multiple stepOpts at once. func flattenStepOpts(stepOpts ...StepOpt) StepOpt { return func(step *Step) { for _, stepOpt := range stepOpts { stepOpt(step) } } }
[ "\"CI_FEATURE_FLAG_STATELESS\"", "\"BUILDKITE_REBUILT_FROM_BUILD_NUMBER\"" ]
[]
[ "BUILDKITE_REBUILT_FROM_BUILD_NUMBER", "CI_FEATURE_FLAG_STATELESS" ]
[]
["BUILDKITE_REBUILT_FROM_BUILD_NUMBER", "CI_FEATURE_FLAG_STATELESS"]
go
2
0
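The environment lookups annotated for this record gate a feature flag: CI_FEATURE_FLAG_STATELESS opts a build in explicitly, BUILDKITE_REBUILT_FROM_BUILD_NUMBER forces retried builds onto the new agents, and a time-seeded random draw rolls the flag out to 75% of the remaining builds. The sketch below isolates that pattern in a standalone program; it is an illustration of the decision logic, not the Sourcegraph package itself.

```go
// Standalone illustration of the three-way feature-flag decision seen in
// the record: explicit opt-in, forced-on for retries, then a 75% rollout.
package main

import (
	"fmt"
	"math/rand"
	"os"
	"time"
)

// statelessBuild mirrors the decision order from the record.
func statelessBuild() bool {
	if os.Getenv("CI_FEATURE_FLAG_STATELESS") == "true" {
		return true // explicit opt-in
	}
	if os.Getenv("BUILDKITE_REBUILT_FROM_BUILD_NUMBER") != "" {
		return true // retried builds always use the new agents
	}
	// Percentage rollout, seeded from the current time.
	return rand.New(rand.NewSource(time.Now().UnixNano())).Int63()%100 < 75
}

func main() {
	fmt.Println("stateless build:", statelessBuild())
}
```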
parties-and-accounts/get-accounts-with-Vega-API-client.go
package main import ( "bytes" "encoding/json" "fmt" "io/ioutil" "net/http" "os" "strings" "github.com/vegaprotocol/api-clients/go/generated/code.vegaprotocol.io/vega/proto/api" "code.vegaprotocol.io/go-wallet/wallet" "golang.org/x/net/context" "google.golang.org/grpc" ) type Req struct { Wallet string `json:"wallet"` Passphrase string `json:"passphrase"` } type Token struct { Token string `json:"token"` } type Keypair struct { Keys []struct { Pub string `json:"pub"` Algo string `json:"algo"` Tainted bool `json:"tainted"` Meta interface{} `json:"meta"` } `json:"keys"` } func checkWalletURL(url string) string { suffixs := []string{"/api/v1/", "/api/v1", "/"} for _, suffix := range suffixs { if strings.HasSuffix(url, suffix) { fmt.Printf("There's no need to add %s to WALLETSERVER_URL.", suffix) fmt.Printf("Removing it.") url = string(url[:len(url)-len(suffix)]) } } return url } func main() { nodeURLGrpc := os.Getenv("NODE_URL_GRPC") if len(nodeURLGrpc) == 0 { panic("NODE_URL_GRPC is null or empty") } walletserverURL := os.Getenv("WALLETSERVER_URL") if len(walletserverURL) == 0 { panic("WALLETSERVER_URL is null or empty") } walletName := os.Getenv("WALLET_NAME") if len(walletName) == 0 { panic("WALLET_NAME is null or empty") } walletPassword := os.Getenv("WALLET_PASSPHRASE") if len(walletPassword) == 0 { panic("WALLET_PASSPHRASE is null or empty") } walletserverURL = checkWalletURL(walletserverURL) conn, err := grpc.Dial(nodeURLGrpc, grpc.WithInsecure()) if err != nil { panic(err) } defer conn.Close() dataClient := api.NewTradingDataServiceClient(conn) // Create new wallet createNewWallet := false var url string if createNewWallet { url = walletserverURL + "/api/v1/wallets" } else { url = walletserverURL + "/api/v1/auth/token" } // Make request to create new wallet or log in to existing wallet creationReq := &wallet.CreateLoginWalletRequest{Wallet: walletName, Passphrase: walletPassword} payload, err := json.Marshal(creationReq) if err != nil { panic(err) } req, err := http.NewRequest(http.MethodPost, url, bytes.NewBuffer(payload)) client := &http.Client{} resp, err := client.Do(req) if err != nil { panic(err) } defer resp.Body.Close() fmt.Println(url, " returns response Status:", resp.Status) fmt.Println("response Headers:", resp.Header) body, err := ioutil.ReadAll(resp.Body) if err != nil { panic(err) } fmt.Println("response Body:", string(body)) var token wallet.TokenResponse json.Unmarshal([]byte(body), &token) fmt.Println(token.Token) // List existing keypairs url = walletserverURL + "/api/v1/keys" req, err = http.NewRequest(http.MethodGet, url, nil) req.Header.Set("Authorization", "Bearer "+token.Token) client = &http.Client{} resp, err = client.Do(req) if err != nil { panic(err) } defer resp.Body.Close() body, err = ioutil.ReadAll(resp.Body) if err != nil { panic(err) } fmt.Println("response Body:", string(body)) var keypair Keypair json.Unmarshal([]byte(body), &keypair) if len(keypair.Keys) == 0 { panic("No keys!") } pubkey := keypair.Keys[0].Pub fmt.Println("pubkey: ", pubkey) // Get market marketRequest := api.MarketsRequest{} markets, err := dataClient.Markets(context.Background(), &marketRequest) if err != nil { panic(err) } marketID := markets.Markets[0].Id // __get_accounts_by_market: // Request a list of accounts for a market on a Vega network accountsReq := api.MarketAccountsRequest{MarketId: marketID} acconutsResp, err := dataClient.MarketAccounts(context.Background(), &accountsReq) if err != nil { panic(err) } fmt.Printf("Market accounts: %v\n", acconutsResp) // 
:get_accounts_by_market__ // __get_accounts_by_party: // Request a list of accounts for a party (pubkey) on a Vega network partyReq := api.PartyAccountsRequest{PartyId: pubkey} partyResp, err := dataClient.PartyAccounts(context.Background(), &partyReq) if err != nil { panic(err) } fmt.Printf("Party accounts: %v\n", partyResp) // :get_accounts_by_party__ // __get_positions_by_party: // Request a list of positions for a party (pubkey) on a Vega network partyPosReq := api.PositionsByPartyRequest{PartyId: pubkey} partyPosResp, err := dataClient.PositionsByParty(context.Background(), &partyPosReq) if err != nil { panic(err) } fmt.Printf("Party positions: %v\n", partyPosResp) // :get_positions_by_party__ }
[ "\"NODE_URL_GRPC\"", "\"WALLETSERVER_URL\"", "\"WALLET_NAME\"", "\"WALLET_PASSPHRASE\"" ]
[]
[ "NODE_URL_GRPC", "WALLETSERVER_URL", "WALLET_NAME", "WALLET_PASSPHRASE" ]
[]
["NODE_URL_GRPC", "WALLETSERVER_URL", "WALLET_NAME", "WALLET_PASSPHRASE"]
go
4
0
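This record panics separately for each of NODE_URL_GRPC, WALLETSERVER_URL, WALLET_NAME and WALLET_PASSPHRASE when the variable is empty. A common alternative is to collect all required variables in one pass and report every missing name at once; the sketch below shows that approach. The helper name requireEnv is hypothetical and not part of the sample.

```go
// Sketch: gather required environment variables and report all missing
// ones together, instead of panicking on the first empty value.
package main

import (
	"fmt"
	"os"
)

// requireEnv returns the values of the given variables and an error
// naming every variable that is unset or empty.
func requireEnv(names ...string) (map[string]string, error) {
	values := make(map[string]string, len(names))
	var missing []string
	for _, name := range names {
		v := os.Getenv(name)
		if v == "" {
			missing = append(missing, name)
			continue
		}
		values[name] = v
	}
	if len(missing) > 0 {
		return nil, fmt.Errorf("missing required environment variables: %v", missing)
	}
	return values, nil
}

func main() {
	vals, err := requireEnv("NODE_URL_GRPC", "WALLETSERVER_URL", "WALLET_NAME", "WALLET_PASSPHRASE")
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	fmt.Println("wallet server:", vals["WALLETSERVER_URL"])
}
```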
ros/node.go
package ros import ( "fmt" "net" "net/http" "os" "os/signal" "reflect" "strconv" "sync" "time" "github.com/cnord/rosgo/xmlrpc" ) const ( ApiStatusError = -1 ApiStatusFailure = 0 ApiStatusSuccess = 1 ) // *defaultNode implements Node interface // a defaultNode instance must be accessed in user goroutine. type defaultNode struct { qualifiedName string masterUri string xmlrpcUri string xmlrpcListener net.Listener xmlrpcHandler *xmlrpc.Handler subscribers map[string]*defaultSubscriber publishers map[string]*defaultPublisher servers map[string]*defaultServiceServer jobChan chan func() interruptChan chan os.Signal logger Logger ok bool okMutex sync.RWMutex mapMutex sync.Mutex waitGroup sync.WaitGroup } func listenRandomPort(address string) (net.Listener, error) { // Let OS give us one of ephemeral port addr := fmt.Sprintf("%s:0", address) if listener, err := net.Listen("tcp", addr); err == nil { return listener, nil } else { return nil, fmt.Errorf("listenRandomPort cannot found a free port: %s", err.Error()) } } func newDefaultNode(name string) *defaultNode { node := new(defaultNode) node.qualifiedName = name node.subscribers = make(map[string]*defaultSubscriber) node.publishers = make(map[string]*defaultPublisher) node.servers = make(map[string]*defaultServiceServer) node.interruptChan = make(chan os.Signal) node.ok = true logger := NewDefaultLogger() node.logger = logger // Install signal handler signal.Notify(node.interruptChan, os.Interrupt) go func() { <-node.interruptChan logger.Info("Interrupted") node.okMutex.Lock() node.ok = false node.okMutex.Unlock() }() node.jobChan = make(chan func(), 100) node.masterUri = os.Getenv("ROS_MASTER_URI") logger.Debugf("Master URI = %s", node.masterUri) listener, err := listenRandomPort("127.0.0.1") if err != nil { logger.Fatal(err) } node.xmlrpcUri = fmt.Sprintf("http://%s", listener.Addr().String()) node.xmlrpcListener = listener m := map[string]xmlrpc.Method{ "getBusStats": func(callerId string) (interface{}, error) { return node.getBusStats(callerId) }, "getBusInfo": func(callerId string) (interface{}, error) { return node.getBusInfo(callerId) }, "getMasterUri": func(callerId string) (interface{}, error) { return node.getMasterUri(callerId) }, "shutdown": func(callerId string, msg string) (interface{}, error) { return node.shutdown(callerId, msg) }, "getPid": func(callerId string) (interface{}, error) { return node.getPid(callerId) }, "getSubscriptions": func(callerId string) (interface{}, error) { return node.getSubscriptions(callerId) }, "getPublications": func(callerId string) (interface{}, error) { return node.getPublications(callerId) }, "paramUpdate": func(callerId string, key string, value interface{}) (interface{}, error) { return node.paramUpdate(callerId, key, value) }, "publisherUpdate": func(callerId string, topic string, publishers []interface{}) (interface{}, error) { return node.publisherUpdate(callerId, topic, publishers) }, "requestTopic": func(callerId string, topic string, protocols []interface{}) (interface{}, error) { return node.requestTopic(callerId, topic, protocols) }, } node.xmlrpcHandler = xmlrpc.NewHandler(m) go http.Serve(node.xmlrpcListener, node.xmlrpcHandler) logger.Debugf("Started %s", node.qualifiedName) return node } func (node *defaultNode) OK() bool { node.okMutex.RLock() ok := node.ok node.okMutex.RUnlock() return ok } func (node *defaultNode) getBusStats(callerId string) (interface{}, error) { return buildRosApiResult(-1, "Not implemented", 0), nil } func (node *defaultNode) getBusInfo(callerId string) 
(interface{}, error) { return buildRosApiResult(-1, "Not implemeted", 0), nil } func (node *defaultNode) getMasterUri(callerId string) (interface{}, error) { return buildRosApiResult(0, "Success", node.masterUri), nil } func (node *defaultNode) shutdown(callerId string, msg string) (interface{}, error) { node.okMutex.Lock() node.ok = false node.okMutex.Unlock() return buildRosApiResult(0, "Success", 0), nil } func (node *defaultNode) getPid(callerId string) (interface{}, error) { return buildRosApiResult(0, "Success", os.Getpid()), nil } func (node *defaultNode) getSubscriptions(callerId string) (interface{}, error) { result := []interface{}{} node.mapMutex.Lock() for t, s := range node.subscribers { pair := []interface{}{t, s.msgType.Name()} result = append(result, pair) } node.mapMutex.Unlock() return buildRosApiResult(0, "Success", result), nil } func (node *defaultNode) getPublications(callerId string) (interface{}, error) { result := []interface{}{} node.mapMutex.Lock() for t, p := range node.publishers { pair := []interface{}{t, p.msgType.Name()} result = append(result, pair) } node.mapMutex.Unlock() return buildRosApiResult(0, "Success", result), nil } func (node *defaultNode) paramUpdate(callerId string, key string, value interface{}) (interface{}, error) { return buildRosApiResult(-1, "Not implemented", 0), nil } func (node *defaultNode) publisherUpdate(callerId string, topic string, publishers []interface{}) (interface{}, error) { node.logger.Debug("Slave API publisherUpdate() called.") var code int32 var message string node.mapMutex.Lock() sub, ok := node.subscribers[topic] node.mapMutex.Unlock() if !ok { node.logger.Debug("publisherUpdate() called without subscribing topic.") code = 0 message = "No such topic" } else { pubUris := make([]string, len(publishers)) for i, uri := range publishers { pubUris[i] = uri.(string) } sub.pubListChan <- pubUris code = 1 message = "Success" } return buildRosApiResult(code, message, 0), nil } func (node *defaultNode) requestTopic(callerId string, topic string, protocols []interface{}) (interface{}, error) { node.logger.Debugf("Slave API requestTopic(%s, %s, ...) 
called.", callerId, topic) var code int32 var message string var value interface{} node.mapMutex.Lock() pub, ok := node.publishers[topic] node.mapMutex.Unlock() if !ok { node.logger.Debug("requestTopic() called with not publishing topic.") code = 0 message = "No such topic" value = nil } else { selectedProtocol := make([]interface{}, 0) for _, v := range protocols { protocolParams := v.([]interface{}) protocolName := protocolParams[0].(string) if protocolName == "TCPROS" { node.logger.Debug("TCPROS requested") selectedProtocol = append(selectedProtocol, "TCPROS") host, portStr := pub.hostAndPort() p, err := strconv.ParseInt(portStr, 10, 32) if err != nil { return nil, err } port := int(p) selectedProtocol = append(selectedProtocol, host) selectedProtocol = append(selectedProtocol, port) break } } node.logger.Debug(selectedProtocol) code = 1 message = "Success" value = selectedProtocol } return buildRosApiResult(code, message, value), nil } func (node *defaultNode) NewPublisher(topic string, msgType MessageType) Publisher { return node.NewPublisherWithCallbacks(topic, msgType, nil, nil) } func (node *defaultNode) NewPublisherWithCallbacks(topic string, msgType MessageType, connectCallback, disconnectCallback func(SingleSubscriberPublisher)) Publisher { node.mapMutex.Lock() pub, ok := node.publishers[topic] node.mapMutex.Unlock() logger := node.logger if !ok { _, err := callRosApi(node.masterUri, "registerPublisher", node.qualifiedName, topic, msgType.Name(), node.xmlrpcUri) if err != nil { logger.Fatalf("Failed to call registerPublisher(): %s", err) } pub = newDefaultPublisher(logger, node.qualifiedName, node.xmlrpcUri, node.masterUri, topic, msgType, connectCallback, disconnectCallback) node.mapMutex.Lock() node.publishers[topic] = pub node.mapMutex.Unlock() go pub.start(&node.waitGroup) } return pub } func (node *defaultNode) NewSubscriber(topic string, msgType MessageType, callback interface{}) Subscriber { node.mapMutex.Lock() sub, ok := node.subscribers[topic] node.mapMutex.Unlock() logger := node.logger if !ok { node.logger.Debug("Call Master API registerSubscriber") result, err := callRosApi(node.masterUri, "registerSubscriber", node.qualifiedName, topic, msgType.Name(), node.xmlrpcUri) if err != nil { logger.Fatalf("Failed to call registerSubscriber() for %s.", err) } list, ok := result.([]interface{}) if !ok { logger.Fatalf("result is not []string but %s.", reflect.TypeOf(result).String()) } var publishers []string for _, item := range list { s, ok := item.(string) if !ok { logger.Fatal("Publisher list contains no string object") } publishers = append(publishers, s) } logger.Debugf("Publisher URI list: ", publishers) sub = newDefaultSubscriber(topic, msgType, callback) node.mapMutex.Lock() node.subscribers[topic] = sub node.mapMutex.Unlock() logger.Debugf("Start subscriber goroutine for topic '%s'", sub.topic) go sub.start(&node.waitGroup, node.masterUri, node.qualifiedName, node.xmlrpcUri, node.jobChan, logger) logger.Debugf("Done") sub.pubListChan <- publishers logger.Debugf("Update publisher list for topic '%s'", topic) } else { sub.callbacks = append(sub.callbacks, callback) } return sub } func (node *defaultNode) NewServiceClient(service string, srvType ServiceType) ServiceClient { client := newDefaultServiceClient(node.logger, node.qualifiedName, node.masterUri, service, srvType) return client } func (node *defaultNode) NewServiceServer(service string, srvType ServiceType, handler interface{}) ServiceServer { node.mapMutex.Lock() server, ok := node.servers[service] 
node.mapMutex.Unlock() if ok { server.Shutdown() } server = newDefaultServiceServer(node, service, srvType, handler) if server == nil { return nil } node.mapMutex.Lock() node.servers[service] = server node.mapMutex.Unlock() return server } func (node *defaultNode) SpinOnce() { timeoutChan := time.After(10 * time.Millisecond) select { case job := <-node.jobChan: go job() case <-timeoutChan: break } } func (node *defaultNode) Spin() { logger := node.logger for node.OK() { timeoutChan := time.After(10 * time.Millisecond) select { case job := <-node.jobChan: logger.Debug("Execute job") go job() case <-timeoutChan: break } } } func (node *defaultNode) Shutdown() { node.logger.Debug("Shutting node down") node.okMutex.Lock() node.ok = false node.okMutex.Unlock() node.mapMutex.Lock() subscribers := make(map[string]*defaultSubscriber) for k, v := range node.subscribers { subscribers[k] = v } publishers := make(map[string]*defaultPublisher) for k, v := range node.publishers { publishers[k] = v } servers := make(map[string]*defaultServiceServer) for k, v := range node.servers { servers[k] = v } node.mapMutex.Unlock() node.logger.Debug("Shutdown subscribers") for _, s := range subscribers { s.Shutdown() } node.logger.Debug("Shutdown subscribers...done") node.logger.Debug("Shutdown publishers") for _, p := range publishers { p.Shutdown() } node.logger.Debug("Shutdown publishers...done") node.logger.Debug("Shutdown servers") for _, s := range servers { s.Shutdown() } node.logger.Debug("Shutdown servers...done") node.logger.Debug("Close XMLRPC lisetner") node.xmlrpcListener.Close() node.logger.Debug("Close XMLRPC done") node.logger.Debug("Wait XMLRPC server shutdown") node.xmlrpcHandler.WaitForShutdown() node.logger.Debug("Wait XMLRPC server shutdown...Done") node.logger.Debug("Wait all goroutines") node.waitGroup.Wait() node.logger.Debug("Wait all goroutines...Done") node.logger.Debug("Shutting node down completed") return } func (node *defaultNode) GetParam(key string) (interface{}, error) { return callRosApi(node.masterUri, "getParam", node.qualifiedName, key) } func (node *defaultNode) SetParam(key string, value interface{}) error { _, e := callRosApi(node.masterUri, "setParam", node.qualifiedName, key, value) return e } func (node *defaultNode) HasParam(key string) (bool, error) { result, err := callRosApi(node.masterUri, "hasParam", node.qualifiedName, key) if err != nil { return false, err } hasParam := result.(bool) return hasParam, nil } func (node *defaultNode) SearchParam(key string) (string, error) { result, e := callRosApi(node.masterUri, "searchParam", node.qualifiedName, key) foundKey := result.(string) return foundKey, e } func (node *defaultNode) DeleteParam(key string) error { _, e := callRosApi(node.masterUri, "deleteParam", node.qualifiedName, key) return e } func (node *defaultNode) Logger() Logger { return node.logger }
[ "\"ROS_MASTER_URI\"" ]
[]
[ "ROS_MASTER_URI" ]
[]
["ROS_MASTER_URI"]
go
1
0
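Here ROS_MASTER_URI is read once when the node is constructed and used as-is, so an unset variable leaves masterUri empty. The sketch below shows the same lookup with a fallback; the default http://localhost:11311 is the conventional ROS master address and is used only as an illustrative assumption, not something this package defines.

```go
// Sketch: read ROS_MASTER_URI with a conventional fallback instead of
// silently continuing with an empty master URI.
package main

import (
	"fmt"
	"os"
)

// masterURI returns ROS_MASTER_URI, or a default when it is not set.
func masterURI() string {
	if uri := os.Getenv("ROS_MASTER_URI"); uri != "" {
		return uri
	}
	// Assumed default: the usual local ROS master address.
	return "http://localhost:11311"
}

func main() {
	fmt.Println("Master URI =", masterURI())
}
```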
cmd/infrakit/x/vmwscript.go
package x // import "github.com/docker/infrakit/cmd/infrakit/x" import ( "context" "io/ioutil" "os" logutil "github.com/docker/infrakit/pkg/log" "github.com/docker/infrakit/pkg/types" "github.com/docker/infrakit/pkg/x/vmwscript" "github.com/spf13/cobra" ) var cmdResults = map[string]string{} //var log = logutil.New("module", "x/vmwscript") / var debugV = logutil.V(200) // 100-500 are for typical debug levels, > 500 for highly repetitive logs (e.g. from polling) func vmwscriptCommand() *cobra.Command { cmd := &cobra.Command{ Use: "vmwscript deployment.json", Short: "This tool uses the native VMware APIs to automate Virtual Machines through the guest tools", } var sudoUser string plan := vmwscript.DeploymentPlan{} cmd.Flags().StringVar(&plan.VMWConfig.VCenterURL, "vcurl", os.Getenv("INFRAKIT_VSPHERE_VCURL"), "VMware vCenter URL, format https://user:pass@address/sdk [REQD]") cmd.Flags().StringVar(&plan.VMWConfig.DCName, "datacenter", os.Getenv("INFRAKIT_VSPHERE_VCDATACENTER"), "The name of the Datacenter to host the VM [REQD]") cmd.Flags().StringVar(&plan.VMWConfig.DSName, "datastore", os.Getenv("INFRAKIT_VSPHERE_VCDATASTORE"), "The name of the DataStore to host the VM [REQD]") cmd.Flags().StringVar(&plan.VMWConfig.NetworkName, "network", os.Getenv("INFRAKIT_VSPHERE_VCNETWORK"), "The network label the VM will use [REQD]") cmd.Flags().StringVar(&plan.VMWConfig.VSphereHost, "hostname", os.Getenv("INFRAKIT_VSPHERE_VCHOST"), "The server that will run the VM [REQD]") cmd.Flags().StringVar(&plan.VMWConfig.VMTemplateAuth.Username, "templateUser", os.Getenv("INFRAKIT_VSPHERE_VMUSER"), "A created user inside of the VM template") cmd.Flags().StringVar(&plan.VMWConfig.VMTemplateAuth.Password, "templatePass", os.Getenv("INFRAKIT_VSPHERE_VMPASS"), "The password for the specified user inside the VM template") cmd.Flags().StringVar(&plan.VMWConfig.VMName, "vmname", "", "The name of an existing virtual machine to run a command against") cmd.Flags().StringVar(&plan.VMWConfig.Command, "vmcommand", "", "A command passed as a string to be executed on the virtual machine specified with [--vmname]") cmd.Flags().StringVar(&sudoUser, "vmsudouser", "", "A sudo user that the command will be executed") cmd.RunE = func(cmd *cobra.Command, args []string) error { // Check that the argument (the json file exists) ctx, cancel := context.WithCancel(context.Background()) defer cancel() if plan.VMWConfig.VMName != "" && plan.VMWConfig.Command != "" { client, err := vmwscript.VCenterLogin(ctx, plan.VMWConfig) if err != nil { log.Crit("Error connecting to vCenter", "err", err) os.Exit(-1) } err = plan.RunCommand(ctx, client, sudoUser) return err } if len(args) == 0 { cmd.Usage() log.Crit("Please specify the path to a configuration file, or specify both [--vmname / --vmcommand]") os.Exit(-1) } // Attempt to open file buff, err := ioutil.ReadFile(args[0]) if err != nil { log.Crit("Error opening file", "Error", err) return err } err = types.AnyBytes(buff).Decode(&plan) if err != nil { log.Crit("Error parsing file", "Error", err) return err } err = plan.Validate() if err != nil { log.Crit("Error validating input", "Error", err) os.Exit(-1) } client, err := vmwscript.VCenterLogin(ctx, plan.VMWConfig) if err != nil { log.Crit("Error connecting to vCenter", "err", err) os.Exit(-1) } // Iterate through the deployments and tasks log.Info("Starting VMwScript engine") plan.RunTasks(ctx, client) log.Info("VMwScript has completed succesfully") return nil } return cmd }
[ "\"INFRAKIT_VSPHERE_VCURL\"", "\"INFRAKIT_VSPHERE_VCDATACENTER\"", "\"INFRAKIT_VSPHERE_VCDATASTORE\"", "\"INFRAKIT_VSPHERE_VCNETWORK\"", "\"INFRAKIT_VSPHERE_VCHOST\"", "\"INFRAKIT_VSPHERE_VMUSER\"", "\"INFRAKIT_VSPHERE_VMPASS\"" ]
[]
[ "INFRAKIT_VSPHERE_VMUSER", "INFRAKIT_VSPHERE_VCHOST", "INFRAKIT_VSPHERE_VCNETWORK", "INFRAKIT_VSPHERE_VCDATASTORE", "INFRAKIT_VSPHERE_VCURL", "INFRAKIT_VSPHERE_VCDATACENTER", "INFRAKIT_VSPHERE_VMPASS" ]
[]
["INFRAKIT_VSPHERE_VMUSER", "INFRAKIT_VSPHERE_VCHOST", "INFRAKIT_VSPHERE_VCNETWORK", "INFRAKIT_VSPHERE_VCDATASTORE", "INFRAKIT_VSPHERE_VCURL", "INFRAKIT_VSPHERE_VCDATACENTER", "INFRAKIT_VSPHERE_VMPASS"]
go
7
0
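In this record each cobra flag takes its default from an INFRAKIT_VSPHERE_* variable, so the command can be driven either from the environment or from the command line. The sketch below reproduces that environment-variable-as-flag-default pattern with only the standard library flag package, to keep the snippet self-contained; the validation message is an assumption about how a caller might fail fast, not behavior of the original command.

```go
// Sketch: seed flag defaults from the environment, as the record does
// with cobra, using only the standard library.
package main

import (
	"flag"
	"fmt"
	"os"
)

func main() {
	// The flag default comes from the environment; passing the flag on
	// the command line still overrides it.
	vcURL := flag.String("vcurl", os.Getenv("INFRAKIT_VSPHERE_VCURL"),
		"VMware vCenter URL, format https://user:pass@address/sdk")
	datacenter := flag.String("datacenter", os.Getenv("INFRAKIT_VSPHERE_VCDATACENTER"),
		"The name of the Datacenter to host the VM")
	flag.Parse()

	if *vcURL == "" {
		fmt.Println("vcurl is required (flag or INFRAKIT_VSPHERE_VCURL)")
		os.Exit(1)
	}
	fmt.Println("vCenter:", *vcURL, "datacenter:", *datacenter)
}
```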
examples/supply-policy/main.go
package main import ( "bytes" "fmt" "html/template" "log" "net/http" "os" "time" "github.com/brianfoshee/s3post" ) // Payload is used to fill out the policy template type Payload struct { Expiration string Bucket string Key string // Redirect looks like: localhost:8080/?bucket=brianfoshee&key=muploads%2Fcubeprint.mp4&etag=%22aca3de6c6cf590a1f75ef08d939b9eff%22 SuccessRedirect string ContentTypeStarts string Credential string Date string } // Form is used to fill out the html form type Form struct { URL string Policy string Signature string Payload } func main() { // be sure to fill out the AWS secret, key, and bucket secret := os.Getenv("AWS_SECRET_ACCESS_KEY") su := s3post.New("us-east-1", secret) key := os.Getenv("AWS_ACCESS_KEY_ID") bucket := "brianfoshee" // parse the template that generates a policy tmpl, err := template.New("test").Parse(policyDoc) if err != nil { fmt.Println(err) return } // parse the template the generates the HTML POST form frm, err := template.New("form").Parse(form) if err != nil { fmt.Println("error parsing html template", err) return } handler := func(w http.ResponseWriter, r *http.Request) { now := time.Now().UTC() p := Payload{ Expiration: now.Add(24 * time.Hour).Format(time.RFC3339), Bucket: bucket, Key: "muploads/", ContentTypeStarts: "video/", Credential: fmt.Sprintf("%s/%s/us-east-1/s3/aws4_request", key, now.Format("20060102")), Date: now.Format("20060102T150405Z"), SuccessRedirect: "http://localhost:8080", } // fill out the policy doc with Payload data b := []byte{} buf := bytes.NewBuffer(b) if err := tmpl.Execute(buf, p); err != nil { fmt.Println(err) return } // generate signature and policy policy, signed := su.Sign(buf.Bytes()) f := Form{ Policy: policy, Signature: signed, URL: "https://" + bucket + ".s3.amazonaws.com/", Payload: p, } // fill out the HTML form and write it out to the browser if err := frm.Execute(w, f); err != nil { fmt.Println("error executing form", err) return } } http.HandleFunc("/", handler) log.Fatal(http.ListenAndServe("localhost:8080", nil)) } var policyDoc = `{ "expiration": "{{ .Expiration }}", "conditions": [ {"bucket": "{{ .Bucket }}"}, ["starts-with", "$key", "{{ .Key }}"], {"acl": "public-read"}, {"success_action_redirect": "{{ .SuccessRedirect }}"}, ["starts-with", "$Content-Type", "{{ .ContentTypeStarts }}"], {"x-amz-credential": "{{ .Credential }}"}, {"x-amz-algorithm": "AWS4-HMAC-SHA256"}, {"x-amz-date": "{{ .Date }}" } ] }` var form = `<!doctype html> <head> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" /> </head> <body> <form action="{{ .URL }}" method="post" enctype="multipart/form-data"> Key to upload: <input type="input" name="key" value="{{ .Key }}${filename}" /><br /> <input type="hidden" name="acl" value="public-read" /> <input type="hidden" name="success_action_redirect" value="{{ .SuccessRedirect }}" /> Content-Type: <input type="input" name="Content-Type" value="{{ .ContentTypeStarts }}" /><br /> <input type="hidden" name="X-Amz-Credential" value="{{ .Credential }}" /> <input type="hidden" name="X-Amz-Algorithm" value="AWS4-HMAC-SHA256" /> <input type="hidden" name="X-Amz-Date" value="{{ .Date }}" /> <input type="hidden" name="Policy" value='{{ .Policy }}' /> <input type="hidden" name="X-Amz-Signature" value="{{ .Signature }}" /> <br /> File: <input type="file" name="file" /> <br /> <!-- The elements after this will be ignored --> <input type="submit" name="submit" value="Upload to Amazon S3" /> </form> </body> </html>`
[ "\"AWS_SECRET_ACCESS_KEY\"", "\"AWS_ACCESS_KEY_ID\"" ]
[]
[ "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY" ]
[]
["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"]
go
2
0
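The record above reads AWS_SECRET_ACCESS_KEY and AWS_ACCESS_KEY_ID without checking them, so a missing credential only surfaces when S3 rejects the signed policy. As an assumption about how one might harden the example, the sketch below checks both variables at startup and exits with a clear message before any HTTP server is started.

```go
// Sketch: fail fast when the AWS credentials used to sign the POST
// policy are not present in the environment.
package main

import (
	"fmt"
	"os"
)

func main() {
	secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
	key := os.Getenv("AWS_ACCESS_KEY_ID")
	if secret == "" || key == "" {
		fmt.Println("AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY must be set before serving the upload form")
		os.Exit(1)
	}
	fmt.Println("credentials present for access key", key)
}
```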
enterprise/internal/campaigns/resolvers/resolver_test.go
package resolvers import ( "context" "database/sql" "encoding/json" "flag" "fmt" "net/http" "os" "path/filepath" "reflect" "strings" "testing" "time" "github.com/dnaeon/go-vcr/cassette" "github.com/google/go-cmp/cmp" graphql "github.com/graph-gophers/graphql-go" gqlerrors "github.com/graph-gophers/graphql-go/errors" "github.com/pkg/errors" "github.com/sourcegraph/go-diff/diff" "github.com/sourcegraph/sourcegraph/cmd/frontend/backend" "github.com/sourcegraph/sourcegraph/cmd/frontend/db" "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend" "github.com/sourcegraph/sourcegraph/cmd/frontend/types" "github.com/sourcegraph/sourcegraph/cmd/repo-updater/repos" ee "github.com/sourcegraph/sourcegraph/enterprise/internal/campaigns" "github.com/sourcegraph/sourcegraph/internal/actor" "github.com/sourcegraph/sourcegraph/internal/api" "github.com/sourcegraph/sourcegraph/internal/campaigns" "github.com/sourcegraph/sourcegraph/internal/db/dbconn" "github.com/sourcegraph/sourcegraph/internal/db/dbtesting" "github.com/sourcegraph/sourcegraph/internal/httpcli" "github.com/sourcegraph/sourcegraph/internal/httptestutil" "github.com/sourcegraph/sourcegraph/internal/jsonc" "github.com/sourcegraph/sourcegraph/internal/rcache" "github.com/sourcegraph/sourcegraph/internal/repoupdater" "github.com/sourcegraph/sourcegraph/internal/repoupdater/protocol" "github.com/sourcegraph/sourcegraph/internal/vcs/git" "github.com/sourcegraph/sourcegraph/schema" ) func init() { dbtesting.DBNameSuffix = "campaignsresolversdb" } var update = flag.Bool("update", false, "update testdata") func TestCampaigns(t *testing.T) { if testing.Short() { t.Skip() } ctx := backend.WithAuthzBypass(context.Background()) dbtesting.SetupGlobalTestDB(t) rcache.SetupForTest(t) cf, save := newGithubClientFactory(t, "test-campaigns") defer save() now := time.Now().UTC().Truncate(time.Microsecond) clock := func() time.Time { return now.UTC().Truncate(time.Microsecond) } sr := &Resolver{ store: ee.NewStoreWithClock(dbconn.Global, clock), httpFactory: cf, } s, err := graphqlbackend.NewSchema(sr, nil, nil) if err != nil { t.Fatal(err) } type User struct { ID string DatabaseID int32 SiteAdmin bool } var users struct { Admin, User struct { User `json:"user"` } } mustExec(ctx, t, s, nil, &users, ` fragment u on User { id, databaseID, siteAdmin } mutation { admin: createUser(username: "admin") { user { ...u } } user: createUser(username: "user") { user { ...u } } } `) if !users.Admin.SiteAdmin { t.Fatal("admin must be a site-admin, since it was the first user created") } type Org struct { ID string Name string } var orgs struct { ACME Org } ctx = actor.WithActor(ctx, actor.FromUser(users.Admin.DatabaseID)) mustExec(ctx, t, s, nil, &orgs, ` fragment o on Org { id, name } mutation { acme: createOrganization(name: "ACME") { ...o } } `) type UserOrg struct { ID string DatabaseID int32 SiteAdmin bool Name string } type Campaign struct { ID string Name string Description string Author User CreatedAt string UpdatedAt string PublishedAt string Namespace UserOrg } var campaigns struct{ Admin, Org Campaign } input := map[string]interface{}{ "admin": map[string]interface{}{ "namespace": users.Admin.ID, "name": "Admin Campaign", "description": "It's an admin's campaign", }, "org": map[string]interface{}{ "namespace": orgs.ACME.ID, "name": "ACME's Campaign", "description": "It's an ACME's campaign", }, } mustExec(ctx, t, s, input, &campaigns, ` fragment u on User { id, databaseID, siteAdmin } fragment o on Org { id, name } fragment c on Campaign { id, name, 
description, createdAt, updatedAt, publishedAt author { ...u } namespace { ... on User { ...u } ... on Org { ...o } } } mutation($admin: CreateCampaignInput!, $org: CreateCampaignInput!){ admin: createCampaign(input: $admin) { ...c } org: createCampaign(input: $org) { ...c } } `) if have, want := campaigns.Admin.Namespace.ID, users.Admin.ID; have != want { t.Fatalf("have admin's campaign namespace id %q, want %q", have, want) } if have, want := campaigns.Org.Namespace.ID, orgs.ACME.ID; have != want { t.Fatalf("have orgs's campaign namespace id %q, want %q", have, want) } type CampaignConnection struct { Nodes []Campaign TotalCount int PageInfo struct { HasNextPage bool } } var listed struct { First, All CampaignConnection } mustExec(ctx, t, s, nil, &listed, ` fragment u on User { id, databaseID, siteAdmin } fragment o on Org { id, name } fragment c on Campaign { id, name, description, createdAt, updatedAt, publishedAt author { ...u } namespace { ... on User { ...u } ... on Org { ...o } } } fragment n on CampaignConnection { nodes { ...c } totalCount pageInfo { hasNextPage } } query { first: campaigns(first: 1) { ...n } all: campaigns() { ...n } } `) have := listed.First.Nodes want := []Campaign{campaigns.Admin} if !reflect.DeepEqual(have, want) { t.Errorf("wrong campaigns listed. diff=%s", cmp.Diff(have, want)) } if !listed.First.PageInfo.HasNextPage { t.Errorf("wrong page info: %+v", listed.First.PageInfo.HasNextPage) } have = listed.All.Nodes want = []Campaign{campaigns.Admin, campaigns.Org} if !reflect.DeepEqual(have, want) { t.Errorf("wrong campaigns listed. diff=%s", cmp.Diff(have, want)) } if listed.All.PageInfo.HasNextPage { t.Errorf("wrong page info: %+v", listed.All.PageInfo.HasNextPage) } campaigns.Admin.Name = "Updated Admin Campaign Name" campaigns.Admin.Description = "Updated Admin Campaign Description" updateInput := map[string]interface{}{ "input": map[string]interface{}{ "id": campaigns.Admin.ID, "name": campaigns.Admin.Name, "description": campaigns.Admin.Description, }, } var updated struct { UpdateCampaign Campaign } mustExec(ctx, t, s, updateInput, &updated, ` fragment u on User { id, databaseID, siteAdmin } fragment o on Org { id, name } fragment c on Campaign { id, name, description, createdAt, updatedAt, publishedAt author { ...u } namespace { ... on User { ...u } ... on Org { ...o } } } mutation($input: UpdateCampaignInput!){ updateCampaign(input: $input) { ...c } } `) haveUpdated, wantUpdated := updated.UpdateCampaign, campaigns.Admin if !reflect.DeepEqual(haveUpdated, wantUpdated) { t.Errorf("wrong campaign updated. 
diff=%s", cmp.Diff(haveUpdated, wantUpdated)) } store := repos.NewDBStore(dbconn.Global, sql.TxOptions{}) githubExtSvc := &repos.ExternalService{ Kind: "GITHUB", DisplayName: "GitHub", Config: marshalJSON(t, &schema.GitHubConnection{ Url: "https://github.com", Token: os.Getenv("GITHUB_TOKEN"), Repos: []string{"sourcegraph/sourcegraph"}, }), } bbsURL := os.Getenv("BITBUCKET_SERVER_URL") if bbsURL == "" { // The test fixtures and golden files were generated with // this config pointed to bitbucket.sgdev.org bbsURL = "https://bitbucket.sgdev.org" } bbsExtSvc := &repos.ExternalService{ Kind: "BITBUCKETSERVER", DisplayName: "Bitbucket Server", Config: marshalJSON(t, &schema.BitbucketServerConnection{ Url: bbsURL, Token: os.Getenv("BITBUCKET_SERVER_TOKEN"), Repos: []string{"SOUR/vegeta"}, }), } err = store.UpsertExternalServices(ctx, githubExtSvc, bbsExtSvc) if err != nil { t.Fatal(t) } githubSrc, err := repos.NewGithubSource(githubExtSvc, cf) if err != nil { t.Fatal(t) } githubRepo, err := githubSrc.GetRepo(ctx, "sourcegraph/sourcegraph") if err != nil { t.Fatal(t) } bbsSrc, err := repos.NewBitbucketServerSource(bbsExtSvc, cf) if err != nil { t.Fatal(t) } bbsRepos := getBitbucketServerRepos(t, ctx, bbsSrc) if len(bbsRepos) != 1 { t.Fatalf("wrong number of bitbucket server repos. got=%d", len(bbsRepos)) } bbsRepo := bbsRepos[0] err = store.UpsertRepos(ctx, githubRepo, bbsRepo) if err != nil { t.Fatal(err) } type ChangesetEventConnection struct { TotalCount int } git.Mocks.ResolveRevision = func(spec string, opt *git.ResolveRevisionOptions) (api.CommitID, error) { return "mockcommitid", nil } defer func() { git.Mocks.ResolveRevision = nil }() repoupdater.MockRepoLookup = func(args protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error) { return &protocol.RepoLookupResult{ Repo: &protocol.RepoInfo{Name: args.Repo}, }, nil } defer func() { repoupdater.MockRepoLookup = nil }() type GitTarget struct { OID string AbbreviatedOID string TargetType string `json:"type"` } type GitRef struct { Name string AbbrevName string DisplayName string Prefix string RefType string `json:"type"` Repository struct{ ID string } URL string Target GitTarget } type Changeset struct { ID string Repository struct{ ID string } Campaigns CampaignConnection CreatedAt string UpdatedAt string Title string Body string State string ExternalURL struct { URL string ServiceType string } ReviewState string CheckState string Events ChangesetEventConnection Head GitRef Base GitRef } var result struct { Changesets []Changeset } graphqlGithubRepoID := string(graphqlbackend.MarshalRepositoryID(api.RepoID(githubRepo.ID))) graphqlBBSRepoID := string(graphqlbackend.MarshalRepositoryID(api.RepoID(bbsRepo.ID))) in := fmt.Sprintf( `[{repository: %q, externalID: %q}, {repository: %q, externalID: %q}]`, graphqlGithubRepoID, "999", graphqlBBSRepoID, "2", ) mustExec(ctx, t, s, nil, &result, fmt.Sprintf(` fragment gitRef on GitRef { name abbrevName displayName prefix type repository { id } url target { oid abbreviatedOID type } } fragment cs on ExternalChangeset { id repository { id } createdAt updatedAt title body state externalURL { url serviceType } reviewState checkState events(first: 100) { totalCount } head { ...gitRef } base { ...gitRef } } mutation() { changesets: createChangesets(input: %s) { ...cs } } `, in)) { want := []Changeset{ { Repository: struct{ ID string }{ID: graphqlGithubRepoID}, CreatedAt: now.Format(time.RFC3339), UpdatedAt: now.Format(time.RFC3339), Title: "add extension filter to filter bar", Body: "Enables adding 
extension filters to the filter bar by rendering the extension filter as filter chips inside the filter bar.\r\nWIP for https://github.com/sourcegraph/sourcegraph/issues/962\r\n\r\n> This PR updates the CHANGELOG.md file to describe any user-facing changes.\r\n.\r\n", State: "MERGED", ExternalURL: struct{ URL, ServiceType string }{ URL: "https://github.com/sourcegraph/sourcegraph/pull/999", ServiceType: "github", }, ReviewState: "APPROVED", CheckState: "PASSED", Events: ChangesetEventConnection{ TotalCount: 57, }, Head: GitRef{ Name: "refs/heads/vo/add-type-issue-filter", AbbrevName: "vo/add-type-issue-filter", DisplayName: "vo/add-type-issue-filter", Prefix: "refs/heads/", RefType: "GIT_BRANCH", Repository: struct{ ID string }{ID: "UmVwb3NpdG9yeTox"}, URL: "/github.com/sourcegraph/sourcegraph@vo/add-type-issue-filter", Target: GitTarget{ OID: "7db302f07955e41d50e656d5faebefb4d87bce8a", AbbreviatedOID: "7db302f", TargetType: "GIT_COMMIT", }, }, Base: GitRef{ Name: "refs/heads/master", AbbrevName: "master", DisplayName: "master", Prefix: "refs/heads/", RefType: "GIT_BRANCH", Repository: struct{ ID string }{ID: "UmVwb3NpdG9yeTox"}, URL: "/github.com/sourcegraph/sourcegraph@master", Target: GitTarget{ OID: "fa3815ba9ddd49db9111c5e9691e16d27e8f1f60", AbbreviatedOID: "fa3815b", TargetType: "GIT_COMMIT", }, }, }, { Repository: struct{ ID string }{ID: graphqlBBSRepoID}, CreatedAt: now.Format(time.RFC3339), UpdatedAt: now.Format(time.RFC3339), Title: "Release testing pr", Body: "* Remove dump.go\r\n* make make make", State: "MERGED", ExternalURL: struct{ URL, ServiceType string }{ URL: "https://bitbucket.sgdev.org/projects/SOUR/repos/vegeta/pull-requests/2", ServiceType: "bitbucketServer", }, ReviewState: "PENDING", CheckState: "PENDING", Events: ChangesetEventConnection{ TotalCount: 10, }, Head: GitRef{ Name: "refs/heads/release-testing-pr", AbbrevName: "release-testing-pr", DisplayName: "release-testing-pr", Prefix: "refs/heads/", RefType: "GIT_BRANCH", Repository: struct{ ID string }{ID: "UmVwb3NpdG9yeToy"}, URL: "/bitbucket.sgdev.org/SOUR/vegeta@release-testing-pr", Target: GitTarget{ OID: "mockcommitid", AbbreviatedOID: "mockcom", TargetType: "GIT_COMMIT", }, }, Base: GitRef{ Name: "refs/heads/master", AbbrevName: "master", DisplayName: "master", Prefix: "refs/heads/", RefType: "GIT_BRANCH", Repository: struct{ ID string }{ID: "UmVwb3NpdG9yeToy"}, URL: "/bitbucket.sgdev.org/SOUR/vegeta@master", Target: GitTarget{ OID: "mockcommitid", AbbreviatedOID: "mockcom", TargetType: "GIT_COMMIT", }, }, }, } have := make([]Changeset, 0, len(result.Changesets)) for _, c := range result.Changesets { if c.ID == "" { t.Fatal("Changeset ID is empty") } c.ID = "" have = append(have, c) } if diff := cmp.Diff(have, want); diff != "" { t.Fatal(diff) } } type ChangesetConnection struct { Nodes []Changeset TotalCount int PageInfo struct { HasNextPage bool } } type ChangesetCounts struct { Date graphqlbackend.DateTime Total int32 Merged int32 Closed int32 Open int32 OpenApproved int32 OpenChangesRequested int32 OpenPending int32 } type CampaignWithChangesets struct { ID string Name string Description string Author User CreatedAt string UpdatedAt string Namespace UserOrg Changesets ChangesetConnection ChangesetCountsOverTime []ChangesetCounts } var addChangesetsResult struct{ Campaign CampaignWithChangesets } changesetIDs := make([]string, 0, len(result.Changesets)) for _, c := range result.Changesets { changesetIDs = append(changesetIDs, c.ID) } // Date when PR #999 from above was created countsFrom := 
parseJSONTime(t, "2018-11-14T22:07:45Z") // Date when PR #999 from above was merged countsTo := parseJSONTime(t, "2018-12-04T08:10:07Z") mustExec(ctx, t, s, nil, &addChangesetsResult, fmt.Sprintf(` fragment u on User { id, databaseID, siteAdmin } fragment o on Org { id, name } fragment cs on ExternalChangeset { id repository { id } createdAt updatedAt campaigns { nodes { id } } title body state externalURL { url serviceType } reviewState } fragment c on Campaign { id, name, description, createdAt, updatedAt author { ...u } namespace { ... on User { ...u } ... on Org { ...o } } changesets { nodes { ...cs } totalCount pageInfo { hasNextPage } } changesetCountsOverTime(from: %s, to: %s) { date total merged closed open openApproved openChangesRequested openPending } } mutation() { campaign: addChangesetsToCampaign(campaign: %q, changesets: %s) { ...c } } `, marshalDateTime(t, countsFrom), marshalDateTime(t, countsTo), campaigns.Admin.ID, marshalJSON(t, changesetIDs), )) { have := addChangesetsResult.Campaign.Changesets.TotalCount want := len(changesetIDs) if have != want { t.Fatalf( "want campaign changesets totalcount %d, have=%d", want, have, ) } } { var have []string want := changesetIDs for _, n := range addChangesetsResult.Campaign.Changesets.Nodes { have = append(have, n.ID) } if !reflect.DeepEqual(have, want) { t.Errorf("wrong changesets added to campaign. want=%v, have=%v", want, have) } } { have := map[string]bool{} for _, cs := range addChangesetsResult.Campaign.Changesets.Nodes { have[cs.Campaigns.Nodes[0].ID] = true } if !have[campaigns.Admin.ID] || len(have) != 1 { t.Errorf("wrong campaign added to changeset. want=%v, have=%v", campaigns.Admin.ID, have) } } { counts := addChangesetsResult.Campaign.ChangesetCountsOverTime // There's 20 1-day intervals between countsFrom and including countsTo if have, want := len(counts), 20; have != want { t.Errorf("wrong changeset counts length %d, have=%d", want, have) } for _, c := range counts { if have, want := c.Total, int32(1); have != want { t.Errorf("wrong changeset counts total %d, have=%d", want, have) } } } deleteInput := map[string]interface{}{"id": campaigns.Admin.ID} mustExec(ctx, t, s, deleteInput, &struct{}{}, ` mutation($id: ID!){ deleteCampaign(campaign: $id) { alwaysNil } } `) var campaignsAfterDelete struct { Campaigns struct { TotalCount int } } mustExec(ctx, t, s, nil, &campaignsAfterDelete, ` query { campaigns { totalCount } } `) haveCount := campaignsAfterDelete.Campaigns.TotalCount wantCount := listed.All.TotalCount - 1 if haveCount != wantCount { t.Errorf("wrong campaigns totalcount after delete. 
want=%d, have=%d", wantCount, haveCount) } } func TestChangesetCountsOverTime(t *testing.T) { if testing.Short() { t.Skip() } ctx := backend.WithAuthzBypass(context.Background()) dbtesting.SetupGlobalTestDB(t) rcache.SetupForTest(t) cf, save := newGithubClientFactory(t, "test-changeset-counts-over-time") defer save() now := time.Now().UTC().Truncate(time.Microsecond) clock := func() time.Time { return now.UTC().Truncate(time.Microsecond) } u, err := db.Users.Create(ctx, db.NewUser{ Email: "[email protected]", Username: "thorsten", DisplayName: "thorsten", Password: "1234", EmailVerificationCode: "foobar", }) if err != nil { t.Fatal(err) } repoStore := repos.NewDBStore(dbconn.Global, sql.TxOptions{}) githubExtSvc := &repos.ExternalService{ Kind: "GITHUB", DisplayName: "GitHub", Config: marshalJSON(t, &schema.GitHubConnection{ Url: "https://github.com", Token: os.Getenv("GITHUB_TOKEN"), Repos: []string{"sourcegraph/sourcegraph"}, }), } err = repoStore.UpsertExternalServices(ctx, githubExtSvc) if err != nil { t.Fatal(t) } githubSrc, err := repos.NewGithubSource(githubExtSvc, cf) if err != nil { t.Fatal(t) } githubRepo, err := githubSrc.GetRepo(ctx, "sourcegraph/sourcegraph") if err != nil { t.Fatal(err) } err = repoStore.UpsertRepos(ctx, githubRepo) if err != nil { t.Fatal(err) } store := ee.NewStoreWithClock(dbconn.Global, clock) campaign := &campaigns.Campaign{ Name: "Test campaign", Description: "Testing changeset counts", AuthorID: u.ID, NamespaceUserID: u.ID, } err = store.CreateCampaign(ctx, campaign) if err != nil { t.Fatal(err) } changesets := []*campaigns.Changeset{ { RepoID: githubRepo.ID, ExternalID: "5834", ExternalServiceType: githubRepo.ExternalRepo.ServiceType, CampaignIDs: []int64{campaign.ID}, }, { RepoID: githubRepo.ID, ExternalID: "5849", ExternalServiceType: githubRepo.ExternalRepo.ServiceType, CampaignIDs: []int64{campaign.ID}, }, } err = store.CreateChangesets(ctx, changesets...) if err != nil { t.Fatal(err) } syncer := ee.ChangesetSyncer{ ReposStore: repoStore, SyncStore: store, HTTPFactory: cf, } err = syncer.SyncChangesets(ctx, changesets...) 
if err != nil { t.Fatal(err) } for _, c := range changesets { campaign.ChangesetIDs = append(campaign.ChangesetIDs, c.ID) } err = store.UpdateCampaign(ctx, campaign) if err != nil { t.Fatal(err) } // Date when PR #5834 was created: "2019-10-02T14:49:31Z" // We start exactly one day earlier // Date when PR #5849 was created: "2019-10-03T15:03:21Z" start := parseJSONTime(t, "2019-10-01T14:49:31Z") // Date when PR #5834 was merged: "2019-10-07T13:13:45Z" // Date when PR #5849 was merged: "2019-10-04T08:55:21Z" end := parseJSONTime(t, "2019-10-07T13:13:45Z") daysBeforeEnd := func(days int) time.Time { return end.AddDate(0, 0, -days) } r := &campaignResolver{store: store, Campaign: campaign} rs, err := r.ChangesetCountsOverTime(ctx, &graphqlbackend.ChangesetCountsArgs{ From: &graphqlbackend.DateTime{Time: start}, To: &graphqlbackend.DateTime{Time: end}, }) if err != nil { t.Fatalf("ChangsetCountsOverTime failed with error: %s", err) } have := make([]*ee.ChangesetCounts, 0, len(rs)) for _, cr := range rs { r := cr.(*changesetCountsResolver) have = append(have, r.counts) } want := []*ee.ChangesetCounts{ {Time: daysBeforeEnd(5), Total: 0, Open: 0}, {Time: daysBeforeEnd(4), Total: 1, Open: 1, OpenPending: 1}, {Time: daysBeforeEnd(3), Total: 2, Open: 1, OpenPending: 1, Merged: 1}, {Time: daysBeforeEnd(2), Total: 2, Open: 1, OpenPending: 1, Merged: 1}, {Time: daysBeforeEnd(1), Total: 2, Open: 1, OpenPending: 1, Merged: 1}, {Time: end, Total: 2, Merged: 2}, } if !reflect.DeepEqual(have, want) { t.Errorf("wrong counts listed. diff=%s", cmp.Diff(have, want)) } } const testDiff = `diff README.md README.md index 671e50a..851b23a 100644 --- README.md +++ README.md @@ -1,2 +1,2 @@ # README -This file is hosted at example.com and is a test file. +This file is hosted at sourcegraph.com and is a test file. diff --git urls.txt urls.txt index 6f8b5d9..17400bc 100644 --- urls.txt +++ urls.txt @@ -1,3 +1,3 @@ another-url.com -example.com +sourcegraph.com never-touch-the-mouse.com ` // wantFileDiffs is the parsed representation of testDiff. 
var wantFileDiffs = FileDiffs{ RawDiff: testDiff, DiffStat: DiffStat{Changed: 2}, Nodes: []FileDiff{ { OldPath: "README.md", NewPath: "README.md", OldFile: File{Name: "README.md"}, Hunks: []FileDiffHunk{ { Body: " # README\n-This file is hosted at example.com and is a test file.\n+This file is hosted at sourcegraph.com and is a test file.\n", OldRange: DiffRange{StartLine: 1, Lines: 2}, NewRange: DiffRange{StartLine: 1, Lines: 2}, }, }, Stat: DiffStat{Changed: 1}, }, { OldPath: "urls.txt", NewPath: "urls.txt", OldFile: File{Name: "urls.txt"}, Hunks: []FileDiffHunk{ { Body: " another-url.com\n-example.com\n+sourcegraph.com\n never-touch-the-mouse.com\n", OldRange: DiffRange{StartLine: 1, Lines: 3}, NewRange: DiffRange{StartLine: 1, Lines: 3}, }, }, Stat: DiffStat{Changed: 1}, }, }, } type DiffRange struct{ StartLine, Lines int } type FileDiffHunk struct { Body, Section string OldNoNewlineAt bool OldRange, NewRange DiffRange } type DiffStat struct{ Added, Deleted, Changed int } type File struct { Name string // Ignoring other fields of File2, since that would require gitserver } type FileDiff struct { OldPath, NewPath string Hunks []FileDiffHunk Stat DiffStat OldFile File } type FileDiffs struct { RawDiff string DiffStat DiffStat Nodes []FileDiff } type Patch struct { Repository struct{ Name, URL string } Diff struct { FileDiffs FileDiffs } } type PatchSet struct { ID string Patches struct { Nodes []Patch } PreviewURL string } func TestCreatePatchSetFromPatchesResolver(t *testing.T) { ctx := backend.WithAuthzBypass(context.Background()) dbtesting.SetupGlobalTestDB(t) user := createTestUser(ctx, t) act := actor.FromUser(user.ID) ctx = actor.WithActor(ctx, act) t.Run("invalid patch", func(t *testing.T) { args := graphqlbackend.CreatePatchSetFromPatchesArgs{ Patches: []graphqlbackend.PatchInput{ { Repository: graphqlbackend.MarshalRepositoryID(1), BaseRevision: "f00b4r", BaseRef: "master", Patch: "!!! 
this is not a valid unified diff !!!\n--- x\n+++ y\n@@ 1,1 2,2\na", }, }, } _, err := (&Resolver{}).CreatePatchSetFromPatches(ctx, args) if err == nil { t.Fatal("want error") } if _, ok := errors.Cause(err).(*diff.ParseError); !ok { t.Fatalf("got error %q (%T), want a diff ParseError", err, errors.Cause(err)) } }) t.Run("integration", func(t *testing.T) { if testing.Short() { t.Skip() } rcache.SetupForTest(t) now := time.Now().UTC().Truncate(time.Microsecond) clock := func() time.Time { return now.UTC().Truncate(time.Microsecond) } // For testing purposes they all share the same rev, across repos testingRev := api.CommitID("24f7ca7c1190835519e261d7eefa09df55ceea4f") backend.Mocks.Repos.ResolveRev = func(_ context.Context, _ *types.Repo, _ string) (api.CommitID, error) { return testingRev, nil } defer func() { backend.Mocks.Repos.ResolveRev = nil }() backend.Mocks.Repos.GetCommit = func(_ context.Context, _ *types.Repo, _ api.CommitID) (*git.Commit, error) { return &git.Commit{ID: testingRev}, nil } defer func() { backend.Mocks.Repos.GetCommit = nil }() reposStore := repos.NewDBStore(dbconn.Global, sql.TxOptions{}) repo := &repos.Repo{ Name: "github.com/sourcegraph/sourcegraph", ExternalRepo: api.ExternalRepoSpec{ ID: "external-id", ServiceType: "github", ServiceID: "https://github.com/", }, Sources: map[string]*repos.SourceInfo{ "extsvc:github:4": { ID: "extsvc:github:4", CloneURL: "https://[email protected]/sourcegraph/sourcegraph", }, }, } if err := reposStore.UpsertRepos(ctx, repo); err != nil { t.Fatal(err) } store := ee.NewStoreWithClock(dbconn.Global, clock) sr := &Resolver{store: store} s, err := graphqlbackend.NewSchema(sr, nil, nil) if err != nil { t.Fatal(err) } var response struct{ CreatePatchSetFromPatches PatchSet } mustExec(ctx, t, s, nil, &response, fmt.Sprintf(` mutation { createPatchSetFromPatches(patches: [{repository: %q, baseRevision: "f00b4r", baseRef: "master", patch: %q}]) { ... 
on PatchSet { id patches(first: %d) { nodes { repository { name } diff { fileDiffs { rawDiff diffStat { added deleted changed } nodes { oldPath newPath hunks { body section newRange { startLine, lines } oldRange { startLine, lines } oldNoNewlineAt } stat { added deleted changed } oldFile { name externalURLs { serviceType url } } } } } } } previewURL } } } `, graphqlbackend.MarshalRepositoryID(api.RepoID(repo.ID)), testDiff, 1)) result := response.CreatePatchSetFromPatches wantPatches := []Patch{ { Repository: struct{ Name, URL string }{Name: repo.Name}, Diff: struct{ FileDiffs FileDiffs }{FileDiffs: wantFileDiffs}, }, } if !cmp.Equal(result.Patches.Nodes, wantPatches) { t.Error("wrong patches", cmp.Diff(result.Patches.Nodes, wantPatches)) } if have, want := result.PreviewURL, "http://example.com/campaigns/new?patchSet=UGF0Y2hTZXQ6MQ%3D%3D"; have != want { t.Fatalf("have PreviewURL %q, want %q", have, want) } }) } func TestPatchSetResolver(t *testing.T) { if testing.Short() { t.Skip() } ctx := backend.WithAuthzBypass(context.Background()) dbtesting.SetupGlobalTestDB(t) rcache.SetupForTest(t) now := time.Now().UTC().Truncate(time.Microsecond) clock := func() time.Time { return now.UTC().Truncate(time.Microsecond) } // For testing purposes they all share the same rev, across repos testingRev := api.CommitID("24f7ca7c1190835519e261d7eefa09df55ceea4f") backend.Mocks.Repos.ResolveRev = func(_ context.Context, _ *types.Repo, _ string) (api.CommitID, error) { return testingRev, nil } defer func() { backend.Mocks.Repos.ResolveRev = nil }() backend.Mocks.Repos.GetCommit = func(_ context.Context, _ *types.Repo, _ api.CommitID) (*git.Commit, error) { return &git.Commit{ID: testingRev}, nil } defer func() { backend.Mocks.Repos.GetCommit = nil }() repoupdater.MockRepoLookup = func(args protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error) { return &protocol.RepoLookupResult{ Repo: &protocol.RepoInfo{Name: args.Repo}, }, nil } defer func() { repoupdater.MockRepoLookup = nil }() reposStore := repos.NewDBStore(dbconn.Global, sql.TxOptions{}) var rs []*repos.Repo for i := 0; i < 3; i++ { repo := &repos.Repo{ Name: fmt.Sprintf("github.com/sourcegraph/sourcegraph-%d", i), URI: fmt.Sprintf("github.com/sourcegraph/sourcegraph-%d", i), Description: "Code search and navigation tool", ExternalRepo: api.ExternalRepoSpec{ ID: fmt.Sprintf("external-id-%d", i), ServiceType: "github", ServiceID: "https://github.com/", }, Sources: map[string]*repos.SourceInfo{ "extsvc:github:4": { ID: "extsvc:github:4", CloneURL: "https://[email protected]/sourcegraph/sourcegraph", }, }, } err := reposStore.UpsertRepos(ctx, repo) if err != nil { t.Fatal(err) } rs = append(rs, repo) } store := ee.NewStoreWithClock(dbconn.Global, clock) user := createTestUser(ctx, t) patchSet := &campaigns.PatchSet{UserID: user.ID} err := store.CreatePatchSet(ctx, patchSet) if err != nil { t.Fatal(err) } var jobs []*campaigns.Patch for _, repo := range rs { job := &campaigns.Patch{ PatchSetID: patchSet.ID, RepoID: repo.ID, Rev: testingRev, BaseRef: "master", Diff: testDiff, } err := store.CreatePatch(ctx, job) if err != nil { t.Fatal(err) } jobs = append(jobs, job) } type Response struct { Node PatchSet } sr := &Resolver{store: store} s, err := graphqlbackend.NewSchema(sr, nil, nil) if err != nil { t.Fatal(err) } var response Response mustExec(ctx, t, s, nil, &response, fmt.Sprintf(` query { node(id: %q) { ... 
on PatchSet { id patches(first: %d) { nodes { repository { name } diff { fileDiffs { rawDiff diffStat { added deleted changed } nodes { oldPath newPath hunks { body section newRange { startLine, lines } oldRange { startLine, lines } oldNoNewlineAt } stat { added deleted changed } oldFile { name externalURLs { serviceType url } } } } } } } } } } `, marshalPatchSetID(patchSet.ID), len(jobs))) if have, want := len(response.Node.Patches.Nodes), len(jobs); have != want { t.Fatalf("have %d patches, want %d", have, want) } for i, patch := range response.Node.Patches.Nodes { if have, want := patch.Repository.Name, rs[i].Name; have != want { t.Fatalf("wrong Repository Name %q. want=%q", have, want) } if have, want := patch.Diff.FileDiffs.RawDiff, testDiff; have != want { t.Fatalf("wrong RawDiff. diff=%s", cmp.Diff(have, want)) } if have, want := patch.Diff.FileDiffs.DiffStat.Changed, 2; have != want { t.Fatalf("wrong DiffStat.Changed %d, want=%d", have, want) } haveFileDiffs := patch.Diff.FileDiffs if !reflect.DeepEqual(haveFileDiffs, wantFileDiffs) { t.Fatal(cmp.Diff(haveFileDiffs, wantFileDiffs)) } } } func mustExec( ctx context.Context, t testing.TB, s *graphql.Schema, in map[string]interface{}, out interface{}, query string, ) { t.Helper() if errs := exec(ctx, t, s, in, out, query); len(errs) > 0 { t.Fatalf("unexpected graphql query errors: %v", errs) } } func exec( ctx context.Context, t testing.TB, s *graphql.Schema, in map[string]interface{}, out interface{}, query string, ) []*gqlerrors.QueryError { t.Helper() query = strings.Replace(query, "\t", " ", -1) r := s.Exec(ctx, query, "", in) if len(r.Errors) != 0 { return r.Errors } if testing.Verbose() { t.Logf("\n---- GraphQL Query ----\n%s\n\nVars: %s\n---- GraphQL Result ----\n%s\n -----------", query, toJSON(t, in), r.Data) } if err := json.Unmarshal(r.Data, out); err != nil { t.Fatalf("failed to unmarshal graphql data: %v", err) } return nil } func toJSON(t testing.TB, v interface{}) string { data, err := json.Marshal(v) if err != nil { t.Fatal(err) } formatted, err := jsonc.Format(string(data), nil) if err != nil { t.Fatal(err) } return formatted } func newGithubClientFactory(t testing.TB, name string) (*httpcli.Factory, func()) { t.Helper() cassete := filepath.Join("testdata/vcr/", strings.Replace(name, " ", "-", -1)) rec, err := httptestutil.NewRecorder(cassete, *update, func(i *cassette.Interaction) error { return nil }) if err != nil { t.Fatal(err) } mw := httpcli.NewMiddleware(githubProxyRedirectMiddleware) hc := httpcli.NewFactory(mw, httptestutil.NewRecorderOpt(rec)) return hc, func() { if err := rec.Stop(); err != nil { t.Errorf("failed to update test data: %s", err) } } } func githubProxyRedirectMiddleware(cli httpcli.Doer) httpcli.Doer { return httpcli.DoerFunc(func(req *http.Request) (*http.Response, error) { if req.URL.Hostname() == "github-proxy" { req.URL.Host = "api.github.com" req.URL.Scheme = "https" } return cli.Do(req) }) } func marshalJSON(t testing.TB, v interface{}) string { t.Helper() bs, err := json.Marshal(v) if err != nil { t.Fatal(err) } return string(bs) } func marshalDateTime(t testing.TB, ts time.Time) string { t.Helper() dt := graphqlbackend.DateTime{Time: ts} bs, err := dt.MarshalJSON() if err != nil { t.Fatal(err) } return string(bs) } func parseJSONTime(t testing.TB, ts string) time.Time { t.Helper() timestamp, err := time.Parse(time.RFC3339, ts) if err != nil { t.Fatal(err) } return timestamp } func getBitbucketServerRepos(t testing.TB, ctx context.Context, src *repos.BitbucketServerSource) []*repos.Repo 
{ results := make(chan repos.SourceResult) go func() { src.ListRepos(ctx, results) close(results) }() var repos []*repos.Repo for res := range results { if res.Err != nil { t.Fatal(res.Err) } repos = append(repos, res.Repo) } return repos } var testUser = db.NewUser{ Email: "[email protected]", Username: "test", DisplayName: "Test", Password: "test", EmailIsVerified: true, FailIfNotInitialUser: false, } func createTestUser(ctx context.Context, t *testing.T) *types.User { t.Helper() user, err := db.Users.Create(ctx, testUser) if err != nil { t.Fatal(err) } return user }
[ "\"GITHUB_TOKEN\"", "\"BITBUCKET_SERVER_URL\"", "\"BITBUCKET_SERVER_TOKEN\"", "\"GITHUB_TOKEN\"" ]
[]
[ "BITBUCKET_SERVER_URL", "BITBUCKET_SERVER_TOKEN", "GITHUB_TOKEN" ]
[]
["BITBUCKET_SERVER_URL", "BITBUCKET_SERVER_TOKEN", "GITHUB_TOKEN"]
go
3
0
software/motion_estimate/vo_estimate/scripts/send_a_minimal_lidar_state.py
#!/usr/bin/python import os,sys home_dir =os.getenv("HOME") #print home_dir sys.path.append(home_dir + "/drc/software/build/lib/python2.7/site-packages") sys.path.append(home_dir + "/drc/software/build/lib/python2.7/dist-packages") import math import lcm from bot_core.pose_t import pose_t from bot_core.rigid_transform_t import rigid_transform_t from drc_utils import * msg = pose_t() msg.utime = 0 msg.pos = (0, 0, 0.0) msg.orientation = (1, 0, 0, 0) lc = lcm.LCM() #lc.publish("PRE_SPINDLE_TO_POST_SPINDLE", msg.encode()) msg = rigid_transform_t() msg.utime = 0 msg.trans = (0, 0, 0.0) msg.quat = euler_to_quat([0,0, math.pi]) lc = lcm.LCM() lc.publish("PRE_SPINDLE_TO_POST_SPINDLE", msg.encode())
[]
[]
[ "HOME" ]
[]
["HOME"]
python
1
0
firestorefastapi/main.py
import os import time from typing import Any, Callable from fastapi import FastAPI, Request from firestorefastapi import __project_id__, __version__ from firestorefastapi.routers import health, item os.environ["TZ"] = "UTC" # # create the api # api = FastAPI(title=f"Firestore FastAPI: {__project_id__}", version=__version__) # # middleware # @api.middleware("http") async def add_process_time_header(request: Request, call_next: Callable) -> Any: start_time = time.time() response = await call_next(request) process_time = time.time() - start_time response.headers["X-Process-Time"] = str(process_time) return response # # routers # api.include_router(health.router) api.include_router(item.router)
[]
[]
[ "TZ" ]
[]
["TZ"]
python
1
0
hotelrooms/hotelrooms/settings.py
""" Django settings for hotelrooms project. Generated by 'django-admin startproject' using Django 3.0.6. For more information on this file, see https://docs.djangoproject.com/en/3.0/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.0/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '^b7=e99!2(t7csio=(lospr6ebgbp-2(*n^il4vt8dotctorm*' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.postgres', 'booking', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'hotelrooms.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ os.path.join(BASE_DIR, "hotelrooms", "templates"), os.path.join(BASE_DIR, "booking", "templates"), ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'hotelrooms.wsgi.application' PROJECT_DIR = os.path.dirname(__file__) # Database # https://docs.djangoproject.com/en/3.0/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql', 'NAME': 'hotelrooms', 'PORT': 5433, 'HOST': os.getenv("DB_HOST", "localhost"), 'USER': 'django', 'PASSWORD': 'hotelrooms', } } # Password validation # https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.0/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True DATE_INPUT_FORMATS = [ '%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06' '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006' '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006' '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006' '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006' ] # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.0/howto/static-files/ STATIC_ROOT = os.path.join(PROJECT_DIR, 'static/') STATIC_URL = '/static/' STATICFILES_DIRS = [ os.path.join(BASE_DIR, "static"), ] MEDIA_ROOT = os.path.join(BASE_DIR, 'media') 
MEDIA_URL = '/media/'
[]
[]
[ "DB_HOST" ]
[]
["DB_HOST"]
python
1
0
vendor/github.com/oracle/oci-go-sdk/v53/core/attach_instance_pool_instance_details.go
// Copyright (c) 2016, 2018, 2021, Oracle and/or its affiliates. All rights reserved. // This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. // Code generated. DO NOT EDIT. // Core Services API // // Use the Core Services API to manage resources such as virtual cloud networks (VCNs), // compute instances, and block storage volumes. For more information, see the console // documentation for the Networking (https://docs.cloud.oracle.com/iaas/Content/Network/Concepts/overview.htm), // Compute (https://docs.cloud.oracle.com/iaas/Content/Compute/Concepts/computeoverview.htm), and // Block Volume (https://docs.cloud.oracle.com/iaas/Content/Block/Concepts/overview.htm) services. // package core import ( "github.com/oracle/oci-go-sdk/v53/common" ) // AttachInstancePoolInstanceDetails An instance that is to be attached to an instance pool. type AttachInstancePoolInstanceDetails struct { // The OCID (https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the instance. InstanceId *string `mandatory:"true" json:"instanceId"` } func (m AttachInstancePoolInstanceDetails) String() string { return common.PointerString(m) }
[]
[]
[]
[]
[]
go
null
null
null
vendor/github.com/docker/swarmkit/cmd/swarmctl/main.go
package main import ( "os" "github.com/docker/swarmkit/cmd/swarmctl/cluster" "github.com/docker/swarmkit/cmd/swarmctl/network" "github.com/docker/swarmkit/cmd/swarmctl/node" "github.com/docker/swarmkit/cmd/swarmctl/service" "github.com/docker/swarmkit/cmd/swarmctl/task" "github.com/docker/swarmkit/version" "github.com/spf13/cobra" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) func main() { if c, err := mainCmd.ExecuteC(); err != nil { c.Println("Error:", grpc.ErrorDesc(err)) // if it's not a grpc, we assume it's a user error and we display the usage. if grpc.Code(err) == codes.Unknown { c.Println(c.UsageString()) } os.Exit(-1) } } var ( mainCmd = &cobra.Command{ Use: os.Args[0], Short: "Control a swarm cluster", SilenceUsage: true, SilenceErrors: true, } ) func defaultSocket() string { swarmSocket := os.Getenv("SWARM_SOCKET") if swarmSocket != "" { return swarmSocket } return "./swarmkitstate/swarmd.sock" } func init() { mainCmd.PersistentFlags().StringP("socket", "s", defaultSocket(), "Socket to connect to the Swarm manager") mainCmd.PersistentFlags().BoolP("no-resolve", "n", false, "Do not try to map IDs to Names when displaying them") mainCmd.AddCommand( node.Cmd, service.Cmd, task.Cmd, version.Cmd, network.Cmd, cluster.Cmd, ) }
[ "\"SWARM_SOCKET\"" ]
[]
[ "SWARM_SOCKET" ]
[]
["SWARM_SOCKET"]
go
1
0
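Aside (not part of the dataset row above): the `defaultSocket()` helper in `swarmctl/main.go` is the read-an-env-var-with-a-fallback pattern that this corpus annotates. Below is a minimal, self-contained sketch of the same idea; the `envOr` helper name and the standalone `main` are illustrative assumptions of mine, not code from the source file.

```go
package main

import (
	"fmt"
	"os"
)

// envOr returns the value of the named environment variable,
// or fallback when the variable is unset or empty.
func envOr(name, fallback string) string {
	if v := os.Getenv(name); v != "" {
		return v
	}
	return fallback
}

func main() {
	// Mirrors defaultSocket() in the file above: SWARM_SOCKET wins,
	// otherwise a local default path is used.
	socket := envOr("SWARM_SOCKET", "./swarmkitstate/swarmd.sock")
	fmt.Println("using socket:", socket)
}
```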
pype/plugins/maya/load/load_ass.py
from avalon import api import pype.hosts.maya.plugin import os from pype.api import config import clique class AssProxyLoader(pype.hosts.maya.plugin.ReferenceLoader): """Load the Proxy""" families = ["ass"] representations = ["ass"] label = "Reference .ASS standin with Proxy" order = -10 icon = "code-fork" color = "orange" def process_reference(self, context, name, namespace, options): import maya.cmds as cmds from avalon import maya import pymel.core as pm version = context['version'] version_data = version.get("data", {}) self.log.info("version_data: {}\n".format(version_data)) frameStart = version_data.get("frameStart", None) try: family = context["representation"]["context"]["family"] except ValueError: family = "ass" with maya.maintained_selection(): groupName = "{}:{}".format(namespace, name) path = self.fname proxyPath_base = os.path.splitext(path)[0] if frameStart is not None: proxyPath_base = os.path.splitext(proxyPath_base)[0] publish_folder = os.path.split(path)[0] files_in_folder = os.listdir(publish_folder) collections, remainder = clique.assemble(files_in_folder) if collections: hashes = collections[0].padding * '#' coll = collections[0].format('{head}[index]{tail}') filename = coll.replace('[index]', hashes) path = os.path.join(publish_folder, filename) proxyPath = proxyPath_base + ".ma" self.log.info nodes = cmds.file(proxyPath, namespace=namespace, reference=True, returnNewNodes=True, groupReference=True, groupName=groupName) cmds.makeIdentity(groupName, apply=False, rotate=True, translate=True, scale=True) # Set attributes proxyShape = pm.ls(nodes, type="mesh")[0] proxyShape.aiTranslator.set('procedural') proxyShape.dso.set(path) proxyShape.aiOverrideShaders.set(0) presets = config.get_presets(project=os.environ['AVALON_PROJECT']) colors = presets['plugins']['maya']['load']['colors'] c = colors.get(family) if c is not None: cmds.setAttr(groupName + ".useOutlinerColor", 1) cmds.setAttr(groupName + ".outlinerColor", c[0], c[1], c[2]) self[:] = nodes return nodes def switch(self, container, representation): self.update(container, representation) def update(self, container, representation): import os from maya import cmds import pymel.core as pm node = container["objectName"] representation["context"].pop("frame", None) path = api.get_representation_path(representation) print(path) # path = self.fname print(self.fname) proxyPath = os.path.splitext(path)[0] + ".ma" print(proxyPath) # Get reference node from container members members = cmds.sets(node, query=True, nodesOnly=True) reference_node = self._get_reference_node(members) assert os.path.exists(proxyPath), "%s does not exist." % proxyPath try: content = cmds.file(proxyPath, loadReference=reference_node, type="mayaAscii", returnNewNodes=True) # Set attributes proxyShape = pm.ls(content, type="mesh")[0] proxyShape.aiTranslator.set('procedural') proxyShape.dso.set(path) proxyShape.aiOverrideShaders.set(0) except RuntimeError as exc: # When changing a reference to a file that has load errors the # command will raise an error even if the file is still loaded # correctly (e.g. when raising errors on Arnold attributes) # When the file is loaded and has content, we consider it's fine. 
if not cmds.referenceQuery(reference_node, isLoaded=True): raise content = cmds.referenceQuery(reference_node, nodes=True, dagPath=True) if not content: raise self.log.warning("Ignoring file read error:\n%s", exc) # Add new nodes of the reference to the container cmds.sets(content, forceElement=node) # Remove any placeHolderList attribute entries from the set that # are remaining from nodes being removed from the referenced file. members = cmds.sets(node, query=True) invalid = [x for x in members if ".placeHolderList" in x] if invalid: cmds.sets(invalid, remove=node) # Update metadata cmds.setAttr("{}.representation".format(node), str(representation["_id"]), type="string") class AssStandinLoader(api.Loader): """Load .ASS file as standin""" families = ["ass"] representations = ["ass"] label = "Load .ASS file as standin" order = -5 icon = "code-fork" color = "orange" def load(self, context, name, namespace, options): import maya.cmds as cmds import avalon.maya.lib as lib from avalon.maya.pipeline import containerise import mtoa.ui.arnoldmenu import pymel.core as pm version = context['version'] version_data = version.get("data", {}) self.log.info("version_data: {}\n".format(version_data)) frameStart = version_data.get("frameStart", None) asset = context['asset']['name'] namespace = namespace or lib.unique_namespace( asset + "_", prefix="_" if asset[0].isdigit() else "", suffix="_", ) # cmds.loadPlugin("gpuCache", quiet=True) # Root group label = "{}:{}".format(namespace, name) root = pm.group(name=label, empty=True) presets = config.get_presets(project=os.environ['AVALON_PROJECT']) colors = presets['plugins']['maya']['load']['colors'] c = colors.get('ass') if c is not None: cmds.setAttr(root + ".useOutlinerColor", 1) cmds.setAttr(root + ".outlinerColor", c[0], c[1], c[2]) # Create transform with shape transform_name = label + "_ASS" # transform = pm.createNode("transform", name=transform_name, # parent=root) standinShape = pm.PyNode(mtoa.ui.arnoldmenu.createStandIn()) standin = standinShape.getParent() standin.rename(transform_name) pm.parent(standin, root) # Set the standin filepath standinShape.dso.set(self.fname) if frameStart is not None: standinShape.useFrameExtension.set(1) nodes = [root, standin] self[:] = nodes return containerise( name=name, namespace=namespace, nodes=nodes, context=context, loader=self.__class__.__name__) def update(self, container, representation): import pymel.core as pm path = api.get_representation_path(representation) files_in_path = os.listdir(os.path.split(path)[0]) sequence = 0 collections, remainder = clique.assemble(files_in_path) if collections: sequence = 1 # Update the standin standins = list() members = pm.sets(container['objectName'], query=True) for member in members: shape = member.getShape() if (shape and shape.type() == "aiStandIn"): standins.append(shape) for standin in standins: standin.dso.set(path) standin.useFrameExtension.set(sequence) container = pm.PyNode(container["objectName"]) container.representation.set(str(representation["_id"])) def switch(self, container, representation): self.update(container, representation) def remove(self, container): import maya.cmds as cmds members = cmds.sets(container['objectName'], query=True) cmds.lockNode(members, lock=False) cmds.delete([container['objectName']] + members) # Clean up the namespace try: cmds.namespace(removeNamespace=container['namespace'], deleteNamespaceContent=True) except RuntimeError: pass
[]
[]
[ "AVALON_PROJECT" ]
[]
["AVALON_PROJECT"]
python
1
0
contrib/nydusify/tests/nydusify.go
// Copyright 2020 Ant Group. All rights reserved. // // SPDX-License-Identifier: Apache-2.0 package tests import ( "context" "fmt" "os" "path/filepath" "testing" "github.com/stretchr/testify/assert" "github.com/dragonflyoss/image-service/contrib/nydusify/pkg/checker" "github.com/dragonflyoss/image-service/contrib/nydusify/pkg/converter" "github.com/dragonflyoss/image-service/contrib/nydusify/pkg/converter/provider" "github.com/dragonflyoss/image-service/contrib/nydusify/pkg/remote" ) var nydusImagePath string var nydusdPath string func init() { nydusImagePath = os.Getenv("NYDUS_IMAGE") if nydusImagePath == "" { panic("Please specify nydus-image path by env NYDUS_IMAGE") } nydusdPath = os.Getenv("NYDUSD") if nydusdPath == "" { panic("Please specify nydusd path by env NYDUSD") } } type Nydusify struct { Registry *Registry Source string Target string Cache string backendType string backendConfig string } func NewNydusify(registry *Registry, source, target, cache string) *Nydusify { host := registry.Host() backendType := "registry" if os.Getenv("BACKEND_TYPE") != "" { backendType = os.Getenv("BACKEND_TYPE") } backendConfig := fmt.Sprintf(`{ "host": "%s", "repo": "%s", "scheme": "http" }`, host, target) if os.Getenv("BACKEND_CONFIG") != "" { backendConfig = os.Getenv("BACKEND_CONFIG") } return &Nydusify{ Registry: registry, Source: source, Target: target, Cache: cache, backendType: backendType, backendConfig: backendConfig, } } func (nydusify *Nydusify) Convert(t *testing.T) { host := nydusify.Registry.Host() buildCache := "" if nydusify.Cache != "" { buildCache = host + "/" + nydusify.Cache } logger, err := provider.DefaultLogger() assert.Nil(t, err) workDir := "./tmp" sourceDir := filepath.Join(workDir, "source") err = os.MkdirAll(sourceDir, 0755) assert.Nil(t, err) sourceRemote, err := provider.DefaultRemote(host+"/"+nydusify.Source, true) assert.Nil(t, err) sourceProvider, err := provider.DefaultSource(context.Background(), sourceRemote, sourceDir) assert.Nil(t, err) targetRemote, err := provider.DefaultRemote(host+"/"+nydusify.Target, true) assert.Nil(t, err) var cacheRemote *remote.Remote if buildCache != "" { buildCache = host + "/" + nydusify.Cache cacheRemote, err = provider.DefaultRemote(buildCache, true) assert.Nil(t, err) } opt := converter.Opt{ Logger: logger, SourceProvider: sourceProvider, TargetRemote: targetRemote, CacheRemote: cacheRemote, CacheMaxRecords: 10, WorkDir: "./tmp", PrefetchDir: "/", NydusImagePath: nydusImagePath, MultiPlatform: false, DockerV2Format: true, WhiteoutSpec: "oci", BackendType: nydusify.backendType, BackendConfig: nydusify.backendConfig, } cvt, err := converter.New(opt) assert.Nil(t, err) err = cvt.Convert(context.Background()) assert.Nil(t, err) } func (nydusify *Nydusify) Check(t *testing.T) { host := nydusify.Registry.Host() checker, err := checker.New(checker.Opt{ WorkDir: filepath.Join("./tmp", nydusify.Target), Source: host + "/" + nydusify.Source, Target: host + "/" + nydusify.Target, SourceInsecure: true, TargetInsecure: true, NydusImagePath: nydusImagePath, NydusdPath: nydusdPath, BackendType: nydusify.backendType, BackendConfig: nydusify.backendConfig, }) assert.Nil(t, err) err = checker.Check(context.Background()) assert.Nil(t, err) }
[ "\"NYDUS_IMAGE\"", "\"NYDUSD\"", "\"BACKEND_TYPE\"", "\"BACKEND_TYPE\"", "\"BACKEND_CONFIG\"", "\"BACKEND_CONFIG\"" ]
[]
[ "BACKEND_TYPE", "NYDUSD", "BACKEND_CONFIG", "NYDUS_IMAGE" ]
[]
["BACKEND_TYPE", "NYDUSD", "BACKEND_CONFIG", "NYDUS_IMAGE"]
go
4
0
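Aside (not part of the dataset row above): the `init()` block in `nydusify.go` fails fast when required tool paths such as `NYDUS_IMAGE` and `NYDUSD` are missing. A hedged sketch of that fail-fast style follows; the `mustEnv` helper and the trivial `main` are my own illustration, not part of the test harness.

```go
package main

import (
	"fmt"
	"os"
)

// mustEnv aborts early when a required variable is absent, similar in
// spirit to the init() checks for NYDUS_IMAGE and NYDUSD above.
func mustEnv(name string) string {
	v := os.Getenv(name)
	if v == "" {
		panic(fmt.Sprintf("please specify %s via the environment", name))
	}
	return v
}

func main() {
	nydusImage := mustEnv("NYDUS_IMAGE")
	fmt.Println("nydus-image binary:", nydusImage)
}
```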
incubator/virtualcluster/pkg/syncer/resources/service/controller.go
/* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package service import ( "fmt" v1 "k8s.io/api/core/v1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/informers" clientset "k8s.io/client-go/kubernetes" v1core "k8s.io/client-go/kubernetes/typed/core/v1" listersv1 "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/klog" vcclient "sigs.k8s.io/multi-tenancy/incubator/virtualcluster/pkg/client/clientset/versioned" vcinformers "sigs.k8s.io/multi-tenancy/incubator/virtualcluster/pkg/client/informers/externalversions/tenancy/v1alpha1" "sigs.k8s.io/multi-tenancy/incubator/virtualcluster/pkg/syncer/apis/config" "sigs.k8s.io/multi-tenancy/incubator/virtualcluster/pkg/syncer/constants" "sigs.k8s.io/multi-tenancy/incubator/virtualcluster/pkg/syncer/conversion" "sigs.k8s.io/multi-tenancy/incubator/virtualcluster/pkg/syncer/manager" pa "sigs.k8s.io/multi-tenancy/incubator/virtualcluster/pkg/syncer/patrol" uw "sigs.k8s.io/multi-tenancy/incubator/virtualcluster/pkg/syncer/uwcontroller" mc "sigs.k8s.io/multi-tenancy/incubator/virtualcluster/pkg/util/mccontroller" ) type controller struct { config *config.SyncerConfiguration // super master service client serviceClient v1core.ServicesGetter // super master informer/listers/synced functions serviceLister listersv1.ServiceLister serviceSynced cache.InformerSynced // Connect to all tenant master service informers multiClusterServiceController *mc.MultiClusterController // UWcontroller upwardServiceController *uw.UpwardController // Periodic checker servicePatroller *pa.Patroller } func NewServiceController(config *config.SyncerConfiguration, client clientset.Interface, informer informers.SharedInformerFactory, vcClient vcclient.Interface, vcInformer vcinformers.VirtualClusterInformer, options *manager.ResourceSyncerOptions) (manager.ResourceSyncer, *mc.MultiClusterController, *uw.UpwardController, error) { c := &controller{ config: config, serviceClient: client.CoreV1(), } var mcOptions *mc.Options if options == nil || options.MCOptions == nil { mcOptions = &mc.Options{Reconciler: c} } else { mcOptions = options.MCOptions } mcOptions.MaxConcurrentReconciles = constants.DwsControllerWorkerLow multiClusterServiceController, err := mc.NewMCController("tenant-masters-service-controller", &v1.Service{}, *mcOptions) if err != nil { return nil, nil, nil, fmt.Errorf("failed to create service mc controller: %v", err) } c.multiClusterServiceController = multiClusterServiceController c.serviceLister = informer.Core().V1().Services().Lister() if options != nil && options.IsFake { c.serviceSynced = func() bool { return true } } else { c.serviceSynced = informer.Core().V1().Services().Informer().HasSynced } var uwOptions *uw.Options if options == nil || options.UWOptions == nil { uwOptions = &uw.Options{Reconciler: c} } else { uwOptions = options.UWOptions } uwOptions.MaxConcurrentReconciles = constants.UwsControllerWorkerLow upwardServiceController, err := uw.NewUWController("service-upward-controller", &v1.Service{}, 
*uwOptions) if err != nil { return nil, nil, nil, fmt.Errorf("failed to create service upward controller: %v", err) } c.upwardServiceController = upwardServiceController var patrolOptions *pa.Options if options == nil || options.PatrolOptions == nil { patrolOptions = &pa.Options{Reconciler: c} } else { patrolOptions = options.PatrolOptions } servicePatroller, err := pa.NewPatroller("service-patroller", &v1.Service{}, *patrolOptions) if err != nil { return nil, nil, nil, fmt.Errorf("failed to create service patroller: %v", err) } c.servicePatroller = servicePatroller informer.Core().V1().Services().Informer().AddEventHandler( cache.FilteringResourceEventHandler{ FilterFunc: func(obj interface{}) bool { switch t := obj.(type) { case *v1.Service: return isBackPopulateService(t) case cache.DeletedFinalStateUnknown: if e, ok := t.Obj.(*v1.Service); ok { return isBackPopulateService(e) } utilruntime.HandleError(fmt.Errorf("unable to convert object %v to *v1.Service", obj)) return false default: utilruntime.HandleError(fmt.Errorf("unable to handle object in super master service controller: %v", obj)) return false } }, Handler: cache.ResourceEventHandlerFuncs{ AddFunc: c.enqueueService, UpdateFunc: func(oldObj, newObj interface{}) { newService := newObj.(*v1.Service) oldService := oldObj.(*v1.Service) if newService.ResourceVersion != oldService.ResourceVersion { c.enqueueService(newObj) } }, DeleteFunc: c.enqueueService, }, }) return c, multiClusterServiceController, upwardServiceController, nil } func isBackPopulateService(svc *v1.Service) bool { return svc.Spec.Type == v1.ServiceTypeLoadBalancer || svc.Spec.Type == v1.ServiceTypeClusterIP } func (c *controller) enqueueService(obj interface{}) { svc, ok := obj.(*v1.Service) if !ok { return } clusterName, _ := conversion.GetVirtualOwner(svc) if clusterName == "" { return } key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { utilruntime.HandleError(fmt.Errorf("couldn't get key for object %v: %v", obj, err)) return } c.upwardServiceController.AddToQueue(key) } func (c *controller) AddCluster(cluster mc.ClusterInterface) { klog.Infof("tenant-masters-service-controller watch cluster %s for service resource", cluster.GetClusterName()) err := c.multiClusterServiceController.WatchClusterResource(cluster, mc.WatchOptions{}) if err != nil { klog.Errorf("failed to watch cluster %s service event: %v", cluster.GetClusterName(), err) } } func (c *controller) RemoveCluster(cluster mc.ClusterInterface) { klog.Infof("tenant-masters-service-controller stop watching cluster %s for service resource", cluster.GetClusterName()) c.multiClusterServiceController.TeardownClusterResource(cluster) }
[]
[]
[]
[]
[]
go
null
null
null
scripts/insert.py
#!/usr/bin/env python3 import os import subprocess import sys import shutil import binascii import textwrap import platform import sys from datetime import datetime OFFSET_TO_PUT = 0x900000 SOURCE_ROM = "BPRE0.gba" def get_path(): if platform.system() == 'Windows': return os.environ.get('Path').split(';') return os.environ.get('PATH').split(':') Paths = get_path() PATH = "" for candidatePath in Paths: if "devkitARM" in candidatePath: PATH = candidatePath break if PATH == "": PATH = 'C://devkitPro//devkitARM//bin' if os.path.isdir(PATH) == False: print('Devkit not found.') sys.exit(1) ROM_NAME = "test.gba" PREFIX = 'arm-none-eabi-' OBJCOPY = os.path.join(PATH, PREFIX + 'objcopy') OBJDUMP = os.path.join(PATH, PREFIX + 'objdump') NM = os.path.join(PATH, PREFIX + 'nm') AS = os.path.join(PATH, PREFIX + 'as') CC = os.path.join(PATH, PREFIX + 'gcc') CXX = os.path.join(PATH, PREFIX + 'g++') def ExtractPointer(listy): pointer = 0 for a in range(len(listy)): pointer += (int(listy[a])) << (8 * a) return pointer def get_text_section(): try: # Dump sections out = subprocess.check_output([OBJDUMP, '-t', 'build/linked.o']) lines = out.decode().split('\n') # Find text section text = filter(lambda x: x.strip().endswith('.text'), lines) section = (list(text))[0] # Get the offset offset = int(section.split(' ')[0], 16) return offset except: print("Error: The insertion process could not be completed.\n" + "The linker symbol file was not found.") sys.exit(1) def symbols(subtract=0): out = subprocess.check_output([NM, 'build/linked.o']) lines = out.decode().split('\n') name = '' ret = {} for line in lines: parts = line.strip().split() if (len(parts) < 3): continue if (parts[1].lower() not in {'t','d'}): continue offset = int(parts[0], 16) ret[parts[2]] = offset - subtract return ret def hook(rom, space, hook_at, register=0): # Align 2 if hook_at & 1: hook_at -= 1 rom.seek(hook_at) register &= 7 if hook_at % 4: data = bytes([0x01, 0x48 | register, 0x00 | (register << 3), 0x47, 0x0, 0x0]) else: data = bytes([0x00, 0x48 | register, 0x00 | (register << 3), 0x47]) space += 0x08000001 data += (space.to_bytes(4, 'little')) rom.write(bytes(data)) def funcwrap(rom, space, hook_at, nparams, isreturning): # Align 2 if hook_at & 1: hook_at -= 1 rom.seek(hook_at) nparams=nparams-1 if nparams<4: data = bytes([0x10, 0xB5, 0x3, 0x4C, 0x0, 0xF0, 0x3, 0xF8, 0x10, 0xBC , (isreturning+1), 0xBC , (isreturning<<3), 0x47, 0x20, 0x47]) else: k=nparams-3 data = bytes([0x10, 0xB5, 0x82, 0xB0]) for i in range(k+2): data += bytes([ i+2, 0x9C , i, 0x94]) data += bytes([0x0, 0x9C , (nparams-1), 0x94, 0x1, 0x9C , nparams, 0x94, 0x2, 0xB0 , (k+8), 0x4C, 0x0, 0xF0 , ((k<<1)+13), 0xF8, 0x82, 0xB0 , nparams, 0x9C, 0x1, 0x94 , (nparams-1), 0x9C , 0x0, 0x94]) for i in reversed(range(k+2)): data += bytes([ i, 0x9C , i+2, 0x94]) data += bytes([0x2, 0xB0 , 0x10, 0xBC, (isreturning+1), 0xBC , (isreturning<<3), 0x47, 0x20, 0x47]) space += 0x08000001 data += (space.to_bytes(4, 'little')) rom.write(bytes(data)) def repoint(rom, space, repoint_at, slidefactor=0): rom.seek(repoint_at) space += (0x08000000+slidefactor) data = (space.to_bytes(4, 'little')) rom.write(bytes(data)) ignored_offsets = [0x3986C0, 0x3986EC, 0xDABDF0] #These offsets contain the word 0x8900000 - the attack data from #Mr. DS's rombase. In order to maintain as much compatability as #possible, the data at these offsets is never modified. 
def real_repoint(rom, offset_tuples): pointer_list = [] pointer_dict = {} for tuple in offset_tuples: #Format is (Double Pointer, New Pointer, Symbol) offset = tuple[0] rom.seek(offset) pointer = ExtractPointer(rom.read(4)) pointer_list.append(pointer) pointer_dict[pointer] = (tuple[1] + 0x08000000, tuple[2]) offset = 0 offset_list = [] while (offset < 0xFFFFFD): if offset in ignored_offsets: offset += 4 continue rom.seek(offset) word = ExtractPointer(rom.read(4)) rom.seek(offset) for pointer in pointer_list: if word == pointer: offset_list.append((offset, pointer_dict[pointer][1])) rom.write(bytes(pointer_dict[pointer][0].to_bytes(4, 'little'))) break offset += 4 return offset_list def bytereplace(rom, offset, data): ar=offset words=data.split() for i in range(0,len(words)): rom.seek(ar) intbyte=int(words[i],16) rom.write(bytes(intbyte.to_bytes(1, 'big'))) ar += 1 starttime = datetime.now() try: shutil.copyfile(SOURCE_ROM, ROM_NAME) except FileNotFoundError: print('Error: Insertion could not be completed.\nCould not find source rom: "' + SOURCE_ROM + '".\nPlease make sure a rom with this name exists in the root.') sys.exit(0) except PermissionError: print('Error: Insertion could not be completed.\n"' + ROM_NAME + '" is currently in use by another application.\nPlease free it up before trying again.') sys.exit(0) with open(ROM_NAME, 'rb+') as rom: print("Inserting code.") table = symbols(get_text_section()) rom.seek(OFFSET_TO_PUT) with open('build/output.bin', 'rb') as binary: rom.write(binary.read()) binary.close() # Adjust symbol table for entry in table: table[entry] += OFFSET_TO_PUT # Insert byte changes with open('bytereplacement', 'r') as replacelist: for line in replacelist: if line.strip().startswith('#') or line.strip() == '' : continue offset = int(line[:8],16) - 0x08000000 bytereplace(rom, offset, line[9:].strip()) # Do Special Inserts with open('special_inserts.asm', 'r') as file: loadOffsets = False offsetList = [] for line in file: if line.strip().startswith('.org '): offsetList.append(int(line.split('.org ')[1].split(',')[0], 16)) offsetList.sort() try: with open('build/special_inserts.bin', 'rb') as binFile: for offset in offsetList: originalOffset = offset dataList = "" if offsetList.index(offset) == len(offsetList) - 1: while True: try: binFile.seek(offset) dataList += hex(binFile.read(1)[0]) + " " except IndexError: break offset += 1 else: binFile.seek(offset) word = ExtractPointer(binFile.read(4)) while (word != 0xFFFFFFFF): binFile.seek(offset) dataList += hex(binFile.read(1)[0]) + " " offset += 1 if offset in offsetList: #Overlapping data break word = ExtractPointer(binFile.read(4)) bytereplace(rom, originalOffset, dataList.strip()) except FileNotFoundError: pass # Read hooks from a file with open('hooks', 'r') as hooklist: for line in hooklist: if line.strip().startswith('#') or line.strip() == '': continue symbol, address, register = line.split() offset = int(address, 16) - 0x08000000 try: code = table[symbol] except KeyError: print('Symbol missing:', symbol) continue hook(rom, code, offset, int(register)) # Read repoints from a file with open('repoints', 'r') as repointlist: for line in repointlist: if line.strip().startswith('#') or line.strip() == '': continue if len(line.split()) is 2: symbol, address = line.split() offset = int(address, 16) - 0x08000000 try: code = table[symbol] except KeyError: print('Symbol missing:', symbol) continue repoint(rom, code, offset) if len(line.split()) is 3: symbol, address, slide = line.split() offset = int(address, 16) - 
0x08000000 try: code = table[symbol] except KeyError: print('Symbol missing:', symbol) continue repoint(rom, code, offset, int(slide)) symbols_repointed = set() try: with open('generatedrepoints', 'r') as repointlist: for line in repointlist: if line.strip().startswith('#') or line.strip() == '': continue symbol, address = line.split() offset = int(address) try: code = table[symbol] except KeyError: print('Symbol missing:', symbol) continue symbols_repointed.add(symbol) repoint(rom, code, offset) except FileNotFoundError: with open('generatedrepoints', 'w') as repointlist: repointlist.write('##This is a generated file at runtime. Do not modify it!\n') offsets_to_repoint_together = [] with open('repointall', 'r') as repointlist: for line in repointlist: if line.strip().startswith('#') or line.strip() == '': continue symbol, address = line.split() offset = int(address, 16) - 0x08000000 if symbol in symbols_repointed: continue try: code = table[symbol] except KeyError: print('Symbol missing:', symbol) continue offsets_to_repoint_together.append((offset, code, symbol)) if offsets_to_repoint_together != []: offsets = real_repoint(rom, offsets_to_repoint_together) #Format is [(offset, symbol), ...] output = open('generatedrepoints', 'a') for tuple in offsets: output.write(tuple[1] + ' ' + str(tuple[0]) + '\n') #output.close() #Purposely left open so the user can't modify it # Read routine repoints from a file with open('routinepointers', 'r') as pointerlist: for line in pointerlist: if line.strip().startswith('#') or line.strip() == '': continue symbol, address = line.split() offset = int(address, 16) - 0x08000000 try: code = table[symbol] except KeyError: print('Symbol missing:', symbol) continue repoint(rom, code, offset, 1) # Read routine rewrite wrapper from a file with open('functionrewrites', 'r') as frwlist: for line in frwlist: if line.strip().startswith('#') or line.strip() == '': continue symbol, address, nparam, isreturning = line.split() offset = int(address, 16) - 0x08000000 try: code = table[symbol] except KeyError: print('Symbol missing:', symbol) continue funcwrap(rom, code, offset, int(nparam), int(isreturning)) width = max(map(len, table.keys())) + 1 try: offset_file = open("offsets.ini", 'r+') except FileNotFoundError: offset_file = open("offsets.ini", 'w') offset_file.truncate() for key in sorted(table.keys()): fstr = ('{:' + str(width) + '} {:08X}') offset_file.write(fstr.format(key + ':', table[key] + 0x08000000) + '\n') offset_file.close() print('Inserted in ' + str(datetime.now() - starttime) + '.')
[]
[]
[ "PATH", "Path" ]
[]
["PATH", "Path"]
python
2
0
code/common/preprocessing.py
from nltk.tokenize import RegexpTokenizer # from stop_words import get_stop_words from nltk.stem.porter import PorterStemmer from string import punctuation import re from nltk.corpus import stopwords en_stop = stopwords.words('english') from nltk.corpus import wordnet import html from common.commons import * CODE_PATH = os.environ["CODE_PATH"] import spacy nlp = spacy.load('en_core_web_lg', disable=['parser', 'tagger', 'ner']) nlp.max_length =100000000 from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import cosine_similarity import sys def preprocessingCodeElementsList(res): printDetail = False if isinstance(res, list): merged = str() for r in res: if isinstance(r, list): merged = merged + ' ' + ' '.join(r) else: merged = merged +' ' + r else: merged=res res = html.unescape(merged) tokens = getTokens(res,printDetail) stripped = [] for t in tokens: splits = re.split('\.|\(|\)|:|>|<|:|=|/|\\\\|\'|-',t) for s in splits: stripped.append(s) punc = removeEndingPunct(stripped,printDetail) non_empty = [i for i in punc if i != ''] stripped = removeEndingPunct(non_empty,printDetail) camelCase = handleCamelCase(stripped,printDetail,True) underScore = handleUnderScore(camelCase,printDetail,True) lower = [i.lower() for i in underScore] stopped_tokens = [i for i in lower if not i in en_stop] stem2 = stem(stopped_tokens,printDetail) if printDetail: print('=====CLEANED=========') print(stem2) return stem2 def preprocessingNL(res): printDetail = False if isinstance(res, list): merged = str() for r in res: if isinstance(r, list): merged = merged + ' ' + ' '.join(r) else: merged = merged +' ' + r else: merged=res res = html.unescape(merged) html_decoded_string = res.replace("&amp;", "&").replace("&quot;", '"').replace("&apos;", "'").replace("&gt;", ">").replace( "&lt;", "<") html_decoded_string = re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '',html_decoded_string) tokens = getTokens(html_decoded_string,printDetail) stripped = [] for t in tokens: splits = re.split('\.|\(|\)|:|>|<|:|=|/|\\\\|\'|-',t) for s in splits: stripped.append(s) punc = removeEndingPunct(stripped,printDetail) non_empty = [i for i in punc if i != ''] stripped = removeEndingPunct(non_empty,printDetail) camelCase = handleCamelCase(stripped,printDetail,True) underScore = handleUnderScore(camelCase,printDetail,True) lower = [i.lower() for i in underScore] stopped_tokens = [i for i in lower if not i in en_stop] nonDigit = [i for i in stopped_tokens if (not i.isdigit())] doc = nlp(' '.join(nonDigit)) newWord = [] for token in doc: if(token.text in nlp.vocab): newWord.append(token.text) stem2 = stem(newWord,printDetail) if printDetail: print('=====CLEANED=========') print(stem2) return stem2 def getTokens(re,printDetail=False): tokenizer = RegexpTokenizer(r'\S+') tokens = tokenizer.tokenize(re) if printDetail: print('=====TOKENS=========') print(tokens) return tokens def charLength(x, l=3): if x.isalpha() and len(x) >= l: return True else: return False def removeEndingPunct(re,printDetail): stripped = [i.strip(punctuation) for i in re] if printDetail: print('=====removeEndingPunct=========') print(stripped) return stripped def handleCamelCase(re,printDetail=False,keepOriginal = False): camelCased = list() for i in re: listOfCC = camel_case_split(i) camelCased.extend(listOfCC) if i not in listOfCC and keepOriginal: camelCased.append(i) if printDetail: print('=====CAMEL CASE=========') print(camelCased) return camelCased def 
handleUnderScore(re,printDetail=False,keepOriginal = False): underScored = list() for i in re: listOfCC = i.split('_') underScored.extend(listOfCC) if i not in listOfCC and keepOriginal: underScored.append(i) if printDetail: print('=====UNDER SCORE=========') print(underScored) return underScored def camel_case_split(identifier): matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier) res = [m.group(0) for m in matches] return res def stem(res,printDetail): p_stemmer = PorterStemmer() stemmed_tokens = [p_stemmer.stem(i.strip()) for i in res if i] if printDetail: print('=====STEMMED=========') print(stemmed_tokens) return stemmed_tokens def isEnglish(word_to_test): if not wordnet.synsets(word_to_test): #Not an English Word #TODO word_to_test #print word_to_test else: return word_to_test def dummy_fun(doc): return doc def calculateTfIdfCodeElementsList(aCorpus): global progress progress = 0 v = TfidfVectorizer(tokenizer=dummy_fun,stop_words=None,lowercase=False,sublinear_tf=True)#,max_df=0.7,min_df=3) m = v.fit(aCorpus) return v def calculateTfIdfNLList(aCorpus): global progress progress = 0 v = TfidfVectorizer(tokenizer=dummy_fun,stop_words=None,lowercase=False,sublinear_tf=True)#,max_df=0.7,min_df=3) m = v.fit(aCorpus) return v def getDTMNL(x,v,corpus): ind =x.name v.tokenizer = dummy_fun return v.transform([corpus[ind]]) def getDTMCE(x,v,corpus): ind =x.name v.tokenizer = dummy_fun return v.transform([corpus[ind]]) def getBRDTM(x,v,corpus): ind =x.name v.tokenizer = dummy_fun return v.transform([corpus[ind]]) def getBRDTMCEs(x,v,corpus): ind =x.name v.tokenizer = dummy_fun return v.transform([corpus[ind]])
[]
[]
[ "CODE_PATH" ]
[]
["CODE_PATH"]
python
1
0
deploy/aws-lambda/main.go
package main import ( "os" "fmt" "strconv" "time" "github.com/akrylysov/algnhsa" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/dynamodb" "github.com/jhaals/yopass/pkg/yopass" ) func main() { maxLength, _ := strconv.Atoi(os.Getenv("MAX_LENGTH")) if maxLength == 0 { maxLength = 10000 } y := yopass.New(NewDynamo(os.Getenv("TABLE_NAME")), maxLength) algnhsa.ListenAndServe( y.HTTPHandler(), nil) } // Dynamo Database implementation type Dynamo struct { tableName string svc *dynamodb.DynamoDB } // NewDynamo returns a database client func NewDynamo(tableName string) yopass.Database { return &Dynamo{tableName: tableName, svc: dynamodb.New(session.New())} } // Get item from dynamo func (d *Dynamo) Get(key string) (yopass.Secret, error) { var s yopass.Secret input := &dynamodb.GetItemInput{ Key: map[string]*dynamodb.AttributeValue{ "id": { S: aws.String(key), }, }, TableName: aws.String(d.tableName), } result, err := d.svc.GetItem(input) if err != nil { return s, err } if len(result.Item) == 0 { return s, fmt.Errorf("Key not found in database") } if *result.Item["one_time"].BOOL { if err := d.Delete(key); err != nil { return s, err } } s.Message = *result.Item["secret"].S return s, nil } // Delete item func (d *Dynamo) Delete(key string) error { input := &dynamodb.DeleteItemInput{ Key: map[string]*dynamodb.AttributeValue{ "id": { S: aws.String(key), }, }, TableName: aws.String(d.tableName), } _, err := d.svc.DeleteItem(input) return err } // Put item in Dynamo func (d *Dynamo) Put(key string, secret yopass.Secret) error { input := &dynamodb.PutItemInput{ // TABLE GENERATED NAME Item: map[string]*dynamodb.AttributeValue{ "id": { S: aws.String(key), }, "secret": { S: aws.String(secret.Message), }, "one_time": { BOOL: aws.Bool(secret.OneTime), }, "ttl": { N: aws.String( fmt.Sprintf( "%d", time.Now().Unix()+int64(secret.Expiration))), }, }, TableName: aws.String(d.tableName), } _, err := d.svc.PutItem(input) return err }
[ "\"MAX_LENGTH\"", "\"TABLE_NAME\"" ]
[]
[ "TABLE_NAME", "MAX_LENGTH" ]
[]
["TABLE_NAME", "MAX_LENGTH"]
go
2
0
tools/dns.go
package tools import ( "context" "errors" "log" "os" "github.com/cloudflare/cloudflare-go" ) var ErrorDnsProviderNotFound = errors.New("DNS provider cannot be found") type DnsProviderType string const ( DnsCloudflare DnsProviderType = "cf" ) type IDnsProvider interface { AddRecord(record string, ip string) error RemoveRecord(record string, ip string) error } type CloudflareProvider struct { ZoneId string API *cloudflare.API } func (c CloudflareProvider) AddRecord(record string, ip string) error { r, err := c.getRecord(record) if r != nil || err != nil { return err } proxied := true rec := cloudflare.DNSRecord{Type: "A", Name: record, Content: ip, Proxied: &proxied, ZoneID: c.ZoneId} _, err = c.API.CreateDNSRecord(context.Background(), c.ZoneId, rec) return err } func (c CloudflareProvider) RemoveRecord(record string, ip string) error { r, err := c.getRecord(record) if r == nil || err != nil { return err } log.Printf("DNS record found for deletion %v", r) return c.API.DeleteDNSRecord(context.Background(), c.ZoneId, r.ID) } func (c CloudflareProvider) getRecord(record string) (r *cloudflare.DNSRecord, err error) { filter := cloudflare.DNSRecord{Name: record, ZoneID: c.ZoneId} recs, err := c.API.DNSRecords(context.Background(), c.ZoneId, filter) if len(recs) == 0 || err != nil { return nil, err } r = &recs[0] return r, err } func NewDnsProvider() (IDnsProvider, error) { cloudflareToken := os.Getenv("CF_TOKEN") cloudflareZone := os.Getenv("CF_ZONE_ID") if cloudflareToken != "" && cloudflareZone != "" { api, err := cloudflare.NewWithAPIToken(cloudflareToken) if err != nil { log.Fatal(err) } return CloudflareProvider{ ZoneId: cloudflareZone, API: api, }, nil } return nil, ErrorDnsProviderNotFound }
[ "\"CF_TOKEN\"", "\"CF_ZONE_ID\"" ]
[]
[ "CF_ZONE_ID", "CF_TOKEN" ]
[]
["CF_ZONE_ID", "CF_TOKEN"]
go
2
0
ssl_check/send_to_prometheus.py
import requests
import urllib.parse
import os
from prometheus_client import CollectorRegistry, Gauge, push_to_gateway


def to_prometheus_pushgateway(prometheus_pushgateway, url):
    try:
        # requests needs a numeric timeout, so cast the env value when it is set
        timeout = 1 if not os.getenv('PUSH_GATEWAY_TIMEOUT') else int(os.getenv('PUSH_GATEWAY_TIMEOUT'))
        pg_server = os.getenv('PUSH_GATEWAY_SERVER') if os.getenv('PUSH_GATEWAY_SERVER') \
            else 'localhost'
        pg_port = os.getenv('PUSH_GATEWAY_PORT') if os.getenv('PUSH_GATEWAY_PORT') \
            else '9091'
        pg_url = ''.join(['http://', pg_server, ':', pg_port])
        job_name = 'cert_validity_alert'
        instance_name = urllib.parse.urlparse(url).netloc
        team_name = 'certTeam'
        payload_key = 'cert_verification_error_code'
        payload_value = '1'  # different error codes might signal different problems
        endpoint = '{pg_url}/metrics/job/{j}/instance/{i}/team/{t}'.format(
            pg_url=pg_url, j=job_name, i=instance_name, t=team_name)
        response = requests.post(endpoint,
                                 data='{k} {v}\n'.format(k=payload_key, v=payload_value),
                                 timeout=timeout)
        print('Prometheus pushgateway response status code: ', response.status_code)
        return response.status_code
    except Exception as e:
        print(e)
        return 440
[]
[]
[ "PUSH_GATEWAY_TIMEOUT", "PUSH_GATEWAY_PORT", "PUSH_GATEWAY_SERVER" ]
[]
["PUSH_GATEWAY_TIMEOUT", "PUSH_GATEWAY_PORT", "PUSH_GATEWAY_SERVER"]
python
3
0
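The ssl_check record above pushes its failure metric by POSTing a raw "key value" body to the Pushgateway's /metrics/job/... path, even though it imports prometheus_client helpers it never uses. The sketch below is a hedged alternative using those imported helpers rather than the module's actual behavior; the gateway address comes from the same environment variables with the same defaults, and the metric, job, instance and team values mirror the record.

import os
import urllib.parse
from prometheus_client import CollectorRegistry, Gauge, push_to_gateway

def push_cert_error(url, value=1):
    # Register a single gauge in a throwaway registry and push it.
    registry = CollectorRegistry()
    gauge = Gauge('cert_verification_error_code',
                  'Non-zero when certificate verification failed',
                  registry=registry)
    gauge.set(value)
    gateway = '{}:{}'.format(os.getenv('PUSH_GATEWAY_SERVER', 'localhost'),
                             os.getenv('PUSH_GATEWAY_PORT', '9091'))
    # instance/team were URL path segments in the record; here they become grouping keys.
    push_to_gateway(gateway,
                    job='cert_validity_alert',
                    registry=registry,
                    grouping_key={'instance': urllib.parse.urlparse(url).netloc,
                                  'team': 'certTeam'})

# Example call (assumes a Pushgateway is reachable at the configured address):
# push_cert_error('https://example.com')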
train.py
import os, sys import time import torch from torch import optim import torch.nn as nn import timeit import math import numpy as np import matplotlib matplotlib.use('Agg') from matplotlib import pyplot as plt import torch.backends.cudnn as cudnn from argparse import ArgumentParser # user from builders.model_builder import build_model from builders.dataset_builder import build_dataset_train from utils.utils import setup_seed, init_weight, netParams from utils.metric.metric import get_iou from utils.losses.loss import LovaszSoftmax, CrossEntropyLoss2d, CrossEntropyLoss2dLabelSmooth, \ ProbOhemCrossEntropy2d, FocalLoss2d from utils.optim import RAdam, Ranger, AdamW from utils.scheduler.lr_scheduler import WarmupPolyLR sys.setrecursionlimit(1000000) # solve problem 'maximum recursion depth exceeded' torch_ver = torch.__version__[:3] if torch_ver == '0.3': from torch.autograd import Variable print(torch_ver) GLOBAL_SEED = 1234 def parse_args(): parser = ArgumentParser(description='Efficient semantic segmentation') # model and dataset parser.add_argument('--model', type=str, default="ENet", help="model name: (default ENet)") parser.add_argument('--dataset', type=str, default="custom_dataset", help="dataset: cityscapes or camvid") parser.add_argument('--input_size', type=str, default="360,480", help="input size of model") parser.add_argument('--num_workers', type=int, default=4, help=" the number of parallel threads") parser.add_argument('--classes', type=int, default=19, help="the number of classes in the dataset. 19 and 11 for cityscapes and camvid, respectively") parser.add_argument('--train_type', type=str, default="train", help="ontrain for training on train set, ontrainval for training on train+val set") # training hyper params parser.add_argument('--max_epochs', type=int, default=1000, help="the number of epochs: 300 for train set, 350 for train+val set") parser.add_argument('--random_mirror', type=bool, default=True, help="input image random mirror") parser.add_argument('--random_scale', type=bool, default=True, help="input image resize 0.5 to 2") parser.add_argument('--lr', type=float, default=5e-4, help="initial learning rate") parser.add_argument('--batch_size', type=int, default=4, help="the batch size is set to 16 for 2 GPUs") parser.add_argument('--optim', type=str.lower, default='adam', choices=['sgd', 'adam', 'radam', 'ranger'], help="select optimizer") parser.add_argument('--lr_schedule', type=str, default='warmpoly', help='name of lr schedule: poly') parser.add_argument('--num_cycles', type=int, default=1, help='Cosine Annealing Cyclic LR') parser.add_argument('--poly_exp', type=float, default=0.9, help='polynomial LR exponent') parser.add_argument('--warmup_iters', type=int, default=500, help='warmup iterations') parser.add_argument('--warmup_factor', type=float, default=1.0 / 3, help='warm up start lr=warmup_factor*lr') parser.add_argument('--use_label_smoothing', action='store_true', default=False, help="CrossEntropy2d Loss with label smoothing or not") parser.add_argument('--use_ohem', action='store_true', default=False, help='OhemCrossEntropy2d Loss for cityscapes dataset') parser.add_argument('--use_lovaszsoftmax', action='store_true', default=True, help='LovaszSoftmax Loss for cityscapes dataset') parser.add_argument('--use_focal', action='store_true', default=False, help=' FocalLoss2d for cityscapes dataset') # cuda setting parser.add_argument('--cuda', type=bool, default=True, help="running on CPU or GPU") parser.add_argument('--gpus', type=str, default="0", 
help="default GPU devices (0,1)") # checkpoint and log parser.add_argument('--resume', type=str, default=r"", help="use this file to load last checkpoint for continuing training") parser.add_argument('--savedir', default="./checkpoint/", help="directory to save the model snapshot") parser.add_argument('--logFile', default="log.txt", help="storing the training and validation logs") args = parser.parse_args() return args def train_model(args): """ args: args: global arguments """ h, w = map(int, args.input_size.split(',')) input_size = (h, w) print("=====> input size:{}".format(input_size)) print(args) if args.cuda: print("=====> use gpu id: '{}'".format(args.gpus)) os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus if not torch.cuda.is_available(): raise Exception("No GPU found or Wrong gpu id, please run without --cuda") # set the seed setup_seed(GLOBAL_SEED) print("=====> set Global Seed: ", GLOBAL_SEED) cudnn.enabled = True print("=====> building network") # build the model and initialization model = build_model(args.model, num_classes=args.classes) init_weight(model, nn.init.kaiming_normal_, nn.BatchNorm2d, 1e-3, 0.1, mode='fan_in') print("=====> computing network parameters and FLOPs") total_paramters = netParams(model) print("the number of parameters: %d ==> %.2f M" % (total_paramters, (total_paramters / 1e6))) # load data and data augmentation datas, trainLoader, valLoader = build_dataset_train(args.dataset, input_size, args.batch_size, args.train_type, args.random_scale, args.random_mirror, args.num_workers) args.per_iter = len(trainLoader) args.max_iter = args.max_epochs * args.per_iter print('=====> Dataset statistics') print("data['classWeights']: ", datas['classWeights']) print('mean and std: ', datas['mean'], datas['std']) # define loss function, respectively weight = torch.from_numpy(datas['classWeights']) if args.dataset == 'camvid': criteria = CrossEntropyLoss2d(weight=weight, ignore_label=ignore_label) elif args.dataset == 'camvid' and args.use_label_smoothing: criteria = CrossEntropyLoss2dLabelSmooth(weight=weight, ignore_label=ignore_label) elif args.dataset == 'cityscapes' and args.use_ohem: min_kept = int(args.batch_size // len(args.gpus) * h * w // 16) criteria = ProbOhemCrossEntropy2d(use_weight=True, ignore_label=ignore_label, thresh=0.7, min_kept=min_kept) elif args.dataset == 'cityscapes' and args.use_label_smoothing: criteria = CrossEntropyLoss2dLabelSmooth(weight=weight, ignore_label=ignore_label) elif (args.dataset == 'cityscapes' or args.dataset == 'custom_dataset') and args.use_lovaszsoftmax: criteria = LovaszSoftmax(ignore_index=ignore_label) elif (args.dataset == 'cityscapes' or args.dataset == 'custom_dataset') and args.use_focal: criteria = FocalLoss2d(weight=weight, ignore_index=ignore_label) else: raise NotImplementedError( "This repository now supports two datasets: cityscapes and camvid, %s is not included" % args.dataset) if args.cuda: criteria = criteria.cuda() if torch.cuda.device_count() > 1: print("torch.cuda.device_count()=", torch.cuda.device_count()) args.gpu_nums = torch.cuda.device_count() model = nn.DataParallel(model).cuda() # multi-card data parallel else: args.gpu_nums = 1 print("single GPU for training") model = model.cuda() # 1-card data parallel args.savedir = (args.savedir + args.dataset + '/' + args.model + 'bs' + str(args.batch_size) + 'gpu' + str(args.gpu_nums) + "_" + str(args.train_type) + '/') if not os.path.exists(args.savedir): os.makedirs(args.savedir) start_epoch = 0 # continue training if args.resume: if 
os.path.isfile(args.resume): checkpoint = torch.load(args.resume) start_epoch = checkpoint['epoch'] model.load_state_dict(checkpoint['model']) # model.load_state_dict(convert_state_dict(checkpoint['model'])) print("=====> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch'])) else: print("=====> no checkpoint found at '{}'".format(args.resume)) model.train() cudnn.benchmark = True # cudnn.deterministic = True ## my add logFileLoc = args.savedir + args.logFile if os.path.isfile(logFileLoc): logger = open(logFileLoc, 'a') else: logger = open(logFileLoc, 'w') logger.write("Parameters: %s Seed: %s" % (str(total_paramters), GLOBAL_SEED)) logger.write("\n%s\t\t%s\t%s\t%s" % ('Epoch', 'Loss(Tr)', 'mIOU (val)', 'lr')) logger.flush() # define optimization strategy if args.optim == 'sgd': optimizer = torch.optim.SGD( filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, momentum=0.9, weight_decay=1e-4) elif args.optim == 'adam': optimizer = torch.optim.Adam( filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-4) elif args.optim == 'radam': optimizer = RAdam( filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, betas=(0.90, 0.999), eps=1e-08, weight_decay=1e-4) elif args.optim == 'ranger': optimizer = Ranger( filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, betas=(0.95, 0.999), eps=1e-08, weight_decay=1e-4) elif args.optim == 'adamw': optimizer = AdamW( filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-4) lossTr_list = [] epoches = [] mIOU_val_list = [] save_stuff_interval = 5 print('=====> beginning training') for epoch in range(start_epoch, args.max_epochs): # training lossTr, lr = train(args, trainLoader, model, criteria, optimizer, epoch) lossTr_list.append(lossTr) # validation if epoch % save_stuff_interval == 0 or epoch == (args.max_epochs - 1): epoches.append(epoch) mIOU_val, per_class_iu = val(args, valLoader, model) mIOU_val_list.append(mIOU_val) # record train information logger.write("\n%d\t\t%.4f\t\t%.4f\t\t%.7f" % (epoch, lossTr, mIOU_val, lr)) logger.flush() print("Epoch : " + str(epoch) + ' Details') print("Epoch No.: %d\tTrain Loss = %.4f\t mIOU(val) = %.4f\t lr= %.6f\n" % (epoch, lossTr, mIOU_val, lr)) else: # record train information logger.write("\n%d\t\t%.4f\t\t\t\t%.7f" % (epoch, lossTr, lr)) logger.flush() print("Epoch : " + str(epoch) + ' Details') print("Epoch No.: %d\tTrain Loss = %.4f\t lr= %.6f\n" % (epoch, lossTr, lr)) # save the model model_file_name = args.savedir + '/model_' + str(epoch + 1) + '.pth' state = {"epoch": epoch + 1, "model": model.state_dict()} # Individual Setting for save model !!! 
if args.dataset == 'camvid': torch.save(state, model_file_name) elif args.dataset == 'cityscapes' or args.dataset == 'custom_dataset': if epoch >= args.max_epochs - 10: torch.save(state, model_file_name) elif not epoch % save_stuff_interval: torch.save(state, model_file_name) # draw plots for visualization if epoch % save_stuff_interval == 0 or epoch == (args.max_epochs - 1): # Plot the figures per 50 epochs fig1, ax1 = plt.subplots(figsize=(11, 8)) ax1.plot(range(start_epoch, epoch + 1), lossTr_list) ax1.set_title("Average training loss vs epochs") ax1.set_xlabel("Epochs") ax1.set_ylabel("Current loss") plt.savefig(args.savedir + "loss_vs_epochs.png") plt.clf() fig2, ax2 = plt.subplots(figsize=(11, 8)) ax2.plot(epoches, mIOU_val_list, label="Val IoU") ax2.set_title("Average IoU vs epochs") ax2.set_xlabel("Epochs") ax2.set_ylabel("Current IoU") plt.legend(loc='lower right') plt.savefig(args.savedir + "iou_vs_epochs.png") plt.close('all') logger.close() def train(args, train_loader, model, criterion, optimizer, epoch): """ args: train_loader: loaded for training dataset model: model criterion: loss function optimizer: optimization algorithm, such as ADAM or SGD epoch: epoch number return: average loss, per class IoU, and mean IoU """ model.train() epoch_loss = [] total_batches = len(train_loader) print("=====> the number of iterations per epoch: ", total_batches) st = time.time() for iteration, batch in enumerate(train_loader, 0): args.per_iter = total_batches args.max_iter = args.max_epochs * args.per_iter args.cur_iter = epoch * args.per_iter + iteration # learming scheduling if args.lr_schedule == 'poly': lambda1 = lambda epoch: math.pow((1 - (args.cur_iter / args.max_iter)), args.poly_exp) scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda1) elif args.lr_schedule == 'warmpoly': scheduler = WarmupPolyLR(optimizer, T_max=args.max_iter, cur_iter=args.cur_iter, warmup_factor=1.0 / 3, warmup_iters=args.warmup_iters, power=0.9) lr = optimizer.param_groups[0]['lr'] start_time = time.time() images, labels, _, _ = batch if torch_ver == '0.3': images = Variable(images).cuda() labels = Variable(labels.long()).cuda() else: images = images.cuda() labels = labels.long().cuda() output = model(images) loss = criterion(output, labels) optimizer.zero_grad() loss.backward() optimizer.step() scheduler.step() # In pytorch 1.1.0 and later, should call 'optimizer.step()' before 'lr_scheduler.step()' epoch_loss.append(loss.item()) time_taken = time.time() - start_time print('=====> epoch[%d/%d] iter: (%d/%d) \tcur_lr: %.6f loss: %.3f time:%.2f' % (epoch + 1, args.max_epochs, iteration + 1, total_batches, lr, loss.item(), time_taken)) time_taken_epoch = time.time() - st remain_time = time_taken_epoch * (args.max_epochs - 1 - epoch) m, s = divmod(remain_time, 60) h, m = divmod(m, 60) print("Remaining training time = %d hour %d minutes %d seconds" % (h, m, s)) average_epoch_loss_train = sum(epoch_loss) / len(epoch_loss) return average_epoch_loss_train, lr def val(args, val_loader, model): """ args: val_loader: loaded for validation dataset model: model return: mean IoU and IoU class """ # evaluation mode model.eval() total_batches = len(val_loader) data_list = [] for i, (input, label, size, name) in enumerate(val_loader): start_time = time.time() with torch.no_grad(): # input_var = Variable(input).cuda() input_var = input.cuda() output = model(input_var) time_taken = time.time() - start_time print("[%d/%d] time: %.2f" % (i + 1, total_batches, time_taken)) output = output.cpu().data[0].numpy() gt 
= np.asarray(label[0].numpy(), dtype=np.uint8) output = output.transpose(1, 2, 0) output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8) data_list.append([gt.flatten(), output.flatten()]) meanIoU, per_class_iu = get_iou(data_list, args.classes) return meanIoU, per_class_iu if __name__ == '__main__': start = timeit.default_timer() args = parse_args() if args.dataset == 'cityscapes': args.classes = 19 args.input_size = '512,1024' ignore_label = 255 elif args.dataset == 'camvid': args.classes = 11 args.input_size = '360,480' ignore_label = 11 elif args.dataset == 'custom_dataset': args.classes = 2 args.input_size = '512,1024' ignore_label = 255 else: raise NotImplementedError( "This repository now supports two datasets: cityscapes and camvid, %s is not included" % args.dataset) train_model(args) end = timeit.default_timer() hour = 1.0 * (end - start) / 3600 minute = (hour - int(hour)) * 60 print("training time: %d hour %d minutes" % (int(hour), int(minute)))
[]
[]
[ "CUDA_VISIBLE_DEVICES" ]
[]
["CUDA_VISIBLE_DEVICES"]
python
1
0
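The train.py record above implements its 'poly' learning-rate option by rebuilding a LambdaLR scheduler every iteration from a cur_iter counter. The sketch below shows the same polynomial decay in isolation, folded into one LambdaLR instance driven by the scheduler's own step count; the dummy parameter, max_iter value and five-step loop are placeholders for illustration only.

import math
import torch
from torch import optim

# Hypothetical single parameter just to drive the optimizer and scheduler.
param = torch.zeros(1, requires_grad=True)
optimizer = optim.SGD([param], lr=5e-4)

max_iter = 100
poly_exp = 0.9

# Poly decay as in the record: lr(step) = base_lr * (1 - step / max_iter) ** poly_exp.
scheduler = optim.lr_scheduler.LambdaLR(
    optimizer, lr_lambda=lambda step: math.pow(1 - step / max_iter, poly_exp))

for step in range(5):
    optimizer.step()
    scheduler.step()
    print(step, optimizer.param_groups[0]['lr'])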
api/src/opentrons/hardware_control/module_control.py
import logging import asyncio import os import re from typing import List, Tuple, Optional from glob import glob from opentrons.config import IS_ROBOT, IS_LINUX from opentrons.drivers.rpi_drivers import types from opentrons.hardware_control.modules import ModuleAtPort from .execution_manager import ExecutionManager from .types import AionotifyEvent from . import modules log = logging.getLogger(__name__) MODULE_PORT_REGEX = re.compile("|".join(modules.MODULE_HW_BY_NAME.keys()), re.I) class AttachedModulesControl: """ A class to handle monitoring module attachment, capturing the physical USB port information and finally building a module object. """ def __init__(self, api): self._available_modules: List[modules.AbstractModule] = [] self._api = api @classmethod async def build(cls, api_instance): mc_instance = cls(api_instance) if not api_instance.is_simulator: await mc_instance.register_modules(mc_instance.scan()) return mc_instance @property def available_modules(self) -> List[modules.AbstractModule]: return self._available_modules @property def api(self): return self._api async def build_module( self, port: str, usb_port: types.USBPort, model: str, loop: asyncio.AbstractEventLoop, sim_model: str = None, ) -> modules.AbstractModule: return await modules.build( port=port, usb_port=usb_port, which=model, simulating=self.api.is_simulator, interrupt_callback=self.api.pause_with_message, loop=loop, execution_manager=self.api._execution_manager, sim_model=sim_model, ) async def unregister_modules( self, mods_at_ports: List[modules.ModuleAtPort] ) -> None: """ De-register Modules. Remove any modules that are no longer found by aionotify. """ removed_modules = [] for mod in mods_at_ports: for attached_mod in self.available_modules: if attached_mod.port == mod.port: removed_modules.append(attached_mod) for removed_mod in removed_modules: try: self._available_modules.remove(removed_mod) except ValueError: log.exception( f"Removed Module {removed_mod} not" " found in attached modules" ) for removed_mod in removed_modules: log.info( f"Module {removed_mod.name()} detached" f" from port {removed_mod.port}" ) await removed_mod.cleanup() async def register_modules( self, new_mods_at_ports: List[modules.ModuleAtPort] = None, removed_mods_at_ports: List[modules.ModuleAtPort] = None, ) -> None: """ Register Modules. Upon system recognition of a module being plugged in, we should register that module and de-register any modules that are no longer found on the system. """ if new_mods_at_ports is None: new_mods_at_ports = [] if removed_mods_at_ports is None: removed_mods_at_ports = [] # destroy removed mods await self.unregister_modules(removed_mods_at_ports) sorted_mods_at_port = self.api._backend._usb.match_virtual_ports( new_mods_at_ports ) # build new mods for mod in sorted_mods_at_port: new_instance = await self.build_module( port=mod.port, usb_port=mod.usb_port, model=mod.name, loop=self.api.loop ) self._available_modules.append(new_instance) log.info( f"Module {mod.name} discovered and attached" f" at port {mod.port}, new_instance: {new_instance}" ) async def parse_modules( self, by_model: modules.types.ModuleModel, resolved_type: modules.types.ModuleType, ) -> Tuple[List[modules.AbstractModule], Optional[modules.AbstractModule]]: """ Parse Modules. Given a module model and type, find all attached modules that fit this criteria. If there are no modules attached, but the module is being loaded in simulation, then it should return a simulating module of the same type. 
""" matching_modules = [] simulated_module = None mod_type = { modules.types.ModuleType.MAGNETIC: "magdeck", modules.types.ModuleType.TEMPERATURE: "tempdeck", modules.types.ModuleType.THERMOCYCLER: "thermocycler", }[resolved_type] for module in self.available_modules: if mod_type == module.name(): matching_modules.append(module) if self.api.is_simulator: module_builder = { "magdeck": modules.MagDeck.build, "tempdeck": modules.TempDeck.build, "thermocycler": modules.Thermocycler.build, }[mod_type] if module_builder: simulating_module = await module_builder( port="", usb_port=self.api._backend._usb.find_port(""), simulating=True, loop=self.api.loop, execution_manager=ExecutionManager(loop=self.api.loop), sim_model=by_model.value, ) simulated_module = simulating_module return matching_modules, simulated_module def scan(self) -> List[modules.ModuleAtPort]: """Scan for connected modules and return list of tuples of serial ports and device names """ if IS_ROBOT and IS_LINUX: devices = glob("/dev/ot_module*") else: devices = [] discovered_modules = [] for port in devices: symlink_port = port.split("dev/")[1] module_at_port = self.get_module_at_port(symlink_port) if module_at_port: discovered_modules.append(module_at_port) # Check for emulator environment variables emulator_uri = os.environ.get("OT_THERMOCYCLER_EMULATOR_URI") if emulator_uri: discovered_modules.append( ModuleAtPort(port=emulator_uri, name="thermocycler") ) emulator_uri = os.environ.get("OT_TEMPERATURE_EMULATOR_URI") if emulator_uri: discovered_modules.append(ModuleAtPort(port=emulator_uri, name="tempdeck")) emulator_uri = os.environ.get("OT_MAGNETIC_EMULATOR_URI") if emulator_uri: discovered_modules.append(ModuleAtPort(port=emulator_uri, name="magdeck")) log.debug("Discovered modules: {}".format(discovered_modules)) return discovered_modules @staticmethod def get_module_at_port(port: str) -> Optional[modules.ModuleAtPort]: """Given a port, returns either a ModuleAtPort if it is a recognized module, or None if not recognized. """ match = MODULE_PORT_REGEX.search(port) if match: name = match.group().lower() if name not in modules.MODULE_HW_BY_NAME: log.warning(f"Unexpected module connected: {name} on {port}") return None return modules.ModuleAtPort(port=f"/dev/{port}", name=name) return None async def handle_module_appearance(self, event: AionotifyEvent): """Only called upon availability of aionotify. Check that the file system has changed and either remove or add modules depending on the result. :param event_name: The title of the even passed into aionotify. :param event_flags: AionotifyFlags dataclass that maps flags listed from the aionotify event. """ maybe_module_at_port = self.get_module_at_port(event.name) new_modules = None removed_modules = None if maybe_module_at_port is not None: if hasattr(event.flags, "DELETE"): removed_modules = [maybe_module_at_port] log.info(f"Module Removed: {maybe_module_at_port}") elif hasattr(event.flags, "CREATE"): new_modules = [maybe_module_at_port] log.info(f"Module Added: {maybe_module_at_port}") try: await self.register_modules( removed_mods_at_ports=removed_modules, new_mods_at_ports=new_modules, ) except Exception: log.exception("Exception in Module registration")
[]
[]
[ "OT_THERMOCYCLER_EMULATOR_URI", "OT_TEMPERATURE_EMULATOR_URI", "OT_MAGNETIC_EMULATOR_URI" ]
[]
["OT_THERMOCYCLER_EMULATOR_URI", "OT_TEMPERATURE_EMULATOR_URI", "OT_MAGNETIC_EMULATOR_URI"]
python
3
0
pkg/scmprovider/client.go
package scmprovider import ( "context" "fmt" "net/url" "os" "github.com/jenkins-x/go-scm/scm" "k8s.io/apimachinery/pkg/util/sets" ) // ToClient converts the scm client to an API that the prow plugins expect func ToClient(client *scm.Client, botName string) *Client { return &Client{client: client, botName: botName} } // SCMClient is an interface providing all functions on the Client struct. type SCMClient interface { // Functions implemented in client.go BotName() (string, error) SetBotName(string) SupportsGraphQL() bool ProviderType() string PRRefFmt() string SupportsPRLabels() bool ServerURL() *url.URL QuoteAuthorForComment(string) string // Functions implemented in content.go GetFile(string, string, string, string) ([]byte, error) ListFiles(string, string, string, string) ([]*scm.FileEntry, error) // Functions implemented in git.go GetRef(string, string, string) (string, error) DeleteRef(string, string, string) error GetSingleCommit(string, string, string) (*scm.Commit, error) // Functions implemented in issues.go Query(context.Context, interface{}, map[string]interface{}) error Search(scm.SearchOptions) ([]*scm.SearchIssue, *RateLimits, error) ListIssueEvents(string, string, int) ([]*scm.ListedIssueEvent, error) AssignIssue(string, string, int, []string) error UnassignIssue(string, string, int, []string) error AddLabel(string, string, int, string, bool) error RemoveLabel(string, string, int, string, bool) error DeleteComment(string, string, int, int, bool) error DeleteStaleComments(string, string, int, []*scm.Comment, bool, func(*scm.Comment) bool) error ListIssueComments(string, string, int) ([]*scm.Comment, error) GetIssueLabels(string, string, int, bool) ([]*scm.Label, error) CreateComment(string, string, int, bool, string) error ReopenIssue(string, string, int) error FindIssues(string, string, bool) ([]scm.Issue, error) CloseIssue(string, string, int) error EditComment(owner, repo string, number int, id int, comment string, pr bool) error // Functions implemented in organizations.go ListTeams(string) ([]*scm.Team, error) ListTeamMembers(int, string) ([]*scm.TeamMember, error) ListOrgMembers(string) ([]*scm.TeamMember, error) IsOrgAdmin(string, string) (bool, error) // Functions implemented in pull_requests.go GetPullRequest(string, string, int) (*scm.PullRequest, error) ListPullRequestComments(string, string, int) ([]*scm.Comment, error) GetPullRequestChanges(string, string, int) ([]*scm.Change, error) Merge(string, string, int, MergeDetails) error ReopenPR(string, string, int) error ClosePR(string, string, int) error ListAllPullRequestsForFullNameRepo(string, scm.PullRequestListOptions) ([]*scm.PullRequest, error) FindPullRequestsByAuthor(string, string, string) ([]*scm.PullRequest, error) // Functions implemented in repositories.go GetRepoLabels(string, string) ([]*scm.Label, error) IsCollaborator(string, string, string) (bool, error) ListCollaborators(string, string) ([]scm.User, error) CreateStatus(string, string, string, *scm.StatusInput) (*scm.Status, error) CreateGraphQLStatus(string, string, string, *Status) (*scm.Status, error) ListStatuses(string, string, string) ([]*scm.Status, error) GetCombinedStatus(string, string, string) (*scm.CombinedStatus, error) HasPermission(string, string, string, ...string) (bool, error) GetUserPermission(string, string, string) (string, error) IsMember(string, string) (bool, error) GetRepositoryByFullName(string) (*scm.Repository, error) // Functions implemented in reviews.go ListReviews(string, string, int) ([]*scm.Review, error) 
RequestReview(string, string, int, []string) error UnrequestReview(string, string, int, []string) error // Functions implemented in milestones.go ClearMilestone(string, string, int, bool) error SetMilestone(string, string, int, int, bool) error ListMilestones(string, string) ([]*scm.Milestone, error) } // Client represents an interface that prow plugins expect on top of go-scm type Client struct { client *scm.Client botName string } // ToScmClient gets the underlying SCM client func (c *Client) ToScmClient() *scm.Client { return c.client } // BotName returns the bot name func (c *Client) BotName() (string, error) { botName := c.botName if botName == "" { botName = os.Getenv("GIT_USER") if botName == "" { botName = "jenkins-x-bot" } c.botName = botName } return botName, nil } // SetBotName sets the bot name func (c *Client) SetBotName(botName string) { c.botName = botName } // SupportsPRLabels returns true if the underlying provider supports PR labels func (c *Client) SupportsPRLabels() bool { return !NoLabelProviders().Has(c.ProviderType()) } // QuoteAuthorForComment will quote the author login for use in "@author" if appropriate for the provider. func (c *Client) QuoteAuthorForComment(author string) string { if c.ProviderType() == "stash" { return `"` + author + `"` } return author } // ServerURL returns the server URL for the client func (c *Client) ServerURL() *url.URL { return c.client.BaseURL } // SupportsGraphQL returns true if the underlying provider supports our GraphQL queries // Currently, that means it has to be GitHub. func (c *Client) SupportsGraphQL() bool { return c.client.Driver == scm.DriverGithub } // ProviderType returns the type of the underlying SCM provider func (c *Client) ProviderType() string { return c.client.Driver.String() } // PRRefFmt returns the "refs/(something)/%d/(something)" sprintf format used for constructing PR refs for this provider func (c *Client) PRRefFmt() string { switch c.client.Driver { case scm.DriverStash: return "refs/pull-requests/%d/from" case scm.DriverGitlab: return "refs/merge-requests/%d/head" default: return "refs/pull/%d/head" } } func (c *Client) repositoryName(owner string, repo string) string { return fmt.Sprintf("%s/%s", owner, repo) } func (c *Client) createListOptions() scm.ListOptions { return scm.ListOptions{} } // FileNotFound happens when github cannot find the file requested by GetFile(). type FileNotFound struct { org, repo, path, commit string } // Error formats a file not found error func (e *FileNotFound) Error() string { return fmt.Sprintf("%s/%s/%s @ %s not found", e.org, e.repo, e.path, e.commit) } // NoLabelProviders returns a set of provider names that don't support labels. func NoLabelProviders() sets.String { // "coding" is a placeholder provider name from go-scm that we'll use for testing the comment support for label logic. return sets.NewString("stash", "coding") }
[ "\"GIT_USER\"" ]
[]
[ "GIT_USER" ]
[]
["GIT_USER"]
go
1
0
test/integration/test_containerized_jobs.py
"""Integration tests for running tools in Docker containers.""" import os import unittest from base import integration_util from base.populators import ( DatasetPopulator, ) from galaxy.tools.deps.commands import which from .test_job_environments import RunsEnvironmentJobs SCRIPT_DIRECTORY = os.path.abspath(os.path.dirname(__file__)) DOCKERIZED_JOB_CONFIG_FILE = os.path.join(SCRIPT_DIRECTORY, "dockerized_job_conf.xml") SINGULARITY_JOB_CONFIG_FILE = os.path.join(SCRIPT_DIRECTORY, "singularity_job_conf.xml") EXTENDED_TIMEOUT = 120 class MulledJobTestCases(object): def test_explicit(self): self.dataset_populator.run_tool("mulled_example_explicit", {}, self.history_id) self.dataset_populator.wait_for_history(self.history_id, assert_ok=True) output = self.dataset_populator.get_history_dataset_content(self.history_id, timeout=EXTENDED_TIMEOUT) assert "0.7.15-r1140" in output def test_mulled_simple(self): self.dataset_populator.run_tool("mulled_example_simple", {}, self.history_id) self.dataset_populator.wait_for_history(self.history_id, assert_ok=True) output = self.dataset_populator.get_history_dataset_content(self.history_id, timeout=EXTENDED_TIMEOUT) assert "0.7.15-r1140" in output @integration_util.skip_unless_docker() class DockerizedJobsIntegrationTestCase(integration_util.IntegrationTestCase, RunsEnvironmentJobs, MulledJobTestCases): framework_tool_and_types = True job_config_file = DOCKERIZED_JOB_CONFIG_FILE build_mulled_resolver = 'build_mulled' container_type = 'docker' default_container_home_dir = '/' @classmethod def handle_galaxy_config_kwds(cls, config): cls.jobs_directory = cls._test_driver.mkdtemp() config["jobs_directory"] = cls.jobs_directory config["job_config_file"] = cls.job_config_file # Disable tool dependency resolution. config["tool_dependency_dir"] = "none" config["conda_auto_init"] = False config["conda_auto_install"] = False config["enable_beta_mulled_containers"] = "true" @classmethod def setUpClass(cls): if not which(cls.container_type): raise unittest.SkipTest("Executable '%s' not found on PATH" % cls.container_type) super(DockerizedJobsIntegrationTestCase, cls).setUpClass() def setUp(self): super(DockerizedJobsIntegrationTestCase, self).setUp() self.dataset_populator = DatasetPopulator(self.galaxy_interactor) self.history_id = self.dataset_populator.new_history() def test_container_job_environment(self): job_env = self._run_and_get_environment_properties("job_environment_default") euid = os.geteuid() egid = os.getgid() assert job_env.user_id == str(euid), job_env.user_id assert job_env.group_id == str(egid), job_env.group_id assert job_env.pwd.startswith(self.jobs_directory) assert job_env.pwd.endswith("/working") assert job_env.home.startswith(self.jobs_directory) assert job_env.home.endswith("/home") def test_container_job_environment_legacy(self): job_env = self._run_and_get_environment_properties("job_environment_default_legacy") euid = os.geteuid() egid = os.getgid() assert job_env.user_id == str(euid), job_env.user_id assert job_env.group_id == str(egid), job_env.group_id assert job_env.pwd.startswith(self.jobs_directory) assert job_env.pwd.endswith("/working") # Should we change env_pass_through to just always include TMP and HOME for docker? # I'm not sure, if yes this would change. 
assert job_env.home == self.default_container_home_dir, job_env.home def test_build_mulled(self): if not which('docker'): raise unittest.SkipTest("Docker not found on PATH, required for building images via involucro") resolver_type = self.build_mulled_resolver tool_id = 'mulled_example_multi_1' endpoint = "tools/%s/dependencies" % tool_id data = {'id': tool_id, 'resolver_type': resolver_type} create_response = self._post(endpoint, data=data, admin=True) self._assert_status_code_is(create_response, 200) response = create_response.json() assert any([True for d in response if d['dependency_type'] == self.container_type]) class SingularityJobsIntegrationTestCase(DockerizedJobsIntegrationTestCase): job_config_file = SINGULARITY_JOB_CONFIG_FILE build_mulled_resolver = 'build_mulled_singularity' container_type = 'singularity' # singularity passes $HOME by default default_container_home_dir = os.environ.get('HOME', '/')
[]
[]
[ "HOME" ]
[]
["HOME"]
python
1
0
device_control_backend/src/app.py
from fastapi import FastAPI, Request from starlette.websockets import WebSocket import traceback import asyncio import psycopg2 from enums import * import smtplib from email.mime.text import MIMEText from backend_lib.websocket_manager import WebsocketManager, getVersion from backend_lib.auth import Auth import os from slack import WebClient app = FastAPI(docs_url=None) auth = Auth() SLACK_BOT_TOKEN = os.getenv("SLACK_BOT_TOKEN") # Database connection db_conn = psycopg2.connect("dbname='db' user='db' host='{}' [redacted-2]".format(os.environ.get("DB_HOST"))) db_conn.autocommit = True cursor = db_conn.cursor() slack = WebClient(os.environ.get('SLACK_BOT_TOKEN')) # TODO make TIMEOUT bigger for after-dev phase TIMEOUT = 30 # Every timeout seconds the client needs to send a ping, to not be kicked! TODO set to 2 minutes or so DEBUG = True def sendEmail(to, text): # TODO fill in password = "[redacted-5]" # BIT msg = MIMEText(text, "html") msg['From'] = "[email protected]" # Something like [email protected] msg['To'] = to try: server = smtplib.SMTP('mailserver', 587) server.connect('mailserver', 587) server.ehlo() server.starttls() server.ehlo() server.login(msg['From'], password) server.sendmail(msg['From'], msg['To'], msg.as_string()) server.quit() if DEBUG: print("Sent email!") except Exception as e: print("Error during email sending process!") print(e) return False return True def lookupEmail(wdcode, deviceid): # User currently not connected! Maybe send him an email? # First check if the device has an option set for the email # If so get the email of the user and send it # Else get the standard email settings from the user and then do above email = None mailpolicy = None cursor.execute( "SELECT value FROM optionals WHERE wdcode=%s AND deviceid='" + str(deviceid) + "' AND key='mailPolicy'", (wdcode,)) res = cursor.fetchall() if len(res) == 1: mailpolicy = int(res[0][0]) cursor.execute( "SELECT email FROM wdcodes WHERE wdcode=%s",(wdcode,)) res = cursor.fetchall() if len(res) == 1: # email = res[0][0] if mailpolicy == None: mailpolicy = res[0][1] elif len(res) != 0: raise Exception("Database Error!") return (email, mailpolicy) @app.exception_handler(Exception) async def slackExceptionHandler(request: Request, exc: Exception): print("Fatal error during exception handling! Kicking the client...") tb = traceback.format_exc() try: slack.chat_postMessage(channel='#stacktraces', text=tb) except Exception as err: print("Could not notify via Slack!") print(err) finally: print(tb) return JSONResponse(status_code=505, content={"message": "Ooops, something went wrong! 
I already notified the developers!"}) @app.get("/", status_code=200) def status(): return {"status": "ok"} @app.get("/brewCoffee", status_code=418) async def brewCoffee(): # Easter Egg try: slack.chat_postMessage(channel='#stacktraces', text='Someone brewed coffee on a teapot...') except: traceback.print_exc() return {"comment": "I'm a teapot"} ######################################################################################## # Websocket Manager Backend Lib ######################################################################################## async def requestInfo(data, ws_wrapper): wdcode = ws_wrapper.wdcode if ws_wrapper.isUser: # First get all devices cursor.execute("SELECT wdcode,deviceid,devicetype,status FROM devices WHERE wdcode=%s", (wdcode,)) res = cursor.fetchall() d = {} for e in res: wdc = e[0] dei = e[1] det = e[2] sta = e[3] if wdc not in d: d[wdc] = {} d[wdc][dei] = {"type": det} d[wdc][dei]["status"] = sta # Now get all optionals cursor.execute("SELECT wdcode,deviceid,key,value FROM optionals WHERE wdcode=%s", (wdcode,)) l = cursor.fetchall() for e in l: wdc = e[0] dev = e[1] key = e[2] val = e[3] d[wdc][dev][key] = val else: code = id cursor.execute("SELECT deviceid,devicetype,status FROM devices WHERE wdcode=%s", (wdcode,)) res = cursor.fetchall() d = {} for e in res: d[e[0]] = {} d[e[0]]["type"] = e[1] d[e[0]]["status"] = e[2] cursor.execute("SELECT deviceid,key,value FROM optionals WHERE wdcode=%s", (wdcode,)) l = cursor.fetchall() for e in l: dev = e[0] key = e[1] val = e[2] d[dev][key] = val return True, {"data": d} async def registerDevice(data, ws_wrapper): if ws_wrapper.isUser: return False, {"comment": "Users can't register devices manually!"} else: wdcode = ws_wrapper.wdcode deviceid = int(data["deviceid"]) devicetype = int(data["devicetype"]) cursor.execute("SELECT * FROM devices WHERE wdcode=%s AND deviceid=%s",(wdcode,str(deviceid),)) res = cursor.fetchall() if len(res) != 0: return False, {"comment": "Non unique device id!"} cursor.execute("INSERT INTO devices(wdcode,deviceid,devicetype,status) VALUES(%s,%s,%s,%s)", (wdcode, str(deviceid), str(devicetype), str(Status.NORMAL.value))) db_conn.commit() # Notify the user or send an email to him if not await websocket_manager.send({"action": "registerDevice"}, wdcode): email, mailpolicy = lookupEmail(wdcode, deviceid) if email != None and mailpolicy != None and mailpolicy >= MailPolicy.NORMAL.value: # TODO better check sendEmail(email, "<html><body><p>NEW DEVICE REGISTERED!</p></body></html>") return True, None async def updateOption(data, ws_wrapper): if ws_wrapper.isUser: wdcode = ws_wrapper.wdcode deviceid = int(data["deviceid"]) key = data["key"] value = data["value"] # Check if this is a valid option: if not key in set(item.value for item in Option): return False, {"comment": "Invalid Option"} # TODO check if value is valid for given key cursor.execute("SELECT * FROM optionals WHERE wdcode=%s AND deviceid=" + str(deviceid) + " AND key=%s", (wdcode, key,)) res = cursor.fetchall() if len(res) == 1: # Already present cursor.execute( "UPDATE optionals SET value=%s WHERE wdcode=%s AND deviceid=" + str(deviceid) + " AND key=%s", (value, wdcode, key)) db_conn.commit() elif len(res) == 0: cursor.execute( "INSERT INTO optionals(wdcode,deviceid,key,value) VALUES(%s," + str(deviceid) + ",%s,%s)", (wdcode, key, value)) db_conn.commit() else: print("ERROR!") print(res) return False, {"comment": "Error!"} await websocket_manager.send({"action": "updateOption"}, wdcode) return True, None else: wdcode = 
ws_wrapper.wdcode deviceid = int(data["deviceid"]) key = data["key"] value = data["value"] cursor.execute("SELECT * FROM devices WHERE wdcode=%s AND deviceid=" + str(deviceid),(wdcode,)) res = cursor.fetchall() if len(res) != 1: return False, {"comment": "Device is not registered!"} success = False cursor.execute("SELECT * FROM optionals WHERE wdcode=%s AND deviceid=" + str(deviceid) + " AND key=%s",(wdcode, key,)) res = cursor.fetchall() if len(res) == 1: # Already present cursor.execute( "UPDATE optionals SET value=%s WHERE wdcode=%s AND deviceid=" + str(deviceid) + " AND key=%s", (value, wdcode, key)) db_conn.commit() success = True elif len(res) == 0: # New optional cursor.execute( "INSERT INTO optionals(wdcode,deviceid,key,value) VALUES(%s," + str(deviceid) + ",%s,%s)", (wdcode, key, value)) db_conn.commit() success = True else: print("ERROR!") print(res) return False, None await websocket_manager.send({"action": "updateOption"}, wdcode) return success, None async def updateStatus(data, ws_wrapper): wdcode = ws_wrapper.wdcode if ws_wrapper.isUser: deviceid = int(data["deviceid"]) status = int(data["status"]) if not status in [e.value for e in Status]: return False, {"comment": "Invalid status!"} cursor.execute( "UPDATE devices SET status=" + str(status) + " WHERE wdcode=%s AND deviceid=" + str(deviceid), (wdcode,)) db_conn.commit() updatedrows = cursor.rowcount if updatedrows != 1: return False, None await websocket_manager.send({"action": "updateStatus"}, wdcode) return True, None else: deviceid = int(data["deviceid"]) status = int(data["status"]) if not status in [e.value for e in Status]: return False, {"comment": "Invalid status!"} cursor.execute( "UPDATE devices SET status=" + str(status) + " WHERE wdcode=%s AND deviceid=" + str(deviceid), (wdcode,)) db_conn.commit() updatedrows = cursor.rowcount if updatedrows != 1: return False, None # Notify the user if not await websocket_manager.send({"action": "updateStatus"}, wdcode): email, mailpolicy = lookupEmail(wdcode, deviceid) if email != None and mailpolicy != None and mailpolicy >= MailPolicy.NORMAL.value: # TODO better check sendEmail(email, "<html><body><p>STATUS OF DEVICE WAS UPDATED!</p></body></html>") return True, None async def updateType(data, ws_wrapper): wdcode = ws_wrapper.wdcode if ws_wrapper.isUser: return False, {"comment", "Action not supported for User"} else: deviceid = int(data["deviceid"]) devicetype = int(data["devicetype"]) cursor.execute("UPDATE devices SET devicetype=" + str(devicetype) + " WHERE wdcode=%s AND deviceid=" + str( deviceid), (wdcode,)) db_conn.commit() updatedrows = cursor.rowcount if updatedrows != 1: return False, None # Confirm and notify if not await websocket_manager.send({"action": "updateType"}, wdcode): email, mailpolicy = lookupEmail(wdcode, deviceid,) if email != None and mailpolicy != None and mailpolicy >= MailPolicy.NORMAL.value: # TODO better check sendEmail(email, "<html><body><p>DEVICETYPE WAS CHANGED!</p></body></html>") return True, None websocket_manager = WebsocketManager(debug=True, zmq=True, timeout=30) websocket_manager.register("requestInfo", requestInfo) websocket_manager.register("registerDevice", registerDevice) websocket_manager.register("updateOption", updateOption) websocket_manager.register("updateStatus", updateStatus) websocket_manager.register("updateType", updateType) @app.on_event("startup") async def startup(): # print("startup coroutine called") task = asyncio.get_running_loop().create_task(websocket_manager.init()) # await websocket_manager.init() await 
task asyncio.get_running_loop().create_task(websocket_manager.work()) print("Working with backend-lib version: "+getVersion()) print("Finished startup co-routine") @app.websocket("/ws") async def websocket_endpoint(websocket: WebSocket): ws_wrapper = await websocket_manager.connect(websocket) # authentication failed if not ws_wrapper: return wdcode = ws_wrapper.wdcode isUser = ws_wrapper.isUser if isUser: # update user database cursor.execute("SELECT * FROM wdcodes WHERE wdcode=%s", (wdcode,)) #Users have an email! email = ws_wrapper.email res = cursor.fetchall() if len(res) == 0: try: cursor.execute("INSERT INTO wdcodes(wdcode,email,mailpolicy) VALUES(%s,%s," + str(MailPolicy.NORMAL.value) + ")", (wdcode,email,)) db_conn.commit() except psycopg2.errors.UniqueViolation: #Entry was made in between -> Ignore! pass if DEBUG: print(wdcode + " connected successfully! ("+("isUser" if isUser else "noUser")+")") while websocket_manager.is_connected(ws_wrapper): success = await websocket_manager.listen(ws_wrapper) if not success: break ######################################################################################## # ########################################################################################
[]
[]
[ "DB_HOST", "SLACK_BOT_TOKEN" ]
[]
["DB_HOST", "SLACK_BOT_TOKEN"]
python
2
0
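The FastAPI record above mixes bound psycopg2 parameters with string concatenation of deviceid and key values when building SQL (for example in lookupEmail and updateOption). The sketch below is a hedged, cleaned-up version of the lookupEmail lookups with every value passed as a bound parameter; the table and column names come from the record, while the connection shown in the usage comment is a placeholder that only reuses the DB_HOST variable.

import psycopg2

def lookup_mail_policy(cursor, wdcode, deviceid):
    # Every value travels as a bound parameter; psycopg2 handles quoting and types.
    cursor.execute(
        "SELECT value FROM optionals WHERE wdcode=%s AND deviceid=%s AND key=%s",
        (wdcode, deviceid, 'mailPolicy'))
    row = cursor.fetchone()
    mailpolicy = int(row[0]) if row else None

    cursor.execute("SELECT email FROM wdcodes WHERE wdcode=%s", (wdcode,))
    row = cursor.fetchone()
    email = row[0] if row else None
    return email, mailpolicy

# Example usage (placeholder connection settings):
# import os
# conn = psycopg2.connect(dbname='db', user='db', host=os.environ.get('DB_HOST', 'localhost'))
# print(lookup_mail_policy(conn.cursor(), 'some-wdcode', 1))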
tests/unit/cli/test_envutils.py
# Copyright 2013: Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import mock from six import moves from rally.cli import envutils from rally import exceptions from tests.unit import test class EnvUtilsTestCase(test.TestCase): def test_default_from_global(self): @envutils.default_from_global("test_arg_name", "test_env_name", "test_missing_arg") def test_function(test_arg_name=None): pass with mock.patch("sys.stdout", new_callable=moves.StringIO) as mock_stdout: test_function() self.assertEqual("Missing argument: --test_missing_arg\n", mock_stdout.getvalue()) @mock.patch.dict(os.environ, values={envutils.ENV_DEPLOYMENT: "my_deployment_id"}, clear=True) def test_get_deployment_id_in_env(self): deployment_id = envutils.get_global(envutils.ENV_DEPLOYMENT) self.assertEqual("my_deployment_id", deployment_id) @mock.patch.dict(os.environ, values={}, clear=True) @mock.patch("rally.cli.envutils.fileutils.load_env_file") def test_get_deployment_id_with_exception(self, mock_load_env_file): self.assertRaises(exceptions.InvalidArgumentsException, envutils.get_global, envutils.ENV_DEPLOYMENT, True) mock_load_env_file.assert_called_once_with(os.path.expanduser( "~/.rally/globals")) @mock.patch.dict(os.environ, values={}, clear=True) @mock.patch("rally.cli.envutils.fileutils.load_env_file") def test_get_deployment_id_with_none(self, mock_load_env_file): self.assertIsNone(envutils.get_global(envutils.ENV_DEPLOYMENT)) mock_load_env_file.assert_called_once_with(os.path.expanduser( "~/.rally/globals")) @mock.patch.dict(os.environ, values={envutils.ENV_TASK: "my_task_id"}, clear=True) def test_get_task_id_in_env(self): self.assertEqual("my_task_id", envutils.get_global(envutils.ENV_TASK)) @mock.patch.dict(os.environ, values={}, clear=True) @mock.patch("rally.cli.envutils.fileutils.load_env_file") def test_get_task_id_with_exception(self, mock_load_env_file): self.assertRaises(exceptions.InvalidArgumentsException, envutils.get_global, envutils.ENV_TASK, True) mock_load_env_file.assert_called_once_with(os.path.expanduser( "~/.rally/globals")) @mock.patch.dict(os.environ, values={}, clear=True) @mock.patch("rally.cli.envutils.fileutils.load_env_file") def test_get_task_id_with_none(self, mock_load_env_file): self.assertIsNone(envutils.get_global("RALLY_TASK")) mock_load_env_file.assert_called_once_with(os.path.expanduser( "~/.rally/globals")) @mock.patch.dict(os.environ, values={envutils.ENV_DEPLOYMENT: "test_deployment_id"}, clear=True) @mock.patch("os.path.exists") @mock.patch("rally.cli.envutils.fileutils.update_env_file", return_value=True) def test_clear_global(self, mock_update_env_file, mock_path_exists): envutils.clear_global(envutils.ENV_DEPLOYMENT) mock_update_env_file.assert_called_once_with(os.path.expanduser( "~/.rally/globals"), envutils.ENV_DEPLOYMENT, "\n") self.assertEqual({}, os.environ) @mock.patch.dict(os.environ, values={envutils.ENV_DEPLOYMENT: "test_deployment_id", envutils.ENV_TASK: "test_task_id"}, clear=True) @mock.patch("os.path.exists") 
@mock.patch("rally.cli.envutils.fileutils.update_env_file", return_value=True) def test_clear_env(self, mock_update_env_file, mock_path_exists): envutils.clear_env() self.assertEqual({}, os.environ) @mock.patch.dict(os.environ, {"OS_AUTH_URL": "fake_auth_url", "OS_USERNAME": "fake_username", "OS_PASSWORD": "fake_password", "OS_TENANT_NAME": "fake_tenant_name", "OS_REGION_NAME": "fake_region_name", "OS_ENDPOINT_TYPE": "fake_endpoint_typeURL", "OS_ENDPOINT": "fake_endpoint", "OS_INSECURE": "True", "OSPROFILER_HMAC_KEY": "fake_hmac_key", "OS_CACERT": "fake_cacert"}) def test_get_creds_from_env_vars_keystone_v2(self): expected_creds = { "auth_url": "fake_auth_url", "admin": { "username": "fake_username", "password": "fake_password", "tenant_name": "fake_tenant_name" }, "endpoint_type": "fake_endpoint_type", "endpoint": "fake_endpoint", "region_name": "fake_region_name", "https_cacert": "fake_cacert", "https_insecure": True, "profiler_hmac_key": "fake_hmac_key" } creds = envutils.get_creds_from_env_vars() self.assertEqual(expected_creds, creds) @mock.patch.dict(os.environ, {"OS_AUTH_URL": "fake_auth_url", "OS_USERNAME": "fake_username", "OS_PASSWORD": "fake_password", "OS_TENANT_NAME": "fake_tenant_name", "OS_REGION_NAME": "fake_region_name", "OS_ENDPOINT_TYPE": "fake_endpoint_typeURL", "OS_ENDPOINT": "fake_endpoint", "OS_INSECURE": "True", "OS_PROJECT_DOMAIN_NAME": "fake_pdn", "OS_USER_DOMAIN_NAME": "fake_udn", "OSPROFILER_HMAC_KEY": "fake_hmac_key", "OS_CACERT": "fake_cacert"}) def test_get_creds_from_env_vars_keystone_v3(self): expected_creds = { "auth_url": "fake_auth_url", "admin": { "username": "fake_username", "password": "fake_password", "user_domain_name": "fake_udn", "project_domain_name": "fake_pdn", "project_name": "fake_tenant_name" }, "endpoint_type": "fake_endpoint_type", "endpoint": "fake_endpoint", "region_name": "fake_region_name", "https_cacert": "fake_cacert", "https_insecure": True, "profiler_hmac_key": "fake_hmac_key" } creds = envutils.get_creds_from_env_vars() self.assertEqual(expected_creds, creds) @mock.patch.dict(os.environ, {"OS_AUTH_URL": "fake_auth_url", "OS_PASSWORD": "fake_password", "OS_REGION_NAME": "fake_region_name", "OS_ENDPOINT": "fake_endpoint", "OS_INSECURE": "True", "OSPROFILER_HMAC_KEY": "fake_hmac_key", "OS_CACERT": "fake_cacert"}) def test_get_creds_from_env_vars_when_required_vars_missing(self): if "OS_USERNAME" in os.environ: del os.environ["OS_USERNAME"] self.assertRaises(exceptions.ValidationError, envutils.get_creds_from_env_vars) @mock.patch.dict(os.environ, {"OS_TENANT_NAME": "fake_tenant_name"}, clear=True) def test_get_project_name_from_env_when_tenant_name(self): project_name = envutils.get_project_name_from_env() self.assertEqual("fake_tenant_name", project_name) @mock.patch.dict(os.environ, {"OS_PROJECT_NAME": "fake_project_name"}, clear=True) def test_get_project_name_from_env_when_project_name(self): project_name = envutils.get_project_name_from_env() self.assertEqual("fake_project_name", project_name) @mock.patch.dict(os.environ, {"OS_TENANT_NAME": "fake_tenant_name", "OS_PROJECT_NAME": "fake_project_name"}) def test_get_project_name_from_env_when_both(self): project_name = envutils.get_project_name_from_env() self.assertEqual("fake_project_name", project_name) @mock.patch.dict(os.environ, values={}, clear=True) def test_get_project_name_from_env_when_neither(self): self.assertRaises(exceptions.ValidationError, envutils.get_project_name_from_env) @mock.patch.dict(os.environ, {"OS_ENDPOINT_TYPE": "fake_endpoint_typeURL"}, clear=True) def 
test_get_endpoint_type_from_env_when_endpoint_type(self): endpoint_type = envutils.get_endpoint_type_from_env() self.assertEqual("fake_endpoint_type", endpoint_type) @mock.patch.dict(os.environ, {"OS_INTERFACE": "fake_interface"}, clear=True) def test_get_endpoint_type_from_env_when_interface(self): endpoint_type = envutils.get_endpoint_type_from_env() self.assertEqual("fake_interface", endpoint_type) @mock.patch.dict(os.environ, {"OS_ENDPOINT_TYPE": "fake_endpoint_typeURL", "OS_INTERFACE": "fake_interface"}) def test_get_endpoint_type_from_env_when_both(self): endpoint_type = envutils.get_endpoint_type_from_env() self.assertEqual("fake_endpoint_type", endpoint_type) @mock.patch.dict(os.environ, values={}, clear=True) def test_get_endpoint_type_from_env_when_neither(self): endpoint_type = envutils.get_endpoint_type_from_env() self.assertIsNone(endpoint_type)
[]
[]
[ "OS_USERNAME" ]
[]
["OS_USERNAME"]
python
1
0
vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go
package lint

import (
	"context"
	"fmt"
	"os"
	"runtime/debug"
	"strings"

	"github.com/pkg/errors"

	"github.com/golangci/golangci-lint/internal/errorutil"
	"github.com/golangci/golangci-lint/pkg/config"
	"github.com/golangci/golangci-lint/pkg/fsutils"
	"github.com/golangci/golangci-lint/pkg/goutil"
	"github.com/golangci/golangci-lint/pkg/lint/linter"
	"github.com/golangci/golangci-lint/pkg/lint/lintersdb"
	"github.com/golangci/golangci-lint/pkg/logutils"
	"github.com/golangci/golangci-lint/pkg/packages"
	"github.com/golangci/golangci-lint/pkg/result"
	"github.com/golangci/golangci-lint/pkg/result/processors"
	"github.com/golangci/golangci-lint/pkg/timeutils"

	gopackages "golang.org/x/tools/go/packages"
)

type Runner struct {
	Processors []processors.Processor
	Log        logutils.Log
}

func NewRunner(cfg *config.Config, log logutils.Log, goenv *goutil.Env, es *lintersdb.EnabledSet,
	lineCache *fsutils.LineCache, dbManager *lintersdb.Manager, pkgs []*gopackages.Package) (*Runner, error) {
	skipFilesProcessor, err := processors.NewSkipFiles(cfg.Run.SkipFiles)
	if err != nil {
		return nil, err
	}

	skipDirs := cfg.Run.SkipDirs
	if cfg.Run.UseDefaultSkipDirs {
		skipDirs = append(skipDirs, packages.StdExcludeDirRegexps...)
	}
	skipDirsProcessor, err := processors.NewSkipDirs(skipDirs, log.Child("skip dirs"), cfg.Run.Args)
	if err != nil {
		return nil, err
	}

	enabledLinters, err := es.GetEnabledLintersMap()
	if err != nil {
		return nil, errors.Wrap(err, "failed to get enabled linters")
	}

	return &Runner{
		Processors: []processors.Processor{
			processors.NewCgo(goenv),

			// Must go after Cgo.
			processors.NewFilenameUnadjuster(pkgs, log.Child("filename_unadjuster")),

			// Must be before diff, nolint and exclude autogenerated processor at least.
			processors.NewPathPrettifier(),
			skipFilesProcessor,
			skipDirsProcessor, // must be after path prettifier

			processors.NewAutogeneratedExclude(),

			// Must be before exclude because users see already marked output and configure excluding by it.
			processors.NewIdentifierMarker(),

			getExcludeProcessor(&cfg.Issues),
			getExcludeRulesProcessor(&cfg.Issues, log, lineCache),
			processors.NewNolint(log.Child("nolint"), dbManager, enabledLinters),

			processors.NewUniqByLine(cfg),
			processors.NewDiff(cfg.Issues.Diff, cfg.Issues.DiffFromRevision, cfg.Issues.DiffPatchFilePath),
			processors.NewMaxPerFileFromLinter(cfg),
			processors.NewMaxSameIssues(cfg.Issues.MaxSameIssues, log.Child("max_same_issues"), cfg),
			processors.NewMaxFromLinter(cfg.Issues.MaxIssuesPerLinter, log.Child("max_from_linter"), cfg),
			processors.NewSourceCode(lineCache, log.Child("source_code")),
			processors.NewPathShortener(),
			getSeverityRulesProcessor(&cfg.Severity, log, lineCache),
		},
		Log: log,
	}, nil
}

func (r *Runner) runLinterSafe(ctx context.Context, lintCtx *linter.Context,
	lc *linter.Config) (ret []result.Issue, err error) {
	defer func() {
		if panicData := recover(); panicData != nil {
			if pe, ok := panicData.(*errorutil.PanicError); ok {
				// Don't print stacktrace from goroutines twice
				lintCtx.Log.Warnf("Panic: %s: %s", pe, pe.Stack())
			} else {
				err = fmt.Errorf("panic occurred: %s", panicData)
				r.Log.Warnf("Panic stack trace: %s", debug.Stack())
			}
		}
	}()

	issues, err := lc.Linter.Run(ctx, lintCtx)

	if lc.DoesChangeTypes {
		// Packages in lintCtx might be dirty due to the last analysis,
		// which affects to the next analysis.
		// To avoid this issue, we clear type information from the packages.
		// See https://github.com/golangci/golangci-lint/pull/944.
		// Currently DoesChangeTypes is true only for `unused`.
		lintCtx.ClearTypesInPackages()
	}

	if err != nil {
		return nil, err
	}

	for i := range issues {
		if issues[i].FromLinter == "" {
			issues[i].FromLinter = lc.Name()
		}
	}

	return issues, nil
}

type processorStat struct {
	inCount  int
	outCount int
}

func (r Runner) processLintResults(inIssues []result.Issue) []result.Issue {
	sw := timeutils.NewStopwatch("processing", r.Log)

	var issuesBefore, issuesAfter int
	statPerProcessor := map[string]processorStat{}

	var outIssues []result.Issue
	if len(inIssues) != 0 {
		issuesBefore += len(inIssues)
		outIssues = r.processIssues(inIssues, sw, statPerProcessor)
		issuesAfter += len(outIssues)
	}

	// finalize processors: logging, clearing, no heavy work here
	for _, p := range r.Processors {
		p := p
		sw.TrackStage(p.Name(), func() {
			p.Finish()
		})
	}

	if issuesBefore != issuesAfter {
		r.Log.Infof("Issues before processing: %d, after processing: %d", issuesBefore, issuesAfter)
	}
	r.printPerProcessorStat(statPerProcessor)
	sw.PrintStages()

	return outIssues
}

func (r Runner) printPerProcessorStat(stat map[string]processorStat) {
	parts := make([]string, 0, len(stat))
	for name, ps := range stat {
		if ps.inCount != 0 {
			parts = append(parts, fmt.Sprintf("%s: %d/%d", name, ps.outCount, ps.inCount))
		}
	}
	if len(parts) != 0 {
		r.Log.Infof("Processors filtering stat (out/in): %s", strings.Join(parts, ", "))
	}
}

func (r Runner) Run(ctx context.Context, linters []*linter.Config, lintCtx *linter.Context) ([]result.Issue, error) {
	sw := timeutils.NewStopwatch("linters", r.Log)
	defer sw.Print()

	var issues []result.Issue
	var runErr error
	for _, lc := range linters {
		lc := lc
		sw.TrackStage(lc.Name(), func() {
			linterIssues, err := r.runLinterSafe(ctx, lintCtx, lc)
			if err != nil {
				r.Log.Warnf("Can't run linter %s: %s", lc.Linter.Name(), err)
				if os.Getenv("GOLANGCI_COM_RUN") == "" {
					// Don't stop all linters on one linter failure for golangci.com.
					runErr = err
				}
				return
			}
			issues = append(issues, linterIssues...)
		})
	}

	return r.processLintResults(issues), runErr
}

func (r *Runner) processIssues(issues []result.Issue, sw *timeutils.Stopwatch, statPerProcessor map[string]processorStat) []result.Issue {
	for _, p := range r.Processors {
		var newIssues []result.Issue
		var err error
		p := p
		sw.TrackStage(p.Name(), func() {
			newIssues, err = p.Process(issues)
		})

		if err != nil {
			r.Log.Warnf("Can't process result by %s processor: %s", p.Name(), err)
		} else {
			stat := statPerProcessor[p.Name()]
			stat.inCount += len(issues)
			stat.outCount += len(newIssues)
			statPerProcessor[p.Name()] = stat
			issues = newIssues
		}

		if issues == nil {
			issues = []result.Issue{}
		}
	}

	return issues
}

func getExcludeProcessor(cfg *config.Issues) processors.Processor {
	excludePatterns := cfg.ExcludePatterns
	if cfg.UseDefaultExcludes {
		excludePatterns = append(excludePatterns, config.GetExcludePatternsStrings(cfg.IncludeDefaultExcludes)...)
	}

	var excludeTotalPattern string
	if len(excludePatterns) != 0 {
		excludeTotalPattern = fmt.Sprintf("(%s)", strings.Join(excludePatterns, "|"))
	}

	var excludeProcessor processors.Processor
	if cfg.ExcludeCaseSensitive {
		excludeProcessor = processors.NewExcludeCaseSensitive(excludeTotalPattern)
	} else {
		excludeProcessor = processors.NewExclude(excludeTotalPattern)
	}

	return excludeProcessor
}

func getExcludeRulesProcessor(cfg *config.Issues, log logutils.Log, lineCache *fsutils.LineCache) processors.Processor {
	var excludeRules []processors.ExcludeRule
	for _, r := range cfg.ExcludeRules {
		excludeRules = append(excludeRules, processors.ExcludeRule{
			BaseRule: processors.BaseRule{
				Text:    r.Text,
				Source:  r.Source,
				Path:    r.Path,
				Linters: r.Linters,
			},
		})
	}

	var excludeRulesProcessor processors.Processor
	if cfg.ExcludeCaseSensitive {
		excludeRulesProcessor = processors.NewExcludeRulesCaseSensitive(
			excludeRules,
			lineCache,
			log.Child("exclude_rules"),
		)
	} else {
		excludeRulesProcessor = processors.NewExcludeRules(
			excludeRules,
			lineCache,
			log.Child("exclude_rules"),
		)
	}

	return excludeRulesProcessor
}

func getSeverityRulesProcessor(cfg *config.Severity, log logutils.Log, lineCache *fsutils.LineCache) processors.Processor {
	var severityRules []processors.SeverityRule
	for _, r := range cfg.Rules {
		severityRules = append(severityRules, processors.SeverityRule{
			Severity: r.Severity,
			BaseRule: processors.BaseRule{
				Text:    r.Text,
				Source:  r.Source,
				Path:    r.Path,
				Linters: r.Linters,
			},
		})
	}

	var severityRulesProcessor processors.Processor
	if cfg.CaseSensitive {
		severityRulesProcessor = processors.NewSeverityRulesCaseSensitive(
			cfg.Default,
			severityRules,
			lineCache,
			log.Child("severity_rules"),
		)
	} else {
		severityRulesProcessor = processors.NewSeverityRules(
			cfg.Default,
			severityRules,
			lineCache,
			log.Child("severity_rules"),
		)
	}

	return severityRulesProcessor
}
[ "\"GOLANGCI_COM_RUN\"" ]
[]
[ "GOLANGCI_COM_RUN" ]
[]
["GOLANGCI_COM_RUN"]
go
1
0
pkg/config/config.go
// Unless explicitly stated otherwise all files in this repository are licensed // under the Apache License Version 2.0. // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-2019 Datadog, Inc. package config import ( "bytes" "fmt" "net/url" "os" "path/filepath" "strings" "time" yaml "gopkg.in/yaml.v2" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/secrets" "github.com/DataDog/datadog-agent/pkg/version" ) // DefaultForwarderRecoveryInterval is the default recovery interval, also used if // the user-provided value is invalid. const DefaultForwarderRecoveryInterval = 2 // DefaultSite is the default site the Agent sends data to. const DefaultSite = "datadoghq.com" const infraURLPrefix = "https://app." var overrideVars = map[string]interface{}{} // Datadog is the global configuration object var ( Datadog Config proxies *Proxy ) // MetadataProviders helps unmarshalling `metadata_providers` config param type MetadataProviders struct { Name string `mapstructure:"name"` Interval time.Duration `mapstructure:"interval"` } // ConfigurationProviders helps unmarshalling `config_providers` config param type ConfigurationProviders struct { Name string `mapstructure:"name"` Polling bool `mapstructure:"polling"` PollInterval string `mapstructure:"poll_interval"` TemplateURL string `mapstructure:"template_url"` TemplateDir string `mapstructure:"template_dir"` Username string `mapstructure:"username"` Password string `mapstructure:"password"` CAFile string `mapstructure:"ca_file"` CAPath string `mapstructure:"ca_path"` CertFile string `mapstructure:"cert_file"` KeyFile string `mapstructure:"key_file"` Token string `mapstructure:"token"` GraceTimeSeconds int `mapstructure:"grace_time_seconds"` } // Listeners helps unmarshalling `listeners` config param type Listeners struct { Name string `mapstructure:"name"` } // Proxy represents the configuration for proxies in the agent type Proxy struct { HTTP string `mapstructure:"http"` HTTPS string `mapstructure:"https"` NoProxy []string `mapstructure:"no_proxy"` } func init() { osinit() // Configure Datadog global configuration Datadog = NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) // Configuration defaults initConfig(Datadog) } // initConfig initializes the config defaults on a config func initConfig(config Config) { // Agent // Don't set a default on 'site' to allow detecting with viper whether it's set in config config.BindEnv("site") config.BindEnv("dd_url") config.BindEnvAndSetDefault("app_key", "") config.SetDefault("proxy", nil) config.BindEnvAndSetDefault("skip_ssl_validation", false) config.BindEnvAndSetDefault("hostname", "") config.BindEnvAndSetDefault("tags", []string{}) config.BindEnvAndSetDefault("tag_value_split_separator", map[string]string{}) config.BindEnvAndSetDefault("conf_path", ".") config.BindEnvAndSetDefault("confd_path", defaultConfdPath) config.BindEnvAndSetDefault("additional_checksd", defaultAdditionalChecksPath) config.BindEnvAndSetDefault("log_payloads", false) config.BindEnvAndSetDefault("log_file", "") config.BindEnvAndSetDefault("log_file_max_size", "10Mb") config.BindEnvAndSetDefault("log_file_max_rolls", 1) config.BindEnvAndSetDefault("log_level", "info") config.BindEnvAndSetDefault("log_to_syslog", false) config.BindEnvAndSetDefault("log_to_console", true) config.BindEnvAndSetDefault("logging_frequency", int64(20)) config.BindEnvAndSetDefault("disable_file_logging", false) config.BindEnvAndSetDefault("syslog_uri", "") 
config.BindEnvAndSetDefault("syslog_rfc", false) config.BindEnvAndSetDefault("syslog_pem", "") config.BindEnvAndSetDefault("syslog_key", "") config.BindEnvAndSetDefault("syslog_tls_verify", true) config.BindEnvAndSetDefault("cmd_host", "localhost") config.BindEnvAndSetDefault("cmd_port", 5001) config.BindEnvAndSetDefault("cluster_agent.cmd_port", 5005) config.BindEnvAndSetDefault("default_integration_http_timeout", 9) config.BindEnvAndSetDefault("enable_metadata_collection", true) config.BindEnvAndSetDefault("enable_gohai", true) config.BindEnvAndSetDefault("check_runners", int64(4)) config.BindEnvAndSetDefault("auth_token_file_path", "") config.BindEnvAndSetDefault("bind_host", "localhost") config.BindEnvAndSetDefault("health_port", int64(0)) config.BindEnvAndSetDefault("disable_py3_validation", false) // if/when the default is changed to true, make the default platform // dependent; default should remain false on Windows to maintain backward // compatibility with Agent5 behavior/win config.BindEnvAndSetDefault("hostname_fqdn", false) config.BindEnvAndSetDefault("cluster_name", "") // secrets backend config.BindEnv("secret_backend_command") config.BindEnv("secret_backend_arguments") config.BindEnvAndSetDefault("secret_backend_output_max_size", 1024) config.BindEnvAndSetDefault("secret_backend_timeout", 5) // Retry settings config.BindEnvAndSetDefault("forwarder_backoff_factor", 2) config.BindEnvAndSetDefault("forwarder_backoff_base", 2) config.BindEnvAndSetDefault("forwarder_backoff_max", 64) config.BindEnvAndSetDefault("forwarder_recovery_interval", DefaultForwarderRecoveryInterval) config.BindEnvAndSetDefault("forwarder_recovery_reset", false) // Use to output logs in JSON format config.BindEnvAndSetDefault("log_format_json", false) // IPC API server timeout config.BindEnvAndSetDefault("server_timeout", 15) // Use to force client side TLS version to 1.2 config.BindEnvAndSetDefault("force_tls_12", false) // Defaults to safe YAML methods in base and custom checks. 
config.BindEnvAndSetDefault("disable_unsafe_yaml", true) // Agent GUI access port config.BindEnvAndSetDefault("GUI_port", defaultGuiPort) if IsContainerized() { config.SetDefault("procfs_path", "/host/proc") config.SetDefault("container_proc_root", "/host/proc") config.SetDefault("container_cgroup_root", "/host/sys/fs/cgroup/") } else { config.SetDefault("container_proc_root", "/proc") // for amazon linux the cgroup directory on host is /cgroup/ // we pick memory.stat to make sure it exists and not empty if _, err := os.Stat("/cgroup/memory/memory.stat"); !os.IsNotExist(err) { config.SetDefault("container_cgroup_root", "/cgroup/") } else { config.SetDefault("container_cgroup_root", "/sys/fs/cgroup/") } } config.BindEnv("procfs_path") config.BindEnv("container_proc_root") config.BindEnv("container_cgroup_root") config.BindEnvAndSetDefault("proc_root", "/proc") config.BindEnvAndSetDefault("histogram_aggregates", []string{"max", "median", "avg", "count"}) config.BindEnvAndSetDefault("histogram_percentiles", []string{"0.95"}) // Serializer config.BindEnvAndSetDefault("enable_stream_payload_serialization", false) config.BindEnvAndSetDefault("use_v2_api.series", false) config.BindEnvAndSetDefault("use_v2_api.events", false) config.BindEnvAndSetDefault("use_v2_api.service_checks", false) // Serializer: allow user to blacklist any kind of payload to be sent config.BindEnvAndSetDefault("enable_payloads.events", true) config.BindEnvAndSetDefault("enable_payloads.series", true) config.BindEnvAndSetDefault("enable_payloads.service_checks", true) config.BindEnvAndSetDefault("enable_payloads.sketches", true) config.BindEnvAndSetDefault("enable_payloads.json_to_v1_intake", true) // Forwarder config.BindEnvAndSetDefault("forwarder_timeout", 20) config.BindEnvAndSetDefault("forwarder_retry_queue_max_size", 30) config.BindEnvAndSetDefault("forwarder_num_workers", 1) // Dogstatsd config.BindEnvAndSetDefault("use_dogstatsd", true) config.BindEnvAndSetDefault("dogstatsd_port", 8125) // Notice: 0 means UDP port closed // The following options allow to configure how the dogstatsd intake buffers and queues incoming datagrams. // When a datagram is received it is first added to a datagrams buffer. This buffer fills up until // we reach `dogstatsd_packet_buffer_size` datagrams or after `dogstatsd_packet_buffer_flush_timeout` ms. // After this happens we flush this buffer of datagrams to a queue for processing. The size of this queue // is `dogstatsd_queue_size`. 
config.BindEnvAndSetDefault("dogstatsd_buffer_size", 1024*8) config.BindEnvAndSetDefault("dogstatsd_packet_buffer_size", 512) config.BindEnvAndSetDefault("dogstatsd_packet_buffer_flush_timeout", 100*time.Millisecond) config.BindEnvAndSetDefault("dogstatsd_queue_size", 100) config.BindEnvAndSetDefault("dogstatsd_non_local_traffic", false) config.BindEnvAndSetDefault("dogstatsd_socket", "") // Notice: empty means feature disabled config.BindEnvAndSetDefault("dogstatsd_stats_port", 5000) config.BindEnvAndSetDefault("dogstatsd_stats_enable", false) config.BindEnvAndSetDefault("dogstatsd_stats_buffer", 10) config.BindEnvAndSetDefault("dogstatsd_expiry_seconds", 300) config.BindEnvAndSetDefault("dogstatsd_origin_detection", false) // Only supported for socket traffic config.BindEnvAndSetDefault("dogstatsd_so_rcvbuf", 0) config.BindEnvAndSetDefault("dogstatsd_tags", []string{}) config.BindEnvAndSetDefault("statsd_forward_host", "") config.BindEnvAndSetDefault("statsd_forward_port", 0) config.BindEnvAndSetDefault("statsd_metric_namespace", "") // Autoconfig config.BindEnvAndSetDefault("autoconf_template_dir", "/datadog/check_configs") config.BindEnvAndSetDefault("exclude_pause_container", true) config.BindEnvAndSetDefault("ac_include", []string{}) config.BindEnvAndSetDefault("ac_exclude", []string{}) config.BindEnvAndSetDefault("ad_config_poll_interval", int64(10)) // in seconds config.BindEnvAndSetDefault("extra_listeners", []string{}) config.BindEnvAndSetDefault("extra_config_providers", []string{}) // Docker config.BindEnvAndSetDefault("docker_query_timeout", int64(5)) config.BindEnvAndSetDefault("docker_labels_as_tags", map[string]string{}) config.BindEnvAndSetDefault("docker_env_as_tags", map[string]string{}) config.BindEnvAndSetDefault("kubernetes_pod_labels_as_tags", map[string]string{}) config.BindEnvAndSetDefault("kubernetes_pod_annotations_as_tags", map[string]string{}) config.BindEnvAndSetDefault("kubernetes_node_labels_as_tags", map[string]string{}) config.BindEnvAndSetDefault("container_cgroup_prefix", "") // CRI config.BindEnvAndSetDefault("cri_socket_path", "") // empty is disabled config.BindEnvAndSetDefault("cri_connection_timeout", int64(1)) // in seconds config.BindEnvAndSetDefault("cri_query_timeout", int64(5)) // in seconds // Containerd // We only support containerd in Kubernetes. 
By default containerd cri uses `k8s.io` https://github.com/containerd/cri/blob/release/1.2/pkg/constants/constants.go#L22-L23 config.BindEnvAndSetDefault("containerd_namespace", "k8s.io") // Kubernetes config.BindEnvAndSetDefault("kubernetes_kubelet_host", "") config.BindEnvAndSetDefault("kubernetes_http_kubelet_port", 10255) config.BindEnvAndSetDefault("kubernetes_https_kubelet_port", 10250) config.BindEnvAndSetDefault("kubelet_tls_verify", true) config.BindEnvAndSetDefault("collect_kubernetes_events", false) config.BindEnvAndSetDefault("kubelet_client_ca", "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt") config.BindEnvAndSetDefault("kubelet_auth_token_path", "") config.BindEnvAndSetDefault("kubelet_client_crt", "") config.BindEnvAndSetDefault("kubelet_client_key", "") config.BindEnvAndSetDefault("kubelet_wait_on_missing_container", 0) config.BindEnvAndSetDefault("kubelet_cache_pods_duration", 5) // Polling frequency in seconds of the agent to the kubelet "/pods" endpoint config.BindEnvAndSetDefault("kubernetes_collect_metadata_tags", true) config.BindEnvAndSetDefault("kubernetes_metadata_tag_update_freq", 60) // Polling frequency of the Agent to the DCA in seconds (gets the local cache if the DCA is disabled) config.BindEnvAndSetDefault("kubernetes_apiserver_client_timeout", 10) config.BindEnvAndSetDefault("kubernetes_map_services_on_ip", false) // temporary opt-out of the new mapping logic config.BindEnvAndSetDefault("kubernetes_apiserver_use_protobuf", false) // Kube ApiServer config.BindEnvAndSetDefault("kubernetes_kubeconfig_path", "") config.BindEnvAndSetDefault("leader_lease_duration", "60") config.BindEnvAndSetDefault("leader_election", false) config.BindEnvAndSetDefault("kube_resources_namespace", "") // Datadog cluster agent config.BindEnvAndSetDefault("cluster_agent.enabled", false) config.BindEnvAndSetDefault("cluster_agent.auth_token", "") config.BindEnvAndSetDefault("cluster_agent.url", "") config.BindEnvAndSetDefault("cluster_agent.kubernetes_service_name", "datadog-cluster-agent") config.BindEnvAndSetDefault("metrics_port", "5000") // Metadata endpoints // Defines the maximum size of hostame gathered from EC2, GCE, Azure and Alibabacloud metadata endpoints. // Used internally to protect against configurations where metadata endpoints return incorrect values with 200 status codes. 
config.BindEnvAndSetDefault("metadata_endpoints_max_hostname_size", 255) // ECS config.BindEnvAndSetDefault("ecs_agent_url", "") // Will be autodetected config.BindEnvAndSetDefault("ecs_agent_container_name", "ecs-agent") config.BindEnvAndSetDefault("collect_ec2_tags", false) // GCE config.BindEnvAndSetDefault("collect_gce_tags", true) // Cloud Foundry config.BindEnvAndSetDefault("cloud_foundry", false) config.BindEnvAndSetDefault("bosh_id", "") // JMXFetch config.BindEnvAndSetDefault("jmx_custom_jars", []string{}) config.BindEnvAndSetDefault("jmx_use_cgroup_memory_limit", false) config.BindEnvAndSetDefault("jmx_max_restarts", int64(3)) config.BindEnvAndSetDefault("jmx_restart_interval", int64(5)) config.BindEnvAndSetDefault("jmx_thread_pool_size", 3) config.BindEnvAndSetDefault("jmx_reconnection_thread_pool_size", 3) config.BindEnvAndSetDefault("jmx_collection_timeout", 60) config.BindEnvAndSetDefault("jmx_reconnection_timeout", 10) // Go_expvar server port config.BindEnvAndSetDefault("expvar_port", "5000") // Trace agent config.BindEnvAndSetDefault("apm_config.enabled", true) // Process agent config.BindEnv("process_config.process_dd_url", "") // Logs Agent // External Use: modify those parameters to configure the logs-agent. // enable the logs-agent: config.BindEnvAndSetDefault("logs_enabled", false) config.BindEnvAndSetDefault("log_enabled", false) // deprecated, use logs_enabled instead // collect all logs from all containers: config.BindEnvAndSetDefault("logs_config.container_collect_all", false) // add a socks5 proxy: config.BindEnvAndSetDefault("logs_config.socks5_proxy_address", "") // send the logs to a proxy: config.BindEnv("logs_config.logs_dd_url") // must respect format '<HOST>:<PORT>' and '<PORT>' to be an integer config.BindEnvAndSetDefault("logs_config.logs_no_ssl", false) // send the logs to the port 443 of the logs-backend via TCP: config.BindEnvAndSetDefault("logs_config.use_port_443", false) // increase the read buffer size of the UDP sockets: config.BindEnvAndSetDefault("logs_config.frame_size", 9000) // increase the number of files that can be tailed in parallel: config.BindEnvAndSetDefault("logs_config.open_files_limit", 100) // add global processing rules that are applied on all logs config.BindEnv("logs_config.processing_rules") // Internal Use Only: avoid modifying those configuration parameters, this could lead to unexpected results. config.BindEnvAndSetDefault("logset", "") config.BindEnvAndSetDefault("logs_config.run_path", defaultRunPath) config.BindEnv("logs_config.dd_url") config.BindEnvAndSetDefault("logs_config.dd_port", 10516) config.BindEnvAndSetDefault("logs_config.dev_mode_use_proto", true) config.BindEnvAndSetDefault("logs_config.dd_url_443", "agent-443-intake.logs.datadoghq.com") config.BindEnvAndSetDefault("logs_config.stop_grace_period", 30) // The cardinality of tags to send for checks and dogstatsd respectively. // Choices are: low, orchestrator, high. // WARNING: sending orchestrator, or high tags for dogstatsd metrics may create more metrics // (one per container instead of one per host). // Changing this setting may impact your custom metrics billing. 
config.BindEnvAndSetDefault("checks_tag_cardinality", "low") config.BindEnvAndSetDefault("dogstatsd_tag_cardinality", "low") config.BindEnvAndSetDefault("histogram_copy_to_distribution", false) config.BindEnvAndSetDefault("histogram_copy_to_distribution_prefix", "") config.BindEnv("api_key") config.BindEnvAndSetDefault("hpa_watcher_polling_freq", 10) config.BindEnvAndSetDefault("hpa_watcher_gc_period", 60*5) // 5 minutes config.BindEnvAndSetDefault("external_metrics_provider.enabled", false) config.BindEnvAndSetDefault("external_metrics_provider.port", 443) config.BindEnvAndSetDefault("hpa_configmap_name", "datadog-custom-metrics") config.BindEnvAndSetDefault("external_metrics_provider.refresh_period", 30) // value in seconds. Frequency of batch calls to the ConfigMap persistent store (GlobalStore) by the Leader. config.BindEnvAndSetDefault("external_metrics_provider.batch_window", 10) // value in seconds. Batch the events from the Autoscalers informer to push updates to the ConfigMap (GlobalStore) config.BindEnvAndSetDefault("external_metrics_provider.max_age", 120) // value in seconds. 4 cycles from the HPA controller (up to Kubernetes 1.11) is enough to consider a metric stale config.BindEnvAndSetDefault("external_metrics.aggregator", "avg") // aggregator used for the external metrics. Choose from [avg,sum,max,min] config.BindEnvAndSetDefault("external_metrics_provider.bucket_size", 60*5) // Window to query to get the metric from Datadog. config.BindEnvAndSetDefault("external_metrics_provider.rollup", 30) // Bucket size to circumvent time aggregation side effects. config.BindEnvAndSetDefault("kubernetes_informers_resync_period", 60*5) // value in seconds. Default to 5 minutes config.BindEnvAndSetDefault("kubernetes_informers_restclient_timeout", 60) // value in seconds config.BindEnvAndSetDefault("external_metrics_provider.local_copy_refresh_rate", 30) // value in seconds // Cluster check Autodiscovery config.BindEnvAndSetDefault("cluster_checks.enabled", false) config.BindEnvAndSetDefault("cluster_checks.node_expiration_timeout", 30) // value in seconds config.BindEnvAndSetDefault("cluster_checks.warmup_duration", 30) // value in seconds config.BindEnvAndSetDefault("cluster_checks.cluster_tag_name", "cluster_name") config.BindEnvAndSetDefault("cluster_checks.extra_tags", []string{}) setAssetFs(config) } var ( ddURLs = map[string]interface{}{ "app.datadoghq.com": nil, "app.datadoghq.eu": nil, "app.datad0g.com": nil, "app.datad0g.eu": nil, } ) // GetProxies returns the proxy settings from the configuration func GetProxies() *Proxy { return proxies } // loadProxyFromEnv overrides the proxy settings with environment variables func loadProxyFromEnv(config Config) { // Viper doesn't handle mixing nested variables from files and set // manually. If we manually set one of the sub value for "proxy" all // other values from the conf file will be shadowed when using // 'config.Get("proxy")'. For that reason we first get the value from // the conf files, overwrite them with the env variables and reset // everything. 
lookupEnvCaseInsensitive := func(key string) (string, bool) { value, found := os.LookupEnv(key) if !found { value, found = os.LookupEnv(strings.ToLower(key)) } if found { log.Infof("Found '%v' env var, using it for the Agent proxy settings", key) } return value, found } lookupEnv := func(key string) (string, bool) { value, found := os.LookupEnv(key) if found { log.Infof("Found '%v' env var, using it for the Agent proxy settings", key) } return value, found } var isSet bool p := &Proxy{} if isSet = config.IsSet("proxy"); isSet { if err := config.UnmarshalKey("proxy", p); err != nil { isSet = false log.Errorf("Could not load proxy setting from the configuration (ignoring): %s", err) } } if HTTP, found := lookupEnv("DD_PROXY_HTTP"); found { isSet = true p.HTTP = HTTP } else if HTTP, found := lookupEnvCaseInsensitive("HTTP_PROXY"); found { isSet = true p.HTTP = HTTP } if HTTPS, found := lookupEnv("DD_PROXY_HTTPS"); found { isSet = true p.HTTPS = HTTPS } else if HTTPS, found := lookupEnvCaseInsensitive("HTTPS_PROXY"); found { isSet = true p.HTTPS = HTTPS } if noProxy, found := lookupEnv("DD_PROXY_NO_PROXY"); found { isSet = true p.NoProxy = strings.Split(noProxy, " ") // space-separated list, consistent with viper } else if noProxy, found := lookupEnvCaseInsensitive("NO_PROXY"); found { isSet = true p.NoProxy = strings.Split(noProxy, ",") // comma-separated list, consistent with other tools that use the NO_PROXY env var } // We have to set each value individually so both config.Get("proxy") // and config.Get("proxy.http") work if isSet { config.Set("proxy.http", p.HTTP) config.Set("proxy.https", p.HTTPS) config.Set("proxy.no_proxy", p.NoProxy) proxies = p } } // Load reads configs files and initializes the config module func Load() error { return load(Datadog, "datadog.yaml") } func load(config Config, origin string) error { log.Infof("config.Load()") if err := config.ReadInConfig(); err != nil { log.Warnf("config.load() error %v", err) return err } log.Infof("config.load succeeded") // We have to init the secrets package before we can use it to decrypt // anything. secrets.Init( config.GetString("secret_backend_command"), config.GetStringSlice("secret_backend_arguments"), config.GetInt("secret_backend_timeout"), config.GetInt("secret_backend_output_max_size"), ) if config.IsSet("secret_backend_command") { // Viper doesn't expose the final location of the file it // loads. Since we are searching for 'datadog.yaml' in multiple // locations we let viper determine the one to use before // updating it. 
yamlConf, err := yaml.Marshal(config.AllSettings()) if err != nil { return fmt.Errorf("unable to marshal configuration to YAML to decrypt secrets: %v", err) } finalYamlConf, err := secrets.Decrypt(yamlConf, origin) if err != nil { return fmt.Errorf("unable to decrypt secret from datadog.yaml: %v", err) } r := bytes.NewReader(finalYamlConf) if err = config.MergeConfigOverride(r); err != nil { return fmt.Errorf("could not update main configuration after decrypting secrets: %v", err) } } loadProxyFromEnv(config) sanitizeAPIKey(config) applyOverrides(config) return nil } // Avoid log ingestion breaking because of a newline in the API key func sanitizeAPIKey(config Config) { config.Set("api_key", strings.TrimSpace(config.GetString("api_key"))) } // GetMainInfraEndpoint returns the main DD Infra URL defined in the config, based on the value of `site` and `dd_url` func GetMainInfraEndpoint() string { return getMainInfraEndpointWithConfig(Datadog) } // GetMainEndpoint returns the main DD URL defined in the config, based on `site` and the prefix, or ddURLKey func GetMainEndpoint(prefix string, ddURLKey string) string { return GetMainEndpointWithConfig(Datadog, prefix, ddURLKey) } // GetMultipleEndpoints returns the api keys per domain specified in the main agent config func GetMultipleEndpoints() (map[string][]string, error) { return getMultipleEndpointsWithConfig(Datadog) } // getDomainPrefix provides the right prefix for agent X.Y.Z func getDomainPrefix(app string) string { v, _ := version.New(version.AgentVersion, version.Commit) return fmt.Sprintf("%d-%d-%d-%s.agent", v.Major, v.Minor, v.Patch, app) } // AddAgentVersionToDomain prefixes the domain with the agent version: X-Y-Z.domain func AddAgentVersionToDomain(DDURL string, app string) (string, error) { u, err := url.Parse(DDURL) if err != nil { return "", err } // we don't udpdate unknown URL (ie: proxy or custom StatsD server) if _, found := ddURLs[u.Host]; !found { return DDURL, nil } subdomain := strings.Split(u.Host, ".")[0] newSubdomain := getDomainPrefix(app) u.Host = strings.Replace(u.Host, subdomain, newSubdomain, 1) return u.String(), nil } func getMainInfraEndpointWithConfig(config Config) string { return GetMainEndpointWithConfig(config, infraURLPrefix, "dd_url") } // GetMainEndpointWithConfig implements the logic to extract the DD URL from a config, based on `site` and ddURLKey func GetMainEndpointWithConfig(config Config, prefix string, ddURLKey string) (resolvedDDURL string) { if config.IsSet(ddURLKey) && config.GetString(ddURLKey) != "" { // value under ddURLKey takes precedence over 'site' resolvedDDURL = config.GetString(ddURLKey) if config.IsSet("site") { log.Infof("'site' and '%s' are both set in config: setting main endpoint to '%s': \"%s\"", ddURLKey, ddURLKey, config.GetString(ddURLKey)) } } else if config.GetString("site") != "" { resolvedDDURL = prefix + strings.TrimSpace(config.GetString("site")) } else { resolvedDDURL = prefix + DefaultSite } return } // getMultipleEndpointsWithConfig implements the logic to extract the api keys per domain from an agent config func getMultipleEndpointsWithConfig(config Config) (map[string][]string, error) { // Validating domain ddURL := getMainInfraEndpointWithConfig(config) _, err := url.Parse(ddURL) if err != nil { return nil, fmt.Errorf("could not parse main endpoint: %s", err) } keysPerDomain := map[string][]string{ ddURL: { config.GetString("api_key"), }, } var additionalEndpoints map[string][]string err = config.UnmarshalKey("additional_endpoints", &additionalEndpoints) if err 
!= nil { return keysPerDomain, err } // merge additional endpoints into keysPerDomain for domain, apiKeys := range additionalEndpoints { // Validating domain _, err := url.Parse(domain) if err != nil { return nil, fmt.Errorf("could not parse url from 'additional_endpoints' %s: %s", domain, err) } if _, ok := keysPerDomain[domain]; ok { for _, apiKey := range apiKeys { keysPerDomain[domain] = append(keysPerDomain[domain], apiKey) } } else { keysPerDomain[domain] = apiKeys } } // dedupe api keys and remove domains with no api keys (or empty ones) for domain, apiKeys := range keysPerDomain { dedupedAPIKeys := make([]string, 0, len(apiKeys)) seen := make(map[string]bool) for _, apiKey := range apiKeys { trimmedAPIKey := strings.TrimSpace(apiKey) if _, ok := seen[trimmedAPIKey]; !ok && trimmedAPIKey != "" { seen[trimmedAPIKey] = true dedupedAPIKeys = append(dedupedAPIKeys, trimmedAPIKey) } } if len(dedupedAPIKeys) > 0 { keysPerDomain[domain] = dedupedAPIKeys } else { log.Infof("No API key provided for domain \"%s\", removing domain from endpoints", domain) delete(keysPerDomain, domain) } } return keysPerDomain, nil } // IsContainerized returns whether the Agent is running on a Docker container func IsContainerized() bool { return os.Getenv("DOCKER_DD_AGENT") != "" } // FileUsedDir returns the absolute path to the folder containing the config // file used to populate the registry func FileUsedDir() string { return filepath.Dir(Datadog.ConfigFileUsed()) } // IsKubernetes returns whether the Agent is running on a kubernetes cluster func IsKubernetes() bool { // Injected by Kubernetes itself if os.Getenv("KUBERNETES_SERVICE_PORT") != "" { return true } // support of Datadog environment variable for Kubernetes if os.Getenv("KUBERNETES") != "" { return true } return false } // SetOverrides provides an externally accessible method for // overriding config variables. // This method must be called before Load() to be effective. func SetOverrides(vars map[string]interface{}) { overrideVars = vars } // applyOverrides overrides config variables. func applyOverrides(config Config) { for k, v := range overrideVars { config.Set(k, v) } }
[ "\"DOCKER_DD_AGENT\"", "\"KUBERNETES_SERVICE_PORT\"", "\"KUBERNETES\"" ]
[]
[ "DOCKER_DD_AGENT", "KUBERNETES", "KUBERNETES_SERVICE_PORT" ]
[]
["DOCKER_DD_AGENT", "KUBERNETES", "KUBERNETES_SERVICE_PORT"]
go
3
0
atriaapp/manage.py
#!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "atriaapp.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
[]
[]
[]
[]
[]
python
0
0
allennlp/common/file_utils.py
""" Utilities for working with the local dataset cache. """ import weakref from contextlib import contextmanager import glob import io import os import logging import tempfile import json from abc import ABC from collections import defaultdict from dataclasses import dataclass, asdict from datetime import timedelta from fnmatch import fnmatch from os import PathLike from urllib.parse import urlparse from pathlib import Path from typing import ( Optional, Tuple, Union, IO, Callable, Set, List, Iterator, Iterable, Dict, NamedTuple, MutableMapping, ) from hashlib import sha256 from functools import wraps from weakref import WeakValueDictionary from zipfile import ZipFile, is_zipfile import tarfile import shutil import pickle import time import warnings import boto3 import botocore import torch from filelock import FileLock as _FileLock from google.cloud import storage from google.api_core.exceptions import NotFound import numpy as np from overrides import overrides import requests from requests.adapters import HTTPAdapter from requests.packages.urllib3.util.retry import Retry import lmdb from torch import Tensor import huggingface_hub as hf_hub from allennlp.version import VERSION from allennlp.common.tqdm import Tqdm logger = logging.getLogger(__name__) CACHE_ROOT = Path(os.getenv("ALLENNLP_CACHE_ROOT", Path.home() / ".allennlp")) CACHE_DIRECTORY = str(CACHE_ROOT / "cache") DEPRECATED_CACHE_DIRECTORY = str(CACHE_ROOT / "datasets") # This variable was deprecated in 0.7.2 since we use a single folder for caching # all types of files (datasets, models, etc.) DATASET_CACHE = CACHE_DIRECTORY # Warn if the user is still using the deprecated cache directory. if os.path.exists(DEPRECATED_CACHE_DIRECTORY): logger.warning( f"Deprecated cache directory found ({DEPRECATED_CACHE_DIRECTORY}). " f"Please remove this directory from your system to free up space." ) class FileLock(_FileLock): """ This is just a subclass of the `FileLock` class from the `filelock` library, except that it adds an additional argument to the `__init__` method: `read_only_ok`. By default this flag is `False`, which an exception will be thrown when a lock can't be acquired due to lack of write permissions. But if this flag is set to `True`, a warning will be emitted instead of an error when the lock already exists but the lock can't be acquired because write access is blocked. """ def __init__( self, lock_file: Union[str, PathLike], timeout=-1, read_only_ok: bool = False ) -> None: super().__init__(str(lock_file), timeout=timeout) self._read_only_ok = read_only_ok @overrides def acquire(self, timeout=None, poll_interval=0.05): try: super().acquire(timeout=timeout, poll_intervall=poll_interval) except OSError as err: # OSError could be a lot of different things, but what we're looking # for in particular are permission errors, such as: # - errno 1 - EPERM - "Operation not permitted" # - errno 13 - EACCES - "Permission denied" # - errno 30 - EROFS - "Read-only file system" if err.errno not in (1, 13, 30): raise if os.path.isfile(self._lock_file) and self._read_only_ok: warnings.warn( f"Lacking permissions required to obtain lock '{self._lock_file}'. " "Race conditions are possible if other processes are writing to the same resource.", UserWarning, ) else: raise def _resource_to_filename(resource: str, etag: str = None) -> str: """ Convert a `resource` into a hashed filename in a repeatable way. If `etag` is specified, append its hash to the resources's, delimited by a period. 
""" resource_bytes = resource.encode("utf-8") resource_hash = sha256(resource_bytes) filename = resource_hash.hexdigest() if etag: etag_bytes = etag.encode("utf-8") etag_hash = sha256(etag_bytes) filename += "." + etag_hash.hexdigest() return filename def filename_to_url(filename: str, cache_dir: Union[str, Path] = None) -> Tuple[str, str]: """ Return the url and etag (which may be `None`) stored for `filename`. Raise `FileNotFoundError` if `filename` or its stored metadata do not exist. """ if cache_dir is None: cache_dir = CACHE_DIRECTORY cache_path = os.path.join(cache_dir, filename) if not os.path.exists(cache_path): raise FileNotFoundError("file {} not found".format(cache_path)) meta_path = cache_path + ".json" if not os.path.exists(meta_path): raise FileNotFoundError("file {} not found".format(meta_path)) with open(meta_path) as meta_file: metadata = json.load(meta_file) url = metadata["url"] etag = metadata["etag"] return url, etag def check_tarfile(tar_file: tarfile.TarFile): """Tar files can contain files outside of the extraction directory, or symlinks that point outside the extraction directory. We also don't want any block devices fifos, or other weird file types extracted. This checks for those issues and throws an exception if there is a problem.""" base_path = os.path.join("tmp", "pathtest") base_path = os.path.normpath(base_path) def normalize_path(path: str) -> str: path = path.rstrip("/") path = path.replace("/", os.sep) path = os.path.join(base_path, path) path = os.path.normpath(path) return path for tarinfo in tar_file: if not ( tarinfo.isreg() or tarinfo.isdir() or tarinfo.isfile() or tarinfo.islnk() or tarinfo.issym() ): raise ValueError( f"Tar file {str(tar_file.name)} contains invalid member {tarinfo.name}." ) target_path = normalize_path(tarinfo.name) if os.path.commonprefix([base_path, target_path]) != base_path: raise ValueError( f"Tar file {str(tar_file.name)} is trying to create a file outside of its extraction directory." ) if tarinfo.islnk() or tarinfo.issym(): target_path = normalize_path(tarinfo.linkname) if os.path.commonprefix([base_path, target_path]) != base_path: raise ValueError( f"Tar file {str(tar_file.name)} is trying to link to a file " "outside of its extraction directory." ) def cached_path( url_or_filename: Union[str, PathLike], cache_dir: Union[str, Path] = None, extract_archive: bool = False, force_extract: bool = False, ) -> str: """ Given something that might be a URL or local path, determine which. If it's a remote resource, download the file and cache it, and then return the path to the cached file. If it's already a local path, make sure the file exists and return the path. For URLs, "http://", "https://", "s3://", "gs://", and "hf://" are all supported. The latter corresponds to the HuggingFace Hub. For example, to download the PyTorch weights for the model `epwalsh/bert-xsmall-dummy` on HuggingFace, you could do: ```python cached_path("hf://epwalsh/bert-xsmall-dummy/pytorch_model.bin") ``` For paths or URLs that point to a tarfile or zipfile, you can also add a path to a specific file to the `url_or_filename` preceeded by a "!", and the archive will be automatically extracted (provided you set `extract_archive` to `True`), returning the local path to the specific file. For example: ```python cached_path("model.tar.gz!weights.th", extract_archive=True) ``` # Parameters url_or_filename : `Union[str, Path]` A URL or path to parse and possibly download. 
cache_dir : `Union[str, Path]`, optional (default = `None`) The directory to cache downloads. extract_archive : `bool`, optional (default = `False`) If `True`, then zip or tar.gz archives will be automatically extracted. In which case the directory is returned. force_extract : `bool`, optional (default = `False`) If `True` and the file is an archive file, it will be extracted regardless of whether or not the extracted directory already exists. !!! Warning Use this flag with caution! This can lead to race conditions if used from multiple processes on the same file. """ if cache_dir is None: cache_dir = CACHE_DIRECTORY cache_dir = os.path.expanduser(cache_dir) os.makedirs(cache_dir, exist_ok=True) if not isinstance(url_or_filename, str): url_or_filename = str(url_or_filename) file_path: str extraction_path: Optional[str] = None # If we're using the /a/b/foo.zip!c/d/file.txt syntax, handle it here. exclamation_index = url_or_filename.find("!") if extract_archive and exclamation_index >= 0: archive_path = url_or_filename[:exclamation_index] file_name = url_or_filename[exclamation_index + 1 :] # Call 'cached_path' recursively now to get the local path to the archive itself. cached_archive_path = cached_path(archive_path, cache_dir, True, force_extract) if not os.path.isdir(cached_archive_path): raise ValueError( f"{url_or_filename} uses the ! syntax, but does not specify an archive file." ) # Now return the full path to the desired file within the extracted archive, # provided it exists. file_path = os.path.join(cached_archive_path, file_name) if not os.path.exists(file_path): raise FileNotFoundError(f"file {file_name} not found within {archive_path}") return file_path parsed = urlparse(url_or_filename) if parsed.scheme in ("http", "https", "s3", "hf", "gs"): # URL, so get it from the cache (downloading if necessary) file_path = get_from_cache(url_or_filename, cache_dir) if extract_archive and (is_zipfile(file_path) or tarfile.is_tarfile(file_path)): # This is the path the file should be extracted to. # For example ~/.allennlp/cache/234234.21341 -> ~/.allennlp/cache/234234.21341-extracted extraction_path = file_path + "-extracted" else: url_or_filename = os.path.expanduser(url_or_filename) if os.path.exists(url_or_filename): # File, and it exists. file_path = url_or_filename # Normalize the path. url_or_filename = os.path.abspath(url_or_filename) if ( extract_archive and os.path.isfile(file_path) and (is_zipfile(file_path) or tarfile.is_tarfile(file_path)) ): # We'll use a unique directory within the cache to root to extract the archive to. # The name of the directory is a hash of the resource file path and it's modification # time. That way, if the file changes, we'll know when to extract it again. extraction_name = ( _resource_to_filename(url_or_filename, str(os.path.getmtime(file_path))) + "-extracted" ) extraction_path = os.path.join(cache_dir, extraction_name) elif parsed.scheme == "": # File, but it doesn't exist. raise FileNotFoundError(f"file {url_or_filename} not found") else: # Something unknown raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path") if extraction_path is not None: # If the extracted directory already exists (and is non-empty), then no # need to create a lock file and extract again unless `force_extract=True`. if os.path.isdir(extraction_path) and os.listdir(extraction_path) and not force_extract: return extraction_path # Extract it. 
with FileLock(extraction_path + ".lock"): # Check again if the directory exists now that we've acquired the lock. if os.path.isdir(extraction_path) and os.listdir(extraction_path): if force_extract: logger.warning( "Extraction directory for %s (%s) already exists, " "overwriting it since 'force_extract' is 'True'", url_or_filename, extraction_path, ) else: return extraction_path logger.info("Extracting %s to %s", url_or_filename, extraction_path) shutil.rmtree(extraction_path, ignore_errors=True) # We extract first to a temporary directory in case something goes wrong # during the extraction process so we don't end up with a corrupted cache. tmp_extraction_dir = tempfile.mkdtemp(dir=os.path.split(extraction_path)[0]) try: if is_zipfile(file_path): with ZipFile(file_path, "r") as zip_file: zip_file.extractall(tmp_extraction_dir) zip_file.close() else: tar_file = tarfile.open(file_path) check_tarfile(tar_file) tar_file.extractall(tmp_extraction_dir) tar_file.close() # Extraction was successful, rename temp directory to final # cache directory and dump the meta data. os.replace(tmp_extraction_dir, extraction_path) meta = _Meta( resource=url_or_filename, cached_path=extraction_path, creation_time=time.time(), extraction_dir=True, size=_get_resource_size(extraction_path), ) meta.to_file() finally: shutil.rmtree(tmp_extraction_dir, ignore_errors=True) return extraction_path return file_path def is_url_or_existing_file(url_or_filename: Union[str, Path, None]) -> bool: """ Given something that might be a URL (or might be a local path), determine check if it's url or an existing file path. """ if url_or_filename is None: return False url_or_filename = os.path.expanduser(str(url_or_filename)) parsed = urlparse(url_or_filename) return parsed.scheme in ("http", "https", "s3", "gs") or os.path.exists(url_or_filename) def _split_s3_path(url: str) -> Tuple[str, str]: return _split_cloud_path(url, "s3") def _split_gcs_path(url: str) -> Tuple[str, str]: return _split_cloud_path(url, "gs") def _split_cloud_path(url: str, provider: str) -> Tuple[str, str]: """Split a full s3 path into the bucket name and path.""" parsed = urlparse(url) if not parsed.netloc or not parsed.path: raise ValueError("bad {} path {}".format(provider, url)) bucket_name = parsed.netloc provider_path = parsed.path # Remove '/' at beginning of path. if provider_path.startswith("/"): provider_path = provider_path[1:] return bucket_name, provider_path def _s3_request(func: Callable): """ Wrapper function for s3 requests in order to create more helpful error messages. """ @wraps(func) def wrapper(url: str, *args, **kwargs): try: return func(url, *args, **kwargs) except botocore.exceptions.ClientError as exc: if int(exc.response["Error"]["Code"]) == 404: raise FileNotFoundError("file {} not found".format(url)) else: raise return wrapper def _get_s3_resource(): session = boto3.session.Session() if session.get_credentials() is None: # Use unsigned requests. 
s3_resource = session.resource( "s3", config=botocore.client.Config(signature_version=botocore.UNSIGNED) ) else: s3_resource = session.resource("s3") return s3_resource @_s3_request def _s3_etag(url: str) -> Optional[str]: """Check ETag on S3 object.""" s3_resource = _get_s3_resource() bucket_name, s3_path = _split_s3_path(url) s3_object = s3_resource.Object(bucket_name, s3_path) return s3_object.e_tag @_s3_request def _s3_get(url: str, temp_file: IO) -> None: """Pull a file directly from S3.""" s3_resource = _get_s3_resource() bucket_name, s3_path = _split_s3_path(url) s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file) def _gcs_request(func: Callable): """ Wrapper function for gcs requests in order to create more helpful error messages. """ @wraps(func) def wrapper(url: str, *args, **kwargs): try: return func(url, *args, **kwargs) except NotFound: raise FileNotFoundError("file {} not found".format(url)) return wrapper def _get_gcs_client(): storage_client = storage.Client() return storage_client def _get_gcs_blob(url: str) -> storage.blob.Blob: gcs_resource = _get_gcs_client() bucket_name, gcs_path = _split_gcs_path(url) bucket = gcs_resource.bucket(bucket_name) blob = bucket.blob(gcs_path) return blob @_gcs_request def _gcs_md5(url: str) -> Optional[str]: """Get GCS object's md5.""" blob = _get_gcs_blob(url) return blob.md5_hash @_gcs_request def _gcs_get(url: str, temp_filename: str) -> None: """Pull a file directly from GCS.""" blob = _get_gcs_blob(url) blob.download_to_filename(temp_filename) def _session_with_backoff() -> requests.Session: """ We ran into an issue where http requests to s3 were timing out, possibly because we were making too many requests too quickly. This helper function returns a requests session that has retry-with-backoff built in. See <https://stackoverflow.com/questions/23267409/how-to-implement-retry-mechanism-into-python-requests-library>. """ session = requests.Session() retries = Retry(total=5, backoff_factor=1, status_forcelist=[502, 503, 504]) session.mount("http://", HTTPAdapter(max_retries=retries)) session.mount("https://", HTTPAdapter(max_retries=retries)) return session def _http_etag(url: str) -> Optional[str]: with _session_with_backoff() as session: response = session.head(url, allow_redirects=True) if response.status_code != 200: raise OSError( "HEAD request failed for url {} with status code {}".format(url, response.status_code) ) return response.headers.get("ETag") def _http_get(url: str, temp_file: IO) -> None: with _session_with_backoff() as session: req = session.get(url, stream=True) req.raise_for_status() content_length = req.headers.get("Content-Length") total = int(content_length) if content_length is not None else None progress = Tqdm.tqdm(unit="B", total=total, desc="downloading") for chunk in req.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks progress.update(len(chunk)) temp_file.write(chunk) progress.close() def _find_latest_cached(url: str, cache_dir: Union[str, Path]) -> Optional[str]: filename = _resource_to_filename(url) cache_path = os.path.join(cache_dir, filename) candidates: List[Tuple[str, float]] = [] for path in glob.glob(cache_path + "*"): if path.endswith(".json") or path.endswith("-extracted") or path.endswith(".lock"): continue mtime = os.path.getmtime(path) candidates.append((path, mtime)) # Sort candidates by modification time, newest first. 
candidates.sort(key=lambda x: x[1], reverse=True) if candidates: return candidates[0][0] return None def _serialize(data): buffer = pickle.dumps(data, protocol=-1) return np.frombuffer(buffer, dtype=np.uint8) _active_tensor_caches: MutableMapping[int, "TensorCache"] = weakref.WeakValueDictionary() def _unique_file_id(path: Union[str, PathLike]) -> int: result = os.stat(path).st_ino assert result != 0 return result class TensorCache(MutableMapping[str, Tensor], ABC): """ This is a key-value store, mapping strings to tensors. The data is kept on disk, making this class useful as a cache for storing tensors. `TensorCache` is also safe to access from multiple processes at the same time, so you can use it in distributed training situations, or from multiple training runs at the same time. """ def __new__(cls, filename: Union[str, PathLike], *, read_only: bool = False, **kwargs): # This mechanism makes sure we re-use open lmdb file handles. Lmdb has a problem when the same file is # opened by the same process multiple times. This is our workaround. filename = str(filename) try: result = _active_tensor_caches.get(_unique_file_id(filename)) except FileNotFoundError: result = None if result is None: result = super(TensorCache, cls).__new__( cls, filename, read_only=read_only, **kwargs ) # type: ignore return result def __init__( self, filename: Union[str, PathLike], *, map_size: int = 1024 * 1024 * 1024 * 1024, read_only: bool = False, ) -> None: """ Creates a `TensorCache` by either opening an existing one on disk, or creating a new one. Its interface is almost exactly like a Python dictionary, where the keys are strings and the values are `torch.Tensor`. Parameters ---------- filename: `str` Path to the location of the cache map_size: `int`, optional, defaults to 1TB This is the maximum size the cache will ever grow to. On reasonable operating systems, there is no penalty to making this a large value. `TensorCache` uses a memory-mapped file to store the data. When the file is first opened, we have to give the maximum size it can ever grow to. This is that number. Reasonable operating systems don't actually allocate that space until it is really needed. """ self.lmdb_env: lmdb.Environment if hasattr(self, "lmdb_env"): # We're being initialized again after a cache hit in _active_tensor_caches, thanks # to __new__. In this case, we may have to upgrade to read/write, but other than # that we are good to go. if read_only: return if not self.read_only: return # Upgrade a read-only lmdb env to a read/write lmdb env. filename = self.lmdb_env.path() old_info = self.lmdb_env.info() self.lmdb_env.close() self.lmdb_env = lmdb.open( filename, map_size=old_info["map_size"], subdir=False, metasync=False, sync=True, readahead=False, meminit=False, readonly=False, lock=True, ) else: filename = str(filename) cpu_count = os.cpu_count() or 1 if os.path.exists(filename): if os.path.isfile(filename): # If the file is not writable, set read_only to True, but issue a warning. if not os.access(filename, os.W_OK): if not read_only: warnings.warn( f"File '{filename}' is read-only, so cache will be read-only", UserWarning, ) read_only = True else: # If it's not a file, raise an error. raise ValueError("Expect a file, found a directory instead") use_lock = True if read_only: # Check if the lock file is writable. If it's not, then we won't be able to use the lock. # This is always how lmdb names the lock file. 
lock_filename = filename + "-lock" if os.path.isfile(lock_filename): use_lock = os.access(lock_filename, os.W_OK) else: # If the lock file doesn't exist yet, then the directory needs to be writable in # order to create and use the lock file. use_lock = os.access(os.path.dirname(lock_filename), os.W_OK) if not use_lock: warnings.warn( f"Lacking permissions to use lock file on cache '{filename}'.\nUse at your own risk!", UserWarning, ) self.lmdb_env = lmdb.open( filename, subdir=False, map_size=map_size, max_readers=cpu_count * 4, max_spare_txns=cpu_count * 4, metasync=False, sync=True, readahead=False, meminit=False, readonly=read_only, lock=use_lock, ) _active_tensor_caches[_unique_file_id(filename)] = self # We have another cache here that makes sure we return the same object for the same key. Without it, # you would get a different tensor, using different memory, every time you call __getitem__(), even # if you call it with the same key. # The downside is that we can't keep self.cache_cache up to date when multiple processes modify the # cache at the same time. We can guarantee though that it is up to date as long as processes either # write new values, or read existing ones. self.cache_cache: MutableMapping[str, Tensor] = WeakValueDictionary() @property def read_only(self) -> bool: return self.lmdb_env.flags()["readonly"] def __contains__(self, key: object): if not isinstance(key, str): return False if key in self.cache_cache: return True encoded_key = key.encode() with self.lmdb_env.begin(write=False) as txn: result = txn.get(encoded_key) return result is not None def __getitem__(self, key: str): try: return self.cache_cache[key] except KeyError: encoded_key = key.encode() with self.lmdb_env.begin(write=False) as txn: buffer = txn.get(encoded_key) if buffer is None: raise KeyError() tensor = torch.load(io.BytesIO(buffer), map_location="cpu") self.cache_cache[key] = tensor return tensor def __setitem__(self, key: str, tensor: torch.Tensor): if self.read_only: raise ValueError("cannot write to a read-only cache") tensor = tensor.cpu() encoded_key = key.encode() buffer = io.BytesIO() if tensor.storage().size() != np.prod(tensor.size()): tensor = tensor.clone() assert tensor.storage().size() == np.prod(tensor.size()) torch.save(tensor.detach(), buffer, pickle_protocol=pickle.HIGHEST_PROTOCOL) with self.lmdb_env.begin(write=True) as txn: txn.put(encoded_key, buffer.getbuffer()) self.cache_cache[key] = tensor def __delitem__(self, key: str): if self.read_only: raise ValueError("cannot write to a read-only cache") encoded_key = key.encode() with self.lmdb_env.begin(write=True) as txn: txn.delete(encoded_key) try: del self.cache_cache[key] except KeyError: pass def __del__(self): if self.lmdb_env is not None: self.lmdb_env.close() self.lmdb_env = None def __len__(self): return self.lmdb_env.stat()["entries"] def __iter__(self): # It is not hard to implement this, but we have not needed it so far. raise NotImplementedError() class CacheFile: """ This is a context manager that makes robust caching easier. On `__enter__`, an IO handle to a temporarily file is returned, which can be treated as if it's the actual cache file. On `__exit__`, the temporarily file is renamed to the cache file. If anything goes wrong while writing to the temporary file, it will be removed. 
""" def __init__( self, cache_filename: Union[PathLike, str], mode: str = "w+b", suffix: str = ".tmp" ) -> None: self.cache_filename = ( cache_filename if isinstance(cache_filename, Path) else Path(cache_filename) ) self.cache_directory = os.path.dirname(self.cache_filename) self.mode = mode self.temp_file = tempfile.NamedTemporaryFile( self.mode, dir=self.cache_directory, delete=False, suffix=suffix ) def __enter__(self): return self.temp_file def __exit__(self, exc_type, exc_value, traceback): self.temp_file.close() if exc_value is None: # Success. logger.debug( "Renaming temp file %s to cache at %s", self.temp_file.name, self.cache_filename ) # Rename the temp file to the actual cache filename. os.replace(self.temp_file.name, self.cache_filename) return True # Something went wrong, remove the temp file. logger.debug("removing temp file %s", self.temp_file.name) os.remove(self.temp_file.name) return False class LocalCacheResource: """ This is a context manager that can be used to fetch and cache arbitrary resources locally using the same mechanisms that `cached_path` uses for remote resources. It can be used, for example, when you want to cache the result of an expensive computation. # Examples ```python with LocalCacheResource("long-computation", "v1") as cache: if cache.cached(): with cache.reader() as f: # read from cache else: with cache.writer() as f: # do the computation # ... # write to cache ``` """ def __init__(self, resource_name: str, version: str, cache_dir: str = CACHE_DIRECTORY) -> None: self.resource_name = resource_name self.version = version self.cache_dir = cache_dir self.path = os.path.join(self.cache_dir, _resource_to_filename(resource_name, version)) self.file_lock = FileLock(self.path + ".lock") def cached(self) -> bool: return os.path.exists(self.path) @contextmanager def writer(self, mode="w"): if self.cached(): raise ValueError( f"local cache of {self.resource_name} (version '{self.version}') already exists!" ) with CacheFile(self.path, mode=mode) as f: yield f meta = _Meta( resource=self.resource_name, cached_path=self.path, creation_time=time.time(), etag=self.version, size=_get_resource_size(self.path), ) meta.to_file() @contextmanager def reader(self, mode="r"): if not self.cached(): raise ValueError( f"local cache of {self.resource_name} (version '{self.version}') does not exist yet!" ) with open(self.path, mode) as f: yield f def __enter__(self): self.file_lock.acquire() return self def __exit__(self, exc_type, exc_value, traceback): self.file_lock.release() if exc_value is None: return True return False @dataclass class _Meta: """ Any resource that is downloaded to - or extracted in - the cache directory will have a meta JSON file written next to it, which corresponds to an instance of this class. In older versions of AllenNLP, this meta document just had two fields: 'url' and 'etag'. The 'url' field is now the more general 'resource' field, but these old meta files are still compatible when a `_Meta` is instantiated with the `.from_path()` class method. """ resource: str """ URL or normalized path to the resource. """ cached_path: str """ Path to the corresponding cached version of the resource. """ creation_time: float """ The unix timestamp of when the corresponding resource was cached or extracted. """ size: int = 0 """ The size of the corresponding resource, in bytes. """ etag: Optional[str] = None """ Optional ETag associated with the current cached version of the resource. 
""" extraction_dir: bool = False """ Does this meta corresponded to an extraction directory? """ def to_file(self) -> None: with open(self.cached_path + ".json", "w") as meta_file: json.dump(asdict(self), meta_file) @classmethod def from_path(cls, path: Union[str, Path]) -> "_Meta": path = str(path) with open(path) as meta_file: data = json.load(meta_file) # For backwards compat: if "resource" not in data: data["resource"] = data.pop("url") if "creation_time" not in data: data["creation_time"] = os.path.getmtime(path[:-5]) if "extraction_dir" not in data and path.endswith("-extracted.json"): data["extraction_dir"] = True if "cached_path" not in data: data["cached_path"] = path[:-5] if "size" not in data: data["size"] = _get_resource_size(data["cached_path"]) return cls(**data) def _hf_hub_download( url, model_identifier: str, filename: Optional[str], cache_dir: Union[str, Path] ) -> str: revision: Optional[str] if "@" in model_identifier: repo_id = model_identifier.split("@")[0] revision = model_identifier.split("@")[1] else: repo_id = model_identifier revision = None if filename is not None: hub_url = hf_hub.hf_hub_url(repo_id=repo_id, filename=filename, revision=revision) cache_path = str( hf_hub.cached_download( url=hub_url, library_name="allennlp", library_version=VERSION, cache_dir=cache_dir, ) ) # HF writes it's own meta '.json' file which uses the same format we used to use and still # support, but is missing some fields that we like to have. # So we overwrite it when it we can. with FileLock(cache_path + ".lock", read_only_ok=True): meta = _Meta.from_path(cache_path + ".json") # The file HF writes will have 'resource' set to the 'http' URL corresponding to the 'hf://' URL, # but we want 'resource' to be the original 'hf://' URL. if meta.resource != url: meta.resource = url meta.to_file() else: cache_path = str(hf_hub.snapshot_download(repo_id, revision=revision, cache_dir=cache_dir)) # Need to write the meta file for snapshot downloads if it doesn't exist. with FileLock(cache_path + ".lock", read_only_ok=True): if not os.path.exists(cache_path + ".json"): meta = _Meta( resource=url, cached_path=cache_path, creation_time=time.time(), extraction_dir=True, size=_get_resource_size(cache_path), ) meta.to_file() return cache_path # TODO(joelgrus): do we want to do checksums or anything like that? def get_from_cache(url: str, cache_dir: Union[str, Path] = None) -> str: """ Given a URL, look for the corresponding dataset in the local cache. If it's not there, download it. Then return the path to the cached file. """ if cache_dir is None: cache_dir = CACHE_DIRECTORY if url.startswith("hf://"): # Remove the 'hf://' prefix identifier = url[5:] if identifier.count("/") > 1: filename = "/".join(identifier.split("/")[2:]) model_identifier = "/".join(identifier.split("/")[:2]) return _hf_hub_download(url, model_identifier, filename, cache_dir) elif identifier.count("/") == 1: # 'hf://' URLs like 'hf://xxxx/yyyy' are potentially ambiguous, # because this could refer to either: # 1. the file 'yyyy' in the 'xxxx' repository, or # 2. the repo 'yyyy' under the user/org name 'xxxx'. # We default to (1), but if we get a 404 error then we try (2). 
try: model_identifier, filename = identifier.split("/") return _hf_hub_download(url, model_identifier, filename, cache_dir) except requests.exceptions.HTTPError as exc: if exc.response.status_code == 404: return _hf_hub_download(url, identifier, None, cache_dir) raise else: return _hf_hub_download(url, identifier, None, cache_dir) # Get eTag to add to filename, if it exists. try: if url.startswith("s3://"): etag = _s3_etag(url) elif url.startswith("gs://"): etag = _gcs_md5(url) else: etag = _http_etag(url) except (requests.exceptions.ConnectionError, botocore.exceptions.EndpointConnectionError): # We might be offline, in which case we don't want to throw an error # just yet. Instead, we'll try to use the latest cached version of the # target resource, if it exists. We'll only throw an exception if we # haven't cached the resource at all yet. logger.warning( "Connection error occurred while trying to fetch ETag for %s. " "Will attempt to use latest cached version of resource", url, ) latest_cached = _find_latest_cached(url, cache_dir) if latest_cached: logger.info( "ETag request failed with connection error, using latest cached " "version of %s: %s", url, latest_cached, ) return latest_cached else: logger.error( "Connection failed while trying to fetch ETag, " "and no cached version of %s could be found", url, ) raise except OSError: # OSError may be triggered if we were unable to fetch the eTag. # If this is the case, try to proceed without eTag check. etag = None filename = _resource_to_filename(url, etag) # Get cache path to put the file. cache_path = os.path.join(cache_dir, filename) # Multiple processes may be trying to cache the same file at once, so we need # to be a little careful to avoid race conditions. We do this using a lock file. # Only one process can own this lock file at a time, and a process will block # on the call to `lock.acquire()` until the process currently holding the lock # releases it. logger.debug("waiting to acquire lock on %s", cache_path) with FileLock(cache_path + ".lock", read_only_ok=True): if os.path.exists(cache_path): logger.info("cache of %s is up-to-date", url) else: with CacheFile(cache_path) as cache_file: logger.info("%s not found in cache, downloading to %s", url, cache_path) # GET file object if url.startswith("s3://"): _s3_get(url, cache_file) elif url.startswith("gs://"): _gcs_get(url, cache_file.name) else: _http_get(url, cache_file) logger.debug("creating metadata file for %s", cache_path) meta = _Meta( resource=url, cached_path=cache_path, creation_time=time.time(), etag=etag, size=_get_resource_size(cache_path), ) meta.to_file() return cache_path def read_set_from_file(filename: str) -> Set[str]: """ Extract a de-duped collection (set) of text from a file. Expected file format is one item per line. 
""" collection = set() with open(filename, "r") as file_: for line in file_: collection.add(line.rstrip()) return collection def get_file_extension(path: str, dot=True, lower: bool = True): ext = os.path.splitext(path)[1] ext = ext if dot else ext[1:] return ext.lower() if lower else ext def open_compressed( filename: Union[str, PathLike], mode: str = "rt", encoding: Optional[str] = "UTF-8", **kwargs ): if not isinstance(filename, str): filename = str(filename) open_fn: Callable = open if filename.endswith(".gz"): import gzip open_fn = gzip.open elif filename.endswith(".bz2"): import bz2 open_fn = bz2.open return open_fn(cached_path(filename), mode=mode, encoding=encoding, **kwargs) def text_lines_from_file(filename: Union[str, PathLike], strip_lines: bool = True) -> Iterator[str]: with open_compressed(filename, "rt", encoding="UTF-8", errors="replace") as p: if strip_lines: for line in p: yield line.strip() else: yield from p def json_lines_from_file(filename: Union[str, PathLike]) -> Iterable[Union[list, dict]]: return (json.loads(line) for line in text_lines_from_file(filename)) def _get_resource_size(path: str) -> int: """ Get the size of a file or directory. """ if os.path.isfile(path): return os.path.getsize(path) inodes: Set[int] = set() total_size = 0 for dirpath, dirnames, filenames in os.walk(path): for f in filenames: fp = os.path.join(dirpath, f) # skip if it is symbolic link or the same as a file we've already accounted # for (this could happen with hard links). inode = os.stat(fp).st_ino if not os.path.islink(fp) and inode not in inodes: inodes.add(inode) total_size += os.path.getsize(fp) return total_size class _CacheEntry(NamedTuple): regular_files: List[_Meta] extraction_dirs: List[_Meta] def _find_entries( patterns: List[str] = None, cache_dir: Union[str, Path] = None, ) -> Tuple[int, Dict[str, _CacheEntry]]: """ Find all cache entries, filtering ones that don't match any of the glob patterns given. Returns the total size of the matching entries and mapping or resource name to meta data. The values in the returned mapping are tuples because we seperate meta entries that correspond to extraction directories vs regular cache entries. """ cache_dir = os.path.expanduser(cache_dir or CACHE_DIRECTORY) total_size: int = 0 cache_entries: Dict[str, _CacheEntry] = defaultdict(lambda: _CacheEntry([], [])) for meta_path in glob.glob(str(cache_dir) + "/*.json"): meta = _Meta.from_path(meta_path) if patterns and not any(fnmatch(meta.resource, p) for p in patterns): continue if meta.extraction_dir: cache_entries[meta.resource].extraction_dirs.append(meta) else: cache_entries[meta.resource].regular_files.append(meta) total_size += meta.size # Sort entries for each resource by creation time, newest first. for entry in cache_entries.values(): entry.regular_files.sort(key=lambda meta: meta.creation_time, reverse=True) entry.extraction_dirs.sort(key=lambda meta: meta.creation_time, reverse=True) return total_size, cache_entries def remove_cache_entries(patterns: List[str], cache_dir: Union[str, Path] = None) -> int: """ Remove cache entries matching the given patterns. Returns the total reclaimed space in bytes. 
""" total_size, cache_entries = _find_entries(patterns=patterns, cache_dir=cache_dir) for resource, entry in cache_entries.items(): for meta in entry.regular_files: logger.info("Removing cached version of %s at %s", resource, meta.cached_path) os.remove(meta.cached_path) if os.path.exists(meta.cached_path + ".lock"): os.remove(meta.cached_path + ".lock") os.remove(meta.cached_path + ".json") for meta in entry.extraction_dirs: logger.info("Removing extracted version of %s at %s", resource, meta.cached_path) shutil.rmtree(meta.cached_path) if os.path.exists(meta.cached_path + ".lock"): os.remove(meta.cached_path + ".lock") os.remove(meta.cached_path + ".json") return total_size def inspect_cache(patterns: List[str] = None, cache_dir: Union[str, Path] = None): """ Print out useful information about the cache directory. """ from allennlp.common.util import format_timedelta, format_size cache_dir = os.path.expanduser(cache_dir or CACHE_DIRECTORY) # Gather cache entries by resource. total_size, cache_entries = _find_entries(patterns=patterns, cache_dir=cache_dir) if patterns: print(f"Cached resources matching {patterns}:") else: print("Cached resources:") for resource, entry in sorted( cache_entries.items(), # Sort by creation time, latest first. key=lambda x: max( 0 if not x[1][0] else x[1][0][0].creation_time, 0 if not x[1][1] else x[1][1][0].creation_time, ), reverse=True, ): print("\n-", resource) if entry.regular_files: td = timedelta(seconds=time.time() - entry.regular_files[0].creation_time) n_versions = len(entry.regular_files) size = entry.regular_files[0].size print( f" {n_versions} {'versions' if n_versions > 1 else 'version'} cached, " f"latest {format_size(size)} from {format_timedelta(td)} ago" ) if entry.extraction_dirs: td = timedelta(seconds=time.time() - entry.extraction_dirs[0].creation_time) n_versions = len(entry.extraction_dirs) size = entry.extraction_dirs[0].size print( f" {n_versions} {'versions' if n_versions > 1 else 'version'} extracted, " f"latest {format_size(size)} from {format_timedelta(td)} ago" ) print(f"\nTotal size: {format_size(total_size)}")
[]
[]
[ "ALLENNLP_CACHE_ROOT" ]
[]
["ALLENNLP_CACHE_ROOT"]
python
1
0
pyaims/doc/sphinx/conf.py
# -*- coding: utf-8 -*-
#
# totor documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 24 17:33:44 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

from __future__ import print_function
from __future__ import absolute_import
import sys
import os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.append(os.path.abspath('.'))

try:
    import matplotlib
    sys.path.append(
        os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(
            matplotlib.__file__)), 'sphinx', 'ext')))
except Exception as e:
    print('warning:', e)

from soma import aims

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))

# -- General configuration -----------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
try:
    # try napoleon which replaces numpydoc (and googledoc),
    # comes with sphinx 1.2
    import sphinx.ext.napoleon
    napoleon = 'sphinx.ext.napoleon'
except ImportError:
    # not available, fallback to numpydoc
    napoleon = 'numpy_ext.numpydoc'

extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.intersphinx',
              'sphinx.ext.todo',
              'sphinx.ext.coverage',
              'sphinx.ext.ifconfig',
              'sphinx.ext.viewcode',
              'sphinx.ext.extlinks',
              'sphinx.ext.inheritance_diagram',
              'sphinx.ext.autosummary',
              #'sphinx.ext.imgmath',
              napoleon,
              ]

try:
    # nbsphinx converts ipython/jupyter notebooks to sphinx docs
    import nbsphinx
    nbsphinx_allow_errors = True
    extensions += ['nbsphinx', 'sphinx.ext.mathjax']
    # set this env variable to tell notebooks that we should not use
    # any GUI during notebooks execution
    os.environ['ALLOW_GUI'] = '0'
except ImportError:
    nbsphinx = None

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
# source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'PyAIMS'
copyright = u'2015, CEA'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join([str(x) for x in aims.version()])
# The full version, including alpha/beta/rc tags.
release = aims.versionString()

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
# unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_patterns = ['examples',
                    "_themes/scikit-learn/static/ML_MAPS_README.rst",
                    '_build', '**.ipynb_checkpoints'] \
    + templates_path

# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = '../../../../axon/trunk/share/icons/brainvisa.png'
html_style = 'custom.css'

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = '../../../../communication/web/trunk/gas/favicon.ico'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = { '**' : [ 'relations.html' ], }

# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {}

# If false, no module index is generated.
html_use_modindex = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'pyaimsdoc'

# -- Options for LaTeX output --------------------------------------------

# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'pyaims.tex', u'PyAIMS Documentation',
     u'someone', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False

# Additional stuff for the LaTeX preamble.
# latex_preamble = ''

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_use_modindex = True

autoclass_content = "both"

extlinks = {
    'aimsalgodox': ('../../aimsalgo-' + version + '/doxygen/%s', 'aimsalgodox '),
    'aimsalgoepy': ('../../pyaimsalgo-' + version + '/epydoc/%s', 'aimsalgoepy '),
    'aimsalgoex': ('../../pyaimsalgo-' + version + '/examples/%s', 'aimsalgoex '),
    'aims': ('%s', 'aims '),
    'aimsdox': ('../../aimsdata-' + version + '/doxygen/%s', 'aimsdox '),
    'aimsdata': ('../../aimsdata-' + version + '/%s', 'aimsdata '),
    'aimsepy': ('../../pyaims-' + version + '/epydoc/%s', 'aimsepy '),
    'cartobdox': ('../../cartobase-' + version + '/doxygen/%s', 'cartobdox '),
    'cartoddox': ('../../cartodata-' + version + '/doxygen/%s', 'cartoddox '),
    'graphdox': ('../../graph-' + version + '/doxygen/%s', 'graphdox '),
    'pyanatomist': ('../../pyanatomist-' + version + '/sphinx/%s', 'pyanatomist '),
    'numpy': ('https://numpy.org/%s', 'numpy '),
}

import soma
docpath = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(
    soma.__file__))), 'share', 'doc')

try:
    from soma import aims, aimsalgo
except:
    pass

intersphinx_mapping = {
    'somabase': (os.path.join(docpath, 'soma-base-' + version + '/sphinx'), None),
    'pyaims': (os.path.join(docpath, 'pyaims-' + version + '/sphinx'), None),
    'pyana': (os.path.join(docpath, 'pyanatomist-' + version + '/sphinx'), None),
    'aims': (os.path.join(docpath, 'aimsdata-' + version + '/dev_doc'), None),
    'python': ('http://docs.python.org/' +
               '.'.join([str(x) for x in sys.version_info[:2]]), None),
    'numpy': ('https://numpy.org/doc/stable/', None),
}
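# --- Editor's note: illustrative sketch, not part of conf.py above. ---
# conf.py sets os.environ['ALLOW_GUI'] = '0' before nbsphinx executes the
# example notebooks. A notebook could honor that flag roughly as follows;
# the helper name and the matplotlib fallback are assumptions for
# illustration only, not taken from the PyAIMS sources.

import os


def gui_allowed():
    """Return False when the docs build asked for a GUI-free run."""
    return os.environ.get('ALLOW_GUI', '1') != '0'


if not gui_allowed():
    # Use a non-interactive backend so figures render without opening any
    # windows during the Sphinx build.
    import matplotlib
    matplotlib.use('Agg')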
[]
[]
[ "ALLOW_GUI" ]
[]
["ALLOW_GUI"]
python
1
0
drivers/spanner/spanner_test.go
package spanner

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"testing"

	cloudspanner "cloud.google.com/go/spanner"
	"github.com/k1LoW/tbls/schema"
)

var ctx context.Context
var s *schema.Schema
var client *cloudspanner.Client

func TestAnalyze(t *testing.T) {
	ctx, client := initClient(t)
	defer client.Close()
	driver, err := New(ctx, client)
	if err != nil {
		t.Fatalf("%v", err)
	}
	err = driver.Analyze(s)
	if err != nil {
		t.Errorf("%v", err)
	}
	table, err := s.FindTableByName("posts")
	if err != nil {
		t.Fatalf("%v", err)
	}
	got := len(table.Constraints)
	want := 2
	if got != want {
		t.Errorf("got: %#v\nwant: %#v", got, want)
	}
}

// initClient skips the test when no Spanner credentials are available and
// otherwise returns a context and client for the test database.
func initClient(t *testing.T) (context.Context, *cloudspanner.Client) {
	cPath := credentialPath()
	if _, err := os.Lstat(cPath); err != nil {
		t.Skipf("spanner_client_secrets.json does not exist")
	}
	os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", cPath)
	var err error
	projectID := os.Getenv("GCLOUD_PROJECT")
	instanceID := "test-instance"
	databaseID := "testdb"
	db := fmt.Sprintf("projects/%s/instances/%s/databases/%s", projectID, instanceID, databaseID)
	s = &schema.Schema{
		Name: db,
	}
	ctx = context.Background()
	client, err = cloudspanner.NewClient(ctx, db)
	if err != nil {
		fmt.Printf("%s\n", err)
		os.Exit(1)
	}
	return ctx, client
}

// credentialPath returns the path to the service account JSON used by the test.
func credentialPath() string {
	wd, _ := os.Getwd()
	return filepath.Join(filepath.Dir(filepath.Dir(wd)), "spanner_client_secrets.json")
}
[ "\"GCLOUD_PROJECT\"" ]
[]
[ "GCLOUD_PROJECT" ]
[]
["GCLOUD_PROJECT"]
go
1
0