filename (stringlengths 4-198) | content (stringlengths 25-939k) | environment (sequence) | variablearg (sequence) | constarg (sequence) | variableargjson (stringclasses, 1 value) | constargjson (stringlengths 2-3.9k) | lang (stringclasses, 3 values) | constargcount (float64, 0-129, ⌀) | variableargcount (float64, 0-0, ⌀) | sentence (stringclasses, 1 value) |
---|---|---|---|---|---|---|---|---|---|---|
run_alphafold.py | # Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Full AlphaFold protein structure prediction script."""
import json
import os
import pathlib
import pickle
import random
import sys
import time
from typing import Dict
from string import ascii_uppercase
from absl import app
from absl import flags
from absl import logging
import numpy as np
from alphafold.common import protein
from alphafold.common import residue_constants
from alphafold.data import pipeline
from alphafold.data import parsers
from alphafold.data import templates
from alphafold.model import data
from alphafold.model import config
from alphafold.model import model
from alphafold.relax import relax
# Internal import (7716).
# Path to directory of supporting data, contains 'params' dir.
data_dir = '/proj/wallner/share/alphafold_data'
DOWNLOAD_DIR= data_dir
# Path to the Uniref90 database for use by JackHMMER.
uniref90_database_path = os.path.join(
DOWNLOAD_DIR, 'uniref90', 'uniref90.fasta')
# Path to the MGnify database for use by JackHMMER.
mgnify_database_path = os.path.join(
DOWNLOAD_DIR, 'mgnify', 'mgy_clusters.fa')
# Path to the BFD database for use by HHblits.
bfd_database_path = os.path.join(
DOWNLOAD_DIR, 'bfd',
'bfd_metaclust_clu_complete_id30_c90_final_seq.sorted_opt')
# Path to the Uniclust30 database for use by HHblits.
#uniclust30_database_path = os.path.join(
# DOWNLOAD_DIR, 'uniclust30', 'uniclust30_2018_08', 'uniclust30_2018_08')
uniclust30_database_path = os.path.join(
DOWNLOAD_DIR, 'uniclust30', 'UniRef30_2021_06', 'UniRef30_2021_06')
# Path to the PDB70 database for use by HHsearch.
pdb70_database_path = os.path.join(DOWNLOAD_DIR, 'pdb70', 'pdb70')
# Path to a directory with template mmCIF structures, each named <pdb_id>.cif
template_mmcif_dir = os.path.join(DOWNLOAD_DIR, 'pdb_mmcif', 'mmcif_files')
# Path to a file mapping obsolete PDB IDs to their replacements.
obsolete_pdbs_path = os.path.join(DOWNLOAD_DIR, 'pdb_mmcif', 'obsolete.dat')
#### END OF USER CONFIGURATION ####
# Names of models to use.
model_names = [
'model_1',
'model_2',
'model_3',
'model_4',
'model_5',
]
flags.DEFINE_list('fasta_paths', None, 'Paths to FASTA files, each containing '
'one sequence. Paths should be separated by commas. '
'All FASTA paths must have a unique basename as the '
'basename is used to name the output directories for '
'each prediction.')
flags.DEFINE_string('output_dir', None, 'Path to a directory that will '
'store the results.')
flags.DEFINE_list('model_names', model_names, 'Names of models to use.')
flags.DEFINE_string('data_dir', data_dir, 'Path to directory of supporting data.')
flags.DEFINE_string('jackhmmer_binary_path', '/proj/wallner/apps/hmmer-3.2.1/bin/jackhmmer',
'Path to the JackHMMER executable.')
flags.DEFINE_string('hhblits_binary_path', '/proj/wallner/apps/hhsuite/bin/hhblits',
'Path to the HHblits executable.')
flags.DEFINE_string('hhsearch_binary_path', '/proj/wallner/apps/hhsuite/bin/hhsearch',
'Path to the HHsearch executable.')
flags.DEFINE_string('kalign_binary_path', '/proj/wallner/apps/kalign/src/kalign',
'Path to the Kalign executable.')
flags.DEFINE_string('uniref90_database_path', uniref90_database_path, 'Path to the Uniref90 '
'database for use by JackHMMER.')
flags.DEFINE_string('mgnify_database_path', mgnify_database_path, 'Path to the MGnify '
'database for use by JackHMMER.')
flags.DEFINE_string('bfd_database_path', bfd_database_path, 'Path to the BFD '
'database for use by HHblits.')
flags.DEFINE_string('uniclust30_database_path', uniclust30_database_path, 'Path to the Uniclust30 '
'database for use by HHblits.')
flags.DEFINE_string('pdb70_database_path', pdb70_database_path, 'Path to the PDB70 '
'database for use by HHsearch.')
flags.DEFINE_string('template_mmcif_dir', template_mmcif_dir, 'Path to a directory with '
'template mmCIF structures, each named <pdb_id>.cif')
flags.DEFINE_string('max_template_date', '2050-01-01', 'Maximum template release date '
'to consider. Important if folding historical test sets.')
flags.DEFINE_string('obsolete_pdbs_path', obsolete_pdbs_path, 'Path to file containing a '
'mapping from obsolete PDB IDs to the PDB IDs of their '
'replacements.')
flags.DEFINE_enum('preset', 'full_dbs', ['full_dbs', 'casp14'],
'Choose preset model configuration - no ensembling '
'(full_dbs) or 8 model ensemblings (casp14).')
flags.DEFINE_boolean('benchmark', False, 'Run multiple JAX model evaluations '
'to obtain a timing that excludes the compilation time, '
'which should be more indicative of the time required for '
'inferencing many proteins.')
flags.DEFINE_boolean('exit_after_sequence_search',False,'Will exit after sequence search')
flags.DEFINE_boolean('norelax', False, 'Will skip relax')
flags.DEFINE_boolean('skip_bfd',False,'Skip the large BFD database (1.5TB) search')
flags.DEFINE_integer('random_seed', None, 'The random seed for the data '
'pipeline. By default, this is randomly generated. Note '
'that even if this is set, Alphafold may still not be '
'deterministic, because processes like GPU inference are '
'nondeterministic.')
flags.DEFINE_integer('nstruct',1,'number of structures to generate for each model')
flags.DEFINE_integer('chainbreak_offset',200,'number to offset the residue index in case of chain break')
FLAGS = flags.FLAGS
#print(FLAGS)
#sys.exit()
MAX_TEMPLATE_HITS = 20
RELAX_MAX_ITERATIONS = 0
RELAX_ENERGY_TOLERANCE = 2.39
RELAX_STIFFNESS = 10.0
RELAX_EXCLUDE_RESIDUES = []
RELAX_MAX_OUTER_ITERATIONS = 20
def predict_structure(
fasta_path: str,
fasta_name: str,
output_dir_base: str,
data_pipeline: pipeline.DataPipeline,
model_runners: Dict[str, model.RunModel],
amber_relaxer: relax.AmberRelaxation,
benchmark: bool,
random_seed: int):
"""Predicts structure using AlphaFold for the given sequence."""
timings = {}
output_dir = os.path.join(output_dir_base, fasta_name)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
msa_output_dir = os.path.join(output_dir, 'msas')
if not os.path.exists(msa_output_dir):
os.makedirs(msa_output_dir)
#check fasta file for chainbreaks
with open(fasta_path) as f:
input_fasta_str = f.read()
input_seqs, input_descs = parsers.parse_fasta(input_fasta_str)
if len(input_seqs) != 1:
raise ValueError(f'More than one input sequence found in {fasta_path}.')
input_sequence = input_seqs[0]
input_description = input_descs[0]
chain_numbering=[]
seqs=input_sequence.split('/')
fasta_concat=os.path.join(output_dir,f'concat.fasta')
with open(fasta_concat,'w') as f:
f.write(f'>{input_description} concat\n')
f.write("".join(seqs))
f.write('\n')
number_of_chains=0
for n,seq in enumerate(seqs):
chain=ascii_uppercase[n]
number_of_chains+=1
fasta_out=os.path.join(output_dir,f'chain{chain}.fasta')
#print(fasta_out)
with open(fasta_out,'w') as f:
f.write(f'>{input_description} chain {chain}\n')
for s in seq:
f.write(s)
chain_numbering.append(n+1)
f.write('\n')
#for a,b in zip("".join(seqs),chain_numbering):
# print(a,b)
print(input_sequence)
print(number_of_chains)
#Currently an MSA is made for the concatenated sequence
#Future feature will be to do the MSAs separately and merge them like so, or paired:
# make multiple copies of msa for each copy
# AAA------
# ---AAA---
# ------AAA
#
# note: if you concat the sequences (as below), it does NOT work according to https://colab.research.google.com/github/sokrypton/ColabFold/blob/main/AlphaFold2.ipynb)
# AAAAAAAAA
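# Illustrative sketch only (not executed here; per_chain_msas is a hypothetical
# variable): separately computed per-chain MSAs could be merged block-diagonally
# by padding each alignment row with gaps for the other chains:
#   total_len = sum(len(s) for s in seqs)
#   offset, padded_rows = 0, []
#   for msa, seq in zip(per_chain_msas, seqs):
#       left, right = '-' * offset, '-' * (total_len - offset - len(seq))
#       padded_rows += [left + row + right for row in msa]
#       offset += len(seq)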
fasta_path=fasta_concat
# Get features.
t_0 = time.time()
features_output_path = os.path.join(output_dir, 'features.pkl')
if not os.path.exists(features_output_path):
feature_dict = data_pipeline.process(
input_fasta_path=fasta_path,
msa_output_dir=msa_output_dir)
# Write out features as a pickled dictionary.
with open(features_output_path, 'wb') as f:
pickle.dump(feature_dict, f, protocol=4)
else:
feature_dict=pickle.load(open(features_output_path,'rb'))
if FLAGS.exit_after_sequence_search:
sys.exit()
if number_of_chains > 1:
# Based on Minkyung's code
# add big enough number to residue index to indicate chain breaks
#pointer to feature_dict
idx_res = feature_dict['residue_index']
for i,_ in enumerate(idx_res):
idx_res[i] += FLAGS.chainbreak_offset*chain_numbering[i]
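# Worked example (illustrative only): with the default chainbreak_offset=200 and
# two chains of length 10, chain A indices 0..9 become 200..209 and chain B
# indices 10..19 become 410..419, leaving a gap of roughly 200 residues that the
# model interprets as a chain break.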
#chains = list("".join([ascii_uppercase[n]*L for n,L in enumerate(Ls)]))
#feature_dict['residue_index'] = idx_res
# Write out modified features as a pickled dictionary.
features_output_path = os.path.join(output_dir, 'features.modified.pkl')
with open(features_output_path, 'wb') as f:
pickle.dump(feature_dict, f, protocol=4)
timings['features'] = time.time() - t_0
relaxed_pdbs = {}
plddts = {}
# Run the models.
for network_model_name, model_runner in model_runners.items():
logging.info('Running model %s', network_model_name)
t_0 = time.time()
processed_feature_dict = model_runner.process_features(
feature_dict, random_seed=random_seed)
timings[f'process_features_{network_model_name}'] = time.time() - t_0
t_0 = time.time()
for model_no in range(1,FLAGS.nstruct+1):
model_name=f'{network_model_name}_{model_no}'
unrelaxed_pdb_path = os.path.join(output_dir, f'unrelaxed_{model_name}.pdb')
relaxed_output_path = os.path.join(output_dir, f'relaxed_{model_name}.pdb')
result_output_path = os.path.join(output_dir, f'result_{model_name}.pkl')
prediction_result={}
unrelaxed_protein=None
if os.path.exists(unrelaxed_pdb_path):
logging.info(f'Found {unrelaxed_pdb_path}... loading predictions...')
with open(result_output_path,'rb') as f:
prediction_result=pickle.load(f)
plddt = prediction_result['plddt']
plddt_b_factors = np.repeat(plddt[:, None], residue_constants.atom_type_num, axis=-1)
unrelaxed_protein = protein.from_prediction(
features=processed_feature_dict,
result=prediction_result,
b_factors=plddt_b_factors)
else:
prediction_result = model_runner.predict(processed_feature_dict)
t_diff = time.time() - t_0
timings[f'predict_and_compile_{model_name}'] = t_diff
logging.info(
'Total JAX model %s predict time (includes compilation time, see --benchmark): %.0fs',
model_name, t_diff)
if benchmark:
t_0 = time.time()
model_runner.predict(processed_feature_dict)
timings[f'predict_benchmark_{model_name}'] = time.time() - t_0
# Get mean pLDDT confidence metric.
plddt = prediction_result['plddt']
plddts[model_name] = np.mean(plddt)
# Save the model outputs.
with open(result_output_path, 'wb') as f:
pickle.dump(prediction_result, f, protocol=4)
# unrelaxed_protein = protein.from_prediction(processed_feature_dict,
# prediction_result)
# Add the predicted LDDT in the b-factor column.
# Note that higher predicted LDDT value means higher model confidence.
plddt_b_factors = np.repeat(
plddt[:, None], residue_constants.atom_type_num, axis=-1)
unrelaxed_protein = protein.from_prediction(
features=processed_feature_dict,
result=prediction_result,
b_factors=plddt_b_factors)
with open(unrelaxed_pdb_path, 'w') as f:
f.write(protein.to_pdb(unrelaxed_protein))
f.write(f"pLLDT MEAN {np.mean(prediction_result['plddt'])}\n")
f.write(f"pLLDT MEDIAN {np.median(prediction_result['plddt'])}\n")
with open(unrelaxed_pdb_path+'.plldt', 'w') as f:
for pos,plddt in enumerate(prediction_result['plddt'],1):
f.write(f'{pos} {plddt}\n')
# Relax the prediction.
if not os.path.exists(relaxed_output_path):
t_0 = time.time()
relaxed_pdb_str=''
if FLAGS.norelax:
logging.info('Skipping relax hack by setting rlx=unrlx')
relaxed_pdb_str=protein.to_pdb(unrelaxed_protein)
else:
relaxed_pdb_str, _, _ = amber_relaxer.process(prot=unrelaxed_protein)
timings[f'relax_{model_name}'] = time.time() - t_0
relaxed_pdbs[model_name] = relaxed_pdb_str
# Save the relaxed PDB.
with open(relaxed_output_path, 'w') as f:
f.write(relaxed_pdb_str)
# Rank by pLDDT and write out relaxed PDBs in rank order.
ranked_order = []
for idx, (model_name, _) in enumerate(
sorted(plddts.items(), key=lambda x: x[1], reverse=True)):
ranked_order.append(model_name)
ranked_output_path = os.path.join(output_dir, f'ranked_{idx}.pdb')
with open(ranked_output_path, 'w') as f:
f.write(relaxed_pdbs[model_name])
ranking_output_path = os.path.join(output_dir, 'ranking_debug.json')
with open(ranking_output_path, 'w') as f:
f.write(json.dumps({'plddts': plddts, 'order': ranked_order}, indent=4))
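# For reference (illustrative values, not real output): ranking_debug.json ends
# up shaped like
#   {"plddts": {"model_1_1": 87.3, "model_2_1": 85.1}, "order": ["model_1_1", "model_2_1"]}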
logging.info('Final timings for %s: %s', fasta_name, timings)
timings_output_path = os.path.join(output_dir, 'timings.json')
with open(timings_output_path, 'w') as f:
f.write(json.dumps(timings, indent=4))
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
print(FLAGS.preset)
if FLAGS.preset == 'full_dbs':
num_ensemble = 1
elif FLAGS.preset == 'casp14':
num_ensemble = 8
# Check for duplicate FASTA file names.
fasta_names = [pathlib.Path(p).stem for p in FLAGS.fasta_paths]
if len(fasta_names) != len(set(fasta_names)):
raise ValueError('All FASTA paths must have a unique basename.')
template_featurizer = templates.TemplateHitFeaturizer(
mmcif_dir=FLAGS.template_mmcif_dir,
max_template_date=FLAGS.max_template_date,
max_hits=MAX_TEMPLATE_HITS,
kalign_binary_path=FLAGS.kalign_binary_path,
release_dates_path=None,
obsolete_pdbs_path=FLAGS.obsolete_pdbs_path)
data_pipeline = pipeline.DataPipeline(
jackhmmer_binary_path=FLAGS.jackhmmer_binary_path,
hhblits_binary_path=FLAGS.hhblits_binary_path,
hhsearch_binary_path=FLAGS.hhsearch_binary_path,
uniref90_database_path=FLAGS.uniref90_database_path,
mgnify_database_path=FLAGS.mgnify_database_path,
bfd_database_path=FLAGS.bfd_database_path,
uniclust30_database_path=FLAGS.uniclust30_database_path,
pdb70_database_path=FLAGS.pdb70_database_path,
template_featurizer=template_featurizer,
skip_bfd=FLAGS.skip_bfd)
model_runners = {}
for model_name in FLAGS.model_names:
model_config = config.model_config(model_name)
model_config.data.eval.num_ensemble = num_ensemble
model_params = data.get_model_haiku_params(
model_name=model_name, data_dir=FLAGS.data_dir)
model_runner = model.RunModel(model_config, model_params)
model_runners[model_name] = model_runner
logging.info('Have %d models: %s', len(model_runners),
list(model_runners.keys()))
amber_relaxer = relax.AmberRelaxation(
max_iterations=RELAX_MAX_ITERATIONS,
tolerance=RELAX_ENERGY_TOLERANCE,
stiffness=RELAX_STIFFNESS,
exclude_residues=RELAX_EXCLUDE_RESIDUES,
max_outer_iterations=RELAX_MAX_OUTER_ITERATIONS)
random_seed = FLAGS.random_seed
if random_seed is None:
random_seed = random.randrange(sys.maxsize)
logging.info('Using random seed %d for the data pipeline', random_seed)
# Predict structure for each of the sequences.
for fasta_path, fasta_name in zip(FLAGS.fasta_paths, fasta_names):
predict_structure(
fasta_path=fasta_path,
fasta_name=fasta_name,
output_dir_base=FLAGS.output_dir,
data_pipeline=data_pipeline,
model_runners=model_runners,
amber_relaxer=amber_relaxer,
benchmark=FLAGS.benchmark,
random_seed=random_seed)
if __name__ == '__main__':
flags.mark_flags_as_required([
'fasta_paths',
'output_dir',
# 'model_names',
# 'data_dir',
'preset',
# 'uniref90_database_path',
# 'mgnify_database_path',
# 'uniclust30_database_path',
# 'bfd_database_path',
# 'pdb70_database_path',
# 'template_mmcif_dir',
# 'max_template_date',
# 'obsolete_pdbs_path',
])
app.run(main)
| [] | [] | [] | [] | [] | python | null | null | null |
tsweb/tsweb.go | // Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package tsweb contains code used in various Tailscale webservers.
package tsweb
import (
"bufio"
"bytes"
"context"
"errors"
"expvar"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
_ "net/http/pprof"
"os"
"path/filepath"
"runtime"
"strings"
"time"
"tailscale.com/metrics"
"tailscale.com/net/interfaces"
"tailscale.com/types/logger"
)
// DevMode controls whether extra output is shown, for when the binary is being run in dev mode.
var DevMode bool
// NewMux returns a new ServeMux with debugHandler registered (and protected) at /debug/.
func NewMux(debugHandler http.Handler) *http.ServeMux {
mux := http.NewServeMux()
registerCommonDebug(mux)
mux.Handle("/debug/", Protected(debugHandler))
return mux
}
func registerCommonDebug(mux *http.ServeMux) {
expvar.Publish("counter_uptime_sec", expvar.Func(func() interface{} { return int64(Uptime().Seconds()) }))
mux.Handle("/debug/pprof/", Protected(http.DefaultServeMux)) // to net/http/pprof
mux.Handle("/debug/vars", Protected(http.DefaultServeMux)) // to expvar
mux.Handle("/debug/varz", Protected(http.HandlerFunc(varzHandler)))
}
func DefaultCertDir(leafDir string) string {
cacheDir, err := os.UserCacheDir()
if err == nil {
return filepath.Join(cacheDir, "tailscale", leafDir)
}
return ""
}
// IsProd443 reports whether addr is a Go listen address for port 443.
func IsProd443(addr string) bool {
_, port, _ := net.SplitHostPort(addr)
return port == "443" || port == "https"
}
// AllowDebugAccess reports whether r should be permitted to access
// various debug endpoints.
func AllowDebugAccess(r *http.Request) bool {
if r.Header.Get("X-Forwarded-For") != "" {
// TODO if/when needed. For now, conservative:
return false
}
ipStr, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
return false
}
ip := net.ParseIP(ipStr)
if interfaces.IsTailscaleIP(ip) || ip.IsLoopback() || ipStr == os.Getenv("TS_ALLOW_DEBUG_IP") {
return true
}
if r.Method == "GET" {
urlKey := r.FormValue("debugkey")
keyPath := os.Getenv("TS_DEBUG_KEY_PATH")
if urlKey != "" && keyPath != "" {
slurp, err := ioutil.ReadFile(keyPath)
if err == nil && string(bytes.TrimSpace(slurp)) == urlKey {
return true
}
}
}
return false
}
// Protected wraps a provided debug handler, h, returning a Handler
// that enforces AllowDebugAccess and returns forbidden replies for
// unauthorized requests.
func Protected(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if !AllowDebugAccess(r) {
msg := "debug access denied"
if DevMode {
ipStr, _, _ := net.SplitHostPort(r.RemoteAddr)
msg += fmt.Sprintf("; to permit access, set TS_ALLOW_DEBUG_IP=%v", ipStr)
}
http.Error(w, msg, http.StatusForbidden)
return
}
h.ServeHTTP(w, r)
})
}
var timeStart = time.Now()
func Uptime() time.Duration { return time.Since(timeStart).Round(time.Second) }
// Port80Handler is the handler to be given to
// autocert.Manager.HTTPHandler. The inner handler is the mux
// returned by NewMux containing registered /debug handlers.
type Port80Handler struct{ Main http.Handler }
func (h Port80Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
path := r.RequestURI
if path == "/debug" || strings.HasPrefix(path, "/debug") {
h.Main.ServeHTTP(w, r)
return
}
if r.Method != "GET" && r.Method != "HEAD" {
http.Error(w, "Use HTTPS", http.StatusBadRequest)
return
}
if path == "/" && AllowDebugAccess(r) {
// Redirect authorized user to the debug handler.
path = "/debug/"
}
target := "https://" + stripPort(r.Host) + path
http.Redirect(w, r, target, http.StatusFound)
}
func stripPort(hostport string) string {
host, _, err := net.SplitHostPort(hostport)
if err != nil {
return hostport
}
return net.JoinHostPort(host, "443")
}
// ReturnHandler is like net/http.Handler, but the handler can return an
// error instead of writing to its ResponseWriter.
type ReturnHandler interface {
// ServeHTTPReturn is like http.Handler.ServeHTTP, except that
// it can choose to return an error instead of writing to its
// http.ResponseWriter.
//
// If ServeHTTPReturn returns an error, its caller should handle
// the error by serving an HTTP 500 response to the user. The
// error details should not be sent to the client, as they may
// contain sensitive information. If the error is an
// HTTPError, though, callers should use the HTTP response
// code and message as the response to the client.
ServeHTTPReturn(http.ResponseWriter, *http.Request) error
}
// StdHandler converts a ReturnHandler into a standard http.Handler.
// Handled requests are logged using logf, as are any errors. Errors
// are handled as specified by the Handler interface.
func StdHandler(h ReturnHandler, logf logger.Logf) http.Handler {
return stdHandler(h, logf, time.Now, true)
}
// ReturnHandlerFunc is an adapter to allow the use of ordinary
// functions as ReturnHandlers. If f is a function with the
// appropriate signature, ReturnHandlerFunc(f) is a ReturnHandler that
// calls f.
type ReturnHandlerFunc func(http.ResponseWriter, *http.Request) error
// ServeHTTPReturn calls f(w, r).
func (f ReturnHandlerFunc) ServeHTTPReturn(w http.ResponseWriter, r *http.Request) error {
return f(w, r)
}
// StdHandlerNo200s is like StdHandler, but successfully handled HTTP
// requests don't write an access log entry to logf.
//
// TODO(danderson): quick stopgap, probably want ...Options on StdHandler instead?
func StdHandlerNo200s(h ReturnHandler, logf logger.Logf) http.Handler {
return stdHandler(h, logf, time.Now, false)
}
func stdHandler(h ReturnHandler, logf logger.Logf, now func() time.Time, log200s bool) http.Handler {
return retHandler{h, logf, now, log200s}
}
// retHandler is an http.Handler that wraps a Handler and handles errors.
type retHandler struct {
rh ReturnHandler
logf logger.Logf
timeNow func() time.Time
log200s bool
}
// ServeHTTP implements the http.Handler interface.
func (h retHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
msg := AccessLogRecord{
When: h.timeNow(),
RemoteAddr: r.RemoteAddr,
Proto: r.Proto,
TLS: r.TLS != nil,
Host: r.Host,
Method: r.Method,
RequestURI: r.URL.RequestURI(),
UserAgent: r.UserAgent(),
Referer: r.Referer(),
}
lw := &loggingResponseWriter{ResponseWriter: w, logf: h.logf}
err := h.rh.ServeHTTPReturn(lw, r)
hErr, hErrOK := err.(HTTPError)
if lw.code == 0 && err == nil && !lw.hijacked {
// If the handler didn't write and didn't send a header, that still means 200.
// (See https://play.golang.org/p/4P7nx_Tap7p)
lw.code = 200
}
msg.Seconds = h.timeNow().Sub(msg.When).Seconds()
msg.Code = lw.code
msg.Bytes = lw.bytes
switch {
case lw.hijacked:
// Connection no longer belongs to us, just log that we
// switched protocols away from HTTP.
if msg.Code == 0 {
msg.Code = http.StatusSwitchingProtocols
}
case err != nil && r.Context().Err() == context.Canceled:
msg.Code = 499 // nginx convention: Client Closed Request
msg.Err = context.Canceled.Error()
case hErrOK:
// Handler asked us to send an error. Do so, if we haven't
// already sent a response.
if hErr.Err != nil {
msg.Err = hErr.Err.Error()
}
if lw.code != 0 {
h.logf("[unexpected] handler returned HTTPError %v, but already sent a response with code %d", hErr, lw.code)
break
}
msg.Code = hErr.Code
if msg.Code == 0 {
h.logf("[unexpected] HTTPError %v did not contain an HTTP status code, sending internal server error", hErr)
msg.Code = http.StatusInternalServerError
}
http.Error(lw, hErr.Msg, msg.Code)
case err != nil:
// Handler returned a generic error. Serve an internal server
// error, if necessary.
msg.Err = err.Error()
if lw.code == 0 {
msg.Code = http.StatusInternalServerError
http.Error(lw, "internal server error", msg.Code)
}
}
if msg.Code != 200 || h.log200s {
h.logf("%s", msg)
}
}
// loggingResponseWriter wraps a ResponseWriter and records the HTTP
// response code that gets sent, if any.
type loggingResponseWriter struct {
http.ResponseWriter
code int
bytes int
hijacked bool
logf logger.Logf
}
// WriteHeader implements http.Handler.
func (l *loggingResponseWriter) WriteHeader(statusCode int) {
if l.code != 0 {
l.logf("[unexpected] HTTP handler set statusCode twice (%d and %d)", l.code, statusCode)
return
}
l.code = statusCode
l.ResponseWriter.WriteHeader(statusCode)
}
// Write implements http.Handler.
func (l *loggingResponseWriter) Write(bs []byte) (int, error) {
if l.code == 0 {
l.code = 200
}
n, err := l.ResponseWriter.Write(bs)
l.bytes += n
return n, err
}
// Hijack implements http.Hijacker. Note that hijacking can still fail
// because the wrapped ResponseWriter is not required to implement
// Hijacker, as this breaks HTTP/2.
func (l *loggingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
h, ok := l.ResponseWriter.(http.Hijacker)
if !ok {
return nil, nil, errors.New("ResponseWriter is not a Hijacker")
}
conn, buf, err := h.Hijack()
if err == nil {
l.hijacked = true
}
return conn, buf, err
}
func (l loggingResponseWriter) Flush() {
f, _ := l.ResponseWriter.(http.Flusher)
if f == nil {
l.logf("[unexpected] tried to Flush a ResponseWriter that can't flush")
return
}
f.Flush()
}
// HTTPError is an error with embedded HTTP response information.
//
// It is the error type to be (optionally) used by Handler.ServeHTTPReturn.
type HTTPError struct {
Code int // HTTP response code to send to client; 0 means 500
Msg string // Response body to send to client
Err error // Detailed error to log on the server
}
// Error implements the error interface.
func (e HTTPError) Error() string { return fmt.Sprintf("httperror{%d, %q, %v}", e.Code, e.Msg, e.Err) }
// Error returns an HTTPError containing the given information.
func Error(code int, msg string, err error) HTTPError {
return HTTPError{Code: code, Msg: msg, Err: err}
}
// varzHandler is an HTTP handler to write expvar values into the
// prometheus export format:
//
// https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md
//
// It makes the following assumptions:
//
// * *expvar.Int are counters (unless marked as a gauge_; see below)
// * a *tailscale/metrics.Set is descended into, joining keys with
// underscores. So use underscores as your metric names.
// * an expvar named starting with "gauge_" or "counter_" is of that
// Prometheus type, and has that prefix stripped.
// * anything else is untyped and thus not exported.
// * expvar.Func can return an int or int64 (for now) and anything else
// is not exported.
//
// This will evolve over time, or perhaps be replaced.
func varzHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain; version=0.0.4")
var dump func(prefix string, kv expvar.KeyValue)
dump = func(prefix string, kv expvar.KeyValue) {
name := prefix + kv.Key
var typ string
switch {
case strings.HasPrefix(kv.Key, "gauge_"):
typ = "gauge"
name = prefix + strings.TrimPrefix(kv.Key, "gauge_")
case strings.HasPrefix(kv.Key, "counter_"):
typ = "counter"
name = prefix + strings.TrimPrefix(kv.Key, "counter_")
}
switch v := kv.Value.(type) {
case *expvar.Int:
if typ == "" {
typ = "counter"
}
fmt.Fprintf(w, "# TYPE %s %s\n%s %v\n", name, typ, name, v.Value())
return
case *metrics.Set:
v.Do(func(kv expvar.KeyValue) {
dump(name+"_", kv)
})
return
}
if typ == "" {
var funcRet string
if f, ok := kv.Value.(expvar.Func); ok {
v := f()
if ms, ok := v.(runtime.MemStats); ok && name == "memstats" {
writeMemstats(w, &ms)
return
}
funcRet = fmt.Sprintf(" returning %T", v)
}
fmt.Fprintf(w, "# skipping expvar %q (Go type %T%s) with undeclared Prometheus type\n", name, kv.Value, funcRet)
return
}
switch v := kv.Value.(type) {
case expvar.Func:
val := v()
switch val.(type) {
case int64, int:
fmt.Fprintf(w, "# TYPE %s %s\n%s %v\n", name, typ, name, val)
default:
fmt.Fprintf(w, "# skipping expvar func %q returning unknown type %T\n", name, val)
}
case *metrics.LabelMap:
fmt.Fprintf(w, "# TYPE %s %s\n", name, typ)
// IntMap uses expvar.Map on the inside, which presorts
// keys. The output ordering is deterministic.
v.Do(func(kv expvar.KeyValue) {
fmt.Fprintf(w, "%s{%s=%q} %v\n", name, v.Label, kv.Key, kv.Value)
})
}
}
expvar.Do(func(kv expvar.KeyValue) {
dump("", kv)
})
}
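// Illustrative usage sketch (hypothetical metric name, not part of this
// package): publishing an expvar that varzHandler will export as a Prometheus
// gauge by following the "gauge_" naming convention described above:
//
//	var queueDepth expvar.Int
//	func init() { expvar.Publish("gauge_queue_depth", &queueDepth) }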
func writeMemstats(w io.Writer, ms *runtime.MemStats) {
out := func(name, typ string, v uint64, help string) {
if help != "" {
fmt.Fprintf(w, "# HELP memstats_%s %s\n", name, help)
}
fmt.Fprintf(w, "# TYPE memstats_%s %s\nmemstats_%s %v\n", name, typ, name, v)
}
g := func(name string, v uint64, help string) { out(name, "gauge", v, help) }
c := func(name string, v uint64, help string) { out(name, "counter", v, help) }
g("heap_alloc", ms.HeapAlloc, "current bytes of allocated heap objects (up/down smoothly)")
c("total_alloc", ms.TotalAlloc, "cumulative bytes allocated for heap objects")
g("sys", ms.Sys, "total bytes of memory obtained from the OS")
c("mallocs", ms.Mallocs, "cumulative count of heap objects allocated")
c("frees", ms.Frees, "cumulative count of heap objects freed")
c("num_gc", uint64(ms.NumGC), "number of completed GC cycles")
}
| [
"\"TS_ALLOW_DEBUG_IP\"",
"\"TS_DEBUG_KEY_PATH\""
] | [] | [
"TS_ALLOW_DEBUG_IP",
"TS_DEBUG_KEY_PATH"
] | [] | ["TS_ALLOW_DEBUG_IP", "TS_DEBUG_KEY_PATH"] | go | 2 | 0 | |
integration/fixture/default.go | package fixture
import (
"os"
. "github.com/openlyinc/pointy"
apiclient "github.com/smartxworks/cloudtower-go-sdk/v2/client"
"github.com/smartxworks/cloudtower-go-sdk/v2/client/cluster"
"github.com/smartxworks/cloudtower-go-sdk/v2/client/vlan"
"github.com/smartxworks/cloudtower-go-sdk/v2/models"
)
var client *apiclient.Cloudtower = nil
func GetClient() *apiclient.Cloudtower {
if client != nil {
return client
}
return apiclient.NewWithUserConfig(apiclient.ClientConfig{
Host: os.Getenv("CLOUDTOWER_SDK_ENDPOINT"),
BasePath: "v2/api",
Schemes: []string{"http"},
}, apiclient.UserConfig{
Name: os.Getenv("CLOUDTOWER_SDK_USERNAME"),
Password: os.Getenv("CLOUDTOWER_SDK_PASSWORD"),
Source: models.UserSource(os.Getenv("CLOUDTOWER_SDK_USERSOURCE")),
})
}
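// Example environment for the integration fixtures (illustrative values only;
// endpoint, credentials and user source are assumptions, not real settings):
//
//	export CLOUDTOWER_SDK_ENDPOINT=tower.example.local
//	export CLOUDTOWER_SDK_USERNAME=admin
//	export CLOUDTOWER_SDK_PASSWORD=secret
//	export CLOUDTOWER_SDK_USERSOURCE=LOCAL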
func GetDefaultCluster(client *apiclient.Cloudtower, name string) *models.Cluster {
params := cluster.NewGetClustersParams()
params.RequestBody = &models.GetClustersRequestBody{
Where: &models.ClusterWhereInput{
Name: String(name),
},
First: Int32(1),
}
res, err := client.Cluster.GetClusters(params)
if err != nil {
panic(err.Error())
}
if len(res.Payload) == 0 {
panic("Deafult cluster not found")
}
return res.Payload[0]
}
func GetDefaultVlan(client *apiclient.Cloudtower, clusterId *string) *models.Vlan {
params := vlan.NewGetVlansParams()
params.RequestBody = &models.GetVlansRequestBody{
Where: &models.VlanWhereInput{
Name: String("default"),
Vds: &models.VdsWhereInput{
Cluster: &models.ClusterWhereInput{
ID: clusterId,
},
Internal: Bool(false),
},
},
First: Int32(1),
}
res, err := client.Vlan.GetVlans(params)
if err != nil {
panic(err.Error())
}
if len(res.Payload) == 0 {
panic("Deafult vlan not found")
}
return res.Payload[0]
}
| [
"\"CLOUDTOWER_SDK_ENDPOINT\"",
"\"CLOUDTOWER_SDK_USERNAME\"",
"\"CLOUDTOWER_SDK_PASSWORD\"",
"\"CLOUDTOWER_SDK_USERSOURCE\""
] | [] | [
"CLOUDTOWER_SDK_USERNAME",
"CLOUDTOWER_SDK_ENDPOINT",
"CLOUDTOWER_SDK_PASSWORD",
"CLOUDTOWER_SDK_USERSOURCE"
] | [] | ["CLOUDTOWER_SDK_USERNAME", "CLOUDTOWER_SDK_ENDPOINT", "CLOUDTOWER_SDK_PASSWORD", "CLOUDTOWER_SDK_USERSOURCE"] | go | 4 | 0 | |
src/main/java/org/elasolutions/utils/SystemDump.java | package org.elasolutions.utils;
import java.awt.GraphicsDevice;
import java.awt.GraphicsEnvironment;
import java.text.DecimalFormat;
import java.text.NumberFormat;
import java.util.Properties;
/**
* SystemDump dumps information about the operating system in a consistent and holistic manner.
*
* @author Malcolm G. Davis
* @version 1.0
*/
public class SystemDump {
/**
* <p>dump.</p>
*
* @return a {@link java.lang.String} object.
*/
public static String dump() {
StringBuilder out = new StringBuilder();
out.append("Environment variables:\r\n");
java.util.Map<String,String>map = System.getenv();
for( Object key : map.keySet() ) {
out.append(key +"="+ map.get(key)+"\r\n");
}
out.append("\r\nSystem variables:\r\n");
Properties properties = System.getProperties();
for( Object key : properties.keySet() ) {
out.append(key +"="+ properties.get(key)+"\r\n");
}
out.append("\r\nRuntime:\r\n");
double divider = 1000*1000;
Runtime runtime = Runtime.getRuntime();
long usedMemory = runtime.totalMemory() - runtime.freeMemory();
NumberFormat formatter = new DecimalFormat("#0");
out.append("maxMemory="+ formatter.format(runtime.maxMemory()/divider) +" MB\r\n");
out.append("totalMemory="+ formatter.format(runtime.totalMemory()/divider) +" MB\r\n");
out.append("freeMemory="+ formatter.format(runtime.freeMemory()/divider) +" MB\r\n");
out.append("usedMemory="+ formatter.format(usedMemory/divider) +" MB\r\n");
out.append("availableProcessors="+ runtime.availableProcessors() +"\r\n");
out.append("\r\nGraphics environment:\r\n");
GraphicsEnvironment ge = GraphicsEnvironment.getLocalGraphicsEnvironment();
out.append("maximumWindowBounds="+ ge.getMaximumWindowBounds().toString() +"\r\n");
GraphicsDevice[] gs = ge.getScreenDevices();
out.append("screenCount="+ gs.length +"\r\n");
for (int j = 0; j < gs.length; j++) {
GraphicsDevice gd = gs[j];
out.append("screen="+ gd.getIDstring()+"\r\n");
out.append("size="+ gd.getDefaultConfiguration().getBounds().toString()+"\r\n");
}
return out.toString();
}
}
| [] | [] | [] | [] | [] | java | 0 | 0 | |
app/di/provider_config.go | package di
import (
"database/sql"
"log"
"os"
_ "github.com/go-sql-driver/mysql"
"github.com/google/wire"
)
func DSN() string {
dsn := os.Getenv("DSN")
log.Printf("DSN is %s", dsn)
if dsn != "" {
return dsn
}
return "admin:admin@(127.0.0.1:3307)/api-server?parseTime=true"
}
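// Example invocation (illustrative values only; the binary name and host are
// assumptions): overriding the default local MySQL connection by exporting the
// DSN environment variable that DSN() reads, in the same format as the fallback:
//
//	DSN="user:pass@(db.example.internal:3306)/api-server?parseTime=true" ./api-server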
func provideDB() *sql.DB {
db, err := sql.Open("mysql", DSN())
if err != nil {
log.Fatalf("failed sql open : %v", err)
}
return db
}
var ConfigSet = wire.NewSet(
provideDB,
)
| [
"\"DSN\""
] | [] | [
"DSN"
] | [] | ["DSN"] | go | 1 | 0 | |
example/github/main.go | package main
import (
"context"
"fmt"
"net/http"
"os"
"github.com/kodesmil/gqlgenc/client"
"github.com/kodesmil/gqlgenc/example/github/gen"
)
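// The example expects a GitHub personal access token in the GITHUB_TOKEN
// environment variable, e.g. (illustrative token value, not a real one):
//
//	GITHUB_TOKEN=ghp_xxxxxxxx go run ./example/github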
func main() {
// This example only reads public repositories. You don't need to select any scopes.
token := os.Getenv("GITHUB_TOKEN")
authHeader := func(req *http.Request) {
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
}
ctx := context.Background()
githubClient := &gen.Client{
Client: client.NewClient(http.DefaultClient, "https://api.github.com/graphql", authHeader),
}
getUser, err := githubClient.GetUser(ctx, 10, 10)
if err != nil {
if handledError, ok := err.(*client.ErrorResponse); ok {
fmt.Fprintf(os.Stderr, "handled error: %s\n", handledError.Error())
} else {
fmt.Fprintf(os.Stderr, "unhandled error: %s\n", err.Error())
}
os.Exit(1)
}
fmt.Println(*getUser.Viewer.Name, getUser.Viewer.Repositories.Nodes[0].Name)
for _, repository := range getUser.Viewer.Repositories.Nodes {
fmt.Println(repository.Name)
for _, language := range repository.Languages.Nodes {
fmt.Println(language.Name)
}
}
}
| [
"\"GITHUB_TOKEN\""
] | [] | [
"GITHUB_TOKEN"
] | [] | ["GITHUB_TOKEN"] | go | 1 | 0 | |
pkg/cmd/create/create_cluster_aws.go | package create
import (
"fmt"
"io/ioutil"
"net/url"
"os"
"strings"
"time"
"github.com/jenkins-x/jx/pkg/cmd/helper"
survey "gopkg.in/AlecAivazis/survey.v1"
"github.com/jenkins-x/jx/pkg/cloud"
"github.com/jenkins-x/jx/pkg/cloud/amazon"
"github.com/jenkins-x/jx/pkg/cmd/opts"
"github.com/jenkins-x/jx/pkg/cmd/templates"
"github.com/jenkins-x/jx/pkg/features"
"github.com/jenkins-x/jx/pkg/kube"
"github.com/jenkins-x/jx/pkg/log"
"github.com/jenkins-x/jx/pkg/util"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/util/uuid"
)
const (
optionZones = "zones"
)
// CreateClusterAWSOptions contains the CLI flags
type CreateClusterAWSOptions struct {
CreateClusterOptions
Flags CreateClusterAWSFlags
}
type CreateClusterAWSFlags struct {
Profile string
Region string
ClusterName string
NodeCount string
KubeVersion string
Zones string
InsecureDockerRegistry string
UseRBAC bool
TerraformDirectory string
NodeSize string
MasterSize string
State string
SSHPublicKey string
Tags string
UseSpotinst bool
UseSpotinstOcean bool
}
var (
createClusterAWSLong = templates.LongDesc(`
This command creates a new Kubernetes cluster on Amazon Web Service (AWS) using kops, installing required local dependencies and provisions the
Jenkins X platform.
AWS manages your hosted Kubernetes environment via kops, making it quick and easy to deploy and
manage containerized applications without container orchestration expertise. It also eliminates the burden of
ongoing operations and maintenance by provisioning, upgrading, and scaling resources on demand, without taking
your applications offline.
`)
createClusterAWSExample = templates.Examples(`
# to create a new Kubernetes cluster with Jenkins X in your default zones (from $AWS_AVAILABILITY_ZONES)
jx create cluster aws
# to specify the zones
jx create cluster aws --zones us-west-2a,us-west-2b,us-west-2c
# to output terraform configuration
jx create cluster aws --terraform /Users/jx/jx-infra
`)
)
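// Note on defaults (derived from the flag handling below; the values shown are
// illustrative): when --zones or --state are omitted, the command falls back to
// the AWS_AVAILABILITY_ZONES and KOPS_STATE_STORE environment variables, e.g.
//
//	export AWS_AVAILABILITY_ZONES=us-west-2a,us-west-2b,us-west-2c
//	export KOPS_STATE_STORE=s3://my-kops-state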
// NewCmdCreateClusterAWS creates the command
func NewCmdCreateClusterAWS(commonOpts *opts.CommonOptions) *cobra.Command {
options := CreateClusterAWSOptions{
CreateClusterOptions: createCreateClusterOptions(commonOpts, cloud.AWS),
}
cmd := &cobra.Command{
Use: "aws",
Short: "Create a new Kubernetes cluster on AWS with kops",
Long: createClusterAWSLong,
Example: createClusterAWSExample,
PreRun: func(cmd *cobra.Command, args []string) {
err := features.IsEnabled(cmd)
helper.CheckErr(err)
err = options.InstallOptions.CheckFeatures()
helper.CheckErr(err)
},
Run: func(cmd *cobra.Command, args []string) {
options.Cmd = cmd
options.Args = args
err := options.Run()
helper.CheckErr(err)
},
}
options.addCreateClusterFlags(cmd)
cmd.Flags().StringVarP(&options.Flags.Profile, "profile", "", "", "AWS profile to use.")
cmd.Flags().StringVarP(&options.Flags.Region, "region", "", "", "AWS region to use. Default: "+amazon.DefaultRegion)
cmd.Flags().BoolVarP(&options.Flags.UseRBAC, "rbac", "r", true, "whether to enable RBAC on the Kubernetes cluster")
cmd.Flags().StringVarP(&options.Flags.ClusterName, optionClusterName, "n", "aws1", "The name of this cluster.")
cmd.Flags().StringVarP(&options.Flags.NodeCount, optionNodes, "o", "", "node count")
cmd.Flags().StringVarP(&options.Flags.KubeVersion, optionKubernetesVersion, "v", "", "Kubernetes version")
cmd.Flags().StringVarP(&options.Flags.Zones, optionZones, "z", "", "Availability Zones. Defaults to $AWS_AVAILABILITY_ZONES")
cmd.Flags().StringVarP(&options.Flags.InsecureDockerRegistry, "insecure-registry", "", "100.64.0.0/10", "The insecure Docker registries to allow")
cmd.Flags().StringVarP(&options.Flags.TerraformDirectory, "terraform", "t", "", "The directory to save Terraform configuration.")
cmd.Flags().StringVarP(&options.Flags.NodeSize, "node-size", "", "", "The size of a node in the kops created cluster.")
cmd.Flags().StringVarP(&options.Flags.MasterSize, "master-size", "", "", "The size of a master in the kops created cluster.")
cmd.Flags().StringVarP(&options.Flags.State, "state", "", "", "The S3 bucket used to store the state of the cluster.")
cmd.Flags().StringVarP(&options.Flags.SSHPublicKey, "ssh-public-key", "", "", "SSH public key to use for nodes (default \"~/.ssh/id_rsa.pub\").")
cmd.Flags().StringVarP(&options.Flags.Tags, "tags", "", "", "A list of KV pairs used to tag all instance groups in AWS (eg \"Owner=John Doe,Team=Some Team\").")
cmd.Flags().BoolVarP(&options.Flags.UseSpotinst, "spotinst", "", false, "Whether to enable Spotinst integration")
cmd.Flags().BoolVarP(&options.Flags.UseSpotinstOcean, "spotinst-ocean", "", false, "Whether to use Spotinst Ocean instance groups")
return cmd
}
// Run runs the command
func (o *CreateClusterAWSOptions) Run() error {
surveyOpts := survey.WithStdio(o.In, o.Out, o.Err)
var deps []string
d := opts.BinaryShouldBeInstalled("kops")
if d != "" {
deps = append(deps, d)
}
err := o.InstallMissingDependencies(deps)
if err != nil {
log.Logger().Errorf("%v\nPlease fix the error or install manually then try again", err)
os.Exit(-1)
}
flags := &o.Flags
if flags.NodeCount == "" {
prompt := &survey.Input{
Message: "nodes",
Default: "3",
Help: "number of nodes",
}
survey.AskOne(prompt, &flags.NodeCount, nil, surveyOpts)
}
/*
kubeVersion := o.Flags.KubeVersion
if kubeVersion == "" {
prompt := &survey.Input{
Message: "Kubernetes version",
Default: kubeVersion,
Help: "The release version of Kubernetes to install in the cluster",
}
survey.AskOne(prompt, &kubeVersion, nil, surveyOpts)
}
*/
zones := flags.Zones
if zones == "" {
zones = os.Getenv("AWS_AVAILABILITY_ZONES")
if zones == "" {
availabilityZones, err := amazon.AvailabilityZones()
if err != nil {
return err
}
c := len(availabilityZones)
if c > 0 {
zones, err = util.PickNameWithDefault(availabilityZones, "Pick Availability Zone: ", availabilityZones[c-1], "", o.In, o.Out, o.Err)
if err != nil {
return err
}
}
}
if zones == "" {
log.Logger().Warnf("No AWS_AVAILABILITY_ZONES environment variable is defined or %s option!", optionZones)
prompt := &survey.Input{
Message: "Availability Zones",
Default: "",
Help: "The AWS Availability Zones to use for the Kubernetes cluster",
}
err = survey.AskOne(prompt, &zones, survey.Required, surveyOpts)
if err != nil {
return err
}
}
}
if zones == "" {
return fmt.Errorf("No Availability Zones provided!")
}
accountId, _, err := amazon.GetAccountIDAndRegion(o.Flags.Profile, o.Flags.Region)
if err != nil {
return err
}
state := flags.State
if state == "" {
kopsState := os.Getenv("KOPS_STATE_STORE")
if kopsState != "" {
if strings.Contains(kopsState, "://") {
state = kopsState
} else {
state = "s3://" + kopsState
}
} else {
bucketName := "kops-state-" + accountId + "-" + string(uuid.NewUUID())
log.Logger().Infof("Creating S3 bucket %s to store kops state", util.ColorInfo(bucketName))
location, err := amazon.CreateS3Bucket(bucketName, o.Flags.Profile, o.Flags.Region)
if err != nil {
return err
}
u, err := url.Parse(location)
if err != nil {
return fmt.Errorf("Failed to parse S3 bucket location URL %s: %s", location, err)
}
state = u.Hostname()
idx := strings.Index(state, ".")
if idx > 0 {
state = state[0:idx]
}
state = "s3://" + state
log.Logger().Infof("To work more easily with kops on the command line you may wish to run the following: %s", util.ColorInfo("export KOPS_STATE_STORE="+state))
}
}
o.Flags.State = state
name := flags.ClusterName
if name == "" {
name = "aws1"
}
if !strings.Contains(name, ".") {
name = name + ".cluster.k8s.local"
}
args := []string{"create", "cluster", "--name", name}
if flags.NodeCount != "" {
args = append(args, "--node-count", flags.NodeCount)
}
if flags.KubeVersion != "" {
args = append(args, "--kubernetes-version", flags.KubeVersion)
}
if flags.NodeSize != "" {
args = append(args, "--node-size", flags.NodeSize)
}
if flags.MasterSize != "" {
args = append(args, "--master-size", flags.MasterSize)
}
if flags.SSHPublicKey != "" {
args = append(args, "--ssh-public-key", flags.SSHPublicKey)
}
if flags.Tags != "" {
args = append(args, "--cloud-labels", flags.Tags)
}
auth := "RBAC"
if !flags.UseRBAC {
auth = "AlwaysAllow"
}
args = append(args, "--authorization", auth, "--zones", zones, "--yes")
if flags.UseSpotinst {
feature := "Spotinst"
if flags.UseSpotinstOcean {
feature += ",SpotinstOcean"
}
features := os.Getenv("KOPS_FEATURE_FLAGS")
if features != "" {
features = fmt.Sprintf("%s,%s", features, feature)
} else {
features = feature
}
if err := os.Setenv("KOPS_FEATURE_FLAGS", features); err != nil {
return err
}
}
if flags.TerraformDirectory != "" {
args = append(args, "--out", flags.TerraformDirectory, "--target=terraform")
}
// TODO allow add custom args?
log.Logger().Info("Creating cluster...")
err = o.runKops(args...)
if err != nil {
return err
}
log.Logger().Infof("\nkops has created cluster %s it will take a minute or so to startup", util.ColorInfo(name))
log.Logger().Infof("You can check on the status in another terminal via the command: %s", util.ColorStatus("kops validate cluster"))
time.Sleep(5 * time.Second)
insecureRegistries := flags.InsecureDockerRegistry
if insecureRegistries != "" {
log.Logger().Warn("Waiting for the Cluster configuration...")
igJson, err := o.waitForClusterJson(name)
if err != nil {
return fmt.Errorf("Failed to wait for the Cluster JSON: %s\n", err)
}
log.Logger().Infof("Loaded Cluster JSON: %s", igJson)
err = o.modifyClusterConfigJson(igJson, insecureRegistries)
if err != nil {
return err
}
log.Logger().Info("Cluster configuration updated")
}
log.Logger().Info("Waiting for the Kubernetes cluster to be ready so we can continue...")
err = o.waitForClusterToComeUp()
if err != nil {
return fmt.Errorf("Failed to wait for Kubernetes cluster to start: %s\n", err)
}
log.Blank()
log.Logger().Info("Waiting to for a valid kops cluster state...")
err = o.waitForClusterValidation()
if err != nil {
return fmt.Errorf("Failed to successfully validate kops cluster state: %s\n", err)
}
log.Logger().Info("State of kops cluster: OK")
log.Blank()
region, err := amazon.ResolveRegion(o.Flags.Profile, o.Flags.Region)
if err != nil {
return err
}
o.InstallOptions.setInstallValues(map[string]string{
kube.Region: region,
})
log.Logger().Info("Initialising cluster ...")
return o.initAndInstall(cloud.AWS)
}
func (o *CreateClusterAWSOptions) waitForClusterJson(clusterName string) (string, error) {
jsonOutput := ""
f := func() error {
args := []string{"get", "cluster", "--name", clusterName, "-o", "json"}
if o.Flags.State != "" {
args = append(args, "--state", o.Flags.State)
}
text, err := o.GetCommandOutput("", "kops", args...)
if err != nil {
return err
}
jsonOutput = text
return nil
}
err := o.RetryQuiet(200, time.Second*10, f)
return jsonOutput, err
}
func (o *CreateClusterAWSOptions) waitForClusterToComeUp() error {
f := func() error {
return o.RunCommandQuietly("kubectl", "get", "node")
}
return o.RetryQuiet(2000, time.Second*10, f)
}
// waitForClusterValidation retries running kops validate cluster, which is necessary
// because it can take a while for all the machines and nodes to join the cluster and be ready.
func (o *CreateClusterAWSOptions) waitForClusterValidation() error {
f := func() error {
args := []string{"validate", "cluster"}
if o.Flags.State != "" {
args = append(args, "--state", o.Flags.State)
}
return o.RunCommandQuietly("kops", args...)
}
return o.RetryQuiet(25, time.Second*15, f)
}
func (o *CreateClusterAWSOptions) modifyClusterConfigJson(json string, insecureRegistries string) error {
if insecureRegistries == "" {
return nil
}
newJson, err := kube.EnableInsecureRegistry(json, insecureRegistries)
if err != nil {
return fmt.Errorf("Failed to modify Cluster JSON to add insecure registries %s: %s", insecureRegistries, err)
}
if newJson == json {
return nil
}
log.Logger().Infof("new json: %s", newJson)
tmpFile, err := ioutil.TempFile("", "kops-ig-json-")
if err != nil {
return err
}
fileName := tmpFile.Name()
err = ioutil.WriteFile(fileName, []byte(newJson), util.DefaultWritePermissions)
if err != nil {
return fmt.Errorf("Failed to write InstanceGroup JSON %s: %s", fileName, err)
}
log.Logger().Infof("Updating Cluster configuration to enable insecure Docker registries %s", util.ColorInfo(insecureRegistries))
err = o.runKops("replace", "-f", fileName)
if err != nil {
return err
}
log.Logger().Info("Updating the cluster")
err = o.runKops("update", "cluster", "--yes")
if err != nil {
return err
}
log.Logger().Info("Rolling update the cluster")
err = o.runKops("rolling-update", "cluster", "--cloudonly", "--yes")
if err != nil {
// lets not fail to install if the rolling upgrade fails
log.Logger().Warnf("Failed to perform rolling upgrade: %s", err)
//return err
}
return nil
}
func (o *CreateClusterAWSOptions) runKops(args ...string) error {
if o.Flags.State != "" {
args = append(args, "--state", o.Flags.State)
}
log.Logger().Infof("running command: %s", util.ColorInfo("kops "+strings.Join(args, " ")))
return o.RunCommandVerbose("kops", args...)
}
| [
"\"AWS_AVAILABILITY_ZONES\"",
"\"KOPS_STATE_STORE\"",
"\"KOPS_FEATURE_FLAGS\""
] | [] | [
"AWS_AVAILABILITY_ZONES",
"KOPS_FEATURE_FLAGS",
"KOPS_STATE_STORE"
] | [] | ["AWS_AVAILABILITY_ZONES", "KOPS_FEATURE_FLAGS", "KOPS_STATE_STORE"] | go | 3 | 0 | |
pkg/daemon/daemon.go | package daemon
import (
"bytes"
"context"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"math/rand"
"os"
"os/exec"
"path/filepath"
"reflect"
"strconv"
"strings"
"sync"
"time"
"github.com/golang/glog"
"golang.org/x/time/rate"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
listerv1 "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubectl/pkg/drain"
// "k8s.io/client-go/kubernetes/scheme"
sriovnetworkv1 "github.com/openshift/sriov-network-operator/pkg/apis/sriovnetwork/v1"
snclientset "github.com/openshift/sriov-network-operator/pkg/client/clientset/versioned"
sninformer "github.com/openshift/sriov-network-operator/pkg/client/informers/externalversions"
"github.com/openshift/sriov-network-operator/pkg/utils"
)
const (
// updateDelay is the baseline speed at which we react to changes. We don't
// need to react in milliseconds as any change would involve rebooting the node.
updateDelay = 5 * time.Second
// maxUpdateBackoff is the maximum time to react to a change as we back off
// in the face of errors.
maxUpdateBackoff = 60 * time.Second
)
type Message struct {
syncStatus string
lastSyncError string
}
type Daemon struct {
// name is the node name.
name string
namespace string
client snclientset.Interface
// kubeClient allows interaction with Kubernetes, including the node we are running on.
kubeClient *kubernetes.Clientset
nodeState *sriovnetworkv1.SriovNetworkNodeState
LoadedPlugins map[string]VendorPlugin
// channel used by callbacks to signal Run() of an error
exitCh chan<- error
// channel used to ensure all spawned goroutines exit when we exit.
stopCh <-chan struct{}
syncCh <-chan struct{}
refreshCh chan<- Message
dpReboot bool
mu *sync.Mutex
drainer *drain.Helper
node *corev1.Node
drainable bool
nodeLister listerv1.NodeLister
workqueue workqueue.RateLimitingInterface
}
type workItem struct {
old, new *sriovnetworkv1.SriovNetworkNodeState
}
const (
scriptsPath = "/bindata/scripts/enable-rdma.sh"
annoKey = "sriovnetwork.openshift.io/state"
annoIdle = "Idle"
annoDraining = "Draining"
)
var namespace = os.Getenv("NAMESPACE")
var pluginsPath = os.Getenv("PLUGINSPATH")
// writer implements io.Writer interface as a pass-through for klog.
type writer struct {
logFunc func(args ...interface{})
}
// Write passes string(p) into writer's logFunc and always returns len(p)
func (w writer) Write(p []byte) (n int, err error) {
w.logFunc(string(p))
return len(p), nil
}
func New(
nodeName string,
client snclientset.Interface,
kubeClient *kubernetes.Clientset,
exitCh chan<- error,
stopCh <-chan struct{},
syncCh <-chan struct{},
refreshCh chan<- Message,
) *Daemon {
return &Daemon{
name: nodeName,
client: client,
kubeClient: kubeClient,
exitCh: exitCh,
stopCh: stopCh,
syncCh: syncCh,
refreshCh: refreshCh,
drainable: true,
nodeState: &sriovnetworkv1.SriovNetworkNodeState{},
drainer: &drain.Helper{
Client: kubeClient,
Force: true,
IgnoreAllDaemonSets: true,
DeleteLocalData: true,
GracePeriodSeconds: -1,
Timeout: 90 * time.Second,
OnPodDeletedOrEvicted: func(pod *corev1.Pod, usingEviction bool) {
verbStr := "Deleted"
if usingEviction {
verbStr = "Evicted"
}
glog.Info(fmt.Sprintf("%s pod from Node", verbStr),
"pod", fmt.Sprintf("%s/%s", pod.Name, pod.Namespace))
},
Out: writer{glog.Info},
ErrOut: writer{glog.Error},
},
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.NewMaxOfRateLimiter(
&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(updateDelay), 1)},
workqueue.NewItemExponentialFailureRateLimiter(1*time.Second, maxUpdateBackoff)), "SriovNetworkNodeState"),
}
}
// Run the config daemon
func (dn *Daemon) Run(stopCh <-chan struct{}, exitCh <-chan error) error {
glog.V(0).Info("Run(): start daemon")
// Only watch own SriovNetworkNodeState CR
defer utilruntime.HandleCrash()
defer dn.workqueue.ShutDown()
tryEnableRdma()
if err := tryCreateUdevRule(); err != nil {
return err
}
var timeout int64 = 5
dn.mu = &sync.Mutex{}
informerFactory := sninformer.NewFilteredSharedInformerFactory(dn.client,
time.Second*15,
namespace,
func(lo *metav1.ListOptions) {
lo.FieldSelector = "metadata.name=" + dn.name
lo.TimeoutSeconds = &timeout
},
)
informer := informerFactory.Sriovnetwork().V1().SriovNetworkNodeStates().Informer()
informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: dn.enqueueNodeState,
UpdateFunc: func(old, new interface{}) {
dn.enqueueNodeState(new)
},
})
cfgInformerFactory := sninformer.NewFilteredSharedInformerFactory(dn.client,
time.Second*30,
namespace,
func(lo *metav1.ListOptions) {
lo.FieldSelector = "metadata.name=" + "default"
},
)
cfgInformer := cfgInformerFactory.Sriovnetwork().V1().SriovOperatorConfigs().Informer()
cfgInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: dn.operatorConfigAddHandler,
UpdateFunc: dn.operatorConfigChangeHandler,
})
rand.Seed(time.Now().UnixNano())
nodeInformerFactory := informers.NewSharedInformerFactory(dn.kubeClient,
time.Second*15,
)
dn.nodeLister = nodeInformerFactory.Core().V1().Nodes().Lister()
nodeInformer := nodeInformerFactory.Core().V1().Nodes().Informer()
nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: dn.nodeAddHandler,
UpdateFunc: dn.nodeUpdateHandler,
})
go cfgInformer.Run(dn.stopCh)
go nodeInformer.Run(dn.stopCh)
time.Sleep(5 * time.Second)
go informer.Run(dn.stopCh)
if ok := cache.WaitForCacheSync(stopCh, cfgInformer.HasSynced, nodeInformer.HasSynced, informer.HasSynced); !ok {
return fmt.Errorf("failed to wait for caches to sync")
}
glog.Info("Starting workers")
// Launch one workers to process
go wait.Until(dn.runWorker, time.Second, stopCh)
glog.Info("Started workers")
for {
select {
case <-stopCh:
glog.V(0).Info("Run(): stop daemon")
return nil
case err := <-exitCh:
glog.Warningf("Got an error: %v", err)
dn.refreshCh <- Message{
syncStatus: "Failed",
lastSyncError: err.Error(),
}
return err
}
}
}
func (dn *Daemon) runWorker() {
for dn.processNextWorkItem() {
}
}
func (dn *Daemon) enqueueNodeState(obj interface{}) {
var ns *sriovnetworkv1.SriovNetworkNodeState
var ok bool
if ns, ok = obj.(*sriovnetworkv1.SriovNetworkNodeState); !ok {
utilruntime.HandleError(fmt.Errorf("expected SriovNetworkNodeState but got %#v", obj))
return
}
key := ns.GetGeneration()
dn.workqueue.Add(key)
}
func (dn *Daemon) processNextWorkItem() bool {
glog.V(2).Infof("worker queue size: %d", dn.workqueue.Len())
obj, shutdown := dn.workqueue.Get()
glog.V(2).Infof("get item: %d", obj.(int64))
if shutdown {
return false
}
// We wrap this block in a func so we can defer c.workqueue.Done.
err := func(obj interface{}) error {
// We call Done here so the workqueue knows we have finished
// processing this item.
defer dn.workqueue.Done(obj)
var key int64
var ok bool
if key, ok = obj.(int64); !ok {
// As the item in the workqueue is actually invalid, we call
// Forget here.
dn.workqueue.Forget(obj)
utilruntime.HandleError(fmt.Errorf("expected workItem in workqueue but got %#v", obj))
return nil
}
var err error
err = dn.nodeStateSyncHandler(key)
if err != nil {
// Report the error message and put the item back into the work queue for retry.
dn.refreshCh <- Message{
syncStatus: "Failed",
lastSyncError: err.Error(),
}
<-dn.syncCh
dn.workqueue.AddRateLimited(key)
return fmt.Errorf("error syncing: %s, requeuing", err.Error())
}
// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
dn.workqueue.Forget(obj)
glog.Infof("Successfully synced")
return nil
}(obj)
if err != nil {
utilruntime.HandleError(err)
}
return true
}
func (dn *Daemon) nodeAddHandler(obj interface{}) {
dn.nodeUpdateHandler(nil, obj)
}
func (dn *Daemon) nodeUpdateHandler(old, new interface{}) {
node, err := dn.nodeLister.Get(dn.name)
if errors.IsNotFound(err) {
glog.V(2).Infof("nodeUpdateHandler(): node %v has been deleted", dn.name)
return
}
dn.node = node.DeepCopy()
nodes, err := dn.nodeLister.List(labels.Everything())
if err != nil {
return
}
for _, node := range nodes {
if node.GetName() != dn.name && node.Annotations[annoKey] == annoDraining {
glog.V(2).Infof("nodeUpdateHandler(): node %s is draining", node.Name)
dn.drainable = false
return
}
}
glog.V(2).Infof("nodeUpdateHandler(): no other node is draining")
dn.drainable = true
}
func (dn *Daemon) operatorConfigAddHandler(obj interface{}) {
dn.operatorConfigChangeHandler(&sriovnetworkv1.SriovOperatorConfig{}, obj)
}
func (dn *Daemon) operatorConfigChangeHandler(old, new interface{}) {
newCfg := new.(*sriovnetworkv1.SriovOperatorConfig)
var level = glog.Level(newCfg.Spec.LogLevel)
if level != flag.Lookup("v").Value.(flag.Getter).Get() {
glog.Infof("Set log verbose level to: %d", level)
flag.Set("v", level.String())
}
}
func (dn *Daemon) nodeStateSyncHandler(generation int64) error {
var err error
glog.V(0).Infof("nodeStateSyncHandler(): new generation is %d", generation)
// Get the latest NodeState
var latestState *sriovnetworkv1.SriovNetworkNodeState
latestState, err = dn.client.SriovnetworkV1().SriovNetworkNodeStates(namespace).Get(context.Background(), dn.name, metav1.GetOptions{})
if err != nil {
glog.Warningf("nodeStateSyncHandler(): Failed to fetch node state %s: %v", dn.name, err)
return err
}
latest := latestState.GetGeneration()
if dn.nodeState.GetGeneration() == latest {
glog.V(0).Infof("nodeStateSyncHandler(): Interface not changed")
if latestState.Status.LastSyncError != "" ||
latestState.Status.SyncStatus != "Succeeded" {
dn.refreshCh <- Message{
syncStatus: "Succeeded",
lastSyncError: "",
}
}
return nil
}
dn.refreshCh <- Message{
syncStatus: "InProgress",
lastSyncError: "",
}
	// load plugins if they have not been loaded yet
if len(dn.LoadedPlugins) == 0 {
err = dn.loadVendorPlugins(latestState)
if err != nil {
glog.Errorf("nodeStateSyncHandler(): failed to load vendor plugin: %v", err)
return err
}
}
reqReboot := false
reqDrain := false
for k, p := range dn.LoadedPlugins {
d, r := false, false
if dn.nodeState.GetName() == "" {
d, r, err = p.OnNodeStateAdd(latestState)
glog.V(0).Infof("nodeStateSyncHandler(): plugin %s: reqDrain %v, reqReboot %v", k, d, r)
} else {
d, r, err = p.OnNodeStateChange(dn.nodeState, latestState)
glog.V(0).Infof("nodeStateSyncHandler(): plugin %s: reqDrain %v, reqReboot %v", k, d, r)
}
if err != nil {
glog.Errorf("nodeStateSyncHandler(): plugin %s error: %v", k, err)
return err
}
reqDrain = reqDrain || d
reqReboot = reqReboot || r
}
glog.V(0).Infof("nodeStateSyncHandler(): reqDrain %v, reqReboot %v", reqDrain, reqReboot)
if reqDrain {
glog.Info("nodeStateSyncHandler(): drain node")
if err := dn.drainNode(dn.name); err != nil {
return err
}
}
for k, p := range dn.LoadedPlugins {
if k != GenericPlugin {
err := p.Apply()
if err != nil {
glog.Errorf("nodeStateSyncHandler(): plugin %s fail to apply: %v", k, err)
return err
}
}
}
if len(dn.LoadedPlugins) > 1 && !reqReboot {
// Apply generic_plugin last
err = dn.LoadedPlugins[GenericPlugin].Apply()
if err != nil {
glog.Errorf("nodeStateSyncHandler(): generic_plugin fail to apply: %v", err)
return err
}
}
if reqReboot {
glog.Info("nodeStateSyncHandler(): reboot node")
rebootNode()
return nil
}
// restart device plugin pod
if reqDrain || latestState.Spec.DpConfigVersion != dn.nodeState.Spec.DpConfigVersion {
glog.Info("nodeStateSyncHandler(): restart device plugin pod")
if err := dn.restartDevicePluginPod(); err != nil {
glog.Errorf("nodeStateSyncHandler(): fail to restart device plugin pod: %v", err)
return err
}
}
if anno, ok := dn.node.Annotations[annoKey]; ok && anno == annoDraining {
if err := dn.completeDrain(); err != nil {
glog.Errorf("nodeStateSyncHandler(): failed to complete draining: %v", err)
return err
}
} else if !ok {
if err := dn.annotateNode(dn.name, annoIdle); err != nil {
glog.Errorf("nodeStateSyncHandler(): failed to annotate node: %v", err)
return err
}
}
glog.Info("nodeStateSyncHandler(): sync succeeded")
dn.nodeState = latestState.DeepCopy()
dn.refreshCh <- Message{
syncStatus: "Succeeded",
lastSyncError: "",
}
// wait for writer to refresh the status
<-dn.syncCh
return nil
}
func (dn *Daemon) completeDrain() error {
if err := drain.RunCordonOrUncordon(dn.drainer, dn.node, false); err != nil {
return err
}
if err := dn.annotateNode(dn.name, annoIdle); err != nil {
glog.Errorf("drainNode(): failed to annotate node: %v", err)
return err
}
return nil
}
func (dn *Daemon) restartDevicePluginPod() error {
dn.mu.Lock()
defer dn.mu.Unlock()
glog.V(2).Infof("restartDevicePluginPod(): try to restart device plugin pod")
var podToDelete string
pods, err := dn.kubeClient.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{
LabelSelector: "app=sriov-device-plugin",
FieldSelector: "spec.nodeName=" + dn.name,
})
if err != nil {
if errors.IsNotFound(err) {
glog.Info("restartDevicePluginPod(): device plugin pod exited")
return nil
}
glog.Warningf("restartDevicePluginPod(): Failed to list device plugin pod: %s, retrying", err)
return err
}
if len(pods.Items) == 0 {
glog.Info("restartDevicePluginPod(): device plugin pod exited")
return nil
}
podToDelete = pods.Items[0].Name
glog.V(2).Infof("restartDevicePluginPod(): Found device plugin pod %s, deleting it", podToDelete)
err = dn.kubeClient.CoreV1().Pods(namespace).Delete(context.Background(), podToDelete, metav1.DeleteOptions{})
if errors.IsNotFound(err) {
glog.Info("restartDevicePluginPod(): pod to delete not found")
return nil
}
if err != nil {
glog.Errorf("restartDevicePluginPod(): Failed to delete device plugin pod: %s, retrying", err)
return err
}
if err := wait.PollImmediateUntil(3*time.Second, func() (bool, error) {
_, err := dn.kubeClient.CoreV1().Pods(namespace).Get(context.Background(), podToDelete, metav1.GetOptions{})
if errors.IsNotFound(err) {
glog.Info("restartDevicePluginPod(): device plugin pod exited")
return true, nil
}
if err != nil {
glog.Warningf("restartDevicePluginPod(): Failed to check for device plugin exit: %s, retrying", err)
} else {
glog.Infof("restartDevicePluginPod(): waiting for device plugin %s to exit", podToDelete)
}
return false, nil
}, dn.stopCh); err != nil {
glog.Errorf("restartDevicePluginPod(): failed to wait for checking pod deletion: %v", err)
return err
}
return nil
}
func (dn *Daemon) loadVendorPlugins(ns *sriovnetworkv1.SriovNetworkNodeState) error {
pl := registerPlugins(ns)
pl = append(pl, GenericPlugin)
dn.LoadedPlugins = make(map[string]VendorPlugin)
for _, pn := range pl {
filePath := filepath.Join(pluginsPath, pn+".so")
glog.Infof("loadVendorPlugins(): try to load plugin %s", pn)
p, err := loadPlugin(filePath)
if err != nil {
glog.Errorf("loadVendorPlugins(): fail to load plugin %s: %v", filePath, err)
return err
}
dn.LoadedPlugins[p.Name()] = p
}
return nil
}
func rebootNode() {
glog.Infof("rebootNode(): trigger node reboot")
exit, err := utils.Chroot("/host")
if err != nil {
glog.Errorf("rebootNode(): %v", err)
}
defer exit()
	// creates a new transient systemd unit to reboot the system.
	// We explicitly try to stop kubelet.service first, before anything else; this
	// way we ensure the rest of the system stays running, because kubelet may need
	// to do a "graceful" shutdown by e.g. de-registering with a load balancer.
	// However, note we use `;` instead of `&&` so we keep rebooting even
	// if kubelet fails to shut down - that way the machine will still eventually reboot
	// as systemd will time out the stop invocation.
cmd := exec.Command("systemd-run", "--unit", "sriov-network-config-daemon-reboot",
"--description", fmt.Sprintf("sriov-network-config-daemon reboot node"), "/bin/sh", "-c", "systemctl stop kubelet.service; reboot")
if err := cmd.Run(); err != nil {
glog.Errorf("failed to reboot node: %v", err)
}
}
type GlogLogger struct {
}
func (a GlogLogger) Log(v ...interface{}) {
glog.Info(v...)
}
func (a GlogLogger) Logf(format string, v ...interface{}) {
glog.Infof(format, v...)
}
func (dn *Daemon) annotateNode(node, value string) error {
glog.Infof("annotateNode(): Annotate node %s with: %s", node, value)
oldNode, err := dn.kubeClient.CoreV1().Nodes().Get(context.Background(), dn.name, metav1.GetOptions{})
if err != nil {
glog.Infof("annotateNode(): Failed to get node %s %v, retrying", node, err)
return err
}
oldData, err := json.Marshal(oldNode)
if err != nil {
return err
}
newNode := oldNode.DeepCopy()
if newNode.Annotations == nil {
newNode.Annotations = map[string]string{}
}
if newNode.Annotations[annoKey] != value {
newNode.Annotations[annoKey] = value
newData, err := json.Marshal(newNode)
if err != nil {
return err
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, corev1.Node{})
if err != nil {
return err
}
_, err = dn.kubeClient.CoreV1().Nodes().Patch(context.Background(),
dn.name,
types.StrategicMergePatchType,
patchBytes,
metav1.PatchOptions{})
if err != nil {
glog.Infof("annotateNode(): Failed to patch node %s: %v", node, err)
return err
}
}
return nil
}
func (dn *Daemon) drainNode(name string) error {
glog.Info("drainNode(): Update prepared")
var err error
// wait a random time to avoid all the nodes drain at the same time
wait.PollUntil(time.Duration(rand.Intn(15)+1)*time.Second, func() (bool, error) {
if !dn.drainable {
glog.Info("drainNode(): other node is draining, waiting...")
}
return dn.drainable, nil
}, dn.stopCh)
err = dn.annotateNode(dn.name, annoDraining)
if err != nil {
glog.Errorf("drainNode(): Failed to annotate node: %v", err)
return err
}
backoff := wait.Backoff{
Steps: 5,
Duration: 10 * time.Second,
Factor: 2,
}
var lastErr error
glog.Info("drainNode(): Start draining")
if err = wait.ExponentialBackoff(backoff, func() (bool, error) {
err := drain.RunCordonOrUncordon(dn.drainer, dn.node, true)
if err != nil {
lastErr = err
glog.Infof("Cordon failed with: %v, retrying", err)
return false, nil
}
err = drain.RunNodeDrain(dn.drainer, dn.name)
if err == nil {
return true, nil
}
lastErr = err
glog.Infof("Draining failed with: %v, retrying", err)
return false, nil
}); err != nil {
if err == wait.ErrWaitTimeout {
glog.Errorf("drainNode(): failed to drain node (%d tries): %v :%v", backoff.Steps, err, lastErr)
}
glog.Errorf("drainNode(): failed to drain node: %v", err)
return err
}
glog.Info("drainNode(): drain complete")
return nil
}
func registerPlugins(ns *sriovnetworkv1.SriovNetworkNodeState) []string {
pluginNames := make(map[string]bool)
for _, iface := range ns.Status.Interfaces {
if val, ok := pluginMap[iface.Vendor]; ok {
pluginNames[val] = true
}
}
rawList := reflect.ValueOf(pluginNames).MapKeys()
glog.Infof("registerPlugins(): %v", rawList)
nameList := make([]string, len(rawList))
for i := 0; i < len(rawList); i++ {
nameList[i] = rawList[i].String()
}
return nameList
}
func tryEnableRdma() (bool, error) {
glog.V(2).Infof("tryEnableRdma()")
var stdout, stderr bytes.Buffer
cmd := exec.Command("/bin/bash", scriptsPath)
cmd.Stdout = &stdout
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {
glog.Errorf("tryEnableRdma(): fail to enable rdma %v: %v", err, cmd.Stderr)
return false, err
}
glog.V(2).Infof("tryEnableRdma(): %v", cmd.Stdout)
i, err := strconv.Atoi(strings.TrimSpace(stdout.String()))
if err == nil {
if i == 0 {
glog.V(2).Infof("tryEnableRdma(): RDMA kernel modules loaded")
return true, nil
} else {
glog.V(2).Infof("tryEnableRdma(): RDMA kernel modules not loaded")
return false, nil
}
}
return false, err
}
func tryCreateUdevRule() error {
glog.V(2).Infof("tryCreateUdevRule()")
filePath := "/host/etc/udev/rules.d/10-nm-unmanaged.rules"
_, err := os.Stat(filePath)
if err != nil {
if os.IsNotExist(err) {
glog.V(2).Infof("tryCreateUdevRule(): file not existed, create file")
_, err := os.Create(filePath)
if err != nil {
glog.Errorf("tryCreateUdevRule(): fail to create file: %v", err)
return err
}
} else {
return err
}
}
content := fmt.Sprintf("ACTION==\"add|change\", ATTRS{device}==\"%s\", ENV{NM_UNMANAGED}=\"1\"\n", strings.Join(sriovnetworkv1.VfIds, "|"))
err = ioutil.WriteFile(filePath, []byte(content), 0666)
if err != nil {
glog.Errorf("tryCreateUdevRule(): fail to write file: %v", err)
return err
}
return nil
}
| [
"\"NAMESPACE\"",
"\"PLUGINSPATH\""
] | [] | [
"NAMESPACE",
"PLUGINSPATH"
] | [] | ["NAMESPACE", "PLUGINSPATH"] | go | 2 | 0 | |
ncsp/ncsp.py | #!/usr/bin/python
# csp 3/22/2018
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Top level generic CSP (Cloud Service Provider) interface
#
# Demonstrates how to use python to create a consistent interface
# across multiple different cloud providers. Shows code for creating, starting,
# deleting and setting up ssh commands and sessions in those VM's
#
# csp.py outermost command interface
#   cspbaseclass.py     the main class, and CSP independent functions
# <csp>_funcs.py contains CSP specific code
#
# CSP interfaces are dynamically added if they are found in the same directory
# as this application. File name is "<csp>_funcs.py". Currently supported
# csp types are:
#
#   ali     Alibaba
# aws Amazon
# azure Microsoft Azure
#
# Basic commands:
# csp help # basic help, lists which csps currently exist
# csp <csp> createVM # create security groups and VM with default settings
# csp <csp> ssh [cmd...] # ssh's and optionally runs cmd on VM
# csp <csp> deleteVM # destroy VM
# csp <csp> test # simple timing test of all the major csp commands
# csp help # overall command help
# csp <csp> --help # csp specific help
#
# Configuration parameters, VM ids, IP addresses, etc. are saved in a persistent
# file and updated as needed. See the 'args' and 'clean' commands
#
# csp <csp> --trace [0..3] turns on some minimal command/return tracing
#
import argparse
import time
import sys
import os
from cspbaseclass import error, trace, trace_do, trace_setlevel, debug_stop
###############################################################################
# simple timing class
###############################################################################
class TimeClass:
''' simple timing class '''
def __init__(self, outer_loop_value):
self.m_test_start = self.Now();
self.m_log_data=[]
self.m_log_idx=0
self.m_instanceTypeName="no name set yet"
self.m_outer_loop_value=outer_loop_value # which outer-loop is this create/delete cycle
def SetInstanceTypeName(self, instanceTypeName):
        self.m_instanceTypeName = instanceTypeName   # we lose this before it's printed
def InstanceTypeName(self):
return(self.m_instanceTypeName)
def Now(self): # current time, as a floating point number
ts = time.time()
return(ts)
def Diff(self, te, ts): # te is end, ts is start -- what's the difference?
diff = te - ts;
return diff
def Start(self): # return start time
ts = self.Now()
return ts
def End(self, taskname, loop, ts): # called at end, given start time and name
te = self.Now()
diff = self.Diff(te, ts)
name = taskname
name += '.'
name += str(loop)
print "%2s %-20s %8.2f" % ("", name, diff)
# simple list containing "name" and "diff" fields
self.m_log_data.append((name, diff))
self.m_log_idx += 1
###############################################################################
# reporting functions
###############################################################################
def SummaryInit(self, my_class, args):
''' Summary initialization - conclusions/sums '''
self.m_test_end = self.Now();
self.m_test_diff = self.Diff(self.m_test_end, self.m_test_start) # overall test time
def SummaryReport(self, my_class, args):
''' Summary report to display at end of job'''
print ""
print "#---------------------------------------------------------"
print "# %s %s image:%s" % (my_class.m_class_name, args.instance_type, args.image_name)
print ("# loop %d of %d start/stop/del:%d %s\n" %
(self.m_outer_loop_value+1, args.outer_loop_cnt,
args.inner_loop_cnt,
time.strftime("%Y%b%d-%a-%H%M", time.localtime())))
print "#"
print ""
for idx in range(0, self.m_log_data.__len__()):
val = self.m_log_data[idx]
print "%2d %-20s %8.2f" % (idx, val[0], val[1])
print "%2s %-20s %8.2f" % ("", "overall", self.m_test_diff) # done after InitSummary called
print ""
def SummaryLog(self, my_class, args):
''' Summary log - intent is to easily cut/paste to spreadsheet table '''
with open(my_class.m_log_path + "test", "a") as f: # test summary file
f.write( "\n" )
f.write( "# %s loop %d of %d start/stop/del:%d\n" %
(my_class.m_class_name,
self.m_outer_loop_value+1, args.outer_loop_cnt,
args.inner_loop_cnt))
f.write( "%s\n" % time.strftime("%Y-%m-%d", time.localtime()))
f.write( "%s\n" % args.image_name)
f.write( "%s\n" % (args.instance_type))
for idx in range(0, self.m_log_data.__len__()):
val = self.m_log_data[idx]
f.write( "%.2f\n" % val[1])
f.write( "%.2f\n" %self.m_test_diff) # done only after InitSummary called
##############################################################################
# generic timing test, how long does it take to do basic VM features?
def time_test(my_class, outer_loop_value, args):
''' generic CSP vm create/stop/start/reset/delete timing test '''
my_time = TimeClass(outer_loop_value)
# create/get id for Network Security Group
ts = my_time.Start()
rc = my_class.CreateNSG(args)
my_time.End("createNSG", 0, ts)
if (rc != 0):
return rc
ts = my_time.Start()
rc = my_class.CreateVM(args) # args is from parser.parse_args(argv)
my_time.End("createVM", 0, ts)
if (rc != 0):
error ("createVM returned %d, stopping test" % rc)
return rc
# type of VM created - size, number of CPUs, GPUs... defined by name
my_time.SetInstanceTypeName(args.instance_type)
# start/stop/restart loops, default is 2
loop = 0 # initialize value if loop isn't run (loop_cnt = 0)
for loop in range(0, args.inner_loop_cnt):
ts = my_time.Start()
my_class.StopVM(args)
my_time.End("stopVM", loop, ts)
time.sleep(5)
ts = my_time.Start()
my_class.StartVM(args)
my_time.End("startVM", loop, ts)
time.sleep(5)
ts = my_time.Start()
my_class.RestartVM(args)
my_time.End("restartVM", loop, ts)
time.sleep(5)
# delete vm
ts = my_time.Start()
my_class.DeleteVM(args)
my_time.End("deleteVM", loop, ts)
# delete Security Group
time.sleep(5) # for alibaba, need a delay before trying to delete NSG
                           # immediately after deleting the VM -- the deleteNSG fails
ts = my_time.Start()
my_class.DeleteNSG(args)
my_time.End("deleteNSG", loop, ts)
# delete the persistent information - VM/NSG id, name..
my_class.Clean(args)
# final report
    my_time.SummaryInit(my_class, args)        # calculate any conclusions..
    if (args.summary_report != 0):             # extra, possibly redundant
my_time.SummaryReport(my_class, args) # but nicely formatted user report
my_time.SummaryLog(my_class, args) # cut/pasteable format in log file
# successful return
return 0
# get_csp_list
#
# Returns the list of all the csp's that we support (I.E all the files that
# end with _funcs.py).
#
# internal function
def get_csp_list():
''' returns a list of supported csps -- not including 'template' '''
csp_list=[]
import glob
filelist = glob.glob(module_path + "*_funcs.py") # ['test1/ali_funcs.py', 'test1/azure_funcs.py', ...
for name in filelist:
pos0 = name.rfind("/")
pos1 = name.rfind("_funcs.py")
csp_name = name[pos0+1:pos1] # remove the _funcs.py" from it
if (csp_name != 'template'):
csp_list.append(csp_name)
return(csp_list)
# show_csps
#
# Returns the list of all the csp's that we support (I.E all the files that
# end with _funcs.py).
#
# List can be used by further scripting
#
# for csp_name in $(./ncsp csps); do ./ncsp $csp_name running; done
#
def show_csps():
''' returns a list of supported csps -- not including 'template' '''
csp_list = get_csp_list()
for csp_name in csp_list:
print("%s " % csp_name)
return 0
# prints command line usage
def usage(module_path):
''' program usage help text '''
print('''\
Nvidia Cloud Service Provider common simple scriptable interface
usage:
ncsp cmd [options]
ncsp <csp> csp_cmd [options]
cmd: top level csp-independent commands
help overall application help
csps lists supported csps
''')
# show the <csp>_func.py files that have in directory
import glob
filelist = glob.glob(module_path + "*_funcs.py") # ['test1/ali_funcs.py', 'test1/azure_funcs.py', ...
print(" csp: name of the supported Cloud Service Provider (csp)")
# special case for 'all'.
print(" %-23s %s" % ("ALL", "Runs command on all CSP's one after each other"))
# now the rest of the files.
for filename in filelist:
pos0 = filename.rfind("/")
pos1 = filename.rfind("_funcs.py")
csp_name = filename[pos0+1:pos1]
# pull quoted string after HELPTEXT= from the file
#
helptext=""
try:
with open(filename, "r") as f:
for i, line in enumerate(f):
if (i > 10):
break;
idx = line.find("HELPTEXT:");
if (idx >= 0):
start = line.find("\"", idx+9);
end = line.find("\"", start+1)
if (start > idx and end > start):
                            helptext=line[start+1:end]   # text between the quotes
break
except:
helptext="" # could not open file, don't report error
print(" %-23s %s" % (csp_name, helptext))
# rest of the menu
print('''
csp_cmd:
CSP specific commands:
createVM[opts] create instance, use -h to see csp specific options
stopVM stop current instance
startVM start current instance
restartVM restart current instance
deleteVM delete (stop first) and destroy instance
test create/stop/start/restart/delete timing test
ping simple ping VM if possible - check connection
ssh [cmd] ssh into current VM instance, run command if given
status status of current instance
show verbose info about instance
Network Security Group commands:
createNSG [opts] creates network security group
deleteNSG deletes network security group
showNSGs shows all network security groups
CSP Query commands:
regions displays list of region names supported by csp
running display list of running instances in a region
General commands
validCSP returns 0 if csp name is supported, 1 elsewise
ip prints the ip value of the VM
args display persistent args file
clean clean cached files, restore args to defaults
help
--help csp specific argument help
''')
sys.exit(1)
def add_common_options(my_class, parser):
    ''' common arguments used in outer control and CSP specific features '''
parser.add_argument('--version', action='version', version="%(prog)s 0.0")
parser.add_argument('--trace', dest='trace', type=int, choices=xrange(0,4),
default=0, required=False,
help='trace level flag: 0:none, 1:cmd, 2:+info, 3:+output')
parser.add_argument('--inner_loop_cnt', dest='inner_loop_cnt', type=int, choices=xrange(0, 6),
default=2, required=False,
help='inner stop/start/reset test loops run')
parser.add_argument('--outer_loop_cnt', dest='outer_loop_cnt', type=int, choices=xrange(0, 6),
default=1, required=False,
help='outer over-all create/delete loops run')
parser.add_argument('--summary_report', dest='summary_report', type=int, choices=xrange(0, 2),
default=1, required=False,
help='show summary report at end of test')
# some computed defaults used for VM
my_user = os.environ["USER"];
my_vm_name = my_user + time.strftime("-%a-%Y%b%d-%H%M%S", time.localtime())
my_vm_name = my_vm_name.lower() # gcp (gcloud) wants all lower case names
my_nsg_name = my_user + "NSG" # for NetworkSecurity Group
# common VM arguments -- do it here so don't have to set up these args
# for every CSP. Gives them default values of "" so know if they are created or not
# CSP code can override any of these with parser.set_defaults(key);
parser.add_argument('--user', dest='user', # overridden in CSP specific code
default=None, required=False,
help='username for the VM')
parser.add_argument('--vm_name', dest='vm_name', # Name of VM
default=my_vm_name, required=False,
help='external name of the VM')
parser.add_argument('--vm_id', dest='vm_id', # set in CSP specific code
default=None, required=False,
help='id value of the VM')
parser.add_argument('--nsg_name', dest='nsg_name', # common: Name of Network Security Group
default=my_nsg_name, required=False,
help='Network Security Group Name')
parser.add_argument('--nsg_id', dest='nsg_id', # set in CSP specific code
default="", required=False,
help='Network Security Group ID')
parser.add_argument('--key_name', dest='key_name', # overridden in CSP specific code
default=None, required=False,
help='ssh key name')
parser.add_argument('--key_path', dest='key_path', # common: where ssh key files reside
default="~/.ssh/", required=False,
help='directory where ssh key files reside')
parser.add_argument('--key_file', dest='key_file', # computed in CSP specific code
default=None, required=False,
help='full path to ssh key file')
parser.add_argument('--image_name', dest='image_name', # overridden in CSP specific code
default=None, required=False,
help='name of the VM image to run')
parser.add_argument('--image_id', dest='image_id', # set in CSP specific code
default=None, required=False,
help='ID of the VM image to run')
parser.add_argument('--pingable', dest='pingable', # ping feature is optional to most VM network config
type=int, choices=xrange(0,2),
default=0, required=False, # default is not pingable
help='set to 1 if can ping IP address')
    parser.add_argument('--ip', dest='vm_ip',             # set in CSP specific code
default="", required=False,
help='VM IP address')
# process_cmd
#
# command line processor - a big case statement
# see https://www.pydanny.com/why-doesnt-python-have-switch-case.html
#
# my_class is the CSPBaseClass, while argv are the additional command line
# arguments that were passed in. This function is the top level command
# line parser for all the CSPs - this code is generic across all of them
#
# The 'createVM', 'stopVM' and the like functions are csp specific to change
# the state of a VM, and gather the proper IP address and set up the security
# rules.
#
# Commands like 'ssh', 'ping' use the IP address that was saved and allow
# access to that VM
#
#
#
def process_cmd(my_class, argv):
# first thing, verify that the connection to the CSP is up and
# running correctly (cli app downloaded, user logged in, etc...)
rc = my_class.CSPSetupOK() # csp name dependent function
if (rc != 0):
error("CSP \"%s\" access is not configured correctly, set it up first" % my_class.ClassName())
return rc # unhappy
# create the main command line argument parser class
parser = argparse.ArgumentParser(prog='csp',
description='CSP simple python interface for %s' % my_class.ClassName())
# common options arguments
add_common_options(my_class, parser)
# add in positional arguments
parser.add_argument('command', help="command to execute, run 'help' for details")
parser.add_argument('arguments', help="optional csp specific args run '-h' for details",
nargs=argparse.REMAINDER)
# class specific arguments
my_class.ArgOptions(parser) # csp dependent function
# update the defaults with values saved in file if that file exists
my_class.ArgRestoreFromFile(parser)
# actual argument parser, and any CSP class specific checks
# 'args' here contains all the argument and option values in this order
#
    # 1) hardcoded defaults in arg-command, or programmatically determined
    # 2) overridden by any value specified in the saved args from last run (if saved)
    # 3) overridden by any values specified on the command line
#
# Then the command is run
#
    # Then, at the very end of this function, if commands were successful all the
# option values and computed/inquired values like CSP ID values are written
# back to a file -- to be picked up in #2 above.
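    #
    # Illustrative sketch (hypothetical, not part of this tool) of that
    # three-level precedence, assuming ArgRestoreFromFile() behaves roughly
    # like argparse's parser.set_defaults(); uncomment to try it standalone:
    #
    #   import argparse
    #   p = argparse.ArgumentParser()
    #   p.add_argument('--vm_name', default='hardcoded-default')    # level 1
    #   p.set_defaults(vm_name='value-from-saved-args-file')        # level 2 overrides 1
    #   print p.parse_args([]).vm_name                    # -> value-from-saved-args-file
    #   print p.parse_args(['--vm_name', 'cli']).vm_name  # -> cli (level 3 wins)
    #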
args = parser.parse_args(argv)
# set global value used for trace level, as 'args' isn't passed around everywhere
trace_setlevel(args.trace)
# CSP class specific arg checks,
# bail here if something isn't set correctly
rc = my_class.ArgSanity(parser, args)
if (rc != 0):
error("In ArgSanity rc:%d" % rc)
return(rc)
# this is the command that is to be run, pull from the args
cmd = args.command
# commands to handle the persistent arg list --
if cmd == "clean":
my_class.Clean(args) # cleans out args an other cached files
return 0
elif cmd == "args":
my_class.ArgShowFile()
return 0
elif cmd == "help":
usage(my_class.m_module_path)
return 1
# print args if higher trace level
if (trace_do(2)):
print vars(args)
print "============"
print "cmd=%s" % cmd
rc = 0 # return value if forget to set below
# parse the commands
if cmd == "validCSP":
rc = 0 # invalid CSP name errors out above
elif cmd == "createNSG":
rc = my_class.CreateNSG(args)
elif cmd == "deleteNSG":
rc = my_class.DeleteNSG(args)
elif cmd == "showNSGs":
rc = my_class.ShowNSGs(args)
elif cmd == "createVM":
rc = my_class.CreateVM(args) # args is from parser.parse_args(argv)
elif cmd == "startVM":
rc = my_class.StartVM(args)
elif cmd == "stopVM":
rc = my_class.StopVM(args)
elif cmd == "restartVM":
rc = my_class.RestartVM(args)
elif cmd == "deleteVM":
rc = my_class.DeleteVM(args)
elif cmd == "ssh":
rc, stdoutstr, stderrstr = my_class.Ssh(args, True, argv[1:]) # args is historical and incl
elif cmd == "ping":
rc = my_class.Ping(args)
elif cmd == "status":
rc = my_class.Status(args)
elif cmd == "show":
rc = my_class.Show(args)
elif cmd == "boottime":
rc, kernel, user, total = my_class.KernelBootTime(args)
if (rc == 0):
print ("kernel:%s user:%s total:%s" % (kernel, user, total))
elif cmd == "running":
rc = my_class.ShowRunning(args)
elif cmd == "regions":
rc = my_class.ShowRegions(args)
elif cmd == "ip":
rc = my_class.ShowIP(args)
elif cmd == "test": # default is 1 outer create/delete loop
if (args.outer_loop_cnt <= 0):
error("outer_loop_cnt=0, no tests run")
else:
for loop in range(0, args.outer_loop_cnt):
rc = time_test(my_class, loop, args)
if (rc != 0):
break
time.sleep(30) # time between loops
if (rc != 0):
error("Test returned %d" % rc)
else:
error("Undefined command", cmd)
usage(my_class.m_module_path)
rc = 1
# save all the persistent args values to file after the above commands have
# run and modified them -- like the VM or SecurityGroup IDs
if (cmd != "DeleteVM"):
my_class.ArgSaveToFile(args)
if rc == None: # handle "None" return case -- should be an error?
error("No return code for cmd \"%s\"" % cmd)
rc = 2
return rc # exit code
###############################################################################
# do_csp_cmd
#
# Major magic of the code..
#
# dynamically based on the csp name, load a module "<csp>_funcs.py"
# and create its main class instance. This csp specific file will
# be in the same directory as the main module.
#
# To add a new CSP, simply create a csp-specific file named "<csp>_funcs.py"
# with interfaces that are the same as the other examples
# and drop it into the directory with the other csp-specific files
#
# NOTE: find_module() does not handle dotted package names,
# so keep the file structure simple
#
# See: https://pymotw.com/2/imp/ (1/2018)
#
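#
# Illustrative sketch of a hypothetical "mycsp_funcs.py" plugin file dropped in
# next to this script. The class name CSPClass and the (csp, module_path)
# constructor arguments come from the loader below; subclassing CSPBaseClass,
# the __init__ signature and the stub bodies are assumptions based on how
# process_cmd() calls into the class:
#
#   from cspbaseclass import CSPBaseClass
#
#   class CSPClass(CSPBaseClass):
#       def __init__(self, name, module_path):               # signature assumed
#           CSPBaseClass.__init__(self, name, module_path)
#       def CSPSetupOK(self):             # checked first; 0 means the csp CLI/login is usable
#           return 0
#       def ArgOptions(self, parser):     # add csp specific options/defaults
#           parser.set_defaults(image_name="my-default-image")
#       def ArgSanity(self, parser, args):
#           return 0
#       def CreateVM(self, args):         # call the csp's own CLI/SDK here
#           return 0
#       def DeleteVM(self, args):
#           return 0
#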
def do_csp_cmd(csp, argv):
''' import csp dependent class based on name, and run command on it '''
import imp
module_name = "%s_funcs" % csp
try:
f, filename, description = imp.find_module(module_name)
package = imp.load_module(module_name, f, filename, description)
my_class = package.CSPClass(csp, module_path)
except ImportError, err:
print "Error: CSP \"%s\" not supprted: %s" %(csp, err)
sys.exit(1) # unhappy return
# process the command line arguments on class (does all the work)
rc = process_cmd(my_class, sys.argv[2:])
return rc
###############################################################################
# main body of the ncsp application. Code starts here.
#
# Loads the csp specific csp module and does the work
#
# argv[0] is the full path to the prog name -- from it we can get
# the path where our modules will be, used for search later
try:
pos = sys.argv[0].rfind("/")
module_path = sys.argv[0][0:pos+1]
except:
module_path = sys.argv[0]
if (sys.argv.__len__() == 1): # no arguments, print usage
usage(module_path)
# if we have one arg, it's probably the csp name, but there are
# few special options like 'help' or 'csps' that are also allowed
arg1=sys.argv[1] # our csp name, like "aws",
if (arg1 == "help" or arg1[0:1] == '-'): # be nice if user is confused
usage(module_path) # usage exits, does not return
elif (arg1 == "csps"): # list all known CSP classes
rc = show_csps()
sys.exit(rc)
# from here on out, we are doing a CSP dependent function -- so
# need at least one more argument beyond the CSP name
csp = arg1 # name of the csp are we talking about
if (sys.argv.__len__() <= 2):
usage(module_path) # not enough args, exit with usage
# from here on, the argument list starts with the 2nd value
argv=sys.argv[2:]
# if csp is 'all', then run the given command on all of csp's that are
# active (don't complain about those CSP's that fail the CSPSetupOK test)
# Also don't run 'template' class -- we want the good stuff here
if (csp == "ALL"):
csp_list = get_csp_list()
for csp in csp_list:
rc = do_csp_cmd(csp, argv)
else:
# single csp is given -- run it.
# parse the rest of the command line and run it on the given CSP
rc = do_csp_cmd(csp, argv)
sys.exit(rc)
| [] | [] | [
"USER"
] | [] | ["USER"] | python | 1 | 0 | |
BugTracker/wsgi.py | """
WSGI config for BugTracker project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'BugTracker.settings')
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
pyspider/run.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<[email protected]>
# http://binux.me
# Created on 2014-03-05 00:11:49
import os
import sys
import six
import copy
import time
import shutil
import logging
import logging.config
import click
import pyspider
from pyspider.message_queue import connect_message_queue
from pyspider.database import connect_database
from pyspider.libs import utils
def read_config(ctx, param, value):
if not value:
return {}
import json
def underline_dict(d):
if not isinstance(d, dict):
return d
return dict((k.replace('-', '_'), underline_dict(v)) for k, v in six.iteritems(d))
config = underline_dict(json.load(value))
ctx.default_map = config
return config
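# Hypothetical example of what read_config()/underline_dict() do: a file passed
# with -c containing
#   {"webui": {"port": 5001}, "fetcher": {"user-agent": "my-bot/1.0"}}
# has its dashed keys rewritten to underscores,
#   {"webui": {"port": 5001}, "fetcher": {"user_agent": "my-bot/1.0"}}
# and is installed as click's ctx.default_map, so each subcommand picks these
# values up as defaults unless the same option is given on the command line.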
def connect_db(ctx, param, value):
if not value:
return
return utils.Get(lambda: connect_database(value))
def load_cls(ctx, param, value):
if isinstance(value, six.string_types):
return utils.load_object(value)
return value
def connect_rpc(ctx, param, value):
if not value:
return
try:
from six.moves import xmlrpc_client
except ImportError:
import xmlrpclib as xmlrpc_client
return xmlrpc_client.ServerProxy(value, allow_none=True)
@click.group(invoke_without_command=True)
@click.option('-c', '--config', callback=read_config, type=click.File('r'),
help='a json file with default values for subcommands. {"webui": {"port":5001}}')
@click.option('--logging-config', default=os.path.join(os.path.dirname(__file__), "logging.conf"),
help="logging config file for built-in python logging module", show_default=True)
@click.option('--debug', envvar='DEBUG', default=False, is_flag=True, help='debug mode')
@click.option('--rdbg', default=False, is_flag=True, help='port of remote debug')
@click.option('--queue-maxsize', envvar='QUEUE_MAXSIZE', default=100,
help='maxsize of queue')
@click.option('--taskdb', envvar='TASKDB', callback=connect_db,
help='database url for taskdb, default: sqlite')
@click.option('--projectdb', envvar='PROJECTDB', callback=connect_db,
help='database url for projectdb, default: sqlite')
@click.option('--resultdb', envvar='RESULTDB', callback=connect_db,
help='database url for resultdb, default: sqlite')
@click.option('--message-queue', envvar='AMQP_URL',
help='connection url to message queue, '
'default: builtin multiprocessing.Queue')
@click.option('--amqp-url', help='[deprecated] amqp url for rabbitmq. '
'please use --message-queue instead.')
@click.option('--beanstalk', envvar='BEANSTALK_HOST',
help='[deprecated] beanstalk config for beanstalk queue. '
'please use --message-queue instead.')
@click.option('--phantomjs-proxy', envvar='PHANTOMJS_PROXY', help="phantomjs proxy ip:port")
@click.option('--puppeteer-proxy', envvar='PUPPETEER_PROXY', help="puppeteer proxy ip:port")
@click.option('--data-path', default='./data', help='data dir path')
@click.option('--add-sys-path/--not-add-sys-path', default=True, is_flag=True,
help='add current working directory to python lib search path')
@click.version_option(version=pyspider.__version__, prog_name=pyspider.__name__)
@click.pass_context
def cli(ctx, **kwargs):
"""
A powerful spider system in python.
"""
if kwargs['add_sys_path']:
sys.path.append(os.getcwd())
logging.config.fileConfig(kwargs['logging_config'])
if kwargs.get('rdbg'):
import pydevd
pydevd.settrace('127.0.0.1', port=9000, stdoutToServer=True, stderrToServer=True, suspend=False)
print ("pyspider is running in rdbg mode, server is connected at localhost:9000")
# get db from env
for db in ('taskdb', 'projectdb', 'resultdb'):
if kwargs[db] is not None:
continue
if os.environ.get('MYSQL_NAME'):
kwargs[db] = utils.Get(lambda db=db: connect_database(
'sqlalchemy+mysql+%s://%s:%s/%s' % (
db, os.environ['MYSQL_PORT_3306_TCP_ADDR'],
os.environ['MYSQL_PORT_3306_TCP_PORT'], db)))
elif os.environ.get('MONGODB_NAME'):
kwargs[db] = utils.Get(lambda db=db: connect_database(
'mongodb+%s://%s:%s/%s' % (
db, os.environ['MONGODB_PORT_27017_TCP_ADDR'],
os.environ['MONGODB_PORT_27017_TCP_PORT'], db)))
elif ctx.invoked_subcommand == 'bench':
if kwargs['data_path'] == './data':
kwargs['data_path'] += '/bench'
shutil.rmtree(kwargs['data_path'], ignore_errors=True)
os.mkdir(kwargs['data_path'])
if db in ('taskdb', 'resultdb'):
kwargs[db] = utils.Get(lambda db=db: connect_database('sqlite+%s://' % (db)))
elif db in ('projectdb', ):
kwargs[db] = utils.Get(lambda db=db: connect_database('local+%s://%s' % (
db, os.path.join(os.path.dirname(__file__), 'libs/bench.py'))))
else:
if not os.path.exists(kwargs['data_path']):
os.mkdir(kwargs['data_path'])
kwargs[db] = utils.Get(lambda db=db: connect_database('sqlite+%s:///%s/%s.db' % (
db, kwargs['data_path'], db[:-2])))
kwargs['is_%s_default' % db] = True
# create folder for counter.dump
if not os.path.exists(kwargs['data_path']):
os.mkdir(kwargs['data_path'])
# message queue, compatible with old version
if kwargs.get('message_queue'):
pass
elif kwargs.get('amqp_url'):
kwargs['message_queue'] = kwargs['amqp_url']
elif os.environ.get('RABBITMQ_NAME'):
kwargs['message_queue'] = ("amqp://guest:guest@%(RABBITMQ_PORT_5672_TCP_ADDR)s"
":%(RABBITMQ_PORT_5672_TCP_PORT)s/%%2F" % os.environ)
elif kwargs.get('beanstalk'):
kwargs['message_queue'] = "beanstalk://%s/" % kwargs['beanstalk']
for name in ('newtask_queue', 'status_queue', 'scheduler2fetcher',
'fetcher2processor', 'processor2result'):
if kwargs.get('message_queue'):
kwargs[name] = utils.Get(lambda name=name: connect_message_queue(
name, kwargs.get('message_queue'), kwargs['queue_maxsize']))
else:
kwargs[name] = connect_message_queue(name, kwargs.get('message_queue'),
kwargs['queue_maxsize'])
# phantomjs-proxy
if kwargs.get('phantomjs_proxy'):
pass
elif os.environ.get('PHANTOMJS_NAME'):
kwargs['phantomjs_proxy'] = os.environ['PHANTOMJS_PORT_25555_TCP'][len('tcp://'):]
# puppeteer-proxy
if kwargs.get('puppeteer_proxy'):
pass
elif os.environ.get('PUPPETEER_NAME'):
kwargs['puppeteer_proxy'] = os.environ['PUPPETEER_PORT_22222_TCP'][len('tcp://'):]
ctx.obj = utils.ObjectDict(ctx.obj or {})
ctx.obj['instances'] = []
ctx.obj.update(kwargs)
if ctx.invoked_subcommand is None and not ctx.obj.get('testing_mode'):
ctx.invoke(all)
return ctx
@cli.command()
@click.option('--xmlrpc/--no-xmlrpc', default=True)
@click.option('--xmlrpc-host', default='0.0.0.0')
@click.option('--xmlrpc-port', envvar='SCHEDULER_XMLRPC_PORT', default=23333)
@click.option('--inqueue-limit', default=0,
help='size limit of task queue for each project, '
              'tasks will be ignored when it overflows')
@click.option('--delete-time', default=24 * 60 * 60,
help='delete time before marked as delete')
@click.option('--active-tasks', default=100, help='active log size')
@click.option('--loop-limit', default=1000, help='maximum number of tasks handled within one loop')
@click.option('--fail-pause-num', default=10, help='auto pause the project when last FAIL_PAUSE_NUM task failed, set 0 to disable')
@click.option('--scheduler-cls', default='pyspider.scheduler.ThreadBaseScheduler', callback=load_cls,
help='scheduler class to be used.')
@click.option('--threads', default=None, help='thread number for ThreadBaseScheduler, default: 4')
@click.pass_context
def scheduler(ctx, xmlrpc, xmlrpc_host, xmlrpc_port,
inqueue_limit, delete_time, active_tasks, loop_limit, fail_pause_num,
scheduler_cls, threads, get_object=False):
"""
Run Scheduler, only one scheduler is allowed.
"""
re_init_rpc_in_app = False
while True:
g = ctx.obj
Scheduler = load_cls(None, None, scheduler_cls)
kwargs = dict(taskdb=g.taskdb, projectdb=g.projectdb, resultdb=g.resultdb,
newtask_queue=g.newtask_queue, status_queue=g.status_queue,
out_queue=g.scheduler2fetcher, data_path=g.get('data_path', 'data'))
if threads:
kwargs['threads'] = int(threads)
scheduler = Scheduler(**kwargs)
scheduler.INQUEUE_LIMIT = inqueue_limit
scheduler.DELETE_TIME = delete_time
scheduler.ACTIVE_TASKS = active_tasks
scheduler.LOOP_LIMIT = loop_limit
scheduler.FAIL_PAUSE_NUM = fail_pause_num
g.instances.append(scheduler)
if g.get('testing_mode') or get_object:
return scheduler
if xmlrpc:
utils.run_in_thread(scheduler.xmlrpc_run, port=xmlrpc_port, bind=xmlrpc_host)
if re_init_rpc_in_app:
app = g['app']
scheduler_rpc = g['scheduler_rpc']
init_rpc_in_app(ctx, app, scheduler_rpc)
re_init_rpc_in_app = False
logging.info('scheduler running...')
scheduler.run()
if not scheduler.keep_running:
break
else:
g.instances.remove(scheduler)
re_init_rpc_in_app = True
logging.info('scheduler restarted.')
@cli.command()
@click.option('--xmlrpc/--no-xmlrpc', default=False)
@click.option('--xmlrpc-host', default='0.0.0.0')
@click.option('--xmlrpc-port', envvar='FETCHER_XMLRPC_PORT', default=24444)
@click.option('--poolsize', default=100, help="max simultaneous fetches")
@click.option('--proxy', help="proxy host:port")
@click.option('--user-agent', help='user agent')
@click.option('--timeout', help='default fetch timeout')
@click.option('--phantomjs-endpoint', help="endpoint of phantomjs, start via pyspider phantomjs")
@click.option('--puppeteer-endpoint', help="endpoint of puppeteer, start via pyspider puppeteer")
@click.option('--splash-endpoint', help="execute endpoint of splash: http://splash.readthedocs.io/en/stable/api.html#execute")
@click.option('--fetcher-cls', default='pyspider.fetcher.Fetcher', callback=load_cls,
help='Fetcher class to be used.')
@click.pass_context
def fetcher(ctx, xmlrpc, xmlrpc_host, xmlrpc_port, poolsize, proxy, user_agent,
timeout, phantomjs_endpoint, puppeteer_endpoint, splash_endpoint, fetcher_cls,
async_mode=True, get_object=False, no_input=False):
"""
Run Fetcher.
"""
g = ctx.obj
Fetcher = load_cls(None, None, fetcher_cls)
if no_input:
inqueue = None
outqueue = None
else:
inqueue = g.scheduler2fetcher
outqueue = g.fetcher2processor
fetcher = Fetcher(inqueue=inqueue, outqueue=outqueue,
poolsize=poolsize, proxy=proxy, async_mode=async_mode)
fetcher.phantomjs_proxy = phantomjs_endpoint or g.phantomjs_proxy
fetcher.puppeteer_proxy = puppeteer_endpoint or g.puppeteer_proxy
fetcher.splash_endpoint = splash_endpoint
if user_agent:
fetcher.user_agent = user_agent
if timeout:
fetcher.default_options = copy.deepcopy(fetcher.default_options)
fetcher.default_options['timeout'] = timeout
g.instances.append(fetcher)
if g.get('testing_mode') or get_object:
return fetcher
if xmlrpc:
utils.run_in_thread(fetcher.xmlrpc_run, port=xmlrpc_port, bind=xmlrpc_host)
fetcher.run()
@cli.command()
@click.option('--processor-cls', default='pyspider.processor.Processor',
callback=load_cls, help='Processor class to be used.')
@click.option('--process-time-limit', default=30, help='script process time limit')
@click.pass_context
def processor(ctx, processor_cls, process_time_limit, enable_stdout_capture=True, get_object=False):
"""
Run Processor.
"""
g = ctx.obj
Processor = load_cls(None, None, processor_cls)
processor = Processor(projectdb=g.projectdb,
inqueue=g.fetcher2processor, status_queue=g.status_queue,
newtask_queue=g.newtask_queue, result_queue=g.processor2result,
enable_stdout_capture=enable_stdout_capture,
process_time_limit=process_time_limit)
g.instances.append(processor)
if g.get('testing_mode') or get_object:
return processor
processor.run()
@cli.command()
@click.option('--result-cls', default='pyspider.result.ResultWorker', callback=load_cls,
help='ResultWorker class to be used.')
@click.pass_context
def result_worker(ctx, result_cls, get_object=False):
"""
Run result worker.
"""
g = ctx.obj
ResultWorker = load_cls(None, None, result_cls)
result_worker = ResultWorker(resultdb=g.resultdb, inqueue=g.processor2result)
g.instances.append(result_worker)
if g.get('testing_mode') or get_object:
return result_worker
result_worker.run()
@cli.command()
@click.option('--host', default='0.0.0.0', envvar='WEBUI_HOST',
help='webui bind to host')
@click.option('--port', default=5000, envvar='WEBUI_PORT',
              help='webui bind to port')
@click.option('--cdn', default='//cdnjs.cloudflare.com/ajax/libs/',
help='js/css cdn server')
@click.option('--scheduler-rpc', help='xmlrpc path of scheduler')
@click.option('--fetcher-rpc', help='xmlrpc path of fetcher')
@click.option('--max-rate', type=float, help='max rate for each project')
@click.option('--max-burst', type=float, help='max burst for each project')
@click.option('--username', envvar='WEBUI_USERNAME',
              help='username of lock-ed projects')
@click.option('--password', envvar='WEBUI_PASSWORD',
              help='password of lock-ed projects')
@click.option('--need-auth', is_flag=True, default=False, help='need username and password')
@click.option('--webui-instance', default='pyspider.webui.app.app', callback=load_cls,
help='webui Flask Application instance to be used.')
@click.option('--process-time-limit', default=30, help='script process time limit in debug')
@click.pass_context
def webui(ctx, host, port, cdn, scheduler_rpc, fetcher_rpc, max_rate, max_burst,
username, password, need_auth, webui_instance, process_time_limit, get_object=False):
"""
Run WebUI
"""
app = load_cls(None, None, webui_instance)
g = ctx.obj
app.config['taskdb'] = g.taskdb
app.config['projectdb'] = g.projectdb
app.config['resultdb'] = g.resultdb
app.config['cdn'] = cdn
if max_rate:
app.config['max_rate'] = max_rate
if max_burst:
app.config['max_burst'] = max_burst
if username:
app.config['webui_username'] = username
if password:
app.config['webui_password'] = password
app.config['need_auth'] = need_auth
app.config['process_time_limit'] = process_time_limit
# inject queues for webui
for name in ('newtask_queue', 'status_queue', 'scheduler2fetcher',
'fetcher2processor', 'processor2result'):
app.config['queues'][name] = getattr(g, name, None)
# fetcher rpc
if isinstance(fetcher_rpc, six.string_types):
import umsgpack
fetcher_rpc = connect_rpc(ctx, None, fetcher_rpc)
app.config['fetch'] = lambda x: umsgpack.unpackb(fetcher_rpc.fetch(x).data)
else:
# get fetcher instance for webui
fetcher_config = g.config.get('fetcher', {})
webui_fetcher = ctx.invoke(fetcher, async_mode=False, get_object=True, no_input=True, **fetcher_config)
app.config['fetch'] = lambda x: webui_fetcher.fetch(x)
init_rpc_in_app(ctx, app, scheduler_rpc)
app.debug = g.debug
g['app'] = app
g['scheduler_rpc'] = scheduler_rpc
g.instances.append(app)
if g.get('testing_mode') or get_object:
return app
app.run(host=host, port=port)
@cli.command()
@click.option('--phantomjs-path', default='phantomjs', help='phantomjs path')
@click.option('--port', default=25555, help='phantomjs port')
@click.option('--auto-restart', default=False, help='auto restart phantomjs if crashed')
@click.argument('args', nargs=-1)
@click.pass_context
def phantomjs(ctx, phantomjs_path, port, auto_restart, args):
"""
Run phantomjs fetcher if phantomjs is installed.
"""
args = args or ctx.default_map and ctx.default_map.get('args', [])
import subprocess
g = ctx.obj
_quit = []
phantomjs_fetcher = os.path.join(
os.path.dirname(pyspider.__file__), 'fetcher/phantomjs_fetcher.js')
cmd = [phantomjs_path,
# this may cause memory leak: https://github.com/ariya/phantomjs/issues/12903
#'--load-images=false',
'--ssl-protocol=any',
'--disk-cache=true'] + list(args or []) + [phantomjs_fetcher, str(port)]
try:
_phantomjs = subprocess.Popen(cmd)
except OSError:
logging.warning('phantomjs not found, continue running without it.')
return None
def quit(*args, **kwargs):
_quit.append(1)
_phantomjs.kill()
_phantomjs.wait()
logging.info('phantomjs exited.')
if not g.get('phantomjs_proxy'):
g['phantomjs_proxy'] = '127.0.0.1:%s' % port
phantomjs = utils.ObjectDict(port=port, quit=quit)
g.instances.append(phantomjs)
if g.get('testing_mode'):
return phantomjs
while True:
_phantomjs.wait()
if _quit or not auto_restart:
break
_phantomjs = subprocess.Popen(cmd)
@cli.command()
@click.option('--port', default=22222, help='puppeteer port')
@click.option('--auto-restart', default=False, help='auto restart puppeteer if crashed')
@click.argument('args', nargs=-1)
@click.pass_context
def puppeteer(ctx, port, auto_restart, args):
"""
Run puppeteer fetcher if puppeteer is installed.
"""
import subprocess
g = ctx.obj
_quit = []
puppeteer_fetcher = os.path.join(
os.path.dirname(pyspider.__file__), 'fetcher/puppeteer_fetcher.js')
cmd = ['node', puppeteer_fetcher, str(port)]
try:
_puppeteer = subprocess.Popen(cmd)
except OSError:
logging.warning('puppeteer not found, continue running without it.')
return None
def quit(*args, **kwargs):
_quit.append(1)
_puppeteer.kill()
_puppeteer.wait()
logging.info('puppeteer exited.')
if not g.get('puppeteer_proxy'):
g['puppeteer_proxy'] = '127.0.0.1:%s' % port
puppeteer = utils.ObjectDict(port=port, quit=quit)
g.instances.append(puppeteer)
if g.get('testing_mode'):
return puppeteer
while True:
_puppeteer.wait()
if _quit or not auto_restart:
break
_puppeteer = subprocess.Popen(cmd)
@cli.command()
@click.option('--fetcher-num', default=1, help='instance num of fetcher')
@click.option('--processor-num', default=1, help='instance num of processor')
@click.option('--result-worker-num', default=1,
help='instance num of result worker')
@click.option('--run-in', default='subprocess', type=click.Choice(['subprocess', 'thread']),
              help='run each component in a thread or a subprocess. '
              'threads are always used on windows.')
@click.pass_context
def all(ctx, fetcher_num, processor_num, result_worker_num, run_in):
"""
Run all the components in subprocess or thread
"""
ctx.obj['debug'] = False
g = ctx.obj
# FIXME: py34 cannot run components with threads
if run_in == 'subprocess' and os.name != 'nt':
run_in = utils.run_in_subprocess
else:
run_in = utils.run_in_thread
threads = []
try:
# phantomjs
if not g.get('phantomjs_proxy'):
phantomjs_config = g.config.get('phantomjs', {})
phantomjs_config.setdefault('auto_restart', True)
threads.append(run_in(ctx.invoke, phantomjs, **phantomjs_config))
time.sleep(2)
if threads[-1].is_alive() and not g.get('phantomjs_proxy'):
g['phantomjs_proxy'] = '127.0.0.1:%s' % phantomjs_config.get('port', 25555)
# puppeteer
if not g.get('puppeteer_proxy'):
puppeteer_config = g.config.get('puppeteer', {})
puppeteer_config.setdefault('auto_restart', True)
threads.append(run_in(ctx.invoke, puppeteer, **puppeteer_config))
time.sleep(2)
if threads[-1].is_alive() and not g.get('puppeteer_proxy'):
g['puppeteer_proxy'] = '127.0.0.1:%s' % puppeteer_config.get('port', 22222)
# result worker
result_worker_config = g.config.get('result_worker', {})
for i in range(result_worker_num):
threads.append(run_in(ctx.invoke, result_worker, **result_worker_config))
# processor
processor_config = g.config.get('processor', {})
for i in range(processor_num):
threads.append(run_in(ctx.invoke, processor, **processor_config))
# fetcher
fetcher_config = g.config.get('fetcher', {})
fetcher_config.setdefault('xmlrpc_host', '127.0.0.1')
for i in range(fetcher_num):
threads.append(run_in(ctx.invoke, fetcher, **fetcher_config))
# scheduler
scheduler_config = g.config.get('scheduler', {})
scheduler_config.setdefault('xmlrpc_host', '127.0.0.1')
threads.append(run_in(ctx.invoke, scheduler, **scheduler_config))
# running webui in main thread to make it exitable
webui_config = g.config.get('webui', {})
webui_config.setdefault('scheduler_rpc', 'http://127.0.0.1:%s/'
% g.config.get('scheduler', {}).get('xmlrpc_port', 23333))
ctx.invoke(webui, **webui_config)
finally:
# exit components run in threading
for each in g.instances:
each.quit()
# exit components run in subprocess
for each in threads:
if not each.is_alive():
continue
if hasattr(each, 'terminate'):
each.terminate()
each.join()
@cli.command()
@click.option('--fetcher-num', default=1, help='instance num of fetcher')
@click.option('--processor-num', default=2, help='instance num of processor')
@click.option('--result-worker-num', default=1, help='instance num of result worker')
@click.option('--run-in', default='subprocess', type=click.Choice(['subprocess', 'thread']),
              help='run each component in a thread or a subprocess. '
              'threads are always used on windows.')
@click.option('--total', default=10000, help="total url in test page")
@click.option('--show', default=20, help="show how many urls in a page")
@click.option('--taskdb-bench', default=False, is_flag=True,
help="only run taskdb bench test")
@click.option('--message-queue-bench', default=False, is_flag=True,
help="only run message queue bench test")
@click.option('--all-bench', default=False, is_flag=True,
help="only run all bench test")
@click.pass_context
def bench(ctx, fetcher_num, processor_num, result_worker_num, run_in, total, show,
taskdb_bench, message_queue_bench, all_bench):
"""
Run Benchmark test.
In bench mode, in-memory sqlite database is used instead of on-disk sqlite database.
"""
from pyspider.libs import bench
from pyspider.webui import bench_test # flake8: noqa
ctx.obj['debug'] = False
g = ctx.obj
if result_worker_num == 0:
g['processor2result'] = None
if run_in == 'subprocess' and os.name != 'nt':
run_in = utils.run_in_subprocess
else:
run_in = utils.run_in_thread
all_test = not taskdb_bench and not message_queue_bench and not all_bench
# test taskdb
if all_test or taskdb_bench:
bench.bench_test_taskdb(g.taskdb)
# test message queue
if all_test or message_queue_bench:
bench.bench_test_message_queue(g.scheduler2fetcher)
# test all
if not all_test and not all_bench:
return
project_name = 'bench'
def clear_project():
g.taskdb.drop(project_name)
g.resultdb.drop(project_name)
clear_project()
# disable log
logging.getLogger().setLevel(logging.ERROR)
logging.getLogger('scheduler').setLevel(logging.ERROR)
logging.getLogger('fetcher').setLevel(logging.ERROR)
logging.getLogger('processor').setLevel(logging.ERROR)
logging.getLogger('result').setLevel(logging.ERROR)
logging.getLogger('webui').setLevel(logging.ERROR)
logging.getLogger('werkzeug').setLevel(logging.ERROR)
try:
threads = []
# result worker
result_worker_config = g.config.get('result_worker', {})
for i in range(result_worker_num):
threads.append(run_in(ctx.invoke, result_worker,
result_cls='pyspider.libs.bench.BenchResultWorker',
**result_worker_config))
# processor
processor_config = g.config.get('processor', {})
for i in range(processor_num):
threads.append(run_in(ctx.invoke, processor,
processor_cls='pyspider.libs.bench.BenchProcessor',
**processor_config))
# fetcher
fetcher_config = g.config.get('fetcher', {})
fetcher_config.setdefault('xmlrpc_host', '127.0.0.1')
for i in range(fetcher_num):
threads.append(run_in(ctx.invoke, fetcher,
fetcher_cls='pyspider.libs.bench.BenchFetcher',
**fetcher_config))
# webui
webui_config = g.config.get('webui', {})
webui_config.setdefault('scheduler_rpc', 'http://127.0.0.1:%s/'
% g.config.get('scheduler', {}).get('xmlrpc_port', 23333))
threads.append(run_in(ctx.invoke, webui, **webui_config))
# scheduler
scheduler_config = g.config.get('scheduler', {})
scheduler_config.setdefault('xmlrpc_host', '127.0.0.1')
scheduler_config.setdefault('xmlrpc_port', 23333)
threads.append(run_in(ctx.invoke, scheduler,
scheduler_cls='pyspider.libs.bench.BenchScheduler',
**scheduler_config))
scheduler_rpc = connect_rpc(ctx, None,
'http://%(xmlrpc_host)s:%(xmlrpc_port)s/' % scheduler_config)
for _ in range(20):
if utils.check_port_open(23333):
break
time.sleep(1)
scheduler_rpc.newtask({
"project": project_name,
"taskid": "on_start",
"url": "data:,on_start",
"fetch": {
"save": {"total": total, "show": show}
},
"process": {
"callback": "on_start",
},
})
# wait bench test finished
while True:
time.sleep(1)
if scheduler_rpc.size() == 0:
break
finally:
# exit components run in threading
for each in g.instances:
each.quit()
# exit components run in subprocess
for each in threads:
if hasattr(each, 'terminate'):
each.terminate()
each.join(1)
clear_project()
@cli.command()
@click.option('-i', '--interactive', default=False, is_flag=True,
help='enable interactive mode, you can choose crawl url.')
@click.option('--phantomjs', 'enable_phantomjs', default=False, is_flag=True,
help='enable phantomjs, will spawn a subprocess for phantomjs')
@click.option('--puppeteer', 'enable_puppeteer', default=False, is_flag=True,
help='enable puppeteer, will spawn a subprocess for puppeteer')
@click.argument('scripts', nargs=-1)
@click.pass_context
def one(ctx, interactive, enable_phantomjs, enable_puppeteer, scripts):
"""
    One mode not only means all-in-one: it runs everything in one process over
    tornado.ioloop, for debugging purposes.
"""
ctx.obj['debug'] = False
g = ctx.obj
g['testing_mode'] = True
if scripts:
from pyspider.database.local.projectdb import ProjectDB
g['projectdb'] = ProjectDB(scripts)
if g.get('is_taskdb_default'):
g['taskdb'] = connect_database('sqlite+taskdb://')
if g.get('is_resultdb_default'):
g['resultdb'] = None
if enable_phantomjs:
phantomjs_config = g.config.get('phantomjs', {})
phantomjs_obj = ctx.invoke(phantomjs, **phantomjs_config)
if phantomjs_obj:
g.setdefault('phantomjs_proxy', '127.0.0.1:%s' % phantomjs_obj.port)
else:
phantomjs_obj = None
if enable_puppeteer:
puppeteer_config = g.config.get('puppeteer', {})
puppeteer_obj = ctx.invoke(puppeteer, **puppeteer_config)
if puppeteer_obj:
            g.setdefault('puppeteer_proxy', '127.0.0.1:%s' % puppeteer_obj.port)
else:
puppeteer_obj = None
result_worker_config = g.config.get('result_worker', {})
if g.resultdb is None:
result_worker_config.setdefault('result_cls',
'pyspider.result.OneResultWorker')
result_worker_obj = ctx.invoke(result_worker, **result_worker_config)
processor_config = g.config.get('processor', {})
processor_config.setdefault('enable_stdout_capture', False)
processor_obj = ctx.invoke(processor, **processor_config)
fetcher_config = g.config.get('fetcher', {})
fetcher_config.setdefault('xmlrpc', False)
fetcher_obj = ctx.invoke(fetcher, **fetcher_config)
scheduler_config = g.config.get('scheduler', {})
scheduler_config.setdefault('xmlrpc', False)
scheduler_config.setdefault('scheduler_cls',
'pyspider.scheduler.OneScheduler')
scheduler_obj = ctx.invoke(scheduler, **scheduler_config)
scheduler_obj.init_one(ioloop=fetcher_obj.ioloop,
fetcher=fetcher_obj,
processor=processor_obj,
result_worker=result_worker_obj,
interactive=interactive)
if scripts:
for project in g.projectdb.projects:
scheduler_obj.trigger_on_start(project)
try:
scheduler_obj.run()
finally:
scheduler_obj.quit()
if phantomjs_obj:
phantomjs_obj.quit()
if puppeteer_obj:
puppeteer_obj.quit()
@cli.command()
@click.option('--scheduler-rpc', callback=connect_rpc, help='xmlrpc path of scheduler')
@click.argument('project', nargs=1)
@click.argument('message', nargs=1)
@click.pass_context
def send_message(ctx, scheduler_rpc, project, message):
"""
    Send a message to a project from the command line
"""
if isinstance(scheduler_rpc, six.string_types):
scheduler_rpc = connect_rpc(ctx, None, scheduler_rpc)
if scheduler_rpc is None and os.environ.get('SCHEDULER_NAME'):
scheduler_rpc = connect_rpc(ctx, None, 'http://%s/' % (
os.environ['SCHEDULER_PORT_23333_TCP'][len('tcp://'):]))
if scheduler_rpc is None:
scheduler_rpc = connect_rpc(ctx, None, 'http://127.0.0.1:23333/')
return scheduler_rpc.send_task({
'taskid': utils.md5string('data:,on_message'),
'project': project,
'url': 'data:,on_message',
'fetch': {
'save': ('__command__', message),
},
'process': {
'callback': '_on_message',
}
})
def init_rpc_in_app(ctx, app, scheduler_rpc):
if isinstance(scheduler_rpc, six.string_types):
scheduler_rpc = connect_rpc(ctx, None, scheduler_rpc)
if scheduler_rpc is None and os.environ.get('SCHEDULER_NAME'):
app.config['scheduler_rpc'] = connect_rpc(ctx, None, 'http://%s/' % (
os.environ['SCHEDULER_PORT_23333_TCP'][len('tcp://'):]))
elif scheduler_rpc is None:
app.config['scheduler_rpc'] = connect_rpc(ctx, None, 'http://127.0.0.1:23333/')
else:
app.config['scheduler_rpc'] = scheduler_rpc
def main():
cli()
if __name__ == '__main__':
main()
[record metadata] environment variables: MYSQL_PORT_3306_TCP_PORT, RABBITMQ_NAME, PUPPETEER_PORT_22222_TCP, SCHEDULER_PORT_23333_TCP, MYSQL_PORT_3306_TCP_ADDR, MONGODB_NAME, MYSQL_NAME, PHANTOMJS_PORT_25555_TCP, SCHEDULER_NAME, MONGODB_PORT_27017_TCP_PORT, PUPPETEER_NAME, PHANTOMJS_NAME, MONGODB_PORT_27017_TCP_ADDR | language: python | counts: 13, 0
pipenv/vendor/vistir/misc.py
# -*- coding=utf-8 -*-
from __future__ import absolute_import, unicode_literals
import json
import logging
import locale
import os
import subprocess
import sys
from collections import OrderedDict
from functools import partial
from itertools import islice
import six
from .cmdparse import Script
from .compat import Path, fs_str, partialmethod, to_native_string
from .contextmanagers import spinner as spinner
if os.name != "nt":
class WindowsError(OSError):
pass
__all__ = [
"shell_escape",
"unnest",
"dedup",
"run",
"load_path",
"partialclass",
"to_text",
"to_bytes",
"locale_encoding",
"chunked",
"take",
"divide",
"getpreferredencoding",
"decode_for_output",
]
def _get_logger(name=None, level="ERROR"):
if not name:
name = __name__
if isinstance(level, six.string_types):
level = getattr(logging, level.upper())
logger = logging.getLogger(name)
logger.setLevel(level)
formatter = logging.Formatter(
"%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s"
)
handler = logging.StreamHandler(stream=sys.stderr)
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
def shell_escape(cmd):
"""Escape strings for use in :func:`~subprocess.Popen` and :func:`run`.
This is a passthrough method for instantiating a :class:`~vistir.cmdparse.Script`
object which can be used to escape commands to output as a single string.
"""
cmd = Script.parse(cmd)
return cmd.cmdify()
def unnest(elem):
"""Flatten an arbitrarily nested iterable
:param elem: An iterable to flatten
:type elem: :class:`~collections.Iterable`
>>> nested_iterable = (1234, (3456, 4398345, (234234)), (2396, (23895750, 9283798, 29384, (289375983275, 293759, 2347, (2098, 7987, 27599)))))
>>> list(vistir.misc.unnest(nested_iterable))
[1234, 3456, 4398345, 234234, 2396, 23895750, 9283798, 29384, 289375983275, 293759, 2347, 2098, 7987, 27599]
"""
if _is_iterable(elem):
for item in elem:
if _is_iterable(item):
for sub_item in unnest(item):
yield sub_item
else:
yield item
else:
raise ValueError("Expecting an iterable, got %r" % elem)
def _is_iterable(elem):
if getattr(elem, "__iter__", False):
return True
return False
def dedup(iterable):
"""Deduplicate an iterable object like iter(set(iterable)) but
    order-preserved.
"""
return iter(OrderedDict.fromkeys(iterable))
def _spawn_subprocess(script, env=None, block=True, cwd=None, combine_stderr=True):
from distutils.spawn import find_executable
if not env:
env = os.environ.copy()
command = find_executable(script.command)
options = {
"env": env,
"universal_newlines": True,
"stdout": subprocess.PIPE,
"stderr": subprocess.PIPE if not combine_stderr else subprocess.STDOUT,
"shell": False,
}
if not block:
options["stdin"] = subprocess.PIPE
if cwd:
options["cwd"] = cwd
# Command not found, maybe this is a shell built-in?
cmd = [command] + script.args
if not command: # Try to use CreateProcess directly if possible.
cmd = script.cmdify()
options["shell"] = True
# Try to use CreateProcess directly if possible. Specifically catch
# Windows error 193 "Command is not a valid Win32 application" to handle
# a "command" that is non-executable. See pypa/pipenv#2727.
try:
return subprocess.Popen(cmd, **options)
except WindowsError as e:
if getattr(e, "winerror", 9999) != 193:
raise
options["shell"] = True
# Try shell mode to use Windows's file association for file launch.
return subprocess.Popen(script.cmdify(), **options)
def _create_subprocess(
cmd,
env=None,
block=True,
return_object=False,
cwd=os.curdir,
verbose=False,
spinner=None,
combine_stderr=False,
display_limit=200,
start_text="",
write_to_stdout=True
):
if not env:
env = os.environ.copy()
try:
c = _spawn_subprocess(cmd, env=env, block=block, cwd=cwd,
combine_stderr=combine_stderr)
except Exception as exc:
        sys.stderr.write("Error %s while executing command %s" % (exc, " ".join(cmd._parts)))
raise
if not block:
c.stdin.close()
output = []
err = []
spinner_orig_text = None
if spinner:
spinner_orig_text = getattr(spinner, "text", None)
if spinner_orig_text is None:
spinner_orig_text = start_text if start_text is not None else ""
streams = {
"stdout": c.stdout,
"stderr": c.stderr
}
while True:
stdout_line = None
stderr_line = None
for outstream in streams.keys():
stream = streams[outstream]
if not stream:
continue
line = to_text(stream.readline())
if not line:
continue
line = to_text("{0}".format(line.rstrip()))
if outstream == "stderr":
stderr_line = line
else:
stdout_line = line
if not (stdout_line or stderr_line):
break
if stderr_line is not None:
err.append(stderr_line)
err_line = fs_str("{0}".format(stderr_line))
if verbose and err_line is not None:
if spinner:
spinner.hide_and_write(err_line, target=spinner.stderr)
else:
sys.stderr.write(err_line)
sys.stderr.flush()
if stdout_line is not None:
output.append(stdout_line)
display_line = fs_str("{0}".format(stdout_line))
if len(stdout_line) > display_limit:
display_line = "{0}...".format(stdout_line[:display_limit])
if verbose and display_line is not None:
if spinner:
target = spinner.stdout if write_to_stdout else spinner.stderr
spinner.hide_and_write(display_line, target=target)
else:
target = sys.stdout if write_to_stdout else sys.stderr
target.write(display_line)
target.flush()
if spinner:
spinner.text = to_native_string("{0} {1}".format(spinner_orig_text, display_line))
continue
try:
c.wait()
finally:
if c.stdout:
c.stdout.close()
if c.stderr:
c.stderr.close()
if spinner:
if c.returncode > 0:
spinner.fail(to_native_string("Failed...cleaning up..."))
if not os.name == "nt":
spinner.ok(to_native_string("✔ Complete"))
else:
spinner.ok(to_native_string("Complete"))
c.out = "\n".join(output) if output else ""
c.err = "\n".join(err) if err else ""
else:
c.out, c.err = c.communicate()
if not block:
c.wait()
c.out = to_text("{0}".format(c.out)) if c.out else fs_str("")
c.err = to_text("{0}".format(c.err)) if c.err else fs_str("")
if not return_object:
return c.out.strip(), c.err.strip()
return c
def run(
cmd,
env=None,
return_object=False,
block=True,
cwd=None,
verbose=False,
nospin=False,
spinner_name=None,
combine_stderr=True,
display_limit=200,
write_to_stdout=True
):
"""Use `subprocess.Popen` to get the output of a command and decode it.
:param list cmd: A list representing the command you want to run.
:param dict env: Additional environment settings to pass through to the subprocess.
:param bool return_object: When True, returns the whole subprocess instance
:param bool block: When False, returns a potentially still-running :class:`subprocess.Popen` instance
    :param str cwd: Current working directory context to use for spawning the subprocess.
:param bool verbose: Whether to print stdout in real time when non-blocking.
:param bool nospin: Whether to disable the cli spinner.
:param str spinner_name: The name of the spinner to use if enabled, defaults to bouncingBar
:param bool combine_stderr: Optionally merge stdout and stderr in the subprocess, false if nonblocking.
    :param int display_limit: The max width of output lines to display when using a spinner.
:param bool write_to_stdout: Whether to write to stdout when using a spinner, default True.
:returns: A 2-tuple of (output, error) or a :class:`subprocess.Popen` object.
    .. Warning:: Merging standard out and standard error in a nonblocking subprocess
can cause errors in some cases and may not be ideal. Consider disabling
this functionality.
"""
_env = os.environ.copy()
if env:
_env.update(env)
env = _env
if six.PY2:
fs_encode = partial(to_bytes, encoding=locale_encoding)
_env = {fs_encode(k): fs_encode(v) for k, v in os.environ.items()}
for key, val in env.items():
_env[fs_encode(key)] = fs_encode(val)
else:
        _env = {k: fs_str(v) for k, v in env.items()}  # include caller-supplied env, not just os.environ
if not spinner_name:
spinner_name = "bouncingBar"
if six.PY2:
if isinstance(cmd, six.string_types):
cmd = cmd.encode("utf-8")
elif isinstance(cmd, (list, tuple)):
cmd = [c.encode("utf-8") for c in cmd]
if not isinstance(cmd, Script):
cmd = Script.parse(cmd)
if block or not return_object:
combine_stderr = False
start_text = ""
with spinner(spinner_name=spinner_name, start_text=start_text, nospin=nospin,
write_to_stdout=write_to_stdout) as sp:
return _create_subprocess(
cmd,
env=_env,
return_object=return_object,
block=block,
cwd=cwd,
verbose=verbose,
spinner=sp,
combine_stderr=combine_stderr,
start_text=start_text,
write_to_stdout=True
)
def load_path(python):
"""Load the :mod:`sys.path` from the given python executable's environment as json
:param str python: Path to a valid python executable
:return: A python representation of the `sys.path` value of the given python executable.
:rtype: list
>>> load_path("/home/user/.virtualenvs/requirementslib-5MhGuG3C/bin/python")
['', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python37.zip', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python3.7', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python3.7/lib-dynload', '/home/user/.pyenv/versions/3.7.0/lib/python3.7', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python3.7/site-packages', '/home/user/git/requirementslib/src']
"""
python = Path(python).as_posix()
out, err = run([python, "-c", "import json, sys; print(json.dumps(sys.path))"],
nospin=True)
if out:
return json.loads(out)
else:
return []
def partialclass(cls, *args, **kwargs):
"""Returns a partially instantiated class
:return: A partial class instance
:rtype: cls
>>> source = partialclass(Source, url="https://pypi.org/simple")
>>> source
<class '__main__.Source'>
>>> source(name="pypi")
>>> source.__dict__
mappingproxy({'__module__': '__main__', '__dict__': <attribute '__dict__' of 'Source' objects>, '__weakref__': <attribute '__weakref__' of 'Source' objects>, '__doc__': None, '__init__': functools.partialmethod(<function Source.__init__ at 0x7f23af429bf8>, , url='https://pypi.org/simple')})
>>> new_source = source(name="pypi")
>>> new_source
<__main__.Source object at 0x7f23af189b38>
>>> new_source.__dict__
{'url': 'https://pypi.org/simple', 'verify_ssl': True, 'name': 'pypi'}
"""
name_attrs = [
n
for n in (getattr(cls, name, str(cls)) for name in ("__name__", "__qualname__"))
if n is not None
]
name_attrs = name_attrs[0]
type_ = type(
name_attrs, (cls,), {"__init__": partialmethod(cls.__init__, *args, **kwargs)}
)
# Swiped from attrs.make_class
try:
type_.__module__ = sys._getframe(1).f_globals.get("__name__", "__main__")
except (AttributeError, ValueError):
pass
return type_
# Borrowed from django -- force bytes and decode -- see link for details:
# https://github.com/django/django/blob/fc6b90b/django/utils/encoding.py#L112
def to_bytes(string, encoding="utf-8", errors="ignore"):
"""Force a value to bytes.
:param string: Some input that can be converted to a bytes.
:type string: str or bytes unicode or a memoryview subclass
:param encoding: The encoding to use for conversions, defaults to "utf-8"
:param encoding: str, optional
:return: Corresponding byte representation (for use in filesystem operations)
:rtype: bytes
"""
if not errors:
if encoding.lower() == "utf-8":
errors = "surrogateescape" if six.PY3 else "ignore"
else:
errors = "strict"
if isinstance(string, bytes):
if encoding.lower() == "utf-8":
return string
else:
return string.decode("utf-8").encode(encoding, errors)
elif isinstance(string, memoryview):
return bytes(string)
elif not isinstance(string, six.string_types):
try:
if six.PY3:
return six.text_type(string).encode(encoding, errors)
else:
return bytes(string)
except UnicodeEncodeError:
if isinstance(string, Exception):
return b" ".join(to_bytes(arg, encoding, errors) for arg in string)
return six.text_type(string).encode(encoding, errors)
else:
return string.encode(encoding, errors)
def to_text(string, encoding="utf-8", errors=None):
"""Force a value to a text-type.
:param string: Some input that can be converted to a unicode representation.
:type string: str or bytes unicode
:param encoding: The encoding to use for conversions, defaults to "utf-8"
:param encoding: str, optional
:return: The unicode representation of the string
:rtype: str
"""
if not errors:
if encoding.lower() == "utf-8":
errors = "surrogateescape" if six.PY3 else "ignore"
else:
errors = "strict"
if issubclass(type(string), six.text_type):
return string
try:
if not issubclass(type(string), six.string_types):
if six.PY3:
if isinstance(string, bytes):
string = six.text_type(string, encoding, errors)
else:
string = six.text_type(string)
elif hasattr(string, "__unicode__"):
string = six.text_type(string)
else:
string = six.text_type(bytes(string), encoding, errors)
else:
string = string.decode(encoding, errors)
except UnicodeDecodeError as e:
string = " ".join(to_text(arg, encoding, errors) for arg in string)
return string
def divide(n, iterable):
"""
    Split an iterable into n groups, per https://more-itertools.readthedocs.io/en/latest/api.html#grouping
:param int n: Number of unique groups
:param iter iterable: An iterable to split up
:return: a list of new iterables derived from the original iterable
:rtype: list
"""
seq = tuple(iterable)
q, r = divmod(len(seq), n)
ret = []
for i in range(n):
start = (i * q) + (i if i < r else r)
stop = ((i + 1) * q) + (i + 1 if i + 1 < r else r)
ret.append(iter(seq[start:stop]))
return ret
def take(n, iterable):
"""Take n elements from the supplied iterable without consuming it.
:param int n: Number of unique groups
:param iter iterable: An iterable to split up
from https://github.com/erikrose/more-itertools/blob/master/more_itertools/recipes.py
"""
return list(islice(iterable, n))
def chunked(n, iterable):
"""Split an iterable into lists of length *n*.
:param int n: Number of unique groups
:param iter iterable: An iterable to split up
from https://github.com/erikrose/more-itertools/blob/master/more_itertools/more.py
"""
return iter(partial(take, n, iter(iterable)), [])
try:
    locale_encoding = locale.getdefaultlocale()[1] or "ascii"
except Exception:
locale_encoding = "ascii"
def getpreferredencoding():
"""Determine the proper output encoding for terminal rendering"""
# Borrowed from Invoke
# (see https://github.com/pyinvoke/invoke/blob/93af29d/invoke/runners.py#L881)
_encoding = locale.getpreferredencoding(False)
if six.PY2 and not sys.platform == "win32":
_default_encoding = locale.getdefaultlocale()[1]
if _default_encoding is not None:
_encoding = _default_encoding
return _encoding
PREFERRED_ENCODING = getpreferredencoding()
def decode_for_output(output):
"""Given a string, decode it for output to a terminal
:param str output: A string to print to a terminal
:return: A re-encoded string using the preferred encoding
:rtype: str
"""
if not isinstance(output, six.string_types):
return output
try:
output = output.encode(PREFERRED_ENCODING)
except AttributeError:
pass
output = output.decode(PREFERRED_ENCODING)
return output
[record metadata] environment variables: none | language: python | counts: 0, 0
controllerconfig/controllerconfig/controllerconfig/upgrades/utils.py
#
# Copyright (c) 2016-2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
#
# This file contains common upgrades functions that can be used by both sysinv
# and during the upgrade of controller-1.
#
import keyring
import os
import psycopg2
from psycopg2.extras import RealDictCursor
import subprocess
import tempfile
import yaml
import netaddr
# WARNING: The controller-1 upgrade is done before any puppet manifests
# have been applied, so only the static entries from tsconfig can be used.
# (the platform.conf file will not have been updated with dynamic values).
from tsconfig.tsconfig import SW_VERSION
from tsconfig.tsconfig import PLATFORM_PATH
from controllerconfig import utils as cutils
from controllerconfig.common import constants
from sysinv.common import constants as sysinv_constants
from sysinv.common import utils as sysinv_utils
from oslo_log import log
LOG = log.getLogger(__name__)
POSTGRES_PATH = '/var/lib/postgresql'
POSTGRES_DATA_DIR = os.path.join(POSTGRES_PATH, SW_VERSION)
RABBIT_PATH = '/var/lib/rabbitmq'
CONTROLLER_1_HOSTNAME = "controller-1"
DB_CONNECTION = "postgresql://%s:%[email protected]/%s\n"
KUBERNETES_CONF_PATH = "/etc/kubernetes"
KUBERNETES_ADMIN_CONF_FILE = "admin.conf"
# well-known default domain name
DEFAULT_DOMAIN_NAME = 'Default'
# Migration script actions
ACTION_START = "start"
ACTION_MIGRATE = "migrate"
ACTION_ACTIVATE = "activate"
def execute_migration_scripts(from_release, to_release, action,
migration_script_dir="/etc/upgrade.d"):
""" Execute migration scripts with an action:
start: Prepare for upgrade on release N side. Called during
"system upgrade-start".
migrate: Perform data migration on release N+1 side. Called while
controller-1 is performing its upgrade.
"""
devnull = open(os.devnull, 'w')
LOG.info("Executing migration scripts with from_release: %s, "
"to_release: %s, action: %s" % (from_release, to_release, action))
# Get a sorted list of all the migration scripts
# Exclude any files that can not be executed, including .pyc and .pyo files
files = [f for f in os.listdir(migration_script_dir)
if os.path.isfile(os.path.join(migration_script_dir, f)) and
os.access(os.path.join(migration_script_dir, f), os.X_OK)]
# From file name, get the number to sort the calling sequence,
# abort when the file name format does not follow the pattern
# "nnn-*.*", where "nnn" string shall contain only digits, corresponding
# to a valid unsigned integer (first sequence of characters before "-")
try:
files.sort(key=lambda x: int(x.split("-")[0]))
except Exception:
LOG.exception("Migration script sequence validation failed, invalid "
"file name format")
raise
# Execute each migration script
for f in files:
migration_script = os.path.join(migration_script_dir, f)
try:
LOG.info("Executing migration script %s" % migration_script)
subprocess.check_call([migration_script,
from_release,
to_release,
action],
stdout=devnull, stderr=devnull)
except subprocess.CalledProcessError as e:
LOG.exception("Migration script %s failed with returncode %d" %
(migration_script, e.returncode))
# Abort when a migration script fails
raise e
def get_db_connection(hiera_db_records, database):
username = hiera_db_records[database]['username']
password = hiera_db_records[database]['password']
return "postgresql://%s:%s@%s/%s" % (
username, password, 'localhost', database)
def get_password_from_keyring(service, username):
"""Retrieve password from keyring"""
password = ""
os.environ["XDG_DATA_HOME"] = constants.KEYRING_PERMDIR
try:
password = keyring.get_password(service, username)
except Exception as e:
LOG.exception("Received exception when attempting to get password "
"for service %s, username %s: %s" %
(service, username, e))
raise
finally:
del os.environ["XDG_DATA_HOME"]
return password
def set_password_in_keyring(service, username):
"""Generate random password and store in keyring"""
os.environ["XDG_DATA_HOME"] = constants.KEYRING_PERMDIR
try:
password = sysinv_utils.generate_random_password(length=16)
keyring.set_password(service, username, password)
except Exception as e:
LOG.exception("Received exception when attempting to generate "
"password for service %s, username %s: %s" %
(service, username, e))
raise
finally:
del os.environ["XDG_DATA_HOME"]
return password
def get_upgrade_token(from_release,
config,
secure_config):
# Get the system hiera data from the from release
from_hiera_path = os.path.join(PLATFORM_PATH, "puppet", from_release,
"hieradata")
system_file = os.path.join(from_hiera_path, "system.yaml")
with open(system_file, 'r') as file:
system_config = yaml.load(file)
# during a controller-1 upgrade, keystone is running
# on the controller UNIT IP, however the service catalog
# that was migrated from controller-0 since lists the
# floating controller IP. Keystone operations that use
# the AUTH URL will hit this service URL and fail,
# therefore we have to issue an Upgrade token for
# all Keystone operations during an Upgrade. This token
# will allow us to circumvent the service catalog entry, by
# providing a bypass endpoint.
keystone_upgrade_url = "http://{}:5000/{}".format(
'127.0.0.1',
system_config['openstack::keystone::params::api_version'])
admin_user_domain = system_config.get(
'platform::client::params::admin_user_domain')
if admin_user_domain is None:
# This value wasn't present in R2. So may be missing in upgrades from
# that release
LOG.info("platform::client::params::admin_user_domain key not found. "
"Using Default.")
admin_user_domain = DEFAULT_DOMAIN_NAME
admin_project_domain = system_config.get(
'platform::client::params::admin_project_domain')
if admin_project_domain is None:
# This value wasn't present in R2. So may be missing in upgrades from
# that release
LOG.info("platform::client::params::admin_project_domain key not "
"found. Using Default.")
admin_project_domain = DEFAULT_DOMAIN_NAME
admin_password = get_password_from_keyring("CGCS", "admin")
admin_username = system_config.get(
'platform::client::params::admin_username')
# the upgrade token command
keystone_upgrade_token = (
"openstack "
"--os-username {} "
"--os-password '{}' "
"--os-auth-url {} "
"--os-project-name admin "
"--os-user-domain-name {} "
"--os-project-domain-name {} "
"--os-interface internal "
"--os-identity-api-version 3 "
"token issue -c id -f value".format(
admin_username,
admin_password,
keystone_upgrade_url,
admin_user_domain,
admin_project_domain
))
config.update({
'openstack::keystone::upgrade::upgrade_token_file':
'/etc/keystone/upgrade_token',
'openstack::keystone::upgrade::url': keystone_upgrade_url
})
secure_config.update({
'openstack::keystone::upgrade::upgrade_token_cmd':
keystone_upgrade_token,
})
def get_upgrade_data(from_release,
system_config,
secure_config):
""" Retrieve required data from the from-release, update system_config
and secure_config with them.
This function is needed for adding new service account and endpoints
during upgrade.
"""
# Get the system hiera data from the from release
from_hiera_path = os.path.join(PLATFORM_PATH, "puppet", from_release,
"hieradata")
system_file = os.path.join(from_hiera_path, "system.yaml")
with open(system_file, 'r') as file:
system_config_from_release = yaml.load(file)
# Get keystone region
keystone_region = system_config_from_release.get(
'keystone::endpoint::region')
system_config.update({
'platform::client::params::identity_region': keystone_region,
# Retrieve keystone::auth::region from the from-release for the new
# service.
# 'newservice::keystone::auth::region': keystone_region,
})
# Generate password for the new service
# password = sysinv_utils.generate_random_password(16)
secure_config.update({
# Generate and set the keystone::auth::password for the new service.
# 'newservice::keystone::auth::password': password,
})
def add_upgrade_entries_to_hiera_data(from_release):
""" Adds upgrade entries to the hiera data """
filename = 'static.yaml'
secure_filename = 'secure_static.yaml'
path = constants.HIERADATA_PERMDIR
# Get the hiera data for this release
filepath = os.path.join(path, filename)
with open(filepath, 'r') as file:
config = yaml.load(file)
secure_filepath = os.path.join(path, secure_filename)
with open(secure_filepath, 'r') as file:
secure_config = yaml.load(file)
# File for system.yaml
# This is needed for adding new service account and endpoints
# during upgrade.
system_filename = 'system.yaml'
system_filepath = os.path.join(path, system_filename)
# Get a token and update the config
get_upgrade_token(from_release, config, secure_config)
# Get required data from the from-release and add them in system.yaml.
# We don't carry system.yaml from the from-release.
# This is needed for adding new service account and endpoints
# during upgrade.
system_config = {}
get_upgrade_data(from_release, system_config, secure_config)
# Update the hiera data on disk
try:
fd, tmppath = tempfile.mkstemp(dir=path, prefix=filename,
text=True)
with open(tmppath, 'w') as f:
yaml.dump(config, f, default_flow_style=False)
os.close(fd)
os.rename(tmppath, filepath)
except Exception:
LOG.exception("failed to write config file: %s" % filepath)
raise
try:
fd, tmppath = tempfile.mkstemp(dir=path, prefix=secure_filename,
text=True)
with open(tmppath, 'w') as f:
yaml.dump(secure_config, f, default_flow_style=False)
os.close(fd)
os.rename(tmppath, secure_filepath)
except Exception:
LOG.exception("failed to write secure config: %s" % secure_filepath)
raise
# Add required hiera data into system.yaml.
# This is needed for adding new service account and endpoints
# during upgrade.
try:
fd, tmppath = tempfile.mkstemp(dir=path, prefix=system_filename,
text=True)
with open(tmppath, 'w') as f:
yaml.dump(system_config, f, default_flow_style=False)
os.close(fd)
os.rename(tmppath, system_filepath)
except Exception:
LOG.exception("failed to write system config: %s" % system_filepath)
raise
def create_simplex_runtime_config(filename):
""" Create any runtime parameters needed for simplex upgrades"""
config = {}
# Here is an example from a previous release...
# config.update({'nova::db::sync_api::cellv2_setup': False})
cutils.create_manifest_runtime_config(filename, config)
def apply_upgrade_manifest(controller_address):
"""Apply puppet upgrade manifest files."""
cmd = [
"/usr/local/bin/puppet-manifest-apply.sh",
constants.HIERADATA_PERMDIR,
str(controller_address),
sysinv_constants.CONTROLLER,
'upgrade'
]
logfile = "/tmp/apply_manifest.log"
try:
with open(logfile, "w") as flog:
subprocess.check_call(cmd, stdout=flog, stderr=flog)
except subprocess.CalledProcessError:
msg = "Failed to execute upgrade manifest"
print(msg)
raise Exception(msg)
def format_url_address(address):
"""Format the URL address according to RFC 2732"""
try:
addr = netaddr.IPAddress(address)
if addr.version == sysinv_constants.IPV6_FAMILY:
return "[%s]" % address
else:
return str(address)
except netaddr.AddrFormatError:
return address
def get_keystone_user_id(user_name):
""" Get the a keystone user id by name"""
conn = psycopg2.connect("dbname='keystone' user='postgres'")
with conn:
with conn.cursor(cursor_factory=RealDictCursor) as cur:
cur.execute("SELECT user_id FROM local_user WHERE name='%s'" %
user_name)
user_id = cur.fetchone()
if user_id is not None:
return user_id['user_id']
else:
return user_id
def get_keystone_project_id(project_name):
""" Get the a keystone project id by name"""
conn = psycopg2.connect("dbname='keystone' user='postgres'")
with conn:
with conn.cursor(cursor_factory=RealDictCursor) as cur:
cur.execute("SELECT id FROM project WHERE name='%s'" %
project_name)
project_id = cur.fetchone()
if project_id is not None:
return project_id['id']
else:
return project_id
[record metadata] environment variables: XDG_DATA_HOME | language: python | counts: 1, 0
services/horizon/internal/test/integration/integration.go
//lint:file-ignore U1001 Ignore all unused code, this is only used in tests.
package integration
import (
"context"
"fmt"
"io/ioutil"
"os"
"os/exec"
"os/signal"
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
"testing"
"time"
"github.com/spf13/cobra"
"github.com/stellar/go/services/horizon/internal/ingest"
"github.com/stretchr/testify/assert"
sdk "github.com/stellar/go/clients/horizonclient"
"github.com/stellar/go/clients/stellarcore"
"github.com/stellar/go/keypair"
proto "github.com/stellar/go/protocols/horizon"
horizon "github.com/stellar/go/services/horizon/internal"
"github.com/stellar/go/support/db/dbtest"
"github.com/stellar/go/support/errors"
"github.com/stellar/go/txnbuild"
"github.com/stellar/go/xdr"
)
const (
StandaloneNetworkPassphrase = "Standalone Network ; February 2017"
stellarCorePostgresPassword = "mysecretpassword"
adminPort = 6060
stellarCorePort = 11626
stellarCorePostgresPort = 5641
historyArchivePort = 1570
)
var (
RunWithCaptiveCore = os.Getenv("HORIZON_INTEGRATION_ENABLE_CAPTIVE_CORE") != ""
RunWithCaptiveCoreUseDB = os.Getenv("HORIZON_INTEGRATION_ENABLE_CAPTIVE_CORE_USE_DB") != ""
)
type Config struct {
PostgresURL string
ProtocolVersion uint32
SkipContainerCreation bool
CoreDockerImage string
// Weird naming here because bools default to false, but we want to start
// Horizon by default.
SkipHorizonStart bool
// If you want to override the default parameters passed to Horizon, you can
// set this map accordingly. All of them are passed along as --k=v, but if
// you pass an empty value, the parameter will be dropped. (Note that you
// should exclude the prepending `--` from keys; this is for compatibility
// with the constant names in flags.go)
//
// You can also control the environmental variables in a similar way, but
// note that CLI args take precedence over envvars, so set the corresponding
// CLI arg empty.
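	//
	// A minimal illustrative sketch (keys and values here are hypothetical):
	//   Config{
	//       HorizonParameters:  map[string]string{"admin-port": ""},     // drop --admin-port
	//       HorizonEnvironment: map[string]string{"LOG_LEVEL": "debug"}, // exported before start
	//   }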
HorizonParameters map[string]string
HorizonEnvironment map[string]string
}
type CaptiveConfig struct {
binaryPath string
configPath string
useDB bool
}
type Test struct {
t *testing.T
composePath string
config Config
coreConfig CaptiveConfig
horizonConfig horizon.Config
environment *EnvironmentManager
horizonClient *sdk.Client
coreClient *stellarcore.Client
app *horizon.App
appStopped chan struct{}
shutdownOnce sync.Once
shutdownCalls []func()
masterKey *keypair.Full
passPhrase string
}
func NewTestForRemoteHorizon(t *testing.T, horizonURL string, passPhrase string, masterKey *keypair.Full) *Test {
return &Test{
t: t,
horizonClient: &sdk.Client{HorizonURL: horizonURL},
masterKey: masterKey,
passPhrase: passPhrase,
}
}
// NewTest starts a new environment for integration test at a given
// protocol version and blocks until Horizon starts ingesting.
//
// Skips the test if HORIZON_INTEGRATION_TESTS env variable is not set.
//
// WARNING: This requires Docker Compose installed.
func NewTest(t *testing.T, config Config) *Test {
if os.Getenv("HORIZON_INTEGRATION_TESTS") == "" {
t.Skip("skipping integration test: HORIZON_INTEGRATION_TESTS not set")
}
	// If not specified explicitly, set the protocol to the maximum supported version
if config.ProtocolVersion == 0 {
config.ProtocolVersion = ingest.MaxSupportedProtocolVersion
}
composePath := findDockerComposePath()
i := &Test{
t: t,
config: config,
composePath: composePath,
passPhrase: StandaloneNetworkPassphrase,
environment: NewEnvironmentManager(),
}
i.configureCaptiveCore()
// Only run Stellar Core container and its dependencies.
i.runComposeCommand("up", "--detach", "--quiet-pull", "--no-color", "core")
i.prepareShutdownHandlers()
i.coreClient = &stellarcore.Client{URL: "http://localhost:" + strconv.Itoa(stellarCorePort)}
i.waitForCore()
if !config.SkipHorizonStart {
if innerErr := i.StartHorizon(); innerErr != nil {
t.Fatalf("Failed to start Horizon: %v", innerErr)
}
i.WaitForHorizon()
}
return i
}
func (i *Test) configureCaptiveCore() {
// We either test Captive Core through environment variables or through
// custom Horizon parameters.
if RunWithCaptiveCore {
composePath := findDockerComposePath()
i.coreConfig.binaryPath = os.Getenv("CAPTIVE_CORE_BIN")
i.coreConfig.configPath = filepath.Join(composePath, "captive-core-integration-tests.cfg")
if RunWithCaptiveCoreUseDB {
i.coreConfig.useDB = true
}
}
if value := i.getParameter(
horizon.StellarCoreBinaryPathName,
"STELLAR_CORE_BINARY_PATH",
); value != "" {
i.coreConfig.binaryPath = value
}
if value := i.getParameter(
horizon.CaptiveCoreConfigPathName,
"CAPTIVE_CORE_CONFIG_PATH",
); value != "" {
i.coreConfig.configPath = value
}
}
func (i *Test) getParameter(argName, envName string) string {
if value, ok := i.config.HorizonEnvironment[envName]; ok {
return value
}
if value, ok := i.config.HorizonParameters[argName]; ok {
return value
}
return ""
}
// Runs a docker-compose command applied to the above configs
func (i *Test) runComposeCommand(args ...string) {
integrationYaml := filepath.Join(i.composePath, "docker-compose.integration-tests.yml")
cmdline := append([]string{"-f", integrationYaml}, args...)
cmd := exec.Command("docker-compose", cmdline...)
if i.config.CoreDockerImage != "" {
cmd.Env = append(
os.Environ(),
fmt.Sprintf("CORE_IMAGE=%s", i.config.CoreDockerImage),
)
}
i.t.Log("Running", cmd.Env, cmd.Args)
out, innerErr := cmd.Output()
if exitErr, ok := innerErr.(*exec.ExitError); ok {
fmt.Printf("stdout:\n%s\n", string(out))
fmt.Printf("stderr:\n%s\n", string(exitErr.Stderr))
}
if innerErr != nil {
i.t.Fatalf("Compose command failed: %v", innerErr)
}
}
func (i *Test) prepareShutdownHandlers() {
i.shutdownCalls = append(i.shutdownCalls,
func() {
if i.app != nil {
i.app.Close()
}
i.runComposeCommand("rm", "-fvs", "core")
i.runComposeCommand("rm", "-fvs", "core-postgres")
},
i.environment.Restore,
)
// Register cleanup handlers (on panic and ctrl+c) so the containers are
// stopped even if ingestion or testing fails.
i.t.Cleanup(i.Shutdown)
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
go func() {
<-c
i.Shutdown()
os.Exit(int(syscall.SIGTERM))
}()
}
func (i *Test) RestartHorizon() error {
i.StopHorizon()
if err := i.StartHorizon(); err != nil {
return err
}
i.WaitForHorizon()
return nil
}
func (i *Test) GetHorizonConfig() horizon.Config {
return i.horizonConfig
}
// Shutdown stops the integration tests and destroys all its associated
// resources. It will be implicitly called when the calling test (i.e. the
// `testing.Test` passed to `New()`) is finished if it hasn't been explicitly
// called before.
func (i *Test) Shutdown() {
i.shutdownOnce.Do(func() {
// run them in the opposite order in which they where added
for callI := len(i.shutdownCalls) - 1; callI >= 0; callI-- {
i.shutdownCalls[callI]()
}
})
}
func (i *Test) StartHorizon() error {
horizonPostgresURL := i.config.PostgresURL
if horizonPostgresURL == "" {
postgres := dbtest.Postgres(i.t)
i.shutdownCalls = append(i.shutdownCalls, func() {
// FIXME: Unfortunately, Horizon leaves open sessions behind,
// leading to a "database is being accessed by other users"
// error when trying to drop it.
// postgres.Close()
})
horizonPostgresURL = postgres.DSN
}
config, configOpts := horizon.Flags()
cmd := &cobra.Command{
Use: "horizon",
Short: "Client-facing API server for the Stellar network",
Long: "Client-facing API server for the Stellar network.",
Run: func(cmd *cobra.Command, args []string) {
var err error
i.app, err = horizon.NewAppFromFlags(config, configOpts)
if err != nil {
// Explicitly exit here as that's how these tests are structured for now.
fmt.Println(err)
os.Exit(1)
}
},
}
// To facilitate custom runs of Horizon, we merge a default set of
// parameters with the tester-supplied ones (if any).
//
// TODO: Ideally, we'd be pulling host/port information from the Docker
// Compose YAML file itself rather than hardcoding it.
hostname := "localhost"
coreBinaryPath := i.coreConfig.binaryPath
captiveCoreConfigPath := i.coreConfig.configPath
captiveCoreUseDB := strconv.FormatBool(i.coreConfig.useDB)
defaultArgs := map[string]string{
"stellar-core-url": i.coreClient.URL,
"stellar-core-db-url": fmt.Sprintf(
"postgres://postgres:%s@%s:%d/stellar?sslmode=disable",
stellarCorePostgresPassword,
hostname,
stellarCorePostgresPort,
),
"stellar-core-binary-path": coreBinaryPath,
"captive-core-config-path": captiveCoreConfigPath,
"captive-core-http-port": "21626",
"captive-core-use-db": captiveCoreUseDB,
"enable-captive-core-ingestion": strconv.FormatBool(len(coreBinaryPath) > 0),
"ingest": "true",
"history-archive-urls": fmt.Sprintf("http://%s:%d", hostname, historyArchivePort),
"db-url": horizonPostgresURL,
"network-passphrase": i.passPhrase,
"apply-migrations": "true",
"admin-port": strconv.Itoa(i.AdminPort()),
"port": "8000",
// due to ARTIFICIALLY_ACCELERATE_TIME_FOR_TESTING
"checkpoint-frequency": "8",
"per-hour-rate-limit": "0", // disable rate limiting
}
merged := MergeMaps(defaultArgs, i.config.HorizonParameters)
args := mapToFlags(merged)
// initialize core arguments
i.t.Log("Horizon command line:", args)
var env strings.Builder
for key, value := range i.config.HorizonEnvironment {
env.WriteString(fmt.Sprintf("%s=%s ", key, value))
}
i.t.Logf("Horizon environmental variables: %s\n", env.String())
// prepare env
cmd.SetArgs(args)
for key, value := range i.config.HorizonEnvironment {
innerErr := i.environment.Add(key, value)
if innerErr != nil {
return errors.Wrap(innerErr, fmt.Sprintf(
"failed to set envvar (%s=%s)", key, value))
}
}
var err error
if err = configOpts.Init(cmd); err != nil {
return errors.Wrap(err, "cannot initialize params")
}
if err = cmd.Execute(); err != nil {
return errors.Wrap(err, "cannot initialize Horizon")
}
horizonPort := "8000"
if port, ok := merged["--port"]; ok {
horizonPort = port
}
i.horizonConfig = *config
i.horizonClient = &sdk.Client{
HorizonURL: fmt.Sprintf("http://%s:%s", hostname, horizonPort),
}
done := make(chan struct{})
go func() {
i.app.Serve()
close(done)
}()
i.appStopped = done
return nil
}
// Wait for core to be up and manually close the first ledger
func (i *Test) waitForCore() {
i.t.Log("Waiting for core to be up...")
for t := 30 * time.Second; t >= 0; t -= time.Second {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
_, err := i.coreClient.Info(ctx)
cancel()
if err != nil {
i.t.Logf("could not obtain info response: %v", err)
time.Sleep(time.Second)
continue
}
break
}
i.UpgradeProtocol(i.config.ProtocolVersion)
for t := 0; t < 5; t++ {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
info, err := i.coreClient.Info(ctx)
cancel()
if err != nil || !info.IsSynced() {
i.t.Logf("Core is still not synced: %v %v", err, info)
time.Sleep(time.Second)
continue
}
i.t.Log("Core is up.")
return
}
i.t.Fatal("Core could not sync after 30s")
}
// UpgradeProtocol arms Core with upgrade and blocks until protocol is upgraded.
func (i *Test) UpgradeProtocol(version uint32) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
err := i.coreClient.Upgrade(ctx, int(version))
cancel()
if err != nil {
i.t.Fatalf("could not upgrade protocol: %v", err)
}
for t := 0; t < 10; t++ {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
info, err := i.coreClient.Info(ctx)
cancel()
if err != nil {
i.t.Logf("could not obtain info response: %v", err)
time.Sleep(time.Second)
continue
}
if info.Info.Ledger.Version == int(version) {
i.t.Logf("Protocol upgraded to: %d", info.Info.Ledger.Version)
return
}
time.Sleep(time.Second)
}
i.t.Fatalf("could not upgrade protocol in 10s")
}
func (i *Test) WaitForHorizon() {
for t := 60; t >= 0; t -= 1 {
time.Sleep(time.Second)
i.t.Log("Waiting for ingestion and protocol upgrade...")
root, err := i.horizonClient.Root()
if err != nil {
i.t.Logf("could not obtain root response %v", err)
continue
}
if root.HorizonSequence < 3 ||
int(root.HorizonSequence) != int(root.IngestSequence) {
i.t.Logf("Horizon ingesting... %v", root)
continue
}
if uint32(root.CurrentProtocolVersion) == i.config.ProtocolVersion {
i.t.Logf("Horizon protocol version matches... %v", root)
return
}
}
i.t.Fatal("Horizon not ingesting...")
}
// Client returns horizon.Client connected to started Horizon instance.
func (i *Test) Client() *sdk.Client {
return i.horizonClient
}
// Horizon returns the horizon.App instance for the current integration test
func (i *Test) Horizon() *horizon.App {
return i.app
}
// StopHorizon shuts down the running Horizon process
func (i *Test) StopHorizon() {
i.app.CloseDB()
i.app.Close()
// Wait for Horizon to shut down completely.
<-i.appStopped
i.app = nil
}
// AdminPort returns Horizon admin port.
func (i *Test) AdminPort() int {
return adminPort
}
// MetricsURL returns the Horizon metrics URL.
func (i *Test) MetricsURL() string {
return fmt.Sprintf("http://localhost:%d/metrics", i.AdminPort())
}
// Master returns a keypair of the network masterKey account.
func (i *Test) Master() *keypair.Full {
if i.masterKey != nil {
return i.masterKey
}
return keypair.Master(i.passPhrase).(*keypair.Full)
}
func (i *Test) MasterAccount() txnbuild.Account {
master, client := i.Master(), i.Client()
request := sdk.AccountRequest{AccountID: master.Address()}
account, err := client.AccountDetail(request)
panicIf(err)
return &account
}
func (i *Test) CurrentTest() *testing.T {
return i.t
}
/* Utility functions for easier test case creation. */
// Creates new accounts via the master account.
//
// It funds each account with the given balance and then queries the API to
// find the randomized sequence number for future operations.
//
// Returns: The slice of created keypairs and account objects.
//
// Note: panics on any errors, since we assume that tests cannot proceed without
// this method succeeding.
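//
// Illustrative usage (values are hypothetical):
//   kps, accounts := itest.CreateAccounts(2, "1000")
// funds two new accounts with 1000 XLM each from the master account.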
func (i *Test) CreateAccounts(count int, initialBalance string) ([]*keypair.Full, []txnbuild.Account) {
client := i.Client()
master := i.Master()
pairs := make([]*keypair.Full, count)
ops := make([]txnbuild.Operation, count)
// Two paths here: either caller already did some stuff with the master
// account so we should retrieve the sequence number, or caller hasn't and
// we start from scratch.
seq := int64(0)
request := sdk.AccountRequest{AccountID: master.Address()}
account, err := client.AccountDetail(request)
if err == nil {
seq, err = strconv.ParseInt(account.Sequence, 10, 64) // str -> bigint
panicIf(err)
}
masterAccount := txnbuild.SimpleAccount{
AccountID: master.Address(),
Sequence: seq,
}
for i := 0; i < count; i++ {
pair, _ := keypair.Random()
pairs[i] = pair
ops[i] = &txnbuild.CreateAccount{
SourceAccount: masterAccount.AccountID,
Destination: pair.Address(),
Amount: initialBalance,
}
}
// Submit transaction, then retrieve new account details.
_ = i.MustSubmitOperations(&masterAccount, master, ops...)
accounts := make([]txnbuild.Account, count)
for i, kp := range pairs {
request := sdk.AccountRequest{AccountID: kp.Address()}
account, err := client.AccountDetail(request)
panicIf(err)
accounts[i] = &account
}
for _, keys := range pairs {
i.t.Logf("Funded %s (%s) with %s XLM.\n",
keys.Seed(), keys.Address(), initialBalance)
}
return pairs, accounts
}
// Panics on any error establishing a trustline.
func (i *Test) MustEstablishTrustline(
truster *keypair.Full, account txnbuild.Account, asset txnbuild.Asset,
) (resp proto.Transaction) {
txResp, err := i.EstablishTrustline(truster, account, asset)
panicIf(err)
return txResp
}
// EstablishTrustline works on a given asset for a particular account.
func (i *Test) EstablishTrustline(
truster *keypair.Full, account txnbuild.Account, asset txnbuild.Asset,
) (proto.Transaction, error) {
if asset.IsNative() {
return proto.Transaction{}, nil
}
line, err := asset.ToChangeTrustAsset()
if err != nil {
return proto.Transaction{}, err
}
return i.SubmitOperations(account, truster, &txnbuild.ChangeTrust{
Line: line,
Limit: "2000",
})
}
// MustCreateClaimableBalance panics on any error creating a claimable balance.
func (i *Test) MustCreateClaimableBalance(
source *keypair.Full, asset txnbuild.Asset, amount string,
claimants ...txnbuild.Claimant,
) (claim proto.ClaimableBalance) {
account := i.MustGetAccount(source)
_ = i.MustSubmitOperations(&account, source,
&txnbuild.CreateClaimableBalance{
Destinations: claimants,
Asset: asset,
Amount: amount,
},
)
// Ensure it exists in the global list
balances, err := i.Client().ClaimableBalances(sdk.ClaimableBalanceRequest{})
panicIf(err)
claims := balances.Embedded.Records
if len(claims) == 0 {
panic(-1)
}
claim = claims[len(claims)-1] // latest one
i.t.Logf("Created claimable balance w/ id=%s", claim.BalanceID)
return
}
// MustGetAccount panics on any error retrieves an account's details from its
// key. This means it must have previously been funded.
func (i *Test) MustGetAccount(source *keypair.Full) proto.Account {
client := i.Client()
account, err := client.AccountDetail(sdk.AccountRequest{AccountID: source.Address()})
panicIf(err)
return account
}
// MustSubmitOperations submits a signed transaction from an account with
// standard options.
//
// Namely, we set the standard fee, time bounds, etc. to "non-production"
// defaults that work well for tests.
//
// Most transactions only need one signer, so see the more verbose
// `MustSubmitOperationsWithSigners` below for multi-sig transactions.
//
// Note: We assume that transaction will be successful here so we panic in case
// of all errors. To allow failures, use `SubmitOperations`.
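//
// Illustrative usage (the operation shown is hypothetical):
//   itest.MustSubmitOperations(itest.MasterAccount(), itest.Master(),
//       &txnbuild.BumpSequence{BumpTo: 0})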
func (i *Test) MustSubmitOperations(
source txnbuild.Account, signer *keypair.Full, ops ...txnbuild.Operation,
) proto.Transaction {
tx, err := i.SubmitOperations(source, signer, ops...)
panicIf(err)
return tx
}
func (i *Test) SubmitOperations(
source txnbuild.Account, signer *keypair.Full, ops ...txnbuild.Operation,
) (proto.Transaction, error) {
return i.SubmitMultiSigOperations(source, []*keypair.Full{signer}, ops...)
}
func (i *Test) SubmitMultiSigOperations(
source txnbuild.Account, signers []*keypair.Full, ops ...txnbuild.Operation,
) (proto.Transaction, error) {
tx, err := i.CreateSignedTransaction(source, signers, ops...)
if err != nil {
return proto.Transaction{}, err
}
return i.Client().SubmitTransaction(tx)
}
func (i *Test) MustSubmitMultiSigOperations(
source txnbuild.Account, signers []*keypair.Full, ops ...txnbuild.Operation,
) proto.Transaction {
tx, err := i.SubmitMultiSigOperations(source, signers, ops...)
panicIf(err)
return tx
}
func (i *Test) CreateSignedTransaction(
source txnbuild.Account, signers []*keypair.Full, ops ...txnbuild.Operation,
) (*txnbuild.Transaction, error) {
txParams := txnbuild.TransactionParams{
SourceAccount: source,
Operations: ops,
BaseFee: txnbuild.MinBaseFee,
Timebounds: txnbuild.NewInfiniteTimeout(),
IncrementSequenceNum: true,
}
tx, err := txnbuild.NewTransaction(txParams)
if err != nil {
return nil, err
}
for _, signer := range signers {
tx, err = tx.Sign(i.passPhrase, signer)
if err != nil {
return nil, err
}
}
return tx, nil
}
func (i *Test) GetCurrentCoreLedgerSequence() (int, error) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
info, err := i.coreClient.Info(ctx)
if err != nil {
return 0, err
}
return info.Info.Ledger.Num, nil
}
// LogFailedTx is a convenience function to provide verbose information about a
// failing transaction to the test output log, if it's expected to succeed.
func (i *Test) LogFailedTx(txResponse proto.Transaction, horizonResult error) {
t := i.CurrentTest()
assert.NoErrorf(t, horizonResult, "Submitting the transaction failed")
if prob := sdk.GetError(horizonResult); prob != nil {
t.Logf(" problem: %s\n", prob.Problem.Detail)
t.Logf(" extras: %s\n", prob.Problem.Extras["result_codes"])
return
}
var txResult xdr.TransactionResult
err := xdr.SafeUnmarshalBase64(txResponse.ResultXdr, &txResult)
assert.NoErrorf(t, err, "Unmarshalling transaction failed.")
assert.Equalf(t, xdr.TransactionResultCodeTxSuccess, txResult.Result.Code,
"Transaction doesn't have success code.")
}
func (i *Test) GetPassPhrase() string {
return i.passPhrase
}
// Cluttering code with if err != nil is absolute nonsense.
func panicIf(err error) {
if err != nil {
panic(err)
}
}
// findDockerComposePath performs a best-effort attempt to find the project's
// Docker Compose files.
func findDockerComposePath() string {
// Lets you check if a particular directory contains a file.
directoryContainsFilename := func(dir string, filename string) bool {
files, innerErr := ioutil.ReadDir(dir)
panicIf(innerErr)
for _, file := range files {
if file.Name() == filename {
return true
}
}
return false
}
current, err := os.Getwd()
panicIf(err)
//
// We have a primary and backup attempt for finding the necessary docker
// files: via $GOPATH and via local directory traversal.
//
if gopath := os.Getenv("GOPATH"); gopath != "" {
monorepo := filepath.Join(gopath, "src", "github.com", "stellar", "go")
if _, err = os.Stat(monorepo); !os.IsNotExist(err) {
current = monorepo
}
}
// In either case, we try to walk up the tree until we find "go.mod",
// which we hope is the root directory of the project.
for !directoryContainsFilename(current, "go.mod") {
current, err = filepath.Abs(filepath.Join(current, ".."))
// FIXME: This only works on *nix-like systems.
if err != nil || filepath.Base(current)[0] == filepath.Separator {
fmt.Println("Failed to establish project root directory.")
panic(err)
}
}
// Directly jump down to the folder that should contain the configs
return filepath.Join(current, "services", "horizon", "docker")
}
// MergeMaps returns a new map which contains the keys and values of *all* input
// maps, overwriting earlier values with later values on duplicate keys.
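//
// For example (values are hypothetical):
//   MergeMaps(map[string]string{"port": "8000"}, map[string]string{"port": "9000", "ingest": "true"})
// returns {"port": "9000", "ingest": "true"}.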
func MergeMaps(maps ...map[string]string) map[string]string {
merged := map[string]string{}
for _, m := range maps {
for k, v := range m {
merged[k] = v
}
}
return merged
}
// mapToFlags will convert a map of parameters into an array of CLI args (i.e.
// in the form --key=value). Note that an empty value for a key means to drop
// the parameter.
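//
// For example (values are hypothetical): {"port": "8000", "admin-port": ""}
// becomes ["--port=8000"]; the empty "admin-port" entry is dropped entirely.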
func mapToFlags(params map[string]string) []string {
args := make([]string, 0, len(params))
for key, value := range params {
if value == "" {
continue
}
args = append(args, fmt.Sprintf("--%s=%s", key, value))
}
return args
}
[record metadata] environment variables: HORIZON_INTEGRATION_ENABLE_CAPTIVE_CORE, HORIZON_INTEGRATION_ENABLE_CAPTIVE_CORE_USE_DB, HORIZON_INTEGRATION_TESTS, CAPTIVE_CORE_BIN, GOPATH | language: go | counts: 5, 0
PyGitUp/tests/test_git_not_in_path.py
# System imports
import os
from os.path import join
from nose.tools import *
from PyGitUp.git_wrapper import GitError
from PyGitUp.tests import basepath
test_name = 'git-not-in-path'
repo_path = join(basepath, test_name + os.sep)
def setup():
os.makedirs(repo_path, 0o700)
def test_not_a_git_repo():
""" Run 'git up' with git no being in PATH """
os.chdir(repo_path)
environ = os.environ.copy()
os.environ['PATH'] = ''
try:
with assert_raises(GitError) as e:
from PyGitUp.gitup import GitUp
GitUp(testing=True)
assert e.exception.message == "The git executable could not be found"
finally:
os.environ.update(environ)
[record metadata] environment variables: PATH | language: python | counts: 1, 0
pkg/diskmaker/api_updater.go
package diskmaker
import (
"context"
"fmt"
"os"
localv1 "github.com/openshift/local-storage-operator/pkg/apis/local/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
)
type apiUpdater interface {
recordEvent(lv *localv1.LocalVolume, e *event)
getLocalVolume(lv *localv1.LocalVolume) (*localv1.LocalVolume, error)
}
type sdkAPIUpdater struct {
recorder record.EventRecorder
// This client, initialized using mgr.Client() above, is a split client
// that reads objects from the cache and writes to the apiserver
client client.Client
}
func newAPIUpdater(mgr manager.Manager) apiUpdater {
apiClient := &sdkAPIUpdater{
client: mgr.GetClient(),
recorder: mgr.GetEventRecorderFor("local-storage-diskmaker"),
}
return apiClient
}
func (s *sdkAPIUpdater) recordEvent(lv *localv1.LocalVolume, e *event) {
nodeName := os.Getenv("MY_NODE_NAME")
message := e.message
if len(nodeName) != 0 {
message = fmt.Sprintf("%s - %s", nodeName, message)
}
s.recorder.Eventf(lv, e.eventType, e.eventReason, message)
}
func (s *sdkAPIUpdater) getLocalVolume(lv *localv1.LocalVolume) (*localv1.LocalVolume, error) {
newLocalVolume := lv.DeepCopy()
err := s.client.Get(context.TODO(), types.NamespacedName{Name: newLocalVolume.GetName(), Namespace: newLocalVolume.GetNamespace()}, newLocalVolume)
return lv, err
}
[record metadata] environment variables: MY_NODE_NAME | language: go | counts: 1, 0
manage.py
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "base4.settings")
from django.core.management import execute_from_command_line
if 'livereload' in sys.argv:
from django.core.wsgi import get_wsgi_application
from livereload import Server
application = get_wsgi_application()
server = Server(application)
# Add your watch
# server.watch('path/to/file', 'your command')
server.serve('8000')
else:
execute_from_command_line(sys.argv)
[record metadata] environment variables: none | language: python | counts: 0, 0
bootstrap/internal/obj/sym.go
// Do not edit. Bootstrap copy of /Users/rsc/g/go/src/cmd/internal/obj/sym.go
// Derived from Inferno utils/6l/obj.c and utils/6l/span.c
// http://code.google.com/p/inferno-os/source/browse/utils/6l/obj.c
// http://code.google.com/p/inferno-os/source/browse/utils/6l/span.c
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth ([email protected])
// Portions Copyright © 1997-1999 Vita Nuova Limited
// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright © 2004,2006 Bruce Ellis
// Portions Copyright © 2005-2007 C H Forsyth ([email protected])
// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
// Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package obj
import (
"log"
"os"
"path/filepath"
"runtime"
"strconv"
)
var headers = []struct {
name string
val int
}{
{"darwin", Hdarwin},
{"dragonfly", Hdragonfly},
{"elf", Helf},
{"freebsd", Hfreebsd},
{"linux", Hlinux},
{"android", Hlinux}, // must be after "linux" entry or else headstr(Hlinux) == "android"
{"nacl", Hnacl},
{"netbsd", Hnetbsd},
{"openbsd", Hopenbsd},
{"plan9", Hplan9},
{"solaris", Hsolaris},
{"windows", Hwindows},
{"windowsgui", Hwindows},
}
func headtype(name string) int {
for i := 0; i < len(headers); i++ {
if name == headers[i].name {
return headers[i].val
}
}
return -1
}
func Headstr(v int) string {
for i := 0; i < len(headers); i++ {
if v == headers[i].val {
return headers[i].name
}
}
return strconv.Itoa(v)
}
func Linknew(arch *LinkArch) *Link {
ctxt := new(Link)
ctxt.Hash = make(map[SymVer]*LSym)
ctxt.Arch = arch
ctxt.Version = HistVersion
ctxt.Goroot = Getgoroot()
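	// GOROOT_FINAL, if set, records the path where the Go tree will eventually be installed.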
ctxt.Goroot_final = os.Getenv("GOROOT_FINAL")
if runtime.GOOS == "windows" {
// TODO(rsc): Remove ctxt.Windows and let callers use runtime.GOOS.
ctxt.Windows = 1
}
var buf string
buf, _ = os.Getwd()
if buf == "" {
buf = "/???"
}
buf = filepath.ToSlash(buf)
ctxt.Pathname = buf
ctxt.LineHist.GOROOT = ctxt.Goroot
ctxt.LineHist.GOROOT_FINAL = ctxt.Goroot_final
ctxt.LineHist.Dir = ctxt.Pathname
ctxt.Headtype = headtype(Getgoos())
if ctxt.Headtype < 0 {
log.Fatalf("unknown goos %s", Getgoos())
}
// Record thread-local storage offset.
// TODO(rsc): Move tlsoffset back into the linker.
switch ctxt.Headtype {
default:
log.Fatalf("unknown thread-local storage offset for %s", Headstr(ctxt.Headtype))
case Hplan9, Hwindows:
break
/*
* ELF uses TLS offset negative from FS.
* Translate 0(FS) and 8(FS) into -16(FS) and -8(FS).
* Known to low-level assembly in package runtime and runtime/cgo.
*/
case Hlinux,
Hfreebsd,
Hnetbsd,
Hopenbsd,
Hdragonfly,
Hsolaris:
ctxt.Tlsoffset = -1 * ctxt.Arch.Ptrsize
case Hnacl:
switch ctxt.Arch.Thechar {
default:
log.Fatalf("unknown thread-local storage offset for nacl/%s", ctxt.Arch.Name)
case '5':
ctxt.Tlsoffset = 0
case '6':
ctxt.Tlsoffset = 0
case '8':
ctxt.Tlsoffset = -8
}
/*
* OS X system constants - offset from 0(GS) to our TLS.
* Explained in ../../runtime/cgo/gcc_darwin_*.c.
*/
case Hdarwin:
switch ctxt.Arch.Thechar {
default:
log.Fatalf("unknown thread-local storage offset for darwin/%s", ctxt.Arch.Name)
case '5':
ctxt.Tlsoffset = 0 // dummy value, not needed
case '6':
ctxt.Tlsoffset = 0x8a0
case '7':
ctxt.Tlsoffset = 0 // dummy value, not needed
case '8':
ctxt.Tlsoffset = 0x468
}
}
// On arm, record goarm.
if ctxt.Arch.Thechar == '5' {
p := Getgoarm()
if p != "" {
ctxt.Goarm = int32(Atoi(p))
} else {
ctxt.Goarm = 6
}
}
return ctxt
}
func _lookup(ctxt *Link, symb string, v int, create bool) *LSym {
s := ctxt.Hash[SymVer{symb, v}]
if s != nil || !create {
return s
}
s = &LSym{
Name: symb,
Type: 0,
Version: int16(v),
Value: 0,
Size: 0,
}
ctxt.Hash[SymVer{symb, v}] = s
return s
}
func Linklookup(ctxt *Link, name string, v int) *LSym {
return _lookup(ctxt, name, v, true)
}
// read-only lookup
func linkrlookup(ctxt *Link, name string, v int) *LSym {
return _lookup(ctxt, name, v, false)
}
func Linksymfmt(s *LSym) string {
if s == nil {
return "<nil>"
}
return s.Name
}
| [
"\"GOROOT_FINAL\""
] | [] | [
"GOROOT_FINAL"
] | [] | ["GOROOT_FINAL"] | go | 1 | 0 | |
backend/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'yellow_sound_29787.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
qa/rpc-tests/keypool.py | #!/usr/bin/env python2
# Copyright (c) 2014 The Moneta Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the wallet keypool, and interaction with wallet encryption/locking
# Add python-monetarpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-monetarpc"))
import json
import shutil
import subprocess
import tempfile
import traceback
from monetarpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def check_array_result(object_array, to_match, expected):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
"""
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0:
raise AssertionError("No objects matched %s"%(str(to_match)))
def run_test(nodes, tmpdir):
# Encrypt wallet and wait to terminate
nodes[0].encryptwallet('test')
monetad_processes[0].wait()
# Restart node 0
nodes[0] = start_node(0, tmpdir)
# Keep creating keys
addr = nodes[0].getnewaddress()
try:
addr = nodes[0].getnewaddress()
raise AssertionError('Keypool should be exhausted after one address')
except JSONRPCException,e:
assert(e.error['code']==-12)
# put three new keys in the keypool
nodes[0].walletpassphrase('test', 12000)
nodes[0].keypoolrefill(3)
nodes[0].walletlock()
# drain the keys
addr = set()
addr.add(nodes[0].getrawchangeaddress())
addr.add(nodes[0].getrawchangeaddress())
addr.add(nodes[0].getrawchangeaddress())
addr.add(nodes[0].getrawchangeaddress())
# assert that four unique addresses were returned
assert(len(addr) == 4)
# the next one should fail
try:
addr = nodes[0].getrawchangeaddress()
raise AssertionError('Keypool should be exhausted after three addresses')
except JSONRPCException,e:
assert(e.error['code']==-12)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave monetads and test.* datadir on exit or error")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing monetad/moneta-cli (default: %default%)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
(options, args) = parser.parse_args()
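    # Prepend the source directory so the freshly built monetad/moneta-cli binaries are found first on PATH.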
os.environ['PATH'] = options.srcdir+":"+os.environ['PATH']
check_json_precision()
success = False
nodes = []
try:
print("Initializing test directory "+options.tmpdir)
if not os.path.isdir(options.tmpdir):
os.makedirs(options.tmpdir)
initialize_chain(options.tmpdir)
nodes = start_nodes(1, options.tmpdir)
run_test(nodes, options.tmpdir)
success = True
except AssertionError as e:
print("Assertion failed: "+e.message)
except JSONRPCException as e:
print("JSONRPC error: "+e.error['message'])
traceback.print_tb(sys.exc_info()[2])
except Exception as e:
print("Unexpected exception caught during testing: "+str(sys.exc_info()[0]))
traceback.print_tb(sys.exc_info()[2])
if not options.nocleanup:
print("Cleaning up")
stop_nodes(nodes)
wait_monetads()
shutil.rmtree(options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
if __name__ == '__main__':
main()
| [] | [] | [
"PATH"
] | [] | ["PATH"] | python | 1 | 0 | |
pkg/protocols/database/connection.go | package database
import (
"database/sql"
"log"
"net"
"os"
"time"
"github.com/bungysheep/news-api/pkg/configs"
)
var (
// DbConnection - Database connection
DbConnection *sql.DB
)
// CreateDbConnection - Creates connection to database
func CreateDbConnection() error {
log.Printf("Creating database connection...")
dbConnString, err := resolveDbConnectionString()
if err != nil {
return err
}
db, err := sql.Open("postgres", dbConnString)
if err != nil {
return err
}
DbConnection = db
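	// Retry the ping while the connection is still being dialed; any other error aborts immediately.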
for i := 0; i < configs.NUMBERDIALATTEMPT; i++ {
err = DbConnection.Ping()
if err != nil {
opErr, ok := err.(*net.OpError)
if !ok || opErr.Op != "dial" {
return err
}
time.Sleep(5 * time.Second)
}
}
return err
}
func resolveDbConnectionString() (string, error) {
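	// Prefer the DATABASE_URL environment variable; fall back to the default connection string from the configs package.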
connString := os.Getenv("DATABASE_URL")
if connString != "" {
return connString, nil
}
return configs.DBCONNSTRING, nil
}
| [
"\"DATABASE_URL\""
] | [] | [
"DATABASE_URL"
] | [] | ["DATABASE_URL"] | go | 1 | 0 | |
rst/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys, os
docs_italia_theme = __import__("docs-italia-theme")
from recommonmark.transform import AutoStructify
from recommonmark.parser import CommonMarkParser
# -- PROJECT Variables ----------------------------------------------------
settings_project_name = 'PagoPA Web Services'
settings_copyright_copyleft = 'AgID - Team Digitale'
settings_editor_name = 'AgID - Team Digitale'
settings_doc_version = 'version: latest'
settings_doc_release = 'version: latest'
settings_basename = 'pagopa-web-services'
settings_file_name = 'pagopa-web-services'
# -- RTD configuration ------------------------------------------------
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# This is used for linking and such so we link to the thing we're building
rtd_version = os.environ.get('READTHEDOCS_VERSION', 'latest')
if rtd_version not in ['stable', 'latest']:
rtd_version = 'stable'
rtd_project = os.environ.get('READTHEDOCS_PROJECT', '')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'docs-italia-theme',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_parsers = {
'.md': CommonMarkParser,
}
source_suffix = ['.rst', '.md']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = settings_project_name
copyright = settings_copyright_copyleft
# URL of Discourse instance used by sphinxcontrib.discourse extension
# discourse_url = settings_discourse_url
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = settings_doc_version
# The full version, including alpha/beta/rc tags.
release = settings_doc_release
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'it'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['.DS_Store', 'README', 'README.md', '.venv*']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- AutoStructify --------------------------------------------------------
def setup(app):
app.add_config_value('recommonmark_config', {
'auto_toc_tree_section': 'Contents',
'enable_eval_rst': True,
'enable_auto_doc_ref': True
}, True)
app.add_transform(AutoStructify)
# -- Options for HTML output ----------------------------------------------
html_theme = 'docs-italia-theme'
html_theme_path = [docs_italia_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
# This option can be used with docs-italia-theme to customise how the versions "badge" is shown:
# 'False': default (alabaster) badge | 'True': custom (italia) badge
'custom_versions_badge': 'True',
}
# -- ReadTheDoc requirements and local template generation---------------------
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
html_theme = 'docs-italia-theme'
#html_theme_path = ["themes", ]
else:
# Override default css to get a larger width for ReadTheDoc build
html_context = {
'css_files': [
'_static/css/theme.css',
'_static/css/badge_only.css',
],
}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = settings_project_name
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = "images/logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%d/%m/%Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = settings_basename + 'doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', settings_file_name + '.tex', settings_project_name,
settings_copyright_copyleft, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = "images/..."
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', settings_file_name, settings_project_name,
[settings_editor_name], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', settings_file_name, settings_project_name,
settings_copyright_copyleft, settings_project_name, settings_project_name,
'Miscellaneous'),
]
numfig = True
| [] | [] | [
"READTHEDOCS_VERSION",
"READTHEDOCS_PROJECT",
"READTHEDOCS"
] | [] | ["READTHEDOCS_VERSION", "READTHEDOCS_PROJECT", "READTHEDOCS"] | python | 3 | 0 | |
libpod/runtime.go | package libpod
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"syscall"
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/pkg/sysregistriesv2"
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/types"
"github.com/containers/podman/v2/libpod/define"
"github.com/containers/podman/v2/libpod/events"
"github.com/containers/podman/v2/libpod/image"
"github.com/containers/podman/v2/libpod/lock"
"github.com/containers/podman/v2/libpod/plugin"
"github.com/containers/podman/v2/libpod/shutdown"
"github.com/containers/podman/v2/pkg/cgroups"
"github.com/containers/podman/v2/pkg/registries"
"github.com/containers/podman/v2/pkg/rootless"
"github.com/containers/podman/v2/pkg/util"
"github.com/containers/storage"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/docker/docker/pkg/namesgenerator"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// A RuntimeOption is a functional option which alters the Runtime created by
// NewRuntime
type RuntimeOption func(*Runtime) error
type storageSet struct {
RunRootSet bool
GraphRootSet bool
StaticDirSet bool
VolumePathSet bool
GraphDriverNameSet bool
TmpDirSet bool
}
// Runtime is the core libpod runtime
type Runtime struct {
config *config.Config
storageConfig storage.StoreOptions
storageSet storageSet
state State
store storage.Store
storageService *storageService
imageContext *types.SystemContext
defaultOCIRuntime OCIRuntime
ociRuntimes map[string]OCIRuntime
runtimeFlags []string
netPlugin ocicni.CNIPlugin
conmonPath string
imageRuntime *image.Runtime
lockManager lock.Manager
// doRenumber indicates that the runtime should perform a lock renumber
// during initialization.
// Once the runtime has been initialized and returned, this variable is
// unused.
doRenumber bool
doMigrate bool
// System migrate can move containers to a new runtime.
// We make no promises that these migrated containers work on the new
// runtime, though.
migrateRuntime string
// valid indicates whether the runtime is ready to use.
// valid is set to true when a runtime is returned from GetRuntime(),
// and remains true until the runtime is shut down (rendering its
// storage unusable). When valid is false, the runtime cannot be used.
valid bool
lock sync.RWMutex
	// mechanism to read and write event logs
eventer events.Eventer
// noStore indicates whether we need to interact with a store or not
noStore bool
}
// SetXdgDirs ensures the XDG_RUNTIME_DIR env and XDG_CONFIG_HOME variables are set.
// containers/image uses XDG_RUNTIME_DIR to locate the auth file, XDG_CONFIG_HOME is
// used for the containers.conf configuration file.
func SetXdgDirs() error {
if !rootless.IsRootless() {
return nil
}
// Setup XDG_RUNTIME_DIR
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
if runtimeDir == "" {
var err error
runtimeDir, err = util.GetRuntimeDir()
if err != nil {
return err
}
}
if err := os.Setenv("XDG_RUNTIME_DIR", runtimeDir); err != nil {
return errors.Wrapf(err, "cannot set XDG_RUNTIME_DIR")
}
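	// If no session bus address is set, point DBUS_SESSION_BUS_ADDRESS at the bus socket under the runtime dir when it exists.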
if rootless.IsRootless() && os.Getenv("DBUS_SESSION_BUS_ADDRESS") == "" {
sessionAddr := filepath.Join(runtimeDir, "bus")
if _, err := os.Stat(sessionAddr); err == nil {
os.Setenv("DBUS_SESSION_BUS_ADDRESS", fmt.Sprintf("unix:path=%s", sessionAddr))
}
}
// Setup XDG_CONFIG_HOME
if cfgHomeDir := os.Getenv("XDG_CONFIG_HOME"); cfgHomeDir == "" {
cfgHomeDir, err := util.GetRootlessConfigHomeDir()
if err != nil {
return err
}
if err := os.Setenv("XDG_CONFIG_HOME", cfgHomeDir); err != nil {
return errors.Wrapf(err, "cannot set XDG_CONFIG_HOME")
}
}
return nil
}
// NewRuntime creates a new container runtime
// Options can be passed to override the default configuration for the runtime
func NewRuntime(ctx context.Context, options ...RuntimeOption) (*Runtime, error) {
conf, err := config.NewConfig("")
if err != nil {
return nil, err
}
conf.CheckCgroupsAndAdjustConfig()
return newRuntimeFromConfig(ctx, conf, options...)
}
// NewRuntimeFromConfig creates a new container runtime using the given
// configuration file for its default configuration. Passed RuntimeOption
// functions can be used to mutate this configuration further.
// An error will be returned if the configuration file at the given path does
// not exist or cannot be loaded
func NewRuntimeFromConfig(ctx context.Context, userConfig *config.Config, options ...RuntimeOption) (*Runtime, error) {
return newRuntimeFromConfig(ctx, userConfig, options...)
}
func newRuntimeFromConfig(ctx context.Context, conf *config.Config, options ...RuntimeOption) (*Runtime, error) {
runtime := new(Runtime)
if conf.Engine.OCIRuntime == "" {
conf.Engine.OCIRuntime = "runc"
// If we're running on cgroups v2, default to using crun.
if onCgroupsv2, _ := cgroups.IsCgroup2UnifiedMode(); onCgroupsv2 {
conf.Engine.OCIRuntime = "crun"
}
}
runtime.config = conf
if err := SetXdgDirs(); err != nil {
return nil, err
}
storeOpts, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID())
if err != nil {
return nil, err
}
runtime.storageConfig = storeOpts
// Overwrite config with user-given configuration options
for _, opt := range options {
if err := opt(runtime); err != nil {
return nil, errors.Wrapf(err, "error configuring runtime")
}
}
if err := shutdown.Register("libpod", func(sig os.Signal) error {
os.Exit(1)
return nil
}); err != nil && errors.Cause(err) != shutdown.ErrHandlerExists {
logrus.Errorf("Error registering shutdown handler for libpod: %v", err)
}
if err := shutdown.Start(); err != nil {
return nil, errors.Wrapf(err, "error starting shutdown signal handler")
}
if err := makeRuntime(ctx, runtime); err != nil {
return nil, err
}
return runtime, nil
}
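// getLockManager returns the lock manager selected by the runtime configuration:
// either a file-based lock manager or the default SHM-backed one.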
func getLockManager(runtime *Runtime) (lock.Manager, error) {
var err error
var manager lock.Manager
switch runtime.config.Engine.LockType {
case "file":
lockPath := filepath.Join(runtime.config.Engine.TmpDir, "locks")
manager, err = lock.OpenFileLockManager(lockPath)
if err != nil {
if os.IsNotExist(errors.Cause(err)) {
manager, err = lock.NewFileLockManager(lockPath)
if err != nil {
return nil, errors.Wrapf(err, "failed to get new file lock manager")
}
} else {
return nil, err
}
}
case "", "shm":
lockPath := define.DefaultSHMLockPath
if rootless.IsRootless() {
lockPath = fmt.Sprintf("%s_%d", define.DefaultRootlessSHMLockPath, rootless.GetRootlessUID())
}
// Set up the lock manager
manager, err = lock.OpenSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
if err != nil {
switch {
case os.IsNotExist(errors.Cause(err)):
manager, err = lock.NewSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
if err != nil {
return nil, errors.Wrapf(err, "failed to get new shm lock manager")
}
case errors.Cause(err) == syscall.ERANGE && runtime.doRenumber:
logrus.Debugf("Number of locks does not match - removing old locks")
// ERANGE indicates a lock numbering mismatch.
// Since we're renumbering, this is not fatal.
// Remove the earlier set of locks and recreate.
if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil {
return nil, errors.Wrapf(err, "error removing libpod locks file %s", lockPath)
}
manager, err = lock.NewSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
if err != nil {
return nil, err
}
default:
return nil, err
}
}
default:
return nil, errors.Wrapf(define.ErrInvalidArg, "unknown lock type %s", runtime.config.Engine.LockType)
}
return manager, nil
}
// Make a new runtime based on the given configuration
// Sets up containers/storage, state store, OCI runtime
func makeRuntime(ctx context.Context, runtime *Runtime) (retErr error) {
// Find a working conmon binary
cPath, err := runtime.config.FindConmon()
if err != nil {
return err
}
runtime.conmonPath = cPath
// Make the static files directory if it does not exist
if err := os.MkdirAll(runtime.config.Engine.StaticDir, 0700); err != nil {
// The directory is allowed to exist
if !os.IsExist(err) {
return errors.Wrap(err, "error creating runtime static files directory")
}
}
// Set up the state.
//
// TODO - if we further break out the state implementation into
// libpod/state, the config could take care of the code below. It
// would further allow to move the types and consts into a coherent
// package.
switch runtime.config.Engine.StateType {
case config.InMemoryStateStore:
state, err := NewInMemoryState()
if err != nil {
return err
}
runtime.state = state
case config.SQLiteStateStore:
return errors.Wrapf(define.ErrInvalidArg, "SQLite state is currently disabled")
case config.BoltDBStateStore:
dbPath := filepath.Join(runtime.config.Engine.StaticDir, "bolt_state.db")
state, err := NewBoltState(dbPath, runtime)
if err != nil {
return err
}
runtime.state = state
default:
return errors.Wrapf(define.ErrInvalidArg, "unrecognized state type passed (%v)", runtime.config.Engine.StateType)
}
// Grab config from the database so we can reset some defaults
dbConfig, err := runtime.state.GetDBConfig()
if err != nil {
return errors.Wrapf(err, "error retrieving runtime configuration from database")
}
runtime.mergeDBConfig(dbConfig)
logrus.Debugf("Using graph driver %s", runtime.storageConfig.GraphDriverName)
logrus.Debugf("Using graph root %s", runtime.storageConfig.GraphRoot)
logrus.Debugf("Using run root %s", runtime.storageConfig.RunRoot)
logrus.Debugf("Using static dir %s", runtime.config.Engine.StaticDir)
logrus.Debugf("Using tmp dir %s", runtime.config.Engine.TmpDir)
logrus.Debugf("Using volume path %s", runtime.config.Engine.VolumePath)
// Validate our config against the database, now that we've set our
// final storage configuration
if err := runtime.state.ValidateDBConfig(runtime); err != nil {
return err
}
if err := runtime.state.SetNamespace(runtime.config.Engine.Namespace); err != nil {
return errors.Wrapf(err, "error setting libpod namespace in state")
}
logrus.Debugf("Set libpod namespace to %q", runtime.config.Engine.Namespace)
// Set up containers/storage
var store storage.Store
if os.Geteuid() != 0 {
logrus.Debug("Not configuring container store")
} else if runtime.noStore {
logrus.Debug("No store required. Not opening container store.")
} else if err := runtime.configureStore(); err != nil {
return err
}
defer func() {
if retErr != nil && store != nil {
// Don't forcibly shut down
// We could be opening a store in use by another libpod
if _, err := store.Shutdown(false); err != nil {
logrus.Errorf("Error removing store for partially-created runtime: %s", err)
}
}
}()
// Setup the eventer
eventer, err := runtime.newEventer()
if err != nil {
return err
}
runtime.eventer = eventer
if runtime.imageRuntime != nil {
runtime.imageRuntime.Eventer = eventer
}
// Set up containers/image
if runtime.imageContext == nil {
runtime.imageContext = &types.SystemContext{}
}
runtime.imageContext.SignaturePolicyPath = runtime.config.Engine.SignaturePolicyPath
// Create the tmpDir
if err := os.MkdirAll(runtime.config.Engine.TmpDir, 0751); err != nil {
// The directory is allowed to exist
if !os.IsExist(err) {
return errors.Wrap(err, "error creating tmpdir")
}
}
// Create events log dir
if err := os.MkdirAll(filepath.Dir(runtime.config.Engine.EventsLogFilePath), 0700); err != nil {
// The directory is allowed to exist
if !os.IsExist(err) {
return errors.Wrap(err, "error creating events dirs")
}
}
// Get us at least one working OCI runtime.
runtime.ociRuntimes = make(map[string]OCIRuntime)
// Initialize remaining OCI runtimes
for name, paths := range runtime.config.Engine.OCIRuntimes {
ociRuntime, err := newConmonOCIRuntime(name, paths, runtime.conmonPath, runtime.runtimeFlags, runtime.config)
if err != nil {
// Don't fatally error.
// This will allow us to ship configs including optional
// runtimes that might not be installed (crun, kata).
			// Only log at info level so that default configs don't produce errors.
logrus.Infof("Error initializing configured OCI runtime %s: %v", name, err)
continue
}
runtime.ociRuntimes[name] = ociRuntime
}
// Do we have a default OCI runtime?
if runtime.config.Engine.OCIRuntime != "" {
// If the string starts with / it's a path to a runtime
// executable.
if strings.HasPrefix(runtime.config.Engine.OCIRuntime, "/") {
ociRuntime, err := newConmonOCIRuntime(runtime.config.Engine.OCIRuntime, []string{runtime.config.Engine.OCIRuntime}, runtime.conmonPath, runtime.runtimeFlags, runtime.config)
if err != nil {
return err
}
runtime.ociRuntimes[runtime.config.Engine.OCIRuntime] = ociRuntime
runtime.defaultOCIRuntime = ociRuntime
} else {
ociRuntime, ok := runtime.ociRuntimes[runtime.config.Engine.OCIRuntime]
if !ok {
return errors.Wrapf(define.ErrInvalidArg, "default OCI runtime %q not found", runtime.config.Engine.OCIRuntime)
}
runtime.defaultOCIRuntime = ociRuntime
}
}
// Do we have at least one valid OCI runtime?
if len(runtime.ociRuntimes) == 0 {
return errors.Wrapf(define.ErrInvalidArg, "no OCI runtime has been configured")
}
// Do we have a default runtime?
if runtime.defaultOCIRuntime == nil {
return errors.Wrapf(define.ErrInvalidArg, "no default OCI runtime was configured")
}
// Make the per-boot files directory if it does not exist
if err := os.MkdirAll(runtime.config.Engine.TmpDir, 0755); err != nil {
// The directory is allowed to exist
if !os.IsExist(err) {
return errors.Wrapf(err, "error creating runtime temporary files directory")
}
}
// Set up the CNI net plugin
if !rootless.IsRootless() {
netPlugin, err := ocicni.InitCNI(runtime.config.Network.DefaultNetwork, runtime.config.Network.NetworkConfigDir, runtime.config.Network.CNIPluginDirs...)
if err != nil {
return errors.Wrapf(err, "error configuring CNI network plugin")
}
runtime.netPlugin = netPlugin
}
// We now need to see if the system has restarted
// We check for the presence of a file in our tmp directory to verify this
// This check must be locked to prevent races
runtimeAliveLock := filepath.Join(runtime.config.Engine.TmpDir, "alive.lck")
runtimeAliveFile := filepath.Join(runtime.config.Engine.TmpDir, "alive")
aliveLock, err := storage.GetLockfile(runtimeAliveLock)
if err != nil {
return errors.Wrapf(err, "error acquiring runtime init lock")
}
// Acquire the lock and hold it until we return
// This ensures that no two processes will be in runtime.refresh at once
// TODO: we can't close the FD in this lock, so we should keep it around
// and use it to lock important operations
aliveLock.Lock()
doRefresh := false
defer func() {
if aliveLock.Locked() {
aliveLock.Unlock()
}
}()
_, err = os.Stat(runtimeAliveFile)
if err != nil {
// If we need to refresh, then it is safe to assume there are
// no containers running. Create immediately a namespace, as
// we will need to access the storage.
if os.Geteuid() != 0 {
aliveLock.Unlock() // Unlock to avoid deadlock as BecomeRootInUserNS will reexec.
pausePid, err := util.GetRootlessPauseProcessPidPathGivenDir(runtime.config.Engine.TmpDir)
if err != nil {
return errors.Wrapf(err, "could not get pause process pid file path")
}
became, ret, err := rootless.BecomeRootInUserNS(pausePid)
if err != nil {
return err
}
if became {
os.Exit(ret)
}
}
// If the file doesn't exist, we need to refresh the state
// This will trigger on first use as well, but refreshing an
// empty state only creates a single file
// As such, it's not really a performance concern
if os.IsNotExist(err) {
doRefresh = true
} else {
return errors.Wrapf(err, "error reading runtime status file %s", runtimeAliveFile)
}
}
runtime.lockManager, err = getLockManager(runtime)
if err != nil {
return err
}
// If we're renumbering locks, do it now.
// It breaks out of normal runtime init, and will not return a valid
// runtime.
if runtime.doRenumber {
if err := runtime.renumberLocks(); err != nil {
return err
}
}
// If we need to refresh the state, do it now - things are guaranteed to
// be set up by now.
if doRefresh {
// Ensure we have a store before refresh occurs
if runtime.store == nil {
if err := runtime.configureStore(); err != nil {
return err
}
}
if err2 := runtime.refresh(runtimeAliveFile); err2 != nil {
return err2
}
}
// Mark the runtime as valid - ready to be used, cannot be modified
// further
runtime.valid = true
if runtime.doMigrate {
if err := runtime.migrate(ctx); err != nil {
return err
}
}
return nil
}
// TmpDir gets the current Libpod temporary files directory.
func (r *Runtime) TmpDir() (string, error) {
if !r.valid {
return "", define.ErrRuntimeStopped
}
return r.config.Engine.TmpDir, nil
}
// GetConfig returns a copy of the configuration used by the runtime
func (r *Runtime) GetConfig() (*config.Config, error) {
r.lock.RLock()
defer r.lock.RUnlock()
if !r.valid {
return nil, define.ErrRuntimeStopped
}
config := new(config.Config)
// Copy so the caller won't be able to modify the actual config
if err := JSONDeepCopy(r.config, config); err != nil {
return nil, errors.Wrapf(err, "error copying config")
}
return config, nil
}
// DeferredShutdown shuts down the runtime without exposing any
// errors. This is only meant to be used when the runtime is being
// shut down within a defer statement; else use Shutdown
func (r *Runtime) DeferredShutdown(force bool) {
_ = r.Shutdown(force)
}
// Shutdown shuts down the runtime and associated containers and storage
// If force is true, containers and mounted storage will be shut down before
// cleaning up; if force is false, an error will be returned if there are
// still containers running or mounted
func (r *Runtime) Shutdown(force bool) error {
r.lock.Lock()
defer r.lock.Unlock()
if !r.valid {
return define.ErrRuntimeStopped
}
r.valid = false
// Shutdown all containers if --force is given
if force {
ctrs, err := r.state.AllContainers()
if err != nil {
logrus.Errorf("Error retrieving containers from database: %v", err)
} else {
for _, ctr := range ctrs {
if err := ctr.StopWithTimeout(r.config.Engine.StopTimeout); err != nil {
logrus.Errorf("Error stopping container %s: %v", ctr.ID(), err)
}
}
}
}
var lastError error
// If no store was requested, it can be nil and there is no need to
// attempt to shut it down
if r.store != nil {
if _, err := r.store.Shutdown(force); err != nil {
lastError = errors.Wrapf(err, "error shutting down container storage")
}
}
if err := r.state.Close(); err != nil {
if lastError != nil {
logrus.Errorf("%v", lastError)
}
lastError = err
}
return lastError
}
// Reconfigures the runtime after a reboot
// Refreshes the state, recreating temporary files
// Does not check validity as the runtime is not valid until after this has run
func (r *Runtime) refresh(alivePath string) error {
logrus.Debugf("Podman detected system restart - performing state refresh")
// First clear the state in the database
if err := r.state.Refresh(); err != nil {
return err
}
// Next refresh the state of all containers to recreate dirs and
// namespaces, and all the pods to recreate cgroups.
// Containers, pods, and volumes must also reacquire their locks.
ctrs, err := r.state.AllContainers()
if err != nil {
return errors.Wrapf(err, "error retrieving all containers from state")
}
pods, err := r.state.AllPods()
if err != nil {
return errors.Wrapf(err, "error retrieving all pods from state")
}
vols, err := r.state.AllVolumes()
if err != nil {
return errors.Wrapf(err, "error retrieving all volumes from state")
}
// No locks are taken during pod, volume, and container refresh.
// Furthermore, the pod/volume/container refresh() functions are not
// allowed to take locks themselves.
// We cannot assume that any pod/volume/container has a valid lock until
// after this function has returned.
// The runtime alive lock should suffice to provide mutual exclusion
// until this has run.
for _, ctr := range ctrs {
if err := ctr.refresh(); err != nil {
logrus.Errorf("Error refreshing container %s: %v", ctr.ID(), err)
}
}
for _, pod := range pods {
if err := pod.refresh(); err != nil {
logrus.Errorf("Error refreshing pod %s: %v", pod.ID(), err)
}
}
for _, vol := range vols {
if err := vol.refresh(); err != nil {
logrus.Errorf("Error refreshing volume %s: %v", vol.Name(), err)
}
}
// Create a file indicating the runtime is alive and ready
file, err := os.OpenFile(alivePath, os.O_RDONLY|os.O_CREATE, 0644)
if err != nil {
return errors.Wrap(err, "error creating runtime status file")
}
defer file.Close()
r.newSystemEvent(events.Refresh)
return nil
}
// Info returns the store and host information
func (r *Runtime) Info() (*define.Info, error) {
return r.info()
}
// generateName generates a unique name for a container or pod.
func (r *Runtime) generateName() (string, error) {
for {
name := namesgenerator.GetRandomName(0)
// Make sure container with this name does not exist
if _, err := r.state.LookupContainer(name); err == nil {
continue
} else if errors.Cause(err) != define.ErrNoSuchCtr {
return "", err
}
// Make sure pod with this name does not exist
if _, err := r.state.LookupPod(name); err == nil {
continue
} else if errors.Cause(err) != define.ErrNoSuchPod {
return "", err
}
return name, nil
}
// The code should never reach here.
}
// Configure store and image runtime
func (r *Runtime) configureStore() error {
store, err := storage.GetStore(r.storageConfig)
if err != nil {
return err
}
r.store = store
is.Transport.SetStore(store)
// Set up a storage service for creating container root filesystems from
// images
r.storageService = getStorageService(r.store)
ir := image.NewImageRuntimeFromStore(r.store)
ir.SignaturePolicyPath = r.config.Engine.SignaturePolicyPath
ir.EventsLogFilePath = r.config.Engine.EventsLogFilePath
ir.EventsLogger = r.config.Engine.EventsLogger
r.imageRuntime = ir
return nil
}
// ImageRuntime returns the imageruntime for image operations.
// If WithNoStore() was used, no image runtime will be available, and this
// function will return nil.
func (r *Runtime) ImageRuntime() *image.Runtime {
return r.imageRuntime
}
// SystemContext returns the image context
func (r *Runtime) SystemContext() *types.SystemContext {
return r.imageContext
}
// GetOCIRuntimePath retrieves the path of the default OCI runtime.
func (r *Runtime) GetOCIRuntimePath() string {
return r.defaultOCIRuntime.Path()
}
// StorageConfig retrieves the storage options for the container runtime
func (r *Runtime) StorageConfig() storage.StoreOptions {
return r.storageConfig
}
// GetStore returns the runtime stores
func (r *Runtime) GetStore() storage.Store {
return r.store
}
// GetName retrieves the name associated with a given full ID.
// This works for both containers and pods, and does not distinguish between the
// two.
// If the given ID does not correspond to any existing Pod or Container,
// ErrNoSuchCtr is returned.
func (r *Runtime) GetName(id string) (string, error) {
r.lock.RLock()
defer r.lock.RUnlock()
if !r.valid {
return "", define.ErrRuntimeStopped
}
return r.state.GetName(id)
}
// DBConfig is a set of Libpod runtime configuration settings that are saved in
// a State when it is first created, and can subsequently be retrieved.
type DBConfig struct {
LibpodRoot string
LibpodTmp string
StorageRoot string
StorageTmp string
GraphDriver string
VolumePath string
}
// mergeDBConfig merges the configuration from the database.
func (r *Runtime) mergeDBConfig(dbConfig *DBConfig) {
c := &r.config.Engine
if !r.storageSet.RunRootSet && dbConfig.StorageTmp != "" {
if r.storageConfig.RunRoot != dbConfig.StorageTmp &&
r.storageConfig.RunRoot != "" {
logrus.Debugf("Overriding run root %q with %q from database",
r.storageConfig.RunRoot, dbConfig.StorageTmp)
}
r.storageConfig.RunRoot = dbConfig.StorageTmp
}
if !r.storageSet.GraphRootSet && dbConfig.StorageRoot != "" {
if r.storageConfig.GraphRoot != dbConfig.StorageRoot &&
r.storageConfig.GraphRoot != "" {
logrus.Debugf("Overriding graph root %q with %q from database",
r.storageConfig.GraphRoot, dbConfig.StorageRoot)
}
r.storageConfig.GraphRoot = dbConfig.StorageRoot
}
if !r.storageSet.GraphDriverNameSet && dbConfig.GraphDriver != "" {
if r.storageConfig.GraphDriverName != dbConfig.GraphDriver &&
r.storageConfig.GraphDriverName != "" {
logrus.Errorf("User-selected graph driver %q overwritten by graph driver %q from database - delete libpod local files to resolve",
r.storageConfig.GraphDriverName, dbConfig.GraphDriver)
}
r.storageConfig.GraphDriverName = dbConfig.GraphDriver
}
if !r.storageSet.StaticDirSet && dbConfig.LibpodRoot != "" {
if c.StaticDir != dbConfig.LibpodRoot && c.StaticDir != "" {
logrus.Debugf("Overriding static dir %q with %q from database", c.StaticDir, dbConfig.LibpodRoot)
}
c.StaticDir = dbConfig.LibpodRoot
}
if !r.storageSet.TmpDirSet && dbConfig.LibpodTmp != "" {
if c.TmpDir != dbConfig.LibpodTmp && c.TmpDir != "" {
logrus.Debugf("Overriding tmp dir %q with %q from database", c.TmpDir, dbConfig.LibpodTmp)
}
c.TmpDir = dbConfig.LibpodTmp
c.EventsLogFilePath = filepath.Join(dbConfig.LibpodTmp, "events", "events.log")
}
if !r.storageSet.VolumePathSet && dbConfig.VolumePath != "" {
if c.VolumePath != dbConfig.VolumePath && c.VolumePath != "" {
logrus.Debugf("Overriding volume path %q with %q from database", c.VolumePath, dbConfig.VolumePath)
}
c.VolumePath = dbConfig.VolumePath
}
}
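// EnableLabeling reports whether container security labeling is enabled in the containers configuration.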
func (r *Runtime) EnableLabeling() bool {
return r.config.Containers.EnableLabeling
}
// Reload reloads the configurations files
func (r *Runtime) Reload() error {
if err := r.reloadContainersConf(); err != nil {
return err
}
if err := r.reloadStorageConf(); err != nil {
return err
}
if err := reloadRegistriesConf(); err != nil {
return err
}
return nil
}
// reloadContainersConf reloads the containers.conf
func (r *Runtime) reloadContainersConf() error {
config, err := config.Reload()
if err != nil {
return err
}
r.config = config
logrus.Infof("applied new containers configuration: %v", config)
return nil
}
// reloadRegistriesConf reloads the registries.conf
func reloadRegistriesConf() error {
sysregistriesv2.InvalidateCache()
registries, err := sysregistriesv2.GetRegistries(&types.SystemContext{SystemRegistriesConfPath: registries.SystemRegistriesConfPath()})
if err != nil {
return err
}
logrus.Infof("applied new registry configuration: %+v", registries)
return nil
}
// reloadStorageConf reloads the storage.conf
func (r *Runtime) reloadStorageConf() error {
configFile, err := storage.DefaultConfigFile(rootless.IsRootless())
if err != nil {
return err
}
storage.ReloadConfigurationFile(configFile, &r.storageConfig)
logrus.Infof("applied new storage configuration: %v", r.storageConfig)
return nil
}
// getVolumePlugin gets a specific volume plugin given its name.
func (r *Runtime) getVolumePlugin(name string) (*plugin.VolumePlugin, error) {
// There is no plugin for local.
if name == define.VolumeDriverLocal || name == "" {
return nil, nil
}
pluginPath, ok := r.config.Engine.VolumePlugins[name]
if !ok {
return nil, errors.Wrapf(define.ErrMissingPlugin, "no volume plugin with name %s available", name)
}
return plugin.GetVolumePlugin(name, pluginPath)
}
// GetSecretsStorageDir returns the directory that the secrets manager should use
func (r *Runtime) GetSecretsStorageDir() string {
return filepath.Join(r.store.GraphRoot(), "secrets")
}
| [
"\"XDG_RUNTIME_DIR\"",
"\"DBUS_SESSION_BUS_ADDRESS\"",
"\"XDG_CONFIG_HOME\""
] | [] | [
"XDG_RUNTIME_DIR",
"DBUS_SESSION_BUS_ADDRESS",
"XDG_CONFIG_HOME"
] | [] | ["XDG_RUNTIME_DIR", "DBUS_SESSION_BUS_ADDRESS", "XDG_CONFIG_HOME"] | go | 3 | 0 | |
commands/build.go | package commands
import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/docker/buildx/build"
"github.com/docker/buildx/util/buildflags"
"github.com/docker/buildx/util/platformutil"
"github.com/docker/buildx/util/progress"
"github.com/docker/buildx/util/tracing"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/opts"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/go-units"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/session/auth/authprovider"
"github.com/moby/buildkit/util/appcontext"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
const defaultTargetName = "default"
type buildOptions struct {
commonOptions
contextPath string
dockerfileName string
tags []string
labels []string
buildArgs []string
cacheFrom []string
cacheTo []string
target string
platforms []string
secrets []string
ssh []string
outputs []string
imageIDFile string
extraHosts []string
networkMode string
quiet bool
ulimits *opts.UlimitOpt
// unimplemented
squash bool
allow []string
// hidden
// untrusted bool
// memory opts.MemBytes
// memorySwap opts.MemSwapBytes
// shmSize opts.MemBytes
// cpuShares int64
// cpuPeriod int64
// cpuQuota int64
// cpuSetCpus string
// cpuSetMems string
// cgroupParent string
// isolation string
// compress bool
// securityOpt []string
}
type commonOptions struct {
builder string
noCache *bool
progress string
pull *bool
metadataFile string
// golangci-lint#826
// nolint:structcheck
exportPush bool
// nolint:structcheck
exportLoad bool
}
func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
if in.squash {
return errors.Errorf("squash currently not implemented")
}
ctx := appcontext.Context()
ctx, end, err := tracing.TraceCurrentCommand(ctx, "build")
if err != nil {
return err
}
defer func() {
end(err)
}()
noCache := false
if in.noCache != nil {
noCache = *in.noCache
}
pull := false
if in.pull != nil {
pull = *in.pull
}
if in.quiet && in.progress != "auto" && in.progress != "quiet" {
return errors.Errorf("progress=%s and quiet cannot be used together", in.progress)
} else if in.quiet {
in.progress = "quiet"
}
opts := build.Options{
Inputs: build.Inputs{
ContextPath: in.contextPath,
DockerfilePath: in.dockerfileName,
InStream: os.Stdin,
},
Tags: in.tags,
Labels: listToMap(in.labels, false),
BuildArgs: listToMap(in.buildArgs, true),
Pull: pull,
NoCache: noCache,
Target: in.target,
ImageIDFile: in.imageIDFile,
ExtraHosts: in.extraHosts,
NetworkMode: in.networkMode,
Ulimits: in.ulimits,
}
platforms, err := platformutil.Parse(in.platforms)
if err != nil {
return err
}
opts.Platforms = platforms
opts.Session = append(opts.Session, authprovider.NewDockerAuthProvider(os.Stderr))
secrets, err := buildflags.ParseSecretSpecs(in.secrets)
if err != nil {
return err
}
opts.Session = append(opts.Session, secrets)
sshSpecs := in.ssh
if len(sshSpecs) == 0 && buildflags.IsGitSSH(in.contextPath) {
sshSpecs = []string{"default"}
}
ssh, err := buildflags.ParseSSHSpecs(sshSpecs)
if err != nil {
return err
}
opts.Session = append(opts.Session, ssh)
outputs, err := buildflags.ParseOutputs(in.outputs)
if err != nil {
return err
}
if in.exportPush {
if in.exportLoad {
return errors.Errorf("push and load may not be set together at the moment")
}
if len(outputs) == 0 {
outputs = []client.ExportEntry{{
Type: "image",
Attrs: map[string]string{
"push": "true",
},
}}
} else {
switch outputs[0].Type {
case "image":
outputs[0].Attrs["push"] = "true"
default:
return errors.Errorf("push and %q output can't be used together", outputs[0].Type)
}
}
}
if in.exportLoad {
if len(outputs) == 0 {
outputs = []client.ExportEntry{{
Type: "docker",
Attrs: map[string]string{},
}}
} else {
switch outputs[0].Type {
case "docker":
default:
return errors.Errorf("load and %q output can't be used together", outputs[0].Type)
}
}
}
opts.Exports = outputs
cacheImports, err := buildflags.ParseCacheEntry(in.cacheFrom)
if err != nil {
return err
}
opts.CacheFrom = cacheImports
cacheExports, err := buildflags.ParseCacheEntry(in.cacheTo)
if err != nil {
return err
}
opts.CacheTo = cacheExports
allow, err := buildflags.ParseEntitlements(in.allow)
if err != nil {
return err
}
opts.Allow = allow
// key string used for kubernetes "sticky" mode
contextPathHash, err := filepath.Abs(in.contextPath)
if err != nil {
contextPathHash = in.contextPath
}
imageID, err := buildTargets(ctx, dockerCli, map[string]build.Options{defaultTargetName: opts}, in.progress, contextPathHash, in.builder, in.metadataFile)
if err != nil {
return err
}
if in.quiet {
fmt.Println(imageID)
}
return nil
}
func buildTargets(ctx context.Context, dockerCli command.Cli, opts map[string]build.Options, progressMode, contextPathHash, instance string, metadataFile string) (imageID string, err error) {
dis, err := getInstanceOrDefault(ctx, dockerCli, instance, contextPathHash)
if err != nil {
return "", err
}
ctx2, cancel := context.WithCancel(context.TODO())
defer cancel()
printer := progress.NewPrinter(ctx2, os.Stderr, progressMode)
resp, err := build.Build(ctx, dis, opts, dockerAPI(dockerCli), dockerCli.ConfigFile(), printer)
err1 := printer.Wait()
if err == nil {
err = err1
}
if err != nil {
return "", err
}
if len(metadataFile) > 0 && resp != nil {
mdatab, err := json.MarshalIndent(resp[defaultTargetName].ExporterResponse, "", " ")
if err != nil {
return "", err
}
if err := ioutils.AtomicWriteFile(metadataFile, mdatab, 0644); err != nil {
return "", err
}
}
return resp[defaultTargetName].ExporterResponse["containerimage.digest"], err
}
func newBuildOptions() buildOptions {
ulimits := make(map[string]*units.Ulimit)
return buildOptions{
ulimits: opts.NewUlimitOpt(&ulimits),
}
}
func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
options := newBuildOptions()
cmd := &cobra.Command{
Use: "build [OPTIONS] PATH | URL | -",
Aliases: []string{"b"},
Short: "Start a build",
Args: cli.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
options.contextPath = args[0]
options.builder = rootOpts.builder
return runBuild(dockerCli, options)
},
}
flags := cmd.Flags()
flags.BoolVar(&options.exportPush, "push", false, "Shorthand for `--output=type=registry`")
flags.BoolVar(&options.exportLoad, "load", false, "Shorthand for `--output=type=docker`")
flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, "Name and optionally a tag (format: `name:tag`)")
flags.SetAnnotation("tag", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t"})
flags.StringArrayVar(&options.buildArgs, "build-arg", []string{}, "Set build-time variables")
flags.SetAnnotation("build-arg", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables---build-arg"})
flags.StringVarP(&options.dockerfileName, "file", "f", "", "Name of the Dockerfile (default: `PATH/Dockerfile`)")
flags.SetAnnotation("file", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f"})
flags.StringArrayVar(&options.labels, "label", []string{}, "Set metadata for an image")
flags.StringArrayVar(&options.cacheFrom, "cache-from", []string{}, "External cache sources (e.g., `user/app:cache`, `type=local,src=path/to/dir`)")
flags.StringArrayVar(&options.cacheTo, "cache-to", []string{}, "Cache export destinations (e.g., `user/app:cache`, `type=local,dest=path/to/dir`)")
flags.StringVar(&options.target, "target", "", "Set the target build stage to build.")
flags.SetAnnotation("target", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#specifying-target-build-stage---target"})
flags.StringSliceVar(&options.allow, "allow", []string{}, "Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`)")
flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success")
flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build")
flags.StringSliceVar(&options.extraHosts, "add-host", []string{}, "Add a custom host-to-IP mapping (format: `host:ip`)")
flags.SetAnnotation("add-host", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#add-entries-to-container-hosts-file---add-host"})
flags.StringVar(&options.imageIDFile, "iidfile", "", "Write the image ID to the file")
flags.Var(options.ulimits, "ulimit", "Ulimit options")
// not implemented
flags.BoolVar(&options.squash, "squash", false, "Squash newly built layers into a single new layer")
flags.MarkHidden("squash")
// hidden flags
var ignore string
var ignoreSlice []string
var ignoreBool bool
var ignoreInt int64
flags.StringSliceVar(&ignoreSlice, "security-opt", []string{}, "Security options")
flags.MarkHidden("security-opt")
flags.BoolVar(&ignoreBool, "compress", false, "Compress the build context using gzip")
flags.MarkHidden("compress")
flags.StringVarP(&ignore, "memory", "m", "", "Memory limit")
flags.MarkHidden("memory")
flags.StringVar(&ignore, "memory-swap", "", "Swap limit equal to memory plus swap: `-1` to enable unlimited swap")
flags.MarkHidden("memory-swap")
flags.StringVar(&ignore, "shm-size", "", "Size of `/dev/shm`")
flags.MarkHidden("shm-size")
flags.Int64VarP(&ignoreInt, "cpu-shares", "c", 0, "CPU shares (relative weight)")
flags.MarkHidden("cpu-shares")
flags.Int64Var(&ignoreInt, "cpu-period", 0, "Limit the CPU CFS (Completely Fair Scheduler) period")
flags.MarkHidden("cpu-period")
flags.Int64Var(&ignoreInt, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
flags.MarkHidden("cpu-quota")
flags.StringVar(&ignore, "cpuset-cpus", "", "CPUs in which to allow execution (`0-3`, `0,1`)")
flags.MarkHidden("cpuset-cpus")
flags.StringVar(&ignore, "cpuset-mems", "", "MEMs in which to allow execution (`0-3`, `0,1`)")
flags.MarkHidden("cpuset-mems")
flags.StringVar(&ignore, "cgroup-parent", "", "Optional parent cgroup for the container")
flags.MarkHidden("cgroup-parent")
flags.StringVar(&ignore, "isolation", "", "Container isolation technology")
flags.MarkHidden("isolation")
flags.BoolVar(&ignoreBool, "rm", true, "Remove intermediate containers after a successful build")
flags.MarkHidden("rm")
flags.BoolVar(&ignoreBool, "force-rm", false, "Always remove intermediate containers")
flags.MarkHidden("force-rm")
platformsDefault := []string{}
if v := os.Getenv("DOCKER_DEFAULT_PLATFORM"); v != "" {
platformsDefault = []string{v}
}
flags.StringArrayVar(&options.platforms, "platform", platformsDefault, "Set target platform for build")
flags.StringArrayVar(&options.secrets, "secret", []string{}, "Secret file to expose to the build (format: `id=mysecret,src=/local/secret`)")
flags.StringArrayVar(&options.ssh, "ssh", []string{}, "SSH agent socket or keys to expose to the build (format: `default|<id>[=<socket>|<key>[,<key>]]`)")
flags.StringArrayVarP(&options.outputs, "output", "o", []string{}, "Output destination (format: `type=local,dest=path`)")
commonBuildFlags(&options.commonOptions, flags)
return cmd
}
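// commonBuildFlags registers the flags shared by the build-style commands:
// --no-cache, --progress, --pull and --metadata-file.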
func commonBuildFlags(options *commonOptions, flags *pflag.FlagSet) {
options.noCache = flags.Bool("no-cache", false, "Do not use cache when building the image")
flags.StringVar(&options.progress, "progress", "auto", "Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output")
options.pull = flags.Bool("pull", false, "Always attempt to pull a newer version of the image")
flags.StringVar(&options.metadataFile, "metadata-file", "", "Write build result metadata to the file")
}
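// listToMap turns KEY=VALUE strings into a map. An entry without '=' is looked
// up in the process environment when defaultEnv is true and is otherwise
// mapped to the empty string.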
func listToMap(values []string, defaultEnv bool) map[string]string {
result := make(map[string]string, len(values))
for _, value := range values {
kv := strings.SplitN(value, "=", 2)
if len(kv) == 1 {
if defaultEnv {
v, ok := os.LookupEnv(kv[0])
if ok {
result[kv[0]] = v
}
} else {
result[kv[0]] = ""
}
} else {
result[kv[0]] = kv[1]
}
}
return result
}
| [
"\"DOCKER_DEFAULT_PLATFORM\""
] | [] | [
"DOCKER_DEFAULT_PLATFORM"
] | [] | ["DOCKER_DEFAULT_PLATFORM"] | go | 1 | 0 | |
stagemonitor-configuration/src/main/java/org/stagemonitor/configuration/source/EnvironmentVariableConfigurationSource.java | package org.stagemonitor.configuration.source;
import java.util.Map;
/**
* This configuration source gets its values from the operating system's environment variables.
* <p>
* Because of the naming restrictions/conventions of environment variables, all dots ('.') are replaced with underscores
* and all letters are converted to upper case.
* <p>
* Example: To set the configuration key <code>stagemonitor.active</code>, the environment variable has to be
* <code>STAGEMONITOR_ACTIVE</code>
*/
public class EnvironmentVariableConfigurationSource extends AbstractConfigurationSource {
private Map<String, String> env;
public EnvironmentVariableConfigurationSource() {
reload();
}
public EnvironmentVariableConfigurationSource(Map<String, String> env) {
this.env = env;
}
/**
* Returns the configuration value from the operating system's environment variables.
* <p>
* Because of the naming restrictions/conventions of environment variables, all dots ('.') are replaced with underscores
* and all letters are converted to upper case.
* <p>
* Example: To set the configuration key <code>stagemonitor.active</code>, the environment variable has to be
* <code>STAGEMONITOR_ACTIVE</code>
*
* @param key the property key
* @return the value
*/
@Override
public String getValue(String key) {
return env.get(key.replace('.', '_').toUpperCase());
}
@Override
public String getName() {
return "Environment Variables";
}
@Override
public void reload() {
this.env = System.getenv();
}
}
| [] | [] | [] | [] | [] | java | 0 | 0 | |
main.go | package main
import (
"log"
"net/http"
"os"
"service/index"
"service/server"
)
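// Runtime configuration comes from the environment: CERT_FILE and KEY_FILE
// locate the TLS certificate and key, and SERVICE_ADDR is the address the
// HTTPS server listens on.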
var (
CertFile = os.Getenv("CERT_FILE")
KeyFile = os.Getenv("KEY_FILE")
ServiceAddr = os.Getenv("SERVICE_ADDR")
)
func main() {
logger := log.New(os.Stdout, " ", log.LstdFlags|log.Lshortfile)
h := index.NewHandlers(logger)
mux := http.NewServeMux()
h.SetupRoutes(mux)
srv := server.New(mux, ServiceAddr)
logger.Println("Server starting")
err := srv.ListenAndServeTLS(CertFile, KeyFile)
if err != nil {
logger.Fatalf("Server failed to start: %v", err)
}
}
| [
"\"CERT_FILE\"",
"\"KEY_FILE\"",
"\"SERVICE_ADDR\""
] | [] | [
"SERVICE_ADDR",
"CERT_FILE",
"KEY_FILE"
] | [] | ["SERVICE_ADDR", "CERT_FILE", "KEY_FILE"] | go | 3 | 0 | |
examples/customers/delete/delete_customer.go | package main
import (
"encoding/json"
"fmt"
"os"
"github.com/tuneuptechnology/tuneuptechnology-go/v3"
)
func main() {
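// API_EMAIL and API_KEY are read from the environment to authenticate the
// client; customer ID 23 below is only an illustrative placeholder.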
client := tuneuptechnology.New(os.Getenv("API_EMAIL"), os.Getenv("API_KEY"))
customer := client.DeleteCustomer(23)
prettyJSON, err := json.MarshalIndent(customer, "", " ")
if err != nil {
fmt.Fprintln(os.Stderr, "error creating JSON:", err)
}
fmt.Printf("%s\n", string(prettyJSON))
}
| [
"\"API_EMAIL\"",
"\"API_KEY\""
] | [] | [
"API_KEY",
"API_EMAIL"
] | [] | ["API_KEY", "API_EMAIL"] | go | 2 | 0 | |
Tutorial2/app.py | import os
from flask import Flask
app = Flask(__name__)
# app.config.from_object('Tutorial2.settings.Test')
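# FLASK_SETTINGS must name a config file for Flask to load, e.g.
# `export FLASK_SETTINGS=/path/to/settings.cfg` (the path shown is hypothetical).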
app.config.from_envvar('FLASK_SETTINGS')
app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY', '')
@app.route('/')
def show_config():
return f'{app.testing=}'
| [] | [] | [
"SECRET_KEY"
] | [] | ["SECRET_KEY"] | python | 1 | 0 | |
tests/integration/integration_nodejs_test.go | // Copyright 2016-2020, Pulumi Corporation. All rights reserved.
//go:build nodejs || all
// +build nodejs all
package ints
import (
"bytes"
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
"github.com/pulumi/pulumi/pkg/v3/resource/deploy/providers"
"github.com/pulumi/pulumi/pkg/v3/secrets/cloud"
"github.com/pulumi/pulumi/pkg/v3/secrets/passphrase"
"github.com/pulumi/pulumi/pkg/v3/testing/integration"
"github.com/pulumi/pulumi/sdk/v3/go/common/apitype"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource"
ptesting "github.com/pulumi/pulumi/sdk/v3/go/common/testing"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/contract"
"github.com/stretchr/testify/assert"
)
// TestEmptyNodeJS simply tests that we can run an empty NodeJS project.
func TestEmptyNodeJS(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("empty", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
})
}
// Tests emitting many engine events doesn't result in a performance problem.
func TestEngineEventPerf(t *testing.T) {
t.Skip() // TODO[pulumi/pulumi#7883]
// Prior to pulumi/pulumi#2303, a preview or update would take ~40s.
// Since then, it should now be down to ~4s, with additional padding,
// since some Travis machines (especially the macOS ones) seem quite slow
// to begin with.
benchmarkEnforcer := &assertPerfBenchmark{
T: t,
MaxPreviewDuration: 8 * time.Second,
MaxUpdateDuration: 8 * time.Second,
}
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "ee_perf",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
ReportStats: benchmarkEnforcer,
// Don't run in parallel since it is sensitive to system resources.
NoParallel: true,
})
}
// TestEngineEvents ensures that the test framework properly records and reads engine events.
func TestEngineEvents(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "single_resource",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
NoParallel: true, // avoid contention for Dir
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
// Ensure that we have a non-empty list of events.
assert.NotEmpty(t, stackInfo.Events)
// Ensure that we have two "ResourcePre" events: one for the stack and one for our resource.
preEventResourceTypes := []string{}
for _, e := range stackInfo.Events {
if e.ResourcePreEvent != nil {
preEventResourceTypes = append(preEventResourceTypes, e.ResourcePreEvent.Metadata.Type)
}
}
assert.Equal(t, 2, len(preEventResourceTypes))
assert.Contains(t, preEventResourceTypes, "pulumi:pulumi:Stack")
assert.Contains(t, preEventResourceTypes, "pulumi-nodejs:dynamic:Resource")
},
})
}
// TestProjectMain tests out the ability to override the main entrypoint.
func TestProjectMain(t *testing.T) {
test := integration.ProgramTestOptions{
Dir: "project_main",
Dependencies: []string{"@pulumi/pulumi"},
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
// Simple runtime validation that just ensures the checkpoint was written and read.
assert.NotNil(t, stackInfo.Deployment)
},
}
integration.ProgramTest(t, &test)
t.Run("AbsolutePath", func(t *testing.T) {
e := ptesting.NewEnvironment(t)
defer func() {
if !t.Failed() {
e.DeleteEnvironment()
}
}()
e.ImportDirectory("project_main_abs")
// write a new Pulumi.yaml using the absolute path of the environment as "main"
yamlPath := filepath.Join(e.RootPath, "Pulumi.yaml")
absYamlContents := fmt.Sprintf(
"name: project_main_abs\ndescription: A program with an absolute entry point\nruntime: nodejs\nmain: %s\n",
e.RootPath,
)
t.Logf("writing new Pulumi.yaml: \npath: %s\ncontents:%s", yamlPath, absYamlContents)
if err := os.WriteFile(yamlPath, []byte(absYamlContents), 0644); err != nil {
t.Error(err)
return
}
e.RunCommand("yarn", "link", "@pulumi/pulumi")
e.RunCommand("pulumi", "login", "--cloud-url", e.LocalURL())
e.RunCommand("pulumi", "stack", "init", "main-abs")
e.RunCommand("pulumi", "preview")
e.RunCommand("pulumi", "stack", "rm", "--yes")
})
t.Run("ParentFolder", func(t *testing.T) {
e := ptesting.NewEnvironment(t)
defer func() {
if !t.Failed() {
e.DeleteEnvironment()
}
}()
e.ImportDirectory("project_main_parent")
// yarn link first
e.RunCommand("yarn", "link", "@pulumi/pulumi")
// then virtually change directory to the location of the nested Pulumi.yaml
e.CWD = filepath.Join(e.RootPath, "foo", "bar")
e.RunCommand("pulumi", "login", "--cloud-url", e.LocalURL())
e.RunCommand("pulumi", "stack", "init", "main-parent")
e.RunCommand("pulumi", "preview")
e.RunCommand("pulumi", "stack", "rm", "--yes")
})
}
// TestStackProjectName ensures we can read the Pulumi stack and project name from within the program.
func TestStackProjectName(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "stack_project_name",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
})
}
func TestRemoveWithResourcesBlocked(t *testing.T) {
if os.Getenv("PULUMI_ACCESS_TOKEN") == "" {
t.Skipf("Skipping: PULUMI_ACCESS_TOKEN is not set")
}
e := ptesting.NewEnvironment(t)
defer func() {
if !t.Failed() {
e.DeleteEnvironment()
}
}()
stackName, err := resource.NewUniqueHex("rm-test-", 8, -1)
contract.AssertNoErrorf(err, "resource.NewUniqueHex should not fail when no maximum length is set")
e.ImportDirectory("single_resource")
e.RunCommand("pulumi", "stack", "init", stackName)
e.RunCommand("yarn", "link", "@pulumi/pulumi")
e.RunCommand("pulumi", "up", "--non-interactive", "--yes", "--skip-preview")
_, stderr := e.RunCommandExpectError("pulumi", "stack", "rm", "--yes")
assert.Contains(t, stderr, "--force")
e.RunCommand("pulumi", "destroy", "--skip-preview", "--non-interactive", "--yes")
e.RunCommand("pulumi", "stack", "rm", "--yes")
}
// TestStackOutputs ensures we can export variables from a stack and have them get recorded as outputs.
func TestStackOutputsNodeJS(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("stack_outputs", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
// Ensure the checkpoint contains a single resource, the Stack, with two outputs.
fmt.Printf("Deployment: %v", stackInfo.Deployment)
assert.NotNil(t, stackInfo.Deployment)
if assert.Equal(t, 1, len(stackInfo.Deployment.Resources)) {
stackRes := stackInfo.Deployment.Resources[0]
assert.NotNil(t, stackRes)
assert.Equal(t, resource.RootStackType, stackRes.URN.Type())
assert.Equal(t, 0, len(stackRes.Inputs))
assert.Equal(t, 2, len(stackRes.Outputs))
assert.Equal(t, "ABC", stackRes.Outputs["xyz"])
assert.Equal(t, float64(42), stackRes.Outputs["foo"])
}
},
})
}
// TestStackOutputsJSON ensures the CLI properly formats stack outputs as JSON when requested.
func TestStackOutputsJSON(t *testing.T) {
e := ptesting.NewEnvironment(t)
defer func() {
if !t.Failed() {
e.DeleteEnvironment()
}
}()
e.ImportDirectory(filepath.Join("stack_outputs", "nodejs"))
e.RunCommand("yarn", "link", "@pulumi/pulumi")
e.RunCommand("pulumi", "login", "--cloud-url", e.LocalURL())
e.RunCommand("pulumi", "stack", "init", "stack-outs")
e.RunCommand("pulumi", "up", "--non-interactive", "--yes", "--skip-preview")
stdout, _ := e.RunCommand("pulumi", "stack", "output", "--json")
assert.Equal(t, `{
"foo": 42,
"xyz": "ABC"
}
`, stdout)
}
// TestStackOutputsDisplayed ensures that outputs are printed at the end of an update
func TestStackOutputsDisplayed(t *testing.T) {
stdout := &bytes.Buffer{}
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("stack_outputs", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: false,
Verbose: true,
Stdout: stdout,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
output := stdout.String()
// ensure we get the outputs info both for the normal update, and for the no-change update.
assert.Contains(t, output, "Outputs:\n foo: 42\n xyz: \"ABC\"\n\nResources:\n + 1 created")
assert.Contains(t, output, "Outputs:\n foo: 42\n xyz: \"ABC\"\n\nResources:\n 1 unchanged")
},
})
}
// TestStackOutputsSuppressed ensures that outputs whose values are intentionally suppresses don't show.
func TestStackOutputsSuppressed(t *testing.T) {
stdout := &bytes.Buffer{}
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("stack_outputs", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: false,
Verbose: true,
Stdout: stdout,
UpdateCommandlineFlags: []string{"--suppress-outputs"},
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
output := stdout.String()
assert.NotContains(t, output, "Outputs:\n foo: 42\n xyz: \"ABC\"\n")
assert.NotContains(t, output, "Outputs:\n foo: 42\n xyz: \"ABC\"\n")
},
})
}
// TestStackParenting tests out that stacks and components are parented correctly.
func TestStackParenting(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "stack_parenting",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
// Ensure the checkpoint contains resources parented correctly. This should look like this:
//
// A F
// / \ \
// B C G
// / \
// D E
//
// with the caveat, of course, that A and F will share a common parent, the implicit stack.
assert.NotNil(t, stackInfo.Deployment)
if assert.Equal(t, 9, len(stackInfo.Deployment.Resources)) {
stackRes := stackInfo.Deployment.Resources[0]
assert.NotNil(t, stackRes)
assert.Equal(t, resource.RootStackType, stackRes.Type)
assert.Equal(t, "", string(stackRes.Parent))
urns := make(map[string]resource.URN)
for _, res := range stackInfo.Deployment.Resources[1:] {
assert.NotNil(t, res)
urns[string(res.URN.Name())] = res.URN
switch res.URN.Name() {
case "a", "f":
assert.NotEqual(t, "", res.Parent)
assert.Equal(t, stackRes.URN, res.Parent)
case "b", "c":
assert.Equal(t, urns["a"], res.Parent)
case "d", "e":
assert.Equal(t, urns["c"], res.Parent)
case "g":
assert.Equal(t, urns["f"], res.Parent)
case "default":
// Default providers are not parented.
assert.Equal(t, "", string(res.Parent))
default:
t.Fatalf("unexpected name %s", res.URN.Name())
}
}
}
},
})
}
func TestStackBadParenting(t *testing.T) {
if runtime.GOOS == WindowsOS {
t.Skip("Temporarily skipping test on Windows - pulumi/pulumi#3811")
}
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "stack_bad_parenting",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
ExpectFailure: true,
})
}
// TestStackDependencyGraph tests that the dependency graph of a stack is saved
// in the checkpoint file.
func TestStackDependencyGraph(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "stack_dependencies",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
assert.NotNil(t, stackInfo.Deployment)
latest := stackInfo.Deployment
assert.True(t, len(latest.Resources) >= 2)
sawFirst := false
sawSecond := false
for _, res := range latest.Resources {
urn := string(res.URN)
if strings.Contains(urn, "dynamic:Resource::first") {
// The first resource doesn't depend on anything.
assert.Equal(t, 0, len(res.Dependencies))
sawFirst = true
} else if strings.Contains(urn, "dynamic:Resource::second") {
// The second resource uses an Output property of the first resource, so it
// depends directly on first.
assert.Equal(t, 1, len(res.Dependencies))
assert.True(t, strings.Contains(string(res.Dependencies[0]), "dynamic:Resource::first"))
sawSecond = true
}
}
assert.True(t, sawFirst && sawSecond)
},
})
}
// Tests basic configuration from the perspective of a Pulumi program.
func TestConfigBasicNodeJS(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("config_basic", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
Config: map[string]string{
"aConfigValue": "this value is a value",
},
Secrets: map[string]string{
"bEncryptedSecret": "this super secret is encrypted",
},
OrderedConfig: []integration.ConfigValue{
{Key: "outer.inner", Value: "value", Path: true},
{Key: "names[0]", Value: "a", Path: true},
{Key: "names[1]", Value: "b", Path: true},
{Key: "names[2]", Value: "c", Path: true},
{Key: "names[3]", Value: "super secret name", Path: true, Secret: true},
{Key: "servers[0].port", Value: "80", Path: true},
{Key: "servers[0].host", Value: "example", Path: true},
{Key: "a.b[0].c", Value: "true", Path: true},
{Key: "a.b[1].c", Value: "false", Path: true},
{Key: "tokens[0]", Value: "shh", Path: true, Secret: true},
{Key: "foo.bar", Value: "don't tell", Path: true, Secret: true},
},
})
}
func TestConfigCaptureNodeJS(t *testing.T) {
if runtime.GOOS == WindowsOS {
t.Skip("Temporarily skipping test on Windows - pulumi/pulumi#3811")
}
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("config_capture_e2e", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
Config: map[string]string{
"value": "it works",
},
})
}
// Tests that accessing config secrets using non-secret APIs results in warnings being logged.
func TestConfigSecretsWarnNodeJS(t *testing.T) {
// TODO[pulumi/pulumi#7127]: Re-enabled the warning.
t.Skip("Temporarily skipping test until we've re-enabled the warning - pulumi/pulumi#7127")
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("config_secrets_warn", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
Config: map[string]string{
"plainstr1": "1",
"plainstr2": "2",
"plainstr3": "3",
"plainstr4": "4",
"plainbool1": "true",
"plainbool2": "true",
"plainbool3": "true",
"plainbool4": "true",
"plainnum1": "1",
"plainnum2": "2",
"plainnum3": "3",
"plainnum4": "4",
"plainobj1": "{}",
"plainobj2": "{}",
"plainobj3": "{}",
"plainobj4": "{}",
},
Secrets: map[string]string{
"str1": "1",
"str2": "2",
"str3": "3",
"str4": "4",
"bool1": "true",
"bool2": "true",
"bool3": "true",
"bool4": "true",
"num1": "1",
"num2": "2",
"num3": "3",
"num4": "4",
"obj1": "{}",
"obj2": "{}",
"obj3": "{}",
"obj4": "{}",
},
OrderedConfig: []integration.ConfigValue{
{Key: "parent1.foo", Value: "plain1", Path: true},
{Key: "parent1.bar", Value: "secret1", Path: true, Secret: true},
{Key: "parent2.foo", Value: "plain2", Path: true},
{Key: "parent2.bar", Value: "secret2", Path: true, Secret: true},
{Key: "names1[0]", Value: "plain1", Path: true},
{Key: "names1[1]", Value: "secret1", Path: true, Secret: true},
{Key: "names2[0]", Value: "plain2", Path: true},
{Key: "names2[1]", Value: "secret2", Path: true, Secret: true},
},
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
assert.NotEmpty(t, stackInfo.Events)
//nolint:lll
expectedWarnings := []string{
"Configuration 'config_secrets_node:str1' value is a secret; use `getSecret` instead of `get`",
"Configuration 'config_secrets_node:str2' value is a secret; use `requireSecret` instead of `require`",
"Configuration 'config_secrets_node:bool1' value is a secret; use `getSecretBoolean` instead of `getBoolean`",
"Configuration 'config_secrets_node:bool2' value is a secret; use `requireSecretBoolean` instead of `requireBoolean`",
"Configuration 'config_secrets_node:num1' value is a secret; use `getSecretNumber` instead of `getNumber`",
"Configuration 'config_secrets_node:num2' value is a secret; use `requireSecretNumber` instead of `requireNumber`",
"Configuration 'config_secrets_node:obj1' value is a secret; use `getSecretObject` instead of `getObject`",
"Configuration 'config_secrets_node:obj2' value is a secret; use `requireSecretObject` instead of `requireObject`",
"Configuration 'config_secrets_node:parent1' value is a secret; use `getSecretObject` instead of `getObject`",
"Configuration 'config_secrets_node:parent2' value is a secret; use `requireSecretObject` instead of `requireObject`",
"Configuration 'config_secrets_node:names1' value is a secret; use `getSecretObject` instead of `getObject`",
"Configuration 'config_secrets_node:names2' value is a secret; use `requireSecretObject` instead of `requireObject`",
}
for _, warning := range expectedWarnings {
var found bool
for _, event := range stackInfo.Events {
if event.DiagnosticEvent != nil && event.DiagnosticEvent.Severity == "warning" &&
strings.Contains(event.DiagnosticEvent.Message, warning) {
found = true
break
}
}
assert.True(t, found, "expected warning %q", warning)
}
// These keys should not be in any warning messages.
unexpectedWarnings := []string{
"plainstr1",
"plainstr2",
"plainstr3",
"plainstr4",
"plainbool1",
"plainbool2",
"plainbool3",
"plainbool4",
"plainnum1",
"plainnum2",
"plainnum3",
"plainnum4",
"plainobj1",
"plainobj2",
"plainobj3",
"plainobj4",
"str3",
"str4",
"bool3",
"bool4",
"num3",
"num4",
"obj3",
"obj4",
}
for _, warning := range unexpectedWarnings {
for _, event := range stackInfo.Events {
if event.DiagnosticEvent != nil {
assert.NotContains(t, event.DiagnosticEvent.Message, warning)
}
}
}
},
})
}
func TestInvalidVersionInPackageJson(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("invalid_package_json"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
Config: map[string]string{},
})
}
// Tests an explicit provider instance.
func TestExplicitProvider(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "explicit_provider",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
assert.NotNil(t, stackInfo.Deployment)
latest := stackInfo.Deployment
// Expect one stack resource, two provider resources, and two custom resources.
assert.True(t, len(latest.Resources) == 5)
var defaultProvider *apitype.ResourceV3
var explicitProvider *apitype.ResourceV3
for _, res := range latest.Resources {
urn := res.URN
switch urn.Name() {
case "default":
assert.True(t, providers.IsProviderType(res.Type))
assert.Nil(t, defaultProvider)
prov := res
defaultProvider = &prov
case "p":
assert.True(t, providers.IsProviderType(res.Type))
assert.Nil(t, explicitProvider)
prov := res
explicitProvider = &prov
case "a":
prov, err := providers.ParseReference(res.Provider)
assert.NoError(t, err)
assert.NotNil(t, defaultProvider)
defaultRef, err := providers.NewReference(defaultProvider.URN, defaultProvider.ID)
assert.NoError(t, err)
assert.Equal(t, defaultRef.String(), prov.String())
case "b":
prov, err := providers.ParseReference(res.Provider)
assert.NoError(t, err)
assert.NotNil(t, explicitProvider)
explicitRef, err := providers.NewReference(explicitProvider.URN, explicitProvider.ID)
assert.NoError(t, err)
assert.Equal(t, explicitRef.String(), prov.String())
}
}
assert.NotNil(t, defaultProvider)
assert.NotNil(t, explicitProvider)
},
})
}
// Tests that stack references work in Node.
func TestStackReferenceNodeJS(t *testing.T) {
if runtime.GOOS == WindowsOS {
t.Skip("Temporarily skipping test on Windows - pulumi/pulumi#3811")
}
if owner := os.Getenv("PULUMI_TEST_OWNER"); owner == "" {
t.Skipf("Skipping: PULUMI_TEST_OWNER is not set")
}
opts := &integration.ProgramTestOptions{
Dir: filepath.Join("stack_reference", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
Config: map[string]string{
"org": os.Getenv("PULUMI_TEST_OWNER"),
},
EditDirs: []integration.EditDir{
{
Dir: "step1",
Additive: true,
},
{
Dir: "step2",
Additive: true,
},
},
}
integration.ProgramTest(t, opts)
}
// Tests that reads of unknown IDs do not fail.
func TestGetCreated(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "get_created",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
})
}
// TestProviderSecretConfig that a first class provider can be created when it has secrets as part of its config.
func TestProviderSecretConfig(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "provider_secret_config",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
})
}
func TestResourceWithSecretSerializationNodejs(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("secret_outputs", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
// The program exports three resources:
// 1. One named `withSecret` whose prefix property should be secret, specified via `pulumi.secret()`.
// 2. One named `withSecretAdditional` whose prefix property should be a secret, specified via
// additionalSecretOutputs.
// 3. One named `withoutSecret` which should not be a secret.
// We serialize all of these as POJO objects, so they appear as maps in the output.
withSecretProps, ok := stackInfo.Outputs["withSecret"].(map[string]interface{})
assert.Truef(t, ok, "POJO output was not serialized as a map")
withSecretAdditionalProps, ok := stackInfo.Outputs["withSecretAdditional"].(map[string]interface{})
assert.Truef(t, ok, "POJO output was not serialized as a map")
withoutSecretProps, ok := stackInfo.Outputs["withoutSecret"].(map[string]interface{})
assert.Truef(t, ok, "POJO output was not serialized as a map")
// The secret prop should have been serialized as a secret
secretPropValue, ok := withSecretProps["prefix"].(map[string]interface{})
assert.Truef(t, ok, "secret output was not serialized as a secret")
assert.Equal(t, resource.SecretSig, secretPropValue[resource.SigKey].(string))
// The other secret prop should have been serialized as a secret
secretAdditionalPropValue, ok := withSecretAdditionalProps["prefix"].(map[string]interface{})
assert.Truef(t, ok, "secret output was not serialized as a secret")
assert.Equal(t, resource.SecretSig, secretAdditionalPropValue[resource.SigKey].(string))
// And here, the prop was not marked as a secret, so it should just be a string value
_, isString := withoutSecretProps["prefix"].(string)
assert.Truef(t, isString, "non-secret output was not a string")
},
})
}
func TestStackReferenceSecretsNodejs(t *testing.T) {
if runtime.GOOS == WindowsOS {
t.Skip("Temporarily skipping test on Windows - pulumi/pulumi#3811")
}
owner := os.Getenv("PULUMI_TEST_OWNER")
if owner == "" {
t.Skipf("Skipping: PULUMI_TEST_OWNER is not set")
}
d := "stack_reference_secrets"
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join(d, "nodejs", "step1"),
Dependencies: []string{"@pulumi/pulumi"},
Config: map[string]string{
"org": owner,
},
Quick: true,
EditDirs: []integration.EditDir{
{
Dir: filepath.Join(d, "nodejs", "step2"),
Additive: true,
ExpectNoChanges: true,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
_, isString := stackInfo.Outputs["refNormal"].(string)
assert.Truef(t, isString, "referenced non-secret output was not a string")
secretPropValue, ok := stackInfo.Outputs["refSecret"].(map[string]interface{})
assert.Truef(t, ok, "secret output was not serialized as a secret")
assert.Equal(t, resource.SecretSig, secretPropValue[resource.SigKey].(string))
},
},
},
})
}
func TestPasswordlessPassphraseSecretsProvider(t *testing.T) {
testOptions := integration.ProgramTestOptions{
Dir: "cloud_secrets_provider",
Dependencies: []string{"@pulumi/pulumi"},
SecretsProvider: "passphrase",
Env: []string{"PULUMI_CONFIG_PASSPHRASE=\"\""},
NoParallel: true,
Secrets: map[string]string{
"mysecret": "THISISASECRET",
},
CloudURL: "file://~",
}
workingTestOptions := testOptions.With(integration.ProgramTestOptions{
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
os.Setenv("PULUMI_CONFIG_PASSPHRASE", "")
secretsProvider := stackInfo.Deployment.SecretsProviders
assert.NotNil(t, secretsProvider)
assert.Equal(t, secretsProvider.Type, "passphrase")
_, err := passphrase.NewPassphaseSecretsManagerFromState(secretsProvider.State)
assert.NoError(t, err)
out, ok := stackInfo.Outputs["out"].(map[string]interface{})
assert.True(t, ok)
_, ok = out["ciphertext"]
assert.True(t, ok)
os.Unsetenv("PULUMI_CONFIG_PASSPHRASE")
},
})
brokenTestOptions := testOptions.With(integration.ProgramTestOptions{
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
secretsProvider := stackInfo.Deployment.SecretsProviders
assert.NotNil(t, secretsProvider)
assert.Equal(t, secretsProvider.Type, "passphrase")
_, err := passphrase.NewPassphaseSecretsManagerFromState(secretsProvider.State)
assert.Error(t, err)
},
})
t.Run("works-when-passphrase-set", func(t *testing.T) {
integration.ProgramTest(t, &workingTestOptions)
})
t.Run("error-when-passphrase-not-set", func(t *testing.T) {
integration.ProgramTest(t, &brokenTestOptions)
})
}
func TestCloudSecretProvider(t *testing.T) {
awsKmsKeyAlias := os.Getenv("PULUMI_TEST_KMS_KEY_ALIAS")
if awsKmsKeyAlias == "" {
t.Skipf("Skipping: PULUMI_TEST_KMS_KEY_ALIAS is not set")
}
azureKeyVault := os.Getenv("PULUMI_TEST_AZURE_KEY")
if azureKeyVault == "" {
t.Skipf("Skipping: PULUMI_TEST_AZURE_KEY is not set")
}
gcpKmsKey := os.Getenv("PULUMI_TEST_GCP_KEY")
if gcpKmsKey == "" {
t.Skipf("Skipping: PULUMI_TEST_GCP_KEY is not set")
}
// Generic test options for all providers
testOptions := integration.ProgramTestOptions{
Dir: "cloud_secrets_provider",
Dependencies: []string{"@pulumi/pulumi"},
SecretsProvider: fmt.Sprintf("awskms://alias/%s", awsKmsKeyAlias),
Secrets: map[string]string{
"mysecret": "THISISASECRET",
},
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
secretsProvider := stackInfo.Deployment.SecretsProviders
assert.NotNil(t, secretsProvider)
assert.Equal(t, secretsProvider.Type, "cloud")
_, err := cloud.NewCloudSecretsManagerFromState(secretsProvider.State)
assert.NoError(t, err)
out, ok := stackInfo.Outputs["out"].(map[string]interface{})
assert.True(t, ok)
_, ok = out["ciphertext"]
assert.True(t, ok)
},
}
localTestOptions := testOptions.With(integration.ProgramTestOptions{
CloudURL: "file://~",
})
azureTestOptions := testOptions.With(integration.ProgramTestOptions{
SecretsProvider: fmt.Sprintf("azurekeyvault://%s", azureKeyVault),
})
gcpTestOptions := testOptions.With(integration.ProgramTestOptions{
SecretsProvider: fmt.Sprintf("gcpkms://projects/%s", gcpKmsKey),
})
// Run with default Pulumi service backend
t.Run("service", func(t *testing.T) {
integration.ProgramTest(t, &testOptions)
})
// Check Azure secrets provider
t.Run("azure", func(t *testing.T) { integration.ProgramTest(t, &azureTestOptions) })
// Check gcloud secrets provider
t.Run("gcp", func(t *testing.T) { integration.ProgramTest(t, &gcpTestOptions) })
// Also run with local backend
t.Run("local", func(t *testing.T) { integration.ProgramTest(t, &localTestOptions) })
}
// Tests a resource with a large (>4mb) string prop in Node.js
func TestLargeResourceNode(t *testing.T) {
if runtime.GOOS == WindowsOS {
t.Skip("Temporarily skipping test on Windows - pulumi/pulumi#3811")
}
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("large_resource", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
})
}
// Tests enum outputs
func TestEnumOutputNode(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("enums", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assert.NotNil(t, stack.Outputs)
assert.Equal(t, "Burgundy", stack.Outputs["myTreeType"])
assert.Equal(t, "Pulumi Planters Inc.foo", stack.Outputs["myTreeFarmChanged"])
assert.Equal(t, "My Burgundy Rubber tree is from Pulumi Planters Inc.", stack.Outputs["mySentence"])
},
})
}
// Test remote component construction in Node.
func TestConstructNode(t *testing.T) {
if runtime.GOOS == WindowsOS {
t.Skip("Temporarily skipping test on Windows")
}
tests := []struct {
componentDir string
expectedResourceCount int
env []string
}{
{
componentDir: "testcomponent",
expectedResourceCount: 9,
},
{
componentDir: "testcomponent-python",
expectedResourceCount: 9,
env: []string{pulumiRuntimeVirtualEnv(t, filepath.Join("..", ".."))},
},
{
componentDir: "testcomponent-go",
expectedResourceCount: 8, // One less because no dynamic provider.
},
}
for _, test := range tests {
t.Run(test.componentDir, func(t *testing.T) {
pathEnv := pathEnv(t, filepath.Join("construct_component", test.componentDir))
integration.ProgramTest(t,
optsForConstructNode(t, test.expectedResourceCount, append(test.env, pathEnv)...))
})
}
}
func optsForConstructNode(t *testing.T, expectedResourceCount int, env ...string) *integration.ProgramTestOptions {
return &integration.ProgramTestOptions{
Env: env,
Dir: filepath.Join("construct_component", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
Secrets: map[string]string{
"secret": "this super secret is encrypted",
},
Quick: true,
NoParallel: true,
// verify that additional flags don't cause the component provider to hang
UpdateCommandlineFlags: []string{"--logflow", "--logtostderr"},
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
assert.NotNil(t, stackInfo.Deployment)
if assert.Equal(t, expectedResourceCount, len(stackInfo.Deployment.Resources)) {
stackRes := stackInfo.Deployment.Resources[0]
assert.NotNil(t, stackRes)
assert.Equal(t, resource.RootStackType, stackRes.Type)
assert.Equal(t, "", string(stackRes.Parent))
// Check that dependencies flow correctly between the originating program and the remote component
// plugin.
urns := make(map[string]resource.URN)
for _, res := range stackInfo.Deployment.Resources[1:] {
assert.NotNil(t, res)
urns[string(res.URN.Name())] = res.URN
switch res.URN.Name() {
case "child-a":
for _, deps := range res.PropertyDependencies {
assert.Empty(t, deps)
}
case "child-b":
expected := []resource.URN{urns["a"]}
assert.ElementsMatch(t, expected, res.Dependencies)
assert.ElementsMatch(t, expected, res.PropertyDependencies["echo"])
case "child-c":
expected := []resource.URN{urns["a"], urns["child-a"]}
assert.ElementsMatch(t, expected, res.Dependencies)
assert.ElementsMatch(t, expected, res.PropertyDependencies["echo"])
case "a", "b", "c":
secretPropValue, ok := res.Outputs["secret"].(map[string]interface{})
assert.Truef(t, ok, "secret output was not serialized as a secret")
assert.Equal(t, resource.SecretSig, secretPropValue[resource.SigKey].(string))
}
}
}
},
}
}
// Test remote component construction with a child resource that takes a long time to be created, ensuring it's created.
func TestConstructSlowNode(t *testing.T) {
pathEnv := testComponentSlowPathEnv(t)
var opts *integration.ProgramTestOptions
opts = &integration.ProgramTestOptions{
Env: []string{pathEnv},
Dir: filepath.Join("construct_component_slow", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
assert.NotNil(t, stackInfo.Deployment)
if assert.Equal(t, 5, len(stackInfo.Deployment.Resources)) {
stackRes := stackInfo.Deployment.Resources[0]
assert.NotNil(t, stackRes)
assert.Equal(t, resource.RootStackType, stackRes.Type)
assert.Equal(t, "", string(stackRes.Parent))
}
},
}
integration.ProgramTest(t, opts)
}
// Test remote component construction with prompt inputs.
func TestConstructPlainNode(t *testing.T) {
tests := []struct {
componentDir string
expectedResourceCount int
env []string
}{
{
componentDir: "testcomponent",
expectedResourceCount: 9,
},
{
componentDir: "testcomponent-python",
expectedResourceCount: 9,
env: []string{pulumiRuntimeVirtualEnv(t, filepath.Join("..", ".."))},
},
{
componentDir: "testcomponent-go",
expectedResourceCount: 8, // One less because no dynamic provider.
},
}
for _, test := range tests {
t.Run(test.componentDir, func(t *testing.T) {
pathEnv := pathEnv(t, filepath.Join("construct_component_plain", test.componentDir))
integration.ProgramTest(t,
optsForConstructPlainNode(t, test.expectedResourceCount, append(test.env, pathEnv)...))
})
}
}
func optsForConstructPlainNode(t *testing.T, expectedResourceCount int, env ...string) *integration.ProgramTestOptions {
return &integration.ProgramTestOptions{
Env: env,
Dir: filepath.Join("construct_component_plain", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
NoParallel: true,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
assert.NotNil(t, stackInfo.Deployment)
assert.Equal(t, expectedResourceCount, len(stackInfo.Deployment.Resources))
},
}
}
// Test remote component inputs properly handle unknowns.
func TestConstructUnknownNode(t *testing.T) {
testConstructUnknown(t, "nodejs", "@pulumi/pulumi")
}
// Test methods on remote components.
func TestConstructMethodsNode(t *testing.T) {
tests := []struct {
componentDir string
env []string
}{
{
componentDir: "testcomponent",
},
{
componentDir: "testcomponent-python",
env: []string{pulumiRuntimeVirtualEnv(t, filepath.Join("..", ".."))},
},
{
componentDir: "testcomponent-go",
},
}
for _, test := range tests {
t.Run(test.componentDir, func(t *testing.T) {
pathEnv := pathEnv(t, filepath.Join("construct_component_methods", test.componentDir))
integration.ProgramTest(t, &integration.ProgramTestOptions{
Env: append(test.env, pathEnv),
Dir: filepath.Join("construct_component_methods", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
NoParallel: true, // avoid contention for Dir
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
assert.Equal(t, "Hello World, Alice!", stackInfo.Outputs["message"])
},
})
})
}
}
func TestConstructMethodsUnknownNode(t *testing.T) {
testConstructMethodsUnknown(t, "nodejs", "@pulumi/pulumi")
}
func TestConstructMethodsResourcesNode(t *testing.T) {
testConstructMethodsResources(t, "nodejs", "@pulumi/pulumi")
}
func TestConstructMethodsErrorsNode(t *testing.T) {
testConstructMethodsErrors(t, "nodejs", "@pulumi/pulumi")
}
func TestConstructProviderNode(t *testing.T) {
const testDir = "construct_component_provider"
tests := []struct {
componentDir string
env []string
}{
{
componentDir: "testcomponent",
},
{
componentDir: "testcomponent-python",
env: []string{pulumiRuntimeVirtualEnv(t, filepath.Join("..", ".."))},
},
{
componentDir: "testcomponent-go",
},
}
for _, test := range tests {
t.Run(test.componentDir, func(t *testing.T) {
pathEnv := pathEnv(t, filepath.Join(testDir, test.componentDir))
integration.ProgramTest(t, &integration.ProgramTestOptions{
Env: append(test.env, pathEnv),
Dir: filepath.Join(testDir, "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
NoParallel: true, // avoid contention for Dir
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
assert.Equal(t, "hello world", stackInfo.Outputs["message"])
},
})
})
}
}
func TestGetResourceNode(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("get_resource", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
AllowEmptyPreviewChanges: true,
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assert.NotNil(t, stack.Outputs)
assert.Equal(t, "foo", stack.Outputs["foo"])
},
})
}
func TestComponentProviderSchemaNode(t *testing.T) {
path := filepath.Join("component_provider_schema", "testcomponent", "pulumi-resource-testcomponent")
if runtime.GOOS == WindowsOS {
path += ".cmd"
}
testComponentProviderSchema(t, path)
}
// Test throwing an error within an apply in a remote component written in nodejs.
// The provider should return the error and shutdown gracefully rather than hanging.
func TestConstructNodeErrorApply(t *testing.T) {
dir := "construct_component_error_apply"
componentDir := "testcomponent"
stderr := &bytes.Buffer{}
expectedError := "intentional error from within an apply"
opts := &integration.ProgramTestOptions{
Env: []string{pathEnv(t, filepath.Join(dir, componentDir))},
Dir: filepath.Join(dir, "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
NoParallel: true,
Stderr: stderr,
ExpectFailure: true,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
output := stderr.String()
assert.Contains(t, output, expectedError)
},
}
t.Run(componentDir, func(t *testing.T) {
integration.ProgramTest(t, opts)
})
}
// Test targeting `es2016` in `tsconfig.json` works.
func TestCompilerOptionsNode(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("nodejs", "compiler_options"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
})
}
// Test that the about command works as expected. Because about parses the
// results of each runtime independently, we have an integration test in each
// language.
func TestAboutNodeJS(t *testing.T) {
if runtime.GOOS == WindowsOS {
t.Skip("Skip on windows because we lack yarn")
}
dir := filepath.Join("about", "nodejs")
e := ptesting.NewEnvironment(t)
defer func() {
if !t.Failed() {
e.DeleteEnvironment()
}
}()
e.ImportDirectory(dir)
e.RunCommand("yarn", "link", "@pulumi/pulumi")
e.RunCommand("yarn", "install")
e.RunCommand("pulumi", "login", "--cloud-url", e.LocalURL())
e.RunCommand("pulumi", "stack", "init", "about-nodejs")
e.RunCommand("pulumi", "stack", "select", "about-nodejs")
stdout, stderr := e.RunCommand("pulumi", "about")
e.RunCommand("pulumi", "stack", "rm", "--yes")
// Assert we parsed the dependencies
assert.Containsf(t, stdout, "@types/node",
"Did not contain expected output. stderr: \n%q", stderr)
}
func TestConstructOutputValuesNode(t *testing.T) {
testConstructOutputValues(t, "nodejs", "@pulumi/pulumi")
}
func TestTSConfigOption(t *testing.T) {
if runtime.GOOS == WindowsOS {
t.Skip("Skip on windows because we lack yarn")
}
e := ptesting.NewEnvironment(t)
defer func() {
if !t.Failed() {
e.DeleteEnvironment()
}
}()
e.ImportDirectory("tsconfig")
e.RunCommand("yarn", "link", "@pulumi/pulumi")
e.RunCommand("yarn", "install")
e.RunCommand("pulumi", "login", "--cloud-url", e.LocalURL())
e.RunCommand("pulumi", "stack", "select", "tsconfg", "--create")
e.RunCommand("pulumi", "preview")
}
| [
"\"PULUMI_ACCESS_TOKEN\"",
"\"PULUMI_TEST_OWNER\"",
"\"PULUMI_TEST_OWNER\"",
"\"PULUMI_TEST_OWNER\"",
"\"PULUMI_TEST_KMS_KEY_ALIAS\"",
"\"PULUMI_TEST_AZURE_KEY\"",
"\"PULUMI_TEST_GCP_KEY\""
] | [] | [
"PULUMI_ACCESS_TOKEN",
"PULUMI_TEST_KMS_KEY_ALIAS",
"PULUMI_TEST_GCP_KEY",
"PULUMI_TEST_OWNER",
"PULUMI_TEST_AZURE_KEY"
] | [] | ["PULUMI_ACCESS_TOKEN", "PULUMI_TEST_KMS_KEY_ALIAS", "PULUMI_TEST_GCP_KEY", "PULUMI_TEST_OWNER", "PULUMI_TEST_AZURE_KEY"] | go | 5 | 0 | |
pkg/env/env_test.go | package env
import (
_ "embed"
"os"
"testing"
"github.com/zaydek/svetlana/pkg/expect"
)
type Test struct {
got string
want string
}
func TestVersion(t *testing.T) {
text := `
+---------------------------+
| SVELTE | v3.0.0 |
| SVETLANA_VERSION | v0.0.1 |
+---------------------------+
`
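// SetEnvVars presumably parses the ASCII table above and exports each row as
// an environment variable; the assertions below check exactly that.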
SetEnvVars(text)
tests := []Test{
{got: os.Getenv("SVELTE"), want: "v3.0.0"},
{got: os.Getenv("SVETLANA_VERSION"), want: "v0.0.1"},
}
for _, test := range tests {
expect.DeepEqual(t, test.got, test.want)
}
}
func TestLatest(t *testing.T) {
text := `
+---------------------------+
| SVELTE | latest |
| SVETLANA_VERSION | latest |
+---------------------------+
`
SetEnvVars(text)
tests := []Test{
{got: os.Getenv("SVELTE"), want: "latest"},
{got: os.Getenv("SVETLANA_VERSION"), want: "latest"},
}
for _, test := range tests {
expect.DeepEqual(t, test.got, test.want)
}
}
| [
"\"SVELTE\"",
"\"SVETLANA_VERSION\"",
"\"SVELTE\"",
"\"SVETLANA_VERSION\""
] | [] | [
"SVETLANA_VERSION",
"SVELTE"
] | [] | ["SVETLANA_VERSION", "SVELTE"] | go | 2 | 0 | |
handler/biz/connect/connect.go | package main
import (
"context"
"encoding/json"
"fmt"
"math/rand"
"os"
"qoq/actor"
"qoq/actor/emq"
"qoq/protocol"
client "github.com/eclipse/paho.mqtt.golang"
)
type input struct {
C int `json:"cnt"`
B int `json:"cps"`
}
// Connect handler
func Connect(i *emq.Emq, cmd json.RawMessage) (err error) {
var d input
err = json.Unmarshal(cmd, &d)
if err != nil {
return
}
if d.B == 0 {
} else {
for cnt := d.B; cnt > 0; cnt-- {
for step := d.C / d.B; step > 0; step-- {
go func(ctx context.Context, c int, s int) {
var tk client.Token
msg := make(chan client.Message)
cli := client.NewClient(client.NewClientOptions().
SetClientID(fmt.Sprintf("%d@%d@%d", c, s, rand.Int63n(1<<62))).
SetAutoReconnect(false).
SetConnectionLostHandler(func(cli client.Client, err error) {
t, p := actor.GenError(protocol.Command{
Handle: "connect"}, 0, err)
i.Cli.Publish(t, 1, false, p)
}).
SetOnConnectHandler(func(cli client.Client) {
if tk := cli.Subscribe("connect/#", 0, func(cli client.Client, evt client.Message) {
msg <- evt
}); tk.Wait() && tk.Error() != nil {
t, p := actor.GenError(protocol.Command{
Handle: "connect"}, 0, tk.Error())
i.Cli.Publish(t, 1, false, p)
}
}).
AddBroker(os.Getenv("MQTTSRV")).
SetUsername(os.Getenv("MQTTUSR")).
SetPassword(os.Getenv("MQTTPWD")))
if tk = cli.Connect(); tk.Wait() && tk.Error() != nil {
fmt.Println(tk.Error(), "@", c, "@", s)
return
}
for {
select {
case <-msg:
cli.Disconnect(100)
return
case <-i.Ctx.Done():
cli.Disconnect(100)
return
}
}
}(i.Ctx, cnt, step)
}
}
}
return nil
}
// Gen func
func Gen() (string, func(*emq.Emq, json.RawMessage) error) {
return "connect", Connect
}
func main() {
}
| [
"\"MQTTSRV\"",
"\"MQTTUSR\"",
"\"MQTTPWD\""
] | [] | [
"MQTTSRV",
"MQTTPWD",
"MQTTUSR"
] | [] | ["MQTTSRV", "MQTTPWD", "MQTTUSR"] | go | 3 | 0 | |
src/testcases/CWE369_Divide_by_Zero/s01/CWE369_Divide_by_Zero__float_Environment_modulo_68a.java | /* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE369_Divide_by_Zero__float_Environment_modulo_68a.java
Label Definition File: CWE369_Divide_by_Zero__float.label.xml
Template File: sources-sinks-68a.tmpl.java
*/
/*
* @description
* CWE: 369 Divide by zero
* BadSource: Environment Read data from an environment variable
* GoodSource: A hardcoded non-zero number (two)
* Sinks: modulo
* GoodSink: Check for zero before modulo
* BadSink : Modulo by a value that may be zero
* Flow Variant: 68 Data flow: data passed as a member variable in the "a" class, which is used by a method in another class in the same package
*
* */
package testcases.CWE369_Divide_by_Zero.s01;
import testcasesupport.*;
import java.util.logging.Level;
public class CWE369_Divide_by_Zero__float_Environment_modulo_68a extends AbstractTestCase
{
public static float data;
public void bad() throws Throwable
{
data = -1.0f; /* Initialize data */
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
{
String stringNumber = System.getenv("ADD");
if (stringNumber != null)
{
try
{
data = Float.parseFloat(stringNumber.trim());
}
catch (NumberFormatException exceptNumberFormat)
{
IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat);
}
}
}
(new CWE369_Divide_by_Zero__float_Environment_modulo_68b()).badSink();
}
public void good() throws Throwable
{
goodG2B();
goodB2G();
}
/* goodG2B() - use goodsource and badsink */
private void goodG2B() throws Throwable
{
/* FIX: Use a hardcoded number that won't cause a divide by zero */
data = 2.0f;
(new CWE369_Divide_by_Zero__float_Environment_modulo_68b()).goodG2BSink();
}
/* goodB2G() - use badsource and goodsink */
private void goodB2G() throws Throwable
{
data = -1.0f; /* Initialize data */
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
{
String stringNumber = System.getenv("ADD");
if (stringNumber != null)
{
try
{
data = Float.parseFloat(stringNumber.trim());
}
catch (NumberFormatException exceptNumberFormat)
{
IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat);
}
}
}
(new CWE369_Divide_by_Zero__float_Environment_modulo_68b()).goodB2GSink();
}
/* Below is the main(). It is only used when building this testcase on
* its own for testing or for building a binary to use in testing binary
* analysis tools. It is not used when compiling all the testcases as one
* application, which is how source code analysis tools are tested.
*/
public static void main(String[] args) throws ClassNotFoundException,
InstantiationException, IllegalAccessException
{
mainFromParent(args);
}
}
| [
"\"ADD\"",
"\"ADD\""
] | [] | [
"ADD"
] | [] | ["ADD"] | java | 1 | 0 | |
youtube_dlc/extractor/xtube.py | from __future__ import unicode_literals
import itertools
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
js_to_json,
orderedSet,
parse_duration,
sanitized_Request,
str_to_int,
)
class XTubeIE(InfoExtractor):
_VALID_URL = r'''(?x)
(?:
xtube:|
https?://(?:www\.)?xtube\.com/(?:watch\.php\?.*\bv=|video-watch/(?:embedded/)?(?P<display_id>[^/]+)-)
)
(?P<id>[^/?&#]+)
'''
_TESTS = [{
# old URL schema
'url': 'http://www.xtube.com/watch.php?v=kVTUy_G222_',
'md5': '092fbdd3cbe292c920ef6fc6a8a9cdab',
'info_dict': {
'id': 'kVTUy_G222_',
'ext': 'mp4',
'title': 'strange erotica',
'description': 'contains:an ET kind of thing',
'uploader': 'greenshowers',
'duration': 449,
'view_count': int,
'comment_count': int,
'age_limit': 18,
}
}, {
# new URL schema
'url': 'http://www.xtube.com/video-watch/strange-erotica-625837',
'only_matching': True,
}, {
'url': 'xtube:625837',
'only_matching': True,
}, {
'url': 'xtube:kVTUy_G222_',
'only_matching': True,
}, {
'url': 'https://www.xtube.com/video-watch/embedded/milf-tara-and-teen-shared-and-cum-covered-extreme-bukkake-32203482?embedsize=big',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
if not display_id:
display_id = video_id
if video_id.isdigit() and len(video_id) < 11:
url_pattern = 'http://www.xtube.com/video-watch/-%s'
else:
url_pattern = 'http://www.xtube.com/watch.php?v=%s'
webpage = self._download_webpage(
url_pattern % video_id, display_id, headers={
'Cookie': 'age_verified=1; cookiesAccepted=1',
})
title, thumbnail, duration = [None] * 3
json_config_string = self._search_regex(
r'playerConf=({.+?}),loaderConf',
webpage, 'config', default=None)
if not json_config_string:
raise ExtractorError("Could not extract video player data")
json_config_string = json_config_string.replace("!0", "true").replace("!1", "false")
config = self._parse_json(json_config_string, video_id, transform_source=js_to_json, fatal=False)
if not config:
raise ExtractorError("Could not extract video player data")
config = config.get('mainRoll')
if isinstance(config, dict):
title = config.get('title')
thumbnail = config.get('poster')
duration = int_or_none(config.get('duration'))
sources = config.get('sources') or config.get('format')
if not isinstance(sources, dict):
sources = self._parse_json(self._search_regex(
r'(["\'])?sources\1?\s*:\s*(?P<sources>{.+?}),',
webpage, 'sources', group='sources'), video_id,
transform_source=js_to_json)
formats = []
for format_id, format_url in sources.items():
formats.append({
'url': format_url,
'format_id': format_id,
'height': int_or_none(format_id),
})
self._remove_duplicate_formats(formats)
self._sort_formats(formats)
if not title:
title = self._search_regex(
(r'<h1>\s*(?P<title>[^<]+?)\s*</h1>', r'videoTitle\s*:\s*(["\'])(?P<title>.+?)\1'),
webpage, 'title', group='title')
description = self._og_search_description(
webpage, default=None) or self._html_search_meta(
'twitter:description', webpage, default=None) or self._search_regex(
r'</h1>\s*<p>([^<]+)', webpage, 'description', fatal=False)
uploader = self._search_regex(
(r'<input[^>]+name="contentOwnerId"[^>]+value="([^"]+)"',
r'<span[^>]+class="nickname"[^>]*>([^<]+)'),
webpage, 'uploader', fatal=False)
if not duration:
duration = parse_duration(self._search_regex(
r'<dt>Runtime:?</dt>\s*<dd>([^<]+)</dd>',
webpage, 'duration', fatal=False))
view_count = str_to_int(self._search_regex(
(r'["\']viewsCount["\'][^>]*>(\d+)\s+views',
r'<dt>Views:?</dt>\s*<dd>([\d,\.]+)</dd>'),
webpage, 'view count', fatal=False))
comment_count = str_to_int(self._html_search_regex(
r'>Comments? \(([\d,\.]+)\)<',
webpage, 'comment count', fatal=False))
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'uploader': uploader,
'duration': duration,
'view_count': view_count,
'comment_count': comment_count,
'age_limit': 18,
'formats': formats,
}
class XTubeUserIE(InfoExtractor):
IE_DESC = 'XTube user profile'
_VALID_URL = r'https?://(?:www\.)?xtube\.com/profile/(?P<id>[^/]+-\d+)'
_TEST = {
'url': 'http://www.xtube.com/profile/greenshowers-4056496',
'info_dict': {
'id': 'greenshowers-4056496',
'age_limit': 18,
},
'playlist_mincount': 154,
}
def _real_extract(self, url):
user_id = self._match_id(url)
entries = []
for pagenum in itertools.count(1):
request = sanitized_Request(
'http://www.xtube.com/profile/%s/videos/%d' % (user_id, pagenum),
headers={
'Cookie': 'popunder=4',
'X-Requested-With': 'XMLHttpRequest',
'Referer': url,
})
page = self._download_json(
request, user_id, 'Downloading videos JSON page %d' % pagenum)
html = page.get('html')
if not html:
break
for video_id in orderedSet([video_id for _, video_id in re.findall(
r'data-plid=(["\'])(.+?)\1', html)]):
entries.append(self.url_result('xtube:%s' % video_id, XTubeIE.ie_key()))
page_count = int_or_none(page.get('pageCount'))
if not page_count or pagenum == page_count:
break
playlist = self.playlist_result(entries, user_id)
playlist['age_limit'] = 18
return playlist
| [] | [] | [] | [] | [] | python | null | null | null |
vendor/github.com/hashicorp/vault/builtin/logical/aws/backend_test.go | package aws
import (
"bytes"
"context"
"encoding/json"
"fmt"
"log"
"os"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/vault/logical"
logicaltest "github.com/hashicorp/vault/logical/testing"
"github.com/mitchellh/mapstructure"
)
func getBackend(t *testing.T) logical.Backend {
be, _ := Factory(context.Background(), logical.TestBackendConfig())
return be
}
func TestBackend_basic(t *testing.T) {
logicaltest.Test(t, logicaltest.TestCase{
AcceptanceTest: true,
PreCheck: func() { testAccPreCheck(t) },
Backend: getBackend(t),
Steps: []logicaltest.TestStep{
testAccStepConfig(t),
testAccStepWritePolicy(t, "test", testPolicy),
testAccStepReadUser(t, "test"),
},
})
}
func TestBackend_basicSTS(t *testing.T) {
accessKey := &awsAccessKey{}
logicaltest.Test(t, logicaltest.TestCase{
AcceptanceTest: true,
PreCheck: func() {
testAccPreCheck(t)
createUser(t, accessKey)
createRole(t)
// Sleep sometime because AWS is eventually consistent
// Both the createUser and createRole depend on this
log.Println("[WARN] Sleeping for 10 seconds waiting for AWS...")
time.Sleep(10 * time.Second)
},
Backend: getBackend(t),
Steps: []logicaltest.TestStep{
testAccStepConfigWithCreds(t, accessKey),
testAccStepWritePolicy(t, "test", testPolicy),
testAccStepReadSTS(t, "test"),
testAccStepWriteArnPolicyRef(t, "test", testPolicyArn),
testAccStepReadSTSWithArnPolicy(t, "test"),
testAccStepWriteArnRoleRef(t, testRoleName),
testAccStepReadSTS(t, testRoleName),
},
Teardown: func() error {
return teardown(accessKey)
},
})
}
func TestBackend_policyCrud(t *testing.T) {
var compacted bytes.Buffer
if err := json.Compact(&compacted, []byte(testPolicy)); err != nil {
t.Fatalf("bad: %s", err)
}
logicaltest.Test(t, logicaltest.TestCase{
AcceptanceTest: true,
Backend: getBackend(t),
Steps: []logicaltest.TestStep{
testAccStepConfig(t),
testAccStepWritePolicy(t, "test", testPolicy),
testAccStepReadPolicy(t, "test", compacted.String()),
testAccStepDeletePolicy(t, "test"),
testAccStepReadPolicy(t, "test", ""),
},
})
}
func testAccPreCheck(t *testing.T) {
if v := os.Getenv("AWS_DEFAULT_REGION"); v == "" {
log.Println("[INFO] Test: Using us-west-2 as test region")
os.Setenv("AWS_DEFAULT_REGION", "us-west-2")
}
if v := os.Getenv("AWS_ACCOUNT_ID"); v == "" {
accountId, err := getAccountId()
if err != nil {
			t.Logf("Unable to retrieve the account ID via sts:GetCallerIdentity: %#v", err)
			t.Skip("AWS_ACCOUNT_ID not explicitly set and could not be read from sts:GetCallerIdentity for acceptance tests, skipping")
}
log.Printf("[INFO] Test: Used %s as AWS_ACCOUNT_ID", accountId)
os.Setenv("AWS_ACCOUNT_ID", accountId)
}
}
func getAccountId() (string, error) {
awsConfig := &aws.Config{
Region: aws.String("us-east-1"),
HTTPClient: cleanhttp.DefaultClient(),
}
svc := sts.New(session.New(awsConfig))
params := &sts.GetCallerIdentityInput{}
res, err := svc.GetCallerIdentity(params)
if err != nil {
return "", err
}
if res == nil {
return "", fmt.Errorf("got nil response from GetCallerIdentity")
}
return *res.Account, nil
}
const testRoleName = "Vault-Acceptance-Test-AWS-Assume-Role"
func createRole(t *testing.T) {
const testRoleAssumePolicy = `{
"Version": "2012-10-17",
"Statement": [
{
"Effect":"Allow",
"Principal": {
"AWS": "arn:aws:iam::%s:root"
},
"Action": "sts:AssumeRole"
}
]
}
`
awsConfig := &aws.Config{
Region: aws.String("us-east-1"),
HTTPClient: cleanhttp.DefaultClient(),
}
svc := iam.New(session.New(awsConfig))
trustPolicy := fmt.Sprintf(testRoleAssumePolicy, os.Getenv("AWS_ACCOUNT_ID"))
params := &iam.CreateRoleInput{
AssumeRolePolicyDocument: aws.String(trustPolicy),
RoleName: aws.String(testRoleName),
Path: aws.String("/"),
}
log.Printf("[INFO] AWS CreateRole: %s", testRoleName)
_, err := svc.CreateRole(params)
if err != nil {
t.Fatalf("AWS CreateRole failed: %v", err)
}
attachment := &iam.AttachRolePolicyInput{
PolicyArn: aws.String(testPolicyArn),
RoleName: aws.String(testRoleName), // Required
}
_, err = svc.AttachRolePolicy(attachment)
if err != nil {
		t.Fatalf("AWS AttachRolePolicy failed: %v", err)
}
}
const testUserName = "Vault-Acceptance-Test-AWS-FederationToken"
func createUser(t *testing.T, accessKey *awsAccessKey) {
// The sequence of user creation actions is carefully chosen to minimize
// impact of stolen IAM user credentials
// 1. Create user, without any permissions or credentials. At this point,
// nobody cares if creds compromised because this user can do nothing.
// 2. Attach the timebomb policy. This grants no access but puts a time limit
	//    on validity of compromised credentials. If this fails, nobody cares
// because the user has no permissions to do anything anyway
// 3. Attach the AdminAccess policy. The IAM user still has no credentials to
// do anything
// 4. Generate API creds to get an actual access key and secret key
timebombPolicyTemplate := `{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Deny",
"Action": "*",
"Resource": "*",
"Condition": {
"DateGreaterThan": {
"aws:CurrentTime": "%s"
}
}
}
]
}
`
validity := time.Duration(2 * time.Hour)
expiry := time.Now().Add(validity)
timebombPolicy := fmt.Sprintf(timebombPolicyTemplate, expiry.Format(time.RFC3339))
awsConfig := &aws.Config{
Region: aws.String("us-east-1"),
HTTPClient: cleanhttp.DefaultClient(),
}
svc := iam.New(session.New(awsConfig))
createUserInput := &iam.CreateUserInput{
UserName: aws.String(testUserName),
}
log.Printf("[INFO] AWS CreateUser: %s", testUserName)
_, err := svc.CreateUser(createUserInput)
if err != nil {
t.Fatalf("AWS CreateUser failed: %v", err)
}
putPolicyInput := &iam.PutUserPolicyInput{
PolicyDocument: aws.String(timebombPolicy),
PolicyName: aws.String("SelfDestructionTimebomb"),
UserName: aws.String(testUserName),
}
_, err = svc.PutUserPolicy(putPolicyInput)
if err != nil {
t.Fatalf("AWS PutUserPolicy failed: %v", err)
}
attachUserPolicyInput := &iam.AttachUserPolicyInput{
PolicyArn: aws.String("arn:aws:iam::aws:policy/AdministratorAccess"),
UserName: aws.String(testUserName),
}
_, err = svc.AttachUserPolicy(attachUserPolicyInput)
if err != nil {
		t.Fatalf("AWS AttachUserPolicy failed: %v", err)
}
createAccessKeyInput := &iam.CreateAccessKeyInput{
UserName: aws.String(testUserName),
}
createAccessKeyOutput, err := svc.CreateAccessKey(createAccessKeyInput)
if err != nil {
t.Fatalf("AWS CreateAccessKey failed: %v", err)
}
if createAccessKeyOutput == nil {
t.Fatalf("AWS CreateAccessKey returned nil")
}
genAccessKey := createAccessKeyOutput.AccessKey
accessKey.AccessKeyId = *genAccessKey.AccessKeyId
accessKey.SecretAccessKey = *genAccessKey.SecretAccessKey
}
func teardown(accessKey *awsAccessKey) error {
awsConfig := &aws.Config{
Region: aws.String("us-east-1"),
HTTPClient: cleanhttp.DefaultClient(),
}
svc := iam.New(session.New(awsConfig))
attachment := &iam.DetachRolePolicyInput{
PolicyArn: aws.String(testPolicyArn),
RoleName: aws.String(testRoleName), // Required
}
_, err := svc.DetachRolePolicy(attachment)
if err != nil {
log.Printf("[WARN] AWS DetachRolePolicy failed: %v", err)
return err
}
params := &iam.DeleteRoleInput{
RoleName: aws.String(testRoleName),
}
log.Printf("[INFO] AWS DeleteRole: %s", testRoleName)
_, err = svc.DeleteRole(params)
if err != nil {
log.Printf("[WARN] AWS DeleteRole failed: %v", err)
return err
}
userDetachment := &iam.DetachUserPolicyInput{
PolicyArn: aws.String("arn:aws:iam::aws:policy/AdministratorAccess"),
UserName: aws.String(testUserName),
}
_, err = svc.DetachUserPolicy(userDetachment)
if err != nil {
log.Printf("[WARN] AWS DetachUserPolicy failed: %v", err)
return err
}
deleteAccessKeyInput := &iam.DeleteAccessKeyInput{
AccessKeyId: aws.String(accessKey.AccessKeyId),
UserName: aws.String(testUserName),
}
_, err = svc.DeleteAccessKey(deleteAccessKeyInput)
if err != nil {
log.Printf("[WARN] AWS DeleteAccessKey failed: %v", err)
return err
}
deleteUserPolicyInput := &iam.DeleteUserPolicyInput{
PolicyName: aws.String("SelfDestructionTimebomb"),
UserName: aws.String(testUserName),
}
_, err = svc.DeleteUserPolicy(deleteUserPolicyInput)
if err != nil {
log.Printf("[WARN] AWS DeleteUserPolicy failed: %v", err)
return err
}
deleteUserInput := &iam.DeleteUserInput{
UserName: aws.String(testUserName),
}
log.Printf("[INFO] AWS DeleteUser: %s", testUserName)
_, err = svc.DeleteUser(deleteUserInput)
if err != nil {
log.Printf("[WARN] AWS DeleteUser failed: %v", err)
return err
}
return nil
}
func testAccStepConfig(t *testing.T) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "config/root",
Data: map[string]interface{}{
"region": os.Getenv("AWS_DEFAULT_REGION"),
},
}
}
func testAccStepConfigWithCreds(t *testing.T, accessKey *awsAccessKey) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "config/root",
Data: map[string]interface{}{
"region": os.Getenv("AWS_DEFAULT_REGION"),
},
PreFlight: func(req *logical.Request) error {
// Values in Data above get eagerly evaluated due to the testing framework.
// In particular, they get evaluated before accessKey gets set by CreateUser
// and thus would fail. By moving to a closure in a PreFlight, we ensure that
// the creds get evaluated lazily after they've been properly set
req.Data["access_key"] = accessKey.AccessKeyId
req.Data["secret_key"] = accessKey.SecretAccessKey
return nil
},
}
}
func testAccStepReadUser(t *testing.T, name string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.ReadOperation,
Path: "creds/" + name,
Check: func(resp *logical.Response) error {
var d struct {
AccessKey string `mapstructure:"access_key"`
SecretKey string `mapstructure:"secret_key"`
}
if err := mapstructure.Decode(resp.Data, &d); err != nil {
return err
}
log.Printf("[WARN] Generated credentials: %v", d)
// Build a client and verify that the credentials work
creds := credentials.NewStaticCredentials(d.AccessKey, d.SecretKey, "")
awsConfig := &aws.Config{
Credentials: creds,
Region: aws.String("us-east-1"),
HTTPClient: cleanhttp.DefaultClient(),
}
client := ec2.New(session.New(awsConfig))
log.Printf("[WARN] Verifying that the generated credentials work...")
retryCount := 0
success := false
var err error
for !success && retryCount < 10 {
_, err = client.DescribeInstances(&ec2.DescribeInstancesInput{})
if err == nil {
return nil
}
time.Sleep(time.Second)
retryCount++
}
return err
},
}
}
func testAccStepReadSTS(t *testing.T, name string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.ReadOperation,
Path: "sts/" + name,
Check: func(resp *logical.Response) error {
var d struct {
AccessKey string `mapstructure:"access_key"`
SecretKey string `mapstructure:"secret_key"`
STSToken string `mapstructure:"security_token"`
}
if err := mapstructure.Decode(resp.Data, &d); err != nil {
return err
}
log.Printf("[WARN] Generated credentials: %v", d)
// Build a client and verify that the credentials work
creds := credentials.NewStaticCredentials(d.AccessKey, d.SecretKey, d.STSToken)
awsConfig := &aws.Config{
Credentials: creds,
Region: aws.String("us-east-1"),
HTTPClient: cleanhttp.DefaultClient(),
}
client := ec2.New(session.New(awsConfig))
log.Printf("[WARN] Verifying that the generated credentials work...")
_, err := client.DescribeInstances(&ec2.DescribeInstancesInput{})
if err != nil {
return err
}
return nil
},
}
}
func testAccStepReadSTSWithArnPolicy(t *testing.T, name string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.ReadOperation,
Path: "sts/" + name,
ErrorOk: true,
Check: func(resp *logical.Response) error {
if resp.Data["error"] !=
"Can't generate STS credentials for a managed policy; use a role to assume or an inline policy instead" {
t.Fatalf("bad: %v", resp)
}
return nil
},
}
}
func testAccStepWritePolicy(t *testing.T, name string, policy string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "roles/" + name,
Data: map[string]interface{}{
"policy": testPolicy,
},
}
}
func testAccStepDeletePolicy(t *testing.T, n string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.DeleteOperation,
Path: "roles/" + n,
}
}
func testAccStepReadPolicy(t *testing.T, name string, value string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.ReadOperation,
Path: "roles/" + name,
Check: func(resp *logical.Response) error {
if resp == nil {
if value == "" {
return nil
}
return fmt.Errorf("bad: %#v", resp)
}
var d struct {
Policy string `mapstructure:"policy"`
}
if err := mapstructure.Decode(resp.Data, &d); err != nil {
return err
}
if d.Policy != value {
return fmt.Errorf("bad: %#v", resp)
}
return nil
},
}
}
const testPolicy = `
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "Stmt1426528957000",
"Effect": "Allow",
"Action": [
"ec2:*"
],
"Resource": [
"*"
]
}
]
}
`
const testPolicyArn = "arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess"
func testAccStepWriteArnPolicyRef(t *testing.T, name string, arn string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "roles/" + name,
Data: map[string]interface{}{
"arn": testPolicyArn,
},
}
}
func TestBackend_basicPolicyArnRef(t *testing.T) {
logicaltest.Test(t, logicaltest.TestCase{
AcceptanceTest: true,
PreCheck: func() { testAccPreCheck(t) },
Backend: getBackend(t),
Steps: []logicaltest.TestStep{
testAccStepConfig(t),
testAccStepWriteArnPolicyRef(t, "test", testPolicyArn),
testAccStepReadUser(t, "test"),
},
})
}
func TestBackend_policyArnCrud(t *testing.T) {
logicaltest.Test(t, logicaltest.TestCase{
AcceptanceTest: true,
Backend: getBackend(t),
Steps: []logicaltest.TestStep{
testAccStepConfig(t),
testAccStepWriteArnPolicyRef(t, "test", testPolicyArn),
testAccStepReadArnPolicy(t, "test", testPolicyArn),
testAccStepDeletePolicy(t, "test"),
testAccStepReadArnPolicy(t, "test", ""),
},
})
}
func testAccStepReadArnPolicy(t *testing.T, name string, value string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.ReadOperation,
Path: "roles/" + name,
Check: func(resp *logical.Response) error {
if resp == nil {
if value == "" {
return nil
}
return fmt.Errorf("bad: %#v", resp)
}
var d struct {
Policy string `mapstructure:"arn"`
}
if err := mapstructure.Decode(resp.Data, &d); err != nil {
return err
}
if d.Policy != value {
return fmt.Errorf("bad: %#v", resp)
}
return nil
},
}
}
func testAccStepWriteArnRoleRef(t *testing.T, roleName string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "roles/" + roleName,
Data: map[string]interface{}{
"arn": fmt.Sprintf("arn:aws:iam::%s:role/%s", os.Getenv("AWS_ACCOUNT_ID"), roleName),
},
}
}
type awsAccessKey struct {
AccessKeyId string
SecretAccessKey string
}
| [
"\"AWS_DEFAULT_REGION\"",
"\"AWS_ACCOUNT_ID\"",
"\"AWS_ACCOUNT_ID\"",
"\"AWS_DEFAULT_REGION\"",
"\"AWS_DEFAULT_REGION\"",
"\"AWS_ACCOUNT_ID\""
] | [] | [
"AWS_DEFAULT_REGION",
"AWS_ACCOUNT_ID"
] | [] | ["AWS_DEFAULT_REGION", "AWS_ACCOUNT_ID"] | go | 2 | 0 | |
thespian/system/utilis.py | from datetime import datetime, timedelta
import logging
import os
import tempfile
from thespian.actors import InvalidActorSpecification
import inspect
import warnings
###
### Logging
###
# recognized logging level names for the Thespian internal log.
_name_to_level = {
"CRITICAL": logging.CRITICAL,
"ERROR": logging.ERROR,
"WARNING": logging.WARNING,
"INFO": logging.INFO,
"DEBUG": logging.DEBUG,
"NOTSET": logging.NOTSET
}
# Default/current logging controls
_thesplog_control_settings = (
_name_to_level.get(os.getenv('THESPLOG_THRESHOLD') or 'WARNING', logging.WARNING),
False,
os.getenv('THESPLOG_FILE_MAXSIZE', 50 * 1024) # 50KB by default
)
# Usually logging would be directed to /var/log, but that is often not
# user-writeable, so $TMPDIR (or /tmp) is used by default; setting
# THESPLOG_FILE is encouraged. The below variables are set on first
# use to allow direction to be set post load of this file.
_thesplog_file = None
_thesplog_old_file = None
def thesplog_control(baseLevel=logging.DEBUG, useLogging=True, tmpFileMaxSize=0):
"""Specifies the logging performed by thesplog().
The first parameter specifies the baseLevel for logging output;
any log messages whose severity is lower than this level are
not logged (DEBUG is the lowest level, CRITICAL is the highest
level).
The useLogging parameter specifies whether messages are to be
logged via the normal logging in Thespian that Actor logging
will also use. The default is True.
The tmpFileMaxSize, if > 10KB, specifies the maximum size of
the thespian.log file to write logging output to. A value
of 0 (or < 10KB) means that no logging to the thespian.log
file will be performed. Note that the actual footprint is
double this size: when this size is reached, the existing
${TMPDIR}/thespian.log file is renamed to ${TMPDIR}/thespian.log.old
(removing any existing file with that target name) and then a
new empty thespian.log file is created for subsequent
logging.
"""
global _thesplog_control_settings
_thesplog_control_settings = (baseLevel, useLogging, tmpFileMaxSize)
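# Illustrative usage sketch (not part of the original module): a deployment
# that wants verbose console output and an on-disk log capped at roughly 1MB
# could configure thesplog like this before starting an ActorSystem:
#
#     thesplog_control(baseLevel=logging.DEBUG,
#                      useLogging=True,
#                      tmpFileMaxSize=1024 * 1024)
#
# Any tmpFileMaxSize below 10KB disables writing to the thespian.log file.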
def thesplog(msg, *args, **kw):
global _thesplog_control_settings
if kw.get('level', logging.INFO) >= _thesplog_control_settings[0]:
if int(_thesplog_control_settings[2]) >= 10 * 1024:
levelstr = lambda l: { logging.DEBUG: 'dbg',
logging.INFO: 'I',
logging.WARNING: 'Warn',
logging.ERROR: 'ERR',
logging.CRITICAL: 'CRIT' }.get(l, '??')
global _thesplog_file, _thesplog_old_file
if not _thesplog_file:
_thesplog_file = os.getenv('THESPLOG_FILE',
os.path.join(os.getenv('TMPDIR',
tempfile.gettempdir()),
'thespian.log'))
_thesplog_old_file = _thesplog_file + '.old'
try:
if os.stat(_thesplog_file).st_size > int(_thesplog_control_settings[2]):
# Tricky: a multiprocess system might enter here
# with multiple processes. The single line append
# write below is atomic. Rename should be as well,
# but don't try anything more than that.
os.rename(_thesplog_file, _thesplog_old_file)
except OSError:
# The logfile didn't exist or another process already
# rotated it. Move along.
pass
try:
with open(_thesplog_file, 'a') as lf:
lf.write('%s p%s %-4s %s\n'%(str(datetime.now()), os.getpid(),
levelstr(kw.get('level', logging.INFO)), str(msg%args)))
except Exception:
# It should not be fatal if there was an error writing
# to the logfile (e.g. the disk was full)
pass
# The Thespian environment uses its own transport to forward
# logging messages. This can be dangerous if the transport itself
# generates logging output because this can lead to a never-ending
# logging storm. The primary=True keyword argument can be used
# with thesplog to request the item to be logged to standard logging;
# this argument should be used carefully to ensure the logging
# storm scenario is not triggered.
if _thesplog_control_settings[1] and kw.get('primary', False):
oldSettings = _thesplog_control_settings
        _thesplog_control_settings = oldSettings[0], False, oldSettings[2]
logging.getLogger('Thespian.System').log(
kw.get('level', logging.INFO), msg, *args,
exc_info=kw.get('exc_info', False),
extra=kw.get('extra', None))
_thesplog_control_settings = oldSettings
###
### Common Actor operations
###
def checkActorCapabilities(actorClass, capabilities=None, requirements=None,
sourceHashLoader=None):
actorClass = actualActorClass(actorClass, sourceHashLoader)
if not hasattr(actorClass, "actorSystemCapabilityCheck"): return True
try:
return actorClass.actorSystemCapabilityCheck(capabilities or {},
requirements or {})
except Exception as ex:
# The Actor may have a bug in their implementation of
# actorSystemCapabilityCheck, but perhaps there is another
# ActorSystem for which the Actor's actorSystemCapabilityCheck
# will succeed, so this is a soft failure.
return False
def isStr(var):
# Needed for Python2 and Python 3 compatibility
if isinstance(var, str): return True
try:
return isinstance(var, unicode)
except NameError:
return False
def actualActorClass(actorClass, sourceHashLoader=None):
# the actorClass can either be a class object already or
# it can be a string. If it's the latter, get the actual
# class object corresponding to the string.
if isStr(actorClass):
# actorClass is a module-qualified object reference
# (e.g. "thespian.test.testLoadSource.BarActor').
classModule, adot, className = actorClass.rpartition('.')
if not classModule:
# Caller passed an unqualified name string. The name is
# presumably in the same file context as the caller, and
# for some systemBases (those that share the same process)
# it might be possible to walk up the call frames and find
# the right context, but that is not universally possible
# (esp. for multi-process configurations), so this is
# *always* disallowed.
raise InvalidActorSpecification(actorClass)
else:
try:
import importlib
except ImportError:
import thespian.importlib as importlib # KWQ?
if sourceHashLoader:
actorClass = sourceHashLoader(classModule, className)
else:
m = importlib.import_module(classModule)
actorClass = getattr(m, className)
return actorClass
###
### Functional operations
###
import functools
try:
foldl = reduce # type: ignore
except NameError:
foldl = functools.reduce
def _append(iterable, value):
iterable.append(value)
return iterable
def join(iterable_of_iterables):
return foldl(lambda a, b: a + b, iterable_of_iterables, [])
def partition(testPred, inp_iterable, output_type=list):
"""Splits an iterable (e.g. list) into a tuple of two lists (or other
output_type): the first output iterable contains the elements
that pass the testPred (i.e. testPred(Element) is True), and
the second output iterable contains elements that do not pass
the testPred.
"""
appLeft = lambda ll, e: (_append(ll[0], e), ll[1])
appRight = lambda ll, e: (ll[0], _append(ll[1], e))
appendLeftOrRight = lambda ll, e: (appLeft if testPred(e) else appRight)(ll, e)
return foldl(appendLeftOrRight, inp_iterable, (output_type(), output_type()))
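# Illustrative example (not part of the original module): partition splits an
# input by a predicate, returning (matching, non-matching).
#
#     evens, odds = partition(lambda n: n % 2 == 0, [1, 2, 3, 4, 5])
#     # evens == [2, 4], odds == [1, 3, 5]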
def fmap(func, obj):
if isinstance(obj, tuple):
return tuple(map(functools.partial(fmap, func), obj))
iterableitems = isinstance(obj, (list, dict))
if not iterableitems:
try:
iterableitems = isinstance(obj, (filter, map, zip, range))
except TypeError:
# Python2 doesn't have objects like the above. The
# corresponding operations just result in lists which is
# already covered.
pass
if iterableitems:
if hasattr(obj, 'items'):
return dict(map(functools.partial(fmap, func), obj.items()))
return list(map(functools.partial(fmap, func), obj))
if hasattr(obj, 'fmap'):
return obj.fmap(func)
return func(obj)
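# Illustrative example (not part of the original module): fmap applies func to
# every leaf of nested lists/tuples, preserving the container shape (for dicts
# the function is applied to keys as well as values).
#
#     fmap(lambda x: x * 2, [1, (2, 3), [4]])
#     # => [2, (4, 6), [8]]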
###
### Useful object for managing Stats
###
class StatsManager(object):
def __init__(self):
self._kv = {}
def inc(self, kw):
if kw not in self._kv:
self._kv[kw] = 1
else:
self._kv[kw] += 1
def copyToStatusResponse(self, response):
for kw in self._kv:
response.addKeyVal(kw, self._kv[kw])
###
### Miscellaneous
###
def setProcName(name, actorAddr):
try: from setproctitle import setproctitle
#This library not required, but its presence will make
#actor names and addresses available in the process list.
except: pass
else: setproctitle('%s %s'%(name, str(actorAddr)))
class AssocList(object):
def __init__(self):
self._qa = [] # (addr, val)
def find(self, addr):
for each in self._qa:
if each[0] == addr:
return each[1]
return None
def add(self, addr, val):
self._qa = [(A,V) for (A,V) in self._qa if A != addr] + [(addr,val)]
def rmv(self, addr):
self._qa = [(A,V) for (A,V) in self._qa if A != addr]
def rmv_value(self, val):
self._qa = [(A,V) for (A,V) in self._qa if V != val]
def values(self):
return [V for (A,V) in self._qa]
def items(self):
return self._qa
def fmap(self, func):
        # list() forces evaluation; a bare map() is lazy under Python 3.
        list(map(func, self._qa))
def __len__(self):
return len(self._qa)
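# Illustrative example (not part of the original module): AssocList keeps at
# most one value per address; add() replaces any earlier entry for that address.
#
#     al = AssocList()
#     al.add('addr1', 'alpha')
#     al.add('addr1', 'beta')   # replaces 'alpha'
#     al.find('addr1')          # => 'beta'
#     al.rmv('addr1')
#     al.find('addr1')          # => None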
class withPossibleInitArgs(object):
"""Allows the creation of an object, passing arguments to the
__init__() method only if those arguments are specified by
name.
Example:
class Foo(object):
def __init__(self, bar): ...
foo = withPossibleInitArgs(bar=1, cow='moo').create(Foo)
will construct a Foo object by pass a bar value of 1 to the
__init__ method. If the __init__ method required an argument
of "baz", the construction would fail because the
withPossibleInitArgs set of arguments did not include a "baz"
argument.
"""
def __init__(self, **kw):
self.kwargs = kw
def create(self, klass):
"""Creates an instance of the specified class, passing any of the
initial arguments that are valid arguments for the __init__
method.
"""
try:
            initsig = [P.name
                       for P in inspect.signature(klass.__init__).parameters.values()]
except (ValueError, NameError, AttributeError):
try:
with warnings.catch_warnings():
warnings.simplefilter('error', category=DeprecationWarning)
initsig = inspect.getargspec(klass.__init__).args
except (ValueError, NameError, AttributeError, DeprecationWarning):
try:
initsig = inspect.getfullargspec(klass.__init__).args
except (ValueError, NameError, AttributeError):
# Default to just initializing with no arguments
print('defaulting')
return klass()
return klass(**{ k: self.kwargs[k] for k in initsig if k in self.kwargs })
| [] | [] | [
"THESPLOG_FILE",
"THESPLOG_FILE_MAXSIZE",
"THESPLOG_THRESHOLD",
"TMPDIR"
] | [] | ["THESPLOG_FILE", "THESPLOG_FILE_MAXSIZE", "THESPLOG_THRESHOLD", "TMPDIR"] | python | 4 | 0 | |
research/cv/simple_baselines/train.py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''
train
'''
from __future__ import division
import os
import ast
import argparse
import numpy as np
from mindspore import context, Tensor
from mindspore.context import ParallelMode
from mindspore.communication.management import init
from mindspore.train import Model
from mindspore.train.callback import TimeMonitor, LossMonitor, ModelCheckpoint, CheckpointConfig
from mindspore.nn.optim import Adam
from mindspore.common import set_seed
from src.config import config
from src.pose_resnet import GetPoseResNet
from src.network_with_loss import JointsMSELoss, PoseResNetWithLoss
from src.dataset import keypoint_dataset
if config.MODELARTS.IS_MODEL_ARTS:
import moxing as mox
set_seed(config.GENERAL.TRAIN_SEED)
def get_lr(begin_epoch,
total_epochs,
steps_per_epoch,
lr_init=0.1,
factor=0.1,
epoch_number_to_drop=(90, 120)
):
'''
get_lr
'''
lr_each_step = []
total_steps = steps_per_epoch * total_epochs
step_number_to_drop = [steps_per_epoch * x for x in epoch_number_to_drop]
for i in range(int(total_steps)):
if i in step_number_to_drop:
lr_init = lr_init * factor
lr_each_step.append(lr_init)
current_step = steps_per_epoch * begin_epoch
lr_each_step = np.array(lr_each_step, dtype=np.float32)
learning_rate = lr_each_step[current_step:]
return learning_rate
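# Illustrative example (not part of the original script): with the loop above,
# the rate stays at lr_init until the first drop epoch, is multiplied by
# `factor` at each epoch in epoch_number_to_drop, and only the steps from
# begin_epoch onward are returned.
#
#     lr = get_lr(begin_epoch=0, total_epochs=140, steps_per_epoch=100,
#                 lr_init=0.001, factor=0.1, epoch_number_to_drop=(90, 120))
#     # lr[0] == 0.001, lr[9000] ~= 1e-4, lr[12000] ~= 1e-5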
def parse_args():
'''
args
'''
parser = argparse.ArgumentParser(description="Simplebaseline training")
parser.add_argument('--data_url', required=False, default=None, help='Location of data.')
parser.add_argument('--train_url', required=False, default=None, help='Location of training outputs.')
    parser.add_argument('--device_id', required=False, default=None, type=int, help='Device id.')
parser.add_argument("--device_target", type=str, choices=["Ascend", "GPU", "CPU"], default="Ascend",
help="device target")
    parser.add_argument('--run_distribute', required=False, default=False, help='Run distributed training.')
    parser.add_argument('--is_model_arts', type=ast.literal_eval, default=False, help='Whether the training runs on ModelArts.')
args = parser.parse_args()
return args
def main():
print("loading parse...")
args = parse_args()
device_id = args.device_id
device_target = args.device_target
config.GENERAL.RUN_DISTRIBUTE = args.run_distribute
config.MODELARTS.IS_MODEL_ARTS = args.is_model_arts
if config.GENERAL.RUN_DISTRIBUTE or config.MODELARTS.IS_MODEL_ARTS:
device_id = int(os.getenv('DEVICE_ID'))
context.set_context(mode=context.GRAPH_MODE,
device_target=device_target,
save_graphs=False,
device_id=device_id)
if config.GENERAL.RUN_DISTRIBUTE:
init()
rank = int(os.getenv('DEVICE_ID'))
device_num = int(os.getenv('RANK_SIZE'))
context.set_auto_parallel_context(device_num=device_num,
parallel_mode=ParallelMode.DATA_PARALLEL,
gradients_mean=True)
else:
rank = 0
device_num = 1
if config.MODELARTS.IS_MODEL_ARTS:
mox.file.copy_parallel(src_url=args.data_url, dst_url=config.MODELARTS.CACHE_INPUT)
dataset, _ = keypoint_dataset(config,
rank=rank,
group_size=device_num,
train_mode=True,
num_parallel_workers=config.TRAIN.NUM_PARALLEL_WORKERS,
)
net = GetPoseResNet(config)
loss = JointsMSELoss(config.LOSS.USE_TARGET_WEIGHT)
net_with_loss = PoseResNetWithLoss(net, loss)
dataset_size = dataset.get_dataset_size()
lr = Tensor(get_lr(config.TRAIN.BEGIN_EPOCH,
config.TRAIN.END_EPOCH,
dataset_size,
lr_init=config.TRAIN.LR,
factor=config.TRAIN.LR_FACTOR,
epoch_number_to_drop=config.TRAIN.LR_STEP))
opt = Adam(net.trainable_params(), learning_rate=lr)
time_cb = TimeMonitor(data_size=dataset_size)
loss_cb = LossMonitor()
cb = [time_cb, loss_cb]
if config.TRAIN.SAVE_CKPT:
config_ck = CheckpointConfig(save_checkpoint_steps=dataset_size, keep_checkpoint_max=20)
prefix = ''
if config.GENERAL.RUN_DISTRIBUTE:
prefix = 'multi_' + 'train_poseresnet_' + config.GENERAL.VERSION + '_' + os.getenv('DEVICE_ID')
else:
prefix = 'single_' + 'train_poseresnet_' + config.GENERAL.VERSION
directory = ''
if config.MODELARTS.IS_MODEL_ARTS:
directory = config.MODELARTS.CACHE_OUTPUT + 'device_'+ os.getenv('DEVICE_ID')
elif config.GENERAL.RUN_DISTRIBUTE:
directory = config.TRAIN.CKPT_PATH + 'device_'+ os.getenv('DEVICE_ID')
else:
directory = config.TRAIN.CKPT_PATH + 'device'
ckpoint_cb = ModelCheckpoint(prefix=prefix, directory=directory, config=config_ck)
cb.append(ckpoint_cb)
model = Model(net_with_loss, loss_fn=None, optimizer=opt, amp_level="O2")
epoch_size = config.TRAIN.END_EPOCH - config.TRAIN.BEGIN_EPOCH
print("************ Start training now ************")
print('start training, epoch size = %d' % epoch_size)
model.train(epoch_size, dataset, callbacks=cb)
if config.MODELARTS.IS_MODEL_ARTS:
mox.file.copy_parallel(src_url=config.MODELARTS.CACHE_OUTPUT, dst_url=args.train_url)
if __name__ == '__main__':
main()
| [] | [] | [
"RANK_SIZE",
"DEVICE_ID"
] | [] | ["RANK_SIZE", "DEVICE_ID"] | python | 2 | 0 | |
src/bot.py | import os
import telebot
bot = telebot.TeleBot(os.environ['BOT_TOKEN'], parse_mode='HTML')
| [] | [] | [
"BOT_TOKEN"
] | [] | ["BOT_TOKEN"] | python | 1 | 0 | |
rqd/rqd/__main__.py | #!/usr/bin/env python
# Copyright (c) 2018 Sony Pictures Imageworks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Initializes and starts rqd.
- RQD allows the cuebot to launch frames on a remote host.
- RQD monitors the resources on a machine.
- Frames can be monitored or killed.
- Status updates are sent to the cuebot every 60 seconds.
- Nimby built into RQD allows a desktop to be used as a render machine when
not in use.
- See the rqnetwork module for a description of ICE interfaces.
Optional configuration file:
----------------------------
in /etc/rqd3/rqd3.conf:
[Override]
OVERRIDE_CORES = 2
OVERRIDE_PROCS = 3
OVERRIDE_MEMORY = 1000000
OVERRIDE_CUEBOT = cuebot1 cuebot2 cuebot3
# True will start nimby, False will keep nimby from starting
OVERRIDE_NIMBY = False
# True will check and report gpu memory if cuda capable
GPU = True
# True will force 256mb gpu memory
PLAYBLAST = True
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import getopt
import logging
import logging.handlers
import os
import platform
import sys
import rqd.rqconstants
import rqd.rqcore
import rqd.rqutil
def setupLogging():
"""Sets up the logging for RQD.
Logs to /var/log/messages"""
# TODO(bcipriano) These should be config based. (Issue #72)
consoleFormat = '%(asctime)s %(levelname)-9s rqd3-%(module)-10s %(message)s'
consoleLevel = logging.DEBUG
fileFormat = '%(asctime)s %(levelname)-9s rqd3-%(module)-10s %(message)s'
fileLevel = logging.WARNING # Equal to or greater than the consoleLevel
logging.basicConfig(level=consoleLevel, format=consoleFormat)
if platform.system() in ('Linux', 'Darwin'):
if platform.system() == 'Linux':
syslogAddress = '/dev/log'
else:
syslogAddress = '/var/run/syslog'
if os.path.exists(syslogAddress):
logfile = logging.handlers.SysLogHandler(address=syslogAddress)
else:
logfile = logging.handlers.SysLogHandler()
else:
logfile = logging.handlers.SysLogHandler()
logfile.setLevel(fileLevel)
logfile.setFormatter(logging.Formatter(fileFormat))
logging.getLogger('').addHandler(logfile)
def usage():
"""Prints command line syntax"""
s = sys.stderr
print("SYNOPSIS", file=s)
print(" ", sys.argv[0], "[options]\n", file=s)
print(" -d | --daemon => Run as daemon", file=s)
print(" --nimbyoff => Disables nimby activation", file=s)
print(" -c => Provide an alternate config file", file=s)
print(" Defaults to /etc/rqd3/rqd3.conf", file=s)
print(" Config file is optional", file=s)
def main():
setupLogging()
if platform.system() == 'Linux' and os.getuid() != 0:
        logging.critical("Please launch as root if you want user-level control")
try:
opts, argv = getopt.getopt(sys.argv[1:], 'hdc:', ['help',
'daemon',
'nimbyoff',
'update'])
except getopt.GetoptError:
usage()
sys.exit(1)
optNimbyOff = False
for o, a in opts:
if o in ["-h", "--help"]:
usage()
sys.exit(0)
if o in ["-d", "--daemon"]:
# TODO(bcipriano) Background the process. (Issue #153)
pass
if o in ["--nimbyoff"]:
optNimbyOff = True
rqd.rqutil.permissionsLow()
logging.warning('RQD Starting Up')
    # Site-specific timezone override (Sony Pictures Imageworks facility).
    if rqd.rqconstants.FACILITY == 'abq':
        os.environ['TZ'] = 'PST8PDT'
rqCore = rqd.rqcore.RqCore(optNimbyOff)
rqCore.start()
if __name__ == "__main__":
main()
| [] | [] | [
"TZ"
] | [] | ["TZ"] | python | 1 | 0 | |
src/cmd/dist/build.go | // Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"log"
"os"
"os/exec"
"path/filepath"
"sort"
"strings"
"sync"
"time"
)
// Initialization for any invocation.
// The usual variables.
var (
goarch string
gobin string
gohostarch string
gohostos string
goos string
goarm string
go386 string
goroot string
goroot_final string
goextlinkenabled string
gogcflags string // For running built compiler
goldflags string
workdir string
tooldir string
oldgoos string
oldgoarch string
exe string
defaultcc string
defaultcflags string
defaultldflags string
defaultcxxtarget string
defaultcctarget string
defaultpkgconfigtarget string
rebuildall bool
defaultclang bool
vflag int // verbosity
)
// The known architectures.
var okgoarch = []string{
"386",
"amd64",
"amd64p32",
"arm",
"arm64",
"mips",
"mipsle",
"mips64",
"mips64le",
"ppc64",
"ppc64le",
"s390x",
}
// The known operating systems.
var okgoos = []string{
"darwin",
"dragonfly",
"linux",
"android",
"solaris",
"freebsd",
"nacl",
"netbsd",
"openbsd",
"plan9",
"windows",
}
// find reports the first index of p in l[0:n], or else -1.
func find(p string, l []string) int {
for i, s := range l {
if p == s {
return i
}
}
return -1
}
// xinit handles initialization of the various global state, like goroot and goarch.
func xinit() {
b := os.Getenv("GOROOT")
if b == "" {
fatalf("$GOROOT must be set")
}
goroot = filepath.Clean(b)
b = os.Getenv("GOROOT_FINAL")
if b == "" {
b = goroot
}
goroot_final = b
b = os.Getenv("GOBIN")
if b == "" {
b = pathf("%s/bin", goroot)
}
gobin = b
b = os.Getenv("GOOS")
if b == "" {
b = gohostos
}
goos = b
if find(goos, okgoos) < 0 {
fatalf("unknown $GOOS %s", goos)
}
b = os.Getenv("GOARM")
if b == "" {
b = xgetgoarm()
}
goarm = b
b = os.Getenv("GO386")
if b == "" {
if cansse2() {
b = "sse2"
} else {
b = "387"
}
}
go386 = b
if p := pathf("%s/src/all.bash", goroot); !isfile(p) {
fatalf("$GOROOT is not set correctly or not exported\n"+
"\tGOROOT=%s\n"+
"\t%s does not exist", goroot, p)
}
b = os.Getenv("GOHOSTARCH")
if b != "" {
gohostarch = b
}
if find(gohostarch, okgoarch) < 0 {
fatalf("unknown $GOHOSTARCH %s", gohostarch)
}
b = os.Getenv("GOARCH")
if b == "" {
b = gohostarch
}
goarch = b
if find(goarch, okgoarch) < 0 {
fatalf("unknown $GOARCH %s", goarch)
}
b = os.Getenv("GO_EXTLINK_ENABLED")
if b != "" {
if b != "0" && b != "1" {
fatalf("unknown $GO_EXTLINK_ENABLED %s", b)
}
goextlinkenabled = b
}
gogcflags = os.Getenv("BOOT_GO_GCFLAGS")
b = os.Getenv("CC")
if b == "" {
// Use clang on OS X, because gcc is deprecated there.
// Xcode for OS X 10.9 Mavericks will ship a fake "gcc" binary that
// actually runs clang. We prepare different command
// lines for the two binaries, so it matters what we call it.
// See golang.org/issue/5822.
if defaultclang {
b = "clang"
} else {
b = "gcc"
}
}
defaultcc = b
defaultcflags = os.Getenv("CFLAGS")
defaultldflags = os.Getenv("LDFLAGS")
b = os.Getenv("CC_FOR_TARGET")
if b == "" {
b = defaultcc
}
defaultcctarget = b
b = os.Getenv("CXX_FOR_TARGET")
if b == "" {
b = os.Getenv("CXX")
if b == "" {
if defaultclang {
b = "clang++"
} else {
b = "g++"
}
}
}
defaultcxxtarget = b
b = os.Getenv("PKG_CONFIG")
if b == "" {
b = "pkg-config"
}
defaultpkgconfigtarget = b
// For tools being invoked but also for os.ExpandEnv.
os.Setenv("GO386", go386)
os.Setenv("GOARCH", goarch)
os.Setenv("GOARM", goarm)
os.Setenv("GOHOSTARCH", gohostarch)
os.Setenv("GOHOSTOS", gohostos)
os.Setenv("GOOS", goos)
os.Setenv("GOROOT", goroot)
os.Setenv("GOROOT_FINAL", goroot_final)
// Use a build cache separate from the default user one.
// Also one that will be wiped out during startup, so that
// make.bash really does start from a clean slate.
os.Setenv("GOCACHE", pathf("%s/pkg/obj/go-build", goroot))
// Make the environment more predictable.
os.Setenv("LANG", "C")
os.Setenv("LANGUAGE", "en_US.UTF8")
workdir = xworkdir()
xatexit(rmworkdir)
tooldir = pathf("%s/pkg/tool/%s_%s", goroot, gohostos, gohostarch)
}
// rmworkdir deletes the work directory.
func rmworkdir() {
if vflag > 1 {
errprintf("rm -rf %s\n", workdir)
}
xremoveall(workdir)
}
// Remove trailing spaces.
func chomp(s string) string {
return strings.TrimRight(s, " \t\r\n")
}
func branchtag(branch string) (tag string, precise bool) {
log := run(goroot, CheckExit, "git", "log", "--decorate=full", "--format=format:%d", "master.."+branch)
tag = branch
for row, line := range strings.Split(log, "\n") {
// Each line is either blank, or looks like
// (tag: refs/tags/go1.4rc2, refs/remotes/origin/release-branch.go1.4, refs/heads/release-branch.go1.4)
// We need to find an element starting with refs/tags/.
const s = " refs/tags/"
i := strings.Index(line, s)
if i < 0 {
continue
}
// Trim off known prefix.
line = line[i+len(s):]
// The tag name ends at a comma or paren.
j := strings.IndexAny(line, ",)")
if j < 0 {
continue // malformed line; ignore it
}
tag = line[:j]
if row == 0 {
precise = true // tag denotes HEAD
}
break
}
return
}
// findgoversion determines the Go version to use in the version string.
func findgoversion() string {
// The $GOROOT/VERSION file takes priority, for distributions
// without the source repo.
path := pathf("%s/VERSION", goroot)
if isfile(path) {
b := chomp(readfile(path))
// Commands such as "dist version > VERSION" will cause
// the shell to create an empty VERSION file and set dist's
// stdout to its fd. dist in turn looks at VERSION and uses
// its content if available, which is empty at this point.
// Only use the VERSION file if it is non-empty.
if b != "" {
// Some builders cross-compile the toolchain on linux-amd64
// and then copy the toolchain to the target builder (say, linux-arm)
// for use there. But on non-release (devel) branches, the compiler
// used on linux-amd64 will be an amd64 binary, and the compiler
// shipped to linux-arm will be an arm binary, so they will have different
// content IDs (they are binaries for different architectures) and so the
// packages compiled by the running-on-amd64 compiler will appear
// stale relative to the running-on-arm compiler. Avoid this by setting
// the version string to something that doesn't begin with devel.
// Then the version string will be used in place of the content ID,
// and the packages will look up-to-date.
// TODO(rsc): Really the builders could be writing out a better VERSION file instead,
// but it is easier to change cmd/dist than to try to make changes to
// the builder while Brad is away.
if strings.HasPrefix(b, "devel") {
if hostType := os.Getenv("META_BUILDLET_HOST_TYPE"); strings.Contains(hostType, "-cross") {
fmt.Fprintf(os.Stderr, "warning: changing VERSION from %q to %q\n", b, "builder "+hostType)
b = "builder " + hostType
}
}
return b
}
}
// The $GOROOT/VERSION.cache file is a cache to avoid invoking
// git every time we run this command. Unlike VERSION, it gets
// deleted by the clean command.
path = pathf("%s/VERSION.cache", goroot)
if isfile(path) {
return chomp(readfile(path))
}
// Show a nicer error message if this isn't a Git repo.
if !isGitRepo() {
fatalf("FAILED: not a Git repo; must put a VERSION file in $GOROOT")
}
// Otherwise, use Git.
// What is the current branch?
branch := chomp(run(goroot, CheckExit, "git", "rev-parse", "--abbrev-ref", "HEAD"))
// What are the tags along the current branch?
tag := "devel"
precise := false
// If we're on a release branch, use the closest matching tag
// that is on the release branch (and not on the master branch).
if strings.HasPrefix(branch, "release-branch.") {
tag, precise = branchtag(branch)
}
if !precise {
// Tag does not point at HEAD; add hash and date to version.
tag += chomp(run(goroot, CheckExit, "git", "log", "-n", "1", "--format=format: +%h %cd", "HEAD"))
}
// Cache version.
writefile(tag, path, 0)
return tag
}
// isGitRepo reports whether the working directory is inside a Git repository.
func isGitRepo() bool {
// NB: simply checking the exit code of `git rev-parse --git-dir` would
// suffice here, but that requires deviating from the infrastructure
// provided by `run`.
gitDir := chomp(run(goroot, 0, "git", "rev-parse", "--git-dir"))
if !filepath.IsAbs(gitDir) {
gitDir = filepath.Join(goroot, gitDir)
}
return isdir(gitDir)
}
/*
* Initial tree setup.
*/
// The old tools that no longer live in $GOBIN or $GOROOT/bin.
var oldtool = []string{
"5a", "5c", "5g", "5l",
"6a", "6c", "6g", "6l",
"8a", "8c", "8g", "8l",
"9a", "9c", "9g", "9l",
"6cov",
"6nm",
"6prof",
"cgo",
"ebnflint",
"goapi",
"gofix",
"goinstall",
"gomake",
"gopack",
"gopprof",
"gotest",
"gotype",
"govet",
"goyacc",
"quietgcc",
}
// Unreleased directories (relative to $GOROOT) that should
// not be in release branches.
var unreleased = []string{
"src/cmd/newlink",
"src/cmd/objwriter",
"src/debug/goobj",
"src/old",
}
// setup sets up the tree for the initial build.
func setup() {
// Create bin directory.
if p := pathf("%s/bin", goroot); !isdir(p) {
xmkdir(p)
}
// Create package directory.
if p := pathf("%s/pkg", goroot); !isdir(p) {
xmkdir(p)
}
p := pathf("%s/pkg/%s_%s", goroot, gohostos, gohostarch)
if rebuildall {
xremoveall(p)
}
xmkdirall(p)
if goos != gohostos || goarch != gohostarch {
p := pathf("%s/pkg/%s_%s", goroot, goos, goarch)
if rebuildall {
xremoveall(p)
}
xmkdirall(p)
}
// Create object directory.
// We used to use it for C objects.
// Now we use it for the build cache, to separate dist's cache
// from any other cache the user might have.
p = pathf("%s/pkg/obj/go-build", goroot)
if rebuildall {
xremoveall(p)
}
xmkdirall(p)
// Create tool directory.
// We keep it in pkg/, just like the object directory above.
if rebuildall {
xremoveall(tooldir)
}
xmkdirall(tooldir)
// Remove tool binaries from before the tool/gohostos_gohostarch
xremoveall(pathf("%s/bin/tool", goroot))
// Remove old pre-tool binaries.
for _, old := range oldtool {
xremove(pathf("%s/bin/%s", goroot, old))
}
// If $GOBIN is set and has a Go compiler, it must be cleaned.
for _, char := range "56789" {
if isfile(pathf("%s/%c%s", gobin, char, "g")) {
for _, old := range oldtool {
xremove(pathf("%s/%s", gobin, old))
}
break
}
}
// For release, make sure excluded things are excluded.
goversion := findgoversion()
if strings.HasPrefix(goversion, "release.") || (strings.HasPrefix(goversion, "go") && !strings.Contains(goversion, "beta")) {
for _, dir := range unreleased {
if p := pathf("%s/%s", goroot, dir); isdir(p) {
fatalf("%s should not exist in release build", p)
}
}
}
}
/*
* Tool building
*/
// deptab lists changes to the default dependencies for a given prefix.
// deps ending in /* read the whole directory; deps beginning with -
// exclude files with that prefix.
// Note that this table applies only to the build of cmd/go,
// after the main compiler bootstrap.
var deptab = []struct {
prefix string // prefix of target
dep []string // dependency tweaks for targets with that prefix
}{
{"cmd/go/internal/cfg", []string{
"zdefaultcc.go",
"zosarch.go",
}},
{"runtime/internal/sys", []string{
"zversion.go",
}},
{"go/build", []string{
"zcgo.go",
}},
}
// depsuffix records the allowed suffixes for source files.
var depsuffix = []string{
".s",
".go",
}
// gentab records how to generate some trivial files.
var gentab = []struct {
nameprefix string
gen func(string, string)
}{
{"zdefaultcc.go", mkzdefaultcc},
{"zosarch.go", mkzosarch},
{"zversion.go", mkzversion},
{"zcgo.go", mkzcgo},
// not generated anymore, but delete the file if we see it
{"enam.c", nil},
{"anames5.c", nil},
{"anames6.c", nil},
{"anames8.c", nil},
{"anames9.c", nil},
}
// installed maps from a dir name (as given to install) to a chan
// closed when the dir's package is installed.
var installed = make(map[string]chan struct{})
// install installs the library, package, or binary associated with dir,
// which is relative to $GOROOT/src.
func install(dir string) {
if ch, ok := installed[dir]; ok {
defer close(ch)
}
for _, dep := range builddeps[dir] {
<-installed[dep]
}
if vflag > 0 {
if goos != gohostos || goarch != gohostarch {
errprintf("%s (%s/%s)\n", dir, goos, goarch)
} else {
errprintf("%s\n", dir)
}
}
workdir := pathf("%s/%s", workdir, dir)
xmkdirall(workdir)
var clean []string
defer func() {
for _, name := range clean {
xremove(name)
}
}()
// path = full path to dir.
path := pathf("%s/src/%s", goroot, dir)
name := filepath.Base(dir)
ispkg := !strings.HasPrefix(dir, "cmd/") || strings.Contains(dir, "/internal/")
// Start final link command line.
// Note: code below knows that link.p[targ] is the target.
var (
link []string
targ int
ispackcmd bool
)
if ispkg {
// Go library (package).
ispackcmd = true
link = []string{"pack", pathf("%s/pkg/%s_%s/%s.a", goroot, goos, goarch, dir)}
targ = len(link) - 1
xmkdirall(filepath.Dir(link[targ]))
} else {
// Go command.
elem := name
if elem == "go" {
elem = "go_bootstrap"
}
link = []string{pathf("%s/link", tooldir), "-o", pathf("%s/%s%s", tooldir, elem, exe)}
targ = len(link) - 1
}
ttarg := mtime(link[targ])
// Gather files that are sources for this target.
// Everything in that directory, and any target-specific
// additions.
files := xreaddir(path)
// Remove files beginning with . or _,
// which are likely to be editor temporary files.
// This is the same heuristic build.ScanDir uses.
// There do exist real C files beginning with _,
// so limit that check to just Go files.
files = filter(files, func(p string) bool {
return !strings.HasPrefix(p, ".") && (!strings.HasPrefix(p, "_") || !strings.HasSuffix(p, ".go"))
})
for _, dt := range deptab {
if dir == dt.prefix || strings.HasSuffix(dt.prefix, "/") && strings.HasPrefix(dir, dt.prefix) {
for _, p := range dt.dep {
p = os.ExpandEnv(p)
files = append(files, p)
}
}
}
files = uniq(files)
// Convert to absolute paths.
for i, p := range files {
if !filepath.IsAbs(p) {
files[i] = pathf("%s/%s", path, p)
}
}
// Is the target up-to-date?
var gofiles, missing []string
stale := rebuildall
files = filter(files, func(p string) bool {
for _, suf := range depsuffix {
if strings.HasSuffix(p, suf) {
goto ok
}
}
return false
ok:
t := mtime(p)
if !t.IsZero() && !strings.HasSuffix(p, ".a") && !shouldbuild(p, dir) {
return false
}
if strings.HasSuffix(p, ".go") {
gofiles = append(gofiles, p)
}
if t.After(ttarg) {
stale = true
}
if t.IsZero() {
missing = append(missing, p)
}
return true
})
// If there are no files to compile, we're done.
if len(files) == 0 {
return
}
if !stale {
return
}
// For package runtime, copy some files into the work space.
if dir == "runtime" || strings.HasPrefix(dir, "runtime/internal/") {
xmkdirall(pathf("%s/pkg/include", goroot))
// For use by assembly and C files.
copyfile(pathf("%s/pkg/include/textflag.h", goroot),
pathf("%s/src/runtime/textflag.h", goroot), 0)
copyfile(pathf("%s/pkg/include/funcdata.h", goroot),
pathf("%s/src/runtime/funcdata.h", goroot), 0)
copyfile(pathf("%s/pkg/include/asm_ppc64x.h", goroot),
pathf("%s/src/runtime/asm_ppc64x.h", goroot), 0)
}
// Generate any missing files; regenerate existing ones.
for _, p := range files {
elem := filepath.Base(p)
for _, gt := range gentab {
if gt.gen == nil {
continue
}
if strings.HasPrefix(elem, gt.nameprefix) {
if vflag > 1 {
errprintf("generate %s\n", p)
}
gt.gen(path, p)
// Do not add generated file to clean list.
// In runtime, we want to be able to
// build the package with the go tool,
// and it assumes these generated files already
// exist (it does not know how to build them).
// The 'clean' command can remove
// the generated files.
goto built
}
}
// Did not rebuild p.
if find(p, missing) >= 0 {
fatalf("missing file %s", p)
}
built:
}
if goos != gohostos || goarch != gohostarch {
// We've generated the right files; the go command can do the build.
if vflag > 1 {
errprintf("skip build for cross-compile %s\n", dir)
}
return
}
var archive string
// The next loop will compile individual non-Go files.
// Hand the Go files to the compiler en masse.
// For package runtime, this writes go_asm.h, which
// the assembly files will need.
pkg := dir
if strings.HasPrefix(dir, "cmd/") && strings.Count(dir, "/") == 1 {
pkg = "main"
}
b := pathf("%s/_go_.a", workdir)
clean = append(clean, b)
if !ispackcmd {
link = append(link, b)
} else {
archive = b
}
compile := []string{pathf("%s/compile", tooldir), "-pack", "-o", b, "-p", pkg}
if gogcflags != "" {
compile = append(compile, strings.Fields(gogcflags)...)
}
if dir == "runtime" {
compile = append(compile, "-+", "-asmhdr", pathf("%s/go_asm.h", workdir))
}
compile = append(compile, gofiles...)
run(path, CheckExit|ShowOutput, compile...)
// Compile the files.
var wg sync.WaitGroup
for _, p := range files {
if !strings.HasSuffix(p, ".s") {
continue
}
var compile []string
// Assembly file for a Go package.
compile = []string{
pathf("%s/asm", tooldir),
"-I", workdir,
"-I", pathf("%s/pkg/include", goroot),
"-D", "GOOS_" + goos,
"-D", "GOARCH_" + goarch,
"-D", "GOOS_GOARCH_" + goos + "_" + goarch,
}
doclean := true
b := pathf("%s/%s", workdir, filepath.Base(p))
// Change the last character of the output file (which was c or s).
b = b[:len(b)-1] + "o"
compile = append(compile, "-o", b, p)
bgrun(&wg, path, compile...)
link = append(link, b)
if doclean {
clean = append(clean, b)
}
}
bgwait(&wg)
if ispackcmd {
xremove(link[targ])
dopack(link[targ], archive, link[targ+1:])
return
}
// Remove target before writing it.
xremove(link[targ])
run("", CheckExit|ShowOutput, link...)
}
// matchfield reports whether the field (x,y,z) matches this build.
// all the elements in the field must be satisfied.
func matchfield(f string) bool {
for _, tag := range strings.Split(f, ",") {
if !matchtag(tag) {
return false
}
}
return true
}
// matchtag reports whether the tag (x or !x) matches this build.
func matchtag(tag string) bool {
if tag == "" {
return false
}
if tag[0] == '!' {
if len(tag) == 1 || tag[1] == '!' {
return false
}
return !matchtag(tag[1:])
}
return tag == "gc" || tag == goos || tag == goarch || tag == "cmd_go_bootstrap" || tag == "go1.1" || (goos == "android" && tag == "linux")
}
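// Illustrative examples (not part of the original source): on a linux/amd64
// build, matchtag("linux") and matchtag("!windows") are true and
// matchtag("!linux") is false. matchfield("linux,amd64") requires every
// comma-separated tag to match, so it is true only when both do.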
// shouldbuild reports whether we should build this file.
// It applies the same rules that are used with context tags
// in package go/build, except it's less picky about the order
// of GOOS and GOARCH.
// We also allow the special tag cmd_go_bootstrap.
// See ../go/bootstrap.go and package go/build.
func shouldbuild(file, dir string) bool {
// Check file name for GOOS or GOARCH.
name := filepath.Base(file)
excluded := func(list []string, ok string) bool {
for _, x := range list {
if x == ok || ok == "android" && x == "linux" {
continue
}
i := strings.Index(name, x)
if i <= 0 || name[i-1] != '_' {
continue
}
i += len(x)
if i == len(name) || name[i] == '.' || name[i] == '_' {
return true
}
}
return false
}
if excluded(okgoos, goos) || excluded(okgoarch, goarch) {
return false
}
// Omit test files.
if strings.Contains(name, "_test") {
return false
}
// Check file contents for // +build lines.
for _, p := range strings.Split(readfile(file), "\n") {
p = strings.TrimSpace(p)
if p == "" {
continue
}
code := p
i := strings.Index(code, "//")
if i > 0 {
code = strings.TrimSpace(code[:i])
}
if code == "package documentation" {
return false
}
if code == "package main" && dir != "cmd/go" && dir != "cmd/cgo" {
return false
}
if !strings.HasPrefix(p, "//") {
break
}
if !strings.Contains(p, "+build") {
continue
}
fields := strings.Fields(p[2:])
if len(fields) < 1 || fields[0] != "+build" {
continue
}
for _, p := range fields[1:] {
if matchfield(p) {
goto fieldmatch
}
}
return false
fieldmatch:
}
return true
}
// copy copies the file src to dst, via memory (so only good for small files).
func copyfile(dst, src string, flag int) {
if vflag > 1 {
errprintf("cp %s %s\n", src, dst)
}
writefile(readfile(src), dst, flag)
}
// dopack copies the package src to dst,
// appending the files listed in extra.
// The archive format is the traditional Unix ar format.
func dopack(dst, src string, extra []string) {
bdst := bytes.NewBufferString(readfile(src))
for _, file := range extra {
b := readfile(file)
// find last path element for archive member name
i := strings.LastIndex(file, "/") + 1
j := strings.LastIndex(file, `\`) + 1
if i < j {
i = j
}
fmt.Fprintf(bdst, "%-16.16s%-12d%-6d%-6d%-8o%-10d`\n", file[i:], 0, 0, 0, 0644, len(b))
bdst.WriteString(b)
if len(b)&1 != 0 {
bdst.WriteByte(0)
}
}
writefile(bdst.String(), dst, 0)
}
// builddeps records the build dependencies for the 'go bootstrap' command.
// It is a map[string][]string and generated by mkdeps.bash into deps.go.
// buildlist is the list of directories being built, sorted by name.
var buildlist = makeBuildlist()
func makeBuildlist() []string {
var all []string
for dir := range builddeps {
all = append(all, dir)
}
sort.Strings(all)
return all
}
var runtimegen = []string{
"zaexperiment.h",
"zversion.go",
}
func clean() {
for _, name := range buildlist {
path := pathf("%s/src/%s", goroot, name)
// Remove generated files.
for _, elem := range xreaddir(path) {
for _, gt := range gentab {
if strings.HasPrefix(elem, gt.nameprefix) {
xremove(pathf("%s/%s", path, elem))
}
}
}
// Remove generated binary named for directory.
if strings.HasPrefix(name, "cmd/") {
xremove(pathf("%s/%s", path, name[4:]))
}
}
// remove runtimegen files.
path := pathf("%s/src/runtime", goroot)
for _, elem := range runtimegen {
xremove(pathf("%s/%s", path, elem))
}
if rebuildall {
// Remove object tree.
xremoveall(pathf("%s/pkg/obj/%s_%s", goroot, gohostos, gohostarch))
// Remove installed packages and tools.
xremoveall(pathf("%s/pkg/%s_%s", goroot, gohostos, gohostarch))
xremoveall(pathf("%s/pkg/%s_%s", goroot, goos, goarch))
xremoveall(pathf("%s/pkg/%s_%s_race", goroot, gohostos, gohostarch))
xremoveall(pathf("%s/pkg/%s_%s_race", goroot, goos, goarch))
xremoveall(tooldir)
// Remove cached version info.
xremove(pathf("%s/VERSION.cache", goroot))
}
}
/*
* command implementations
*/
// The env command prints the default environment.
func cmdenv() {
path := flag.Bool("p", false, "emit updated PATH")
plan9 := flag.Bool("9", false, "emit plan 9 syntax")
windows := flag.Bool("w", false, "emit windows syntax")
xflagparse(0)
format := "%s=\"%s\"\n"
switch {
case *plan9:
format = "%s='%s'\n"
case *windows:
format = "set %s=%s\r\n"
}
xprintf(format, "CC", defaultcc)
xprintf(format, "CC_FOR_TARGET", defaultcctarget)
xprintf(format, "GOROOT", goroot)
xprintf(format, "GOBIN", gobin)
xprintf(format, "GOARCH", goarch)
xprintf(format, "GOOS", goos)
xprintf(format, "GOHOSTARCH", gohostarch)
xprintf(format, "GOHOSTOS", gohostos)
xprintf(format, "GOTOOLDIR", tooldir)
if goarch == "arm" {
xprintf(format, "GOARM", goarm)
}
if goarch == "386" {
xprintf(format, "GO386", go386)
}
if *path {
sep := ":"
if gohostos == "windows" {
sep = ";"
}
xprintf(format, "PATH", fmt.Sprintf("%s%s%s", gobin, sep, os.Getenv("PATH")))
}
}
var (
timeLogEnabled = os.Getenv("GOBUILDTIMELOGFILE") != ""
timeLogMu sync.Mutex
timeLogFile *os.File
timeLogStart time.Time
)
func timelog(op, name string) {
if !timeLogEnabled {
return
}
timeLogMu.Lock()
defer timeLogMu.Unlock()
if timeLogFile == nil {
f, err := os.OpenFile(os.Getenv("GOBUILDTIMELOGFILE"), os.O_RDWR|os.O_APPEND, 0666)
if err != nil {
log.Fatal(err)
}
buf := make([]byte, 100)
n, _ := f.Read(buf)
s := string(buf[:n])
if i := strings.Index(s, "\n"); i >= 0 {
s = s[:i]
}
i := strings.Index(s, " start")
if i < 0 {
			log.Fatalf("time log %s does not begin with start line", os.Getenv("GOBUILDTIMELOGFILE"))
}
t, err := time.Parse(time.UnixDate, s[:i])
if err != nil {
log.Fatalf("cannot parse time log line %q: %v", s, err)
}
timeLogStart = t
timeLogFile = f
}
t := time.Now()
fmt.Fprintf(timeLogFile, "%s %+.1fs %s %s\n", t.Format(time.UnixDate), t.Sub(timeLogStart).Seconds(), op, name)
}
var toolchain = []string{"cmd/asm", "cmd/cgo", "cmd/compile", "cmd/link"}
// The bootstrap command runs a build from scratch,
// stopping at having installed the go_bootstrap command.
//
// WARNING: This command runs after cmd/dist is built with Go 1.4.
// It rebuilds and installs cmd/dist with the new toolchain, so other
// commands (like "go tool dist test" in run.bash) can rely on bug fixes
// made since Go 1.4, but this function cannot. In particular, the uses
// of os/exec in this function cannot assume that
// cmd.Env = append(os.Environ(), "X=Y")
// sets $X to Y in the command's environment. That guarantee was
// added after Go 1.4, and in fact in Go 1.4 it was typically the opposite:
// if $X was already present in os.Environ(), most systems preferred
// that setting, not the new one.
func cmdbootstrap() {
timelog("start", "dist bootstrap")
defer timelog("end", "dist bootstrap")
var noBanner bool
var debug bool
flag.BoolVar(&rebuildall, "a", rebuildall, "rebuild all")
flag.BoolVar(&debug, "d", debug, "enable debugging of bootstrap process")
flag.BoolVar(&noBanner, "no-banner", noBanner, "do not print banner")
xflagparse(0)
if isdir(pathf("%s/src/pkg", goroot)) {
fatalf("\n\n"+
"The Go package sources have moved to $GOROOT/src.\n"+
"*** %s still exists. ***\n"+
"It probably contains stale files that may confuse the build.\n"+
"Please (check what's there and) remove it and try again.\n"+
"See https://golang.org/s/go14nopkg\n",
pathf("%s/src/pkg", goroot))
}
if rebuildall {
clean()
}
setup()
timelog("build", "toolchain1")
checkCC()
bootstrapBuildTools()
// Remember old content of $GOROOT/bin for comparison below.
oldBinFiles, _ := filepath.Glob(pathf("%s/bin/*", goroot))
// For the main bootstrap, building for host os/arch.
oldgoos = goos
oldgoarch = goarch
goos = gohostos
goarch = gohostarch
os.Setenv("GOHOSTARCH", gohostarch)
os.Setenv("GOHOSTOS", gohostos)
os.Setenv("GOARCH", goarch)
os.Setenv("GOOS", goos)
timelog("build", "go_bootstrap")
xprintf("Building Go bootstrap cmd/go (go_bootstrap) using Go toolchain1.\n")
for _, dir := range buildlist {
installed[dir] = make(chan struct{})
}
for _, dir := range buildlist {
go install(dir)
}
<-installed["cmd/go"]
if vflag > 0 {
xprintf("\n")
}
gogcflags = os.Getenv("GO_GCFLAGS") // we were using $BOOT_GO_GCFLAGS until now
goldflags = os.Getenv("GO_LDFLAGS")
goBootstrap := pathf("%s/go_bootstrap", tooldir)
cmdGo := pathf("%s/go", gobin)
if debug {
run("", ShowOutput|CheckExit, pathf("%s/compile", tooldir), "-V=full")
copyfile(pathf("%s/compile1", tooldir), pathf("%s/compile", tooldir), writeExec)
}
// To recap, so far we have built the new toolchain
// (cmd/asm, cmd/cgo, cmd/compile, cmd/link)
// using Go 1.4's toolchain and go command.
// Then we built the new go command (as go_bootstrap)
// using the new toolchain and our own build logic (above).
//
// toolchain1 = mk(new toolchain, go1.4 toolchain, go1.4 cmd/go)
// go_bootstrap = mk(new cmd/go, toolchain1, cmd/dist)
//
// The toolchain1 we built earlier is built from the new sources,
// but because it was built using cmd/go it has no build IDs.
// The eventually installed toolchain needs build IDs, so we need
// to do another round:
//
// toolchain2 = mk(new toolchain, toolchain1, go_bootstrap)
//
timelog("build", "toolchain2")
if vflag > 0 {
xprintf("\n")
}
xprintf("Building Go toolchain2 using go_bootstrap and Go toolchain1.\n")
os.Setenv("CC", defaultcc)
if goos == oldgoos && goarch == oldgoarch {
// Host and target are same, and we have historically
// chosen $CC_FOR_TARGET in this case.
os.Setenv("CC", defaultcctarget)
}
goInstall(goBootstrap, toolchain...)
if debug {
run("", ShowOutput|CheckExit, pathf("%s/compile", tooldir), "-V=full")
run("", ShowOutput|CheckExit, pathf("%s/buildid", tooldir), pathf("%s/pkg/%s_%s/runtime/internal/sys.a", goroot, goos, goarch))
copyfile(pathf("%s/compile2", tooldir), pathf("%s/compile", tooldir), writeExec)
}
// Toolchain2 should be semantically equivalent to toolchain1,
// but it was built using the new compilers instead of the Go 1.4 compilers,
// so it should at the least run faster. Also, toolchain1 had no build IDs
// in the binaries, while toolchain2 does. In non-release builds, the
// toolchain's build IDs feed into constructing the build IDs of built targets,
// so in non-release builds, everything now looks out-of-date due to
// toolchain2 having build IDs - that is, due to the go command seeing
// that there are new compilers. In release builds, the toolchain's reported
// version is used in place of the build ID, and the go command does not
// see that change from toolchain1 to toolchain2, so in release builds,
// nothing looks out of date.
// To keep the behavior the same in both non-release and release builds,
// we force-install everything here.
//
// toolchain3 = mk(new toolchain, toolchain2, go_bootstrap)
//
timelog("build", "toolchain3")
if vflag > 0 {
xprintf("\n")
}
xprintf("Building Go toolchain3 using go_bootstrap and Go toolchain2.\n")
goInstall(goBootstrap, append([]string{"-a"}, toolchain...)...)
if debug {
run("", ShowOutput|CheckExit, pathf("%s/compile", tooldir), "-V=full")
run("", ShowOutput|CheckExit, pathf("%s/buildid", tooldir), pathf("%s/pkg/%s_%s/runtime/internal/sys.a", goroot, goos, goarch))
copyfile(pathf("%s/compile3", tooldir), pathf("%s/compile", tooldir), writeExec)
}
checkNotStale(goBootstrap, append(toolchain, "runtime/internal/sys")...)
if goos == oldgoos && goarch == oldgoarch {
// Common case - not setting up for cross-compilation.
timelog("build", "toolchain")
if vflag > 0 {
xprintf("\n")
}
xprintf("Building packages and commands for %s/%s.\n", goos, goarch)
} else {
// GOOS/GOARCH does not match GOHOSTOS/GOHOSTARCH.
// Finish GOHOSTOS/GOHOSTARCH installation and then
// run GOOS/GOARCH installation.
timelog("build", "host toolchain")
if vflag > 0 {
xprintf("\n")
}
xprintf("Building packages and commands for host, %s/%s.\n", goos, goarch)
goInstall(goBootstrap, "std", "cmd")
checkNotStale(goBootstrap, "std", "cmd")
checkNotStale(cmdGo, "std", "cmd")
timelog("build", "target toolchain")
if vflag > 0 {
xprintf("\n")
}
goos = oldgoos
goarch = oldgoarch
os.Setenv("GOOS", goos)
os.Setenv("GOARCH", goarch)
os.Setenv("CC", defaultcctarget)
xprintf("Building packages and commands for target, %s/%s.\n", goos, goarch)
}
goInstall(goBootstrap, "std", "cmd")
checkNotStale(goBootstrap, "std", "cmd")
checkNotStale(cmdGo, "std", "cmd")
if debug {
run("", ShowOutput|CheckExit, pathf("%s/compile", tooldir), "-V=full")
run("", ShowOutput|CheckExit, pathf("%s/buildid", tooldir), pathf("%s/pkg/%s_%s/runtime/internal/sys.a", goroot, goos, goarch))
checkNotStale(goBootstrap, append(toolchain, "runtime/internal/sys")...)
copyfile(pathf("%s/compile4", tooldir), pathf("%s/compile", tooldir), writeExec)
}
// Check that there are no new files in $GOROOT/bin other than
// go and gofmt and $GOOS_$GOARCH (target bin when cross-compiling).
binFiles, _ := filepath.Glob(pathf("%s/bin/*", goroot))
ok := map[string]bool{}
for _, f := range oldBinFiles {
ok[f] = true
}
for _, f := range binFiles {
elem := strings.TrimSuffix(filepath.Base(f), ".exe")
if !ok[f] && elem != "go" && elem != "gofmt" && elem != goos+"_"+goarch {
fatalf("unexpected new file in $GOROOT/bin: %s", elem)
}
}
// Remove go_bootstrap now that we're done.
xremove(pathf("%s/go_bootstrap", tooldir))
// Print trailing banner unless instructed otherwise.
if !noBanner {
banner()
}
}
func goInstall(goBinary string, args ...string) {
installCmd := []string{goBinary, "install", "-gcflags=" + gogcflags, "-ldflags=" + goldflags}
if vflag > 0 {
installCmd = append(installCmd, "-v")
}
// Force only one process at a time on vx32 emulation.
if gohostos == "plan9" && os.Getenv("sysname") == "vx32" {
installCmd = append(installCmd, "-p=1")
}
run(goroot, ShowOutput|CheckExit, append(installCmd, args...)...)
}
func checkNotStale(goBinary string, targets ...string) {
out := run(goroot, CheckExit,
append([]string{
goBinary,
"list", "-gcflags=" + gogcflags, "-ldflags=" + goldflags,
"-f={{if .Stale}}\t{{.ImportPath}}: {{.StaleReason}}{{end}}",
}, targets...)...)
if out != "" {
os.Setenv("GODEBUG", "gocachehash=1")
for _, target := range []string{"runtime/internal/sys", "cmd/dist", "cmd/link"} {
if strings.Contains(out, target) {
run(goroot, ShowOutput|CheckExit, goBinary, "list", "-f={{.ImportPath}} {{.Stale}}", target)
break
}
}
fatalf("unexpected stale targets reported by %s list -gcflags=\"%s\" -ldflags=\"%s\" for %v:\n%s", goBinary, gogcflags, goldflags, targets, out)
}
}
// Cannot use go/build directly because cmd/dist for a new release
// builds against an old release's go/build, which may be out of sync.
// To reduce duplication, we generate the list for go/build from this.
//
// We list all supported platforms in this list, so that this is the
// single point of truth for supported platforms. This list is used
// by 'go tool dist list'.
var cgoEnabled = map[string]bool{
"darwin/386": true,
"darwin/amd64": true,
"darwin/arm": true,
"darwin/arm64": true,
"dragonfly/amd64": true,
"freebsd/386": true,
"freebsd/amd64": true,
"freebsd/arm": false,
"linux/386": true,
"linux/amd64": true,
"linux/arm": true,
"linux/arm64": true,
"linux/ppc64": false,
"linux/ppc64le": true,
"linux/mips": true,
"linux/mipsle": true,
"linux/mips64": true,
"linux/mips64le": true,
"linux/s390x": true,
"android/386": true,
"android/amd64": true,
"android/arm": true,
"android/arm64": true,
"nacl/386": false,
"nacl/amd64p32": false,
"nacl/arm": false,
"netbsd/386": true,
"netbsd/amd64": true,
"netbsd/arm": true,
"openbsd/386": true,
"openbsd/amd64": true,
"openbsd/arm": false,
"plan9/386": false,
"plan9/amd64": false,
"plan9/arm": false,
"solaris/amd64": true,
"windows/386": true,
"windows/amd64": true,
}
func needCC() bool {
switch os.Getenv("CGO_ENABLED") {
case "1":
return true
case "0":
return false
}
return cgoEnabled[gohostos+"/"+gohostarch]
}
func checkCC() {
if !needCC() {
return
}
if output, err := exec.Command(defaultcc, "--help").CombinedOutput(); err != nil {
outputHdr := ""
if len(output) > 0 {
outputHdr = "\nCommand output:\n\n"
}
fatalf("cannot invoke C compiler %q: %v\n\n"+
"Go needs a system C compiler for use with cgo.\n"+
"To set a C compiler, set CC=the-compiler.\n"+
"To disable cgo, set CGO_ENABLED=0.\n%s%s", defaultcc, err, outputHdr, output)
}
}
func defaulttarg() string {
// xgetwd might return a path with symlinks fully resolved, and if
// there happens to be symlinks in goroot, then the hasprefix test
// will never succeed. Instead, we use xrealwd to get a canonical
// goroot/src before the comparison to avoid this problem.
pwd := xgetwd()
src := pathf("%s/src/", goroot)
real_src := xrealwd(src)
if !strings.HasPrefix(pwd, real_src) {
fatalf("current directory %s is not under %s", pwd, real_src)
}
pwd = pwd[len(real_src):]
// guard against xrealwd returning the directory without the trailing /
pwd = strings.TrimPrefix(pwd, "/")
return pwd
}
// Install installs the list of packages named on the command line.
func cmdinstall() {
xflagparse(-1)
if flag.NArg() == 0 {
install(defaulttarg())
}
for _, arg := range flag.Args() {
install(arg)
}
}
// Clean deletes temporary objects.
func cmdclean() {
xflagparse(0)
clean()
}
// Banner prints the 'now you've installed Go' banner.
func cmdbanner() {
xflagparse(0)
banner()
}
func banner() {
if vflag > 0 {
xprintf("\n")
}
xprintf("---\n")
xprintf("Installed Go for %s/%s in %s\n", goos, goarch, goroot)
xprintf("Installed commands in %s\n", gobin)
if !xsamefile(goroot_final, goroot) {
// If the files are to be moved, don't check that gobin
// is on PATH; assume they know what they are doing.
} else if gohostos == "plan9" {
// Check that gobin is bound before /bin.
pid := strings.Replace(readfile("#c/pid"), " ", "", -1)
ns := fmt.Sprintf("/proc/%s/ns", pid)
if !strings.Contains(readfile(ns), fmt.Sprintf("bind -b %s /bin", gobin)) {
xprintf("*** You need to bind %s before /bin.\n", gobin)
}
} else {
// Check that gobin appears in $PATH.
pathsep := ":"
if gohostos == "windows" {
pathsep = ";"
}
if !strings.Contains(pathsep+os.Getenv("PATH")+pathsep, pathsep+gobin+pathsep) {
xprintf("*** You need to add %s to your PATH.\n", gobin)
}
}
if !xsamefile(goroot_final, goroot) {
xprintf("\n"+
"The binaries expect %s to be copied or moved to %s\n",
goroot, goroot_final)
}
}
// Version prints the Go version.
func cmdversion() {
xflagparse(0)
xprintf("%s\n", findgoversion())
}
// cmdlist lists all supported platforms.
func cmdlist() {
jsonFlag := flag.Bool("json", false, "produce JSON output")
xflagparse(0)
var plats []string
for p := range cgoEnabled {
plats = append(plats, p)
}
sort.Strings(plats)
if !*jsonFlag {
for _, p := range plats {
xprintf("%s\n", p)
}
return
}
type jsonResult struct {
GOOS string
GOARCH string
CgoSupported bool
}
var results []jsonResult
for _, p := range plats {
fields := strings.Split(p, "/")
results = append(results, jsonResult{
GOOS: fields[0],
GOARCH: fields[1],
CgoSupported: cgoEnabled[p]})
}
out, err := json.MarshalIndent(results, "", "\t")
if err != nil {
fatalf("json marshal error: %v", err)
}
if _, err := os.Stdout.Write(out); err != nil {
fatalf("write failed: %v", err)
}
}
| [
"\"GOROOT\"",
"\"GOROOT_FINAL\"",
"\"GOBIN\"",
"\"GOOS\"",
"\"GOARM\"",
"\"GO386\"",
"\"GOHOSTARCH\"",
"\"GOARCH\"",
"\"GO_EXTLINK_ENABLED\"",
"\"BOOT_GO_GCFLAGS\"",
"\"CC\"",
"\"CFLAGS\"",
"\"LDFLAGS\"",
"\"CC_FOR_TARGET\"",
"\"CXX_FOR_TARGET\"",
"\"CXX\"",
"\"PKG_CONFIG\"",
"\"META_BUILDLET_HOST_TYPE\"",
"\"PATH\"",
"\"GOBUILDTIMELOGFILE\"",
"\"GOBUILDTIMELOGFILE\"",
"\"GOBULDTIMELOGFILE\"",
"\"GO_GCFLAGS\"",
"\"GO_LDFLAGS\"",
"\"sysname\"",
"\"CGO_ENABLED\"",
"\"PATH\""
] | [] | [
"GOBUILDTIMELOGFILE",
"CXX_FOR_TARGET",
"GOROOT",
"CFLAGS",
"PKG_CONFIG",
"GO_EXTLINK_ENABLED",
"GO386",
"GO_LDFLAGS",
"GOOS",
"META_BUILDLET_HOST_TYPE",
"LDFLAGS",
"GOHOSTARCH",
"GO_GCFLAGS",
"GOARM",
"GOROOT_FINAL",
"GOBULDTIMELOGFILE",
"CC",
"CXX",
"CGO_ENABLED",
"sysname",
"PATH",
"BOOT_GO_GCFLAGS",
"CC_FOR_TARGET",
"GOBIN",
"GOARCH"
] | [] | ["GOBUILDTIMELOGFILE", "CXX_FOR_TARGET", "GOROOT", "CFLAGS", "PKG_CONFIG", "GO_EXTLINK_ENABLED", "GO386", "GO_LDFLAGS", "GOOS", "META_BUILDLET_HOST_TYPE", "LDFLAGS", "GOHOSTARCH", "GO_GCFLAGS", "GOARM", "GOROOT_FINAL", "GOBULDTIMELOGFILE", "CC", "CXX", "CGO_ENABLED", "sysname", "PATH", "BOOT_GO_GCFLAGS", "CC_FOR_TARGET", "GOBIN", "GOARCH"] | go | 25 | 0 | |
lab1-movie-chatbot/movie-chatbot-lambda/app/chatbot.py | """
This sample demonstrates an implementation of the Lex Code Hook Interface
in order to serve a sample bot which serves movie recommendations based on a similar movie
"""
import math
import datetime
import logging
import boto3
import os
import csv
import json
""" --- Static initialization--- """
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
if os.environ['CAMPAIGN_ARN']: # we only care about these items when integrating with Personalize
logger.debug('We will be integrating with Amazon Personalize now')
campaign_arn = os.environ['CAMPAIGN_ARN'] # the arn of the campaign to call in Amazon Personalize
assets_bucket = os.environ['ASSETS_BUCKET'] # the bucket which contains static assets
## Let's make sure that the bucket exists
s3 = boto3.client('s3')
if s3.head_bucket(Bucket=assets_bucket):
logger.debug("The bucket "+ assets_bucket+ " exists, let's move on")
elif assets_bucket: ## the variable will be empty at first, so we need to make sure to account for that
raise Exception("The bucket " + assets_bucket + " does not exist. Please enter in an S3 bucket in the form of: movie-chatbot-resources-(your account number without hyphens, only digits)")
## Initialize the content
if os.environ.get('MOVIE_DATA_OBJECT') is None:
movie_data_object = 'movies.csv' # the object in the s3 bucket which has the list of movie titles and IDs
else:
movie_data_object = os.environ['MOVIE_DATA_OBJECT']
movies_file_local = '/tmp/movies.csv' # where to cache the file locally
logger.debug(
'Initializing lambda with campaign: {}, bucket: {}, movie_data:{}, file: {}'.format(campaign_arn,assets_bucket, movie_data_object, movies_file_local))
""" --- download the movies file --- """
# First we need to download a list of possible movies so we can match them to an item id which can be used to call Amazon Personalize
logger.debug(
'Downloading movies list from url=s3://{}/{}'.format(assets_bucket, movie_data_object))
s3.download_file(assets_bucket, movie_data_object, movies_file_local)
# Read in the CSV file and create a simple lookup dictionary; we could use pandas, but that pulls in a huge dependency and we want to keep it simple for this demo
moviesDict = {}
movies = csv.DictReader(open("/tmp/movies.csv"))
for row in movies:
moviesDict.update({row['ITEM_ID'] : {'id': row['ITEM_ID'], 'title': row['title'], 'genre': row['genre']}})
""" --- Helpers functions --- """
def get_slots(intent_request):
return intent_request['currentIntent']['slots']
def close(session_attributes, fulfillment_state, message):
response = {
'sessionAttributes': session_attributes,
'dialogAction': {
'type': 'Close',
'fulfillmentState': fulfillment_state,
'message': message
}
}
return response
def searchMovieByTitle(movies, title):
"""
We search for the title with a simple string match in our list of movie titles. Keeping it simple, as this is just a demo!
"""
for k in movies:
if title.lower() in movies[k]['title'].lower():
return movies[k]
return None
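# Illustrative usage of searchMovieByTitle with hypothetical data (not part of the original handler):
# searchMovieByTitle({'1': {'id': '1', 'title': 'Toy Story (1995)', 'genre': 'Animation'}}, 'toy story')
# returns {'id': '1', 'title': 'Toy Story (1995)', 'genre': 'Animation'}, because the match is a
# case-insensitive substring check against each stored title.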
def get_recommendations_for_movie(watchedMovie):
"""
Gets a list of similar movies from a trained model in Amazon Personalize
"""
logger.debug('get_recommendations_for_movie={}'.format(watchedMovie))
client = boto3.client('personalize-runtime')
rec_Items = []
## change this so that we ignore the Personalize endpoint existence if it is not provided
movieItem = searchMovieByTitle(moviesDict, watchedMovie)
logger.debug('Matched to item={}'.format(movieItem))
rec_response = client.get_recommendations(
campaignArn=campaign_arn,
itemId=str(movieItem['id']),
numResults=5
)
rec_itemIds = [x['itemId'] for x in rec_response['itemList']] # parse ItemIds from response
logger.debug('Personalize returned following ids:={}'.format(rec_itemIds))
for itemId in rec_itemIds:
rec_Items.append(moviesDict[itemId])
logger.debug('Returning recommendations:={}'.format(rec_Items))
return rec_Items
def get_fulfilled_message(rec_Items):
responseMessage = 'Thanks, Here is a list of movies I would recommend:'
for movie in rec_Items:
responseMessage = responseMessage + \
',\n' + movie['title']
return responseMessage + '.\n Enjoy!'
""" --- Function that control the bot's behavior --- """
def recommend_movies(intent_request):
"""
Extracts the watched Movie from the intent, calls Personalize to get similar movies and returns the 5 most recommended movies!
"""
watchedMovie = get_slots(intent_request)["watchedMovie"]
if os.environ['CAMPAIGN_ARN']:
recommendations = get_recommendations_for_movie(watchedMovie)
message = get_fulfilled_message(recommendations)
return close(intent_request['sessionAttributes'],
'Fulfilled',
{'contentType': 'PlainText',
'content': message})
else:
message = 'Right now, I am feeling under the weather and cannot provide a movie suggestion.'
return close(intent_request['sessionAttributes'],
'Fulfilled',
{'contentType': 'PlainText',
'content': message})
def dispatch(intent_request):
"""
Dispatch function, in case you want to support multiple intents with a single lambda function.
"""
logger.debug('dispatch userId={}, intentName={}'.format(
intent_request['userId'], intent_request['currentIntent']['name']))
intent_name = intent_request['currentIntent']['name']
# Dispatch to your bot's intent handlers
if intent_name == os.environ['INTENT_NAME']:
return recommend_movies(intent_request)
raise Exception('Intent with name ' + intent_name + ' not supported, because it is not equal to the value set for environment variable INTENT_NAME: '+ os.environ['INTENT_NAME'])
""" --- Main handler --- """
def lambda_handler(event, context):
"""
Route the incoming request based on intent.
The JSON body of the request is provided in the event slot.
"""
logger.debug('event.bot.name={}'.format(event['bot']['name']))
return dispatch(event)
| [] | [] | [
"INTENT_NAME",
"ASSETS_BUCKET",
"MOVIE_DATA_OBJECT",
"CAMPAIGN_ARN"
] | [] | ["INTENT_NAME", "ASSETS_BUCKET", "MOVIE_DATA_OBJECT", "CAMPAIGN_ARN"] | python | 4 | 0 | |
snmp_collector/read_conf/read_configuration.py | import json
import os
from pprint import pprint
from pysnmp.hlapi.asyncio import *
from colored_print import ColoredPrint
try:
from snmp_collector.utility.utility import MWT
except ImportError:
from utility.utility import MWT
log = ColoredPrint()
__author__ = "aGn"
__copyright__ = "Copyright 2018, Planet Earth"
def flatten(configs):
"""
Parse the received JSON config (remove the "metrics" key and merge each metric with its parent keys).
:param configs: Received configs from Django admin.
:return: Flattened list of per-metric configurations.
"""
flatten_configs = []
for conf in configs:
parent = {}
for key, val in conf.items():
if key != "metrics":
parent[key] = val
for key, val in conf.items():
if key == "metrics":
for met in conf[key]:
flatten_configs.append({})
last_index = len(flatten_configs) - 1
flatten_configs[last_index].update(parent)
for mk, mv in met.items():
flatten_configs[last_index][mk] = mv
return flatten_configs
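# Illustrative example of flatten() with hypothetical config keys (not from a real device):
# flatten([{'name': 'dev1', 'isEnable': True,
#           'metrics': [{'oid': '1.3.6.1.2.1.1.3.0'}, {'oid': '1.3.6.1.2.1.1.5.0'}]}])
# returns one dict per metric, each merged with its parent's keys:
# [{'name': 'dev1', 'isEnable': True, 'oid': '1.3.6.1.2.1.1.3.0'},
#  {'name': 'dev1', 'isEnable': True, 'oid': '1.3.6.1.2.1.1.5.0'}]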
def parse_isEnable(configs):
"""
Set isEnable=False on each metric if its parent (SNMP device) isEnable key equals False.
:param configs: SNMP configurations.
:return: Configurations with the device-level isEnable applied to each SNMP parameter.
"""
for conf in configs:
if not conf["isEnable"]:
for metric in conf["metrics"]:
metric["isEnable"] = False
return configs
def add_snmp_engine(configs):
"""
Add an SNMP engine for each SNMP line (device).
:param configs: SNMP configurations.
:return: Updated configurations with an "engine" key per device.
"""
for conf in configs:
conf["engine"] = SnmpEngine()
return configs
# @MWT(timeout=7)
def get_config():
"""
Read the stored SNMP JSON configuration file.
:return: Parsed and flattened SNMP configuration list.
"""
configs = None
try:
if "CONFIG_PATH" in os.environ:
config_path = os.environ["CONFIG_PATH"]
elif os.path.exists("/app/config/config.json"):
config_path = "/app/config/config.json"
elif os.path.exists("../config.json"):
config_path = "../config.json"
elif os.path.exists("config.json"):
config_path = "config.json"
elif os.path.exists("./config/config.json"):
config_path = "./config/config.json"
elif os.path.exists("./snmp_collector/config/config.json"):
config_path = "./snmp_collector/config/config.json"
elif os.path.exists("../snmp_collector/config/config.json"):
config_path = "../snmp_collector/config/config.json"
else:
raise ValueError("Cannot find a config file!")
with open(config_path) as json_file:
configs = json.load(json_file)
configs = parse_isEnable(configs)
configs = add_snmp_engine(configs)
configs = flatten(configs)
pprint(configs)
except (KeyError, IOError, FileNotFoundError, Exception) as exc:
log.err(exc)
return configs
if __name__ == "__main__":
"""Test case usage."""
configs = get_config()
for conf in configs:
pprint(conf)
| [] | [] | [
"CONFIG_PATH"
] | [] | ["CONFIG_PATH"] | python | 1 | 0 | |
upup/pkg/fi/cloudup/template_functions.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/******************************************************************************
Template Functions are what map functions in the models to internal logic in
kops. This is the point where we connect static YAML configuration to dynamic
runtime values in memory.
When defining a new function:
- Build the new function here
- Define the new function in AddTo()
dest["MyNewFunction"] = MyNewFunction // <-- Function Pointer
******************************************************************************/
package cloudup
import (
"encoding/base64"
"fmt"
"os"
"strconv"
"strings"
"text/template"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/dns"
"k8s.io/kops/pkg/model"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/gce"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/util/sets"
)
type TemplateFunctions struct {
cluster *kops.Cluster
instanceGroups []*kops.InstanceGroup
tags sets.String
region string
modelContext *model.KopsModelContext
}
// This will define the available functions we can use in our YAML models
// If we are trying to get a new function implemented it MUST
// be defined here.
func (tf *TemplateFunctions) AddTo(dest template.FuncMap) {
dest["SharedVPC"] = tf.SharedVPC
// Remember that we may be on a different arch from the target. Hard-code for now.
dest["Arch"] = func() string { return "amd64" }
dest["Base64Encode"] = func(s string) string {
return base64.StdEncoding.EncodeToString([]byte(s))
}
dest["replace"] = func(s, find, replace string) string {
return strings.Replace(s, find, replace, -1)
}
dest["join"] = func(a []string, sep string) string {
return strings.Join(a, sep)
}
dest["ClusterName"] = tf.modelContext.ClusterName
dest["HasTag"] = tf.HasTag
dest["WithDefaultBool"] = func(v *bool, defaultValue bool) bool {
if v != nil {
return *v
}
return defaultValue
}
dest["GetInstanceGroup"] = tf.GetInstanceGroup
dest["CloudTags"] = tf.modelContext.CloudTagsForInstanceGroup
dest["KubeDNS"] = func() *kops.KubeDNSConfig {
return tf.cluster.Spec.KubeDNS
}
dest["DnsControllerArgv"] = tf.DnsControllerArgv
dest["ExternalDnsArgv"] = tf.ExternalDnsArgv
// TODO: Only for GCE?
dest["EncodeGCELabel"] = gce.EncodeGCELabel
dest["Region"] = func() string {
return tf.region
}
dest["ProxyEnv"] = tf.ProxyEnv
if tf.cluster.Spec.Networking != nil && tf.cluster.Spec.Networking.Flannel != nil {
flannelBackendType := tf.cluster.Spec.Networking.Flannel.Backend
if flannelBackendType == "" {
glog.Warningf("Defaulting flannel backend to udp (not a recommended configuration)")
flannelBackendType = "udp"
}
dest["FlannelBackendType"] = func() string { return flannelBackendType }
}
}
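// Illustrative template fragment using functions registered in AddTo above
// (hypothetical model snippet, not taken from kops' actual model files):
//
//	metadata:
//	  labels:
//	    k8s-app: dns-controller
//	    cluster: {{ ClusterName }}
//	    region: {{ Region }}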
// SharedVPC is a simple helper function which makes the templates for a shared VPC clearer
func (tf *TemplateFunctions) SharedVPC() bool {
return tf.cluster.SharedVPC()
}
// HasTag returns true if the specified tag is set
func (tf *TemplateFunctions) HasTag(tag string) bool {
_, found := tf.tags[tag]
return found
}
// GetInstanceGroup returns the instance group with the specified name
func (tf *TemplateFunctions) GetInstanceGroup(name string) (*kops.InstanceGroup, error) {
for _, ig := range tf.instanceGroups {
if ig.ObjectMeta.Name == name {
return ig, nil
}
}
return nil, fmt.Errorf("InstanceGroup %q not found", name)
}
// DnsControllerArgv returns the args to the DNS controller
func (tf *TemplateFunctions) DnsControllerArgv() ([]string, error) {
var argv []string
argv = append(argv, "/usr/bin/dns-controller")
// @check if the dns controller has custom configuration
if tf.cluster.Spec.ExternalDNS == nil {
argv = append(argv, []string{"--watch-ingress=false"}...)
glog.V(4).Infof("watch-ingress=false set on dns-controller")
} else {
// @check if the watch ingress is set
var watchIngress bool
if tf.cluster.Spec.ExternalDNS.WatchIngress != nil {
watchIngress = fi.BoolValue(tf.cluster.Spec.ExternalDNS.WatchIngress)
}
if watchIngress {
glog.Warningln("--watch-ingress=true set on dns-controller")
glog.Warningln("this may cause problems with previously defined services: https://github.com/kubernetes/kops/issues/2496")
}
argv = append(argv, fmt.Sprintf("--watch-ingress=%t", watchIngress))
if tf.cluster.Spec.ExternalDNS.WatchNamespace != "" {
argv = append(argv, fmt.Sprintf("--watch-namespace=%q", tf.cluster.Spec.ExternalDNS.WatchNamespace))
}
}
if dns.IsGossipHostname(tf.cluster.Spec.MasterInternalName) {
argv = append(argv, "--dns=gossip")
argv = append(argv, "--gossip-seed=127.0.0.1:3999")
} else {
switch kops.CloudProviderID(tf.cluster.Spec.CloudProvider) {
case kops.CloudProviderAWS:
if strings.HasPrefix(os.Getenv("AWS_REGION"), "cn-") {
argv = append(argv, "--dns=gossip")
} else {
argv = append(argv, "--dns=aws-route53")
}
case kops.CloudProviderGCE:
argv = append(argv, "--dns=google-clouddns")
case kops.CloudProviderDO:
// this is not supported yet, here so we can successfully create clusters
// this will be supported for digitalocean in the future
argv = append(argv, "--dns=digitalocean")
case kops.CloudProviderVSphere:
argv = append(argv, "--dns=coredns")
argv = append(argv, "--dns-server="+*tf.cluster.Spec.CloudConfig.VSphereCoreDNSServer)
default:
return nil, fmt.Errorf("unhandled cloudprovider %q", tf.cluster.Spec.CloudProvider)
}
}
zone := tf.cluster.Spec.DNSZone
if zone != "" {
if strings.Contains(zone, ".") {
// match by name
argv = append(argv, "--zone="+zone)
} else {
// match by id
argv = append(argv, "--zone=*/"+zone)
}
}
// permit wildcard updates
argv = append(argv, "--zone=*/*")
// Verbose, but not crazy logging
argv = append(argv, "-v=2")
return argv, nil
}
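// For illustration only: with ExternalDNS unset, an AWS cluster outside the
// cn- regions, and DNSZone "example.com" (a placeholder, not a real zone),
// DnsControllerArgv above would assemble roughly:
//
//	/usr/bin/dns-controller --watch-ingress=false --dns=aws-route53
//	    --zone=example.com --zone=*/* -v=2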
func (tf *TemplateFunctions) ExternalDnsArgv() ([]string, error) {
var argv []string
cloudProvider := tf.cluster.Spec.CloudProvider
switch kops.CloudProviderID(cloudProvider) {
case kops.CloudProviderAWS:
argv = append(argv, "--provider=aws")
case kops.CloudProviderGCE:
project := tf.cluster.Spec.Project
argv = append(argv, "--provider=google")
argv = append(argv, "--google-project="+project)
default:
return nil, fmt.Errorf("unhandled cloudprovider %q", tf.cluster.Spec.CloudProvider)
}
argv = append(argv, "--source=ingress")
return argv, nil
}
func (tf *TemplateFunctions) ProxyEnv() map[string]string {
envs := map[string]string{}
proxies := tf.cluster.Spec.EgressProxy
if proxies == nil {
return envs
}
httpProxy := proxies.HTTPProxy
if httpProxy.Host != "" {
var portSuffix string
if httpProxy.Port != 0 {
portSuffix = ":" + strconv.Itoa(httpProxy.Port)
} else {
portSuffix = ""
}
url := "http://" + httpProxy.Host + portSuffix
envs["http_proxy"] = url
envs["https_proxy"] = url
}
if proxies.ProxyExcludes != "" {
envs["no_proxy"] = proxies.ProxyExcludes
envs["NO_PROXY"] = proxies.ProxyExcludes
}
return envs
}
| [
"\"AWS_REGION\""
] | [] | [
"AWS_REGION"
] | [] | ["AWS_REGION"] | go | 1 | 0 | |
clang/tools/scan-build-py/libscanbuild/analyze.py | # -*- coding: utf-8 -*-
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
""" This module implements the 'scan-build' command API.
To run the static analyzer against a build is done in multiple steps:
-- Intercept: capture the compilation command during the build,
-- Analyze: run the analyzer against the captured commands,
-- Report: create a cover report from the analyzer outputs. """
import re
import os
import os.path
import json
import logging
import multiprocessing
import tempfile
import functools
import subprocess
import contextlib
import datetime
import shutil
import glob
from collections import defaultdict
from libscanbuild import command_entry_point, compiler_wrapper, \
wrapper_environment, run_build, run_command, CtuConfig
from libscanbuild.arguments import parse_args_for_scan_build, \
parse_args_for_analyze_build
from libscanbuild.intercept import capture
from libscanbuild.report import document
from libscanbuild.compilation import split_command, classify_source, \
compiler_language
from libscanbuild.clang import get_version, get_arguments, get_triple_arch, \
ClangErrorException
from libscanbuild.shell import decode
__all__ = ['scan_build', 'analyze_build', 'analyze_compiler_wrapper']
COMPILER_WRAPPER_CC = 'analyze-cc'
COMPILER_WRAPPER_CXX = 'analyze-c++'
CTU_EXTDEF_MAP_FILENAME = 'externalDefMap.txt'
CTU_TEMP_DEFMAP_FOLDER = 'tmpExternalDefMaps'
@command_entry_point
def scan_build():
""" Entry point for scan-build command. """
args = parse_args_for_scan_build()
# will re-assign the report directory as new output
with report_directory(args.output, args.keep_empty) as args.output:
# Run against a build command. there are cases, when analyzer run
# is not required. But we need to set up everything for the
# wrappers, because 'configure' needs to capture the CC/CXX values
# for the Makefile.
if args.intercept_first:
# Run build command with intercept module.
exit_code = capture(args)
# Run the analyzer against the captured commands.
if need_analyzer(args.build):
govern_analyzer_runs(args)
else:
# Run build command and analyzer with compiler wrappers.
environment = setup_environment(args)
exit_code = run_build(args.build, env=environment)
# Cover report generation and bug counting.
number_of_bugs = document(args)
# Set exit status as it was requested.
return number_of_bugs if args.status_bugs else exit_code
@command_entry_point
def analyze_build():
""" Entry point for analyze-build command. """
args = parse_args_for_analyze_build()
# will re-assign the report directory as new output
with report_directory(args.output, args.keep_empty) as args.output:
# Run the analyzer against a compilation db.
govern_analyzer_runs(args)
# Cover report generation and bug counting.
number_of_bugs = document(args)
# Set exit status as it was requested.
return number_of_bugs if args.status_bugs else 0
def need_analyzer(args):
""" Check the intent of the build command.
When the static analyzer runs against a project's configure step, it should be
silent; there is no need to run the analyzer or generate a report.
Running `scan-build` against the configure step might still be necessary
when compiler wrappers are used: that is the moment when the build setup
checks the compiler and captures its location for the build process. """
return len(args) and not re.search(r'configure|autogen', args[0])
def prefix_with(constant, pieces):
""" From a sequence create another sequence where every second element
is from the original sequence and the odd elements are the prefix.
eg.: prefix_with(0, [1,2,3]) creates [0, 1, 0, 2, 0, 3] """
return [elem for piece in pieces for elem in [constant, piece]]
def get_ctu_config_from_args(args):
""" CTU configuration is created from the chosen phases and dir. """
return (
CtuConfig(collect=args.ctu_phases.collect,
analyze=args.ctu_phases.analyze,
dir=args.ctu_dir,
extdef_map_cmd=args.extdef_map_cmd)
if hasattr(args, 'ctu_phases') and hasattr(args.ctu_phases, 'dir')
else CtuConfig(collect=False, analyze=False, dir='', extdef_map_cmd=''))
def get_ctu_config_from_json(ctu_conf_json):
""" CTU configuration is created from the chosen phases and dir. """
ctu_config = json.loads(ctu_conf_json)
# Recover namedtuple from json when coming from analyze-cc or analyze-c++
return CtuConfig(collect=ctu_config[0],
analyze=ctu_config[1],
dir=ctu_config[2],
extdef_map_cmd=ctu_config[3])
def create_global_ctu_extdef_map(extdef_map_lines):
""" Takes iterator of individual external definition maps and creates a
global map keeping only unique names. We leave conflicting names out of
CTU.
:param extdef_map_lines: Contains the id of a definition (mangled name) and
the originating source (the corresponding AST file) name.
:type extdef_map_lines: Iterator of str.
:returns: Mangled name - AST file pairs.
:rtype: List of (str, str) tuples.
"""
mangled_to_asts = defaultdict(set)
for line in extdef_map_lines:
mangled_name, ast_file = line.strip().split(' ', 1)
mangled_to_asts[mangled_name].add(ast_file)
mangled_ast_pairs = []
for mangled_name, ast_files in mangled_to_asts.items():
if len(ast_files) == 1:
mangled_ast_pairs.append((mangled_name, next(iter(ast_files))))
return mangled_ast_pairs
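# Illustrative behaviour of create_global_ctu_extdef_map (hypothetical mangled names and paths):
# create_global_ctu_extdef_map(['c:@F@foo /ast/a.cpp.ast',
#                               'c:@F@bar /ast/a.cpp.ast',
#                               'c:@F@foo /ast/b.cpp.ast'])
# keeps only ('c:@F@bar', '/ast/a.cpp.ast'); 'c:@F@foo' maps to two different AST
# files, so it is treated as conflicting and left out of CTU.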
def merge_ctu_extdef_maps(ctudir):
""" Merge individual external definition maps into a global one.
As the collect phase runs parallel on multiple threads, all compilation
units are separately mapped into a temporary file in CTU_TEMP_DEFMAP_FOLDER.
These definition maps contain the mangled names and the source
(AST generated from the source) which had their definition.
These files should be merged at the end into a global map file:
CTU_EXTDEF_MAP_FILENAME."""
def generate_extdef_map_lines(extdefmap_dir):
""" Iterate over all lines of input files in a determined order. """
files = glob.glob(os.path.join(extdefmap_dir, '*'))
files.sort()
for filename in files:
with open(filename, 'r') as in_file:
for line in in_file:
yield line
def write_global_map(arch, mangled_ast_pairs):
""" Write (mangled name, ast file) pairs into final file. """
extern_defs_map_file = os.path.join(ctudir, arch,
CTU_EXTDEF_MAP_FILENAME)
with open(extern_defs_map_file, 'w') as out_file:
for mangled_name, ast_file in mangled_ast_pairs:
out_file.write('%s %s\n' % (mangled_name, ast_file))
triple_arches = glob.glob(os.path.join(ctudir, '*'))
for triple_path in triple_arches:
if os.path.isdir(triple_path):
triple_arch = os.path.basename(triple_path)
extdefmap_dir = os.path.join(ctudir, triple_arch,
CTU_TEMP_DEFMAP_FOLDER)
extdef_map_lines = generate_extdef_map_lines(extdefmap_dir)
mangled_ast_pairs = create_global_ctu_extdef_map(extdef_map_lines)
write_global_map(triple_arch, mangled_ast_pairs)
# Remove all temporary files
shutil.rmtree(extdefmap_dir, ignore_errors=True)
def run_analyzer_parallel(args):
""" Runs the analyzer against the given compilation database. """
def exclude(filename):
""" Return true when any excluded directory prefix the filename. """
return any(re.match(r'^' + directory, filename)
for directory in args.excludes)
consts = {
'clang': args.clang,
'output_dir': args.output,
'output_format': args.output_format,
'output_failures': args.output_failures,
'direct_args': analyzer_params(args),
'force_debug': args.force_debug,
'ctu': get_ctu_config_from_args(args)
}
logging.debug('run analyzer against compilation database')
with open(args.cdb, 'r') as handle:
generator = (dict(cmd, **consts)
for cmd in json.load(handle) if not exclude(cmd['file']))
# when verbose output requested execute sequentially
pool = multiprocessing.Pool(1 if args.verbose > 2 else None)
for current in pool.imap_unordered(run, generator):
if current is not None:
# display error message from the static analyzer
for line in current['error_output']:
logging.info(line.rstrip())
pool.close()
pool.join()
def govern_analyzer_runs(args):
""" Governs multiple runs in CTU mode or runs once in normal mode. """
ctu_config = get_ctu_config_from_args(args)
# If we do a CTU collect (1st phase) we remove all previous collection
# data first.
if ctu_config.collect:
shutil.rmtree(ctu_config.dir, ignore_errors=True)
# If the user asked for a collect (1st) and analyze (2nd) phase, we do an
# all-in-one run where we deliberately remove collection data before and
# also after the run. If the user asks only for a single phase data is
# left so multiple analyze runs can use the same data gathered by a single
# collection run.
if ctu_config.collect and ctu_config.analyze:
# CTU strings are coming from args.ctu_dir and extdef_map_cmd,
# so we can leave it empty
args.ctu_phases = CtuConfig(collect=True, analyze=False,
dir='', extdef_map_cmd='')
run_analyzer_parallel(args)
merge_ctu_extdef_maps(ctu_config.dir)
args.ctu_phases = CtuConfig(collect=False, analyze=True,
dir='', extdef_map_cmd='')
run_analyzer_parallel(args)
shutil.rmtree(ctu_config.dir, ignore_errors=True)
else:
# Single runs (collect or analyze) are launched from here.
run_analyzer_parallel(args)
if ctu_config.collect:
merge_ctu_extdef_maps(ctu_config.dir)
def setup_environment(args):
""" Set up environment for build command to interpose compiler wrapper. """
environment = dict(os.environ)
environment.update(wrapper_environment(args))
environment.update({
'CC': COMPILER_WRAPPER_CC,
'CXX': COMPILER_WRAPPER_CXX,
'ANALYZE_BUILD_CLANG': args.clang if need_analyzer(args.build) else '',
'ANALYZE_BUILD_REPORT_DIR': args.output,
'ANALYZE_BUILD_REPORT_FORMAT': args.output_format,
'ANALYZE_BUILD_REPORT_FAILURES': 'yes' if args.output_failures else '',
'ANALYZE_BUILD_PARAMETERS': ' '.join(analyzer_params(args)),
'ANALYZE_BUILD_FORCE_DEBUG': 'yes' if args.force_debug else '',
'ANALYZE_BUILD_CTU': json.dumps(get_ctu_config_from_args(args))
})
return environment
@command_entry_point
def analyze_compiler_wrapper():
""" Entry point for `analyze-cc` and `analyze-c++` compiler wrappers. """
return compiler_wrapper(analyze_compiler_wrapper_impl)
def analyze_compiler_wrapper_impl(result, execution):
""" Implements analyzer compiler wrapper functionality. """
# don't run the analyzer when compilation fails, or when it's not requested.
if result or not os.getenv('ANALYZE_BUILD_CLANG'):
return
# check is it a compilation?
compilation = split_command(execution.cmd)
if compilation is None:
return
# collect the needed parameters from environment, crash when missing
parameters = {
'clang': os.getenv('ANALYZE_BUILD_CLANG'),
'output_dir': os.getenv('ANALYZE_BUILD_REPORT_DIR'),
'output_format': os.getenv('ANALYZE_BUILD_REPORT_FORMAT'),
'output_failures': os.getenv('ANALYZE_BUILD_REPORT_FAILURES'),
'direct_args': os.getenv('ANALYZE_BUILD_PARAMETERS',
'').split(' '),
'force_debug': os.getenv('ANALYZE_BUILD_FORCE_DEBUG'),
'directory': execution.cwd,
'command': [execution.cmd[0], '-c'] + compilation.flags,
'ctu': get_ctu_config_from_json(os.getenv('ANALYZE_BUILD_CTU'))
}
# call static analyzer against the compilation
for source in compilation.files:
parameters.update({'file': source})
logging.debug('analyzer parameters %s', parameters)
current = run(parameters)
# display error message from the static analyzer
if current is not None:
for line in current['error_output']:
logging.info(line.rstrip())
@contextlib.contextmanager
def report_directory(hint, keep):
""" Responsible for the report directory.
hint -- could specify the parent directory of the output directory.
keep -- a boolean value to keep or delete the empty report directory. """
stamp_format = 'scan-build-%Y-%m-%d-%H-%M-%S-%f-'
stamp = datetime.datetime.now().strftime(stamp_format)
parent_dir = os.path.abspath(hint)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
name = tempfile.mkdtemp(prefix=stamp, dir=parent_dir)
logging.info('Report directory created: %s', name)
try:
yield name
finally:
if os.listdir(name):
msg = "Run 'scan-view %s' to examine bug reports."
keep = True
else:
if keep:
msg = "Report directory '%s' contains no report, but kept."
else:
msg = "Removing directory '%s' because it contains no report."
logging.warning(msg, name)
if not keep:
os.rmdir(name)
def analyzer_params(args):
""" A group of command line arguments can mapped to command
line arguments of the analyzer. This method generates those. """
result = []
if args.store_model:
result.append('-analyzer-store={0}'.format(args.store_model))
if args.constraints_model:
result.append('-analyzer-constraints={0}'.format(
args.constraints_model))
if args.internal_stats:
result.append('-analyzer-stats')
if args.analyze_headers:
result.append('-analyzer-opt-analyze-headers')
if args.stats:
result.append('-analyzer-checker=debug.Stats')
if args.maxloop:
result.extend(['-analyzer-max-loop', str(args.maxloop)])
if args.output_format:
result.append('-analyzer-output={0}'.format(args.output_format))
if args.analyzer_config:
result.extend(['-analyzer-config', args.analyzer_config])
if args.verbose >= 4:
result.append('-analyzer-display-progress')
if args.plugins:
result.extend(prefix_with('-load', args.plugins))
if args.enable_checker:
checkers = ','.join(args.enable_checker)
result.extend(['-analyzer-checker', checkers])
if args.disable_checker:
checkers = ','.join(args.disable_checker)
result.extend(['-analyzer-disable-checker', checkers])
return prefix_with('-Xclang', result)
def require(required):
""" Decorator for checking the required values in state.
It checks the required attributes in the passed state and stops when
any of those is missing. """
def decorator(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
for key in required:
if key not in args[0]:
raise KeyError('{0} not passed to {1}'.format(
key, function.__name__))
return function(*args, **kwargs)
return wrapper
return decorator
@require(['command', # entry from compilation database
'directory', # entry from compilation database
'file', # entry from compilation database
'clang', # clang executable name (and path)
'direct_args', # arguments from command line
'force_debug', # kill non debug macros
'output_dir', # where generated report files shall go
'output_format', # it's 'plist', 'html', both or plist-multi-file
'output_failures', # generate crash reports or not
'ctu']) # ctu control options
def run(opts):
""" Entry point to run (or not) static analyzer against a single entry
of the compilation database.
This complex task is decomposed into smaller methods which call
each other in a chain. If the analysis is not possible, the given method
just returns and breaks the chain.
The passed parameter is a Python dictionary. Each method first checks
that the needed parameters were received. (This is done by the 'require'
decorator. It's like an 'assert' to check the contract between the
caller and the called method.) """
try:
command = opts.pop('command')
command = command if isinstance(command, list) else decode(command)
logging.debug("Run analyzer against '%s'", command)
opts.update(classify_parameters(command))
return arch_check(opts)
except Exception:
logging.error("Problem occurred during analysis.", exc_info=1)
return None
@require(['clang', 'directory', 'flags', 'file', 'output_dir', 'language',
'error_output', 'exit_code'])
def report_failure(opts):
""" Create report when analyzer failed.
The major report is the preprocessor output. The output filename is generated
randomly. The compiler output is also captured into a '.stderr.txt' file,
and some more execution context is saved into an '.info.txt' file. """
def extension():
""" Generate preprocessor file extension. """
mapping = {'objective-c++': '.mii', 'objective-c': '.mi', 'c++': '.ii'}
return mapping.get(opts['language'], '.i')
def destination():
""" Creates failures directory if not exits yet. """
failures_dir = os.path.join(opts['output_dir'], 'failures')
if not os.path.isdir(failures_dir):
os.makedirs(failures_dir)
return failures_dir
# Classify error type: when Clang terminated by a signal it's a 'Crash'.
# (python subprocess Popen.returncode is negative when child terminated
# by signal.) Everything else is 'Other Error'.
error = 'crash' if opts['exit_code'] < 0 else 'other_error'
# Create preprocessor output file name. (This is blindly following the
# Perl implementation.)
(handle, name) = tempfile.mkstemp(suffix=extension(),
prefix='clang_' + error + '_',
dir=destination())
os.close(handle)
# Execute Clang again, but run the syntax check only.
cwd = opts['directory']
cmd = [opts['clang'], '-fsyntax-only', '-E'] + opts['flags'] + \
[opts['file'], '-o', name]
try:
cmd = get_arguments(cmd, cwd)
run_command(cmd, cwd=cwd)
except subprocess.CalledProcessError:
pass
except ClangErrorException:
pass
# write general information about the crash
with open(name + '.info.txt', 'w') as handle:
handle.write(opts['file'] + os.linesep)
handle.write(error.title().replace('_', ' ') + os.linesep)
handle.write(' '.join(cmd) + os.linesep)
handle.write(' '.join(os.uname()) + os.linesep)
handle.write(get_version(opts['clang']))
handle.close()
# write the captured output too
with open(name + '.stderr.txt', 'w') as handle:
handle.writelines(opts['error_output'])
handle.close()
@require(['clang', 'directory', 'flags', 'direct_args', 'file', 'output_dir',
'output_format'])
def run_analyzer(opts, continuation=report_failure):
""" It assembles the analysis command line and executes it. Capture the
output of the analysis and returns with it. If failure reports are
requested, it calls the continuation to generate it. """
def target():
""" Creates output file name for reports. """
if opts['output_format'] in {
'plist',
'plist-html',
'plist-multi-file'}:
(handle, name) = tempfile.mkstemp(prefix='report-',
suffix='.plist',
dir=opts['output_dir'])
os.close(handle)
return name
return opts['output_dir']
try:
cwd = opts['directory']
cmd = get_arguments([opts['clang'], '--analyze'] +
opts['direct_args'] + opts['flags'] +
[opts['file'], '-o', target()],
cwd)
output = run_command(cmd, cwd=cwd)
return {'error_output': output, 'exit_code': 0}
except subprocess.CalledProcessError as ex:
result = {'error_output': ex.output, 'exit_code': ex.returncode}
if opts.get('output_failures', False):
opts.update(result)
continuation(opts)
return result
except ClangErrorException as ex:
result = {'error_output': ex.error, 'exit_code': 0}
if opts.get('output_failures', False):
opts.update(result)
continuation(opts)
return result
def extdef_map_list_src_to_ast(extdef_src_list):
""" Turns textual external definition map list with source files into an
external definition map list with ast files. """
extdef_ast_list = []
for extdef_src_txt in extdef_src_list:
mangled_name, path = extdef_src_txt.split(" ", 1)
# Normalize path on windows as well
path = os.path.splitdrive(path)[1]
# Make relative path out of absolute
path = path[1:] if path[0] == os.sep else path
ast_path = os.path.join("ast", path + ".ast")
extdef_ast_list.append(mangled_name + " " + ast_path)
return extdef_ast_list
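# Illustrative transformation by extdef_map_list_src_to_ast (hypothetical entry): the line
#   'c:@F@foo /home/user/project/main.cpp'
# becomes
#   'c:@F@foo ast/home/user/project/main.cpp.ast'
# i.e. the absolute source path is re-rooted under the relative "ast" folder with an ".ast"
# suffix, matching where ctu_collect_phase writes the generated AST files.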
@require(['clang', 'directory', 'flags', 'direct_args', 'file', 'ctu'])
def ctu_collect_phase(opts):
""" Preprocess source by generating all data needed by CTU analysis. """
def generate_ast(triple_arch):
""" Generates ASTs for the current compilation command. """
args = opts['direct_args'] + opts['flags']
ast_joined_path = os.path.join(opts['ctu'].dir, triple_arch, 'ast',
os.path.realpath(opts['file'])[1:] +
'.ast')
ast_path = os.path.abspath(ast_joined_path)
ast_dir = os.path.dirname(ast_path)
if not os.path.isdir(ast_dir):
try:
os.makedirs(ast_dir)
except OSError:
# In case another process already created it.
pass
ast_command = [opts['clang'], '-emit-ast']
ast_command.extend(args)
ast_command.append('-w')
ast_command.append(opts['file'])
ast_command.append('-o')
ast_command.append(ast_path)
logging.debug("Generating AST using '%s'", ast_command)
run_command(ast_command, cwd=opts['directory'])
def map_extdefs(triple_arch):
""" Generate external definition map file for the current source. """
args = opts['direct_args'] + opts['flags']
extdefmap_command = [opts['ctu'].extdef_map_cmd]
extdefmap_command.append(opts['file'])
extdefmap_command.append('--')
extdefmap_command.extend(args)
logging.debug("Generating external definition map using '%s'",
extdefmap_command)
extdef_src_list = run_command(extdefmap_command, cwd=opts['directory'])
extdef_ast_list = extdef_map_list_src_to_ast(extdef_src_list)
extern_defs_map_folder = os.path.join(opts['ctu'].dir, triple_arch,
CTU_TEMP_DEFMAP_FOLDER)
if not os.path.isdir(extern_defs_map_folder):
try:
os.makedirs(extern_defs_map_folder)
except OSError:
# In case another process already created it.
pass
if extdef_ast_list:
with tempfile.NamedTemporaryFile(mode='w',
dir=extern_defs_map_folder,
delete=False) as out_file:
out_file.write("\n".join(extdef_ast_list) + "\n")
cwd = opts['directory']
cmd = [opts['clang'], '--analyze'] + opts['direct_args'] + opts['flags'] \
+ [opts['file']]
triple_arch = get_triple_arch(cmd, cwd)
generate_ast(triple_arch)
map_extdefs(triple_arch)
@require(['ctu'])
def dispatch_ctu(opts, continuation=run_analyzer):
""" Execute only one phase of 2 phases of CTU if needed. """
ctu_config = opts['ctu']
if ctu_config.collect or ctu_config.analyze:
assert ctu_config.collect != ctu_config.analyze
if ctu_config.collect:
return ctu_collect_phase(opts)
if ctu_config.analyze:
cwd = opts['directory']
cmd = [opts['clang'], '--analyze'] + opts['direct_args'] \
+ opts['flags'] + [opts['file']]
triarch = get_triple_arch(cmd, cwd)
ctu_options = ['ctu-dir=' + os.path.join(ctu_config.dir, triarch),
'experimental-enable-naive-ctu-analysis=true']
analyzer_options = prefix_with('-analyzer-config', ctu_options)
direct_options = prefix_with('-Xanalyzer', analyzer_options)
opts['direct_args'].extend(direct_options)
return continuation(opts)
@require(['flags', 'force_debug'])
def filter_debug_flags(opts, continuation=dispatch_ctu):
""" Filter out nondebug macros when requested. """
if opts.pop('force_debug'):
# lazy implementation just append an undefine macro at the end
opts.update({'flags': opts['flags'] + ['-UNDEBUG']})
return continuation(opts)
@require(['language', 'compiler', 'file', 'flags'])
def language_check(opts, continuation=filter_debug_flags):
""" Find out the language from command line parameters or file name
extension. The decision is also influenced by the compiler invocation. """
accepted = frozenset({
'c', 'c++', 'objective-c', 'objective-c++', 'c-cpp-output',
'c++-cpp-output', 'objective-c-cpp-output'
})
# language can be given as a parameter...
language = opts.pop('language')
compiler = opts.pop('compiler')
# ... or find out from source file extension
if language is None and compiler is not None:
language = classify_source(opts['file'], compiler == 'c')
if language is None:
logging.debug('skip analysis, language not known')
return None
elif language not in accepted:
logging.debug('skip analysis, language not supported')
return None
else:
logging.debug('analysis, language: %s', language)
opts.update({'language': language,
'flags': ['-x', language] + opts['flags']})
return continuation(opts)
@require(['arch_list', 'flags'])
def arch_check(opts, continuation=language_check):
""" Do run analyzer through one of the given architectures. """
disabled = frozenset({'ppc', 'ppc64'})
received_list = opts.pop('arch_list')
if received_list:
# filter out disabled architectures and -arch switches
filtered_list = [a for a in received_list if a not in disabled]
if filtered_list:
# There should be only one arch given (or the same multiple
# times). If multiple archs are given and they are not
# the same, those should not change the pre-processing step.
# But that's the only pass we have before run the analyzer.
current = filtered_list.pop()
logging.debug('analysis, on arch: %s', current)
opts.update({'flags': ['-arch', current] + opts['flags']})
return continuation(opts)
else:
logging.debug('skip analysis, found not supported arch')
return None
else:
logging.debug('analysis, on default arch')
return continuation(opts)
# To get good results from the static analyzer, certain compiler options shall be
# omitted. The compiler flag filtering only affects the static analyzer run.
#
# Keys are the option name, value number of options to skip
IGNORED_FLAGS = {
'-c': 0, # compile option will be overwritten
'-fsyntax-only': 0, # static analyzer option will be overwritten
'-o': 1, # will set up own output file
# flags below are inherited from the perl implementation.
'-g': 0,
'-save-temps': 0,
'-install_name': 1,
'-exported_symbols_list': 1,
'-current_version': 1,
'-compatibility_version': 1,
'-init': 1,
'-e': 1,
'-seg1addr': 1,
'-bundle_loader': 1,
'-multiply_defined': 1,
'-sectorder': 3,
'--param': 1,
'--serialize-diagnostics': 1
}
def classify_parameters(command):
""" Prepare compiler flags (filters some and add others) and take out
language (-x) and architecture (-arch) flags for future processing. """
result = {
'flags': [], # the filtered compiler flags
'arch_list': [], # list of architecture flags
'language': None, # compilation language, None, if not specified
'compiler': compiler_language(command) # 'c' or 'c++'
}
# iterate on the compile options
args = iter(command[1:])
for arg in args:
# take arch flags into a separate basket
if arg == '-arch':
result['arch_list'].append(next(args))
# take language
elif arg == '-x':
result['language'] = next(args)
# parameters which look like source files are not flags
elif re.match(r'^[^-].+', arg) and classify_source(arg):
pass
# ignore some flags
elif arg in IGNORED_FLAGS:
count = IGNORED_FLAGS[arg]
for _ in range(count):
next(args)
# we don't care about extra warnings, but we should suppress ones
# that we don't want to see.
elif re.match(r'^-W.+', arg) and not re.match(r'^-Wno-.+', arg):
pass
# and consider everything else as compilation flag.
else:
result['flags'].append(arg)
return result
| [] | [] | [
"ANALYZE_BUILD_REPORT_FORMAT",
"ANALYZE_BUILD_CLANG",
"ANALYZE_BUILD_CTU",
"ANALYZE_BUILD_REPORT_DIR",
"ANALYZE_BUILD_FORCE_DEBUG",
"ANALYZE_BUILD_REPORT_FAILURES",
"ANALYZE_BUILD_PARAMETERS"
] | [] | ["ANALYZE_BUILD_REPORT_FORMAT", "ANALYZE_BUILD_CLANG", "ANALYZE_BUILD_CTU", "ANALYZE_BUILD_REPORT_DIR", "ANALYZE_BUILD_FORCE_DEBUG", "ANALYZE_BUILD_REPORT_FAILURES", "ANALYZE_BUILD_PARAMETERS"] | python | 7 | 0 | |
cmd/internal/shell/zsh.go | package shell
import (
"io/ioutil"
"os"
"os/exec"
"path/filepath"
)
type zsh struct {
sh string
}
func (z zsh) Command(subcommands []string, rundir string) (*exec.Cmd, error) {
// Generate alias executables so things like `xargs` work.
if err := writeAliases(subcommands, rundir); err != nil {
return nil, err
}
// Generate and invoke custom .zshenv and .zshrc files.
// - .zshenv will load ~/.zshenv (if ~/.washenv is absent), then alias subcommands,
// then load ~/.washenv (if present).
// - .zshrc will load ~/.zshrc (if ~/.washrc is absent), then configure the prompt,
// then load ~/.washrc (if present).
cmd := exec.Command(z.sh)
// Override ZDOTDIR so zsh looks for our configs, but save the original ZDOTDIR so we can use it
// when loading zsh-specific config that may rely on it being set.
cmd.Env = append(os.Environ(), "ZDOTDIR="+rundir)
zdotdir := os.Getenv("ZDOTDIR")
var common string
for _, alias := range subcommands {
common += "alias " + alias + "='WASH_EMBEDDED=1 wash " + alias + "'\n"
}
content := `if [[ ! -s ~/.washenv ]]; then
# Reset ZDOTDIR for zsh config, then set it back so we load Wash's zshrc
ZDOTDIR='` + zdotdir + `'
if [[ -s "${ZDOTDIR:-$HOME}/.zshenv" ]]; then
source "${ZDOTDIR:-$HOME}/.zshenv"
fi
ZDOTDIR='` + rundir + `'
fi
`
content += common
content += "if [[ -s ~/.washenv ]]; then source ~/.washenv; fi\n"
if err := ioutil.WriteFile(filepath.Join(rundir, ".zshenv"), []byte(content), 0644); err != nil {
return nil, err
}
content = `if [[ ! -s ~/.washrc ]]; then
ZDOTDIR='` + zdotdir + `'
if [[ -s "${ZDOTDIR:-$HOME}/.zprofile" ]]; then source "${ZDOTDIR:-$HOME}/.zprofile"; fi
if [[ -s "${ZDOTDIR:-$HOME}/.zshrc" ]]; then source "${ZDOTDIR:-$HOME}/.zshrc"; fi
fi
`
// Re-add aliases in case .zprofile or .zshrc overrode them.
content += common
// Configure prompt and override `cd`
content += preparePrompt("%F{cyan}", "%F{green}", "%f", "PROMPT") + `
autoload -Uz add-zsh-hook
add-zsh-hook precmd prompter
` + overrideCd() + `
if [[ -s ~/.washrc ]]; then source ~/.washrc; fi
`
if err := ioutil.WriteFile(filepath.Join(rundir, ".zshrc"), []byte(content), 0644); err != nil {
return nil, err
}
return cmd, nil
}
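// Illustrative sketch (not part of the original file): a possible call site for
// zsh.Command. The shell path, subcommand list and rundir below are hypothetical.
//
//   z := zsh{sh: "/bin/zsh"}
//   cmd, err := z.Command([]string{"ls", "ps"}, "/tmp/wash-run")
//   if err == nil {
//       // cmd starts an interactive zsh that sources the generated
//       // .zshenv/.zshrc from rundir (via the overridden ZDOTDIR).
//       _ = cmd.Run()
//   }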
| [
"\"ZDOTDIR\""
] | [] | [
"ZDOTDIR"
] | [] | ["ZDOTDIR"] | go | 1 | 0 | |
aiven/resource_service_test.go | package aiven
import (
"fmt"
"os"
"reflect"
"sort"
"testing"
"github.com/aiven/aiven-go-client"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
)
func init() {
resource.AddTestSweepers("aiven_service", &resource.Sweeper{
Name: "aiven_service",
F: sweepServices,
Dependencies: []string{
"aiven_database",
"aiven_kafka_topic",
"aiven_kafka_schema",
"aiven_kafka_connector",
"aiven_connection_pool",
},
})
}
func sweepServices(region string) error {
client, err := sharedClient(region)
if err != nil {
return fmt.Errorf("error getting client: %s", err)
}
conn := client.(*aiven.Client)
projects, err := conn.Projects.List()
if err != nil {
return fmt.Errorf("error retrieving a list of projects : %s", err)
}
for _, project := range projects {
if project.Name == os.Getenv("AIVEN_PROJECT_NAME") {
services, err := conn.Services.List(project.Name)
if err != nil {
return fmt.Errorf("error retrieving a list of services for a project `%s`: %s", project.Name, err)
}
for _, service := range services {
// if termination_protection is on, the service cannot be deleted;
// update the service and turn termination_protection off first
if service.TerminationProtection {
_, err := conn.Services.Update(project.Name, service.Name, aiven.UpdateServiceRequest{
Cloud: service.CloudName,
MaintenanceWindow: &service.MaintenanceWindow,
Plan: service.Plan,
ProjectVPCID: service.ProjectVPCID,
Powered: true,
TerminationProtection: false,
UserConfig: service.UserConfig,
})
if err != nil {
return fmt.Errorf("error destroying service %s during sweep, disabling `termination_protection`: %s", service.Name, err)
}
}
if err := conn.Services.Delete(project.Name, service.Name); err != nil {
if err.(aiven.Error).Status != 404 {
return fmt.Errorf("error destroying service %s during sweep: %s", service.Name, err)
}
}
}
}
}
return nil
}
func testAccCheckAivenServiceCommonAttributes(n string) resource.TestCheckFunc {
return func(s *terraform.State) error {
r := s.RootModule().Resources[n]
a := r.Primary.Attributes
if a["cloud_name"] == "" {
return fmt.Errorf("expected to get a cloud_name from Aiven")
}
if a["service_name"] == "" {
return fmt.Errorf("expected to get a service name from Aiven")
}
if a["project"] == "" {
return fmt.Errorf("expected to get a project name from Aiven")
}
if a["plan"] == "" {
return fmt.Errorf("expected to get a plan from Aiven")
}
if a["service_uri"] == "" {
return fmt.Errorf("expected to get a service_uri from Aiven")
}
if a["maintenance_window_dow"] != "monday" {
return fmt.Errorf("expected to get a service.maintenance_window_dow from Aiven")
}
// Kafka service has no username and password
if a["service_type"] != "kafka" {
if a["service_password"] == "" {
return fmt.Errorf("expected to get a service_password from Aiven")
}
if a["service_username"] == "" {
return fmt.Errorf("expected to get a service_username from Aiven")
}
}
if a["service_port"] == "" {
return fmt.Errorf("expected to get a service_port from Aiven")
}
if a["service_host"] == "" {
return fmt.Errorf("expected to get a service_host from Aiven")
}
if a["service_type"] == "" {
return fmt.Errorf("expected to get a service_type from Aiven")
}
if a["service_name"] == "" {
return fmt.Errorf("expected to get a service_name from Aiven")
}
if a["state"] != "RUNNING" {
return fmt.Errorf("expected to get a correct state from Aiven")
}
if a["maintenance_window_time"] != "10:00:00" {
return fmt.Errorf("expected to get a service.maintenance_window_time from Aiven")
}
if a["termination_protection"] == "" {
return fmt.Errorf("expected to get a termination_protection from Aiven")
}
return nil
}
}
func testAccCheckAivenServiceResourceDestroy(s *terraform.State) error {
c := testAccProvider.Meta().(*aiven.Client)
// loop through the resources in state, verifying each service is destroyed
for _, rs := range s.RootModule().Resources {
var r []string
for _, t := range availableServiceTypes() {
r = append(r, fmt.Sprintf("aiven_%s", t))
}
if sort.SearchStrings(r, rs.Type) > 0 {
continue
}
projectName, serviceName := splitResourceID2(rs.Primary.ID)
p, err := c.Services.Get(projectName, serviceName)
if err != nil {
if err.(aiven.Error).Status != 404 {
return err
}
}
if p != nil {
return fmt.Errorf("service (%s) still exists", rs.Primary.ID)
}
}
return nil
}
func Test_flattenServiceComponents(t *testing.T) {
type args struct {
r *aiven.Service
}
tests := []struct {
name string
args args
want []map[string]interface{}
}{
{
"",
args{r: &aiven.Service{
Components: []*aiven.ServiceComponents{
{
Component: "grafana",
Host: "aive-public-grafana.aiven.io",
Port: 433,
Route: "public",
Usage: "primary",
},
},
}},
[]map[string]interface{}{
{
"component": "grafana",
"host": "aive-public-grafana.aiven.io",
"port": 433,
"route": "public",
"usage": "primary",
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := flattenServiceComponents(tt.args.r); !reflect.DeepEqual(got, tt.want) {
t.Errorf("flattenServiceComponents() = %v, want %v", got, tt.want)
}
})
}
}
| [
"\"AIVEN_PROJECT_NAME\""
] | [] | [
"AIVEN_PROJECT_NAME"
] | [] | ["AIVEN_PROJECT_NAME"] | go | 1 | 0 | |
provider/postgis/util_internal_test.go | package postgis
import (
"context"
"os"
"testing"
"github.com/jackc/pgx"
"github.com/go-spatial/tegola"
"github.com/go-spatial/tegola/provider"
"github.com/go-spatial/tegola/internal/ttools"
)
func TestReplaceTokens(t *testing.T) {
type tcase struct {
sql string
srid uint64
tile provider.Tile
expected string
}
fn := func(tc tcase) func(t *testing.T) {
return func(t *testing.T) {
sql, err := replaceTokens(tc.sql, tc.srid, tc.tile)
if err != nil {
t.Errorf("unexpected error, Expected nil Got %v", err)
return
}
if sql != tc.expected {
t.Errorf("incorrect sql,\n Expected \n \t%v\n Got \n \t%v", tc.expected, sql)
return
}
}
}
tests := map[string]tcase{
"replace BBOX": {
sql: "SELECT * FROM foo WHERE geom && !BBOX!",
srid: tegola.WebMercator,
tile: provider.NewTile(2, 1, 1, 64, tegola.WebMercator),
expected: "SELECT * FROM foo WHERE geom && ST_MakeEnvelope(-1.017529720390625e+07,-156543.03390625,156543.03390625,1.017529720390625e+07,3857)",
},
"replace BBOX with != in query": {
sql: "SELECT * FROM foo WHERE geom && !BBOX! AND bar != 42",
srid: tegola.WebMercator,
tile: provider.NewTile(2, 1, 1, 64, tegola.WebMercator),
expected: "SELECT * FROM foo WHERE geom && ST_MakeEnvelope(-1.017529720390625e+07,-156543.03390625,156543.03390625,1.017529720390625e+07,3857) AND bar != 42",
},
"replace BBOX and ZOOM 1": {
sql: "SELECT id, scalerank=!ZOOM! FROM foo WHERE geom && !BBOX!",
srid: tegola.WebMercator,
tile: provider.NewTile(2, 1, 1, 64, tegola.WebMercator),
expected: "SELECT id, scalerank=2 FROM foo WHERE geom && ST_MakeEnvelope(-1.017529720390625e+07,-156543.03390625,156543.03390625,1.017529720390625e+07,3857)",
},
"replace BBOX and ZOOM 2": {
sql: "SELECT id, scalerank=!ZOOM! FROM foo WHERE geom && !BBOX!",
srid: tegola.WebMercator,
tile: provider.NewTile(16, 11241, 26168, 64, tegola.WebMercator),
expected: "SELECT id, scalerank=16 FROM foo WHERE geom && ST_MakeEnvelope(-1.3163688815956049e+07,4.0352540420407765e+06,-1.3163058210472783e+07,4.035884647524042e+06,3857)",
},
"replace pixel_width/height and scale_denominator": {
sql: "SELECT id, !pixel_width! as width, !pixel_height! as height, !scale_denominator! as scale_denom FROM foo WHERE geom && !BBOX!",
srid: tegola.WebMercator,
tile: provider.NewTile(11, 1070, 676, 64, tegola.WebMercator),
expected: "SELECT id, 76.43702827453671 as width, 76.43702827453671 as height, 272989.38669477403 as scale_denom FROM foo WHERE geom && ST_MakeEnvelope(899816.6968478388,6.789748347570495e+06,919996.0723123164,6.809927723034973e+06,3857)",
},
}
for name, tc := range tests {
t.Run(name, fn(tc))
}
}
func TestUppercaseTokens(t *testing.T) {
type tcase struct {
str string
expected string
}
fn := func(tc tcase) func(t *testing.T) {
return func(t *testing.T) {
out := uppercaseTokens(tc.str)
if out != tc.expected {
t.Errorf("expected \n \t%v\n out \n \t%v", tc.expected, out)
return
}
}
}
tests := map[string]tcase{
"uppercase tokens": {
str: "this !lower! case !STrInG! should uppercase !TOKENS!",
expected: "this !LOWER! case !STRING! should uppercase !TOKENS!",
},
"no tokens": {
str: "no token",
expected: "no token",
},
"empty string": {
str: "",
expected: "",
},
"unclosed token": {
str: "unclosed !token",
expected: "unclosed !token",
},
}
for name, tc := range tests {
t.Run(name, fn(tc))
}
}
func TestDecipherFields(t *testing.T) {
ttools.ShouldSkip(t, TESTENV)
type tcase struct {
sql string
expectedRowCount int
expectedTags map[string]interface{}
}
cc := pgx.ConnConfig{
Host: os.Getenv("PGHOST"),
Port: 5432,
Database: os.Getenv("PGDATABASE"),
User: os.Getenv("PGUSER"),
Password: os.Getenv("PGPASSWORD"),
}
conn, err := pgx.Connect(cc)
if err != nil {
t.Fatalf("unable to connect to database: %v", err)
}
defer conn.Close()
fn := func(tc tcase) func(t *testing.T) {
return func(t *testing.T) {
rows, err := conn.Query(tc.sql)
defer rows.Close()
if err != nil {
t.Errorf("Error performing query: %v", err)
return
}
var rowCount int
for rows.Next() {
geoFieldname := "geom"
idFieldname := "id"
descriptions := rows.FieldDescriptions()
vals, err := rows.Values()
if err != nil {
t.Errorf("unexepcted error reading row Values: %v", err)
return
}
_, _, tags, err := decipherFields(context.TODO(), geoFieldname, idFieldname, descriptions, vals)
if err != nil {
t.Errorf("unexepcted error running decipherFileds: %v", err)
return
}
if len(tags) != len(tc.expectedTags) {
t.Errorf("got %v tags, expecting %v: %#v, %#v", len(tags), len(tc.expectedTags), tags, tc.expectedTags)
return
}
for k, v := range tags {
if tc.expectedTags[k] != v {
t.Errorf("missing or bad value for tag %v: %v (%T) != %v (%T)", k, v, v, tc.expectedTags[k], tc.expectedTags[k])
return
}
}
rowCount++
}
if rows.Err() != nil {
t.Errorf("unexpected err: %v", rows.Err())
return
}
if rowCount != tc.expectedRowCount {
t.Errorf("invalid row count. expected %v. got %v", tc.expectedRowCount, rowCount)
return
}
}
}
tests := map[string]tcase{
"hstore 1": {
sql: "SELECT id, tags, int8_test FROM hstore_test WHERE id = 1;",
expectedRowCount: 1,
expectedTags: map[string]interface{}{
"height": "9",
"int8_test": int64(1000888),
},
},
"hstore 2": {
sql: "SELECT id, tags, int8_test FROM hstore_test WHERE id = 2;",
expectedRowCount: 1,
expectedTags: map[string]interface{}{
"hello": "there",
"good": "day",
"int8_test": int64(8880001),
},
},
}
for name, tc := range tests {
t.Run(name, fn(tc))
}
}
| [
"\"PGHOST\"",
"\"PGDATABASE\"",
"\"PGUSER\"",
"\"PGPASSWORD\""
] | [] | [
"PGHOST",
"PGUSER",
"PGPASSWORD",
"PGDATABASE"
] | [] | ["PGHOST", "PGUSER", "PGPASSWORD", "PGDATABASE"] | go | 4 | 0 | |
datalad/core/local/tests/test_save.py | # -*- coding: utf-8 -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Test save command"""
import os
import os.path as op
from six import iteritems
from six import text_type
from datalad.utils import (
on_windows,
assure_list,
rmtree,
)
from datalad.tests.utils import (
assert_status,
assert_result_count,
assert_in,
assert_in_results,
assert_not_in,
assert_raises,
create_tree,
with_tempfile,
with_tree,
with_testrepos,
eq_,
ok_,
chpwd,
known_failure_windows,
OBSCURE_FILENAME,
SkipTest,
)
from datalad.distribution.tests.test_add import tree_arg
import datalad.utils as ut
from datalad.distribution.dataset import Dataset
from datalad.support.annexrepo import AnnexRepo
from datalad.support.exceptions import CommandError
from datalad.api import (
save,
create,
install,
)
from datalad.tests.utils import (
assert_repo_status,
skip_wo_symlink_capability,
)
@with_testrepos('.*git.*', flavors=['clone'])
def test_save(path):
ds = Dataset(path)
with open(op.join(path, "new_file.tst"), "w") as f:
f.write("something")
ds.repo.add("new_file.tst", git=True)
ok_(ds.repo.dirty)
ds.save(message="add a new file")
assert_repo_status(path, annex=isinstance(ds.repo, AnnexRepo))
with open(op.join(path, "new_file.tst"), "w") as f:
f.write("modify")
ok_(ds.repo.dirty)
ds.save(message="modified new_file.tst")
assert_repo_status(path, annex=isinstance(ds.repo, AnnexRepo))
# save works without a dataset or explicit file arguments when run from the PWD
with open(op.join(path, "new_file.tst"), "w") as f:
f.write("rapunzel")
with chpwd(path):
save(message="love rapunzel")
assert_repo_status(path, annex=isinstance(ds.repo, AnnexRepo))
# and also without `-a` when things are staged
with open(op.join(path, "new_file.tst"), "w") as f:
f.write("exotic")
ds.repo.add("new_file.tst", git=True)
with chpwd(path):
save(message="love marsians")
assert_repo_status(path, annex=isinstance(ds.repo, AnnexRepo))
files = ['one.txt', 'two.txt']
for fn in files:
with open(op.join(path, fn), "w") as f:
f.write(fn)
ds.save([op.join(path, f) for f in files])
# superfluous call to save (all was saved already), should not fail
# but report that nothing was saved
assert_status('notneeded', ds.save(message="set of new files"))
assert_repo_status(path, annex=isinstance(ds.repo, AnnexRepo))
# create subdataset
subds = ds.create('subds')
assert_repo_status(path, annex=isinstance(ds.repo, AnnexRepo))
# modify subds
with open(op.join(subds.path, "some_file.tst"), "w") as f:
f.write("something")
subds.save()
assert_repo_status(subds.path, annex=isinstance(subds.repo, AnnexRepo))
# ensure modified subds is committed
ds.save()
assert_repo_status(path, annex=isinstance(ds.repo, AnnexRepo))
# now introduce a change downstairs
subds.create('someotherds')
assert_repo_status(subds.path, annex=isinstance(subds.repo, AnnexRepo))
ok_(ds.repo.dirty)
# and save via subdataset path
ds.save('subds', version_tag='new_sub')
assert_repo_status(path, annex=isinstance(ds.repo, AnnexRepo))
tags = ds.repo.get_tags()
ok_(len(tags) == 1)
eq_(tags[0], dict(hexsha=ds.repo.get_hexsha(), name='new_sub'))
# fails when retagged, like git does
res = ds.save(version_tag='new_sub', on_failure='ignore')
assert_status('error', res)
assert_result_count(
res, 1,
action='save', type='dataset', path=ds.path,
message=('cannot tag this version: %s',
"fatal: tag 'new_sub' already exists"))
@with_tempfile()
def test_save_message_file(path):
ds = Dataset(path).create()
with assert_raises(ValueError):
ds.save("blah", message="me", message_file="and me")
create_tree(path, {"foo": "x",
"msg": "add foo"})
ds.repo.add("foo")
ds.save(message_file=op.join(ds.path, "msg"))
eq_(ds.repo.repo.git.show("--format=%s", "--no-patch"),
"add foo")
def test_renamed_file():
@with_tempfile()
def check_renamed_file(recursive, no_annex, path):
ds = Dataset(path).create(no_annex=no_annex)
create_tree(path, {'old': ''})
ds.repo.add('old')
ds.repo._git_custom_command(['old', 'new'], ['git', 'mv'])
ds.save(recursive=recursive)
assert_repo_status(path)
for recursive in False,: #, True TODO when implemented
for no_annex in True, False:
yield check_renamed_file, recursive, no_annex
@with_tempfile(mkdir=True)
def test_subdataset_save(path):
parent = Dataset(path).create()
sub = parent.create('sub')
assert_repo_status(parent.path)
create_tree(parent.path, {
"untracked": 'ignore',
'sub': {
"new": "wanted"}})
sub.save('new')
# defined state: one untracked, modified (but clean in itself) subdataset
assert_repo_status(sub.path)
assert_repo_status(parent.path, untracked=['untracked'], modified=['sub'])
# `save sub` does not save the parent!!
with chpwd(parent.path):
assert_status('notneeded', save(dataset=sub.path))
assert_repo_status(parent.path, untracked=['untracked'], modified=['sub'])
# `save -u .` saves the state change in the subdataset,
# but leaves any untracked content alone
with chpwd(parent.path):
assert_status('ok', parent.save(updated=True))
assert_repo_status(parent.path, untracked=['untracked'])
# get back to the original modified state and check that -S behaves in
# exactly the same way
create_tree(parent.path, {
'sub': {
"new2": "wanted2"}})
sub.save('new2')
assert_repo_status(parent.path, untracked=['untracked'], modified=['sub'])
@skip_wo_symlink_capability
@with_tempfile(mkdir=True)
def test_symlinked_relpath(path):
# initially ran into on OSX https://github.com/datalad/datalad/issues/2406
os.makedirs(op.join(path, "origin"))
dspath = op.join(path, "linked")
os.symlink('origin', dspath)
ds = Dataset(dspath).create()
create_tree(dspath, {
"mike1": 'mike1', # will be added from topdir
"later": "later", # later from within subdir
"d": {
"mike2": 'mike2', # to be added within subdir
}
})
# in the root of ds
with chpwd(dspath):
ds.repo.add("mike1", git=True)
ds.save(message="committing", path="./mike1")
# Let's also do it with a subdirectory as CWD, and check that relative
# paths given to a plain command (not a dataset method) are treated as
# relative to CWD
with chpwd(op.join(dspath, 'd')):
save(dataset=ds.path,
message="committing",
path="mike2")
later = op.join(op.pardir, "later")
ds.repo.add(later, git=True)
save(dataset=ds.path, message="committing", path=later)
assert_repo_status(dspath)
@skip_wo_symlink_capability
@with_tempfile(mkdir=True)
def test_bf1886(path):
parent = Dataset(path).create()
parent.create('sub')
assert_repo_status(parent.path)
# create a symlink pointing down to the subdataset, and add it
os.symlink('sub', op.join(parent.path, 'down'))
parent.save('down')
assert_repo_status(parent.path)
# now symlink pointing up
os.makedirs(op.join(parent.path, 'subdir', 'subsubdir'))
os.symlink(op.join(op.pardir, 'sub'), op.join(parent.path, 'subdir', 'up'))
parent.save(op.join('subdir', 'up'))
# 'all' to avoid the empty dir being listed
assert_repo_status(parent.path, untracked_mode='all')
# now symlink pointing 2xup, as in #1886
os.symlink(
op.join(op.pardir, op.pardir, 'sub'),
op.join(parent.path, 'subdir', 'subsubdir', 'upup'))
parent.save(op.join('subdir', 'subsubdir', 'upup'))
assert_repo_status(parent.path)
# simultaneously add a subds and a symlink pointing to it
# create subds, but don't register it
create(op.join(parent.path, 'sub2'))
os.symlink(
op.join(op.pardir, op.pardir, 'sub2'),
op.join(parent.path, 'subdir', 'subsubdir', 'upup2'))
parent.save(['sub2', op.join('subdir', 'subsubdir', 'upup2')])
assert_repo_status(parent.path)
# full replication of #1886: same as above, but from within a subdir of
# the symlink, with no reference dataset
create(op.join(parent.path, 'sub3'))
os.symlink(
op.join(op.pardir, op.pardir, 'sub3'),
op.join(parent.path, 'subdir', 'subsubdir', 'upup3'))
# need to use absolute paths
with chpwd(op.join(parent.path, 'subdir', 'subsubdir')):
save([op.join(parent.path, 'sub3'),
op.join(parent.path, 'subdir', 'subsubdir', 'upup3')])
assert_repo_status(parent.path)
@with_tree({
'1': '',
'2': '',
'3': ''})
def test_gh2043p1(path):
# this test documents the interim agreement on what should happen
# in the case documented in gh-2043
ds = Dataset(path).create(force=True)
ds.save('1')
assert_repo_status(ds.path, untracked=['2', '3'])
ds.unlock('1')
assert_repo_status(
ds.path,
# on windows we are in an unlocked branch by default, hence
# we would see no change
modified=[] if on_windows else ['1'],
untracked=['2', '3'])
# save(.) should recommit unlocked file, and not touch anything else
# this tests the second issue in #2043
with chpwd(path):
# only save modified bits
save(path='.', updated=True)
# state of the file (unlocked/locked) is committed as well, and the
# test doesn't lock the file again
assert_repo_status(ds.path, untracked=['2', '3'])
with chpwd(path):
# but when a path is given, anything that matches this path
# untracked or not is added/saved
save(path='.')
# state of the file (unlocked/locked) is committed as well, and the
# test doesn't lock the file again
assert_repo_status(ds.path)
@with_tree({
'staged': 'staged',
'untracked': 'untracked'})
def test_bf2043p2(path):
ds = Dataset(path).create(force=True)
ds.repo.add('staged')
assert_repo_status(ds.path, added=['staged'], untracked=['untracked'])
# save -u does not commit untracked content
# this tests the second issue in #2043
with chpwd(path):
save(updated=True)
assert_repo_status(ds.path, untracked=['untracked'])
@with_tree({
OBSCURE_FILENAME + u'_staged': 'staged',
OBSCURE_FILENAME + u'_untracked': 'untracked'})
def test_encoding(path):
staged = OBSCURE_FILENAME + u'_staged'
untracked = OBSCURE_FILENAME + u'_untracked'
ds = Dataset(path).create(force=True)
ds.repo.add(staged)
assert_repo_status(ds.path, added=[staged], untracked=[untracked])
ds.save(updated=True)
assert_repo_status(ds.path, untracked=[untracked])
@with_tree(**tree_arg)
def test_add_files(path):
ds = Dataset(path).create(force=True)
test_list_1 = ['test_annex.txt']
test_list_2 = ['test.txt']
test_list_3 = ['test1.dat', 'test2.dat']
test_list_4 = [op.join('dir', 'testindir'),
op.join('dir', OBSCURE_FILENAME)]
for arg in [(test_list_1[0], False),
(test_list_2[0], True),
(test_list_3, False),
(test_list_4, False)]:
# special case 4: give the dir:
if arg[0] == test_list_4:
result = ds.save('dir', to_git=arg[1])
status = ds.repo.annexstatus(['dir'])
else:
result = ds.save(arg[0], to_git=arg[1])
for a in assure_list(arg[0]):
assert_result_count(result, 1, path=text_type(ds.pathobj / a))
status = ds.repo.get_content_annexinfo(
ut.Path(p) for p in assure_list(arg[0]))
for f, p in iteritems(status):
if arg[1]:
assert p.get('key', None) is None, f
else:
assert p.get('key', None) is not None, f
@with_tree(**tree_arg)
@with_tempfile(mkdir=True)
def test_add_subdataset(path, other):
subds = create(op.join(path, 'dir'), force=True)
ds = create(path, force=True)
ok_(subds.repo.dirty)
ok_(ds.repo.dirty)
assert_not_in('dir', ds.subdatasets(result_xfm='relpaths'))
# "add everything in subds to subds"
save(dataset=subds.path)
assert_repo_status(subds.path)
assert_not_in('dir', ds.subdatasets(result_xfm='relpaths'))
# but with a base directory we add the dataset subds as a subdataset
# to ds
res = ds.save(subds.path)
assert_in_results(res, action="add", path=subds.path, refds=ds.path)
assert_in('dir', ds.subdatasets(result_xfm='relpaths'))
# create another one
other = create(other)
# install into superdataset, but don't add
other_clone = install(source=other.path, path=op.join(ds.path, 'other'))
# little dance to get the revolution-type dataset
other_clone = Dataset(other_clone.path)
ok_(other_clone.is_installed)
assert_not_in('other', ds.subdatasets(result_xfm='relpaths'))
# now add, it should pick up the source URL
ds.save('other')
# and that is why, we can reobtain it from origin
ds.uninstall('other')
ok_(not other_clone.is_installed())
ds.get('other')
ok_(other_clone.is_installed())
# CommandError: command '['git', '-c', 'receive.autogc=0', '-c', 'gc.auto=0', 'annex', 'add', '--json', '--', 'empty', 'file.txt']' failed with exitcode 1
# Failed to run ['git', '-c', 'receive.autogc=0', '-c', 'gc.auto=0', 'annex', 'add', '--json', '--', 'empty', 'file.txt'] under 'C:\\Users\\appveyor\\AppData\\Local\\Temp\\1\\datalad_temp_tree_j2mk92y3'. Exit code=1.
@known_failure_windows
@with_tree(tree={
'file.txt': 'some text',
'empty': '',
'file2.txt': 'some text to go to annex',
'.gitattributes': '* annex.largefiles=(not(mimetype=text/*))'}
)
def test_add_mimetypes(path):
ds = Dataset(path).create(force=True)
ds.repo.add('.gitattributes')
ds.repo.commit('added attributes to git explicitly')
# now test that those files will go into git/annex correspondingly
# WINDOWS FAILURE NEXT
__not_tested__ = ds.save(['file.txt', 'empty'])
assert_repo_status(path, untracked=['file2.txt'])
# But we should be able to force adding file to annex when desired
ds.save('file2.txt', to_git=False)
# check annex file status
annexinfo = ds.repo.get_content_annexinfo()
for path, in_annex in (
# An empty file is considered to be application/octet-stream,
# i.e. non-text
('empty', True),
('file.txt', False),
('file2.txt', True)):
# low-level API report -> repo path reference, no ds path
p = ds.repo.pathobj / path
assert_in(p, annexinfo)
if in_annex:
assert_in('key', annexinfo[p], p)
else:
assert_not_in('key', annexinfo[p], p)
@with_tempfile(mkdir=True)
def test_gh1597(path):
if 'APPVEYOR' in os.environ:
# issue only happens on appveyor, where Python itself implodes;
# cannot be reproduced on a real windows box
raise SkipTest(
'this test causes appveyor to crash, reason unknown')
ds = Dataset(path).create()
sub = ds.create('sub')
res = ds.subdatasets()
assert_result_count(res, 1, path=sub.path)
# now modify .gitmodules with another command
ds.subdatasets(contains=sub.path, set_property=[('this', 'that')])
# now modify low-level
with open(op.join(ds.path, '.gitmodules'), 'a') as f:
f.write('\n')
assert_repo_status(ds.path, modified=['.gitmodules'])
ds.save('.gitmodules')
# must not come under annex management
assert_not_in(
'key',
ds.repo.annexstatus(paths=['.gitmodules']).popitem()[1])
@with_tempfile(mkdir=True)
def test_gh1597_simpler(path):
ds = Dataset(path).create()
# same goes for .gitattributes
with open(op.join(ds.path, '.gitignore'), 'a') as f:
f.write('*.swp\n')
ds.save('.gitignore')
assert_repo_status(ds.path)
# put .gitattributes in some subdir and add all, should also go into Git
attrfile = op.join('subdir', '.gitattributes')
ds.repo.set_gitattributes(
[('*', dict(mycustomthing='this'))],
attrfile)
assert_repo_status(ds.path, untracked=[attrfile], untracked_mode='all')
ds.save()
assert_repo_status(ds.path)
# no annex key, not in annex
assert_not_in(
'key',
ds.repo.get_content_annexinfo([ut.Path(attrfile)]).popitem()[1])
@with_tempfile(mkdir=True)
def test_update_known_submodule(path):
def get_baseline(p):
ds = Dataset(p).create()
sub = create(text_type(ds.pathobj / 'sub'))
assert_repo_status(ds.path, untracked=['sub'])
return ds
# attempt one
ds = get_baseline(op.join(path, 'wo_ref'))
with chpwd(ds.path):
save(recursive=True)
assert_repo_status(ds.path)
# attempt two, same as above but call add via reference dataset
ds = get_baseline(op.join(path, 'w_ref'))
ds.save(recursive=True)
assert_repo_status(ds.path)
@with_tempfile(mkdir=True)
def test_add_recursive(path):
# make simple hierarchy
parent = Dataset(path).create()
assert_repo_status(parent.path)
sub1 = parent.create(op.join('down', 'sub1'))
assert_repo_status(parent.path)
sub2 = parent.create('sub2')
# next one make the parent dirty
subsub = sub2.create('subsub')
assert_repo_status(parent.path, modified=['sub2'])
res = parent.save()
assert_repo_status(parent.path)
# now add content deep in the hierarchy
create_tree(subsub.path, {'new': 'empty'})
assert_repo_status(parent.path, modified=['sub2'])
# recursive add should not even touch sub1, because
# it knows that it is clean
res = parent.save(recursive=True)
# the key action is done
assert_result_count(
res, 1, path=op.join(subsub.path, 'new'), action='add', status='ok')
# saved all the way up
assert_result_count(res, 3, action='save', status='ok')
assert_repo_status(parent.path)
@with_tree(**tree_arg)
def test_relpath_add(path):
ds = Dataset(path).create(force=True)
with chpwd(op.join(path, 'dir')):
eq_(save('testindir')[0]['path'],
op.join(ds.path, 'dir', 'testindir'))
# and now add all
save('..')
# auto-save enabled
assert_repo_status(ds.path)
@skip_wo_symlink_capability
@with_tempfile()
def test_bf2541(path):
ds = create(path)
subds = ds.create('sub')
assert_repo_status(ds.path)
os.symlink('sub', op.join(ds.path, 'symlink'))
with chpwd(ds.path):
res = save(recursive=True)
assert_repo_status(ds.path)
@with_tempfile()
def test_remove_subds(path):
ds = create(path)
ds.create('sub')
ds.create(op.join('sub', 'subsub'))
assert_repo_status(ds.path)
assert_result_count(
ds.subdatasets(), 1,
path=op.join(ds.path, 'sub'))
# all good at this point, subdataset known, dataset clean
# now have some external force wipe out the subdatasets
rmtree(op.join(ds.path, 'sub'))
assert_result_count(
ds.status(), 1,
path=op.join(ds.path, 'sub'),
state='deleted')
# a single call to save() must fix up the mess
assert_status('ok', ds.save())
assert_repo_status(ds.path)
@with_tempfile()
def test_partial_unlocked(path):
# https://github.com/datalad/datalad/issues/1651
ds = create(path)
(ds.pathobj / 'normal.txt').write_text(u'123')
ds.save()
assert_repo_status(ds.path)
ds.unlock('normal.txt')
ds.save()
# mixed git and git-annex'ed files
(ds.pathobj / 'ingit.txt').write_text(u'234')
ds.save(to_git=True)
(ds.pathobj / 'culprit.txt').write_text(u'345')
(ds.pathobj / 'ingit.txt').write_text(u'modified')
ds.save()
assert_repo_status(ds.path)
# but now a change in the attributes
ds.unlock('culprit.txt')
ds.repo.set_gitattributes([
('*', {'annex.largefiles': 'nothing'})])
ds.save()
assert_repo_status(ds.path)
@with_tree({'.gitattributes': "* annex.largefiles=(largerthan=4b)",
"foo": "in annex"})
def test_save_partial_commit_shrinking_annex(path):
# This is a variation on the test above. The main difference is that there
# are other staged changes in addition to the unlocked file.
ds = create(path, force=True)
ds.save()
assert_repo_status(ds.path)
ds.unlock(path="foo")
create_tree(ds.path, tree={"foo": "a", "staged": ""},
remove_existing=True)
# Even without this staged change, a plain 'git commit -- foo' would fail
# with git-annex's partial index error, but rev-save (or more specifically
# GitRepo.save_) drops the pathspec if there are no staged changes.
ds.repo.add("staged", git=True)
if ds.repo.supports_unlocked_pointers:
ds.save(path="foo")
assert_repo_status(ds.path, added=["staged"])
else:
# Unlike the obsolete interface.save, save doesn't handle a partial
# commit if there were other staged changes.
with assert_raises(CommandError) as cm:
ds.save(path="foo")
assert_in("partial commit", str(cm.exception))
@with_tempfile()
def test_path_arg_call(path):
ds = create(path)
for testfile in (
ds.pathobj / 'abs.txt',
ds.pathobj / 'rel.txt'):
testfile.write_text(u'123')
# we used to resolve relative paths against a dataset just given by
# a path, but we no longer do that
#save(dataset=ds.path, path=[testfile.name], to_git=True)
save(dataset=ds, path=[testfile.name], to_git=True)
@with_tree(tree={
'file.txt': 'some text',
'd1': {
'subrepo': {
'subfile': 'more repo text',
},
},
'd2': {
'subds': {
'subfile': 'more ds text',
},
},
})
def test_surprise_subds(path):
# https://github.com/datalad/datalad/issues/3139
ds = create(path, force=True)
# a lonely repo without any commit
somerepo = AnnexRepo(path=op.join(path, 'd1', 'subrepo'), create=True)
# a proper subdataset
subds = create(op.join(path, 'd2', 'subds'), force=True)
# save non-recursive
ds.save(recursive=False)
# the content of both subds and subrepo is not added to their
# respective parents as no --recursive was given
assert_repo_status(subds.path, untracked=['subfile'])
assert_repo_status(somerepo.path, untracked=['subfile'])
# however, while the subdataset is added (and reported as modified
# because its content is still untracked) the subrepo
# cannot be added (it has no commit)
# worse: its untracked file has been added to the superdataset
# XXX the next conditional really says: if the subrepo is not in an
# adjusted branch: #datalad/3178 (that would have a commit)
if not on_windows:
assert_repo_status(ds.path, modified=['d2/subds'])
assert_in(ds.repo.pathobj / 'd1' / 'subrepo' / 'subfile',
ds.repo.get_content_info())
# with proper subdatasets, all evil is gone
assert_not_in(ds.repo.pathobj / 'd2' / 'subds' / 'subfile',
ds.repo.get_content_info())
@with_tree({"foo": ""})
def test_bf3285(path):
ds = Dataset(path).create(force=True)
# Note: Using repo.pathobj matters in the "TMPDIR=/var/tmp/sym\ link" case
# because assert_repo_status is based off of {Annex,Git}Repo.path, which is
# the realpath'd path (from the processing in _flyweight_id_from_args).
subds = create(ds.repo.pathobj.joinpath("subds"))
# Explicitly saving a path does not save an untracked, unspecified
# subdataset.
ds.save("foo")
assert_repo_status(ds.path, untracked=[subds.path])
| [] | [] | [] | [] | [] | python | 0 | 0 | |
src/com/xilinx/rapidwright/edif/EDIFNetlist.java | /*
*
* Copyright (c) 2017 Xilinx, Inc.
* All rights reserved.
*
* Author: Chris Lavin, Xilinx Research Labs.
*
* This file is part of RapidWright.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
*
*/
package com.xilinx.rapidwright.edif;
import java.io.BufferedWriter;
import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.regex.Pattern;
import java.util.Queue;
import com.xilinx.rapidwright.design.Design;
import com.xilinx.rapidwright.design.Net;
import com.xilinx.rapidwright.design.Unisim;
import com.xilinx.rapidwright.device.Device;
import com.xilinx.rapidwright.device.Series;
import com.xilinx.rapidwright.tests.CodePerfTracker;
import com.xilinx.rapidwright.util.FileTools;
import com.xilinx.rapidwright.util.MessageGenerator;
/**
* Top level object for a (logical) EDIF netlist.
*
* Created on: May 11, 2017
*/
public class EDIFNetlist extends EDIFName {
private Map<String, EDIFLibrary> libraries;
private EDIFDesign design;
private EDIFCellInst topCellInstance = null;
private List<String> comments;
private Map<String,EDIFPropertyValue> metax;
private Map<String,String> parentNetMap;
private Map<String, ArrayList<EDIFHierPortInst>> physicalNetPinMap;
protected int nameSpaceUniqueCount = 0;
private transient Device device;
private boolean DEBUG = false;
public EDIFNetlist(String name){
super(name);
init();
}
protected EDIFNetlist(){
init();
}
private void init(){
libraries = getNewMap();
comments = new ArrayList<>();
metax = getNewMap();
}
/**
* Adds date and username build comments such as:
* (comment "Built on 'Mon May 1 15:17:36 PDT 2017'")
* (comment "Built by 'clavin'")
*/
public void generateBuildComments(){
addComment("Built on '"+FileTools.getTimeString()+"'");
addComment("Built by '"+System.getenv().get("USER")+"'");
}
/**
* Adds the library to this netlist. Checks for naming collisions
* and throws a RuntimeException if it occurs.
* @param library The library to add.
* @return The library that was added.
*/
public EDIFLibrary addLibrary(EDIFLibrary library){
library.setNetlist(this);
EDIFLibrary collision = libraries.put(library.getName(), library);
if(collision != null){
throw new RuntimeException("ERROR: EDIFNetlist already has "
+ "library named " + library.getName() );
}
return library;
}
public EDIFLibrary getLibrary(String name){
return libraries.get(name);
}
public EDIFLibrary getHDIPrimitivesLibrary(){
EDIFLibrary primLib = libraries.get(EDIFTools.EDIF_LIBRARY_HDI_PRIMITIVES_NAME);
if(primLib == null){
primLib = addLibrary(new EDIFLibrary(EDIFTools.EDIF_LIBRARY_HDI_PRIMITIVES_NAME));
}
return primLib;
}
/**
* Will create or get the specified unisim cell and ensure it is added to the HDI
* primitives library. If the cell is already in the library, it will simply get it
* and return it.
* @param unisim The desired Unisim cell type.
* @return The current unisim cell in the HDI primitive library for this netlist.
*/
public EDIFCell getHDIPrimitive(Unisim unisim){
EDIFLibrary lib = getHDIPrimitivesLibrary();
EDIFCell cell = lib.getCell(unisim.name());
if(cell == null){
cell = Design.getUnisimCell(unisim);
}
return lib.addCell(cell);
}
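// Illustrative sketch (not part of the original source): a possible call to
// getHDIPrimitive(); the specific Unisim constant used here is an assumption.
//   EDIFCell lut6 = netlist.getHDIPrimitive(Unisim.LUT6);
//   // lut6 is now present in (or was fetched from) the HDI primitives library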
public EDIFLibrary getWorkLibrary(){
EDIFLibrary primLib = libraries.get(EDIFTools.EDIF_LIBRARY_WORK_NAME);
if(primLib == null){
primLib = addLibrary(new EDIFLibrary(EDIFTools.EDIF_LIBRARY_WORK_NAME));
}
return primLib;
}
public EDIFLibrary removeLibrary(String name){
return libraries.remove(name);
}
public void renameNetlistAndTopCell(String newName){
this.setName(newName);
this.updateEDIFRename();
design.setName(newName);
design.updateEDIFRename();
design.getTopCell().setName(newName);
design.getTopCell().updateEDIFRename();
if(topCellInstance != null){
topCellInstance.setName(newName);
topCellInstance.updateEDIFRename();
}
}
public void removeUnusedCellsFromWorkLibrary(){
HashMap<String,EDIFCell> cellsToRemove = new HashMap<>(getWorkLibrary().getCellMap());
cellsToRemove.remove(getTopCell().getLegalEDIFName());
for(EDIFHierCellInst i : getAllDescendants("", null, false)){
if(i.getCellType().getLibrary().getName().equals(EDIFTools.EDIF_LIBRARY_WORK_NAME)){
cellsToRemove.remove(i.getCellType().getLegalEDIFName());
}
}
for(String name : cellsToRemove.keySet()){
getWorkLibrary().removeCell(name);
}
}
/**
* Iterates through libraries to find first cell with matching name and
* returns it.
* @param legalEdifName The legal EDIF name of the cell to find.
* @return The first occurring cell with the provided name.
*/
public EDIFCell getCell(String legalEdifName){
for(EDIFLibrary lib : getLibraries()){
EDIFCell c = lib.getCell(legalEdifName);
if(c != null) return c;
}
return null;
}
/**
* @return the design
*/
public EDIFDesign getDesign() {
return design;
}
/**
* @param design the design to set
*/
public void setDesign(EDIFDesign design) {
this.design = design;
}
public Device getDevice() {
return device;
}
public void setDevice(Device device) {
this.device = device;
}
public EDIFCell getTopCell(){
return design.getTopCell();
}
public EDIFCellInst getTopCellInst(){
if(topCellInstance == null){
topCellInstance = getTopCell().createCellInst("top", null);
}
return topCellInstance;
}
public boolean addComment(String comment){
return comments.add(comment);
}
public EDIFPropertyValue addMetax(String key, EDIFPropertyValue value){
return metax.put(key, value);
}
/**
* @return the comments
*/
public List<String> getComments() {
return comments;
}
/**
* Migrates all cells in the provided library
* into the standard work library.
* @param library The library with cells to be migrated to work.
*/
public void migrateToWorkLibrary(String library) {
EDIFLibrary work = getWorkLibrary();
EDIFLibrary oldWork = getLibrary(library);
List<EDIFCell> toRemove = new ArrayList<>(oldWork.getCells());
for (EDIFCell c : toRemove) {
work.addCell(c);
oldWork.removeCell(c);
}
removeLibrary(library);
}
/**
* Migrates all libraries except HDI primitives and work to
* the work library.
*/
public void consolidateAllToWorkLibrary() {
List<EDIFLibrary> librariesToMigrate = new ArrayList<>();
for (EDIFLibrary l : getLibraries()) {
if (!l.isHDIPrimitivesLibrary() && !l.isWorkLibrary()) {
librariesToMigrate.add(l);
}
}
for (EDIFLibrary l : librariesToMigrate) {
migrateToWorkLibrary(l.getName());
}
}
public void migrateCellAndSubCells(EDIFCell cell){
Queue<EDIFCell> cells = new LinkedList<>();
cells.add(cell);
while(!cells.isEmpty()){
EDIFCell curr = cells.poll();
EDIFLibrary destLib = getLibrary(curr.getLibrary().getName());
if(destLib == null){
if(curr.getLibrary().getName().equals(EDIFTools.EDIF_LIBRARY_HDI_PRIMITIVES_NAME)){
destLib = getHDIPrimitivesLibrary();
}else{
destLib = getWorkLibrary();
}
}
if(!destLib.containsCell(curr)){
destLib.addCell(curr);
}
for(EDIFCellInst inst : curr.getCellInsts()){
cells.add(inst.getCellType());
}
}
}
/**
* Will change the netlist name and the top cell name.
* @param newName New name for the netlist
*/
public void changeTopName(String newName){
this.setName(newName);
this.design.setName(newName);
EDIFCell top = this.design.getTopCell();
EDIFLibrary lib = top.getLibrary();
top.getLibrary().removeCell(top);
top.setName(newName);
lib.addCell(top);
}
/**
* @return the libraries
*/
public Map<String, EDIFLibrary> getLibrariesMap() {
return libraries;
}
public Collection<EDIFLibrary> getLibraries(){
return libraries.values();
}
public void exportEDIF(String fileName){
BufferedWriter bw = null;
//for(EDIFLibrary lib : getLibraries()){
// lib.ensureValidEDIFCellNames();
//}
try {
bw = new BufferedWriter(new FileWriter(fileName));
bw.write("(edif ");
exportEDIFName(bw);
bw.write("\n");
bw.write(" (edifversion 2 0 0)\n");
bw.write(" (edifLevel 0)\n");
bw.write(" (keywordmap (keywordlevel 0))\n");
bw.write("(status\n");
bw.write(" (written\n");
bw.write(" (timeStamp ");
SimpleDateFormat formatter = new SimpleDateFormat("yyyy MM dd HH mm ss");
bw.write(formatter.format(new java.util.Date()));
bw.write(")\n");
bw.write(" (program \""+Device.FRAMEWORK_NAME+"\" (version \"" + Device.RAPIDWRIGHT_VERSION + "\"))\n");
for(String comment : getComments()){
bw.write(" (comment \"");
bw.write(comment);
bw.write("\")\n");
}
for(Entry<String,EDIFPropertyValue> e : metax.entrySet()){
bw.write("(metax ");
bw.write(e.getKey());
bw.write(" ");
e.getValue().writeEDIFString(bw);
bw.write(")\n");
}
bw.write(" )\n");
bw.write(")\n");
getHDIPrimitivesLibrary().exportEDIF(bw);
for(EDIFLibrary lib : getLibrariesMap().values()){
if(lib.getName().equals(EDIFTools.EDIF_LIBRARY_HDI_PRIMITIVES_NAME)) continue;
lib.exportEDIF(bw);
}
bw.write("(comment \"Reference To The Cell Of Highest Level\")\n\n");
bw.write(" (design ");
EDIFDesign design = getDesign();
design.exportEDIFName(bw);
bw.write("\n (cellref " + design.getTopCell().getLegalEDIFName() + " (libraryref ");
bw.write(design.getTopCell().getLibrary().getLegalEDIFName() +"))\n");
design.exportEDIFProperties(bw, " ");
bw.write(" )\n");
bw.write(")\n");
bw.flush();
bw.close();
} catch (IOException e) {
MessageGenerator.briefError("ERROR: Failed to export EDIF file " + fileName);
e.printStackTrace();
}
}
/**
* Based on a hierarchical string, this method will get the instance corresponding
* to the name provided.
* @param name Hierarchical name of the instance, for example: 'clk_wiz/inst/bufg0'
* @return The instance corresponding to the provided name. If the name string is empty,
* it returns the top cell instance.
*/
public EDIFCellInst getCellInstFromHierName(String name){
EDIFCellInst currInst = getTopCellInst();
if(name.equals("")) return currInst;
String[] parts = name.split(EDIFTools.EDIF_HIER_SEP);
for(int i=0; i < parts.length; i++){
EDIFCellInst checkInst = currInst.getCellType().getCellInst(parts[i]);
// Someone named their instance with hierarchy separators, joy!
if(checkInst == null){
StringBuilder sb = new StringBuilder(parts[i]);
i++;
while(checkInst == null && i < parts.length){
sb.append(EDIFTools.EDIF_HIER_SEP);
sb.append(parts[i]);
checkInst = currInst.getCellType().getCellInst(sb.toString());
if(checkInst == null) i++;
}
}
currInst = checkInst;
}
return currInst;
}
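// Illustrative sketch (not part of the original source), using the hierarchy
// example from the javadoc above; 'clk_wiz/inst/bufg0' is a hypothetical path.
//   EDIFCellInst bufg = netlist.getCellInstFromHierName("clk_wiz/inst/bufg0");
//   EDIFCellInst top  = netlist.getCellInstFromHierName(""); // top cell instance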
/**
* Based on a hierarchical string name, this method gets and returns the net inside
* the instance.
* @param netName The hierarchical name of the net to get, for example: 'inst0/inst1/inst2/net0'
* @return The hierarchical net, or null if none could be found.
*/
public EDIFNet getNetFromHierName(String netName){
EDIFHierNet net = getHierNetFromName(netName);
return net == null ? null : net.getNet();
}
/**
* Gets the hierarchical port instance object from the full name.
* @param hierPortInstName Full hierarchical name of the port instance.
* @return The port instance of interest or null if none could be found.
*/
public EDIFHierPortInst getHierPortInstFromName(String hierPortInstName){
String instName = "";
String localPortName = hierPortInstName;
int lastSep = hierPortInstName.lastIndexOf(EDIFTools.EDIF_HIER_SEP);
if(lastSep != -1){
instName = hierPortInstName.substring(0,lastSep);
localPortName = hierPortInstName.substring(lastSep+1);
}
EDIFCellInst inst = getCellInstFromHierName(instName);
if(inst == null) return null;
EDIFPortInst port = inst.getPortInst(localPortName);
if(port == null) return null;
String parentInstName = getHierParentName(instName);
EDIFHierPortInst hierPortInst = new EDIFHierPortInst(parentInstName,port);
return hierPortInst;
}
/**
* Looks at the hierarchical name and returns the name of the parent instance above. For example:
* "block0/operator0" -> "block0"; "block0" -> ""; "" -> ""
* @param hierReferenceName Hierarchical reference name
* @return The hierarchical name of the parent instance, an empty string if there is none, or null if the input is null.
*/
private String getHierParentName(String hierReferenceName){
if(hierReferenceName == null) return null;
if(hierReferenceName.length() == 0) return hierReferenceName;
int lastSep = hierReferenceName.lastIndexOf(EDIFTools.EDIF_HIER_SEP);
if(lastSep != -1){
return hierReferenceName.substring(0,lastSep);
}
return "";
}
/**
* Gets the hierarchical net from the netname provided. Returns the wrapped EDIFNet, with the hierarchical
* String in {@link EDIFHierNet}.
* @param netName Full hierarchical name of the net to retrieve.
* @return The absolute net with hierarchical name, or null if none could be found.
*/
public EDIFHierNet getHierNetFromName(String netName){
String instName = "";
String localNetName = netName;
int lastSep = netName.lastIndexOf(EDIFTools.EDIF_HIER_SEP);
if(lastSep != -1){
instName = netName.substring(0,lastSep);
localNetName = netName.substring(lastSep+1);
}
EDIFCellInst i = getCellInstFromHierName(instName);
EDIFNet net = i == null ? null : i.getCellType().getNet(localNetName);
if(i == null || net == null){
// Maybe instance or net name contains '/', try a few different alternatives
while(net == null && instName.contains(EDIFTools.EDIF_HIER_SEP)){
lastSep = instName.lastIndexOf(EDIFTools.EDIF_HIER_SEP);
instName = netName.substring(0,lastSep);
localNetName = netName.substring(lastSep+1);
i = getCellInstFromHierName(instName);
net = i == null ? null : i.getCellType().getNet(localNetName);
}
if(net == null){
return null;
}
}
EDIFHierNet an = new EDIFHierNet(instName, net);
return an;
}
public Net getPhysicalNetFromPin(String parentHierInstName, EDIFPortInst p, Design d){
String hierarchicalNetName = null;
if(parentHierInstName.equals("")){
hierarchicalNetName = p.getNet().getName();
}else{
hierarchicalNetName = parentHierInstName + EDIFTools.EDIF_HIER_SEP + p.getNet().getName();
}
if(hierarchicalNetName.equals(EDIFTools.LOGICAL_GND_NET_NAME)) return d.getGndNet();
if(hierarchicalNetName.equals(EDIFTools.LOGICAL_VCC_NET_NAME)) return d.getVccNet();
Map<String,String> parentNetMap = getParentNetMap();
String parentNetName = parentNetMap.get(hierarchicalNetName);
Net n = d.getNet(parentNetName);
if(n == null){
if(parentNetName == null){
// Maybe it is GND/VCC
EDIFPortInst src = p.getNet().getSourcePortInsts(false).get(0);
if(src.getCellInst() != null){
String cellType = src.getCellInst().getCellType().getName();
if(cellType.equals("GND")) return d.getGndNet();
if(cellType.equals("VCC")) return d.getVccNet();
}
}
EDIFNet logicalNet = getNetFromHierName(parentNetName);
List<EDIFPortInst> eprList = logicalNet.getSourcePortInsts(false);
if(eprList.size() > 1) throw new RuntimeException("ERROR: Bad assumption on net, has two sources.");
if(eprList.size() == 1){
String cellTypeName = eprList.get(0).getCellInst().getCellType().getName();
if(cellTypeName.equals("GND")){
return d.getGndNet();
}else if(cellTypeName.equals("VCC")){
return d.getVccNet();
}
}
// If size is 0, assume top level port in an OOC design
n = d.createNet(parentNetName);
n.setLogicalNet(logicalNet);
}
return n;
}
/**
* Searches all EDIFCellInst objects to find those with matching names
* against the wildcard pattern.
* @param wildcardPattern Search pattern that includes alphanumeric and wildcards (*).
* @return The list of all matching EDIFHierCellInst
*/
public List<EDIFHierCellInst> findCellInsts(String wildcardPattern){
return getAllDescendants("", wildcardPattern, false);
}
/**
* Searches all lower levels of hierarchy to find all leaf descendants. It returns a
* list of all leaf cells that fall under the hierarchy of the provided instance name.
* @param instanceName Name of the instance to start searching from.
* @return A list of all leaf cell instances or null if the instanceName was not found.
*/
public List<EDIFHierCellInst> getAllLeafDescendants(String instanceName){
List<EDIFHierCellInst> leafCells = new ArrayList<>();
EDIFCellInst currTop = getCellInstFromHierName(instanceName);
Queue<EDIFHierCellInst> toProcess = new LinkedList<EDIFHierCellInst>();
EDIFHierCellInst eci = new EDIFHierCellInst(EDIFTools.getHierarchicalRootFromPinName(instanceName), currTop);
toProcess.add(eci);
while(!toProcess.isEmpty()){
EDIFHierCellInst curr = toProcess.poll();
if(curr.getCellType().isPrimitive()){
leafCells.add(curr);
}else{
for(EDIFCellInst i : curr.getInst().getCellType().getCellInsts()){
toProcess.add(new EDIFHierCellInst(curr.getFullHierarchicalInstName(), i));
}
}
}
return leafCells;
}
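// Illustrative sketch (not part of the original source): passing an empty
// instance name starts the search at the top cell instance, so this would
// collect every leaf cell instance in the netlist.
//   List<EDIFHierCellInst> leaves = netlist.getAllLeafDescendants("");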
private String convertWildcardToRegex(String wildcardPattern){
if(wildcardPattern == null) return null;
StringBuilder sb = new StringBuilder();
for(int i=0; i < wildcardPattern.length(); i++){
char c = wildcardPattern.charAt(i);
switch (c) {
case '*':
sb.append(".*");
break;
case '?': case '\\': case '{': case '}': case '|':
case '^': case '$': case '(': case ')': case '[': case ']':
sb.append("\\");
sb.append(c);
break;
default:
sb.append(c);
}
}
sb.append("$");
return sb.toString();
}
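// Example of the conversion performed above (not part of the original source):
// the wildcard pattern "mem/*ram*" becomes the regex "mem/.*ram.*$", i.e. each
// '*' turns into ".*", characters such as '?', '(' and '[' are escaped, and a
// trailing '$' is appended.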
public List<EDIFHierCellInst> getAllLeafDescendants(String instanceName, String wildcardPattern){
return getAllDescendants(instanceName, wildcardPattern, true);
}
/**
* Searches all lower levels of hierarchy to find descendants. It returns a
* list of all cell instances that fall under the hierarchy of the provided instance name.
* @param instanceName Name of the instance to start searching from.
* @param wildcardPattern if non-null, filters results by matching wildcard pattern
* @param leavesOnly Flag indicating if only leaf cells should be included
* @return A list of matching descendant cell instances, or null if the instanceName was not found.
*/
public List<EDIFHierCellInst> getAllDescendants(String instanceName, String wildcardPattern, boolean leavesOnly){
List<EDIFHierCellInst> children = new ArrayList<>();
EDIFCellInst eci = getCellInstFromHierName(instanceName);
if(eci == null) return null;
Queue<EDIFHierCellInst> q = new LinkedList<>();
q.add(new EDIFHierCellInst(instanceName, eci));
String pattern = convertWildcardToRegex(wildcardPattern);
Pattern pat = wildcardPattern != null ? Pattern.compile(pattern) : null;
while(!q.isEmpty()){
EDIFHierCellInst i = q.poll();
for(EDIFCellInst child : i.getInst().getCellType().getCellInsts()){
String fullName = "";
if(!i.isTopLevelInst()){
fullName = i.getFullHierarchicalInstName();
}
EDIFHierCellInst newCell = new EDIFHierCellInst(fullName, child);
if(newCell.getInst().getCellType().isPrimitive()){
if(pat != null && !pat.matcher(newCell.getFullHierarchicalInstName()).matches()){
continue;
}
children.add(newCell);
} else{
q.add(newCell);
if(!leavesOnly) {
if(pat != null && !pat.matcher(newCell.getFullHierarchicalInstName()).matches()){
continue;
}
children.add(newCell);
}
}
}
}
return children;
}
private static boolean isDeviceNullPrinted = false;
private boolean isTransformPrim(EDIFHierPortInst p){
EDIFCellInst cellInst = p.getPortInst().getCellInst();
if(!cellInst.getCellType().isPrimitive()) return false;
Unisim u = Unisim.valueOf(p.getPortInst().getCellInst().getCellType().getName());
if(device == null && !isDeviceNullPrinted){
System.err.println("WARNING: EDIFNetlist.device==null when calling isTransformPrim(), results may be incorrect");
isDeviceNullPrinted = true;
}
return u.hasTransform(device == null ? Series.UltraScale : device.getSeries());
}
/**
* TODO - Revisit this code, simplify, remove duplication
* Gets all equivalent nets in the netlist from the provided net name.
* The returned list also includes the provided netName.
* @param netName Full hierarchical netname to use as a starting point in the search.
* @return A list of all electrically connected nets in the netlist that are equivalent.
* The list is composed of all full hierarchical net names or an empty list if netName is invalid.
*/
public List<String> getNetAliases(String netName){
if(physicalNetPinMap == null){
physicalNetPinMap = new HashMap<String,ArrayList<EDIFHierPortInst>>();
}
String parentNetName = null;
ArrayList<EDIFHierPortInst> leafCellPins = new ArrayList<>();
List<String> aliases = new ArrayList<>();
aliases.add(netName);
EDIFHierNet an = getHierNetFromName(netName);
if(an == null) return Collections.emptyList();
Queue<EDIFHierPortInst> queue = new LinkedList<>();
EDIFPortInst source = null;
for(EDIFPortInst p : an.getNet().getPortInsts()){
EDIFHierPortInst absPortInst = new EDIFHierPortInst(an.getHierarchicalInstName(), p);
// Checks if cell is primitive or black box
boolean isCellPin = p.getCellInst() != null && p.getCellInst().getCellType().getCellInsts().size() == 0;
if(isCellPin){
leafCellPins.add(absPortInst);
}
if((p.getCellInst() == null && p.isInput()) || (isCellPin && p.isOutput())){
source = p;
parentNetName = netName;
}
queue.add(absPortInst);
}
while(!queue.isEmpty()){
EDIFHierPortInst p = queue.poll();
EDIFNet otherNet = null;
if(p.getPortInst().getCellInst() == null){
// Moving up in hierarchy
EDIFCellInst inst = getCellInstFromHierName(p.getHierarchicalInstName());
EDIFPortInst epr = inst.getPortInst(p.getPortInst().getPortInstNameFromPort());
if(epr == null){
if(parentNetName == null && getTopCellInst().equals(inst) && p.getPortInst().isOutput()){
source = p.getPortInst();
parentNetName = p.getPortInst().getNet().getName();
}
continue;
}
otherNet = epr.getNet();
int lastIndex = p.getHierarchicalInstName().lastIndexOf(EDIFTools.EDIF_HIER_SEP);
String instName = lastIndex > 0 ? p.getHierarchicalInstName().substring(0, lastIndex) : "";
EDIFCellInst checkInst = getCellInstFromHierName(instName);
while(checkInst == null && lastIndex > 0){
// Check for cells with hierarchy separator in their name
lastIndex = p.getHierarchicalInstName().lastIndexOf(EDIFTools.EDIF_HIER_SEP, lastIndex-1);
instName = p.getHierarchicalInstName().substring(0, lastIndex);
checkInst = getCellInstFromHierName(instName);
}
StringBuilder sb = new StringBuilder(instName);
if(!instName.equals("")) sb.append(EDIFTools.EDIF_HIER_SEP);
sb.append(otherNet);
aliases.add(sb.toString());
for(EDIFPortInst opr : otherNet.getPortInsts()){
if(epr.getPort() != opr.getPort()){ // Here we really want to compare object references!
EDIFHierPortInst absPortInst = new EDIFHierPortInst(instName, opr);
if(epr.getCellInst().getCellType().isPrimitive()){
leafCellPins.add(absPortInst);
if(parentNetName == null && epr.isOutput()) {
source = epr;
parentNetName = netName;
}
}
queue.add(absPortInst);
}
}
}else if(p.isOutput() && isTransformPrim(p)){
if(p.getPortInst().getPort().getWidth() > 1){
aliases.add(p.getTransformedNetName());
}else{
aliases.add(p.toString());
}
}else{
// Moving down in hierarchy
EDIFPort port = p.getPortInst().getPort();
if(port != null && port.getParentCell().hasContents()){
otherNet = port.getParentCell().getInternalNet(p.getPortInst());
if(otherNet == null){
// Looks unconnected
continue;
}
StringBuilder sb = new StringBuilder(p.getHierarchicalInstName());
if(!p.getHierarchicalInstName().equals("")) sb.append(EDIFTools.EDIF_HIER_SEP);
sb.append(p.getPortInst().getCellInst().getName());
String instName = sb.toString();
sb.append(EDIFTools.EDIF_HIER_SEP);
sb.append(otherNet.getName());
aliases.add(sb.toString());
for(EDIFPortInst ipr : otherNet.getPortInsts()){
if(port != ipr.getPort()){ // Here we really want to compare object references!
EDIFHierPortInst absPortInst = new EDIFHierPortInst(instName, ipr);
boolean isCellPin = ipr.getCellInst() != null && ipr.getCellInst().getCellType().isPrimitive();
if(isCellPin){
leafCellPins.add(absPortInst);
}
if((ipr.getCellInst() == null && ipr.isInput()) || (isCellPin && ipr.isOutput())){
source = ipr;
parentNetName = netName;
}
queue.add(absPortInst);
}
}
}
}
}
if(parentNetName != null){
String cellType = source.getCellInst() == null ? "" : source.getCellInst().getCellType().getName();
String staticNetName = cellType.equals("GND") ? Net.GND_NET : (cellType.equals("VCC") ? Net.VCC_NET : null);
if(staticNetName != null){
ArrayList<EDIFHierPortInst> existing = physicalNetPinMap.get(staticNetName);
if(existing == null)
physicalNetPinMap.put(staticNetName, leafCellPins);
else
existing.addAll(leafCellPins);
}else{
physicalNetPinMap.put(parentNetName, leafCellPins);
}
} else if(an.getNet().getPortInsts().size() == 0){
return aliases;
} else{
throw new RuntimeException("ERROR: Couldn't identify parent net, no output pins (or top level output port) found.");
}
return aliases;
}
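/*
* Illustrative usage sketch (not part of the original source): resolving the aliases and
* the parent (physical) net for a hierarchical net name. The file name and net name below
* are hypothetical; the calls mirror the API used elsewhere in this class and in main().
*
*   EDIFParser parser = new EDIFParser("design.edf");          // hypothetical EDIF file
*   EDIFNetlist netlist = parser.parseEDIFNetlist();
*   List<String> aliases = netlist.getNetAliases("top/sub/data_3");   // hypothetical net
*   String parent = netlist.getParentNetName("top/sub/data_3");
*   // 'aliases' lists every hierarchical name that is electrically the same net;
*   // 'parent' is the name of the driving net (or null if none could be found).
*/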
/**
* Gets the canonical net for this net name. This corresponds to the driving net
* in the netlist and/or the physical net name.
* @param netAlias An absolute net name alias (from logical netlist)
* @return The physical/parent net name or null if none could be found.
*/
public String getParentNetName(String netAlias){
return getParentNetMap().get(netAlias);
}
public Map<String,String> getParentNetMap(){
if(parentNetMap == null){
generateParentNetMap();
}
return parentNetMap;
}
public void resetParentNetMap(){
parentNetMap = null;
physicalNetPinMap = null;
}
private void generateParentNetMap(){
long start = 0;
if(DEBUG){
start = System.currentTimeMillis();
}
if(parentNetMap == null){
parentNetMap = new HashMap<>();
}
if(physicalNetPinMap == null){
physicalNetPinMap = new HashMap<String,ArrayList<EDIFHierPortInst>>();
}
EDIFCell c = getTopCell();
Queue<EDIFHierPortInst> queue = new LinkedList<>();
// All parent nets are either top-level inputs or outputs of leaf cells
// Here we gather all top-level inputs
for(EDIFNet n : c.getNets()){
for(EDIFPortInst p : n.getPortInsts()){
if(p.isTopLevelPort() && p.isInput()){
queue.add(new EDIFHierPortInst("", p));
}
}
}
// Here we search for all leaf cell insts
Queue<EDIFHierCellInst> instQueue = new LinkedList<>();
instQueue.add(new EDIFHierCellInst("", getTopCellInst()));
while(!instQueue.isEmpty()){
EDIFHierCellInst currInst = instQueue.poll();
for(EDIFCellInst eci : currInst.getInst().getCellType().getCellInsts()){
// Checks if cell is primitive or black box
if(eci.getCellType().getCellInsts().size() == 0 && eci.getCellType().getNets().size() == 0){
for(EDIFPortInst portInst : eci.getPortInsts()){
if(portInst.isOutput()){
queue.add(new EDIFHierPortInst(currInst.getFullHierarchicalInstName(), portInst));
}
}
}else{
String hName = currInst.getFullHierarchicalInstName();
instQueue.add(new EDIFHierCellInst(hName,eci));
}
}
}
for(EDIFHierPortInst pr : queue){
String parentNetName = pr.getHierarchicalNetName();
for(String alias : getNetAliases(parentNetName)){
parentNetMap.put(alias, parentNetName);
}
}
if(DEBUG){
long stop = System.currentTimeMillis();
System.out.println("generateParentNetMap() runtime: " + (stop-start)/1000.0f +" seconds ");
}
}
/**
* Traverses the netlist and produces a list of all primitive leaf cell instances.
* @return A list of all primitive leaf cell instances.
*/
public List<EDIFCellInst> getAllLeafCellInstances(){
List<EDIFCellInst> insts = new ArrayList<>();
Queue<EDIFCellInst> q = new LinkedList<>();
q.add(getTopCellInst());
while(!q.isEmpty()){
EDIFCellInst curr = q.poll();
for(EDIFCellInst eci : curr.getCellType().getCellInsts()){
if(eci.getCellType().isPrimitive())
insts.add(eci);
else
q.add(eci);
}
}
return insts;
}
/**
* @return the physicalNetPinMap
*/
public Map<String, ArrayList<EDIFHierPortInst>> getPhysicalNetPinMap() {
if(physicalNetPinMap == null){
generateParentNetMap();
}
return physicalNetPinMap;
}
public List<EDIFHierPortInst> getPhysicalPins(String parentNetName) {
return getPhysicalNetPinMap().get(parentNetName);
}
/**
* Gets all the primitive pin sinks that are strict descendants of
* this provided net.
* @param net The net to trace to its sinks.
* @return The list of all sink pins on primitive cells that are descendants
* of the provided net
*/
public List<EDIFHierPortInst> getSinksFromNet(EDIFHierNet net){
Queue<EDIFHierNet> q = new LinkedList<>();
q.add(net);
ArrayList<EDIFHierPortInst> sinks = new ArrayList<>();
HashSet<String> visited = new HashSet<>();
while(!q.isEmpty()){
EDIFHierNet curr = q.poll();
if(visited.contains(curr.getHierarchicalNetName())) continue;
visited.add(curr.getHierarchicalNetName());
for(EDIFPortInst portInst : curr.getNet().getPortInsts()){
if(portInst.isOutput()) continue;
if(portInst.isTopLevelPort()){
// Going up in hierarchy
EDIFCellInst cellInst = getCellInstFromHierName(curr.getHierarchicalInstName());
if(cellInst == null) continue;
EDIFPortInst epr = cellInst.getPortInst(portInst.getPortInstNameFromPort());
if(epr == null || epr.getNet() == null) continue;
String hierName = EDIFTools.getHierarchicalRootFromPinName(curr.getHierarchicalInstName());
q.add(new EDIFHierNet(hierName, epr.getNet()));
}else if(portInst.getCellInst().getCellType().isPrimitive()){
// We found a sink
sinks.add(new EDIFHierPortInst(curr.getHierarchicalInstName(),portInst));
continue;
}else{
// Going down in hierarchy
EDIFNet internalNet = portInst.getInternalNet();
if(internalNet == null) continue;
String hierName = curr.getHierarchicalInstName() + EDIFTools.EDIF_HIER_SEP + portInst.getCellInst().getName();
q.add(new EDIFHierNet(hierName,internalNet));
}
}
}
return sinks;
}
/**
* Builds a map of hierarchical net names to EDIFNet objects by walking the
* cell instance hierarchy starting at the top cell.
* @param cellInstMap Map of hierarchical instance names to cell instances (not used by this implementation).
* @return A map from full hierarchical net names to their EDIFNet objects.
*/
public HashMap<String, EDIFNet> generateEDIFNetMap(HashMap<String, EDIFCellInst> cellInstMap) {
HashMap<String,EDIFNet> map = new HashMap<String, EDIFNet>();
Queue<EDIFHierCellInst> toProcess = new LinkedList<EDIFHierCellInst>();
// Add nets at the very top level to start
for(EDIFNet net : getTopCell().getNets()){
map.put(net.getName(), net);
}
Collection<EDIFCellInst> topInstances = getTopCellInst().getCellType().getCellInsts();
if(topInstances != null){
for(EDIFCellInst i : topInstances){
toProcess.add(new EDIFHierCellInst("",i));
}
}
while(!toProcess.isEmpty()){
EDIFHierCellInst curr = toProcess.poll();
String name = curr.getHierarchicalInstName() + curr.getInst().getName();
if(curr.getInst().getCellType().getNets() == null) continue;
for(EDIFNet net : curr.getInst().getCellType().getNets()){
map.put(name + "/" + net.getName(), net);
//System.out.println("NET: " + name + "/" + net.getOldName());
}
String parentName = curr.getHierarchicalInstName() + curr.getInst().getName() + "/";
if(curr.getInst().getCellType().getCellInsts()==null) continue;
for(EDIFCellInst i : curr.getInst().getCellType().getCellInsts()){
toProcess.add(new EDIFHierCellInst(parentName, i));
}
}
return map;
}
/**
* This will be removed in the next release.
* Consider using {@link EDIFCell#getPortMap()} instead
* @deprecated
* @return A map of top-level port names (bus ports expanded with an index suffix) to their EDIFPort objects.
*/
public HashMap<String,EDIFPort> generateEDIFPortMap(){
HashMap<String,EDIFPort> map = new HashMap<String, EDIFPort>();
for(EDIFPort port : getTopCellInst().getCellType().getPorts()){
if(port.isBus()){
for(int idx=0; idx < port.getWidth(); idx++){
map.put(port.getName() + "["+idx+"]",port);
}
}else{
map.put(port.getName(),port);
}
}
return map;
}
/**
* Identify primitive cell instances in the EDIF netlist.
* @return A map of hierarchical names (not including the top-level name)
* to EDIFCellInst objects whose cell types are primitives in the HDI primitives library
*/
public HashMap<String,EDIFCellInst> generateCellInstMap(){
HashMap<String,EDIFCellInst> primitiveInstances = new HashMap<String, EDIFCellInst>();
HashSet<String> primitives = new HashSet<String>();
EDIFLibrary lib = getHDIPrimitivesLibrary();
if(lib != null){
for(EDIFCell c : lib.getCells()){
primitives.add(c.getName());
}
}
Queue<EDIFHierCellInst> toProcess = new LinkedList<EDIFHierCellInst>();
Collection<EDIFCellInst> topInstances = getTopCellInst().getCellType().getCellInsts();
if(topInstances != null){
for(EDIFCellInst i : topInstances){
toProcess.add(new EDIFHierCellInst("",i));
}
}
while(!toProcess.isEmpty()){
EDIFHierCellInst curr = toProcess.poll();
if(primitives.contains(curr.getInst().getCellType().getName())){
String name = curr.getHierarchicalInstName() + curr.getInst().getName();
primitiveInstances.put(name, curr.getInst());
}else{
String parentName = curr.getHierarchicalInstName() + curr.getInst().getName()+ "/";
if(curr.getInst().getCellType().getCellInsts() == null) {
//System.out.println("No instances for cell type: " + curr.inst.getCellType());
continue;
}
for(EDIFCellInst i : curr.getInst().getCellType().getCellInsts()){
toProcess.add(new EDIFHierCellInst(parentName, i));
}
}
}
return primitiveInstances;
}
public static void main(String[] args) throws FileNotFoundException {
CodePerfTracker t = new CodePerfTracker("EDIF Import/Export", true);
t.start("Read EDIF");
EDIFParser p = new EDIFParser(args[0]);
EDIFNetlist n = p.parseEDIFNetlist();
t.stop().start("Export EDIF");
n.exportEDIF(args[1]);
t.stop().printSummary();
}
}
| [] | [] | [] | [] | [] | java | 0 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "moby_dev_test.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
config.py | import os
base_dir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
DEBUG = False
TESTING = False
CSRF_ENABLED = True
SECRET_KEY = os.environ['SECRET_KEY']
API_URL = os.environ['API_URL']
class TestingConfig(Config):
TESTING = True
class DevelopmentConfig(Config):
DEVELOPMENT = True
DEBUG = True
SESSION_COOKIE_HTTPONLY = True
REMEMBER_COOKIE_HTTPONLY = True
class StagingConfig(Config):
DEVELOPMENT = True
DEBUG = True
class ProductionConfig(Config):
DEBUG = False
SESSION_COOKIE_SECURE = True
REMEMBER_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
REMEMBER_COOKIE_HTTPONLY = True
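# Illustrative usage sketch (not part of the original file): selecting one of the
# configuration classes above from a Flask application. The module path and shell
# commands are assumptions.
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.from_object('config.ProductionConfig')
#
# SECRET_KEY and API_URL must be present in the environment before Config is
# imported, for example:
#
#   export SECRET_KEY='change-me'
#   export API_URL='https://api.example.com'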
| [] | [] | [
"SECRET_KEY",
"API_URL"
] | [] | ["SECRET_KEY", "API_URL"] | python | 2 | 0 | |
pkg/cli/common.go | package cli
// the cli package contains urfave/cli related structs that help make up
// the command line for buildah commands. it resides here so other projects
// that vendor in this code can use them too.
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/containers/buildah"
"github.com/containers/buildah/util"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/spf13/pflag"
)
// LayerResults represents the results of the layer flags
type LayerResults struct {
ForceRm bool
Layers bool
}
// UserNSResults represents the results for the UserNS flags
type UserNSResults struct {
UserNS string
UserNSUIDMap []string
UserNSGIDMap []string
UserNSUIDMapUser string
UserNSGIDMapGroup string
}
// NameSpaceResults represents the results for Namespace flags
type NameSpaceResults struct {
IPC string
Network string
CNIConfigDir string
CNIPlugInPath string
PID string
UTS string
}
// BudResults represents the results for Bud flags
type BudResults struct {
Annotation []string
Authfile string
BuildArg []string
CacheFrom string
CertDir string
Compress bool
Creds string
DisableCompression bool
DisableContentTrust bool
File []string
Format string
Iidfile string
Label []string
Logfile string
Loglevel int
NoCache bool
Platform string
Pull bool
PullAlways bool
Quiet bool
Rm bool
Runtime string
RuntimeFlags []string
SignaturePolicy string
Squash bool
Tag []string
Target string
TLSVerify bool
}
// FromAndBudResults represents the results for common flags
// in bud and from
type FromAndBudResults struct {
AddHost []string
BlobCache string
CapAdd []string
CapDrop []string
CgroupParent string
CPUPeriod uint64
CPUQuota int64
CPUSetCPUs string
CPUSetMems string
CPUShares uint64
DNSSearch []string
DNSServers []string
DNSOptions []string
HTTPProxy bool
Isolation string
Memory string
MemorySwap string
SecurityOpt []string
ShmSize string
Ulimit []string
Volumes []string
}
// GetUserNSFlags returns the common flags for usernamespace
func GetUserNSFlags(flags *UserNSResults) pflag.FlagSet {
usernsFlags := pflag.FlagSet{}
usernsFlags.StringVar(&flags.UserNS, "userns", "", "'container', `path` of user namespace to join, or 'host'")
usernsFlags.StringSliceVar(&flags.UserNSUIDMap, "userns-uid-map", []string{}, "`containerID:hostID:length` UID mapping to use in user namespace")
usernsFlags.StringSliceVar(&flags.UserNSGIDMap, "userns-gid-map", []string{}, "`containerID:hostID:length` GID mapping to use in user namespace")
usernsFlags.StringVar(&flags.UserNSUIDMapUser, "userns-uid-map-user", "", "`name` of entries from /etc/subuid to use to set user namespace UID mapping")
usernsFlags.StringVar(&flags.UserNSGIDMapGroup, "userns-gid-map-group", "", "`name` of entries from /etc/subgid to use to set user namespace GID mapping")
return usernsFlags
}
// GetNameSpaceFlags returns the common flags for a namespace menu
func GetNameSpaceFlags(flags *NameSpaceResults) pflag.FlagSet {
fs := pflag.FlagSet{}
fs.StringVar(&flags.IPC, string(specs.IPCNamespace), "", "'container', `path` of IPC namespace to join, or 'host'")
fs.StringVar(&flags.Network, string(specs.NetworkNamespace), "", "'container', `path` of network namespace to join, or 'host'")
// TODO How do we alias net and network?
fs.StringVar(&flags.Network, "net", "", "'container', `path` of network namespace to join, or 'host'")
if err := fs.MarkHidden("net"); err != nil {
panic(fmt.Sprintf("error marking net flag as hidden: %v", err))
}
fs.StringVar(&flags.CNIConfigDir, "cni-config-dir", util.DefaultCNIConfigDir, "`directory` of CNI configuration files")
fs.StringVar(&flags.CNIPlugInPath, "cni-plugin-path", util.DefaultCNIPluginPath, "`path` of CNI network plugins")
fs.StringVar(&flags.PID, string(specs.PIDNamespace), "", "container, `path` of PID namespace to join, or 'host'")
fs.StringVar(&flags.UTS, string(specs.UTSNamespace), "", "container, :`path` of UTS namespace to join, or 'host'")
return fs
}
// GetLayerFlags returns the common flags for layers
func GetLayerFlags(flags *LayerResults) pflag.FlagSet {
fs := pflag.FlagSet{}
fs.BoolVar(&flags.ForceRm, "force-rm", false, "Always remove intermediate containers after a build, even if the build is unsuccessful.")
fs.BoolVar(&flags.Layers, "layers", UseLayers(), "cache intermediate layers during build. Use BUILDAH_LAYERS environment variable to override.")
return fs
}
// GetBudFlags returns common bud flags
func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs := pflag.FlagSet{}
fs.StringArrayVar(&flags.Annotation, "annotation", []string{}, "Set metadata for an image (default [])")
fs.StringVar(&flags.Authfile, "authfile", GetDefaultAuthFile(), "path of the authentication file.")
fs.StringArrayVar(&flags.BuildArg, "build-arg", []string{}, "`argument=value` to supply to the builder")
fs.StringVar(&flags.CacheFrom, "cache-from", "", "Images to utilise as potential cache sources. The build process does not currently support caching so this is a NOOP.")
fs.StringVar(&flags.CertDir, "cert-dir", "", "use certificates at the specified path to access the registry")
fs.BoolVar(&flags.Compress, "compress", false, "This is legacy option, which has no effect on the image")
fs.StringVar(&flags.Creds, "creds", "", "use `[username[:password]]` for accessing the registry")
fs.BoolVarP(&flags.DisableCompression, "disable-compression", "D", true, "don't compress layers by default")
fs.BoolVar(&flags.DisableContentTrust, "disable-content-trust", false, "This is a Docker specific option and is a NOOP")
fs.StringSliceVarP(&flags.File, "file", "f", []string{}, "`pathname or URL` of a Dockerfile")
fs.StringVar(&flags.Format, "format", DefaultFormat(), "`format` of the built image's manifest and metadata. Use BUILDAH_FORMAT environment variable to override.")
fs.StringVar(&flags.Iidfile, "iidfile", "", "`file` to write the image ID to")
fs.StringArrayVar(&flags.Label, "label", []string{}, "Set metadata for an image (default [])")
fs.BoolVar(&flags.NoCache, "no-cache", false, "Do not use existing cached images for the container build. Build from the start with a new set of cached layers.")
fs.StringVar(&flags.Logfile, "logfile", "", "log to `file` instead of stdout/stderr")
fs.IntVar(&flags.Loglevel, "loglevel", 0, "adjust logging level (range from -2 to 3)")
fs.StringVar(&flags.Platform, "platform", "", "CLI compatibility: no action or effect")
fs.BoolVar(&flags.Pull, "pull", true, "pull the image if not present")
fs.BoolVar(&flags.PullAlways, "pull-always", false, "pull the image, even if a version is present")
fs.BoolVarP(&flags.Quiet, "quiet", "q", false, "refrain from announcing build instructions and image read/write progress")
fs.BoolVar(&flags.Rm, "rm", true, "Remove intermediate containers after a successful build")
fs.StringVar(&flags.Runtime, "runtime", util.Runtime(), "`path` to an alternate runtime. Use BUILDAH_RUNTIME environment variable to override.")
fs.StringSliceVar(&flags.RuntimeFlags, "runtime-flag", []string{}, "add global flags for the container runtime")
fs.StringVar(&flags.SignaturePolicy, "signature-policy", "", "`pathname` of signature policy file (not usually used)")
fs.BoolVar(&flags.Squash, "squash", false, "Squash newly built layers into a single new layer.")
fs.StringArrayVarP(&flags.Tag, "tag", "t", []string{}, "tagged `name` to apply to the built image")
fs.StringVar(&flags.Target, "target", "", "set the target build stage to build")
fs.BoolVar(&flags.TLSVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry")
return fs
}
// GetFromAndBudFlags returns the common flags shared by the from and bud commands,
// including the user namespace and namespace flag sets.
func GetFromAndBudFlags(flags *FromAndBudResults, usernsResults *UserNSResults, namespaceResults *NameSpaceResults) pflag.FlagSet {
fs := pflag.FlagSet{}
fs.StringSliceVar(&flags.AddHost, "add-host", []string{}, "add a custom host-to-IP mapping (`host:ip`) (default [])")
fs.StringVar(&flags.BlobCache, "blob-cache", "", "assume image blobs in the specified directory will be available for pushing")
if err := fs.MarkHidden("blob-cache"); err != nil {
panic(fmt.Sprintf("error marking net flag as hidden: %v", err))
}
fs.StringSliceVar(&flags.CapAdd, "cap-add", []string{}, "add the specified capability when running (default [])")
fs.StringSliceVar(&flags.CapDrop, "cap-drop", []string{}, "drop the specified capability when running (default [])")
fs.StringVar(&flags.CgroupParent, "cgroup-parent", "", "optional parent cgroup for the container")
fs.Uint64Var(&flags.CPUPeriod, "cpu-period", 0, "limit the CPU CFS (Completely Fair Scheduler) period")
fs.Int64Var(&flags.CPUQuota, "cpu-quota", 0, "limit the CPU CFS (Completely Fair Scheduler) quota")
fs.Uint64VarP(&flags.CPUShares, "cpu-shares", "c", 0, "CPU shares (relative weight)")
fs.StringVar(&flags.CPUSetCPUs, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)")
fs.StringVar(&flags.CPUSetMems, "cpuset-mems", "", "memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.")
fs.StringSliceVar(&flags.DNSSearch, "dns-search", []string{}, "Set custom DNS search domains")
fs.StringSliceVar(&flags.DNSServers, "dns", []string{}, "Set custom DNS servers or disable it completely by setting it to 'none', which prevents the automatic creation of `/etc/resolv.conf`.")
fs.StringSliceVar(&flags.DNSOptions, "dns-option", []string{}, "Set custom DNS options")
fs.BoolVar(&flags.HTTPProxy, "http-proxy", true, "pass thru HTTP Proxy environment variables")
fs.StringVar(&flags.Isolation, "isolation", DefaultIsolation(), "`type` of process isolation to use. Use BUILDAH_ISOLATION environment variable to override.")
fs.StringVarP(&flags.Memory, "memory", "m", "", "memory limit (format: <number>[<unit>], where unit = b, k, m or g)")
fs.StringVar(&flags.MemorySwap, "memory-swap", "", "swap limit equal to memory plus swap: '-1' to enable unlimited swap")
fs.StringArrayVar(&flags.SecurityOpt, "security-opt", []string{}, "security options (default [])")
fs.StringVar(&flags.ShmSize, "shm-size", "65536k", "size of '/dev/shm'. The format is `<number><unit>`.")
fs.StringSliceVar(&flags.Ulimit, "ulimit", []string{}, "ulimit options (default [])")
fs.StringSliceVarP(&flags.Volumes, "volume", "v", []string{}, "bind mount a volume into the container (default [])")
// Add in the usernamespace and namespaceflags
usernsFlags := GetUserNSFlags(usernsResults)
namespaceFlags := GetNameSpaceFlags(namespaceResults)
fs.AddFlagSet(&usernsFlags)
fs.AddFlagSet(&namespaceFlags)
return fs
}
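// Illustrative usage sketch (not part of the original file): combining the flag sets
// above onto a command. The command variable and result-struct names are assumptions;
// the pattern simply merges the returned pflag.FlagSets via AddFlagSet.
//
//	var (
//		budResults        BudResults
//		fromAndBudResults FromAndBudResults
//		usernsResults     UserNSResults
//		namespaceResults  NameSpaceResults
//	)
//	budFlags := GetBudFlags(&budResults)
//	commonFlags := GetFromAndBudFlags(&fromAndBudResults, &usernsResults, &namespaceResults)
//	budCommand.Flags().AddFlagSet(&budFlags)     // budCommand: hypothetical *cobra.Command
//	budCommand.Flags().AddFlagSet(&commonFlags)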
// UseLayers returns true if BUILDAH_LAYERS is set to "1" or "true"
// otherwise it returns false
func UseLayers() bool {
layers := os.Getenv("BUILDAH_LAYERS")
if strings.ToLower(layers) == "true" || layers == "1" {
return true
}
return false
}
// DefaultFormat returns the default image format
func DefaultFormat() string {
format := os.Getenv("BUILDAH_FORMAT")
if format != "" {
return format
}
return buildah.OCI
}
// DefaultIsolation returns the default process isolation type
func DefaultIsolation() string {
isolation := os.Getenv("BUILDAH_ISOLATION")
if isolation != "" {
return isolation
}
return buildah.OCI
}
// DefaultHistory returns the default add-history setting
func DefaultHistory() bool {
history := os.Getenv("BUILDAH_HISTORY")
if strings.ToLower(history) == "true" || history == "1" {
return true
}
return false
}
func VerifyFlagsArgsOrder(args []string) error {
for _, arg := range args {
if strings.HasPrefix(arg, "-") {
return errors.Errorf("No options (%s) can be specified after the image or container name", arg)
}
}
return nil
}
func GetDefaultAuthFile() string {
authfile := os.Getenv("REGISTRY_AUTH_FILE")
if authfile != "" {
return authfile
}
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
if runtimeDir != "" {
return filepath.Join(runtimeDir, "containers/auth.json")
}
return ""
}
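// Example of the lookup order implemented above (illustrative values only):
//
//	REGISTRY_AUTH_FILE=/tmp/auth.json                        -> "/tmp/auth.json"
//	XDG_RUNTIME_DIR=/run/user/1000, no REGISTRY_AUTH_FILE    -> "/run/user/1000/containers/auth.json"
//	neither variable set                                     -> ""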
| [
"\"BUILDAH_LAYERS\"",
"\"BUILDAH_FORMAT\"",
"\"BUILDAH_ISOLATION\"",
"\"BUILDAH_HISTORY\"",
"\"REGISTRY_AUTH_FILE\"",
"\"XDG_RUNTIME_DIR\""
] | [] | [
"BUILDAH_ISOLATION",
"BUILDAH_HISTORY",
"BUILDAH_FORMAT",
"XDG_RUNTIME_DIR",
"REGISTRY_AUTH_FILE",
"BUILDAH_LAYERS"
] | [] | ["BUILDAH_ISOLATION", "BUILDAH_HISTORY", "BUILDAH_FORMAT", "XDG_RUNTIME_DIR", "REGISTRY_AUTH_FILE", "BUILDAH_LAYERS"] | go | 6 | 0 | |
tests/bugs/core_0210_test.py | #coding:utf-8
#
# id: bugs.core_0210
# title: CS server crash altering SP in 2 connect
# decription:
#
# tracker_id: CORE-0210
# min_versions: ['2.5.0']
# versions: 2.5
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
# import os
# import fdb
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
# db_conn.close()
#
# stm1='''create or alter procedure sp_test as
# begin
# exit;
# end
# '''
# stm2='''create or alter procedure sp_test as
# declare x int;
# begin
# exit;
# end
# '''
#
# con1 = fdb.connect(dsn=dsn)
# con2 = fdb.connect(dsn=dsn)
#
# xtpb = ( [ fdb.isc_tpb_concurrency ] )
#
# con1.begin( tpb = xtpb )
#
# cur1=con1.cursor()
# cur2=con2.cursor()
#
# cur1.execute(stm1)
# con1.commit()
#
# con2.begin( tpb = xtpb )
# cur2.execute(stm2)
# con2.commit()
#
# con1.begin( tpb = xtpb )
# cur1.execute(stm1)
# con1.commit()
#
# con1.close()
# con2.close()
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
@pytest.mark.version('>=2.5')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
| [] | [] | [
"ISC_USER",
"ISC_PASSWORD"
] | [] | ["ISC_USER", "ISC_PASSWORD"] | python | 2 | 0 | |
airflow/providers/google/cloud/example_dags/example_compute.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that starts, stops and sets the machine type of a Google Compute
Engine instance.
This DAG relies on the following OS environment variables
* GCP_PROJECT_ID - Google Cloud Platform project where the Compute Engine instance exists.
* GCE_ZONE - Google Cloud Platform zone where the instance exists.
* GCE_INSTANCE - Name of the Compute Engine instance.
* GCE_SHORT_MACHINE_TYPE_NAME - Machine type resource name to set, e.g. 'n1-standard-1'.
See https://cloud.google.com/compute/docs/machine-types
"""
import os
from airflow import models
from airflow.providers.google.cloud.operators.compute import (
ComputeEngineSetMachineTypeOperator, ComputeEngineStartInstanceOperator,
ComputeEngineStopInstanceOperator,
)
from airflow.utils.dates import days_ago
# [START howto_operator_gce_args_common]
GCP_PROJECT_ID = os.environ.get('GCP_PROJECT_ID', 'example-project')
GCE_ZONE = os.environ.get('GCE_ZONE', 'europe-west1-b')
GCE_INSTANCE = os.environ.get('GCE_INSTANCE', 'testinstance')
# [END howto_operator_gce_args_common]
default_args = {
'start_date': days_ago(1),
}
# [START howto_operator_gce_args_set_machine_type]
GCE_SHORT_MACHINE_TYPE_NAME = os.environ.get('GCE_SHORT_MACHINE_TYPE_NAME', 'n1-standard-1')
SET_MACHINE_TYPE_BODY = {
'machineType': 'zones/{}/machineTypes/{}'.format(GCE_ZONE, GCE_SHORT_MACHINE_TYPE_NAME)
}
# [END howto_operator_gce_args_set_machine_type]
with models.DAG(
'example_gcp_compute',
default_args=default_args,
schedule_interval=None, # Override to match your needs
tags=['example'],
) as dag:
# [START howto_operator_gce_start]
gce_instance_start = ComputeEngineStartInstanceOperator(
project_id=GCP_PROJECT_ID,
zone=GCE_ZONE,
resource_id=GCE_INSTANCE,
task_id='gcp_compute_start_task'
)
# [END howto_operator_gce_start]
# Duplicate start for idempotence testing
# [START howto_operator_gce_start_no_project_id]
gce_instance_start2 = ComputeEngineStartInstanceOperator(
zone=GCE_ZONE,
resource_id=GCE_INSTANCE,
task_id='gcp_compute_start_task2'
)
# [END howto_operator_gce_start_no_project_id]
# [START howto_operator_gce_stop]
gce_instance_stop = ComputeEngineStopInstanceOperator(
project_id=GCP_PROJECT_ID,
zone=GCE_ZONE,
resource_id=GCE_INSTANCE,
task_id='gcp_compute_stop_task'
)
# [END howto_operator_gce_stop]
# Duplicate stop for idempotence testing
# [START howto_operator_gce_stop_no_project_id]
gce_instance_stop2 = ComputeEngineStopInstanceOperator(
zone=GCE_ZONE,
resource_id=GCE_INSTANCE,
task_id='gcp_compute_stop_task2'
)
# [END howto_operator_gce_stop_no_project_id]
# [START howto_operator_gce_set_machine_type]
gce_set_machine_type = ComputeEngineSetMachineTypeOperator(
project_id=GCP_PROJECT_ID,
zone=GCE_ZONE,
resource_id=GCE_INSTANCE,
body=SET_MACHINE_TYPE_BODY,
task_id='gcp_compute_set_machine_type'
)
# [END howto_operator_gce_set_machine_type]
# Duplicate set machine type for idempotence testing
# [START howto_operator_gce_set_machine_type_no_project_id]
gce_set_machine_type2 = ComputeEngineSetMachineTypeOperator(
zone=GCE_ZONE,
resource_id=GCE_INSTANCE,
body=SET_MACHINE_TYPE_BODY,
task_id='gcp_compute_set_machine_type2'
)
# [END howto_operator_gce_set_machine_type_no_project_id]
gce_instance_start >> gce_instance_start2 >> gce_instance_stop >> \
gce_instance_stop2 >> gce_set_machine_type >> gce_set_machine_type2
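# Illustrative way to exercise this DAG once it is loaded (not part of the original
# file); the values and the CLI form assume a recent Airflow installation:
#
#   export GCP_PROJECT_ID=my-project GCE_ZONE=europe-west1-b GCE_INSTANCE=testinstance
#   airflow dags trigger example_gcp_compute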
| [] | [] | [
"GCE_INSTANCE",
"GCP_PROJECT_ID",
"GCE_ZONE",
"GCE_SHORT_MACHINE_TYPE_NAME"
] | [] | ["GCE_INSTANCE", "GCP_PROJECT_ID", "GCE_ZONE", "GCE_SHORT_MACHINE_TYPE_NAME"] | python | 4 | 0 | |
src/python/dxpy/utils/printing.py | # Copyright (C) 2013-2016 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
This submodule gives basic utilities for printing to the terminal.
'''
import textwrap, subprocess, os, sys
import json
import platform
from ..compat import USING_PYTHON2, sys_encoding
from ..exceptions import DXCLIError
import contextlib
import io
if sys.stdout.isatty():
try:
tty_rows, tty_cols = map(int, subprocess.check_output(['stty', 'size'], stderr=open(os.devnull, 'w')).split())
std_width = min(tty_cols - 2, 100)
except:
tty_rows, tty_cols = 24, 80
std_width = 78
color_state = True
else:
tty_rows, tty_cols = 24, 80
std_width = 78
color_state = False
delimiter = None
# Utility functions to silence output for a function call
# https://stackoverflow.com/questions/2828953/silence-the-stdout-of-a-function-in-python-without-trashing-sys-stdout-and-resto
class DummyFile(object):
def write(self, x): pass
@contextlib.contextmanager
def nostderr():
save_stderr = sys.stderr
sys.stderr = DummyFile()
yield
sys.stderr = save_stderr
def CYAN(message=None):
if message is None:
return '\033[36m' if color_state else ''
else:
return CYAN() + message + ENDC()
def LIGHTBLUE(message=None):
if message is None:
return '\033[1;34m' if color_state else ''
else:
return LIGHTBLUE() + message + ENDC()
def BLUE(message=None):
if message is None:
return '\033[34m' if color_state else ''
else:
return BLUE() + message + ENDC()
def YELLOW(message=None):
if message is None:
return '\033[33m' if color_state else ''
else:
return YELLOW() + message + ENDC()
def GREEN(message=None):
if message is None:
return '\033[32m' if color_state else ''
else:
return GREEN() + message + ENDC()
def RED(message=None):
if message is None:
return '\033[31m' if color_state else ''
else:
return RED() + message + ENDC()
def WHITE(message=None):
if message is None:
return '\033[37m' if color_state else ''
else:
return WHITE() + message + ENDC()
def UNDERLINE(message=None):
if message is None:
return '\033[4m' if color_state else ''
else:
return UNDERLINE() + message + ENDC()
def BOLD(message=None):
if message is None:
return '\033[1m' if color_state else ''
else:
return BOLD() + message + ENDC()
def ENDC():
return '\033[0m' if color_state else ''
def DNANEXUS_LOGO():
return BOLD() + WHITE() + 'DNAne' + CYAN() + 'x' + WHITE() + 'us' + ENDC()
def DNANEXUS_X():
return BOLD() + CYAN() + 'x' + WHITE() + ENDC()
def set_colors(state=True):
global color_state
color_state = state
def set_delimiter(delim=None):
global delimiter
delimiter = delim
def get_delimiter(delim=None):
return delimiter
def DELIMITER(alt_delim):
return alt_delim if delimiter is None else delimiter
def fill(string, width_adjustment=0, **kwargs):
if "width" not in kwargs:
kwargs['width'] = max(std_width + width_adjustment, 20)
if "break_on_hyphens" not in kwargs:
kwargs["break_on_hyphens"] = False
return textwrap.fill(string, **kwargs)
def pager(content, pager=None, file=None):
if file is None:
file = sys.stdout
pager_process = None
try:
if file != sys.stdout or not file.isatty():
raise DXCLIError() # Just print the content, don't use a pager
content_lines = content.splitlines()
content_rows = len(content_lines)
content_cols = max(len(i) for i in content_lines)
if tty_rows > content_rows and tty_cols > content_cols:
raise DXCLIError() # Just print the content, don't use a pager
if pager is None:
pager = os.environ.get('PAGER', 'less -RS')
if platform.system() == 'Windows':
# Verify if the pager is available on Windows
try:
subprocess.call(pager)
except:
raise DXCLIError() # Just print the content, don't use a pager
pager_process = subprocess.Popen(pager, shell=True, stdin=subprocess.PIPE, stdout=file)
pager_process.stdin.write(content.encode(sys_encoding))
pager_process.stdin.close()
pager_process.wait()
if pager_process.returncode != os.EX_OK:
raise DXCLIError() # Pager had a problem, print the content without it
except:
file.write(content.encode(sys_encoding) if USING_PYTHON2 else content)
finally:
try:
pager_process.terminate()
except:
pass
def refill_paragraphs(string, ignored_prefix=' '):
"""Refills the given text, where the text is composed of paragraphs
separated by blank lines (i.e. '\n\n'). Lines that begin with
ignored_prefix are not touched; this can be used to keep indented
code snippets from being incorrectly reformatted.
"""
paragraphs = string.split('\n\n')
refilled_paragraphs = [fill(paragraph) if not paragraph.startswith(ignored_prefix) else paragraph for paragraph in paragraphs]
return '\n\n'.join(refilled_paragraphs).strip('\n')
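# Illustrative example of refill_paragraphs (not part of the original file); the input
# text is made up:
#
#   text = "First paragraph that will be\nrewrapped to the terminal width.\n\n  indented snippet kept as-is"
#   print(refill_paragraphs(text))
#
# The first paragraph is refilled to the standard width, while the paragraph starting
# with the ignored prefix is left untouched.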
def _format_find_projects_results(results):
for result in results:
print(result["id"] + DELIMITER(" : ") + result['describe']['name'] +
DELIMITER(' (') + result["level"] + DELIMITER(')'))
def _format_find_apps_results(results, verbose=False):
def maybe_x(result):
return DNANEXUS_X() if result['describe']['billTo'] in ['org-dnanexus', 'org-dnanexus_apps'] else ' '
if not verbose:
for result in results:
print(maybe_x(result) + DELIMITER(" ") + result['describe'].get('title', result['describe']['name']) + DELIMITER(' (') + result["describe"]["name"] + DELIMITER("), v") + result["describe"]["version"])
else:
for result in results:
print(maybe_x(result) + DELIMITER(" ") + result["id"] + DELIMITER(" ") + result['describe'].get('title', result['describe']['name']) + DELIMITER(' (') + result["describe"]["name"] + DELIMITER('), v') + result['describe']['version'] + DELIMITER(" (") + ("published" if result["describe"].get("published", 0) > 0 else "unpublished") + DELIMITER(")"))
def _format_find_org_members_results(results):
for result in results:
print(result["id"] + DELIMITER(" : ") + result['describe']['first'] + DELIMITER(' ') +
result['describe']['last'] + DELIMITER(' ') + DELIMITER(' (') + result["level"] +
DELIMITER(')'))
def format_find_results(args, results):
"""
Formats the output of ``dx find ...`` commands for `--json` and `--brief` arguments; also formats if no formatting
arguments are given.
Currently used for ``dx find projects``, ``dx find org_projects``, ``dx find org_apps``,
and ``dx find org_members``
"""
if args.json:
print(json.dumps(list(results), indent=4))
elif args.brief:
for result in results:
print(result['id'])
else:
if args.func.__name__ in ("find_projects", "org_find_projects"):
_format_find_projects_results(results)
elif args.func.__name__ in ("org_find_members"):
_format_find_org_members_results(results)
elif args.func.__name__ in ("org_find_apps"): # should have "find_apps" here one day
_format_find_apps_results(results, verbose=args.verbose)
| [] | [] | [
"PAGER"
] | [] | ["PAGER"] | python | 1 | 0 | |
contrib/gitian-build.py | #!/usr/bin/env python3
import argparse
import os
import subprocess
import sys
def setup():
global args, workdir
programs = ['ruby', 'git', 'make', 'wget', 'curl']
if args.kvm:
programs += ['apt-cacher-ng', 'python-vm-builder', 'qemu-kvm', 'qemu-utils']
elif args.docker and not os.path.isfile('/lib/systemd/system/docker.service'):
dockers = ['docker.io', 'docker-ce']
for i in dockers:
return_code = subprocess.call(['sudo', 'apt-get', 'install', '-qq', i])
if return_code == 0:
break
if return_code != 0:
print('Cannot find any way to install Docker.', file=sys.stderr)
sys.exit(1)
else:
programs += ['apt-cacher-ng', 'lxc', 'debootstrap']
subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs)
if not os.path.isdir('gitian.sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/qtumproject/gitian.sigs.git'])
if not os.path.isdir('qtum-detached-sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/qtumproject/qtum-detached-sigs.git'])
if not os.path.isdir('gitian-builder'):
subprocess.check_call(['git', 'clone', 'https://github.com/devrandom/gitian-builder.git'])
if not os.path.isdir('qtum'):
subprocess.check_call(['git', 'clone', 'https://github.com/qtumproject/qtum.git'])
os.chdir('gitian-builder')
make_image_prog = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64']
if args.docker:
make_image_prog += ['--docker']
elif not args.kvm:
make_image_prog += ['--lxc']
subprocess.check_call(make_image_prog)
os.chdir(workdir)
if args.is_bionic and not args.kvm and not args.docker:
subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net'])
print('Reboot is required')
sys.exit(0)
def build():
global args, workdir
os.makedirs('qtum-binaries/' + args.version, exist_ok=True)
print('\nBuilding Dependencies\n')
os.chdir('gitian-builder')
os.makedirs('inputs', exist_ok=True)
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://downloads.sourceforge.net/project/osslsigncode/osslsigncode/osslsigncode-1.7.1.tar.gz'])
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://bitcoincore.org/cfields/osslsigncode-Backports-to-1.7.1.patch'])
subprocess.check_call(["echo 'a8c4e9cafba922f89de0df1f2152e7be286aba73f78505169bc351a7938dd911 inputs/osslsigncode-Backports-to-1.7.1.patch' | sha256sum -c"], shell=True)
subprocess.check_call(["echo 'f9a8cdb38b9c309326764ebc937cba1523a3a751a7ab05df3ecc99d18ae466c9 inputs/osslsigncode-1.7.1.tar.gz' | sha256sum -c"], shell=True)
subprocess.check_call(['make', '-C', '../qtum/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common'])
if args.linux:
print('\nCompiling ' + args.version + ' Linux')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'qtum='+args.commit+',cpp-eth-qtum=develop', '--url', 'qtum='+args.url, '../qtum/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../gitian.sigs/', '../qtum/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call('mv build/out/qtum-*.tar.gz build/out/src/qtum-*.tar.gz ../qtum-binaries/'+args.version, shell=True)
if args.windows:
print('\nCompiling ' + args.version + ' Windows')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'qtum='+args.commit+',cpp-eth-qtum=develop', '--url', 'qtum='+args.url, '../qtum/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-unsigned', '--destination', '../gitian.sigs/', '../qtum/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call('mv build/out/qtum-*-win-unsigned.tar.gz inputs/', shell=True)
subprocess.check_call('mv build/out/qtum-*.zip build/out/qtum-*.exe build/out/src/qtum-*.tar.gz ../qtum-binaries/'+args.version, shell=True)
if args.macos:
print('\nCompiling ' + args.version + ' MacOS')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'qtum='+args.commit+',cpp-eth-qtum=develop', '--url', 'qtum='+args.url, '../qtum/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../gitian.sigs/', '../qtum/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call('mv build/out/qtum-*-osx-unsigned.tar.gz inputs/', shell=True)
subprocess.check_call('mv build/out/qtum-*.tar.gz build/out/qtum-*.dmg build/out/src/qtum-*.tar.gz ../qtum-binaries/'+args.version, shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Unsigned Sigs\n')
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-win-unsigned/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-osx-unsigned/'+args.signer])
subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer])
os.chdir(workdir)
def sign():
global args, workdir
os.chdir('gitian-builder')
if args.windows:
print('\nSigning ' + args.version + ' Windows')
subprocess.check_call('cp inputs/qtum-' + args.version + '-win-unsigned.tar.gz inputs/qtum-win-unsigned.tar.gz', shell=True)
subprocess.check_call(['bin/gbuild', '--skip-image', '--upgrade', '--commit', 'signature='+args.commit, '../qtum/contrib/gitian-descriptors/gitian-win-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-signed', '--destination', '../gitian.sigs/', '../qtum/contrib/gitian-descriptors/gitian-win-signer.yml'])
subprocess.check_call('mv build/out/qtum-*win64-setup.exe ../qtum-binaries/'+args.version, shell=True)
if args.macos:
print('\nSigning ' + args.version + ' MacOS')
subprocess.check_call('cp inputs/qtum-' + args.version + '-osx-unsigned.tar.gz inputs/qtum-osx-unsigned.tar.gz', shell=True)
subprocess.check_call(['bin/gbuild', '--skip-image', '--upgrade', '--commit', 'signature='+args.commit, '../qtum/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-signed', '--destination', '../gitian.sigs/', '../qtum/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call('mv build/out/qtum-osx-signed.dmg ../qtum-binaries/'+args.version+'/qtum-'+args.version+'-osx.dmg', shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Signed Sigs\n')
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'add', args.version+'-win-signed/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-osx-signed/'+args.signer])
subprocess.check_call(['git', 'commit', '-a', '-m', 'Add '+args.version+' signed binary sigs for '+args.signer])
os.chdir(workdir)
def verify():
global args, workdir
rc = 0
os.chdir('gitian-builder')
print('\nVerifying v'+args.version+' Linux\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-linux', '../qtum/contrib/gitian-descriptors/gitian-linux.yml']):
print('Verifying v'+args.version+' Linux FAILED\n')
rc = 1
print('\nVerifying v'+args.version+' Windows\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-unsigned', '../qtum/contrib/gitian-descriptors/gitian-win.yml']):
print('Verifying v'+args.version+' Windows FAILED\n')
rc = 1
print('\nVerifying v'+args.version+' MacOS\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-unsigned', '../qtum/contrib/gitian-descriptors/gitian-osx.yml']):
print('Verifying v'+args.version+' MacOS FAILED\n')
rc = 1
print('\nVerifying v'+args.version+' Signed Windows\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-signed', '../qtum/contrib/gitian-descriptors/gitian-win-signer.yml']):
print('Verifying v'+args.version+' Signed Windows FAILED\n')
rc = 1
print('\nVerifying v'+args.version+' Signed MacOS\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-signed', '../qtum/contrib/gitian-descriptors/gitian-osx-signer.yml']):
print('Verifying v'+args.version+' Signed MacOS FAILED\n')
rc = 1
os.chdir(workdir)
return rc
def main():
global args, workdir
parser = argparse.ArgumentParser(description='Script for running full Gitian builds.')
parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch')
parser.add_argument('-p', '--pull', action='store_true', dest='pull', help='Indicate that the version argument is the number of a github repository pull request')
parser.add_argument('-u', '--url', dest='url', default='https://github.com/qtumproject/qtum', help='Specify the URL of the repository. Default is %(default)s')
parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build')
parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build')
parser.add_argument('-s', '--sign', action='store_true', dest='sign', help='Make signed binaries for Windows and MacOS')
parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries')
parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS')
parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s')
parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s')
parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC')
parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC')
parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Only works on Debian-based systems (Ubuntu, Debian)')
parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.')
parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git')
parser.add_argument('signer', nargs='?', help='GPG signer to sign each build assert file')
parser.add_argument('version', nargs='?', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified')
args = parser.parse_args()
workdir = os.getcwd()
args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs'])
if args.kvm and args.docker:
raise Exception('Error: cannot have both kvm and docker')
# Ensure no more than one environment variable for gitian-builder (USE_LXC, USE_VBOX, USE_DOCKER) is set as they
# can interfere (e.g., USE_LXC being set shadows USE_DOCKER; for details see gitian-builder/libexec/make-clean-vm).
os.environ['USE_LXC'] = ''
os.environ['USE_VBOX'] = ''
os.environ['USE_DOCKER'] = ''
if args.docker:
os.environ['USE_DOCKER'] = '1'
elif not args.kvm:
os.environ['USE_LXC'] = '1'
if 'GITIAN_HOST_IP' not in os.environ.keys():
os.environ['GITIAN_HOST_IP'] = '10.0.3.1'
if 'LXC_GUEST_IP' not in os.environ.keys():
os.environ['LXC_GUEST_IP'] = '10.0.3.5'
if args.setup:
setup()
if args.buildsign:
args.build = True
args.sign = True
if not args.build and not args.sign and not args.verify:
sys.exit(0)
args.linux = 'l' in args.os
args.windows = 'w' in args.os
args.macos = 'm' in args.os
# Disable for MacOS if no SDK found
if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.11.sdk.tar.gz'):
print('Cannot build for MacOS, SDK does not exist. Will build for other OSes')
args.macos = False
args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign'
script_name = os.path.basename(sys.argv[0])
if not args.signer:
print(script_name+': Missing signer')
print('Try '+script_name+' --help for more information')
sys.exit(1)
if not args.version:
print(script_name+': Missing version')
print('Try '+script_name+' --help for more information')
sys.exit(1)
# Add leading 'v' for tags
if args.commit and args.pull:
raise Exception('Cannot have both commit and pull')
args.commit = ('' if args.commit else 'v') + args.version
os.chdir('qtum')
if args.pull:
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
os.chdir('../gitian-builder/inputs/qtum')
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
args.commit = subprocess.check_output(['git', 'show', '-s', '--format=%H', 'FETCH_HEAD'], universal_newlines=True, encoding='utf8').strip()
args.version = 'pull-' + args.version
print(args.commit)
subprocess.check_call(['git', 'fetch'])
subprocess.check_call(['git', 'checkout', args.commit])
os.chdir(workdir)
os.chdir('gitian-builder')
subprocess.check_call(['git', 'pull'])
os.chdir(workdir)
if args.build:
build()
if args.sign:
sign()
if args.verify:
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'pull'])
os.chdir(workdir)
sys.exit(verify())
if __name__ == '__main__':
main()
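# Illustrative invocations (not part of the original file); the signer name and
# version are placeholders:
#
#   ./gitian-build.py --setup mysigner 0.18.0
#   ./gitian-build.py --build -j 4 -m 4000 mysigner 0.18.0
#   ./gitian-build.py --verify mysigner 0.18.0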
| [] | [] | [
"USE_DOCKER",
"USE_LXC",
"USE_VBOX",
"GITIAN_HOST_IP",
"LXC_GUEST_IP"
] | [] | ["USE_DOCKER", "USE_LXC", "USE_VBOX", "GITIAN_HOST_IP", "LXC_GUEST_IP"] | python | 5 | 0 | |
parsons/google/google_sheets.py | import os
import json
import logging
from parsons.etl.table import Table
from parsons.google.utitities import setup_google_application_credentials
import gspread
from google.oauth2.service_account import Credentials
logger = logging.getLogger(__name__)
class GoogleSheets:
"""
A connector for Google Sheets, handling data import and export.
`Args:`
google_keyfile_dict: dict
A dictionary of Google Drive API credentials, parsed from JSON provided
by the Google Developer Console. Required if env variable
``GOOGLE_DRIVE_CREDENTIALS`` is not populated.
subject: string
In order to use account impersonation, pass in the email address of the account to be
impersonated as a string.
"""
def __init__(self, google_keyfile_dict=None, subject=None):
scope = [
'https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive',
]
setup_google_application_credentials(google_keyfile_dict, 'GOOGLE_DRIVE_CREDENTIALS')
google_credential_file = open(os.environ['GOOGLE_DRIVE_CREDENTIALS'])
credentials_dict = json.load(google_credential_file)
credentials = Credentials.from_service_account_info(
credentials_dict, scopes=scope, subject=subject
)
self.gspread_client = gspread.authorize(credentials)
def _get_worksheet(self, spreadsheet_id, worksheet=0):
# Internal method to retrieve a worksheet object.
# Check if the worksheet is an integer, if so find the sheet by index
if isinstance(worksheet, int):
return self.gspread_client.open_by_key(spreadsheet_id).get_worksheet(worksheet)
elif isinstance(worksheet, str):
idx = self.list_worksheets(spreadsheet_id).index(worksheet)
try:
return self.gspread_client.open_by_key(spreadsheet_id).get_worksheet(idx)
except: # noqa: E722
raise ValueError(f"Couldn't find worksheet {worksheet}")
else:
raise ValueError(f"Couldn't find worksheet index or title {worksheet}")
def list_worksheets(self, spreadsheet_id):
"""
Return a list of worksheets in the spreadsheet.
`Args:`
spreadsheet_id: str
The ID of the spreadsheet (Tip: Get this from the spreadsheet URL)
`Returns:`
list
A List of worksheets order by their index
"""
worksheets = self.gspread_client.open_by_key(spreadsheet_id).worksheets()
return [w.title for w in worksheets]
def get_worksheet_index(self, spreadsheet_id, title):
"""
Get the index of the first sheet in a Google spreadsheet with the given title. The
title is case sensitive and indexing begins at 0.
`Args:`
spreadsheet_id: str
The ID of the spreadsheet (Tip: Get this from the spreadsheet URL)
title: str
The sheet title
`Returns:`
int
The sheet index
"""
sheets = self.gspread_client.open_by_key(spreadsheet_id).worksheets()
for index, sheet in enumerate(sheets):
if sheet.title == title:
return index
raise ValueError(f"Couldn't find sheet with title {title}")
def get_worksheet(self, spreadsheet_id, worksheet=0):
"""
Create a ``parsons table`` from a sheet in a Google spreadsheet, given the sheet index.
`Args:`
spreadsheet_id: str
The ID of the spreadsheet (Tip: Get this from the spreadsheet URL)
worksheet: str or int
The index or the title of the worksheet. The index begins with
0.
`Returns:`
Parsons Table
See :ref:`parsons-table` for output options.
"""
worksheet = self._get_worksheet(spreadsheet_id, worksheet)
tbl = Table(worksheet.get_all_values())
logger.info(f'Retrieved worksheet with {tbl.num_rows} rows.')
return tbl
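# Illustrative usage (not part of the original file); the spreadsheet ID and worksheet
# title below are placeholders:
#
#   gs = GoogleSheets()  # expects GOOGLE_DRIVE_CREDENTIALS or a google_keyfile_dict
#   tbl = gs.get_worksheet('1AbCdEfGhIjKlMnOpQrStUvWxYz', worksheet='Sheet1')
#   print(tbl.num_rows)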
def share_spreadsheet(self, spreadsheet_id, sharee, share_type='user', role='reader',
notify=True, notify_message=None, with_link=False):
"""
Share a spreadsheet with a user, group of users, domain and/or the public.
`Args:`
spreadsheet_id: str
The ID of the spreadsheet (Tip: Get this from the spreadsheet URL)
sharee: str
User or group e-mail address, domain name to share the spreadsheet
with. To share publicly, set sharee value to ``None``.
share_type: str
The sharee type. Allowed values are: ``user``, ``group``, ``domain``,
``anyone``.
role: str
The primary role for this user. Allowed values are: ``owner``,
``writer``, ``reader``.
notify: boolean
Whether to send an email to the target user/domain.
email_message: str
The email to be sent if notify kwarg set to True.
with_link: boolean
Whether a link is required for this permission.
"""
spreadsheet = self.gspread_client.open_by_key(spreadsheet_id)
spreadsheet.share(sharee, share_type, role, notify=notify,
email_message=notify_message, with_link=with_link)
logger.info(f'Shared spreadsheet {spreadsheet_id}.')
def get_spreadsheet_permissions(self, spreadsheet_id):
"""
List the permissioned users and groups for a spreadsheet.
`Args:`
spreadsheet_id: str
The ID of the spreadsheet (Tip: Get this from the spreadsheet URL)
`Returns:`
Parsons Table
See :ref:`parsons-table` for output options.
"""
spreadsheet = self.gspread_client.open_by_key(spreadsheet_id)
tbl = Table(spreadsheet.list_permissions())
logger.info(f'Retrieved permissions for {spreadsheet_id} spreadsheet.')
return tbl
def create_spreadsheet(self, title, editor_email=None, folder_id=None):
"""
Create a Google spreadsheet from a Parsons table. Optionally shares the new doc with
the given email address. Optionally creates the sheet in a specified folder.
`Args:`
title: str
The human-readable title of the new spreadsheet
editor_email: str (optional)
Email address which should be given permissions on this spreadsheet
folder_id: str (optional)
ID of the Google folder where the spreadsheet should be created.
Tip: Get this from the folder URL.
Anyone shared on the folder will have access to the spreadsheet.
`Returns:`
str
The spreadsheet ID
"""
spreadsheet = self.gspread_client.create(title, folder_id=folder_id)
if editor_email:
self.gspread_client.insert_permission(
spreadsheet.id,
editor_email,
perm_type='user',
role='writer',
)
logger.info(f'Created spreadsheet {spreadsheet.id}')
return spreadsheet.id
def delete_spreadsheet(self, spreadsheet_id):
"""
Deletes a Google spreadsheet.
`Args:`
spreadsheet_id: str
The ID of the spreadsheet (Tip: Get this from the spreadsheet URL)
"""
self.gspread_client.del_spreadsheet(spreadsheet_id)
logger.info(f'Deleted spreadsheet {spreadsheet_id}')
def add_sheet(self, spreadsheet_id, title=None, rows=100, cols=25):
"""
Adds a sheet to a Google spreadsheet.
`Args:`
spreadsheet_id: str
The ID of the spreadsheet (Tip: Get this from the spreadsheet URL)
title: str
The title of the new worksheet
rows: int
Number of rows
cols: int
Number of columns
`Returns:`
str
The sheet index
"""
spreadsheet = self.gspread_client.open_by_key(spreadsheet_id)
spreadsheet.add_worksheet(title, rows, cols)
sheet_count = len(spreadsheet.worksheets())
logger.info('Created worksheet.')
return (sheet_count-1)
def append_to_sheet(self, spreadsheet_id, table, worksheet=0, user_entered_value=False,
**kwargs):
"""
Append data from a Parsons table to a Google sheet. Note that the table's columns are
ignored, as we'll be keeping whatever header row already exists in the Google sheet.
`Args:`
spreadsheet_id: str
The ID of the spreadsheet (Tip: Get this from the spreadsheet URL)
table: obj
Parsons table
worksheet: str or int
The index or the title of the worksheet. The index begins with
0.
user_entered_value: bool (optional)
If True, will submit cell values as entered (required for entering formulas).
Otherwise, values will be entered as strings or numbers only.
"""
# This is in here to ensure backwards compatibility with previous versions of Parsons.
if 'sheet_index' in kwargs:
worksheet = kwargs['sheet_index']
logger.warning('Argument deprecated. Use worksheet instead.')
sheet = self._get_worksheet(spreadsheet_id, worksheet)
# Grab the existing data, so we can figure out where to start adding new data as a batch.
# TODO Figure out a way to do a batch append without having to read the whole sheet first.
# Maybe use gspread's low-level batch_update().
existing_table = self.get_worksheet(spreadsheet_id, worksheet)
# If the existing sheet is blank, then just overwrite the table.
if existing_table.num_rows == 0:
return self.overwrite_sheet(spreadsheet_id, table, worksheet, user_entered_value)
cells = []
for row_num, row in enumerate(table.data):
for col_num, cell in enumerate(row):
# Add 2 to allow for the header row, and for google sheets indexing starting at 1
sheet_row_num = existing_table.num_rows + row_num + 2
cells.append(gspread.Cell(sheet_row_num, col_num + 1, cell))
value_input_option = 'RAW'
if user_entered_value:
value_input_option = 'USER_ENTERED'
# Update the data in one batch
sheet.update_cells(cells, value_input_option=value_input_option)
logger.info(f'Appended {table.num_rows} rows to worksheet.')
def overwrite_sheet(self, spreadsheet_id, table, worksheet=0, user_entered_value=False,
**kwargs):
"""
Replace the data in a Google sheet with a Parsons table, using the table's columns as the
first row.
`Args:`
spreadsheet_id: str
The ID of the spreadsheet (Tip: Get this from the spreadsheet URL)
table: obj
Parsons table
worksheet: str or int
The index or the title of the worksheet. The index begins with
0.
user_entered_value: bool (optional)
If True, will submit cell values as entered (required for entering formulas).
Otherwise, values will be entered as strings or numbers only.
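**Examples**
An illustrative sketch; everything currently in the worksheet is replaced.
.. code-block:: python
from parsons import Table
fresh_data = Table([{'metric': 'signups', 'count': 42}])
# The table's columns ('metric', 'count') become the new header row
gs.overwrite_sheet(sheet_id, fresh_data, worksheet=0)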
"""
# This is in here to ensure backwards compatibility with previous versions of Parsons.
if 'sheet_index' in kwargs:
worksheet = kwargs['sheet_index']
logger.warning('Argument deprecated. Use worksheet instead.')
sheet = self._get_worksheet(spreadsheet_id, worksheet)
sheet.clear()
value_input_option = 'RAW'
if user_entered_value:
value_input_option = 'USER_ENTERED'
# Add header row
sheet.append_row(table.columns, value_input_option=value_input_option)
cells = []
for row_num, row in enumerate(table.data):
for col_num, cell in enumerate(row):
# We start at row #2 to keep room for the header row we added above
cells.append(gspread.Cell(row_num + 2, col_num + 1, cell))
# Update the data in one batch
sheet.update_cells(cells, value_input_option=value_input_option)
logger.info('Overwrote worksheet.')
def format_cells(self, spreadsheet_id, range, cell_format, worksheet=0):
"""
Format the cells of a worksheet.
`Args:`
spreadsheet_id: str
The ID of the spreadsheet (Tip: Get this from the spreadsheet URL)
range: str
The cell range to format. E.g. ``"A2"`` or ``"A2:B100"``
cell_format: dict
The formatting to apply to the range. Full options are specified in
the GoogleSheets `API documentation <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/cells#cellformat>`_.
worksheet: str or int
The index or the title of the worksheet. The index begins with
0.
**Examples**
.. code-block:: python
# Set 'A4' cell's text format to bold
gs.format_cells(sheet_id, "A4", {"textFormat": {"bold": True}}, worksheet=0)
# Color the background of the 'A2:B2' cell range black,
# change horizontal alignment, text color and font size
gs.format_cells(sheet_id, "A2:B2", {
"backgroundColor": {
"red": 0.0,
"green": 0.0,
"blue": 0.0
},
"horizontalAlignment": "CENTER",
"textFormat": {
"foregroundColor": {
"red": 1.0,
"green": 1.0,
"blue": 0.0
},
"fontSize": 12,
"bold": True
}
}, worksheet=0)
""" # noqa: E501,E261
ws = self._get_worksheet(spreadsheet_id, worksheet)
ws.format(range, cell_format)
logger.info('Formatted worksheet')
def read_sheet(self, spreadsheet_id, sheet_index=0):
# Deprecated method v0.14 of Parsons.
logger.warning('Deprecated method. Use get_worksheet() instead.')
return self.get_worksheet(spreadsheet_id, sheet_index)
def read_sheet_with_title(self, spreadsheet_id, title):
# Deprecated method v0.14 of Parsons.
logger.warning('Deprecated method. Use get_worksheet() instead.')
return self.get_worksheet(spreadsheet_id, title)
def get_sheet_index_with_title(self, spreadsheet_id, title):
# Deprecated method v0.14 of Parsons.
logger.warning('Deprecated method. Use get_worksheet_index instead.')
return self.get_worksheet_index(spreadsheet_id, title)
| [] | [] | [
"GOOGLE_DRIVE_CREDENTIALS"
] | [] | ["GOOGLE_DRIVE_CREDENTIALS"] | python | 1 | 0 | |
blockstore/splitstore/splitstore.go | package splitstore
import (
"context"
"errors"
"os"
"sync"
"sync/atomic"
"time"
"go.uber.org/multierr"
"golang.org/x/xerrors"
blocks "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid"
dstore "github.com/ipfs/go-datastore"
logging "github.com/ipfs/go-log/v2"
"github.com/filecoin-project/go-state-types/abi"
bstore "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/metrics"
"go.opencensus.io/stats"
)
var (
// baseEpochKey stores the base epoch (last compaction epoch) in the
// metadata store.
baseEpochKey = dstore.NewKey("/splitstore/baseEpoch")
// warmupEpochKey stores whether a hot store warmup has been performed.
// On first start, the splitstore will walk the state tree and will copy
// all active blocks into the hotstore.
warmupEpochKey = dstore.NewKey("/splitstore/warmupEpoch")
// markSetSizeKey stores the current estimate for the mark set size.
// this is first computed at warmup and updated in every compaction
markSetSizeKey = dstore.NewKey("/splitstore/markSetSize")
// compactionIndexKey stores the compaction index (serial number)
compactionIndexKey = dstore.NewKey("/splitstore/compactionIndex")
log = logging.Logger("splitstore")
// set this to true if you are debugging the splitstore to enable debug logging
enableDebugLog = false
// set this to true if you want to track origin stack traces in the write log
enableDebugLogWriteTraces = false
)
func init() {
if os.Getenv("LOTUS_SPLITSTORE_DEBUG_LOG") == "1" {
enableDebugLog = true
}
if os.Getenv("LOTUS_SPLITSTORE_DEBUG_LOG_WRITE_TRACES") == "1" {
enableDebugLogWriteTraces = true
}
}
type Config struct {
// MarkSetType is the type of mark set to use.
//
// Only current sane value is "map", but we may add an option for a disk-backed
// markset for memory-constrained situations.
MarkSetType string
// DiscardColdBlocks indicates whether to skip moving cold blocks to the coldstore.
// If the splitstore is running with a noop coldstore then this option is set to true
// which skips moving (as it is a noop, but still takes time to read all the cold objects)
// and directly purges cold blocks.
DiscardColdBlocks bool
}
// ChainAccessor allows the Splitstore to access the chain. It will most likely
// be a ChainStore at runtime.
type ChainAccessor interface {
GetTipsetByHeight(context.Context, abi.ChainEpoch, *types.TipSet, bool) (*types.TipSet, error)
GetHeaviestTipSet() *types.TipSet
SubscribeHeadChanges(change func(revert []*types.TipSet, apply []*types.TipSet) error)
}
// hotstore is the interface that must be satisfied by the hot blockstore; it is an extension
// of the Blockstore interface with the traits we need for compaction.
type hotstore interface {
bstore.Blockstore
bstore.BlockstoreIterator
}
type SplitStore struct {
compacting int32 // compaction/prune/warmup in progress
closing int32 // the splitstore is closing
cfg *Config
mx sync.Mutex
warmupEpoch abi.ChainEpoch // protected by mx
baseEpoch abi.ChainEpoch // protected by compaction lock
headChangeMx sync.Mutex
coldPurgeSize int
chain ChainAccessor
ds dstore.Datastore
cold bstore.Blockstore
hot hotstore
markSetEnv MarkSetEnv
markSetSize int64
compactionIndex int64
ctx context.Context
cancel func()
debug *debugLog
// transactional protection for concurrent read/writes during compaction
txnLk sync.RWMutex
txnViewsMx sync.Mutex
txnViewsCond sync.Cond
txnViews int
txnViewsWaiting bool
txnActive bool
txnProtect MarkSet
txnRefsMx sync.Mutex
txnRefs map[cid.Cid]struct{}
txnMissing map[cid.Cid]struct{}
}
var _ bstore.Blockstore = (*SplitStore)(nil)
// Open opens an existing splitstore, or creates a new splitstore. The splitstore
// is backed by the provided hot and cold stores. The returned SplitStore MUST be
// attached to the ChainStore with Start in order to trigger compaction.
func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Config) (*SplitStore, error) {
// hot blockstore must support the hotstore interface
hots, ok := hot.(hotstore)
if !ok {
// be specific about what is missing
if _, ok := hot.(bstore.BlockstoreIterator); !ok {
return nil, xerrors.Errorf("hot blockstore does not support efficient iteration: %T", hot)
}
return nil, xerrors.Errorf("hot blockstore does not support the necessary traits: %T", hot)
}
// the markset env
markSetEnv, err := OpenMarkSetEnv(path, cfg.MarkSetType)
if err != nil {
return nil, err
}
// and now we can make a SplitStore
ss := &SplitStore{
cfg: cfg,
ds: ds,
cold: cold,
hot: hots,
markSetEnv: markSetEnv,
coldPurgeSize: defaultColdPurgeSize,
}
ss.txnViewsCond.L = &ss.txnViewsMx
ss.ctx, ss.cancel = context.WithCancel(context.Background())
if enableDebugLog {
ss.debug, err = openDebugLog(path)
if err != nil {
return nil, err
}
}
return ss, nil
}
// Blockstore interface
func (s *SplitStore) DeleteBlock(_ cid.Cid) error {
// afaict we don't seem to be using this method, so it's not implemented
return errors.New("DeleteBlock not implemented on SplitStore; don't do this Luke!") //nolint
}
func (s *SplitStore) DeleteMany(_ []cid.Cid) error {
// afaict we don't seem to be using this method, so it's not implemented
return errors.New("DeleteMany not implemented on SplitStore; don't do this Luke!") //nolint
}
func (s *SplitStore) Has(cid cid.Cid) (bool, error) {
if isIdentiyCid(cid) {
return true, nil
}
s.txnLk.RLock()
defer s.txnLk.RUnlock()
has, err := s.hot.Has(cid)
if err != nil {
return has, err
}
if has {
s.trackTxnRef(cid)
return true, nil
}
return s.cold.Has(cid)
}
func (s *SplitStore) Get(cid cid.Cid) (blocks.Block, error) {
if isIdentiyCid(cid) {
data, err := decodeIdentityCid(cid)
if err != nil {
return nil, err
}
return blocks.NewBlockWithCid(data, cid)
}
s.txnLk.RLock()
defer s.txnLk.RUnlock()
blk, err := s.hot.Get(cid)
switch err {
case nil:
s.trackTxnRef(cid)
return blk, nil
case bstore.ErrNotFound:
if s.isWarm() {
s.debug.LogReadMiss(cid)
}
blk, err = s.cold.Get(cid)
if err == nil {
stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
}
return blk, err
default:
return nil, err
}
}
func (s *SplitStore) GetSize(cid cid.Cid) (int, error) {
if isIdentiyCid(cid) {
data, err := decodeIdentityCid(cid)
if err != nil {
return 0, err
}
return len(data), nil
}
s.txnLk.RLock()
defer s.txnLk.RUnlock()
size, err := s.hot.GetSize(cid)
switch err {
case nil:
s.trackTxnRef(cid)
return size, nil
case bstore.ErrNotFound:
if s.isWarm() {
s.debug.LogReadMiss(cid)
}
size, err = s.cold.GetSize(cid)
if err == nil {
stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
}
return size, err
default:
return 0, err
}
}
func (s *SplitStore) Put(blk blocks.Block) error {
if isIdentiyCid(blk.Cid()) {
return nil
}
s.txnLk.RLock()
defer s.txnLk.RUnlock()
err := s.hot.Put(blk)
if err != nil {
return err
}
s.debug.LogWrite(blk)
s.trackTxnRef(blk.Cid())
return nil
}
func (s *SplitStore) PutMany(blks []blocks.Block) error {
// filter identities
idcids := 0
for _, blk := range blks {
if isIdentiyCid(blk.Cid()) {
idcids++
}
}
if idcids > 0 {
if idcids == len(blks) {
// it's all identities
return nil
}
filtered := make([]blocks.Block, 0, len(blks)-idcids)
for _, blk := range blks {
if isIdentiyCid(blk.Cid()) {
continue
}
filtered = append(filtered, blk)
}
blks = filtered
}
batch := make([]cid.Cid, 0, len(blks))
for _, blk := range blks {
batch = append(batch, blk.Cid())
}
s.txnLk.RLock()
defer s.txnLk.RUnlock()
err := s.hot.PutMany(blks)
if err != nil {
return err
}
s.debug.LogWriteMany(blks)
s.trackTxnRefMany(batch)
return nil
}
func (s *SplitStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
ctx, cancel := context.WithCancel(ctx)
chHot, err := s.hot.AllKeysChan(ctx)
if err != nil {
cancel()
return nil, err
}
chCold, err := s.cold.AllKeysChan(ctx)
if err != nil {
cancel()
return nil, err
}
seen := cid.NewSet()
ch := make(chan cid.Cid, 8) // buffer is arbitrary, just enough to avoid context switches
go func() {
defer cancel()
defer close(ch)
for _, in := range []<-chan cid.Cid{chHot, chCold} {
for c := range in {
// ensure we only emit each key once
if !seen.Visit(c) {
continue
}
select {
case ch <- c:
case <-ctx.Done():
return
}
}
}
}()
return ch, nil
}
func (s *SplitStore) HashOnRead(enabled bool) {
s.hot.HashOnRead(enabled)
s.cold.HashOnRead(enabled)
}
func (s *SplitStore) View(cid cid.Cid, cb func([]byte) error) error {
if isIdentiyCid(cid) {
data, err := decodeIdentityCid(cid)
if err != nil {
return err
}
return cb(data)
}
// views are (optimistically) protected two-fold:
// - if there is an active transaction, then the reference is protected.
// - if there is no active transaction, active views are tracked in a
// wait group and compaction is inhibited from starting until they
// have all completed. this is necessary to ensure that a (very) long-running
// view can't have its data pointer deleted, which would be catastrophic.
// Note that we can't just RLock for the duration of the view, as this could
// lead to deadlock with recursive views.
s.protectView(cid)
defer s.viewDone()
err := s.hot.View(cid, cb)
switch err {
case bstore.ErrNotFound:
if s.isWarm() {
s.debug.LogReadMiss(cid)
}
err = s.cold.View(cid, cb)
if err == nil {
stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
}
return err
default:
return err
}
}
func (s *SplitStore) isWarm() bool {
s.mx.Lock()
defer s.mx.Unlock()
return s.warmupEpoch > 0
}
// State tracking
func (s *SplitStore) Start(chain ChainAccessor) error {
s.chain = chain
curTs := chain.GetHeaviestTipSet()
// should we warmup
warmup := false
// load base epoch from metadata ds
// if none, then use current epoch because it's a fresh start
bs, err := s.ds.Get(baseEpochKey)
switch err {
case nil:
s.baseEpoch = bytesToEpoch(bs)
case dstore.ErrNotFound:
if curTs == nil {
// this can happen in some tests
break
}
err = s.setBaseEpoch(curTs.Height())
if err != nil {
return xerrors.Errorf("error saving base epoch: %w", err)
}
default:
return xerrors.Errorf("error loading base epoch: %w", err)
}
// load warmup epoch from metadata ds
bs, err = s.ds.Get(warmupEpochKey)
switch err {
case nil:
s.warmupEpoch = bytesToEpoch(bs)
case dstore.ErrNotFound:
warmup = true
default:
return xerrors.Errorf("error loading warmup epoch: %w", err)
}
// load markSetSize from metadata ds to provide a size hint for marksets
bs, err = s.ds.Get(markSetSizeKey)
switch err {
case nil:
s.markSetSize = bytesToInt64(bs)
case dstore.ErrNotFound:
default:
return xerrors.Errorf("error loading mark set size: %w", err)
}
// load compactionIndex from metadata ds to provide a hint as to when to perform moving gc
bs, err = s.ds.Get(compactionIndexKey)
switch err {
case nil:
s.compactionIndex = bytesToInt64(bs)
case dstore.ErrNotFound:
// this is potentially an upgrade from splitstore v0; schedule a warmup as v0 has
// some issues with hot references leaking into the coldstore.
warmup = true
default:
return xerrors.Errorf("error loading compaction index: %w", err)
}
log.Infow("starting splitstore", "baseEpoch", s.baseEpoch, "warmupEpoch", s.warmupEpoch)
if warmup {
err = s.warmup(curTs)
if err != nil {
return xerrors.Errorf("error starting warmup: %w", err)
}
}
// watch the chain
chain.SubscribeHeadChanges(s.HeadChange)
return nil
}
func (s *SplitStore) Close() error {
if !atomic.CompareAndSwapInt32(&s.closing, 0, 1) {
// already closing
return nil
}
if atomic.LoadInt32(&s.compacting) == 1 {
log.Warn("close with ongoing compaction in progress; waiting for it to finish...")
for atomic.LoadInt32(&s.compacting) == 1 {
time.Sleep(time.Second)
}
}
s.cancel()
return multierr.Combine(s.markSetEnv.Close(), s.debug.Close())
}
func (s *SplitStore) checkClosing() error {
if atomic.LoadInt32(&s.closing) == 1 {
return xerrors.Errorf("splitstore is closing")
}
return nil
}
func (s *SplitStore) setBaseEpoch(epoch abi.ChainEpoch) error {
s.baseEpoch = epoch
return s.ds.Put(baseEpochKey, epochToBytes(epoch))
}
| [
"\"LOTUS_SPLITSTORE_DEBUG_LOG\"",
"\"LOTUS_SPLITSTORE_DEBUG_LOG_WRITE_TRACES\""
] | [] | [
"LOTUS_SPLITSTORE_DEBUG_LOG_WRITE_TRACES",
"LOTUS_SPLITSTORE_DEBUG_LOG"
] | [] | ["LOTUS_SPLITSTORE_DEBUG_LOG_WRITE_TRACES", "LOTUS_SPLITSTORE_DEBUG_LOG"] | go | 2 | 0 | |
hwi/src/java/org/apache/hadoop/hive/hwi/HWIServer.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.hwi;
import java.io.File;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.shims.JettyShims;
import org.apache.hadoop.hive.shims.ShimLoader;
/**
* This is the entry point for HWI. A web server is invoked in the same manner
* as the hive CLI. Rather than opening a command line session, a web server is
* started along with a web application for working with hive.
*/
public class HWIServer {
protected static final Log l4j = LogFactory.getLog(HWIServer.class.getName());
private JettyShims.Server webServer;
private final String[] args;
/**
*
* @param args
* These are the command line arguments. Usually -hiveconf.
* @throws java.io.IOException
*/
public HWIServer(String[] args) throws IOException {
this.args = args;
}
/**
* This method initializes the internal Jetty Servlet Engine. It adds the hwi
* context path.
*
* @throws java.io.IOException
* Port already in use, bad bind etc.
*/
public void start() throws IOException {
HiveConf conf = new HiveConf(this.getClass());
String listen = null;
int port = -1;
listen = conf.getVar(HiveConf.ConfVars.HIVEHWILISTENHOST);
port = conf.getIntVar(HiveConf.ConfVars.HIVEHWILISTENPORT);
if (listen.equals("")) {
l4j.warn("hive.hwi.listen.host was not specified defaulting to 0.0.0.0");
listen = "0.0.0.0";
}
if (port == -1) {
l4j.warn("hive.hwi.listen.port was not specified defaulting to 9999");
port = 9999;
}
String hwiWAR = conf.getVar(HiveConf.ConfVars.HIVEHWIWARFILE);
String hivehome = System.getenv().get("HIVE_HOME");
File hwiWARFile = new File(hivehome, hwiWAR);
if (!hwiWARFile.exists()) {
l4j.fatal("HWI WAR file not found at " + hwiWAR);
System.exit(1);
}
webServer = ShimLoader.getJettyShims().startServer(listen, port);
webServer.addWar(hwiWARFile.toString(), "/hwi");
/*
* The command line args may be used by multiple components. Rather than
* passing them explicitly to each component, we set them as a system
* property.
*/
StringBuilder sb = new StringBuilder();
for (String arg : args) {
sb.append(arg + " ");
}
System.setProperty("hwi-args", sb.toString());
try {
while (true) {
try {
webServer.start();
webServer.join();
l4j.debug(" HWI Web Server is started.");
break;
} catch (org.mortbay.util.MultiException ex) {
throw ex;
}
}
} catch (IOException ie) {
throw ie;
} catch (Exception e) {
IOException ie = new IOException("Problem starting HWI server");
ie.initCause(e);
l4j.error("Parsing hwi.listen.port caused exception ", e);
throw ie;
}
}
/**
*
* @param args
* as of now no arguments are supported
* @throws java.lang.Exception
* Could be thrown if due to issues with Jetty or bad configuration
* options
*
*/
public static void main(String[] args) throws Exception {
HWIServer hwi = new HWIServer(args);
l4j.info("HWI is starting up");
hwi.start();
}
/**
* Shut down the running HWI Server.
*
* @throws Exception
* Running Thread.stop() can and probably will throw this
*/
public void stop() throws Exception {
l4j.info("HWI is shutting down");
webServer.stop();
}
}
| [] | [] | [] | [] | [] | java | 0 | 0 | |
app.py | import importlib
import inspect
import os
import flask
import mercadopago
class App(flask.Flask):
def __init__(self):
super().__init__(__name__)
self.mp = mercadopago.SDK(os.environ["MERCADO_PAGO_ACCESS_TOKEN"])
self._load_blueprints()
def _load_blueprints(self):
for root, _, filenames in os.walk("blueprints"):
for fn in filenames:
if not fn.endswith(".py"):
continue
module = importlib.import_module(root.replace(os.sep, ".") + "." + fn[:-3])
blueprint = inspect.getmembers(module, lambda o: isinstance(o, flask.Blueprint))[0][1]
self.register_blueprint(blueprint)
app = App()
| [] | [] | [
"MERCADO_PAGO_ACCESS_TOKEN"
] | [] | ["MERCADO_PAGO_ACCESS_TOKEN"] | python | 1 | 0 | |
linkconfig_example.py | # LinkBot Configuration
import os
from ast import literal_eval
API_TOKEN = os.environ.get('API_TOKEN')
LOG_FILE = os.environ.get('LOG_FILE', 'linkbot.log')
JIRA_HOST = os.environ.get('JIRA_HOST')
UW_SAML_CREDENTIALS = os.environ.get('UW_SAML_CREDENTIALS')
SERVICE_NOW_HOST = os.environ.get('SERVICE_NOW_HOST')
SERVICE_NOW_CREDENTIALS = os.environ.get('SERVICE_NOW_CREDENTIALS')
LINKBOTS = []
if SERVICE_NOW_HOST and SERVICE_NOW_CREDENTIALS:
LINKBOTS.append({
'LINK_CLASS': 'ServiceNowBot',
'HOST': SERVICE_NOW_HOST,
'AUTH': literal_eval(SERVICE_NOW_CREDENTIALS)
})
if JIRA_HOST:
if UW_SAML_CREDENTIALS:
LINKBOTS.append({
'LINK_CLASS': 'JiraLinkBot',
'HOST': JIRA_HOST,
'AUTH': literal_eval(UW_SAML_CREDENTIALS)
})
else:
LINKBOTS += [
{
'MATCH': 'req[0-9]+',
'LINK': '<{}/u_simple_requests.do?sysparm_type=labels'
'&sysparm_table=u_simple_requests'
'&sysparm_query=number=%s|%s>'.format(JIRA_HOST)
},
{
'MATCH': 'inc[0-9]+',
'LINK': '<{}/incident.do?sys_id=%s|%s>'.format(JIRA_HOST)
},
{
'MATCH': '[Kk][Bb][0-9]+',
'LINK': '<{}/nav_to.do?uri=/kb_view.do?'
'sysparm_article=%s|%s>'.format(JIRA_HOST),
},
]
| [] | [] | [
"UW_SAML_CREDENTIALS",
"SERVICE_NOW_CREDENTIALS",
"API_TOKEN",
"LOG_FILE",
"JIRA_HOST",
"SERVICE_NOW_HOST"
] | [] | ["UW_SAML_CREDENTIALS", "SERVICE_NOW_CREDENTIALS", "API_TOKEN", "LOG_FILE", "JIRA_HOST", "SERVICE_NOW_HOST"] | python | 6 | 0 | |
support/exectest.py | #! python
## Copyright (c) 2018-2021, Carnegie Mellon University
## See LICENSE for details
import sys
import subprocess
import os
if len(sys.argv) < 4:
print ( 'Usage: ' + sys.argv[0] + ' gpu_flag gap_exe_name gap_fil1 ... gap_filN' )
sys.exit ( 'missing argument(s)' )
gpu_flag = sys.argv[1]
gap_exe = sys.argv[2]
gap_dir = os.path.dirname(gap_exe)
nscrpt = len(sys.argv) - 3
fils = sys.argv[3:3+nscrpt] ## all args after name and gap_exe
## print ( sys.argv[0] + ': gap_exe = ' + gap_exe + ' gpu-required-flag = ' + gpu_flag + ' and ' + str(nscrpt) + ' input files: ' )
## print ( fils )
## build a command string like this: cat gap_fil1 ... gap_filN | gap_exe
cmdstr = 'cat '
for val in fils:
cmdstr = cmdstr + val + ' '
cmdstr = cmdstr + '| ' + gap_exe
## print ( cmdstr )
spiral_path = os.getenv('SPIRAL_HOME', default=gap_dir)
if sys.platform == 'win32':
checkgpustr = spiral_path + '/gap/bin/checkforGpu.exe'
else:
checkgpustr = spiral_path + '/gap/bin/checkforGpu'
## print ( 'Test for GPU using ' + checkgpustr )
if (gpu_flag == 'True' or gpu_flag == 'true' or gpu_flag == 'TRUE'):
## GPU is required, test for one
## print ( 'Testing for GPU' )
result = subprocess.run ( checkgpustr ) ## , shell=True, check=True
res = result.returncode
if (res != 0):
print ( 'No suitable GPU found: Skipping test' )
sys.exit ( 0 ) ## exit normally so failed test is not indicated
## else:
## print ( 'A GPU was found -- run the test' )
result = subprocess.run ( cmdstr, shell=True, check=True )
res = result.returncode
if (res != 0):
print ( result )
sys.exit ( res )
sys.exit ( res )
| [] | [] | [
"SPIRAL_HOME"
] | [] | ["SPIRAL_HOME"] | python | 1 | 0 | |
vendor/cloud.google.com/go/datastore/datastore.go | // Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore
import (
"errors"
"fmt"
"log"
"os"
"reflect"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
"google.golang.org/api/option"
gtransport "google.golang.org/api/transport/grpc"
pb "google.golang.org/genproto/googleapis/datastore/v1"
"google.golang.org/grpc"
)
const (
prodAddr = "datastore.googleapis.com:443"
userAgent = "gcloud-golang-datastore/20160401"
)
// ScopeDatastore grants permissions to view and/or manage datastore entities
const ScopeDatastore = "https://www.googleapis.com/auth/datastore"
// resourcePrefixHeader is the name of the metadata header used to indicate
// the resource being operated on.
const resourcePrefixHeader = "google-cloud-resource-prefix"
// Client is a client for reading and writing data in a datastore dataset.
type Client struct {
conn *grpc.ClientConn
client pb.DatastoreClient
endpoint string
dataset string // Called dataset by the datastore API, synonym for project ID.
}
// NewClient creates a new Client for a given dataset.
// If the project ID is empty, it is derived from the DATASTORE_PROJECT_ID environment variable.
// If the DATASTORE_EMULATOR_HOST environment variable is set, client will use its value
// to connect to a locally-running datastore emulator.
func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
var o []option.ClientOption
// Environment variables for gcd emulator:
// https://cloud.google.com/datastore/docs/tools/datastore-emulator
// If the emulator is available, dial it directly (and don't pass any credentials).
if addr := os.Getenv("DATASTORE_EMULATOR_HOST"); addr != "" {
conn, err := grpc.Dial(addr, grpc.WithInsecure())
if err != nil {
return nil, fmt.Errorf("grpc.Dial: %v", err)
}
o = []option.ClientOption{option.WithGRPCConn(conn)}
} else {
o = []option.ClientOption{
option.WithEndpoint(prodAddr),
option.WithScopes(ScopeDatastore),
option.WithUserAgent(userAgent),
}
}
// Warn if we see the legacy emulator environment variables.
if os.Getenv("DATASTORE_HOST") != "" && os.Getenv("DATASTORE_EMULATOR_HOST") == "" {
log.Print("WARNING: legacy environment variable DATASTORE_HOST is ignored. Use DATASTORE_EMULATOR_HOST instead.")
}
if os.Getenv("DATASTORE_DATASET") != "" && os.Getenv("DATASTORE_PROJECT_ID") == "" {
log.Print("WARNING: legacy environment variable DATASTORE_DATASET is ignored. Use DATASTORE_PROJECT_ID instead.")
}
if projectID == "" {
projectID = os.Getenv("DATASTORE_PROJECT_ID")
}
if projectID == "" {
return nil, errors.New("datastore: missing project/dataset id")
}
o = append(o, opts...)
conn, err := gtransport.Dial(ctx, o...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}
return &Client{
conn: conn,
client: newDatastoreClient(conn, projectID),
dataset: projectID,
}, nil
}
var (
// ErrInvalidEntityType is returned when functions like Get or Next are
// passed a dst or src argument of invalid type.
ErrInvalidEntityType = errors.New("datastore: invalid entity type")
// ErrInvalidKey is returned when an invalid key is presented.
ErrInvalidKey = errors.New("datastore: invalid key")
// ErrNoSuchEntity is returned when no entity was found for a given key.
ErrNoSuchEntity = errors.New("datastore: no such entity")
)
type multiArgType int
const (
multiArgTypeInvalid multiArgType = iota
multiArgTypePropertyLoadSaver
multiArgTypeStruct
multiArgTypeStructPtr
multiArgTypeInterface
)
// ErrFieldMismatch is returned when a field is to be loaded into a different
// type than the one it was stored from, or when a field is missing or
// unexported in the destination struct.
// StructType is the type of the struct pointed to by the destination argument
// passed to Get or to Iterator.Next.
type ErrFieldMismatch struct {
StructType reflect.Type
FieldName string
Reason string
}
func (e *ErrFieldMismatch) Error() string {
return fmt.Sprintf("datastore: cannot load field %q into a %q: %s",
e.FieldName, e.StructType, e.Reason)
}
// GeoPoint represents a location as latitude/longitude in degrees.
type GeoPoint struct {
Lat, Lng float64
}
// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude.
func (g GeoPoint) Valid() bool {
return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180
}
func keyToProto(k *Key) *pb.Key {
if k == nil {
return nil
}
var path []*pb.Key_PathElement
for {
el := &pb.Key_PathElement{Kind: k.Kind}
if k.ID != 0 {
el.IdType = &pb.Key_PathElement_Id{Id: k.ID}
} else if k.Name != "" {
el.IdType = &pb.Key_PathElement_Name{Name: k.Name}
}
path = append(path, el)
if k.Parent == nil {
break
}
k = k.Parent
}
// The path should be in order [grandparent, parent, child]
// We did it backward above, so reverse back.
for i := 0; i < len(path)/2; i++ {
path[i], path[len(path)-i-1] = path[len(path)-i-1], path[i]
}
key := &pb.Key{Path: path}
if k.Namespace != "" {
key.PartitionId = &pb.PartitionId{
NamespaceId: k.Namespace,
}
}
return key
}
// protoToKey decodes a protocol buffer representation of a key into an
// equivalent *Key object. If the key is invalid, protoToKey will return the
// invalid key along with ErrInvalidKey.
func protoToKey(p *pb.Key) (*Key, error) {
var key *Key
var namespace string
if partition := p.PartitionId; partition != nil {
namespace = partition.NamespaceId
}
for _, el := range p.Path {
key = &Key{
Namespace: namespace,
Kind: el.Kind,
ID: el.GetId(),
Name: el.GetName(),
Parent: key,
}
}
if !key.valid() { // Also detects key == nil.
return key, ErrInvalidKey
}
return key, nil
}
// multiKeyToProto is a batch version of keyToProto.
func multiKeyToProto(keys []*Key) []*pb.Key {
ret := make([]*pb.Key, len(keys))
for i, k := range keys {
ret[i] = keyToProto(k)
}
return ret
}
// multiProtoToKey is a batch version of protoToKey.
func multiProtoToKey(keys []*pb.Key) ([]*Key, error) {
hasErr := false
ret := make([]*Key, len(keys))
err := make(MultiError, len(keys))
for i, k := range keys {
ret[i], err[i] = protoToKey(k)
if err[i] != nil {
hasErr = true
}
}
if hasErr {
return nil, err
}
return ret, nil
}
// multiValid is a batch version of Key.valid. It returns an error, not a
// []bool.
func multiValid(key []*Key) error {
invalid := false
for _, k := range key {
if !k.valid() {
invalid = true
break
}
}
if !invalid {
return nil
}
err := make(MultiError, len(key))
for i, k := range key {
if !k.valid() {
err[i] = ErrInvalidKey
}
}
return err
}
// checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct
// type S, for some interface type I, or some non-interface non-pointer type P
// such that P or *P implements PropertyLoadSaver.
//
// It returns what category the slice's elements are, and the reflect.Type
// that represents S, I or P.
//
// As a special case, PropertyList is an invalid type for v.
//
// TODO(djd): multiArg is very confusing. Fold this logic into the
// relevant Put/Get methods to make the logic less opaque.
func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {
if v.Kind() != reflect.Slice {
return multiArgTypeInvalid, nil
}
if v.Type() == typeOfPropertyList {
return multiArgTypeInvalid, nil
}
elemType = v.Type().Elem()
if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) {
return multiArgTypePropertyLoadSaver, elemType
}
switch elemType.Kind() {
case reflect.Struct:
return multiArgTypeStruct, elemType
case reflect.Interface:
return multiArgTypeInterface, elemType
case reflect.Ptr:
elemType = elemType.Elem()
if elemType.Kind() == reflect.Struct {
return multiArgTypeStructPtr, elemType
}
}
return multiArgTypeInvalid, nil
}
// Close closes the Client.
func (c *Client) Close() error {
return c.conn.Close()
}
// Get loads the entity stored for key into dst, which must be a struct pointer
// or implement PropertyLoadSaver. If there is no such entity for the key, Get
// returns ErrNoSuchEntity.
//
// The values of dst's unmatched struct fields are not modified, and matching
// slice-typed fields are not reset before appending to them. In particular, it
// is recommended to pass a pointer to a zero valued struct on each Get call.
//
// ErrFieldMismatch is returned when a field is to be loaded into a different
// type than the one it was stored from, or when a field is missing or
// unexported in the destination struct. ErrFieldMismatch is only returned if
// dst is a struct pointer.
func (c *Client) Get(ctx context.Context, key *Key, dst interface{}) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.Get")
defer func() { trace.EndSpan(ctx, err) }()
if dst == nil { // get catches nil interfaces; we need to catch nil ptr here
return ErrInvalidEntityType
}
err = c.get(ctx, []*Key{key}, []interface{}{dst}, nil)
if me, ok := err.(MultiError); ok {
return me[0]
}
return err
}
// GetMulti is a batch version of Get.
//
// dst must be a []S, []*S, []I or []P, for some struct type S, some interface
// type I, or some non-interface non-pointer type P such that P or *P
// implements PropertyLoadSaver. If an []I, each element must be a valid dst
// for Get: it must be a struct pointer or implement PropertyLoadSaver.
//
// As a special case, PropertyList is an invalid type for dst, even though a
// PropertyList is a slice of structs. It is treated as invalid to avoid being
// mistakenly passed when []PropertyList was intended.
func (c *Client) GetMulti(ctx context.Context, keys []*Key, dst interface{}) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.GetMulti")
defer func() { trace.EndSpan(ctx, err) }()
return c.get(ctx, keys, dst, nil)
}
func (c *Client) get(ctx context.Context, keys []*Key, dst interface{}, opts *pb.ReadOptions) error {
v := reflect.ValueOf(dst)
multiArgType, _ := checkMultiArg(v)
// Sanity checks
if multiArgType == multiArgTypeInvalid {
return errors.New("datastore: dst has invalid type")
}
if len(keys) != v.Len() {
return errors.New("datastore: keys and dst slices have different length")
}
if len(keys) == 0 {
return nil
}
// Go through keys, validate them, serialize them, and create a dict mapping them to their indices.
// Equal keys are deduped.
multiErr, any := make(MultiError, len(keys)), false
keyMap := make(map[string][]int, len(keys))
pbKeys := make([]*pb.Key, 0, len(keys))
for i, k := range keys {
if !k.valid() {
multiErr[i] = ErrInvalidKey
any = true
} else {
ks := k.String()
if _, ok := keyMap[ks]; !ok {
pbKeys = append(pbKeys, keyToProto(k))
}
keyMap[ks] = append(keyMap[ks], i)
}
}
if any {
return multiErr
}
req := &pb.LookupRequest{
ProjectId: c.dataset,
Keys: pbKeys,
ReadOptions: opts,
}
resp, err := c.client.Lookup(ctx, req)
if err != nil {
return err
}
found := resp.Found
missing := resp.Missing
// Upper bound 100 iterations to prevent infinite loop.
// We choose 100 iterations somewhat logically:
// Max number of Entities you can request from Datastore is 1,000.
// Max size for a Datastore Entity is 1 MiB.
// Max request size is 10 MiB, so we assume max response size is also 10 MiB.
// 1,000 / 10 = 100.
// Note that if ctx has a deadline, the deadline will probably
// be hit before we reach 100 iterations.
for i := 0; len(resp.Deferred) > 0 && i < 100; i++ {
req.Keys = resp.Deferred
resp, err = c.client.Lookup(ctx, req)
if err != nil {
return err
}
found = append(found, resp.Found...)
missing = append(missing, resp.Missing...)
}
filled := 0
for _, e := range found {
k, err := protoToKey(e.Entity.Key)
if err != nil {
return errors.New("datastore: internal error: server returned an invalid key")
}
filled += len(keyMap[k.String()])
for _, index := range keyMap[k.String()] {
elem := v.Index(index)
if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
elem = elem.Addr()
}
if multiArgType == multiArgTypeStructPtr && elem.IsNil() {
elem.Set(reflect.New(elem.Type().Elem()))
}
if err := loadEntityProto(elem.Interface(), e.Entity); err != nil {
multiErr[index] = err
any = true
}
}
}
for _, e := range missing {
k, err := protoToKey(e.Entity.Key)
if err != nil {
return errors.New("datastore: internal error: server returned an invalid key")
}
filled += len(keyMap[k.String()])
for _, index := range keyMap[k.String()] {
multiErr[index] = ErrNoSuchEntity
}
any = true
}
if filled != len(keys) {
return errors.New("datastore: internal error: server returned the wrong number of entities")
}
if any {
return multiErr
}
return nil
}
// Put saves the entity src into the datastore with key k. src must be a struct
// pointer or implement PropertyLoadSaver; if a struct pointer then any
// unexported fields of that struct will be skipped. If k is an incomplete key,
// the returned key will be a unique key generated by the datastore.
func (c *Client) Put(ctx context.Context, key *Key, src interface{}) (*Key, error) {
k, err := c.PutMulti(ctx, []*Key{key}, []interface{}{src})
if err != nil {
if me, ok := err.(MultiError); ok {
return nil, me[0]
}
return nil, err
}
return k[0], nil
}
// PutMulti is a batch version of Put.
//
// src must satisfy the same conditions as the dst argument to GetMulti.
// TODO(jba): rewrite in terms of Mutate.
func (c *Client) PutMulti(ctx context.Context, keys []*Key, src interface{}) (ret []*Key, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.PutMulti")
defer func() { trace.EndSpan(ctx, err) }()
mutations, err := putMutations(keys, src)
if err != nil {
return nil, err
}
// Make the request.
req := &pb.CommitRequest{
ProjectId: c.dataset,
Mutations: mutations,
Mode: pb.CommitRequest_NON_TRANSACTIONAL,
}
resp, err := c.client.Commit(ctx, req)
if err != nil {
return nil, err
}
// Copy any newly minted keys into the returned keys.
ret = make([]*Key, len(keys))
for i, key := range keys {
if key.Incomplete() {
// This key is in the mutation results.
ret[i], err = protoToKey(resp.MutationResults[i].Key)
if err != nil {
return nil, errors.New("datastore: internal error: server returned an invalid key")
}
} else {
ret[i] = key
}
}
return ret, nil
}
func putMutations(keys []*Key, src interface{}) ([]*pb.Mutation, error) {
v := reflect.ValueOf(src)
multiArgType, _ := checkMultiArg(v)
if multiArgType == multiArgTypeInvalid {
return nil, errors.New("datastore: src has invalid type")
}
if len(keys) != v.Len() {
return nil, errors.New("datastore: key and src slices have different length")
}
if len(keys) == 0 {
return nil, nil
}
if err := multiValid(keys); err != nil {
return nil, err
}
mutations := make([]*pb.Mutation, 0, len(keys))
multiErr := make(MultiError, len(keys))
hasErr := false
for i, k := range keys {
elem := v.Index(i)
// Two cases where we need to take the address:
// 1) multiArgTypePropertyLoadSaver => &elem implements PLS
// 2) multiArgTypeStruct => saveEntity needs *struct
if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
elem = elem.Addr()
}
p, err := saveEntity(k, elem.Interface())
if err != nil {
multiErr[i] = err
hasErr = true
}
var mut *pb.Mutation
if k.Incomplete() {
mut = &pb.Mutation{Operation: &pb.Mutation_Insert{Insert: p}}
} else {
mut = &pb.Mutation{Operation: &pb.Mutation_Upsert{Upsert: p}}
}
mutations = append(mutations, mut)
}
if hasErr {
return nil, multiErr
}
return mutations, nil
}
// Delete deletes the entity for the given key.
func (c *Client) Delete(ctx context.Context, key *Key) error {
err := c.DeleteMulti(ctx, []*Key{key})
if me, ok := err.(MultiError); ok {
return me[0]
}
return err
}
// DeleteMulti is a batch version of Delete.
// TODO(jba): rewrite in terms of Mutate.
func (c *Client) DeleteMulti(ctx context.Context, keys []*Key) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.DeleteMulti")
defer func() { trace.EndSpan(ctx, err) }()
mutations, err := deleteMutations(keys)
if err != nil {
return err
}
req := &pb.CommitRequest{
ProjectId: c.dataset,
Mutations: mutations,
Mode: pb.CommitRequest_NON_TRANSACTIONAL,
}
_, err = c.client.Commit(ctx, req)
return err
}
func deleteMutations(keys []*Key) ([]*pb.Mutation, error) {
mutations := make([]*pb.Mutation, 0, len(keys))
set := make(map[string]bool, len(keys))
for _, k := range keys {
if k.Incomplete() {
return nil, fmt.Errorf("datastore: can't delete the incomplete key: %v", k)
}
ks := k.String()
if !set[ks] {
mutations = append(mutations, &pb.Mutation{
Operation: &pb.Mutation_Delete{Delete: keyToProto(k)},
})
}
set[ks] = true
}
return mutations, nil
}
// Mutate applies one or more mutations atomically.
// It returns the keys of the argument Mutations, in the same order.
//
// If any of the mutations are invalid, Mutate returns a MultiError with the errors.
// Mutate returns a MultiError in this case even if there is only one Mutation.
func (c *Client) Mutate(ctx context.Context, muts ...*Mutation) (ret []*Key, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.Mutate")
defer func() { trace.EndSpan(ctx, err) }()
pmuts, err := mutationProtos(muts)
if err != nil {
return nil, err
}
req := &pb.CommitRequest{
ProjectId: c.dataset,
Mutations: pmuts,
Mode: pb.CommitRequest_NON_TRANSACTIONAL,
}
resp, err := c.client.Commit(ctx, req)
if err != nil {
return nil, err
}
// Copy any newly minted keys into the returned keys.
ret = make([]*Key, len(muts))
for i, mut := range muts {
if mut.key.Incomplete() {
// This key is in the mutation results.
ret[i], err = protoToKey(resp.MutationResults[i].Key)
if err != nil {
return nil, errors.New("datastore: internal error: server returned an invalid key")
}
} else {
ret[i] = mut.key
}
}
return ret, nil
}
| [
"\"DATASTORE_EMULATOR_HOST\"",
"\"DATASTORE_HOST\"",
"\"DATASTORE_EMULATOR_HOST\"",
"\"DATASTORE_DATASET\"",
"\"DATASTORE_PROJECT_ID\"",
"\"DATASTORE_PROJECT_ID\""
] | [] | [
"DATASTORE_DATASET",
"DATASTORE_EMULATOR_HOST",
"DATASTORE_PROJECT_ID",
"DATASTORE_HOST"
] | [] | ["DATASTORE_DATASET", "DATASTORE_EMULATOR_HOST", "DATASTORE_PROJECT_ID", "DATASTORE_HOST"] | go | 4 | 0 | |
api/v2/api_v2.go | /*
Copyright (c) 2019 Dell Inc, or its subsidiaries.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v2
import (
"os"
"path"
"strconv"
"strings"
"github.com/dell/goisilon/api"
)
const (
namespacePath = "namespace"
exportsPath = "platform/2/protocols/nfs/exports"
quotaPath = "platform/2/quota/quotas"
snapshotsPath = "platform/2/snapshot/snapshots"
volumeSnapshotsPath = "/ifs/.snapshot"
)
var (
debug, _ = strconv.ParseBool(os.Getenv("GOISILON_DEBUG"))
colonBytes = []byte{byte(':')}
)
func realNamespacePath(c api.Client) string {
return path.Join(namespacePath, c.VolumesPath())
}
func realExportsPath(c api.Client) string {
return path.Join(exportsPath, c.VolumesPath())
}
func realVolumeSnapshotPath(c api.Client, name string) string {
parts := strings.SplitN(realNamespacePath(c), "/ifs/", 2)
return path.Join(parts[0], volumeSnapshotsPath, name, parts[1])
}
// GetAbsoluteSnapshotPath gets the absolute path of a snapshot
func GetAbsoluteSnapshotPath(c api.Client, snapshotName, volumeName string) string {
absoluteVolumePath := c.VolumePath(volumeName)
return path.Join(volumeSnapshotsPath, snapshotName, strings.TrimLeft(absoluteVolumePath, "/ifs/"))
}
| [
"\"GOISILON_DEBUG\""
] | [] | [
"GOISILON_DEBUG"
] | [] | ["GOISILON_DEBUG"] | go | 1 | 0 | |
cmd/executor/cmd/root.go | /*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/GoogleContainerTools/kaniko/pkg/buildcontext"
"github.com/GoogleContainerTools/kaniko/pkg/config"
"github.com/GoogleContainerTools/kaniko/pkg/constants"
"github.com/GoogleContainerTools/kaniko/pkg/executor"
"github.com/GoogleContainerTools/kaniko/pkg/timing"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/genuinetools/amicontained/container"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
var (
opts = &config.KanikoOptions{}
logLevel string
force bool
)
func init() {
RootCmd.PersistentFlags().StringVarP(&logLevel, "verbosity", "v", constants.DefaultLogLevel, "Log level (debug, info, warn, error, fatal, panic")
RootCmd.PersistentFlags().BoolVarP(&force, "force", "", false, "Force building outside of a container")
addKanikoOptionsFlags()
addHiddenFlags(RootCmd)
}
// RootCmd is the kaniko command that is run
var RootCmd = &cobra.Command{
Use: "executor",
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
if cmd.Use == "executor" {
resolveEnvironmentBuildArgs(opts.BuildArgs, os.Getenv)
if err := util.ConfigureLogging(logLevel); err != nil {
return err
}
if !opts.NoPush && len(opts.Destinations) == 0 {
return errors.New("You must provide --destination, or use --no-push")
}
if err := cacheFlagsValid(); err != nil {
return errors.Wrap(err, "cache flags invalid")
}
if err := resolveSourceContext(); err != nil {
return errors.Wrap(err, "error resolving source context")
}
if err := resolveDockerfilePath(); err != nil {
return errors.Wrap(err, "error resolving dockerfile path")
}
if len(opts.Destinations) == 0 && opts.ImageNameDigestFile != "" {
return errors.New("You must provide --destination if setting ImageNameDigestFile")
}
// Update whitelisted paths
util.UpdateWhitelist(opts.WhitelistVarRun)
}
return nil
},
Run: func(cmd *cobra.Command, args []string) {
if !checkContained() {
if !force {
exit(errors.New("kaniko should only be run inside of a container, run with the --force flag if you are sure you want to continue"))
}
logrus.Warn("kaniko is being run outside of a container. This can have dangerous effects on your system")
}
if err := executor.CheckPushPermissions(opts); err != nil {
exit(errors.Wrap(err, "error checking push permissions -- make sure you entered the correct tag name, and that you are authenticated correctly, and try again"))
}
if err := resolveRelativePaths(); err != nil {
exit(errors.Wrap(err, "error resolving relative paths to absolute paths"))
}
if err := os.Chdir("/"); err != nil {
exit(errors.Wrap(err, "error changing to root dir"))
}
image, err := executor.DoBuild(opts)
if err != nil {
exit(errors.Wrap(err, "error building image"))
}
if err := executor.DoPush(image, opts); err != nil {
exit(errors.Wrap(err, "error pushing image"))
}
benchmarkFile := os.Getenv("BENCHMARK_FILE")
// false is a keyword for integration tests to turn off benchmarking
if benchmarkFile != "" && benchmarkFile != "false" {
f, err := os.Create(benchmarkFile)
if err != nil {
logrus.Warnf("Unable to create benchmarking file %s: %s", benchmarkFile, err)
}
defer f.Close()
s, err := timing.JSON()
if err != nil {
logrus.Warnf("Unable to write benchmark file: %s", err)
}
f.WriteString(s)
}
},
}
// addKanikoOptionsFlags configures opts
func addKanikoOptionsFlags() {
RootCmd.PersistentFlags().StringVarP(&opts.DockerfilePath, "dockerfile", "f", "Dockerfile", "Path to the dockerfile to be built.")
RootCmd.PersistentFlags().StringVarP(&opts.SrcContext, "context", "c", "/workspace/", "Path to the dockerfile build context.")
RootCmd.PersistentFlags().StringVarP(&opts.Bucket, "bucket", "b", "", "Name of the GCS bucket from which to access build context as tarball.")
RootCmd.PersistentFlags().VarP(&opts.Destinations, "destination", "d", "Registry the final image should be pushed to. Set it repeatedly for multiple destinations.")
RootCmd.PersistentFlags().StringVarP(&opts.SnapshotMode, "snapshotMode", "", "full", "Change the file attributes inspected during snapshotting")
RootCmd.PersistentFlags().VarP(&opts.BuildArgs, "build-arg", "", "This flag allows you to pass in ARG values at build time. Set it repeatedly for multiple values.")
RootCmd.PersistentFlags().BoolVarP(&opts.Insecure, "insecure", "", false, "Push to insecure registry using plain HTTP")
RootCmd.PersistentFlags().BoolVarP(&opts.SkipTLSVerify, "skip-tls-verify", "", false, "Push to insecure registry ignoring TLS verify")
RootCmd.PersistentFlags().BoolVarP(&opts.InsecurePull, "insecure-pull", "", false, "Pull from insecure registry using plain HTTP")
RootCmd.PersistentFlags().BoolVarP(&opts.SkipTLSVerifyPull, "skip-tls-verify-pull", "", false, "Pull from insecure registry ignoring TLS verify")
RootCmd.PersistentFlags().StringVarP(&opts.TarPath, "tarPath", "", "", "Path to save the image in as a tarball instead of pushing")
RootCmd.PersistentFlags().BoolVarP(&opts.SingleSnapshot, "single-snapshot", "", false, "Take a single snapshot at the end of the build.")
RootCmd.PersistentFlags().BoolVarP(&opts.Reproducible, "reproducible", "", false, "Strip timestamps out of the image to make it reproducible")
RootCmd.PersistentFlags().StringVarP(&opts.Target, "target", "", "", "Set the target build stage to build")
RootCmd.PersistentFlags().BoolVarP(&opts.NoPush, "no-push", "", false, "Do not push the image to the registry")
RootCmd.PersistentFlags().StringVarP(&opts.CacheRepo, "cache-repo", "", "", "Specify a repository to use as a cache, otherwise one will be inferred from the destination provided")
RootCmd.PersistentFlags().StringVarP(&opts.CacheDir, "cache-dir", "", "/cache", "Specify a local directory to use as a cache.")
RootCmd.PersistentFlags().StringVarP(&opts.DigestFile, "digest-file", "", "", "Specify a file to save the digest of the built image to.")
RootCmd.PersistentFlags().StringVarP(&opts.ImageNameDigestFile, "image-name-with-digest-file", "", "", "Specify a file to save the image name w/ digest of the built image to.")
RootCmd.PersistentFlags().StringVarP(&opts.OCILayoutPath, "oci-layout-path", "", "", "Path to save the OCI image layout of the built image.")
RootCmd.PersistentFlags().BoolVarP(&opts.Cache, "cache", "", false, "Use cache when building image")
RootCmd.PersistentFlags().BoolVarP(&opts.Cleanup, "cleanup", "", false, "Clean the filesystem at the end")
RootCmd.PersistentFlags().DurationVarP(&opts.CacheTTL, "cache-ttl", "", time.Hour*336, "Cache timeout in hours. Defaults to two weeks.")
RootCmd.PersistentFlags().VarP(&opts.InsecureRegistries, "insecure-registry", "", "Insecure registry using plain HTTP to push and pull. Set it repeatedly for multiple registries.")
RootCmd.PersistentFlags().VarP(&opts.SkipTLSVerifyRegistries, "skip-tls-verify-registry", "", "Insecure registry ignoring TLS verify to push and pull. Set it repeatedly for multiple registries.")
RootCmd.PersistentFlags().BoolVarP(&opts.WhitelistVarRun, "whitelist-var-run", "", true, "Ignore /var/run directory when taking image snapshot. Set it to false to preserve /var/run/ in destination image. (Default true).")
}
// addHiddenFlags marks certain flags as hidden from the executor help text
func addHiddenFlags(cmd *cobra.Command) {
// This flag is added in a vendored directory, hide so that it doesn't come up via --help
pflag.CommandLine.MarkHidden("azure-container-registry-config")
// Hide this flag as we want to encourage people to use the --context flag instead
cmd.PersistentFlags().MarkHidden("bucket")
}
func checkContained() bool {
_, err := container.DetectRuntime()
return err == nil
}
// cacheFlagsValid makes sure the flags passed in related to caching are valid
func cacheFlagsValid() error {
if !opts.Cache {
return nil
}
// If --cache=true and --no-push=true, then cache repo must be provided
// since cache can't be inferred from destination
if opts.CacheRepo == "" && opts.NoPush {
return errors.New("if using cache with --no-push, specify cache repo with --cache-repo")
}
return nil
}
// resolveDockerfilePath resolves the Dockerfile path to an absolute path
func resolveDockerfilePath() error {
if isURL(opts.DockerfilePath) {
return nil
}
if util.FilepathExists(opts.DockerfilePath) {
abs, err := filepath.Abs(opts.DockerfilePath)
if err != nil {
return errors.Wrap(err, "getting absolute path for dockerfile")
}
opts.DockerfilePath = abs
return copyDockerfile()
}
// Otherwise, check if the path relative to the build context exists
if util.FilepathExists(filepath.Join(opts.SrcContext, opts.DockerfilePath)) {
abs, err := filepath.Abs(filepath.Join(opts.SrcContext, opts.DockerfilePath))
if err != nil {
return errors.Wrap(err, "getting absolute path for src context/dockerfile path")
}
opts.DockerfilePath = abs
return copyDockerfile()
}
return errors.New("please provide a valid path to a Dockerfile within the build context with --dockerfile")
}
// resolveEnvironmentBuildArgs replace build args without value by the same named environment variable
func resolveEnvironmentBuildArgs(arguments []string, resolver func(string) string) {
for index, argument := range arguments {
i := strings.Index(argument, "=")
if i < 0 {
value := resolver(argument)
arguments[index] = fmt.Sprintf("%s=%s", argument, value)
}
}
}
// copy Dockerfile to /kaniko/Dockerfile so that if it's specified in the .dockerignore
// it won't be copied into the image
func copyDockerfile() error {
if _, err := util.CopyFile(opts.DockerfilePath, constants.DockerfilePath, ""); err != nil {
return errors.Wrap(err, "copying dockerfile")
}
opts.DockerfilePath = constants.DockerfilePath
return nil
}
// resolveSourceContext unpacks the source context if it is a tar in a bucket
// it resets srcContext to be the path to the unpacked build context within the image
func resolveSourceContext() error {
if opts.SrcContext == "" && opts.Bucket == "" {
return errors.New("please specify a path to the build context with the --context flag or a bucket with the --bucket flag")
}
if opts.SrcContext != "" && !strings.Contains(opts.SrcContext, "://") {
return nil
}
if opts.Bucket != "" {
if !strings.Contains(opts.Bucket, "://") {
// if no prefix use Google Cloud Storage as default for backwards compatibility
opts.SrcContext = constants.GCSBuildContextPrefix + opts.Bucket
} else {
opts.SrcContext = opts.Bucket
}
}
contextExecutor, err := buildcontext.GetBuildContext(opts.SrcContext)
if err != nil {
return err
}
logrus.Debugf("Getting source context from %s", opts.SrcContext)
opts.SrcContext, err = contextExecutor.UnpackTarFromBuildContext()
if err != nil {
return err
}
logrus.Debugf("Build context located at %s", opts.SrcContext)
return nil
}
func resolveRelativePaths() error {
optsPaths := []*string{
&opts.DockerfilePath,
&opts.SrcContext,
&opts.CacheDir,
&opts.TarPath,
&opts.DigestFile,
&opts.ImageNameDigestFile,
}
for _, p := range optsPaths {
if path := *p; shdSkip(path) {
logrus.Debugf("Skip resolving path %s", path)
continue
}
// Resolve relative path to absolute path
var err error
relp := *p // save original relative path
if *p, err = filepath.Abs(*p); err != nil {
return errors.Wrapf(err, "Couldn't resolve relative path %s to an absolute path", *p)
}
logrus.Debugf("Resolved relative path %s to %s", relp, *p)
}
return nil
}
func exit(err error) {
fmt.Println(err)
os.Exit(1)
}
func isURL(path string) bool {
if match, _ := regexp.MatchString("^https?://", path); match {
return true
}
return false
}
func shdSkip(path string) bool {
return path == "" || isURL(path) || filepath.IsAbs(path)
}
| [
"\"BENCHMARK_FILE\""
] | [] | [
"BENCHMARK_FILE"
] | [] | ["BENCHMARK_FILE"] | go | 1 | 0 | |
testutils/pkg.go | package testutils
import (
"fmt"
"go/build"
"go/parser"
"io/ioutil"
"log"
"os"
"path"
"strings"
"github.com/golangci/gosec"
"golang.org/x/tools/go/loader"
)
type buildObj struct {
pkg *build.Package
config loader.Config
program *loader.Program
}
// TestPackage is a mock package for testing purposes
type TestPackage struct {
Path string
Files map[string]string
ondisk bool
build *buildObj
}
// NewTestPackage will create a new and empty package. Must call Close() to clean up
// auxiliary files
func NewTestPackage() *TestPackage {
// Files must exist in $GOPATH
sourceDir := path.Join(os.Getenv("GOPATH"), "src")
workingDir, err := ioutil.TempDir(sourceDir, "gosecs_test")
if err != nil {
return nil
}
return &TestPackage{
Path: workingDir,
Files: make(map[string]string),
ondisk: false,
build: nil,
}
}
// AddFile inserts the filename and contents into the package contents
func (p *TestPackage) AddFile(filename, content string) {
p.Files[path.Join(p.Path, filename)] = content
}
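// write persists the package files to disk; it is a no-op if they were already written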
func (p *TestPackage) write() error {
if p.ondisk {
return nil
}
for filename, content := range p.Files {
if e := ioutil.WriteFile(filename, []byte(content), 0644); e != nil {
return e
}
}
p.ondisk = true
return nil
}
// Build ensures all files are persisted to disk and built
func (p *TestPackage) Build() error {
if p.build != nil {
return nil
}
if err := p.write(); err != nil {
return err
}
basePackage, err := build.Default.ImportDir(p.Path, build.ImportComment)
if err != nil {
return err
}
var packageFiles []string
packageConfig := loader.Config{Build: &build.Default, ParserMode: parser.ParseComments}
for _, filename := range basePackage.GoFiles {
packageFiles = append(packageFiles, path.Join(p.Path, filename))
}
packageConfig.CreateFromFilenames(basePackage.Name, packageFiles...)
program, err := packageConfig.Load()
if err != nil {
return err
}
p.build = &buildObj{
pkg: basePackage,
config: packageConfig,
program: program,
}
return nil
}
// CreateContext builds a context out of supplied package context
func (p *TestPackage) CreateContext(filename string) *gosec.Context {
if err := p.Build(); err != nil {
log.Fatal(err)
return nil
}
for _, pkg := range p.build.program.Created {
for _, file := range pkg.Files {
pkgFile := p.build.program.Fset.File(file.Pos()).Name()
strip := fmt.Sprintf("%s%c", p.Path, os.PathSeparator)
pkgFile = strings.TrimPrefix(pkgFile, strip)
if pkgFile == filename {
ctx := &gosec.Context{
FileSet: p.build.program.Fset,
Root: file,
Config: gosec.NewConfig(),
Info: &pkg.Info,
Pkg: pkg.Pkg,
Imports: gosec.NewImportTracker(),
}
ctx.Imports.TrackPackages(ctx.Pkg.Imports()...)
return ctx
}
}
}
return nil
}
// Close will delete the package and all files in that directory
func (p *TestPackage) Close() {
if p.ondisk {
err := os.RemoveAll(p.Path)
if err != nil {
log.Fatal(err)
}
}
}
| [
"\"GOPATH\""
] | [] | [
"GOPATH"
] | [] | ["GOPATH"] | go | 1 | 0 | |
moverscore/_moverscore.py | import inspect
import logging
import os
import string
from pathlib import Path
from collections import defaultdict
import numpy as np
import torch
from pyemd import emd
from torch import nn
from pytorch_pretrained_bert import BertModel, BertTokenizer
from pytorch_pretrained_bert.modeling import BertPreTrainedModel
from ._utils import DEVICE, Download, get_idf_dict, safe_divide, collate_idf, load_ngram, pairwise_distances
MOVERSCORE_DIR = Path(os.environ.get("MOVERSCORE", Path("~/.cache/moverscore").expanduser()))
MNLI_BERT_URL = (
"https://github.com/AIPHES/emnlp19-moverscore/releases/download/0.6/MNLI_BERT.zip"
)
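# Masking helpers: plus_mask/minus_mask push padded positions towards +/-1e30 so min/max reductions ignore them; mul_mask zeroes them out.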
plus_mask = lambda x, m: x + (1.0 - m).unsqueeze(-1) * 1e30
minus_mask = lambda x, m: x - (1.0 - m).unsqueeze(-1) * 1e30
mul_mask = lambda x, m: x * m.unsqueeze(-1)
masked_reduce_min = lambda x, m: torch.min(plus_mask(x, m), dim=1, out=None)
masked_reduce_max = lambda x, m: torch.max(minus_mask(x, m), dim=1, out=None)
masked_reduce_mean = lambda x, m: mul_mask(x, m).sum(1) / (
m.sum(1, keepdim=True) + 1e-10
)
masked_reduce_geomean = lambda x, m: np.exp(
mul_mask(np.log(x), m).sum(1) / (m.sum(1, keepdim=True) + 1e-10)
)
idf_reduce_mean = lambda x, m: mul_mask(x, m).sum(1)
idf_reduce_max = lambda x, m, idf: torch.max(
mul_mask(minus_mask(x, m), idf), dim=1, out=None
)
idf_reduce_min = lambda x, m, idf: torch.min(
mul_mask(plus_mask(x, m), idf), dim=1, out=None
)
class BertForSequenceClassification(BertPreTrainedModel):
def __init__(self, config, num_labels):
super(BertForSequenceClassification, self).__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
def forward(
self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=None
):
encoded_layers, pooled_output = self.bert(
input_ids, token_type_ids, attention_mask, output_all_encoded_layers=True
)
return encoded_layers, pooled_output
class MoverScore:
def __init__(self):
self.tokenizer = BertTokenizer.from_pretrained(
MOVERSCORE_DIR, do_lower_case=True
)
self.model = BertForSequenceClassification.from_pretrained(MOVERSCORE_DIR, 3)
self.model.eval()
self.model.to(DEVICE)
@staticmethod
def model_setup():
MOVERSCORE_DIR.mkdir(parents=True, exist_ok=True)
logger = logging.getLogger(inspect.currentframe().f_code.co_name)
Download(MNLI_BERT_URL, MOVERSCORE_DIR).download_zip(logger)
@staticmethod
def bert_encode(model, x, attention_mask):
model.eval()
x_seg = torch.zeros_like(x, dtype=torch.long)
with torch.no_grad():
x_encoded_layers, pooled_output = model(
x, x_seg, attention_mask=attention_mask, output_all_encoded_layers=True
)
return x_encoded_layers
def get_bert_embedding(self, all_sens, idf_dict, batch_size=-1, device=DEVICE):
tokenizer = self.tokenizer
model = self.model
padded_sens, padded_idf, lens, mask, tokens = collate_idf(
all_sens,
tokenizer,
tokenizer.convert_tokens_to_ids,
idf_dict,
device=device,
)
if batch_size == -1:
batch_size = len(all_sens)
embeddings = []
with torch.no_grad():
for i in range(0, len(all_sens), batch_size):
batch_embedding = self.bert_encode(
model,
padded_sens[i : i + batch_size],
attention_mask=mask[i : i + batch_size],
)
batch_embedding = torch.stack(batch_embedding)
embeddings.append(batch_embedding)
del batch_embedding
total_embedding = torch.cat(embeddings, dim=-3)
return total_embedding, lens, mask, padded_idf, tokens
def get_idf_dict(self, arr):
if len(arr) == 1:
return defaultdict(lambda: 1.)
return get_idf_dict(arr, self.tokenizer)
def score(
self,
refs,
hyps,
stop_words=[],
n_gram=1,
remove_subwords=True,
batch_size=256,
device=DEVICE,
):
idf_dict_ref = self.get_idf_dict(refs)
idf_dict_hyp = self.get_idf_dict(hyps)
preds = []
for batch_start in range(0, len(refs), batch_size):
batch_refs = refs[batch_start : batch_start + batch_size]
batch_hyps = hyps[batch_start : batch_start + batch_size]
(
ref_embedding,
ref_lens,
ref_masks,
ref_idf,
ref_tokens,
) = self.get_bert_embedding(batch_refs, idf_dict_ref, device=device)
(
hyp_embedding,
hyp_lens,
hyp_masks,
hyp_idf,
hyp_tokens,
) = self.get_bert_embedding(batch_hyps, idf_dict_hyp, device=device)
ref_embedding.div_(torch.norm(ref_embedding, dim=-1).unsqueeze(-1))
hyp_embedding.div_(torch.norm(hyp_embedding, dim=-1).unsqueeze(-1))
ref_embedding_max, _ = torch.max(ref_embedding[-5:], dim=0, out=None)
hyp_embedding_max, _ = torch.max(hyp_embedding[-5:], dim=0, out=None)
ref_embedding_min, _ = torch.min(ref_embedding[-5:], dim=0, out=None)
hyp_embedding_min, _ = torch.min(hyp_embedding[-5:], dim=0, out=None)
ref_embedding_avg = ref_embedding[-5:].mean(0)
hyp_embedding_avg = hyp_embedding[-5:].mean(0)
ref_embedding = torch.cat(
[ref_embedding_min, ref_embedding_avg, ref_embedding_max], -1
)
hyp_embedding = torch.cat(
[hyp_embedding_min, hyp_embedding_avg, hyp_embedding_max], -1
)
for i in range(len(ref_tokens)):
if remove_subwords:
ref_ids = [
k
for k, w in enumerate(ref_tokens[i])
if w not in set(string.punctuation)
and "##" not in w
and w not in stop_words
]
hyp_ids = [
k
for k, w in enumerate(hyp_tokens[i])
if w not in set(string.punctuation)
and "##" not in w
and w not in stop_words
]
else:
ref_ids = [
k
for k, w in enumerate(ref_tokens[i])
if w not in set(string.punctuation) and w not in stop_words
]
hyp_ids = [
k
for k, w in enumerate(hyp_tokens[i])
if w not in set(string.punctuation) and w not in stop_words
]
ref_embedding_i, ref_idf_i = load_ngram(
ref_ids, ref_embedding[i], ref_idf[i], n_gram, 1
)
hyp_embedding_i, hyp_idf_i = load_ngram(
hyp_ids, hyp_embedding[i], hyp_idf[i], n_gram, 1
)
raw = torch.cat([ref_embedding_i, hyp_embedding_i], 0)
raw.div_(torch.norm(raw, dim=-1).unsqueeze(-1) + 0.000001)
distance_matrix = pairwise_distances(raw, raw)
c1 = np.zeros(len(ref_idf_i) + len(hyp_idf_i), dtype=np.double)
c2 = np.zeros(len(ref_idf_i) + len(hyp_idf_i), dtype=np.double)
c1[: len(ref_idf_i)] = ref_idf_i
c2[-len(hyp_idf_i) :] = hyp_idf_i
c1 = safe_divide(c1, np.sum(c1))
c2 = safe_divide(c2, np.sum(c2))
score = 1 - emd(c1, c2, distance_matrix.double().cpu().numpy())
preds.append(score)
return preds
| [] | [] | [
"MOVERSCORE"
] | [] | ["MOVERSCORE"] | python | 1 | 0 | |
pkg/cmd/root/spring.go | package root
import (
"fmt"
"os"
"github.com/jenkins-x/jx-helpers/v3/pkg/cobras/helper"
"github.com/jenkins-x/jx-helpers/v3/pkg/homedir"
"github.com/jenkins-x/jx-helpers/v3/pkg/stringhelpers"
"github.com/jenkins-x/jx-helpers/v3/pkg/termcolor"
"github.com/jenkins-x-plugins/jx-project/pkg/cmd/common"
"github.com/jenkins-x-plugins/jx-project/pkg/cmd/importcmd"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/jenkins-x/jx-helpers/v3/pkg/cobras/templates"
"github.com/jenkins-x/jx-logging/v3/pkg/log"
"github.com/jenkins-x-plugins/jx-project/pkg/spring"
)
var (
createSpringLong = templates.LongDesc(`
Creates a new Spring Boot application and then optionally setups CI/CD pipelines and GitOps promotion.
You can see a demo of this command here: [https://jenkins-x.io/demos/create_spring/](https://jenkins-x.io/demos/create_spring/)
For more documentation see: [https://jenkins-x.io/developing/create-spring/](https://jenkins-x.io/developing/create-spring/)
` + helper.SeeAlsoText("jx create project"))
createSpringExample = templates.Examples(`
# Create a Spring Boot application where you use the terminal to pick the values
%s spring
# Creates a Spring Boot application passing in the required dependencies
%s spring -d web -d actuator
# To pick the advanced options (such as what package type maven-project/gradle-project) etc then use
%s spring -x
# To create a gradle project use:
%s spring --type gradle-project
`)
)
// CreateSpringOptions the options for the create spring command
type CreateSpringOptions struct {
Options
Advanced bool
SpringForm spring.SpringBootForm
}
// NewCmdCreateSpring creates a command object for the "create" command
func NewCmdCreateSpring() *cobra.Command {
options := &CreateSpringOptions{}
cmd := &cobra.Command{
Use: "spring",
Short: "Create a new Spring Boot application and import the generated code into Git and Jenkins for CI/CD",
Long: createSpringLong,
Example: fmt.Sprintf(createSpringExample, common.BinaryName, common.BinaryName, common.BinaryName, common.BinaryName),
Run: func(cmd *cobra.Command, args []string) {
options.Args = args
err := options.Run()
helper.CheckErr(err)
},
}
options.addCreateAppFlags(cmd)
cmd.Flags().BoolVarP(&options.Advanced, "advanced", "x", false, "Advanced mode can show more detailed forms for some resource kinds like springboot")
cmd.Flags().StringArrayVarP(&options.SpringForm.DependencyKinds, spring.OptionDependencyKind, "k", spring.DefaultDependencyKinds, "Default dependency kinds to choose from")
cmd.Flags().StringArrayVarP(&options.SpringForm.Dependencies, spring.OptionDependency, "d", []string{}, "Spring Boot dependencies")
cmd.Flags().StringVarP(&options.SpringForm.GroupId, spring.OptionGroupId, "g", "", "Group ID to generate")
cmd.Flags().StringVarP(&options.SpringForm.ArtifactId, spring.OptionArtifactId, "a", "", "Artifact ID to generate")
cmd.Flags().StringVarP(&options.SpringForm.Language, spring.OptionLanguage, "l", "", "Language to generate")
cmd.Flags().StringVarP(&options.SpringForm.BootVersion, spring.OptionBootVersion, "t", "", "Spring Boot version")
cmd.Flags().StringVarP(&options.SpringForm.JavaVersion, spring.OptionJavaVersion, "j", "", "Java version")
cmd.Flags().StringVarP(&options.SpringForm.Packaging, spring.OptionPackaging, "p", "", "Packaging")
cmd.Flags().StringVarP(&options.SpringForm.Type, spring.OptionType, "", "", "Project Type (such as maven-project or gradle-project)")
return cmd
}
// Run implements the command
func (o *CreateSpringOptions) Run() error {
err := o.Validate()
if err != nil {
return errors.Wrapf(err, "failed to validate options")
}
cacheDir, err := homedir.CacheDir(os.Getenv("JX3_HOME"), ".jx3")
if err != nil {
return err
}
data := &o.SpringForm
var details *importcmd.CreateRepoData
if !o.BatchMode {
details, err = o.GetGitRepositoryDetails()
if err != nil {
return err
}
data.ArtifactId = details.RepoName
}
model, err := spring.LoadSpringBoot(cacheDir)
if err != nil {
return fmt.Errorf("Failed to load Spring Boot model %s", err)
}
err = model.CreateSurvey(&o.SpringForm, o.Advanced, o.BatchMode)
if err != nil {
return err
}
// always add in actuator as it's required for health checking
if stringhelpers.StringArrayIndex(o.SpringForm.Dependencies, "actuator") < 0 {
o.SpringForm.Dependencies = append(o.SpringForm.Dependencies, "actuator")
}
// always add web as the JVM tends to terminate if it's not added
if stringhelpers.StringArrayIndex(o.SpringForm.Dependencies, "web") < 0 {
o.SpringForm.Dependencies = append(o.SpringForm.Dependencies, "web")
}
dir := o.OutDir
if dir == "" {
dir, err = os.Getwd()
if err != nil {
return err
}
}
outDir, err := data.CreateProject(dir)
if err != nil {
return err
}
log.Logger().Infof("Created Spring Boot project at %s", termcolor.ColorInfo(outDir))
if details != nil {
o.ConfigureImportOptions(details)
}
return o.ImportCreatedProject(outDir)
}
| [
"\"JX3_HOME\""
] | [] | [
"JX3_HOME"
] | [] | ["JX3_HOME"] | go | 1 | 0 | |
infra/modules/providers/azure/redis-cache/tests/integration/redis.go | package integration
import (
"fmt"
"math/rand"
"os"
"strconv"
"testing"
"time"
"github.com/microsoft/cobalt/test-harness/infratests"
"github.com/microsoft/cobalt/test-harness/terratest-extensions/modules/azure"
"github.com/stretchr/testify/require"
)
var subscription = os.Getenv("ARM_SUBSCRIPTION_ID")
// redisHealthCheck - Asserts that the deployment was successful.
func redisHealthCheck(t *testing.T, provisionState string) {
require.Equal(t, string(provisionState), "Succeeded", "The redis deployment hasn't succeeded")
}
// validateNonSSLPort - Asserts that non SSL ports are disabled
func validateNonSSLPort(t *testing.T, NonSSLPort *bool) {
require.False(t, *NonSSLPort, "There's a non SSL port opened on the redis cluster")
}
// validateMinTLSVersion - Validate that the min TLS version isn't nil and >= 1.0
func validateMinTLSVersion(t *testing.T, minTLSVersion string) {
minTLSVersionFloat, err := strconv.ParseFloat(minTLSVersion, 32)
if err != nil {
t.Fatal(err)
}
require.True(t, minTLSVersionFloat >= 1, "Min TLS version should be >= 1.0")
}
// validateResourceGroupCacheCount - Validate the caches within the resource group
func validateResourceGroupCacheCount(t *testing.T, caches []string, expectedCacheName string) {
expectedResourceGroupCaches := []string{expectedCacheName}
require.Equal(t, expectedResourceGroupCaches, caches, "The provisioned caches in the RG don't match the expected result")
}
// InspectProvisionedCache - Runs a suite of test assertions to validate that a provisioned redis cache
// is operational.
func InspectProvisionedCache(cacheOutputName string, resourceGroupOutputName string) func(t *testing.T, output infratests.TerraformOutput) {
return func(t *testing.T, output infratests.TerraformOutput) {
cacheName := output[cacheOutputName].(string)
resourceGroup := output[resourceGroupOutputName].(string)
results := azure.GetCache(t, subscription, resourceGroup, cacheName)
cacheNameList := []string{}
for _, resourceType := range *azure.ListCachesByResourceGroup(t, subscription, resourceGroup) {
cacheNameList = append(cacheNameList, string(*resourceType.Name))
}
validateResourceGroupCacheCount(t, cacheNameList, cacheName)
redisHealthCheck(t, string(results.ProvisioningState))
validateNonSSLPort(t, results.EnableNonSslPort)
validateMinTLSVersion(t, string(results.MinimumTLSVersion))
}
}
// CheckRedisWriteOperations - Runs a suite of test assertions to validate that entries can be written and read from an redis cluster
func CheckRedisWriteOperations(hostnameOutputName string, primaryKeyOutputName string, hostPortOutputName string) func(t *testing.T, output infratests.TerraformOutput) {
return func(t *testing.T, output infratests.TerraformOutput) {
primaryKey := output[primaryKeyOutputName].(string)
hostname := output[hostnameOutputName].(string)
hostPort := output[hostPortOutputName].(float64)
address := fmt.Sprintf("%s:%d", hostname, int(hostPort))
rand.Seed(time.Now().UnixNano())
entryIdentifier := rand.Int()
keyName := fmt.Sprintf("key-%d", entryIdentifier)
client := azure.RedisClient(t, address, primaryKey)
keyValue := "entryTestValue"
require.Equal(t, azure.SetRedisCacheEntry(t, client, keyName, keyValue, 0), "OK", "Redis cache key set operation result doesn't match the expected result")
require.Equal(t, azure.GetRedisCacheEntryValueStr(t, client, keyName), keyValue, "Redis cache key get operation result doesn't match the expected result")
require.Equal(t, azure.RemoveRedisCacheEntry(t, client, keyName), int64(1), "Redis cache key removal operation result doesn't match the expected result")
}
}
| [
"\"ARM_SUBSCRIPTION_ID\""
] | [] | [
"ARM_SUBSCRIPTION_ID"
] | [] | ["ARM_SUBSCRIPTION_ID"] | go | 1 | 0 | |
tools/quantize_ZF2_dynamic.py | #!/usr/bin/env python
# --------------------------------------------------------
# Quantize Fast R-CNN based Network
# Written by Chia-Chi Tsai
# --------------------------------------------------------
"""Quantize a Fast R-CNN network on an image database."""
import os
os.environ['GLOG_minloglevel'] = '2'
import _init_paths
from fast_rcnn.test import test_net, test_net_silent, im_detect
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from datasets.factory import get_imdb
import caffe
import argparse
import pprint
import time, os, sys
import numpy as np
from caffe.proto import caffe_pb2
import google.protobuf.text_format as txtf
import math
import cv2
from utils.timer import Timer
import multiprocessing
import json
import shutil
import warnings
warnings.filterwarnings("ignore")
from utils.timer import Timer
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Quantize a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use',
default=0, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--def_quant', dest='prototxt_quantized',
help='quantized prototxt file defining the network',
default=None, type=str)
parser.add_argument('--def_quant_BAC', dest='prototxt_quantized_BAC',
help='quantized prototxt file defining the network',
default=None, type=str)
parser.add_argument('--act_analysis', dest='act_analysis',
help='input and output analysis file',
default=None, type=str)
parser.add_argument('--accumulator_analysis', dest='accumulator_analysis',
help='adder and multiplier analysis file',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to test',
default='voc_2007_test', type=str)
parser.add_argument('--comp', dest='comp_mode', help='competition mode',
action='store_true')
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--vis', dest='vis', help='visualize detections',
action='store_true')
parser.add_argument('--num_dets', dest='max_per_image',
help='max number of detections per image',
default=100, type=int)
parser.add_argument('--error_margin', dest='error_margin',
help='tolerance error of quantized network',
default=0.1, type=float)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def analyze_network(net_proto):
has_fc = False
has_deconv = False
has_conv = False
for l in net_proto.layer:
if l.type == 'Convolution':
has_conv = True
elif l.type == 'Deconvolution':
has_deconv = True
elif l.type =='InnerProduct':
has_fc = True
return has_conv, has_deconv, has_fc
# convert network to quantized network with 32 bit width
def convert_net_to_qnet(ori_net_path, q_net_path):
net_proto = read_from_prototxt(ori_net_path)
new_blob_name = {}
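# rename blobs that are used in-place (top == bottom) so every layer output gets a unique blob name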
for l in net_proto.layer:
for i in range(len(l.top)):
for j in range(len(l.bottom)):
if l.top[i] == l.bottom[j]:
if not l.top[i] in new_blob_name.keys():
new_blob_name[l.top[i]]=l.top[i]+'/t'
else:
l.bottom[j] = new_blob_name[l.bottom[j]]
new_blob_name[l.top[i]]=new_blob_name[l.top[i]]+'/t'
l.top[i] = new_blob_name[l.top[i]]
else:
for k in range(len(l.bottom)):
if l.bottom[k] in new_blob_name.keys():
l.bottom[k] = new_blob_name[l.bottom[k]]
if l.type == 'Convolution':
l.type = 'ConvolutionIVS'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_layer_in = 32
l.quantization_param.bw_layer_out = 32
l.quantization_param.bw_params = 32
l.quantization_param.fl_layer_in = 16
l.quantization_param.fl_layer_out= 16
l.quantization_param.fl_params = 16
l.quantization_param.rounding_time = 0
elif l.type =='InnerProduct':
l.type = 'FcIVS'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_layer_in = 32
l.quantization_param.bw_layer_out = 32
l.quantization_param.bw_params = 32
l.quantization_param.fl_layer_in = 16
l.quantization_param.fl_layer_out= 16
l.quantization_param.fl_params = 16
l.quantization_param.rounding_time = 0
elif l.type =='Deconvolution':
l.type = 'DeconvolutionRistretto'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_layer_in = 32
l.quantization_param.bw_layer_out = 32
l.quantization_param.bw_params = 32
l.quantization_param.fl_layer_in = 16
l.quantization_param.fl_layer_out= 16
l.quantization_param.fl_params = 16
l.quantization_param.rounding_time = 0
write_to_prototxt(net_proto, q_net_path)
# convert network to quantized network with 32 bit width
def convert_net_to_qnet_BAC_analysis(ori_net_path, q_net_path):
net_proto = read_from_prototxt(ori_net_path)
new_blob_name = {}
for l in net_proto.layer:
for i in range(len(l.top)):
for j in range(len(l.bottom)):
if l.top[i] == l.bottom[j]:
if not l.top[i] in new_blob_name.keys():
new_blob_name[l.top[i]]=l.top[i]+'/t'
else:
l.bottom[j] = new_blob_name[l.bottom[j]]
new_blob_name[l.top[i]]=new_blob_name[l.top[i]]+'/t'
l.top[i] = new_blob_name[l.top[i]]
else:
for k in range(len(l.bottom)):
if l.bottom[k] in new_blob_name.keys():
l.bottom[k] = new_blob_name[l.bottom[k]]
if l.type == 'Convolution' or l.type == 'ConvolutionIVS':
l.type = 'ConvolutionIVS'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_add = 32
l.quantization_param.bw_multiply = 32
l.quantization_param.fl_add = 16
l.quantization_param.fl_multiply = 16
l.quantization_param.rounding_time = 1
l.quantization_param.analyze_mode = 3
if l.type == 'InnerProduct' or l.type == 'FcIVS':
l.type = 'FcIVS'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_add = 32
l.quantization_param.bw_multiply = 32
l.quantization_param.fl_add = 16
l.quantization_param.fl_multiply = 16
l.quantization_param.rounding_time = 1
l.quantization_param.analyze_mode = 3
write_to_prototxt(net_proto, q_net_path)
def convert_net_to_qnet_BAC(ori_net_path, q_net_path):
net_proto = read_from_prototxt(ori_net_path)
new_blob_name = {}
for l in net_proto.layer:
for i in range(len(l.top)):
for j in range(len(l.bottom)):
if l.top[i] == l.bottom[j]:
if not l.top[i] in new_blob_name.keys():
new_blob_name[l.top[i]]=l.top[i]+'/t'
else:
l.bottom[j] = new_blob_name[l.bottom[j]]
new_blob_name[l.top[i]]=new_blob_name[l.top[i]]+'/t'
l.top[i] = new_blob_name[l.top[i]]
else:
for k in range(len(l.bottom)):
if l.bottom[k] in new_blob_name.keys():
l.bottom[k] = new_blob_name[l.bottom[k]]
if l.type == 'Convolution' or l.type == 'ConvolutionIVS':
l.type = 'ConvolutionIVS'
l.quantization_param.analyze_mode = 0
l.quantization_param.rounding_time = 1
if l.type == 'InnerProduct' or l.type == 'FcIVS':
l.type = 'FcIVS'
l.quantization_param.analyze_mode = 0
l.quantization_param.rounding_time = 1
write_to_prototxt(net_proto, q_net_path)
#change single layer bit width
def change_layer_bw(net_proto, layer_name,
bw_layer_in, fl_layer_in,
bw_layer_out, fl_layer_out,
bw_params, fl_params,
bw_add, fl_add,
bw_multiply, fl_multiply):
for l in net_proto.layer:
if l.name == layer_name:
l.quantization_param.precision = 0
l.quantization_param.bw_layer_in = int(bw_layer_in)
l.quantization_param.bw_layer_out = int(bw_layer_out)
l.quantization_param.bw_params = int(bw_params)
l.quantization_param.bw_add = int(bw_add)
l.quantization_param.bw_multiply = int(bw_multiply)
l.quantization_param.fl_layer_in = int(fl_layer_in)
l.quantization_param.fl_layer_out= int(fl_layer_out)
l.quantization_param.fl_params = int(fl_params)
l.quantization_param.fl_add = int(fl_add)
l.quantization_param.fl_multiply = int(fl_multiply)
return net_proto
def change_layer_BAC_bw(net_proto, layer_name,
bw_add, fl_add,
bw_multiply, fl_multiply):
for l in net_proto.layer:
if l.name == layer_name:
l.quantization_param.bw_add = bw_add
l.quantization_param.fl_add = fl_add
l.quantization_param.bw_multiply = bw_multiply
l.quantization_param.fl_multiply = fl_multiply
return net_proto
def change_layer_bottom_name(net_proto, layer_name,
layer_bottom_name):
for l in net_proto.layer:
if l.name == layer_name:
l.bottom = layer_bottom_name
return net_proto
def change_layer_top_name(net_proto, layer_name,
layer_top_name):
for l in net_proto.layer:
if l.name == layer_name:
l.top = layer_top_name
return net_proto
#calculate needed Integer Length of layer parameters
def calc_layer_param_IL(net,layer):
layer_param = net.params[layer.name]
max_weight = max(layer_param[0].data[...].max(), layer_param[0].data[...].min(), key=abs)
if layer.convolution_param.bias_term:
max_bias = max(layer_param[1].data[...].max(), layer_param[1].data[...].min(), key=abs)
else:
max_bias = 0
max_param = max(max_weight, max_bias, key=abs)
return math.ceil(math.log(abs(max_param), 2)) + 1
def analyze_net_param_IL(net, net_proto):
net_param_IL = dict()
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS' \
or layer.type == 'DeconvolutionRistretto':
net_param_IL[layer.name] = calc_layer_param_IL(net, layer)
return net_param_IL
#calculate needed Integer Length of layer output
def calc_layer_inout_IL(net, layer_bottom_name):
layer_output = net.blobs[layer_bottom_name].data
layer_output_max = abs(max(layer_output.max(), layer_output.min(), key=abs))
#if layer_bottom_name == 'data':
# print net.blobs[layer_bottom_name].data
# print math.ceil(math.log(layer_output_max, 2)) + 1
return math.ceil(math.log(layer_output_max, 2)) + 1
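#analyze needed Integer Length of every quantized layer's input and output across the test images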
def analyze_net_output_IL(net, net_proto, imdb, max_per_image=100, thresh=0.05, vis=False):
num_images = len(imdb.image_index)
_t = {'im_preproc': Timer(), 'im_net' : Timer(), 'im_postproc': Timer(), 'misc' : Timer()}
if not cfg.TEST.HAS_RPN:
roidb = imdb.roidb
net_output_IL = dict()
net_input_IL = dict()
for layer in net_proto.layer:
assert layer.top[0] != layer.bottom[0],"bottom name cannot be the same as top name in the same layer, at layer:{} top:{} bottom:{}".format(layer.name,layer.top[0],layer.bottom[0])
#if layer.top[0] == layer.bottom[0]:
# print layer.name, layer.type
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS' \
or layer.type == 'DeconvolutionRistretto':
net_output_IL[layer.name] = -sys.maxint - 1
net_input_IL[layer.name] = -sys.maxint - 1
for i in xrange(num_images):
if cfg.TEST.HAS_RPN:
box_proposals = None
else:
box_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0]
im = cv2.imread(imdb.image_path_at(i))
scores, boxes = im_detect(net, im, _t, box_proposals)
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS' \
or layer.type == 'DeconvolutionRistretto':
net_output_IL[layer.name] = max(calc_layer_inout_IL(net, layer.top[0]), net_output_IL[layer.name])
net_input_IL[layer.name] = max(calc_layer_inout_IL(net, layer.bottom[0]), net_input_IL[layer.name])
#print layer.type, layer.name, net_output_IL[layer.name],net_input_IL[layer.name]
return net_output_IL, net_input_IL
#calculate needed Integer Length of layer adder
def calc_layer_adder_IL(net, layer_top_name):
layer_adder_max = abs(max(
net.blobs[layer_top_name].data.reshape(net.blobs[layer_top_name].data.size)[0],
net.blobs[layer_top_name].data.reshape(net.blobs[layer_top_name].data.size)[1],
key=abs))
return math.ceil(math.log(layer_adder_max, 2)) + 1
#calculate needed Integer Length of layer multiplier
def calc_layer_multiplier_IL(net, layer_top_name):
layer_multiplier_max = abs(max(
net.blobs[layer_top_name].data.reshape(net.blobs[layer_top_name].data.size)[2],
net.blobs[layer_top_name].data.reshape(net.blobs[layer_top_name].data.size)[3],
key=abs))
return math.ceil(math.log(layer_multiplier_max, 2)) + 1
#analyze adder and multiplier of each layer in network
def analyze_net_adder_multiplier_IL(net, net_proto, imdb, max_per_image=100, thresh=0.05, vis=False):
num_images = len(imdb.image_index)
_t = {'im_preproc': Timer(), 'im_net' : Timer(), 'im_postproc': Timer(), 'misc' : Timer()}
if not cfg.TEST.HAS_RPN:
roidb = imdb.roidb
net_adder_IL = dict()
net_multiplier_IL = dict()
for layer in net_proto.layer:
assert layer.top[0] != layer.bottom[0],"bottom name cannot be the same as top name in the same layer, at layer:{} top:{} bottom:{}".format(layer.name,layer.top[0],layer.bottom[0])
#if layer.top[0] == layer.bottom[0]:
# print layer.name, layer.type
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS' :
net_adder_IL[layer.name] = -sys.maxint - 1
net_multiplier_IL[layer.name] = -sys.maxint - 1
for i in xrange(num_images):
if cfg.TEST.HAS_RPN:
box_proposals = None
else:
box_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0]
im = cv2.imread(imdb.image_path_at(i))
scores, boxes = im_detect(net, im, _t, box_proposals)
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS':
net.params[layer.name][0].data[0][0][0][0]=2610214
elif layer.type == 'FcIVS':
net.params[layer.name][0].data[0][0]=2610214
net.forward()
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS':
net_adder_IL[layer.name] = max(calc_layer_adder_IL(net, layer.top[0]),
net_adder_IL[layer.name])
net_multiplier_IL[layer.name] = max(calc_layer_multiplier_IL(net, layer.top[0]),
net_multiplier_IL[layer.name])
return net_adder_IL, net_multiplier_IL
#quantize adder in network
def quantize_net_adder(net_proto, net_adder_IL, adder_bw, extra_IL):
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS':
adder_IL = net_adder_IL[layer.name] + extra_IL
adder_FL = adder_bw - adder_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
layer.quantization_param.bw_params, \
layer.quantization_param.fl_params, \
adder_bw, adder_FL, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
#quantize multiplier in network
def quantize_net_multiplier(net_proto, net_multiplier_IL, multiplier_bw, extra_IL):
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS':
multiplier_IL = net_multiplier_IL[layer.name] + extra_IL
multiplier_FL = multiplier_bw - multiplier_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
layer.quantization_param.bw_params, \
layer.quantization_param.fl_params, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
multiplier_bw, multiplier_FL, \
)
#quantize input and output of each layer in network
def quantize_net_output(net_proto, net_output_IL, net_input_IL, output_bw, extra_IL):
input_bw = output_bw;
#input_FL = 0;
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS' \
or layer.type == 'DeconvolutionRistretto':
output_IL = net_output_IL[layer.name] + extra_IL
output_FL = output_bw - output_IL
input_IL = net_input_IL[layer.name] + extra_IL
input_FL = input_bw - input_IL
#if layer.name=='conv1_1/conv':
# print input_IL,output_IL
#print layer.name
#if layer.name == 'conv1_1/conv':
# print output_IL
# continue
change_layer_bw(net_proto, layer.name, \
input_bw, input_FL, \
output_bw, output_FL, \
layer.quantization_param.bw_params, \
layer.quantization_param.fl_params, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
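#quantize layer inputs/outputs, then greedily search a per-layer IL offset that maximizes mAP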
def quantize_net_output_dynamic(net_proto, net_output_IL, net_input_IL, output_bw, extra_IL, shared_dict):
input_bw = output_bw;
#input_FL = 0;
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS' \
or layer.type == 'DeconvolutionRistretto':
output_IL = net_output_IL[layer.name] + extra_IL
output_FL = output_bw - output_IL
input_IL = net_input_IL[layer.name] + extra_IL
input_FL = input_bw - input_IL
change_layer_bw(net_proto, layer.name, \
input_bw, input_FL, \
output_bw, output_FL, \
layer.quantization_param.bw_params, \
layer.quantization_param.fl_params, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
write_to_prototxt(net_proto, './temp.prototxt')
p = multiprocessing.Process(target=mAP_worker, args=('input', './temp.prototxt',
shared_dict, GPU1))
p.start()
p.join()
ap = shared_dict['input']
best_ap = ap
print best_ap
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS' \
or layer.type == 'DeconvolutionRistretto':
print layer.name + "-input"
extra_IL_final = extra_IL;
for extra_IL_ in range(-2,2):
input_IL = net_input_IL[layer.name] + extra_IL_
input_FL = input_bw - input_IL
change_layer_bw(net_proto, layer.name, \
input_bw, input_FL, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
layer.quantization_param.bw_params, \
layer.quantization_param.fl_params, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
write_to_prototxt(net_proto, './temp.prototxt')
p = multiprocessing.Process(target=mAP_worker, args=('input', './temp.prototxt',
shared_dict, GPU1))
p.start()
p.join()
ap = shared_dict['input']
if best_ap < ap:
best_ap = ap
extra_IL_final = extra_IL_
print best_ap
print input_IL,input_FL
input_IL = net_input_IL[layer.name] + extra_IL_final
input_FL = input_bw - input_IL
change_layer_bw(net_proto, layer.name, \
input_bw, input_FL, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
layer.quantization_param.bw_params, \
layer.quantization_param.fl_params, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
print layer.name + "-output"
extra_IL_final = extra_IL
for extra_IL_ in range(-2,2):
output_IL = net_output_IL[layer.name] + extra_IL_
output_FL = output_bw - output_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
output_bw, output_FL, \
layer.quantization_param.bw_params, \
layer.quantization_param.fl_params, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
write_to_prototxt(net_proto, './temp.prototxt')
p = multiprocessing.Process(target=mAP_worker, args=('input', './temp.prototxt',
shared_dict, GPU1))
p.start()
p.join()
ap = shared_dict['input']
if best_ap < ap:
best_ap = ap
extra_IL_final = extra_IL_
print output_IL,output_FL
print best_ap
output_IL = net_output_IL[layer.name] + extra_IL_final
output_FL = output_bw - output_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
output_bw, output_FL, \
layer.quantization_param.bw_params, \
layer.quantization_param.fl_params, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
write_to_prototxt(net_proto, './temp.prototxt')
p = multiprocessing.Process(target=mAP_worker, args=('input', './temp.prototxt',
shared_dict, GPU1))
p.start()
p.join()
ap = shared_dict['input']
best_ap = ap
print best_ap
#quantize convolution layers in network
def quantize_net_conv(net_proto, net_param_IL, weighting_bw, extra_IL):
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS':
weighting_IL = net_param_IL[layer.name] + extra_IL
weighting_FL = weighting_bw - weighting_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
weighting_bw, weighting_FL, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
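#quantize convolution weights, then greedily search a per-layer IL offset that maximizes mAP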
def quantize_net_conv_dynamic(net_proto, net_param_IL, weighting_bw, extra_IL, shared_dict):
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS':
weighting_IL = net_param_IL[layer.name] + extra_IL
weighting_FL = weighting_bw - weighting_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
weighting_bw, weighting_FL, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
write_to_prototxt(net_proto, './temp.prototxt')
p = multiprocessing.Process(target=mAP_worker, args=('conv', './temp.prototxt',
shared_dict, GPU1))
p.start()
p.join()
ap = shared_dict['conv']
best_ap = ap
print best_ap
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS':
print layer.name + "-param"
extra_IL_final = extra_IL;
for extra_IL_ in range(-2,2):
weighting_IL = net_param_IL[layer.name] + extra_IL_
weighting_FL = weighting_bw - weighting_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
weighting_bw, weighting_FL, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
write_to_prototxt(net_proto, './temp.prototxt')
p = multiprocessing.Process(target=mAP_worker, args=('input', './temp.prototxt',
shared_dict, GPU1))
p.start()
p.join()
ap = shared_dict['input']
if best_ap < ap:
best_ap = ap
extra_IL_final = extra_IL_
print best_ap
print weighting_IL,weighting_FL
weighting_IL = net_param_IL[layer.name] + extra_IL_final
weighting_FL = weighting_bw - weighting_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
weighting_bw, weighting_FL, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
#quantize fully connected layer in network
def quantize_net_fc(net_proto, net_param_IL, weighting_bw, extra_IL):
for layer in net_proto.layer:
if layer.type == 'FcIVS':
weighting_IL = net_param_IL[layer.name] + extra_IL
weighting_FL = weighting_bw - weighting_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
weighting_bw, weighting_FL, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
#quantize deconvolution layer in network
def quantize_net_deconv(net_proto, net_param_IL, weighting_bw, extra_IL):
for layer in net_proto.layer:
if layer.type == 'DeconvolutionRistretto':
weighting_IL = net_param_IL[layer.name] + extra_IL
weighting_FL = weighting_bw - weighting_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
weighting_bw, weighting_FL, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
#read network spec in prototxt
def read_from_prototxt(ori_net_path):
net_proto = caffe_pb2.NetParameter()
fn = ori_net_path;
with open(fn) as f:
s = f.read()
txtf.Merge(s, net_proto)
return net_proto
#write network spec to prototxt
def write_to_prototxt(net_proto, out_net_path):
outf = out_net_path
#print 'writing', outf
with open(outf, 'w') as f:
f.write(str(net_proto))
#test network with no string printed
def test_qnet(net_path, caffemodel_path, imdb):
net = caffe.Net(net_path, caffemodel_path, caffe.TEST)
net.name = os.path.splitext(os.path.basename(caffemodel_path))[0]
ap = test_net_silent(net, imdb, max_per_image=args.max_per_image, vis=args.vis)
return ap
#print each layer name and spec
def print_net_layer_names(net):
print("Network layers:")
for name, layer in zip(net._layer_names, net.layers):
if layer.type == 'ConvolutionIVS' or layer.type == 'Convolution':
print("{:<30}: {:22s}({} blobs)".format(name, layer.type, len(layer.blobs)))
print dir(layer)
print layer.reshape
print layer.convolution_param
print net.layer[1].name
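#mAP_worker evaluates a quantized prototxt on the test imdb; intended to run in a separate process on the given GPU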
def mAP_worker(i, net_path, shared_dict, GPU_ID):
#caffe.set_mode_cpu()
#GPU_ID = 2 # Switch between 0 and 1 depending on the GPU you want to use.
cfg.GPU_ID = GPU_ID
caffe.set_device(GPU_ID)
caffe.set_mode_gpu()
imdb = get_imdb(args.imdb_name)
imdb.competition_mode(args.comp_mode)
if not cfg.TEST.HAS_RPN:
imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
ap = test_qnet(net_path, args.caffemodel, imdb)
shared_dict[i] = ap
def analyze_net_output_IL_worker(net_output_IL, net_input_IL, GPU_ID):
cfg.GPU_ID = GPU_ID
caffe.set_device(GPU_ID)
caffe.set_mode_gpu()
#caffe.set_mode_cpu()
net_proto = read_from_prototxt(args.prototxt_quantized)
net = caffe.Net(args.prototxt_quantized, args.caffemodel, caffe.TEST)
imdb = get_imdb(args.imdb_name)
imdb.competition_mode(args.comp_mode)
if not cfg.TEST.HAS_RPN:
imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
net_output_IL_, net_input_IL_ = analyze_net_output_IL(net, net_proto, imdb, max_per_image=args.max_per_image, vis=args.vis)
for t in net_output_IL_.keys():
net_output_IL[t] = net_output_IL_[t]
for t in net_input_IL_.keys():
net_input_IL[t] = net_input_IL_[t]
def analyze_net_adder_multiplier_IL_worker(net_adder_IL, net_multiplier_IL, GPU_ID):
cfg.GPU_ID = GPU_ID
#caffe.set_mode_cpu()
caffe.set_device(GPU_ID)
caffe.set_mode_gpu()
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
net_BAC = caffe.Net(args.prototxt_quantized_BAC, args.caffemodel, caffe.TEST)
imdb = get_imdb(args.imdb_name)
imdb.competition_mode(args.comp_mode)
if not cfg.TEST.HAS_RPN:
imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
net_adder_IL_, net_multiplier_IL_ = analyze_net_adder_multiplier_IL(net_BAC, net_proto_BAC, imdb,
max_per_image=args.max_per_image, vis=args.vis)
for t in net_adder_IL_.keys():
net_adder_IL[t] = net_adder_IL_[t]
for t in net_multiplier_IL_.keys():
net_multiplier_IL[t] = net_multiplier_IL_[t]
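#analyze parameter IL of the whole network; intended to run in a separate process on the given GPU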
def analyze_net_param_IL_worker(net_param_IL, GPU_ID):
cfg.GPU_ID = GPU_ID
caffe.set_device(GPU_ID)
caffe.set_mode_gpu()
net_proto = read_from_prototxt(args.prototxt_quantized)
net = caffe.Net(args.prototxt_quantized, args.caffemodel, caffe.TEST)
net_param_IL_ = analyze_net_param_IL(net, net_proto)
for t in net_param_IL_.keys():
net_param_IL[t] = net_param_IL_[t]
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
print('Using config:')
pprint.pprint(cfg)
manager = multiprocessing.Manager()
shared_dict = manager.dict()
timer = Timer()
# Bit Width for Analyze
bw_range_conv = [8, 4] #bit width for convolution layers
bw_range_deconv = [32, 16, 8, 4, 2] #bit width for deconvolution layers
bw_range_fc = [32, 16, 8, 7, 6, 5, 4, 2] #bit width for fully connected layers
bw_range_output = [32, 16, 8, 4, 2] #bit width for layer input and output
bw_conv = 5 #just initial
bw_deconv = 5 #just initial
bw_fc = 5 #just initial
bw_output = 5 #just initial
bw_adder = 12 #just initial
bw_multiplier = 12 #just initial
convIL_reduction = -1
deconvIL_reduction = -1
fcIL_reduction = -1
actIL_reduction = -2
adderIL_reduction = 0
multIL_reduction = 0
GPU1 = 1
GPU2 = 2
convert_net_to_qnet(args.prototxt, args.prototxt_quantized)
print '-----------------------------------'
net_proto = read_from_prototxt(args.prototxt_quantized)
print 'Analyzing network parameter IL'
net_param_IL = manager.dict()
p = multiprocessing.Process(target=analyze_net_param_IL_worker,
args=(net_param_IL, GPU1, ))
p.start()
p.join()
net_output_IL = manager.dict()
net_input_IL = manager.dict()
if args.act_analysis == None:
print 'Analyzing network output IL'
p = multiprocessing.Process(target=analyze_net_output_IL_worker,
args=(net_output_IL, net_input_IL, GPU1))
p.start()
p.join()
with open('act_analysis.json', 'w') as outfile:
act_analysis = dict()
act_analysis['net_output_IL'] = dict()
act_analysis['net_input_IL'] = dict()
for t in net_output_IL.keys():
act_analysis['net_output_IL'][t] = net_output_IL[t]
for t in net_input_IL.keys():
act_analysis['net_input_IL'][t] = net_input_IL[t]
json.dump(act_analysis, outfile)
else:
print 'Loading network output IL'
with open(args.act_analysis) as json_data:
act_analysis = json.load(json_data)
for t in act_analysis['net_output_IL'].keys():
net_output_IL[t] = act_analysis['net_output_IL'][t]
for t in act_analysis['net_input_IL'].keys():
net_input_IL[t] = act_analysis['net_input_IL'][t]
#Make Final Quantized Prototxt
print 'Final Quantization Testing'
net_proto = read_from_prototxt(args.prototxt_quantized)
quantize_net_conv(net_proto, net_param_IL, bw_conv, convIL_reduction)
quantize_net_deconv(net_proto, net_param_IL, bw_conv, deconvIL_reduction)
quantize_net_fc(net_proto, net_param_IL, bw_fc, fcIL_reduction)
quantize_net_output_dynamic(net_proto, net_output_IL, net_input_IL, bw_output, actIL_reduction, shared_dict)
#sys.exit(0)
#ap = test_qnet('./temp.prototxt', args.caffemodel, imdb)
print '----------------------------------------'
print '{}bit CONV, {}bit FC, {}bit layer output'.format(bw_conv, bw_fc, bw_output)
print 'Dynamic fixed point net:'
print '{}bit CONV and DECONV weights'.format(bw_conv)
print '{}bit FC weights'.format(bw_fc)
print '{}bit layer activations'.format(bw_output)
print 'Please fine-tune'
write_to_prototxt(net_proto, args.prototxt_quantized)
print 'Quantized Model saved to', args.prototxt_quantized
sys.exit(0)
print 'Create Bit-Accurate quantized prototxt'
convert_net_to_qnet_BAC_analysis(args.prototxt_quantized, args.prototxt_quantized_BAC)
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
print 'Loading Bit-Accurate quantized prototxt'
#print 'Analyzing network adder and multiplier'
net_adder_IL = manager.dict()
net_multiplier_IL = manager.dict()
if args.accumulator_analysis == None:
print 'Analyzing network adder and multiplier'
p = multiprocessing.Process(target=analyze_net_adder_multiplier_IL_worker,
args=(net_adder_IL, net_multiplier_IL, GPU1))
p.start()
p.join()
with open('accumulator_analysis.json', 'w') as outfile:
accumulator_analysis = dict()
accumulator_analysis['net_adder_IL'] = dict()
accumulator_analysis['net_multiplier_IL'] = dict()
for t in net_adder_IL.keys():
accumulator_analysis['net_adder_IL'][t] = net_adder_IL[t]
for t in net_multiplier_IL.keys():
accumulator_analysis['net_multiplier_IL'][t] = net_multiplier_IL[t]
json.dump(accumulator_analysis, outfile)
else:
print 'Loading network adder and multiplier analysis file'
with open(args.accumulator_analysis) as json_data:
accumulator_analysis = json.load(json_data)
for t in accumulator_analysis['net_adder_IL'].keys():
net_adder_IL[t] = accumulator_analysis['net_adder_IL'][t]
for t in accumulator_analysis['net_multiplier_IL'].keys():
net_multiplier_IL[t] = accumulator_analysis['net_multiplier_IL'][t]
convert_net_to_qnet_BAC(args.prototxt_quantized, args.prototxt_quantized_BAC)
print 'Create Final Bit-Accurate quantized prototxt'
convert_net_to_qnet_BAC(args.prototxt_quantized, args.prototxt_quantized_BAC)
net_proto_final = read_from_prototxt(args.prototxt_quantized_BAC)
print 'Loading Final Bit-Accurate quantized prototxt'
quantize_net_conv(net_proto_final, net_param_IL, bw_conv, convIL_reduction)
quantize_net_deconv(net_proto_final, net_param_IL, bw_conv, deconvIL_reduction)
quantize_net_fc(net_proto_final, net_param_IL, bw_fc, fcIL_reduction)
quantize_net_output(net_proto_final, net_output_IL, net_input_IL, bw_output, actIL_reduction)
quantize_net_multiplier(net_proto_final, net_multiplier_IL, bw_multiplier, multIL_reduction)
quantize_net_adder(net_proto_final, net_adder_IL, bw_adder, adderIL_reduction)
#ap = test_qnet('./temp_f.prototxt', args.caffemodel, imdb)
print '----------------------------------------'
print '{}bit adder, {}bit multiplier,'.format(bw_adder, bw_multiplier)
print 'Dynamic fixed point net:'
print '{}bit CONV and DECONV weights'.format(bw_conv)
print '{}bit FC weights'.format(bw_fc)
print '{}bit layer activations'.format(bw_output)
print '{}bit adder'.format(bw_adder)
print '{}bit multiplier'.format(bw_multiplier)
print 'Please fine-tune'
write_to_prototxt(net_proto_final, args.prototxt_quantized_BAC)
print 'Bit-Accurate Quantized Model saved to', args.prototxt_quantized_BAC
| [] | [] | [
"GLOG_minloglevel"
] | [] | ["GLOG_minloglevel"] | python | 1 | 0 | |
tasks/q1.py | """
Simple exercises to get used to TensorFlow API
You should thoroughly test your code.
TensorFlow's official documentation should be your best friend here
CS20: "TensorFlow for Deep Learning Research"
cs20.stanford.edu
Created by Chip Huyen ([email protected])
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
sess = tf.InteractiveSession()
###############################################################################
# 1a: Create two random 0-d tensors x and y of any distribution.
# Create a TensorFlow object that returns x + y if x > y, and x - y otherwise.
# Hint: look up tf.cond()
# I do the first problem for you
###############################################################################
x = tf.random_uniform([]) # Empty array as shape creates a scalar.
y = tf.random_uniform([])
out = tf.cond(tf.greater(x, y), lambda: x + y, lambda: x - y)
print(sess.run(out))
###############################################################################
# 1b: Create two 0-d tensors x and y randomly selected from the range [-1, 1).
# Return x + y if x < y, x - y if x > y, 0 otherwise.
# Hint: Look up tf.case().
###############################################################################
x = tf.random_uniform([], -1, 1, dtype=tf.float32)
y = tf.random_uniform([], -1, 1, dtype=tf.float32)
out = tf.case({tf.less(x, y): lambda: tf.add(x, y),
tf.greater(x, y): lambda: tf.subtract(x, y)},
default=lambda: tf.constant(0.0), exclusive=True)
print(sess.run(out))
###############################################################################
# 1c: Create the tensor x of the value [[0, -2, -1], [0, 1, 2]]
# and y as a tensor of zeros with the same shape as x.
# Return a boolean tensor that yields Trues if x equals y element-wise.
# Hint: Look up tf.equal().
###############################################################################
x = tf.constant([[0, -2, -1], [0, 1, 2]])
y = tf.zeros_like(x)
out = tf.equal(x, y)
print(sess.run(out))
###############################################################################
# 1d: Create the tensor x of value
# [29.05088806, 27.61298943, 31.19073486, 29.35532951,
# 30.97266006, 26.67541885, 38.08450317, 20.74983215,
# 34.94445419, 34.45999146, 29.06485367, 36.01657104,
# 27.88236427, 20.56035233, 30.20379066, 29.51215172,
# 33.71149445, 28.59134293, 36.05556488, 28.66994858].
# Get the indices of elements in x whose values are greater than 30.
# Hint: Use tf.where().
# Then extract elements whose values are greater than 30.
# Hint: Use tf.gather().
###############################################################################
x = tf.constant([29.05088806, 27.61298943, 31.19073486, 29.35532951,
30.97266006, 26.67541885, 38.08450317, 20.74983215,
34.94445419, 34.45999146, 29.06485367, 36.01657104,
27.88236427, 20.56035233, 30.20379066, 29.51215172,
33.71149445, 28.59134293, 36.05556488, 28.66994858])
indices = tf.where(x>30)
out = tf.gather(x,indices)
print(sess.run(out))
###############################################################################
# 1e: Create a diagonal 2-d tensor of size 6 x 6 with the diagonal values of 1,
# 2, ..., 6
# Hint: Use tf.range() and tf.diag().
###############################################################################
values = tf.range(1, 7)
out = tf.diag(values)
print(sess.run(out))
###############################################################################
# 1f: Create a random 2-d tensor of size 10 x 10 from any distribution.
# Calculate its determinant.
# Hint: Look at tf.matrix_determinant().
###############################################################################
m = tf.random_normal([10, 10], mean=10, stddev=1)
out = tf.matrix_determinant(m)
print(sess.run(out))
###############################################################################
# 1g: Create tensor x with value [5, 2, 3, 5, 10, 6, 2, 3, 4, 2, 1, 1, 0, 9].
# Return the unique elements in x
# Hint: use tf.unique(). Keep in mind that tf.unique() returns a tuple.
###############################################################################
x = tf.constant([5, 2, 3, 5, 10, 6, 2, 3, 4, 2, 1, 1, 0, 9])
unique_values, indices = tf.unique(x)
print(sess.run(unique_values))
###############################################################################
# 1h: Create two tensors x and y of shape 300 from any normal distribution,
# as long as they are from the same distribution.
# Use tf.cond() to return:
# - The mean squared error of (x - y) if the average of all elements in (x - y)
# is negative, or
# - The sum of absolute value of all elements in the tensor (x - y) otherwise.
# Hint: see the Huber loss function in the lecture slides 3.
###############################################################################
x = tf.random_normal([300], mean=5, stddev=1)
y = tf.random_normal([300], mean=5, stddev=1)
average = tf.reduce_mean(x - y)
def f1(): return tf.reduce_mean(tf.square(x - y))
def f2(): return tf.reduce_sum(tf.abs(x - y))
out = tf.cond(average < 0, f1, f2)
print(sess.run(out))
| [] | [] | [
"TF_CPP_MIN_LOG_LEVEL"
] | [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "polynize.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
application.go | package awsom
import (
"errors"
"github.com/GeertJohan/go.rice"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/secretsmanager"
awsom_session "github.com/hekonsek/awsom-session"
"os"
"strings"
)
const ErrorApplicationNameTooShort = "ERR_TO_SHORT"
// Application type
type Application struct {
Name string
GitUrl string
}
func (application *Application) CreateOrUpdate() error {
if len(application.Name) < 3 {
return errors.New(ErrorApplicationNameTooShort)
}
sess, err := awsom_session.NewSession()
if err != nil {
panic(err)
}
secretsManagerService := secretsmanager.New(sess)
secrets, err := secretsManagerService.ListSecrets(&secretsmanager.ListSecretsInput{MaxResults: aws.Int64(100)})
if err != nil {
panic(err)
}
secretExists := false
for _, secret := range secrets.SecretList {
if *secret.Name == application.Name {
secretExists = true
break
}
}
if !secretExists {
_, err := secretsManagerService.CreateSecret(&secretsmanager.CreateSecretInput{
Name: aws.String(application.Name),
SecretString: aws.String(os.Getenv("GITHUB_TOKEN")),
})
if err != nil {
panic(err)
}
}
err = ApplyCodeBuildDefaults(CodeBuild{
Name: BuildStageName(application.Name),
GitUrl: application.GitUrl,
}).CreateOrUpdate()
if err != nil {
return err
}
box, err := rice.FindBox("rice")
if err != nil {
return err
}
configureBuildSpec, err := box.String("buildspec-configure.yml")
if err != nil {
return err
}
err = ApplyCodeBuildDefaults(CodeBuild{
Name: ConfigureStageName(application.Name),
GitUrl: application.GitUrl,
BuildSpec: configureBuildSpec,
BuildImage: "hekonsek/awsom",
}).CreateOrUpdate()
if err != nil {
return err
}
err = ApplyCodeBuildDefaults(CodeBuild{
Name: VersionStageName(application.Name),
GitUrl: application.GitUrl,
BuildSpec: "buildspec-version.yml",
BuildImage: "hekonsek/awsom",
}).CreateOrUpdate()
if err != nil {
return err
}
err = ApplyCodeBuildDefaults(CodeBuild{
Name: DockerizeStageName(application.Name),
GitUrl: application.GitUrl,
BuildSpec: "buildspec-dockerize.yml",
BuildImage: "aws/codebuild/docker:18.09.0",
}).CreateOrUpdate()
if err != nil {
return err
}
err = (&CodePipeline{
Name: application.Name,
GitUrl: application.GitUrl,
}).CreateOrUpdate()
if err != nil {
return err
}
return nil
}
type ApplicationRecord struct {
Name string
}
func ListApplications() ([]ApplicationRecord, error) {
pipelines, err := ListCodePipelines()
if err != nil {
return nil, err
}
applications := []ApplicationRecord{}
for _, pipeline := range pipelines {
applications = append(applications, ApplicationRecord{
Name: pipeline.Name,
})
}
return applications, nil
}
func DeleteApplication(name string) error {
err := DeleteCodeBuild(name)
if err != nil {
return err
}
err = DeleteCodeBuild(VersionStageName(name))
if err != nil {
return err
}
err = DeleteCodeBuild(DockerizeStageName(name))
if err != nil {
return err
}
err = DeleteCodePipeline(name)
if err != nil {
return err
}
return nil
}
func ApplicationNameFromCurrentBuild() string {
buildName := strings.Split(os.Getenv("CODEBUILD_BUILD_ID"), ":")[0]
return strings.Split(buildName, "-")[0]
}
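// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): a typical lifecycle for
// the Application type defined above. The application name and Git URL are
// placeholders.
// ---------------------------------------------------------------------------
func exampleApplicationLifecycle() error {
	app := &Application{
		Name:   "myapp",
		GitUrl: "https://github.com/example/myapp.git",
	}
	// Creates the Secrets Manager entry, the four CodeBuild stages and the
	// CodePipeline, or updates them if they already exist.
	if err := app.CreateOrUpdate(); err != nil {
		return err
	}
	apps, err := ListApplications()
	if err != nil {
		return err
	}
	for _, a := range apps {
		if a.Name == app.Name {
			// The pipeline exists; tear everything down again.
			return DeleteApplication(a.Name)
		}
	}
	return nil
}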
| [
"\"GITHUB_TOKEN\"",
"\"CODEBUILD_BUILD_ID\""
] | [] | [
"CODEBUILD_BUILD_ID",
"GITHUB_TOKEN"
] | [] | ["CODEBUILD_BUILD_ID", "GITHUB_TOKEN"] | go | 2 | 0 | |
app/src/main/java/com/batdemir/android/todolist/application/android/Tools/ToolStorage.java | package com.batdemir.android.todolist.application.android.Tools;
import android.annotation.SuppressLint;
import android.content.Context;
import android.os.Build;
import android.os.Environment;
import android.text.TextUtils;
import java.io.File;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
public class ToolStorage {
// Primary physical SD-CARD (not emulated)
private static final String RAW_EXTERNAL_STORAGE = System.getenv("EXTERNAL_STORAGE");
// All Secondary SD-CARDs (all exclude primary) separated by File.pathSeparator, i.e: ":", ";"
private static final String RAW_SECONDARY_STORAGES = System.getenv("SECONDARY_STORAGE");
// Primary emulated SD-CARD
private static final String RAW_EMULATED_STORAGE_TARGET = System.getenv("EMULATED_STORAGE_TARGET");
// PhysicalPaths based on phone model
@SuppressLint("SdCardPath")
@SuppressWarnings("SpellCheckingInspection")
private static final String[] KNOWN_PHYSICAL_PATHS = new String[]{
"/storage/sdcard0",
"/storage/sdcard1", //Motorola Xoom
"/storage/extsdcard", //Samsung SGS3
"/storage/sdcard0/external_sdcard", //User request
"/mnt/extsdcard",
"/mnt/sdcard/external_sd", //Samsung galaxy family
"/mnt/sdcard/ext_sd",
"/mnt/external_sd",
"/mnt/media_rw/sdcard1", //4.4.2 on CyanogenMod S3
"/removable/microsd", //Asus transformer prime
"/mnt/emmc",
"/storage/external_SD", //LG
"/storage/ext_sd", //HTC One Max
"/storage/removable/sdcard1", //Sony Xperia Z1
"/data/sdext",
"/data/sdext2",
"/data/sdext3",
"/data/sdext4",
"/sdcard1", //Sony Xperia Z
"/sdcard2", //HTC One M8s
"/storage/microsd" //ASUS ZenFone 2
};
/**
     * Returns all available SD-Cards in the system (including emulated)
* <p/>
     * Warning: Hack! Based on Android source code of version 4.3 (API 18),
     * because there is no standard way to get it.
*
     * @return paths to all available SD-Cards in the system (including emulated)
*/
public static String[] getStorageDirectories(Context context) {
// Final set of paths
final Set<String> finalAvailableDirectoriesSet = new HashSet<>();
if (TextUtils.isEmpty(RAW_EMULATED_STORAGE_TARGET)) {
if (android.os.Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
// Solution of empty raw emulated storage for android version > marshmallow
// because the RAW_EXTERNAL_STORAGE become something i.e: "/Storage/A5F9-15F4"
File[] files = context.getExternalFilesDirs(null);
for (File file : files) {
if (file != null) {
String applicationSpecificAbsolutePath = file.getAbsolutePath();
String emulatedRootPath = applicationSpecificAbsolutePath.substring(
0, applicationSpecificAbsolutePath.indexOf("Android/data")
);
finalAvailableDirectoriesSet.add(emulatedRootPath);
}
}
} else {
if (TextUtils.isEmpty(RAW_EXTERNAL_STORAGE)) {
finalAvailableDirectoriesSet.addAll(getAvailablePhysicalPaths());
} else {
// Device has physical external storage; use plain paths.
finalAvailableDirectoriesSet.add(RAW_EXTERNAL_STORAGE);
}
}
} else {
// Device has emulated storage; external storage paths should have id in the last segment
String rawStorageId = "";
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR1) {
final String path = Environment.getExternalStorageDirectory().getAbsolutePath();
                final String[] folders = path.split(File.separator);
final String lastSegment = folders[folders.length - 1];
if (!TextUtils.isEmpty(lastSegment) && TextUtils.isDigitsOnly(lastSegment)) {
rawStorageId = lastSegment;
}
}
// i.e: "/storage/emulated/storageId" where storageId is 0, 1, 2, ...
if (TextUtils.isEmpty(rawStorageId)) {
finalAvailableDirectoriesSet.add(RAW_EMULATED_STORAGE_TARGET);
} else {
finalAvailableDirectoriesSet.add(RAW_EMULATED_STORAGE_TARGET + File.separator + rawStorageId);
}
}
// Add all secondary storages
if (!TextUtils.isEmpty(RAW_SECONDARY_STORAGES)) {
// All Secondary SD-CARDs split into array
final String[] rawSecondaryStorages = RAW_SECONDARY_STORAGES.split(File.pathSeparator);
Collections.addAll(finalAvailableDirectoriesSet, rawSecondaryStorages);
}
return finalAvailableDirectoriesSet.toArray(new String[finalAvailableDirectoriesSet.size()]);
}
public static String[] getSDCardDirectory(Context context) {
// Final set of paths
final Set<String> finalAvailableDirectoriesSet = new HashSet<>();
if (TextUtils.isEmpty(RAW_EMULATED_STORAGE_TARGET)) {
if (android.os.Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
// Solution of empty raw emulated storage for android version > marshmallow
// because the RAW_EXTERNAL_STORAGE become something i.e: "/Storage/A5F9-15F4"
File[] files = context.getExternalFilesDirs(null);
for (File file : files) {
if (file != null) {
String applicationSpecificAbsolutePath = file.getAbsolutePath();
String emulatedRootPath = applicationSpecificAbsolutePath.substring(
0, applicationSpecificAbsolutePath.indexOf("Android/data")
);
if (!emulatedRootPath.contains("emulated")) {
finalAvailableDirectoriesSet.add(emulatedRootPath);
}
}
}
} else {
if (TextUtils.isEmpty(RAW_EXTERNAL_STORAGE)) {
finalAvailableDirectoriesSet.addAll(getAvailablePhysicalPaths());
} else {
// Device has physical external storage; use plain paths.
finalAvailableDirectoriesSet.add(RAW_EXTERNAL_STORAGE);
}
}
} else {
// Device has emulated storage; external storage paths should have id in the last segment
String rawStorageId = "";
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR1) {
final String path = Environment.getExternalStorageDirectory().getAbsolutePath();
                final String[] folders = path.split(File.separator);
final String lastSegment = folders[folders.length - 1];
if (!TextUtils.isEmpty(lastSegment) && TextUtils.isDigitsOnly(lastSegment)) {
rawStorageId = lastSegment;
}
}
// i.e: "/storage/emulated/storageId" where storageId is 0, 1, 2, ...
if (TextUtils.isEmpty(rawStorageId)) {
finalAvailableDirectoriesSet.add(RAW_EMULATED_STORAGE_TARGET);
} else {
finalAvailableDirectoriesSet.add(RAW_EMULATED_STORAGE_TARGET + File.separator + rawStorageId);
}
}
// Add all secondary storages
// if (!TextUtils.isEmpty(RAW_SECONDARY_STORAGES)) {
// // All Secondary SD-CARDs split into array
// final String[] rawSecondaryStorages = RAW_SECONDARY_STORAGES.split(File.pathSeparator);
// Collections.addAll(finalAvailableDirectoriesSet, rawSecondaryStorages);
// }
return finalAvailableDirectoriesSet.toArray(new String[finalAvailableDirectoriesSet.size()]);
}
/**
* Filter available physical paths from known physical paths
*
* @return List of available physical paths from current device
*/
private static List<String> getAvailablePhysicalPaths() {
List<String> availablePhysicalPaths = new ArrayList<>();
for (String physicalPath : KNOWN_PHYSICAL_PATHS) {
File file = new File(physicalPath);
if (file.exists()) {
availablePhysicalPaths.add(physicalPath);
}
}
return availablePhysicalPaths;
}
}
| [
"\"EXTERNAL_STORAGE\"",
"\"SECONDARY_STORAGE\"",
"\"EMULATED_STORAGE_TARGET\""
] | [] | [
"EMULATED_STORAGE_TARGET",
"SECONDARY_STORAGE",
"EXTERNAL_STORAGE"
] | [] | ["EMULATED_STORAGE_TARGET", "SECONDARY_STORAGE", "EXTERNAL_STORAGE"] | java | 3 | 0 | |
vendor/github.com/ligato/vpp-agent/vendor/github.com/ligato/cn-infra/db/keyval/consul/consul.go | // Copyright (c) 2018 Cisco and/or its affiliates.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package consul
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/ligato/cn-infra/datasync"
"github.com/ligato/cn-infra/db/keyval"
"github.com/ligato/cn-infra/logging"
"github.com/ligato/cn-infra/logging/logrus"
"github.com/hashicorp/consul/api"
)
var consulLogger = logrus.NewLogger("consul")
func init() {
if os.Getenv("DEBUG_CONSUL_CLIENT") != "" {
consulLogger.SetLevel(logging.DebugLevel)
}
}
func transformKey(key string) string {
return strings.TrimPrefix(key, "/")
}
// Client serves as a client for Consul KV storage and implements keyval.CoreBrokerWatcher interface.
type Client struct {
client *api.Client
}
// NewClient creates new client for Consul using given address.
func NewClient(cfg *api.Config) (store *Client, err error) {
var c *api.Client
if c, err = api.NewClient(cfg); err != nil {
return nil, fmt.Errorf("failed to create Consul client %s", err)
}
peers, err := c.Status().Peers()
if err != nil {
return nil, err
}
consulLogger.Infof("consul peers: %v", peers)
return &Client{
client: c,
}, nil
}
// Put stores given data for the key.
func (c *Client) Put(key string, data []byte, opts ...datasync.PutOption) error {
consulLogger.Debugf("Put: %q", key)
p := &api.KVPair{Key: transformKey(key), Value: data}
_, err := c.client.KV().Put(p, nil)
if err != nil {
return err
}
return nil
}
// NewTxn creates new transaction.
func (c *Client) NewTxn() keyval.BytesTxn {
return &txn{
kv: c.client.KV(),
}
}
// GetValue returns data for the given key.
func (c *Client) GetValue(key string) (data []byte, found bool, revision int64, err error) {
consulLogger.Debugf("GetValue: %q", key)
pair, _, err := c.client.KV().Get(transformKey(key), nil)
if err != nil {
return nil, false, 0, err
} else if pair == nil {
return nil, false, 0, nil
}
return pair.Value, true, int64(pair.ModifyIndex), nil
}
// ListValues returns an iterator with key-value pairs for the given key prefix.
func (c *Client) ListValues(key string) (keyval.BytesKeyValIterator, error) {
pairs, _, err := c.client.KV().List(transformKey(key), nil)
if err != nil {
return nil, err
}
return &bytesKeyValIterator{len: len(pairs), pairs: pairs}, nil
}
// ListKeys returns an iterator with keys for the given key prefix.
func (c *Client) ListKeys(prefix string) (keyval.BytesKeyIterator, error) {
keys, _, err := c.client.KV().Keys(transformKey(prefix), "", nil)
if err != nil {
return nil, err
}
return &bytesKeyIterator{len: len(keys), keys: keys}, nil
}
// Delete deletes given key.
func (c *Client) Delete(key string, opts ...datasync.DelOption) (existed bool, err error) {
consulLogger.Debugf("Delete: %q", key)
if _, err := c.client.KV().Delete(transformKey(key), nil); err != nil {
return false, err
}
return true, nil
}
// Watch watches given list of key prefixes.
func (c *Client) Watch(resp func(keyval.BytesWatchResp), closeChan chan string, keys ...string) error {
consulLogger.Debug("Watch:", keys)
for _, k := range keys {
if err := c.watch(resp, closeChan, k); err != nil {
return err
}
}
return nil
}
type watchResp struct {
typ datasync.Op
key string
value, prevValue []byte
rev int64
}
// GetChangeType returns "Put" for BytesWatchPutResp.
func (resp *watchResp) GetChangeType() datasync.Op {
return resp.typ
}
// GetKey returns the key that the value has been inserted under.
func (resp *watchResp) GetKey() string {
return resp.key
}
// GetValue returns the value that has been inserted.
func (resp *watchResp) GetValue() []byte {
return resp.value
}
// GetPrevValue returns the previous value that has been inserted.
func (resp *watchResp) GetPrevValue() []byte {
return resp.prevValue
}
// GetRevision returns the revision associated with the 'put' operation.
func (resp *watchResp) GetRevision() int64 {
return resp.rev
}
func (c *Client) watch(resp func(watchResp keyval.BytesWatchResp), closeCh chan string, prefix string) error {
consulLogger.Debug("watch:", prefix)
ctx, cancel := context.WithCancel(context.Background())
recvChan := c.watchPrefix(ctx, prefix)
go func(regPrefix string) {
defer cancel()
for {
select {
case wr, ok := <-recvChan:
if !ok {
consulLogger.WithField("prefix", prefix).
Debug("Watch recv chan was closed")
return
}
for _, ev := range wr.Events {
key := ev.Key
if !strings.HasPrefix(key, "/") && strings.HasPrefix(regPrefix, "/") {
key = "/" + key
}
var r keyval.BytesWatchResp
if ev.Type == datasync.Put {
r = &watchResp{
typ: datasync.Put,
key: key,
value: ev.Value,
prevValue: ev.PrevValue,
rev: ev.Revision,
}
} else {
r = &watchResp{
typ: datasync.Delete,
key: key,
value: ev.Value,
rev: ev.Revision,
}
}
resp(r)
}
case closeVal, ok := <-closeCh:
if !ok || closeVal == regPrefix {
consulLogger.WithField("prefix", prefix).
Debug("Watch ended")
return
}
}
}
}(prefix)
return nil
}
type watchEvent struct {
Type datasync.Op
Key string
Value []byte
PrevValue []byte
Revision int64
}
type watchResponse struct {
Events []*watchEvent
Err error
}
func (c *Client) watchPrefix(ctx context.Context, prefix string) <-chan watchResponse {
consulLogger.Debug("watchPrefix:", prefix)
ch := make(chan watchResponse, 1)
// Retrieve KV pairs and latest index
qOpt := &api.QueryOptions{}
oldPairs, qm, err := c.client.KV().List(prefix, qOpt.WithContext(ctx))
if err != nil {
ch <- watchResponse{Err: err}
close(ch)
return ch
}
oldIndex := qm.LastIndex
oldPairsMap := make(map[string]*api.KVPair)
consulLogger.Debugf("prefix %v listing %v pairs (last index: %v)", prefix, len(oldPairs), oldIndex)
for _, pair := range oldPairs {
consulLogger.Debugf(" - key: %q create: %v modify: %v value: %v", pair.Key, pair.CreateIndex, pair.ModifyIndex, len(pair.Value))
oldPairsMap[pair.Key] = pair
}
go func() {
for {
// Wait for an update to occur since the last index
var newPairs api.KVPairs
qOpt := &api.QueryOptions{
WaitIndex: oldIndex,
}
newPairs, qm, err = c.client.KV().List(prefix, qOpt.WithContext(ctx))
if err != nil {
ch <- watchResponse{Err: err}
close(ch)
return
}
newIndex := qm.LastIndex
// If the index is same as old one, request probably timed out, so we start again
if oldIndex == newIndex {
consulLogger.Debug("index unchanged, next round")
continue
}
consulLogger.Debugf("prefix %q: listing %v new pairs, new index: %v (old index: %v)", prefix, len(newPairs), newIndex, oldIndex)
for _, pair := range newPairs {
consulLogger.Debugf(" + key: %q create: %v modify: %v value: %v", pair.Key, pair.CreateIndex, pair.ModifyIndex, len(pair.Value))
}
var evs []*watchEvent
// Search for all created and modified KV
for _, pair := range newPairs {
if pair.ModifyIndex > oldIndex {
var prevVal []byte
if oldPair, ok := oldPairsMap[pair.Key]; ok {
prevVal = oldPair.Value
}
consulLogger.Debugf(" * modified key: %v prevValue: %v prevModify: %v", pair.Key, len(pair.Value), len(prevVal))
evs = append(evs, &watchEvent{
Type: datasync.Put,
Key: pair.Key,
Value: pair.Value,
PrevValue: prevVal,
Revision: int64(pair.ModifyIndex),
})
}
delete(oldPairsMap, pair.Key)
}
// Search for all deleted KV
for _, pair := range oldPairsMap {
evs = append(evs, &watchEvent{
Type: datasync.Delete,
Key: pair.Key,
PrevValue: pair.Value,
Revision: int64(pair.ModifyIndex),
})
}
// Prepare latest KV pairs and last index for next round
oldIndex = newIndex
oldPairsMap = make(map[string]*api.KVPair)
for _, pair := range newPairs {
oldPairsMap[pair.Key] = pair
}
ch <- watchResponse{Events: evs}
}
}()
return ch
}
// Close returns nil.
func (c *Client) Close() error {
return nil
}
// NewBroker creates a new instance of a proxy that provides
// access to Consul. The proxy will reuse the connection from Client.
// <prefix> will be prepended to the key argument in all calls from the created
// BrokerWatcher. To avoid using a prefix, pass the keyval.Root constant as
// an argument.
func (c *Client) NewBroker(prefix string) keyval.BytesBroker {
return &BrokerWatcher{
Client: c,
prefix: prefix,
}
}
// NewWatcher creates a new instance of a proxy that provides
// access to Consul. The proxy will reuse the connection from Client.
// <prefix> will be prepended to the key argument in all calls on created
// BrokerWatcher. To avoid using a prefix, pass the keyval.Root constant as
// an argument.
func (c *Client) NewWatcher(prefix string) keyval.BytesWatcher {
return &BrokerWatcher{
Client: c,
prefix: prefix,
}
}
// BrokerWatcher uses Client to access the datastore.
// The connection can be shared among multiple BrokerWatcher.
// In case of accessing a particular subtree in Consul only,
// BrokerWatcher allows defining a keyPrefix that is prepended
// to all keys in its methods in order to shorten keys used in arguments.
type BrokerWatcher struct {
*Client
prefix string
}
func (pdb *BrokerWatcher) prefixKey(key string) string {
return filepath.Join(pdb.prefix, key)
}
// Put calls the 'Put' function of the underlying Client.
// KeyPrefix defined in constructor is prepended to the key argument.
func (pdb *BrokerWatcher) Put(key string, data []byte, opts ...datasync.PutOption) error {
return pdb.Client.Put(pdb.prefixKey(key), data, opts...)
}
// NewTxn creates a new transaction.
// KeyPrefix defined in constructor will be prepended to all key arguments
// in the transaction.
func (pdb *BrokerWatcher) NewTxn() keyval.BytesTxn {
return pdb.Client.NewTxn()
}
// GetValue calls the 'GetValue' function of the underlying Client.
// KeyPrefix defined in constructor is prepended to the key argument.
func (pdb *BrokerWatcher) GetValue(key string) (data []byte, found bool, revision int64, err error) {
return pdb.Client.GetValue(pdb.prefixKey(key))
}
// Delete calls the 'Delete' function of the underlying Client.
// KeyPrefix defined in constructor is prepended to the key argument.
func (pdb *BrokerWatcher) Delete(key string, opts ...datasync.DelOption) (existed bool, err error) {
return pdb.Client.Delete(pdb.prefixKey(key), opts...)
}
// ListValues calls the 'ListValues' function of the underlying Client.
// KeyPrefix defined in constructor is prepended to the key argument.
// The prefix is removed from the keys of the returned values.
func (pdb *BrokerWatcher) ListValues(key string) (keyval.BytesKeyValIterator, error) {
pairs, _, err := pdb.client.KV().List(pdb.prefixKey(key), nil)
if err != nil {
return nil, err
}
return &bytesKeyValIterator{len: len(pairs), pairs: pairs, prefix: pdb.prefix}, nil
}
// ListKeys calls the 'ListKeys' function of the underlying Client.
// KeyPrefix defined in constructor is prepended to the argument.
func (pdb *BrokerWatcher) ListKeys(prefix string) (keyval.BytesKeyIterator, error) {
keys, qm, err := pdb.client.KV().Keys(pdb.prefixKey(prefix), "", nil)
if err != nil {
return nil, err
}
return &bytesKeyIterator{len: len(keys), keys: keys, prefix: pdb.prefix, lastIndex: qm.LastIndex}, nil
}
// Watch starts subscription for changes associated with the selected <keys>.
// KeyPrefix defined in constructor is prepended to all <keys> in the argument
// list. The prefix is removed from the keys returned in watch events.
// Watch events will be delivered to <resp> callback.
func (pdb *BrokerWatcher) Watch(resp func(keyval.BytesWatchResp), closeChan chan string, keys ...string) error {
var prefixedKeys []string
for _, key := range keys {
prefixedKeys = append(prefixedKeys, pdb.prefixKey(key))
}
return pdb.Client.Watch(func(origResp keyval.BytesWatchResp) {
r := origResp.(*watchResp)
r.key = strings.TrimPrefix(r.key, pdb.prefix)
resp(r)
}, closeChan, prefixedKeys...)
}
// bytesKeyIterator is an iterator returned by ListKeys call.
type bytesKeyIterator struct {
index int
len int
keys []string
prefix string
lastIndex uint64
}
// GetNext returns the following key (+ revision) from the result set.
// When there are no more keys to get, <stop> is returned as *true*
// and <key> and <rev> are default values.
func (it *bytesKeyIterator) GetNext() (key string, rev int64, stop bool) {
if it.index >= it.len {
return "", 0, true
}
key = string(it.keys[it.index])
if !strings.HasPrefix(key, "/") && strings.HasPrefix(it.prefix, "/") {
key = "/" + key
}
if it.prefix != "" {
key = strings.TrimPrefix(key, it.prefix)
}
rev = int64(it.lastIndex)
it.index++
return key, rev, false
}
// Close does nothing since db cursors are not needed.
// The method is required by the code since it implements Iterator API.
func (it *bytesKeyIterator) Close() error {
return nil
}
// bytesKeyValIterator is an iterator returned by ListValues call.
type bytesKeyValIterator struct {
index int
len int
pairs api.KVPairs
prefix string
}
// GetNext returns the following item from the result set.
// When there are no more items to get, <stop> is returned as *true* and <val>
// is simply *nil*.
func (it *bytesKeyValIterator) GetNext() (val keyval.BytesKeyVal, stop bool) {
if it.index >= it.len {
return nil, true
}
key := string(it.pairs[it.index].Key)
if !strings.HasPrefix(key, "/") && strings.HasPrefix(it.prefix, "/") {
key = "/" + key
}
if it.prefix != "" {
key = strings.TrimPrefix(key, it.prefix)
}
data := it.pairs[it.index].Value
rev := int64(it.pairs[it.index].ModifyIndex)
var prevValue []byte
if len(it.pairs) > 0 && it.index > 0 {
prevValue = it.pairs[it.index-1].Value
}
it.index++
return &bytesKeyVal{key, data, prevValue, rev}, false
}
// Close does nothing since db cursors are not needed.
// The method is required by the code since it implements Iterator API.
func (it *bytesKeyValIterator) Close() error {
return nil
}
// bytesKeyVal represents a single key-value pair.
type bytesKeyVal struct {
key string
value []byte
prevValue []byte
revision int64
}
// Close does nothing since db cursors are not needed.
// The method is required by the code since it implements Iterator API.
func (kv *bytesKeyVal) Close() error {
return nil
}
// GetValue returns the value of the pair.
func (kv *bytesKeyVal) GetValue() []byte {
return kv.value
}
// GetPrevValue returns the previous value of the pair.
func (kv *bytesKeyVal) GetPrevValue() []byte {
return kv.prevValue
}
// GetKey returns the key of the pair.
func (kv *bytesKeyVal) GetKey() string {
return kv.key
}
// GetRevision returns the revision associated with the pair.
func (kv *bytesKeyVal) GetRevision() int64 {
return kv.revision
}
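// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): wiring the Client and
// BrokerWatcher defined above together. The Consul address and the keys used
// here are placeholders.
// ---------------------------------------------------------------------------
func exampleUsage() error {
	cfg := api.DefaultConfig()
	cfg.Address = "127.0.0.1:8500"
	client, err := NewClient(cfg)
	if err != nil {
		return err
	}
	// A broker scoped to a key prefix; the prefix is stripped again from the
	// keys returned by the List* and Watch calls.
	broker := client.NewBroker("/my-service/")
	if err := broker.Put("config/key", []byte("value")); err != nil {
		return err
	}
	data, found, _, err := broker.GetValue("config/key")
	if err != nil {
		return err
	}
	if found {
		consulLogger.Infof("value: %s", data)
	}
	return nil
}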
| [
"\"DEBUG_CONSUL_CLIENT\""
] | [] | [
"DEBUG_CONSUL_CLIENT"
] | [] | ["DEBUG_CONSUL_CLIENT"] | go | 1 | 0 | |
examples/cosmos_static_credentials/main.go | package main
import (
"log"
"os"
"time"
"github.com/rs/zerolog"
gremcos "github.com/supplyon/gremcos"
"github.com/supplyon/gremcos/api"
)
func main() {
host := os.Getenv("CDB_HOST")
username := os.Getenv("CDB_USERNAME")
password := os.Getenv("CDB_KEY")
logger := zerolog.New(os.Stdout).Output(zerolog.ConsoleWriter{Out: os.Stdout, TimeFormat: zerolog.TimeFieldFormat}).With().Timestamp().Logger()
if len(host) == 0 {
logger.Fatal().Msg("Host not set. Use export CDB_HOST=<CosmosDB Gremlin Endpoint> to specify it")
}
if len(username) == 0 {
logger.Fatal().Msg("Username not set. Use export CDB_USERNAME=/dbs/<cosmosdb name>/colls/<graph name> to specify it")
}
if len(password) == 0 {
logger.Fatal().Msg("Key not set. Use export CDB_KEY=<key> to specify it")
}
log.Println("Connecting using:")
log.Printf("\thost: %s\n", host)
log.Printf("\tusername: %s\n", username)
log.Printf("\tpassword is set %v\n", len(password) > 0)
cosmos, err := gremcos.New(host,
gremcos.WithAuth(username, password), // <- static password obtained and set only once at startup
gremcos.WithLogger(logger),
gremcos.NumMaxActiveConnections(10),
gremcos.ConnectionIdleTimeout(time.Second*30),
gremcos.MetricsPrefix("myservice"),
)
if err != nil {
logger.Fatal().Err(err).Msg("Failed to create the cosmos connector")
}
queryCosmos(cosmos, logger)
queryCosmosWithBindings(cosmos, logger)
if err := cosmos.Stop(); err != nil {
logger.Error().Err(err).Msg("Failed to stop cosmos connector")
}
	logger.Info().Msg("Torn down")
}
func queryCosmos(cosmos gremcos.Cosmos, logger zerolog.Logger) {
g := api.NewGraph("g")
	// adds a "knows" edge from the vertex with property name:hans to the vertex with property name:jan
	// jan <-knows- hans
query := g.V().Has("name", "jan").AddE("knows").From(g.V().Has("name", "hans"))
logger.Info().Msgf("Query: %s", query)
res, err := cosmos.ExecuteQuery(query)
if err != nil {
logger.Error().Err(err).Msg("Failed to execute a gremlin command")
return
}
responses := api.ResponseArray(res)
values, err := responses.ToValues()
if err == nil {
logger.Info().Msgf("Received Values: %v", values)
}
properties, err := responses.ToProperties()
if err == nil {
logger.Info().Msgf("Received Properties: %v", properties)
}
vertices, err := responses.ToVertices()
if err == nil {
logger.Info().Msgf("Received Vertices: %v", vertices)
}
edges, err := responses.ToEdges()
if err == nil {
logger.Info().Msgf("Received Edges: %v", edges)
}
}
func queryCosmosWithBindings(cosmos gremcos.Cosmos, logger zerolog.Logger) {
	// adds a "likes" edge from the vertex with property name:hans to the vertex with property name:jan
	// jan <-likes- hans
nameFrom := "jan"
nameTo := "hans"
relationship := "likes"
query := api.NewSimpleQB(`g.V().has("name", nameFrom).addE(relationship).from(g.V().has("name", nameTo))`)
logger.Info().Msgf("Query: %s", query)
res, err := cosmos.ExecuteWithBindings(query.String(), map[string]interface{}{
"nameFrom": nameFrom,
"nameTo": nameTo,
"relationship": relationship,
}, nil)
if err != nil {
logger.Error().Err(err).Msg("Failed to execute a gremlin command")
return
}
responses := api.ResponseArray(res)
values, err := responses.ToValues()
if err == nil {
logger.Info().Msgf("Received Values: %v", values)
}
properties, err := responses.ToProperties()
if err == nil {
logger.Info().Msgf("Received Properties: %v", properties)
}
vertices, err := responses.ToVertices()
if err == nil {
logger.Info().Msgf("Received Vertices: %v", vertices)
}
edges, err := responses.ToEdges()
if err == nil {
logger.Info().Msgf("Received Edges: %v", edges)
}
}
| [
"\"CDB_HOST\"",
"\"CDB_USERNAME\"",
"\"CDB_KEY\""
] | [] | [
"CDB_KEY",
"CDB_HOST",
"CDB_USERNAME"
] | [] | ["CDB_KEY", "CDB_HOST", "CDB_USERNAME"] | go | 3 | 0 | |
test/suite_test.go | package test
import (
"fmt"
"os"
"testing"
"time"
"github.com/cybozu-go/log"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/reporters"
. "github.com/onsi/gomega"
)
func Test(t *testing.T) {
if os.Getenv("SSH_PRIVKEY") == "" {
t.Skip("no SSH_PRIVKEY envvar")
}
RegisterFailHandler(Fail)
junitReporter := reporters.NewJUnitReporter("/tmp/junit.xml")
RunSpecsWithDefaultAndCustomReporters(t, "Test", []Reporter{junitReporter})
}
var _ = BeforeSuite(func() {
fmt.Println("Preparing...")
SetDefaultEventuallyPollingInterval(time.Second)
SetDefaultEventuallyTimeout(40 * time.Minute)
prepare()
log.DefaultLogger().SetOutput(GinkgoWriter)
fmt.Println("Begin tests...")
})
// This must be the only top-level test container.
// Other tests and test containers must be listed in this.
var _ = Describe("Test applications", func() {
BeforeEach(func() {
fmt.Printf("START: %s\n", time.Now().Format(time.RFC3339))
})
AfterEach(func() {
fmt.Printf("END: %s\n", time.Now().Format(time.RFC3339))
})
switch testSuite {
case "bootstrap":
bootstrapTest()
case "prepare":
bootstrapTest()
prepareTest()
case "run":
runTest()
}
})
func bootstrapTest() {
Context("prepareNodes", prepareNodes)
Context("prepareLoadPods", prepareLoadPods)
Context("setup", testSetup)
}
func prepareTest() {
if doReboot {
Context("prepare reboot rook-ceph", prepareRebootRookCeph)
Context("reboot", testRebootAllNodes)
Context("reboot rook-ceph", testRebootRookCeph)
}
// preparing resources before test to make things faster
Context("preparing moco", prepareMoco)
Context("preparing rook-ceph", prepareRookCeph)
Context("preparing argocd-ingress", prepareArgoCDIngress)
Context("preparing contour", prepareContour)
Context("preparing elastic", prepareElastic)
Context("preparing local-pv-provisioner", prepareLocalPVProvisioner)
Context("preparing metallb", prepareMetalLB)
Context("preparing pushgateway", preparePushgateway)
Context("preparing hpa", prepareHPA)
Context("preparing grafana-operator", prepareGrafanaOperator)
Context("preparing sandbox grafana", prepareSandboxGrafanaIngress)
Context("preparing topolvm", prepareTopoLVM)
Context("preparing teleport", prepareTeleport)
Context("preparing customer-egress", prepareCustomerEgress)
Context("preparing domestic-egress", prepareDomesticEgress)
Context("preparing sealed-secret", prepareSealedSecret)
Context("preparing pod-security-admission", preparePodSecurityAdmission)
Context("preparing accurate", prepareAccurate)
Context("preparing meows", prepareMeows)
Context("preparing network-policy", prepareNetworkPolicy) // this must be the last preparation.
}
func runTest() {
// running tests
Context("rook-ceph", testRookCeph)
Context("network-policy", testNetworkPolicy)
Context("metallb", testMetalLB)
Context("contour", testContour)
Context("machines-endpoints", testMachinesEndpoints)
Context("kube-state-metrics", testKubeStateMetrics)
Context("logging", testLogging)
Context("grafana-operator", testGrafanaOperator)
Context("sandbox-grafana", testSandboxGrafana)
Context("pushgateway", testPushgateway)
Context("hpa", testHPA)
Context("victoriametrics-operator", testVictoriaMetricsOperator)
Context("vmsmallset-components", testVMSmallsetClusterComponents)
Context("vmlargeset-components", testVMLargesetClusterComponents)
Context("topolvm", testTopoLVM)
Context("elastic", testElastic)
Context("argocd-ingress", testArgoCDIngress)
Context("admission", testAdmission)
Context("bmc-reverse-proxy", testBMCReverseProxy)
Context("local-pv-provisioner", testLocalPVProvisioner)
Context("teleport", testTeleport)
Context("team-management", testTeamManagement)
Context("moco", testMoco)
Context("sealed-secret", testSealedSecret)
Context("customer-egress", testCustomerEgress)
Context("domestic-egress", testDomesticEgress)
Context("pod-security-admission", testPodSecurityAdmission)
Context("meows", testMeows)
Context("session-log", testSessionLog)
Context("accurate", testAccurate)
}
| [
"\"SSH_PRIVKEY\""
] | [] | [
"SSH_PRIVKEY"
] | [] | ["SSH_PRIVKEY"] | go | 1 | 0 | |
symphony/integration/pytests/utils/constant.py | #!/usr/bin/env python3
# Copyright (c) 2004-present Facebook All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import os
TEST_USER_EMAIL = "[email protected]"
PLATFORM_SERVER_HEALTH_CHECK_URL = os.getenv(
"PLATFORM_SERVER_HEALTH_CHECK_URL", "http://platform-server/healthz"
)
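

def platform_server_is_healthy(timeout: float = 5.0) -> bool:
    # Illustrative sketch (not part of the original file): a minimal readiness
    # probe against the health-check URL above, using only the standard
    # library. The helper name is hypothetical.
    import urllib.request
    try:
        with urllib.request.urlopen(PLATFORM_SERVER_HEALTH_CHECK_URL,
                                    timeout=timeout) as response:
            return response.status == 200
    except OSError:
        return False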
| [] | [] | [
"PLATFORM_SERVER_HEALTH_CHECK_URL"
] | [] | ["PLATFORM_SERVER_HEALTH_CHECK_URL"] | python | 1 | 0 | |
antipetros_discordbot/utility/nextcloud.py | import os
def get_nextcloud_options():
# _options = {"recv_speed": 50 * (1024**2)}
_options = {}
if os.getenv('NEXTCLOUD_USERNAME') is not None:
_options['webdav_hostname'] = f"https://antistasi.de/dev_drive/remote.php/dav/files/{os.getenv('NEXTCLOUD_USERNAME')}/"
_options['webdav_login'] = os.getenv('NEXTCLOUD_USERNAME')
_options["webdav_timeout"] = 120
else:
if os.getenv('INFO_RUN') != "1":
raise RuntimeError('no nextcloud Username set')
if os.getenv('NEXTCLOUD_PASSWORD') is not None:
_options['webdav_password'] = os.getenv('NEXTCLOUD_PASSWORD')
else:
if os.getenv('INFO_RUN') != "1":
raise RuntimeError('no nextcloud Password set')
return _options
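

def get_nextcloud_client():
    """Illustrative sketch (not part of the original file).

    The option keys built above (webdav_hostname, webdav_login,
    webdav_password, webdav_timeout) follow the webdavclient3 convention, so a
    typical consumer would be webdav3.client.Client. Both that consumer and
    this helper's name are assumptions, not something defined elsewhere in
    this module.
    """
    from webdav3.client import Client
    return Client(get_nextcloud_options())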
| [] | [] | [
"NEXTCLOUD_USERNAME",
"NEXTCLOUD_PASSWORD",
"INFO_RUN"
] | [] | ["NEXTCLOUD_USERNAME", "NEXTCLOUD_PASSWORD", "INFO_RUN"] | python | 3 | 0 | |
test/unit/common/test_utils.py | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for swift.common.utils"""
from __future__ import print_function
import hashlib
from test.unit import temptree, debug_logger, make_timestamp_iter, \
with_tempdir, mock_timestamp_now
import ctypes
import contextlib
import errno
import eventlet
import eventlet.debug
import eventlet.event
import eventlet.patcher
import functools
import grp
import logging
import platform
import os
import mock
import posix
import pwd
import random
import re
import socket
import string
import sys
import json
import math
import inspect
import six
from six import BytesIO, StringIO
from six.moves.queue import Queue, Empty
from six.moves import http_client
from six.moves import range
from textwrap import dedent
import tempfile
import time
import unittest
import fcntl
import shutil
from getpass import getuser
from shutil import rmtree
from functools import partial
from tempfile import TemporaryFile, NamedTemporaryFile, mkdtemp
from netifaces import AF_INET6
from mock import MagicMock, patch
from six.moves.configparser import NoSectionError, NoOptionError
from uuid import uuid4
from swift.common.exceptions import Timeout, MessageTimeout, \
ConnectionTimeout, LockTimeout, ReplicationLockTimeout, \
MimeInvalid
from swift.common import utils
from swift.common.utils import is_valid_ip, is_valid_ipv4, is_valid_ipv6, \
set_swift_dir
from swift.common.container_sync_realms import ContainerSyncRealms
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.storage_policy import POLICIES, reload_storage_policies
from swift.common.swob import Request, Response
from test.unit import FakeLogger, requires_o_tmpfile_support, \
requires_o_tmpfile_support_in_tmp, quiet_eventlet_exceptions
threading = eventlet.patcher.original('threading')
class MockOs(object):
def __init__(self, pass_funcs=None, called_funcs=None, raise_funcs=None):
if pass_funcs is None:
pass_funcs = []
if called_funcs is None:
called_funcs = []
if raise_funcs is None:
raise_funcs = []
self.closed_fds = []
for func in pass_funcs:
setattr(self, func, self.pass_func)
self.called_funcs = {}
for func in called_funcs:
c_func = partial(self.called_func, func)
setattr(self, func, c_func)
for func in raise_funcs:
r_func = partial(self.raise_func, func)
setattr(self, func, r_func)
def pass_func(self, *args, **kwargs):
pass
setgroups = chdir = setsid = setgid = setuid = umask = pass_func
def called_func(self, name, *args, **kwargs):
self.called_funcs[name] = args
def raise_func(self, name, *args, **kwargs):
self.called_funcs[name] = args
raise OSError()
def dup2(self, source, target):
self.closed_fds.append(target)
def geteuid(self):
'''Pretend we are running as root.'''
return 0
def __getattr__(self, name):
# I only over-ride portions of the os module
try:
return object.__getattr__(self, name)
except AttributeError:
return getattr(os, name)
class MockUdpSocket(object):
def __init__(self, sendto_errno=None):
self.sent = []
self.sendto_errno = sendto_errno
def sendto(self, data, target):
if self.sendto_errno:
raise socket.error(self.sendto_errno,
'test errno %s' % self.sendto_errno)
self.sent.append((data, target))
def close(self):
pass
class MockSys(object):
def __init__(self):
self.stdin = TemporaryFile('w')
self.stdout = TemporaryFile('r')
self.stderr = TemporaryFile('r')
self.__stderr__ = self.stderr
self.stdio_fds = [self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()]
def reset_loggers():
if hasattr(utils.get_logger, 'handler4logger'):
for logger, handler in utils.get_logger.handler4logger.items():
logger.removeHandler(handler)
delattr(utils.get_logger, 'handler4logger')
if hasattr(utils.get_logger, 'console_handler4logger'):
for logger, h in utils.get_logger.console_handler4logger.items():
logger.removeHandler(h)
delattr(utils.get_logger, 'console_handler4logger')
# Reset the LogAdapter class thread local state. Use get_logger() here
# to fetch a LogAdapter instance because the items from
# get_logger.handler4logger above are the underlying logger instances,
# not the LogAdapter.
utils.get_logger(None).thread_locals = (None, None)
def reset_logger_state(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
reset_loggers()
try:
return f(self, *args, **kwargs)
finally:
reset_loggers()
return wrapper
class TestTimestamp(unittest.TestCase):
"""Tests for swift.common.utils.Timestamp"""
def test_invalid_input(self):
self.assertRaises(ValueError, utils.Timestamp, time.time(), offset=-1)
self.assertRaises(ValueError, utils.Timestamp, '123.456_78_90')
def test_invalid_string_conversion(self):
t = utils.Timestamp.now()
self.assertRaises(TypeError, str, t)
def test_offset_limit(self):
t = 1417462430.78693
# can't have a offset above MAX_OFFSET
self.assertRaises(ValueError, utils.Timestamp, t,
offset=utils.MAX_OFFSET + 1)
# exactly max offset is fine
ts = utils.Timestamp(t, offset=utils.MAX_OFFSET)
self.assertEqual(ts.internal, '1417462430.78693_ffffffffffffffff')
# but you can't offset it further
self.assertRaises(ValueError, utils.Timestamp, ts.internal, offset=1)
# unless you start below it
ts = utils.Timestamp(t, offset=utils.MAX_OFFSET - 1)
self.assertEqual(utils.Timestamp(ts.internal, offset=1),
'1417462430.78693_ffffffffffffffff')
def test_normal_format_no_offset(self):
expected = '1402436408.91203'
test_values = (
'1402436408.91203',
'1402436408.91203_00000000',
'1402436408.912030000',
'1402436408.912030000_0000000000000',
'000001402436408.912030000',
'000001402436408.912030000_0000000000',
1402436408.91203,
1402436408.912029,
1402436408.9120300000000000,
1402436408.91202999999999999,
utils.Timestamp(1402436408.91203),
utils.Timestamp(1402436408.91203, offset=0),
utils.Timestamp(1402436408.912029),
utils.Timestamp(1402436408.912029, offset=0),
utils.Timestamp('1402436408.91203'),
utils.Timestamp('1402436408.91203', offset=0),
utils.Timestamp('1402436408.91203_00000000'),
utils.Timestamp('1402436408.91203_00000000', offset=0),
)
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertEqual(timestamp.normal, expected)
# timestamp instance can also compare to string or float
self.assertEqual(timestamp, expected)
self.assertEqual(timestamp, float(expected))
self.assertEqual(timestamp, utils.normalize_timestamp(expected))
def test_isoformat(self):
expected = '2014-06-10T22:47:32.054580'
test_values = (
'1402440452.05458',
'1402440452.054579',
'1402440452.05458_00000000',
'1402440452.054579_00000000',
'1402440452.054580000',
'1402440452.054579999',
'1402440452.054580000_0000000000000',
'1402440452.054579999_0000ff00',
'000001402440452.054580000',
'000001402440452.0545799',
'000001402440452.054580000_0000000000',
'000001402440452.054579999999_00000fffff',
1402440452.05458,
1402440452.054579,
1402440452.0545800000000000,
1402440452.054579999,
utils.Timestamp(1402440452.05458),
utils.Timestamp(1402440452.0545799),
utils.Timestamp(1402440452.05458, offset=0),
utils.Timestamp(1402440452.05457999999, offset=0),
utils.Timestamp(1402440452.05458, offset=100),
utils.Timestamp(1402440452.054579, offset=100),
utils.Timestamp('1402440452.05458'),
utils.Timestamp('1402440452.054579999'),
utils.Timestamp('1402440452.05458', offset=0),
utils.Timestamp('1402440452.054579', offset=0),
utils.Timestamp('1402440452.05458', offset=300),
utils.Timestamp('1402440452.05457999', offset=300),
utils.Timestamp('1402440452.05458_00000000'),
utils.Timestamp('1402440452.05457999_00000000'),
utils.Timestamp('1402440452.05458_00000000', offset=0),
utils.Timestamp('1402440452.05457999_00000aaa', offset=0),
utils.Timestamp('1402440452.05458_00000000', offset=400),
utils.Timestamp('1402440452.054579_0a', offset=400),
)
for value in test_values:
self.assertEqual(utils.Timestamp(value).isoformat, expected)
expected = '1970-01-01T00:00:00.000000'
test_values = (
'0',
'0000000000.00000',
'0000000000.00000_ffffffffffff',
0,
0.0,
)
for value in test_values:
self.assertEqual(utils.Timestamp(value).isoformat, expected)
def test_not_equal(self):
ts = '1402436408.91203_0000000000000001'
test_values = (
utils.Timestamp('1402436408.91203_0000000000000002'),
utils.Timestamp('1402436408.91203'),
utils.Timestamp(1402436408.91203),
utils.Timestamp(1402436408.91204),
utils.Timestamp(1402436408.91203, offset=0),
utils.Timestamp(1402436408.91203, offset=2),
)
for value in test_values:
self.assertTrue(value != ts)
self.assertIs(True, utils.Timestamp(ts) == ts) # sanity
self.assertIs(False, utils.Timestamp(ts) != utils.Timestamp(ts))
self.assertIs(False, utils.Timestamp(ts) != ts)
self.assertIs(False, utils.Timestamp(ts) is None)
self.assertIs(True, utils.Timestamp(ts) is not None)
def test_no_force_internal_no_offset(self):
"""Test that internal is the same as normal with no offset"""
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
self.assertEqual(utils.Timestamp(0).internal, '0000000000.00000')
self.assertEqual(utils.Timestamp(1402437380.58186).internal,
'1402437380.58186')
self.assertEqual(utils.Timestamp(1402437380.581859).internal,
'1402437380.58186')
self.assertEqual(utils.Timestamp(0).internal,
utils.normalize_timestamp(0))
def test_no_force_internal_with_offset(self):
"""Test that internal always includes the offset if significant"""
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
self.assertEqual(utils.Timestamp(0, offset=1).internal,
'0000000000.00000_0000000000000001')
self.assertEqual(
utils.Timestamp(1402437380.58186, offset=16).internal,
'1402437380.58186_0000000000000010')
self.assertEqual(
utils.Timestamp(1402437380.581859, offset=240).internal,
'1402437380.58186_00000000000000f0')
self.assertEqual(
utils.Timestamp('1402437380.581859_00000001',
offset=240).internal,
'1402437380.58186_00000000000000f1')
def test_force_internal(self):
"""Test that internal always includes the offset if forced"""
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=True):
self.assertEqual(utils.Timestamp(0).internal,
'0000000000.00000_0000000000000000')
self.assertEqual(utils.Timestamp(1402437380.58186).internal,
'1402437380.58186_0000000000000000')
self.assertEqual(utils.Timestamp(1402437380.581859).internal,
'1402437380.58186_0000000000000000')
self.assertEqual(utils.Timestamp(0, offset=1).internal,
'0000000000.00000_0000000000000001')
self.assertEqual(
utils.Timestamp(1402437380.58186, offset=16).internal,
'1402437380.58186_0000000000000010')
self.assertEqual(
utils.Timestamp(1402437380.581859, offset=16).internal,
'1402437380.58186_0000000000000010')
def test_internal_format_no_offset(self):
expected = '1402436408.91203_0000000000000000'
test_values = (
'1402436408.91203',
'1402436408.91203_00000000',
'1402436408.912030000',
'1402436408.912030000_0000000000000',
'000001402436408.912030000',
'000001402436408.912030000_0000000000',
1402436408.91203,
1402436408.9120300000000000,
1402436408.912029,
1402436408.912029999999999999,
utils.Timestamp(1402436408.91203),
utils.Timestamp(1402436408.91203, offset=0),
utils.Timestamp(1402436408.912029),
utils.Timestamp(1402436408.91202999999999999, offset=0),
utils.Timestamp('1402436408.91203'),
utils.Timestamp('1402436408.91203', offset=0),
utils.Timestamp('1402436408.912029'),
utils.Timestamp('1402436408.912029', offset=0),
utils.Timestamp('1402436408.912029999999999'),
utils.Timestamp('1402436408.912029999999999', offset=0),
)
for value in test_values:
# timestamp instance is always equivalent
self.assertEqual(utils.Timestamp(value), expected)
if utils.FORCE_INTERNAL:
# the FORCE_INTERNAL flag makes the internal format always
# include the offset portion of the timestamp even when it's
# not significant and would be bad during upgrades
self.assertEqual(utils.Timestamp(value).internal, expected)
else:
# unless we FORCE_INTERNAL, when there's no offset the
# internal format is equivalent to the normalized format
self.assertEqual(utils.Timestamp(value).internal,
'1402436408.91203')
def test_internal_format_with_offset(self):
expected = '1402436408.91203_00000000000000f0'
test_values = (
'1402436408.91203_000000f0',
u'1402436408.91203_000000f0',
b'1402436408.91203_000000f0',
'1402436408.912030000_0000000000f0',
'1402436408.912029_000000f0',
'1402436408.91202999999_0000000000f0',
'000001402436408.912030000_000000000f0',
'000001402436408.9120299999_000000000f0',
utils.Timestamp(1402436408.91203, offset=240),
utils.Timestamp(1402436408.912029, offset=240),
utils.Timestamp('1402436408.91203', offset=240),
utils.Timestamp('1402436408.91203_00000000', offset=240),
utils.Timestamp('1402436408.91203_0000000f', offset=225),
utils.Timestamp('1402436408.9120299999', offset=240),
utils.Timestamp('1402436408.9120299999_00000000', offset=240),
utils.Timestamp('1402436408.9120299999_00000010', offset=224),
)
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertEqual(timestamp.internal, expected)
# can compare with offset if the string is internalized
self.assertEqual(timestamp, expected)
# if comparison value only includes the normalized portion and the
# timestamp includes an offset, it is considered greater
normal = utils.Timestamp(expected).normal
self.assertTrue(timestamp > normal,
'%r is not bigger than %r given %r' % (
timestamp, normal, value))
self.assertTrue(timestamp > float(normal),
'%r is not bigger than %f given %r' % (
timestamp, float(normal), value))
def test_short_format_with_offset(self):
expected = '1402436408.91203_f0'
timestamp = utils.Timestamp(1402436408.91203, 0xf0)
self.assertEqual(expected, timestamp.short)
expected = '1402436408.91203'
timestamp = utils.Timestamp(1402436408.91203)
self.assertEqual(expected, timestamp.short)
def test_raw(self):
expected = 140243640891203
timestamp = utils.Timestamp(1402436408.91203)
self.assertEqual(expected, timestamp.raw)
# 'raw' does not include offset
timestamp = utils.Timestamp(1402436408.91203, 0xf0)
self.assertEqual(expected, timestamp.raw)
def test_delta(self):
def _assertWithinBounds(expected, timestamp):
tolerance = 0.00001
minimum = expected - tolerance
maximum = expected + tolerance
self.assertTrue(float(timestamp) > minimum)
self.assertTrue(float(timestamp) < maximum)
timestamp = utils.Timestamp(1402436408.91203, delta=100)
_assertWithinBounds(1402436408.91303, timestamp)
self.assertEqual(140243640891303, timestamp.raw)
timestamp = utils.Timestamp(1402436408.91203, delta=-100)
_assertWithinBounds(1402436408.91103, timestamp)
self.assertEqual(140243640891103, timestamp.raw)
timestamp = utils.Timestamp(1402436408.91203, delta=0)
_assertWithinBounds(1402436408.91203, timestamp)
self.assertEqual(140243640891203, timestamp.raw)
# delta is independent of offset
timestamp = utils.Timestamp(1402436408.91203, offset=42, delta=100)
self.assertEqual(140243640891303, timestamp.raw)
self.assertEqual(42, timestamp.offset)
# cannot go negative
self.assertRaises(ValueError, utils.Timestamp, 1402436408.91203,
delta=-140243640891203)
def test_int(self):
expected = 1402437965
test_values = (
'1402437965.91203',
'1402437965.91203_00000000',
'1402437965.912030000',
'1402437965.912030000_0000000000000',
'000001402437965.912030000',
'000001402437965.912030000_0000000000',
1402437965.91203,
1402437965.9120300000000000,
1402437965.912029,
1402437965.912029999999999999,
utils.Timestamp(1402437965.91203),
utils.Timestamp(1402437965.91203, offset=0),
utils.Timestamp(1402437965.91203, offset=500),
utils.Timestamp(1402437965.912029),
utils.Timestamp(1402437965.91202999999999999, offset=0),
utils.Timestamp(1402437965.91202999999999999, offset=300),
utils.Timestamp('1402437965.91203'),
utils.Timestamp('1402437965.91203', offset=0),
utils.Timestamp('1402437965.91203', offset=400),
utils.Timestamp('1402437965.912029'),
utils.Timestamp('1402437965.912029', offset=0),
utils.Timestamp('1402437965.912029', offset=200),
utils.Timestamp('1402437965.912029999999999'),
utils.Timestamp('1402437965.912029999999999', offset=0),
utils.Timestamp('1402437965.912029999999999', offset=100),
)
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertEqual(int(timestamp), expected)
self.assertTrue(timestamp > expected)
def test_float(self):
expected = 1402438115.91203
test_values = (
'1402438115.91203',
'1402438115.91203_00000000',
'1402438115.912030000',
'1402438115.912030000_0000000000000',
'000001402438115.912030000',
'000001402438115.912030000_0000000000',
1402438115.91203,
1402438115.9120300000000000,
1402438115.912029,
1402438115.912029999999999999,
utils.Timestamp(1402438115.91203),
utils.Timestamp(1402438115.91203, offset=0),
utils.Timestamp(1402438115.91203, offset=500),
utils.Timestamp(1402438115.912029),
utils.Timestamp(1402438115.91202999999999999, offset=0),
utils.Timestamp(1402438115.91202999999999999, offset=300),
utils.Timestamp('1402438115.91203'),
utils.Timestamp('1402438115.91203', offset=0),
utils.Timestamp('1402438115.91203', offset=400),
utils.Timestamp('1402438115.912029'),
utils.Timestamp('1402438115.912029', offset=0),
utils.Timestamp('1402438115.912029', offset=200),
utils.Timestamp('1402438115.912029999999999'),
utils.Timestamp('1402438115.912029999999999', offset=0),
utils.Timestamp('1402438115.912029999999999', offset=100),
)
tolerance = 0.00001
minimum = expected - tolerance
maximum = expected + tolerance
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertTrue(float(timestamp) > minimum,
'%f is not bigger than %f given %r' % (
timestamp, minimum, value))
self.assertTrue(float(timestamp) < maximum,
'%f is not smaller than %f given %r' % (
timestamp, maximum, value))
# direct comparison of timestamp works too
self.assertTrue(timestamp > minimum,
'%s is not bigger than %f given %r' % (
timestamp.normal, minimum, value))
self.assertTrue(timestamp < maximum,
'%s is not smaller than %f given %r' % (
timestamp.normal, maximum, value))
# ... even against strings
self.assertTrue(timestamp > '%f' % minimum,
'%s is not bigger than %s given %r' % (
timestamp.normal, minimum, value))
self.assertTrue(timestamp < '%f' % maximum,
'%s is not smaller than %s given %r' % (
timestamp.normal, maximum, value))
def test_false(self):
self.assertFalse(utils.Timestamp(0))
self.assertFalse(utils.Timestamp(0, offset=0))
self.assertFalse(utils.Timestamp('0'))
self.assertFalse(utils.Timestamp('0', offset=0))
self.assertFalse(utils.Timestamp(0.0))
self.assertFalse(utils.Timestamp(0.0, offset=0))
self.assertFalse(utils.Timestamp('0.0'))
self.assertFalse(utils.Timestamp('0.0', offset=0))
self.assertFalse(utils.Timestamp(00000000.00000000))
self.assertFalse(utils.Timestamp(00000000.00000000, offset=0))
self.assertFalse(utils.Timestamp('00000000.00000000'))
self.assertFalse(utils.Timestamp('00000000.00000000', offset=0))
def test_true(self):
self.assertTrue(utils.Timestamp(1))
self.assertTrue(utils.Timestamp(1, offset=1))
self.assertTrue(utils.Timestamp(0, offset=1))
self.assertTrue(utils.Timestamp('1'))
self.assertTrue(utils.Timestamp('1', offset=1))
self.assertTrue(utils.Timestamp('0', offset=1))
self.assertTrue(utils.Timestamp(1.1))
self.assertTrue(utils.Timestamp(1.1, offset=1))
self.assertTrue(utils.Timestamp(0.0, offset=1))
self.assertTrue(utils.Timestamp('1.1'))
self.assertTrue(utils.Timestamp('1.1', offset=1))
self.assertTrue(utils.Timestamp('0.0', offset=1))
self.assertTrue(utils.Timestamp(11111111.11111111))
self.assertTrue(utils.Timestamp(11111111.11111111, offset=1))
self.assertTrue(utils.Timestamp(00000000.00000000, offset=1))
self.assertTrue(utils.Timestamp('11111111.11111111'))
self.assertTrue(utils.Timestamp('11111111.11111111', offset=1))
self.assertTrue(utils.Timestamp('00000000.00000000', offset=1))
def test_greater_no_offset(self):
now = time.time()
older = now - 1
timestamp = utils.Timestamp(now)
test_values = (
0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
1402443112.213252, '1402443112.213252', '1402443112.213252_ffff',
older, '%f' % older, '%f_0000ffff' % older,
)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp > value,
'%r is not greater than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp > other,
'%r is not greater than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp > other.normal,
'%r is not greater than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp > other.internal,
'%r is not greater than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp > float(other),
'%r is not greater than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp > int(other),
'%r is not greater than %r given %r' % (
timestamp, int(other), value))
def _test_greater_with_offset(self, now, test_values):
for offset in range(1, 1000, 100):
timestamp = utils.Timestamp(now, offset=offset)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp > value,
'%r is not greater than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp > other,
'%r is not greater than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp > other.normal,
'%r is not greater than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp > other.internal,
'%r is not greater than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp > float(other),
'%r is not greater than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp > int(other),
'%r is not greater than %r given %r' % (
timestamp, int(other), value))
def test_greater_with_offset(self):
# Part 1: use the natural time of the Python. This is deliciously
# unpredictable, but completely legitimate and realistic. Finds bugs!
now = time.time()
older = now - 1
test_values = (
0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
1402443346.935174, '1402443346.93517', '1402443346.935169_ffff',
older, now,
)
self._test_greater_with_offset(now, test_values)
# Part 2: Same as above, but with fixed time values that reproduce
# specific corner cases.
now = 1519830570.6949348
older = now - 1
test_values = (
0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
1402443346.935174, '1402443346.93517', '1402443346.935169_ffff',
older, now,
)
self._test_greater_with_offset(now, test_values)
# Part 3: The '%f' problem. Timestamps cannot be converted to %f
# strings, then back to timestamps, then compared with originals.
# You can only "import" a floating point representation once.
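        # Illustration (hypothetical numbers): '%f' % 1519830570.6949348
        # yields '1519830570.694935', which parses back to a float that no
        # longer matches the original in its low-order bits. That is why the
        # fixed value below is "imported" through '%f' exactly once before
        # the other test values are derived from it.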
now = 1519830570.6949348
now = float('%f' % now)
older = now - 1
test_values = (
0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
older, '%f' % older, '%f_0000ffff' % older,
now, '%f' % now, '%s_00000000' % now,
)
self._test_greater_with_offset(now, test_values)
def test_smaller_no_offset(self):
now = time.time()
newer = now + 1
timestamp = utils.Timestamp(now)
test_values = (
9999999999.99999, '9999999999.99999', '9999999999.99999_ffff',
newer, '%f' % newer, '%f_0000ffff' % newer,
)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp < value,
'%r is not smaller than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp < other,
'%r is not smaller than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp < other.normal,
'%r is not smaller than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp < other.internal,
'%r is not smaller than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp < float(other),
'%r is not smaller than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp < int(other),
'%r is not smaller than %r given %r' % (
timestamp, int(other), value))
def test_smaller_with_offset(self):
now = time.time()
newer = now + 1
test_values = (
9999999999.99999, '9999999999.99999', '9999999999.99999_ffff',
newer, '%f' % newer, '%f_0000ffff' % newer,
)
for offset in range(1, 1000, 100):
timestamp = utils.Timestamp(now, offset=offset)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp < value,
'%r is not smaller than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp < other,
'%r is not smaller than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp < other.normal,
'%r is not smaller than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp < other.internal,
'%r is not smaller than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp < float(other),
'%r is not smaller than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp < int(other),
'%r is not smaller than %r given %r' % (
timestamp, int(other), value))
def test_cmp_with_none(self):
self.assertGreater(utils.Timestamp(0), None)
self.assertGreater(utils.Timestamp(1.0), None)
self.assertGreater(utils.Timestamp(1.0, 42), None)
def test_ordering(self):
given = [
'1402444820.62590_000000000000000a',
'1402444820.62589_0000000000000001',
'1402444821.52589_0000000000000004',
'1402444920.62589_0000000000000004',
'1402444821.62589_000000000000000a',
'1402444821.72589_000000000000000a',
'1402444920.62589_0000000000000002',
'1402444820.62589_0000000000000002',
'1402444820.62589_000000000000000a',
'1402444820.62590_0000000000000004',
'1402444920.62589_000000000000000a',
'1402444820.62590_0000000000000002',
'1402444821.52589_0000000000000002',
'1402444821.52589_0000000000000000',
'1402444920.62589',
'1402444821.62589_0000000000000004',
'1402444821.72589_0000000000000001',
'1402444820.62590',
'1402444820.62590_0000000000000001',
'1402444820.62589_0000000000000004',
'1402444821.72589_0000000000000000',
'1402444821.52589_000000000000000a',
'1402444821.72589_0000000000000004',
'1402444821.62589',
'1402444821.52589_0000000000000001',
'1402444821.62589_0000000000000001',
'1402444821.62589_0000000000000002',
'1402444821.72589_0000000000000002',
'1402444820.62589',
'1402444920.62589_0000000000000001']
expected = [
'1402444820.62589',
'1402444820.62589_0000000000000001',
'1402444820.62589_0000000000000002',
'1402444820.62589_0000000000000004',
'1402444820.62589_000000000000000a',
'1402444820.62590',
'1402444820.62590_0000000000000001',
'1402444820.62590_0000000000000002',
'1402444820.62590_0000000000000004',
'1402444820.62590_000000000000000a',
'1402444821.52589',
'1402444821.52589_0000000000000001',
'1402444821.52589_0000000000000002',
'1402444821.52589_0000000000000004',
'1402444821.52589_000000000000000a',
'1402444821.62589',
'1402444821.62589_0000000000000001',
'1402444821.62589_0000000000000002',
'1402444821.62589_0000000000000004',
'1402444821.62589_000000000000000a',
'1402444821.72589',
'1402444821.72589_0000000000000001',
'1402444821.72589_0000000000000002',
'1402444821.72589_0000000000000004',
'1402444821.72589_000000000000000a',
'1402444920.62589',
'1402444920.62589_0000000000000001',
'1402444920.62589_0000000000000002',
'1402444920.62589_0000000000000004',
'1402444920.62589_000000000000000a',
]
# less visual version
"""
now = time.time()
given = [
utils.Timestamp(now + i, offset=offset).internal
for i in (0, 0.00001, 0.9, 1.0, 1.1, 100.0)
for offset in (0, 1, 2, 4, 10)
]
expected = [t for t in given]
random.shuffle(given)
"""
self.assertEqual(len(given), len(expected)) # sanity
timestamps = [utils.Timestamp(t) for t in given]
# our expected values don't include insignificant offsets
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
self.assertEqual(
[t.internal for t in sorted(timestamps)], expected)
# string sorting works as well
self.assertEqual(
sorted([t.internal for t in timestamps]), expected)
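        # Note: the .internal form is '<10.5-digit normalized time>_<16 hex
        # digit offset>', so lexicographic ordering of the strings matches
        # the numeric ordering checked above.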
def test_hashable(self):
ts_0 = utils.Timestamp('1402444821.72589')
ts_0_also = utils.Timestamp('1402444821.72589')
self.assertEqual(ts_0, ts_0_also) # sanity
self.assertEqual(hash(ts_0), hash(ts_0_also))
d = {ts_0: 'whatever'}
self.assertIn(ts_0, d) # sanity
self.assertIn(ts_0_also, d)
class TestTimestampEncoding(unittest.TestCase):
def setUp(self):
t0 = utils.Timestamp(0.0)
t1 = utils.Timestamp(997.9996)
t2 = utils.Timestamp(999)
t3 = utils.Timestamp(1000, 24)
t4 = utils.Timestamp(1001)
t5 = utils.Timestamp(1002.00040)
# encodings that are expected when explicit = False
self.non_explicit_encodings = (
('0000001000.00000_18', (t3, t3, t3)),
('0000001000.00000_18', (t3, t3, None)),
)
# mappings that are expected when explicit = True
self.explicit_encodings = (
('0000001000.00000_18+0+0', (t3, t3, t3)),
('0000001000.00000_18+0', (t3, t3, None)),
)
# mappings that are expected when explicit = True or False
self.encodings = (
('0000001000.00000_18+0+186a0', (t3, t3, t4)),
('0000001000.00000_18+186a0+186c8', (t3, t4, t5)),
('0000001000.00000_18-186a0+0', (t3, t2, t2)),
('0000001000.00000_18+0-186a0', (t3, t3, t2)),
('0000001000.00000_18-186a0-186c8', (t3, t2, t1)),
('0000001000.00000_18', (t3, None, None)),
('0000001000.00000_18+186a0', (t3, t4, None)),
('0000001000.00000_18-186a0', (t3, t2, None)),
('0000001000.00000_18', (t3, None, t1)),
('0000001000.00000_18-5f5e100', (t3, t0, None)),
('0000001000.00000_18+0-5f5e100', (t3, t3, t0)),
('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)),
)
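        # Reading these fixtures (a sketch of the convention inferred from
        # the expected strings, not from the encoder itself):
        # '0000001000.00000_18' is t3's short internal form, i.e. the
        # normalized timestamp plus '_' plus the offset (24 == 0x18) in hex.
        # The '+xxxx'/'-xxxx' suffixes appear to be hex deltas in units of
        # 10 microseconds, each relative to the previous timestamp; e.g. in
        # '0000001000.00000_18+186a0+186c8', 0x186a0 == 100000 ticks == +1.0s
        # (t3 -> t4 == 1001) and 0x186c8 == 100040 ticks (t4 -> t5 == 1002.0004).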
# decodings that are expected when explicit = False
self.non_explicit_decodings = (
('0000001000.00000_18', (t3, t3, t3)),
('0000001000.00000_18+186a0', (t3, t4, t4)),
('0000001000.00000_18-186a0', (t3, t2, t2)),
('0000001000.00000_18+186a0', (t3, t4, t4)),
('0000001000.00000_18-186a0', (t3, t2, t2)),
('0000001000.00000_18-5f5e100', (t3, t0, t0)),
)
# decodings that are expected when explicit = True
self.explicit_decodings = (
('0000001000.00000_18+0+0', (t3, t3, t3)),
('0000001000.00000_18+0', (t3, t3, None)),
('0000001000.00000_18', (t3, None, None)),
('0000001000.00000_18+186a0', (t3, t4, None)),
('0000001000.00000_18-186a0', (t3, t2, None)),
('0000001000.00000_18-5f5e100', (t3, t0, None)),
)
# decodings that are expected when explicit = True or False
self.decodings = (
('0000001000.00000_18+0+186a0', (t3, t3, t4)),
('0000001000.00000_18+186a0+186c8', (t3, t4, t5)),
('0000001000.00000_18-186a0+0', (t3, t2, t2)),
('0000001000.00000_18+0-186a0', (t3, t3, t2)),
('0000001000.00000_18-186a0-186c8', (t3, t2, t1)),
('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)),
)
def _assertEqual(self, expected, actual, test):
self.assertEqual(expected, actual,
'Got %s but expected %s for parameters %s'
% (actual, expected, test))
def test_encoding(self):
for test in self.explicit_encodings:
actual = utils.encode_timestamps(test[1][0], test[1][1],
test[1][2], True)
self._assertEqual(test[0], actual, test[1])
for test in self.non_explicit_encodings:
actual = utils.encode_timestamps(test[1][0], test[1][1],
test[1][2], False)
self._assertEqual(test[0], actual, test[1])
for explicit in (True, False):
for test in self.encodings:
actual = utils.encode_timestamps(test[1][0], test[1][1],
test[1][2], explicit)
self._assertEqual(test[0], actual, test[1])
def test_decoding(self):
for test in self.explicit_decodings:
actual = utils.decode_timestamps(test[0], True)
self._assertEqual(test[1], actual, test[0])
for test in self.non_explicit_decodings:
actual = utils.decode_timestamps(test[0], False)
self._assertEqual(test[1], actual, test[0])
for explicit in (True, False):
for test in self.decodings:
actual = utils.decode_timestamps(test[0], explicit)
self._assertEqual(test[1], actual, test[0])
class TestUtils(unittest.TestCase):
"""Tests for swift.common.utils """
def setUp(self):
utils.HASH_PATH_SUFFIX = b'endcap'
utils.HASH_PATH_PREFIX = b'startcap'
def test_get_zero_indexed_base_string(self):
self.assertEqual(utils.get_zero_indexed_base_string('something', 0),
'something')
self.assertEqual(utils.get_zero_indexed_base_string('something', None),
'something')
self.assertEqual(utils.get_zero_indexed_base_string('something', 1),
'something-1')
self.assertRaises(ValueError, utils.get_zero_indexed_base_string,
'something', 'not_integer')
@with_tempdir
def test_lock_path(self, tmpdir):
# 2 locks with limit=1 must fail
success = False
with utils.lock_path(tmpdir, 0.1):
with self.assertRaises(LockTimeout):
with utils.lock_path(tmpdir, 0.1):
success = True
self.assertFalse(success)
# 2 locks with limit=2 must succeed
success = False
with utils.lock_path(tmpdir, 0.1, limit=2):
try:
with utils.lock_path(tmpdir, 0.1, limit=2):
success = True
except LockTimeout as exc:
self.fail('Unexpected exception %s' % exc)
self.assertTrue(success)
# 3 locks with limit=2 must fail
success = False
with utils.lock_path(tmpdir, 0.1, limit=2):
with utils.lock_path(tmpdir, 0.1, limit=2):
with self.assertRaises(LockTimeout):
                    with utils.lock_path(tmpdir, 0.1, limit=2):
success = True
self.assertFalse(success)
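        # For context, a typical (hypothetical) caller would look like:
        #
        #     with utils.lock_path(datadir, timeout=10):
        #         ...  # exclusive work on files under datadir
        #
        # and, as exercised above, limit=N allows up to N concurrent holders
        # before further attempts raise LockTimeout.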
@with_tempdir
def test_lock_path_invalid_limit(self, tmpdir):
success = False
with self.assertRaises(ValueError):
with utils.lock_path(tmpdir, 0.1, limit=0):
success = True
self.assertFalse(success)
with self.assertRaises(ValueError):
with utils.lock_path(tmpdir, 0.1, limit=-1):
success = True
self.assertFalse(success)
with self.assertRaises(TypeError):
with utils.lock_path(tmpdir, 0.1, limit='1'):
success = True
self.assertFalse(success)
with self.assertRaises(TypeError):
with utils.lock_path(tmpdir, 0.1, limit=1.1):
success = True
self.assertFalse(success)
@with_tempdir
def test_lock_path_num_sleeps(self, tmpdir):
num_short_calls = [0]
exception_raised = [False]
def my_sleep(to_sleep):
if to_sleep == 0.01:
num_short_calls[0] += 1
else:
raise Exception('sleep time changed: %s' % to_sleep)
try:
with mock.patch('swift.common.utils.sleep', my_sleep):
with utils.lock_path(tmpdir):
with utils.lock_path(tmpdir):
pass
except Exception as e:
exception_raised[0] = True
self.assertTrue('sleep time changed' in str(e))
self.assertEqual(num_short_calls[0], 11)
self.assertTrue(exception_raised[0])
@with_tempdir
def test_lock_path_class(self, tmpdir):
with utils.lock_path(tmpdir, 0.1, ReplicationLockTimeout):
exc = None
exc2 = None
success = False
try:
with utils.lock_path(tmpdir, 0.1, ReplicationLockTimeout):
success = True
except ReplicationLockTimeout as err:
exc = err
except LockTimeout as err:
exc2 = err
self.assertTrue(exc is not None)
self.assertTrue(exc2 is None)
self.assertTrue(not success)
exc = None
exc2 = None
success = False
try:
with utils.lock_path(tmpdir, 0.1):
success = True
except ReplicationLockTimeout as err:
exc = err
except LockTimeout as err:
exc2 = err
self.assertTrue(exc is None)
self.assertTrue(exc2 is not None)
self.assertTrue(not success)
def test_normalize_timestamp(self):
# Test swift.common.utils.normalize_timestamp
self.assertEqual(utils.normalize_timestamp('1253327593.48174'),
"1253327593.48174")
self.assertEqual(utils.normalize_timestamp(1253327593.48174),
"1253327593.48174")
self.assertEqual(utils.normalize_timestamp('1253327593.48'),
"1253327593.48000")
self.assertEqual(utils.normalize_timestamp(1253327593.48),
"1253327593.48000")
self.assertEqual(utils.normalize_timestamp('253327593.48'),
"0253327593.48000")
self.assertEqual(utils.normalize_timestamp(253327593.48),
"0253327593.48000")
self.assertEqual(utils.normalize_timestamp('1253327593'),
"1253327593.00000")
self.assertEqual(utils.normalize_timestamp(1253327593),
"1253327593.00000")
self.assertRaises(ValueError, utils.normalize_timestamp, '')
self.assertRaises(ValueError, utils.normalize_timestamp, 'abc')
def test_normalize_delete_at_timestamp(self):
self.assertEqual(
utils.normalize_delete_at_timestamp(1253327593),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp(1253327593.67890),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp('1253327593'),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp('1253327593.67890'),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp(-1253327593),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp(-1253327593.67890),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp('-1253327593'),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp('-1253327593.67890'),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp(71253327593),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp(71253327593.67890),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp('71253327593'),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp('71253327593.67890'),
'9999999999')
self.assertRaises(ValueError, utils.normalize_timestamp, '')
self.assertRaises(ValueError, utils.normalize_timestamp, 'abc')
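        # A plausible sketch of the normalization exercised above (not
        # necessarily the actual implementation): clamp to [0, 9999999999]
        # and render as a zero-padded whole-second string, e.g.
        # '%010d' % min(max(0, float(value)), 9999999999).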
def test_last_modified_date_to_timestamp(self):
expectations = {
'1970-01-01T00:00:00.000000': 0.0,
'2014-02-28T23:22:36.698390': 1393629756.698390,
'2011-03-19T04:03:00.604554': 1300507380.604554,
}
for last_modified, ts in expectations.items():
real = utils.last_modified_date_to_timestamp(last_modified)
self.assertEqual(real, ts, "failed for %s" % last_modified)
def test_last_modified_date_to_timestamp_when_system_not_UTC(self):
try:
old_tz = os.environ.get('TZ')
# Western Argentina Summer Time. Found in glibc manual; this
# timezone always has a non-zero offset from UTC, so this test is
# always meaningful.
os.environ['TZ'] = 'WART4WARST,J1/0,J365/25'
self.assertEqual(utils.last_modified_date_to_timestamp(
'1970-01-01T00:00:00.000000'),
0.0)
finally:
if old_tz is not None:
os.environ['TZ'] = old_tz
else:
os.environ.pop('TZ')
def test_backwards(self):
# Test swift.common.utils.backward
# The lines are designed so that the function would encounter
# all of the boundary conditions and typical conditions.
# Block boundaries are marked with '<>' characters
blocksize = 25
lines = [b'123456789x12345678><123456789\n', # block larger than rest
b'123456789x123>\n', # block ends just before \n character
b'123423456789\n',
b'123456789x\n', # block ends at the end of line
b'<123456789x123456789x123\n',
b'<6789x123\n', # block ends at the beginning of the line
b'6789x1234\n',
b'1234><234\n', # block ends typically in the middle of line
b'123456789x123456789\n']
with TemporaryFile() as f:
for line in lines:
f.write(line)
count = len(lines) - 1
for line in utils.backward(f, blocksize):
self.assertEqual(line, lines[count].split(b'\n')[0])
count -= 1
# Empty file case
with TemporaryFile('r') as f:
self.assertEqual([], list(utils.backward(f)))
def test_mkdirs(self):
testdir_base = mkdtemp()
testroot = os.path.join(testdir_base, 'mkdirs')
try:
self.assertTrue(not os.path.exists(testroot))
utils.mkdirs(testroot)
self.assertTrue(os.path.exists(testroot))
utils.mkdirs(testroot)
self.assertTrue(os.path.exists(testroot))
rmtree(testroot, ignore_errors=1)
testdir = os.path.join(testroot, 'one/two/three')
self.assertTrue(not os.path.exists(testdir))
utils.mkdirs(testdir)
self.assertTrue(os.path.exists(testdir))
utils.mkdirs(testdir)
self.assertTrue(os.path.exists(testdir))
rmtree(testroot, ignore_errors=1)
open(testroot, 'wb').close()
self.assertTrue(not os.path.exists(testdir))
self.assertRaises(OSError, utils.mkdirs, testdir)
os.unlink(testroot)
finally:
rmtree(testdir_base)
def test_split_path(self):
# Test swift.common.utils.split_account_path
self.assertRaises(ValueError, utils.split_path, '')
self.assertRaises(ValueError, utils.split_path, '/')
self.assertRaises(ValueError, utils.split_path, '//')
self.assertEqual(utils.split_path('/a'), ['a'])
self.assertRaises(ValueError, utils.split_path, '//a')
self.assertEqual(utils.split_path('/a/'), ['a'])
self.assertRaises(ValueError, utils.split_path, '/a/c')
self.assertRaises(ValueError, utils.split_path, '//c')
self.assertRaises(ValueError, utils.split_path, '/a/c/')
self.assertRaises(ValueError, utils.split_path, '/a//')
self.assertRaises(ValueError, utils.split_path, '/a', 2)
self.assertRaises(ValueError, utils.split_path, '/a', 2, 3)
self.assertRaises(ValueError, utils.split_path, '/a', 2, 3, True)
self.assertEqual(utils.split_path('/a/c', 2), ['a', 'c'])
self.assertEqual(utils.split_path('/a/c/o', 3), ['a', 'c', 'o'])
self.assertRaises(ValueError, utils.split_path, '/a/c/o/r', 3, 3)
self.assertEqual(utils.split_path('/a/c/o/r', 3, 3, True),
['a', 'c', 'o/r'])
self.assertEqual(utils.split_path('/a/c', 2, 3, True),
['a', 'c', None])
self.assertRaises(ValueError, utils.split_path, '/a', 5, 4)
self.assertEqual(utils.split_path('/a/c/', 2), ['a', 'c'])
self.assertEqual(utils.split_path('/a/c/', 2, 3), ['a', 'c', ''])
try:
utils.split_path('o\nn e', 2)
except ValueError as err:
self.assertEqual(str(err), 'Invalid path: o%0An%20e')
try:
utils.split_path('o\nn e', 2, 3, True)
except ValueError as err:
self.assertEqual(str(err), 'Invalid path: o%0An%20e')
def test_validate_device_partition(self):
# Test swift.common.utils.validate_device_partition
utils.validate_device_partition('foo', 'bar')
self.assertRaises(ValueError,
utils.validate_device_partition, '', '')
self.assertRaises(ValueError,
utils.validate_device_partition, '', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo/bar', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', 'foo/bar')
self.assertRaises(ValueError,
utils.validate_device_partition, '.', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, '..', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '.')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '..')
try:
utils.validate_device_partition('o\nn e', 'foo')
except ValueError as err:
self.assertEqual(str(err), 'Invalid device: o%0An%20e')
try:
utils.validate_device_partition('foo', 'o\nn e')
except ValueError as err:
self.assertEqual(str(err), 'Invalid partition: o%0An%20e')
def test_NullLogger(self):
# Test swift.common.utils.NullLogger
sio = StringIO()
nl = utils.NullLogger()
nl.write('test')
self.assertEqual(sio.getvalue(), '')
def test_LoggerFileObject(self):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
sio = StringIO()
handler = logging.StreamHandler(sio)
logger = logging.getLogger()
logger.addHandler(handler)
        lfo_stdout = utils.LoggerFileObject(logger)
        lfo_stderr = utils.LoggerFileObject(logger, 'STDERR')
print('test1')
self.assertEqual(sio.getvalue(), '')
sys.stdout = lfo_stdout
print('test2')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\n')
sys.stderr = lfo_stderr
print('test4', file=sys.stderr)
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n')
sys.stdout = orig_stdout
print('test5')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n')
print('test6', file=sys.stderr)
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\n')
sys.stderr = orig_stderr
print('test8')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\n')
lfo_stdout.writelines(['a', 'b', 'c'])
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\n')
lfo_stdout.close()
lfo_stderr.close()
lfo_stdout.write('d')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
lfo_stdout.flush()
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
for lfo in (lfo_stdout, lfo_stderr):
got_exc = False
try:
for line in lfo:
pass
except Exception:
got_exc = True
self.assertTrue(got_exc)
got_exc = False
try:
for line in lfo:
pass
except Exception:
got_exc = True
self.assertTrue(got_exc)
self.assertRaises(IOError, lfo.read)
self.assertRaises(IOError, lfo.read, 1024)
self.assertRaises(IOError, lfo.readline)
self.assertRaises(IOError, lfo.readline, 1024)
lfo.tell()
def test_LoggerFileObject_recursion(self):
crashy_calls = [0]
class CrashyLogger(logging.Handler):
def emit(self, record):
crashy_calls[0] += 1
try:
# Pretend to be trying to send to syslog, but syslogd is
# dead. We need the raise here to set sys.exc_info.
raise socket.error(errno.ENOTCONN, "This is an ex-syslog")
except socket.error:
self.handleError(record)
logger = logging.getLogger()
logger.addHandler(CrashyLogger())
# Set up some real file descriptors for stdio. If you run
# nosetests with "-s", you already have real files there, but
# otherwise they're StringIO objects.
#
# In any case, since capture_stdio() closes sys.stdin and friends,
# we'd want to set up some sacrificial files so as to not goof up
# the testrunner.
new_stdin = open(os.devnull, 'r+b')
new_stdout = open(os.devnull, 'w+b')
new_stderr = open(os.devnull, 'w+b')
with contextlib.closing(new_stdin), contextlib.closing(new_stdout), \
contextlib.closing(new_stderr):
# logging.raiseExceptions is set to False in test/__init__.py, but
# is True in Swift daemons, and the error doesn't manifest without
# it.
with mock.patch('sys.stdin', new_stdin), \
mock.patch('sys.stdout', new_stdout), \
mock.patch('sys.stderr', new_stderr), \
mock.patch.object(logging, 'raiseExceptions', True):
# Note: since stdio is hooked up to /dev/null in here, using
# pdb is basically impossible. Sorry about that.
utils.capture_stdio(logger)
logger.info("I like ham")
                self.assertEqual(crashy_calls[0], 1)
def test_parse_options(self):
# Get a file that is definitely on disk
with NamedTemporaryFile() as f:
conf_file = f.name
conf, options = utils.parse_options(test_args=[conf_file])
self.assertEqual(conf, conf_file)
# assert defaults
self.assertEqual(options['verbose'], False)
self.assertNotIn('once', options)
# assert verbose as option
conf, options = utils.parse_options(test_args=[conf_file, '-v'])
self.assertEqual(options['verbose'], True)
# check once option
conf, options = utils.parse_options(test_args=[conf_file],
once=True)
self.assertEqual(options['once'], False)
test_args = [conf_file, '--once']
conf, options = utils.parse_options(test_args=test_args, once=True)
self.assertEqual(options['once'], True)
# check options as arg parsing
test_args = [conf_file, 'once', 'plugin_name', 'verbose']
conf, options = utils.parse_options(test_args=test_args, once=True)
self.assertEqual(options['verbose'], True)
self.assertEqual(options['once'], True)
self.assertEqual(options['extra_args'], ['plugin_name'])
def test_parse_options_errors(self):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
stdo = StringIO()
stde = StringIO()
utils.sys.stdout = stdo
utils.sys.stderr = stde
self.assertRaises(SystemExit, utils.parse_options, once=True,
test_args=[])
self.assertTrue('missing config' in stdo.getvalue())
# verify conf file must exist, context manager will delete temp file
with NamedTemporaryFile() as f:
conf_file = f.name
self.assertRaises(SystemExit, utils.parse_options, once=True,
test_args=[conf_file])
self.assertTrue('unable to locate' in stdo.getvalue())
# reset stdio
utils.sys.stdout = orig_stdout
utils.sys.stderr = orig_stderr
def test_dump_recon_cache(self):
testdir_base = mkdtemp()
testcache_file = os.path.join(testdir_base, 'cache.recon')
logger = utils.get_logger(None, 'server', log_route='server')
try:
submit_dict = {'key0': 99,
'key1': {'value1': 1, 'value2': 2}}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(submit_dict, file_dict)
# Use a nested entry
submit_dict = {'key0': 101,
'key1': {'key2': {'value1': 1, 'value2': 2}}}
expect_dict = {'key0': 101,
'key1': {'key2': {'value1': 1, 'value2': 2},
'value1': 1, 'value2': 2}}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(expect_dict, file_dict)
# nested dict items are not sticky
submit_dict = {'key1': {'key2': {'value3': 3}}}
expect_dict = {'key0': 101,
'key1': {'key2': {'value3': 3},
'value1': 1, 'value2': 2}}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(expect_dict, file_dict)
# cached entries are sticky
submit_dict = {}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(expect_dict, file_dict)
# nested dicts can be erased...
submit_dict = {'key1': {'key2': {}}}
expect_dict = {'key0': 101,
'key1': {'value1': 1, 'value2': 2}}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(expect_dict, file_dict)
# ... and erasure is idempotent
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(expect_dict, file_dict)
# top level dicts can be erased...
submit_dict = {'key1': {}}
expect_dict = {'key0': 101}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(expect_dict, file_dict)
# ... and erasure is idempotent
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(expect_dict, file_dict)
finally:
rmtree(testdir_base)
def test_dump_recon_cache_set_owner(self):
testdir_base = mkdtemp()
testcache_file = os.path.join(testdir_base, 'cache.recon')
logger = utils.get_logger(None, 'server', log_route='server')
try:
submit_dict = {'key1': {'value1': 1, 'value2': 2}}
_ret = lambda: None
_ret.pw_uid = 100
_mock_getpwnam = MagicMock(return_value=_ret)
_mock_chown = mock.Mock()
with patch('os.chown', _mock_chown), \
patch('pwd.getpwnam', _mock_getpwnam):
utils.dump_recon_cache(submit_dict, testcache_file,
logger, set_owner="swift")
_mock_getpwnam.assert_called_once_with("swift")
self.assertEqual(_mock_chown.call_args[0][1], 100)
finally:
rmtree(testdir_base)
def test_dump_recon_cache_permission_denied(self):
testdir_base = mkdtemp()
testcache_file = os.path.join(testdir_base, 'cache.recon')
class MockLogger(object):
def __init__(self):
self._excs = []
def exception(self, message):
_junk, exc, _junk = sys.exc_info()
self._excs.append(exc)
logger = MockLogger()
try:
submit_dict = {'key1': {'value1': 1, 'value2': 2}}
with mock.patch(
'swift.common.utils.NamedTemporaryFile',
side_effect=IOError(13, 'Permission Denied')):
utils.dump_recon_cache(submit_dict, testcache_file, logger)
self.assertIsInstance(logger._excs[0], IOError)
finally:
rmtree(testdir_base)
def test_load_recon_cache(self):
stub_data = {'test': 'foo'}
with NamedTemporaryFile() as f:
f.write(json.dumps(stub_data).encode("utf-8"))
f.flush()
self.assertEqual(stub_data, utils.load_recon_cache(f.name))
# missing files are treated as empty
self.assertFalse(os.path.exists(f.name)) # sanity
self.assertEqual({}, utils.load_recon_cache(f.name))
# Corrupt files are treated as empty. We could crash and make an
# operator fix the corrupt file, but they'll "fix" it with "rm -f
# /var/cache/swift/*.recon", so let's just do it for them.
with NamedTemporaryFile() as f:
f.write(b"{not [valid (json")
f.flush()
self.assertEqual({}, utils.load_recon_cache(f.name))
def test_get_logger(self):
sio = StringIO()
logger = logging.getLogger('server')
logger.addHandler(logging.StreamHandler(sio))
logger = utils.get_logger(None, 'server', log_route='server')
logger.warning('test1')
self.assertEqual(sio.getvalue(), 'test1\n')
logger.debug('test2')
self.assertEqual(sio.getvalue(), 'test1\n')
logger = utils.get_logger({'log_level': 'DEBUG'}, 'server',
log_route='server')
logger.debug('test3')
self.assertEqual(sio.getvalue(), 'test1\ntest3\n')
# Doesn't really test that the log facility is truly being used all the
# way to syslog; but exercises the code.
logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'}, 'server',
log_route='server')
logger.warning('test4')
self.assertEqual(sio.getvalue(),
'test1\ntest3\ntest4\n')
# make sure debug doesn't log by default
logger.debug('test5')
self.assertEqual(sio.getvalue(),
'test1\ntest3\ntest4\n')
# make sure notice lvl logs by default
logger.notice('test6')
self.assertEqual(sio.getvalue(),
'test1\ntest3\ntest4\ntest6\n')
def test_get_logger_sysloghandler_plumbing(self):
orig_sysloghandler = utils.ThreadSafeSysLogHandler
syslog_handler_args = []
def syslog_handler_catcher(*args, **kwargs):
syslog_handler_args.append((args, kwargs))
return orig_sysloghandler(*args, **kwargs)
syslog_handler_catcher.LOG_LOCAL0 = orig_sysloghandler.LOG_LOCAL0
syslog_handler_catcher.LOG_LOCAL3 = orig_sysloghandler.LOG_LOCAL3
# Some versions of python perform host resolution while initializing
# the handler. See https://bugs.python.org/issue30378
orig_getaddrinfo = socket.getaddrinfo
def fake_getaddrinfo(host, *args):
return orig_getaddrinfo('localhost', *args)
with mock.patch.object(utils, 'ThreadSafeSysLogHandler',
syslog_handler_catcher), \
mock.patch.object(socket, 'getaddrinfo', fake_getaddrinfo):
utils.get_logger({
'log_facility': 'LOG_LOCAL3',
}, 'server', log_route='server')
expected_args = [((), {'address': '/dev/log',
'facility': orig_sysloghandler.LOG_LOCAL3})]
if not os.path.exists('/dev/log') or \
os.path.isfile('/dev/log') or \
os.path.isdir('/dev/log'):
# Since socket on OSX is in /var/run/syslog, there will be
# a fallback to UDP.
expected_args.append(
((), {'facility': orig_sysloghandler.LOG_LOCAL3}))
self.assertEqual(expected_args, syslog_handler_args)
syslog_handler_args = []
utils.get_logger({
'log_facility': 'LOG_LOCAL3',
'log_address': '/foo/bar',
}, 'server', log_route='server')
self.assertEqual(
((), {'address': '/foo/bar',
'facility': orig_sysloghandler.LOG_LOCAL3}),
syslog_handler_args[0])
# Using UDP with default port
syslog_handler_args = []
utils.get_logger({
'log_udp_host': 'syslog.funtimes.com',
}, 'server', log_route='server')
self.assertEqual([
((), {'address': ('syslog.funtimes.com',
logging.handlers.SYSLOG_UDP_PORT),
'facility': orig_sysloghandler.LOG_LOCAL0})],
syslog_handler_args)
# Using UDP with non-default port
syslog_handler_args = []
utils.get_logger({
'log_udp_host': 'syslog.funtimes.com',
'log_udp_port': '2123',
}, 'server', log_route='server')
self.assertEqual([
((), {'address': ('syslog.funtimes.com', 2123),
'facility': orig_sysloghandler.LOG_LOCAL0})],
syslog_handler_args)
@reset_logger_state
def test_clean_logger_exception(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
logger.logger.addHandler(handler)
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
def log_exception(exc):
try:
raise exc
except (Exception, Timeout):
logger.exception('blah')
try:
# establish base case
self.assertEqual(strip_value(sio), '')
logger.info('test')
self.assertEqual(strip_value(sio), 'test\n')
self.assertEqual(strip_value(sio), '')
logger.info('test')
logger.info('test')
self.assertEqual(strip_value(sio), 'test\ntest\n')
self.assertEqual(strip_value(sio), '')
# test OSError
for en in (errno.EIO, errno.ENOSPC):
log_exception(OSError(en, 'my %s error message' % en))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertIn('my %s error message' % en, log_msg)
# unfiltered
log_exception(OSError())
self.assertTrue('Traceback' in strip_value(sio))
# test socket.error
log_exception(socket.error(errno.ECONNREFUSED,
'my error message'))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertNotIn('errno.ECONNREFUSED message test', log_msg)
self.assertIn('Connection refused', log_msg)
log_exception(socket.error(errno.EHOSTUNREACH,
'my error message'))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertNotIn('my error message', log_msg)
self.assertIn('Host unreachable', log_msg)
log_exception(socket.error(errno.ETIMEDOUT, 'my error message'))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertNotIn('my error message', log_msg)
self.assertIn('Connection timeout', log_msg)
# unfiltered
log_exception(socket.error(0, 'my error message'))
log_msg = strip_value(sio)
self.assertIn('Traceback', log_msg)
self.assertIn('my error message', log_msg)
# test eventlet.Timeout
connection_timeout = ConnectionTimeout(42, 'my error message')
log_exception(connection_timeout)
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertTrue('ConnectionTimeout' in log_msg)
self.assertTrue('(42s)' in log_msg)
self.assertNotIn('my error message', log_msg)
connection_timeout.cancel()
message_timeout = MessageTimeout(42, 'my error message')
log_exception(message_timeout)
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertTrue('MessageTimeout' in log_msg)
self.assertTrue('(42s)' in log_msg)
self.assertTrue('my error message' in log_msg)
message_timeout.cancel()
# test BadStatusLine
log_exception(http_client.BadStatusLine(''))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertIn('BadStatusLine', log_msg)
self.assertIn("''", log_msg)
# test unhandled
log_exception(Exception('my error message'))
log_msg = strip_value(sio)
self.assertTrue('Traceback' in log_msg)
self.assertTrue('my error message' in log_msg)
finally:
logger.logger.removeHandler(handler)
@reset_logger_state
def test_swift_log_formatter_max_line_length(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
formatter = utils.SwiftLogFormatter(max_line_length=10)
handler.setFormatter(formatter)
logger.logger.addHandler(handler)
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
try:
logger.info('12345')
self.assertEqual(strip_value(sio), '12345\n')
logger.info('1234567890')
self.assertEqual(strip_value(sio), '1234567890\n')
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12 ... de\n')
formatter.max_line_length = 11
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123 ... cde\n')
formatter.max_line_length = 0
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234567890abcde\n')
formatter.max_line_length = 1
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1\n')
formatter.max_line_length = 2
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12\n')
formatter.max_line_length = 3
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123\n')
formatter.max_line_length = 4
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234\n')
formatter.max_line_length = 5
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12345\n')
formatter.max_line_length = 6
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123456\n')
formatter.max_line_length = 7
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1 ... e\n')
formatter.max_line_length = -10
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234567890abcde\n')
finally:
logger.logger.removeHandler(handler)
@reset_logger_state
def test_swift_log_formatter(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
handler.setFormatter(utils.SwiftLogFormatter())
logger.logger.addHandler(handler)
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
try:
self.assertFalse(logger.txn_id)
logger.error('my error message')
log_msg = strip_value(sio)
self.assertIn('my error message', log_msg)
self.assertNotIn('txn', log_msg)
logger.txn_id = '12345'
logger.error('test')
log_msg = strip_value(sio)
self.assertIn('txn', log_msg)
self.assertIn('12345', log_msg)
# test txn in info message
self.assertEqual(logger.txn_id, '12345')
logger.info('test')
log_msg = strip_value(sio)
self.assertIn('txn', log_msg)
self.assertIn('12345', log_msg)
# test txn already in message
self.assertEqual(logger.txn_id, '12345')
logger.warning('test 12345 test')
self.assertEqual(strip_value(sio), 'test 12345 test\n')
# Test multi line collapsing
logger.error('my\nerror\nmessage')
log_msg = strip_value(sio)
self.assertIn('my#012error#012message', log_msg)
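            # ('#012' is the octal escape for '\n'; the formatter collapses
            # multi-line messages into a single syslog-friendly line.)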
# test client_ip
self.assertFalse(logger.client_ip)
logger.error('my error message')
log_msg = strip_value(sio)
self.assertIn('my error message', log_msg)
self.assertNotIn('client_ip', log_msg)
logger.client_ip = '1.2.3.4'
logger.error('test')
log_msg = strip_value(sio)
self.assertIn('client_ip', log_msg)
self.assertIn('1.2.3.4', log_msg)
# test no client_ip on info message
self.assertEqual(logger.client_ip, '1.2.3.4')
logger.info('test')
log_msg = strip_value(sio)
self.assertNotIn('client_ip', log_msg)
self.assertNotIn('1.2.3.4', log_msg)
# test client_ip (and txn) already in message
self.assertEqual(logger.client_ip, '1.2.3.4')
logger.warning('test 1.2.3.4 test 12345')
self.assertEqual(strip_value(sio), 'test 1.2.3.4 test 12345\n')
finally:
logger.logger.removeHandler(handler)
def test_storage_directory(self):
self.assertEqual(utils.storage_directory('objects', '1', 'ABCDEF'),
'objects/1/DEF/ABCDEF')
def test_is_valid_ip(self):
self.assertTrue(is_valid_ip("127.0.0.1"))
self.assertTrue(is_valid_ip("10.0.0.1"))
ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80::204:61ff:fe9d:f156"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80::204:61ff:254.157.241.86"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80::"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "::1"
self.assertTrue(is_valid_ip(ipv6))
not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a"
self.assertFalse(is_valid_ip(not_ipv6))
not_ipv6 = "1:2:3:4:5:6::7:8"
self.assertFalse(is_valid_ip(not_ipv6))
def test_is_valid_ipv4(self):
self.assertTrue(is_valid_ipv4("127.0.0.1"))
self.assertTrue(is_valid_ipv4("10.0.0.1"))
ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80::204:61ff:fe9d:f156"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80::204:61ff:254.157.241.86"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80::"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "::1"
self.assertFalse(is_valid_ipv4(ipv6))
not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a"
self.assertFalse(is_valid_ipv4(not_ipv6))
not_ipv6 = "1:2:3:4:5:6::7:8"
self.assertFalse(is_valid_ipv4(not_ipv6))
def test_is_valid_ipv6(self):
self.assertFalse(is_valid_ipv6("127.0.0.1"))
self.assertFalse(is_valid_ipv6("10.0.0.1"))
ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80::204:61ff:fe9d:f156"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80::204:61ff:254.157.241.86"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80::"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "::1"
self.assertTrue(is_valid_ipv6(ipv6))
not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a"
self.assertFalse(is_valid_ipv6(not_ipv6))
not_ipv6 = "1:2:3:4:5:6::7:8"
self.assertFalse(is_valid_ipv6(not_ipv6))
def test_expand_ipv6(self):
expanded_ipv6 = "fe80::204:61ff:fe9d:f156"
upper_ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertEqual(expanded_ipv6, utils.expand_ipv6(upper_ipv6))
omit_ipv6 = "fe80:0000:0000::0204:61ff:fe9d:f156"
self.assertEqual(expanded_ipv6, utils.expand_ipv6(omit_ipv6))
less_num_ipv6 = "fe80:0:00:000:0204:61ff:fe9d:f156"
self.assertEqual(expanded_ipv6, utils.expand_ipv6(less_num_ipv6))
def test_whataremyips(self):
myips = utils.whataremyips()
self.assertTrue(len(myips) > 1)
self.assertTrue('127.0.0.1' in myips)
def test_whataremyips_bind_to_all(self):
for any_addr in ('0.0.0.0', '0000:0000:0000:0000:0000:0000:0000:0000',
'::0', '::0000', '::',
# Wacky parse-error input produces all IPs
'I am a bear'):
myips = utils.whataremyips(any_addr)
self.assertTrue(len(myips) > 1)
self.assertTrue('127.0.0.1' in myips)
def test_whataremyips_bind_ip_specific(self):
self.assertEqual(['1.2.3.4'], utils.whataremyips('1.2.3.4'))
def test_whataremyips_error(self):
def my_interfaces():
return ['eth0']
def my_ifaddress_error(interface):
raise ValueError
with patch('netifaces.interfaces', my_interfaces), \
patch('netifaces.ifaddresses', my_ifaddress_error):
self.assertEqual(utils.whataremyips(), [])
def test_whataremyips_ipv6(self):
test_ipv6_address = '2001:6b0:dead:beef:2::32'
test_interface = 'eth0'
def my_ipv6_interfaces():
return ['eth0']
def my_ipv6_ifaddresses(interface):
return {AF_INET6:
[{'netmask': 'ffff:ffff:ffff:ffff::',
'addr': '%s%%%s' % (test_ipv6_address, test_interface)}]}
with patch('netifaces.interfaces', my_ipv6_interfaces), \
patch('netifaces.ifaddresses', my_ipv6_ifaddresses):
myips = utils.whataremyips()
self.assertEqual(len(myips), 1)
self.assertEqual(myips[0], test_ipv6_address)
def test_hash_path(self):
        # Yes, these tests are deliberately very fragile. We want to make sure
        # that if someone changes the results hash_path produces, they will
        # know it.
with mock.patch('swift.common.utils.HASH_PATH_PREFIX', b''):
self.assertEqual(utils.hash_path('a'),
'1c84525acb02107ea475dcd3d09c2c58')
self.assertEqual(utils.hash_path('a', 'c'),
'33379ecb053aa5c9e356c68997cbb59e')
self.assertEqual(utils.hash_path('a', 'c', 'o'),
'06fbf0b514e5199dfc4e00f42eb5ea83')
self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=False),
'06fbf0b514e5199dfc4e00f42eb5ea83')
self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=True),
b'\x06\xfb\xf0\xb5\x14\xe5\x19\x9d\xfcN'
b'\x00\xf4.\xb5\xea\x83')
self.assertRaises(ValueError, utils.hash_path, 'a', object='o')
utils.HASH_PATH_PREFIX = b'abcdef'
self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=False),
'363f9b535bfb7d17a43a46a358afca0e')
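        # A sketch of how such digests arise (assumed from the fixture values,
        # not asserted here): hash_path is essentially
        #     md5(HASH_PATH_PREFIX + b'/' + '/'.join(parts) + HASH_PATH_SUFFIX)
        # returned as a hex digest (or as raw bytes when raw_digest=True).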
def test_validate_hash_conf(self):
# no section causes InvalidHashPathConfigError
self._test_validate_hash_conf([], [], True)
# 'swift-hash' section is there but no options causes
# InvalidHashPathConfigError
self._test_validate_hash_conf(['swift-hash'], [], True)
# if we have the section and either of prefix or suffix,
# InvalidHashPathConfigError doesn't occur
self._test_validate_hash_conf(
['swift-hash'], ['swift_hash_path_prefix'], False)
self._test_validate_hash_conf(
['swift-hash'], ['swift_hash_path_suffix'], False)
# definitely, we have the section and both of them,
# InvalidHashPathConfigError doesn't occur
self._test_validate_hash_conf(
['swift-hash'],
['swift_hash_path_suffix', 'swift_hash_path_prefix'], False)
# But invalid section name should make an error even if valid
# options are there
self._test_validate_hash_conf(
['swift-hash-xxx'],
['swift_hash_path_suffix', 'swift_hash_path_prefix'], True)
def _test_validate_hash_conf(self, sections, options, should_raise_error):
class FakeConfigParser(object):
def read(self, conf_path, encoding=None):
return [conf_path]
def get(self, section, option):
if section not in sections:
raise NoSectionError('section error')
elif option not in options:
raise NoOptionError('option error', 'this option')
else:
return 'some_option_value'
with mock.patch('swift.common.utils.HASH_PATH_PREFIX', b''), \
mock.patch('swift.common.utils.HASH_PATH_SUFFIX', b''), \
mock.patch('swift.common.utils.ConfigParser',
FakeConfigParser):
try:
utils.validate_hash_conf()
except utils.InvalidHashPathConfigError:
if not should_raise_error:
self.fail('validate_hash_conf should not raise an error')
else:
if should_raise_error:
self.fail('validate_hash_conf should raise an error')
def test_load_libc_function(self):
self.assertTrue(callable(
utils.load_libc_function('printf')))
self.assertTrue(callable(
utils.load_libc_function('some_not_real_function')))
self.assertRaises(AttributeError,
utils.load_libc_function, 'some_not_real_function',
fail_if_missing=True)
def test_readconf(self):
conf = '''[section1]
foo = bar
[section2]
log_name = yarr'''
# setup a real file
fd, temppath = tempfile.mkstemp()
with os.fdopen(fd, 'w') as f:
f.write(conf)
make_filename = lambda: temppath
# setup a file stream
make_fp = lambda: StringIO(conf)
for conf_object_maker in (make_filename, make_fp):
conffile = conf_object_maker()
result = utils.readconf(conffile)
expected = {'__file__': conffile,
'log_name': None,
'section1': {'foo': 'bar'},
'section2': {'log_name': 'yarr'}}
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1')
expected = {'__file__': conffile, 'log_name': 'section1',
'foo': 'bar'}
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile,
'section2').get('log_name')
expected = 'yarr'
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1',
log_name='foo').get('log_name')
expected = 'foo'
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1',
defaults={'bar': 'baz'})
expected = {'__file__': conffile, 'log_name': 'section1',
'foo': 'bar', 'bar': 'baz'}
self.assertEqual(result, expected)
self.assertRaisesRegexp(
ValueError, 'Unable to find section3 config section in.*',
utils.readconf, temppath, 'section3')
os.unlink(temppath)
self.assertRaises(IOError, utils.readconf, temppath)
def test_readconf_raw(self):
conf = '''[section1]
foo = bar
[section2]
log_name = %(yarr)s'''
# setup a real file
fd, temppath = tempfile.mkstemp()
with os.fdopen(fd, 'w') as f:
f.write(conf)
make_filename = lambda: temppath
# setup a file stream
make_fp = lambda: StringIO(conf)
for conf_object_maker in (make_filename, make_fp):
conffile = conf_object_maker()
result = utils.readconf(conffile, raw=True)
expected = {'__file__': conffile,
'log_name': None,
'section1': {'foo': 'bar'},
'section2': {'log_name': '%(yarr)s'}}
self.assertEqual(result, expected)
os.unlink(temppath)
self.assertRaises(IOError, utils.readconf, temppath)
def test_readconf_dir(self):
config_dir = {
'server.conf.d/01.conf': """
[DEFAULT]
port = 8080
foo = bar
[section1]
name=section1
""",
'server.conf.d/section2.conf': """
[DEFAULT]
port = 8081
bar = baz
[section2]
name=section2
""",
'other-server.conf.d/01.conf': """
[DEFAULT]
port = 8082
[section3]
name=section3
"""
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with temptree(*zip(*config_dir.items())) as path:
conf_dir = os.path.join(path, 'server.conf.d')
conf = utils.readconf(conf_dir)
expected = {
'__file__': os.path.join(path, 'server.conf.d'),
'log_name': None,
'section1': {
'port': '8081',
'foo': 'bar',
'bar': 'baz',
'name': 'section1',
},
'section2': {
'port': '8081',
'foo': 'bar',
'bar': 'baz',
'name': 'section2',
},
}
self.assertEqual(conf, expected)
def test_readconf_dir_ignores_hidden_and_nondotconf_files(self):
config_dir = {
'server.conf.d/01.conf': """
[section1]
port = 8080
""",
'server.conf.d/.01.conf.swp': """
[section]
port = 8081
""",
'server.conf.d/01.conf-bak': """
[section]
port = 8082
""",
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with temptree(*zip(*config_dir.items())) as path:
conf_dir = os.path.join(path, 'server.conf.d')
conf = utils.readconf(conf_dir)
expected = {
'__file__': os.path.join(path, 'server.conf.d'),
'log_name': None,
'section1': {
'port': '8080',
},
}
self.assertEqual(conf, expected)
def _check_drop_privileges(self, mock_os, required_func_calls,
call_setsid=True):
user = getuser()
user_data = pwd.getpwnam(user)
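        # struct passwd fields by index: [2] pw_uid, [3] pw_gid, [5] pw_dir
        # (home directory) -- used in the assertions below.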
self.assertFalse(mock_os.called_funcs) # sanity check
# over-ride os with mock
with mock.patch('swift.common.utils.os', mock_os):
# exercise the code
utils.drop_privileges(user, call_setsid=call_setsid)
for func in required_func_calls:
self.assertIn(func, mock_os.called_funcs)
self.assertEqual(user_data[5], mock_os.environ['HOME'])
groups = {g.gr_gid for g in grp.getgrall() if user in g.gr_mem}
self.assertEqual(groups, set(mock_os.called_funcs['setgroups'][0]))
self.assertEqual(user_data[3], mock_os.called_funcs['setgid'][0])
self.assertEqual(user_data[2], mock_os.called_funcs['setuid'][0])
self.assertEqual('/', mock_os.called_funcs['chdir'][0])
self.assertEqual(0o22, mock_os.called_funcs['umask'][0])
def test_drop_privileges(self):
required_func_calls = ('setgroups', 'setgid', 'setuid', 'setsid',
'chdir', 'umask')
mock_os = MockOs(called_funcs=required_func_calls)
self._check_drop_privileges(mock_os, required_func_calls)
def test_drop_privileges_setsid_error(self):
# OSError trying to get session leader
required_func_calls = ('setgroups', 'setgid', 'setuid', 'setsid',
'chdir', 'umask')
mock_os = MockOs(called_funcs=required_func_calls,
raise_funcs=('setsid',))
self._check_drop_privileges(mock_os, required_func_calls)
def test_drop_privileges_no_call_setsid(self):
required_func_calls = ('setgroups', 'setgid', 'setuid', 'chdir',
'umask')
# OSError if trying to get session leader, but it shouldn't be called
bad_func_calls = ('setsid',)
mock_os = MockOs(called_funcs=required_func_calls,
raise_funcs=bad_func_calls)
self._check_drop_privileges(mock_os, required_func_calls,
call_setsid=False)
for func in bad_func_calls:
self.assertNotIn(func, mock_os.called_funcs)
@reset_logger_state
def test_capture_stdio(self):
# stubs
logger = utils.get_logger(None, 'dummy')
# mock utils system modules
_orig_sys = utils.sys
_orig_os = utils.os
try:
utils.sys = MockSys()
utils.os = MockOs()
# basic test
utils.capture_stdio(logger)
self.assertTrue(utils.sys.excepthook is not None)
self.assertEqual(utils.os.closed_fds, utils.sys.stdio_fds)
self.assertTrue(
isinstance(utils.sys.stdout, utils.LoggerFileObject))
self.assertTrue(
isinstance(utils.sys.stderr, utils.LoggerFileObject))
# reset; test same args, but exc when trying to close stdio
utils.os = MockOs(raise_funcs=('dup2',))
utils.sys = MockSys()
# test unable to close stdio
utils.capture_stdio(logger)
self.assertTrue(utils.sys.excepthook is not None)
self.assertEqual(utils.os.closed_fds, [])
self.assertTrue(
isinstance(utils.sys.stdout, utils.LoggerFileObject))
self.assertTrue(
isinstance(utils.sys.stderr, utils.LoggerFileObject))
# reset; test some other args
utils.os = MockOs()
utils.sys = MockSys()
logger = utils.get_logger(None, log_to_console=True)
# test console log
utils.capture_stdio(logger, capture_stdout=False,
capture_stderr=False)
self.assertTrue(utils.sys.excepthook is not None)
# when logging to console, stderr remains open
self.assertEqual(utils.os.closed_fds, utils.sys.stdio_fds[:2])
reset_loggers()
# stdio not captured
self.assertFalse(isinstance(utils.sys.stdout,
utils.LoggerFileObject))
self.assertFalse(isinstance(utils.sys.stderr,
utils.LoggerFileObject))
finally:
utils.sys = _orig_sys
utils.os = _orig_os
@reset_logger_state
def test_get_logger_console(self):
logger = utils.get_logger(None)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertFalse(console_handlers)
logger = utils.get_logger(None, log_to_console=True)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertTrue(console_handlers)
# make sure you can't have two console handlers
self.assertEqual(len(console_handlers), 1)
old_handler = console_handlers[0]
logger = utils.get_logger(None, log_to_console=True)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertEqual(len(console_handlers), 1)
new_handler = console_handlers[0]
self.assertNotEqual(new_handler, old_handler)
def verify_under_pseudo_time(
self, func, target_runtime_ms=1, *args, **kwargs):
curr_time = [42.0]
def my_time():
curr_time[0] += 0.001
return curr_time[0]
def my_sleep(duration):
curr_time[0] += 0.001
curr_time[0] += duration
with patch('time.time', my_time), \
patch('time.sleep', my_sleep), \
patch('eventlet.sleep', my_sleep):
start = time.time()
func(*args, **kwargs)
            # make sure the simulated runtime is within a tenth of a second
            # (100 ms) of the target, comparing in milliseconds
diff_from_target_ms = abs(
target_runtime_ms - ((time.time() - start) * 1000))
self.assertTrue(diff_from_target_ms < 100,
"Expected %d < 100" % diff_from_target_ms)
def test_ratelimit_sleep(self):
def testfunc():
running_time = 0
for i in range(100):
running_time = utils.ratelimit_sleep(running_time, -5)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=1)
def testfunc():
running_time = 0
for i in range(100):
running_time = utils.ratelimit_sleep(running_time, 0)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=1)
def testfunc():
running_time = 0
for i in range(50):
running_time = utils.ratelimit_sleep(running_time, 200)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=250)
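        # The 250 ms target follows from the arithmetic: 50 calls at a
        # max_rate of 200 per second take at least 50 / 200 = 0.25 s.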
def test_ratelimit_sleep_with_incr(self):
def testfunc():
running_time = 0
vals = [5, 17, 0, 3, 11, 30,
40, 4, 13, 2, -1] * 2 # adds up to 248
total = 0
for i in vals:
running_time = utils.ratelimit_sleep(running_time,
500, incr_by=i)
total += i
self.assertEqual(248, total)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=500)
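        # Similarly, 248 units of work at a max_rate of 500 per second take
        # roughly 248 / 500 = 0.496 s, hence the ~500 ms target.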
def test_ratelimit_sleep_with_sleep(self):
def testfunc():
running_time = 0
sleeps = [0] * 7 + [.2] * 3 + [0] * 30
for i in sleeps:
running_time = utils.ratelimit_sleep(running_time, 40,
rate_buffer=1)
time.sleep(i)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=900)
def test_urlparse(self):
parsed = utils.urlparse('http://127.0.0.1/')
self.assertEqual(parsed.scheme, 'http')
self.assertEqual(parsed.hostname, '127.0.0.1')
self.assertEqual(parsed.path, '/')
parsed = utils.urlparse('http://127.0.0.1:8080/')
self.assertEqual(parsed.port, 8080)
parsed = utils.urlparse('https://127.0.0.1/')
self.assertEqual(parsed.scheme, 'https')
parsed = utils.urlparse('http://[::1]/')
self.assertEqual(parsed.hostname, '::1')
parsed = utils.urlparse('http://[::1]:8080/')
self.assertEqual(parsed.hostname, '::1')
self.assertEqual(parsed.port, 8080)
parsed = utils.urlparse('www.example.com')
self.assertEqual(parsed.hostname, '')
def test_search_tree(self):
# file match & ext miss
with temptree(['asdf.conf', 'blarg.conf', 'asdf.cfg']) as t:
asdf = utils.search_tree(t, 'a*', '.conf')
self.assertEqual(len(asdf), 1)
self.assertEqual(asdf[0],
os.path.join(t, 'asdf.conf'))
# multi-file match & glob miss & sort
with temptree(['application.bin', 'apple.bin', 'apropos.bin']) as t:
app_bins = utils.search_tree(t, 'app*', 'bin')
self.assertEqual(len(app_bins), 2)
self.assertEqual(app_bins[0],
os.path.join(t, 'apple.bin'))
self.assertEqual(app_bins[1],
os.path.join(t, 'application.bin'))
# test file in folder & ext miss & glob miss
files = (
'sub/file1.ini',
'sub/file2.conf',
'sub.bin',
'bus.ini',
'bus/file3.ini',
)
with temptree(files) as t:
sub_ini = utils.search_tree(t, 'sub*', '.ini')
self.assertEqual(len(sub_ini), 1)
self.assertEqual(sub_ini[0],
os.path.join(t, 'sub/file1.ini'))
# test multi-file in folder & sub-folder & ext miss & glob miss
files = (
'folder_file.txt',
'folder/1.txt',
'folder/sub/2.txt',
'folder2/3.txt',
'Folder3/4.txt',
'folder.rc',
)
with temptree(files) as t:
folder_texts = utils.search_tree(t, 'folder*', '.txt')
self.assertEqual(len(folder_texts), 4)
f1 = os.path.join(t, 'folder_file.txt')
f2 = os.path.join(t, 'folder/1.txt')
f3 = os.path.join(t, 'folder/sub/2.txt')
f4 = os.path.join(t, 'folder2/3.txt')
for f in [f1, f2, f3, f4]:
self.assertTrue(f in folder_texts)
def test_search_tree_with_directory_ext_match(self):
files = (
'object-server/object-server.conf-base',
'object-server/1.conf.d/base.conf',
'object-server/1.conf.d/1.conf',
'object-server/2.conf.d/base.conf',
'object-server/2.conf.d/2.conf',
'object-server/3.conf.d/base.conf',
'object-server/3.conf.d/3.conf',
'object-server/4.conf.d/base.conf',
'object-server/4.conf.d/4.conf',
)
with temptree(files) as t:
conf_dirs = utils.search_tree(t, 'object-server', '.conf',
dir_ext='conf.d')
self.assertEqual(len(conf_dirs), 4)
for i in range(4):
conf_dir = os.path.join(t, 'object-server/%d.conf.d' % (i + 1))
self.assertTrue(conf_dir in conf_dirs)
def test_search_tree_conf_dir_with_named_conf_match(self):
files = (
'proxy-server/proxy-server.conf.d/base.conf',
'proxy-server/proxy-server.conf.d/pipeline.conf',
'proxy-server/proxy-noauth.conf.d/base.conf',
'proxy-server/proxy-noauth.conf.d/pipeline.conf',
)
with temptree(files) as t:
conf_dirs = utils.search_tree(t, 'proxy-server', 'noauth.conf',
dir_ext='noauth.conf.d')
self.assertEqual(len(conf_dirs), 1)
conf_dir = conf_dirs[0]
expected = os.path.join(t, 'proxy-server/proxy-noauth.conf.d')
self.assertEqual(conf_dir, expected)
def test_search_tree_conf_dir_pid_with_named_conf_match(self):
files = (
'proxy-server/proxy-server.pid.d',
'proxy-server/proxy-noauth.pid.d',
)
with temptree(files) as t:
pid_files = utils.search_tree(t, 'proxy-server',
exts=['noauth.pid', 'noauth.pid.d'])
self.assertEqual(len(pid_files), 1)
pid_file = pid_files[0]
expected = os.path.join(t, 'proxy-server/proxy-noauth.pid.d')
self.assertEqual(pid_file, expected)
def test_write_file(self):
with temptree([]) as t:
file_name = os.path.join(t, 'test')
utils.write_file(file_name, 'test')
with open(file_name, 'r') as f:
contents = f.read()
self.assertEqual(contents, 'test')
# and also subdirs
file_name = os.path.join(t, 'subdir/test2')
utils.write_file(file_name, 'test2')
with open(file_name, 'r') as f:
contents = f.read()
self.assertEqual(contents, 'test2')
# but can't over-write files
file_name = os.path.join(t, 'subdir/test2/test3')
self.assertRaises(IOError, utils.write_file, file_name,
'test3')
def test_remove_file(self):
with temptree([]) as t:
file_name = os.path.join(t, 'blah.pid')
# assert no raise
self.assertEqual(os.path.exists(file_name), False)
self.assertIsNone(utils.remove_file(file_name))
with open(file_name, 'w') as f:
f.write('1')
self.assertTrue(os.path.exists(file_name))
self.assertIsNone(utils.remove_file(file_name))
self.assertFalse(os.path.exists(file_name))
def test_human_readable(self):
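# human_readable rounds to the nearest whole unit using binary
# (1024-based) prefixes, so 1535 is still '1Ki' while 1536 rounds up
# to '2Ki'.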
self.assertEqual(utils.human_readable(0), '0')
self.assertEqual(utils.human_readable(1), '1')
self.assertEqual(utils.human_readable(10), '10')
self.assertEqual(utils.human_readable(100), '100')
self.assertEqual(utils.human_readable(999), '999')
self.assertEqual(utils.human_readable(1024), '1Ki')
self.assertEqual(utils.human_readable(1535), '1Ki')
self.assertEqual(utils.human_readable(1536), '2Ki')
self.assertEqual(utils.human_readable(1047552), '1023Ki')
self.assertEqual(utils.human_readable(1048063), '1023Ki')
self.assertEqual(utils.human_readable(1048064), '1Mi')
self.assertEqual(utils.human_readable(1048576), '1Mi')
self.assertEqual(utils.human_readable(1073741824), '1Gi')
self.assertEqual(utils.human_readable(1099511627776), '1Ti')
self.assertEqual(utils.human_readable(1125899906842624), '1Pi')
self.assertEqual(utils.human_readable(1152921504606846976), '1Ei')
self.assertEqual(utils.human_readable(1180591620717411303424), '1Zi')
self.assertEqual(utils.human_readable(1208925819614629174706176),
'1Yi')
self.assertEqual(utils.human_readable(1237940039285380274899124224),
'1024Yi')
def test_validate_sync_to(self):
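# The realms conf supplies the '//REALM/CLUSTER/account/container'
# shorthand; when no realms conf is given, those '//' forms resolve to
# (None, None, None, None) rather than an error.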
fname = 'container-sync-realms.conf'
fcontents = '''
[US]
key = 9ff3b71c849749dbaec4ccdd3cbab62b
cluster_dfw1 = http://dfw1.host/v1/
'''
with temptree([fname], [fcontents]) as tempdir:
logger = FakeLogger()
fpath = os.path.join(tempdir, fname)
csr = ContainerSyncRealms(fpath, logger)
for realms_conf in (None, csr):
for goodurl, result in (
('http://1.1.1.1/v1/a/c',
(None, 'http://1.1.1.1/v1/a/c', None, None)),
('http://1.1.1.1:8080/a/c',
(None, 'http://1.1.1.1:8080/a/c', None, None)),
('http://2.2.2.2/a/c',
(None, 'http://2.2.2.2/a/c', None, None)),
('https://1.1.1.1/v1/a/c',
(None, 'https://1.1.1.1/v1/a/c', None, None)),
('//US/DFW1/a/c',
(None, 'http://dfw1.host/v1/a/c', 'US',
'9ff3b71c849749dbaec4ccdd3cbab62b')),
('//us/DFW1/a/c',
(None, 'http://dfw1.host/v1/a/c', 'US',
'9ff3b71c849749dbaec4ccdd3cbab62b')),
('//us/dfw1/a/c',
(None, 'http://dfw1.host/v1/a/c', 'US',
'9ff3b71c849749dbaec4ccdd3cbab62b')),
('//',
(None, None, None, None)),
('',
(None, None, None, None))):
if goodurl.startswith('//') and not realms_conf:
self.assertEqual(
utils.validate_sync_to(
goodurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
(None, None, None, None))
else:
self.assertEqual(
utils.validate_sync_to(
goodurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
result)
for badurl, result in (
('http://1.1.1.1',
('Path required in X-Container-Sync-To', None, None,
None)),
('httpq://1.1.1.1/v1/a/c',
('Invalid scheme \'httpq\' in X-Container-Sync-To, '
'must be "//", "http", or "https".', None, None,
None)),
('http://1.1.1.1/v1/a/c?query',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c#frag',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c?query#frag',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c?query=param',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c?query=param#frag',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.2/v1/a/c',
("Invalid host '1.1.1.2' in X-Container-Sync-To",
None, None, None)),
('//us/invalid/a/c',
("No cluster endpoint for 'us' 'invalid'", None,
None, None)),
('//invalid/dfw1/a/c',
("No realm key for 'invalid'", None, None, None)),
('//us/invalid1/a/',
("Invalid X-Container-Sync-To format "
"'//us/invalid1/a/'", None, None, None)),
('//us/invalid1/a',
("Invalid X-Container-Sync-To format "
"'//us/invalid1/a'", None, None, None)),
('//us/invalid1/',
("Invalid X-Container-Sync-To format "
"'//us/invalid1/'", None, None, None)),
('//us/invalid1',
("Invalid X-Container-Sync-To format "
"'//us/invalid1'", None, None, None)),
('//us/',
("Invalid X-Container-Sync-To format "
"'//us/'", None, None, None)),
('//us',
("Invalid X-Container-Sync-To format "
"'//us'", None, None, None))):
if badurl.startswith('//') and not realms_conf:
self.assertEqual(
utils.validate_sync_to(
badurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
(None, None, None, None))
else:
self.assertEqual(
utils.validate_sync_to(
badurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
result)
def test_TRUE_VALUES(self):
for v in utils.TRUE_VALUES:
self.assertEqual(v, v.lower())
def test_config_true_value(self):
orig_trues = utils.TRUE_VALUES
try:
utils.TRUE_VALUES = 'hello world'.split()
for val in 'hello world HELLO WORLD'.split():
self.assertTrue(utils.config_true_value(val) is True)
self.assertTrue(utils.config_true_value(True) is True)
self.assertTrue(utils.config_true_value('foo') is False)
self.assertTrue(utils.config_true_value(False) is False)
finally:
utils.TRUE_VALUES = orig_trues
def test_config_positive_int_value(self):
expectations = {
# value : expected,
u'1': 1,
b'1': 1,
1: 1,
u'2': 2,
b'2': 2,
u'1024': 1024,
b'1024': 1024,
u'0': ValueError,
b'0': ValueError,
u'-1': ValueError,
b'-1': ValueError,
u'0x01': ValueError,
b'0x01': ValueError,
u'asdf': ValueError,
b'asdf': ValueError,
None: ValueError,
0: ValueError,
-1: ValueError,
u'1.2': ValueError,  # a float-like string should raise ValueError
b'1.2': ValueError,  # a float-like string should raise ValueError
}
for value, expected in expectations.items():
try:
rv = utils.config_positive_int_value(value)
except Exception as e:
if e.__class__ is not expected:
raise
else:
self.assertEqual(
'Config option must be an positive int number, '
'not "%s".' % value, e.args[0])
else:
self.assertEqual(expected, rv)
def test_config_float_value(self):
for args, expected in (
((99, None, None), 99.0),
((99.01, None, None), 99.01),
(('99', None, None), 99.0),
(('99.01', None, None), 99.01),
((99, 99, None), 99.0),
((99.01, 99.01, None), 99.01),
(('99', 99, None), 99.0),
(('99.01', 99.01, None), 99.01),
((99, None, 99), 99.0),
((99.01, None, 99.01), 99.01),
(('99', None, 99), 99.0),
(('99.01', None, 99.01), 99.01),
((-99, -99, -99), -99.0),
((-99.01, -99.01, -99.01), -99.01),
(('-99', -99, -99), -99.0),
(('-99.01', -99.01, -99.01), -99.01),):
actual = utils.config_float_value(*args)
self.assertEqual(expected, actual)
for val, minimum in ((99, 100),
('99', 100),
(-99, -98),
('-98.01', -98)):
with self.assertRaises(ValueError) as cm:
utils.config_float_value(val, minimum=minimum)
self.assertIn('greater than %s' % minimum, cm.exception.args[0])
self.assertNotIn('less than', cm.exception.args[0])
for val, maximum in ((99, 98),
('99', 98),
(-99, -100),
('-97.9', -98)):
with self.assertRaises(ValueError) as cm:
utils.config_float_value(val, maximum=maximum)
self.assertIn('less than %s' % maximum, cm.exception.args[0])
self.assertNotIn('greater than', cm.exception.args[0])
for val, minimum, maximum in ((99, 99, 98),
('99', 100, 100),
(99, 98, 98),):
with self.assertRaises(ValueError) as cm:
utils.config_float_value(val, minimum=minimum, maximum=maximum)
self.assertIn('greater than %s' % minimum, cm.exception.args[0])
self.assertIn('less than %s' % maximum, cm.exception.args[0])
def test_config_auto_int_value(self):
expectations = {
# (value, default) : expected,
('1', 0): 1,
(1, 0): 1,
('asdf', 0): ValueError,
('auto', 1): 1,
('AutO', 1): 1,
('Aut0', 1): ValueError,
(None, 1): 1,
}
for (value, default), expected in expectations.items():
try:
rv = utils.config_auto_int_value(value, default)
except Exception as e:
if e.__class__ is not expected:
raise
else:
self.assertEqual(expected, rv)
def test_streq_const_time(self):
self.assertTrue(utils.streq_const_time('abc123', 'abc123'))
self.assertFalse(utils.streq_const_time('a', 'aaaaa'))
self.assertFalse(utils.streq_const_time('ABC123', 'abc123'))
def test_quorum_size(self):
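# quorum_size(n) is ceil(n / 2): half the replicas, rounded up.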
expected_sizes = {1: 1,
2: 1,
3: 2,
4: 2,
5: 3}
got_sizes = dict([(n, utils.quorum_size(n))
for n in expected_sizes])
self.assertEqual(expected_sizes, got_sizes)
def test_majority_size(self):
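# majority_size(n) is (n // 2) + 1: a strict majority of the replicas.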
expected_sizes = {1: 1,
2: 2,
3: 2,
4: 3,
5: 3}
got_sizes = dict([(n, utils.majority_size(n))
for n in expected_sizes])
self.assertEqual(expected_sizes, got_sizes)
def test_rsync_ip_ipv4_localhost(self):
self.assertEqual(utils.rsync_ip('127.0.0.1'), '127.0.0.1')
def test_rsync_ip_ipv6_random_ip(self):
self.assertEqual(
utils.rsync_ip('fe80:0000:0000:0000:0202:b3ff:fe1e:8329'),
'[fe80:0000:0000:0000:0202:b3ff:fe1e:8329]')
def test_rsync_ip_ipv6_ipv4_compatible(self):
self.assertEqual(
utils.rsync_ip('::ffff:192.0.2.128'), '[::ffff:192.0.2.128]')
def test_rsync_module_interpolation(self):
fake_device = {'ip': '127.0.0.1', 'port': 11,
'replication_ip': '127.0.0.2', 'replication_port': 12,
'region': '1', 'zone': '2', 'device': 'sda1',
'meta': 'just_a_string'}
self.assertEqual(
utils.rsync_module_interpolation('{ip}', fake_device),
'127.0.0.1')
self.assertEqual(
utils.rsync_module_interpolation('{port}', fake_device),
'11')
self.assertEqual(
utils.rsync_module_interpolation('{replication_ip}', fake_device),
'127.0.0.2')
self.assertEqual(
utils.rsync_module_interpolation('{replication_port}',
fake_device),
'12')
self.assertEqual(
utils.rsync_module_interpolation('{region}', fake_device),
'1')
self.assertEqual(
utils.rsync_module_interpolation('{zone}', fake_device),
'2')
self.assertEqual(
utils.rsync_module_interpolation('{device}', fake_device),
'sda1')
self.assertEqual(
utils.rsync_module_interpolation('{meta}', fake_device),
'just_a_string')
self.assertEqual(
utils.rsync_module_interpolation('{replication_ip}::object',
fake_device),
'127.0.0.2::object')
self.assertEqual(
utils.rsync_module_interpolation('{ip}::container{port}',
fake_device),
'127.0.0.1::container11')
self.assertEqual(
utils.rsync_module_interpolation(
'{replication_ip}::object_{device}', fake_device),
'127.0.0.2::object_sda1')
self.assertEqual(
utils.rsync_module_interpolation(
'127.0.0.3::object_{replication_port}', fake_device),
'127.0.0.3::object_12')
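# an unknown placeholder (note the deliberate 'deivce' typo below)
# raises ValueError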
self.assertRaises(ValueError, utils.rsync_module_interpolation,
'{replication_ip}::object_{deivce}', fake_device)
def test_generate_trans_id(self):
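# A transaction id is 'tx' + 21 random hex chars + '-' + a 10-digit hex
# timestamp (34 chars total), with any suffix appended after that.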
fake_time = 1366428370.5163341
with patch.object(utils.time, 'time', return_value=fake_time):
trans_id = utils.generate_trans_id('')
self.assertEqual(len(trans_id), 34)
self.assertEqual(trans_id[:2], 'tx')
self.assertEqual(trans_id[23], '-')
self.assertEqual(int(trans_id[24:], 16), int(fake_time))
with patch.object(utils.time, 'time', return_value=fake_time):
trans_id = utils.generate_trans_id('-suffix')
self.assertEqual(len(trans_id), 41)
self.assertEqual(trans_id[:2], 'tx')
self.assertEqual(trans_id[34:], '-suffix')
self.assertEqual(trans_id[23], '-')
self.assertEqual(int(trans_id[24:34], 16), int(fake_time))
def test_get_trans_id_time(self):
ts = utils.get_trans_id_time('tx8c8bc884cdaf499bb29429aa9c46946e')
self.assertIsNone(ts)
ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-0051720c06')
self.assertEqual(ts, 1366428678)
self.assertEqual(
time.asctime(time.gmtime(ts)) + ' UTC',
'Sat Apr 20 03:31:18 2013 UTC')
ts = utils.get_trans_id_time(
'tx1df4ff4f55ea45f7b2ec2-0051720c06-suffix')
self.assertEqual(ts, 1366428678)
self.assertEqual(
time.asctime(time.gmtime(ts)) + ' UTC',
'Sat Apr 20 03:31:18 2013 UTC')
ts = utils.get_trans_id_time('')
self.assertIsNone(ts)
ts = utils.get_trans_id_time('garbage')
self.assertIsNone(ts)
ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-almostright')
self.assertIsNone(ts)
def test_config_fallocate_value(self):
fallocate_value, is_percent = utils.config_fallocate_value('10%')
self.assertEqual(fallocate_value, 10)
self.assertTrue(is_percent)
fallocate_value, is_percent = utils.config_fallocate_value('10')
self.assertEqual(fallocate_value, 10)
self.assertFalse(is_percent)
try:
fallocate_value, is_percent = utils.config_fallocate_value('ab%')
except ValueError as err:
exc = err
self.assertEqual(str(exc), 'Error: ab% is an invalid value for '
'fallocate_reserve.')
try:
fallocate_value, is_percent = utils.config_fallocate_value('ab')
except ValueError as err:
exc = err
self.assertEqual(str(exc), 'Error: ab is an invalid value for '
'fallocate_reserve.')
try:
fallocate_value, is_percent = utils.config_fallocate_value('1%%')
except ValueError as err:
exc = err
self.assertEqual(str(exc), 'Error: 1%% is an invalid value for '
'fallocate_reserve.')
try:
fallocate_value, is_percent = utils.config_fallocate_value('10.0')
except ValueError as err:
exc = err
self.assertEqual(str(exc), 'Error: 10.0 is an invalid value for '
'fallocate_reserve.')
fallocate_value, is_percent = utils.config_fallocate_value('10.5%')
self.assertEqual(fallocate_value, 10.5)
self.assertTrue(is_percent)
fallocate_value, is_percent = utils.config_fallocate_value('10.000%')
self.assertEqual(fallocate_value, 10.000)
self.assertTrue(is_percent)
def test_lock_file(self):
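# lock_file holds an exclusive flock on the file for the duration of the
# context, so a non-blocking flock attempt on a second fd must fail with
# IOError while the context is open.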
flags = os.O_CREAT | os.O_RDWR
with NamedTemporaryFile(delete=False) as nt:
nt.write(b"test string")
nt.flush()
nt.close()
with utils.lock_file(nt.name, unlink=False) as f:
self.assertEqual(f.read(), b"test string")
# we hold the lock; now try to take out a second one
fd = os.open(nt.name, flags)
self.assertRaises(IOError, fcntl.flock, fd,
fcntl.LOCK_EX | fcntl.LOCK_NB)
with utils.lock_file(nt.name, unlink=False, append=True) as f:
f.seek(0)
self.assertEqual(f.read(), b"test string")
f.seek(0)
f.write(b"\nanother string")
f.flush()
f.seek(0)
self.assertEqual(f.read(), b"test string\nanother string")
# we hold the lock; now try to take out a second one
fd = os.open(nt.name, flags)
self.assertRaises(IOError, fcntl.flock, fd,
fcntl.LOCK_EX | fcntl.LOCK_NB)
with utils.lock_file(nt.name, timeout=3, unlink=False) as f:
try:
with utils.lock_file(
nt.name, timeout=1, unlink=False) as f:
self.assertTrue(
False, "Expected LockTimeout exception")
except LockTimeout:
pass
with utils.lock_file(nt.name, unlink=True) as f:
self.assertEqual(f.read(), b"test string\nanother string")
# we hold the lock; now try to take out a second one
fd = os.open(nt.name, flags)
self.assertRaises(
IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
self.assertRaises(OSError, os.remove, nt.name)
def test_lock_file_unlinked_after_open(self):
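# If the file is unlinked (and possibly recreated) between open and
# flock, lock_file should notice the inode change and retry, so the fd
# it finally yields never points at the original, now-unlinked inode.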
os_open = os.open
first_pass = [True]
def deleting_open(filename, flags):
# unlink the file after it's opened, but only on the first call
fd = os_open(filename, flags)
if first_pass[0]:
os.unlink(filename)
first_pass[0] = False
return fd
with NamedTemporaryFile(delete=False) as nt:
with mock.patch('os.open', deleting_open):
with utils.lock_file(nt.name, unlink=True) as f:
self.assertNotEqual(os.fstat(nt.fileno()).st_ino,
os.fstat(f.fileno()).st_ino)
first_pass = [True]
def recreating_open(filename, flags):
# unlink and recreate the file after it's opened
fd = os_open(filename, flags)
if first_pass[0]:
os.unlink(filename)
os.close(os_open(filename, os.O_CREAT | os.O_RDWR))
first_pass[0] = False
return fd
with NamedTemporaryFile(delete=False) as nt:
with mock.patch('os.open', recreating_open):
with utils.lock_file(nt.name, unlink=True) as f:
self.assertNotEqual(os.fstat(nt.fileno()).st_ino,
os.fstat(f.fileno()).st_ino)
def test_lock_file_held_on_unlink(self):
os_unlink = os.unlink
def flocking_unlink(filename):
# make sure the lock is held when we unlink
fd = os.open(filename, os.O_RDWR)
self.assertRaises(
IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
os.close(fd)
os_unlink(filename)
with NamedTemporaryFile(delete=False) as nt:
with mock.patch('os.unlink', flocking_unlink):
with utils.lock_file(nt.name, unlink=True):
pass
def test_lock_file_no_unlink_if_fail(self):
os_open = os.open
with NamedTemporaryFile(delete=True) as nt:
def lock_on_open(filename, flags):
# lock the file on another fd after it's opened.
fd = os_open(filename, flags)
fd2 = os_open(filename, flags)
fcntl.flock(fd2, fcntl.LOCK_EX | fcntl.LOCK_NB)
return fd
try:
timedout = False
with mock.patch('os.open', lock_on_open):
with utils.lock_file(nt.name, unlink=False, timeout=0.01):
pass
except LockTimeout:
timedout = True
self.assertTrue(timedout)
self.assertTrue(os.path.exists(nt.name))
def test_ismount_path_does_not_exist(self):
tmpdir = mkdtemp()
try:
self.assertFalse(utils.ismount(os.path.join(tmpdir, 'bar')))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_not_mount(self):
tmpdir = mkdtemp()
try:
self.assertFalse(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_error(self):
def _mock_os_lstat(path):
raise OSError(13, "foo")
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
# Raises exception with _raw -- see next test.
utils.ismount(tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_raw_path_error(self):
def _mock_os_lstat(path):
raise OSError(13, "foo")
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertRaises(OSError, utils.ismount_raw, tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_is_symlink(self):
tmpdir = mkdtemp()
try:
link = os.path.join(tmpdir, "tmp")
os.symlink(tempfile.gettempdir(), link)
self.assertFalse(utils.ismount(link))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_is_root(self):
self.assertTrue(utils.ismount('/'))
def test_ismount_parent_path_error(self):
_os_lstat = os.lstat
def _mock_os_lstat(path):
if path.endswith(".."):
raise OSError(13, "foo")
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
# Raises exception with _raw -- see next test.
utils.ismount(tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_raw_parent_path_error(self):
_os_lstat = os.lstat
def _mock_os_lstat(path):
if path.endswith(".."):
raise OSError(13, "foo")
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertRaises(OSError, utils.ismount_raw, tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_successes_dev(self):
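# ismount treats a path as a mount point when its st_dev differs from
# its parent's, so faking a parent on a different device makes any
# directory look mounted.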
_os_lstat = os.lstat
class MockStat(object):
def __init__(self, mode, dev, ino):
self.st_mode = mode
self.st_dev = dev
self.st_ino = ino
def _mock_os_lstat(path):
if path.endswith(".."):
parent = _os_lstat(path)
return MockStat(parent.st_mode, parent.st_dev + 1,
parent.st_ino)
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertTrue(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_ismount_successes_ino(self):
_os_lstat = os.lstat
class MockStat(object):
def __init__(self, mode, dev, ino):
self.st_mode = mode
self.st_dev = dev
self.st_ino = ino
def _mock_os_lstat(path):
if path.endswith(".."):
return _os_lstat(path)
else:
parent_path = os.path.join(path, "..")
child = _os_lstat(path)
parent = _os_lstat(parent_path)
return MockStat(child.st_mode, parent.st_ino,
child.st_dev)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertTrue(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_ismount_successes_stubfile(self):
tmpdir = mkdtemp()
fname = os.path.join(tmpdir, ".ismount")
try:
with open(fname, "w") as stubfile:
stubfile.write("")
self.assertTrue(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_parse_content_type(self):
self.assertEqual(utils.parse_content_type('text/plain'),
('text/plain', []))
self.assertEqual(utils.parse_content_type('text/plain;charset=utf-8'),
('text/plain', [('charset', 'utf-8')]))
self.assertEqual(
utils.parse_content_type('text/plain;hello="world";charset=utf-8'),
('text/plain', [('hello', '"world"'), ('charset', 'utf-8')]))
self.assertEqual(
utils.parse_content_type('text/plain; hello="world"; a=b'),
('text/plain', [('hello', '"world"'), ('a', 'b')]))
self.assertEqual(
utils.parse_content_type(r'text/plain; x="\""; a=b'),
('text/plain', [('x', r'"\""'), ('a', 'b')]))
self.assertEqual(
utils.parse_content_type(r'text/plain; x; a=b'),
('text/plain', [('x', ''), ('a', 'b')]))
self.assertEqual(
utils.parse_content_type(r'text/plain; x="\""; a'),
('text/plain', [('x', r'"\""'), ('a', '')]))
def test_override_bytes_from_content_type(self):
listing_dict = {
'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv',
'content_type': 'text/plain; hello="world"; swift_bytes=15'}
utils.override_bytes_from_content_type(listing_dict,
logger=FakeLogger())
self.assertEqual(listing_dict['bytes'], 15)
self.assertEqual(listing_dict['content_type'],
'text/plain;hello="world"')
listing_dict = {
'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv',
'content_type': 'text/plain; hello="world"; swift_bytes=hey'}
utils.override_bytes_from_content_type(listing_dict,
logger=FakeLogger())
self.assertEqual(listing_dict['bytes'], 1234)
self.assertEqual(listing_dict['content_type'],
'text/plain;hello="world"')
def test_extract_swift_bytes(self):
scenarios = {
# maps input value -> expected returned tuple
'': ('', None),
'text/plain': ('text/plain', None),
'text/plain; other=thing': ('text/plain;other=thing', None),
'text/plain; swift_bytes=123': ('text/plain', '123'),
'text/plain; other=thing;swift_bytes=123':
('text/plain;other=thing', '123'),
'text/plain; swift_bytes=123; other=thing':
('text/plain;other=thing', '123'),
'text/plain; swift_bytes=123; swift_bytes=456':
('text/plain', '456'),
'text/plain; swift_bytes=123; other=thing;swift_bytes=456':
('text/plain;other=thing', '456')}
for test_value, expected in scenarios.items():
self.assertEqual(expected, utils.extract_swift_bytes(test_value))
def test_clean_content_type(self):
subtests = {
'': '', 'text/plain': 'text/plain',
'text/plain; someother=thing': 'text/plain; someother=thing',
'text/plain; swift_bytes=123': 'text/plain',
'text/plain; someother=thing; swift_bytes=123':
'text/plain; someother=thing',
# Since Swift always tacks on the swift_bytes, clean_content_type()
# only strips swift_bytes if it's last. The next item simply shows
# that if for some other odd reason it's not last,
# clean_content_type() will not remove it from the header.
'text/plain; swift_bytes=123; someother=thing':
'text/plain; swift_bytes=123; someother=thing'}
for before, after in subtests.items():
self.assertEqual(utils.clean_content_type(before), after)
def test_get_valid_utf8_str(self):
def do_test(input_value, expected):
actual = utils.get_valid_utf8_str(input_value)
self.assertEqual(expected, actual)
self.assertIsInstance(actual, six.binary_type)
actual.decode('utf-8')
do_test(b'abc', b'abc')
do_test(u'abc', b'abc')
do_test(u'\uc77c\uc601', b'\xec\x9d\xbc\xec\x98\x81')
do_test(b'\xec\x9d\xbc\xec\x98\x81', b'\xec\x9d\xbc\xec\x98\x81')
# test some invalid UTF-8
do_test(b'\xec\x9d\xbc\xec\x98', b'\xec\x9d\xbc\xef\xbf\xbd')
# check surrogate pairs, too
do_test(u'\U0001f0a1', b'\xf0\x9f\x82\xa1')
do_test(u'\uD83C\uDCA1', b'\xf0\x9f\x82\xa1')
do_test(b'\xf0\x9f\x82\xa1', b'\xf0\x9f\x82\xa1')
do_test(b'\xed\xa0\xbc\xed\xb2\xa1', b'\xf0\x9f\x82\xa1')
def test_quote_bytes(self):
self.assertEqual(b'/v1/a/c3/subdirx/',
utils.quote(b'/v1/a/c3/subdirx/'))
self.assertEqual(b'/v1/a%26b/c3/subdirx/',
utils.quote(b'/v1/a&b/c3/subdirx/'))
self.assertEqual(b'%2Fv1%2Fa&b%2Fc3%2Fsubdirx%2F',
utils.quote(b'/v1/a&b/c3/subdirx/', safe='&'))
self.assertEqual(b'abc_%EC%9D%BC%EC%98%81',
utils.quote(u'abc_\uc77c\uc601'.encode('utf8')))
# Invalid UTF-8 bytes come back as the U+FFFD replacement character,
# re-encoded as UTF-8 before quoting
self.assertEqual(b'%EF%BF%BD%EF%BF%BD%EC%BC%9D%EF%BF%BD',
utils.quote(u'\uc77c\uc601'.encode('utf8')[::-1]))
def test_quote_unicode(self):
self.assertEqual(u'/v1/a/c3/subdirx/',
utils.quote(u'/v1/a/c3/subdirx/'))
self.assertEqual(u'/v1/a%26b/c3/subdirx/',
utils.quote(u'/v1/a&b/c3/subdirx/'))
self.assertEqual(u'%2Fv1%2Fa&b%2Fc3%2Fsubdirx%2F',
utils.quote(u'/v1/a&b/c3/subdirx/', safe='&'))
self.assertEqual(u'abc_%EC%9D%BC%EC%98%81',
utils.quote(u'abc_\uc77c\uc601'))
def test_get_hmac(self):
self.assertEqual(
utils.get_hmac('GET', '/path', 1, 'abc'),
'b17f6ff8da0e251737aa9e3ee69a881e3e092e2f')
def test_get_hmac_ip_range(self):
self.assertEqual(
utils.get_hmac('GET', '/path', 1, 'abc', ip_range='127.0.0.1'),
'b30dde4d2b8562b8496466c3b46b2b9ac5054461')
def test_get_hmac_ip_range_non_binary_type(self):
self.assertEqual(
utils.get_hmac(u'GET', u'/path', 1, u'abc', ip_range=u'127.0.0.1'),
'b30dde4d2b8562b8496466c3b46b2b9ac5054461')
def test_parse_override_options(self):
# When override_<thing> is passed in, it takes precedence.
opts = utils.parse_override_options(
override_policies=[0, 1],
override_devices=['sda', 'sdb'],
override_partitions=[100, 200],
policies='0,1,2,3',
devices='sda,sdb,sdc,sdd',
partitions='100,200,300,400')
self.assertEqual(opts.policies, [0, 1])
self.assertEqual(opts.devices, ['sda', 'sdb'])
self.assertEqual(opts.partitions, [100, 200])
# When override_<thing> is passed in, it applies even in run-once
# mode.
opts = utils.parse_override_options(
once=True,
override_policies=[0, 1],
override_devices=['sda', 'sdb'],
override_partitions=[100, 200],
policies='0,1,2,3',
devices='sda,sdb,sdc,sdd',
partitions='100,200,300,400')
self.assertEqual(opts.policies, [0, 1])
self.assertEqual(opts.devices, ['sda', 'sdb'])
self.assertEqual(opts.partitions, [100, 200])
# In run-once mode, we honor the passed-in overrides.
opts = utils.parse_override_options(
once=True,
policies='0,1,2,3',
devices='sda,sdb,sdc,sdd',
partitions='100,200,300,400')
self.assertEqual(opts.policies, [0, 1, 2, 3])
self.assertEqual(opts.devices, ['sda', 'sdb', 'sdc', 'sdd'])
self.assertEqual(opts.partitions, [100, 200, 300, 400])
# In run-forever mode, we ignore the passed-in overrides.
opts = utils.parse_override_options(
policies='0,1,2,3',
devices='sda,sdb,sdc,sdd',
partitions='100,200,300,400')
self.assertEqual(opts.policies, [])
self.assertEqual(opts.devices, [])
self.assertEqual(opts.partitions, [])
def test_get_policy_index(self):
# Account has no information about a policy
req = Request.blank(
'/sda1/p/a',
environ={'REQUEST_METHOD': 'GET'})
res = Response()
self.assertIsNone(utils.get_policy_index(req.headers,
res.headers))
# The policy of a container can be specified by the response header
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'GET'})
res = Response(headers={'X-Backend-Storage-Policy-Index': '1'})
self.assertEqual('1', utils.get_policy_index(req.headers,
res.headers))
# The policy of an object to be created can be specified by the request
# header
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Backend-Storage-Policy-Index': '2'})
res = Response()
self.assertEqual('2', utils.get_policy_index(req.headers,
res.headers))
def test_get_log_line(self):
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD', 'REMOTE_ADDR': '1.2.3.4'})
res = Response()
trans_time = 1.2
additional_info = 'some information'
server_pid = 1234
exp_line = '1.2.3.4 - - [01/Jan/1970:02:46:41 +0000] "HEAD ' \
'/sda1/p/a/c/o" 200 - "-" "-" "-" 1.2000 "some information" 1234 -'
with mock.patch(
'time.gmtime',
mock.MagicMock(side_effect=[time.gmtime(10001.0)])):
with mock.patch(
'os.getpid', mock.MagicMock(return_value=server_pid)):
self.assertEqual(
exp_line,
utils.get_log_line(req, res, trans_time, additional_info))
def test_cache_from_env(self):
# should never log an error when swift.cache is found
env = {'swift.cache': 42}
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env, False))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env, True))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
# check allow_none controls logging when swift.cache is not found
err_msg = 'ERROR: swift.cache could not be found in env!'
env = {}
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertIsNone(utils.cache_from_env(env))
self.assertTrue(err_msg in logger.get_lines_for_level('error'))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertIsNone(utils.cache_from_env(env, False))
self.assertTrue(err_msg in logger.get_lines_for_level('error'))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertIsNone(utils.cache_from_env(env, True))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
def test_fsync_dir(self):
tempdir = None
fd = None
try:
tempdir = mkdtemp()
fd, temppath = tempfile.mkstemp(dir=tempdir)
_mock_fsync = mock.Mock()
_mock_close = mock.Mock()
with patch('swift.common.utils.fsync', _mock_fsync):
with patch('os.close', _mock_close):
utils.fsync_dir(tempdir)
self.assertTrue(_mock_fsync.called)
self.assertTrue(_mock_close.called)
self.assertTrue(isinstance(_mock_fsync.call_args[0][0], int))
self.assertEqual(_mock_fsync.call_args[0][0],
_mock_close.call_args[0][0])
# Not a directory - arg is file path
self.assertRaises(OSError, utils.fsync_dir, temppath)
logger = FakeLogger()
def _mock_fsync(fd):
raise OSError(errno.EBADF, os.strerror(errno.EBADF))
with patch('swift.common.utils.fsync', _mock_fsync):
with mock.patch('swift.common.utils.logging', logger):
utils.fsync_dir(tempdir)
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
finally:
if fd is not None:
os.close(fd)
os.unlink(temppath)
if tempdir:
os.rmdir(tempdir)
def test_renamer_with_fsync_dir(self):
tempdir = None
try:
tempdir = mkdtemp()
# Simulate part of object path already existing
part_dir = os.path.join(tempdir, 'objects/1234/')
os.makedirs(part_dir)
obj_dir = os.path.join(part_dir, 'aaa', 'a' * 32)
obj_path = os.path.join(obj_dir, '1425276031.12345.data')
# Object dir had to be created
_m_os_rename = mock.Mock()
_m_fsync_dir = mock.Mock()
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
utils.renamer("fake_path", obj_path)
_m_os_rename.assert_called_once_with('fake_path', obj_path)
# fsync_dir is called on the parents of all newly created dirs
self.assertEqual(_m_fsync_dir.call_count, 3)
# Object dir existed
_m_os_rename.reset_mock()
_m_fsync_dir.reset_mock()
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
utils.renamer("fake_path", obj_path)
_m_os_rename.assert_called_once_with('fake_path', obj_path)
# fsync_dir only on the leaf dir
self.assertEqual(_m_fsync_dir.call_count, 1)
finally:
if tempdir:
shutil.rmtree(tempdir)
def test_renamer_when_fsync_is_false(self):
_m_os_rename = mock.Mock()
_m_fsync_dir = mock.Mock()
_m_makedirs_count = mock.Mock(return_value=2)
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
with patch('swift.common.utils.makedirs_count',
_m_makedirs_count):
utils.renamer("fake_path", "/a/b/c.data", fsync=False)
_m_makedirs_count.assert_called_once_with("/a/b")
_m_os_rename.assert_called_once_with('fake_path', "/a/b/c.data")
self.assertFalse(_m_fsync_dir.called)
def test_makedirs_count(self):
tempdir = None
fd = None
try:
tempdir = mkdtemp()
os.makedirs(os.path.join(tempdir, 'a/b'))
# 4 new dirs created
dirpath = os.path.join(tempdir, 'a/b/1/2/3/4')
ret = utils.makedirs_count(dirpath)
self.assertEqual(ret, 4)
# no new dirs created - dir already exists
ret = utils.makedirs_count(dirpath)
self.assertEqual(ret, 0)
# path exists and is a file
fd, temppath = tempfile.mkstemp(dir=dirpath)
os.close(fd)
self.assertRaises(OSError, utils.makedirs_count, temppath)
finally:
if tempdir:
shutil.rmtree(tempdir)
def test_find_shard_range(self):
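# Shard ranges are (lower, upper] intervals on the object name: a name
# must be strictly greater than the lower bound and no greater than the
# upper bound (an empty upper bound means unbounded), so 'f' lands in
# a-f while 'f\x00' lands in f-l, and the empty name matches nothing.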
ts = utils.Timestamp.now().internal
start = utils.ShardRange('a/-a', ts, '', 'a')
atof = utils.ShardRange('a/a-f', ts, 'a', 'f')
ftol = utils.ShardRange('a/f-l', ts, 'f', 'l')
ltor = utils.ShardRange('a/l-r', ts, 'l', 'r')
rtoz = utils.ShardRange('a/r-z', ts, 'r', 'z')
end = utils.ShardRange('a/z-', ts, 'z', '')
ranges = [start, atof, ftol, ltor, rtoz, end]
found = utils.find_shard_range('', ranges)
self.assertEqual(found, None)
found = utils.find_shard_range(' ', ranges)
self.assertEqual(found, start)
found = utils.find_shard_range(' ', ranges[1:])
self.assertEqual(found, None)
found = utils.find_shard_range('b', ranges)
self.assertEqual(found, atof)
found = utils.find_shard_range('f', ranges)
self.assertEqual(found, atof)
found = utils.find_shard_range('f\x00', ranges)
self.assertEqual(found, ftol)
found = utils.find_shard_range('x', ranges)
self.assertEqual(found, rtoz)
found = utils.find_shard_range('r', ranges)
self.assertEqual(found, ltor)
found = utils.find_shard_range('}', ranges)
self.assertEqual(found, end)
found = utils.find_shard_range('}', ranges[:-1])
self.assertEqual(found, None)
# remove l-r from the list of ranges and try to find a shard range for
# an item in that range.
found = utils.find_shard_range('p', ranges[:-3] + ranges[-2:])
self.assertEqual(found, None)
# add some sub-shards; a sub-shard's state is less than its parent
# while the parent is undeleted, so insert these ahead of the
# overlapping parent in the list of ranges
ftoh = utils.ShardRange('a/f-h', ts, 'f', 'h')
htok = utils.ShardRange('a/h-k', ts, 'h', 'k')
overlapping_ranges = ranges[:2] + [ftoh, htok] + ranges[2:]
found = utils.find_shard_range('g', overlapping_ranges)
self.assertEqual(found, ftoh)
found = utils.find_shard_range('h', overlapping_ranges)
self.assertEqual(found, ftoh)
found = utils.find_shard_range('k', overlapping_ranges)
self.assertEqual(found, htok)
found = utils.find_shard_range('l', overlapping_ranges)
self.assertEqual(found, ftol)
found = utils.find_shard_range('m', overlapping_ranges)
self.assertEqual(found, ltor)
ktol = utils.ShardRange('a/k-l', ts, 'k', 'l')
overlapping_ranges = ranges[:2] + [ftoh, htok, ktol] + ranges[2:]
found = utils.find_shard_range('l', overlapping_ranges)
self.assertEqual(found, ktol)
def test_parse_db_filename(self):
actual = utils.parse_db_filename('hash.db')
self.assertEqual(('hash', None, '.db'), actual)
actual = utils.parse_db_filename('hash_1234567890.12345.db')
self.assertEqual(('hash', '1234567890.12345', '.db'), actual)
actual = utils.parse_db_filename(
'/dev/containers/part/ash/hash/hash_1234567890.12345.db')
self.assertEqual(('hash', '1234567890.12345', '.db'), actual)
self.assertRaises(ValueError, utils.parse_db_filename, '/path/to/dir/')
# These shouldn't come up in practice; included for completeness
self.assertEqual(utils.parse_db_filename('hashunder_.db'),
('hashunder', '', '.db'))
self.assertEqual(utils.parse_db_filename('lots_of_underscores.db'),
('lots', 'of', '.db'))
def test_make_db_file_path(self):
epoch = utils.Timestamp.now()
actual = utils.make_db_file_path('hash.db', epoch)
self.assertEqual('hash_%s.db' % epoch.internal, actual)
actual = utils.make_db_file_path('hash_oldepoch.db', epoch)
self.assertEqual('hash_%s.db' % epoch.internal, actual)
actual = utils.make_db_file_path('/path/to/hash.db', epoch)
self.assertEqual('/path/to/hash_%s.db' % epoch.internal, actual)
epoch = utils.Timestamp.now()
actual = utils.make_db_file_path(actual, epoch)
self.assertEqual('/path/to/hash_%s.db' % epoch.internal, actual)
# None strips epoch
self.assertEqual('hash.db', utils.make_db_file_path('hash.db', None))
self.assertEqual('/path/to/hash.db', utils.make_db_file_path(
'/path/to/hash_withepoch.db', None))
# epochs shouldn't have offsets
epoch = utils.Timestamp.now(offset=10)
actual = utils.make_db_file_path(actual, epoch)
self.assertEqual('/path/to/hash_%s.db' % epoch.normal, actual)
self.assertRaises(ValueError, utils.make_db_file_path,
'/path/to/hash.db', 'bad epoch')
def test_modify_priority(self):
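# The ionice value packs the scheduling class into the high bits
# (class << 13 | priority), and the ioprio_set syscall number differs by
# architecture (251 on x86_64, 30 on aarch64), as the expected 'syscall'
# tuples below show.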
pid = os.getpid()
logger = debug_logger()
called = {}
def _fake_setpriority(*args):
called['setpriority'] = args
def _fake_syscall(*args):
called['syscall'] = args
# Test if current architecture supports changing of priority
try:
utils.NR_ioprio_set()
except OSError as e:
raise unittest.SkipTest(e)
with patch('swift.common.utils._libc_setpriority',
_fake_setpriority), \
patch('swift.common.utils._posix_syscall', _fake_syscall):
called = {}
# not set / default
utils.modify_priority({}, logger)
self.assertEqual(called, {})
called = {}
# just nice
utils.modify_priority({'nice_priority': '1'}, logger)
self.assertEqual(called, {'setpriority': (0, pid, 1)})
called = {}
# ionice class alone falls back to the default priority 0
utils.modify_priority({'ionice_class': 'IOPRIO_CLASS_RT'}, logger)
architecture = os.uname()[4]
arch_bits = platform.architecture()[0]
if architecture == 'x86_64' and arch_bits == '64bit':
self.assertEqual(called, {'syscall': (251, 1, pid, 1 << 13)})
elif architecture == 'aarch64' and arch_bits == '64bit':
self.assertEqual(called, {'syscall': (30, 1, pid, 1 << 13)})
else:
self.fail("Unexpected call: %r" % called)
called = {}
# ionice priority alone is ignored
utils.modify_priority({'ionice_priority': '4'}, logger)
self.assertEqual(called, {})
called = {}
# bad ionice class
utils.modify_priority({'ionice_class': 'class_foo'}, logger)
self.assertEqual(called, {})
called = {}
# ionice class & priority
utils.modify_priority({
'ionice_class': 'IOPRIO_CLASS_BE',
'ionice_priority': '4',
}, logger)
if architecture == 'x86_64' and arch_bits == '64bit':
self.assertEqual(called, {
'syscall': (251, 1, pid, 2 << 13 | 4)
})
elif architecture == 'aarch64' and arch_bits == '64bit':
self.assertEqual(called, {
'syscall': (30, 1, pid, 2 << 13 | 4)
})
else:
self.fail("Unexpected call: %r" % called)
called = {}
# all
utils.modify_priority({
'nice_priority': '-15',
'ionice_class': 'IOPRIO_CLASS_IDLE',
'ionice_priority': '6',
}, logger)
if architecture == 'x86_64' and arch_bits == '64bit':
self.assertEqual(called, {
'setpriority': (0, pid, -15),
'syscall': (251, 1, pid, 3 << 13 | 6),
})
elif architecture == 'aarch64' and arch_bits == '64bit':
self.assertEqual(called, {
'setpriority': (0, pid, -15),
'syscall': (30, 1, pid, 3 << 13 | 6),
})
else:
self.fail("Unexpected call: %r" % called)
def test__NR_ioprio_set(self):
with patch('os.uname', return_value=('', '', '', '', 'x86_64')), \
patch('platform.architecture', return_value=('64bit', '')):
self.assertEqual(251, utils.NR_ioprio_set())
with patch('os.uname', return_value=('', '', '', '', 'x86_64')), \
patch('platform.architecture', return_value=('32bit', '')):
self.assertRaises(OSError, utils.NR_ioprio_set)
with patch('os.uname', return_value=('', '', '', '', 'aarch64')), \
patch('platform.architecture', return_value=('64bit', '')):
self.assertEqual(30, utils.NR_ioprio_set())
with patch('os.uname', return_value=('', '', '', '', 'aarch64')), \
patch('platform.architecture', return_value=('32bit', '')):
self.assertRaises(OSError, utils.NR_ioprio_set)
with patch('os.uname', return_value=('', '', '', '', 'alpha')), \
patch('platform.architecture', return_value=('64bit', '')):
self.assertRaises(OSError, utils.NR_ioprio_set)
@requires_o_tmpfile_support_in_tmp
def test_link_fd_to_path_linkat_success(self):
tempdir = mkdtemp()
fd = os.open(tempdir, utils.O_TMPFILE | os.O_WRONLY)
data = b"I'm whatever Gotham needs me to be"
_m_fsync_dir = mock.Mock()
try:
os.write(fd, data)
# fd is O_WRONLY
self.assertRaises(OSError, os.read, fd, 1)
file_path = os.path.join(tempdir, uuid4().hex)
with mock.patch('swift.common.utils.fsync_dir', _m_fsync_dir):
utils.link_fd_to_path(fd, file_path, 1)
with open(file_path, 'rb') as f:
self.assertEqual(f.read(), data)
self.assertEqual(_m_fsync_dir.call_count, 2)
finally:
os.close(fd)
shutil.rmtree(tempdir)
@requires_o_tmpfile_support_in_tmp
def test_link_fd_to_path_target_exists(self):
tempdir = mkdtemp()
# Create and write to a file
fd, path = tempfile.mkstemp(dir=tempdir)
os.write(fd, b"hello world")
os.fsync(fd)
os.close(fd)
self.assertTrue(os.path.exists(path))
fd = os.open(tempdir, utils.O_TMPFILE | os.O_WRONLY)
try:
os.write(fd, b"bye world")
os.fsync(fd)
utils.link_fd_to_path(fd, path, 0, fsync=False)
# The original file should now have been overwritten
with open(path, 'rb') as f:
self.assertEqual(f.read(), b"bye world")
finally:
os.close(fd)
shutil.rmtree(tempdir)
@requires_o_tmpfile_support
def test_link_fd_to_path_errno_not_EEXIST_or_ENOENT(self):
_m_linkat = mock.Mock(
side_effect=IOError(errno.EACCES, os.strerror(errno.EACCES)))
with mock.patch('swift.common.utils.linkat', _m_linkat):
try:
utils.link_fd_to_path(0, '/path', 1)
except IOError as err:
self.assertEqual(err.errno, errno.EACCES)
else:
self.fail("Expecting IOError exception")
self.assertTrue(_m_linkat.called)
@requires_o_tmpfile_support_in_tmp
def test_linkat_race_dir_not_exists(self):
tempdir = mkdtemp()
target_dir = os.path.join(tempdir, uuid4().hex)
target_path = os.path.join(target_dir, uuid4().hex)
os.mkdir(target_dir)
fd = os.open(target_dir, utils.O_TMPFILE | os.O_WRONLY)
# Simulate the directory being deleted by another backend process
os.rmdir(target_dir)
self.assertFalse(os.path.exists(target_dir))
try:
utils.link_fd_to_path(fd, target_path, 1)
self.assertTrue(os.path.exists(target_dir))
self.assertTrue(os.path.exists(target_path))
finally:
os.close(fd)
shutil.rmtree(tempdir)
def test_safe_json_loads(self):
expectations = {
None: None,
'': None,
0: None,
1: None,
'"asdf"': 'asdf',
'[]': [],
'{}': {},
"{'foo': 'bar'}": None,
'{"foo": "bar"}': {'foo': 'bar'},
}
failures = []
for value, expected in expectations.items():
try:
result = utils.safe_json_loads(value)
except Exception as e:
# it's called 'safe'; if it raises, the test fails
self.fail('%r caused safe method to throw %r!' % (
value, e))
try:
self.assertEqual(expected, result)
except AssertionError:
failures.append('%r => %r (expected %r)' % (
value, result, expected))
if failures:
self.fail('Invalid results from pure function:\n%s' %
'\n'.join(failures))
def test_strict_b64decode(self):
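# strict_b64decode rejects values whose length is not a multiple of four
# and values containing whitespace or other illegal characters, rather
# than silently ignoring them; extra trailing padding is tolerated.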
expectations = {
None: ValueError,
0: ValueError,
b'': b'',
u'': b'',
b'A': ValueError,
b'AA': ValueError,
b'AAA': ValueError,
b'AAAA': b'\x00\x00\x00',
u'AAAA': b'\x00\x00\x00',
b'////': b'\xff\xff\xff',
u'////': b'\xff\xff\xff',
b'A===': ValueError,
b'AA==': b'\x00',
b'AAA=': b'\x00\x00',
b' AAAA': ValueError,
b'AAAA ': ValueError,
b'AAAA============': b'\x00\x00\x00',
b'AA&AA==': ValueError,
b'====': b'',
}
failures = []
for value, expected in expectations.items():
try:
result = utils.strict_b64decode(value)
except Exception as e:
if inspect.isclass(expected) and issubclass(
expected, Exception):
if not isinstance(e, expected):
failures.append('%r raised %r (expected to raise %r)' %
(value, e, expected))
else:
failures.append('%r raised %r (expected to return %r)' %
(value, e, expected))
else:
if inspect.isclass(expected) and issubclass(
expected, Exception):
failures.append('%r => %r (expected to raise %r)' %
(value, result, expected))
elif result != expected:
failures.append('%r => %r (expected %r)' % (
value, result, expected))
if failures:
self.fail('Invalid results from pure function:\n%s' %
'\n'.join(failures))
def test_replace_partition_in_path(self):
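# Raising the part power by one splits each partition in two: old
# partition p becomes either 2p or 2p + 1 depending on the next bit of
# the object hash, hence 700 -> 1400 here and 693 -> 1387 below.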
# Check for new part = part * 2
old = '/s/n/d/o/700/c77/af088baea4806dcaba30bf07d9e64c77/f'
new = '/s/n/d/o/1400/c77/af088baea4806dcaba30bf07d9e64c77/f'
# Expected outcome
self.assertEqual(utils.replace_partition_in_path(old, 11), new)
# Make sure there is no change if the part power didn't change
self.assertEqual(utils.replace_partition_in_path(old, 10), old)
self.assertEqual(utils.replace_partition_in_path(new, 11), new)
# Check for new part = part * 2 + 1
old = '/s/n/d/o/693/c77/ad708baea4806dcaba30bf07d9e64c77/f'
new = '/s/n/d/o/1387/c77/ad708baea4806dcaba30bf07d9e64c77/f'
# Expected outcome
self.assertEqual(utils.replace_partition_in_path(old, 11), new)
# Make sure there is no change if the part power didn't change
self.assertEqual(utils.replace_partition_in_path(old, 10), old)
self.assertEqual(utils.replace_partition_in_path(new, 11), new)
def test_round_robin_iter(self):
it1 = iter([1, 2, 3])
it2 = iter([4, 5])
it3 = iter([6, 7, 8, 9])
it4 = iter([])
rr_its = utils.round_robin_iter([it1, it2, it3, it4])
got = list(rr_its)
# Expect that items get fetched in a round-robin fashion from the
# iterators
self.assertListEqual([1, 4, 6, 2, 5, 7, 3, 8, 9], got)
@with_tempdir
def test_get_db_files(self, tempdir):
dbdir = os.path.join(tempdir, 'dbdir')
self.assertEqual([], utils.get_db_files(dbdir))
path_1 = os.path.join(dbdir, 'dbfile.db')
self.assertEqual([], utils.get_db_files(path_1))
os.mkdir(dbdir)
self.assertEqual([], utils.get_db_files(path_1))
with open(path_1, 'wb'):
pass
self.assertEqual([path_1], utils.get_db_files(path_1))
path_2 = os.path.join(dbdir, 'dbfile_2.db')
self.assertEqual([path_1], utils.get_db_files(path_2))
with open(path_2, 'wb'):
pass
self.assertEqual([path_1, path_2], utils.get_db_files(path_1))
self.assertEqual([path_1, path_2], utils.get_db_files(path_2))
path_3 = os.path.join(dbdir, 'dbfile_3.db')
self.assertEqual([path_1, path_2], utils.get_db_files(path_3))
with open(path_3, 'wb'):
pass
self.assertEqual([path_1, path_2, path_3], utils.get_db_files(path_1))
self.assertEqual([path_1, path_2, path_3], utils.get_db_files(path_2))
self.assertEqual([path_1, path_2, path_3], utils.get_db_files(path_3))
other_hash = os.path.join(dbdir, 'other.db')
self.assertEqual([], utils.get_db_files(other_hash))
other_hash = os.path.join(dbdir, 'other_1.db')
self.assertEqual([], utils.get_db_files(other_hash))
pending = os.path.join(dbdir, 'dbfile.pending')
self.assertEqual([path_1, path_2, path_3], utils.get_db_files(pending))
with open(pending, 'wb'):
pass
self.assertEqual([path_1, path_2, path_3], utils.get_db_files(pending))
self.assertEqual([path_1, path_2, path_3], utils.get_db_files(path_1))
self.assertEqual([path_1, path_2, path_3], utils.get_db_files(path_2))
self.assertEqual([path_1, path_2, path_3], utils.get_db_files(path_3))
self.assertEqual([], utils.get_db_files(dbdir))
os.unlink(path_1)
self.assertEqual([path_2, path_3], utils.get_db_files(path_1))
self.assertEqual([path_2, path_3], utils.get_db_files(path_2))
self.assertEqual([path_2, path_3], utils.get_db_files(path_3))
os.unlink(path_2)
self.assertEqual([path_3], utils.get_db_files(path_1))
self.assertEqual([path_3], utils.get_db_files(path_2))
self.assertEqual([path_3], utils.get_db_files(path_3))
os.unlink(path_3)
self.assertEqual([], utils.get_db_files(path_1))
self.assertEqual([], utils.get_db_files(path_2))
self.assertEqual([], utils.get_db_files(path_3))
self.assertEqual([], utils.get_db_files('/path/to/nowhere'))
def test_get_redirect_data(self):
ts_now = utils.Timestamp.now()
headers = {'X-Backend-Redirect-Timestamp': ts_now.internal}
response = FakeResponse(200, headers, b'')
self.assertIsNone(utils.get_redirect_data(response))
headers = {'Location': '/a/c/o',
'X-Backend-Redirect-Timestamp': ts_now.internal}
response = FakeResponse(200, headers, b'')
path, ts = utils.get_redirect_data(response)
self.assertEqual('a/c', path)
self.assertEqual(ts_now, ts)
headers = {'Location': '/a/c',
'X-Backend-Redirect-Timestamp': ts_now.internal}
response = FakeResponse(200, headers, b'')
path, ts = utils.get_redirect_data(response)
self.assertEqual('a/c', path)
self.assertEqual(ts_now, ts)
def do_test(headers):
response = FakeResponse(200, headers, b'')
with self.assertRaises(ValueError) as cm:
utils.get_redirect_data(response)
return cm.exception
exc = do_test({'Location': '/a',
'X-Backend-Redirect-Timestamp': ts_now.internal})
self.assertIn('Invalid path', str(exc))
exc = do_test({'Location': '',
'X-Backend-Redirect-Timestamp': ts_now.internal})
self.assertIn('Invalid path', str(exc))
exc = do_test({'Location': '/a/c',
'X-Backend-Redirect-Timestamp': 'bad'})
self.assertIn('Invalid timestamp', str(exc))
exc = do_test({'Location': '/a/c'})
self.assertIn('Invalid timestamp', str(exc))
exc = do_test({'Location': '/a/c',
'X-Backend-Redirect-Timestamp': '-1'})
self.assertIn('Invalid timestamp', str(exc))
@mock.patch('pkg_resources.load_entry_point')
def test_load_pkg_resource(self, mock_driver):
tests = {
('swift.diskfile', 'egg:swift#replication.fs'):
('swift', 'swift.diskfile', 'replication.fs'),
('swift.diskfile', 'egg:swift#erasure_coding.fs'):
('swift', 'swift.diskfile', 'erasure_coding.fs'),
('swift.section', 'egg:swift#thing.other'):
('swift', 'swift.section', 'thing.other'),
('swift.section', 'swift#thing.other'):
('swift', 'swift.section', 'thing.other'),
('swift.section', 'thing.other'):
('swift', 'swift.section', 'thing.other'),
}
for args, expected in tests.items():
utils.load_pkg_resource(*args)
mock_driver.assert_called_with(*expected)
with self.assertRaises(TypeError) as cm:
args = ('swift.diskfile', 'nog:swift#replication.fs')
utils.load_pkg_resource(*args)
self.assertEqual("Unhandled URI scheme: 'nog'", str(cm.exception))
class ResellerConfReader(unittest.TestCase):
def setUp(self):
self.default_rules = {'operator_roles': ['admin', 'swiftoperator'],
'service_roles': [],
'require_group': ''}
def test_defaults(self):
conf = {}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_'])
self.assertEqual(options['AUTH_'], self.default_rules)
def test_same_as_default(self):
conf = {'reseller_prefix': 'AUTH',
'operator_roles': 'admin, swiftoperator'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_'])
self.assertEqual(options['AUTH_'], self.default_rules)
def test_single_blank_reseller(self):
conf = {'reseller_prefix': ''}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
self.assertEqual(options[''], self.default_rules)
def test_single_blank_reseller_with_conf(self):
conf = {'reseller_prefix': '',
"''operator_roles": 'role1, role2'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
self.assertEqual(options[''].get('operator_roles'),
['role1', 'role2'])
self.assertEqual(options[''].get('service_roles'),
self.default_rules.get('service_roles'))
self.assertEqual(options[''].get('require_group'),
self.default_rules.get('require_group'))
def test_multiple_same_resellers(self):
conf = {'reseller_prefix': " '' , '' "}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
conf = {'reseller_prefix': '_, _'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['_'])
conf = {'reseller_prefix': 'AUTH, PRE2, AUTH, PRE2'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', 'PRE2_'])
def test_several_resellers_with_conf(self):
conf = {'reseller_prefix': 'PRE1, PRE2',
'PRE1_operator_roles': 'role1, role2',
'PRE1_service_roles': 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['PRE1_', 'PRE2_'])
self.assertEqual(set(['role1', 'role2']),
set(options['PRE1_'].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options['PRE1_'].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options['PRE1_'].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_several_resellers_first_blank(self):
conf = {'reseller_prefix': " '' , PRE2",
"''operator_roles": 'role1, role2',
"''service_roles": 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['', 'PRE2_'])
self.assertEqual(set(['role1', 'role2']),
set(options[''].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options[''].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options[''].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_several_resellers_with_blank_comma(self):
conf = {'reseller_prefix': "AUTH , '', PRE2",
"''operator_roles": 'role1, role2',
"''service_roles": 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', '', 'PRE2_'])
self.assertEqual(set(['admin', 'swiftoperator']),
set(options['AUTH_'].get('operator_roles')))
self.assertEqual(set(['role1', 'role2']),
set(options[''].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual([],
options['AUTH_'].get('service_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options[''].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options['AUTH_'].get('require_group'))
self.assertEqual('', options[''].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_stray_comma(self):
conf = {'reseller_prefix': "AUTH ,, PRE2",
"''operator_roles": 'role1, role2',
"''service_roles": 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', 'PRE2_'])
self.assertEqual(set(['admin', 'swiftoperator']),
set(options['AUTH_'].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual([],
options['AUTH_'].get('service_roles'))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options['AUTH_'].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_multiple_stray_commas_resellers(self):
conf = {'reseller_prefix': ' , , ,'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
self.assertEqual(options[''], self.default_rules)
def test_unprefixed_options(self):
conf = {'reseller_prefix': "AUTH , '', PRE2",
"operator_roles": 'role1, role2',
"service_roles": 'role3, role4',
'require_group': 'auth_blank_group',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', '', 'PRE2_'])
self.assertEqual(set(['role1', 'role2']),
set(options['AUTH_'].get('operator_roles')))
self.assertEqual(set(['role1', 'role2']),
set(options[''].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options['AUTH_'].get('service_roles')))
self.assertEqual(set(['role3', 'role4']),
set(options[''].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('auth_blank_group',
options['AUTH_'].get('require_group'))
self.assertEqual('auth_blank_group', options[''].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
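# Summary of the reseller-option behaviour exercised above:
# config_read_reseller_options() turns each entry of 'reseller_prefix' into
# a prefix key ('AUTH' -> 'AUTH_', a quoted '' stays the empty string, and
# stray commas are dropped) and collects per-prefix options such as
# <PREFIX>_operator_roles, falling back to unprefixed options or to the
# supplied default rules when a prefix defines none of its own.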
class TestUnlinkOlder(unittest.TestCase):
def setUp(self):
self.tempdir = mkdtemp()
self.mtime = {}
self.ts = make_timestamp_iter()
def tearDown(self):
rmtree(self.tempdir, ignore_errors=True)
def touch(self, fpath, mtime=None):
self.mtime[fpath] = mtime or next(self.ts)
open(fpath, 'w').close()
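# getmtime() is patched below because touch() records an exact timestamp
# from make_timestamp_iter() for each file; real filesystem mtimes can be
# coarser than those generated timestamps, so the tests read mtimes back
# through this recorded mapping instead of trusting the filesystem.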
@contextlib.contextmanager
def high_resolution_getmtime(self):
orig_getmtime = os.path.getmtime
def mock_getmtime(fpath):
mtime = self.mtime.get(fpath)
if mtime is None:
mtime = orig_getmtime(fpath)
return mtime
with mock.patch('os.path.getmtime', mock_getmtime):
yield
def test_unlink_older_than_path_not_exists(self):
path = os.path.join(self.tempdir, 'does-not-exist')
# just make sure it doesn't blow up
utils.unlink_older_than(path, next(self.ts))
def test_unlink_older_than_file(self):
path = os.path.join(self.tempdir, 'some-file')
self.touch(path)
with self.assertRaises(OSError) as ctx:
utils.unlink_older_than(path, next(self.ts))
self.assertEqual(ctx.exception.errno, errno.ENOTDIR)
def test_unlink_older_than_now(self):
self.touch(os.path.join(self.tempdir, 'test'))
with self.high_resolution_getmtime():
utils.unlink_older_than(self.tempdir, next(self.ts))
self.assertEqual([], os.listdir(self.tempdir))
def test_unlink_not_old_enough(self):
start = next(self.ts)
self.touch(os.path.join(self.tempdir, 'test'))
with self.high_resolution_getmtime():
utils.unlink_older_than(self.tempdir, start)
self.assertEqual(['test'], os.listdir(self.tempdir))
def test_unlink_mixed(self):
self.touch(os.path.join(self.tempdir, 'first'))
cutoff = next(self.ts)
self.touch(os.path.join(self.tempdir, 'second'))
with self.high_resolution_getmtime():
utils.unlink_older_than(self.tempdir, cutoff)
self.assertEqual(['second'], os.listdir(self.tempdir))
def test_unlink_paths(self):
paths = []
for item in ('first', 'second', 'third'):
path = os.path.join(self.tempdir, item)
self.touch(path)
paths.append(path)
# don't unlink everyone
with self.high_resolution_getmtime():
utils.unlink_paths_older_than(paths[:2], next(self.ts))
self.assertEqual(['third'], os.listdir(self.tempdir))
def test_unlink_empty_paths(self):
# just make sure it doesn't blow up
utils.unlink_paths_older_than([], next(self.ts))
def test_unlink_not_exists_paths(self):
path = os.path.join(self.tempdir, 'does-not-exist')
# just make sure it doesn't blow up
utils.unlink_paths_older_than([path], next(self.ts))
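# The tests below poke at the module-level swift-info registry directly:
# utils._swift_info maps a section name ('swift' for the default section)
# to a dict of capabilities, utils._swift_admin_info holds admin-only data
# that get_swift_info(admin=True) exposes under an 'admin' key, and
# register_swift_info() rejects the reserved section name 'admin' as well
# as any section or capability name containing a '.'.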
class TestSwiftInfo(unittest.TestCase):
def tearDown(self):
utils._swift_info = {}
utils._swift_admin_info = {}
def test_register_swift_info(self):
utils.register_swift_info(foo='bar')
utils.register_swift_info(lorem='ipsum')
utils.register_swift_info('cap1', cap1_foo='cap1_bar')
utils.register_swift_info('cap1', cap1_lorem='cap1_ipsum')
self.assertTrue('swift' in utils._swift_info)
self.assertTrue('foo' in utils._swift_info['swift'])
self.assertEqual(utils._swift_info['swift']['foo'], 'bar')
self.assertTrue('lorem' in utils._swift_info['swift'])
self.assertEqual(utils._swift_info['swift']['lorem'], 'ipsum')
self.assertTrue('cap1' in utils._swift_info)
self.assertTrue('cap1_foo' in utils._swift_info['cap1'])
self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar')
self.assertTrue('cap1_lorem' in utils._swift_info['cap1'])
self.assertEqual(utils._swift_info['cap1']['cap1_lorem'], 'cap1_ipsum')
self.assertRaises(ValueError,
utils.register_swift_info, 'admin', foo='bar')
self.assertRaises(ValueError,
utils.register_swift_info, 'disallowed_sections',
disallowed_sections=None)
utils.register_swift_info('goodkey', foo='5.6')
self.assertRaises(ValueError,
utils.register_swift_info, 'bad.key', foo='5.6')
data = {'bad.key': '5.6'}
self.assertRaises(ValueError,
utils.register_swift_info, 'goodkey', **data)
def test_get_swift_info(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info()
self.assertNotIn('admin', info)
self.assertIn('swift', info)
self.assertIn('foo', info['swift'])
self.assertEqual(utils._swift_info['swift']['foo'], 'bar')
self.assertIn('cap1', info)
self.assertIn('cap1_foo', info['cap1'])
self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar')
def test_get_swift_info_with_disallowed_sections(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'},
'cap2': {'cap2_foo': 'cap2_bar'},
'cap3': {'cap3_foo': 'cap3_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(disallowed_sections=['cap1', 'cap3'])
self.assertNotIn('admin', info)
self.assertIn('swift', info)
self.assertIn('foo', info['swift'])
self.assertEqual(info['swift']['foo'], 'bar')
self.assertNotIn('cap1', info)
self.assertIn('cap2', info)
self.assertIn('cap2_foo', info['cap2'])
self.assertEqual(info['cap2']['cap2_foo'], 'cap2_bar')
self.assertNotIn('cap3', info)
def test_register_swift_admin_info(self):
utils.register_swift_info(admin=True, admin_foo='admin_bar')
utils.register_swift_info(admin=True, admin_lorem='admin_ipsum')
utils.register_swift_info('cap1', admin=True, ac1_foo='ac1_bar')
utils.register_swift_info('cap1', admin=True, ac1_lorem='ac1_ipsum')
self.assertIn('swift', utils._swift_admin_info)
self.assertIn('admin_foo', utils._swift_admin_info['swift'])
self.assertEqual(
utils._swift_admin_info['swift']['admin_foo'], 'admin_bar')
self.assertIn('admin_lorem', utils._swift_admin_info['swift'])
self.assertEqual(
utils._swift_admin_info['swift']['admin_lorem'], 'admin_ipsum')
self.assertIn('cap1', utils._swift_admin_info)
self.assertIn('ac1_foo', utils._swift_admin_info['cap1'])
self.assertEqual(
utils._swift_admin_info['cap1']['ac1_foo'], 'ac1_bar')
self.assertIn('ac1_lorem', utils._swift_admin_info['cap1'])
self.assertEqual(
utils._swift_admin_info['cap1']['ac1_lorem'], 'ac1_ipsum')
self.assertNotIn('swift', utils._swift_info)
self.assertNotIn('cap1', utils._swift_info)
def test_get_swift_admin_info(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(admin=True)
self.assertIn('admin', info)
self.assertIn('admin_cap1', info['admin'])
self.assertIn('ac1_foo', info['admin']['admin_cap1'])
self.assertEqual(info['admin']['admin_cap1']['ac1_foo'], 'ac1_bar')
self.assertIn('swift', info)
self.assertIn('foo', info['swift'])
self.assertEqual(utils._swift_info['swift']['foo'], 'bar')
self.assertIn('cap1', info)
self.assertIn('cap1_foo', info['cap1'])
self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar')
def test_get_swift_admin_info_with_disallowed_sections(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'},
'cap2': {'cap2_foo': 'cap2_bar'},
'cap3': {'cap3_foo': 'cap3_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(
admin=True, disallowed_sections=['cap1', 'cap3'])
self.assertIn('admin', info)
self.assertIn('admin_cap1', info['admin'])
self.assertIn('ac1_foo', info['admin']['admin_cap1'])
self.assertEqual(info['admin']['admin_cap1']['ac1_foo'], 'ac1_bar')
self.assertIn('disallowed_sections', info['admin'])
self.assertIn('cap1', info['admin']['disallowed_sections'])
self.assertNotIn('cap2', info['admin']['disallowed_sections'])
self.assertIn('cap3', info['admin']['disallowed_sections'])
self.assertIn('swift', info)
self.assertIn('foo', info['swift'])
self.assertEqual(info['swift']['foo'], 'bar')
self.assertNotIn('cap1', info)
self.assertIn('cap2', info)
self.assertIn('cap2_foo', info['cap2'])
self.assertEqual(info['cap2']['cap2_foo'], 'cap2_bar')
self.assertNotIn('cap3', info)
def test_get_swift_admin_info_with_disallowed_sub_sections(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar',
'cap1_moo': 'cap1_baa'},
'cap2': {'cap2_foo': 'cap2_bar'},
'cap3': {'cap2_foo': 'cap2_bar'},
'cap4': {'a': {'b': {'c': 'c'},
'b.c': 'b.c'}}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(
admin=True, disallowed_sections=['cap1.cap1_foo', 'cap3',
'cap4.a.b.c'])
self.assertNotIn('cap3', info)
self.assertEqual(info['cap1']['cap1_moo'], 'cap1_baa')
self.assertNotIn('cap1_foo', info['cap1'])
self.assertNotIn('c', info['cap4']['a']['b'])
self.assertEqual(info['cap4']['a']['b.c'], 'b.c')
def test_get_swift_info_with_unmatched_disallowed_sections(self):
cap1 = {'cap1_foo': 'cap1_bar',
'cap1_moo': 'cap1_baa'}
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': cap1}
# expect no exceptions
info = utils.get_swift_info(
disallowed_sections=['cap2.cap1_foo', 'cap1.no_match',
'cap1.cap1_foo.no_match.no_match'])
self.assertEqual(info['cap1'], cap1)
class TestFileLikeIter(unittest.TestCase):
def test_iter_file_iter(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
chunks = []
for chunk in utils.FileLikeIter(in_iter):
chunks.append(chunk)
self.assertEqual(chunks, in_iter)
def test_next(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
chunks = []
iter_file = utils.FileLikeIter(in_iter)
while True:
try:
chunk = next(iter_file)
except StopIteration:
break
chunks.append(chunk)
self.assertEqual(chunks, in_iter)
def test_read(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
iter_file = utils.FileLikeIter(in_iter)
self.assertEqual(iter_file.read(), b''.join(in_iter))
def test_read_with_size(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
chunks = []
iter_file = utils.FileLikeIter(in_iter)
while True:
chunk = iter_file.read(2)
if not chunk:
break
self.assertTrue(len(chunk) <= 2)
chunks.append(chunk)
self.assertEqual(b''.join(chunks), b''.join(in_iter))
def test_read_with_size_zero(self):
# makes little sense, but file supports it, so...
self.assertEqual(utils.FileLikeIter(b'abc').read(0), b'')
def test_readline(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
lines = []
iter_file = utils.FileLikeIter(in_iter)
while True:
line = iter_file.readline()
if not line:
break
lines.append(line)
self.assertEqual(
lines,
[v if v == b'trailing.' else v + b'\n'
for v in b''.join(in_iter).split(b'\n')])
def test_readline2(self):
self.assertEqual(
utils.FileLikeIter([b'abc', b'def\n']).readline(4),
b'abcd')
def test_readline3(self):
self.assertEqual(
utils.FileLikeIter([b'a' * 1111, b'bc\ndef']).readline(),
(b'a' * 1111) + b'bc\n')
def test_readline_with_size(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
lines = []
iter_file = utils.FileLikeIter(in_iter)
while True:
line = iter_file.readline(2)
if not line:
break
lines.append(line)
self.assertEqual(
lines,
[b'ab', b'c\n', b'd\n', b'ef', b'g\n', b'h\n', b'ij', b'\n', b'\n',
b'k\n', b'tr', b'ai', b'li', b'ng', b'.'])
def test_readlines(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
lines = utils.FileLikeIter(in_iter).readlines()
self.assertEqual(
lines,
[v if v == b'trailing.' else v + b'\n'
for v in b''.join(in_iter).split(b'\n')])
def test_readlines_with_size(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
iter_file = utils.FileLikeIter(in_iter)
lists_of_lines = []
while True:
lines = iter_file.readlines(2)
if not lines:
break
lists_of_lines.append(lines)
self.assertEqual(
lists_of_lines,
[[b'ab'], [b'c\n'], [b'd\n'], [b'ef'], [b'g\n'], [b'h\n'], [b'ij'],
[b'\n', b'\n'], [b'k\n'], [b'tr'], [b'ai'], [b'li'], [b'ng'],
[b'.']])
def test_close(self):
iter_file = utils.FileLikeIter([b'a', b'b', b'c'])
self.assertEqual(next(iter_file), b'a')
iter_file.close()
self.assertTrue(iter_file.closed)
self.assertRaises(ValueError, iter_file.next)
self.assertRaises(ValueError, iter_file.read)
self.assertRaises(ValueError, iter_file.readline)
self.assertRaises(ValueError, iter_file.readlines)
# Just make sure repeated close calls don't raise an Exception
iter_file.close()
self.assertTrue(iter_file.closed)
class TestStatsdLogging(unittest.TestCase):
def setUp(self):
def fake_getaddrinfo(host, port, *args):
# this is what a real getaddrinfo('localhost', port,
# socket.AF_INET) returned once
return [(socket.AF_INET, # address family
socket.SOCK_STREAM, # socket type
socket.IPPROTO_TCP, # socket protocol
'', # canonical name,
('127.0.0.1', port)), # socket address
(socket.AF_INET,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP,
'',
('127.0.0.1', port))]
self.real_getaddrinfo = utils.socket.getaddrinfo
self.getaddrinfo_patcher = mock.patch.object(
utils.socket, 'getaddrinfo', fake_getaddrinfo)
self.mock_getaddrinfo = self.getaddrinfo_patcher.start()
self.addCleanup(self.getaddrinfo_patcher.stop)
def test_get_logger_statsd_client_not_specified(self):
logger = utils.get_logger({}, 'some-name', log_route='some-route')
# white-box construction validation
self.assertIsNone(logger.logger.statsd_client)
def test_get_logger_statsd_client_defaults(self):
logger = utils.get_logger({'log_statsd_host': 'some.host.com'},
'some-name', log_route='some-route')
# white-box construction validation
self.assertTrue(isinstance(logger.logger.statsd_client,
utils.StatsdClient))
self.assertEqual(logger.logger.statsd_client._host, 'some.host.com')
self.assertEqual(logger.logger.statsd_client._port, 8125)
self.assertEqual(logger.logger.statsd_client._prefix, 'some-name.')
self.assertEqual(logger.logger.statsd_client._default_sample_rate, 1)
logger.set_statsd_prefix('some-name.more-specific')
self.assertEqual(logger.logger.statsd_client._prefix,
'some-name.more-specific.')
logger.set_statsd_prefix('')
self.assertEqual(logger.logger.statsd_client._prefix, '')
def test_get_logger_statsd_client_non_defaults(self):
logger = utils.get_logger({
'log_statsd_host': 'another.host.com',
'log_statsd_port': '9876',
'log_statsd_default_sample_rate': '0.75',
'log_statsd_sample_rate_factor': '0.81',
'log_statsd_metric_prefix': 'tomato.sauce',
}, 'some-name', log_route='some-route')
self.assertEqual(logger.logger.statsd_client._prefix,
'tomato.sauce.some-name.')
logger.set_statsd_prefix('some-name.more-specific')
self.assertEqual(logger.logger.statsd_client._prefix,
'tomato.sauce.some-name.more-specific.')
logger.set_statsd_prefix('')
self.assertEqual(logger.logger.statsd_client._prefix, 'tomato.sauce.')
self.assertEqual(logger.logger.statsd_client._host, 'another.host.com')
self.assertEqual(logger.logger.statsd_client._port, 9876)
self.assertEqual(logger.logger.statsd_client._default_sample_rate,
0.75)
self.assertEqual(logger.logger.statsd_client._sample_rate_factor,
0.81)
def test_ipv4_or_ipv6_hostname_defaults_to_ipv4(self):
def stub_getaddrinfo_both_ipv4_and_ipv6(host, port, family, *rest):
if family == socket.AF_INET:
return [(socket.AF_INET, 'blah', 'blah', 'blah',
('127.0.0.1', int(port)))]
elif family == socket.AF_INET6:
# Implemented so an incorrectly ordered implementation (IPv6
# then IPv4) would realistically fail.
return [(socket.AF_INET6, 'blah', 'blah', 'blah',
('::1', int(port), 0, 0))]
with mock.patch.object(utils.socket, 'getaddrinfo',
new=stub_getaddrinfo_both_ipv4_and_ipv6):
logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET)
self.assertEqual(statsd_client._target, ('localhost', 9876))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET)
def test_ipv4_instantiation_and_socket_creation(self):
logger = utils.get_logger({
'log_statsd_host': '127.0.0.1',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET)
self.assertEqual(statsd_client._target, ('127.0.0.1', 9876))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET)
def test_ipv6_instantiation_and_socket_creation(self):
# We have to check the given hostname or IP for IPv4/IPv6 on logger
# instantiation so we don't call getaddrinfo() too often and don't have
# to call bind() on our socket to detect IPv4/IPv6 on every send.
#
# This test uses the real getaddrinfo, so we patch over the mock to
# put the real one back. If we just stop the mock, then
# unittest.exit() blows up, but stacking real-fake-real works okay.
with mock.patch.object(utils.socket, 'getaddrinfo',
self.real_getaddrinfo):
logger = utils.get_logger({
'log_statsd_host': '::1',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET6)
self.assertEqual(statsd_client._target, ('::1', 9876, 0, 0))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET6)
def test_bad_hostname_instantiation(self):
with mock.patch.object(utils.socket, 'getaddrinfo',
side_effect=utils.socket.gaierror("whoops")):
logger = utils.get_logger({
'log_statsd_host': 'i-am-not-a-hostname-or-ip',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET)
self.assertEqual(statsd_client._target,
('i-am-not-a-hostname-or-ip', 9876))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET)
# Maybe the DNS server gets fixed in a bit and it starts working... or
# maybe the DNS record hadn't propagated yet. In any case, failed
# statsd sends will warn in the logs until the DNS failure or invalid
# IP address in the configuration is fixed.
def test_sending_ipv6(self):
def fake_getaddrinfo(host, port, *args):
# this is what a real getaddrinfo('::1', port,
# socket.AF_INET6) returned once
return [(socket.AF_INET6,
socket.SOCK_STREAM,
socket.IPPROTO_TCP,
'', ('::1', port, 0, 0)),
(socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP,
'',
('::1', port, 0, 0))]
with mock.patch.object(utils.socket, 'getaddrinfo', fake_getaddrinfo):
logger = utils.get_logger({
'log_statsd_host': '::1',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
fl = FakeLogger()
statsd_client.logger = fl
mock_socket = MockUdpSocket()
statsd_client._open_socket = lambda *_: mock_socket
logger.increment('tunafish')
self.assertEqual(fl.get_lines_for_level('warning'), [])
self.assertEqual(mock_socket.sent,
[(b'some-name.tunafish:1|c', ('::1', 9876, 0, 0))])
def test_no_exception_when_cant_send_udp_packet(self):
logger = utils.get_logger({'log_statsd_host': 'some.host.com'})
statsd_client = logger.logger.statsd_client
fl = FakeLogger()
statsd_client.logger = fl
mock_socket = MockUdpSocket(sendto_errno=errno.EPERM)
statsd_client._open_socket = lambda *_: mock_socket
logger.increment('tunafish')
expected = ["Error sending UDP message to ('some.host.com', 8125): "
"[Errno 1] test errno 1"]
self.assertEqual(fl.get_lines_for_level('warning'), expected)
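# Sampling, as the assertions below expect it to work: the client compares
# a random() draw against the effective sample rate and only sends a packet
# when the draw falls below it; sampled packets carry a '|@<rate>' suffix,
# e.g. b'some-name.tribbles:1|c|@0.5', so the receiver can scale the counts
# back up.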
def test_sample_rates(self):
logger = utils.get_logger({'log_statsd_host': 'some.host.com'})
mock_socket = MockUdpSocket()
# encapsulation? what's that?
statsd_client = logger.logger.statsd_client
self.assertTrue(statsd_client.random is random.random)
statsd_client._open_socket = lambda *_: mock_socket
statsd_client.random = lambda: 0.50001
logger.increment('tribbles', sample_rate=0.5)
self.assertEqual(len(mock_socket.sent), 0)
statsd_client.random = lambda: 0.49999
logger.increment('tribbles', sample_rate=0.5)
self.assertEqual(len(mock_socket.sent), 1)
payload = mock_socket.sent[0][0]
self.assertTrue(payload.endswith(b"|@0.5"))
def test_sample_rates_with_sample_rate_factor(self):
logger = utils.get_logger({
'log_statsd_host': 'some.host.com',
'log_statsd_default_sample_rate': '0.82',
'log_statsd_sample_rate_factor': '0.91',
})
effective_sample_rate = 0.82 * 0.91
mock_socket = MockUdpSocket()
# encapsulation? what's that?
statsd_client = logger.logger.statsd_client
self.assertTrue(statsd_client.random is random.random)
statsd_client._open_socket = lambda *_: mock_socket
statsd_client.random = lambda: effective_sample_rate + 0.001
logger.increment('tribbles')
self.assertEqual(len(mock_socket.sent), 0)
statsd_client.random = lambda: effective_sample_rate - 0.001
logger.increment('tribbles')
self.assertEqual(len(mock_socket.sent), 1)
payload = mock_socket.sent[0][0]
suffix = "|@%s" % effective_sample_rate
if six.PY3:
suffix = suffix.encode('utf-8')
self.assertTrue(payload.endswith(suffix), payload)
effective_sample_rate = 0.587 * 0.91
statsd_client.random = lambda: effective_sample_rate - 0.001
logger.increment('tribbles', sample_rate=0.587)
self.assertEqual(len(mock_socket.sent), 2)
payload = mock_socket.sent[1][0]
suffix = "|@%s" % effective_sample_rate
if six.PY3:
suffix = suffix.encode('utf-8')
self.assertTrue(payload.endswith(suffix), payload)
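# utils.timing_stats() decorates controller methods and, judging by the
# assertions below, reports elapsed time via the controller's logger:
# successful and client-error responses are timed under '<METHOD>.timing',
# while 5xx responses (500, 507) go to '<METHOD>.errors.timing'.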
def test_timing_stats(self):
class MockController(object):
def __init__(self, status):
self.status = status
self.logger = self
self.args = ()
self.called = 'UNKNOWN'
def timing_since(self, *args):
self.called = 'timing'
self.args = args
@utils.timing_stats()
def METHOD(controller):
return Response(status=controller.status)
mock_controller = MockController(200)
METHOD(mock_controller)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(400)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(404)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(412)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(416)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(500)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.errors.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(507)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.errors.timing')
self.assertTrue(mock_controller.args[1] > 0)
class UnsafeXrange(object):
"""
Like xrange(limit), but with extra context switching to screw things up.
"""
def __init__(self, upper_bound):
self.current = 0
self.concurrent_calls = 0
self.upper_bound = upper_bound
self.concurrent_call = False
def __iter__(self):
return self
def next(self):
if self.concurrent_calls > 0:
self.concurrent_call = True
self.concurrent_calls += 1
try:
if self.current >= self.upper_bound:
raise StopIteration
else:
val = self.current
self.current += 1
eventlet.sleep() # yield control
return val
finally:
self.concurrent_calls -= 1
__next__ = next
class TestAffinityKeyFunction(unittest.TestCase):
def setUp(self):
self.nodes = [dict(id=0, region=1, zone=1),
dict(id=1, region=1, zone=2),
dict(id=2, region=2, zone=1),
dict(id=3, region=2, zone=2),
dict(id=4, region=3, zone=1),
dict(id=5, region=3, zone=2),
dict(id=6, region=4, zone=0),
dict(id=7, region=4, zone=1)]
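# affinity_key_function() parses a comma-separated list of
# '<region or region+zone>=<priority>' terms (e.g. "r1=100, r4=200, r3z1=1")
# and returns a sort key ordering matching nodes by ascending priority,
# with zone-specific entries taking precedence over region-wide ones and
# unmatched nodes keeping their relative order at the end -- which is what
# the expected orderings below encode.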
def test_single_region(self):
keyfn = utils.affinity_key_function("r3=1")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([4, 5, 0, 1, 2, 3, 6, 7], ids)
def test_bogus_value(self):
self.assertRaises(ValueError,
utils.affinity_key_function, "r3")
self.assertRaises(ValueError,
utils.affinity_key_function, "r3=elephant")
def test_empty_value(self):
# Empty's okay, it just means no preference
keyfn = utils.affinity_key_function("")
self.assertTrue(callable(keyfn))
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids)
def test_all_whitespace_value(self):
# Empty's okay, it just means no preference
keyfn = utils.affinity_key_function(" \n")
self.assertTrue(callable(keyfn))
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids)
def test_with_zone_zero(self):
keyfn = utils.affinity_key_function("r4z0=1")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([6, 0, 1, 2, 3, 4, 5, 7], ids)
def test_multiple(self):
keyfn = utils.affinity_key_function("r1=100, r4=200, r3z1=1")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([4, 0, 1, 6, 7, 2, 3, 5], ids)
def test_more_specific_after_less_specific(self):
keyfn = utils.affinity_key_function("r2=100, r2z2=50")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([3, 2, 0, 1, 4, 5, 6, 7], ids)
class TestAffinityLocalityPredicate(unittest.TestCase):
def setUp(self):
self.nodes = [dict(id=0, region=1, zone=1),
dict(id=1, region=1, zone=2),
dict(id=2, region=2, zone=1),
dict(id=3, region=2, zone=2),
dict(id=4, region=3, zone=1),
dict(id=5, region=3, zone=2),
dict(id=6, region=4, zone=0),
dict(id=7, region=4, zone=1)]
def test_empty(self):
pred = utils.affinity_locality_predicate('')
self.assertTrue(pred is None)
def test_region(self):
pred = utils.affinity_locality_predicate('r1')
self.assertTrue(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0, 1], ids)
def test_zone(self):
pred = utils.affinity_locality_predicate('r1z1')
self.assertTrue(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0], ids)
def test_multiple(self):
pred = utils.affinity_locality_predicate('r1, r3, r4z0')
self.assertTrue(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0, 1, 4, 5, 6], ids)
def test_invalid(self):
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'falafel')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r8zQ')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r2d2')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r1z1=1')
class TestRateLimitedIterator(unittest.TestCase):
def run_under_pseudo_time(
self, func, *args, **kwargs):
curr_time = [42.0]
def my_time():
curr_time[0] += 0.001
return curr_time[0]
def my_sleep(duration):
curr_time[0] += 0.001
curr_time[0] += duration
with patch('time.time', my_time), \
patch('eventlet.sleep', my_sleep):
return func(*args, **kwargs)
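# run_under_pseudo_time() swaps time.time() and eventlet.sleep() for a fake
# clock so the rate-limiting tests run instantly and deterministically:
# every time() call nudges the clock forward a little and every sleep()
# advances it by the requested duration.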
def test_rate_limiting(self):
def testfunc():
limited_iterator = utils.RateLimitedIterator(range(9999), 100)
got = []
started_at = time.time()
try:
while time.time() - started_at < 0.1:
got.append(next(limited_iterator))
except StopIteration:
pass
return got
got = self.run_under_pseudo_time(testfunc)
# it's 11, not 10, because ratelimiting doesn't apply to the very
# first element.
self.assertEqual(len(got), 11)
def test_rate_limiting_sometimes(self):
def testfunc():
limited_iterator = utils.RateLimitedIterator(
range(9999), 100,
ratelimit_if=lambda item: item % 23 != 0)
got = []
started_at = time.time()
try:
while time.time() - started_at < 0.5:
got.append(next(limited_iterator))
except StopIteration:
pass
return got
got = self.run_under_pseudo_time(testfunc)
# we'd get 51 without the ratelimit_if, but because 0, 23 and 46
# weren't subject to ratelimiting, we get 54 instead
self.assertEqual(len(got), 54)
def test_limit_after(self):
def testfunc():
limited_iterator = utils.RateLimitedIterator(
range(9999), 100, limit_after=5)
got = []
started_at = time.time()
try:
while time.time() - started_at < 0.1:
got.append(next(limited_iterator))
except StopIteration:
pass
return got
got = self.run_under_pseudo_time(testfunc)
# it's 16, not 15, because ratelimiting doesn't apply to the very
# first element.
self.assertEqual(len(got), 16)
class TestGreenthreadSafeIterator(unittest.TestCase):
def increment(self, iterable):
plus_ones = []
for n in iterable:
plus_ones.append(n + 1)
return plus_ones
def test_setup_works(self):
# it should work without concurrent access
self.assertEqual([0, 1, 2, 3], list(UnsafeXrange(4)))
iterable = UnsafeXrange(10)
pile = eventlet.GreenPile(2)
for _ in range(2):
pile.spawn(self.increment, iterable)
sorted([resp for resp in pile])  # drain the pile; waits for both spawns
self.assertTrue(
iterable.concurrent_call, 'test setup is insufficiently crazy')
def test_access_is_serialized(self):
pile = eventlet.GreenPile(2)
unsafe_iterable = UnsafeXrange(10)
iterable = utils.GreenthreadSafeIterator(unsafe_iterable)
for _ in range(2):
pile.spawn(self.increment, iterable)
response = sorted(sum([resp for resp in pile], []))
self.assertEqual(list(range(1, 11)), response)
self.assertTrue(
not unsafe_iterable.concurrent_call, 'concurrent call occurred')
class TestStatsdLoggingDelegation(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind(('localhost', 0))
self.port = self.sock.getsockname()[1]
self.queue = Queue()
self.reader_thread = threading.Thread(target=self.statsd_reader)
self.reader_thread.daemon = True
self.reader_thread.start()
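# Harness: setUp() binds a real UDP socket to an ephemeral localhost port
# and starts a daemon thread that copies each received datagram onto
# self.queue; tearDown() sends a 'STOP' counter through the logger so the
# reader thread returns, then joins it and closes the socket.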
def tearDown(self):
# The "no-op when disabled" test doesn't set up a real logger, so
# create one here so we can tell the reader thread to stop.
if not getattr(self, 'logger', None):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
}, 'some-name')
self.logger.increment('STOP')
self.reader_thread.join(timeout=4)
self.sock.close()
del self.logger
def statsd_reader(self):
while True:
try:
payload = self.sock.recv(4096)
if payload and b'STOP' in payload:
return 42
self.queue.put(payload)
except Exception as e:
sys.stderr.write('statsd_reader thread: %r' % (e,))
break
def _send_and_get(self, sender_fn, *args, **kwargs):
"""
Because the client library may not actually send a packet with
sample_rate < 1, we keep trying until we get one through.
"""
got = None
while not got:
sender_fn(*args, **kwargs)
try:
got = self.queue.get(timeout=0.5)
except Empty:
pass
return got
def assertStat(self, expected, sender_fn, *args, **kwargs):
got = self._send_and_get(sender_fn, *args, **kwargs)
if six.PY3:
got = got.decode('utf-8')
return self.assertEqual(expected, got)
def assertStatMatches(self, expected_regexp, sender_fn, *args, **kwargs):
got = self._send_and_get(sender_fn, *args, **kwargs)
if six.PY3:
got = got.decode('utf-8')
return self.assertTrue(re.search(expected_regexp, got),
[got, expected_regexp])
def test_methods_are_no_ops_when_not_enabled(self):
logger = utils.get_logger({
# No "log_statsd_host" means "disabled"
'log_statsd_port': str(self.port),
}, 'some-name')
# Delegate methods are no-ops
self.assertIsNone(logger.update_stats('foo', 88))
self.assertIsNone(logger.update_stats('foo', 88, 0.57))
self.assertIsNone(logger.update_stats('foo', 88,
sample_rate=0.61))
self.assertIsNone(logger.increment('foo'))
self.assertIsNone(logger.increment('foo', 0.57))
self.assertIsNone(logger.increment('foo', sample_rate=0.61))
self.assertIsNone(logger.decrement('foo'))
self.assertIsNone(logger.decrement('foo', 0.57))
self.assertIsNone(logger.decrement('foo', sample_rate=0.61))
self.assertIsNone(logger.timing('foo', 88.048))
self.assertIsNone(logger.timing('foo', 88.57, 0.34))
self.assertIsNone(logger.timing('foo', 88.998, sample_rate=0.82))
self.assertIsNone(logger.timing_since('foo', 8938))
self.assertIsNone(logger.timing_since('foo', 8948, 0.57))
self.assertIsNone(logger.timing_since('foo', 849398,
sample_rate=0.61))
# Now, the queue should be empty (no UDP packets sent)
self.assertRaises(Empty, self.queue.get_nowait)
def test_delegate_methods_with_no_default_sample_rate(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
}, 'some-name')
self.assertStat('some-name.some.counter:1|c', self.logger.increment,
'some.counter')
self.assertStat('some-name.some.counter:-1|c', self.logger.decrement,
'some.counter')
self.assertStat('some-name.some.operation:4900.0|ms',
self.logger.timing, 'some.operation', 4.9 * 1000)
self.assertStatMatches(r'some-name\.another\.operation:\d+\.\d+\|ms',
self.logger.timing_since, 'another.operation',
time.time())
self.assertStat('some-name.another.counter:42|c',
self.logger.update_stats, 'another.counter', 42)
# Each call can override the sample_rate (also, bonus prefix test)
self.logger.set_statsd_prefix('pfx')
self.assertStat('pfx.some.counter:1|c|@0.972', self.logger.increment,
'some.counter', sample_rate=0.972)
self.assertStat('pfx.some.counter:-1|c|@0.972', self.logger.decrement,
'some.counter', sample_rate=0.972)
self.assertStat('pfx.some.operation:4900.0|ms|@0.972',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.972)
self.assertStatMatches(r'pfx\.another\.op:\d+\.\d+\|ms|@0.972',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.972)
self.assertStat('pfx.another.counter:3|c|@0.972',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.972)
# Can override sample_rate with non-keyword arg
self.logger.set_statsd_prefix('')
self.assertStat('some.counter:1|c|@0.939', self.logger.increment,
'some.counter', 0.939)
self.assertStat('some.counter:-1|c|@0.939', self.logger.decrement,
'some.counter', 0.939)
self.assertStat('some.operation:4900.0|ms|@0.939',
self.logger.timing, 'some.operation',
4.9 * 1000, 0.939)
self.assertStatMatches(r'another\.op:\d+\.\d+\|ms|@0.939',
self.logger.timing_since, 'another.op',
time.time(), 0.939)
self.assertStat('another.counter:3|c|@0.939',
self.logger.update_stats, 'another.counter', 3, 0.939)
def test_delegate_methods_with_default_sample_rate(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
'log_statsd_default_sample_rate': '0.93',
}, 'pfx')
self.assertStat('pfx.some.counter:1|c|@0.93', self.logger.increment,
'some.counter')
self.assertStat('pfx.some.counter:-1|c|@0.93', self.logger.decrement,
'some.counter')
self.assertStat('pfx.some.operation:4760.0|ms|@0.93',
self.logger.timing, 'some.operation', 4.76 * 1000)
self.assertStatMatches(r'pfx\.another\.op:\d+\.\d+\|ms|@0.93',
self.logger.timing_since, 'another.op',
time.time())
self.assertStat('pfx.another.counter:3|c|@0.93',
self.logger.update_stats, 'another.counter', 3)
# Each call can override the sample_rate
self.assertStat('pfx.some.counter:1|c|@0.9912', self.logger.increment,
'some.counter', sample_rate=0.9912)
self.assertStat('pfx.some.counter:-1|c|@0.9912', self.logger.decrement,
'some.counter', sample_rate=0.9912)
self.assertStat('pfx.some.operation:4900.0|ms|@0.9912',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.9912)
self.assertStatMatches(r'pfx\.another\.op:\d+\.\d+\|ms|@0.9912',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.9912)
self.assertStat('pfx.another.counter:3|c|@0.9912',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.9912)
# Can override sample_rate with non-keyword arg
self.logger.set_statsd_prefix('')
self.assertStat('some.counter:1|c|@0.987654', self.logger.increment,
'some.counter', 0.987654)
self.assertStat('some.counter:-1|c|@0.987654', self.logger.decrement,
'some.counter', 0.987654)
self.assertStat('some.operation:4900.0|ms|@0.987654',
self.logger.timing, 'some.operation',
4.9 * 1000, 0.987654)
self.assertStatMatches(r'another\.op:\d+\.\d+\|ms|@0.987654',
self.logger.timing_since, 'another.op',
time.time(), 0.987654)
self.assertStat('another.counter:3|c|@0.987654',
self.logger.update_stats, 'another.counter',
3, 0.987654)
def test_delegate_methods_with_metric_prefix(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
'log_statsd_metric_prefix': 'alpha.beta',
}, 'pfx')
self.assertStat('alpha.beta.pfx.some.counter:1|c',
self.logger.increment, 'some.counter')
self.assertStat('alpha.beta.pfx.some.counter:-1|c',
self.logger.decrement, 'some.counter')
self.assertStat('alpha.beta.pfx.some.operation:4760.0|ms',
self.logger.timing, 'some.operation', 4.76 * 1000)
self.assertStatMatches(
r'alpha\.beta\.pfx\.another\.op:\d+\.\d+\|ms',
self.logger.timing_since, 'another.op', time.time())
self.assertStat('alpha.beta.pfx.another.counter:3|c',
self.logger.update_stats, 'another.counter', 3)
self.logger.set_statsd_prefix('')
self.assertStat('alpha.beta.some.counter:1|c|@0.9912',
self.logger.increment, 'some.counter',
sample_rate=0.9912)
self.assertStat('alpha.beta.some.counter:-1|c|@0.9912',
self.logger.decrement, 'some.counter', 0.9912)
self.assertStat('alpha.beta.some.operation:4900.0|ms|@0.9912',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.9912)
self.assertStatMatches(r'alpha\.beta\.another\.op:\d+\.\d+\|ms|@0.9912',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.9912)
self.assertStat('alpha.beta.another.counter:3|c|@0.9912',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.9912)
@reset_logger_state
def test_thread_locals(self):
logger = utils.get_logger(None)
# test the setter
logger.thread_locals = ('id', 'ip')
self.assertEqual(logger.thread_locals, ('id', 'ip'))
# reset
logger.thread_locals = (None, None)
self.assertEqual(logger.thread_locals, (None, None))
logger.txn_id = '1234'
logger.client_ip = '1.2.3.4'
self.assertEqual(logger.thread_locals, ('1234', '1.2.3.4'))
logger.txn_id = '5678'
logger.client_ip = '5.6.7.8'
self.assertEqual(logger.thread_locals, ('5678', '5.6.7.8'))
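# The next few tests cover the fsync/fdatasync fallbacks as exercised here:
# utils.fdatasync() uses os.fdatasync when the platform has it and falls
# back to utils.fsync() otherwise; utils.fsync() prefers fcntl F_FULLSYNC
# where it exists (surfacing an fcntl IOError as OSError) and calls
# os.fsync when it does not.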
def test_no_fdatasync(self):
called = []
class NoFdatasync(object):
pass
def fsync(fd):
called.append(fd)
with patch('swift.common.utils.os', NoFdatasync()):
with patch('swift.common.utils.fsync', fsync):
utils.fdatasync(12345)
self.assertEqual(called, [12345])
def test_yes_fdatasync(self):
called = []
class YesFdatasync(object):
def fdatasync(self, fd):
called.append(fd)
with patch('swift.common.utils.os', YesFdatasync()):
utils.fdatasync(12345)
self.assertEqual(called, [12345])
def test_fsync_bad_fullsync(self):
class FCNTL(object):
F_FULLSYNC = 123
def fcntl(self, fd, op):
raise IOError(18)
with patch('swift.common.utils.fcntl', FCNTL()):
self.assertRaises(OSError, lambda: utils.fsync(12345))
def test_fsync_f_fullsync(self):
called = []
class FCNTL(object):
F_FULLSYNC = 123
def fcntl(self, fd, op):
called[:] = [fd, op]
return 0
with patch('swift.common.utils.fcntl', FCNTL()):
utils.fsync(12345)
self.assertEqual(called, [12345, 123])
def test_fsync_no_fullsync(self):
called = []
class FCNTL(object):
pass
def fsync(fd):
called.append(fd)
with patch('swift.common.utils.fcntl', FCNTL()):
with patch('os.fsync', fsync):
utils.fsync(12345)
self.assertEqual(called, [12345])
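# audit_location_generator(devices, datadir, [suffix], ...) walks
# <devices>/<device>/<datadir>/<partition>/<suffix>/<hash>/ and yields
# (object_path, device, partition) tuples; the tests below check that
# non-directory entries along the way are skipped (with a warning when a
# logger is given), that mount_check=True warns about devices that are not
# mount points, that an optional filename suffix filters which objects are
# yielded, and that listdir permission errors surface as OSError.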
class TestAuditLocationGenerator(unittest.TestCase):
def test_drive_tree_access(self):
orig_listdir = utils.listdir
def _mock_utils_listdir(path):
if 'bad_part' in path:
raise OSError(errno.EACCES)
elif 'bad_suffix' in path:
raise OSError(errno.EACCES)
elif 'bad_hash' in path:
raise OSError(errno.EACCES)
else:
return orig_listdir(path)
# Check Raise on Bad partition
tmpdir = mkdtemp()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
obj_path = os.path.join(data, "bad_part")
with open(obj_path, "w"):
pass
part1 = os.path.join(data, "partition1")
os.makedirs(part1)
part2 = os.path.join(data, "partition2")
os.makedirs(part2)
with patch('swift.common.utils.listdir', _mock_utils_listdir):
audit = lambda: list(utils.audit_location_generator(
tmpdir, "data", mount_check=False))
self.assertRaises(OSError, audit)
rmtree(tmpdir)
# Check Raise on Bad Suffix
tmpdir = mkdtemp()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
part1 = os.path.join(data, "partition1")
os.makedirs(part1)
part2 = os.path.join(data, "partition2")
os.makedirs(part2)
obj_path = os.path.join(part1, "bad_suffix")
with open(obj_path, 'w'):
pass
suffix = os.path.join(part2, "suffix")
os.makedirs(suffix)
with patch('swift.common.utils.listdir', _mock_utils_listdir):
audit = lambda: list(utils.audit_location_generator(
tmpdir, "data", mount_check=False))
self.assertRaises(OSError, audit)
rmtree(tmpdir)
# Check Raise on Bad Hash
tmpdir = mkdtemp()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
part1 = os.path.join(data, "partition1")
os.makedirs(part1)
suffix = os.path.join(part1, "suffix")
os.makedirs(suffix)
hash1 = os.path.join(suffix, "hash1")
os.makedirs(hash1)
obj_path = os.path.join(suffix, "bad_hash")
with open(obj_path, 'w'):
pass
with patch('swift.common.utils.listdir', _mock_utils_listdir):
audit = lambda: list(utils.audit_location_generator(
tmpdir, "data", mount_check=False))
self.assertRaises(OSError, audit)
rmtree(tmpdir)
def test_non_dir_drive(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
# Create a file that represents a non-dir drive
open(os.path.join(tmpdir, 'asdf'), 'w').close()
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False, logger=logger
)
self.assertEqual(list(locations), [])
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
# Test without the logger
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False
)
self.assertEqual(list(locations), [])
def test_mount_check_drive(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
# Create a file that represents a non-dir drive
open(os.path.join(tmpdir, 'asdf'), 'w').close()
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=True, logger=logger
)
self.assertEqual(list(locations), [])
self.assertEqual(2, len(logger.get_lines_for_level('warning')))
# Test without the logger
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=True
)
self.assertEqual(list(locations), [])
def test_non_dir_contents(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
with open(os.path.join(data, "partition1"), "w"):
pass
partition = os.path.join(data, "partition2")
os.makedirs(partition)
with open(os.path.join(partition, "suffix1"), "w"):
pass
suffix = os.path.join(partition, "suffix2")
os.makedirs(suffix)
with open(os.path.join(suffix, "hash1"), "w"):
pass
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False, logger=logger
)
self.assertEqual(list(locations), [])
def test_find_objects(self):
with temptree([]) as tmpdir:
expected_objs = list()
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
# Create a file that represents a non-dir drive
open(os.path.join(tmpdir, 'asdf'), 'w').close()
partition = os.path.join(data, "partition1")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash")
os.makedirs(hash_path)
obj_path = os.path.join(hash_path, "obj1.db")
with open(obj_path, "w"):
pass
expected_objs.append((obj_path, 'drive', 'partition1'))
partition = os.path.join(data, "partition2")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix2")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash2")
os.makedirs(hash_path)
obj_path = os.path.join(hash_path, "obj2.db")
with open(obj_path, "w"):
pass
expected_objs.append((obj_path, 'drive', 'partition2'))
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False, logger=logger
)
got_objs = list(locations)
self.assertEqual(len(got_objs), len(expected_objs))
self.assertEqual(sorted(got_objs), sorted(expected_objs))
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
def test_ignore_metadata(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
partition = os.path.join(data, "partition2")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix2")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash2")
os.makedirs(hash_path)
obj_path = os.path.join(hash_path, "obj1.dat")
with open(obj_path, "w"):
pass
meta_path = os.path.join(hash_path, "obj1.meta")
with open(meta_path, "w"):
pass
locations = utils.audit_location_generator(
tmpdir, "data", ".dat", mount_check=False, logger=logger
)
self.assertEqual(list(locations),
[(obj_path, "drive", "partition2")])
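# GreenAsyncPile(size) spawns work onto a bounded pool of greenthreads and
# is itself an iterator over results as they complete; per the tests below,
# waitall(timeout) returns whatever finished within the timeout,
# waitfirst(timeout) returns only the first completed result, _pending
# tracks outstanding jobs, and an exhausted pile raises StopIteration.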
class TestGreenAsyncPile(unittest.TestCase):
def test_runs_everything(self):
def run_test():
tests_ran[0] += 1
return tests_ran[0]
tests_ran = [0]
pile = utils.GreenAsyncPile(3)
for x in range(3):
pile.spawn(run_test)
self.assertEqual(sorted(x for x in pile), [1, 2, 3])
def test_is_asynchronous(self):
def run_test(index):
events[index].wait()
return index
pile = utils.GreenAsyncPile(3)
for order in ((1, 2, 0), (0, 1, 2), (2, 1, 0), (0, 2, 1)):
events = [eventlet.event.Event(), eventlet.event.Event(),
eventlet.event.Event()]
for x in range(3):
pile.spawn(run_test, x)
for x in order:
events[x].send()
self.assertEqual(next(pile), x)
def test_next_when_empty(self):
def run_test():
pass
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test)
self.assertIsNone(next(pile))
self.assertRaises(StopIteration, lambda: next(pile))
def test_waitall_timeout_timesout(self):
def run_test(sleep_duration):
eventlet.sleep(sleep_duration)
completed[0] += 1
return sleep_duration
completed = [0]
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test, 0.1)
pile.spawn(run_test, 1.0)
self.assertEqual(pile.waitall(0.5), [0.1])
self.assertEqual(completed[0], 1)
def test_waitall_timeout_completes(self):
def run_test(sleep_duration):
eventlet.sleep(sleep_duration)
completed[0] += 1
return sleep_duration
completed = [0]
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test, 0.1)
pile.spawn(run_test, 0.1)
self.assertEqual(pile.waitall(0.5), [0.1, 0.1])
self.assertEqual(completed[0], 2)
def test_waitfirst_only_returns_first(self):
def run_test(name):
eventlet.sleep(0)
completed.append(name)
return name
completed = []
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test, 'first')
pile.spawn(run_test, 'second')
pile.spawn(run_test, 'third')
self.assertEqual(pile.waitfirst(0.5), completed[0])
# 3 still completed, but only the first was returned.
self.assertEqual(3, len(completed))
def test_wait_with_firstn(self):
def run_test(name):
eventlet.sleep(0)
completed.append(name)
return name
for first_n in [None] + list(range(6)):
completed = []
pile = utils.GreenAsyncPile(10)
for i in range(10):
pile.spawn(run_test, i)
actual = pile._wait(1, first_n)
expected_n = first_n if first_n else 10
self.assertEqual(completed[:expected_n], actual)
self.assertEqual(10, len(completed))
def test_pending(self):
pile = utils.GreenAsyncPile(3)
self.assertEqual(0, pile._pending)
for repeats in range(2):
# repeat to verify that pending goes back up again after going down
for i in range(4):
pile.spawn(lambda: i)
self.assertEqual(4, pile._pending)
for i in range(3, -1, -1):
next(pile)
self.assertEqual(i, pile._pending)
# sanity check - the pile is empty
self.assertRaises(StopIteration, pile.next)
# pending remains 0
self.assertEqual(0, pile._pending)
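# The LRUCache tests below lean on a patching trick: once values are
# cached, math.sqrt is replaced by a mock (or by None), so any call that
# still reaches the real function either returns a mock object instead of
# the expected number or raises TypeError -- proving whether the decorated
# f() answered from its cache.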
class TestLRUCache(unittest.TestCase):
def test_maxsize(self):
@utils.LRUCache(maxsize=10)
def f(*args):
return math.sqrt(*args)
_orig_math_sqrt = math.sqrt
# setup cache [0-10)
for i in range(10):
self.assertEqual(math.sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# validate cache [0-10)
with patch('math.sqrt'):
for i in range(10):
self.assertEqual(_orig_math_sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# update cache [10-20)
for i in range(10, 20):
self.assertEqual(math.sqrt(i), f(i))
# cache size is fixed
self.assertEqual(f.size(), 10)
# validate cache [10-20)
with patch('math.sqrt'):
for i in range(10, 20):
self.assertEqual(_orig_math_sqrt(i), f(i))
# validate un-cached [0-10)
with patch('math.sqrt', new=None):
for i in range(10):
self.assertRaises(TypeError, f, i)
# cache unchanged
self.assertEqual(f.size(), 10)
with patch('math.sqrt'):
for i in range(10, 20):
self.assertEqual(_orig_math_sqrt(i), f(i))
self.assertEqual(f.size(), 10)
def test_maxtime(self):
@utils.LRUCache(maxtime=30)
def f(*args):
return math.sqrt(*args)
self.assertEqual(30, f.maxtime)
_orig_math_sqrt = math.sqrt
now = time.time()
the_future = now + 31
# setup cache [0-10)
with patch('time.time', lambda: now):
for i in range(10):
self.assertEqual(math.sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# validate cache [0-10)
with patch('math.sqrt'):
for i in range(10):
self.assertEqual(_orig_math_sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# validate expired [0-10)
with patch('math.sqrt', new=None):
with patch('time.time', lambda: the_future):
for i in range(10):
self.assertRaises(TypeError, f, i)
# validate repopulates [0-10)
with patch('time.time', lambda: the_future):
for i in range(10):
self.assertEqual(math.sqrt(i), f(i))
# reuses cache space
self.assertEqual(f.size(), 10)
def test_set_maxtime(self):
@utils.LRUCache(maxtime=30)
def f(*args):
return math.sqrt(*args)
self.assertEqual(30, f.maxtime)
self.assertEqual(2, f(4))
self.assertEqual(1, f.size())
# expire everything
f.maxtime = -1
# validate un-cached [0-10)
with patch('math.sqrt', new=None):
self.assertRaises(TypeError, f, 4)
def test_set_maxsize(self):
@utils.LRUCache(maxsize=10)
def f(*args):
return math.sqrt(*args)
for i in range(12):
f(i)
self.assertEqual(f.size(), 10)
f.maxsize = 4
for i in range(12):
f(i)
self.assertEqual(f.size(), 4)
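# utils.Spliterator wraps an iterable of string chunks; take(n) yields
# pieces totalling n characters, re-slicing the underlying chunks as needed
# without ever emitting empty strings, and a partially consumed take() can
# be close()d with the remainder picked up by the next take().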
class TestSpliterator(unittest.TestCase):
def test_string(self):
input_chunks = ["coun", "ter-", "b", "ra", "nch-mater",
"nit", "y-fungusy", "-nummular"]
si = utils.Spliterator(input_chunks)
self.assertEqual(''.join(si.take(8)), "counter-")
self.assertEqual(''.join(si.take(7)), "branch-")
self.assertEqual(''.join(si.take(10)), "maternity-")
self.assertEqual(''.join(si.take(8)), "fungusy-")
self.assertEqual(''.join(si.take(8)), "nummular")
def test_big_input_string(self):
input_chunks = ["iridium"]
si = utils.Spliterator(input_chunks)
self.assertEqual(''.join(si.take(2)), "ir")
self.assertEqual(''.join(si.take(1)), "i")
self.assertEqual(''.join(si.take(2)), "di")
self.assertEqual(''.join(si.take(1)), "u")
self.assertEqual(''.join(si.take(1)), "m")
def test_chunk_boundaries(self):
input_chunks = ["soylent", "green", "is", "people"]
si = utils.Spliterator(input_chunks)
self.assertEqual(''.join(si.take(7)), "soylent")
self.assertEqual(''.join(si.take(5)), "green")
self.assertEqual(''.join(si.take(2)), "is")
self.assertEqual(''.join(si.take(6)), "people")
def test_no_empty_strings(self):
input_chunks = ["soylent", "green", "is", "people"]
si = utils.Spliterator(input_chunks)
outputs = (list(si.take(7)) # starts and ends on chunk boundary
+ list(si.take(2)) # spans two chunks
+ list(si.take(3)) # begins but does not end chunk
+ list(si.take(2)) # ends but does not begin chunk
+ list(si.take(6))) # whole chunk + EOF
self.assertNotIn('', outputs)
def test_running_out(self):
input_chunks = ["not much"]
si = utils.Spliterator(input_chunks)
self.assertEqual(''.join(si.take(4)), "not ")
self.assertEqual(''.join(si.take(99)), "much") # short
self.assertEqual(''.join(si.take(4)), "")
self.assertEqual(''.join(si.take(4)), "")
def test_overlap(self):
input_chunks = ["one fish", "two fish", "red fish", "blue fish"]
si = utils.Spliterator(input_chunks)
t1 = si.take(20) # longer than first chunk
self.assertLess(len(next(t1)), 20) # it's not exhausted
t2 = si.take(20)
self.assertRaises(ValueError, next, t2)
def test_closing(self):
input_chunks = ["abcd", "efg", "hij"]
si = utils.Spliterator(input_chunks)
it = si.take(3) # shorter than first chunk
self.assertEqual(next(it), 'abc')
it.close()
self.assertEqual(list(si.take(20)), ['d', 'efg', 'hij'])
si = utils.Spliterator(input_chunks)
self.assertEqual(list(si.take(1)), ['a'])
it = si.take(1) # still shorter than first chunk
self.assertEqual(next(it), 'b')
it.close()
self.assertEqual(list(si.take(20)), ['cd', 'efg', 'hij'])
si = utils.Spliterator(input_chunks)
it = si.take(6) # longer than first chunk, shorter than first + second
self.assertEqual(next(it), 'abcd')
self.assertEqual(next(it), 'ef')
it.close()
self.assertEqual(list(si.take(20)), ['g', 'hij'])
si = utils.Spliterator(input_chunks)
self.assertEqual(list(si.take(2)), ['ab'])
it = si.take(3) # longer than rest of chunk
self.assertEqual(next(it), 'cd')
it.close()
self.assertEqual(list(si.take(20)), ['efg', 'hij'])
class TestParseContentRange(unittest.TestCase):
def test_good(self):
start, end, total = utils.parse_content_range("bytes 100-200/300")
self.assertEqual(start, 100)
self.assertEqual(end, 200)
self.assertEqual(total, 300)
def test_bad(self):
self.assertRaises(ValueError, utils.parse_content_range,
"100-300/500")
self.assertRaises(ValueError, utils.parse_content_range,
"bytes 100-200/aardvark")
self.assertRaises(ValueError, utils.parse_content_range,
"bytes bulbous-bouffant/4994801")
class TestParseContentDisposition(unittest.TestCase):
def test_basic_content_type(self):
name, attrs = utils.parse_content_disposition('text/plain')
self.assertEqual(name, 'text/plain')
self.assertEqual(attrs, {})
def test_content_type_with_charset(self):
name, attrs = utils.parse_content_disposition(
'text/plain; charset=UTF8')
self.assertEqual(name, 'text/plain')
self.assertEqual(attrs, {'charset': 'UTF8'})
def test_content_disposition(self):
name, attrs = utils.parse_content_disposition(
'form-data; name="somefile"; filename="test.html"')
self.assertEqual(name, 'form-data')
self.assertEqual(attrs, {'name': 'somefile', 'filename': 'test.html'})
def test_content_disposition_without_white_space(self):
name, attrs = utils.parse_content_disposition(
'form-data;name="somefile";filename="test.html"')
self.assertEqual(name, 'form-data')
self.assertEqual(attrs, {'name': 'somefile', 'filename': 'test.html'})
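# iter_multipart_mime_documents(fp, boundary) yields one file-like object
# per MIME part: each part reads the bytes between '--<boundary>' markers,
# the closing '--<boundary>--' ends the iteration, a malformed start raises
# MimeInvalid, and leading CRLFs before the first boundary are tolerated.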
class TestIterMultipartMimeDocuments(unittest.TestCase):
def test_bad_start(self):
it = utils.iter_multipart_mime_documents(BytesIO(b'blah'), b'unique')
exc = None
try:
next(it)
except MimeInvalid as err:
exc = err
self.assertTrue('invalid starting boundary' in str(exc))
self.assertTrue('--unique' in str(exc))
def test_empty(self):
it = utils.iter_multipart_mime_documents(BytesIO(b'--unique'),
b'unique')
fp = next(it)
self.assertEqual(fp.read(), b'')
self.assertRaises(StopIteration, next, it)
def test_basic(self):
it = utils.iter_multipart_mime_documents(
BytesIO(b'--unique\r\nabcdefg\r\n--unique--'), b'unique')
fp = next(it)
self.assertEqual(fp.read(), b'abcdefg')
self.assertRaises(StopIteration, next, it)
def test_basic2(self):
it = utils.iter_multipart_mime_documents(
BytesIO(b'--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
b'unique')
fp = next(it)
self.assertEqual(fp.read(), b'abcdefg')
fp = next(it)
self.assertEqual(fp.read(), b'hijkl')
self.assertRaises(StopIteration, next, it)
def test_tiny_reads(self):
it = utils.iter_multipart_mime_documents(
BytesIO(b'--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
b'unique')
fp = next(it)
self.assertEqual(fp.read(2), b'ab')
self.assertEqual(fp.read(2), b'cd')
self.assertEqual(fp.read(2), b'ef')
self.assertEqual(fp.read(2), b'g')
self.assertEqual(fp.read(2), b'')
fp = next(it)
self.assertEqual(fp.read(), b'hijkl')
self.assertRaises(StopIteration, next, it)
def test_big_reads(self):
it = utils.iter_multipart_mime_documents(
BytesIO(b'--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
b'unique')
fp = next(it)
self.assertEqual(fp.read(65536), b'abcdefg')
self.assertEqual(fp.read(), b'')
fp = next(it)
self.assertEqual(fp.read(), b'hijkl')
self.assertRaises(StopIteration, next, it)
def test_leading_crlfs(self):
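        # leading blank lines before the first boundary are ignored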
it = utils.iter_multipart_mime_documents(
BytesIO(b'\r\n\r\n\r\n--unique\r\nabcdefg\r\n'
b'--unique\r\nhijkl\r\n--unique--'),
b'unique')
fp = next(it)
self.assertEqual(fp.read(65536), b'abcdefg')
self.assertEqual(fp.read(), b'')
fp = next(it)
self.assertEqual(fp.read(), b'hijkl')
self.assertRaises(StopIteration, next, it)
def test_broken_mid_stream(self):
# We go ahead and accept whatever is sent instead of rejecting the
# whole request, in case the partial form is still useful.
it = utils.iter_multipart_mime_documents(
BytesIO(b'--unique\r\nabc'), b'unique')
fp = next(it)
self.assertEqual(fp.read(), b'abc')
self.assertRaises(StopIteration, next, it)
def test_readline(self):
it = utils.iter_multipart_mime_documents(
BytesIO(b'--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n\r\n'
b'jkl\r\n\r\n--unique--'), b'unique')
fp = next(it)
self.assertEqual(fp.readline(), b'ab\r\n')
self.assertEqual(fp.readline(), b'cd\ref\ng')
self.assertEqual(fp.readline(), b'')
fp = next(it)
self.assertEqual(fp.readline(), b'hi\r\n')
self.assertEqual(fp.readline(), b'\r\n')
self.assertEqual(fp.readline(), b'jkl\r\n')
self.assertRaises(StopIteration, next, it)
def test_readline_with_tiny_chunks(self):
it = utils.iter_multipart_mime_documents(
BytesIO(b'--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n'
b'\r\njkl\r\n\r\n--unique--'),
b'unique',
read_chunk_size=2)
fp = next(it)
self.assertEqual(fp.readline(), b'ab\r\n')
self.assertEqual(fp.readline(), b'cd\ref\ng')
self.assertEqual(fp.readline(), b'')
fp = next(it)
self.assertEqual(fp.readline(), b'hi\r\n')
self.assertEqual(fp.readline(), b'\r\n')
self.assertEqual(fp.readline(), b'jkl\r\n')
self.assertRaises(StopIteration, next, it)
class TestParseMimeHeaders(unittest.TestCase):
def test_parse_mime_headers(self):
doc_file = BytesIO(b"""Content-Disposition: form-data; name="file_size"
Foo: Bar
NOT-title-cAsED: quux
Connexion: =?iso8859-1?q?r=E9initialis=E9e_par_l=27homologue?=
Status: =?utf-8?b?5byA5aeL6YCa6L+H5a+56LGh5aSN5Yi2?=
Latin-1: Resincronizaci\xf3n realizada con \xe9xito
Utf-8: \xd0\xba\xd0\xbe\xd0\xbd\xd1\x82\xd0\xb5\xd0\xb9\xd0\xbd\xd0\xb5\xd1\x80
This is the body
""")
headers = utils.parse_mime_headers(doc_file)
utf8 = u'\u043a\u043e\u043d\u0442\u0435\u0439\u043d\u0435\u0440'
if six.PY2:
utf8 = utf8.encode('utf-8')
expected_headers = {
'Content-Disposition': 'form-data; name="file_size"',
'Foo': "Bar",
'Not-Title-Cased': "quux",
# Encoded-word or non-ASCII values are treated just like any other
# bytestring (at least for now)
'Connexion': "=?iso8859-1?q?r=E9initialis=E9e_par_l=27homologue?=",
'Status': "=?utf-8?b?5byA5aeL6YCa6L+H5a+56LGh5aSN5Yi2?=",
'Latin-1': "Resincronizaci\xf3n realizada con \xe9xito",
'Utf-8': utf8,
}
self.assertEqual(expected_headers, headers)
self.assertEqual(b"This is the body\n", doc_file.read())
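# Minimal stand-in for an HTTP response object, exposing just the status,
# headers and body-reading interface the tests in this module need.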
class FakeResponse(object):
def __init__(self, status, headers, body):
self.status = status
self.headers = HeaderKeyDict(headers)
self.body = BytesIO(body)
def getheader(self, header_name):
return str(self.headers.get(header_name, ''))
def getheaders(self):
return self.headers.items()
def read(self, length=None):
return self.body.read(length)
def readline(self, length=None):
return self.body.readline(length)
class TestDocumentItersToHTTPResponseBody(unittest.TestCase):
def test_no_parts(self):
body = utils.document_iters_to_http_response_body(
iter([]), 'dontcare',
multipart=False, logger=FakeLogger())
self.assertEqual(body, '')
def test_single_part(self):
body = "time flies like an arrow; fruit flies like a banana"
doc_iters = [{'part_iter': iter(StringIO(body).read, '')}]
resp_body = ''.join(
utils.document_iters_to_http_response_body(
iter(doc_iters), 'dontcare',
multipart=False, logger=FakeLogger()))
self.assertEqual(resp_body, body)
def test_multiple_parts(self):
part1 = "two peanuts were walking down a railroad track"
part2 = "and one was a salted. ... peanut."
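        # Each dict describes one part: its byte range within the entity,
        # the total entity length, its content type, and an iterator over
        # the part's body.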
doc_iters = [{
'start_byte': 88,
'end_byte': 133,
'content_type': 'application/peanut',
'entity_length': 1024,
'part_iter': iter(StringIO(part1).read, ''),
}, {
'start_byte': 500,
'end_byte': 532,
'content_type': 'application/salted',
'entity_length': 1024,
'part_iter': iter(StringIO(part2).read, ''),
}]
resp_body = ''.join(
utils.document_iters_to_http_response_body(
iter(doc_iters), 'boundaryboundary',
multipart=True, logger=FakeLogger()))
self.assertEqual(resp_body, (
"--boundaryboundary\r\n" +
# This is a little too strict; we don't actually care that the
# headers are in this order, but the test is much more legible
# this way.
"Content-Type: application/peanut\r\n" +
"Content-Range: bytes 88-133/1024\r\n" +
"\r\n" +
part1 + "\r\n" +
"--boundaryboundary\r\n"
"Content-Type: application/salted\r\n" +
"Content-Range: bytes 500-532/1024\r\n" +
"\r\n" +
part2 + "\r\n" +
"--boundaryboundary--"))
def test_closed_part_iterator(self):
useful_iter_mock = mock.MagicMock()
useful_iter_mock.__iter__.return_value = ['']
body_iter = utils.document_iters_to_http_response_body(
iter([{'part_iter': useful_iter_mock}]), 'dontcare',
multipart=False, logger=FakeLogger())
body = ''
for s in body_iter:
body += s
self.assertEqual(body, '')
useful_iter_mock.close.assert_called_once_with()
# Calling "close" on the mock will now raise an AttributeError
del useful_iter_mock.close
body_iter = utils.document_iters_to_http_response_body(
iter([{'part_iter': useful_iter_mock}]), 'dontcare',
multipart=False, logger=FakeLogger())
body = ''
for s in body_iter:
body += s
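        # No assertion needed here: we only check that a part_iter without
        # a close() method doesn't blow up the generator.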
class TestPairs(unittest.TestCase):
def test_pairs(self):
items = [10, 20, 30, 40, 50, 60]
got_pairs = set(utils.pairs(items))
self.assertEqual(got_pairs,
set([(10, 20), (10, 30), (10, 40), (10, 50), (10, 60),
(20, 30), (20, 40), (20, 50), (20, 60),
(30, 40), (30, 50), (30, 60),
(40, 50), (40, 60),
(50, 60)]))
class TestSocketStringParser(unittest.TestCase):
def test_socket_string_parser(self):
default = 1337
addrs = [('1.2.3.4', '1.2.3.4', default),
('1.2.3.4:5000', '1.2.3.4', 5000),
('[dead:beef::1]', 'dead:beef::1', default),
('[dead:beef::1]:5000', 'dead:beef::1', 5000),
('example.com', 'example.com', default),
('example.com:5000', 'example.com', 5000),
('foo.1-2-3.bar.com:5000', 'foo.1-2-3.bar.com', 5000),
('1.2.3.4:10:20', None, None),
('dead:beef::1:5000', None, None)]
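        # entries with expected_host=None are malformed and must raise
        # ValueError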
for addr, expected_host, expected_port in addrs:
if expected_host:
host, port = utils.parse_socket_string(addr, default)
self.assertEqual(expected_host, host)
self.assertEqual(expected_port, int(port))
else:
with self.assertRaises(ValueError):
utils.parse_socket_string(addr, default)
class TestHashForFileFunction(unittest.TestCase):
def setUp(self):
self.tempfilename = tempfile.mktemp()
def tearDown(self):
try:
os.unlink(self.tempfilename)
except OSError:
pass
def test_hash_for_file_smallish(self):
stub_data = b'some data'
with open(self.tempfilename, 'wb') as fd:
fd.write(stub_data)
with mock.patch('swift.common.utils.md5') as mock_md5:
mock_hasher = mock_md5.return_value
rv = utils.md5_hash_for_file(self.tempfilename)
self.assertTrue(mock_hasher.hexdigest.called)
self.assertEqual(rv, mock_hasher.hexdigest.return_value)
self.assertEqual([mock.call(stub_data)],
mock_hasher.update.call_args_list)
def test_hash_for_file_big(self):
num_blocks = 10
block_size = utils.MD5_BLOCK_READ_BYTES
truncate = 523
start_char = ord('a')
expected_blocks = [chr(i).encode('utf8') * block_size
for i in range(start_char, start_char + num_blocks)]
full_data = b''.join(expected_blocks)
trimmed_data = full_data[:-truncate]
# sanity
self.assertEqual(len(trimmed_data), block_size * num_blocks - truncate)
with open(self.tempfilename, 'wb') as fd:
fd.write(trimmed_data)
with mock.patch('swift.common.utils.md5') as mock_md5:
mock_hasher = mock_md5.return_value
rv = utils.md5_hash_for_file(self.tempfilename)
self.assertTrue(mock_hasher.hexdigest.called)
self.assertEqual(rv, mock_hasher.hexdigest.return_value)
self.assertEqual(num_blocks, len(mock_hasher.update.call_args_list))
found_blocks = []
for i, (expected_block, call) in enumerate(zip(
expected_blocks, mock_hasher.update.call_args_list)):
args, kwargs = call
self.assertEqual(kwargs, {})
self.assertEqual(1, len(args))
block = args[0]
if i < num_blocks - 1:
self.assertEqual(block, expected_block)
else:
self.assertEqual(block, expected_block[:-truncate])
found_blocks.append(block)
self.assertEqual(b''.join(found_blocks), trimmed_data)
def test_hash_for_file_empty(self):
with open(self.tempfilename, 'wb'):
pass
with mock.patch('swift.common.utils.md5') as mock_md5:
mock_hasher = mock_md5.return_value
rv = utils.md5_hash_for_file(self.tempfilename)
self.assertTrue(mock_hasher.hexdigest.called)
self.assertIs(rv, mock_hasher.hexdigest.return_value)
self.assertEqual([], mock_hasher.update.call_args_list)
def test_hash_for_file_brittle(self):
data_to_expected_hash = {
b'': 'd41d8cd98f00b204e9800998ecf8427e',
b'some data': '1e50210a0202497fb79bc38b6ade6c34',
(b'a' * 4096 * 10)[:-523]: '06a41551609656c85f14f659055dc6d3',
}
        # unlike some other places, where the concrete implementation really
        # matters for backwards compatibility, these brittle tests are
        # probably not needed or justified; if a future maintainer rips them
        # out later, they're probably doing the right thing
failures = []
for stub_data, expected_hash in data_to_expected_hash.items():
with open(self.tempfilename, 'wb') as fd:
fd.write(stub_data)
rv = utils.md5_hash_for_file(self.tempfilename)
try:
self.assertEqual(expected_hash, rv)
except AssertionError:
trim_cap = 80
if len(stub_data) > trim_cap:
stub_data = '%s...<truncated>' % stub_data[:trim_cap]
failures.append('hash for %r was %s instead of expected %s' % (
stub_data, rv, expected_hash))
if failures:
self.fail('Some data did not compute expected hash:\n' +
'\n'.join(failures))
class TestFsHasFreeSpace(unittest.TestCase):
def test_bytes(self):
fake_result = posix.statvfs_result([
4096, # f_bsize
4096, # f_frsize
2854907, # f_blocks
1984802, # f_bfree (free blocks for root)
1728089, # f_bavail (free blocks for non-root)
1280000, # f_files
1266040, # f_ffree,
1266040, # f_favail,
4096, # f_flag
255, # f_namemax
])
with mock.patch('os.statvfs', return_value=fake_result):
self.assertTrue(utils.fs_has_free_space("/", 0, False))
self.assertTrue(utils.fs_has_free_space("/", 1, False))
# free space left = f_bavail * f_bsize = 7078252544
self.assertTrue(utils.fs_has_free_space("/", 7078252544, False))
self.assertFalse(utils.fs_has_free_space("/", 7078252545, False))
self.assertFalse(utils.fs_has_free_space("/", 2 ** 64, False))
def test_percent(self):
fake_result = posix.statvfs_result([
4096, # f_bsize
4096, # f_frsize
2854907, # f_blocks
1984802, # f_bfree (free blocks for root)
1728089, # f_bavail (free blocks for non-root)
1280000, # f_files
1266040, # f_ffree,
1266040, # f_favail,
4096, # f_flag
255, # f_namemax
])
with mock.patch('os.statvfs', return_value=fake_result):
self.assertTrue(utils.fs_has_free_space("/", 0, True))
self.assertTrue(utils.fs_has_free_space("/", 1, True))
            # the faked statvfs reports ~60.5% free space, so 60% passes
            # and 61% fails
self.assertTrue(utils.fs_has_free_space("/", 60, True))
self.assertFalse(utils.fs_has_free_space("/", 61, True))
self.assertFalse(utils.fs_has_free_space("/", 100, True))
self.assertFalse(utils.fs_has_free_space("/", 110, True))
class TestSetSwiftDir(unittest.TestCase):
def setUp(self):
self.swift_dir = tempfile.mkdtemp()
self.swift_conf = os.path.join(self.swift_dir, 'swift.conf')
self.policy_name = ''.join(random.sample(string.ascii_letters, 20))
with open(self.swift_conf, "wt") as sc:
sc.write('''
[swift-hash]
swift_hash_path_suffix = changeme
[storage-policy:0]
name = default
default = yes
[storage-policy:1]
name = %s
''' % self.policy_name)
def tearDown(self):
shutil.rmtree(self.swift_dir, ignore_errors=True)
def test_set_swift_dir(self):
set_swift_dir(None)
reload_storage_policies()
self.assertIsNone(POLICIES.get_by_name(self.policy_name))
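        # pointing swift_dir at our temporary config makes the extra
        # storage policy visible after a reload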
set_swift_dir(self.swift_dir)
reload_storage_policies()
self.assertIsNotNone(POLICIES.get_by_name(self.policy_name))
class TestPipeMutex(unittest.TestCase):
def setUp(self):
self.mutex = utils.PipeMutex()
def tearDown(self):
self.mutex.close()
def test_nonblocking(self):
evt_lock1 = eventlet.event.Event()
evt_lock2 = eventlet.event.Event()
evt_unlock = eventlet.event.Event()
def get_the_lock():
self.mutex.acquire()
evt_lock1.send('got the lock')
evt_lock2.wait()
self.mutex.release()
evt_unlock.send('released the lock')
eventlet.spawn(get_the_lock)
evt_lock1.wait() # Now, the other greenthread has the lock.
self.assertFalse(self.mutex.acquire(blocking=False))
evt_lock2.send('please release the lock')
evt_unlock.wait() # The other greenthread has released the lock.
self.assertTrue(self.mutex.acquire(blocking=False))
def test_recursive(self):
self.assertTrue(self.mutex.acquire(blocking=False))
self.assertTrue(self.mutex.acquire(blocking=False))
def try_acquire_lock():
return self.mutex.acquire(blocking=False)
self.assertFalse(eventlet.spawn(try_acquire_lock).wait())
self.mutex.release()
self.assertFalse(eventlet.spawn(try_acquire_lock).wait())
self.mutex.release()
self.assertTrue(eventlet.spawn(try_acquire_lock).wait())
def test_release_without_acquire(self):
self.assertRaises(RuntimeError, self.mutex.release)
def test_too_many_releases(self):
self.mutex.acquire()
self.mutex.release()
self.assertRaises(RuntimeError, self.mutex.release)
def test_wrong_releaser(self):
self.mutex.acquire()
with quiet_eventlet_exceptions():
self.assertRaises(RuntimeError,
eventlet.spawn(self.mutex.release).wait)
def test_blocking(self):
evt = eventlet.event.Event()
sequence = []
def coro1():
eventlet.sleep(0) # let coro2 go
self.mutex.acquire()
sequence.append('coro1 acquire')
evt.send('go')
self.mutex.release()
sequence.append('coro1 release')
def coro2():
evt.wait() # wait for coro1 to start us
self.mutex.acquire()
sequence.append('coro2 acquire')
self.mutex.release()
sequence.append('coro2 release')
c1 = eventlet.spawn(coro1)
c2 = eventlet.spawn(coro2)
c1.wait()
c2.wait()
self.assertEqual(sequence, [
'coro1 acquire',
'coro1 release',
'coro2 acquire',
'coro2 release'])
def test_blocking_tpool(self):
# Note: this test's success isn't a guarantee that the mutex is
# working. However, this test's failure means that the mutex is
# definitely broken.
sequence = []
def do_stuff():
n = 10
while n > 0:
self.mutex.acquire()
sequence.append("<")
eventlet.sleep(0.0001)
sequence.append(">")
self.mutex.release()
n -= 1
greenthread1 = eventlet.spawn(do_stuff)
greenthread2 = eventlet.spawn(do_stuff)
real_thread1 = eventlet.patcher.original('threading').Thread(
target=do_stuff)
real_thread1.start()
real_thread2 = eventlet.patcher.original('threading').Thread(
target=do_stuff)
real_thread2.start()
greenthread1.wait()
greenthread2.wait()
real_thread1.join()
real_thread2.join()
self.assertEqual(''.join(sequence), "<>" * 40)
def test_blocking_preserves_ownership(self):
pthread1_event = eventlet.patcher.original('threading').Event()
pthread2_event1 = eventlet.patcher.original('threading').Event()
pthread2_event2 = eventlet.patcher.original('threading').Event()
thread_id = []
owner = []
def pthread1():
thread_id.append(id(eventlet.greenthread.getcurrent()))
self.mutex.acquire()
owner.append(self.mutex.owner)
pthread2_event1.set()
orig_os_write = utils.os.write
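            # Stall pthread1 inside release() (right after the pipe write)
            # until pthread2 has acquired the mutex, so ownership is handed
            # over while the releasing thread is still inside release().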
def patched_os_write(*a, **kw):
try:
return orig_os_write(*a, **kw)
finally:
pthread1_event.wait()
with mock.patch.object(utils.os, 'write', patched_os_write):
self.mutex.release()
pthread2_event2.set()
def pthread2():
pthread2_event1.wait() # ensure pthread1 acquires lock first
thread_id.append(id(eventlet.greenthread.getcurrent()))
self.mutex.acquire()
pthread1_event.set()
pthread2_event2.wait()
owner.append(self.mutex.owner)
self.mutex.release()
real_thread1 = eventlet.patcher.original('threading').Thread(
target=pthread1)
real_thread1.start()
real_thread2 = eventlet.patcher.original('threading').Thread(
target=pthread2)
real_thread2.start()
real_thread1.join()
real_thread2.join()
self.assertEqual(thread_id, owner)
self.assertIsNone(self.mutex.owner)
@classmethod
def tearDownClass(cls):
# PipeMutex turns this off when you instantiate one
eventlet.debug.hub_prevent_multiple_readers(True)
class TestDistributeEvenly(unittest.TestCase):
def test_evenly_divided(self):
out = utils.distribute_evenly(range(12), 3)
self.assertEqual(out, [
[0, 3, 6, 9],
[1, 4, 7, 10],
[2, 5, 8, 11],
])
out = utils.distribute_evenly(range(12), 4)
self.assertEqual(out, [
[0, 4, 8],
[1, 5, 9],
[2, 6, 10],
[3, 7, 11],
])
def test_uneven(self):
out = utils.distribute_evenly(range(11), 3)
self.assertEqual(out, [
[0, 3, 6, 9],
[1, 4, 7, 10],
[2, 5, 8],
])
def test_just_one(self):
out = utils.distribute_evenly(range(5), 1)
self.assertEqual(out, [[0, 1, 2, 3, 4]])
def test_more_buckets_than_items(self):
out = utils.distribute_evenly(range(5), 7)
self.assertEqual(out, [[0], [1], [2], [3], [4], [], []])
class TestShardRange(unittest.TestCase):
def setUp(self):
self.ts_iter = make_timestamp_iter()
def test_min_max_bounds(self):
# max
self.assertEqual(utils.ShardRange.MAX, utils.ShardRange.MAX)
self.assertFalse(utils.ShardRange.MAX > utils.ShardRange.MAX)
self.assertFalse(utils.ShardRange.MAX < utils.ShardRange.MAX)
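        # MAX compares greater than any real string value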
for val in 'z', u'\u00e4':
self.assertFalse(utils.ShardRange.MAX == val)
self.assertFalse(val > utils.ShardRange.MAX)
self.assertTrue(val < utils.ShardRange.MAX)
self.assertTrue(utils.ShardRange.MAX > val)
self.assertFalse(utils.ShardRange.MAX < val)
self.assertEqual('', str(utils.ShardRange.MAX))
self.assertFalse(utils.ShardRange.MAX)
self.assertTrue(utils.ShardRange.MAX == utils.ShardRange.MAX)
self.assertFalse(utils.ShardRange.MAX != utils.ShardRange.MAX)
self.assertTrue(
utils.ShardRange.MaxBound() == utils.ShardRange.MaxBound())
self.assertFalse(
utils.ShardRange.MaxBound() != utils.ShardRange.MaxBound())
# min
self.assertEqual(utils.ShardRange.MIN, utils.ShardRange.MIN)
self.assertFalse(utils.ShardRange.MIN > utils.ShardRange.MIN)
self.assertFalse(utils.ShardRange.MIN < utils.ShardRange.MIN)
for val in 'z', u'\u00e4':
self.assertFalse(utils.ShardRange.MIN == val)
self.assertFalse(val < utils.ShardRange.MIN)
self.assertTrue(val > utils.ShardRange.MIN)
self.assertTrue(utils.ShardRange.MIN < val)
self.assertFalse(utils.ShardRange.MIN > val)
self.assertFalse(utils.ShardRange.MIN)
self.assertEqual('', str(utils.ShardRange.MIN))
self.assertFalse(utils.ShardRange.MIN)
self.assertTrue(utils.ShardRange.MIN == utils.ShardRange.MIN)
self.assertFalse(utils.ShardRange.MIN != utils.ShardRange.MIN)
self.assertTrue(
utils.ShardRange.MinBound() == utils.ShardRange.MinBound())
self.assertFalse(
utils.ShardRange.MinBound() != utils.ShardRange.MinBound())
self.assertFalse(utils.ShardRange.MAX == utils.ShardRange.MIN)
self.assertFalse(utils.ShardRange.MIN == utils.ShardRange.MAX)
self.assertTrue(utils.ShardRange.MAX != utils.ShardRange.MIN)
self.assertTrue(utils.ShardRange.MIN != utils.ShardRange.MAX)
self.assertEqual(utils.ShardRange.MAX,
max(utils.ShardRange.MIN, utils.ShardRange.MAX))
self.assertEqual(utils.ShardRange.MIN,
min(utils.ShardRange.MIN, utils.ShardRange.MAX))
def test_shard_range_initialisation(self):
def assert_initialisation_ok(params, expected):
pr = utils.ShardRange(**params)
self.assertDictEqual(dict(pr), expected)
def assert_initialisation_fails(params, err_type=ValueError):
with self.assertRaises(err_type):
utils.ShardRange(**params)
ts_1 = next(self.ts_iter)
ts_2 = next(self.ts_iter)
ts_3 = next(self.ts_iter)
ts_4 = next(self.ts_iter)
empty_run = dict(name=None, timestamp=None, lower=None,
upper=None, object_count=0, bytes_used=0,
meta_timestamp=None, deleted=0,
state=utils.ShardRange.FOUND, state_timestamp=None,
epoch=None)
# name, timestamp must be given
assert_initialisation_fails(empty_run.copy())
assert_initialisation_fails(dict(empty_run, name='a/c'), TypeError)
assert_initialisation_fails(dict(empty_run, timestamp=ts_1))
# name must be form a/c
assert_initialisation_fails(dict(empty_run, name='c', timestamp=ts_1))
assert_initialisation_fails(dict(empty_run, name='', timestamp=ts_1))
assert_initialisation_fails(dict(empty_run, name='/a/c',
timestamp=ts_1))
assert_initialisation_fails(dict(empty_run, name='/c',
timestamp=ts_1))
# lower, upper can be None
expect = dict(name='a/c', timestamp=ts_1.internal, lower='',
upper='', object_count=0, bytes_used=0,
meta_timestamp=ts_1.internal, deleted=0,
state=utils.ShardRange.FOUND,
state_timestamp=ts_1.internal, epoch=None)
assert_initialisation_ok(dict(empty_run, name='a/c', timestamp=ts_1),
expect)
assert_initialisation_ok(dict(name='a/c', timestamp=ts_1), expect)
good_run = dict(name='a/c', timestamp=ts_1, lower='l',
upper='u', object_count=2, bytes_used=10,
meta_timestamp=ts_2, deleted=0,
state=utils.ShardRange.CREATED,
state_timestamp=ts_3.internal, epoch=ts_4)
expect.update({'lower': 'l', 'upper': 'u', 'object_count': 2,
'bytes_used': 10, 'meta_timestamp': ts_2.internal,
'state': utils.ShardRange.CREATED,
'state_timestamp': ts_3.internal, 'epoch': ts_4})
assert_initialisation_ok(good_run.copy(), expect)
# obj count and bytes used as int strings
good_str_run = good_run.copy()
good_str_run.update({'object_count': '2', 'bytes_used': '10'})
assert_initialisation_ok(good_str_run, expect)
good_no_meta = good_run.copy()
good_no_meta.pop('meta_timestamp')
assert_initialisation_ok(good_no_meta,
dict(expect, meta_timestamp=ts_1.internal))
good_deleted = good_run.copy()
good_deleted['deleted'] = 1
assert_initialisation_ok(good_deleted,
dict(expect, deleted=1))
assert_initialisation_fails(dict(good_run, timestamp='water balloon'))
assert_initialisation_fails(
dict(good_run, meta_timestamp='water balloon'))
assert_initialisation_fails(dict(good_run, lower='water balloon'))
assert_initialisation_fails(dict(good_run, upper='balloon'))
assert_initialisation_fails(
dict(good_run, object_count='water balloon'))
        assert_initialisation_fails(
            dict(good_run, bytes_used='water balloon'))
assert_initialisation_fails(dict(good_run, object_count=-1))
assert_initialisation_fails(dict(good_run, bytes_used=-1))
assert_initialisation_fails(dict(good_run, state=-1))
assert_initialisation_fails(dict(good_run, state_timestamp='not a ts'))
assert_initialisation_fails(dict(good_run, name='/a/c'))
assert_initialisation_fails(dict(good_run, name='/a/c/'))
assert_initialisation_fails(dict(good_run, name='a/c/'))
assert_initialisation_fails(dict(good_run, name='a'))
assert_initialisation_fails(dict(good_run, name=''))
def _check_to_from_dict(self, lower, upper):
ts_1 = next(self.ts_iter)
ts_2 = next(self.ts_iter)
ts_3 = next(self.ts_iter)
ts_4 = next(self.ts_iter)
sr = utils.ShardRange('a/test', ts_1, lower, upper, 10, 100, ts_2,
state=None, state_timestamp=ts_3, epoch=ts_4)
sr_dict = dict(sr)
expected = {
'name': 'a/test', 'timestamp': ts_1.internal, 'lower': lower,
'upper': upper, 'object_count': 10, 'bytes_used': 100,
'meta_timestamp': ts_2.internal, 'deleted': 0,
'state': utils.ShardRange.FOUND, 'state_timestamp': ts_3.internal,
'epoch': ts_4}
self.assertEqual(expected, sr_dict)
self.assertIsInstance(sr_dict['lower'], six.string_types)
self.assertIsInstance(sr_dict['upper'], six.string_types)
sr_new = utils.ShardRange.from_dict(sr_dict)
self.assertEqual(sr, sr_new)
self.assertEqual(sr_dict, dict(sr_new))
sr_new = utils.ShardRange(**sr_dict)
self.assertEqual(sr, sr_new)
self.assertEqual(sr_dict, dict(sr_new))
for key in sr_dict:
bad_dict = dict(sr_dict)
bad_dict.pop(key)
with self.assertRaises(KeyError):
utils.ShardRange.from_dict(bad_dict)
# But __init__ still (generally) works!
if key not in ('name', 'timestamp'):
utils.ShardRange(**bad_dict)
else:
with self.assertRaises(TypeError):
utils.ShardRange(**bad_dict)
def test_to_from_dict(self):
self._check_to_from_dict('l', 'u')
self._check_to_from_dict('', '')
def test_timestamp_setter(self):
ts_1 = next(self.ts_iter)
sr = utils.ShardRange('a/test', ts_1, 'l', 'u', 0, 0, None)
self.assertEqual(ts_1, sr.timestamp)
ts_2 = next(self.ts_iter)
sr.timestamp = ts_2
self.assertEqual(ts_2, sr.timestamp)
sr.timestamp = 0
self.assertEqual(utils.Timestamp(0), sr.timestamp)
with self.assertRaises(TypeError):
sr.timestamp = None
def test_meta_timestamp_setter(self):
ts_1 = next(self.ts_iter)
sr = utils.ShardRange('a/test', ts_1, 'l', 'u', 0, 0, None)
self.assertEqual(ts_1, sr.timestamp)
self.assertEqual(ts_1, sr.meta_timestamp)
ts_2 = next(self.ts_iter)
sr.meta_timestamp = ts_2
self.assertEqual(ts_1, sr.timestamp)
self.assertEqual(ts_2, sr.meta_timestamp)
ts_3 = next(self.ts_iter)
sr.timestamp = ts_3
self.assertEqual(ts_3, sr.timestamp)
self.assertEqual(ts_2, sr.meta_timestamp)
# meta_timestamp defaults to tracking timestamp
sr.meta_timestamp = None
self.assertEqual(ts_3, sr.timestamp)
self.assertEqual(ts_3, sr.meta_timestamp)
ts_4 = next(self.ts_iter)
sr.timestamp = ts_4
self.assertEqual(ts_4, sr.timestamp)
self.assertEqual(ts_4, sr.meta_timestamp)
sr.meta_timestamp = 0
self.assertEqual(ts_4, sr.timestamp)
self.assertEqual(utils.Timestamp(0), sr.meta_timestamp)
def test_update_meta(self):
ts_1 = next(self.ts_iter)
sr = utils.ShardRange('a/test', ts_1, 'l', 'u', 0, 0, None)
with mock_timestamp_now(next(self.ts_iter)) as now:
sr.update_meta(9, 99)
self.assertEqual(9, sr.object_count)
self.assertEqual(99, sr.bytes_used)
self.assertEqual(now, sr.meta_timestamp)
with mock_timestamp_now(next(self.ts_iter)) as now:
sr.update_meta(99, 999, None)
self.assertEqual(99, sr.object_count)
self.assertEqual(999, sr.bytes_used)
self.assertEqual(now, sr.meta_timestamp)
ts_2 = next(self.ts_iter)
sr.update_meta(21, 2112, ts_2)
self.assertEqual(21, sr.object_count)
self.assertEqual(2112, sr.bytes_used)
self.assertEqual(ts_2, sr.meta_timestamp)
sr.update_meta('11', '12')
self.assertEqual(11, sr.object_count)
self.assertEqual(12, sr.bytes_used)
def check_bad_args(*args):
with self.assertRaises(ValueError):
sr.update_meta(*args)
check_bad_args('bad', 10)
check_bad_args(10, 'bad')
check_bad_args(10, 11, 'bad')
def test_increment_meta(self):
ts_1 = next(self.ts_iter)
sr = utils.ShardRange('a/test', ts_1, 'l', 'u', 1, 2, None)
with mock_timestamp_now(next(self.ts_iter)) as now:
sr.increment_meta(9, 99)
self.assertEqual(10, sr.object_count)
self.assertEqual(101, sr.bytes_used)
self.assertEqual(now, sr.meta_timestamp)
sr.increment_meta('11', '12')
self.assertEqual(21, sr.object_count)
self.assertEqual(113, sr.bytes_used)
def check_bad_args(*args):
with self.assertRaises(ValueError):
sr.increment_meta(*args)
check_bad_args('bad', 10)
check_bad_args(10, 'bad')
def test_state_timestamp_setter(self):
ts_1 = next(self.ts_iter)
sr = utils.ShardRange('a/test', ts_1, 'l', 'u', 0, 0, None)
self.assertEqual(ts_1, sr.timestamp)
self.assertEqual(ts_1, sr.state_timestamp)
ts_2 = next(self.ts_iter)
sr.state_timestamp = ts_2
self.assertEqual(ts_1, sr.timestamp)
self.assertEqual(ts_2, sr.state_timestamp)
ts_3 = next(self.ts_iter)
sr.timestamp = ts_3
self.assertEqual(ts_3, sr.timestamp)
self.assertEqual(ts_2, sr.state_timestamp)
# state_timestamp defaults to tracking timestamp
sr.state_timestamp = None
self.assertEqual(ts_3, sr.timestamp)
self.assertEqual(ts_3, sr.state_timestamp)
ts_4 = next(self.ts_iter)
sr.timestamp = ts_4
self.assertEqual(ts_4, sr.timestamp)
self.assertEqual(ts_4, sr.state_timestamp)
sr.state_timestamp = 0
self.assertEqual(ts_4, sr.timestamp)
self.assertEqual(utils.Timestamp(0), sr.state_timestamp)
def test_state_setter(self):
for state in utils.ShardRange.STATES:
for test_value in (state, str(state)):
sr = utils.ShardRange('a/test', next(self.ts_iter), 'l', 'u')
sr.state = test_value
actual = sr.state
self.assertEqual(
state, actual,
'Expected %s but got %s for %s' %
(state, actual, test_value)
)
for bad_state in (max(utils.ShardRange.STATES) + 1,
-1, 99, None, 'stringy', 1.1):
sr = utils.ShardRange('a/test', next(self.ts_iter), 'l', 'u')
with self.assertRaises(ValueError) as cm:
sr.state = bad_state
self.assertIn('Invalid state', str(cm.exception))
def test_update_state(self):
sr = utils.ShardRange('a/c', next(self.ts_iter))
old_sr = sr.copy()
self.assertEqual(utils.ShardRange.FOUND, sr.state)
self.assertEqual(dict(sr), dict(old_sr)) # sanity check
for state in utils.ShardRange.STATES:
if state == utils.ShardRange.FOUND:
continue
self.assertTrue(sr.update_state(state))
self.assertEqual(dict(old_sr, state=state), dict(sr))
self.assertFalse(sr.update_state(state))
self.assertEqual(dict(old_sr, state=state), dict(sr))
sr = utils.ShardRange('a/c', next(self.ts_iter))
old_sr = sr.copy()
for state in utils.ShardRange.STATES:
ts = next(self.ts_iter)
self.assertTrue(sr.update_state(state, state_timestamp=ts))
self.assertEqual(dict(old_sr, state=state, state_timestamp=ts),
dict(sr))
def test_resolve_state(self):
for name, number in utils.ShardRange.STATES_BY_NAME.items():
self.assertEqual(
(number, name), utils.ShardRange.resolve_state(name))
self.assertEqual(
(number, name), utils.ShardRange.resolve_state(name.upper()))
self.assertEqual(
(number, name), utils.ShardRange.resolve_state(name.title()))
self.assertEqual(
(number, name), utils.ShardRange.resolve_state(number))
def check_bad_value(value):
with self.assertRaises(ValueError) as cm:
utils.ShardRange.resolve_state(value)
self.assertIn('Invalid state %r' % value, str(cm.exception))
check_bad_value(min(utils.ShardRange.STATES) - 1)
check_bad_value(max(utils.ShardRange.STATES) + 1)
check_bad_value('badstate')
def test_epoch_setter(self):
sr = utils.ShardRange('a/c', next(self.ts_iter))
self.assertIsNone(sr.epoch)
ts = next(self.ts_iter)
sr.epoch = ts
self.assertEqual(ts, sr.epoch)
ts = next(self.ts_iter)
sr.epoch = ts.internal
self.assertEqual(ts, sr.epoch)
sr.epoch = None
self.assertIsNone(sr.epoch)
with self.assertRaises(ValueError):
sr.epoch = 'bad'
def test_deleted_setter(self):
sr = utils.ShardRange('a/c', next(self.ts_iter))
for val in (True, 1):
sr.deleted = val
self.assertIs(True, sr.deleted)
for val in (False, 0, None):
sr.deleted = val
self.assertIs(False, sr.deleted)
def test_set_deleted(self):
sr = utils.ShardRange('a/c', next(self.ts_iter))
# initialise other timestamps
sr.update_state(utils.ShardRange.ACTIVE,
state_timestamp=utils.Timestamp.now())
sr.update_meta(1, 2)
old_sr = sr.copy()
self.assertIs(False, sr.deleted) # sanity check
self.assertEqual(dict(sr), dict(old_sr)) # sanity check
with mock_timestamp_now(next(self.ts_iter)) as now:
self.assertTrue(sr.set_deleted())
self.assertEqual(now, sr.timestamp)
self.assertIs(True, sr.deleted)
old_sr_dict = dict(old_sr)
old_sr_dict.pop('deleted')
old_sr_dict.pop('timestamp')
sr_dict = dict(sr)
sr_dict.pop('deleted')
sr_dict.pop('timestamp')
self.assertEqual(old_sr_dict, sr_dict)
# no change
self.assertFalse(sr.set_deleted())
self.assertEqual(now, sr.timestamp)
self.assertIs(True, sr.deleted)
# force timestamp change
with mock_timestamp_now(next(self.ts_iter)) as now:
self.assertTrue(sr.set_deleted(timestamp=now))
self.assertEqual(now, sr.timestamp)
self.assertIs(True, sr.deleted)
def test_lower_setter(self):
sr = utils.ShardRange('a/c', utils.Timestamp.now(), 'b', '')
# sanity checks
self.assertEqual('b', sr.lower)
self.assertEqual(sr.MAX, sr.upper)
def do_test(good_value, expected):
sr.lower = good_value
self.assertEqual(expected, sr.lower)
self.assertEqual(sr.MAX, sr.upper)
do_test(utils.ShardRange.MIN, utils.ShardRange.MIN)
do_test(utils.ShardRange.MAX, utils.ShardRange.MAX)
do_test('', utils.ShardRange.MIN)
do_test(u'', utils.ShardRange.MIN)
do_test(None, utils.ShardRange.MIN)
do_test('a', 'a')
do_test('y', 'y')
sr = utils.ShardRange('a/c', utils.Timestamp.now(), 'b', 'y')
sr.lower = ''
self.assertEqual(sr.MIN, sr.lower)
sr = utils.ShardRange('a/c', utils.Timestamp.now(), 'b', 'y')
with self.assertRaises(ValueError) as cm:
sr.lower = 'z'
self.assertIn("lower ('z') must be less than or equal to upper ('y')",
str(cm.exception))
self.assertEqual('b', sr.lower)
self.assertEqual('y', sr.upper)
def do_test(bad_value):
with self.assertRaises(TypeError) as cm:
sr.lower = bad_value
self.assertIn("lower must be a string", str(cm.exception))
self.assertEqual('b', sr.lower)
self.assertEqual('y', sr.upper)
do_test(1)
do_test(1.234)
def test_upper_setter(self):
sr = utils.ShardRange('a/c', utils.Timestamp.now(), '', 'y')
# sanity checks
self.assertEqual(sr.MIN, sr.lower)
self.assertEqual('y', sr.upper)
def do_test(good_value, expected):
sr.upper = good_value
self.assertEqual(expected, sr.upper)
self.assertEqual(sr.MIN, sr.lower)
do_test(utils.ShardRange.MIN, utils.ShardRange.MIN)
do_test(utils.ShardRange.MAX, utils.ShardRange.MAX)
do_test('', utils.ShardRange.MAX)
do_test(u'', utils.ShardRange.MAX)
do_test(None, utils.ShardRange.MAX)
do_test('z', 'z')
do_test('b', 'b')
sr = utils.ShardRange('a/c', utils.Timestamp.now(), 'b', 'y')
sr.upper = ''
self.assertEqual(sr.MAX, sr.upper)
sr = utils.ShardRange('a/c', utils.Timestamp.now(), 'b', 'y')
with self.assertRaises(ValueError) as cm:
sr.upper = 'a'
self.assertIn(
"upper ('a') must be greater than or equal to lower ('b')",
str(cm.exception))
self.assertEqual('b', sr.lower)
self.assertEqual('y', sr.upper)
def do_test(bad_value):
with self.assertRaises(TypeError) as cm:
sr.upper = bad_value
self.assertIn("upper must be a string", str(cm.exception))
self.assertEqual('b', sr.lower)
self.assertEqual('y', sr.upper)
do_test(1)
do_test(1.234)
def test_end_marker(self):
sr = utils.ShardRange('a/c', utils.Timestamp.now(), '', 'y')
self.assertEqual('y\x00', sr.end_marker)
sr = utils.ShardRange('a/c', utils.Timestamp.now(), '', '')
self.assertEqual('', sr.end_marker)
def test_bounds_serialization(self):
sr = utils.ShardRange('a/c', utils.Timestamp.now())
self.assertEqual('a/c', sr.name)
self.assertEqual(utils.ShardRange.MIN, sr.lower)
self.assertEqual('', sr.lower_str)
self.assertEqual(utils.ShardRange.MAX, sr.upper)
self.assertEqual('', sr.upper_str)
self.assertEqual('', sr.end_marker)
lower = u'\u00e4'
upper = u'\u00fb'
sr = utils.ShardRange('a/%s-%s' % (lower, upper),
utils.Timestamp.now(), lower, upper)
if six.PY3:
self.assertEqual(u'\u00e4', sr.lower)
self.assertEqual(u'\u00e4', sr.lower_str)
self.assertEqual(u'\u00fb', sr.upper)
self.assertEqual(u'\u00fb', sr.upper_str)
self.assertEqual(u'\u00fb\x00', sr.end_marker)
else:
self.assertEqual(u'\u00e4'.encode('utf8'), sr.lower)
self.assertEqual(u'\u00e4'.encode('utf8'), sr.lower_str)
self.assertEqual(u'\u00fb'.encode('utf8'), sr.upper)
self.assertEqual(u'\u00fb'.encode('utf8'), sr.upper_str)
self.assertEqual(u'\u00fb\x00'.encode('utf8'), sr.end_marker)
def test_entire_namespace(self):
# test entire range (no boundaries)
entire = utils.ShardRange('a/test', utils.Timestamp.now())
self.assertEqual(utils.ShardRange.MAX, entire.upper)
self.assertEqual(utils.ShardRange.MIN, entire.lower)
self.assertIs(True, entire.entire_namespace())
for x in range(100):
self.assertTrue(str(x) in entire)
self.assertTrue(chr(x) in entire)
for x in ('a', 'z', 'zzzz', '124fsdf', u'\u00e4'):
self.assertTrue(x in entire, '%r should be in %r' % (x, entire))
entire.lower = 'a'
self.assertIs(False, entire.entire_namespace())
def test_comparisons(self):
ts = utils.Timestamp.now().internal
        # upper (if provided) *must* be greater than or equal to lower
with self.assertRaises(ValueError):
utils.ShardRange('f-a', ts, 'f', 'a')
# test basic boundaries
btoc = utils.ShardRange('a/b-c', ts, 'b', 'c')
atof = utils.ShardRange('a/a-f', ts, 'a', 'f')
ftol = utils.ShardRange('a/f-l', ts, 'f', 'l')
ltor = utils.ShardRange('a/l-r', ts, 'l', 'r')
rtoz = utils.ShardRange('a/r-z', ts, 'r', 'z')
lower = utils.ShardRange('a/lower', ts, '', 'mid')
upper = utils.ShardRange('a/upper', ts, 'mid', '')
entire = utils.ShardRange('a/test', utils.Timestamp.now())
# overlapping ranges
dtof = utils.ShardRange('a/d-f', ts, 'd', 'f')
dtom = utils.ShardRange('a/d-m', ts, 'd', 'm')
# test range > and <
# non-adjacent
self.assertFalse(rtoz < atof)
self.assertTrue(atof < ltor)
self.assertTrue(ltor > atof)
self.assertFalse(ftol > rtoz)
# adjacent
self.assertFalse(rtoz < ltor)
self.assertTrue(ltor < rtoz)
self.assertFalse(ltor > rtoz)
self.assertTrue(rtoz > ltor)
# wholly within
self.assertFalse(btoc < atof)
self.assertFalse(btoc > atof)
self.assertFalse(atof < btoc)
self.assertFalse(atof > btoc)
self.assertFalse(atof < dtof)
self.assertFalse(dtof > atof)
self.assertFalse(atof > dtof)
self.assertFalse(dtof < atof)
self.assertFalse(dtof < dtom)
self.assertFalse(dtof > dtom)
self.assertFalse(dtom > dtof)
self.assertFalse(dtom < dtof)
# overlaps
self.assertFalse(atof < dtom)
self.assertFalse(atof > dtom)
self.assertFalse(ltor > dtom)
# ranges including min/max bounds
self.assertTrue(upper > lower)
self.assertTrue(lower < upper)
self.assertFalse(upper < lower)
self.assertFalse(lower > upper)
self.assertFalse(lower < entire)
self.assertFalse(entire > lower)
self.assertFalse(lower > entire)
self.assertFalse(entire < lower)
self.assertFalse(upper < entire)
self.assertFalse(entire > upper)
self.assertFalse(upper > entire)
self.assertFalse(entire < upper)
self.assertFalse(entire < entire)
self.assertFalse(entire > entire)
# test range < and > to an item
        # a range is > its lower bound and <= its upper bound, so the lower
        # boundary isn't actually included
self.assertTrue(ftol > 'f')
self.assertFalse(atof < 'f')
self.assertTrue(ltor < 'y')
self.assertFalse(ftol < 'f')
self.assertFalse(atof > 'f')
self.assertFalse(ltor > 'y')
self.assertTrue('f' < ftol)
self.assertFalse('f' > atof)
self.assertTrue('y' > ltor)
self.assertFalse('f' > ftol)
self.assertFalse('f' < atof)
self.assertFalse('y' < ltor)
# Now test ranges with only 1 boundary
start_to_l = utils.ShardRange('a/None-l', ts, '', 'l')
l_to_end = utils.ShardRange('a/l-None', ts, 'l', '')
for x in ('l', 'm', 'z', 'zzz1231sd'):
if x == 'l':
self.assertFalse(x in l_to_end)
self.assertFalse(start_to_l < x)
self.assertFalse(x > start_to_l)
else:
self.assertTrue(x in l_to_end)
self.assertTrue(start_to_l < x)
self.assertTrue(x > start_to_l)
# Now test some of the range to range checks with missing boundaries
self.assertFalse(atof < start_to_l)
self.assertFalse(start_to_l < entire)
# Now test ShardRange.overlaps(other)
self.assertTrue(atof.overlaps(atof))
self.assertFalse(atof.overlaps(ftol))
self.assertFalse(ftol.overlaps(atof))
self.assertTrue(atof.overlaps(dtof))
self.assertTrue(dtof.overlaps(atof))
self.assertFalse(dtof.overlaps(ftol))
self.assertTrue(dtom.overlaps(ftol))
self.assertTrue(ftol.overlaps(dtom))
self.assertFalse(start_to_l.overlaps(l_to_end))
def test_contains(self):
ts = utils.Timestamp.now().internal
lower = utils.ShardRange('a/-h', ts, '', 'h')
mid = utils.ShardRange('a/h-p', ts, 'h', 'p')
upper = utils.ShardRange('a/p-', ts, 'p', '')
entire = utils.ShardRange('a/all', ts, '', '')
self.assertTrue('a' in entire)
self.assertTrue('x' in entire)
# the empty string is not a valid object name, so it cannot be in any
# range
self.assertFalse('' in lower)
self.assertFalse('' in upper)
self.assertFalse('' in entire)
self.assertTrue('a' in lower)
self.assertTrue('h' in lower)
self.assertFalse('i' in lower)
self.assertFalse('h' in mid)
self.assertTrue('p' in mid)
self.assertFalse('p' in upper)
self.assertTrue('x' in upper)
self.assertIn(utils.ShardRange.MAX, entire)
self.assertNotIn(utils.ShardRange.MAX, lower)
self.assertIn(utils.ShardRange.MAX, upper)
# lower bound is excluded so MIN cannot be in any range.
self.assertNotIn(utils.ShardRange.MIN, entire)
self.assertNotIn(utils.ShardRange.MIN, upper)
self.assertNotIn(utils.ShardRange.MIN, lower)
def test_includes(self):
ts = utils.Timestamp.now().internal
_to_h = utils.ShardRange('a/-h', ts, '', 'h')
d_to_t = utils.ShardRange('a/d-t', ts, 'd', 't')
d_to_k = utils.ShardRange('a/d-k', ts, 'd', 'k')
e_to_l = utils.ShardRange('a/e-l', ts, 'e', 'l')
k_to_t = utils.ShardRange('a/k-t', ts, 'k', 't')
p_to_ = utils.ShardRange('a/p-', ts, 'p', '')
t_to_ = utils.ShardRange('a/t-', ts, 't', '')
entire = utils.ShardRange('a/all', ts, '', '')
self.assertTrue(entire.includes(entire))
self.assertTrue(d_to_t.includes(d_to_t))
self.assertTrue(_to_h.includes(_to_h))
self.assertTrue(p_to_.includes(p_to_))
self.assertTrue(entire.includes(_to_h))
self.assertTrue(entire.includes(d_to_t))
self.assertTrue(entire.includes(p_to_))
self.assertTrue(d_to_t.includes(d_to_k))
self.assertTrue(d_to_t.includes(e_to_l))
self.assertTrue(d_to_t.includes(k_to_t))
self.assertTrue(p_to_.includes(t_to_))
self.assertFalse(_to_h.includes(d_to_t))
self.assertFalse(p_to_.includes(d_to_t))
self.assertFalse(k_to_t.includes(d_to_k))
self.assertFalse(d_to_k.includes(e_to_l))
self.assertFalse(k_to_t.includes(e_to_l))
self.assertFalse(t_to_.includes(p_to_))
self.assertFalse(_to_h.includes(entire))
self.assertFalse(p_to_.includes(entire))
self.assertFalse(d_to_t.includes(entire))
def test_repr(self):
ts = next(self.ts_iter)
ts.offset = 1234
meta_ts = next(self.ts_iter)
state_ts = next(self.ts_iter)
sr = utils.ShardRange('a/c', ts, 'l', 'u', 100, 1000,
meta_timestamp=meta_ts,
state=utils.ShardRange.ACTIVE,
state_timestamp=state_ts)
self.assertEqual(
"ShardRange<'l' to 'u' as of %s, (100, 1000) as of %s, "
"active as of %s>"
% (ts.internal, meta_ts.internal, state_ts.internal), str(sr))
ts.offset = 0
meta_ts.offset = 2
state_ts.offset = 3
sr = utils.ShardRange('a/c', ts, '', '', 100, 1000,
meta_timestamp=meta_ts,
state=utils.ShardRange.FOUND,
state_timestamp=state_ts)
self.assertEqual(
"ShardRange<MinBound to MaxBound as of %s, (100, 1000) as of %s, "
"found as of %s>"
% (ts.internal, meta_ts.internal, state_ts.internal), str(sr))
def test_copy(self):
sr = utils.ShardRange('a/c', next(self.ts_iter), 'x', 'y', 99, 99000,
meta_timestamp=next(self.ts_iter),
state=utils.ShardRange.CREATED,
state_timestamp=next(self.ts_iter))
new = sr.copy()
self.assertEqual(dict(sr), dict(new))
new = sr.copy(deleted=1)
self.assertEqual(dict(sr, deleted=1), dict(new))
new_timestamp = next(self.ts_iter)
new = sr.copy(timestamp=new_timestamp)
self.assertEqual(dict(sr, timestamp=new_timestamp.internal,
meta_timestamp=new_timestamp.internal,
state_timestamp=new_timestamp.internal),
dict(new))
new = sr.copy(timestamp=new_timestamp, object_count=99)
self.assertEqual(dict(sr, timestamp=new_timestamp.internal,
meta_timestamp=new_timestamp.internal,
state_timestamp=new_timestamp.internal,
object_count=99),
dict(new))
def test_make_path(self):
ts = utils.Timestamp.now()
actual = utils.ShardRange.make_path('a', 'root', 'parent', ts, 0)
parent_hash = hashlib.md5(b'parent').hexdigest()
self.assertEqual('a/root-%s-%s-0' % (parent_hash, ts.internal), actual)
actual = utils.ShardRange.make_path('a', 'root', 'parent', ts, 3)
self.assertEqual('a/root-%s-%s-3' % (parent_hash, ts.internal), actual)
actual = utils.ShardRange.make_path('a', 'root', 'parent', ts, '3')
self.assertEqual('a/root-%s-%s-3' % (parent_hash, ts.internal), actual)
actual = utils.ShardRange.make_path(
'a', 'root', 'parent', ts.internal, '3')
self.assertEqual('a/root-%s-%s-3' % (parent_hash, ts.internal), actual)
actual = utils.ShardRange.make_path('a', 'root', 'parent', ts, 'foo')
self.assertEqual('a/root-%s-%s-foo' % (parent_hash, ts.internal),
actual)
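# The class-level patches below are passed to every test method bottom-up:
# _sys_fallocate first, then _sys_posix_fallocate, then ctypes.get_errno.
# FALLOCATE_RESERVE is patched with a plain value, so it adds no mock arg.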
@patch('ctypes.get_errno')
@patch.object(utils, '_sys_posix_fallocate')
@patch.object(utils, '_sys_fallocate')
@patch.object(utils, 'FALLOCATE_RESERVE', 0)
class TestFallocate(unittest.TestCase):
def test_fallocate(self, sys_fallocate_mock,
sys_posix_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = True
sys_fallocate_mock.return_value = 0
utils.fallocate(1234, 5000 * 2 ** 20)
# We can't use sys_fallocate_mock.assert_called_once_with because no
# two ctypes.c_uint64 objects are equal even if their values are
# equal. Yes, ctypes.c_uint64(123) != ctypes.c_uint64(123).
calls = sys_fallocate_mock.mock_calls
self.assertEqual(len(calls), 1)
args = calls[0][1]
self.assertEqual(len(args), 4)
self.assertEqual(args[0], 1234)
self.assertEqual(args[1], utils.FALLOC_FL_KEEP_SIZE)
self.assertEqual(args[2].value, 0)
self.assertEqual(args[3].value, 5000 * 2 ** 20)
sys_posix_fallocate_mock.assert_not_called()
def test_fallocate_offset(self, sys_fallocate_mock,
sys_posix_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = True
sys_fallocate_mock.return_value = 0
utils.fallocate(1234, 5000 * 2 ** 20, offset=3 * 2 ** 30)
calls = sys_fallocate_mock.mock_calls
self.assertEqual(len(calls), 1)
args = calls[0][1]
self.assertEqual(len(args), 4)
self.assertEqual(args[0], 1234)
self.assertEqual(args[1], utils.FALLOC_FL_KEEP_SIZE)
self.assertEqual(args[2].value, 3 * 2 ** 30)
self.assertEqual(args[3].value, 5000 * 2 ** 20)
sys_posix_fallocate_mock.assert_not_called()
def test_fallocate_fatal_error(self, sys_fallocate_mock,
sys_posix_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = True
sys_fallocate_mock.return_value = -1
get_errno_mock.return_value = errno.EIO
with self.assertRaises(OSError) as cm:
utils.fallocate(1234, 5000 * 2 ** 20)
self.assertEqual(cm.exception.errno, errno.EIO)
def test_fallocate_silent_errors(self, sys_fallocate_mock,
sys_posix_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = True
sys_fallocate_mock.return_value = -1
for silent_error in (0, errno.ENOSYS, errno.EOPNOTSUPP, errno.EINVAL):
get_errno_mock.return_value = silent_error
try:
utils.fallocate(1234, 5678)
except OSError:
                self.fail("fallocate() raised an error on %d" % silent_error)
def test_posix_fallocate_fallback(self, sys_fallocate_mock,
sys_posix_fallocate_mock,
get_errno_mock):
sys_fallocate_mock.available = False
sys_fallocate_mock.side_effect = NotImplementedError
sys_posix_fallocate_mock.available = True
sys_posix_fallocate_mock.return_value = 0
utils.fallocate(1234, 567890)
sys_fallocate_mock.assert_not_called()
calls = sys_posix_fallocate_mock.mock_calls
self.assertEqual(len(calls), 1)
args = calls[0][1]
self.assertEqual(len(args), 3)
self.assertEqual(args[0], 1234)
self.assertEqual(args[1].value, 0)
self.assertEqual(args[2].value, 567890)
def test_posix_fallocate_offset(self, sys_fallocate_mock,
sys_posix_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = False
sys_fallocate_mock.side_effect = NotImplementedError
sys_posix_fallocate_mock.available = True
sys_posix_fallocate_mock.return_value = 0
utils.fallocate(1234, 5000 * 2 ** 20, offset=3 * 2 ** 30)
calls = sys_posix_fallocate_mock.mock_calls
self.assertEqual(len(calls), 1)
args = calls[0][1]
self.assertEqual(len(args), 3)
self.assertEqual(args[0], 1234)
self.assertEqual(args[1].value, 3 * 2 ** 30)
self.assertEqual(args[2].value, 5000 * 2 ** 20)
sys_fallocate_mock.assert_not_called()
def test_no_fallocates_available(self, sys_fallocate_mock,
sys_posix_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = False
sys_posix_fallocate_mock.available = False
with mock.patch("logging.warning") as warning_mock, \
mock.patch.object(utils, "_fallocate_warned_about_missing",
False):
utils.fallocate(321, 654)
utils.fallocate(321, 654)
sys_fallocate_mock.assert_not_called()
sys_posix_fallocate_mock.assert_not_called()
get_errno_mock.assert_not_called()
self.assertEqual(len(warning_mock.mock_calls), 1)
def test_arg_bounds(self, sys_fallocate_mock,
sys_posix_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = True
sys_fallocate_mock.return_value = 0
with self.assertRaises(ValueError):
utils.fallocate(0, 1 << 64, 0)
with self.assertRaises(ValueError):
utils.fallocate(0, 0, -1)
with self.assertRaises(ValueError):
utils.fallocate(0, 0, 1 << 64)
self.assertEqual([], sys_fallocate_mock.mock_calls)
# sanity check
utils.fallocate(0, 0, 0)
self.assertEqual(
[mock.call(0, utils.FALLOC_FL_KEEP_SIZE, mock.ANY, mock.ANY)],
sys_fallocate_mock.mock_calls)
# Go confirm the ctypes values separately; apparently == doesn't
# work the way you'd expect with ctypes :-/
self.assertEqual(sys_fallocate_mock.mock_calls[0][1][2].value, 0)
self.assertEqual(sys_fallocate_mock.mock_calls[0][1][3].value, 0)
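# As above, the class-level patches are passed to each test bottom-up:
# _sys_fallocate first, then os.fstatvfs; the plain-value patches add no
# mock arguments.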
@patch.object(os, 'fstatvfs')
@patch.object(utils, '_sys_fallocate', available=True, return_value=0)
@patch.object(utils, 'FALLOCATE_RESERVE', 0)
@patch.object(utils, 'FALLOCATE_IS_PERCENT', False)
@patch.object(utils, '_fallocate_enabled', True)
class TestFallocateReserve(unittest.TestCase):
def _statvfs_result(self, f_frsize, f_bavail):
# Only 3 values are relevant to us, so use zeros for the rest
f_blocks = 100
return posix.statvfs_result((0, f_frsize, f_blocks, 0, f_bavail,
0, 0, 0, 0, 0))
def test_disabled(self, sys_fallocate_mock, fstatvfs_mock):
utils.disable_fallocate()
utils.fallocate(123, 456)
sys_fallocate_mock.assert_not_called()
fstatvfs_mock.assert_not_called()
def test_zero_reserve(self, sys_fallocate_mock, fstatvfs_mock):
utils.fallocate(123, 456)
fstatvfs_mock.assert_not_called()
self.assertEqual(len(sys_fallocate_mock.mock_calls), 1)
def test_enough_space(self, sys_fallocate_mock, fstatvfs_mock):
# Want 1024 bytes in reserve plus 1023 allocated, and have 2 blocks
# of size 1024 free, so succeed
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1024')
fstatvfs_mock.return_value = self._statvfs_result(1024, 2)
utils.fallocate(88, 1023)
def test_not_enough_space(self, sys_fallocate_mock, fstatvfs_mock):
# Want 1024 bytes in reserve plus 1024 allocated, and have 2 blocks
# of size 1024 free, so fail
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1024')
fstatvfs_mock.return_value = self._statvfs_result(1024, 2)
with self.assertRaises(OSError) as catcher:
utils.fallocate(88, 1024)
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024'
% errno.ENOSPC)
sys_fallocate_mock.assert_not_called()
def test_not_enough_space_large(self, sys_fallocate_mock, fstatvfs_mock):
# Want 1024 bytes in reserve plus 1GB allocated, and have 2 blocks
# of size 1024 free, so fail
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1024')
fstatvfs_mock.return_value = self._statvfs_result(1024, 2)
with self.assertRaises(OSError) as catcher:
utils.fallocate(88, 1 << 30)
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail %g <= 1024'
% (errno.ENOSPC, ((2 * 1024) - (1 << 30))))
sys_fallocate_mock.assert_not_called()
def test_enough_space_small_blocks(self, sys_fallocate_mock,
fstatvfs_mock):
# Want 1024 bytes in reserve plus 1023 allocated, and have 4 blocks
# of size 512 free, so succeed
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1024')
fstatvfs_mock.return_value = self._statvfs_result(512, 4)
utils.fallocate(88, 1023)
def test_not_enough_space_small_blocks(self, sys_fallocate_mock,
fstatvfs_mock):
# Want 1024 bytes in reserve plus 1024 allocated, and have 4 blocks
# of size 512 free, so fail
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1024')
fstatvfs_mock.return_value = self._statvfs_result(512, 4)
with self.assertRaises(OSError) as catcher:
utils.fallocate(88, 1024)
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024'
% errno.ENOSPC)
sys_fallocate_mock.assert_not_called()
def test_free_space_under_reserve(self, sys_fallocate_mock, fstatvfs_mock):
# Want 2048 bytes in reserve but have only 3 blocks of size 512, so
# allocating even 0 bytes fails
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('2048')
fstatvfs_mock.return_value = self._statvfs_result(512, 3)
with self.assertRaises(OSError) as catcher:
utils.fallocate(88, 0)
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 1536 <= 2048'
% errno.ENOSPC)
sys_fallocate_mock.assert_not_called()
def test_all_reserved(self, sys_fallocate_mock, fstatvfs_mock):
# Filesystem is empty, but our reserve is bigger than the
# filesystem, so any allocation will fail
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('9999999999999')
fstatvfs_mock.return_value = self._statvfs_result(1024, 100)
self.assertRaises(OSError, utils.fallocate, 88, 0)
sys_fallocate_mock.assert_not_called()
def test_enough_space_pct(self, sys_fallocate_mock, fstatvfs_mock):
# Want 1% reserved, filesystem has 3/100 blocks of size 1024 free
# and file size is 2047, so succeed
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1%')
fstatvfs_mock.return_value = self._statvfs_result(1024, 3)
utils.fallocate(88, 2047)
def test_not_enough_space_pct(self, sys_fallocate_mock, fstatvfs_mock):
# Want 1% reserved, filesystem has 3/100 blocks of size 1024 free
# and file size is 2048, so fail
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1%')
fstatvfs_mock.return_value = self._statvfs_result(1024, 3)
with self.assertRaises(OSError) as catcher:
utils.fallocate(88, 2048)
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 1 <= 1'
% errno.ENOSPC)
sys_fallocate_mock.assert_not_called()
def test_all_space_reserved_pct(self, sys_fallocate_mock, fstatvfs_mock):
# Filesystem is empty, but our reserve is the whole filesystem, so
# any allocation will fail
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('100%')
fstatvfs_mock.return_value = self._statvfs_result(1024, 100)
with self.assertRaises(OSError) as catcher:
utils.fallocate(88, 0)
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 100 <= 100'
% errno.ENOSPC)
sys_fallocate_mock.assert_not_called()
@patch('ctypes.get_errno')
@patch.object(utils, '_sys_fallocate')
class TestPunchHole(unittest.TestCase):
def test_punch_hole(self, sys_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = True
sys_fallocate_mock.return_value = 0
utils.punch_hole(123, 456, 789)
calls = sys_fallocate_mock.mock_calls
self.assertEqual(len(calls), 1)
args = calls[0][1]
self.assertEqual(len(args), 4)
self.assertEqual(args[0], 123)
self.assertEqual(
args[1], utils.FALLOC_FL_PUNCH_HOLE | utils.FALLOC_FL_KEEP_SIZE)
self.assertEqual(args[2].value, 456)
self.assertEqual(args[3].value, 789)
def test_error(self, sys_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = True
sys_fallocate_mock.return_value = -1
get_errno_mock.return_value = errno.EISDIR
with self.assertRaises(OSError) as cm:
utils.punch_hole(123, 456, 789)
self.assertEqual(cm.exception.errno, errno.EISDIR)
def test_arg_bounds(self, sys_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = True
sys_fallocate_mock.return_value = 0
with self.assertRaises(ValueError):
utils.punch_hole(0, 1, -1)
with self.assertRaises(ValueError):
utils.punch_hole(0, -1, 1)
with self.assertRaises(ValueError):
utils.punch_hole(0, 1, 0)
with self.assertRaises(ValueError):
utils.punch_hole(0, 1, 1 << 64)
self.assertEqual([], sys_fallocate_mock.mock_calls)
# sanity check
utils.punch_hole(0, 0, 1)
self.assertEqual(
[mock.call(
0, utils.FALLOC_FL_PUNCH_HOLE | utils.FALLOC_FL_KEEP_SIZE,
mock.ANY, mock.ANY)],
sys_fallocate_mock.mock_calls)
# Go confirm the ctypes values separately; apparently == doesn't
# work the way you'd expect with ctypes :-/
self.assertEqual(sys_fallocate_mock.mock_calls[0][1][2].value, 0)
self.assertEqual(sys_fallocate_mock.mock_calls[0][1][3].value, 1)
def test_no_fallocate(self, sys_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = False
with self.assertRaises(OSError) as cm:
utils.punch_hole(123, 456, 789)
self.assertEqual(cm.exception.errno, errno.ENOTSUP)
class TestPunchHoleReally(unittest.TestCase):
def setUp(self):
if not utils._sys_fallocate.available:
raise unittest.SkipTest("utils._sys_fallocate not available")
def test_punch_a_hole(self):
with TemporaryFile() as tf:
tf.write(b"x" * 64 + b"y" * 64 + b"z" * 64)
tf.flush()
# knock out the first half of the "y"s
utils.punch_hole(tf.fileno(), 64, 32)
tf.seek(0)
contents = tf.read(4096)
self.assertEqual(
contents,
b"x" * 64 + b"\0" * 32 + b"y" * 32 + b"z" * 64)
class Test_LibcWrapper(unittest.TestCase):
def test_available_function(self):
# This should pretty much always exist
getpid_wrapper = utils._LibcWrapper('getpid')
self.assertTrue(getpid_wrapper.available)
self.assertEqual(getpid_wrapper(), os.getpid())
def test_unavailable_function(self):
# This won't exist
no_func_wrapper = utils._LibcWrapper('diffractively_protectorship')
self.assertFalse(no_func_wrapper.available)
self.assertRaises(NotImplementedError, no_func_wrapper)
def test_argument_plumbing(self):
lseek_wrapper = utils._LibcWrapper('lseek')
with TemporaryFile() as tf:
tf.write(b"abcdefgh")
tf.flush()
lseek_wrapper(tf.fileno(),
ctypes.c_uint64(3),
# 0 is SEEK_SET
0)
self.assertEqual(tf.read(100), b"defgh")
| [] | [] | [
"TZ",
"HOME"
] | [] | ["TZ", "HOME"] | python | 2 | 0 | |
internal/gather/processor_backend_runner.go | /*-------------------------------------------------------------------------
*
* processor_backend_runner.go
* Polardb backend runner
*
*
* Copyright (c) 2021, Alibaba Group Holding Limited
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* IDENTIFICATION
* internal/gather/processor_backend_runner.go
*-------------------------------------------------------------------------
*/
package gather
import (
"fmt"
"net"
"strconv"
"github.com/ApsaraDB/PolarDB-NodeAgent/common/consts"
"github.com/ApsaraDB/PolarDB-NodeAgent/common/log"
)
var g_HostIP string
var PluginNameFormalizeMap map[string]string
var DatatypeFormalizeMap map[string]string
func (runner *Runner) buildBackendCtxMap(
backCtxMap map[string]interface{},
collectHeaderMap map[string]interface{},
collectContentMap interface{},
collectStartTime int64,
ins *Instance) {
backCtxMap["logical_ins_name"] = ins.env["logical_ins_name"]
backCtxMap["physical_ins_name"] = ins.env["physical_ins_name"]
backCtxMap["backend_hostname"] = collectHeaderMap[consts.SchemaHeaderHostname].(string)
backCtxMap["backend_host"] = getHostIP()
backCtxMap["backend_port"] = collectHeaderMap[consts.SchemaHeaderPort].(string)
backCtxMap["time"] = collectStartTime
// normalization for plugin name
if name, ok := PluginNameFormalizeMap[runner.pInfo.Name]; ok {
backCtxMap["plugin"] = name
} else {
backCtxMap["plugin"] = runner.pInfo.Name
}
if datatype, ok := DatatypeFormalizeMap[backCtxMap["plugin"].(string)]; ok {
backCtxMap["datatype"] = datatype
} else {
backCtxMap["datatype"] = backCtxMap["plugin"].(string)
}
backCtxMap["collect_type"] = runner.externConf.Type
backCtxMap["schema"] = runner.schema
}
func (runner *Runner) getBackCtxAndRunFunc(backend string,
backCtxesMap map[string]interface{},
backRunMap map[string]func(interface{}, interface{}) error) (map[string]interface{},
func(interface{}, interface{}) error, error) {
var backRun func(interface{}, interface{}) error
var backCtxMapI interface{}
var backCtxMap map[string]interface{}
var ok bool
if backCtxMapI, ok = backCtxesMap[backend]; !ok {
log.Warn("[multibackend_runner] processor context not found",
log.String("context", backend))
return nil, nil, fmt.Errorf("backend ctx not found %s", backend)
}
if backRun, ok = backRunMap[backend]; !ok {
log.Warn("[multibackend_runner] processor not found",
log.String("processor", backend))
return nil, nil, fmt.Errorf("backend func not found %s", backend)
}
if backCtxMap, ok = backCtxMapI.(map[string]interface{}); !ok {
log.Warn("[multibackend_runner] backend ctx type not map[string]interface{}",
log.String("context", backend))
return nil, nil, fmt.Errorf("backend %s ctx type not map[string]interface{}", backend)
}
return backCtxMap, backRun, nil
}
func (runner *Runner) runProcessorBackends(
backCtxesMap map[string]interface{},
backRunMap map[string]func(interface{}, interface{}) error,
collectHeaderMap map[string]interface{},
collectContentMap interface{},
collectStartTime int64,
ins *Instance) {
processContent := make(map[string]interface{})
processContent["in"] = collectContentMap
// 1. processor
if runner.pInfo.Processor != "" {
backCtxMap, backRun, err := runner.getBackCtxAndRunFunc(runner.pInfo.Processor,
backCtxesMap, backRunMap)
if err != nil {
log.Warn("[multibackend_runner] panic processor fail.")
return
}
backCtxMap["processor"] = backCtxMap["backend"]
runner.buildBackendCtxMap(backCtxMap, collectHeaderMap, collectContentMap,
collectStartTime, ins)
err = backRun(backCtxMap, processContent)
if err != nil {
log.Error("[multibackend_runner] panic processor fail.")
return
}
}
// 2. multibackend
backendIdxMap := make(map[string]int)
for _, backend := range runner.pInfo.ProcessorBackends {
backCtxMap, backRun, err := runner.getBackCtxAndRunFunc(backend, backCtxesMap, backRunMap)
if err != nil {
log.Warn("[multibackend_runner] panic backend fail.")
return
}
if idx, iok := backendIdxMap[backend]; iok {
backCtxMap["backend_idx"] = idx + 1
backendIdxMap[backend] = idx + 1
} else {
backCtxMap["backend_idx"] = 0
backendIdxMap[backend] = 0
}
if separator, ok := runner.externConf.Context["line_separator"].(string); ok {
backCtxMap["line_separator"] = separator
}
runner.buildBackendCtxMap(backCtxMap, collectHeaderMap, collectContentMap,
collectStartTime, ins)
if runner.pInfo.Processor != "" {
err = backRun(backCtxMap, processContent["out"])
if err != nil {
log.Error("[multibackend_runner] panic backend fail",
log.String("module", ins.pInfo.Name),
log.String("backend", backend),
log.Int("port", ins.port),
log.String("ctx", fmt.Sprintf("%+v", backCtxMap)),
log.String("err", err.Error()))
}
} else {
err = backRun(backCtxMap, collectContentMap)
if err != nil {
log.Error("[multibackend_runner] panic backend fail without processor",
log.String("module", ins.pInfo.Name),
log.String("backend", backend),
log.Int("port", ins.port),
log.String("ctx", fmt.Sprintf("%+v", backCtxMap)),
log.String("err", err.Error()))
}
}
}
}
func (runner *Runner) exitProcessorBackends(initCtx interface{}, ins *Instance) error {
backCtxMap := make(map[string]interface{})
backendIdxMap := make(map[string]int)
// normalization for plugin name
if name, ok := PluginNameFormalizeMap[runner.pInfo.Name]; ok {
backCtxMap["plugin"] = name
} else {
backCtxMap["plugin"] = runner.pInfo.Name
}
if datatype, ok := DatatypeFormalizeMap[backCtxMap["plugin"].(string)]; ok {
backCtxMap["datatype"] = datatype
} else {
backCtxMap["datatype"] = backCtxMap["plugin"].(string)
}
backCtxMap["logical_ins_name"] = ins.env["logical_ins_name"]
backCtxMap["physical_ins_name"] = ins.env["physical_ins_name"]
backCtxMap["backend_port"] = strconv.Itoa(ins.port)
for _, backend := range runner.pInfo.ProcessorBackends {
module, ok := runner.mInfoMap.Load(backend)
if !ok {
err := fmt.Errorf("backend:%s module not found,plugin:%s", backend, ins.pInfo.Name)
log.Error("[multibackend_runner] load backend failed", log.String("error", err.Error()))
return err
}
backendModule, ok := module.(*ModuleInfo)
if !ok {
err := fmt.Errorf("backend:%s module not match,plugin:%s", backend, ins.pInfo.Name)
log.Error("[multibackend_runner] module not match", log.String("error", err.Error()))
return err
}
ctx, ok := backendModule.Contexts.Load(backend)
if !ok {
err := fmt.Errorf("backend:%s ctx not found,plugin:%s", backend, ins.pInfo.Name)
log.Error("[multibackend_runner] ctx not found", log.String("error", err.Error()))
return err
}
backCtxMap["backend"] = ctx
if idx, iok := backendIdxMap[backend]; iok {
backCtxMap["backend_idx"] = idx + 1
backendIdxMap[backend] = idx + 1
} else {
backCtxMap["backend_idx"] = 0
backendIdxMap[backend] = 0
}
backExit := backendModule.PluginABI.Exit
err := backExit(backCtxMap)
if err != nil {
log.Error("[runner] backend exit failed.",
log.String("module", ins.pInfo.Name),
log.String("backend", backend),
log.Int("port", ins.port),
log.String("ctx", fmt.Sprintf("%+v", ctx)),
log.String("err", err.Error()))
}
}
return nil
}
func getHostIP() string {
if g_HostIP != "" {
return g_HostIP
}
addrs, err := net.InterfaceAddrs()
if err != nil {
return ""
}
for _, address := range addrs {
// check the address type and if it is not a loopback then display it
if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
if ipnet.IP.To4() != nil {
g_HostIP = ipnet.IP.String()
return g_HostIP
}
}
}
return ""
}
func init() {
PluginNameFormalizeMap = map[string]string{
"golang-collector-polardb_pg": "polardb_pg_collector",
"golang-collector-polardb_pg_k8s": "polardb_pg_collector",
"golang-collector-polardb_pg_opensource_refactor": "polardb_pg_collector",
"golang-collector-polarbox_oracle_perf": "polardb_pg_collector",
"golang-collector-polardb_pg_multidimension": "polardb_pg_multidimension_collector",
"golang-collector-polardb_pg_multidimension_k8s": "polardb_pg_multidimension_collector",
"golang-collector-polarbox-maxscale-perf": "maxscale_perf",
"golang-collector-sar": "sar",
"golang-collector-cluster-manager-eventlog": "cluster_manager_eventlog",
"golang-collector-polardb_pg_errorlog": "polardb_pg_errlog",
"golang-collector-polarbox_oracle_errorlog": "polardb_pg_errlog",
}
DatatypeFormalizeMap = map[string]string{
"polardb_pg_collector": "polardb-o",
"polardb_pg_multidimension_collector": "polardb-o",
"maxscale_perf": "maxscale",
"sar": "host",
"cluster_manager_eventlog": "cluster_manager",
"polardb_pg_errlog": "polardb-o",
}
}
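// Illustrative example (added for clarity; values taken from the maps above):
// buildBackendCtxMap normalizes names in two steps, first plugin name, then
// datatype, falling back to the raw value when a key is missing:
//
//	raw := "golang-collector-sar"
//	plugin := PluginNameFormalizeMap[raw]    // "sar"
//	datatype := DatatypeFormalizeMap[plugin] // "host"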
| [] | [] | [] | [] | [] | go | null | null | null |
actionkit_templates/aktemplates.py | #!/usr/bin/env python
import os
import sys
#STATIC_FALLBACK="/static/js/fallback_local" PYTHONPATH=./djanger django-admin runserver --settings=settings
def serve_templates():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "actionkit_templates.settings")
sys.path.append(os.getcwd()) # run `aktemplate test` from the same dir where tests live
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
if __name__ == "__main__":
serve_templates()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
main.go | package main
import (
"bufio"
"fmt"
"os"
"os/exec"
"os/user"
"regexp"
"strings"
)
var Host, HomeDir, User string
var HomeDirRegex *regexp.Regexp
var scanner = bufio.NewScanner(os.Stdin)
func getCommandInput() []string {
var input string
if scanner.Scan() {
input = scanner.Text()
}
return strings.Split(input, " ")
}
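// Note: strings.Split(input, " ") produces empty tokens when the user types
// consecutive spaces. A sketch of a more forgiving variant (an alternative,
// not what this shell currently does) would split on any run of whitespace:
//
//	return strings.Fields(input)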
var (
ANSIReset = "\033[0m"
ANSIRedBold = "\033[31;1m"
ANSIWhiteBold = "\033[97;1m"
ANSICyan = "\033[0;36m"
)
func printPrompt() {
workDir, err := os.Getwd()
if err != nil {
panic("Cannot get current working directory!")
}
var prompt rune
if os.Getuid() == 0 {
prompt = '#'
} else {
prompt = '$'
}
truncatedWorkDir := HomeDirRegex.ReplaceAllLiteralString(workDir, "~")
fmt.Printf("%s%s@%s%s:%s%s %s%c ", ANSIRedBold, User, Host, ANSIWhiteBold,
ANSICyan, truncatedWorkDir, ANSIReset, prompt)
}
func isBuiltin(command []string) bool {
if command[0] == "exit" {
os.Exit(0)
return true
} else if command[0] == "cd" {
if len(command) == 1 {
os.Chdir(HomeDir)
} else {
os.Chdir(command[1])
}
return true
}
return false
}
func main() {
HomeDir = os.Getenv("HOME")
HomeDirRegex = regexp.MustCompile(fmt.Sprintf("^%s", HomeDir))
user, err := user.Current()
if err != nil {
panic("Cannot get current system user")
}
User = user.Username
Host, _ = os.Hostname()
for {
printPrompt()
input := getCommandInput()
if len(input) > 0 && input[0] != "" {
var cmd *exec.Cmd
if len(input) == 1 {
cmd = exec.Command(input[0])
} else if len(input) > 1 {
cmd = exec.Command(input[0], input[1:]...)
}
if isBuiltin(input) {
continue
}
cmd.Stdout = os.Stdout
cmd.Stdin = os.Stdin
cmd.Stderr = os.Stderr
if err = cmd.Run(); err != nil {
fmt.Printf("hmsh: %s\n", err)
}
}
}
}
| [
"\"HOME\""
] | [] | [
"HOME"
] | [] | ["HOME"] | go | 1 | 0 | |
test/generator_test.py | import torch
import torch.nn as nn
import torch.utils.data
import h5py
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
from skimage.transform import resize
import os
os.environ['CUDA_VISIBLE_DEVICES']='2'
# Hyper-parameters
latent_size = 5184
hidden_size = 1728
image_size = 216
num_epochs = 200
batch_size = 128
sample_dir = '/home/nhjeong/MLPGAN/db' # Directory of database
# Generator
G = nn.Sequential(
nn.Linear(latent_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, image_size))
# Device configuration
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:2" if use_cuda else "cpu")
G = torch.load('weight_sample.pkl').to(device)  # the loaded model replaces the Sequential defined above; keep it on the same device as the inputs
class MyDataset(torch.utils.data.Dataset):
def __init__(self, train=True):
self.train = train
if self.train:
self.train_X_mat = h5py.File(os.path.join(sample_dir, 'db.mat'), 'r')
self.train_X_input = self.train_X_mat['db'][:]
self.train_Y_mat = h5py.File(os.path.join(sample_dir, 'gt.mat'), 'r')
self.train_Y_input = self.train_Y_mat['gt'][:]
self.train_X_mat.close()
self.train_Y_mat.close()
else:
self.test_X_mat = h5py.File(os.path.join(sample_dir, 'test_db.mat'), 'r')
self.test_X_input = self.test_X_mat['test_db'][:]
self.test_Y_mat = h5py.File(os.path.join(sample_dir, 'test_gt.mat'), 'r')
self.test_Y_input = self.test_Y_mat['test_gt'][:]
self.test_X_mat.close()
self.test_Y_mat.close()
def __len__(self):
if self.train:
return self.train_X_input.shape[0]
else:
return self.test_X_input.shape[0]
def __getitem__(self, index):
if self.train:
raw, target = self.train_X_input[index,], self.train_Y_input[index,]
else:
raw, target = self.test_X_input[index,], self.test_Y_input[index,]
return raw, target
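# A minimal sketch (assumed usage, not part of the original script) of wrapping
# MyDataset in a DataLoader for batched evaluation instead of pushing the whole
# test matrix through G in one call; batch_size is the hyper-parameter above:
#
#     test_loader = torch.utils.data.DataLoader(
#         MyDataset(train=False), batch_size=batch_size, shuffle=False)
#     with torch.no_grad():
#         batches = [G(x.float().to(device)).cpu() for x, _ in test_loader]
#     test_result = torch.cat(batches).numpy()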
testset = MyDataset(train=False)
output = G(torch.tensor(testset.test_X_input).to(device))
test_result = output.cpu().detach().numpy()
nrmse = []
for i in range(36):
tmp = testset.test_X_input[384*i:384*(i+1),0:2592] + 1j*testset.test_X_input[384*i:384*(i+1),2592:5184]
undersampled = np.zeros((384, 216))
for k in range(12):
undersampled += np.abs(tmp[:,k*216:(k+1)*216])
ans = testset.test_Y_input[384*i:384*(i+1),:]
pred = test_result[384*i:384*(i+1),:]
error = ans - pred
rmse = (np.sum(error ** 2) / np.sum(ans ** 2)) ** 0.5
plt.figure(figsize=[40, 10])
plt.subplot(1,4,1)
plt.imshow(resize(undersampled, (216, 216), preserve_range=True))
plt.title('Aliased image')
plt.axis('off')
plt.subplot(1,4,2)
plt.imshow(resize(pred, (216, 216), preserve_range=True))
plt.title('Predicted image')
plt.axis('off')
plt.subplot(1,4,3)
plt.imshow(resize(ans, (216, 216), preserve_range=True))
plt.title('Ground truth')
plt.axis('off')
plt.subplot(1,4,4)
plt.imshow(resize(np.abs(error), (216, 216), preserve_range=True), clim=[0,1])
plt.title('Difference')
plt.axis('off')
plt.savefig('test'+str(i+1))
plt.show()
nrmse.append(rmse)
print('Saved Fig. %d' % (i+1))
print('nRMSE: %.3lf %%' % (np.mean(nrmse)*100))
| [] | [] | [
"CUDA_VISIBLE_DEVICES"
] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
setup.py | from setuptools import setup, find_packages
import os
# Taken from setup.py in seaborn.
# Temporarily redirect the config directory to prevent matplotlib, on import,
# from testing for a writeable directory, which results in a sandbox error with
# certain easy_install versions.
os.environ["MPLCONFIGDIR"]="."
# Modified from from setup.py in seaborn.
try:
from setuptools import setup
_has_setuptools=True
except ImportError:
from distutils.core import setup
def check_dependencies():
to_install=[]
try:
import numpy
except ImportError:
to_install.append('numpy>=1.13.1')
try:
import scipy
except ImportError:
to_install.append('scipy>=0.19.1')
try:
import matplotlib
except ImportError:
to_install.append('matplotlib>=2.0.2')
try:
import pandas
if int(pandas.__version__.split('.')[1])<20:
to_install.append('pandas>=0.20.1')
except ImportError:
to_install.append('pandas>=0.20.1')
try:
import seaborn
except ImportError:
to_install.append('seaborn>0.8')
return to_install
if __name__=="__main__":
installs=check_dependencies()
setup(name='bootstrap_contrast',
author='Joses Ho',
author_email='[email protected]',
version='1.0',  # setuptools expects the version as a string
description='Calculation and Visualization of Confidence Intervals and Effect Sizes for Python.',
packages=find_packages(),
install_requires=installs,
url='http://github.com/josesho/bootstrap_contrast',
license='MIT'
)
| [] | [] | [
"MPLCONFIGDIR"
] | [] | ["MPLCONFIGDIR"] | python | 1 | 0 | |
appdata_test.go | // Copyright (c) 2013-2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package btcutil_test
import (
"os"
"os/user"
"path/filepath"
"runtime"
"testing"
"unicode"
"github.com/FactomProject/btcutil"
)
// TestAppDataDir tests the API for AppDataDir to ensure it gives expected
// results for various operating systems.
func TestAppDataDir(t *testing.T) {
// App name plus upper and lowercase variants.
appName := "myapp"
appNameUpper := string(unicode.ToUpper(rune(appName[0]))) + appName[1:]
appNameLower := string(unicode.ToLower(rune(appName[0]))) + appName[1:]
// When we're on Windows, set the expected local and roaming directories
// per the environment vars. When we aren't on Windows, the function
// should return the current directory when forced to provide the
// Windows path since the environment variables won't exist.
winLocal := "."
winRoaming := "."
if runtime.GOOS == "windows" {
localAppData := os.Getenv("LOCALAPPDATA")
roamingAppData := os.Getenv("APPDATA")
if localAppData == "" {
localAppData = roamingAppData
}
winLocal = filepath.Join(localAppData, appNameUpper)
winRoaming = filepath.Join(roamingAppData, appNameUpper)
}
// Get the home directory to use for testing expected results.
var homeDir string
usr, err := user.Current()
if err != nil {
t.Errorf("user.Current: %v", err)
return
}
homeDir = usr.HomeDir
// Mac app data directory.
macAppData := filepath.Join(homeDir, "Library", "Application Support")
tests := []struct {
goos string
appName string
roaming bool
want string
}{
// Various combinations of application name casing, leading
// period, operating system, and roaming flags.
{"windows", appNameLower, false, winLocal},
{"windows", appNameUpper, false, winLocal},
{"windows", "." + appNameLower, false, winLocal},
{"windows", "." + appNameUpper, false, winLocal},
{"windows", appNameLower, true, winRoaming},
{"windows", appNameUpper, true, winRoaming},
{"windows", "." + appNameLower, true, winRoaming},
{"windows", "." + appNameUpper, true, winRoaming},
{"linux", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"linux", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"linux", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"linux", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"darwin", appNameLower, false, filepath.Join(macAppData, appNameUpper)},
{"darwin", appNameUpper, false, filepath.Join(macAppData, appNameUpper)},
{"darwin", "." + appNameLower, false, filepath.Join(macAppData, appNameUpper)},
{"darwin", "." + appNameUpper, false, filepath.Join(macAppData, appNameUpper)},
{"openbsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"openbsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"openbsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"openbsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"plan9", appNameLower, false, filepath.Join(homeDir, appNameLower)},
{"plan9", appNameUpper, false, filepath.Join(homeDir, appNameLower)},
{"plan9", "." + appNameLower, false, filepath.Join(homeDir, appNameLower)},
{"plan9", "." + appNameUpper, false, filepath.Join(homeDir, appNameLower)},
{"unrecognized", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"unrecognized", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"unrecognized", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"unrecognized", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
// No application name provided, so expect current directory.
{"windows", "", false, "."},
{"windows", "", true, "."},
{"linux", "", false, "."},
{"darwin", "", false, "."},
{"openbsd", "", false, "."},
{"freebsd", "", false, "."},
{"netbsd", "", false, "."},
{"plan9", "", false, "."},
{"unrecognized", "", false, "."},
// Single dot provided for application name, so expect current
// directory.
{"windows", ".", false, "."},
{"windows", ".", true, "."},
{"linux", ".", false, "."},
{"darwin", ".", false, "."},
{"openbsd", ".", false, "."},
{"freebsd", ".", false, "."},
{"netbsd", ".", false, "."},
{"plan9", ".", false, "."},
{"unrecognized", ".", false, "."},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
ret := btcutil.TstAppDataDir(test.goos, test.appName, test.roaming)
if ret != test.want {
t.Errorf("appDataDir #%d (%s) does not match - "+
"expected got %s, want %s", i, test.goos, ret,
test.want)
continue
}
}
}
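// For reference, a sketch of how the exported API exercised (indirectly, via
// TstAppDataDir) by this test is typically called; the paths shown are the
// ones the table above expects:
//
//	dir := btcutil.AppDataDir("myapp", false)
//	// Linux:   ~/.myapp
//	// Windows: %LOCALAPPDATA%\Myapp
//	// macOS:   ~/Library/Application Support/Myapp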
| [
"\"LOCALAPPDATA\"",
"\"APPDATA\""
] | [] | [
"APPDATA",
"LOCALAPPDATA"
] | [] | ["APPDATA", "LOCALAPPDATA"] | go | 2 | 0 | |
backend/zaka_28999/wsgi.py | """
WSGI config for zaka_28999 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zaka_28999.settings")
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
api/run.py | from flask import Flask, send_from_directory,send_file
from flask_restful import Resource, Api
from flask_cors import CORS
from flask_pymongo import PyMongo
from database.emailKeysDOM import makeUser, verifyKey, verifyUser
from database.FormsDOM import getForm
from generateKey import generateKey
import os
import json
import gridfs
from database import testDB, studentsDOM, usersDOM, FormsDOM, blankFormsDOM, parentsDOM, utilitiesDOM
from flask import jsonify, request, jsonify, _request_ctx_stack
import subprocess
from datetime import datetime
from bson.objectid import ObjectId
from jose import jwt
from functools import wraps
from six.moves.urllib.request import urlopen
import gridfs
import werkzeug
import requests
from werkzeug.utils import secure_filename
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
import cgi
import html  # html.escape replaces cgi.escape, which was removed in Python 3.8
import mimetypes
import io
UPLOAD_FOLDER = './upload'
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif', 'doc', 'docx'}
app = Flask(__name__)
CORS(app)
api = Api(app)
app.config['SENDGRID_API_KEY'] = os.environ.get('SENDGRID_API_KEY') #to be put in heroku
app.config['SENDGRID_DEFAULT_FROM'] = '[email protected]'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
MONGO_URL = os.environ.get('DB_URI')
app.config["MONGO_URI"] = MONGO_URL
mongo = PyMongo(app)
db = mongo.db
fs = gridfs.GridFS(db)
AUTH0_DOMAIN = os.environ.get('AUTH0_DOMAIN')
API_IDENTIFIER = os.environ.get('API_IDENTIFIER')
ALGORITHMS = ["RS256"]
# we store users here to prevent 429s from Auth0
tokensAndUsers = {}
# big thanks to https://auth0.com/docs/quickstart/backend/python/01-authorization?download=true
# Format error response and append status code.
class AuthError(Exception):
def __init__(self, error, status_code):
self.error = error
self.status_code = status_code
@app.errorhandler(AuthError)
def handle_auth_error(ex):
response = jsonify(ex.error)
response.status_code = ex.status_code
return response
def get_token_auth_header():
"""Obtains the access token from the Authorization Header
"""
auth = request.headers.get("Authorization", None)
if not auth:
raise AuthError({"code": "authorization_header_missing",
"description":
"Authorization header is expected"}, 401)
parts = auth.split()
if parts[0].lower() != "bearer":
raise AuthError({"code": "invalid_header",
"description":
"Authorization header must start with"
" Bearer"}, 401)
elif len(parts) == 1:
raise AuthError({"code": "invalid_header",
"description": "Token not found"}, 401)
elif len(parts) > 2:
raise AuthError({"code": "invalid_header",
"description":
"Authorization header must be"
" Bearer token"}, 401)
token = parts[1]
return token
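# For reference, a request accepted by get_token_auth_header carries a header
# of this shape (the token value below is illustrative only):
#
#     Authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.<payload>.<signature>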
def requires_scope(required_scope):
"""Determines if the required scope is present in the access token
Args:
required_scope (str): The scope required to access the resource
"""
token = get_token_auth_header()
unverified_claims = jwt.get_unverified_claims(token)
if unverified_claims.get("scope"):
token_scopes = unverified_claims["scope"].split()
for token_scope in token_scopes:
if token_scope == required_scope:
return True
return False
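# requires_scope is not used by any of the routes shown here; a minimal sketch
# (a hypothetical endpoint, not an existing one) of how it could gate access
# on a token scope:
#
#     @app.route('/adminOnly')
#     @requires_auth
#     def admin_only():
#         if not requires_scope('admin:all'):   # 'admin:all' is a hypothetical scope
#             raise AuthError({'code': 'insufficient_scope',
#                              'description': 'admin scope required'}, 403)
#         return jsonify({'ok': True})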
def requires_auth(f):
"""Determines if the access token is valid
"""
@wraps(f)
def decorated(*args, **kwargs):
token = get_token_auth_header()
jsonurl = urlopen("https://"+AUTH0_DOMAIN+"/.well-known/jwks.json")
jwks = json.loads(jsonurl.read())
try:
unverified_header = jwt.get_unverified_header(token)
except jwt.JWTError:
raise AuthError({"code": "invalid_header1",
"description":
"Invalid header. "
"Use an RS256 signed JWT Access Token"}, 401)
if unverified_header["alg"] == "HS256":
raise AuthError({"code": "invalid_header2",
"description":
"Invalid header. "
"Use an RS256 signed JWT Access Token"}, 401)
rsa_key = {}
for key in jwks["keys"]:
if key["kid"] == unverified_header["kid"]:
rsa_key = {
"kty": key["kty"],
"kid": key["kid"],
"use": key["use"],
"n": key["n"],
"e": key["e"]
}
if rsa_key:
try:
payload = jwt.decode(
token,
rsa_key,
algorithms=ALGORITHMS,
audience=API_IDENTIFIER,
issuer="https://"+AUTH0_DOMAIN+"/"
)
except jwt.ExpiredSignatureError:
raise AuthError({"code": "token_expired",
"description": "token is expired"}, 401)
except jwt.JWTClaimsError:
raise AuthError({"code": "invalid_claims",
"description":
"incorrect claims,"
" please check the audience and issuer"}, 401)
except Exception:
raise AuthError({"code": "invalid_header",
"description":
"Unable to parse authentication"
" token."}, 401)
_request_ctx_stack.top.current_user = payload
return f(*args, **kwargs)
raise AuthError({"code": "invalid_header",
"description": "Unable to find appropriate key"}, 401)
return decorated
'''~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~API~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'''
'''~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~PUBLIC~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'''
'''==================== PARENT/STUDENT DASHBOARDS ===================='''
@app.route('/getParentInfo', methods = ['POST'])
def getParentInfo():
curr_link = request.json['curr_link']
try:
parent_id = parentsDOM.get(currLink=curr_link)
except AssertionError as e:
raise AuthError({'wrong link': True}, 401)
if parentsDOM.isExpired(parent_id):
emailParent(parent_id,'', 'Your updated link is below:')
raise AuthError({'expired': True}, 426)
parentInfo = parentsDOM.getParentProfile(parent_id)
return {
'first_name': parentInfo['first_name'],
'last_name': parentInfo['last_name'],
'email': parentInfo['email'],
}
@app.route('/getStudentsOfParent', methods = ['GET', 'POST'])
def getStudentsOfParent():
curr_link = request.json['curr_link']
try:
parent_id = parentsDOM.get(currLink=curr_link)
except AssertionError as e:
raise AuthError({'wrong link': True}, 401)
if parentsDOM.isExpired(parent_id):
emailParent(parent_id,'', 'Your updated link is below:')
raise AuthError({'expired': True}, 426)
all_student_ids = parentsDOM.getStudentIds(parent_id)
unarchived_student_ids = []
for id in all_student_ids:
if not studentsDOM.isArchived(id):
unarchived_student_ids.append(id)
students = []
for id in unarchived_student_ids:
students.append({
'id': str(id),
'name': studentsDOM.getFirstName(id),
})
return {'students': students}
@app.route('/getStudentForms', methods = ['GET', 'POST'])
def getStudentForms():
student_id = ObjectId(request.json['student_id'])
curr_link = request.json['parent_key']
try:
parent_id = parentsDOM.get(currLink=curr_link)
except AssertionError as e:
raise AuthError({'wrong link': True}, 401)
if studentsDOM.isArchived(student_id):
raise AuthError({'archived': True}, 401)
if parentsDOM.isExpired(parent_id):
emailParent(parent_id,'', 'Your updated link is below:')
raise AuthError({'expired': True}, 426)
form_ids = studentsDOM.getAllFormIds(student_id)
form_data = []
for id in form_ids:
if parent_id == FormsDOM.getParentId(id):
blank_form_data = FormsDOM.getBlankFormId(id) # will assert if formid does not exist
form_data.append({
'form_id': str(id),
'form_name': FormsDOM.getFormName(id),
'last_updated': FormsDOM.getLastUpdated(id),
'last_viewed': FormsDOM.getLastViewed(id),
'completed': FormsDOM.isComplete(id)
})
return {
'form_data': form_data,
'student_info': studentsDOM.getBasicInfo(student_id),
}
@app.route('/getForm', methods=['GET', 'POST'])
def getForm():
curr_link = request.json['curr_link']
try:
parent_id = parentsDOM.get(currLink=curr_link)
except AssertionError as e:
raise AuthError({'wrong link': True}, 401)
if parentsDOM.isExpired(parent_id):
emailParent(parent_id,'', 'Your updated link is below:')
raise AuthError({'expired': True}, 426)
form_id = ObjectId(request.json['form_id'])
blank_form_id = FormsDOM.getBlankFormId(form_id)
blank_form_data = blankFormsDOM.getFormData(blank_form_id)
form_data = FormsDOM.getFormData(form_id)
return {
'blank_form_data': blank_form_data,
'form_data': form_data,
'submitted': form_data != [],
}
@app.route('/submitForm', methods = ['POST'])
def submitForm():
curr_link = request.json['curr_link']
try:
parent_id = parentsDOM.get(currLink=curr_link)
except AssertionError as e:
raise AuthError({'wrong link': True}, 401)
if parentsDOM.isExpired(parent_id):
emailParent(parent_id,'', 'Your updated link is below:')
raise AuthError({'expired': True}, 426)
form_id = request.json['form_id']
answer_data = request.json['answer_data']
FormsDOM.updateFormData(form_id, answer_data)
return '0'
'''~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~PRIVATE~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'''
def getUser(token):
# retrieve stored user
if token in tokensAndUsers.keys():
user_info = tokensAndUsers[token]
else:
endpoint = "https://" + AUTH0_DOMAIN + "/userinfo"
headers = {"Authorization": "Bearer " + get_token_auth_header()}
user_info = requests.post(endpoint, headers=headers)
if user_info.status_code == 200:
user_info = user_info.json()
# delete old key
if user_info in tokensAndUsers.values():
for key, value in tokensAndUsers.items():
if value == user_info:
to_delete = key
break
del tokensAndUsers[to_delete]
# add new key
tokensAndUsers[token] = user_info
else:
user_info = {}
user_info['nickname'] = 'error'
user_info['email'] = 'error'
user_info['http://role'] = 'error'
if len(tokensAndUsers.keys()) > 100:
tokensAndUsers.clear()
return user_info
def log_action(action):
def log_action_inner(f):
@wraps(f)
def decorated(*args, **kwargs):
user_info = getUser(get_token_auth_header())
usersDOM.createUser(user_info['nickname'], user_info['email'], [])
usersDOM.addAction(user_info['nickname'], datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"), action)
return f(*args, **kwargs)
return decorated
return log_action_inner
def specific_roles(roles):
def specific_roles_inner(f):
@wraps(f)
def decorated(*args, **kwargs):
if not isAuthorized(get_token_auth_header(), roles):
raise AuthError({"code": "unauthorized",
"description": "unauthorized to this endpoint"}, 403)
return f(*args, **kwargs)
return decorated
return specific_roles_inner
def isAuthorized(token, roles):
user_info = getUser(token)
return user_info['http://role'] in roles
@app.route('/students', methods = ['GET', 'POST'])
@requires_auth
@log_action('Get students')
def getStudents():
blankFormIds = list(map(lambda currForm: ObjectId(currForm['id']), request.json['blankForms']))
noBlankFormFilter = len(blankFormIds) == 0 # hoping caching will reduce complexity
students = studentsDOM.getStudents()
studentsWithForms = []
for student in students:
if noBlankFormFilter:
forms_completed = 0
for form in student['form_ids']:
if FormsDOM.isComplete(form):
forms_completed += 1
student['forms_completed'] = str(forms_completed) + "/" + str(len(student['form_ids']))
student['completion_rate'] = forms_completed / len(student['form_ids'])
del student['form_ids']
studentsWithForms.append(student)
else:
forms_completed = 0
forms_available = 0
for form in student['form_ids']:
currFormBlankFormId = FormsDOM.getBlankFormId(form)
if currFormBlankFormId in blankFormIds:
if (FormsDOM.isComplete(form)):
forms_completed += 1
forms_available += 1
if forms_available > 0:
student['forms_completed'] = str(forms_completed) + "/" + str(forms_available)
student['completion_rate'] = forms_completed / forms_available
del student['form_ids']
studentsWithForms.append(student)
return {
'students': studentsWithForms,
'authorized': isAuthorized(get_token_auth_header(), ['developer', 'admin']),
'forms': blankFormsDOM.getAll(),
}
def escape_html(text):
"""escape strings for display in HTML"""
return html.escape(text, quote=True).\
replace(u'\n', u'<br />').\
replace(u'\t', u' ').\
replace(u' ', u' ')
def makeNote(element):
element = escape_html(element)
return ' <div style="font-family: inherit; text-align: center"><span style="caret-color: rgb(255, 255, 255); color: #ffffff; font-family: arial; font-size: 16px; font-style: normal; font-variant-caps: normal; font-weight: normal; letter-spacing: normal; text-align: center; text-indent: 0px; text-transform: none; white-space: pre-wrap; word-spacing: 0px; -webkit-text-stroke-width: 0px; background-color: rgb(0, 104, 175); text-decoration: none; float: none; display: inline">' + element + '</span></div>'
def emailParent(parentId, comments=None, message=None):
generatedKey = generateKey()
# succeeded to insert into database
succeeded = parentsDOM.updateKey(parentId, generatedKey)
send_to = parentsDOM.getEmail(parentId)
print(comments)
print(message)
body = ''
if comments is not None and message is not None:
specialNote = True
for comment in comments:
if comment['comment'] != '':
if specialNote:
body = body + makeNote('Special Note:')
specialNote = False
body = body + makeNote(blankFormsDOM.getBlankFormName(ObjectId(comment['id'])) + ' -- ' + comment['comment'])
if message != '':
if specialNote:
body = body + makeNote('Special Note:')
body = body + makeNote(message)
#currently only sends the email if a new user could be made
if succeeded:
if os.environ.get('ENV') == 'dev':
base = 'http://localhost:3000/parentdash/'
else:
base = 'https://sfjaforms.herokuapp.com/parentdash/'
target = base + generatedKey
message = Mail(
from_email='[email protected]',
to_emails=send_to,
subject='South Florida Jewish Academy Forms',
html_content='<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"><html data-editor-version="2" class="sg-campaigns" xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1, maximum-scale=1"> <!--[if !mso]><!--> <meta http-equiv="X-UA-Compatible" content="IE=Edge"> <!--<![endif]--> <!--[if (gte mso 9)|(IE)]> <xml> <o:OfficeDocumentSettings> <o:AllowPNG/> <o:PixelsPerInch>96</o:PixelsPerInch> </o:OfficeDocumentSettings> </xml> <![endif]--> <!--[if (gte mso 9)|(IE)]> <style type="text/css"> body {width: 600px;margin: 0 auto;} table {border-collapse: collapse;} table, td {mso-table-lspace: 0pt;mso-table-rspace: 0pt;} img {-ms-interpolation-mode: bicubic;} </style><![endif]--> <style type="text/css"> body, p, div { font-family: arial; font-size: 16px; } body { color: #FFFFFF; } body a { color: #0068af; text-decoration: none; } p { margin: 0; padding: 0; } table.wrapper { width:100% !important; table-layout: fixed; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: 100%; -moz-text-size-adjust: 100%; -ms-text-size-adjust: 100%; } img.max-width { max-width: 100% !important; } .column.of-2 { width: 50%; } .column.of-3 { width: 33.333%; } .column.of-4 { width: 25%; } @media screen and (max-width:480px) { .preheader .rightColumnContent, .footer .rightColumnContent { text-align: left !important; } .preheader .rightColumnContent div, .preheader .rightColumnContent span, .footer .rightColumnContent div, .footer .rightColumnContent span { text-align: left !important; } .preheader .rightColumnContent, .preheader .leftColumnContent { font-size: 80% !important; padding: 5px 0; } table.wrapper-mobile { width: 100% !important; table-layout: fixed; } img.max-width { height: auto !important; max-width: 100% !important; } a.bulletproof-button { display: block !important; width: auto !important; font-size: 80%; padding-left: 0 !important; padding-right: 0 !important; } .columns { width: 100% !important; } .column { display: block !important; width: 100% !important; padding-left: 0 !important; padding-right: 0 !important; margin-left: 0 !important; margin-right: 0 !important; } .social-icon-column { display: inline-block !important; } } </style> <!--user entered Head Start--> <!--End Head user entered--> </head> <body> <center class="wrapper" data-link-color="#0068af" data-body-style="background-color:#f2f4fb; color:#FFFFFF; font-size:16px; font-family:arial;"> <div class="webkit"> <table cellpadding="0" cellspacing="0" border="0" width="100%" class="wrapper" bgcolor="#f2f4fb"> <tr> <td valign="top" bgcolor="#f2f4fb" width="100%"> <table width="100%" role="content-container" class="outer" align="center" cellpadding="0" cellspacing="0" border="0"> <tr> <td width="100%"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td> <!--[if mso]> <center> <table><tr><td width="600"> <![endif]--> <table width="100%" cellpadding="0" cellspacing="0" border="0" style="width:100%; max-width:600px;" align="center"> <tr> <td role="modules-container" style="padding:0px 0px 0px 0px; text-align:left; color:#FFFFFF;" bgcolor="#f2f4fb" width="100%" align="left"><table class="module preheader preheader-hide" role="module" data-type="preheader" border="0" cellpadding="0" cellspacing="0" width="100%" style="display: none !important; mso-hide: all; visibility: hidden; opacity: 0; color: transparent; height: 0; 
width: 0;"> <tr> <td role="module-content"> <p></p> </td> </tr> </table><table class="wrapper" role="module" data-type="image" border="0" cellpadding="0" cellspacing="0" width="100%" style="table-layout: fixed;" data-muid="98ndJyAY9BSGjoVqrr6FYx"> <tbody><tr> <td style="font-size:6px; line-height:10px; padding:30px 0px 30px 0px;" valign="top" align="left"> <img class="max-width" border="0" style="display:block; color:#000000; text-decoration:none; font-family:Helvetica, arial, sans-serif; font-size:16px; height:auto !important; max-width:20% !important; width:20%;" src="http://cdn.mcauto-images-production.sendgrid.net/4b5a02c40a9e98de/b2e01ba8-b54b-40a3-b380-739ae06826d3/115x122.jpg" alt="Off Grid Adventures" width="120" data-responsive="true" data-proportionally-constrained="false"> </td> </tr> </tbody></table><table class="module" role="module" data-type="text" border="0" cellpadding="0" cellspacing="0" width="100%" style="table-layout: fixed;" data-muid="7pyDCmyDaGcm5WsBBSaEgv" data-mc-module-version="2019-10-22"> <tbody><tr> <td style="line-height:22px; text-align:inherit; background-color:#0068af; padding:30px 0px 30px 0px;" height="100%" valign="top" bgcolor="#0068af"><div><div style="font-family: inherit; text-align: center">South Florida Jewish Academy</div><div style="font-family: inherit; text-align: center"><br></div><div style="font-family: inherit; text-align: center"><span style="font-size: 24px">You've got a form!</span></div><div style="font-family: inherit; text-align: center"><br></div><div style="font-family: inherit; text-align: center"><span style="caret-color: rgb(255, 255, 255); color: #ffffff; font-family: arial; font-size: 16px; font-style: normal; font-variant-caps: normal; font-weight: normal; letter-spacing: normal; text-align: center; text-indent: 0px; text-transform: none; white-space: pre-wrap; word-spacing: 0px; -webkit-text-stroke-width: 0px; background-color: rgb(0, 104, 175); text-decoration: none; float: none; display: inline">Please click on the below link to review and complete your students' forms.<br/>Please note that this link is unique and will expire in one week, when a new will be emailed to you.<br/>Do not share this link with anybody!</span></div>'+ body + '<div></div></div></td> </tr> </tbody></table><table border="0" cellpadding="0" cellspacing="0" class="module" data-role="module-button" data-type="button" role="module" style="table-layout:fixed" width="100%" data-muid="4ywPd9vJ6WFyV1Si75h9vh"><tbody><tr><td align="center" bgcolor="#0068af" class="outer-td" style="padding:10px 10px 10px 10px; background-color:#0068af;"><table border="0" cellpadding="0" cellspacing="0" class="button-css__deep-table___2OZyb wrapper-mobile" style="text-align:center"><tbody><tr><td align="center" bgcolor="#ffffff" class="inner-td" style="border-radius:6px; font-size:16px; text-align:center; background-color:inherit;"><a style="background-color:#ffffff; border:1px solid #ffffff; border-color:#ffffff; border-radius:3px; border-width:1px; display:inline-block; font-size:16px; font-weight:700; letter-spacing:1px; line-height:40px; padding:12px 20px 12px 20px; text-align:center; text-decoration:none; border-style:solid; color:#0068af;" href="' + target + '" target="_blank">View Forms</a></td></tr></tbody></table></td></tr></tbody></table><table class="module" role="module" data-type="divider" border="0" cellpadding="0" cellspacing="0" width="100%" style="table-layout: fixed;" data-muid="mVyZz43HETwfwb72TGh4iy"> <tbody><tr> <td style="padding:0px 0px 0px 0px;" 
role="module-content" height="100%" valign="top" bgcolor=""> <table border="0" cellpadding="0" cellspacing="0" align="center" width="100%" height="3px" style="line-height:3px; font-size:3px;"> <tbody><tr> <td style="padding:0px 0px 3px 0px;" bgcolor="#ffffff"></td> </tr> </tbody></table> </td> </tr> </tbody></table><div data-role="module-unsubscribe" class="module unsubscribe-css__unsubscribe___2CDlR" role="module" data-type="unsubscribe" style="color:#0068af; font-size:12px; line-height:20px; padding:16px 16px 16px 16px; text-align:center;" data-muid="txBUUpmixSjuZ5Ad69p1sX"><div class="Unsubscribe--addressLine"></div><p style="font-family:arial,helvetica,sans-serif; font-size:12px; line-height:20px;"><a target="_blank" class="Unsubscribe--unsubscribeLink zzzzzzz" href="{{{unsubscribe}}}" style=""></a></p></div></td> </tr> </table> <!--[if mso]> </td> </tr> </table> </center> <![endif]--> </td> </tr> </table> </td> </tr> </table> </td> </tr> </table> </div> </center> </body> </html>'
)
try:
sg = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))
response = sg.send(message)
print(response.status_code)
print(response.body)
print(response.headers)
return []
except Exception as e:
print(e)
return [send_to]
'''==================== AUDITING ===================='''
@app.route('/users', methods = ['GET', 'POST'])
@requires_auth
@log_action('Get users')
def getUsers():
return {'users': usersDOM.getUsers()}
'''==================== STUDENT INFO ===================='''
@app.route('/studentProfile', methods = ['POST'])
@requires_auth
@log_action('Get student profile')
def getStudentProfile():
studentID = ObjectId(request.json['id'])
students_forms = studentsDOM.getForms(studentID)
forms = []
for formId in students_forms:
curr_form_data_raw = FormsDOM.getForm(formId)
formName = blankFormsDOM.getBlankFormName(curr_form_data_raw['blank_forms_id'])
formYear = blankFormsDOM.getFormYear(curr_form_data_raw['blank_forms_id'])
formTag = blankFormsDOM.getFormTag(curr_form_data_raw['blank_forms_id'])
curr_form_data = dict()
curr_form_data['form_name'] = str(formName)
curr_form_data['form_year'] = str(formYear)
curr_form_data['form_tag'] = str(formTag)
curr_form_data['form_id'] = str(curr_form_data_raw['_id'])
curr_form_data['blank_forms_id'] = str(curr_form_data_raw['blank_forms_id'])
curr_form_data['last_updated'] = curr_form_data_raw['last_updated']
curr_form_data['completed'] = FormsDOM.isComplete(formId)
parent_data = parentsDOM.getParentProfile(ObjectId(curr_form_data_raw['parent_id']))
curr_form_data['p_first_name'] = parent_data['first_name']
curr_form_data['p_last_name'] = parent_data['last_name']
curr_form_data['p_email'] = parent_data['email']
forms.append(curr_form_data)
parentIds = studentsDOM.getParents(studentID)
parents = []
for parentId in parentIds:
parent = parentsDOM.getParentProfile(parentId)
studentsOfParent = []
for currStudentID in parent['student_ids']:
student = studentsDOM.getFullInfo(currStudentID)
forms_completed = 0
for form in student['form_ids']:
if FormsDOM.isComplete(form):
forms_completed += 1
student['forms_completed'] = str(forms_completed) + "/" + str(len(student['form_ids']))
student['completion_rate'] = forms_completed / len(student['form_ids'])
cleanedStudent = {}
cleanedStudent['id'] = str(student['_id'])
cleanedStudent['first_name'] = student['first_name']
cleanedStudent['middle_name'] = student['middle_name']
cleanedStudent['last_name'] = student['last_name']
cleanedStudent['DOB'] = student['DOB']
cleanedStudent['grade'] = student['grade']
cleanedStudent['archived'] = student['archived']
cleanedStudent['forms_completed'] = student['forms_completed']
cleanedStudent['completion_rate'] = student['completion_rate']
studentsOfParent.append(cleanedStudent)
cleanedParent = {}
cleanedParent['id'] = str(parentId)
cleanedParent['children'] = studentsOfParent
cleanedParent['first_name'] = parent['first_name']
cleanedParent['last_name'] = parent['last_name']
cleanedParent['email'] = parent['email']
parents.append(cleanedParent)
return {
'forms': forms,
'basic_info': studentsDOM.getBasicInfo(studentID),
'blank_forms': blankFormsDOM.getAll(),
'parents': parents,
'authorized': isAuthorized(get_token_auth_header(), ['developer', 'admin']),
'tags': utilitiesDOM.getTags(),
'years': utilitiesDOM.getYears(),
}
@app.route('/studentProfileForm', methods = ['POST'])
@requires_auth
@log_action('Get student form')
def getStudentProfileForm():
studentID = ObjectId(request.json['student_id'])
form_id = ObjectId(request.json['form_id'])
form_data = FormsDOM.getFormData(ObjectId(form_id))
blank_form_id = FormsDOM.getBlankFormId(form_id)
blank_form_data = blankFormsDOM.getFormData(blank_form_id)
parent_id = FormsDOM.getInfo(form_id, 'parent_id')
parent_profile = parentsDOM.getParentProfile(parent_id)
cleaned_parent_profile = {}
cleaned_parent_profile['first_name'] = parent_profile['first_name']
cleaned_parent_profile['last_name'] = parent_profile['last_name']
cleaned_parent_profile['email'] = parent_profile['email']
form_info = {}
form_info['name'] = blankFormsDOM.getFormName(blank_form_id)
form_info['last_updated'] = FormsDOM.getLastUpdated(form_id)
form_info['completed'] = FormsDOM.isComplete(form_id)
isAuthorizedBool = isAuthorized(get_token_auth_header(), ['developer', 'admin'])
return {
'form_data': form_data,
'blank_form_data': blank_form_data,
'basic_info': studentsDOM.getBasicInfo(studentID),
'parent_profile': cleaned_parent_profile,
'form_info': form_info,
'isAuthorized': isAuthorizedBool,
}
@app.route('/resendForms', methods = ['POST'])
@requires_auth
@log_action('Resend forms')
def resendForms():
studentId = ObjectId(request.json['id'])
comments = request.json['comments']
message = request.json['message']
newBlankForms = map(lambda form: ObjectId(form['id']), filter(lambda form: form['checked'], request.json['forms']))
parentIds = resendForm(studentId, newBlankForms)
for parentId in parentIds:
emailParent(parentId, comments, message)
result = {'success': True}
return jsonify(result), 200
@app.route('/bulkResendEmails', methods=['POST'])
@requires_auth
@log_action('Bulk Resend Emails')
def bulkResendEmails():
blankFormIds = list(map(lambda form: ObjectId(form['id']), request.json['blankForms']))
students = list(map(lambda student: ObjectId(student), request.json['students']))
message = request.json['message']
uniqueParentIds = set()
for student in students:
for parentId in resendForm(student, blankFormIds):
uniqueParentIds.add(parentId)
# only email parents once
for parentId in uniqueParentIds:
emailParent(parentId, [], message)
return '0'
def resendForm(studentId, newBlankFormIds):
blankFormIds = map(lambda form: FormsDOM.getBlankFormId(form), studentsDOM.getForms(studentId))
uniqueBlankFormIds = set(blankFormIds)
parentIds = studentsDOM.getParents(studentId)
formIds = []
additionalBlankForms = []
for newBlankFormId in newBlankFormIds:
if newBlankFormId not in uniqueBlankFormIds:
for parentId in parentIds:
currID = FormsDOM.createForm(newBlankFormId, None, None, True, False, [], parentId)
formIds.append(currID)
additionalBlankForms.append(newBlankFormId)
for formId in formIds:
studentsDOM.addNewFormId(studentId, formId)
# return parentIds
return parentIds
'''==================== FORM MANAGEMENT ===================='''
@app.route('/getBlankFormDetails', methods=['GET'])
@requires_auth
@log_action('Get blank forms')
def getBlankFormDetails():
return { 'forms': blankFormsDOM.getBlankFormDetails()}
@app.route('/deleteBlankForm', methods=['POST'])
@requires_auth
@log_action('Delete blank form')
def deleteBlankForm():
id = request.json['form_id']
blankFormsDOM.deleteForm(ObjectId(id))
return '0'
@app.route('/updateFormName', methods=['POST'])
@requires_auth
@log_action('Update form name')
def updateFormName():
id = request.json['form_id']
form_name = request.json['form_name']
blankFormsDOM.updateFormName(ObjectId(id), form_name)
return '0'
@app.route('/updateFormYear', methods=['POST'])
@requires_auth
@log_action('Update form year')
def updateFormYear():
id = request.json['form_id']
form_year = request.json['form_year']
blankFormsDOM.updateFormYear(ObjectId(id), form_year)
utilitiesDOM.updateYears(form_year)
return '0'
@app.route('/updateFormTag', methods=['POST'])
@requires_auth
@log_action('Update form tag')
def updateFormTag():
id = request.json['form_id']
form_tag = request.json['form_tag']
blankFormsDOM.updateFormTag(ObjectId(id), form_tag)
utilitiesDOM.updateTags(form_tag)
return '0'
@app.route('/newform', methods = ['POST'])
@requires_auth
@log_action('Add form')
def addForm():
data = request.json['data']
form_name = request.json['formName']
form_year = request.json['formYear']
form_tag = request.json['formTag']
blankFormsDOM.createForm(form_name, form_year, form_tag, data)
utilitiesDOM.updateYears(form_year)
utilitiesDOM.updateTags(form_tag)
return '0'
@app.route('/forms', methods = ['GET', 'POST'])
@requires_auth
@log_action('Get forms')
def getForms():
return {'forms': FormsDOM.getForms()}
@app.route('/getBlankForm', methods = ['GET', 'POST'])
@requires_auth
@log_action('Get blank form')
def getBlankForm():
blankForm_id = ObjectId(request.json['form_id'])
return {
'data': blankFormsDOM.getFormData(blankForm_id),
'name': blankFormsDOM.getFormName(blankForm_id),
'year': blankFormsDOM.getFormYear(blankForm_id),
'tag': blankFormsDOM.getFormTag(blankForm_id),
}
@app.route('/changeStatus', methods = ['POST'])
@requires_auth
@log_action('Status of form changed')
def changeStatus():
form_id = ObjectId(request.json['form_id'])
status = request.json['form_status']
FormsDOM.changeCompletion(form_id,status)
newStatus = not status
return {'status': newStatus}
@app.route('/resetForm', methods = ['POST'])
@requires_auth
@log_action('Form Data Reset')
def resetForm():
form_id = ObjectId(request.json['form_id'])
newData = FormsDOM.clearForm(form_id)
newData['_id'] = str(newData['_id'])
newData['blank_forms_id'] = str(newData['blank_forms_id'])
newData['parent_id'] = str(newData['parent_id'])
return {'new_form_info':newData,}
'''====================== UPLOAD FILE ======================'''
@app.route('/saveImage', methods=['POST'])
@requires_auth
@log_action('Uploaded File')
def saveImg():
studentId = ObjectId(request.args.get('studentId'))
# check if the post request has the file part
if 'file' not in request.files:
return '0'
file = request.files['file']
# if the user does not select a file, the browser also
# submits an empty part without a filename
if file.filename == '':
return '0'
if file:
filename = secure_filename(file.filename)
file.save(filename)
with open(filename, "rb") as byteFile:
f = byteFile.read()
fileId = fs.put(f)
studentsDOM.addNewFile(studentId, fileId, filename)
files = studentsDOM.getFiles(studentId)
cleanFiles=[]
try:
os.remove(filename) # delete the file on the server after saved to mongo
except:
print(filename + ' not removable') # should not occur, but safety check
for file in files:
tempDict ={}
tempDict['file_id'] = str(file['fileId'])
tempDict['file_name'] = file['filename']
cleanFiles.append(tempDict)
return{'files': cleanFiles}
@app.route('/getFiles', methods=['POST'])
@requires_auth
@log_action('Get File')
def getFiles():
studentId = ObjectId(request.args.get('studentId'))
files = studentsDOM.getFiles(studentId)
cleanFiles=[]
for file in files:
tempDict = {}
tempDict['file_id'] = str(file['fileId'])
tempDict['file_name'] = file['filename']
cleanFiles.append(tempDict)
return{'files': cleanFiles}
@app.route('/downloadFile', methods=['POST'])
@requires_auth
@log_action('Downloaded File')
def downloadFile():
file_id = ObjectId(request.json['file_id'])
data = fs.get(file_id)
file_name = request.json['file_name']
file_type = mimetypes.MimeTypes().guess_type(str(file_name))[0]
fileBytes = data.read()
return send_file(io.BytesIO(fileBytes),
attachment_filename= file_name,
mimetype=file_type)
@app.route('/deleteFile', methods=['POST'])
@requires_auth
@log_action('Deleted File')
def deleteFile():
file_id = ObjectId(request.json['file_id'])
student_id = ObjectId(request.json['studentId'])
fs.delete(file_id)
files = studentsDOM.deleteFile(student_id,file_id)
cleanFiles=[]
for file in files:
tempDict = {}
tempDict['file_id'] = str(file['fileId'])
tempDict['file_name'] = file['filename']
cleanFiles.append(tempDict)
return{'files': cleanFiles}
@app.route('/renameFile', methods=['POST'])
@requires_auth
@log_action('Renamed File')
def renameFile():
file_id = ObjectId(request.json['file_id'])
student_id = ObjectId(request.json['studentId'])
new_file_name = request.json['newFileName']
studentsDOM.gridFile(file_id,new_file_name)
files = studentsDOM.renameFile(student_id,file_id,new_file_name)
cleanFiles=[]
for file in files:
tempDict = {}
tempDict['file_id'] = str(file['fileId'])
tempDict['file_name'] = file['filename']
cleanFiles.append(tempDict)
return{'files': cleanFiles}
'''====================== ADD STUDENT ======================'''
@app.route('/getAllForms', methods=['GET'])
@requires_auth
@log_action('Get all forms')
def getAllForms():
return { 'forms': blankFormsDOM.getAll()}
@app.route('/addStudent', methods = ['POST'])
@requires_auth
@log_action('Add student')
def addStudent():
student = request.json['studentData']
parentIds = []
parents = request.json['parentData']
for parent in parents:
currID = None
if parentsDOM.exists(parent['email']):
currID = parentsDOM.get(email=parent['email'])
else:
currID = parentsDOM.createParent(parent['firstName'], parent['lastName'], parent['email'])
parentIds.append(currID)
formIds = []
for form in request.json['forms']:
for parentId in parentIds:
id = form['id']
# createForm(id, date, required, comp, data, parentID):
currID = FormsDOM.createForm(ObjectId(id), None, None, True, False, [], parentId)
formIds.append(currID)
dateOfBirth = datetime.strptime(student['dob'], '%m/%d/%Y')
studentId = studentsDOM.createStudent(student['firstName'], student['middleName'], student['lastName'], dateOfBirth, int(student['grade']), formIds, parentIds, student['class'])
for parentId in parentIds:
parentsDOM.addStudentId(parentId, studentId)
# send emails
failed = []
for parentId in parentIds:
failed = failed + emailParent(parentId)
return {
'failed': failed,
}
'''====================== HIGHER ROLE ENDPOINTS ======================'''
@app.route('/checkRoleAdmin', methods = ['GET'])
@requires_auth
@log_action('check role admin')
def checkRoleAdmin():
isAuthorizedBool = isAuthorized(get_token_auth_header(), ['developer', 'admin'])
return {
'isAuthorized': isAuthorizedBool,
'numArchived': len(studentsDOM.getArchivedStudents()) if isAuthorizedBool else 0,
'cacheSize': len(tokensAndUsers) if isAuthorizedBool else 0,
}
@app.route('/archiveStudent', methods = ['POST'])
@requires_auth
@log_action('archive student')
@specific_roles(['admin', 'developer'])
def archiveStudent():
studentID = ObjectId(request.json['id'])
studentsDOM.updateInfo(studentID, 'archived', True)
return '0'
@app.route('/unarchiveStudent', methods = ['POST'])
@requires_auth
@log_action('unarchive student')
@specific_roles(['admin', 'developer'])
def unarchiveStudent():
studentID = ObjectId(request.json['id'])
studentsDOM.updateInfo(studentID, 'archived', False)
return '0'
@app.route('/changeGrades', methods = ['POST'])
@requires_auth
@log_action('change grades')
@specific_roles(['admin', 'developer'])
def changeGrades():
studentsDOM.changeGrades(int(request.json['difference']))
return '0'
@app.route('/dataDownload', methods = ['POST'])
@requires_auth
@log_action('data download')
@specific_roles(['admin', 'developer'])
def dataDownload():
toDownload = request.json['toDownload']
if toDownload == 'students':
subprocess.call('rm -f students.csv', shell=True)
subprocess.call('mongoexport --db sfja --collection students --type=csv --fields _id,first_name,middle_name,last_name,grade,DOB,archived,parent_ids,form_ids --out students.csv', shell=True)
return send_from_directory('.', 'students.csv', as_attachment=True)
elif toDownload == 'parents':
subprocess.call('rm -f parents.csv', shell=True)
subprocess.call('mongoexport --db sfja --collection parents --type=csv --fields _id,first_name,last_name,email,student_ids --out parents.csv', shell=True)
return send_from_directory('.', 'parents.csv', as_attachment=True)
elif toDownload == 'forms':
subprocess.call('rm -f forms.csv', shell=True)
subprocess.call('mongoexport --db sfja --collection forms --type=csv --fields _id,blank_forms_id,parent_id,last_updated,last_viewed,completed,form_data --out forms.csv', shell=True)
return send_from_directory('.', 'forms.csv', as_attachment=True)
elif toDownload == 'users':
subprocess.call('rm -f users.csv', shell=True)
subprocess.call('mongoexport --db sfja --collection users --type=csv --fields _id,user_id,email,actions --out users.csv', shell=True)
return send_from_directory('.', 'users.csv', as_attachment=True)
def deleteStudents(toDeleteStudents):
parentsToUpdate = set()
for student in toDeleteStudents:
studentsDOM.deleteStudent(student['_id'])
for formId in student['form_ids']:
parentsToUpdate.add((FormsDOM.getParentID(formId), student['_id']))
FormsDOM.deleteForm(formId)
for (parentId, studentId) in parentsToUpdate:
parentsDOM.removeStudentId(parentId, studentId)
@app.route('/deleteArchivedStudents', methods = ['POST'])
@requires_auth
@log_action('delete archived students')
@specific_roles(['admin', 'developer'])
def deleteArchivedStudents():
toDeleteStudents = studentsDOM.getArchivedStudents()
deleteStudents(toDeleteStudents)
return {
'numDeleted': len(toDeleteStudents)
}
@app.route('/deleteStudent', methods = ['POST'])
@requires_auth
@log_action('delete student')
@specific_roles(['admin', 'developer'])
def deleteStudent():
student = studentsDOM.getFullInfo(ObjectId(request.json['id']))
deleteStudents([student])
return {
'success': True,
}
@app.route('/clearLogins', methods = ['POST'])
@requires_auth
@log_action('clear logins')
@specific_roles(['admin', 'developer'])
def clearLogins():
tokensAndUsers.clear()
return {
'success': True,
}
@app.route('/submitFormAuth', methods = ['POST'])
@requires_auth
@log_action('Submit form')
@specific_roles(['admin', 'developer'])
def submitFormAuth():
form_id = request.json['form_id']
answer_data = request.json['answer_data']
FormsDOM.updateFormData(form_id, answer_data)
return '0'
@app.route('/studentProfileUpdate', methods = ['POST'])
@requires_auth
@log_action('Update Profile')
@specific_roles(['admin', 'developer'])
def studentProfileUpdate():
studentID = ObjectId(request.json['id'])
basicInfo = request.json['basicInfo']
parents = request.json['parents']
for key, value in basicInfo.items():
if key == '_id':
continue
if key == 'archived':
continue
if key == 'DOB':
value = datetime.strptime(basicInfo['DOB'], '%m/%d/%Y')
studentsDOM.updateInfo(studentID, key, value)
for parent in parents:
for key, value in parent.items():
parentID = ObjectId(parent['id'])
if key == 'children':
continue
if key == 'id':
continue
parentsDOM.updateInfoBasic(parentID, key, value)
return '0'
if __name__ == '__main__':
app.run(debug=True) | [] | [] | [
"API_IDENTIFIER",
"ENV",
"SENDGRID_API_KEY",
"DB_URI",
"AUTH0_DOMAIN"
] | [] | ["API_IDENTIFIER", "ENV", "SENDGRID_API_KEY", "DB_URI", "AUTH0_DOMAIN"] | python | 5 | 0 | |
cli/keyconjurer/credentials.go | package keyconjurer
import (
"fmt"
"os"
"time"
)
// Credentials are used to store and print out temporary AWS Credentials
// Note: verified that onelogin uses int as ID (so no leading 0's)
// ... but does mean we can have negative user ids
type Credentials struct {
accountId uint
AccessKeyID string `json:"AccessKeyId"`
SecretAccessKey string `json:"SecretAccessKey"`
SessionToken string `json:"SessionToken"`
Expiration string `json:"Expiration"`
}
func GetCredentials(u *UserData, accountName string, ttl uint) (*Credentials, error) {
	// check that the requested account is currently assigned to the user
accountFound, err := u.FindAccount(accountName)
if err != nil {
Logger.Warnf("account <%s> is not currently cached by Key Conjurer; please login or have the account assigned to you\n", accountName)
return nil, err
}
// check if account asked for is in ENV
// and still valid
//
// on false always build new Credential
var credentials *Credentials
switch envCredsValid(accountFound, u.TimeRemaining) {
case true:
// use current creds
credentials = getCredentialsFromENV()
case false:
// generate new creds
var credsTTL uint
if ttl == 0 {
credsTTL = u.TTL
} else {
credsTTL = ttl
}
fmt.Fprintln(os.Stderr, "Sending Duo Push")
credentials, err = getCredentialsFromKeyConjurer(u.Creds, accountFound, credsTTL)
if err != nil {
Logger.Fatalln(err.Error())
return nil, err
}
}
credentials.accountId = accountFound.ID
return credentials, nil
}
// getCredentialsFromKeyConjurer requests a set of temporary credentials for the
// requested AWS account and returns them using the supplied user credentials
func getCredentialsFromKeyConjurer(encryptedAD string, account *Account, ttl uint) (*Credentials, error) {
Logger.Debugf("getting aws credentials using user creds <%v,%v,%v>", encryptedAD, account, ttl)
data := newKeyConjurerCredRequestJSON(Client, Version, "encrypted", encryptedAD, account.ID, ttl)
responseCredData := &Credentials{}
err := doKeyConjurerAPICall("/get_aws_creds", data, responseCredData)
if err != nil {
Logger.Debugln("error calling Key Conjurer /get_aws_creds api")
return nil, err
}
return responseCredData, nil
}
// getCredentialsFromENV loads the current credentials from the environment, if available
func getCredentialsFromENV() *Credentials {
return &Credentials{
AccessKeyID: os.Getenv("AWS_ACCESS_KEY_ID"),
SecretAccessKey: os.Getenv("AWS_SECRET_ACCESS_KEY"),
SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
Expiration: os.Getenv("AWSKEY_EXPIRATION"),
}
}
func envCredsValid(account *Account, minutesTimeWindow uint) bool {
	// if there is no AWSKEY_ACCOUNT env var, or it doesn't match the requested account,
// generate new creds
currentAccount, ok := os.LookupEnv("AWSKEY_ACCOUNT")
if !ok || currentAccount != fmt.Sprint(account.ID) {
Logger.Infof("env creds invalid as account <%v> doesnt match AWSKEY_ACCOUNT <%v>\n", fmt.Sprint(account.ID), currentAccount)
return false
}
// if no env var AWSKEY_EXPIRATION
// generate new creds
currentExpiration, ok := os.LookupEnv("AWSKEY_EXPIRATION")
if !ok {
Logger.Warnln("env creds invalid as key expired")
return false
}
// if expiration cant be parsed
// generate new creds
expiration, err := time.Parse(time.RFC3339, currentExpiration)
if err != nil {
Logger.Warnf("env cerds invalid as timestamp <%v> is not RFC3339 compliant\n", currentExpiration)
Logger.Errorln(err)
return false
}
	// use the creds if they haven't expired, or generate new ones if they have;
	// also take into account a time window in which the creds must still be valid
	// example: the creds must still be valid at now + 5m
return expiration.After(time.Now().Add(time.Minute * time.Duration(minutesTimeWindow)))
}
/*
Help Funcs
*/
// PrintCredsForEnv detects the user's shell and outputs the credentials for use
// as environment variables for said shell
func (c Credentials) PrintCredsForEnv() {
exportStatement := ""
switch getShellType() {
case "powershell":
exportStatement = `$Env:AWS_ACCESS_KEY_ID = "%v"
$Env:AWS_SECRET_ACCESS_KEY = "%v"
$Env:AWS_SESSION_TOKEN = "%v"
$Env:AWS_SECURITY_TOKEN = "%v"
$Env:TF_VAR_access_key = $Env:AWS_ACCESS_KEY_ID
$Env:TF_VAR_secret_key = $Env:AWS_SECRET_ACCESS_KEY
$Env:TF_VAR_token = $Env:AWS_SESSION_TOKEN
$Env:AWSKEY_EXPIRATION = "%v"
$Env:AWSKEY_ACCOUNT = "%v"
`
case "cmd":
exportStatement = `SET AWS_ACCESS_KEY_ID=%v
SET AWS_SECRET_ACCESS_KEY=%v
SET AWS_SESSION_TOKEN=%v
SET AWS_SECURITY_TOKEN=%v
SET TF_VAR_access_key=%%AWS_ACCESS_KEY_ID%%
SET TF_VAR_secret_key=%%AWS_SECRET_ACCESS_KEY%%
SET TF_VAR_token=%%AWS_SESSION_TOKEN%%
SET AWSKEY_EXPIRATION=%v
SET AWSKEY_ACCOUNT=%v
`
case "bash":
fallthrough
default:
exportStatement = `export AWS_ACCESS_KEY_ID=%v
export AWS_SECRET_ACCESS_KEY=%v
export AWS_SESSION_TOKEN=%v
export AWS_SECURITY_TOKEN=%v
export TF_VAR_access_key=$AWS_ACCESS_KEY_ID
export TF_VAR_secret_key=$AWS_SECRET_ACCESS_KEY
export TF_VAR_token=$AWS_SESSION_TOKEN
export AWSKEY_EXPIRATION=%v
export AWSKEY_ACCOUNT=%v
`
}
fmt.Printf(exportStatement, c.AccessKeyID, c.SecretAccessKey, c.SessionToken,
c.SessionToken, c.Expiration, c.accountId)
}
| [
"\"AWS_ACCESS_KEY_ID\"",
"\"AWS_SECRET_ACCESS_KEY\"",
"\"AWS_SESSION_TOKEN\"",
"\"AWSKEY_EXPIRATION\""
] | [] | [
"AWS_SESSION_TOKEN",
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY",
"AWSKEY_EXPIRATION"
] | [] | ["AWS_SESSION_TOKEN", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWSKEY_EXPIRATION"] | go | 4 | 0 | |
jax/numpy/lax_numpy.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pytype: skip-file
"""
Implements the NumPy API, using the primitives in :mod:`jax.lax`.
NumPy operations are implemented in Python in terms of the primitive operations
in :mod:`jax.lax`. Since NumPy operations are not primitive and instead are
implemented in terms of :mod:`jax.lax` operations, we do not need to define
transformation rules such as gradient or batching rules. Instead,
transformations for NumPy primitives can be derived from the transformation
rules for the underlying :code:`lax` primitives.
"""
import builtins
import collections
import operator
import os
import types
from typing import Sequence, Set, Tuple, Union
import warnings
import numpy as np
import opt_einsum
from jax import jit, custom_jvp
from .vectorize import vectorize
from ._util import _wraps
from .. import core
from .. import dtypes
from ..abstract_arrays import UnshapedArray, ShapedArray, ConcreteArray, canonicalize_shape
from ..config import flags, config
from ..interpreters.xla import DeviceArray
from ..interpreters.masking import Poly
from .. import lax
from ..lax.lax import _device_put_raw
from .. import ops
from ..util import (partial, unzip2, prod as _prod,
subvals, safe_zip)
from ..tree_util import tree_leaves, tree_flatten
FLAGS = flags.FLAGS
flags.DEFINE_enum(
'jax_numpy_rank_promotion', os.getenv('JAX_NUMPY_RANK_PROMOTION', 'allow'),
enum_values=['allow', 'warn', 'raise'],
help=
'Control NumPy-style automatic rank promotion broadcasting '
'("allow", "warn", or "raise").')
newaxis = None
# Common docstring additions:
_PRECISION_DOC = """\
In addition to the original NumPy arguments listed below, also supports
``precision`` for extra control over matrix-multiplication precision
on supported devices. ``precision`` may be set to ``None``, which means
default precision for the backend, or any ``jax.lax.Precision`` enum value
(``Precision.DEFAULT``, ``Precision.HIGH`` or ``Precision.HIGHEST``).
"""
# We replace some builtin names to follow Numpy's API, so we capture here.
_abs = builtins.abs
_all = builtins.all
_any = builtins.any
_max = builtins.max
_min = builtins.min
_sum = builtins.sum
_divmod = builtins.divmod
# NumPy constants
pi = np.pi
e = np.e
euler_gamma = np.euler_gamma
inf = np.inf
NINF = np.NINF
PZERO = np.PZERO
NZERO = np.NZERO
nan = np.nan
# And some numpy utility functions
set_printoptions = np.set_printoptions
# We want isinstance(x, np.ndarray) checks in user code to work with our
# array-like types, including DeviceArray and UnshapedArray (i.e. the abstract
# array base class). We can override the isinstance behavior directly, without
# having the complexity of multiple inheritance on those classes, by defining
# the ndarray class to have a metaclass with special __instancecheck__ behavior.
_arraylike_types = (np.ndarray, UnshapedArray, DeviceArray)
class _ArrayMeta(type(np.ndarray)): # type: ignore
"""Metaclass for overriding ndarray isinstance checks."""
def __instancecheck__(self, instance):
try:
return isinstance(instance.aval, _arraylike_types)
except AttributeError:
return isinstance(instance, _arraylike_types)
class ndarray(np.ndarray, metaclass=_ArrayMeta):
dtype: np.dtype
shape: Tuple[int, ...]
size: int
def __init__(shape, dtype=None, buffer=None, offset=0, strides=None,
order=None):
raise TypeError("jax.numpy.ndarray() should not be instantiated explicitly."
" Use jax.numpy.array, or jax.numpy.zeros instead.")
iscomplexobj = np.iscomplexobj
shape = _shape = np.shape
ndim = _ndim = np.ndim
size = np.size
_dtype = dtypes.result_type
# At present JAX doesn't have a reason to distinguish between scalars and arrays
# in its object system. Further, we want JAX scalars to have the same type
# promotion behaviors as JAX arrays. Rather than introducing a new type of JAX
# scalar object with JAX promotion behaviors, instead we make the JAX scalar
# types return JAX arrays when instantiated.
class _ScalarMeta(type):
def __hash__(self):
return hash(self.dtype.type)
def __eq__(self, other):
return id(self) == id(other) or self.dtype.type == other
def __ne__(self, other):
return not (self == other)
def __call__(self, x):
return array(x, dtype=self.dtype)
def _make_scalar_type(np_scalar_type):
return _ScalarMeta(np_scalar_type.__name__, (object,),
{"dtype": np.dtype(np_scalar_type)})
bool_ = _make_scalar_type(np.bool_)
uint8 = _make_scalar_type(np.uint8)
uint16 = _make_scalar_type(np.uint16)
uint32 = _make_scalar_type(np.uint32)
uint64 = _make_scalar_type(np.uint64)
int8 = _make_scalar_type(np.int8)
int16 = _make_scalar_type(np.int16)
int32 = _make_scalar_type(np.int32)
int64 = _make_scalar_type(np.int64)
bfloat16 = _make_scalar_type(dtypes.bfloat16)
float16 = _make_scalar_type(np.float16)
float32 = single = _make_scalar_type(np.float32)
float64 = double = _make_scalar_type(np.float64)
complex64 = csingle = _make_scalar_type(np.complex64)
complex128 = cdouble = _make_scalar_type(np.complex128)
int_ = int32 if dtypes.int_ == np.int32 else int64
float_ = float32 if dtypes.float_ == np.float32 else float64
complex_ = complex64 if dtypes.complex_ == np.complex64 else complex128
number = np.number
inexact = np.inexact
complexfloating = np.complexfloating
floating = np.floating
integer = np.integer
signedinteger = np.signedinteger
unsignedinteger = np.unsignedinteger
flexible = np.flexible
character = np.character
object_ = np.object_
iinfo = dtypes.iinfo
dtype = np.dtype
can_cast = dtypes.can_cast
issubsctype = dtypes.issubsctype
promote_types = dtypes.promote_types
ComplexWarning = np.ComplexWarning
array_str = np.array_str
array_repr = np.array_repr
save = np.save
savez = np.savez
load = np.load
### utility functions
_canonicalize_axis = lax._canonicalize_axis
_DEFAULT_TYPEMAP = {
np.bool_: bool_,
np.int_: int_,
np.float_: float_,
np.complex_: complex_
}
def _np_array(obj, dtype=None, **kwargs):
"""Return a properly-typed numpy array.
`_np_array(obj, **kwds)` is equivalent to `np.array(obj, **kwds)`, with the
exception that when obj.dtype is not defined and dtype is not specified, it
uses Jax's default dtypes.
"""
arr = np.array(obj, dtype=dtype, **kwargs)
obj_dtype = getattr(obj, 'dtype', None)
arr_dtype = np.dtype(arr.dtype).type
if dtype is None and obj_dtype is None and arr_dtype in _DEFAULT_TYPEMAP:
arr = arr.astype(_DEFAULT_TYPEMAP[arr_dtype])
return arr
_np_asarray = partial(_np_array, copy=False)
def _promote_shapes(fun_name, *args):
"""Prepend implicit leading singleton dimensions for Numpy broadcasting."""
if len(args) < 2:
return args
else:
shapes = [shape(arg) for arg in args]
nonscalar_ranks = [len(shp) for shp in shapes if shp]
if not nonscalar_ranks or len(set(nonscalar_ranks)) == 1:
return args
else:
if FLAGS.jax_numpy_rank_promotion != "allow":
_rank_promotion_warning_or_error(fun_name, shapes)
result_rank = len(lax.broadcast_shapes(*shapes))
return [broadcast_to(arg, (1,) * (result_rank - len(shp)) + shp)
for arg, shp in zip(args, shapes)]
def _rank_promotion_warning_or_error(fun_name, shapes):
if FLAGS.jax_numpy_rank_promotion == "warn":
msg = ("Following NumPy automatic rank promotion for {} on shapes {}. "
"Set the jax_numpy_rank_promotion config option to 'allow' to "
"disable this warning; for more information, see "
"https://jax.readthedocs.io/en/latest/rank_promotion_warning.html.")
warnings.warn(msg.format(fun_name, ' '.join(map(str, shapes))))
elif FLAGS.jax_numpy_rank_promotion == "raise":
msg = ("Operands could not be broadcast together for {} on shapes {} "
"and with the config option jax_numpy_rank_promotion='raise'. "
"For more information, see "
"https://jax.readthedocs.io/en/latest/rank_promotion_warning.html.")
raise ValueError(msg.format(fun_name, ' '.join(map(str, shapes))))
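# Rank promotion in one line (hedged sketch): adding shapes (3,) and (2, 3)
# implicitly treats the first operand as shape (1, 3) before broadcasting, so
#   jnp.ones(3) + jnp.ones((2, 3))   # shape (2, 3) under "allow"; warns or raises otherwise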
def _promote_dtypes(*args):
"""Convenience function to apply Numpy argument dtype promotion."""
# TODO(dougalm,mattjj): This is a performance bottleneck. Consider memoizing.
if len(args) < 2:
return args
else:
to_dtype = result_type(*args)
return [lax.convert_element_type(x, to_dtype) for x in args]
def _promote_dtypes_inexact(*args):
"""Convenience function to apply Numpy argument dtype promotion.
Promotes arguments to an inexact type."""
to_dtype = _to_inexact_dtype(result_type(*args))
return [lax.convert_element_type(x, to_dtype) for x in args]
def _to_inexact_dtype(dtype):
"""Promotes a dtype into an inexact dtype, if it is not already one."""
return dtype if issubdtype(dtype, inexact) else promote_types(dtype, float_)
def _complex_elem_type(dtype):
"""Returns the float type of the real/imaginary parts of a complex dtype."""
return np.abs(np.zeros((), dtype)).dtype
def _result_dtype(op, *args):
"""Compute result dtype of applying op to arguments with given dtypes."""
args = [np.ones((0,) * ndim(arg), _dtype(arg)) for arg in args]
return _dtype(op(*args))
def _arraylike(x): return isinstance(x, ndarray) or isscalar(x)
def _check_arraylike(fun_name, *args):
"""Check if all args fit JAX's definition of arraylike (ndarray or scalar)."""
if _any(not _arraylike(arg) for arg in args):
pos, arg = next((i, arg) for i, arg in enumerate(args)
if not _arraylike(arg))
msg = "{} requires ndarray or scalar arguments, got {} at position {}."
raise TypeError(msg.format(fun_name, type(arg), pos))
def _promote_args(fun_name, *args):
"""Convenience function to apply Numpy argument shape and dtype promotion."""
_check_arraylike(fun_name, *args)
return _promote_shapes(fun_name, *_promote_dtypes(*args))
def _promote_args_inexact(fun_name, *args):
"""Convenience function to apply Numpy argument shape and dtype promotion.
Promotes non-inexact types to an inexact type."""
_check_arraylike(fun_name, *args)
return _promote_shapes(fun_name, *_promote_dtypes_inexact(*args))
def _constant_like(x, const):
return np.array(const, dtype=_dtype(x))
### implementations of numpy functions in terms of lax
@_wraps(np.fmin)
def fmin(x1, x2):
return where((x1 < x2) | isnan(x2), x1, x2)
@_wraps(np.fmax)
def fmax(x1, x2):
return where((x1 > x2) | isnan(x2), x1, x2)
@_wraps(np.finfo)
def finfo(dtype):
return dtypes.finfo(dtype)
@_wraps(np.issubdtype)
def issubdtype(arg1, arg2):
return dtypes.issubdtype(arg1, arg2)
@_wraps(np.isscalar)
def isscalar(element):
return dtypes.is_python_scalar(element) or np.isscalar(element)
iterable = np.iterable
@_wraps(np.result_type)
def result_type(*args):
return dtypes.result_type(*args)
def _one_to_one_unop(numpy_fn, lax_fn, promote_to_inexact=False):
if promote_to_inexact:
def fn(x):
x = lax.convert_element_type(x, _to_inexact_dtype(_dtype(x)))
return lax_fn(x)
else:
fn = lambda x: lax_fn(x)
return _wraps(numpy_fn)(fn)
def _one_to_one_binop(numpy_fn, lax_fn, promote_to_inexact=False):
if promote_to_inexact:
fn = lambda x1, x2: lax_fn(*_promote_args_inexact(numpy_fn.__name__, x1, x2))
else:
fn = lambda x1, x2: lax_fn(*_promote_args(numpy_fn.__name__, x1, x2))
return _wraps(numpy_fn)(fn)
def _maybe_bool_binop(numpy_fn, lax_fn, bool_lax_fn):
def fn(x1, x2):
x1, x2 = _promote_args(numpy_fn.__name__, x1, x2)
return lax_fn(x1, x2) if x1.dtype != bool_ else bool_lax_fn(x1, x2)
return _wraps(numpy_fn)(fn)
fabs = _one_to_one_unop(np.fabs, lax.abs, True)
bitwise_not = _one_to_one_unop(np.bitwise_not, lax.bitwise_not)
invert = _one_to_one_unop(np.invert, lax.bitwise_not)
negative = _one_to_one_unop(np.negative, lax.neg)
positive = _one_to_one_unop(np.positive, lambda x: x)
floor = _one_to_one_unop(np.floor, lax.floor, True)
ceil = _one_to_one_unop(np.ceil, lax.ceil, True)
exp = _one_to_one_unop(np.exp, lax.exp, True)
log = _one_to_one_unop(np.log, lax.log, True)
expm1 = _one_to_one_unop(np.expm1, lax.expm1, True)
log1p = _one_to_one_unop(np.log1p, lax.log1p, True)
sin = _one_to_one_unop(np.sin, lax.sin, True)
cos = _one_to_one_unop(np.cos, lax.cos, True)
tan = _one_to_one_unop(np.tan, lax.tan, True)
arcsin = _one_to_one_unop(np.arcsin, lax.asin, True)
arccos = _one_to_one_unop(np.arccos, lax.acos, True)
arctan = _one_to_one_unop(np.arctan, lax.atan, True)
sinh = _one_to_one_unop(np.sinh, lax.sinh, True)
cosh = _one_to_one_unop(np.cosh, lax.cosh, True)
arcsinh = _one_to_one_unop(np.arcsinh, lax.asinh, True)
tanh = _one_to_one_unop(np.tanh, lax.tanh, True)
arcsinh = _one_to_one_unop(np.arcsinh, lax.asinh, True)
arccosh = _one_to_one_unop(np.arccosh, lax.acosh, True)
arctanh = _one_to_one_unop(np.arctanh, lax.atanh, True)
sqrt = _one_to_one_unop(np.sqrt, lax.sqrt, True)
add = _maybe_bool_binop(np.add, lax.add, lax.bitwise_or)
bitwise_and = _one_to_one_binop(np.bitwise_and, lax.bitwise_and)
bitwise_or = _one_to_one_binop(np.bitwise_or, lax.bitwise_or)
bitwise_xor = _one_to_one_binop(np.bitwise_xor, lax.bitwise_xor)
left_shift = _one_to_one_binop(np.left_shift, lax.shift_left)
equal = _one_to_one_binop(np.equal, lax.eq)
multiply = _maybe_bool_binop(np.multiply, lax.mul, lax.bitwise_and)
not_equal = _one_to_one_binop(np.not_equal, lax.ne)
subtract = _one_to_one_binop(np.subtract, lax.sub)
arctan2 = _one_to_one_binop(np.arctan2, lax.atan2, True)
minimum = _one_to_one_binop(np.minimum, lax.min)
maximum = _one_to_one_binop(np.maximum, lax.max)
float_power = _one_to_one_binop(np.float_power, lax.pow, True)
nextafter = _one_to_one_binop(np.nextafter, lax.nextafter, True)
def _comparison_op(numpy_fn, lax_fn):
def fn(x1, x2):
x1, x2 = _promote_args(numpy_fn.__name__, x1, x2)
# Comparison on complex types are defined as a lexicographic ordering on
# the (real, imag) pair.
if issubdtype(_dtype(x1), complexfloating):
rx = lax.real(x1)
ry = lax.real(x2)
return lax.select(lax.eq(rx, ry), lax_fn(lax.imag(x1), lax.imag(x2)),
lax_fn(rx, ry))
return lax_fn(x1, x2)
return _wraps(numpy_fn)(fn)
greater_equal = _comparison_op(np.greater_equal, lax.ge)
greater = _comparison_op(np.greater, lax.gt)
less_equal = _comparison_op(np.less_equal, lax.le)
less = _comparison_op(np.less, lax.lt)
def _logical_op(np_op, bitwise_op):
@_wraps(np_op, update_doc=False)
def op(*args):
zero = lambda x: lax.full_like(x, shape=(), fill_value=0)
args = (x if issubdtype(_dtype(x), bool_) else lax.ne(x, zero(x))
for x in args)
return bitwise_op(*_promote_args(np_op.__name__, *args))
return op
logical_and = _logical_op(np.logical_and, lax.bitwise_and)
logical_not = _logical_op(np.logical_not, lax.bitwise_not)
logical_or = _logical_op(np.logical_or, lax.bitwise_or)
logical_xor = _logical_op(np.logical_xor, lax.bitwise_xor)
@_wraps(np.right_shift)
def right_shift(x1, x2):
x1, x2 = _promote_args(np.right_shift.__name__, x1, x2)
lax_fn = lax.shift_right_logical if \
np.issubdtype(x1.dtype, np.unsignedinteger) else lax.shift_right_arithmetic
return lax_fn(x1, x2)
@_wraps(np.absolute)
def absolute(x):
return x if issubdtype(_dtype(x), unsignedinteger) else lax.abs(x)
abs = _wraps(np.abs)(absolute)
@_wraps(np.rint)
def rint(x):
dtype = _dtype(x)
if issubdtype(dtype, integer):
return lax.convert_element_type(x, float_)
if issubdtype(dtype, complexfloating):
return lax.complex(rint(lax.real(x)), rint(lax.imag(x)))
return _round_to_nearest_even(x)
@_wraps(np.sign)
def sign(x):
dtype = _dtype(x)
if issubdtype(dtype, complexfloating):
re = lax.real(x)
return lax.complex(
lax.sign(where(re != 0, re, lax.imag(x))), _constant_like(re, 0))
return lax.sign(x)
@_wraps(np.copysign)
def copysign(x1, x2):
if issubdtype(_dtype(x1), complexfloating) or issubdtype(_dtype(x2), complexfloating):
raise TypeError("copysign does not support complex-valued inputs")
x1, x2 = _promote_shapes("copysign", x1, x2)
return where(signbit(x2), -lax.abs(x1), lax.abs(x1))
@_wraps(np.true_divide)
def true_divide(x1, x2):
x1, x2 = _promote_args_inexact("true_divide", x1, x2)
return lax.div(x1, x2)
@_wraps(np.divide)
def divide(x1, x2):
# decide whether to perform integer division based on Numpy result dtype, as a
# way to check whether Python 3 style division is active in Numpy
result_dtype = _result_dtype(np.divide, x1, x2)
if issubdtype(result_dtype, integer):
return floor_divide(x1, x2)
else:
return true_divide(x1, x2)
@_wraps(np.floor_divide)
def floor_divide(x1, x2):
x1, x2 = _promote_args("floor_divide", x1, x2)
dtype = _dtype(x1)
if issubdtype(dtype, integer):
quotient = lax.div(x1, x2)
select = logical_and(lax.sign(x1) != lax.sign(x2), lax.rem(x1, x2) != 0)
# TODO(mattjj): investigate why subtracting a scalar was causing promotion
return where(select, quotient - np.array(1, _dtype(quotient)), quotient)
elif issubdtype(dtype, complexfloating):
x1r = lax.real(x1)
x1i = lax.imag(x1)
x2r = lax.real(x2)
x2i = lax.imag(x2)
which = lax.ge(lax.abs(x2r), lax.abs(x2i))
rat1 = where(which, lax._const(x2i, 1), lax.div(x2r, x2i))
rat2 = where(which, lax.div(x2i, x2r), lax._const(x2i, 1))
out = lax.floor(lax.div(lax.add(lax.mul(x1r, rat1), lax.mul(x1i, rat2)),
lax.add(lax.mul(x2r, rat1), lax.mul(x2i, rat2))))
return lax.convert_element_type(out, dtype)
else:
return _float_divmod(x1, x2)[0]
@_wraps(np.divmod)
def divmod(x1, x2):
x1, x2 = _promote_args("divmod", x1, x2)
if issubdtype(_dtype(x1), integer):
return floor_divide(x1, x2), remainder(x1, x2)
else:
return _float_divmod(x1, x2)
def _float_divmod(x1, x2):
# see float_divmod in floatobject.c of CPython
mod = lax.rem(x1, x2)
div = lax.div(lax.sub(x1, mod), x2)
ind = lax.bitwise_and(mod != 0, lax.sign(x2) != lax.sign(mod))
mod = lax.select(ind, mod + x2, mod)
div = lax.select(ind, div - _constant_like(div, 1), div)
return lax.round(div), mod
@_wraps(np.power)
def power(x1, x2):
# Special case for small positive integer scalars: use binary exponentiation.
# Using lax.pow may be imprecise for floating-point values; the goal of this
# code path is to make sure we end up with a precise output for the common
# pattern ``x ** 2`` or similar.
if isinstance(x2, int):
return lax.integer_pow(x1, x2)
x1, x2 = _promote_args(np.power, x1, x2)
dtype = _dtype(x1)
if not issubdtype(dtype, integer):
return lax.pow(x1, x2)
# Integer power => use binary exponentiation.
# TODO(phawkins): add integer pow support to XLA.
bits = 6 # Anything more would overflow for any x1 > 1
acc = ones(shape(x1), dtype=dtype)
for _ in range(bits):
acc = where(lax.bitwise_and(x2, _constant_like(x2, 1)),
lax.mul(acc, x1), acc)
x1 = lax.mul(x1, x1)
x2 = lax.shift_right_logical(x2, _constant_like(x2, 1))
return acc
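# e.g. (sketch of the integer path above): jnp.power(3, 4) repeatedly squares the
# base (3 -> 9 -> 81) and multiplies it into the accumulator whenever the matching
# bit of the exponent 0b100 is set, giving 81.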
@custom_jvp
@_wraps(np.logaddexp)
def logaddexp(x1, x2):
x1, x2 = _promote_shapes("logaddexp", *_promote_dtypes_inexact(x1, x2))
amax = lax.max(x1, x2)
delta = lax.sub(x1, x2)
return lax.select(isnan(delta),
lax.add(x1, x2), # NaNs or infinities of the same sign.
lax.add(amax, lax.log1p(lax.exp(-lax.abs(delta)))))
@logaddexp.defjvp
def _logaddexp_jvp(primals, tangents):
x1, x2 = primals
t1, t2 = tangents
x1, x2, t1, t2 = broadcast_arrays(x1, x2, t1, t2)
primal_out = logaddexp(x1, x2)
tangent_out = (t1 * exp(_replace_inf(x1) - _replace_inf(primal_out)) +
t2 * exp(_replace_inf(x2) - _replace_inf(primal_out)))
return primal_out, tangent_out
def _replace_inf(x):
return lax.select(isposinf(x), zeros_like(x), x)
@custom_jvp
@_wraps(np.logaddexp2)
def logaddexp2(x1, x2):
x1, x2 = _promote_shapes("logaddexp2", *_promote_dtypes_inexact(x1, x2))
amax = lax.max(x1, x2)
delta = lax.sub(x1, x2)
return lax.select(isnan(delta),
lax.add(x1, x2), # NaNs or infinities of the same sign.
lax.add(amax, lax.div(lax.log1p(exp2(-lax.abs(delta))),
_constant_like(x1, np.log(2)))))
@logaddexp2.defjvp
def _logaddexp2_jvp(primals, tangents):
x1, x2 = primals
t1, t2 = tangents
x1, x2, t1, t2 = broadcast_arrays(x1, x2, t1, t2)
primal_out = logaddexp2(x1, x2)
tangent_out = (t1 * 2 ** (_replace_inf(x1) - _replace_inf(primal_out)) +
t2 * 2 ** (_replace_inf(x2) - _replace_inf(primal_out)))
return primal_out, tangent_out
@_wraps(np.log2)
def log2(x):
x, = _promote_dtypes_inexact(x)
return lax.div(lax.log(x), lax.log(_constant_like(x, 2)))
@_wraps(np.log10)
def log10(x):
x, = _promote_dtypes_inexact(x)
return lax.div(lax.log(x), lax.log(_constant_like(x, 10)))
@_wraps(np.exp2)
def exp2(x):
x, = _promote_dtypes_inexact(x)
return lax.exp(lax.mul(lax.log(_constant_like(x, 2)), x))
@_wraps(np.signbit)
def signbit(x):
x, = _promote_shapes("signbit", x)
dtype = _dtype(x)
if issubdtype(dtype, integer):
return lax.lt(x, _constant_like(x, 0))
elif issubdtype(dtype, bool_):
return full_like(x, False, dtype=bool_)
elif not issubdtype(dtype, floating):
raise ValueError(
"jax.numpy.signbit is not well defined for %s" % dtype)
# TPU supports BF16 but not S16 types, so as a workaround, convert BF16 to
# F32.
if dtype == bfloat16:
dtype = float32
x = lax.convert_element_type(x, float32)
info = finfo(dtype)
if info.bits == 16:
int_type = np.int16
elif info.bits == 32:
int_type = np.int32
elif info.bits == 64:
int_type = np.int64
else:
raise NotImplementedError(
"jax.numpy.signbit only supports 16, 32, and 64-bit types.")
x = lax.bitcast_convert_type(x, int_type)
return lax.convert_element_type(x >> (info.nexp + info.nmant), np.bool_)
@_wraps(np.trapz)
def trapz(y, x=None, dx=1.0, axis=-1):
y = moveaxis(y, axis, -1)
if x is not None:
if ndim(x) == 1:
dx = diff(x)
else:
dx = moveaxis(diff(x, axis=axis), axis, -1)
return 0.5 * (dx * (y[..., 1:] + y[..., :-1])).sum(-1)
@_wraps(np.trunc)
def trunc(x):
return where(lax.lt(x, lax._const(x, 0)), ceil(x), floor(x))
def _conv(x, y, mode, op, precision):
if issubdtype(x.dtype, complexfloating) or issubdtype(y.dtype, complexfloating):
raise NotImplementedError(f"{op}() does not support complex inputs")
if ndim(x) != 1 or ndim(y) != 1:
raise ValueError(f"{op}() only support 1-dimensional inputs.")
x, y = _promote_dtypes_inexact(x, y)
if len(x) == 0 or len(y) == 0:
raise ValueError(f"{op}: inputs cannot be empty, got shapes {x.shape} and {y.shape}.")
out_order = slice(None)
if len(x) < len(y):
x, y = y, x
if op == "correlate":
out_order = slice(None, None, -1)
if op == 'convolve':
y = y[::-1]
if mode == 'valid':
padding = [(0, 0)]
elif mode == 'same':
padding = [(y.shape[0] // 2, y.shape[0] - y.shape[0] // 2 - 1)]
elif mode == 'full':
padding = [(y.shape[0] - 1, y.shape[0] - 1)]
else:
raise ValueError("mode must be one of ['full', 'same', 'valid']")
result = lax.conv_general_dilated(x[None, None, :], y[None, None, :], (1,),
padding, precision=precision)
return result[0, 0, out_order]
@_wraps(np.convolve, lax_description=_PRECISION_DOC)
def convolve(a, v, mode='full', *, precision=None):
return _conv(a, v, mode, 'convolve', precision)
@_wraps(np.correlate, lax_description=_PRECISION_DOC)
def correlate(a, v, mode='valid', *, precision=None):
return _conv(a, v, mode, 'correlate', precision)
def _normalize_float(x):
info = finfo(_dtype(x))
cond = lax.abs(x) < info.tiny
x1 = where(cond, x * (1 << info.nmant), x)
x2 = where(cond,
full_like(x, -info.nmant, dtype=np.int32),
zeros_like(x, dtype=np.int32))
return lax.convert_element_type(x1, _dtype(x)), x2
_INT_DTYPES = {
16: np.int16,
32: np.int32,
64: np.int64,
}
@_wraps(np.ldexp)
@jit
def ldexp(x1, x2):
dtype = _result_dtype(np.ldexp, x1, x2)
x1, x2 = _promote_shapes("ldexp", x1, x2)
x1 = lax.convert_element_type(x1, dtype)
info = finfo(dtype)
mask = (1 << info.nexp) - 1
bias = ((1 << info.nexp) - 1) >> 1
int_type = _INT_DTYPES[info.bits]
x, e = _normalize_float(x1)
x2 += lax.convert_element_type(e, np.int32)
x = lax.bitcast_convert_type(x, int_type)
x2 += ((x >> info.nmant) & mask) - bias
# find underflow/overflow before denormalization
underflow_cond = x2 < -(bias + info.nmant)
overflow_cond = x2 > bias
m = ones_like(x, dtype=dtype)
# denormals
cond = x2 < -bias + 1
x2 = where(cond, x2 + info.nmant, x2)
m = where(cond, m / (1 << info.nmant), m)
x2 = lax.convert_element_type(x2, np.int32)
x &= ~(mask << info.nmant)
x |= ((lax.convert_element_type(x2, int_type) + bias) << info.nmant)
x = lax.convert_element_type(m, dtype) * lax.bitcast_convert_type(x, dtype)
# underflow
x = where(underflow_cond, zeros_like(x, dtype=dtype), x)
# overflow
x = where(overflow_cond, lax.sign(x1) * full_like(x, np.inf), x)
# ldexp(x1, x2) = x1 for x1 = inf, -inf, nan, 0
return where(isinf(x1) | isnan(x1) | (x1 == 0), x1, x)
@_wraps(np.frexp)
@jit
def frexp(x):
x = asarray(x)
if issubdtype(x.dtype, complexfloating):
raise TypeError("frexp does not support complex-valued inputs")
elif not issubdtype(x.dtype, floating):
x = lax.convert_element_type(x, float_)
dtype = _dtype(x)
info = finfo(dtype)
mask = (1 << info.nexp) - 1
bias = ((1 << info.nexp) - 1) >> 1
int_type = _INT_DTYPES[info.bits]
x1, x2 = _normalize_float(x)
x1 = lax.bitcast_convert_type(x1, int_type)
x2 += ((x1 >> info.nmant) & mask) - bias + 1
x1 &= ~(mask << info.nmant)
x1 |= (bias - 1) << info.nmant
x1 = lax.bitcast_convert_type(x1, dtype)
cond = isinf(x) | isnan(x) | (x == 0)
x2 = where(cond, zeros_like(x2), x2)
return where(cond, x, x1), lax.convert_element_type(x2, int32)
@_wraps(np.remainder)
def remainder(x1, x2):
x1, x2 = _promote_args("remainder", x1, x2)
zero = _constant_like(x1, 0)
trunc_mod = lax.rem(x1, x2)
trunc_mod_not_zero = lax.ne(trunc_mod, zero)
do_plus = lax.bitwise_and(
lax.ne(lax.lt(trunc_mod, zero), lax.lt(x2, zero)), trunc_mod_not_zero)
return lax.select(do_plus, lax.add(trunc_mod, x2), trunc_mod)
mod = _wraps(np.mod)(remainder)
@_wraps(np.fmod)
def fmod(x1, x2):
if issubdtype(_dtype(x1, x2), integer):
x2 = where(x2 == 0, 1, x2)
return lax.rem(*_promote_args(np.fmod, x1, x2))
@_wraps(np.cbrt)
def cbrt(x):
x, = _promote_dtypes_inexact(x)
return lax.sign(x) * power(lax.abs(x), _constant_like(x, 1. / 3.))
@_wraps(np.square)
def square(x): return lax.integer_pow(x, 2)
@_wraps(np.deg2rad)
def deg2rad(x):
x, = _promote_dtypes_inexact(x)
return lax.mul(x, lax._const(x, pi / 180))
@_wraps(np.rad2deg)
def rad2deg(x):
x, = _promote_dtypes_inexact(x)
return lax.mul(x, lax._const(x, 180 / pi))
degrees = rad2deg
radians = deg2rad
@_wraps(np.histogram_bin_edges)
def histogram_bin_edges(a, bins=10, range=None, weights=None):
if isinstance(bins, str):
raise NotImplementedError("string values for `bins` not implemented.")
a = ravel(a)
b = array(bins)
if b.ndim == 1:
return b
if range is None:
range = (a.min(), a.max())
assert len(range) == 2
range = asarray(range)
range = (where(ptp(range) == 0, range[0] - 0.5, range[0]),
where(ptp(range) == 0, range[1] + 0.5, range[1]))
dtype = _dtype(a)
if issubdtype(dtype, integer):
dtype = promote_types(dtype, float32)
return linspace(range[0], range[1], bins + 1, dtype=dtype)
@_wraps(np.histogram)
def histogram(a, bins=10, range=None, weights=None, density=None):
if weights is not None and a.shape != weights.shape:
raise ValueError("weights should have the same shape as a.")
a = ravel(a)
if weights is not None:
weights = ravel(weights)
else:
weights = ones_like(a)
bin_edges = histogram_bin_edges(a, bins, range, weights)
bin_idx = searchsorted(bin_edges, a, side='right')
bin_idx = where(a == bin_edges[-1], len(bin_edges) - 1, bin_idx)
counts = bincount(bin_idx, weights, length=len(bin_edges))[1:]
if density:
bin_widths = diff(bin_edges)
counts = counts / bin_widths / counts.sum()
return counts, bin_edges
@_wraps(np.heaviside)
def heaviside(x1, x2):
x1, x2 = _promote_dtypes_inexact(x1, x2)
zero = lax._const(x1, 0)
return where(lax.lt(x1, zero), zero,
where(lax.gt(x1, zero), lax._const(x1, 1), x2))
@_wraps(np.hypot)
def hypot(x1, x2):
x1, x2 = _promote_dtypes_inexact(x1, x2)
return lax.sqrt(x1*x1 + x2*x2)
@_wraps(np.reciprocal)
def reciprocal(x):
x, = _promote_dtypes_inexact(x)
return lax.integer_pow(x, -1)
@_wraps(np.sinc, update_doc=False)
def sinc(x):
x, = _promote_dtypes_inexact(x)
eq_zero = lax.eq(x, lax._const(x, 0))
safe_x = where(eq_zero, lax._const(x, 0), x)
pi_x = lax.mul(lax._const(x, pi), safe_x)
return where(eq_zero,
lax._const(x, 1), lax.div(lax.sin(pi_x), pi_x))
@_wraps(np.transpose)
def transpose(a, axes=None):
axes = np.arange(ndim(a))[::-1] if axes is None else axes
return lax.transpose(a, axes)
@_wraps(np.rot90)
def rot90(m, k=1, axes=(0, 1)):
ax1, ax2 = axes
ax1 = _canonicalize_axis(ax1, m.ndim)
ax2 = _canonicalize_axis(ax2, m.ndim)
if ax1 == ax2:
raise ValueError("Axes must be different") # same as numpy error
k = k % 4
if k == 0:
return m
elif k == 2:
return flip(flip(m, ax1), ax2)
else:
perm = list(range(m.ndim))
perm[ax1], perm[ax2] = perm[ax2], perm[ax1]
if k == 1:
return transpose(flip(m, ax2), perm)
else:
return flip(transpose(m, perm), ax2)
@_wraps(np.flip)
def flip(m, axis=None):
if axis is None:
return lax.rev(m, list(range(len(m.shape))))
return lax.rev(m, [_canonicalize_axis(axis, len(m.shape))])
@_wraps(np.fliplr)
def fliplr(m):
return flip(m, 1)
@_wraps(np.flipud)
def flipud(m):
return flip(m, 0)
@_wraps(np.conjugate)
def conjugate(x):
return lax.conj(x) if iscomplexobj(x) else x
conj = conjugate
@_wraps(np.imag)
def imag(val):
return lax.imag(val) if iscomplexobj(val) else zeros_like(val)
@_wraps(np.real)
def real(val):
return lax.real(val) if iscomplexobj(val) else val
@_wraps(np.iscomplex)
def iscomplex(x):
i = imag(x)
return lax.ne(i, lax._const(i, 0))
@_wraps(np.isreal)
def isreal(x):
i = imag(x)
return lax.eq(i, lax._const(i, 0))
@_wraps(np.angle)
def angle(z):
re = real(z)
im = imag(z)
dtype = _dtype(re)
if not issubdtype(dtype, inexact) or (
issubdtype(_dtype(z), floating) and ndim(z) == 0):
dtype = dtypes.canonicalize_dtype(float_)
re = lax.convert_element_type(re, dtype)
im = lax.convert_element_type(im, dtype)
return lax.atan2(im, re)
@_wraps(np.diff)
def diff(a, n=1, axis=-1):
if not isinstance(a, ndarray) or a.ndim == 0:
return a
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
nd = a.ndim
slice1 = [slice(None)] * nd
slice2 = [slice(None)] * nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
op = not_equal if a.dtype == np.bool_ else subtract
for _ in range(n):
a = op(a[slice1], a[slice2])
return a
_EDIFF1D_DOC = """\
Unlike NumPy's implementation of ediff1d, :py:func:`jax.numpy.ediff1d` will not
issue an error if casting ``to_end`` or ``to_begin`` to the type of ``ary``
loses precision.
"""
@_wraps(np.ediff1d, lax_description=_EDIFF1D_DOC)
def ediff1d(ary, to_end=None, to_begin=None):
ary = ravel(asarray(ary))
result = lax.sub(ary[1:], ary[:-1])
if to_begin is not None:
result = concatenate((ravel(asarray(to_begin, dtype=ary.dtype)), result))
if to_end is not None:
result = concatenate((result, ravel(asarray(to_end, dtype=ary.dtype))))
return result
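# Hedged example of the casting note above: with an int32 ``ary``, a float
# ``to_end`` is silently cast instead of raising as recent NumPy versions do.
#   jnp.ediff1d(jnp.array([1, 3, 6]), to_end=1.5)   # -> [2, 3, 1]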
@partial(jit, static_argnums=2)
def _gradient(a, varargs, axis):
def gradient_along_axis(a, h, axis):
sliced = partial(lax.slice_in_dim, a, axis=axis)
a_grad = concatenate((
(sliced(1, 2) - sliced(0, 1)), # upper edge
(sliced(2, None) - sliced(None, -2)) * 0.5, # inner
(sliced(-1, None) - sliced(-2, -1)), # lower edge
), axis)
return a_grad / h
if axis is None:
axis = range(a.ndim)
else:
if isinstance(axis, int):
axis = (axis,)
if not isinstance(axis, tuple) and not isinstance(axis, list):
raise ValueError("Give `axis` either as int or iterable")
elif len(axis) == 0:
return []
axis = [_canonicalize_axis(i, a.ndim) for i in axis]
if _min([s for i, s in enumerate(a.shape) if i in axis]) < 2:
raise ValueError("Shape of array too small to calculate "
"a numerical gradient, "
"at least 2 elements are required.")
len_axes = len(axis)
n = len(varargs)
if n == 0 or varargs is None:
# no spacing
dx = [1.0] * len_axes
elif n == 1:
# single value for all axes
dx = varargs * len_axes
elif n == len_axes:
dx = varargs
else:
TypeError("Invalid number of spacing arguments %d" % n)
if ndim(dx[0]) != 0:
raise NotImplementedError("Non-constant spacing not implemented")
# TODO: use jax.lax loop tools if possible
a_grad = [gradient_along_axis(a, h, ax) for ax, h in zip(axis, dx)]
if len(axis) == 1:
a_grad = a_grad[0]
return a_grad
@_wraps(np.gradient)
def gradient(f, *args, **kwargs):
axis = kwargs.pop("axis", None)
if not len(kwargs) == 0:
raise ValueError("Only `axis` keyword is implemented")
return _gradient(f, args, axis)
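# Quick hedged check of the edge/centered differences implemented above:
#   jnp.gradient(jnp.array([1., 2., 4., 7.]))   # -> [1. , 1.5, 2.5, 3. ]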
@_wraps(np.isrealobj)
def isrealobj(x):
return not iscomplexobj(x)
@_wraps(np.reshape)
def reshape(a, newshape, order="C"):
try:
return a.reshape(newshape, order=order) # forward to method for ndarrays
except AttributeError:
return _reshape(a, newshape, order=order)
def _compute_newshape(a, newshape):
"""Fixes a -1 value in newshape, if present."""
# other errors, like having more than one -1, are caught downstream
newsize = _prod(newshape)
if newsize < 0:
fix = a.size // -newsize
return [d if d != -1 else fix for d in newshape]
else:
return newshape
def _reshape(a, newshape, order="C"):
computed_newshape = _compute_newshape(a, newshape)
if order == "C":
return lax.reshape(a, computed_newshape, None)
elif order == "F":
dims = np.arange(ndim(a))[::-1]
return lax.reshape(a, computed_newshape[::-1], dims).T
elif order == "A":
raise NotImplementedError("np.reshape order=A is not implemented.")
else:
raise ValueError("Unexpected value for 'order' argument: {}.".format(order))
def _reshape_method(a, *newshape, **kwargs):
order = kwargs.pop("order", "C")
if len(kwargs) == 1:
invalid_kwarg, = kwargs
msg = "'{}' is an invalid keyword argument for this function"
raise TypeError(msg.format(invalid_kwarg)) # same as NumPy error
elif kwargs:
invalid_kwargs = "'{}'".format("'".join(kwargs))
msg = "{} are invalid keyword arguments for this function"
raise TypeError(msg.format(invalid_kwargs)) # different from NumPy error
if len(newshape) == 1 and not isinstance(newshape[0], int):
newshape = newshape[0]
return _reshape(a, newshape, order=order)
@_wraps(np.ravel)
def ravel(a, order="C"):
if order == "K":
raise NotImplementedError("Ravel not implemented for order='K'.")
return reshape(a, (size(a),), order)
_UNRAVEL_INDEX_DOC = """\
Unlike numpy's implementation of unravel_index, negative indices are accepted
and out-of-bounds indices are clipped.
"""
@_wraps(np.unravel_index, lax_description=_UNRAVEL_INDEX_DOC)
def unravel_index(indices, shape):
indices = asarray(indices)
sizes = pad(shape, (0, 1), constant_values=1)
cumulative_sizes = cumprod(sizes[::-1])[::-1]
total_size = cumulative_sizes[0]
# Clip so raveling and unraveling an oob index will not change the behavior
clipped_indices = clip(indices, -total_size, total_size - 1)
# Add enough trailing dims to avoid conflict with flat_index
cumulative_sizes = cumulative_sizes.reshape([-1] + [1] * indices.ndim)
idx = clipped_indices % cumulative_sizes[:-1] // cumulative_sizes[1:]
return tuple(idx)
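# Hedged example of the clipping described above (shape (2, 3), so size 6):
#   jnp.unravel_index(jnp.array([5, 11]), (2, 3))
#   # 5 -> (1, 2); 11 is out of bounds and is clipped to the last element, also (1, 2)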
@_wraps(np.squeeze)
def squeeze(a, axis: Union[int, Tuple[int, ...]] = None):
if axis is None:
a_shape = shape(a)
axis = tuple(i for i, d in enumerate(a_shape) if d == 1)
elif not isinstance(axis, tuple):
axis = (axis,)
return lax.squeeze(a, axis)
@_wraps(np.expand_dims)
def expand_dims(a, axis: Union[int, Tuple[int, ...]]):
if not isinstance(axis, tuple):
axis = (axis,)
return lax.expand_dims(a, axis)
@_wraps(np.swapaxes)
def swapaxes(a, axis1, axis2):
perm = np.arange(ndim(a))
perm[axis1], perm[axis2] = perm[axis2], perm[axis1]
return lax.transpose(a, perm)
@_wraps(np.moveaxis)
def moveaxis(a, source, destination):
if isinstance(source, int):
source = (source,)
if isinstance(destination, int):
destination = (destination,)
source = tuple(_canonicalize_axis(i, ndim(a)) for i in source)
destination = tuple(_canonicalize_axis(i, ndim(a)) for i in destination)
if len(source) != len(destination):
raise ValueError("Inconsistent number of elements: {} vs {}"
.format(len(source), len(destination)))
perm = [i for i in range(ndim(a)) if i not in source]
for dest, src in sorted(zip(destination, source)):
perm.insert(dest, src)
return lax.transpose(a, perm)
@_wraps(np.isclose)
def isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False):
a, b = _promote_args("isclose", asarray(a), asarray(b))
dtype = _dtype(a)
if issubdtype(dtype, inexact):
if issubdtype(dtype, complexfloating):
dtype = _complex_elem_type(dtype)
rtol = lax.convert_element_type(rtol, dtype)
atol = lax.convert_element_type(atol, dtype)
out = lax.le(
lax.abs(lax.sub(a, b)),
lax.add(atol, lax.mul(rtol, lax.abs(b))))
# This corrects the comparisons for infinite and nan values
a_inf = isinf(a)
b_inf = isinf(b)
any_inf = logical_or(a_inf, b_inf)
both_inf = logical_and(a_inf, b_inf)
# Make all elements where either a or b are infinite to False
out = logical_and(out, logical_not(any_inf))
    # Make all elements where a and b are the same inf to True
same_value = lax.eq(a, b)
same_inf = logical_and(both_inf, same_value)
out = logical_or(out, same_inf)
# Make all elements where either a or b is NaN to False
a_nan = isnan(a)
b_nan = isnan(b)
any_nan = logical_or(a_nan, b_nan)
out = logical_and(out, logical_not(any_nan))
if equal_nan:
      # Make all elements where both a and b are NaN to True
both_nan = logical_and(a_nan, b_nan)
out = logical_or(out, both_nan)
return _maybe_numpy_1_13_isclose_behavior(a, out)
else:
return lax.eq(a, b)
numpy_version = tuple(map(int, np.version.version.split('.')[:2]))
if numpy_version < (1, 14):
# see discussion at https://github.com/numpy/numpy/pull/9720
def _maybe_numpy_1_13_isclose_behavior(a, out):
if size(out) == 1 and issubdtype(_dtype(a), complexfloating):
return lax.reshape(out, (1,))
else:
return out
else:
def _maybe_numpy_1_13_isclose_behavior(a, out):
return out
@_wraps(np.interp)
def interp(x, xp, fp, left=None, right=None, period=None):
if shape(xp) != shape(fp) or ndim(xp) != 1:
raise ValueError("xp and fp must be one-dimensional arrays of equal size")
x, xp, fp = map(asarray, _promote_dtypes_inexact(x, xp, fp))
if period is not None:
if period == 0:
raise ValueError(f"period must be a non-zero value; got {period}")
period = abs(period)
x = x % period
xp = xp % period
xp, fp = lax.sort_key_val(xp, fp)
xp = concatenate([xp[-1:] - period, xp, xp[:1] + period])
fp = concatenate([fp[-1:], fp, fp[:1]])
i = clip(searchsorted(xp, x, side='right'), 1, len(xp) - 1)
df = fp[i] - fp[i - 1]
dx = xp[i] - xp[i - 1]
delta = x - xp[i - 1]
f = where((dx == 0), fp[i], fp[i - 1] + (delta / dx) * df)
if period is None:
f = where(x < xp[0], fp[0] if left is None else left, f)
f = where(x > xp[-1], fp[-1] if right is None else right, f)
return f
@_wraps(np.in1d, lax_description="""
In the JAX version, the `assume_unique` argument is not referenced.
""")
def in1d(ar1, ar2, assume_unique=False, invert=False):
# TODO(vanderplas): use sorting-based approach for larger inputs.
ar1 = ravel(ar1)
ar2 = ravel(ar2)
if invert:
return (ar1[:, None] != ar2).all(-1)
else:
return (ar1[:, None] == ar2).any(-1)
@partial(jit, static_argnums=2)
def _intersect1d_sorted_mask(ar1, ar2, return_indices=False):
"""
Helper function for intersect1d which is jit-able
"""
ar = concatenate((ar1, ar2))
if return_indices:
iota = lax.broadcasted_iota(np.int64, shape(ar), dimension=0)
aux, indices = lax.sort_key_val(ar, iota)
else:
aux = sort(ar)
mask = aux[1:] == aux[:-1]
if return_indices:
return aux, mask, indices
else:
return aux, mask
@_wraps(np.intersect1d)
def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
if not assume_unique:
if return_indices:
ar1, ind1 = unique(ar1, return_index=True)
ar2, ind2 = unique(ar2, return_index=True)
else:
ar1 = unique(ar1)
ar2 = unique(ar2)
else:
ar1 = ravel(ar1)
ar2 = ravel(ar2)
if return_indices:
aux, mask, aux_sort_indices = _intersect1d_sorted_mask(ar1, ar2, return_indices)
else:
aux, mask = _intersect1d_sorted_mask(ar1, ar2, return_indices)
int1d = aux[:-1][mask]
if return_indices:
ar1_indices = aux_sort_indices[:-1][mask]
ar2_indices = aux_sort_indices[1:][mask] - ar1.size
if not assume_unique:
ar1_indices = ind1[ar1_indices]
ar2_indices = ind2[ar2_indices]
return int1d, ar1_indices, ar2_indices
else:
return int1d
@_wraps(np.isin, lax_description="""
In the JAX version, the `assume_unique` argument is not referenced.
""")
def isin(element, test_elements, assume_unique=False, invert=False):
result = in1d(element, test_elements, assume_unique=assume_unique, invert=invert)
return result.reshape(shape(element))
# The `jit` on `where` exists to avoid materializing constants in cases like
# `np.where(np.zeros(1000), 7, 4)`. In op-by-op mode, we don't want to
# materialize the broadcast forms of scalar arguments.
@jit
def _where(condition, x=None, y=None):
if x is None or y is None:
raise ValueError("Either both or neither of the x and y arguments should "
"be provided to jax.numpy.where, got {} and {}."
.format(x, y))
if not issubdtype(_dtype(condition), bool_):
condition = lax.ne(condition, zeros_like(condition))
x, y = _promote_dtypes(x, y)
condition, x, y = broadcast_arrays(condition, x, y)
return lax.select(condition, x, y) if np.size(x) else x
_WHERE_DOC = """\
At present, JAX does not support JIT-compilation of the single-argument form
of :py:func:`jax.numpy.where` because its output shape is data-dependent. The
three-argument form does not have a data-dependent shape and can be JIT-compiled
successfully.
"""
@_wraps(np.where, update_doc=False, lax_description=_WHERE_DOC)
def where(condition, x=None, y=None):
if x is None and y is None:
return nonzero(asarray(condition))
else:
return _where(condition, x, y)
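# Sketch of the distinction documented above (assuming ``import jax`` and
# ``import jax.numpy as jnp``):
#   jax.jit(lambda c, x, y: jnp.where(c, x, y))(jnp.array([True, False]), 1, 2)  # fine
#   jax.jit(jnp.where)(jnp.array([0, 1, 0]))  # fails: the one-argument form has a data-dependent shape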
@_wraps(np.select)
def select(condlist, choicelist, default=0):
if len(condlist) != len(choicelist):
msg = "condlist must have length equal to choicelist ({} vs {})"
raise ValueError(msg.format(len(condlist), len(choicelist)))
if len(condlist) == 0:
raise ValueError("condlist must be non-empty")
choices = _promote_dtypes(default, *choicelist)
choicelist = choices[1:]
output = choices[0]
for cond, choice in zip(condlist[::-1], choicelist[::-1]):
output = where(cond, choice, output)
return output
@_wraps(np.bincount, lax_description="""\
Jax adds the optional `length` parameter which specifies the output length, and
defaults to ``x.max() + 1``. It must be specified for bincount to be compilable.
Values larger than the specified length will be discarded.
Additionally, while ``np.bincount`` raises an error if the input array contains
negative values, ``jax.numpy.bincount`` treats negative values as zero.
""")
def bincount(x, weights=None, minlength=0, *, length=None):
if not issubdtype(_dtype(x), integer):
msg = f"x argument to bincount must have an integer type; got {x.dtype}"
raise TypeError(msg)
if length is None:
length = max(x) + 1
length = _max(length, minlength)
if ndim(x) != 1:
raise ValueError("only 1-dimensional input supported.")
if weights is None:
weights = array(1, dtype=int32)
else:
if shape(x) != shape(weights):
raise ValueError("shape of weights must match shape of x.")
return ops.index_add(zeros((length,), _dtype(weights)), ops.index[clip(x, 0)], weights)
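# Hedged example of the ``length`` behaviour documented above:
#   jnp.bincount(jnp.array([0, 1, 1, 3]), length=3)   # -> [1, 2, 0]; the out-of-range 3 is dropped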
def broadcast_arrays(*args):
"""Like Numpy's broadcast_arrays but doesn't return views."""
shapes = [shape(arg) for arg in args]
if len(set(shapes)) == 1:
return [arg if isinstance(arg, ndarray) or isscalar(arg) else array(arg)
for arg in args]
result_shape = lax.broadcast_shapes(*shapes)
return [broadcast_to(arg, result_shape) for arg in args]
@_wraps(np.broadcast_to, lax_description="""\
The JAX version does not necessarily return a view of the input.
""")
def broadcast_to(arr, shape):
arr = arr if isinstance(arr, ndarray) else array(arr)
shape = canonicalize_shape(shape) # check that shape is concrete
arr_shape = _shape(arr)
if arr_shape == shape:
return arr
else:
nlead = len(shape) - len(arr_shape)
compatible = np.equal(arr_shape, shape[nlead:]) | np.equal(arr_shape, 1)
if nlead < 0 or not np.all(compatible):
msg = "Incompatible shapes for broadcasting: {} and requested shape {}"
raise ValueError(msg.format(arr_shape, shape))
diff, = np.where(np.not_equal(shape[nlead:], arr_shape))
new_dims = tuple(range(nlead)) + tuple(nlead + diff)
kept_dims = tuple(np.delete(np.arange(len(shape)), new_dims))
return lax.broadcast_in_dim(squeeze(arr, tuple(diff)), shape, kept_dims)
@_wraps(np.split)
def split(ary, indices_or_sections, axis=0):
axis = core.concrete_or_error(int, axis, "in jax.numpy.split argument `axis`")
size = ary.shape[axis]
if isinstance(indices_or_sections, (tuple, list) + _arraylike_types):
indices_or_sections = [core.concrete_or_error(int, i_s, "in jax.numpy.split argument 1")
for i_s in indices_or_sections]
split_indices = np.concatenate([[0], indices_or_sections, [size]])
else:
indices_or_sections = core.concrete_or_error(int, indices_or_sections,
"in jax.numpy.split argument 1")
part_size, r = _divmod(size, indices_or_sections)
if r != 0:
raise ValueError("array split does not result in an equal division")
split_indices = np.arange(indices_or_sections + 1) * part_size
starts, ends = [0] * ndim(ary), shape(ary)
_subval = lambda x, i, v: subvals(x, [(i, v)])
return [lax.slice(ary, _subval(starts, axis, start), _subval(ends, axis, end))
for start, end in zip(split_indices[:-1], split_indices[1:])]
def _split_on_axis(np_fun, axis):
@_wraps(np_fun, update_doc=False)
def f(ary, indices_or_sections):
return split(ary, indices_or_sections, axis=axis)
return f
vsplit = _split_on_axis(np.vsplit, axis=0)
hsplit = _split_on_axis(np.hsplit, axis=1)
dsplit = _split_on_axis(np.dsplit, axis=2)
@_wraps(np.clip)
def clip(a, a_min=None, a_max=None):
if a_min is None and a_max is None:
raise ValueError("At most one of a_min and a_max may be None")
if a_min is not None:
if _dtype(a_min) != _dtype(a):
a_min = lax.convert_element_type(a_min, _dtype(a))
a = maximum(a_min, a)
if a_max is not None:
if _dtype(a_max) != _dtype(a):
a_max = lax.convert_element_type(a_max, _dtype(a))
a = minimum(a_max, a)
return a
def _round_to_nearest_even(x):
half = lax._const(x, 0.5)
one = lax._const(x, 1)
round_val = lax.floor(x)
fraction = x - round_val
nearest_even_int = lax.sub(
round_val, lax.mul(lax._const(x, 2), lax.floor(lax.mul(half, x))))
is_odd = lax.eq(nearest_even_int, one)
return lax.select(
lax.bitwise_or(lax.gt(fraction, half),
lax.bitwise_and(lax.eq(fraction, half), is_odd)),
lax.add(round_val, one), round_val)
@_wraps(np.round, update_doc=False)
def round(a, decimals=0):
dtype = _dtype(a)
if issubdtype(dtype, integer):
if decimals < 0:
raise NotImplementedError(
"integer np.round not implemented for decimals < 0")
return a # no-op on integer types
def _round_float(x):
if decimals == 0:
return _round_to_nearest_even(x)
# TODO(phawkins): the strategy of rescaling the value isn't necessarily a
# good one since we may be left with an incorrectly rounded value at the
# end due to precision problems. As a workaround for float16, convert to
# float32,
x = lax.convert_element_type(x, np.float32) if dtype == np.float16 else x
factor = _constant_like(x, 10 ** decimals)
out = lax.div(_round_to_nearest_even(lax.mul(x, factor)), factor)
return lax.convert_element_type(out, dtype) if dtype == np.float16 else out
if issubdtype(dtype, complexfloating):
return lax.complex(_round_float(lax.real(a)), _round_float(lax.imag(a)))
else:
return _round_float(a)
around = round
@_wraps(np.fix)
def fix(x, out=None):
if out is not None:
raise ValueError("fix does not support the `out` argument.")
zero = lax._const(x, 0)
return where(lax.ge(x, zero), floor(x), ceil(x))
@_wraps(np.modf)
def modf(x, out=None):
if out is not None:
raise ValueError("modf does not support the `out` argument.")
whole = fix(x)
return x - whole, whole
@_wraps(np.isfinite)
def isfinite(x):
dtype = _dtype(x)
if issubdtype(dtype, floating):
return lax.is_finite(x)
elif issubdtype(dtype, complexfloating):
return lax.bitwise_and(lax.is_finite(real(x)), lax.is_finite(imag(x)))
else:
return full_like(x, True, dtype=bool_)
@_wraps(np.isinf)
def isinf(x):
dtype = _dtype(x)
if issubdtype(dtype, floating):
return lax.eq(lax.abs(x), _constant_like(x, inf))
elif issubdtype(dtype, complexfloating):
re = lax.real(x)
im = lax.imag(x)
return lax.bitwise_or(lax.eq(lax.abs(re), _constant_like(re, inf)),
lax.eq(lax.abs(im), _constant_like(im, inf)))
else:
return full_like(x, False, dtype=bool_)
def _isposneginf(infinity, x):
dtype = _dtype(x)
if issubdtype(dtype, floating):
return lax.eq(x, _constant_like(x, infinity))
elif issubdtype(dtype, complexfloating):
raise ValueError("isposinf/isneginf are not well defined for complex types")
else:
return full_like(x, False, dtype=bool_)
isposinf = _wraps(np.isposinf)(lambda x: _isposneginf(inf, x))
isneginf = _wraps(np.isneginf)(lambda x: _isposneginf(-inf, x))
@_wraps(np.isnan)
def isnan(x):
return lax.bitwise_and(lax.bitwise_not(isfinite(x)),
lax.bitwise_not(isinf(x)))
@_wraps(np.nan_to_num)
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
del copy
dtype = _dtype(x)
if issubdtype(dtype, complexfloating):
return lax.complex(
nan_to_num(lax.real(x), nan=nan, posinf=posinf, neginf=neginf),
nan_to_num(lax.imag(x), nan=nan, posinf=posinf, neginf=neginf))
info = finfo(dtypes.canonicalize_dtype(dtype))
posinf = info.max if posinf is None else posinf
neginf = info.min if neginf is None else neginf
x = where(isnan(x), _constant_like(x, nan), x)
x = where(isposinf(x), _constant_like(x, posinf), x)
x = where(isneginf(x), _constant_like(x, neginf), x)
return x
### Reducers
def _make_reduction(np_fun, op, init_val, preproc=None, bool_op=None,
upcast_f16_for_computation=False):
"""Creates reduction function given a binary operation and monoid identity."""
bool_op = bool_op or op
@_wraps(np_fun)
def reduction(a, axis=None, dtype=None, out=None, keepdims=False):
if out is not None:
raise ValueError("reduction does not support the `out` argument.")
if isinstance(a, (list, tuple)):
msg = ("jax.numpy reductions won't accept lists and tuples in future "
"versions, only scalars and ndarrays")
warnings.warn(msg, category=FutureWarning)
a = a if isinstance(a, ndarray) else asarray(a)
a = preproc(a) if preproc else a
dims = _reduction_dims(a, axis)
result_dtype = dtype or _dtype(np_fun(np.ones((), dtype=_dtype(a))))
if upcast_f16_for_computation and issubdtype(result_dtype, inexact):
computation_dtype = promote_types(result_dtype, float32)
else:
computation_dtype = result_dtype
a = lax.convert_element_type(a, computation_dtype)
result = lax.reduce(a, _reduction_init_val(a, init_val),
op if computation_dtype != np.bool_ else bool_op, dims)
if keepdims:
result = expand_dims(result, dims)
return lax.convert_element_type(result, dtype or result_dtype)
return reduction
def _reduction_dims(a, axis):
if axis is None:
return tuple(range(ndim(a)))
elif isinstance(axis, (np.ndarray, tuple, list)):
if len(axis) != len(set(axis)):
raise ValueError(f"duplicate value in 'axis': {axis}")
return tuple(_canonicalize_axis(x, ndim(a)) for x in axis)
elif isinstance(axis, int):
return (_canonicalize_axis(axis, ndim(a)),)
else:
raise TypeError("Unexpected type of axis argument: {}".format(type(axis)))
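# The reduction identity (e.g. +/-inf for min/max) may not be representable in
# the array's dtype: booleans coerce it via init_val > 0, and integer overflow
# falls back to the dtype's min or max.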
def _reduction_init_val(a, init_val):
a_dtype = dtypes.canonicalize_dtype(_dtype(a))
if a_dtype == 'bool':
return np.array(init_val > 0, dtype=a_dtype)
try:
return np.array(init_val, dtype=a_dtype)
except OverflowError:
assert issubdtype(a_dtype, integer)
sign, info = np.sign(init_val), iinfo(a_dtype)
return np.array(info.min if sign < 0 else info.max, dtype=a_dtype)
_cast_to_bool = partial(lax.convert_element_type, new_dtype=bool_)
sum = _make_reduction(np.sum, lax.add, 0, upcast_f16_for_computation=True,
bool_op=lax.bitwise_or)
product = prod = _make_reduction(np.prod, lax.mul, 1, bool_op=lax.bitwise_and,
upcast_f16_for_computation=True)
amax = max = _make_reduction(np.max, lax.max, -np.inf)
amin = min = _make_reduction(np.min, lax.min, np.inf)
all = alltrue = _make_reduction(np.all, lax.bitwise_and, True, _cast_to_bool)
any = sometrue = _make_reduction(np.any, lax.bitwise_or, False, _cast_to_bool)
@_wraps(np.mean)
def mean(a, axis=None, dtype=None, out=None, keepdims=False):
if out is not None:
raise ValueError("mean does not support the `out` argument.")
if axis is None:
normalizer = size(a)
else:
normalizer = np.prod(np.take(shape(a), axis))
if dtype is None:
if issubdtype(_dtype(a), bool_) or issubdtype(_dtype(a), integer):
dtype = float_
else:
dtype = _dtype(a)
return lax.div(
sum(a, axis, dtype=dtype, keepdims=keepdims),
lax.convert_element_type(normalizer, dtype))
@_wraps(np.average)
def average(a, axis=None, weights=None, returned=False):
a = asarray(a)
if weights is None: # Treat all weights as 1
avg = mean(a, axis=axis)
if axis is None:
weights_sum = full((), size(a), dtype=avg.dtype)
else:
weights_sum = full_like(avg, a.shape[axis], dtype=avg.dtype)
else:
weights = asarray(weights)
if issubdtype(a.dtype, inexact):
out_dtype = result_type(a.dtype, weights.dtype)
else:
out_dtype = result_type(a.dtype, weights.dtype, float_)
out_dtype = dtypes.canonicalize_dtype(out_dtype)
a_shape = shape(a)
a_ndim = len(a_shape)
weights_shape = shape(weights)
axis = None if axis is None else _canonicalize_axis(axis, a_ndim)
if a_shape != weights_shape:
# Make sure the dimensions work out
if axis is None:
raise ValueError("Axis must be specified when shapes of a and "
"weights differ.")
if len(weights_shape) != 1:
raise ValueError("1D weights expected when shapes of a and "
"weights differ.")
if weights_shape[0] != a_shape[axis]:
raise ValueError("Length of weights not "
"compatible with specified axis.")
weights = broadcast_to(weights, (a_ndim - 1) * (1,) + weights_shape)
weights = moveaxis(weights, -1, axis)
weights_sum = sum(weights, axis=axis, dtype=out_dtype)
avg = sum(multiply(a, weights), axis=axis, dtype=out_dtype) / weights_sum
if returned:
if avg.shape != weights_sum.shape:
weights_sum = broadcast_to(weights_sum, avg.shape)
return avg, weights_sum
return avg
@_wraps(np.var)
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
if out is not None:
raise ValueError("var does not support the `out` argument.")
a_dtype, dtype = _var_promote_types(_dtype(a), dtype)
a_mean = mean(a, axis, dtype=a_dtype, keepdims=True)
centered = a - a_mean
if issubdtype(centered.dtype, complexfloating):
centered = lax.real(lax.mul(centered, lax.conj(centered)))
else:
centered = lax.square(centered)
if axis is None:
normalizer = size(a)
else:
normalizer = np.prod(np.take(shape(a), axis))
normalizer = normalizer - ddof
result = sum(centered, axis, keepdims=keepdims)
out = lax.div(result, lax.convert_element_type(normalizer, result.dtype))
return lax.convert_element_type(out, dtype)
def _var_promote_types(a_dtype, dtype):
if dtype:
if (not issubdtype(dtype, complexfloating) and
issubdtype(a_dtype, complexfloating)):
msg = ("jax.numpy.var does not yet support real dtype parameters when "
"computing the variance of an array of complex values. The "
"semantics of numpy.var seem unclear in this case. Please comment "
"on https://github.com/google/jax/issues/2283 if this behavior is "
"important to you.")
raise ValueError(msg)
a_dtype = promote_types(a_dtype, dtype)
else:
if not issubdtype(a_dtype, inexact):
dtype = a_dtype = float_
else:
dtype = _complex_elem_type(a_dtype)
a_dtype = promote_types(a_dtype, float32)
return a_dtype, dtype
@_wraps(np.std)
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
if out is not None:
raise ValueError("std does not support the `out` argument.")
return sqrt(var(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims))
@_wraps(np.ptp)
def ptp(a, axis=None, out=None, keepdims=False):
if out is not None:
raise ValueError("ptp does not support the `out` argument.")
x = amax(a, axis=axis, keepdims=keepdims)
y = amin(a, axis=axis, keepdims=keepdims)
return lax.sub(x, y)
@_wraps(np.allclose)
def allclose(a, b, rtol=1e-05, atol=1e-08):
return all(isclose(a, b, rtol, atol))
@_wraps(np.count_nonzero)
def count_nonzero(a, axis=None, keepdims=False):
return sum(lax.ne(a, _constant_like(a, 0)), axis=axis,
dtype=dtypes.canonicalize_dtype(np.int_), keepdims=keepdims)
_NONZERO_DOC = """\
At present, JAX does not support JIT-compilation of :py:func:`jax.numpy.nonzero`
because its output shape is data-dependent.
"""
@_wraps(np.nonzero, lax_description=_NONZERO_DOC)
def nonzero(a):
# Note: this function cannot be jitted because its output has a dynamic
# shape.
a = atleast_1d(a)
dims = shape(a)
ndims = len(dims)
ds = [lax.broadcasted_iota(int_, dims + (1,), i) for i in range(ndims)]
d = concatenate(ds, axis=-1)
indexes = d[a != 0]
return tuple(indexes[..., i] for i in range(ndims))
@_wraps(np.flatnonzero)
def flatnonzero(a):
return nonzero(ravel(a))[0]
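# NaN-aware reductions: replace NaNs with the reduction's identity, run the
# ordinary reduction, and (for nanmin/nanmax) restore NaN for all-NaN slices.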
def _make_nan_reduction(np_reduction, jnp_reduction, init_val, nan_if_all_nan):
@_wraps(np_reduction)
def nan_reduction(a, axis=None, out=None, keepdims=False, **kwargs):
out = jnp_reduction(where(isnan(a), _reduction_init_val(a, init_val), a),
axis=axis, out=out, keepdims=keepdims, **kwargs)
if nan_if_all_nan:
return where(all(isnan(a), axis=axis, keepdims=keepdims),
_constant_like(a, nan), out)
else:
return out
return nan_reduction
nanmin = _make_nan_reduction(np.nanmin, min, inf, nan_if_all_nan=True)
nanmax = _make_nan_reduction(np.nanmax, max, -inf, nan_if_all_nan=True)
nansum = _make_nan_reduction(np.nansum, sum, 0, nan_if_all_nan=False)
nanprod = _make_nan_reduction(np.nanprod, prod, 1, nan_if_all_nan=False)
@_wraps(np.nanmean)
def nanmean(a, axis=None, dtype=None, out=None, keepdims=False):
if out is not None:
raise ValueError("nanmean does not support the `out` argument.")
if issubdtype(_dtype(a), bool_) or issubdtype(_dtype(a), integer):
return mean(a, axis, dtype, out, keepdims)
if dtype is None:
dtype = _dtype(a)
nan_mask = logical_not(isnan(a))
normalizer = sum(nan_mask, axis=axis, dtype=int32, keepdims=keepdims)
normalizer = lax.convert_element_type(normalizer, dtype)
td = lax.div(nansum(a, axis, dtype=dtype, keepdims=keepdims), normalizer)
return td
@_wraps(np.nanvar)
def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
if out is not None:
raise ValueError("nanvar does not support the `out` argument.")
a_dtype, dtype = _var_promote_types(_dtype(a), dtype)
a_mean = nanmean(a, axis, dtype=a_dtype, keepdims=True)
centered = a - a_mean
if issubdtype(centered.dtype, complexfloating):
centered = lax.real(lax.mul(centered, lax.conj(centered)))
else:
centered = lax.square(centered)
normalizer = sum(logical_not(isnan(a)), axis=axis, keepdims=keepdims)
normalizer = normalizer - ddof
if config.omnistaging_enabled:
normalizer_mask = lax.le(normalizer, 0)
else:
zero = lax.full_like(normalizer, 0, shape=())
normalizer_mask = lax.le(normalizer, zero)
result = nansum(centered, axis, keepdims=keepdims)
result = where(normalizer_mask, nan, result)
divisor = where(normalizer_mask, 1, normalizer)
out = lax.div(result, lax.convert_element_type(divisor, result.dtype))
return lax.convert_element_type(out, dtype)
@_wraps(np.nanstd)
def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
if out is not None:
raise ValueError("nanstd does not support the `out` argument.")
return sqrt(nanvar(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims))
def _make_cumulative_reduction(np_reduction, reduction, fill_nan=False, fill_value=0):
# We want to allow XLA to fuse the pad and reduce-window operators to
# avoid materializing the padded output.
# Consider removing `jit` once again if reduce-window is generalized to
# support arbitrary padding.
@partial(jit, static_argnums=(1, 2))
def _cumulative_reduction(a, axis, dtype):
if axis is None or isscalar(a):
a = ravel(a)
axis = 0
a_shape = list(shape(a))
num_dims = len(a_shape)
if axis < 0:
axis = axis + num_dims
if axis < 0 or axis >= num_dims:
raise ValueError(
"axis {} is out of bounds for array of dimension {}".format(
axis, num_dims))
if fill_nan:
a = where(isnan(a), _constant_like(a, fill_value), a)
if not dtype and _dtype(a) == bool_:
dtype = int_
if dtype:
a = lax.convert_element_type(a, dtype)
return reduction(a, axis)
@_wraps(np_reduction)
def cumulative_reduction(a, axis=None, dtype=None):
# jit doesn't support kwargs as static_args.
return _cumulative_reduction(a, axis, dtype)
return cumulative_reduction
cumsum = _make_cumulative_reduction(np.cumsum, lax.cumsum, fill_nan=False)
cumprod = _make_cumulative_reduction(np.cumprod, lax.cumprod, fill_nan=False)
cumproduct = cumprod
nancumsum = _make_cumulative_reduction(np.nancumsum, lax.cumsum,
fill_nan=True, fill_value=0)
nancumprod = _make_cumulative_reduction(np.nancumprod, lax.cumprod,
fill_nan=True, fill_value=1)
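# Phase unwrapping: map each successive difference back into [-pi, pi], leave
# differences smaller than `discont` uncorrected, and add the accumulated
# corrections to the original signal with a cumulative sum.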
@_wraps(np.unwrap)
def unwrap(p, discont=pi, axis=-1):
dd = diff(p, axis=axis)
ddmod = mod(dd + pi, 2 * pi) - pi
ddmod = where((ddmod == -pi) & (dd > 0), pi, ddmod)
ph_correct = where(abs(dd) < discont, 0, ddmod - dd)
up = concatenate((
lax.slice_in_dim(p, 0, 1, axis=axis),
lax.slice_in_dim(p, 1, None, axis=axis) + cumsum(ph_correct, axis=axis)
), axis=axis)
return up
### Array-creation functions
def _check_no_padding(axis_padding, mode):
if (axis_padding[0] > 0 or axis_padding[1] > 0):
msg = "Cannot apply '{}' padding to empty axis"
raise ValueError(msg.format(mode))
def _pad_constant(array, pad_width, constant_values):
nd = ndim(array)
constant_values = broadcast_to(asarray(constant_values), (nd, 2))
constant_values = lax.convert_element_type(constant_values, array.dtype)
for i in range(nd):
widths = [(0, 0, 0)] * nd
widths[i] = (pad_width[i, 0], 0, 0)
array = lax.pad(array, constant_values[i, 0], widths)
widths[i] = (0, pad_width[i, 1], 0)
array = lax.pad(array, constant_values[i, 1], widths)
return array
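# "wrap" padding concatenates full copies of the array along the axis, plus a
# partial slice at each end, to reach the requested pad widths.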
def _pad_wrap(array, pad_width):
for i in range(ndim(array)):
if array.shape[i] == 0:
_check_no_padding(pad_width[i], "wrap")
continue
size = array.shape[i]
repeats, (left_remainder, right_remainder) = _divmod(pad_width[i], size)
total_repeats = repeats.sum() + 1
parts = []
if left_remainder:
parts += [lax.slice_in_dim(array, size - left_remainder, size, axis=i)]
parts += total_repeats * [array]
if right_remainder:
parts += [lax.slice_in_dim(array, 0, right_remainder, axis=i)]
array = lax.concatenate(parts, dimension=i)
return array
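# "symmetric" padding mirrors the array including the edge element, while
# "reflect" excludes the edge element (hence the offset of 1 below).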
def _pad_symmetric_or_reflect(array, pad_width, mode):
assert mode in ("symmetric", "reflect")
for i in range(ndim(array)):
if array.shape[i] == 0:
_check_no_padding(pad_width[i], mode)
continue
n = array.shape[i]
rarray = lax.rev(array, dimensions=(i,))
offset = 1 if (mode == "reflect" and n > 1) else 0
def build_padding(padding, forward):
xs = []
delta = n - offset
while padding > delta:
padding -= delta
p = array if forward else rarray
xs.append(lax.slice_in_dim(p, offset, n, axis=i))
forward = not forward
if padding > 0:
x = lax.slice_in_dim(array if forward else rarray, offset,
padding + offset, axis=i)
xs.append(x)
return xs
parts = reversed(build_padding(pad_width[i, 0], forward=True))
parts = [lax.rev(x, dimensions=(i,)) for x in parts]
parts += [array]
parts += build_padding(pad_width[i, 1], forward=False)
array = lax.concatenate(parts, dimension=i)
return array
def _pad_edge(array, pad_width):
nd = ndim(array)
for i in range(nd):
if array.shape[i] == 0:
_check_no_padding(pad_width[i], "edge")
continue
n = array.shape[i]
npad_before, npad_after = pad_width[i]
edge_before = lax.slice_in_dim(array, 0, 1, axis=i)
pad_before = repeat(edge_before, npad_before, axis=i)
edge_after = lax.slice_in_dim(array, n-1, n, axis=i)
pad_after = repeat(edge_after, npad_after, axis=i)
array = lax.concatenate([pad_before, array, pad_after], dimension=i)
return array
@partial(jit, static_argnums=(1, 2))
def _pad(array, pad_width, mode, constant_values):
array = asarray(array)
nd = ndim(array)
pad_width = np.broadcast_to(np.asarray(pad_width), (nd, 2))
if np.any(pad_width < 0):
raise ValueError("index can't contain negative values")
if mode == "constant":
return _pad_constant(array, pad_width, constant_values)
elif mode == "wrap":
return _pad_wrap(array, pad_width)
elif mode in ("symmetric", "reflect"):
return _pad_symmetric_or_reflect(array, pad_width, mode)
elif mode == "edge":
return _pad_edge(array, pad_width)
else:
msg = "Unimplemented padding mode '{}' for np.pad."
raise NotImplementedError(msg.format(mode))
@_wraps(np.pad)
def pad(array, pad_width, mode='constant', constant_values=0):
if isinstance(pad_width, list):
pad_width = tuple(pad_width)
return _pad(array, pad_width, mode, constant_values)
@_wraps(np.stack)
def stack(arrays, axis=0):
if not len(arrays):
raise ValueError("Need at least one array to stack.")
shape0 = shape(arrays[0])
axis = _canonicalize_axis(axis, len(shape0) + 1)
new_arrays = []
for a in arrays:
if shape(a) != shape0:
raise ValueError("All input arrays must have the same shape.")
new_arrays.append(expand_dims(a, axis))
return concatenate(new_arrays, axis=axis)
@_wraps(np.tile)
def tile(A, reps):
if isinstance(reps, int):
reps = (reps,)
A = reshape(A, (1,) * (len(reps) - ndim(A)) + shape(A))
reps = (1,) * (ndim(A) - len(reps)) + tuple(reps)
for i, rep in enumerate(reps):
if rep == 0:
A = A[tuple(slice(0 if j == i else None) for j in range(A.ndim))]
elif rep != 1:
A = concatenate([A] * int(rep), axis=i)
return A
@_wraps(np.concatenate)
def concatenate(arrays, axis=0):
if not len(arrays):
raise ValueError("Need at least one array to concatenate.")
if ndim(arrays[0]) == 0:
raise ValueError("Zero-dimensional arrays cannot be concatenated.")
if axis is None:
return concatenate([ravel(a) for a in arrays], axis=0)
axis = _canonicalize_axis(axis, ndim(arrays[0]))
arrays = _promote_dtypes(*arrays)
# lax.concatenate can be slow to compile for wide concatenations, so form a
# tree of concatenations as a workaround especially for op-by-op mode.
# (https://github.com/google/jax/issues/653).
k = 16
if len(arrays) == 1:
return array(arrays[0])
else:
while len(arrays) > 1:
arrays = [lax.concatenate(arrays[i:i+k], axis)
for i in range(0, len(arrays), k)]
return arrays[0]
@_wraps(np.vstack)
def vstack(tup):
return concatenate([atleast_2d(m) for m in tup], axis=0)
row_stack = vstack
@_wraps(np.hstack)
def hstack(tup):
arrs = [atleast_1d(m) for m in tup]
if arrs[0].ndim == 1:
return concatenate(arrs, 0)
return concatenate(arrs, 1)
@_wraps(np.dstack)
def dstack(tup):
return concatenate([atleast_3d(m) for m in tup], axis=2)
@_wraps(np.column_stack)
def column_stack(tup):
arrays = []
for v in tup:
arr = array(v)
if arr.ndim < 2:
arr = atleast_2d(arr).T
arrays.append(arr)
return concatenate(arrays, 1)
def _atleast_nd(x, n):
m = ndim(x)
return lax.broadcast(x, (1,) * (n - m)) if m < n else x
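# np.block recursion: innermost lists are concatenated along the last axis and
# each enclosing level along the next axis towards the front, after
# broadcasting every element up to a common rank.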
def _block(xs):
if isinstance(xs, tuple):
raise ValueError("jax.numpy.block does not allow tuples, got {}"
.format(xs))
elif isinstance(xs, list):
if len(xs) == 0:
raise ValueError("jax.numpy.block does not allow empty list arguments")
xs, depths = unzip2([_block(x) for x in xs])
if _any(d != depths[0] for d in depths[1:]):
raise ValueError("Mismatched list depths in jax.numpy.block")
rank = _max(depths[0], _max(ndim(x) for x in xs))
xs = [_atleast_nd(x, rank) for x in xs]
return concatenate(xs, axis=-depths[0]), depths[0] + 1
else:
return asarray(xs), 1
@_wraps(np.block)
@jit
def block(arrays):
out, _ = _block(arrays)
return out
@_wraps(np.atleast_1d, update_doc=False)
def atleast_1d(*arys):
if len(arys) == 1:
arr = array(arys[0])
return arr if ndim(arr) >= 1 else reshape(arr, -1)
else:
return [atleast_1d(arr) for arr in arys]
@_wraps(np.atleast_2d, update_doc=False)
def atleast_2d(*arys):
if len(arys) == 1:
arr = array(arys[0])
if ndim(arr) >= 2:
return arr
elif ndim(arr) == 1:
return expand_dims(arr, axis=0)
else:
return expand_dims(arr, axis=(0, 1))
else:
return [atleast_2d(arr) for arr in arys]
@_wraps(np.atleast_3d, update_doc=False)
def atleast_3d(*arys):
if len(arys) == 1:
arr = array(arys[0])
if ndim(arr) == 0:
arr = expand_dims(arr, axis=(0, 1, 2))
elif ndim(arr) == 1:
arr = expand_dims(arr, axis=(0, 2))
elif ndim(arr) == 2:
arr = expand_dims(arr, axis=2)
return arr
else:
return [atleast_3d(arr) for arr in arys]
@_wraps(np.array)
def array(object, dtype=None, copy=True, order="K", ndmin=0):
if order is not None and order != "K":
raise NotImplementedError("Only implemented for order='K'")
lax._check_user_dtype_supported(dtype, "array")
dtype = dtype and dtypes.canonicalize_dtype(dtype)
if _can_call_numpy_array(object):
object = _np_array(object, dtype=dtype, ndmin=ndmin)
assert type(object) not in dtypes.python_scalar_dtypes
if type(object) is np.ndarray:
out = _device_put_raw(object)
if dtype: assert _dtype(out) == dtype
elif isinstance(object, (DeviceArray, core.Tracer)):
if isinstance(object, DeviceArray) and copy:
# We perform a copy by bouncing back to the host
# TODO(phawkins): add a device runtime function to copy a buffer
out = _device_put_raw(_np_asarray(object))
else:
out = object
elif isinstance(object, (list, tuple)):
if object:
out = stack([array(elt, dtype=dtype) for elt in object])
else:
out = _device_put_raw(_np_array([], dtype=dtype))
else:
try:
view = memoryview(object)
except TypeError:
pass # `object` does not support the buffer interface.
else:
return array(_np_asarray(view), dtype, copy)
raise TypeError("Unexpected input type for array: {}".format(type(object)))
if dtype and _dtype(out) != dtype:
out = lax.convert_element_type(out, dtype)
if ndmin > ndim(out):
out = lax.broadcast(out, (1,) * (ndmin - ndim(out)))
return out
def _can_call_numpy_array(x):
return _all(not isinstance(l, (core.Tracer, DeviceArray))
for l in tree_leaves(x))
@_wraps(np.asarray)
def asarray(a, dtype=None, order=None):
lax._check_user_dtype_supported(dtype, "asarray")
return array(a, dtype=dtype, copy=False, order=order)
@_wraps(np.zeros_like)
def zeros_like(a, dtype=None):
lax._check_user_dtype_supported(dtype, "zeros_like")
return lax.full_like(a, 0, dtype)
@_wraps(np.ones_like)
def ones_like(a, dtype=None):
lax._check_user_dtype_supported(dtype, "ones_like")
return lax.full_like(a, 1, dtype)
@_wraps(np.full)
def full(shape, fill_value, dtype=None):
lax._check_user_dtype_supported(dtype, "full")
shape = (shape,) if ndim(shape) == 0 else shape
return lax.full(shape, fill_value, dtype)
@_wraps(np.full_like)
def full_like(a, fill_value, dtype=None):
lax._check_user_dtype_supported(dtype, "full_like")
return lax.full_like(a, fill_value, dtype)
@_wraps(np.zeros)
def zeros(shape, dtype=None):
if isinstance(shape, types.GeneratorType):
raise TypeError("expected sequence object with len >= 0 or a single integer")
lax._check_user_dtype_supported(dtype, "zeros")
dtype = float_ if dtype is None else dtype
shape = (shape,) if ndim(shape) == 0 else shape
return lax.full(shape, 0, dtype)
@_wraps(np.ones)
def ones(shape, dtype=None):
if isinstance(shape, types.GeneratorType):
raise TypeError("expected sequence object with len >= 0 or a single integer")
lax._check_user_dtype_supported(dtype, "ones")
dtype = float_ if dtype is None else dtype
shape = (shape,) if ndim(shape) == 0 else shape
return lax.full(shape, 1, dtype)
@_wraps(np.array_equal)
def array_equal(a1, a2, equal_nan=False):
try:
a1, a2 = asarray(a1), asarray(a2)
except Exception:
return False
if shape(a1) != shape(a2):
return False
eq = asarray(a1 == a2)
if equal_nan:
eq = logical_or(eq, logical_and(isnan(a1), isnan(a2)))
return all(eq)
# We can't create uninitialized arrays in XLA; use zeros for empty.
empty_like = zeros_like
empty = zeros
@_wraps(np.eye)
def eye(N, M=None, k=0, dtype=None):
lax._check_user_dtype_supported(dtype, "eye")
dtype = float_ if dtype is None else dtype
M = N if M is None else M
k = int(k)
if N < 0 or M < 0:
msg = "negative dimensions are not allowed, got {} and {}"
raise ValueError(msg.format(N, M))
if k is not None:
k_dtype = _dtype(k)
if not issubdtype(k_dtype, integer):
msg = "eye argument `k` must be of integer dtype, got {}"
raise TypeError(msg.format(k_dtype))
return lax._eye(dtype, (N, M), k)
@_wraps(np.identity)
def identity(n, dtype=None):
lax._check_user_dtype_supported(dtype, "identity")
return eye(n, dtype=dtype)
@_wraps(np.arange)
def arange(start, stop=None, step=None, dtype=None):
lax._check_user_dtype_supported(dtype, "arange")
require = partial(core.concrete_or_error, _np_asarray)
msg = "in jax.numpy.arange argument `{}`".format
if stop is None and step is None:
start = require(start, msg("stop"))
dtype = dtype or _dtype(start)
return lax.iota(dtype, np.ceil(start)) # avoids materializing
else:
start = require(start, msg("start"))
stop = None if stop is None else require(stop, msg("stop"))
step = None if step is None else require(step, msg("step"))
if dtype is None:
dtype = _dtype(start, *(x for x in [stop, step] if x is not None))
return array(np.arange(start, stop=stop, step=step, dtype=dtype))
def _wrap_numpy_nullary_function(f):
"""Adapts `f` to return a DeviceArray instead of an np.ndarray.
`f` cannot have any non-static array arguments.
"""
@_wraps(f, update_doc=False)
def wrapper(*args, **kwargs):
return asarray(f(*args, **kwargs))
return wrapper
@_wraps(np.linspace)
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,
axis=0):
"""Implementation of linspace differentiable in start and stop args."""
lax._check_user_dtype_supported(dtype, "linspace")
if num < 0:
raise ValueError("Number of samples, %s, must be non-negative." % num)
dtype = dtype or result_type(start, stop, dtypes.canonicalize_dtype(float_))
computation_dtype = promote_types(dtype, dtypes.canonicalize_dtype(float_))
start = asarray(start, dtype=computation_dtype)
stop = asarray(stop, dtype=computation_dtype)
bounds_shape = list(lax.broadcast_shapes(shape(start), shape(stop)))
broadcast_start = broadcast_to(start, bounds_shape)
broadcast_stop = broadcast_to(stop, bounds_shape)
axis = len(bounds_shape) + axis + 1 if axis < 0 else axis
bounds_shape.insert(axis, 1)
iota_shape = [1,] * len(bounds_shape)
iota_shape[axis] = num
div = (num - 1) if endpoint else num
if num > 1:
delta = lax.convert_element_type(stop - start, computation_dtype) / div
if issubdtype(dtype, integer):
# This is similar to how numpy computes linspace, but it
# can fail to recover the endpoints in float32 arithmetic.
out = (reshape(broadcast_start, bounds_shape) +
reshape(lax.iota(dtype, num), iota_shape) *
reshape(delta, bounds_shape))
else:
# This approach recovers the endpoints with float32 arithmetic,
# but can lead to rounding errors for integer outputs.
step = reshape(lax.iota(computation_dtype, num), iota_shape) / div
out = (reshape(broadcast_start, bounds_shape) * (1 - step) +
reshape(broadcast_stop, bounds_shape) * step)
elif num == 1:
delta = nan if endpoint else stop - start
out = reshape(broadcast_start, bounds_shape)
else: # num == 0 degenerate case, match numpy behavior
empty_shape = list(lax.broadcast_shapes(shape(start), shape(stop)))
empty_shape.insert(axis, 0)
delta = nan
out = reshape(array([], dtype=dtype), empty_shape)
if retstep:
return lax.convert_element_type(out, dtype), delta
else:
return lax.convert_element_type(out, dtype)
@_wraps(np.logspace)
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0):
"""Implementation of logspace differentiable in start and stop args."""
dtype = dtype or result_type(start, stop, dtypes.canonicalize_dtype(float_))
computation_dtype = promote_types(dtype, dtypes.canonicalize_dtype(float_))
start = asarray(start, dtype=computation_dtype)
stop = asarray(stop, dtype=computation_dtype)
lin = linspace(start, stop, num,
endpoint=endpoint, retstep=False, dtype=None, axis=axis)
return lax.convert_element_type(power(base, lin), dtype)
@_wraps(np.geomspace)
def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):
"""Implementation of geomspace differentiable in start and stop args."""
dtype = dtype or result_type(start, stop, dtypes.canonicalize_dtype(float_))
computation_dtype = promote_types(dtype, dtypes.canonicalize_dtype(float_))
start = asarray(start, dtype=computation_dtype)
stop = asarray(stop, dtype=computation_dtype)
# follow the numpy geomspace convention for negative and complex endpoints
signflip = 1 - (1 - sign(real(start))) * (1 - sign(real(stop))) // 2
res = signflip * logspace(log10(signflip * start),
log10(signflip * stop), num,
endpoint=endpoint, base=10.0,
dtype=computation_dtype, axis=0)
if axis != 0:
res = moveaxis(res, 0, axis)
return lax.convert_element_type(res, dtype)
@_wraps(np.meshgrid)
def meshgrid(*args, **kwargs):
indexing = kwargs.get("indexing", "xy")
sparse = kwargs.get("sparse", False)
copy = kwargs.get("copy", True)
if not copy:
raise ValueError("jax.numpy.meshgrid only supports copy=True")
args = list(args)
if indexing == "xy":
if len(args) >= 2:
args[0], args[1] = args[1], args[0]
elif indexing != "ij":
raise ValueError("Valid values for indexing are 'xy' and 'ij', got {}"
.format(indexing))
shape = []
for i, a in enumerate(args):
args[i] = a = asarray(a)
if len(a.shape) != 1:
msg = "Arguments to jax.numpy.meshgrid must be 1D, got shape {}"
raise ValueError(msg.format(a.shape))
shape.append(1 if sparse else a.shape[0])
output = []
for i, a in enumerate(args):
a = asarray(a)
s = shape
if sparse:
s = list(s)
s[i] = a.shape[0]
output.append(lax.broadcast_in_dim(a, s, (i,)))
if indexing == "xy" and len(args) >= 2:
output[0], output[1] = output[1], output[0]
return output
@_wraps(np.i0)
def i0(x):
x = lax.abs(*_promote_args_inexact("i0", x))
return lax.mul(lax.exp(x), lax.bessel_i0e(x))
@_wraps(np.ix_)
def ix_(*args):
n = len(args)
output = []
for i, a in enumerate(args):
a = asarray(a)
if len(a.shape) != 1:
msg = "Arguments to jax.numpy.ix_ must be 1-dimensional, got shape {}"
raise ValueError(msg.format(a.shape))
if _dtype(a) == bool_:
raise NotImplementedError(
"Boolean arguments to jax.numpy.ix_ are not implemented")
shape = [1] * n
shape[i] = a.shape[0]
if a.size == 0:
# Numpy uses an integer index type for empty arrays.
output.append(lax.full(shape, np.zeros((), np.intp)))
else:
output.append(lax.broadcast_in_dim(a, shape, (i,)))
return tuple(output)
@_wraps(np.indices)
def indices(dimensions, dtype=int32, sparse=False):
dimensions = tuple(dimensions)
N = len(dimensions)
output = []
s = dimensions
for i, dim in enumerate(dimensions):
idx = lax.iota(dtype, dim)
if sparse:
s = (1,)*i + (dim,) + (1,)*(N - i - 1)
output.append(lax.broadcast_in_dim(idx, s, (i,)))
if sparse:
return tuple(output)
return stack(output, 0) if output else array([], dtype=dtype)
_TOTAL_REPEAT_LENGTH_DOC = """\
Jax adds the optional `total_repeat_length` parameter which specifies the total
number of repeats, and defaults to sum(repeats). It must be specified for repeat
to be compilable. If `sum(repeats)` is larger than the specified
`total_repeat_length`, the remaining values will be discarded. If `sum(repeats)`
is smaller than the specified target length, the final value will be repeated.
"""
@_wraps(np.repeat, lax_description=_TOTAL_REPEAT_LENGTH_DOC)
def repeat(a, repeats, axis=None, *, total_repeat_length=None):
if axis is None:
a = ravel(a)
axis = 0
# If total_repeat_length is not given, can't compile, use a default.
if total_repeat_length is None:
repeats = core.concrete_or_error(np.array, repeats, "jax.numpy.repeat")
repeats = np.ravel(repeats)
if ndim(a) != 0:
repeats = np.broadcast_to(repeats, [a.shape[axis]])
total_repeat_length = np.sum(repeats)
else:
repeats = ravel(repeats)
if ndim(a) != 0:
repeats = broadcast_to(repeats, [a.shape[axis]])
# Special case when a is a scalar.
if ndim(a) == 0:
if repeats.shape == (1,):
return full([total_repeat_length], a)
else:
raise ValueError('`repeat` with a scalar parameter `a` is only '
'implemented for scalar values of the parameter `repeats`.')
# Special case if total_repeat_length is zero.
if total_repeat_length == 0:
result_shape = list(a.shape)
result_shape[axis] = 0
return reshape(array([], dtype=a.dtype), result_shape)
# If repeats is on a zero sized axis, then return the array.
if a.shape[axis] == 0:
return a
  # This implementation of repeat avoids having to instantiate a large
  # intermediate tensor.
# Modify repeats from e.g. [1,2,0,5] -> [0,1,2,0] for exclusive repeat.
exclusive_repeats = roll(repeats, shift=1).at[0].set(0)
# Cumsum to get indices of new number in repeated tensor, e.g. [0, 1, 3, 3]
scatter_indices = cumsum(exclusive_repeats)
# Scatter these onto a zero buffer, e.g. [1,1,0,2,0,0,0,0]
block_split_indicators = ops.index_add(
x=zeros([total_repeat_length], dtype=int32),
idx=scatter_indices,
y=1)
# Cumsum again to get scatter indices for repeat, e.g. [0,1,1,3,3,3,3,3]
gather_indices = cumsum(block_split_indicators) - 1
return take(a, gather_indices, axis=axis)
@_wraps(np.tri)
def tri(N, M=None, k=0, dtype=None):
lax._check_user_dtype_supported(dtype, "tri")
M = M if M is not None else N
dtype = dtype or float32
return lax._tri(dtype, (N, M), k)
@_wraps(np.tril)
def tril(m, k=0):
m_shape = shape(m)
if len(m_shape) < 2:
raise ValueError("Argument to jax.numpy.tril must be at least 2D")
mask = tri(*m_shape[-2:], k=k, dtype=bool)
return lax.select(lax.broadcast(mask, m_shape[:-2]), m, zeros_like(m))
@_wraps(np.triu, update_doc=False)
def triu(m, k=0):
m_shape = shape(m)
if len(m_shape) < 2:
raise ValueError("Argument to jax.numpy.triu must be at least 2D")
mask = tri(*m_shape[-2:], k=k - 1, dtype=bool)
return lax.select(lax.broadcast(mask, m_shape[:-2]), zeros_like(m), m)
@_wraps(np.trace)
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
if out:
raise NotImplementedError("The 'out' argument to trace is not supported.")
lax._check_user_dtype_supported(dtype, "trace")
axis1 = _canonicalize_axis(axis1, ndim(a))
axis2 = _canonicalize_axis(axis2, ndim(a))
a_shape = shape(a)
if dtype is None:
dtype = _dtype(a)
if issubdtype(dtype, integer):
default_int = dtypes.canonicalize_dtype(np.int_)
if iinfo(dtype).bits < iinfo(default_int).bits:
dtype = default_int
  # Move the axis1 and axis2 dimensions to the end.
perm = [i for i in range(len(a_shape)) if i != axis1 and i != axis2]
perm = perm + [axis1, axis2]
a = lax.transpose(a, perm)
  # Zero out everything except the k=offset diagonal, then sum over the
  # last two axes.
a = where(eye(a_shape[axis1], a_shape[axis2], k=offset, dtype=bool),
a, zeros_like(a))
return sum(a, axis=(-2, -1), dtype=dtype)
def _wrap_indices_function(f):
@_wraps(f, update_doc=False)
def wrapper(*args, **kwargs):
return tuple(asarray(x) for x in f(*args, **kwargs))
return wrapper
tril_indices = _wrap_indices_function(np.tril_indices)
triu_indices = _wrap_indices_function(np.triu_indices)
mask_indices = _wrap_indices_function(np.mask_indices)
@_wraps(np.triu_indices_from)
def triu_indices_from(arr, k=0):
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
@_wraps(np.tril_indices_from)
def tril_indices_from(arr, k=0):
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
@_wraps(np.diag_indices)
def diag_indices(n, ndim=2):
if n < 0:
raise ValueError("n argument to diag_indices must be nonnegative, got {}"
.format(n))
if ndim < 0:
raise ValueError("ndim argument to diag_indices must be nonnegative, got {}"
.format(ndim))
return (lax.iota(int_, n),) * ndim
@_wraps(np.diag_indices_from)
def diag_indices_from(arr):
if not arr.ndim >= 2:
raise ValueError("input array must be at least 2-d")
if len(set(arr.shape)) != 1:
raise ValueError("All dimensions of input must be of equal length")
return diag_indices(arr.shape[0], ndim=arr.ndim)
@_wraps(np.diagonal)
def diagonal(a, offset=0, axis1=0, axis2=1):
a_shape = shape(a)
a_ndims = len(a_shape)
# Move the two dimensions to the end.
axis1 = _canonicalize_axis(axis1, a_ndims)
axis2 = _canonicalize_axis(axis2, a_ndims)
perm = [i for i in range(a_ndims) if i != axis1 and i != axis2]
perm = perm + [axis1, axis2]
a = lax.transpose(a, perm)
  # Zero out everything except the k=offset diagonal, then reduce over one
  # of the axes.
a = where(eye(a_shape[axis1], a_shape[axis2], k=offset, dtype=bool),
a, zeros_like(a))
reduce_axis = -2 if offset < 0 else -1
d = sum(a, axis=reduce_axis, dtype=_dtype(a))
# Slice out the correct diagonal size.
diag_size = _max(0, _min(a_shape[axis1] + _min(offset, 0),
a_shape[axis2] - _max(offset, 0)))
return lax.slice_in_dim(d, 0, diag_size, axis=-1)
@_wraps(np.diag)
def diag(v, k=0):
v_shape = shape(v)
if len(v_shape) == 1:
zero = lambda x: lax.full_like(x, shape=(), fill_value=0)
n = v_shape[0] + _abs(k)
v = lax.pad(v, zero(v), ((_max(0, k), _max(0, -k), 0),))
return where(eye(n, k=k, dtype=bool), v, zeros_like(v))
elif len(v_shape) == 2:
return diagonal(v, offset=k)
else:
raise ValueError("diag input must be 1d or 2d")
_SCALAR_VALUE_DOC = """\
This differs from np.diagflat for some scalar values of v:
jax always returns a two-dimensional array, whereas numpy may
return a scalar depending on the type of v.
"""
@_wraps(np.diagflat, lax_description=_SCALAR_VALUE_DOC)
def diagflat(v, k=0):
v = ravel(v)
v_length = len(v)
adj_length = v_length + _abs(k)
res = zeros(adj_length*adj_length, dtype=v.dtype)
i = arange(0, adj_length-_abs(k))
if (k >= 0):
fi = i+k+i*adj_length
else:
fi = i+(i-k)*adj_length
res = ops.index_update(res, ops.index[fi], v)
res = res.reshape(adj_length,adj_length)
return res
@_wraps(np.polyval)
def polyval(p, x):
if isinstance(p, np.poly1d):
p = np.asarray(p)
if isinstance(x, np.poly1d):
y = 0
else:
y = zeros_like(x)
for i in range(len(p)):
y = y * x + p[i]
return y
@_wraps(np.polyadd)
def polyadd(a1, a2):
a1 = asarray(a1)
a2 = asarray(a2)
if a2.shape[0] <= a1.shape[0]:
return a1.at[-a2.shape[0]:].add(a2)
else:
return a2.at[-a1.shape[0]:].add(a1)
@_wraps(np.polyder)
def polyder(p, m=1):
p = asarray(p)
if m < 0:
raise ValueError("Order of derivative must be positive")
if m == 0:
return p
if m % 1:
raise ValueError("m must be an integer")
coeff = (arange(len(p), m, -1) - 1 - arange(m)[:, newaxis]).prod(0)
return p[:-m] * coeff
def _trim_zeros(a):
for i, v in enumerate(a):
if v != 0:
return a[i:]
return a[:0]
_LEADING_ZEROS_DOC = """\
Setting trim_leading_zeros=True makes the output match that of numpy,
but prevents the function from being used in compiled code.
"""
@_wraps(np.polymul, lax_description=_LEADING_ZEROS_DOC)
def polymul(a1, a2, *, trim_leading_zeros=False):
if isinstance(a1, np.poly1d):
a1 = asarray(a1)
if isinstance(a2, np.poly1d):
a2 = asarray(a2)
if trim_leading_zeros and (len(a1) > 1 or len(a2) > 1):
a1, a2 = _trim_zeros(a1), _trim_zeros(a2)
if len(a1) == 0:
a1 = asarray([0.])
if len(a2) == 0:
a2 = asarray([0.])
val = convolve(a1, a2, mode='full')
return val
@_wraps(np.polysub)
def polysub(a1, a2):
return polyadd(asarray(a1), -asarray(a2))
@_wraps(np.append)
def append(arr, values, axis=None):
if axis is None:
return concatenate([ravel(arr), ravel(values)], 0)
else:
return concatenate([arr, values], axis=axis)
### Tensor contraction operations
@_wraps(np.dot, lax_description=_PRECISION_DOC)
def dot(a, b, *, precision=None): # pylint: disable=missing-docstring
_check_arraylike("dot", a, b)
a, b = _promote_dtypes(a, b)
a_ndim, b_ndim = ndim(a), ndim(b)
if a_ndim == 0 or b_ndim == 0:
return lax.mul(a, b)
if _max(a_ndim, b_ndim) <= 2:
return lax.dot(a, b, precision=precision)
if b_ndim == 1:
contract_dims = ((a_ndim - 1,), (0,))
else:
contract_dims = ((a_ndim - 1,), (b_ndim - 2,))
batch_dims = ((), ())
return lax.dot_general(a, b, (contract_dims, batch_dims), precision)
@_wraps(np.matmul, lax_description=_PRECISION_DOC)
def matmul(a, b, *, precision=None): # pylint: disable=missing-docstring
_check_arraylike("matmul", a, b)
for i, x in enumerate((a, b)):
if ndim(x) < 1:
msg = (f"matmul input operand {i} must have ndim at least 1, "
f"but it has ndim {ndim(x)}")
raise ValueError(msg)
a, b = _promote_dtypes(a, b)
a_is_mat, b_is_mat = (ndim(a) > 1), (ndim(b) > 1)
a_batch_dims = shape(a)[:-2] if a_is_mat else ()
b_batch_dims = shape(b)[:-2] if b_is_mat else ()
num_batch_dims = _max(len(a_batch_dims), len(b_batch_dims))
a_batch_dims = (None,) * (num_batch_dims - len(a_batch_dims)) + a_batch_dims
b_batch_dims = (None,) * (num_batch_dims - len(b_batch_dims)) + b_batch_dims
# Dimensions to squeeze from the inputs.
a_squeeze = []
b_squeeze = []
# Positions of batch dimensions in squeezed inputs.
a_batch = []
b_batch = []
# Desired index in final output of each kind of dimension, in the order that
# lax.dot_general will emit them.
idx_batch = []
idx_a_other = [] # other = non-batch, non-contracting.
idx_b_other = []
for i, (ba, bb) in enumerate(zip(a_batch_dims, b_batch_dims)):
if ba is None:
idx_b_other.append(i)
elif bb is None:
idx_a_other.append(i)
elif ba == 1:
idx_b_other.append(i)
a_squeeze.append(len(idx_batch) + len(idx_a_other) + len(a_squeeze))
elif bb == 1:
idx_a_other.append(i)
b_squeeze.append(len(idx_batch) + len(idx_b_other) + len(b_squeeze))
elif ba == bb:
a_batch.append(len(idx_batch) + len(idx_a_other))
b_batch.append(len(idx_batch) + len(idx_b_other))
idx_batch.append(i)
else:
raise ValueError("Incompatible shapes for matmul arguments: {} and {}"
.format(shape(a), shape(b)))
if a_is_mat: idx_a_other.append(num_batch_dims)
if b_is_mat: idx_b_other.append(num_batch_dims + a_is_mat)
perm = np.argsort(np.concatenate([idx_batch, idx_a_other, idx_b_other]))
a = lax.squeeze(a, tuple(a_squeeze))
b = lax.squeeze(b, tuple(b_squeeze))
out = lax.dot_general(
a, b, (((ndim(a) - 1,), (ndim(b) - 1 - b_is_mat,)), (a_batch, b_batch)),
precision=precision)
return lax.transpose(out, perm)
@_wraps(np.vdot, lax_description=_PRECISION_DOC)
def vdot(a, b, *, precision=None):
if issubdtype(_dtype(a), complexfloating):
a = conj(a)
return dot(a.ravel(), b.ravel(), precision=precision)
@_wraps(np.tensordot, lax_description=_PRECISION_DOC)
def tensordot(a, b, axes=2, *, precision=None):
_check_arraylike("tensordot", a, b)
a_ndim = ndim(a)
b_ndim = ndim(b)
a, b = _promote_dtypes(a, b)
if type(axes) is int:
if axes > _min(a_ndim, b_ndim):
msg = "Number of tensordot axes (axes {}) exceeds input ranks ({} and {})"
raise TypeError(msg.format(axes, a.shape, b.shape))
contracting_dims = tuple(range(a_ndim - axes, a_ndim)), tuple(range(axes))
elif type(axes) in (list, tuple) and len(axes) == 2:
ax1, ax2 = axes
if type(ax1) == type(ax2) == int:
contracting_dims = ((_canonicalize_axis(ax1, a_ndim),),
(_canonicalize_axis(ax2, b_ndim),))
elif type(ax1) in (list, tuple) and type(ax2) in (list, tuple):
if len(ax1) != len(ax2):
msg = "tensordot requires axes lists to have equal length, got {} and {}."
raise TypeError(msg.format(ax1, ax2))
contracting_dims = (tuple(_canonicalize_axis(i, a_ndim) for i in ax1),
tuple(_canonicalize_axis(i, b_ndim) for i in ax2))
else:
msg = "tensordot requires both axes lists to be either ints, tuples or lists, got {} and {}"
raise TypeError(msg.format(ax1, ax2))
else:
msg = ("tensordot axes argument must be an int, a pair of ints, or a pair "
"of lists/tuples of ints.")
raise TypeError(msg)
return lax.dot_general(a, b, (contracting_dims, ((), ())),
precision=precision)
@_wraps(np.einsum, lax_description=_PRECISION_DOC)
def einsum(*operands, optimize='greedy', precision=None):
optimize = 'greedy' if optimize is True else optimize
# using einsum_call=True here is an internal api for opt_einsum
operands, contractions = opt_einsum.contract_path(
*operands, einsum_call=True, use_blas=True, optimize=optimize)
contractions = tuple(data[:3] for data in contractions)
return _einsum(operands, contractions, precision)
@_wraps(np.einsum_path)
def einsum_path(subscripts, *operands, optimize='greedy'):
# using einsum_call=True here is an internal api for opt_einsum
return opt_einsum.contract_path(subscripts, *operands, optimize=optimize)
def _removechars(s, chars):
return s.translate(str.maketrans(dict.fromkeys(chars)))
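# _einsum executes the contraction list produced by opt_einsum: unique
# contracted indices are summed out with lax.reduce, repeated indices are
# contracted against identity (delta) tensors, and pairwise contractions map
# to lax.dot_general.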
@partial(jit, static_argnums=(1, 2))
def _einsum(operands: Sequence,
contractions: Sequence[Tuple[Tuple[int, ...], Set[str], str]],
precision):
operands = list(_promote_dtypes(*operands))
def sum(x, axes):
return lax.reduce(x, np.array(0, x.dtype),
lax.add if x.dtype != bool_ else lax.bitwise_or, axes)
def sum_uniques(operand, names, uniques):
if uniques:
axes = [names.index(name) for name in uniques]
operand = sum(operand, axes)
names = _removechars(names, uniques)
return operand, names
def sum_repeats(operand, names, counts, keep_names):
for name, count in counts.items():
if count > 1:
axes = [i for i, n in enumerate(names) if n == name]
eye = lax._delta(operand.dtype, operand.shape, axes)
if name not in keep_names:
operand = sum(operand * eye, axes)
names = names.replace(name, '')
else:
operand = sum(operand * eye, axes[:-1])
names = names.replace(name, '', count - 1)
return operand, names
def filter_singleton_dims(operand, names, other_shape, other_names):
s = shape(operand)
new_shape = []
new_names = []
for i, d in enumerate(names):
other_i = other_names.find(d)
if s[i] != 1 or other_i == -1 or other_shape[other_i] == 1:
new_shape.append(s[i])
new_names.append(d)
return reshape(operand, tuple(new_shape)), "".join(new_names)
for operand_indices, contracted_names_set, einstr in contractions:
contracted_names = sorted(contracted_names_set)
input_str, result_names = einstr.split('->')
input_names = input_str.split(',')
# switch on the number of operands to be processed in this loop iteration.
# every case here sets 'operand' and 'names'.
if len(operand_indices) == 1:
operand = operands.pop(operand_indices[0])
names, = input_names
counts = collections.Counter(names)
# sum out unique contracted indices with a single reduce-sum
uniques = [name for name in contracted_names if counts[name] == 1]
operand, names = sum_uniques(operand, names, uniques)
# for every repeated index, do a contraction against an identity matrix
operand, names = sum_repeats(operand, names, counts, result_names)
elif len(operand_indices) == 2:
lhs, rhs = map(operands.pop, operand_indices)
lhs_names, rhs_names = input_names
# handle cases where one side of a contracting or batch dimension is 1
# but its counterpart is not.
lhs, lhs_names = filter_singleton_dims(lhs, lhs_names, shape(rhs),
rhs_names)
rhs, rhs_names = filter_singleton_dims(rhs, rhs_names, shape(lhs),
lhs_names)
lhs_counts = collections.Counter(lhs_names)
rhs_counts = collections.Counter(rhs_names)
# sum out unique contracted indices in lhs and rhs
lhs_uniques = [name for name in contracted_names
if lhs_counts[name] == 1 and rhs_counts[name] == 0]
lhs, lhs_names = sum_uniques(lhs, lhs_names, lhs_uniques)
rhs_uniques = [name for name in contracted_names
if rhs_counts[name] == 1 and lhs_counts[name] == 0]
rhs, rhs_names = sum_uniques(rhs, rhs_names, rhs_uniques)
# for every repeated index, contract against an identity matrix
lhs, lhs_names = sum_repeats(lhs, lhs_names, lhs_counts,
result_names + rhs_names)
rhs, rhs_names = sum_repeats(rhs, rhs_names, rhs_counts,
result_names + lhs_names)
lhs_or_rhs_names = set(lhs_names) | set(rhs_names)
contracted_names = [x for x in contracted_names if x in lhs_or_rhs_names]
lhs_and_rhs_names = set(lhs_names) & set(rhs_names)
batch_names = [x for x in result_names if x in lhs_and_rhs_names]
lhs_batch, rhs_batch = unzip2((lhs_names.find(n), rhs_names.find(n))
for n in batch_names)
# NOTE(mattjj): this can fail non-deterministically in python3, maybe
# due to opt_einsum
assert _all(
name in lhs_names and name in rhs_names and
lhs.shape[lhs_names.index(name)] == rhs.shape[rhs_names.index(name)]
for name in contracted_names)
# contract using lax.dot_general
batch_names_str = ''.join(batch_names)
lhs_cont, rhs_cont = unzip2((lhs_names.index(n), rhs_names.index(n))
for n in contracted_names)
dimension_numbers = ((lhs_cont, rhs_cont), (lhs_batch, rhs_batch))
operand = lax.dot_general(lhs, rhs, dimension_numbers, precision)
deleted_names = batch_names_str + ''.join(contracted_names)
names = (batch_names_str + _removechars(lhs_names, deleted_names)
+ _removechars(rhs_names, deleted_names))
else:
raise NotImplementedError # if this is actually reachable, open an issue!
# the resulting 'operand' with axis labels 'names' should be a permutation
# of the desired result
assert len(names) == len(result_names) == len(set(names))
assert set(names) == set(result_names)
if names != result_names:
perm = tuple([names.index(name) for name in result_names])
operand = lax.transpose(operand, perm)
operands.append(operand) # used in next iteration
return operands[0]
def _movechars(s, src, dst):
"""Helper for einsum string munging, like moveaxis on identifier strings."""
chars = [c for i, c in enumerate(s) if i not in src]
for i, j in sorted(zip(dst, src)):
chars.insert(i, s[j])
return ''.join(chars)
@_wraps(np.inner, lax_description=_PRECISION_DOC)
def inner(a, b, *, precision=None):
if ndim(a) == 0 or ndim(b) == 0:
return a * b
return tensordot(a, b, (-1, -1), precision=precision)
@_wraps(np.outer)
def outer(a, b, out=None):
if out:
raise NotImplementedError("The 'out' argument to outer is not supported.")
a, b = _promote_dtypes(a, b)
return ravel(a)[:, None] * ravel(b)
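# Cross product: the spatial dimension is moved last; if both inputs are
# 2-vectors only the scalar z-component is returned, otherwise a missing third
# component is treated as zero, matching np.cross.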
@partial(jit, static_argnums=(2, 3, 4))
def _cross(a, b, axisa, axisb, axisc):
a = moveaxis(a, axisa, -1)
b = moveaxis(b, axisb, -1)
if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3):
raise ValueError("Dimension must be either 2 or 3 for cross product")
if a.shape[-1] == 2 and b.shape[-1] == 2:
return a[..., 0] * b[..., 1] - a[..., 1] * b[..., 0]
a0 = a[..., 0]
a1 = a[..., 1]
a2 = a[..., 2] if a.shape[-1] == 3 else zeros_like(a0)
b0 = b[..., 0]
b1 = b[..., 1]
b2 = b[..., 2] if b.shape[-1] == 3 else zeros_like(b0)
c = array([a1 * b2 - a2 * b1, a2 * b0 - a0 * b2, a0 * b1 - a1 * b0])
return moveaxis(c, 0, axisc)
@_wraps(np.cross)
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
if axis is not None:
axisa = axis
axisb = axis
axisc = axis
return _cross(a, b, axisa, axisb, axisc)
@_wraps(np.kron)
def kron(a, b):
a, b = _promote_dtypes(a, b)
if ndim(a) < ndim(b):
a = reshape(a, (1,) * (ndim(b) - ndim(a)) + shape(a))
elif ndim(b) < ndim(a):
b = reshape(b, (1,) * (ndim(a) - ndim(b)) + shape(b))
a_reshaped = reshape(a, [i for d in shape(a) for i in (d, 1)])
b_reshaped = reshape(b, [i for d in shape(b) for i in (1, d)])
out_shape = tuple(np.multiply(shape(a), shape(b)))
return reshape(lax.mul(a_reshaped, b_reshaped), out_shape)
@_wraps(np.vander)
def vander(x, N=None, increasing=False):
x = asarray(x)
dtype = _dtype(x)
if ndim(x) != 1:
raise ValueError("x must be a one-dimensional array")
x_shape = shape(x)
N = N or x_shape[0]
if N < 0:
raise ValueError("N must be nonnegative")
iota = lax.iota(dtype, N)
if not increasing:
iota = lax.sub(lax._const(iota, N - 1), iota)
return power(x[..., None], iota)
### Misc
@_wraps(np.argwhere)
def argwhere(a):
result = transpose(vstack(nonzero(a)))
if ndim(a) == 0:
return result[:0].reshape(result.shape[0], 0)
return result.reshape(result.shape[0], ndim(a))
@_wraps(np.argmax)
def argmax(a, axis=None):
if axis is None:
a = ravel(a)
axis = 0
if a.shape[axis] == 0:
raise ValueError("attempt to get argmax of an empty sequence")
return lax.argmax(a, _canonicalize_axis(axis, a.ndim), int64)
@_wraps(np.argmin)
def argmin(a, axis=None):
if axis is None:
a = ravel(a)
axis = 0
if a.shape[axis] == 0:
raise ValueError("attempt to get argmin of an empty sequence")
return lax.argmin(a, _canonicalize_axis(axis, a.ndim), int64)
_NANARG_DOC = """\
Warning: jax.numpy.nanarg{} returns -1 for all-NaN slices and does not raise
an error.
"""
@_wraps(np.nanargmax, lax_description=_NANARG_DOC.format("max"))
def nanargmax(a, axis=None):
if not issubdtype(_dtype(a), inexact):
return argmax(a, axis=axis)
nan_mask = isnan(a)
a = where(nan_mask, -inf, a)
res = argmax(a, axis=axis)
return where(all(nan_mask, axis=axis), -1, res)
@_wraps(np.nanargmin, lax_description=_NANARG_DOC.format("min"))
def nanargmin(a, axis=None):
if not issubdtype(_dtype(a), inexact):
return argmin(a, axis=axis)
nan_mask = isnan(a)
a = where(nan_mask, inf, a)
res = argmin(a, axis=axis)
return where(all(nan_mask, axis=axis), -1, res)
@_wraps(np.sort)
def sort(a, axis=-1, kind='quicksort', order=None):
if kind != 'quicksort':
warnings.warn("'kind' argument to sort is ignored.")
if order is not None:
raise ValueError("'order' argument to sort is not supported.")
if axis is None:
return lax.sort(a.ravel(), dimension=0)
else:
return lax.sort(a, dimension=_canonicalize_axis(axis, ndim(a)))
@_wraps(np.sort_complex)
def sort_complex(a):
a = lax.sort(a, dimension=0)
return lax.convert_element_type(a, result_type(a, dtypes.canonicalize_dtype(complex_)))
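# lexsort sorts a broadcasted iota using the keys (the last key is the primary
# sort key, as in numpy) and returns the resulting permutation.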
@_wraps(np.lexsort)
def lexsort(keys, axis=-1):
keys = tuple(keys)
if len(keys) == 0:
raise TypeError("need sequence of keys with len > 0 in lexsort")
if len(set(shape(key) for key in keys)) > 1:
raise ValueError("all keys need to be the same shape")
if ndim(keys[0]) == 0:
return np.int64(0)
axis = _canonicalize_axis(axis, ndim(keys[0]))
iota = lax.broadcasted_iota(np.int64, shape(keys[0]), axis)
return lax.sort((*keys[::-1], iota), dimension=axis, num_keys=len(keys))[-1]
@_wraps(np.argsort)
def argsort(a, axis=-1, kind='quicksort', order=None):
if kind != 'quicksort':
warnings.warn("'kind' argument to argsort is ignored.")
if order is not None:
raise ValueError("'order' argument to argsort is not supported.")
if axis is None:
return argsort(a.ravel(), 0)
else:
axis = _canonicalize_axis(axis, ndim(a))
iota = lax.broadcasted_iota(np.int64, shape(a), axis)
_, perm = lax.sort_key_val(a, iota, dimension=axis)
return perm
@_wraps(np.msort)
def msort(a):
return sort(a, axis=0)
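# roll: for each (shift, axis) pair, concatenate the array with itself along
# the axis and take a dynamic slice, avoiding explicit wrap-around indexing.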
@partial(jit, static_argnums=(2,))
def _roll(a, shift, axis):
a = asarray(a)
a_shape = shape(a)
if axis is None:
return lax.reshape(roll(ravel(a), shift, axis=0), a_shape)
a_ndim = len(a_shape)
shift = asarray(shift)
axis = np.asarray(axis)
b_shape = lax.broadcast_shapes(shift.shape, axis.shape, (1,))
if len(b_shape) != 1:
msg = "'shift' and 'axis' arguments to roll must be scalars or 1D arrays"
raise ValueError(msg)
for x, i in zip(broadcast_to(shift, b_shape),
np.broadcast_to(axis, b_shape)):
i = _canonicalize_axis(i, a_ndim)
x = remainder(x, (a_shape[i] or 1))
a = lax.concatenate((a, a), i)
a = lax.dynamic_slice_in_dim(a, a_shape[i] - x, a_shape[i], axis=i)
return a
@_wraps(np.roll)
def roll(a, shift, axis=None):
return _roll(a, shift, axis)
@_wraps(np.rollaxis)
def rollaxis(a, axis, start=0):
a_ndim = ndim(a)
if not (-a_ndim <= axis < a_ndim):
raise ValueError(f"axis={axis} is out of bounds for array of dimension {a_ndim}")
if not (-a_ndim <= start <= a_ndim):
raise ValueError(f"start={start} must satisfy {-a_ndim}<=start<={a_ndim}")
if start < 0:
start += a_ndim
if axis < 0:
axis += a_ndim
if start > axis:
start -= 1
return moveaxis(a, axis, start)
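# packbits: pad the bit axis to a multiple of 8, reshape it into groups of 8
# bits, and combine each group into a single uint8 with shifts and a sum.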
@_wraps(np.packbits)
def packbits(a, axis=None, bitorder='big'):
a = asarray(a)
if not (issubdtype(dtype(a), integer) or issubdtype(dtype(a), bool_)):
raise TypeError('Expected an input array of integer or boolean data type')
if bitorder not in ['little', 'big']:
raise ValueError("'order' must be either 'little' or 'big'")
a = (a > 0).astype('uint8')
bits = arange(8, dtype='uint8')
if bitorder == 'big':
bits = bits[::-1]
if axis is None:
a = ravel(a)
axis = 0
a = swapaxes(a, axis, -1)
remainder = a.shape[-1] % 8
if remainder:
a = pad(a, (a.ndim - 1) * [(0, 0)] + [(0, 8 - remainder)])
a = a.reshape(a.shape[:-1] + (a.shape[-1] // 8, 8))
packed = (a << bits).sum(-1).astype('uint8')
return swapaxes(packed, axis, -1)
@_wraps(np.unpackbits)
def unpackbits(a, axis=None, count=None, bitorder='big'):
a = asarray(a)
if dtype(a) != uint8:
raise TypeError("Expected an input array of unsigned byte data type")
if bitorder not in ['little', 'big']:
raise ValueError("'order' must be either 'little' or 'big'")
bits = asarray(1) << arange(8, dtype='uint8')
if bitorder == 'big':
bits = bits[::-1]
if axis is None:
a = a.ravel()
axis = 0
a = swapaxes(a, axis, -1)
unpacked = ((a[..., None] & bits) > 0).astype('uint8')
unpacked = unpacked.reshape(unpacked.shape[:-2] + (-1,))[..., :count]
return swapaxes(unpacked, axis, -1)
@_wraps(np.take)
def take(a, indices, axis=None, out=None, mode=None):
if out:
raise NotImplementedError("The 'out' argument to np.take is not supported.")
a = asarray(a)
indices = asarray(indices)
if axis is None:
a = ravel(a)
axis = 0
axis = _canonicalize_axis(axis, ndim(a))
if mode == "raise":
# TODO(phawkins): we have no way to report out of bounds errors yet.
raise NotImplementedError("The 'raise' mode to np.take is not supported.")
elif mode == "wrap":
indices = mod(indices, _constant_like(indices, a.shape[axis]))
elif mode != "clip" and mode is not None:
raise ValueError("Invalid mode '{}' for np.take".format(mode))
index_dims = len(shape(indices))
slice_sizes = list(shape(a))
slice_sizes[axis] = _min(indices.size, 1)
dnums = lax.GatherDimensionNumbers(
offset_dims=tuple(
list(range(axis)) +
list(range(axis + index_dims, len(a.shape) + index_dims - 1))),
collapsed_slice_dims=(axis,),
start_index_map=(axis,))
return lax.gather(a, indices[..., None], dimension_numbers=dnums,
slice_sizes=tuple(slice_sizes))
def _normalize_index(index, axis_size):
"""Normalizes an index value in the range [-N, N) to the range [0, N)."""
if type(axis_size) is Poly:
return index + axis_size if index < 0 else index
return lax.select(
lax.lt(index, _constant_like(index, 0)),
lax.add(index, _constant_like(index, axis_size)),
index)
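# take_along_axis lowers to a single lax.gather: the user-provided indices
# drive the gathered `axis`, broadcast dimensions get an iota index, and size-1
# index dimensions are kept whole as gather offset dimensions.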
@partial(jit, static_argnums=(2,))
def _take_along_axis(arr, indices, axis):
if axis is None:
if ndim(indices) != 1:
msg = "take_along_axis indices must be 1D if axis=None, got shape {}"
raise ValueError(msg.format(indices.shape))
return take_along_axis(arr.ravel(), indices, 0)
rank = ndim(arr)
if rank != ndim(indices):
msg = "indices and arr must have the same number of dimensions; {} vs. {}"
raise ValueError(msg.format(ndim(indices), ndim(arr)))
axis = _canonicalize_axis(axis, rank)
def replace(tup, val):
lst = list(tup)
lst[axis] = val
return tuple(lst)
bcast_shape = lax.broadcast_shapes(replace(arr.shape, 1), replace(indices.shape, 1))
indices = broadcast_to(indices, replace(bcast_shape, indices.shape[axis]))
arr = broadcast_to(arr, replace(bcast_shape, arr.shape[axis]))
axis_size = arr.shape[axis]
arr_shape = replace(arr.shape, 1)
idx_shape = indices.shape
out_shape = lax.broadcast_shapes(idx_shape, arr_shape)
index_dims = [i for i, idx in enumerate(idx_shape) if i == axis or idx != 1]
gather_index_shape = tuple(np.array(out_shape)[index_dims]) + (1,)
gather_indices = []
slice_sizes = []
offset_dims = []
start_index_map = []
collapsed_slice_dims = []
j = 0
for i in range(rank):
if i == axis:
indices = _normalize_index(indices, axis_size)
gather_indices.append(lax.reshape(indices, gather_index_shape))
slice_sizes.append(1)
start_index_map.append(i)
collapsed_slice_dims.append(i)
j += 1
elif idx_shape[i] != 1:
iota = lax.iota(_dtype(indices), out_shape[i])
if not config.omnistaging_enabled:
iota = lax.tie_in(arr, iota)
iota = lax.broadcast_in_dim(iota, gather_index_shape, (j,))
gather_indices.append(iota)
slice_sizes.append(1)
start_index_map.append(i)
collapsed_slice_dims.append(i)
j += 1
else:
# If idx_shape[i] == 1, we can just take the entirety of the arr's axis
# and avoid forming an iota index.
offset_dims.append(i)
slice_sizes.append(arr_shape[i])
gather_indices = lax.concatenate(gather_indices, dimension=j)
dnums = lax.GatherDimensionNumbers(
offset_dims=tuple(offset_dims),
collapsed_slice_dims=tuple(collapsed_slice_dims),
start_index_map=tuple(start_index_map))
return lax.gather(arr, gather_indices, dnums, tuple(slice_sizes))
@_wraps(getattr(np, "take_along_axis", None), update_doc=False)
def take_along_axis(arr, indices, axis):
return _take_along_axis(arr, indices, axis)
### SetOps
@partial(jit, static_argnums=1)
def _unique1d_sorted_mask(ar, optional_indices=False):
"""
Helper function for unique which is jit-able
"""
ar = asarray(ar).flatten()
if optional_indices:
perm = ar.argsort()
aux = ar[perm]
else:
aux = ar.sort()
mask = empty(aux.shape, dtype=bool_)
mask = ops.index_update(mask, ops.index[:1], True)
mask = ops.index_update(mask, ops.index[1:], aux[1:] != aux[:-1])
if optional_indices:
return aux, mask, perm
else:
return aux, mask
def _unique1d(ar, return_index=False, return_inverse=False,
return_counts=False):
"""
Find the unique elements of an array, ignoring shape.
"""
optional_indices = return_index or return_inverse
if optional_indices:
aux, mask, perm = _unique1d_sorted_mask(ar, optional_indices)
else:
aux, mask = _unique1d_sorted_mask(ar, optional_indices)
ret = (aux[mask],)
if return_index:
ret += (perm[mask],)
if return_inverse:
imask = cumsum(mask) - 1
inv_idx = zeros(mask.shape, dtype=dtypes.canonicalize_dtype(int_))
inv_idx = ops.index_update(inv_idx, perm, imask)
ret += (inv_idx,)
if return_counts:
idx = concatenate(nonzero(mask) + (array([mask.size]),))
ret += (diff(idx),)
return ret
@_wraps(np.unique)
def unique(ar, return_index=False, return_inverse=False,
return_counts=False, axis=None):
if iscomplexobj(ar):
raise NotImplementedError(
"np.unique is not implemented for complex valued arrays")
if axis is None:
ret = _unique1d(ar, return_index, return_inverse, return_counts)
if len(ret) == 1:
return ret[0]
else:
return ret
raise NotImplementedError(
"np.unique is not implemented for the axis argument")
### Indexing
def _rewriting_take(arr, idx):
# Computes arr[idx].
# All supported cases of indexing can be implemented as an XLA gather,
# followed by an optional reverse and broadcast_in_dim.
arr = asarray(arr)
treedef, static_idx, dynamic_idx = _split_index_for_jit(idx)
return _gather(arr, treedef, static_idx, dynamic_idx)
# TODO(phawkins): re-enable jit after fixing excessive recompilation for
# slice indexes (e.g., slice(0, 5, None), slice(10, 15, None), etc.).
# @partial(jit, static_argnums=(1, 2))
def _gather(arr, treedef, static_idx, dynamic_idx):
idx = _merge_static_and_dynamic_indices(treedef, static_idx, dynamic_idx)
indexer = _index_to_gather(shape(arr), idx) # shared with _scatter_update
y = arr
# Avoid calling gather if the slice shape is empty, both as a fast path and to
# handle cases like zeros(0)[array([], int32)].
if _prod(indexer.slice_shape) == 0:
return zeros(indexer.slice_shape, dtype=y.dtype)
# We avoid generating a gather when indexer.gather_indices.size is empty.
if indexer.gather_indices.size:
y = lax.gather(y, indexer.gather_indices, indexer.dnums,
indexer.gather_slice_shape)
# Reverses axes with negative strides.
if indexer.reversed_y_dims:
y = lax.rev(y, indexer.reversed_y_dims)
# This adds np.newaxis/None dimensions.
return expand_dims(y, indexer.newaxis_dims)
_Indexer = collections.namedtuple("_Indexer", [
# The expected shape of the slice output.
"slice_shape",
# The slice shape to pass to lax.gather().
"gather_slice_shape",
# The gather indices to use.
"gather_indices",
# A GatherDimensionNumbers object describing the gather to perform.
"dnums",
# Slice dimensions that have negative strides, and so must be reversed after
# the gather.
"reversed_y_dims",
# Keep track of any axes created by `newaxis`. These must be inserted for
# gathers and eliminated for scatters.
"newaxis_dims",
])
def _split_index_for_jit(idx):
"""Splits indices into necessarily-static and dynamic parts.
Used to pass indices into `jit`-ted function.
"""
# Convert list indices to tuples in cases (deprecated by NumPy.)
idx = _eliminate_deprecated_list_indexing(idx)
# Expand any (concrete) boolean indices. We can then use advanced integer
# indexing logic to handle them.
idx = _expand_bool_indices(idx)
leaves, treedef = tree_flatten(idx)
dynamic = [None] * len(leaves)
static = [None] * len(leaves)
for i, x in enumerate(leaves):
if x is Ellipsis:
static[i] = x
elif isinstance(x, slice):
# slice objects aren't hashable.
static[i] = (x.start, x.stop, x.step)
else:
dynamic[i] = x
return treedef, tuple(static), dynamic
def _merge_static_and_dynamic_indices(treedef, static_idx, dynamic_idx):
"""Recombines indices that were split by _split_index_for_jit."""
idx = []
for s, d in zip(static_idx, dynamic_idx):
if d is not None:
idx.append(d)
elif isinstance(s, tuple):
idx.append(slice(s[0], s[1], s[2]))
else:
idx.append(s)
return treedef.unflatten(idx)
def _int(aval):
return not aval.shape and issubdtype(aval.dtype, integer)
def _index_to_gather(x_shape, idx):
# Remove ellipses and add trailing slice(None)s.
idx = _canonicalize_tuple_index(len(x_shape), idx)
# Check for advanced indexing:
# https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
# Do the advanced indexing axes appear contiguously? If not, NumPy semantics
# move the advanced axes to the front.
advanced_axes_are_contiguous = False
advanced_indexes = None
# The positions of the advanced indexing axes in `idx`.
idx_advanced_axes = []
# The positions of the advanced indexes in x's shape.
# collapsed, after None axes have been removed. See below.
x_advanced_axes = None
if _is_advanced_int_indexer(idx):
idx_no_nones = [(i, d) for i, d in enumerate(idx) if d is not None]
advanced_pairs = (
(asarray(e), i, j) for j, (i, e) in enumerate(idx_no_nones)
if (isinstance(e, Sequence) or isinstance(e, ndarray)))
advanced_pairs = ((_normalize_index(e, x_shape[j]), i, j)
for e, i, j in advanced_pairs)
advanced_indexes, idx_advanced_axes, x_advanced_axes = zip(*advanced_pairs)
advanced_axes_are_contiguous = np.all(np.diff(idx_advanced_axes) == 1)
x_axis = 0 # Current axis in x.
y_axis = 0 # Current axis in y, before collapsing. See below.
collapsed_y_axis = 0 # Current axis in y, after collapsing.
# Scatter dimension numbers.
offset_dims = []
collapsed_slice_dims = []
start_index_map = []
use_64bit_index = _any([type(d) is Poly or d >= (1 << 31) for d in x_shape])
index_dtype = int64 if use_64bit_index else int32
gather_indices = np.zeros((0,), dtype=index_dtype) # use np to save a compilation
# We perform three transformations to y before the scatter op, in order:
# First, y is broadcast to slice_shape. In general `y` only need broadcast to
# the right shape.
slice_shape = []
# Next, y is squeezed to remove newaxis_dims. This removes np.newaxis/`None`
# indices, which the scatter cannot remove itself.
newaxis_dims = []
# Finally, we reverse reversed_y_dims to handle slices with negative strides.
reversed_y_dims = []
gather_slice_shape = []
for idx_pos, i in enumerate(idx):
# Handle the advanced indices here if:
    # * the advanced indices were not contiguous and we are at the start.
# * we are at the position of the first advanced index.
if (advanced_indexes is not None and
(advanced_axes_are_contiguous and idx_pos == idx_advanced_axes[0] or
not advanced_axes_are_contiguous and idx_pos == 0)):
advanced_indexes = broadcast_arrays(*advanced_indexes)
shape = advanced_indexes[0].shape
ndim = len(shape)
advanced_indexes = [
lax.convert_element_type(lax.reshape(a, shape + (1,)), index_dtype)
for a in advanced_indexes]
# Broadcast gather_indices from [..., k] to [..., 1, 1, ..., 1, k].
gather_indices = lax.broadcast_in_dim(
gather_indices, np.insert(gather_indices.shape, -1, shape),
tuple(range(gather_indices.ndim - 1)) + (gather_indices.ndim + ndim - 1,))
gather_indices = concatenate([gather_indices] + advanced_indexes, -1)
start_index_map.extend(x_advanced_axes)
collapsed_slice_dims.extend(x_advanced_axes)
slice_shape.extend(shape)
y_axis += ndim
collapsed_y_axis += ndim
# Per-index bookkeeping for advanced indexes.
if idx_pos in idx_advanced_axes:
x_axis += 1
gather_slice_shape.append(1)
continue
try:
abstract_i = core.get_aval(i)
except TypeError:
abstract_i = None
# Handle basic int indexes.
if (isinstance(abstract_i, ConcreteArray) or
isinstance(abstract_i, ShapedArray)) and _int(abstract_i):
if x_shape[x_axis] == 0:
# XLA gives error when indexing into an axis of size 0
raise IndexError(f"index is out of bounds for axis {x_axis} with size 0")
i = _normalize_index(i, x_shape[x_axis])
if type(i) is Poly:
# dummy index if i is polynomial, doesn't matter for shape inference
# TODO(mattjj,j-towns,juliuskunze): revise this logic
i = 0
i = lax.convert_element_type(i, index_dtype)
i = broadcast_to(i, tuple(gather_indices.shape[:-1]) + (1,))
gather_indices = concatenate((gather_indices, i), -1)
collapsed_slice_dims.append(x_axis)
gather_slice_shape.append(1)
start_index_map.append(x_axis)
x_axis += 1
# Handle np.newaxis (None)
elif i is None:
slice_shape.append(1)
newaxis_dims.append(y_axis)
y_axis += 1
# Handle slice(None)
elif _is_slice_none(i):
slice_shape.append(x_shape[x_axis])
gather_slice_shape.append(x_shape[x_axis])
offset_dims.append(collapsed_y_axis)
collapsed_y_axis += 1
y_axis += 1
x_axis += 1
# Handle slice index (only static, otherwise an error is raised)
elif isinstance(i, slice):
if not _all(elt is None or type(elt) is Poly
or type(core.get_aval(elt)) is ConcreteArray
for elt in (i.start, i.stop, i.step)):
msg = ("Array slice indices must have static start/stop/step to be used "
"with NumPy indexing syntax. To index a statically sized "
"array at a dynamic position, try lax.dynamic_slice/"
"dynamic_update_slice (JAX does not support dynamically sized "
"arrays within JIT compiled functions).")
raise IndexError(msg)
start, limit, stride, needs_rev = _static_idx(i, x_shape[x_axis])
if needs_rev:
reversed_y_dims.append(collapsed_y_axis)
if stride == 1:
i = lax.convert_element_type(start, index_dtype)
i = broadcast_to(i, tuple(gather_indices.shape[:-1]) + (1,))
gather_indices = concatenate((gather_indices, i), -1)
slice_shape.append(limit - start)
gather_slice_shape.append(limit - start)
offset_dims.append(collapsed_y_axis)
start_index_map.append(x_axis)
else:
i = arange(start, limit, stride, dtype=index_dtype)
size = i.shape[0]
slice_shape.append(size)
gather_slice_shape.append(1)
gather_indices_shape = tuple(gather_indices.shape[:-1]) + (size,)
i = lax.broadcast_in_dim(
i, shape=gather_indices_shape + (1,),
broadcast_dimensions=(len(gather_indices_shape) - 1,))
gather_indices = lax.broadcast_in_dim(
gather_indices,
shape=gather_indices_shape + (len(start_index_map),),
broadcast_dimensions=(
tuple(range(len(gather_indices_shape) - 1)) +
(len(gather_indices_shape),)))
gather_indices = concatenate(
(gather_indices, i), len(gather_indices_shape))
start_index_map.append(x_axis)
collapsed_slice_dims.append(x_axis)
collapsed_y_axis += 1
y_axis += 1
x_axis += 1
else:
if (abstract_i is not None and
not (issubdtype(abstract_i.dtype, integer) or issubdtype(abstract_i.dtype, bool_))):
msg = ("Indexer must have integer or boolean type, got indexer "
"with type {} at position {}, indexer value {}")
raise TypeError(msg.format(abstract_i.dtype.name, idx_pos, i))
msg = "Indexing mode not yet supported. Open a feature request!\n{}"
raise IndexError(msg.format(idx))
dnums = lax.GatherDimensionNumbers(
offset_dims = tuple(offset_dims),
collapsed_slice_dims = tuple(sorted(collapsed_slice_dims)),
start_index_map = tuple(start_index_map)
)
return _Indexer(
slice_shape=slice_shape,
newaxis_dims=tuple(newaxis_dims),
gather_slice_shape=gather_slice_shape,
reversed_y_dims=reversed_y_dims,
dnums=dnums,
gather_indices=gather_indices)
def _should_unpack_list_index(x):
"""Helper for _eliminate_deprecated_list_indexing."""
return (isinstance(x, ndarray) and np.ndim(x) != 0
or isinstance(x, Sequence)
or isinstance(x, slice) or x is Ellipsis or x is None)
def _eliminate_deprecated_list_indexing(idx):
# "Basic slicing is initiated if the selection object is a non-array,
# non-tuple sequence containing slice objects, [Ellipses, or newaxis
# objects]". Detects this case and canonicalizes to a tuple. This case is
# deprecated by NumPy and exists for backward compatibility.
if not isinstance(idx, tuple):
if isinstance(idx, Sequence) and not isinstance(idx, ndarray):
if _any(_should_unpack_list_index(i) for i in idx):
idx = tuple(idx)
else:
idx = (idx,)
else:
idx = (idx,)
return idx
def _expand_bool_indices(idx):
"""Converts concrete bool indexes into advanced integer indexes."""
out = []
for i in idx:
try:
abstract_i = core.get_aval(i)
except TypeError:
abstract_i = None
if (isinstance(abstract_i, ShapedArray) and issubdtype(abstract_i.dtype, bool_)
or isinstance(i, list) and _all(not _shape(e) and issubdtype(_dtype(e), bool_)
for e in i)):
if isinstance(i, list):
i = array(i)
abstract_i = core.get_aval(i)
if not type(abstract_i) is ConcreteArray:
# TODO(mattjj): improve this error by tracking _why_ the indices are not
# concrete
raise IndexError("Array boolean indices must be concrete.")
else:
out.extend(np.where(i))
else:
out.append(i)
return tuple(out)
def _is_slice_none(idx):
"""Return True if idx is equal to slice(None), False otherwise."""
if isinstance(idx, slice):
return idx.start is None and idx.stop is None and idx.step is None
# TODO(mattjj): clean up this logic
def _is_advanced_int_indexer(idx):
"""Returns True if idx should trigger int array indexing, False otherwise."""
# https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
assert isinstance(idx, tuple)
if _all(np.ndim(elt) == 0 for elt in idx):
return False
return _all(e is None or e is Ellipsis or isinstance(e, slice)
or _is_int_arraylike(e) for e in idx)
def _is_int_arraylike(x):
"""Returns True if x is array-like with integer dtype, False otherwise."""
return (isinstance(x, int) and not isinstance(x, bool)
or issubdtype(getattr(x, "dtype", None), np.integer)
or isinstance(x, (list, tuple)) and _all(_is_int_arraylike(e) for e in x))
def _canonicalize_tuple_index(arr_ndim, idx):
"""Helper to remove Ellipsis and add in the implicit trailing slice(None)."""
len_without_none = _sum(1 for e in idx if e is not None and e is not Ellipsis)
if len_without_none > arr_ndim:
msg = "Too many indices for array: {} non-None/Ellipsis indices for dim {}."
raise IndexError(msg.format(len_without_none, arr_ndim))
ellipses = (i for i, elt in enumerate(idx) if elt is Ellipsis)
ellipsis_index = next(ellipses, None)
if ellipsis_index is not None:
if next(ellipses, None) is not None:
msg = "Multiple ellipses (...) not supported: {}."
raise IndexError(msg.format(list(map(type, idx))))
colons = (slice(None),) * (arr_ndim - len_without_none)
idx = idx[:ellipsis_index] + colons + idx[ellipsis_index + 1:]
elif len_without_none < arr_ndim:
colons = (slice(None),) * (arr_ndim - len_without_none)
idx = tuple(idx) + colons
return idx
def _polymorphic_slice_indices(idx: slice, size: Union[int, Poly]):
# like idx.indices(size), but allows for polymorphic indices and size
# see https://github.com/python/cpython/blob/6d6508765514c7c10719478a0430f5e47c9a96ac/Objects/sliceobject.c#L372
assert isinstance(idx, slice)
step = 1 if idx.step is None else idx.step
step_is_negative = step < 0
lower = -1 if step_is_negative else 0
upper = size + lower
def sanitize(index, default):
if index is None:
return default
elif type(index) is Poly:
return index
elif index < 0:
return _max(index + size, lower)
else:
return _min(index, upper)
start = sanitize(idx.start, default=upper if step_is_negative else lower)
stop = sanitize(idx.stop, default=lower if step_is_negative else upper)
return start, stop, step
def _static_idx(idx: slice, size: Union[int, Poly]):
"""Helper function to compute the static slice start/limit/stride values."""
if _any(type(s) is Poly for s in (idx.start, idx.stop, idx.step, size)):
start, stop, step = _polymorphic_slice_indices(idx, size)
elif isinstance(size, int):
start, stop, step = idx.indices(size)
else:
raise TypeError(size)
if type(start) is not Poly and type(stop) is not Poly:
if (step < 0 and stop >= start) or (step > 0 and start >= stop):
return 0, 0, 1, False # sliced to size zero
if step > 0:
return start, stop, step, False
else:
k = (start - stop - 1) % (-step)
return stop + k + 1, start + 1, -step, True
blackman = _wrap_numpy_nullary_function(np.blackman)
bartlett = _wrap_numpy_nullary_function(np.bartlett)
hamming = _wrap_numpy_nullary_function(np.hamming)
hanning = _wrap_numpy_nullary_function(np.hanning)
# TODO: lower `kaiser` via lax to allow non-constant beta values.
kaiser = _wrap_numpy_nullary_function(np.kaiser)
def _gcd_cond_fn(xs):
x1, x2 = xs
return any(x2 != 0)
def _gcd_body_fn(xs):
x1, x2 = xs
x1, x2 = (where(x2 != 0, x2, x1),
where(x2 != 0, lax.rem(x1, x2), lax._const(x2, 0)))
return (where(x1 < x2, x2, x1), where(x1 < x2, x1, x2))
@_wraps(getattr(np, "gcd", None))
def gcd(x1, x2):
if (not issubdtype(_dtype(x1), integer) or
not issubdtype(_dtype(x2), integer)):
raise ValueError("Arguments to jax.numpy.gcd must be integers.")
x1, x2 = _promote_dtypes(x1, x2)
x1, x2 = broadcast_arrays(x1, x2)
gcd, _ = lax.while_loop(_gcd_cond_fn, _gcd_body_fn, (abs(x1), abs(x2)))
return gcd
@_wraps(getattr(np, "lcm", None))
def lcm(x1, x2):
x1, x2 = _promote_dtypes(x1, x2)
d = gcd(x1, x2)
return where(d == 0, lax._const(d, 0),
abs(multiply(x1, floor_divide(x2, d))))
@_wraps(np.extract)
def extract(condition, arr):
return compress(ravel(condition), ravel(arr))
@_wraps(np.compress)
def compress(condition, a, axis=None, out=None):
if out is not None:
raise NotImplementedError("out argument is not supported.")
if ndim(condition) != 1:
raise ValueError("condition must be a 1D array")
condition = array(condition).astype(bool)
a = array(a)
if axis is None:
axis = 0
a = ravel(a)
else:
a = moveaxis(a, axis, 0)
condition, extra = condition[:a.shape[0]], condition[a.shape[0]:]
if any(extra):
raise ValueError("condition contains entries that are out of bounds")
a = a[:condition.shape[0]]
return moveaxis(a[condition], 0, axis)
@_wraps(np.cov)
def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
aweights=None):
msg = ("jax.numpy.cov not implemented for nontrivial {}. "
"Open a feature request at https://github.com/google/jax/issues !")
if y is not None: raise NotImplementedError(msg.format('y'))
# These next two are actually implemented, just not tested.
if fweights is not None: raise NotImplementedError(msg.format('fweights'))
if aweights is not None: raise NotImplementedError(msg.format('aweights'))
if m.ndim > 2:
raise ValueError("m has more than 2 dimensions") # same as numpy error
X = array(m, ndmin=2, dtype=dtypes.canonicalize_dtype(result_type(m, float_)))
if not rowvar and X.shape[0] != 1:
X = X.T
if X.shape[0] == 0:
return array([]).reshape(0, 0)
if ddof is None:
ddof = 1 if bias == 0 else 0
w = None
if fweights is not None:
if np.ndim(fweights) > 1:
raise RuntimeError("cannot handle multidimensional fweights")
if np.shape(fweights)[0] != X.shape[1]:
raise RuntimeError("incompatible numbers of samples and fweights")
w = asarray(fweights)
if aweights is not None:
if np.ndim(aweights) > 1:
raise RuntimeError("cannot handle multidimensional aweights")
if np.shape(aweights)[0] != X.shape[1]:
raise RuntimeError("incompatible numbers of samples and aweights")
w = aweights if w is None else w * aweights
avg, w_sum = average(X, axis=1, weights=w, returned=True)
w_sum = w_sum[0]
if w is None:
f = X.shape[1] - ddof
elif ddof == 0:
f = w_sum
elif aweights is None:
f = w_sum - ddof
else:
f = w_sum - ddof * sum(w * aweights) / w_sum
X = X - avg[:, None]
X_T = X.T if w is None else (X * w).T
return true_divide(dot(X, X_T.conj()), f).squeeze()
@_wraps(np.corrcoef)
def corrcoef(x, y=None, rowvar=True):
c = cov(x, y, rowvar)
if len(shape(c)) == 0:
# scalar - this should yield nan for values (nan/nan, inf/inf, 0/0), 1 otherwise
return divide(c, c)
d = diag(c)
stddev = sqrt(real(d))
c = divide(c, stddev[:,None])
c = divide(c, stddev[None,:])
real_part = clip(real(c), -1, 1)
if iscomplexobj(c):
complex_part = clip(imag(c), -1, 1)
c = lax.complex(real_part, complex_part)
else:
c = real_part
return c
@_wraps(getattr(np, "quantile", None))
def quantile(a, q, axis=None, out=None, overwrite_input=False,
interpolation="linear", keepdims=False):
if overwrite_input or out is not None:
msg = ("jax.numpy.quantile does not support overwrite_input=True or "
"out != None")
raise ValueError(msg)
return _quantile(a, q, axis, interpolation, keepdims, False)
@_wraps(getattr(np, "nanquantile", None))
def nanquantile(a, q, axis=None, out=None, overwrite_input=False,
interpolation="linear", keepdims=False):
if overwrite_input or out is not None:
msg = ("jax.numpy.nanquantile does not support overwrite_input=True or "
"out != None")
raise ValueError(msg)
return _quantile(a, q, axis, interpolation, keepdims, True)
@partial(jit, static_argnums=(2, 3, 4, 5))
def _quantile(a, q, axis, interpolation, keepdims, squash_nans):
if interpolation not in ["linear", "lower", "higher", "midpoint", "nearest"]:
raise ValueError("interpolation can only be 'linear', 'lower', 'higher', "
"'midpoint', or 'nearest'")
a = asarray(a, dtype=promote_types(_dtype(a), float32))
q = asarray(q, dtype=promote_types(_dtype(q), float32))
if axis is None:
a = ravel(a)
axis = 0
elif isinstance(axis, tuple):
raise NotImplementedError("Tuple values for axis are not implemented")
else:
axis = _canonicalize_axis(axis, ndim(a))
q_shape = shape(q)
q_ndim = ndim(q)
if q_ndim > 1:
raise ValueError("q must be have rank <= 1, got shape {}".format(shape(q)))
a_shape = shape(a)
a = lax.sort(a, dimension=axis)
if squash_nans:
counts = sum(logical_not(isnan(a)), axis=axis, dtype=q.dtype,
keepdims=keepdims)
shape_after_reduction = counts.shape
q = lax.expand_dims(
q, tuple(range(q_ndim, len(shape_after_reduction) + q_ndim)))
counts = lax.expand_dims(counts, tuple(range(q_ndim)))
q = lax.mul(q, lax.sub(counts, _constant_like(q, 1)))
low = lax.floor(q)
high = lax.ceil(q)
high_weight = lax.sub(q, low)
low_weight = lax.sub(_constant_like(high_weight, 1), high_weight)
low = lax.max(_constant_like(low, 0), lax.min(low, counts - 1))
high = lax.max(_constant_like(high, 0), lax.min(high, counts - 1))
low = lax.convert_element_type(low, int64)
high = lax.convert_element_type(high, int64)
out_shape = q_shape + shape_after_reduction
index = [lax.broadcasted_iota(int64, out_shape, dim + q_ndim)
for dim in range(len(shape_after_reduction))]
if keepdims:
index[axis] = low
else:
index.insert(axis, low)
low_value = a[tuple(index)]
index[axis] = high
high_value = a[tuple(index)]
else:
n = a_shape[axis]
q = lax.mul(q, _constant_like(q, n - 1))
low = lax.floor(q)
high = lax.ceil(q)
high_weight = lax.sub(q, low)
low_weight = lax.sub(_constant_like(high_weight, 1), high_weight)
low = lax.clamp(_constant_like(low, 0), low, _constant_like(low, n - 1))
high = lax.clamp(_constant_like(high, 0), high, _constant_like(high, n - 1))
low = lax.convert_element_type(low, int64)
high = lax.convert_element_type(high, int64)
slice_sizes = list(a_shape)
slice_sizes[axis] = 1
dnums = lax.GatherDimensionNumbers(
offset_dims=tuple(range(
q_ndim,
len(a_shape) + q_ndim if keepdims else len(a_shape) + q_ndim - 1)),
collapsed_slice_dims=() if keepdims else (axis,),
start_index_map=(axis,))
low_value = lax.gather(a, low[..., None], dimension_numbers=dnums,
slice_sizes=slice_sizes)
high_value = lax.gather(a, high[..., None], dimension_numbers=dnums,
slice_sizes=slice_sizes)
if q_ndim == 1:
low_weight = lax.broadcast_in_dim(low_weight, low_value.shape,
broadcast_dimensions=(0,))
high_weight = lax.broadcast_in_dim(high_weight, high_value.shape,
broadcast_dimensions=(0,))
if interpolation == "linear":
result = lax.add(lax.mul(low_value.astype(q.dtype), low_weight),
lax.mul(high_value.astype(q.dtype), high_weight))
elif interpolation == "lower":
result = low_value
elif interpolation == "higher":
result = high_value
elif interpolation == "nearest":
pred = lax.le(high_weight, _constant_like(high_weight, 0.5))
result = lax.select(pred, low_value, high_value)
elif interpolation == "midpoint":
result = lax.mul(lax.add(low_value, high_value), _constant_like(low_value, 0.5))
else:
raise ValueError(f"interpolation={interpolation!r} not recognized")
return lax.convert_element_type(result, a.dtype)
@partial(jit, static_argnums=2)
@partial(vectorize, excluded={0, 2})
def _searchsorted(a, v, side):
if len(a) == 0:
return 0
op = operator.le if side == 'left' else operator.lt
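  # Fixed-trip-count binary search: ceil(log2(len(a) + 1)) halving steps always
  # suffice, which keeps the fori_loop bounds static and therefore jit-friendly.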
def body_fun(i, state):
low, high = state
mid = (low + high) // 2
go_left = op(v, a[mid])
return (where(go_left, low, mid), where(go_left, mid, high))
n_levels = int(np.ceil(np.log2(len(a) + 1)))
return lax.fori_loop(0, n_levels, body_fun, (0, len(a)))[1]
@_wraps(np.searchsorted)
def searchsorted(a, v, side='left', sorter=None):
if side not in ['left', 'right']:
raise ValueError(f"{side!r} is an invalid value for keyword 'side'")
if sorter is not None:
raise NotImplementedError("sorter is not implemented")
a = asarray(a)
v = asarray(v)
if ndim(a) != 1:
raise ValueError("a should be 1-dimensional")
return _searchsorted(a, v, side)
@_wraps(np.digitize)
def digitize(x, bins, right=False):
if len(bins) == 0:
    return zeros_like(x, dtype=dtypes.canonicalize_dtype(int_))
side = 'right' if not right else 'left'
return where(
bins[-1] >= bins[0],
searchsorted(bins, x, side=side),
len(bins) - searchsorted(bins[::-1], x, side=side)
)
_PIECEWISE_DOC = """\
Unlike `np.piecewise`, :py:func:`jax.numpy.piecewise` requires functions in
`funclist` to be traceable by JAX, as it is implemented via :func:`jax.lax.switch`.
See the :func:`jax.lax.switch` documentation for more information.
"""
@_wraps(np.piecewise, lax_description=_PIECEWISE_DOC)
def piecewise(x, condlist, funclist, *args, **kw):
condlist = array(condlist, dtype=bool_)
nc, nf = len(condlist), len(funclist)
if nf == nc + 1:
funclist = funclist[-1:] + funclist[:-1]
elif nf == nc:
funclist = [0] + list(funclist)
else:
raise ValueError(f"with {nc} condition(s), either {nc} or {nc+1} functions are expected; got {nf}")
indices = argmax(cumsum(vstack([zeros_like(condlist[:1]), condlist]), 0), 0)
dtype = _dtype(x)
def _call(f):
return lambda x: f(x, *args, **kw).astype(dtype)
def _const(v):
return lambda x: full_like(x, v)
funclist = [_call(f) if callable(f) else _const(f) for f in funclist]
return vectorize(lax.switch, excluded=(1,))(indices, funclist, x)
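# Illustrative usage sketch (comment only, not part of the module; `jnp` is a
# hypothetical alias for jax.numpy). Each entry of `funclist` must be traceable,
# e.g.:
#
#   x = jnp.array([-2.0, -1.0, 0.0, 1.0, 2.0])
#   y = jnp.piecewise(x, [x < 0, x >= 0], [lambda v: -v, lambda v: v ** 2])
#
# which negates the negative entries and squares the non-negative ones.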
@_wraps(np.percentile)
def percentile(a, q, axis=None, out=None, overwrite_input=False,
interpolation="linear", keepdims=False):
q = true_divide(asarray(q), float32(100.0))
return quantile(a, q, axis=axis, out=out, overwrite_input=overwrite_input,
interpolation=interpolation, keepdims=keepdims)
@_wraps(np.nanpercentile)
def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
interpolation="linear", keepdims=False):
q = true_divide(asarray(q), float32(100.0))
return nanquantile(a, q, axis=axis, out=out, overwrite_input=overwrite_input,
interpolation=interpolation, keepdims=keepdims)
@_wraps(np.median)
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
return quantile(a, 0.5, axis=axis, out=out, overwrite_input=overwrite_input,
keepdims=keepdims, interpolation='midpoint')
@_wraps(np.nanmedian)
def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=False):
return nanquantile(a, 0.5, axis=axis, out=out,
overwrite_input=overwrite_input, keepdims=keepdims,
interpolation='midpoint')
def _astype(arr, dtype):
lax._check_user_dtype_supported(dtype, "astype")
return lax.convert_element_type(arr, dtype)
def _nbytes(arr):
return size(arr) * _dtype(arr).itemsize
def _view(arr, dtype=None, type=None):
if type is not None:
raise NotImplementedError("`type` argument of array.view()")
if dtype is None:
return arr
arr_dtype = _dtype(arr)
if arr_dtype == dtype:
return arr
# bool is implemented as lax:PRED, which is not compatible with lax.bitcast_convert_type.
# We work around this by casting bool to uint8.
if arr_dtype == bool_:
arr = arr.astype(uint8)
nbits_in = 8 * arr_dtype.itemsize
nbits_out = 8 * _dtype(dtype).itemsize
if nbits_in == nbits_out:
if dtype == bool_:
return lax.bitcast_convert_type(arr, uint8).astype(dtype)
return lax.bitcast_convert_type(arr, dtype)
if nbits_out > nbits_in and (shape(arr)[-1] * nbits_in) % nbits_out != 0:
raise ValueError("When changing to a larger dtype, its size must be a divisor "
"of the total size in bytes of the last axis of the array.")
byte_dtypes = {8: uint8, 16: uint16, 32: uint32, 64: uint64}
if nbits_in not in byte_dtypes:
raise NotImplementedError(f"arr.view() for arr.dtype={arr_dtype}")
if nbits_out not in byte_dtypes:
raise NotImplementedError(f"arr.view(dtype) for dtype={dtype}")
dt_in = byte_dtypes[nbits_in]
dt_out = byte_dtypes[nbits_out]
arr_bytes = lax.bitcast_convert_type(arr, dt_in)
if nbits_in < nbits_out:
shifts = arange(0, nbits_out, nbits_in, dtype=dt_out)
arr_bytes = arr_bytes.reshape(arr.shape[:-1] + (-1, nbits_out // nbits_in)).astype(dt_out)
arr_bytes = (arr_bytes << shifts).sum(-1).astype(dt_out)
else:
shifts = arange(0, nbits_in, nbits_out, dtype=dt_in)
arr_bytes = ((arr_bytes[..., newaxis] >> shifts) & iinfo(dt_out).max).astype(dt_out)
arr_bytes = arr_bytes.reshape(arr_bytes.shape[:-2] + (-1,))
if dtype == bool_:
return lax.bitcast_convert_type(arr_bytes, uint8).astype(dtype)
return lax.bitcast_convert_type(arr_bytes, dtype)
### track unimplemented functions
def _not_implemented(fun):
@_wraps(fun)
def wrapped(*args, **kwargs):
msg = "Numpy function {} not yet implemented"
raise NotImplementedError(msg.format(fun))
return wrapped
### add method and operator overloads to arraylike classes
# We add operator overloads to DeviceArray and ShapedArray. These method and
# operator overloads mainly just forward calls to the corresponding lax_numpy
# functions, which can themselves handle instances from any of these classes.
_scalar_types = (int, float, complex, np.generic)
def _defer_to_unrecognized_arg(binary_op):
# Ensure that other array types have the chance to override arithmetic.
def deferring_binary_op(self, other):
if not isinstance(other, _scalar_types + _arraylike_types + (core.Tracer,)):
return NotImplemented
return binary_op(self, other)
return deferring_binary_op
def _swap_args(f):
return lambda x, y: f(y, x)
def _unimplemented_setitem(self, i, x):
msg = ("'{}' object does not support item assignment. JAX arrays are "
"immutable; perhaps you want jax.ops.index_update or "
"jax.ops.index_add instead?")
raise TypeError(msg.format(type(self)))
def _operator_round(number, ndigits=None):
out = round(number, decimals=ndigits or 0)
# If `ndigits` is None, for a builtin float round(7.5) returns an integer.
return out.astype(int_) if ndigits is None else out
_operators = {
"getitem": _rewriting_take,
"setitem": _unimplemented_setitem,
"neg": negative,
"pos": positive,
"eq": _defer_to_unrecognized_arg(equal),
"ne": _defer_to_unrecognized_arg(not_equal),
"lt": _defer_to_unrecognized_arg(less),
"le": _defer_to_unrecognized_arg(less_equal),
"gt": _defer_to_unrecognized_arg(greater),
"ge": _defer_to_unrecognized_arg(greater_equal),
"abs": abs,
"add": _defer_to_unrecognized_arg(add),
"radd": _defer_to_unrecognized_arg(add),
"sub": _defer_to_unrecognized_arg(subtract),
"rsub": _defer_to_unrecognized_arg(_swap_args(subtract)),
"mul": _defer_to_unrecognized_arg(multiply),
"rmul": _defer_to_unrecognized_arg(multiply),
"div": _defer_to_unrecognized_arg(divide),
"rdiv": _defer_to_unrecognized_arg(_swap_args(divide)),
"truediv": _defer_to_unrecognized_arg(true_divide),
"rtruediv": _defer_to_unrecognized_arg(_swap_args(true_divide)),
"floordiv": _defer_to_unrecognized_arg(floor_divide),
"rfloordiv": _defer_to_unrecognized_arg(_swap_args(floor_divide)),
"divmod": _defer_to_unrecognized_arg(divmod),
"rdivmod": _defer_to_unrecognized_arg(_swap_args(divmod)),
"mod": _defer_to_unrecognized_arg(mod),
"rmod": _defer_to_unrecognized_arg(_swap_args(mod)),
"pow": _defer_to_unrecognized_arg(power),
"rpow": _defer_to_unrecognized_arg(_swap_args(power)),
"matmul": _defer_to_unrecognized_arg(matmul),
"rmatmul": _defer_to_unrecognized_arg(_swap_args(matmul)),
"and": _defer_to_unrecognized_arg(bitwise_and),
"rand": _defer_to_unrecognized_arg(bitwise_and),
"or": _defer_to_unrecognized_arg(bitwise_or),
"ror": _defer_to_unrecognized_arg(bitwise_or),
"xor": _defer_to_unrecognized_arg(bitwise_xor),
"rxor": _defer_to_unrecognized_arg(bitwise_xor),
"invert": bitwise_not,
"lshift": _defer_to_unrecognized_arg(left_shift),
"rshift": _defer_to_unrecognized_arg(right_shift),
"rlshift": _defer_to_unrecognized_arg(_swap_args(left_shift)),
"rrshift": _defer_to_unrecognized_arg(_swap_args(right_shift)),
"round": _operator_round,
}
# These numpy.ndarray methods are just refs to an equivalent numpy function
_nondiff_methods = ["all", "any", "argmax", "argmin", "argpartition", "argsort",
"nonzero", "searchsorted", "round"]
_diff_methods = ["clip", "conj", "conjugate", "cumprod", "cumsum",
"diagonal", "dot", "max", "mean", "min", "prod", "ptp",
"ravel", "repeat", "sort", "squeeze", "std", "sum",
"swapaxes", "take", "tile", "trace", "transpose", "var"]
# These methods are mentioned explicitly by nondiff_methods, so we create
# _not_implemented implementations of them here rather than in __init__.py.
# TODO(phawkins): implement these.
argpartition = _not_implemented(np.argpartition)
_NOT_IMPLEMENTED = ['argpartition']
# Set up operator, method, and property forwarding on Tracer instances containing
# ShapedArray avals by following the forwarding conventions for Tracer.
# Forward operators using a single-underscore-prefix naming convention:
for operator_name, function in _operators.items():
setattr(ShapedArray, "_{}".format(operator_name), staticmethod(function))
# Forward methods and properties using core.aval_method and core.aval_property:
for method_name in _nondiff_methods + _diff_methods:
setattr(ShapedArray, method_name, core.aval_method(globals()[method_name]))
setattr(ShapedArray, "reshape", core.aval_method(_reshape_method))
setattr(ShapedArray, "flatten", core.aval_method(ravel))
setattr(ShapedArray, "T", core.aval_property(transpose))
setattr(ShapedArray, "real", core.aval_property(real))
setattr(ShapedArray, "imag", core.aval_property(imag))
setattr(ShapedArray, "astype", core.aval_method(_astype))
setattr(ShapedArray, "view", core.aval_method(_view))
setattr(ShapedArray, "nbytes", core.aval_property(_nbytes))
# Forward operators, methods, and properties on DeviceArray to lax_numpy
# functions (with no Tracers involved; this forwarding is direct)
for operator_name, function in _operators.items():
setattr(DeviceArray, "__{}__".format(operator_name), function)
for method_name in _nondiff_methods + _diff_methods:
setattr(DeviceArray, method_name, globals()[method_name])
setattr(DeviceArray, "reshape", _reshape_method)
setattr(DeviceArray, "flatten", ravel)
setattr(DeviceArray, "T", property(transpose))
setattr(DeviceArray, "real", property(real))
setattr(DeviceArray, "imag", property(imag))
setattr(DeviceArray, "astype", _astype)
setattr(DeviceArray, "view", _view)
setattr(DeviceArray, "nbytes", property(_nbytes))
# Extra methods that are handy
setattr(ShapedArray, "broadcast", core.aval_method(lax.broadcast))
setattr(ShapedArray, "broadcast_in_dim", core.aval_method(lax.broadcast_in_dim))
setattr(ShapedArray, "split", core.aval_method(split))
setattr(DeviceArray, "broadcast", lax.broadcast)
setattr(DeviceArray, "broadcast_in_dim", lax.broadcast_in_dim)
setattr(DeviceArray, "split", split)
def _compress_method(a, condition, axis=None, out=None):
return compress(condition, a, axis, out)
setattr(ShapedArray, "compress", _compress_method)
setattr(DeviceArray, "compress", _compress_method)
@partial(jit, static_argnums=(1,2,3))
def _multi_slice(arr: DeviceArray,
start_indices: Tuple[Tuple[int, ...]],
limit_indices: Tuple[Tuple[int, ...]],
removed_dims: Tuple[Tuple[int, ...]]):
"""Extracts multiple slices from `arr`.
This is used to shard DeviceArray arguments to pmap. It's implemented as a
DeviceArray method here to avoid circular imports.
"""
results = []
for starts, limits, removed in safe_zip(start_indices, limit_indices, removed_dims):
sliced = lax.slice(arr, starts, limits)
if removed:
sliced = sliced.reshape(np.delete(sliced.shape, removed_dims))
results.append(sliced)
return results
setattr(DeviceArray, "_multi_slice", _multi_slice)
# Syntactic sugar for scatter operations.
class _IndexUpdateHelper:
# Note: this docstring will appear as the docstring for the `at` property.
"""Indexable helper object to call indexed update functions.
The `at` property is syntactic sugar for calling the indexed update functions
defined in :mod:`jax.ops`, and acts as a pure equivalent of in-place
  modifications.
In particular:
- ``x = x.at[idx].set(y)`` is a pure equivalent of ``x[idx] = y``.
- ``x = x.at[idx].add(y)`` is a pure equivalent of ``x[idx] += y``.
- ``x = x.at[idx].mul(y)`` is a pure equivalent of ``x[idx] *= y``.
- ``x = x.at[idx].min(y)`` is a pure equivalent of
``x[idx] = minimum(x[idx], y)``.
- ``x = x.at[idx].max(y)`` is a pure equivalent of
``x[idx] = maximum(x[idx], y)``.
"""
__slots__ = ("array",)
def __init__(self, array):
self.array = array
def __getitem__(self, index):
return _IndexUpdateRef(self.array, index)
def __repr__(self):
return f"_IndexUpdateHelper({repr(self.array)})"
class _IndexUpdateRef:
"""Helper object to call indexed update functions for an (advanced) index.
This object references a source array and a specific indexer into that array.
Methods on this object return copies of the source array that have been
modified at the positions specified by the indexer.
"""
__slots__ = ("array", "index")
def __init__(self, array, index):
self.array = array
self.index = index
def __repr__(self):
return f"_IndexUpdateRef({repr(self.array)}, {repr(self.index)})"
def set(self, values, indices_are_sorted=False, unique_indices=False):
"""Pure equivalent of ``x[idx] = y``.
``x.at[idx].set(y)`` is syntactic sugar for
``jax.ops.index_update(x, jax.ops.index[idx], y)``, and
returns the value of ``x`` that would result from the NumPy-style
    :mod:`indexed assignment <numpy.doc.indexing>` ``x[idx] = y``.
See :mod:`jax.ops` for details.
"""
return ops.index_update(self.array, self.index, values,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
def add(self, values, indices_are_sorted=False, unique_indices=False):
"""Pure equivalent of ``x[idx] += y``.
``x.at[idx].add(y)`` is syntactic sugar for
``jax.ops.index_add(x, jax.ops.index[idx], y)``, and
returns the value of ``x`` that would result from the NumPy-style
    :mod:`indexed assignment <numpy.doc.indexing>` ``x[idx] += y``.
See :mod:`jax.ops` for details.
"""
return ops.index_add(self.array, self.index, values,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
def mul(self, values, indices_are_sorted=False, unique_indices=False):
"""Pure equivalent of ``x[idx] += y``.
``x.at[idx].mul(y)`` is syntactic sugar for
``jax.ops.index_mul(x, jax.ops.index[idx], y)``, and
returns the value of ``x`` that would result from the NumPy-style
    :mod:`indexed assignment <numpy.doc.indexing>` ``x[idx] *= y``.
See :mod:`jax.ops` for details.
"""
return ops.index_mul(self.array, self.index, values,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
def min(self, values, indices_are_sorted=False, unique_indices=False):
"""Pure equivalent of ``x[idx] = minimum(x[idx], y)``.
``x.at[idx].min(y)`` is syntactic sugar for
``jax.ops.index_min(x, jax.ops.index[idx], y)``, and
returns the value of ``x`` that would result from the NumPy-style
    :mod:`indexed assignment <numpy.doc.indexing>`
``x[idx] = minimum(x[idx], y)``.
See :mod:`jax.ops` for details.
"""
return ops.index_min(self.array, self.index, values,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
def max(self, values, indices_are_sorted=False, unique_indices=False):
"""Pure equivalent of ``x[idx] = maximum(x[idx], y)``.
``x.at[idx].max(y)`` is syntactic sugar for
``jax.ops.index_max(x, jax.ops.index[idx], y)``, and
returns the value of ``x`` that would result from the NumPy-style
    :mod:`indexed assignment <numpy.doc.indexing>`
``x[idx] = maximum(x[idx], y)``.
See :mod:`jax.ops` for details.
"""
return ops.index_max(self.array, self.index, values,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
setattr(DeviceArray, "at", property(_IndexUpdateHelper))
setattr(ShapedArray, "at", core.aval_property(_IndexUpdateHelper))
| [] | [] | [
"JAX_NUMPY_RANK_PROMOTION"
] | [] | ["JAX_NUMPY_RANK_PROMOTION"] | python | 1 | 0 | |
2018/src/Solver.py |
import sys
import argparse
from src.Parser import parseIn, parseOut
from src.Graph import Graph
def main(argv=None):
if argv is None:
argv = sys.argv
parser = argparse.ArgumentParser(description='Solve car sharing problem')
parser.add_argument('--input', help='Path to input file.', required=True)
parser.add_argument('--output', help='Path to output file', required=True)
args = parser.parse_args()
inPath = args.input
outPath = args.output
print("Parsing...")
rides, rows, cols, numCars, numRides, bonus, maxTime = parseIn(inPath)
print("numCars: {n}, numRides: {m}, gridSize: ({y},{x}), totalTime: {t}".format(n=numCars,
m=numRides,
x=cols,
y=rows,
t=maxTime))
print("Solving...")
# build graph
graph = Graph(rows, cols, maxTime)
# add weights
graph.add_rides_to_graph(rides)
# build schedule greedily (car by car)
schedule = []
for car in range(numCars):
edges = graph.find_shortest_path()
print(edges)
ridesTaken = []
for edge in edges:
if edge['label'] != 'none':
ridesTaken.append(edge['label'])
ridesTaken.sort()
print("Taken rides: ", ridesTaken)
schedule.append(ridesTaken)
graph.remove_rides(ridesTaken)
# write solution to file
print("Writing solution to file...")
parseOut(outPath, schedule)
print("Done")
if __name__ == '__main__':
sys.exit(main())
| [] | [] | [] | [] | [] | python | null | null | null |
.history/Hackerrank/Algorithms/fibonacci_modified_20200524195733.py | '''
We define a modified Fibonacci sequence using the following definition:
Given terms t[i] and t[i+1] where i is in (0, +inf), term t[i+2] is computed using the following relation: t[i+2] = t[i] + (t[i+1])^2
For example, if t1 = 0 and t2 = 1,
t3 = 0 + 1^2=1,
t4 = 1 + 1^2=2,
t5 = 1 + 2^2=5,
and so on.
Given three integers, t1, t2, and n, compute and print the nth term of a modified Fibonacci sequence.
Function Description
Complete the fibonacciModified function in the editor below. It must return the nth number in the sequence.
fibonacciModified has the following parameter(s):
t1: an integer
t2: an integer
n: an integer
Note: The value of t[n] may far exceed the range of a 64-bit integer. Many submission languages have libraries that can handle such large results but, for those that don't (e.g., C++), you will need to compensate for the size of the result.
Input Format
A single line of three space-separated integers describing the respective values of t1, t2, and n.
Output Format
Print a single integer denoting the value of the nth term of the modified Fibonacci sequence where the first two terms are t1 and t2.
Sample Input
0 1 5
Sample Output
5
Explanation
The first two terms of the sequence are t1 = 0 and t2 = 1, which gives us a modified Fibonacci sequence of {0, 1, 1, 2, 5, 27, ...}. Because n = 5, we return the 5th term.
https://www.hackerrank.com/challenges/fibonacci-modified/problem
'''
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the fibonacciModified function below.
def fibonacciModified(t1, t2, n):
t3 = t1 + t2**2
if n <= 1:
return t3
else:
return (fibonacciModified(t2, t3, n-1))
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t1T2n = input().split()
t1 = int(t1T2n[0])
t2 = int(t1T2n[1])
n = int(t1T2n[2])
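    # fibonacciModified combines the two most recent terms into the next one on
    # each call, so reaching the nth term from t1 and t2 takes n-2 steps; that
    # is why n-2 (not n) is passed below.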
result = fibonacciModified(t1, t2, n-2)
fptr.write(str(result) + '\n')
fptr.close()
| [] | [] | [
"OUTPUT_PATH"
] | [] | ["OUTPUT_PATH"] | python | 1 | 0 | |
integration-tests/src/test/java/oracle/kubernetes/operator/utils/ExecCommand.java | // Copyright (c) 2018, 2019, Oracle Corporation and/or its affiliates. All rights reserved.
// Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
package oracle.kubernetes.operator.utils;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.stream.Collectors;
import com.google.common.io.ByteStreams;
/** Class for executing shell commands from java. */
public class ExecCommand {
public static ExecResult exec(String command) throws Exception {
return exec(command, false, null);
}
public static ExecResult exec(String command, boolean isRedirectToOut) throws Exception {
return exec(command, isRedirectToOut, null);
}
public static ExecResult exec(
String command, boolean isRedirectToOut, Map<String, String> additionalEnvMap)
throws Exception {
Process p = null;
if (additionalEnvMap == null) {
p = Runtime.getRuntime().exec(new String[] {"/bin/sh", "-c", command});
} else {
// Combine new env vars with existing ones and generate a string array with those values
// If the 2 maps have a dup key then the additional env map entry will replace the existing.
      Map<String, String> combinedEnvMap = new HashMap<>();
combinedEnvMap.putAll(System.getenv());
combinedEnvMap.putAll(additionalEnvMap);
String[] envParams = generateNameValueArrayFromMap(combinedEnvMap);
p = Runtime.getRuntime().exec(new String[] {"/bin/sh", "-c", command}, envParams);
}
InputStreamWrapper in = new SimpleInputStreamWrapper(p.getInputStream());
Thread out = null;
try {
if (isRedirectToOut) {
InputStream i = in.getInputStream();
@SuppressWarnings("resource")
CopyingOutputStream copyOut = new CopyingOutputStream(System.out);
        // CopyingOutputStream also implements InputStreamWrapper, so the bytes copied
        // to stdout can still be read back later via getInputStream()
in = copyOut;
out =
new Thread(
() -> {
try {
ByteStreams.copy(i, copyOut);
} catch (IOException ex) {
ex.printStackTrace();
}
});
out.start();
}
p.waitFor();
return new ExecResult(p.exitValue(), read(in.getInputStream()), read(p.getErrorStream()));
} finally {
if (out != null) {
out.join();
}
p.destroy();
}
}
/**
* Generate a string array of name=value items, one for each env map entry.
*
   * @return a String array of name=value entries, one per map entry
*/
private static String[] generateNameValueArrayFromMap(Map<String, String> map) {
int mapSize = map.size();
String[] strArray = new String[mapSize];
int i = 0;
for (Map.Entry<String, String> entry : map.entrySet()) {
strArray[i++] = entry.getKey() + "=" + entry.getValue();
}
return strArray;
}
private static String read(InputStream is) throws Exception {
return new BufferedReader(new InputStreamReader(is)).lines().collect(Collectors.joining("\n"));
}
private interface InputStreamWrapper {
InputStream getInputStream();
}
private static class SimpleInputStreamWrapper implements InputStreamWrapper {
final InputStream in;
SimpleInputStreamWrapper(InputStream in) {
this.in = in;
}
@Override
public InputStream getInputStream() {
return in;
}
}
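  /**
   * OutputStream that forwards every byte to the wrapped stream while keeping an
   * in-memory copy, so the captured output can be re-read via getInputStream().
   */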
private static class CopyingOutputStream extends OutputStream implements InputStreamWrapper {
final OutputStream out;
final ByteArrayOutputStream copy = new ByteArrayOutputStream();
CopyingOutputStream(OutputStream out) {
this.out = out;
}
@Override
public void write(int b) throws IOException {
out.write(b);
copy.write(b);
}
@Override
public InputStream getInputStream() {
return new ByteArrayInputStream(copy.toByteArray());
}
}
}
| [] | [] | [] | [] | [] | java | 0 | 0 | |
test/e2e/instrumentation/monitoring/stackdriver.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package monitoring
import (
"context"
"fmt"
"math"
"os"
"time"
"golang.org/x/oauth2/google"
"github.com/onsi/ginkgo"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
gcm "google.golang.org/api/monitoring/v3"
)
var (
// Stackdriver container metrics, as described here:
// https://cloud.google.com/monitoring/api/metrics#gcp-container
stackdriverMetrics = []string{
"uptime",
"memory/bytes_total",
"memory/bytes_used",
"cpu/reserved_cores",
"cpu/usage_time",
"memory/page_fault_count",
"disk/bytes_used",
"disk/bytes_total",
"cpu/utilization",
}
pollFrequency = time.Second * 5
pollTimeout = time.Minute * 7
rcName = "resource-consumer"
memoryUsed = 64
memoryLimit int64 = 200
tolerance = 0.25
)
var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
})
f := framework.NewDefaultFramework("stackdriver-monitoring")
ginkgo.It("should have cluster metrics [Feature:StackdriverMonitoring]", func() {
testStackdriverMonitoring(f, 1, 100, 200)
})
})
func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, perPodCPU int64) {
projectID := framework.TestContext.CloudConfig.ProjectID
ctx := context.Background()
client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope)
// Hack for running tests locally
// If this is your use case, create application default credentials:
// $ gcloud auth application-default login
// and uncomment following lines (comment out the two lines above): (DON'T set the env var below)
/*
ts, err := google.DefaultTokenSource(oauth2.NoContext)
framework.Logf("Couldn't get application default credentials, %v", err)
if err != nil {
framework.Failf("Error accessing application default credentials, %v", err)
}
client := oauth2.NewClient(oauth2.NoContext, ts)
*/
gcmService, err := gcm.New(client)
// set this env var if accessing Stackdriver test endpoint (default is prod):
// $ export STACKDRIVER_API_ENDPOINT_OVERRIDE=https://test-monitoring.sandbox.googleapis.com/
basePathOverride := os.Getenv("STACKDRIVER_API_ENDPOINT_OVERRIDE")
if basePathOverride != "" {
gcmService.BasePath = basePathOverride
}
framework.ExpectNoError(err)
rc := common.NewDynamicResourceConsumer(rcName, f.Namespace.Name, common.KindDeployment, pods, allPodsCPU, memoryUsed, 0, perPodCPU, memoryLimit, f.ClientSet, f.ScalesGetter)
defer rc.CleanUp()
rc.WaitForReplicas(pods, 15*time.Minute)
metricsMap := map[string]bool{}
pollingFunction := checkForMetrics(projectID, gcmService, time.Now(), metricsMap, allPodsCPU, perPodCPU)
err = wait.Poll(pollFrequency, pollTimeout, pollingFunction)
if err != nil {
framework.Logf("Missing metrics: %+v\n", metricsMap)
}
framework.ExpectNoError(err)
}
func checkForMetrics(projectID string, gcmService *gcm.Service, start time.Time, metricsMap map[string]bool, cpuUsed int, cpuLimit int64) func() (bool, error) {
return func() (bool, error) {
counter := 0
correctUtilization := false
for _, metric := range stackdriverMetrics {
metricsMap[metric] = false
}
for _, metric := range stackdriverMetrics {
// TODO: check only for metrics from this cluster
ts, err := fetchTimeSeries(projectID, gcmService, metric, start, time.Now())
framework.ExpectNoError(err)
if len(ts) > 0 {
counter = counter + 1
metricsMap[metric] = true
framework.Logf("Received %v timeseries for metric %v\n", len(ts), metric)
} else {
framework.Logf("No timeseries for metric %v\n", metric)
}
var sum float64
switch metric {
case "cpu/utilization":
for _, t := range ts {
max := t.Points[0]
maxEnd, _ := time.Parse(time.RFC3339, max.Interval.EndTime)
for _, p := range t.Points {
pEnd, _ := time.Parse(time.RFC3339, p.Interval.EndTime)
if pEnd.After(maxEnd) {
max = p
maxEnd, _ = time.Parse(time.RFC3339, max.Interval.EndTime)
}
}
sum = sum + *max.Value.DoubleValue
framework.Logf("Received %v points for metric %v\n",
len(t.Points), metric)
}
framework.Logf("Most recent cpu/utilization sum*cpu/limit: %v\n", sum*float64(cpuLimit))
if math.Abs(sum*float64(cpuLimit)-float64(cpuUsed)) > tolerance*float64(cpuUsed) {
return false, nil
}
correctUtilization = true
}
}
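		// Require every metric in stackdriverMetrics (9 of them) to have reported at
		// least one timeseries, and the cpu/utilization value to be within tolerance.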
if counter < 9 || !correctUtilization {
return false, nil
}
return true, nil
}
}
func createMetricFilter(metric string, containerName string) string {
return fmt.Sprintf(`metric.type="container.googleapis.com/container/%s" AND
resource.label.container_name="%s"`, metric, containerName)
}
func fetchTimeSeries(projectID string, gcmService *gcm.Service, metric string, start time.Time, end time.Time) ([]*gcm.TimeSeries, error) {
response, err := gcmService.Projects.TimeSeries.
List(fullProjectName(projectID)).
Filter(createMetricFilter(metric, rcName)).
IntervalStartTime(start.Format(time.RFC3339)).
IntervalEndTime(end.Format(time.RFC3339)).
Do()
if err != nil {
return nil, err
}
return response.TimeSeries, nil
}
func fullProjectName(name string) string {
return fmt.Sprintf("projects/%s", name)
}
| [
"\"STACKDRIVER_API_ENDPOINT_OVERRIDE\""
] | [] | [
"STACKDRIVER_API_ENDPOINT_OVERRIDE"
] | [] | ["STACKDRIVER_API_ENDPOINT_OVERRIDE"] | go | 1 | 0 | |
internal/pkg/build/assemblers/sif.go | // Copyright (c) 2018-2020, Sylabs Inc. All rights reserved.
// This software is licensed under a 3-clause BSD license. Please consult the
// LICENSE.md file distributed with the sources of this project regarding your
// rights to use or distribute this software.
package assemblers
import (
"encoding/binary"
"fmt"
"io/ioutil"
"os"
"regexp"
"runtime"
"sort"
"strconv"
"syscall"
uuid "github.com/satori/go.uuid"
"github.com/sylabs/sif/pkg/sif"
"github.com/sylabs/singularity/internal/pkg/util/machine"
"github.com/sylabs/singularity/pkg/build/types"
"github.com/sylabs/singularity/pkg/image/packer"
"github.com/sylabs/singularity/pkg/sylog"
"github.com/sylabs/singularity/pkg/util/crypt"
)
// SIFAssembler holds the options used when assembling a SIF image (gzip compression and mksquashfs settings).
type SIFAssembler struct {
GzipFlag bool
MksquashfsProcs uint
MksquashfsMem string
MksquashfsPath string
}
type encryptionOptions struct {
keyInfo crypt.KeyInfo
plaintext []byte
}
func createSIF(path string, b *types.Bundle, squashfile string, encOpts *encryptionOptions, arch string) (err error) {
definition := b.Recipe.Raw
// general info for the new SIF file creation
cinfo := sif.CreateInfo{
Pathname: path,
Launchstr: sif.HdrLaunch,
Sifversion: sif.HdrVersion,
ID: uuid.NewV4(),
}
// data we need to create a definition file descriptor
definput := sif.DescriptorInput{
Datatype: sif.DataDeffile,
Groupid: sif.DescrDefaultGroup,
Link: sif.DescrUnusedLink,
Data: definition,
}
definput.Size = int64(binary.Size(definput.Data))
// add this descriptor input element to creation descriptor slice
cinfo.InputDescr = append(cinfo.InputDescr, definput)
// add all JSON data objects to the SIF in alphabetical order
sorted := make([]string, 0, len(b.JSONObjects))
for name := range b.JSONObjects {
sorted = append(sorted, name)
}
sort.Strings(sorted)
for _, name := range sorted {
if len(b.JSONObjects[name]) > 0 {
// data we need to create a JSON object descriptor
in := sif.DescriptorInput{
Datatype: sif.DataGenericJSON,
Groupid: sif.DescrDefaultGroup,
Link: sif.DescrUnusedLink,
Data: b.JSONObjects[name],
Fname: name,
}
in.Size = int64(binary.Size(in.Data))
// add this descriptor input element to creation descriptor slice
cinfo.InputDescr = append(cinfo.InputDescr, in)
}
}
// data we need to create a system partition descriptor
parinput := sif.DescriptorInput{
Datatype: sif.DataPartition,
Groupid: sif.DescrDefaultGroup,
Link: sif.DescrUnusedLink,
Fname: squashfile,
}
// open up the data object file for this descriptor
fp, err := os.Open(parinput.Fname)
if err != nil {
return fmt.Errorf("while opening partition file: %s", err)
}
defer fp.Close()
fi, err := fp.Stat()
if err != nil {
return fmt.Errorf("while calling stat on partition file: %s", err)
}
parinput.Fp = fp
parinput.Size = fi.Size()
sifType := sif.FsSquash
if encOpts != nil {
sifType = sif.FsEncryptedSquashfs
}
err = parinput.SetPartExtra(sifType, sif.PartPrimSys, sif.GetSIFArch(arch))
if err != nil {
return
}
// add this descriptor input element to the list
cinfo.InputDescr = append(cinfo.InputDescr, parinput)
if encOpts != nil {
data, err := crypt.EncryptKey(encOpts.keyInfo, encOpts.plaintext)
if err != nil {
return fmt.Errorf("while encrypting filesystem key: %s", err)
}
if data != nil {
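// the system partition was appended last, so its (1-based) descriptor ID
// equals the current number of input descriptors; link the encrypted key
// material to that partition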
syspartID := uint32(len(cinfo.InputDescr))
part := sif.DescriptorInput{
Datatype: sif.DataCryptoMessage,
Groupid: sif.DescrDefaultGroup,
Link: syspartID,
Data: data,
Size: int64(len(data)),
}
// extra data needed for the creation of the crypto message descriptor
err := part.SetCryptoMsgExtra(sif.FormatPEM, sif.MessageRSAOAEP)
if err != nil {
return err
}
cinfo.InputDescr = append(cinfo.InputDescr, part)
}
}
// remove anything that may exist at the build destination at the last moment
os.RemoveAll(path)
// create the SIF container from the accumulated input descriptors
if _, err := sif.CreateContainer(cinfo); err != nil {
return fmt.Errorf("while creating container: %s", err)
}
// chown the sif file to the calling user
if uid, gid, ok := changeOwner(); ok {
if err := os.Chown(path, uid, gid); err != nil {
return fmt.Errorf("while changing image ownership: %s", err)
}
}
return nil
}
// Assemble creates a SIF image from a Bundle.
func (a *SIFAssembler) Assemble(b *types.Bundle, path string) error {
sylog.Infof("Creating SIF file...")
s := packer.NewSquashfs()
s.MksquashfsPath = a.MksquashfsPath
f, err := ioutil.TempFile(b.TmpDir, "squashfs-")
if err != nil {
return fmt.Errorf("while creating temporary file for squashfs: %v", err)
}
fsPath := f.Name()
f.Close()
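// the temporary file only reserves a unique path; mksquashfs recreates it
// when writing the squashfs image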
defer os.Remove(fsPath)
flags := []string{"-noappend"}
// build squashfs with all-root flag when building as a user
if syscall.Getuid() != 0 {
flags = append(flags, "-all-root")
}
// specify compression if needed
if a.GzipFlag {
flags = append(flags, "-comp", "gzip")
}
if a.MksquashfsMem != "" {
flags = append(flags, "-mem", a.MksquashfsMem)
}
if a.MksquashfsProcs != 0 {
flags = append(flags, "-processors", fmt.Sprint(a.MksquashfsProcs))
}
arch := machine.ArchFromContainer(b.RootfsPath)
if arch == "" {
sylog.Infof("Architecture not recognized, use native")
arch = runtime.GOARCH
}
sylog.Verbosef("Set SIF container architecture to %s", arch)
if err := s.Create([]string{b.RootfsPath}, fsPath, flags); err != nil {
return fmt.Errorf("while creating squashfs: %v", err)
}
var encOpts *encryptionOptions
if b.Opts.EncryptionKeyInfo != nil {
plaintext, err := crypt.NewPlaintextKey(*b.Opts.EncryptionKeyInfo)
if err != nil {
return fmt.Errorf("unable to obtain encryption key: %+v", err)
}
// The squashfs filesystem needs to be encrypted into a dm-crypt device
cryptDev := &crypt.Device{}
// TODO (schebro): Fix #3876
// Detach the following code from the squashfs creation. SIF can be
// created first and encrypted after. This gives the flexibility to
// encrypt an existing SIF
loopPath, err := cryptDev.EncryptFilesystem(fsPath, plaintext)
if err != nil {
return fmt.Errorf("unable to encrypt filesystem at %s: %+v", fsPath, err)
}
defer os.Remove(loopPath)
fsPath = loopPath
encOpts = &encryptionOptions{
keyInfo: *b.Opts.EncryptionKeyInfo,
plaintext: plaintext,
}
}
err = createSIF(path, b, fsPath, encOpts, arch)
if err != nil {
return fmt.Errorf("while creating SIF: %v", err)
}
return nil
}
// changeOwner checks whether the command was invoked through sudo by inspecting the
// SUDO_COMMAND environment variable and pattern matching it against the singularity binary.
func changeOwner() (int, int, bool) {
r := regexp.MustCompile("(singularity)")
sudoCmd := os.Getenv("SUDO_COMMAND")
if !r.MatchString(sudoCmd) {
return 0, 0, false
}
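// only chown the resulting image when the build actually runs as root under sudo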
if os.Getenv("SUDO_USER") == "" || syscall.Getuid() != 0 {
return 0, 0, false
}
_uid := os.Getenv("SUDO_UID")
_gid := os.Getenv("SUDO_GID")
if _uid == "" || _gid == "" {
sylog.Warningf("Env vars SUDO_UID or SUDO_GID are not set, won't call chown over built SIF")
return 0, 0, false
}
uid, err := strconv.Atoi(_uid)
if err != nil {
sylog.Warningf("Error while calling strconv: %v", err)
return 0, 0, false
}
gid, err := strconv.Atoi(_gid)
if err != nil {
sylog.Warningf("Error while calling strconv : %v", err)
return 0, 0, false
}
return uid, gid, true
}
| [
"\"SUDO_COMMAND\"",
"\"SUDO_USER\"",
"\"SUDO_UID\"",
"\"SUDO_GID\""
] | [] | [
"SUDO_COMMAND",
"SUDO_USER",
"SUDO_GID",
"SUDO_UID"
] | [] | ["SUDO_COMMAND", "SUDO_USER", "SUDO_GID", "SUDO_UID"] | go | 4 | 0 | |
cmd/commands/path/path.go | // Package path defines the "path" command, i.e. it outputs the proper $PATH
package path
import (
"fmt"
"os"
"strings"
"github.com/spf13/cobra"
"github.com/markelog/eclectica/plugins"
"github.com/markelog/eclectica/shell"
)
// Command config
var Command = &cobra.Command{
Use: "path",
Short: "echo path environment variable with eclectica specific keys",
Run: run,
Hidden: true,
}
// run prints the PATH environment variable, prepending the eclectica-specific entries when they are missing
func run(c *cobra.Command, args []string) {
path := os.Getenv("PATH")
addition := shell.Compose(plugins.Plugins)
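// addition holds the eclectica-specific PATH entries; prepend them only when
// they are not already present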
if strings.Contains(path, addition) {
fmt.Print(path)
} else {
fmt.Print(addition + ":" + path)
}
os.Exit(0)
}
| [
"\"PATH\""
] | [] | [
"PATH"
] | [] | ["PATH"] | go | 1 | 0 | |
setup.py | from setuptools import setup, Extension
from Cython.Build import cythonize
import os
import platform
import glob
libraries = {
"Linux": [
"bgfx-shared-libRelease",
#"z", "jpeg", "png", "webp", "jbig85",
"SDL2", "SDL2_image", "SDL2_mixer",
"assimp",
"chipmunk",
"openal",
"FLAC", "opus",
"ogg", "vorbis", "vorbisfile", "vorbisenc",
"freetype", "harfbuzz",
"cimgui",
],
"Windows": [
"bgfx-shared-libRelease",
"SDL2main", "SDL2", "SDL2_image", "SDL2_mixer", "SDL2_net",
"assimp",
"chipmunk",
"openal-1",
"FLAC-8", "opus-0", "opusfile-0", "vorbis-0", "vorbisfile-3",
"webp-7",
"zlib1",
"freetype-6", "harfbuzz-0",
"cimgui",
#"cgltf",
"xxhash",
],
}
language = "c"
release_args = ["-w", "-std=c11", "-O3", "-ffast-math", "-march=native"]
debug_args = ["-w", "-std=c11", "-O0"]
#args = debug_args
args = release_args
macros = [
("CIMGUI_DEFINE_ENUMS_AND_STRUCTS", True),
("CGLTF_IMPLEMENTATION", True),
("STB_IMAGE_IMPLEMENTATION", True),
("STBI_FAILURE_USERMSG", True),
("CGLM_CLIPSPACE_INCLUDE_ALL", True),
("CGLM_ALL_UNALIGNED", True),
]
include_dirs = ["./pyorama/libs/include"]
library_dirs = {
"Linux": ["./pyorama/libs/shared/Linux"],
"Windows": ["./pyorama/libs/shared/Windows"],
}
annotate = True
quiet = False
directives = {
"binding": True,
"boundscheck": False,
"cdivision": True,
"initializedcheck": False,
"language_level": "3",
"nonecheck": False,
"wraparound": False,
}
if __name__ == "__main__":
system = platform.system()
old_path = os.environ["PATH"]
os.environ["PATH"] = old_path + os.pathsep + library_dirs[system][0]
libs = libraries[system]
lib_dirs = library_dirs[system]
extensions = []
ext_modules = []
#create extensions
for path, dirs, file_names in os.walk("."):
for file_name in file_names:
if file_name.endswith("pyx"):
ext_path = "{0}/{1}".format(path, file_name)
ext_name = ext_path \
.replace("./", "") \
.replace("/", ".") \
.replace(".pyx", "")
ext = Extension(
name=ext_name,
sources=[ext_path],
libraries=libs,
language=language,
extra_compile_args=args,
include_dirs=include_dirs,
library_dirs=lib_dirs,
runtime_library_dirs=lib_dirs,
define_macros=macros,
)
extensions.append(ext)
#setup all extensions
ext_modules = cythonize(
extensions,
annotate=annotate,
compiler_directives=directives,
quiet=quiet,
#gdb_debug=True
)
setup(
ext_modules=ext_modules,
)
os.environ["PATH"] = old_path
| [] | [] | [
"PATH"
] | [] | ["PATH"] | python | 1 | 0 | |
functions/cmd/main.go | package main
import (
"fmt"
"log"
"os"
"upload-token.functions"
"github.com/GoogleCloudPlatform/functions-framework-go/funcframework"
)
func main() {
funcframework.RegisterHTTPFunction("/challenge", functions.ChallengeHandler)
// Use PORT environment variable, or default to 8080.
port := "8080"
if envPort := os.Getenv("PORT"); envPort != "" {
port = envPort
}
fmt.Println("Listening port:", port)
if err := funcframework.Start(port); err != nil {
log.Fatalf("funcframework.Start: %v\n", err)
}
}
| [
"\"PORT\""
] | [] | [
"PORT"
] | [] | ["PORT"] | go | 1 | 0 | |
acceptance_test.go | // +build acceptance
package nfpm_test
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
"github.com/goreleaser/nfpm/v2"
_ "github.com/goreleaser/nfpm/v2/apk"
_ "github.com/goreleaser/nfpm/v2/deb"
_ "github.com/goreleaser/nfpm/v2/rpm"
)
// nolint: gochecknoglobals
var formats = []string{"deb", "rpm", "apk"}
func TestSimple(t *testing.T) {
for _, format := range formats {
format := format
t.Run(fmt.Sprintf("amd64-%s", format), func(t *testing.T) {
accept(t, acceptParms{
Name: fmt.Sprintf("simple_%s", format),
Conf: "simple.yaml",
Format: format,
Dockerfile: fmt.Sprintf("%s.dockerfile", format),
})
})
t.Run(fmt.Sprintf("i386-%s", format), func(t *testing.T) {
accept(t, acceptParms{
Name: fmt.Sprintf("simple_%s_386", format),
Conf: "simple.386.yaml",
Format: format,
Dockerfile: fmt.Sprintf("%s.386.dockerfile", format),
})
})
t.Run(fmt.Sprintf("ppc64le-%s", format), func(t *testing.T) {
t.Skip("for some reason travis fails to run those")
accept(t, acceptParms{
Name: fmt.Sprintf("simple_%s_ppc64le", format),
Conf: "simple.ppc64le.yaml",
Format: format,
Dockerfile: fmt.Sprintf("%s.ppc64le.dockerfile", format),
})
})
t.Run(fmt.Sprintf("arm64-%s", format), func(t *testing.T) {
accept(t, acceptParms{
Name: fmt.Sprintf("simple_%s_arm64", format),
Conf: "simple.arm64.yaml",
Format: format,
Dockerfile: fmt.Sprintf("%s.arm64.dockerfile", format),
})
})
}
}
func TestComplex(t *testing.T) {
for _, format := range formats {
format := format
t.Run(fmt.Sprintf("amd64-%s", format), func(t *testing.T) {
accept(t, acceptParms{
Name: fmt.Sprintf("complex_%s", format),
Conf: "complex.yaml",
Format: format,
Dockerfile: fmt.Sprintf("%s.complex.dockerfile", format),
})
})
t.Run(fmt.Sprintf("i386-%s", format), func(t *testing.T) {
accept(t, acceptParms{
Name: fmt.Sprintf("complex_%s_386", format),
Conf: "complex.386.yaml",
Format: format,
Dockerfile: fmt.Sprintf("%s.386.complex.dockerfile", format),
})
})
}
}
func TestConfigNoReplace(t *testing.T) {
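// pre-build an older version of the package into tmp/ for the dockerfile-based
// check below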
var target = "./testdata/acceptance/tmp/noreplace_old_rpm.rpm"
require.NoError(t, os.MkdirAll("./testdata/acceptance/tmp", 0700))
config, err := nfpm.ParseFile("./testdata/acceptance/config-noreplace-old.yaml")
require.NoError(t, err)
info, err := config.Get("rpm")
require.NoError(t, err)
require.NoError(t, nfpm.Validate(info))
pkg, err := nfpm.Get("rpm")
require.NoError(t, err)
f, err := os.Create(target)
require.NoError(t, err)
info.Target = target
require.NoError(t, pkg.Package(nfpm.WithDefaults(info), f))
t.Run("rpm", func(t *testing.T) {
accept(t, acceptParms{
Name: "noreplace_rpm",
Conf: "config-noreplace.yaml",
Format: "rpm",
Dockerfile: "rpm.config-noreplace.dockerfile",
})
})
}
func TestEnvVarVersion(t *testing.T) {
for _, format := range formats {
format := format
t.Run(fmt.Sprintf("amd64-%s", format), func(t *testing.T) {
os.Setenv("SEMVER", "v1.0.0-0.1.b1+git.abcdefgh")
accept(t, acceptParms{
Name: fmt.Sprintf("env-var-version_%s", format),
Conf: "env-var-version.yaml",
Format: format,
Dockerfile: fmt.Sprintf("%s.env-var-version.dockerfile", format),
})
})
}
}
func TestComplexOverrides(t *testing.T) {
for _, format := range formats {
format := format
t.Run(fmt.Sprintf("amd64-%s", format), func(t *testing.T) {
accept(t, acceptParms{
Name: fmt.Sprintf("overrides_%s", format),
Conf: "overrides.yaml",
Format: format,
Dockerfile: fmt.Sprintf("%s.overrides.dockerfile", format),
})
})
}
}
func TestMin(t *testing.T) {
for _, format := range formats {
format := format
t.Run(fmt.Sprintf("amd64-%s", format), func(t *testing.T) {
accept(t, acceptParms{
Name: fmt.Sprintf("min_%s", format),
Conf: "min.yaml",
Format: format,
Dockerfile: fmt.Sprintf("%s.min.dockerfile", format),
})
})
}
}
func TestMeta(t *testing.T) {
for _, format := range formats {
format := format
t.Run(fmt.Sprintf("amd64-%s", format), func(t *testing.T) {
accept(t, acceptParms{
Name: fmt.Sprintf("meta_%s", format),
Conf: "meta.yaml",
Format: format,
Dockerfile: fmt.Sprintf("%s.meta.dockerfile", format),
})
})
}
}
func TestRPMCompression(t *testing.T) {
compressFormats := []string{"gzip", "xz", "lzma"}
for _, format := range compressFormats {
format := format
t.Run(format, func(t *testing.T) {
accept(t, acceptParms{
Name: fmt.Sprintf("%s_compression_rpm", format),
Conf: fmt.Sprintf("%s.compression.yaml", format),
Format: "rpm",
Dockerfile: fmt.Sprintf("%s.rpm.compression.dockerfile", format),
})
})
}
}
func TestRPMRelease(t *testing.T) {
accept(t, acceptParms{
Name: "release_rpm",
Conf: "release.rpm.yaml",
Format: "rpm",
Dockerfile: "release.rpm.dockerfile",
})
}
func TestDebRules(t *testing.T) {
accept(t, acceptParms{
Name: "rules.deb",
Conf: "rules.deb.yaml",
Format: "deb",
Dockerfile: "rules.deb.dockerfile",
})
}
func TestChangelog(t *testing.T) {
for _, format := range formats {
format := format
t.Run(fmt.Sprintf("changelog-%s", format), func(t *testing.T) {
accept(t, acceptParms{
Name: fmt.Sprintf("changelog_%s", format),
Conf: "withchangelog.yaml",
Format: format,
Dockerfile: fmt.Sprintf("%s.changelog.dockerfile", format),
})
})
}
}
func TestDebTriggers(t *testing.T) {
t.Run("triggers-deb", func(t *testing.T) {
accept(t, acceptParms{
Name: "triggers-deb",
Conf: "triggers.yaml",
Format: "deb",
Dockerfile: "deb.triggers.dockerfile",
})
})
}
func TestSymlink(t *testing.T) {
for _, format := range formats {
format := format
t.Run(fmt.Sprintf("symlink-%s", format), func(t *testing.T) {
accept(t, acceptParms{
Name: fmt.Sprintf("symlink_%s", format),
Conf: "symlink.yaml",
Format: format,
Dockerfile: fmt.Sprintf("%s.symlink.dockerfile", format),
})
})
}
}
func TestDebBreaks(t *testing.T) {
t.Run("breaks-deb", func(t *testing.T) {
accept(t, acceptParms{
Name: "breaks-deb",
Conf: "breaks.yaml",
Format: "deb",
Dockerfile: "deb.breaks.dockerfile",
})
})
}
func TestSignatures(t *testing.T) {
for _, format := range formats {
format := format
t.Run("signed", func(t *testing.T) {
accept(t, acceptParms{
Name: fmt.Sprintf("signed_%s", format),
Conf: "signed.yaml",
Format: format,
Dockerfile: fmt.Sprintf("%s.signed.dockerfile", format),
})
})
}
}
type acceptParms struct {
Name string
Conf string
Format string
Dockerfile string
}
type testWriter struct {
t *testing.T
}
func (t testWriter) Write(p []byte) (n int, err error) {
t.t.Log(string(p))
return len(p), nil
}
func accept(t *testing.T, params acceptParms) {
var configFile = filepath.Join("./testdata/acceptance/", params.Conf)
tmp, err := filepath.Abs("./testdata/acceptance/tmp")
require.NoError(t, err)
var packageName = params.Name + "." + params.Format
var target = filepath.Join(tmp, packageName)
t.Log("package: " + target)
require.NoError(t, os.MkdirAll(tmp, 0700))
config, err := nfpm.ParseFile(configFile)
require.NoError(t, err)
info, err := config.Get(params.Format)
require.NoError(t, err)
require.NoError(t, nfpm.Validate(info))
pkg, err := nfpm.Get(params.Format)
require.NoError(t, err)
f, err := os.Create(target)
require.NoError(t, err)
info.Target = target
require.NoError(t, pkg.Package(nfpm.WithDefaults(info), f))
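// Build the format-specific Dockerfile with the container runtime named in
// CONTAINER_RUNTIME; installing the freshly built package inside the image is
// the actual acceptance check.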
//nolint:gosec
cmd := exec.Command(
os.Getenv("CONTAINER_RUNTIME"), "build", "--rm", "--force-rm",
"-f", params.Dockerfile,
"--build-arg", "package="+filepath.Join("tmp", packageName),
".",
)
cmd.Dir = "./testdata/acceptance"
cmd.Stderr = testWriter{t}
cmd.Stdout = cmd.Stderr
t.Log("will exec:", cmd.Args)
require.NoError(t, cmd.Run())
}
| [
"\"CONTAINER_RUNTIME\""
] | [] | [
"CONTAINER_RUNTIME"
] | [] | ["CONTAINER_RUNTIME"] | go | 1 | 0 | |
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'datafriends.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
gallery/wsgi.py | """
WSGI config for gallery project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gallery.settings')
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 |