| filename (stringlengths 4-198) | content (stringlengths 25-939k) | environment (sequence) | variablearg (sequence) | constarg (sequence) | variableargjson (stringclasses 1 value) | constargjson (stringlengths 2-3.9k) | lang (stringclasses 3 values) | constargcount (float64 0-129 ⌀) | variableargcount (float64 0-0 ⌀) | sentence (stringclasses 1 value) |
|---|---|---|---|---|---|---|---|---|---|---|
manager/main.go | // Copyright 2020 IBM Corp.
// SPDX-License-Identifier: Apache-2.0
package main
import (
"flag"
"os"
"strings"
"github.com/mesh-for-data/mesh-for-data/pkg/multicluster"
"github.com/mesh-for-data/mesh-for-data/pkg/multicluster/local"
"github.com/mesh-for-data/mesh-for-data/pkg/multicluster/razee"
"github.com/mesh-for-data/mesh-for-data/pkg/storage"
"github.com/mesh-for-data/mesh-for-data/pkg/vault"
"github.com/mesh-for-data/mesh-for-data/manager/controllers/motion"
kruntime "k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/manager"
comv1alpha1 "github.com/datashim-io/datashim/src/dataset-operator/pkg/apis/com/v1alpha1"
appv1 "github.com/mesh-for-data/mesh-for-data/manager/apis/app/v1alpha1"
motionv1 "github.com/mesh-for-data/mesh-for-data/manager/apis/motion/v1alpha1"
"github.com/mesh-for-data/mesh-for-data/manager/controllers/app"
"github.com/mesh-for-data/mesh-for-data/manager/controllers/utils"
"github.com/mesh-for-data/mesh-for-data/pkg/helm"
pc "github.com/mesh-for-data/mesh-for-data/pkg/policy-compiler/policy-compiler"
// +kubebuilder:scaffold:imports
)
var (
scheme = kruntime.NewScheme()
setupLog = ctrl.Log.WithName("setup")
)
func init() {
_ = clientgoscheme.AddToScheme(scheme)
_ = motionv1.AddToScheme(scheme)
_ = appv1.AddToScheme(scheme)
_ = comv1alpha1.SchemeBuilder.AddToScheme(scheme)
// +kubebuilder:scaffold:scheme
}
// This component starts all the controllers of the CRDs of the manager.
// This includes the following components:
// - application-controller
// - blueprint-controller
// - plotter-controller
// - movement-controller
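// Illustrative invocation (binary name and flag values are assumed, not taken
// from this file; the flags themselves are defined below):
//   ./manager --enable-all-controllers --metrics-addr=":8085" --namespace="m4d-system"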
func main() {
var metricsAddr string
var enableLeaderElection bool
var enableApplicationController bool
var enableBlueprintController bool
var enablePlotterController bool
var enableMotionController bool
var enableAllControllers bool
var namespace string
address := utils.ListeningAddress(8085)
flag.StringVar(&metricsAddr, "metrics-addr", address, "The address the metric endpoint binds to.")
flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
flag.BoolVar(&enableApplicationController, "enable-application-controller", false,
"Enable application controller of the manager. This manages CRDs of type M4DApplication.")
flag.BoolVar(&enableBlueprintController, "enable-blueprint-controller", false,
"Enable blueprint controller of the manager. This manages CRDs of type Blueprint.")
flag.BoolVar(&enablePlotterController, "enable-plotter-controller", false,
"Enable plotter controller of the manager. This manages CRDs of type Plotter.")
flag.BoolVar(&enableMotionController, "enable-motion-controller", false,
"Enable motion controller of the manager. This manages CRDs of type BatchTransfer or StreamTransfer.")
flag.BoolVar(&enableAllControllers, "enable-all-controllers", false,
"Enables all controllers.")
flag.StringVar(&namespace, "namespace", "", "The namespace to which this controller manager is limited.")
flag.Parse()
if enableAllControllers {
enableApplicationController = true
enableBlueprintController = true
enablePlotterController = true
enableMotionController = true
}
if !enableApplicationController && !enablePlotterController && !enableBlueprintController && !enableMotionController {
setupLog.Info("At least one controller flag must be set!")
os.Exit(1)
}
ctrl.SetLogger(zap.New(zap.UseDevMode(true)))
var ctrlOps manager.Options
if len(namespace) > 0 {
// manager restricted to a single namespace
ctrlOps = ctrl.Options{
Scheme: scheme,
Namespace: namespace,
MetricsBindAddress: metricsAddr,
LeaderElection: enableLeaderElection,
LeaderElectionID: "m4d-operator-leader-election",
Port: 9443,
}
} else {
// manager not restricted to a namespace.
ctrlOps = ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
LeaderElection: enableLeaderElection,
LeaderElectionID: "m4d-operator-leader-election",
Port: 9443,
}
}
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrlOps)
if err != nil {
setupLog.Error(err, "unable to start manager")
os.Exit(1)
}
// Initialize ClusterManager
var clusterManager multicluster.ClusterManager
if enableApplicationController || enablePlotterController {
clusterManager, err = NewClusterManager(mgr)
if err != nil {
setupLog.Error(err, "unable to initialize cluster manager")
os.Exit(1)
}
}
if enableApplicationController {
// Initiate vault client
vaultConn, errVaultSetup := initVaultConnection()
if errVaultSetup != nil {
setupLog.Error(errVaultSetup, "Error setting up vault")
os.Exit(1)
}
// Initialize PolicyCompiler interface
policyCompiler := pc.NewPolicyCompiler()
// Initiate the M4DApplication Controller
applicationController, err := app.NewM4DApplicationReconciler(mgr, "M4DApplication", vaultConn, policyCompiler, clusterManager, storage.NewProvisionImpl(mgr.GetClient()))
if err != nil {
setupLog.Error(err, "unable to create controller")
os.Exit(1)
}
if err := applicationController.SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "M4DApplication")
os.Exit(1)
}
}
if enablePlotterController {
// Initiate the Plotter Controller
plotterController := app.NewPlotterReconciler(mgr, "Plotter", clusterManager)
if err := plotterController.SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", plotterController.Name)
os.Exit(1)
}
}
if enableBlueprintController {
// Initiate the Blueprint Controller
blueprintController := app.NewBlueprintReconciler(mgr, "Blueprint", new(helm.Impl))
if err := blueprintController.SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", blueprintController.Name)
os.Exit(1)
}
}
if enableMotionController {
motion.SetupMotionControllers(mgr)
}
// +kubebuilder:scaffold:builder
setupLog.Info("starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)
}
}
// init vault client
// TODO: the vault client should be used to add roles for the modules.
func initVaultConnection() (vault.Interface, error) {
vaultConn, err := vault.InitConnection(utils.GetVaultAddress(), utils.GetVaultToken())
if err != nil {
return vaultConn, err
}
return vaultConn, nil
}
// NewClusterManager decides based on the environment variables that are set which
// cluster manager instance should be initiated.
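// Selection, in the order checked below:
//   RAZEE_USER set    -> Razee local manager (also reads RAZEE_URL and RAZEE_PASSWORD)
//   IAM_API_KEY set   -> IBM Satellite config manager
//   API_KEY set       -> Razee OAuth manager (also reads RAZEE_URL)
//   otherwise         -> local cluster manager scoped to the system namespace
// MULTICLUSTER_GROUP is forwarded to all three Razee-based managers.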
func NewClusterManager(mgr manager.Manager) (multicluster.ClusterManager, error) {
setupLog := ctrl.Log.WithName("setup")
multiClusterGroup := os.Getenv("MULTICLUSTER_GROUP")
if user, razeeLocal := os.LookupEnv("RAZEE_USER"); razeeLocal {
razeeURL := strings.TrimSpace(os.Getenv("RAZEE_URL"))
password := strings.TrimSpace(os.Getenv("RAZEE_PASSWORD"))
setupLog.Info("Using razee local at " + razeeURL)
return razee.NewRazeeLocalManager(strings.TrimSpace(razeeURL), strings.TrimSpace(user), password, multiClusterGroup)
} else if apiKey, satConf := os.LookupEnv("IAM_API_KEY"); satConf {
setupLog.Info("Using IBM Satellite config")
return razee.NewSatConfManager(strings.TrimSpace(apiKey), multiClusterGroup)
} else if apiKey, razeeOauth := os.LookupEnv("API_KEY"); razeeOauth {
setupLog.Info("Using Razee oauth")
razeeURL := strings.TrimSpace(os.Getenv("RAZEE_URL"))
return razee.NewRazeeOAuthManager(strings.TrimSpace(razeeURL), strings.TrimSpace(apiKey), multiClusterGroup)
} else {
setupLog.Info("Using local cluster manager")
return local.NewManager(mgr.GetClient(), utils.GetSystemNamespace())
}
}
| ["\"MULTICLUSTER_GROUP\"", "\"RAZEE_URL\"", "\"RAZEE_PASSWORD\"", "\"RAZEE_URL\""] | [] | ["RAZEE_URL", "RAZEE_PASSWORD", "MULTICLUSTER_GROUP"] | [] | ["RAZEE_URL", "RAZEE_PASSWORD", "MULTICLUSTER_GROUP"] | go | 3 | 0 | |
account/internal/dao/init.go | package dao
import (
"fmt"
"os"
"time"
configs "github.com/mike955/zebra/account/configs"
"gorm.io/driver/mysql"
"gorm.io/gorm"
)
var DB *gorm.DB
type CommonTimeModel struct {
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
}
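// Init opens the GORM MySQL connection used by this package. The connection
// parameters come from the supplied configuration, and the MYSQL_ADDR,
// MYSQL_PORT, MYSQL_USERNAME, MYSQL_PASSWORD and MYSQL_DATABASE environment
// variables override them when set.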
func Init(config configs.Mysql) {
var err error
addr, username, password, database := config.MysqlAddr, config.MysqlUsername, config.MysqlPassword, config.MysqlDatabase
if os.Getenv("MYSQL_ADDR") != "" && os.Getenv("MYSQL_PORT") != "" {
addr = os.Getenv("MYSQL_ADDR") + ":" + os.Getenv("MYSQL_PORT")
}
if os.Getenv("MYSQL_USERNAME") != "" {
username = os.Getenv("MYSQL_USERNAME")
}
if os.Getenv("MYSQL_PASSWORD") != "" {
password = os.Getenv("MYSQL_PASSWORD")
}
if os.Getenv("MYSQL_DATABASE") != "" {
database = os.Getenv("MYSQL_DATABASE")
}
dsn := fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8mb4&parseTime=True&loc=Local", username, password, addr, database)
DB, err = gorm.Open(mysql.Open(dsn))
DB = DB.Debug()
if err != nil {
panic("connect mysql error: " + err.Error())
}
}
| ["\"MYSQL_ADDR\"", "\"MYSQL_PORT\"", "\"MYSQL_ADDR\"", "\"MYSQL_PORT\"", "\"MYSQL_USERNAME\"", "\"MYSQL_USERNAME\"", "\"MYSQL_PASSWORD\"", "\"MYSQL_PASSWORD\"", "\"MYSQL_DATABASE\"", "\"MYSQL_DATABASE\""] | [] | ["MYSQL_PASSWORD", "MYSQL_PORT", "MYSQL_USERNAME", "MYSQL_ADDR", "MYSQL_DATABASE"] | [] | ["MYSQL_PASSWORD", "MYSQL_PORT", "MYSQL_USERNAME", "MYSQL_ADDR", "MYSQL_DATABASE"] | go | 5 | 0 | |
salt/modules/cmdmod.py | # -*- coding: utf-8 -*-
'''
A module for shelling out.
Keep in mind that this module is insecure, in that it can give whoever has
access to the master root execution access to all salt minions.
'''
from __future__ import absolute_import
# Import python libs
import functools
import glob
import json
import logging
import os
import shutil
import subprocess
import sys
import time
import traceback
from salt.utils import vt
# Import salt libs
import salt.utils
import salt.utils.timed_subprocess
import salt.grains.extra
import salt.ext.six as six
from salt.exceptions import CommandExecutionError, TimedProcTimeoutError
from salt.log import LOG_LEVELS
from salt.ext.six.moves import range
from salt.ext.six.moves import shlex_quote as _cmd_quote
# Only available on POSIX systems, nonfatal on windows
try:
import pwd
except ImportError:
pass
if salt.utils.is_windows():
from salt.utils.win_runas import runas as win_runas
HAS_WIN_RUNAS = True
else:
HAS_WIN_RUNAS = False
__proxyenabled__ = ['*']
# Define the module's virtual name
__virtualname__ = 'cmd'
# Set up logging
log = logging.getLogger(__name__)
DEFAULT_SHELL = salt.grains.extra.shell()['shell']
def __virtual__():
'''
Overwriting the cmd python module makes debugging modules
with pdb a bit harder so lets do it this way instead.
'''
return __virtualname__
def _check_cb(cb_):
'''
If the callback is None or is not callable, return a lambda that returns
the value passed.
'''
if cb_ is not None:
if hasattr(cb_, '__call__'):
return cb_
else:
log.error('log_callback is not callable, ignoring')
return lambda x: x
def _python_shell_default(python_shell, __pub_jid):
'''
Set python_shell default based on remote execution and __opts__['cmd_safe']
'''
try:
# Default to python_shell=True when run directly from remote execution
# system. Cross-module calls won't have a jid.
if __pub_jid and python_shell is None:
return True
elif __opts__.get('cmd_safe', True) is False and python_shell is None:
# Override-switch for python_shell
return True
except NameError:
pass
return python_shell
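# _chroot_pids: scan /proc/<pid>/root symlinks and collect the PIDs of
# processes whose root resolves to a path under the given chroot directory.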
def _chroot_pids(chroot):
pids = []
for root in glob.glob('/proc/[0-9]*/root'):
try:
link = os.path.realpath(root)
if link.startswith(chroot):
pids.append(int(os.path.basename(
os.path.dirname(root)
)))
except OSError:
pass
return pids
def _render_cmd(cmd, cwd, template, saltenv='base', pillarenv=None, pillar_override=None):
'''
If template is a valid template engine, process the cmd and cwd through
that engine.
'''
if not template:
return (cmd, cwd)
# render the path as a template using path_template_engine as the engine
if template not in salt.utils.templates.TEMPLATE_REGISTRY:
raise CommandExecutionError(
'Attempted to render file paths with unavailable engine '
'{0}'.format(template)
)
kwargs = {}
kwargs['salt'] = __salt__
if pillarenv is not None or pillar_override is not None:
pillarenv = pillarenv or __opts__['pillarenv']
kwargs['pillar'] = _gather_pillar(pillarenv, pillar_override)
else:
kwargs['pillar'] = __pillar__
kwargs['grains'] = __grains__
kwargs['opts'] = __opts__
kwargs['saltenv'] = saltenv
def _render(contents):
# write out path to temp file
tmp_path_fn = salt.utils.mkstemp()
with salt.utils.fopen(tmp_path_fn, 'w+') as fp_:
fp_.write(contents)
data = salt.utils.templates.TEMPLATE_REGISTRY[template](
tmp_path_fn,
to_str=True,
**kwargs
)
salt.utils.safe_rm(tmp_path_fn)
if not data['result']:
# Failed to render the template
raise CommandExecutionError(
'Failed to execute cmd with error: {0}'.format(
data['data']
)
)
else:
return data['data']
cmd = _render(cmd)
cwd = _render(cwd)
return (cmd, cwd)
def _check_loglevel(level='info', quiet=False):
'''
Retrieve the level code for use in logging.Logger.log().
'''
def _bad_level(level):
log.error(
'Invalid output_loglevel \'{0}\'. Valid levels are: {1}. Falling '
'back to \'info\'.'
.format(
level,
', '.join(
sorted(LOG_LEVELS, key=LOG_LEVELS.get, reverse=True)
)
)
)
return LOG_LEVELS['info']
if salt.utils.is_true(quiet) or str(level).lower() == 'quiet':
return None
try:
level = level.lower()
if level not in LOG_LEVELS:
return _bad_level(level)
except AttributeError:
return _bad_level(level)
return LOG_LEVELS[level]
def _parse_env(env):
if not env:
env = {}
if isinstance(env, list):
env = salt.utils.repack_dictlist(env)
if not isinstance(env, dict):
env = {}
return env
def _gather_pillar(pillarenv, pillar_override):
'''
Whenever a state run starts, gather the pillar data fresh
'''
pillar = salt.pillar.get_pillar(
__opts__,
__grains__,
__opts__['id'],
__opts__['environment'],
pillar=pillar_override,
pillarenv=pillarenv
)
ret = pillar.compile_pillar()
if pillar_override and isinstance(pillar_override, dict):
ret.update(pillar_override)
return ret
def _run(cmd,
cwd=None,
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
output_loglevel='debug',
log_callback=None,
runas=None,
shell=DEFAULT_SHELL,
python_shell=False,
env=None,
clean_env=False,
rstrip=True,
template=None,
umask=None,
timeout=None,
with_communicate=True,
reset_system_locale=True,
ignore_retcode=False,
saltenv='base',
pillarenv=None,
pillar_override=None,
use_vt=False,
password=None,
bg=False,
**kwargs):
'''
Do the DRY thing and only call subprocess.Popen() once
'''
if _is_valid_shell(shell) is False:
log.warning(
'Attempt to run a shell command with what may be an invalid shell! '
'Check to ensure that the shell <{0}> is valid for this user.'
.format(shell))
log_callback = _check_cb(log_callback)
# Set the default working directory to the home directory of the user
# salt-minion is running as. Defaults to home directory of user under which
# the minion is running.
if not cwd:
cwd = os.path.expanduser('~{0}'.format('' if not runas else runas))
# make sure we can access the cwd
# when run from sudo or another environment where the euid is
# changed ~ will expand to the home of the original uid and
# the euid might not have access to it. See issue #1844
if not os.access(cwd, os.R_OK):
cwd = '/'
if salt.utils.is_windows():
cwd = os.tempnam()[:3]
else:
# Handle edge cases where numeric/other input is entered, and would be
# yaml-ified into non-string types
cwd = str(cwd)
if not salt.utils.is_windows():
if not os.path.isfile(shell) or not os.access(shell, os.X_OK):
msg = 'The shell {0} is not available'.format(shell)
raise CommandExecutionError(msg)
if salt.utils.is_windows() and use_vt: # Memoization so not much overhead
raise CommandExecutionError('VT not available on windows')
if shell.lower().strip() == 'powershell':
# If we were called by script(), then fakeout the Windows
# shell to run a Powershell script.
# Else just run a Powershell command.
stack = traceback.extract_stack(limit=2)
# extract_stack() returns a list of tuples.
# The last item in the list [-1] is the current method.
# The third item[2] in each tuple is the name of that method.
if stack[-2][2] == 'script':
cmd = 'Powershell -NonInteractive -ExecutionPolicy Bypass -File ' + cmd
else:
cmd = 'Powershell -NonInteractive "{0}"'.format(cmd.replace('"', '\\"'))
# munge the cmd and cwd through the template
(cmd, cwd) = _render_cmd(cmd, cwd, template, saltenv, pillarenv, pillar_override)
ret = {}
env = _parse_env(env)
for bad_env_key in (x for x, y in six.iteritems(env) if y is None):
log.error('Environment variable \'{0}\' passed without a value. '
'Setting value to an empty string'.format(bad_env_key))
env[bad_env_key] = ''
if runas and salt.utils.is_windows():
if not password:
msg = 'password is a required argument for runas on Windows'
raise CommandExecutionError(msg)
if not HAS_WIN_RUNAS:
msg = 'missing salt/utils/win_runas.py'
raise CommandExecutionError(msg)
if not isinstance(cmd, list):
cmd = salt.utils.shlex_split(cmd, posix=False)
cmd = ' '.join(cmd)
return win_runas(cmd, runas, password, cwd)
if runas:
# Save the original command before munging it
try:
pwd.getpwnam(runas)
except KeyError:
raise CommandExecutionError(
'User \'{0}\' is not available'.format(runas)
)
try:
# Getting the environment for the runas user
# There must be a better way to do this.
py_code = (
'import sys, os, itertools; '
'sys.stdout.write(\"\\0\".join(itertools.chain(*os.environ.items())))'
)
if __grains__['os'] in ['MacOS', 'Darwin']:
env_cmd = ('sudo', '-i', '-u', runas, '--',
sys.executable)
elif __grains__['os'] in ['FreeBSD']:
env_cmd = ('su', '-', runas, '-c',
"{0} -c {1}".format(shell, sys.executable))
elif __grains__['os_family'] in ['Solaris']:
env_cmd = ('su', '-', runas, '-c', sys.executable)
elif __grains__['os_family'] in ['AIX']:
env_cmd = ('su', runas, '-c', sys.executable)
else:
env_cmd = ('su', '-s', shell, '-', runas, '-c', sys.executable)
env_encoded = subprocess.Popen(
env_cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE
).communicate(py_code)[0]
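# The helper above prints the runas user's environment as NUL-separated
# key/value items; pair them back up into a dict here.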
import itertools
env_runas = dict(itertools.izip(*[iter(env_encoded.split(b'\0'))]*2))
env_runas.update(env)
env = env_runas
# Encode unicode kwargs to filesystem encoding to avoid a
# UnicodeEncodeError when the subprocess is invoked.
fse = sys.getfilesystemencoding()
for key, val in six.iteritems(env):
if isinstance(val, six.text_type):
env[key] = val.encode(fse)
except ValueError:
raise CommandExecutionError(
'Environment could not be retrieved for User \'{0}\''.format(
runas
)
)
if _check_loglevel(output_loglevel) is not None:
# Always log the shell commands at INFO unless quiet logging is
# requested. The command output is what will be controlled by the
# 'loglevel' parameter.
msg = (
'Executing command {0}{1}{0} {2}in directory \'{3}\''.format(
'\'' if not isinstance(cmd, list) else '',
cmd,
'as user \'{0}\' '.format(runas) if runas else '',
cwd
)
)
log.info(log_callback(msg))
if reset_system_locale is True:
if not salt.utils.is_windows():
# Default to C!
# Salt only knows how to parse English words
# Don't override if the user has passed LC_ALL
env.setdefault('LC_CTYPE', 'C')
env.setdefault('LC_NUMERIC', 'C')
env.setdefault('LC_TIME', 'C')
env.setdefault('LC_COLLATE', 'C')
env.setdefault('LC_MONETARY', 'C')
env.setdefault('LC_MESSAGES', 'C')
env.setdefault('LC_PAPER', 'C')
env.setdefault('LC_NAME', 'C')
env.setdefault('LC_ADDRESS', 'C')
env.setdefault('LC_TELEPHONE', 'C')
env.setdefault('LC_MEASUREMENT', 'C')
env.setdefault('LC_IDENTIFICATION', 'C')
else:
# On Windows set the codepage to US English.
if python_shell:
cmd = 'chcp 437 > nul & ' + cmd
if clean_env:
run_env = env
else:
run_env = os.environ.copy()
run_env.update(env)
if python_shell is None:
python_shell = False
kwargs = {'cwd': cwd,
'shell': python_shell,
'env': run_env,
'stdin': str(stdin) if stdin is not None else stdin,
'stdout': stdout,
'stderr': stderr,
'with_communicate': with_communicate,
'timeout': timeout,
'bg': bg,
}
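# Normalize the umask: strip leading zeros, reject an all-zero umask, and
# parse the remainder as octal before handing it to chugid_and_umask below.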
if umask is not None:
_umask = str(umask).lstrip('0')
if _umask == '':
msg = 'Zero umask is not allowed.'
raise CommandExecutionError(msg)
try:
_umask = int(_umask, 8)
except ValueError:
msg = 'Invalid umask: \'{0}\''.format(umask)
raise CommandExecutionError(msg)
else:
_umask = None
if runas or umask:
kwargs['preexec_fn'] = functools.partial(
salt.utils.chugid_and_umask,
runas,
_umask)
if not salt.utils.is_windows():
# close_fds is not supported on Windows platforms if you redirect
# stdin/stdout/stderr
if kwargs['shell'] is True:
kwargs['executable'] = shell
kwargs['close_fds'] = True
if not os.path.isabs(cwd) or not os.path.isdir(cwd):
raise CommandExecutionError(
'Specified cwd \'{0}\' either not absolute or does not exist'
.format(cwd)
)
if python_shell is not True and not isinstance(cmd, list):
posix = True
if salt.utils.is_windows():
posix = False
cmd = salt.utils.shlex_split(cmd, posix=posix)
if not use_vt:
# This is where the magic happens
try:
proc = salt.utils.timed_subprocess.TimedProc(cmd, **kwargs)
except (OSError, IOError) as exc:
raise CommandExecutionError(
'Unable to run command \'{0}\' with the context \'{1}\', '
'reason: {2}'.format(cmd, kwargs, exc)
)
try:
proc.run()
except TimedProcTimeoutError as exc:
ret['stdout'] = str(exc)
ret['stderr'] = ''
ret['retcode'] = None
ret['pid'] = proc.process.pid
# ok return code for timeouts?
ret['retcode'] = 1
return ret
out, err = proc.stdout, proc.stderr
if rstrip:
if out is not None:
out = salt.utils.to_str(out).rstrip()
if err is not None:
err = salt.utils.to_str(err).rstrip()
ret['pid'] = proc.process.pid
ret['retcode'] = proc.process.returncode
ret['stdout'] = out
ret['stderr'] = err
else:
to = ''
if timeout:
to = ' (timeout: {0}s)'.format(timeout)
if _check_loglevel(output_loglevel) is not None:
msg = 'Running {0} in VT{1}'.format(cmd, to)
log.debug(log_callback(msg))
stdout, stderr = '', ''
now = time.time()
if timeout:
will_timeout = now + timeout
else:
will_timeout = -1
try:
proc = vt.Terminal(cmd,
shell=True,
log_stdout=True,
log_stderr=True,
cwd=cwd,
preexec_fn=kwargs.get('preexec_fn', None),
env=run_env,
log_stdin_level=output_loglevel,
log_stdout_level=output_loglevel,
log_stderr_level=output_loglevel,
stream_stdout=True,
stream_stderr=True)
ret['pid'] = proc.pid
while proc.has_unread_data:
try:
try:
time.sleep(0.5)
try:
cstdout, cstderr = proc.recv()
except IOError:
cstdout, cstderr = '', ''
if cstdout:
stdout += cstdout
else:
cstdout = ''
if cstderr:
stderr += cstderr
else:
cstderr = ''
if timeout and (time.time() > will_timeout):
ret['stderr'] = (
'SALT: Timeout after {0}s\n{1}').format(
timeout, stderr)
ret['retcode'] = None
break
except KeyboardInterrupt:
ret['stderr'] = 'SALT: User break\n{0}'.format(stderr)
ret['retcode'] = 1
break
except vt.TerminalException as exc:
log.error(
'VT: {0}'.format(exc),
exc_info_on_loglevel=logging.DEBUG)
ret = {'retcode': 1, 'pid': '2'}
break
# only set stdout on success as we already mangled in other
# cases
ret['stdout'] = stdout
if not proc.isalive():
# Process terminated, i.e., not canceled by the user or by
# the timeout
ret['stderr'] = stderr
ret['retcode'] = proc.exitstatus
ret['pid'] = proc.pid
finally:
proc.close(terminate=True, kill=True)
try:
if ignore_retcode:
__context__['retcode'] = 0
else:
__context__['retcode'] = ret['retcode']
except NameError:
# Ignore the context error during grain generation
pass
return ret
def _run_quiet(cmd,
cwd=None,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
python_shell=False,
env=None,
template=None,
umask=None,
timeout=None,
reset_system_locale=True,
saltenv='base',
pillarenv=None,
pillar_override=None):
'''
Helper for running commands quietly for minion startup
'''
return _run(cmd,
runas=runas,
cwd=cwd,
stdin=stdin,
stderr=subprocess.STDOUT,
output_loglevel='quiet',
log_callback=None,
shell=shell,
python_shell=python_shell,
env=env,
template=template,
umask=umask,
timeout=timeout,
reset_system_locale=reset_system_locale,
saltenv=saltenv,
pillarenv=pillarenv,
pillar_override=pillar_override)['stdout']
def _run_all_quiet(cmd,
cwd=None,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
python_shell=False,
env=None,
template=None,
umask=None,
timeout=None,
reset_system_locale=True,
saltenv='base',
pillarenv=None,
pillar_override=None,
output_loglevel=None):
'''
Helper for running commands quietly for minion startup.
Returns a dict of return data.
output_loglevel argument is ignored. This is here for when we alias
cmd.run_all directly to _run_all_quiet in certain chicken-and-egg
situations where modules need to work both before and after
the __salt__ dictionary is populated (cf dracr.py)
'''
return _run(cmd,
runas=runas,
cwd=cwd,
stdin=stdin,
shell=shell,
python_shell=python_shell,
env=env,
output_loglevel='quiet',
log_callback=None,
template=template,
umask=umask,
timeout=timeout,
reset_system_locale=reset_system_locale,
saltenv=saltenv,
pillarenv=pillarenv,
pillar_override=pillar_override)
def run(cmd,
cwd=None,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
python_shell=None,
env=None,
clean_env=False,
template=None,
rstrip=True,
umask=None,
output_loglevel='debug',
log_callback=None,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv='base',
use_vt=False,
bg=False,
**kwargs):
r'''
Execute the passed command and return the output as a string
Note that ``env`` represents the environment variables for the command, and
should be formatted as a dict, or a YAML string which resolves to a dict.
:param str cmd: The command to run. ex: 'ls -lart /home'
:param str cwd: The current working directory to execute the command in,
defaults to `/root` (`C:\` in windows)
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in cases
where sensitive information must be read from standard input.:
:param str runas: User to run script as. If running on a Windows minion you
must also pass a password
:param str password: Windows only. Pass a password if you specify runas.
This parameter will be ignored for other OS's
.. versionadded:: Boron
:param str shell: Shell to execute under. Defaults to the system default
shell.
:param bool python_shell: If False, let python handle the positional
arguments. Set to True to use shell features, such as pipes or redirection
:param bool bg: If True, run command in background and do not await or deliver its results
:param list env: A list of environment variables to be set prior to
execution.
Example:
.. code-block:: yaml
salt://scripts/foo.sh:
cmd.script:
- env:
- BATCH: 'yes'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :doc:`here
</topics/troubleshooting/yaml_idiosyncrasies>`.
Variables as values are not evaluated. So $PATH in the following
example is a literal '$PATH':
.. code-block:: yaml
salt://scripts/bar.sh:
cmd.script:
- env: "PATH=/some/path:$PATH"
One can still use the existing $PATH by using a bit of Jinja:
.. code-block:: yaml
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
mycommand:
cmd.run:
- name: ls -l /
- env:
- PATH: {{ [current_path, '/my/special/bin']|join(':') }}
:param bool clean_env: Attempt to clean out all other shell environment
variables and set only those provided in the 'env' argument to this
function.
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja, mako,
and wempy are supported
:param bool rstrip: Strip all whitespace off the end of output before it is
returned.
:param str umask: The umask (in octal) to use when running the command.
:param str output_loglevel: Control the loglevel at which the output from
the command is logged. Note that the command being run will still be logged
(loglevel: DEBUG) regardless, unless ``quiet`` is used for this value.
:param int timeout: A timeout in seconds for the executed process to return.
:param bool use_vt: Use VT utils (saltstack) to stream the command output
more interactively to the console and the logs. This is experimental.
.. warning::
This function does not process commands through a shell
unless the python_shell flag is set to True. This means that any
shell-specific functionality such as 'echo' or the use of pipes,
redirection or &&, should either be migrated to cmd.shell or
have the python_shell=True flag set here.
The use of python_shell=True means that the shell will accept _any_ input
including potentially malicious commands such as 'good_command;rm -rf /'.
Be absolutely certain that you have sanitized your input prior to using
python_shell=True
CLI Example:
.. code-block:: bash
salt '*' cmd.run "ls -l | awk '/foo/{print \\$2}'"
The template arg can be set to 'jinja' or another supported template
engine to render the command arguments before execution.
For example:
.. code-block:: bash
salt '*' cmd.run template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
Specify an alternate shell with the shell parameter:
.. code-block:: bash
salt '*' cmd.run "Get-ChildItem C:\\ " shell='powershell'
A string of standard input can be specified for the command to be run using
the ``stdin`` parameter. This can be useful in cases where sensitive
information must be read from standard input.:
.. code-block:: bash
salt '*' cmd.run "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
If an equal sign (``=``) appears in an argument to a Salt command it is
interpreted as a keyword argument in the format ``key=val``. That
processing can be bypassed in order to pass an equal sign through to the
remote shell command by manually specifying the kwarg:
.. code-block:: bash
salt '*' cmd.run cmd='sed -e s/=/:/g'
'''
python_shell = _python_shell_default(python_shell,
kwargs.get('__pub_jid', ''))
ret = _run(cmd,
runas=runas,
shell=shell,
python_shell=python_shell,
cwd=cwd,
stdin=stdin,
stderr=subprocess.STDOUT,
env=env,
clean_env=clean_env,
template=template,
rstrip=rstrip,
umask=umask,
output_loglevel=output_loglevel,
log_callback=log_callback,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
saltenv=saltenv,
pillarenv=kwargs.get('pillarenv'),
pillar_override=kwargs.get('pillar'),
use_vt=use_vt,
password=kwargs.get('password', None),
bg=bg)
log_callback = _check_cb(log_callback)
if 'pid' in ret and '__pub_jid' in kwargs:
# Stuff the child pid in the JID file
try:
proc_dir = os.path.join(__opts__['cachedir'], 'proc')
jid_file = os.path.join(proc_dir, kwargs['__pub_jid'])
if os.path.isfile(jid_file):
serial = salt.payload.Serial(__opts__)
with salt.utils.fopen(jid_file, 'rb') as fn_:
jid_dict = serial.load(fn_)
if 'child_pids' in jid_dict:
jid_dict['child_pids'].append(ret['pid'])
else:
jid_dict['child_pids'] = [ret['pid']]
# Rewrite file
with salt.utils.fopen(jid_file, 'w+b') as fn_:
fn_.write(serial.dumps(jid_dict))
except (NameError, TypeError):
# Avoids errors from msgpack not being loaded in salt-ssh
pass
lvl = _check_loglevel(output_loglevel)
if lvl is not None:
if not ignore_retcode and ret['retcode'] != 0:
if lvl < LOG_LEVELS['error']:
lvl = LOG_LEVELS['error']
msg = (
'Command \'{0}\' failed with return code: {1}'.format(
cmd,
ret['retcode']
)
)
log.error(log_callback(msg))
log.log(lvl, 'output: {0}'.format(log_callback(ret['stdout'])))
return ret['stdout']
def shell(cmd,
cwd=None,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
env=None,
clean_env=False,
template=None,
rstrip=True,
umask=None,
output_loglevel='debug',
log_callback=None,
quiet=False,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv='base',
use_vt=False,
bg=False,
**kwargs):
'''
Execute the passed command and return the output as a string.
.. versionadded:: 2015.5.0
:param str cmd: The command to run. ex: 'ls -lart /home'
:param str cwd: The current working directory to execute the command in,
defaults to /root
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in cases
where sensitive information must be read from standard input.
:param str runas: User to run script as. If running on a Windows minion you
must also pass a password
:param str password: Windows only. Pass a password if you specify runas.
This parameter will be ignored for other OS's
.. versionadded:: Boron
:param int shell: Shell to execute under. Defaults to the system default
shell.
:param bool bg: If True, run command in background and do not await or deliver its results
:param list env: A list of environment variables to be set prior to
execution.
Example:
.. code-block:: yaml
salt://scripts/foo.sh:
cmd.script:
- env:
- BATCH: 'yes'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :doc:`here
</topics/troubleshooting/yaml_idiosyncrasies>`.
Variables as values are not evaluated. So $PATH in the following
example is a literal '$PATH':
.. code-block:: yaml
salt://scripts/bar.sh:
cmd.script:
- env: "PATH=/some/path:$PATH"
One can still use the existing $PATH by using a bit of Jinja:
.. code-block:: yaml
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
mycommand:
cmd.run:
- name: ls -l /
- env:
- PATH: {{ [current_path, '/my/special/bin']|join(':') }}
:param bool clean_env: Attempt to clean out all other shell environment
variables and set only those provided in the 'env' argument to this
function.
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja, mako,
and wempy are supported
:param bool rstrip: Strip all whitespace off the end of output before it is
returned.
:param str umask: The umask (in octal) to use when running the command.
:param str output_loglevel: Control the loglevel at which the output from
the command is logged. Note that the command being run will still be logged
(loglevel: DEBUG) regardless, unless ``quiet`` is used for this value.
:param int timeout: A timeout in seconds for the executed process to return.
:param bool use_vt: Use VT utils (saltstack) to stream the command output
more interactively to the console and the logs. This is experimental.
.. warning::
This passes the cmd argument directly to the shell
without any further processing! Be absolutely sure that you
have properly sanitized the command passed to this function
and do not use untrusted inputs.
Note that ``env`` represents the environment variables for the command, and
should be formatted as a dict, or a YAML string which resolves to a dict.
CLI Example:
.. code-block:: bash
salt '*' cmd.shell "ls -l | awk '/foo/{print \\$2}'"
The template arg can be set to 'jinja' or another supported template
engine to render the command arguments before execution.
For example:
.. code-block:: bash
salt '*' cmd.shell template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
Specify an alternate shell with the shell parameter:
.. code-block:: bash
salt '*' cmd.shell "Get-ChildItem C:\\ " shell='powershell'
A string of standard input can be specified for the command to be run using
the ``stdin`` parameter. This can be useful in cases where sensitive
information must be read from standard input.:
.. code-block:: bash
salt '*' cmd.shell "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
If an equal sign (``=``) appears in an argument to a Salt command it is
interpreted as a keyword argument in the format ``key=val``. That
processing can be bypassed in order to pass an equal sign through to the
remote shell command by manually specifying the kwarg:
.. code-block:: bash
salt '*' cmd.shell cmd='sed -e s/=/:/g'
'''
if 'python_shell' in kwargs:
python_shell = kwargs.pop('python_shell')
else:
python_shell = True
return run(cmd,
cwd=cwd,
stdin=stdin,
runas=runas,
shell=shell,
env=env,
clean_env=clean_env,
template=template,
rstrip=rstrip,
umask=umask,
output_loglevel=output_loglevel,
log_callback=log_callback,
quiet=quiet,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
saltenv=saltenv,
use_vt=use_vt,
python_shell=python_shell,
bg=bg,
**kwargs)
def run_stdout(cmd,
cwd=None,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
python_shell=None,
env=None,
clean_env=False,
template=None,
rstrip=True,
umask=None,
output_loglevel='debug',
log_callback=None,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv='base',
use_vt=False,
**kwargs):
'''
Execute a command, and only return the standard out
:param str cmd: The command to run. ex: 'ls -lart /home'
:param str cwd: The current working directory to execute the command in,
defaults to /root
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in cases
where sensitive information must be read from standard input.:
:param str runas: User to run script as. If running on a Windows minion you
must also pass a password
:param str password: Windows only. Pass a password if you specify runas.
This parameter will be ignored for other OS's
.. versionadded:: Boron
:param str shell: Shell to execute under. Defaults to the system default shell.
:param bool python_shell: If False, let python handle the positional
arguments. Set to True to use shell features, such as pipes or redirection
:param list env: A list of environment variables to be set prior to
execution.
Example:
.. code-block:: yaml
salt://scripts/foo.sh:
cmd.script:
- env:
- BATCH: 'yes'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :doc:`here
</topics/troubleshooting/yaml_idiosyncrasies>`.
Variables as values are not evaluated. So $PATH in the following
example is a literal '$PATH':
.. code-block:: yaml
salt://scripts/bar.sh:
cmd.script:
- env: "PATH=/some/path:$PATH"
One can still use the existing $PATH by using a bit of Jinja:
.. code-block:: yaml
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
mycommand:
cmd.run:
- name: ls -l /
- env:
- PATH: {{ [current_path, '/my/special/bin']|join(':') }}
:param bool clean_env: Attempt to clean out all other shell environment
variables and set only those provided in the 'env' argument to this
function.
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja, mako,
and wempy are supported
:param bool rstrip: Strip all whitespace off the end of output before it is
returned.
:param str umask: The umask (in octal) to use when running the command.
:param str output_loglevel: Control the loglevel at which the output from
the command is logged. Note that the command being run will still be logged
(loglevel: DEBUG) regardless, unless ``quiet`` is used for this value.
:param int timeout: A timeout in seconds for the executed process to return.
:param bool use_vt: Use VT utils (saltstack) to stream the command output
more interactively to the console and the logs. This is experimental.
Note that ``env`` represents the environment variables for the command, and
should be formatted as a dict, or a YAML string which resolves to a dict.
CLI Example:
.. code-block:: bash
salt '*' cmd.run_stdout "ls -l | awk '/foo/{print \\$2}'"
The template arg can be set to 'jinja' or another supported template
engine to render the command arguments before execution.
For example:
.. code-block:: bash
salt '*' cmd.run_stdout template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
A string of standard input can be specified for the command to be run using
the ``stdin`` parameter. This can be useful in cases where sensitive
information must be read from standard input.:
.. code-block:: bash
salt '*' cmd.run_stdout "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
'''
python_shell = _python_shell_default(python_shell,
kwargs.get('__pub_jid', ''))
ret = _run(cmd,
runas=runas,
cwd=cwd,
stdin=stdin,
shell=shell,
python_shell=python_shell,
env=env,
clean_env=clean_env,
template=template,
rstrip=rstrip,
umask=umask,
output_loglevel=output_loglevel,
log_callback=log_callback,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
saltenv=saltenv,
pillarenv=kwargs.get('pillarenv'),
pillar_override=kwargs.get('pillar'),
use_vt=use_vt,
**kwargs)
log_callback = _check_cb(log_callback)
lvl = _check_loglevel(output_loglevel)
if lvl is not None:
if not ignore_retcode and ret['retcode'] != 0:
if lvl < LOG_LEVELS['error']:
lvl = LOG_LEVELS['error']
msg = (
'Command \'{0}\' failed with return code: {1}'.format(
cmd,
ret['retcode']
)
)
log.error(log_callback(msg))
if ret['stdout']:
log.log(lvl, 'stdout: {0}'.format(log_callback(ret['stdout'])))
if ret['stderr']:
log.log(lvl, 'stderr: {0}'.format(log_callback(ret['stderr'])))
if ret['retcode']:
log.log(lvl, 'retcode: {0}'.format(ret['retcode']))
return ret['stdout']
def run_stderr(cmd,
cwd=None,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
python_shell=None,
env=None,
clean_env=False,
template=None,
rstrip=True,
umask=None,
output_loglevel='debug',
log_callback=None,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv='base',
use_vt=False,
**kwargs):
'''
Execute a command and only return the standard error
:param str cmd: The command to run. ex: 'ls -lart /home'
:param str cwd: The current working directory to execute the command in,
defaults to /root
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in cases
where sensitive information must be read from standard input.:
:param str runas: User to run script as. If running on a Windows minion you
must also pass a password
:param str password: Windows only. Pass a password if you specify runas.
This parameter will be ignored for other OS's
.. versionadded:: Boron
:param str shell: Shell to execute under. Defaults to the system default
shell.
:param bool python_shell: If False, let python handle the positional
arguments. Set to True to use shell features, such as pipes or redirection
:param list env: A list of environment variables to be set prior to
execution.
Example:
.. code-block:: yaml
salt://scripts/foo.sh:
cmd.script:
- env:
- BATCH: 'yes'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :doc:`here
</topics/troubleshooting/yaml_idiosyncrasies>`.
Variables as values are not evaluated. So $PATH in the following
example is a literal '$PATH':
.. code-block:: yaml
salt://scripts/bar.sh:
cmd.script:
- env: "PATH=/some/path:$PATH"
One can still use the existing $PATH by using a bit of Jinja:
.. code-block:: yaml
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
mycommand:
cmd.run:
- name: ls -l /
- env:
- PATH: {{ [current_path, '/my/special/bin']|join(':') }}
:param bool clean_env: Attempt to clean out all other shell environment
variables and set only those provided in the 'env' argument to this
function.
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja, mako,
and wempy are supported
:param bool rstrip: Strip all whitespace off the end of output before it is
returned.
:param str umask: The umask (in octal) to use when running the command.
:param str output_loglevel: Control the loglevel at which the output from
the command is logged. Note that the command being run will still be logged
(loglevel: DEBUG) regardless, unless ``quiet`` is used for this value.
:param int timeout: A timeout in seconds for the executed process to return.
:param bool use_vt: Use VT utils (saltstack) to stream the command output
more interactively to the console and the logs. This is experimental.
Note that ``env`` represents the environment variables for the command, and
should be formatted as a dict, or a YAML string which resolves to a dict.
CLI Example:
.. code-block:: bash
salt '*' cmd.run_stderr "ls -l | awk '/foo/{print \\$2}'"
The template arg can be set to 'jinja' or another supported template
engine to render the command arguments before execution.
For example:
.. code-block:: bash
salt '*' cmd.run_stderr template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
A string of standard input can be specified for the command to be run using
the ``stdin`` parameter. This can be useful in cases where sensitive
information must be read from standard input.:
.. code-block:: bash
salt '*' cmd.run_stderr "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
'''
python_shell = _python_shell_default(python_shell,
kwargs.get('__pub_jid', ''))
ret = _run(cmd,
runas=runas,
cwd=cwd,
stdin=stdin,
shell=shell,
python_shell=python_shell,
env=env,
clean_env=clean_env,
template=template,
rstrip=rstrip,
umask=umask,
output_loglevel=output_loglevel,
log_callback=log_callback,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
use_vt=use_vt,
saltenv=saltenv,
pillarenv=kwargs.get('pillarenv'),
pillar_override=kwargs.get('pillar'),
password=kwargs.get('password', None))
log_callback = _check_cb(log_callback)
lvl = _check_loglevel(output_loglevel)
if lvl is not None:
if not ignore_retcode and ret['retcode'] != 0:
if lvl < LOG_LEVELS['error']:
lvl = LOG_LEVELS['error']
msg = (
'Command \'{0}\' failed with return code: {1}'.format(
cmd,
ret['retcode']
)
)
log.error(log_callback(msg))
if ret['stdout']:
log.log(lvl, 'stdout: {0}'.format(log_callback(ret['stdout'])))
if ret['stderr']:
log.log(lvl, 'stderr: {0}'.format(log_callback(ret['stderr'])))
if ret['retcode']:
log.log(lvl, 'retcode: {0}'.format(ret['retcode']))
return ret['stderr']
def run_all(cmd,
cwd=None,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
python_shell=None,
env=None,
clean_env=False,
template=None,
rstrip=True,
umask=None,
output_loglevel='debug',
log_callback=None,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv='base',
use_vt=False,
redirect_stderr=False,
**kwargs):
'''
Execute the passed command and return a dict of return data
:param str cmd: The command to run. ex: 'ls -lart /home'
:param str cwd: The current working directory to execute the command in,
defaults to /root
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in cases
where sensitive information must be read from standard input.:
:param str runas: User to run script as. If running on a Windows minion you
must also pass a password
:param str password: Windows only. Pass a password if you specify runas.
This parameter will be ignored for other OS's
.. versionadded:: Boron
:param str shell: Shell to execute under. Defaults to the system default
shell.
:param bool python_shell: If False, let python handle the positional
arguments. Set to True to use shell features, such as pipes or redirection
:param list env: A list of environment variables to be set prior to
execution.
Example:
.. code-block:: yaml
salt://scripts/foo.sh:
cmd.script:
- env:
- BATCH: 'yes'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :doc:`here
</topics/troubleshooting/yaml_idiosyncrasies>`.
Variables as values are not evaluated. So $PATH in the following
example is a literal '$PATH':
.. code-block:: yaml
salt://scripts/bar.sh:
cmd.script:
- env: "PATH=/some/path:$PATH"
One can still use the existing $PATH by using a bit of Jinja:
.. code-block:: yaml
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
mycommand:
cmd.run:
- name: ls -l /
- env:
- PATH: {{ [current_path, '/my/special/bin']|join(':') }}
:param bool clean_env: Attempt to clean out all other shell environment
variables and set only those provided in the 'env' argument to this
function.
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja, mako,
and wempy are supported
:param bool rstrip: Strip all whitespace off the end of output before it is
returned.
:param str umask: The umask (in octal) to use when running the command.
:param str output_loglevel: Control the loglevel at which the output from
the command is logged. Note that the command being run will still be logged
(loglevel: DEBUG) regardless, unless ``quiet`` is used for this value.
:param int timeout: A timeout in seconds for the executed process to return.
:param bool use_vt: Use VT utils (saltstack) to stream the command output
more interactively to the console and the logs. This is experimental.
Note that ``env`` represents the environment variables for the command, and
should be formatted as a dict, or a YAML string which resolves to a dict.
redirect_stderr : False
If set to ``True``, then stderr will be redirected to stdout. This is
helpful for cases where obtaining both the retcode and output is
desired, but it is not desired to have the output separated into both
stdout and stderr.
.. versionadded:: 2015.8.2
CLI Example:
.. code-block:: bash
salt '*' cmd.run_all "ls -l | awk '/foo/{print \\$2}'"
The template arg can be set to 'jinja' or another supported template
engine to render the command arguments before execution.
For example:
.. code-block:: bash
salt '*' cmd.run_all template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
A string of standard input can be specified for the command to be run using
the ``stdin`` parameter. This can be useful in cases where sensitive
information must be read from standard input.:
.. code-block:: bash
salt '*' cmd.run_all "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
'''
python_shell = _python_shell_default(python_shell,
kwargs.get('__pub_jid', ''))
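# When redirect_stderr is requested, merge stderr into stdout so callers get
# a single combined stream alongside the retcode.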
stderr = subprocess.STDOUT if redirect_stderr else subprocess.PIPE
ret = _run(cmd,
runas=runas,
cwd=cwd,
stdin=stdin,
stderr=stderr,
shell=shell,
python_shell=python_shell,
env=env,
clean_env=clean_env,
template=template,
rstrip=rstrip,
umask=umask,
output_loglevel=output_loglevel,
log_callback=log_callback,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
saltenv=saltenv,
pillarenv=kwargs.get('pillarenv'),
pillar_override=kwargs.get('pillar'),
use_vt=use_vt,
password=kwargs.get('password', None))
log_callback = _check_cb(log_callback)
lvl = _check_loglevel(output_loglevel)
if lvl is not None:
if not ignore_retcode and ret['retcode'] != 0:
if lvl < LOG_LEVELS['error']:
lvl = LOG_LEVELS['error']
msg = (
'Command \'{0}\' failed with return code: {1}'.format(
cmd,
ret['retcode']
)
)
log.error(log_callback(msg))
if ret['stdout']:
log.log(lvl, 'stdout: {0}'.format(log_callback(ret['stdout'])))
if ret['stderr']:
log.log(lvl, 'stderr: {0}'.format(log_callback(ret['stderr'])))
if ret['retcode']:
log.log(lvl, 'retcode: {0}'.format(ret['retcode']))
return ret
def retcode(cmd,
cwd=None,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
python_shell=None,
env=None,
clean_env=False,
template=None,
umask=None,
output_loglevel='debug',
log_callback=None,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv='base',
use_vt=False,
**kwargs):
'''
Execute a shell command and return the command's return code.
:param str cmd: The command to run. ex: 'ls -lart /home'
:param str cwd: The current working directory to execute the command in,
defaults to /root
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in cases
where sensitive information must be read from standard input.:
:param str runas: User to run script as. If running on a Windows minion you
must also pass a password
:param str password: Windows only. Pass a password if you specify runas.
This parameter will be ignored for other OS's
.. versionadded:: Boron
:param str shell: Shell to execute under. Defaults to the system default
shell.
:param bool python_shell: If False, let python handle the positional
arguments. Set to True to use shell features, such as pipes or redirection
:param list env: A list of environment variables to be set prior to
execution.
Example:
.. code-block:: yaml
salt://scripts/foo.sh:
cmd.script:
- env:
- BATCH: 'yes'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :doc:`here
</topics/troubleshooting/yaml_idiosyncrasies>`.
Variables as values are not evaluated. So $PATH in the following
example is a literal '$PATH':
.. code-block:: yaml
salt://scripts/bar.sh:
cmd.script:
- env: "PATH=/some/path:$PATH"
One can still use the existing $PATH by using a bit of Jinja:
.. code-block:: yaml
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
mycommand:
cmd.run:
- name: ls -l /
- env:
- PATH: {{ [current_path, '/my/special/bin']|join(':') }}
:param bool clean_env: Attempt to clean out all other shell environment
variables and set only those provided in the 'env' argument to this
function.
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja, mako,
and wempy are supported
:param bool rstrip: Strip all whitespace off the end of output before it is
returned.
:param str umask: The umask (in octal) to use when running the command.
:param str output_loglevel: Control the loglevel at which the output from
the command is logged. Note that the command being run will still be logged
(loglevel: DEBUG) regardless, unless ``quiet`` is used for this value.
:param int timeout: A timeout in seconds for the executed process to return.
:param bool use_vt: Use VT utils (saltstack) to stream the command output
more interactively to the console and the logs. This is experimental.
Note that ``env`` represents the environment variables for the command, and
should be formatted as a dict, or a YAML string which resolves to a dict.
:rtype: int
:rtype: None
:returns: Return Code as an int or None if there was an exception.
CLI Example:
.. code-block:: bash
salt '*' cmd.retcode "file /bin/bash"
The template arg can be set to 'jinja' or another supported template
engine to render the command arguments before execution.
For example:
.. code-block:: bash
salt '*' cmd.retcode template=jinja "file {{grains.pythonpath[0]}}/python"
A string of standard input can be specified for the command to be run using
the ``stdin`` parameter. This can be useful in cases where sensitive
information must be read from standard input.:
.. code-block:: bash
salt '*' cmd.retcode "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
'''
ret = _run(cmd,
runas=runas,
cwd=cwd,
stdin=stdin,
stderr=subprocess.STDOUT,
shell=shell,
python_shell=python_shell,
env=env,
clean_env=clean_env,
template=template,
umask=umask,
output_loglevel=output_loglevel,
log_callback=log_callback,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
saltenv=saltenv,
pillarenv=kwargs.get('pillarenv'),
pillar_override=kwargs.get('pillar'),
use_vt=use_vt,
password=kwargs.get('password', None))
log_callback = _check_cb(log_callback)
lvl = _check_loglevel(output_loglevel)
if lvl is not None:
if not ignore_retcode and ret['retcode'] != 0:
if lvl < LOG_LEVELS['error']:
lvl = LOG_LEVELS['error']
msg = (
'Command \'{0}\' failed with return code: {1}'.format(
cmd,
ret['retcode']
)
)
log.error(log_callback(msg))
log.log(lvl, 'output: {0}'.format(log_callback(ret['stdout'])))
return ret['retcode']
def _retcode_quiet(cmd,
cwd=None,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
python_shell=False,
env=None,
clean_env=False,
template=None,
umask=None,
output_loglevel='quiet',
log_callback=None,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv='base',
use_vt=False,
**kwargs):
'''
Helper for running commands quietly for minion startup.
Returns same as retcode
'''
return retcode(cmd,
cwd=cwd,
stdin=stdin,
runas=runas,
shell=shell,
python_shell=python_shell,
env=env,
clean_env=clean_env,
template=template,
umask=umask,
output_loglevel=output_loglevel,
log_callback=log_callback,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
saltenv=saltenv,
use_vt=use_vt,
**kwargs)
def script(source,
args=None,
cwd=None,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
python_shell=None,
env=None,
template=None,
umask=None,
output_loglevel='debug',
log_callback=None,
quiet=False,
timeout=None,
reset_system_locale=True,
__env__=None,
saltenv='base',
use_vt=False,
bg=False,
**kwargs):
'''
Download a script from a remote location and execute the script locally.
The script can be located on the salt master file server or on an HTTP/FTP
server.
The script will be executed directly, so it can be written in any available
programming language.
:param str source: The location of the script to download. If the file is
located on the master in the directory named spam, and is called eggs, the
source string is salt://spam/eggs
:param str args: String of command line args to pass to the script. Only
used if no args are specified as part of the `name` argument. To pass a
string containing spaces in YAML, you will need to doubly-quote it:
"arg1 'arg two' arg3"
:param str cwd: The current working directory to execute the command in,
defaults to /root
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in cases
where sensitive information must be read from standard input:
:param str runas: User to run script as. If running on a Windows minion you
must also pass a password
:param str password: Windows only. Pass a password if you specify runas.
This parameter will be ignored for other OSes
.. versionadded:: Boron
:param str shell: Shell to execute under. Defaults to the system default
shell.
:param bool python_shell: If False, let python handle the positional
arguments. Set to True to use shell features, such as pipes or redirection
:param bool bg: If True, run the script in the background and do not await or deliver its results
:param list env: A list of environment variables to be set prior to
execution.
Example:
.. code-block:: yaml
salt://scripts/foo.sh:
cmd.script:
- env:
- BATCH: 'yes'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :doc:`here
</topics/troubleshooting/yaml_idiosyncrasies>`.
Variables as values are not evaluated. So $PATH in the following
example is a literal '$PATH':
.. code-block:: yaml
salt://scripts/bar.sh:
cmd.script:
- env: "PATH=/some/path:$PATH"
One can still use the existing $PATH by using a bit of Jinja:
.. code-block:: yaml
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
mycommand:
cmd.run:
- name: ls -l /
- env:
- PATH: {{ [current_path, '/my/special/bin']|join(':') }}
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja, mako,
and wempy are supported
:param str umask: The umask (in octal) to use when running the command.
:param str output_loglevel: Control the loglevel at which the output from
the command is logged. Note that the command being run will still be logged
(loglevel: DEBUG) regardless, unless ``quiet`` is used for this value.
:param bool quiet: The command will be executed quietly, meaning no log
entries of the actual command or its return data. This is deprecated as of
the **2014.1.0** release, and is being replaced with ``output_loglevel: quiet``.
:param int timeout: If the command has not terminated after timeout seconds,
send the subprocess sigterm, and if sigterm is ignored, follow up with
sigkill
:param bool use_vt: Use VT utils (saltstack) to stream the command output
more interactively to the console and the logs. This is experimental.
CLI Example:
.. code-block:: bash
salt '*' cmd.script salt://scripts/runme.sh
salt '*' cmd.script salt://scripts/runme.sh 'arg1 arg2 "arg 3"'
salt '*' cmd.script salt://scripts/windows_task.ps1 args=' -Input c:\\tmp\\infile.txt' shell='powershell'
.. code-block:: bash
salt '*' cmd.script salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
'''
python_shell = _python_shell_default(python_shell,
kwargs.get('__pub_jid', ''))
def _cleanup_tempfile(path):
try:
os.remove(path)
except (IOError, OSError) as exc:
log.error(
'cmd.script: Unable to clean tempfile \'{0}\': {1}'.format(
path,
exc
)
)
if isinstance(__env__, six.string_types):
salt.utils.warn_until(
'Boron',
'Passing a salt environment should be done using \'saltenv\' not '
'\'__env__\'. This functionality will be removed in Salt Boron.'
)
# Backwards compatibility
saltenv = __env__
path = salt.utils.mkstemp(dir=cwd, suffix=os.path.splitext(source)[1])
if template:
if 'pillarenv' in kwargs or 'pillar' in kwargs:
pillarenv = kwargs.get('pillarenv', __opts__.get('pillarenv'))
kwargs['pillar'] = _gather_pillar(pillarenv, kwargs.get('pillar'))
fn_ = __salt__['cp.get_template'](source,
path,
template,
saltenv,
**kwargs)
if not fn_:
_cleanup_tempfile(path)
return {'pid': 0,
'retcode': 1,
'stdout': '',
'stderr': '',
'cache_error': True}
else:
fn_ = __salt__['cp.cache_file'](source, saltenv)
if not fn_:
_cleanup_tempfile(path)
return {'pid': 0,
'retcode': 1,
'stdout': '',
'stderr': '',
'cache_error': True}
shutil.copyfile(fn_, path)
if not salt.utils.is_windows():
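# 320 decimal == 0o500: make the downloaded script readable and executable
# by its owner only before handing it over to the target user.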
os.chmod(path, 320)
os.chown(path, __salt__['file.user_to_uid'](runas), -1)
ret = _run(path + ' ' + str(args) if args else path,
cwd=cwd,
stdin=stdin,
output_loglevel=output_loglevel,
log_callback=log_callback,
runas=runas,
shell=shell,
python_shell=python_shell,
env=env,
umask=umask,
timeout=timeout,
reset_system_locale=reset_system_locale,
saltenv=saltenv,
pillarenv=kwargs.get('pillarenv'),
pillar_override=kwargs.get('pillar'),
use_vt=use_vt,
password=kwargs.get('password', None),
bg=bg)
_cleanup_tempfile(path)
return ret
def script_retcode(source,
args=None,
cwd=None,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
python_shell=None,
env=None,
template='jinja',
umask=None,
timeout=None,
reset_system_locale=True,
__env__=None,
saltenv='base',
output_loglevel='debug',
log_callback=None,
use_vt=False,
**kwargs):
'''
Download a script from a remote location and execute the script locally.
The script can be located on the salt master file server or on an HTTP/FTP
server.
The script will be executed directly, so it can be written in any available
programming language.
The script can also be formatted as a template, the default is jinja.
Only evaluate the script return code and do not block for terminal output
:param str source: The location of the script to download. If the file is
located on the master in the directory named spam, and is called eggs, the
source string is salt://spam/eggs
:param str args: String of command line args to pass to the script. Only
used if no args are specified as part of the `name` argument. To pass a
string containing spaces in YAML, you will need to doubly-quote it: "arg1
'arg two' arg3"
:param str cwd: The current working directory to execute the command in,
defaults to /root
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in cases
where sensitive information must be read from standard input:
:param str runas: User to run script as. If running on a Windows minion you
must also pass a password
:param str password: Windows only. Pass a password if you specify runas.
This parameter will be ignored for other OSes
.. versionadded:: Boron
:param str shell: Shell to execute under. Defaults to the system default
shell.
:param bool python_shell: If False, let python handle the positional
arguments. Set to True to use shell features, such as pipes or redirection
:param list env: A list of environment variables to be set prior to
execution.
Example:
.. code-block:: yaml
salt://scripts/foo.sh:
cmd.script:
- env:
- BATCH: 'yes'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :doc:`here
</topics/troubleshooting/yaml_idiosyncrasies>`.
Variables as values are not evaluated. So $PATH in the following
example is a literal '$PATH':
.. code-block:: yaml
salt://scripts/bar.sh:
cmd.script:
- env: "PATH=/some/path:$PATH"
One can still use the existing $PATH by using a bit of Jinja:
.. code-block:: yaml
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
mycommand:
cmd.run:
- name: ls -l /
- env:
- PATH: {{ [current_path, '/my/special/bin']|join(':') }}
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja, mako,
and wempy are supported
:param str umask: The umask (in octal) to use when running the command.
:param str output_loglevel: Control the loglevel at which the output from
the command is logged. Note that the command being run will still be logged
(loglevel: DEBUG) regardless, unless ``quiet`` is used for this value.
:param bool quiet: The command will be executed quietly, meaning no log
entries of the actual command or its return data. This is deprecated as of
the **2014.1.0** release, and is being replaced with ``output_loglevel:
quiet``.
:param int timeout: If the command has not terminated after timeout seconds,
send the subprocess sigterm, and if sigterm is ignored, follow up with
sigkill
:param bool use_vt: Use VT utils (saltstack) to stream the command output
more interactively to the console and the logs. This is experimental.
CLI Example:
.. code-block:: bash
salt '*' cmd.script_retcode salt://scripts/runme.sh
salt '*' cmd.script_retcode salt://scripts/runme.sh 'arg1 arg2 "arg 3"'
salt '*' cmd.script_retcode salt://scripts/windows_task.ps1 args=' -Input c:\\tmp\\infile.txt' shell='powershell'
A string of standard input can be specified for the command to be run using
the ``stdin`` parameter. This can be useful in cases where sensitive
information must be read from standard input:
.. code-block:: bash
salt '*' cmd.script_retcode salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
'''
return script(source=source,
args=args,
cwd=cwd,
stdin=stdin,
runas=runas,
shell=shell,
python_shell=python_shell,
env=env,
template=template,
umask=umask,
timeout=timeout,
reset_system_locale=reset_system_locale,
__env__=__env__,
saltenv=saltenv,
output_loglevel=output_loglevel,
log_callback=log_callback,
use_vt=use_vt,
**kwargs)['retcode']
def which(cmd):
'''
Returns the path of an executable available on the minion, None otherwise
CLI Example:
.. code-block:: bash
salt '*' cmd.which cat
'''
return salt.utils.which(cmd)
def which_bin(cmds):
'''
Returns the first command found in a list of commands
CLI Example:
.. code-block:: bash
salt '*' cmd.which_bin '[pip2, pip, pip-python]'
'''
return salt.utils.which_bin(cmds)
def has_exec(cmd):
'''
Returns true if the executable is available on the minion, false otherwise
CLI Example:
.. code-block:: bash
salt '*' cmd.has_exec cat
'''
return which(cmd) is not None
def exec_code(lang, code, cwd=None):
'''
Pass in two strings: the first names the executable language (for example
python2, python3, ruby, perl, or lua), the second contains the code you
wish to execute. The stdout will be returned.
CLI Example:
.. code-block:: bash
salt '*' cmd.exec_code ruby 'puts "cheese"'
'''
return exec_code_all(lang, code, cwd)['stdout']
def exec_code_all(lang, code, cwd=None):
'''
Pass in two strings: the first names the executable language (for example
python2, python3, ruby, perl, or lua), the second contains the code you
wish to execute. All cmd artifacts (stdout, stderr, retcode, pid)
will be returned.
CLI Example:
.. code-block:: bash
salt '*' cmd.exec_code_all ruby 'puts "cheese"'
'''
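# Write the code to a temporary file, run it with the requested
# interpreter, then remove the file.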
codefile = salt.utils.mkstemp()
with salt.utils.fopen(codefile, 'w+t') as fp_:
fp_.write(code)
cmd = [lang, codefile]
ret = run_all(cmd, cwd=cwd, python_shell=False)
os.remove(codefile)
return ret
def tty(device, echo=None):
'''
Echo a string to a specific tty
CLI Example:
.. code-block:: bash
salt '*' cmd.tty tty0 'This is a test'
salt '*' cmd.tty pts3 'This is a test'
'''
if device.startswith('tty'):
teletype = '/dev/{0}'.format(device)
elif device.startswith('pts'):
teletype = '/dev/{0}'.format(device.replace('pts', 'pts/'))
else:
return {'Error': 'The specified device is not a valid TTY'}
try:
with salt.utils.fopen(teletype, 'wb') as tty_device:
tty_device.write(echo)
return {
'Success': 'Message was successfully echoed to {0}'.format(teletype)
}
except IOError:
return {
'Error': 'Echoing to {0} returned error'.format(teletype)
}
def run_chroot(root,
cmd,
cwd=None,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
python_shell=True,
env=None,
clean_env=False,
template=None,
rstrip=True,
umask=None,
output_loglevel='quiet',
log_callback=None,
quiet=False,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv='base',
use_vt=False,
bg=False,
**kwargs):
'''
.. versionadded:: 2014.7.0
This function runs :mod:`cmd.run_all <salt.modules.cmdmod.run_all>` wrapped
within a chroot, with dev and proc mounted in the chroot
root:
Path to the root of the jail to use.
cmd:
The command to run. ex: 'ls -lart /home'
cwd
The current working directory to execute the command in, defaults to
/root
stdin
A string of standard input can be specified for the command to be run using
the ``stdin`` parameter. This can be useful in cases where sensitive
information must be read from standard input:
runas
User to run script as.
shell
Shell to execute under. Defaults to the system default shell.
python_shell
If False, let python handle the positional arguments. Set to True
to use shell features, such as pipes or redirection
env
A list of environment variables to be set prior to execution.
Example:
.. code-block:: yaml
salt://scripts/foo.sh:
cmd.script:
- env:
- BATCH: 'yes'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :doc:`here
</topics/troubleshooting/yaml_idiosyncrasies>`.
Variables as values are not evaluated. So $PATH in the following
example is a literal '$PATH':
.. code-block:: yaml
salt://scripts/bar.sh:
cmd.script:
- env: "PATH=/some/path:$PATH"
One can still use the existing $PATH by using a bit of Jinja:
.. code-block:: yaml
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
mycommand:
cmd.run:
- name: ls -l /
- env:
- PATH: {{ [current_path, '/my/special/bin']|join(':') }}
clean_env:
Attempt to clean out all other shell environment variables and set
only those provided in the 'env' argument to this function.
template
If this setting is applied then the named templating engine will be
used to render the downloaded file. Currently jinja, mako, and wempy
are supported
rstrip
Strip all whitespace off the end of output before it is returned.
umask
The umask (in octal) to use when running the command.
output_loglevel
Control the loglevel at which the output from the command is logged.
Note that the command being run will still be logged (loglevel: DEBUG)
regardless, unless ``quiet`` is used for this value.
timeout
A timeout in seconds for the executed process to return.
use_vt
Use VT utils (saltstack) to stream the command output more
interactively to the console and the logs.
This is experimental.
CLI Example:
.. code-block:: bash
salt '*' cmd.run_chroot /var/lib/lxc/container_name/rootfs 'sh /tmp/bootstrap.sh'
'''
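# Mount /dev and /proc inside the chroot so the command has device nodes
# and process information available.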
__salt__['mount.mount'](
os.path.join(root, 'dev'),
'udev',
fstype='devtmpfs')
__salt__['mount.mount'](
os.path.join(root, 'proc'),
'proc',
fstype='proc')
# Execute chroot routine
sh_ = '/bin/sh'
if os.path.isfile(os.path.join(root, 'bin/bash')):
sh_ = '/bin/bash'
if isinstance(cmd, (list, tuple)):
cmd = ' '.join([str(i) for i in cmd])
cmd = 'chroot {0} {1} -c {2}'.format(root, sh_, _cmd_quote(cmd))
run_func = __context__.pop('cmd.run_chroot.func', run_all)
ret = run_func(cmd,
runas=runas,
cwd=cwd,
stdin=stdin,
shell=shell,
python_shell=python_shell,
env=env,
clean_env=clean_env,
template=template,
rstrip=rstrip,
umask=umask,
output_loglevel=output_loglevel,
log_callback=log_callback,
quiet=quiet,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
saltenv=saltenv,
pillarenv=kwargs.get('pillarenv'),
pillar=kwargs.get('pillar'),
use_vt=use_vt,
bg=bg)
# Kill processes running in the chroot
for i in range(6):
pids = _chroot_pids(root)
if not pids:
break
for pid in pids:
# use sig 15 (TERM) for first 3 attempts, then 9 (KILL)
sig = 15 if i < 3 else 9
os.kill(pid, sig)
if _chroot_pids(root):
log.error('Processes running in chroot could not be killed, '
'filesystem will remain mounted')
__salt__['mount.umount'](os.path.join(root, 'proc'))
__salt__['mount.umount'](os.path.join(root, 'dev'))
return ret
def _is_valid_shell(shell):
'''
Attempts to search for valid shells on a system and
see if a given shell is in the list
'''
if salt.utils.is_windows():
return True # Don't even try this for Windows
shells = '/etc/shells'
available_shells = []
if os.path.exists(shells):
try:
with salt.utils.fopen(shells, 'r') as shell_fp:
lines = shell_fp.read().splitlines()
for line in lines:
if line.startswith('#'):
continue
else:
available_shells.append(line)
except OSError:
return True
else:
# No known method of determining available shells
return None
if shell in available_shells:
return True
else:
return False
def shells():
'''
Lists the valid shells on this system via the /etc/shells file
.. versionadded:: 2015.5.0
CLI Example::
salt '*' cmd.shells
'''
shells_fn = '/etc/shells'
ret = []
if os.path.exists(shells_fn):
try:
with salt.utils.fopen(shells_fn, 'r') as shell_fp:
lines = shell_fp.read().splitlines()
for line in lines:
line = line.strip()
if line.startswith('#'):
continue
elif not line:
continue
else:
ret.append(line)
except OSError:
log.error("File '{0}' was not found".format(shells_fn))
return ret
def powershell(cmd,
cwd=None,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
env=None,
clean_env=False,
template=None,
rstrip=True,
umask=None,
output_loglevel='debug',
quiet=False,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
saltenv='base',
use_vt=False,
**kwargs):
'''
Execute the passed PowerShell command and return the output as a string.
.. versionadded:: Boron
.. warning::
This passes the cmd argument directly to PowerShell
without any further processing! Be absolutely sure that you
have properly sanitized the command passed to this function
and do not use untrusted inputs.
Note that ``env`` represents the environment variables for the command, and
should be formatted as a dict, or a YAML string which resolves to a dict.
:param str cmd: The powershell command to run.
:param str cwd: The current working directory to execute the command in
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in cases
where sensitive information must be read from standard input:
:param str runas: User to run script as. If running on a Windows minion you
must also pass a password
:param str password: Windows only. Pass a password if you specify runas.
This parameter will be ignored for other OSes
.. versionadded:: Boron
:param str shell: Shell to execute under. Defaults to the system default
shell.
:param bool python_shell: If False, let python handle the positional
arguments. Set to True to use shell features, such as pipes or redirection
:param list env: A list of environment variables to be set prior to
execution.
Example:
.. code-block:: yaml
salt://scripts/foo.sh:
cmd.script:
- env:
- BATCH: 'yes'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :doc:`here
</topics/troubleshooting/yaml_idiosyncrasies>`.
Variables as values are not evaluated. So $PATH in the following
example is a literal '$PATH':
.. code-block:: yaml
salt://scripts/bar.sh:
cmd.script:
- env: "PATH=/some/path:$PATH"
One can still use the existing $PATH by using a bit of Jinja:
.. code-block:: yaml
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
mycommand:
cmd.run:
- name: ls -l /
- env:
- PATH: {{ [current_path, '/my/special/bin']|join(':') }}
:param bool clean_env: Attempt to clean out all other shell environment
variables and set only those provided in the 'env' argument to this
function.
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja, mako,
and wempy are supported
:param bool rstrip: Strip all whitespace off the end of output before it is
returned.
:param str umask: The umask (in octal) to use when running the command.
:param str output_loglevel: Control the loglevel at which the output from
the command is logged. Note that the command being run will still be logged
(loglevel: DEBUG) regardless, unless ``quiet`` is used for this value.
:param int timeout: A timeout in seconds for the executed process to return.
:param bool use_vt: Use VT utils (saltstack) to stream the command output
more interactively to the console and the logs. This is experimental.
:param bool reset_system_locale: Resets the system locale
:param bool ignore_retcode: Ignore the return code
:param str saltenv: The salt environment to use. Default is 'base'
CLI Example:
.. code-block:: powershell
salt '*' cmd.powershell "$PSVersionTable.CLRVersion"
'''
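# The command is piped into ConvertTo-Json below, so it must go through a
# shell; default python_shell to True unless the caller overrides it.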
if 'python_shell' in kwargs:
python_shell = kwargs.pop('python_shell')
else:
python_shell = True
# Append PowerShell Object formatting
cmd = '{0} | ConvertTo-Json -Depth 32'.format(cmd)
# Retrieve the response, while overriding shell with 'powershell'
response = run(cmd,
cwd=cwd,
stdin=stdin,
runas=runas,
shell='powershell',
env=env,
clean_env=clean_env,
template=template,
rstrip=rstrip,
umask=umask,
output_loglevel=output_loglevel,
quiet=quiet,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
saltenv=saltenv,
use_vt=use_vt,
python_shell=python_shell,
**kwargs)
try:
return json.loads(response)
except Exception:
log.error("Error converting PowerShell JSON return", exc_info=True)
return {}
def run_bg(cmd,
cwd=None,
runas=None,
shell=DEFAULT_SHELL,
python_shell=None,
env=None,
clean_env=False,
template=None,
umask=None,
log_callback=None,
timeout=None,
reset_system_locale=True,
saltenv='base',
**kwargs):
r'''
.. versionadded:: Boron
Execute the passed command in the background and return its PID
Note that ``env`` represents the environment variables for the command, and
should be formatted as a dict, or a YAML string which resolves to a dict.
:param str cmd: The command to run. ex: 'ls -lart /home'
:param str cwd: The current working directory to execute the command in,
defaults to `/root` (`C:\` in windows)
:param str runas: User to run script as. If running on a Windows minion you
must also pass a password
:param str shell: Shell to execute under. Defaults to the system default
shell.
:param bool python_shell: If False, let python handle the positional
arguments. Set to True to use shell features, such as pipes or redirection
:param list env: A list of environment variables to be set prior to
execution.
Example:
.. code-block:: yaml
salt://scripts/foo.sh:
cmd.script:
- env:
- BATCH: 'yes'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :doc:`here
</topics/troubleshooting/yaml_idiosyncrasies>`.
Variables as values are not evaluated. So $PATH in the following
example is a literal '$PATH':
.. code-block:: yaml
salt://scripts/bar.sh:
cmd.script:
- env: "PATH=/some/path:$PATH"
One can still use the existing $PATH by using a bit of Jinja:
.. code-block:: yaml
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
mycommand:
cmd.run:
- name: ls -l /
- env:
- PATH: {{ [current_path, '/my/special/bin']|join(':') }}
:param bool clean_env: Attempt to clean out all other shell environment
variables and set only those provided in the 'env' argument to this
function.
:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja, mako,
and wempy are supported
:param str umask: The umask (in octal) to use when running the command.
:param int timeout: A timeout in seconds for the executed process to return.
.. warning::
This function does not process commands through a shell
unless the python_shell flag is set to True. This means that any
shell-specific functionality such as 'echo' or the use of pipes,
redirection or &&, should either be migrated to cmd.shell or
have the python_shell=True flag set here.
The use of python_shell=True means that the shell will accept _any_ input
including potentially malicious commands such as 'good_command;rm -rf /'.
Be absolutely certain that you have sanitized your input prior to using
python_shell=True
CLI Example:
.. code-block:: bash
salt '*' cmd.run_bg "ls -l | awk '/foo/{print \\$2}'"
The template arg can be set to 'jinja' or another supported template
engine to render the command arguments before execution.
For example:
.. code-block:: bash
salt '*' cmd.run_bg template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
Specify an alternate shell with the shell parameter:
.. code-block:: bash
salt '*' cmd.run "Get-ChildItem C:\\ " shell='powershell'
If an equal sign (``=``) appears in an argument to a Salt command it is
interpreted as a keyword argument in the format ``key=val``. That
processing can be bypassed in order to pass an equal sign through to the
remote shell command by manually specifying the kwarg:
.. code-block:: bash
salt '*' cmd.run_bg cmd='sed -e s/=/:/g'
'''
python_shell = _python_shell_default(python_shell,
kwargs.get('__pub_jid', ''))
res = _run(cmd,
stdin=None,
stderr=None,
stdout=None,
output_loglevel=None,
use_vt=None,
bg=True,
with_communicate=False,
rstrip=False,
runas=runas,
shell=shell,
python_shell=python_shell,
cwd=cwd,
env=env,
clean_env=clean_env,
template=template,
umask=umask,
log_callback=log_callback,
timeout=timeout,
reset_system_locale=reset_system_locale,
# ignore_retcode=ignore_retcode,
saltenv=saltenv,
pillarenv=kwargs.get('pillarenv'),
pillar_override=kwargs.get('pillar'),
# password=kwargs.get('password', None),
)
return {
'pid': res['pid']
}
| [] | [] | [] | [] | [] | python | 0 | 0 | |
connect.go | package goproxy
import (
"bufio"
"bytes"
"crypto/tls"
"errors"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"regexp"
"strings"
"sync"
"sync/atomic"
)
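// ConnectActionLiteral enumerates the possible ways the proxy can handle a
// CONNECT request (accept, reject, MITM, hijack, ...).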
type ConnectActionLiteral int
const (
ConnectAccept = iota
ConnectReject
ConnectMitm
ConnectHijack
ConnectHTTPMitm
ConnectProxyAuthHijack
)
var (
OkConnect = &ConnectAction{Action: ConnectAccept, TLSConfig: TLSConfigFromCA(&GoproxyCa)}
MitmConnect = &ConnectAction{Action: ConnectMitm, TLSConfig: TLSConfigFromCA(&GoproxyCa)}
HTTPMitmConnect = &ConnectAction{Action: ConnectHTTPMitm, TLSConfig: TLSConfigFromCA(&GoproxyCa)}
RejectConnect = &ConnectAction{Action: ConnectReject, TLSConfig: TLSConfigFromCA(&GoproxyCa)}
httpsRegexp = regexp.MustCompile(`^https:\/\/`)
)
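// ConnectAction describes how to handle a CONNECT request: the Action to take,
// an optional Hijack callback that receives the raw client connection, and a
// TLSConfig factory used when man-in-the-middling TLS traffic.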
type ConnectAction struct {
Action ConnectActionLiteral
Hijack func(req *http.Request, client net.Conn, ctx *ProxyCtx)
TLSConfig func(host string, ctx *ProxyCtx) (*tls.Config, error)
}
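// stripPort returns the host part of a "host:port" string, or the input
// unchanged if it contains no port.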
func stripPort(s string) string {
ix := strings.IndexRune(s, ':')
if ix == -1 {
return s
}
return s[:ix]
}
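// dial opens a connection using the transport's Dial function when one is
// configured, falling back to net.Dial otherwise.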
func (proxy *ProxyHttpServer) dial(network, addr string) (c net.Conn, err error) {
if proxy.Tr.Dial != nil {
return proxy.Tr.Dial(network, addr)
}
return net.Dial(network, addr)
}
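// connectDial opens a connection for CONNECT handling, preferring the proxy's
// ConnectDial (e.g. via an upstream proxy) over the plain dialer.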
func (proxy *ProxyHttpServer) connectDial(network, addr string) (c net.Conn, err error) {
if proxy.ConnectDial == nil {
return proxy.dial(network, addr)
}
return proxy.ConnectDial(network, addr)
}
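// handleConnect hijacks the client connection of a CONNECT request, consults
// the registered CONNECT handlers for a decision, and then tunnels, hijacks,
// MITM-proxies (plain HTTP or TLS) or rejects the connection accordingly.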
func (proxy *ProxyHttpServer) handleConnect(w http.ResponseWriter, r *http.Request) {
hij, ok := w.(http.Hijacker)
if !ok {
panic("httpserver does not support hijacking")
}
proxyClient, _, e := hij.Hijack()
if e != nil {
panic("Cannot hijack connection " + e.Error())
}
ctx := &ProxyCtx{
Req: r,
Session: atomic.AddInt64(&proxy.sess, 1),
proxy: proxy,
signer: signHost,
}
if proxy.Signer != nil {
ctx.signer = proxy.Signer
}
// Allow CONNECT by default, using the remote host from the request.
// Handlers registered for HTTPS may change this decision further
// down.
todo, host := OkConnect, r.URL.Host
ctx.Logf("Running %d CONNECT handlers", len(proxy.httpsHandlers))
for i, h := range proxy.httpsHandlers {
newtodo, newhost := h.HandleConnect(host, ctx)
// If a handler returned a decision, stop iterating
if newtodo != nil {
todo, host = newtodo, newhost
ctx.Logf("on %dth handler: %v %s", i, todo, host)
break
}
}
switch todo.Action {
case ConnectAccept:
if !hasPort.MatchString(host) {
host += ":80"
}
targetSite, err := proxy.connectDial("tcp", host)
if err != nil {
httpError(proxyClient, ctx, err)
return
}
ctx.Logf("Accepting CONNECT to %s", host)
proxyClient.Write([]byte("HTTP/1.0 200 OK\r\n\r\n"))
go func() {
var wg sync.WaitGroup
wg.Add(2)
go copyOrWarn(ctx, targetSite, proxyClient, &wg)
go copyOrWarn(ctx, proxyClient, targetSite, &wg)
wg.Wait()
for _, con := range []net.Conn{targetSite, proxyClient} {
if tcp, ok := con.(*net.TCPConn); ok {
tcp.CloseWrite()
tcp.CloseRead()
}
}
proxyClient.Close()
targetSite.Close()
}()
case ConnectHijack:
ctx.Logf("Hijacking CONNECT to %s", host)
proxyClient.Write([]byte("HTTP/1.0 200 OK\r\n\r\n"))
todo.Hijack(r, proxyClient, ctx)
case ConnectHTTPMitm:
proxyClient.Write([]byte("HTTP/1.0 200 OK\r\n\r\n"))
ctx.Logf("Assuming CONNECT is plain HTTP tunneling, mitm proxying it")
client := bufio.NewReader(proxyClient)
var (
req *http.Request
err error
)
for {
req, err = http.ReadRequest(client)
if err != nil {
if err != io.EOF {
ctx.Warnf("Cannot read request of MITM HTTP client: %+#v", err)
}
break
}
req.URL.Scheme = "http"
req.URL.Host = r.Host
req.RemoteAddr = r.RemoteAddr
// We create a new context but populate it with the
// previous UserData in order to be able to correlate
// the original request to CONNECT with any subsequent
// request.
nctx := &ProxyCtx{
Req: req,
Session: atomic.AddInt64(&proxy.sess, 1),
proxy: proxy,
UserData: ctx.UserData,
}
if end, err := proxy.handleRequest(NewConnResponseWriter(proxyClient), req, nctx); end {
if err != nil {
ctx.Warnf("Error during serving MITM HTTP request: %+#v", err)
}
break
}
if req.Close {
break
}
}
proxyClient.Close()
case ConnectMitm:
ctx.Logf("Assuming CONNECT is TLS, mitm proxying it")
tlsConfig := defaultTLSConfig
if todo.TLSConfig != nil {
var err error
tlsConfig, err = todo.TLSConfig(host, ctx)
if err != nil {
httpError(proxyClient, ctx, err)
return
}
}
proxyClient.Write([]byte("HTTP/1.0 200 OK\r\n\r\n"))
rawClientTls := tls.Server(proxyClient, tlsConfig)
if err := rawClientTls.Handshake(); err != nil {
ctx.Warnf("Cannot handshake client %v %v", proxyClient.RemoteAddr(), err)
return
}
clientTls := bufio.NewReader(rawClientTls)
// [oec, 2019-11-02] debugging helper
//
// The following has helped me to track a nasty problem, it
// might help you too. Use them instead of the clientTls in the
// line above along with the routines commented out in the
// for-loop.
/*
var previous, current string
buf := &bytes.Buffer{}
clientTls := bufio.NewReader(io.TeeReader(rawClientTls, buf))
*/
for {
// Read a request from the client.
req, err := http.ReadRequest(clientTls)
// [oec, 2019-11-02] debugging helper
/*
previous = current
current = buf.String()
buf.Reset()
*/
if err != nil {
if err != io.EOF {
ctx.Warnf("Cannot read TLS request from mitm'd client for %v: %v", r.Host, err)
// [oec, 2019-11-02] debugging helper
/*
ctx.Warnf("\n[31mprevious request:\n%s«\n[33mcurrent request:\n%s«[0m\n",
strings.ReplaceAll(previous, "\r\n", "\\r\\n\r\n"),
strings.ReplaceAll(current, "\r\n", "\\r\\n\r\n"))
*/
}
break
}
// We need to read the whole body and reset the buffer.
// Otherwise, traces of previous requests can be left
// in the buffer, if the previous request's
// Content-Length was incorrect or the Body wasn't
// read.
body := &bytes.Buffer{}
io.Copy(body, req.Body)
req.Body.Close()
req.Body = ioutil.NopCloser(body)
clientTls.Reset(rawClientTls)
req.URL.Scheme = "https"
req.URL.Host = r.URL.Host
req.RemoteAddr = r.RemoteAddr
// We create a new context but populate it with the
// previous UserData in order to be able to correlate
// the original request to CONNECT with any subsequent
// request.
nctx := &ProxyCtx{
Req: req,
Session: atomic.AddInt64(&proxy.sess, 1),
proxy: proxy,
UserData: ctx.UserData,
}
if end, err := proxy.handleRequest(NewConnResponseWriter(rawClientTls), req, nctx); end {
if err != nil {
ctx.Warnf("Error during serving MITM HTTPS request: %v", err)
}
break
}
if req.Close {
break
}
}
rawClientTls.Close()
case ConnectProxyAuthHijack:
proxyClient.Write([]byte("HTTP/1.1 407 Proxy Authentication Required\r\n"))
todo.Hijack(r, proxyClient, ctx)
case ConnectReject:
if ctx.Resp != nil {
if err := ctx.Resp.Write(proxyClient); err != nil {
ctx.Warnf("Cannot write response that reject http CONNECT: %v", err)
}
}
proxyClient.Close()
}
}
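// httpError replies with a 502 Bad Gateway and closes the client connection.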
func httpError(w io.WriteCloser, ctx *ProxyCtx, err error) {
if _, err := io.WriteString(w, "HTTP/1.1 502 Bad Gateway\r\n\r\n"); err != nil {
ctx.Warnf("Error responding to client: %s", err)
}
if err := w.Close(); err != nil {
ctx.Warnf("Error closing client connection: %s", err)
}
}
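// copyOrWarn copies src into dst, logs any error, closes both ends so the
// goroutine copying in the other direction terminates too, and releases the
// wait group.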
func copyOrWarn(ctx *ProxyCtx, dst io.WriteCloser, src io.ReadCloser, wg *sync.WaitGroup) {
if _, err := io.Copy(dst, src); err != nil {
ctx.Warnf("Error copying to client: %s", err)
}
// Close both ends, so that another goroutine of this function in the
// other direction terminates as well.
src.Close()
dst.Close()
wg.Done()
}
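// dialerFromEnv builds a CONNECT dialer from the HTTPS_PROXY/https_proxy
// environment variables, or returns nil if neither is set.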
func dialerFromEnv(proxy *ProxyHttpServer) func(network, addr string) (net.Conn, error) {
https_proxy := os.Getenv("HTTPS_PROXY")
if https_proxy == "" {
https_proxy = os.Getenv("https_proxy")
}
if https_proxy == "" {
return nil
}
return proxy.NewConnectDialToProxy(https_proxy)
}
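// NewConnectDialToProxy returns a dial function that tunnels connections
// through the given upstream proxy URL.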
func (proxy *ProxyHttpServer) NewConnectDialToProxy(https_proxy string) func(network, addr string) (net.Conn, error) {
return proxy.NewConnectDialToProxyWithHandler(https_proxy, nil)
}
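// NewConnectDialToProxyWithHandler is like NewConnectDialToProxy, but lets the
// caller modify the outgoing CONNECT request (for example to add
// authentication headers) before it is sent to the upstream proxy.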
func (proxy *ProxyHttpServer) NewConnectDialToProxyWithHandler(https_proxy string, connectReqHandler func(req *http.Request)) func(network, addr string) (net.Conn, error) {
u, err := url.Parse(https_proxy)
if err != nil {
return nil
}
if u.Scheme == "" || u.Scheme == "http" {
if strings.IndexRune(u.Host, ':') == -1 {
u.Host += ":80"
}
return func(network, addr string) (net.Conn, error) {
connectReq := &http.Request{
Method: "CONNECT",
URL: &url.URL{Opaque: addr},
Host: addr,
Header: make(http.Header),
}
if connectReqHandler != nil {
connectReqHandler(connectReq)
}
c, err := proxy.dial(network, u.Host)
if err != nil {
return nil, err
}
connectReq.Write(c)
// Read response.
// Okay to use and discard buffered reader here, because
// TLS server will not speak until spoken to.
br := bufio.NewReader(c)
resp, err := http.ReadResponse(br, connectReq)
if err != nil {
c.Close()
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
c.Close()
return nil, errors.New("proxy refused connection " + string(body))
}
return c, nil
}
}
if u.Scheme == "https" {
if strings.IndexRune(u.Host, ':') == -1 {
u.Host += ":443"
}
return func(network, addr string) (net.Conn, error) {
c, err := proxy.dial(network, u.Host)
if err != nil {
return nil, err
}
c = tls.Client(c, proxy.Tr.TLSClientConfig)
connectReq := &http.Request{
Method: "CONNECT",
URL: &url.URL{Opaque: addr},
Host: addr,
Header: make(http.Header),
}
if connectReqHandler != nil {
connectReqHandler(connectReq)
}
connectReq.Write(c)
// Read response.
// Okay to use and discard buffered reader here, because
// TLS server will not speak until spoken to.
br := bufio.NewReader(c)
resp, err := http.ReadResponse(br, connectReq)
if err != nil {
c.Close()
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 500))
if err != nil {
return nil, err
}
c.Close()
return nil, errors.New("proxy refused connection" + string(body))
}
return c, nil
}
}
return nil
}
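// TLSConfigFromCA returns a function that builds a per-host TLS config whose
// leaf certificate is signed on the fly by the given CA.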
func TLSConfigFromCA(ca *tls.Certificate) func(host string, ctx *ProxyCtx) (*tls.Config, error) {
return func(host string, ctx *ProxyCtx) (*tls.Config, error) {
var err error
var cert *tls.Certificate
hostname := stripPort(host)
config := *defaultTLSConfig
ctx.Logf("signing for %s", hostname)
cert, err = ctx.signer(ca, []string{hostname})
if cert == nil {
if err != nil {
ctx.Warnf("Cannot sign host certificate with provided CA: %s", err)
return nil, err
}
}
config.Certificates = append(config.Certificates, *cert)
return &config, nil
}
}
| [
"\"HTTPS_PROXY\"",
"\"https_proxy\""
] | [] | [
"HTTPS_PROXY",
"https_proxy"
] | [] | ["HTTPS_PROXY", "https_proxy"] | go | 2 | 0 | |
tests/aci-e2e/e2e-aci_test.go | /*
Copyright 2020 Docker Compose CLI authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
"testing"
"time"
"github.com/docker/docker/pkg/fileutils"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
"gotest.tools/v3/icmd"
"gotest.tools/v3/poll"
"github.com/Azure/azure-sdk-for-go/profiles/2019-03-01/resources/mgmt/resources"
"github.com/Azure/azure-storage-file-go/azfile"
"github.com/Azure/go-autorest/autorest/to"
"github.com/prometheus/tsdb/fileutil"
"github.com/docker/compose-cli/aci"
"github.com/docker/compose-cli/aci/convert"
"github.com/docker/compose-cli/aci/login"
"github.com/docker/compose-cli/api/containers"
"github.com/docker/compose-cli/cli/cmd"
"github.com/docker/compose-cli/context/store"
"github.com/docker/compose-cli/errdefs"
. "github.com/docker/compose-cli/tests/framework"
)
const (
contextName = "aci-test"
)
var (
binDir string
location = []string{"eastus2"}
)
func TestMain(m *testing.M) {
p, cleanup, err := SetupExistingCLI()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
binDir = p
exitCode := m.Run()
cleanup()
os.Exit(exitCode)
}
// Cannot be parallelized as login/logout is global.
func TestLoginLogout(t *testing.T) {
startTime := strconv.Itoa(int(time.Now().UnixNano()))
c := NewE2eCLI(t, binDir)
rg := "E2E-" + startTime
t.Run("login", func(t *testing.T) {
azureLogin(t, c)
})
t.Run("create context", func(t *testing.T) {
sID := getSubscriptionID(t)
location := getTestLocation()
err := createResourceGroup(t, sID, rg, location)
assert.Check(t, is.Nil(err))
t.Cleanup(func() {
_ = deleteResourceGroup(t, rg)
})
c.RunDockerCmd("context", "create", "aci", contextName, "--subscription-id", sID, "--resource-group", rg, "--location", location)
res := c.RunDockerCmd("context", "use", contextName)
res.Assert(t, icmd.Expected{Out: contextName})
res = c.RunDockerCmd("context", "ls")
res.Assert(t, icmd.Expected{Out: contextName + " *"})
})
t.Run("delete context", func(t *testing.T) {
res := c.RunDockerCmd("context", "use", "default")
res.Assert(t, icmd.Expected{Out: "default"})
res = c.RunDockerCmd("context", "rm", contextName)
res.Assert(t, icmd.Expected{Out: contextName})
})
t.Run("logout", func(t *testing.T) {
_, err := os.Stat(login.GetTokenStorePath())
assert.NilError(t, err)
res := c.RunDockerCmd("logout", "azure")
res.Assert(t, icmd.Expected{Out: "Removing login credentials for Azure"})
_, err = os.Stat(login.GetTokenStorePath())
errMsg := "no such file or directory"
if runtime.GOOS == "windows" {
errMsg = "The system cannot find the file specified"
}
assert.ErrorContains(t, err, errMsg)
})
t.Run("create context fail", func(t *testing.T) {
res := c.RunDockerOrExitError("context", "create", "aci", "fail-context")
res.Assert(t, icmd.Expected{
ExitCode: errdefs.ExitCodeLoginRequired,
Err: `not logged in to azure, you need to run "docker login azure" first`,
})
})
}
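// getTestLocation picks a random Azure region from the allowed locations for
// this test run.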
func getTestLocation() string {
rand.Seed(time.Now().Unix())
n := rand.Intn(len(location))
return location[n]
}
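// uploadTestFile writes testFileContent to testFileName on the given Azure
// file share, authenticating with the storage account key.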
func uploadTestFile(t *testing.T, aciContext store.AciContext, accountName string, fileshareName string, testFileName string, testFileContent string) {
storageLogin := login.StorageLoginImpl{AciContext: aciContext}
key, err := storageLogin.GetAzureStorageAccountKey(context.TODO(), accountName)
assert.NilError(t, err)
cred, err := azfile.NewSharedKeyCredential(accountName, key)
assert.NilError(t, err)
u, _ := url.Parse(fmt.Sprintf("https://%s.file.core.windows.net/%s", accountName, fileshareName))
uploadFile(t, *cred, u.String(), testFileName, testFileContent)
}
const (
fileshareName = "dockertestshare"
fileshareName2 = "dockertestshare2"
)
func TestRunVolume(t *testing.T) {
const (
testFileContent = "Volume mounted successfully!"
testFileName = "index.html"
)
c := NewParallelE2eCLI(t, binDir)
sID, rg, location := setupTestResourceGroup(t, c)
// Bootstrap volume
aciContext := store.AciContext{
SubscriptionID: sID,
Location: location,
ResourceGroup: rg,
}
// Used in subtests
var (
container string
hostIP string
endpoint string
volumeID string
accountName = "e2e" + strconv.Itoa(int(time.Now().UnixNano()))
)
t.Run("check empty volume name validity", func(t *testing.T) {
invalidName := ""
res := c.RunDockerOrExitError("volume", "create", "--storage-account", invalidName, fileshareName)
res.Assert(t, icmd.Expected{
ExitCode: 1,
Err: `parameter=accountName constraint=MinLength value="" details: value length must be greater than or equal to 3`,
})
})
t.Run("check volume name validity", func(t *testing.T) {
invalidName := "some-storage-123"
res := c.RunDockerOrExitError("volume", "create", "--storage-account", invalidName, fileshareName)
res.Assert(t, icmd.Expected{
ExitCode: 1,
Err: "some-storage-123 is not a valid storage account name. Storage account name must be between 3 and 24 characters in length and use numbers and lower-case letters only.",
})
})
t.Run("create volumes", func(t *testing.T) {
c.RunDockerCmd("volume", "create", "--storage-account", accountName, fileshareName)
})
volumeID = accountName + "/" + fileshareName
t.Cleanup(func() {
c.RunDockerCmd("volume", "rm", volumeID)
res := c.RunDockerCmd("volume", "ls")
lines := lines(res.Stdout())
assert.Equal(t, len(lines), 1)
})
t.Run("create second fileshare", func(t *testing.T) {
c.RunDockerCmd("volume", "create", "--storage-account", accountName, fileshareName2)
})
volumeID2 := accountName + "/" + fileshareName2
t.Run("list volumes", func(t *testing.T) {
res := c.RunDockerCmd("volume", "ls", "--quiet")
l := lines(res.Stdout())
assert.Equal(t, len(l), 2)
assert.Equal(t, l[0], volumeID)
assert.Equal(t, l[1], volumeID2)
res = c.RunDockerCmd("volume", "ls")
l = lines(res.Stdout())
assert.Equal(t, len(l), 3)
firstAccount := l[1]
fields := strings.Fields(firstAccount)
assert.Equal(t, fields[0], volumeID)
secondAccount := l[2]
fields = strings.Fields(secondAccount)
assert.Equal(t, fields[0], volumeID2)
})
t.Run("inspect volumes", func(t *testing.T) {
res := c.RunDockerCmd("volume", "inspect", volumeID)
assert.Equal(t, res.Stdout(), fmt.Sprintf(`{
"ID": %q,
"Description": "Fileshare %s in %s storage account"
}
`, volumeID, fileshareName, accountName))
res = c.RunDockerCmd("volume", "inspect", volumeID2)
assert.Equal(t, res.Stdout(), fmt.Sprintf(`{
"ID": %q,
"Description": "Fileshare %s in %s storage account"
}
`, volumeID2, fileshareName2, accountName))
})
t.Run("delete only fileshare", func(t *testing.T) {
c.RunDockerCmd("volume", "rm", volumeID2)
res := c.RunDockerCmd("volume", "ls")
lines := lines(res.Stdout())
assert.Equal(t, len(lines), 2)
assert.Assert(t, !strings.Contains(res.Stdout(), fileshareName2), "second fileshare still visible after rm")
})
t.Run("upload file", func(t *testing.T) {
uploadTestFile(t, aciContext, accountName, fileshareName, testFileName, testFileContent)
})
t.Run("run", func(t *testing.T) {
mountTarget := "/usr/share/nginx/html"
res := c.RunDockerCmd("-D", "run", "-d", "-v", fmt.Sprintf("%s:%s", volumeID, mountTarget), "-p", "80:80", "nginx")
fmt.Println(res.Stdout())
res = c.RunDockerCmd("run", "-d", "-v", fmt.Sprintf("%s:%s", volumeID, mountTarget), "-p", "80:80", "nginx")
container = getContainerName(res.Stdout())
})
t.Run("inspect", func(t *testing.T) {
res := c.RunDockerCmd("inspect", container)
containerInspect, err := parseContainerInspect(res.Stdout())
assert.NilError(t, err)
assert.Equal(t, containerInspect.Platform, "Linux")
assert.Equal(t, containerInspect.HostConfig.CPULimit, 1.0)
assert.Equal(t, containerInspect.HostConfig.CPUReservation, 1.0)
assert.Equal(t, containerInspect.HostConfig.RestartPolicy, containers.RestartPolicyNone)
assert.Assert(t, is.Len(containerInspect.Ports, 1))
hostIP = containerInspect.Ports[0].HostIP
endpoint = fmt.Sprintf("http://%s:%d", containerInspect.Ports[0].HostIP, containerInspect.Ports[0].HostPort)
})
t.Run("ps", func(t *testing.T) {
res := c.RunDockerCmd("ps")
out := lines(res.Stdout())
l := out[len(out)-1]
assert.Assert(t, strings.Contains(l, container), "Looking for %q in line: %s", container, l)
assert.Assert(t, strings.Contains(l, "nginx"))
assert.Assert(t, strings.Contains(l, "Running"))
assert.Assert(t, strings.Contains(l, hostIP+":80->80/tcp"))
})
t.Run("http get", func(t *testing.T) {
output := HTTPGetWithRetry(t, endpoint, http.StatusOK, 2*time.Second, 20*time.Second)
assert.Assert(t, strings.Contains(output, testFileContent), "Actual content: "+output)
})
t.Run("logs", func(t *testing.T) {
res := c.RunDockerCmd("logs", container)
res.Assert(t, icmd.Expected{Out: "GET"})
})
t.Run("exec", func(t *testing.T) {
res := c.RunDockerOrExitError("exec", container, "pwd")
assert.Assert(t, strings.Contains(res.Stdout(), "/"))
res = c.RunDockerOrExitError("exec", container, "echo", "fail_with_argument")
res.Assert(t, icmd.Expected{
ExitCode: 1,
Err: "ACI exec command does not accept arguments to the command. Only the binary should be specified",
})
})
t.Run("logs follow", func(t *testing.T) {
cmd := c.NewDockerCmd("logs", "--follow", container)
res := icmd.StartCmd(cmd)
checkUp := func(t poll.LogT) poll.Result {
r, _ := http.Get(endpoint + "/is_up")
if r != nil && r.StatusCode == http.StatusNotFound {
return poll.Success()
}
return poll.Continue("waiting for container to serve request")
}
poll.WaitOn(t, checkUp, poll.WithDelay(1*time.Second), poll.WithTimeout(60*time.Second))
assert.Assert(t, !strings.Contains(res.Stdout(), "/test"))
checkLogs := func(t poll.LogT) poll.Result {
if strings.Contains(res.Stdout(), "/test") {
return poll.Success()
}
return poll.Continue("waiting for logs to contain /test")
}
// Do request on /test
go func() {
time.Sleep(3 * time.Second)
_, _ = http.Get(endpoint + "/test")
}()
poll.WaitOn(t, checkLogs, poll.WithDelay(3*time.Second), poll.WithTimeout(20*time.Second))
if runtime.GOOS == "windows" {
err := res.Cmd.Process.Kill()
assert.NilError(t, err)
} else {
err := res.Cmd.Process.Signal(syscall.SIGTERM)
assert.NilError(t, err)
}
})
t.Run("rm a running container", func(t *testing.T) {
res := c.RunDockerOrExitError("rm", container)
res.Assert(t, icmd.Expected{
Err: fmt.Sprintf("Error: you cannot remove a running container %s. Stop the container before attempting removal or force remove", container),
ExitCode: 1,
})
})
t.Run("force rm", func(t *testing.T) {
res := c.RunDockerCmd("rm", "-f", container)
res.Assert(t, icmd.Expected{Out: container})
checkStopped := func(t poll.LogT) poll.Result {
res := c.RunDockerOrExitError("inspect", container)
if res.ExitCode == 1 {
return poll.Success()
}
return poll.Continue("waiting for container to stop")
}
poll.WaitOn(t, checkStopped, poll.WithDelay(5*time.Second), poll.WithTimeout(60*time.Second))
})
}
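// lines splits trimmed command output into its individual lines.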
func lines(output string) []string {
return strings.Split(strings.TrimSpace(output), "\n")
}
func TestContainerRunAttached(t *testing.T) {
c := NewParallelE2eCLI(t, binDir)
_, groupID, location := setupTestResourceGroup(t, c)
// Used in subtests
var (
container string = "test-container"
endpoint string
followLogsProcess *icmd.Result
)
t.Run("run attached limits", func(t *testing.T) {
dnsLabelName := "nginx-" + groupID
fqdn := dnsLabelName + "." + location + ".azurecontainer.io"
cmd := c.NewDockerCmd(
"run",
"--name", container,
"--restart", "on-failure",
"--memory", "0.1G", "--cpus", "0.1",
"-p", "80:80",
"--domainname",
dnsLabelName,
"nginx",
)
followLogsProcess = icmd.StartCmd(cmd)
checkRunning := func(t poll.LogT) poll.Result {
res := c.RunDockerOrExitError("inspect", container)
if res.ExitCode == 0 && strings.Contains(res.Stdout(), `"Status": "Running"`) && !strings.Contains(res.Stdout(), `"HostIP": ""`) {
return poll.Success()
}
return poll.Continue("waiting for container to be running, current inspect result: \n%s", res.Combined())
}
poll.WaitOn(t, checkRunning, poll.WithDelay(5*time.Second), poll.WithTimeout(90*time.Second))
inspectRes := c.RunDockerCmd("inspect", container)
containerInspect, err := parseContainerInspect(inspectRes.Stdout())
assert.NilError(t, err)
assert.Equal(t, containerInspect.Platform, "Linux")
assert.Equal(t, containerInspect.HostConfig.CPULimit, 0.1)
assert.Equal(t, containerInspect.HostConfig.MemoryLimit, uint64(107374182))
assert.Equal(t, containerInspect.HostConfig.CPUReservation, 0.1)
assert.Equal(t, containerInspect.HostConfig.MemoryReservation, uint64(107374182))
assert.Equal(t, containerInspect.HostConfig.RestartPolicy, containers.RestartPolicyOnFailure)
assert.Assert(t, is.Len(containerInspect.Ports, 1))
port := containerInspect.Ports[0]
assert.Assert(t, port.HostIP != "", "empty hostIP, inspect: \n"+inspectRes.Stdout())
assert.Equal(t, port.ContainerPort, uint32(80))
assert.Equal(t, port.HostPort, uint32(80))
assert.Equal(t, containerInspect.Config.FQDN, fqdn)
endpoint = fmt.Sprintf("http://%s:%d", fqdn, port.HostPort)
assert.Assert(t, !strings.Contains(followLogsProcess.Stdout(), "/test"))
HTTPGetWithRetry(t, endpoint+"/test", http.StatusNotFound, 2*time.Second, 60*time.Second)
checkLog := func(t poll.LogT) poll.Result {
if strings.Contains(followLogsProcess.Stdout(), "/test") {
return poll.Success()
}
return poll.Continue("waiting for logs to contain /test")
}
poll.WaitOn(t, checkLog, poll.WithDelay(1*time.Second), poll.WithTimeout(20*time.Second))
})
t.Run("stop wrong container", func(t *testing.T) {
res := c.RunDockerOrExitError("stop", "unknown-container")
res.Assert(t, icmd.Expected{
Err: "Error: container unknown-container not found",
ExitCode: 1,
})
})
t.Run("stop container", func(t *testing.T) {
res := c.RunDockerCmd("stop", container)
res.Assert(t, icmd.Expected{Out: container})
waitForStatus(t, c, container, "Terminated", "Node Stopped")
})
t.Run("check we stoppped following logs", func(t *testing.T) {
// nolint errcheck
followLogsStopped := waitWithTimeout(func() { followLogsProcess.Cmd.Process.Wait() }, 10*time.Second)
assert.NilError(t, followLogsStopped, "Follow logs process did not stop after container is stopped")
})
t.Run("ps stopped container with --all", func(t *testing.T) {
res := c.RunDockerCmd("ps", container)
out := lines(res.Stdout())
assert.Assert(t, is.Len(out, 1))
res = c.RunDockerCmd("ps", "--all", container)
out = lines(res.Stdout())
assert.Assert(t, is.Len(out, 2))
})
t.Run("restart container", func(t *testing.T) {
res := c.RunDockerCmd("start", container)
res.Assert(t, icmd.Expected{Out: container})
waitForStatus(t, c, container, convert.StatusRunning)
})
t.Run("prune dry run", func(t *testing.T) {
res := c.RunDockerCmd("prune", "--dry-run")
assert.Equal(t, "Resources that would be deleted:\nTotal CPUs reclaimed: 0.00, total memory reclaimed: 0.00 GB\n", res.Stdout())
res = c.RunDockerCmd("prune", "--dry-run", "--force")
assert.Equal(t, "Resources that would be deleted:\n"+container+"\nTotal CPUs reclaimed: 0.10, total memory reclaimed: 0.10 GB\n", res.Stdout())
})
t.Run("prune", func(t *testing.T) {
res := c.RunDockerCmd("prune")
assert.Equal(t, "Deleted resources:\nTotal CPUs reclaimed: 0.00, total memory reclaimed: 0.00 GB\n", res.Stdout())
res = c.RunDockerCmd("ps")
l := lines(res.Stdout())
assert.Equal(t, 2, len(l))
res = c.RunDockerCmd("prune", "--force")
assert.Equal(t, "Deleted resources:\n"+container+"\nTotal CPUs reclaimed: 0.10, total memory reclaimed: 0.10 GB\n", res.Stdout())
res = c.RunDockerCmd("ps", "--all")
l = lines(res.Stdout())
assert.Equal(t, 1, len(l))
})
}
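// overwriteFileStorageAccount rewrites the compose file so it references the
// storage account created for this test run.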
func overwriteFileStorageAccount(t *testing.T, absComposefileName string, storageAccount string) {
data, err := ioutil.ReadFile(absComposefileName)
assert.NilError(t, err)
override := strings.Replace(string(data), "dockertestvolumeaccount", storageAccount, 1)
err = ioutil.WriteFile(absComposefileName, []byte(override), 0644)
assert.NilError(t, err)
}
func TestUpSecretsResources(t *testing.T) {
const (
composeProjectName = "aci_test"
web1 = composeProjectName + "_web1"
web2 = composeProjectName + "_web2"
secret1Name = "mytarget1"
secret1Value = "myPassword1\n"
secret2Name = "mysecret2"
secret2Value = "another_password\n"
)
var (
basefilePath = filepath.Join("..", "composefiles", "aci_secrets_resources")
composefilePath = filepath.Join(basefilePath, "compose.yml")
)
c := NewParallelE2eCLI(t, binDir)
_, _, _ = setupTestResourceGroup(t, c)
t.Run("compose up", func(t *testing.T) {
res := c.RunDockerCmd("compose", "up", "-D", "-f", composefilePath, "--project-name", composeProjectName)
fmt.Println(res.Stdout())
c.RunDockerCmd("compose", "up", "-f", composefilePath, "--project-name", composeProjectName)
res = c.RunDockerCmd("ps")
out := lines(res.Stdout())
// Check 2 containers running
assert.Assert(t, is.Len(out, 3))
})
t.Cleanup(func() {
c.RunDockerCmd("compose", "down", "--project-name", composeProjectName)
res := c.RunDockerCmd("ps")
out := lines(res.Stdout())
assert.Equal(t, len(out), 1)
})
res := c.RunDockerCmd("inspect", web1)
web1Inspect, err := parseContainerInspect(res.Stdout())
assert.NilError(t, err)
res = c.RunDockerCmd("inspect", web2)
web2Inspect, err := parseContainerInspect(res.Stdout())
assert.NilError(t, err)
t.Run("read secrets in service 1", func(t *testing.T) {
assert.Assert(t, is.Len(web1Inspect.Ports, 1))
endpoint := fmt.Sprintf("http://%s:%d", web1Inspect.Ports[0].HostIP, web1Inspect.Ports[0].HostPort)
output := HTTPGetWithRetry(t, endpoint+"/"+secret1Name, http.StatusOK, 2*time.Second, 20*time.Second)
// replace windows carriage return
output = strings.ReplaceAll(output, "\r", "")
assert.Equal(t, output, secret1Value)
output = HTTPGetWithRetry(t, endpoint+"/"+secret2Name, http.StatusOK, 2*time.Second, 20*time.Second)
output = strings.ReplaceAll(output, "\r", "")
assert.Equal(t, output, secret2Value)
})
t.Run("read secrets in service 2", func(t *testing.T) {
assert.Assert(t, is.Len(web2Inspect.Ports, 1))
endpoint := fmt.Sprintf("http://%s:%d", web2Inspect.Ports[0].HostIP, web2Inspect.Ports[0].HostPort)
output := HTTPGetWithRetry(t, endpoint+"/"+secret2Name, http.StatusOK, 2*time.Second, 20*time.Second)
output = strings.ReplaceAll(output, "\r", "")
assert.Equal(t, output, secret2Value)
HTTPGetWithRetry(t, endpoint+"/"+secret1Name, http.StatusNotFound, 2*time.Second, 20*time.Second)
})
t.Run("check resource limits", func(t *testing.T) {
assert.Equal(t, web1Inspect.HostConfig.CPULimit, 1.7)
assert.Equal(t, web1Inspect.HostConfig.MemoryLimit, uint64(1073741824))
assert.Equal(t, web1Inspect.HostConfig.CPUReservation, 1.5)
assert.Equal(t, web1Inspect.HostConfig.MemoryReservation, uint64(536870912))
assert.Equal(t, web2Inspect.HostConfig.CPULimit, 1.5)
assert.Equal(t, web2Inspect.HostConfig.MemoryLimit, uint64(751619276))
assert.Equal(t, web2Inspect.HostConfig.CPUReservation, 1.5)
assert.Equal(t, web2Inspect.HostConfig.MemoryReservation, uint64(751619276))
})
t.Run("check healthchecks inspect", func(t *testing.T) {
assert.Assert(t, web1Inspect.Healthcheck != nil)
assert.Equal(t, time.Duration(*web1Inspect.Healthcheck.Interval), 5*time.Second)
assert.DeepEqual(t, web1Inspect.Healthcheck.Test, []string{"curl", "-f", "http://localhost:80/healthz"})
assert.Assert(t, web2Inspect.Healthcheck == nil)
})
t.Run("healthcheck restart failed app", func(t *testing.T) {
endpoint := fmt.Sprintf("http://%s:%d", web1Inspect.Ports[0].HostIP, web1Inspect.Ports[0].HostPort)
HTTPGetWithRetry(t, endpoint+"/failtestserver", http.StatusOK, 3*time.Second, 3*time.Second)
logs := c.RunDockerCmd("logs", web1).Combined()
assert.Assert(t, strings.Contains(logs, "GET /healthz"))
assert.Assert(t, strings.Contains(logs, "GET /failtestserver"))
assert.Assert(t, strings.Contains(logs, "Server failing"))
checkLogsReset := func(logt poll.LogT) poll.Result {
res := c.RunDockerOrExitError("logs", web1)
if res.ExitCode == 0 &&
!strings.Contains(res.Combined(), "GET /failtestserver") &&
strings.Contains(res.Combined(), "Listening on port 80") &&
strings.Contains(res.Combined(), "GET /healthz") {
return poll.Success()
}
return poll.Continue("Logs not reset by healthcheck restart\n" + res.Combined())
}
poll.WaitOn(t, checkLogsReset, poll.WithDelay(5*time.Second), poll.WithTimeout(90*time.Second))
res := c.RunDockerCmd("inspect", web1)
web1Inspect, err = parseContainerInspect(res.Stdout())
assert.NilError(t, err)
assert.Equal(t, web1Inspect.Status, "Running")
})
}
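// TestUpUpdate deploys the aci-demo compose project with a volume and a custom DNS label,
// verifies the running services, re-deploys with a multi-port compose file, and finally tears the project down.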
func TestUpUpdate(t *testing.T) {
const (
composeProjectName = "acidemo"
serverContainer = composeProjectName + "_web"
wordsContainer = composeProjectName + "_words"
dbContainer = composeProjectName + "_db"
)
var (
singlePortVolumesComposefile = "aci_demo_port_volumes.yaml"
multiPortComposefile = "demo_multi_port.yaml"
)
c := NewParallelE2eCLI(t, binDir)
sID, groupID, location := setupTestResourceGroup(t, c)
composeAccountName := strings.ToLower(strings.ReplaceAll(groupID, "-", "") + "sa")
dstDir := filepath.Join(os.TempDir(), "e2e-aci-volume-"+composeAccountName)
srcDir := filepath.Join("..", "composefiles", "aci-demo")
err := fileutil.CopyDirs(srcDir, dstDir)
assert.NilError(t, err)
t.Cleanup(func() {
assert.NilError(t, os.RemoveAll(dstDir))
})
_, err = fileutils.CopyFile(filepath.Join(filepath.Join("..", "composefiles"), multiPortComposefile), filepath.Join(dstDir, multiPortComposefile))
assert.NilError(t, err)
singlePortVolumesComposefile = filepath.Join(dstDir, singlePortVolumesComposefile)
overwriteFileStorageAccount(t, singlePortVolumesComposefile, composeAccountName)
multiPortComposefile = filepath.Join(dstDir, multiPortComposefile)
volumeID := composeAccountName + "/" + fileshareName
const (
testFileName = "msg.txt"
testFileContent = "VOLUME_OK"
projectName = "acidemo"
)
var (
dnsLabelName = "nginx-" + groupID
fqdn = dnsLabelName + "." + location + ".azurecontainer.io"
)
t.Run("compose up", func(t *testing.T) {
aciContext := store.AciContext{
SubscriptionID: sID,
Location: location,
ResourceGroup: groupID,
}
c.RunDockerCmd("compose", "up", "-f", singlePortVolumesComposefile, "--domainname", dnsLabelName, "--project-name", projectName)
// Volume should be autocreated by the "compose up"
uploadTestFile(t, aciContext, composeAccountName, fileshareName, testFileName, testFileContent)
})
t.Cleanup(func() {
c.RunDockerCmd("volume", "rm", volumeID)
})
t.Run("check deployed compose app", func(t *testing.T) {
res := c.RunDockerCmd("ps")
out := lines(res.Stdout())
// Check three containers are running
assert.Assert(t, is.Len(out, 4))
webRunning := false
for _, l := range out {
if strings.Contains(l, serverContainer) {
webRunning = true
assert.Check(t, strings.Contains(l, ":80->80/tcp"))
}
}
assert.Assert(t, webRunning, "web container not running ; ps:\n"+res.Stdout())
res = c.RunDockerCmd("inspect", serverContainer)
containerInspect, err := parseContainerInspect(res.Stdout())
assert.NilError(t, err)
assert.Assert(t, is.Len(containerInspect.Ports, 1))
endpoint := fmt.Sprintf("http://%s:%d", containerInspect.Ports[0].HostIP, containerInspect.Ports[0].HostPort)
output := HTTPGetWithRetry(t, endpoint+"/words/noun", http.StatusOK, 2*time.Second, 20*time.Second)
assert.Assert(t, strings.Contains(output, `"word":`))
endpoint = fmt.Sprintf("http://%s:%d", fqdn, containerInspect.Ports[0].HostPort)
HTTPGetWithRetry(t, endpoint+"/words/noun", http.StatusOK, 2*time.Second, 20*time.Second)
body := HTTPGetWithRetry(t, endpoint+"/volume_test/"+testFileName, http.StatusOK, 2*time.Second, 20*time.Second)
assert.Assert(t, strings.Contains(body, testFileContent))
// Try to remove the volume while it's still in use
res = c.RunDockerOrExitError("volume", "rm", volumeID)
res.Assert(t, icmd.Expected{
ExitCode: 1,
Err: fmt.Sprintf(`Error: volume "%s/%s" is used in container group %q`,
composeAccountName, fileshareName, projectName),
})
})
t.Run("compose ps", func(t *testing.T) {
res := c.RunDockerCmd("compose", "ps", "--project-name", composeProjectName, "--quiet")
l := lines(res.Stdout())
assert.Assert(t, is.Len(l, 3))
res = c.RunDockerCmd("compose", "ps", "--project-name", composeProjectName)
l = lines(res.Stdout())
assert.Assert(t, is.Len(l, 4))
var wordsDisplayed, webDisplayed, dbDisplayed bool
for _, line := range l {
fields := strings.Fields(line)
containerID := fields[0]
switch containerID {
case wordsContainer:
wordsDisplayed = true
assert.DeepEqual(t, fields, []string{containerID, "words", "1/1"})
case dbContainer:
dbDisplayed = true
assert.DeepEqual(t, fields, []string{containerID, "db", "1/1"})
case serverContainer:
webDisplayed = true
assert.Equal(t, fields[1], "web")
assert.Check(t, strings.Contains(fields[3], ":80->80/tcp"))
}
}
assert.Check(t, webDisplayed && wordsDisplayed && dbDisplayed, "\n%s\n", res.Stdout())
})
t.Run("compose ls", func(t *testing.T) {
res := c.RunDockerCmd("compose", "ls", "--quiet")
l := lines(res.Stdout())
assert.Assert(t, is.Len(l, 1))
res = c.RunDockerCmd("compose", "ls")
l = lines(res.Stdout())
assert.Equal(t, 2, len(l))
fields := strings.Fields(l[1])
assert.Equal(t, 2, len(fields))
assert.Equal(t, fields[0], composeProjectName)
assert.Equal(t, "Running", fields[1])
})
t.Run("logs web", func(t *testing.T) {
res := c.RunDockerCmd("logs", serverContainer)
res.Assert(t, icmd.Expected{Out: "Listening on port 80"})
})
t.Run("update", func(t *testing.T) {
c.RunDockerCmd("compose", "up", "-f", multiPortComposefile, "--project-name", composeProjectName)
res := c.RunDockerCmd("ps")
out := lines(res.Stdout())
// Check three containers are running
assert.Assert(t, is.Len(out, 4))
for _, cName := range []string{serverContainer, wordsContainer} {
res = c.RunDockerCmd("inspect", cName)
containerInspect, err := parseContainerInspect(res.Stdout())
assert.NilError(t, err)
assert.Assert(t, is.Len(containerInspect.Ports, 1))
endpoint := fmt.Sprintf("http://%s:%d", containerInspect.Ports[0].HostIP, containerInspect.Ports[0].HostPort)
var route string
switch cName {
case serverContainer:
route = "/words/noun"
assert.Equal(t, containerInspect.Ports[0].HostPort, uint32(80))
assert.Equal(t, containerInspect.Ports[0].ContainerPort, uint32(80))
case wordsContainer:
route = "/noun"
assert.Equal(t, containerInspect.Ports[0].HostPort, uint32(8080))
assert.Equal(t, containerInspect.Ports[0].ContainerPort, uint32(8080))
}
HTTPGetWithRetry(t, endpoint+route, http.StatusOK, 1*time.Second, 60*time.Second)
res = c.RunDockerCmd("ps")
p := containerInspect.Ports[0]
res.Assert(t, icmd.Expected{
Out: fmt.Sprintf("%s:%d->%d/tcp", p.HostIP, p.HostPort, p.ContainerPort),
})
}
})
t.Run("down", func(t *testing.T) {
c.RunDockerCmd("compose", "down", "--project-name", composeProjectName)
res := c.RunDockerCmd("ps")
out := lines(res.Stdout())
assert.Equal(t, len(out), 1)
})
}
/*
func TestRunEnvVars(t *testing.T) {
c := NewParallelE2eCLI(t, binDir)
_, _, _ = setupTestResourceGroup(t, c)
t.Run("run", func(t *testing.T) {
cmd := c.NewDockerCmd(
"run", "-d",
"-e", "MYSQL_ROOT_PASSWORD=rootpwd",
"-e", "MYSQL_DATABASE=mytestdb",
"-e", "MYSQL_USER",
"-e", "MYSQL_PASSWORD=userpwd",
"-e", "DATASOURCE_URL=jdbc:mysql://mydb.mysql.database.azure.com/db1?useSSL=true&requireSSL=false&serverTimezone=America/Recife",
"mysql:5.7",
)
cmd.Env = append(cmd.Env, "MYSQL_USER=user1")
res := icmd.RunCmd(cmd)
res.Assert(t, icmd.Success)
out := lines(res.Stdout())
container := strings.TrimSpace(out[len(out)-1])
res = c.RunDockerCmd("inspect", container)
containerInspect, err := ParseContainerInspect(res.Stdout())
assert.NilError(t, err)
assert.Assert(t, containerInspect.Config != nil, "nil container config")
assert.Assert(t, containerInspect.Config.Env != nil, "nil container env variables")
assert.Equal(t, containerInspect.Image, "mysql:5.7")
envVars := containerInspect.Config.Env
assert.Equal(t, len(envVars), 5)
assert.Equal(t, envVars["MYSQL_ROOT_PASSWORD"], "rootpwd")
assert.Equal(t, envVars["MYSQL_DATABASE"], "mytestdb")
assert.Equal(t, envVars["MYSQL_USER"], "user1")
assert.Equal(t, envVars["MYSQL_PASSWORD"], "userpwd")
assert.Equal(t, envVars["DATASOURCE_URL"], "jdbc:mysql://mydb.mysql.database.azure.com/db1?useSSL=true&requireSSL=false&serverTimezone=America/Recife")
check := func(t poll.LogT) poll.Result {
res := c.RunDockerOrExitError("logs", container)
if strings.Contains(res.Stdout(), "Giving user user1 access to schema mytestdb") {
return poll.Success()
}
return poll.Continue("waiting for DB container to be up\n%s", res.Combined())
}
poll.WaitOn(t, check, poll.WithDelay(5*time.Second), poll.WithTimeout(90*time.Second))
})
}
*/
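// setupTestResourceGroup logs in to Azure, creates a dedicated resource group for the test,
// registers its deletion as cleanup, creates and switches to an ACI context, and returns
// the subscription ID, resource group name, and location.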
func setupTestResourceGroup(t *testing.T, c *E2eCLI) (string, string, string) {
startTime := strconv.Itoa(int(time.Now().Unix()))
rg := "E2E-" + t.Name() + "-" + startTime[5:]
azureLogin(t, c)
sID := getSubscriptionID(t)
location := getTestLocation()
err := createResourceGroup(t, sID, rg, location)
assert.Check(t, is.Nil(err))
t.Cleanup(func() {
if err := deleteResourceGroup(t, rg); err != nil {
t.Error(err)
}
})
createAciContextAndUseIt(t, c, sID, rg, location)
// Check nothing is running
res := c.RunDockerCmd("ps")
assert.Assert(t, is.Len(lines(res.Stdout()), 1))
return sID, rg, location
}
func deleteResourceGroup(t *testing.T, rgName string) error {
fmt.Printf(" [%s] deleting resource group %s\n", t.Name(), rgName)
ctx := context.TODO()
helper := aci.NewACIResourceGroupHelper()
models, err := helper.GetSubscriptionIDs(ctx)
if err != nil {
return err
}
if len(models) == 0 {
return errors.New("unable to delete resource group: no models")
}
return helper.DeleteAsync(ctx, *models[0].SubscriptionID, rgName)
}
func azureLogin(t *testing.T, c *E2eCLI) {
// To create a new service principal and obtain these three values, run: `az ad sp create-for-rbac --name 'TestServicePrincipal' --sdk-auth`
clientID := os.Getenv("AZURE_CLIENT_ID")
clientSecret := os.Getenv("AZURE_CLIENT_SECRET")
tenantID := os.Getenv("AZURE_TENANT_ID")
assert.Check(t, clientID != "", "AZURE_CLIENT_ID must not be empty")
assert.Check(t, clientSecret != "", "AZURE_CLIENT_SECRET must not be empty")
assert.Check(t, tenantID != "", "AZURE_TENANT_ID must not be empty")
c.RunDockerCmd("login", "azure", "--client-id", clientID, "--client-secret", clientSecret, "--tenant-id", tenantID)
}
func getSubscriptionID(t *testing.T) string {
ctx := context.TODO()
helper := aci.NewACIResourceGroupHelper()
models, err := helper.GetSubscriptionIDs(ctx)
assert.Check(t, is.Nil(err))
assert.Check(t, len(models) == 1)
return *models[0].SubscriptionID
}
func createResourceGroup(t *testing.T, sID, rgName string, location string) error {
fmt.Printf(" [%s] creating resource group %s\n", t.Name(), rgName)
helper := aci.NewACIResourceGroupHelper()
_, err := helper.CreateOrUpdate(context.TODO(), sID, rgName, resources.Group{Location: to.StringPtr(location)})
return err
}
func createAciContextAndUseIt(t *testing.T, c *E2eCLI, sID, rgName string, location string) {
res := c.RunDockerCmd("context", "create", "aci", contextName, "--subscription-id", sID, "--resource-group", rgName, "--location", location)
res.Assert(t, icmd.Expected{Out: "Successfully created aci context \"" + contextName + "\""})
res = c.RunDockerCmd("context", "use", contextName)
res.Assert(t, icmd.Expected{Out: contextName})
res = c.RunDockerCmd("context", "ls")
res.Assert(t, icmd.Expected{Out: contextName + " *"})
}
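// uploadFile uploads the given content as a file to the Azure file share identified by baseURL, using the shared key credential.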
func uploadFile(t *testing.T, cred azfile.SharedKeyCredential, baseURL, fileName, content string) {
fURL, err := url.Parse(baseURL + "/" + fileName)
assert.NilError(t, err)
fileURL := azfile.NewFileURL(*fURL, azfile.NewPipeline(&cred, azfile.PipelineOptions{}))
err = azfile.UploadBufferToAzureFile(context.TODO(), []byte(content), fileURL, azfile.UploadToAzureFileOptions{})
assert.NilError(t, err)
}
func getContainerName(stdout string) string {
out := lines(stdout)
return strings.TrimSpace(out[len(out)-1])
}
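// waitForStatus polls "docker inspect" for the container until it reports one of the expected statuses, failing after 90 seconds.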
func waitForStatus(t *testing.T, c *E2eCLI, containerID string, statuses ...string) {
checkStopped := func(logt poll.LogT) poll.Result {
res := c.RunDockerCmd("inspect", containerID)
containerInspect, err := parseContainerInspect(res.Stdout())
assert.NilError(t, err)
for _, status := range statuses {
if containerInspect.Status == status {
return poll.Success()
}
}
return poll.Continue("Status %s != %s (expected) for container %s", containerInspect.Status, statuses, containerID)
}
poll.WaitOn(t, checkStopped, poll.WithDelay(5*time.Second), poll.WithTimeout(90*time.Second))
}
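// parseContainerInspect decodes the JSON output of "docker inspect" into a ContainerInspectView.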
func parseContainerInspect(stdout string) (*cmd.ContainerInspectView, error) {
var res cmd.ContainerInspectView
rdr := bytes.NewReader([]byte(stdout))
if err := json.NewDecoder(rdr).Decode(&res); err != nil {
return nil, err
}
return &res, nil
}
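// waitWithTimeout runs blockingCall in a goroutine and returns an error if it has not completed within the given timeout.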
func waitWithTimeout(blockingCall func(), timeout time.Duration) error {
c := make(chan struct{})
go func() {
defer close(c)
blockingCall()
}()
select {
case <-c:
return nil
case <-time.After(timeout):
return fmt.Errorf("Timed out after %s", timeout)
}
}
| ["\"AZURE_CLIENT_ID\"", "\"AZURE_CLIENT_SECRET\"", "\"AZURE_TENANT_ID\""] | [] | ["AZURE_CLIENT_ID", "AZURE_CLIENT_SECRET", "AZURE_TENANT_ID"] | [] | ["AZURE_CLIENT_ID", "AZURE_CLIENT_SECRET", "AZURE_TENANT_ID"] | go | 3 | 0 |
backend/psql/integration_test.go | package psql
import (
"database/sql"
"flag"
"os"
"testing"
"euphoria.io/heim/backend"
"euphoria.io/heim/cluster"
"euphoria.io/heim/proto"
"github.com/rubenv/sql-migrate"
)
var dsn = flag.String("dsn", "postgres://heimtest:heimtest@localhost/heimtest", "")
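// TestBackend drops any leftover tables and functions from a previous run, re-applies the
// migrations, and then runs the shared backend integration suite against the resulting Postgres schema.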
func TestBackend(t *testing.T) {
dsn := *dsn
if env := os.Getenv("DSN"); env != "" {
// for running in CI container
dsn = env
}
db, err := sql.Open("postgres", dsn)
if err != nil {
t.Fatalf("sql.Open: %s", err)
}
// Drop all tables.
for _, item := range schema {
if _, err := db.Exec("DROP TABLE IF EXISTS " + item.Name + " CASCADE"); err != nil {
t.Fatalf("failed to drop table %s: %s", item.Name, err)
}
}
for _, table := range []string{"gorp_migrations", "stats_sessions_analyzed", "stats_sessions_global", "stats_sessions_per_room"} {
if _, err := db.Exec("DROP TABLE IF EXISTS " + table); err != nil {
t.Fatal(err)
}
}
for _, function := range []string{
"stats_sessions_analyze()",
"stats_sessions_global_find(timestamp with time zone, timestamp with time zone)",
"stats_sessions_global_extend(timestamp with time zone, timestamp with time zone)",
"stats_sessions_per_room_find(timestamp with time zone, timestamp with time zone)",
"stats_sessions_per_room_extend(timestamp with time zone, timestamp with time zone)",
"job_claim(text, text)",
"job_steal(text, text)",
"job_complete(bigint, integer, bytea)",
"job_fail(bigint, integer, text, bytea)",
"job_cancel(bigint)",
"virtualize_address(text, inet)",
"virtualize_inet4(text, inet)",
"virtualize_inet6(text, inet)",
"permute32(integer)",
} {
if _, err := db.Exec("DROP FUNCTION IF EXISTS " + function); err != nil {
t.Fatal(err)
}
}
// Recreate all tables.
src := migrate.FileMigrationSource{"migrations"}
if _, err := migrate.Exec(db, "postgres", src, migrate.Up); err != nil {
t.Fatal(err)
}
// Define backend factory.
var b *Backend
defer func() {
if b != nil {
b.Close()
}
}()
factory := func(heim *proto.Heim) (proto.Backend, error) {
if b == nil {
heim.Cluster = &cluster.TestCluster{}
heim.PeerDesc = &cluster.PeerDesc{
ID: "testcase",
Era: "era",
Version: "testver",
}
b, err = NewBackend(heim, &backend.DatabaseConfig{
DSN: dsn,
MaxConnCount: 0,
})
if err != nil {
return nil, err
}
}
return &nonClosingBackend{b}, nil
}
// Run test suite.
backend.IntegrationTest(t, factory)
}
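// nonClosingBackend wraps the shared *Backend so that Close calls from the integration
// suite are no-ops; the real backend is closed by the deferred cleanup in TestBackend.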
type nonClosingBackend struct {
proto.Backend
}
func (nonClosingBackend) Close() {}
| ["\"DSN\""] | [] | ["DSN"] | [] | ["DSN"] | go | 1 | 0 |
intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/community/general/plugins/modules/oneandone_monitoring_policy.py | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: oneandone_monitoring_policy
short_description: Configure 1&1 monitoring policy.
description:
- Create, remove, update monitoring policies
(and add/remove ports, processes, and servers).
This module has a dependency on 1and1 >= 1.0
options:
state:
description:
- Define a monitoring policy's state to create, remove, update.
type: str
required: false
default: present
choices: [ "present", "absent", "update" ]
auth_token:
description:
- Authenticating API token provided by 1&1.
type: str
api_url:
description:
- Custom API URL. Overrides the
ONEANDONE_API_URL environment variable.
type: str
required: false
name:
description:
- Monitoring policy name used with present state. Used as identifier (id or name) when used with absent state. maxLength=128
type: str
monitoring_policy:
description:
- The identifier (id or name) of the monitoring policy used with update state.
type: str
agent:
description:
- Set true for using agent.
type: str
email:
description:
- User's email. maxLength=128
type: str
description:
description:
- Monitoring policy description. maxLength=256
type: str
required: false
thresholds:
description:
- Monitoring policy thresholds. Each suboption has warning and critical settings,
which in turn have alert and value suboptions. Warning sets the limits for
warning alerts and critical sets the limits for critical alerts; alert enables
the notification, and value is the level at which the alert is triggered.
type: list
suboptions:
cpu:
description:
- Consumption limits of CPU.
required: true
ram:
description:
- Consumption limits of RAM.
required: true
disk:
description:
- Consumption limits of hard disk.
required: true
internal_ping:
description:
- Response limits of internal ping.
required: true
transfer:
description:
- Consumption limits for transfer.
required: true
ports:
description:
- Array of ports that will be monitoring.
type: list
suboptions:
protocol:
description:
- Internet protocol.
choices: [ "TCP", "UDP" ]
required: true
port:
description:
- Port number. minimum=1, maximum=65535
required: true
alert_if:
description:
- Case of alert.
choices: [ "RESPONDING", "NOT_RESPONDING" ]
required: true
email_notification:
description:
- Set true for sending e-mail notifications.
required: true
processes:
description:
- Array of processes that will be monitoring.
type: list
suboptions:
process:
description:
- Name of the process. maxLength=50
required: true
alert_if:
description:
- Case of alert.
choices: [ "RUNNING", "NOT_RUNNING" ]
required: true
add_ports:
description:
- Ports to add to the monitoring policy.
type: list
required: false
add_processes:
description:
- Processes to add to the monitoring policy.
type: list
required: false
add_servers:
description:
- Servers to add to the monitoring policy.
type: list
required: false
remove_ports:
description:
- Ports to remove from the monitoring policy.
type: list
required: false
remove_processes:
description:
- Processes to remove from the monitoring policy.
type: list
required: false
remove_servers:
description:
- Servers to remove from the monitoring policy.
type: list
required: false
update_ports:
description:
- Ports to be updated on the monitoring policy.
type: list
required: false
update_processes:
description:
- Processes to be updated on the monitoring policy.
type: list
required: false
wait:
description:
- wait for the instance to be in state 'running' before returning
required: false
default: "yes"
type: bool
wait_timeout:
description:
- how long before wait gives up, in seconds
type: int
default: 600
wait_interval:
description:
- Defines the number of seconds to wait when using the _wait_for methods
type: int
default: 5
requirements:
- "1and1"
- "python >= 2.6"
author:
- "Amel Ajdinovic (@aajdinov)"
- "Ethan Devenport (@edevenport)"
'''
EXAMPLES = '''
- name: Create a monitoring policy
oneandone_monitoring_policy:
auth_token: oneandone_private_api_key
name: ansible monitoring policy
description: Testing creation of a monitoring policy with ansible
email: [email protected]
agent: true
thresholds:
-
cpu:
warning:
value: 80
alert: false
critical:
value: 92
alert: false
-
ram:
warning:
value: 80
alert: false
critical:
value: 90
alert: false
-
disk:
warning:
value: 80
alert: false
critical:
value: 90
alert: false
-
internal_ping:
warning:
value: 50
alert: false
critical:
value: 100
alert: false
-
transfer:
warning:
value: 1000
alert: false
critical:
value: 2000
alert: false
ports:
-
protocol: TCP
port: 22
alert_if: RESPONDING
email_notification: false
processes:
-
process: test
alert_if: NOT_RUNNING
email_notification: false
wait: true
- name: Destroy a monitoring policy
oneandone_monitoring_policy:
auth_token: oneandone_private_api_key
state: absent
name: ansible monitoring policy
- name: Update a monitoring policy
oneandone_monitoring_policy:
auth_token: oneandone_private_api_key
monitoring_policy: ansible monitoring policy
name: ansible monitoring policy updated
description: Testing creation of a monitoring policy with ansible updated
email: [email protected]
thresholds:
-
cpu:
warning:
value: 70
alert: false
critical:
value: 90
alert: false
-
ram:
warning:
value: 70
alert: false
critical:
value: 80
alert: false
-
disk:
warning:
value: 70
alert: false
critical:
value: 80
alert: false
-
internal_ping:
warning:
value: 60
alert: false
critical:
value: 90
alert: false
-
transfer:
warning:
value: 900
alert: false
critical:
value: 1900
alert: false
wait: true
state: update
- name: Add a port to a monitoring policy
oneandone_monitoring_policy:
auth_token: oneandone_private_api_key
monitoring_policy: ansible monitoring policy updated
add_ports:
-
protocol: TCP
port: 33
alert_if: RESPONDING
email_notification: false
wait: true
state: update
- name: Update existing ports of a monitoring policy
oneandone_monitoring_policy:
auth_token: oneandone_private_api_key
monitoring_policy: ansible monitoring policy updated
update_ports:
-
id: existing_port_id
protocol: TCP
port: 34
alert_if: RESPONDING
email_notification: false
-
id: existing_port_id
protocol: TCP
port: 23
alert_if: RESPONDING
email_notification: false
wait: true
state: update
- name: Remove a port from a monitoring policy
oneandone_monitoring_policy:
auth_token: oneandone_private_api_key
monitoring_policy: ansible monitoring policy updated
remove_ports:
- port_id
state: update
- name: Add a process to a monitoring policy
oneandone_monitoring_policy:
auth_token: oneandone_private_api_key
monitoring_policy: ansible monitoring policy updated
add_processes:
-
process: test_2
alert_if: NOT_RUNNING
email_notification: false
wait: true
state: update
- name: Update existing processes of a monitoring policy
oneandone_monitoring_policy:
auth_token: oneandone_private_api_key
monitoring_policy: ansible monitoring policy updated
update_processes:
-
id: process_id
process: test_1
alert_if: NOT_RUNNING
email_notification: false
-
id: process_id
process: test_3
alert_if: NOT_RUNNING
email_notification: false
wait: true
state: update
- name: Remove a process from a monitoring policy
oneandone_monitoring_policy:
auth_token: oneandone_private_api_key
monitoring_policy: ansible monitoring policy updated
remove_processes:
- process_id
wait: true
state: update
- name: Add server to a monitoring policy
oneandone_monitoring_policy:
auth_token: oneandone_private_api_key
monitoring_policy: ansible monitoring policy updated
add_servers:
- server id or name
wait: true
state: update
- name: Remove server from a monitoring policy
oneandone_monitoring_policy:
auth_token: oneandone_private_api_key
monitoring_policy: ansible monitoring policy updated
remove_servers:
- server01
wait: true
state: update
'''
RETURN = '''
monitoring_policy:
description: Information about the monitoring policy that was processed
type: dict
sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}'
returned: always
'''
import os
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.oneandone import (
get_monitoring_policy,
get_server,
OneAndOneResources,
wait_for_resource_creation_completion
)
HAS_ONEANDONE_SDK = True
try:
import oneandone.client
except ImportError:
HAS_ONEANDONE_SDK = False
def _check_mode(module, result):
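"""
In check mode, exit immediately and report the given value as the changed status; otherwise do nothing.
"""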
if module.check_mode:
module.exit_json(
changed=result
)
def _add_ports(module, oneandone_conn, monitoring_policy_id, ports):
"""
Adds new ports to a monitoring policy.
"""
try:
monitoring_policy_ports = []
for _port in ports:
monitoring_policy_port = oneandone.client.Port(
protocol=_port['protocol'],
port=_port['port'],
alert_if=_port['alert_if'],
email_notification=_port['email_notification']
)
monitoring_policy_ports.append(monitoring_policy_port)
if module.check_mode:
if monitoring_policy_ports:
return True
return False
monitoring_policy = oneandone_conn.add_port(
monitoring_policy_id=monitoring_policy_id,
ports=monitoring_policy_ports)
return monitoring_policy
except Exception as ex:
module.fail_json(msg=str(ex))
def _delete_monitoring_policy_port(module, oneandone_conn, monitoring_policy_id, port_id):
"""
Removes a port from a monitoring policy.
"""
try:
if module.check_mode:
monitoring_policy = oneandone_conn.delete_monitoring_policy_port(
monitoring_policy_id=monitoring_policy_id,
port_id=port_id)
if monitoring_policy:
return True
return False
monitoring_policy = oneandone_conn.delete_monitoring_policy_port(
monitoring_policy_id=monitoring_policy_id,
port_id=port_id)
return monitoring_policy
except Exception as ex:
module.fail_json(msg=str(ex))
def _modify_port(module, oneandone_conn, monitoring_policy_id, port_id, port):
"""
Modifies a monitoring policy port.
"""
try:
if module.check_mode:
cm_port = oneandone_conn.get_monitoring_policy_port(
monitoring_policy_id=monitoring_policy_id,
port_id=port_id)
if cm_port:
return True
return False
monitoring_policy_port = oneandone.client.Port(
protocol=port['protocol'],
port=port['port'],
alert_if=port['alert_if'],
email_notification=port['email_notification']
)
monitoring_policy = oneandone_conn.modify_port(
monitoring_policy_id=monitoring_policy_id,
port_id=port_id,
port=monitoring_policy_port)
return monitoring_policy
except Exception as ex:
module.fail_json(msg=str(ex))
def _add_processes(module, oneandone_conn, monitoring_policy_id, processes):
"""
Adds new processes to a monitoring policy.
"""
try:
monitoring_policy_processes = []
for _process in processes:
monitoring_policy_process = oneandone.client.Process(
process=_process['process'],
alert_if=_process['alert_if'],
email_notification=_process['email_notification']
)
monitoring_policy_processes.append(monitoring_policy_process)
if module.check_mode:
mp_id = get_monitoring_policy(oneandone_conn, monitoring_policy_id)
if (monitoring_policy_processes and mp_id):
return True
return False
monitoring_policy = oneandone_conn.add_process(
monitoring_policy_id=monitoring_policy_id,
processes=monitoring_policy_processes)
return monitoring_policy
except Exception as ex:
module.fail_json(msg=str(ex))
def _delete_monitoring_policy_process(module, oneandone_conn, monitoring_policy_id, process_id):
"""
Removes a process from a monitoring policy.
"""
try:
if module.check_mode:
process = oneandone_conn.get_monitoring_policy_process(
monitoring_policy_id=monitoring_policy_id,
process_id=process_id
)
if process:
return True
return False
monitoring_policy = oneandone_conn.delete_monitoring_policy_process(
monitoring_policy_id=monitoring_policy_id,
process_id=process_id)
return monitoring_policy
except Exception as ex:
module.fail_json(msg=str(ex))
def _modify_process(module, oneandone_conn, monitoring_policy_id, process_id, process):
"""
Modifies a monitoring policy process.
"""
try:
if module.check_mode:
cm_process = oneandone_conn.get_monitoring_policy_process(
monitoring_policy_id=monitoring_policy_id,
process_id=process_id)
if cm_process:
return True
return False
monitoring_policy_process = oneandone.client.Process(
process=process['process'],
alert_if=process['alert_if'],
email_notification=process['email_notification']
)
monitoring_policy = oneandone_conn.modify_process(
monitoring_policy_id=monitoring_policy_id,
process_id=process_id,
process=monitoring_policy_process)
return monitoring_policy
except Exception as ex:
module.fail_json(msg=str(ex))
def _attach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, servers):
"""
Attaches servers to a monitoring policy.
"""
try:
attach_servers = []
for _server_id in servers:
server_id = get_server(oneandone_conn, _server_id)
attach_server = oneandone.client.AttachServer(
server_id=server_id
)
attach_servers.append(attach_server)
if module.check_mode:
if attach_servers:
return True
return False
monitoring_policy = oneandone_conn.attach_monitoring_policy_server(
monitoring_policy_id=monitoring_policy_id,
servers=attach_servers)
return monitoring_policy
except Exception as ex:
module.fail_json(msg=str(ex))
def _detach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, server_id):
"""
Detaches a server from a monitoring policy.
"""
try:
if module.check_mode:
mp_server = oneandone_conn.get_monitoring_policy_server(
monitoring_policy_id=monitoring_policy_id,
server_id=server_id)
if mp_server:
return True
return False
monitoring_policy = oneandone_conn.detach_monitoring_policy_server(
monitoring_policy_id=monitoring_policy_id,
server_id=server_id)
return monitoring_policy
except Exception as ex:
module.fail_json(msg=str(ex))
def update_monitoring_policy(module, oneandone_conn):
"""
Updates a monitoring_policy based on input arguments.
Monitoring policy ports, processes and servers can be added/removed to/from
a monitoring policy. Monitoring policy name, description, email,
thresholds for cpu, ram, disk, transfer and internal_ping
can be updated as well.
module : AnsibleModule object
oneandone_conn: authenticated oneandone object
"""
try:
monitoring_policy_id = module.params.get('monitoring_policy')
name = module.params.get('name')
description = module.params.get('description')
email = module.params.get('email')
thresholds = module.params.get('thresholds')
add_ports = module.params.get('add_ports')
update_ports = module.params.get('update_ports')
remove_ports = module.params.get('remove_ports')
add_processes = module.params.get('add_processes')
update_processes = module.params.get('update_processes')
remove_processes = module.params.get('remove_processes')
add_servers = module.params.get('add_servers')
remove_servers = module.params.get('remove_servers')
changed = False
monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy_id, True)
if monitoring_policy is None:
_check_mode(module, False)
_monitoring_policy = oneandone.client.MonitoringPolicy(
name=name,
description=description,
email=email
)
_thresholds = None
if thresholds:
threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer']
_thresholds = []
for threshold in thresholds:
key = list(threshold.keys())[0]
if key in threshold_entities:
_threshold = oneandone.client.Threshold(
entity=key,
warning_value=threshold[key]['warning']['value'],
warning_alert=str(threshold[key]['warning']['alert']).lower(),
critical_value=threshold[key]['critical']['value'],
critical_alert=str(threshold[key]['critical']['alert']).lower())
_thresholds.append(_threshold)
if name or description or email or thresholds:
_check_mode(module, True)
monitoring_policy = oneandone_conn.modify_monitoring_policy(
monitoring_policy_id=monitoring_policy['id'],
monitoring_policy=_monitoring_policy,
thresholds=_thresholds)
changed = True
if add_ports:
if module.check_mode:
_check_mode(module, _add_ports(module,
oneandone_conn,
monitoring_policy['id'],
add_ports))
monitoring_policy = _add_ports(module, oneandone_conn, monitoring_policy['id'], add_ports)
changed = True
if update_ports:
chk_changed = False
for update_port in update_ports:
if module.check_mode:
chk_changed |= _modify_port(module,
oneandone_conn,
monitoring_policy['id'],
update_port['id'],
update_port)
_modify_port(module,
oneandone_conn,
monitoring_policy['id'],
update_port['id'],
update_port)
monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
changed = True
if remove_ports:
chk_changed = False
for port_id in remove_ports:
if module.check_mode:
chk_changed |= _delete_monitoring_policy_port(module,
oneandone_conn,
monitoring_policy['id'],
port_id)
_delete_monitoring_policy_port(module,
oneandone_conn,
monitoring_policy['id'],
port_id)
_check_mode(module, chk_changed)
monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
changed = True
if add_processes:
monitoring_policy = _add_processes(module,
oneandone_conn,
monitoring_policy['id'],
add_processes)
_check_mode(module, monitoring_policy)
changed = True
if update_processes:
chk_changed = False
for update_process in update_processes:
if module.check_mode:
chk_changed |= _modify_process(module,
oneandone_conn,
monitoring_policy['id'],
update_process['id'],
update_process)
_modify_process(module,
oneandone_conn,
monitoring_policy['id'],
update_process['id'],
update_process)
_check_mode(module, chk_changed)
monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
changed = True
if remove_processes:
chk_changed = False
for process_id in remove_processes:
if module.check_mode:
chk_changed |= _delete_monitoring_policy_process(module,
oneandone_conn,
monitoring_policy['id'],
process_id)
_delete_monitoring_policy_process(module,
oneandone_conn,
monitoring_policy['id'],
process_id)
_check_mode(module, chk_changed)
monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
changed = True
if add_servers:
monitoring_policy = _attach_monitoring_policy_server(module,
oneandone_conn,
monitoring_policy['id'],
add_servers)
_check_mode(module, monitoring_policy)
changed = True
if remove_servers:
chk_changed = False
for _server_id in remove_servers:
server_id = get_server(oneandone_conn, _server_id)
if module.check_mode:
chk_changed |= _detach_monitoring_policy_server(module,
oneandone_conn,
monitoring_policy['id'],
server_id)
_detach_monitoring_policy_server(module,
oneandone_conn,
monitoring_policy['id'],
server_id)
_check_mode(module, chk_changed)
monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
changed = True
return (changed, monitoring_policy)
except Exception as ex:
module.fail_json(msg=str(ex))
def create_monitoring_policy(module, oneandone_conn):
"""
Creates a new monitoring policy.
module : AnsibleModule object
oneandone_conn: authenticated oneandone object
"""
try:
name = module.params.get('name')
description = module.params.get('description')
email = module.params.get('email')
agent = module.params.get('agent')
thresholds = module.params.get('thresholds')
ports = module.params.get('ports')
processes = module.params.get('processes')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
wait_interval = module.params.get('wait_interval')
_monitoring_policy = oneandone.client.MonitoringPolicy(name,
description,
email,
agent, )
_monitoring_policy.specs['agent'] = str(_monitoring_policy.specs['agent']).lower()
threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer']
_thresholds = []
for threshold in thresholds:
key = list(threshold.keys())[0]
if key in threshold_entities:
_threshold = oneandone.client.Threshold(
entity=key,
warning_value=threshold[key]['warning']['value'],
warning_alert=str(threshold[key]['warning']['alert']).lower(),
critical_value=threshold[key]['critical']['value'],
critical_alert=str(threshold[key]['critical']['alert']).lower())
_thresholds.append(_threshold)
_ports = []
for port in ports:
_port = oneandone.client.Port(
protocol=port['protocol'],
port=port['port'],
alert_if=port['alert_if'],
email_notification=str(port['email_notification']).lower())
_ports.append(_port)
_processes = []
for process in processes:
_process = oneandone.client.Process(
process=process['process'],
alert_if=process['alert_if'],
email_notification=str(process['email_notification']).lower())
_processes.append(_process)
_check_mode(module, True)
monitoring_policy = oneandone_conn.create_monitoring_policy(
monitoring_policy=_monitoring_policy,
thresholds=_thresholds,
ports=_ports,
processes=_processes
)
if wait:
wait_for_resource_creation_completion(
oneandone_conn,
OneAndOneResources.monitoring_policy,
monitoring_policy['id'],
wait_timeout,
wait_interval)
changed = True if monitoring_policy else False
_check_mode(module, False)
return (changed, monitoring_policy)
except Exception as ex:
module.fail_json(msg=str(ex))
def remove_monitoring_policy(module, oneandone_conn):
"""
Removes a monitoring policy.
module : AnsibleModule object
oneandone_conn: authenticated oneandone object
"""
try:
mp_id = module.params.get('name')
monitoring_policy_id = get_monitoring_policy(oneandone_conn, mp_id)
if module.check_mode:
if monitoring_policy_id is None:
_check_mode(module, False)
_check_mode(module, True)
monitoring_policy = oneandone_conn.delete_monitoring_policy(monitoring_policy_id)
changed = True if monitoring_policy else False
return (changed, {
'id': monitoring_policy['id'],
'name': monitoring_policy['name']
})
except Exception as ex:
module.fail_json(msg=str(ex))
def main():
module = AnsibleModule(
argument_spec=dict(
auth_token=dict(
type='str', no_log=True,
default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
api_url=dict(
type='str',
default=os.environ.get('ONEANDONE_API_URL')),
name=dict(type='str'),
monitoring_policy=dict(type='str'),
agent=dict(type='str'),
email=dict(type='str'),
description=dict(type='str'),
thresholds=dict(type='list', default=[]),
ports=dict(type='list', default=[]),
processes=dict(type='list', default=[]),
add_ports=dict(type='list', default=[]),
update_ports=dict(type='list', default=[]),
remove_ports=dict(type='list', default=[]),
add_processes=dict(type='list', default=[]),
update_processes=dict(type='list', default=[]),
remove_processes=dict(type='list', default=[]),
add_servers=dict(type='list', default=[]),
remove_servers=dict(type='list', default=[]),
wait=dict(type='bool', default=True),
wait_timeout=dict(type='int', default=600),
wait_interval=dict(type='int', default=5),
state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
),
supports_check_mode=True
)
if not HAS_ONEANDONE_SDK:
module.fail_json(msg='1and1 required for this module')
if not module.params.get('auth_token'):
module.fail_json(
msg='auth_token parameter is required.')
if not module.params.get('api_url'):
oneandone_conn = oneandone.client.OneAndOneService(
api_token=module.params.get('auth_token'))
else:
oneandone_conn = oneandone.client.OneAndOneService(
api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
state = module.params.get('state')
if state == 'absent':
if not module.params.get('name'):
module.fail_json(
msg="'name' parameter is required to delete a monitoring policy.")
try:
(changed, monitoring_policy) = remove_monitoring_policy(module, oneandone_conn)
except Exception as ex:
module.fail_json(msg=str(ex))
elif state == 'update':
if not module.params.get('monitoring_policy'):
module.fail_json(
msg="'monitoring_policy' parameter is required to update a monitoring policy.")
try:
(changed, monitoring_policy) = update_monitoring_policy(module, oneandone_conn)
except Exception as ex:
module.fail_json(msg=str(ex))
elif state == 'present':
for param in ('name', 'agent', 'email', 'thresholds', 'ports', 'processes'):
if not module.params.get(param):
module.fail_json(
msg="%s parameter is required for a new monitoring policy." % param)
try:
(changed, monitoring_policy) = create_monitoring_policy(module, oneandone_conn)
except Exception as ex:
module.fail_json(msg=str(ex))
module.exit_json(changed=changed, monitoring_policy=monitoring_policy)
if __name__ == '__main__':
main()
| [] | [] | ["ONEANDONE_AUTH_TOKEN", "ONEANDONE_API_URL"] | [] | ["ONEANDONE_AUTH_TOKEN", "ONEANDONE_API_URL"] | python | 2 | 0 |
scripts/ltr_msmarco/ltr_inference.py | #
# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
# We're going to explicitly use a local installation of Pyserini (as opposed to a pip-installed one).
# Comment these lines out to use a pip-installed one instead.
sys.path.insert(0, './')
import argparse
import json
import multiprocessing
import os
import pickle
import time
import numpy as np
import pandas as pd
from tqdm import tqdm
from pyserini.ltr.search_msmarco._search_msmarco import MsmarcoLtrSearcher
from pyserini.ltr import *
"""
Running prediction on candidates
"""
def dev_data_loader(file, format, data, top=100):
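"""
Load candidate rankings (MS MARCO tsv or TREC run format), keep the top candidates per query,
join them with the dev qrels, print the recall of the candidate set, and return (dev, dev_qrel).
"""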
if format == 'tsv':
dev = pd.read_csv(file, sep="\t",
names=['qid', 'pid', 'rank'],
dtype={'qid': 'S','pid': 'S', 'rank':'i',})
elif format == 'trec':
dev = pd.read_csv(file, sep=r"\s+",
names=['qid', 'q0', 'pid', 'rank', 'score', 'tag'],
usecols=['qid', 'pid', 'rank'],
dtype={'qid': 'S','pid': 'S', 'rank':'i',})
else:
raise Exception('unknown parameters')
assert dev['qid'].dtype == np.object
assert dev['pid'].dtype == np.object
assert dev['rank'].dtype == np.int32
dev = dev[dev['rank']<=top]
if data == 'passage':
dev_qrel = pd.read_csv('tools/topics-and-qrels/qrels.msmarco-passage.dev-subset.txt', sep=" ",
names=["qid", "q0", "pid", "rel"], usecols=['qid', 'pid', 'rel'],
dtype={'qid': 'S','pid': 'S', 'rel':'i'})
elif data == 'document':
dev_qrel = pd.read_csv('tools/topics-and-qrels/qrels.msmarco-doc.dev.txt', sep="\t",
names=["qid", "q0", "pid", "rel"], usecols=['qid', 'pid', 'rel'],
dtype={'qid': 'S','pid': 'S', 'rel':'i'})
assert dev['qid'].dtype == np.object
assert dev['pid'].dtype == np.object
assert dev['rank'].dtype == np.int32
dev = dev.merge(dev_qrel, left_on=['qid', 'pid'], right_on=['qid', 'pid'], how='left')
dev['rel'] = dev['rel'].fillna(0).astype(np.int32)
dev = dev.sort_values(['qid', 'pid']).set_index(['qid', 'pid'])
print(dev.shape)
print(dev.index.get_level_values('qid').drop_duplicates().shape)
print(dev.groupby('qid').count().mean())
print(dev.head(10))
print(dev.info())
dev_rel_num = dev_qrel[dev_qrel['rel'] > 0].groupby('qid').count()['rel']
recall_point = [10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000]
recall_curve = {k: [] for k in recall_point}
for qid, group in tqdm(dev.groupby('qid')):
group = group.reset_index()
assert len(group['pid'].tolist()) == len(set(group['pid'].tolist()))
total_rel = dev_rel_num.loc[qid]
query_recall = [0 for k in recall_point]
for t in group.sort_values('rank').itertuples():
if t.rel > 0:
for i, p in enumerate(recall_point):
if t.rank <= p:
query_recall[i] += 1
for i, p in enumerate(recall_point):
if total_rel > 0:
recall_curve[p].append(query_recall[i] / total_rel)
else:
recall_curve[p].append(0.)
for k, v in recall_curve.items():
avg = np.mean(v)
print(f'recall@{k}:{avg}')
return dev, dev_qrel
def query_loader():
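"""
Load the analyzed dev queries from queries.dev.small.json, splitting each field into token lists keyed by query id.
"""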
queries = {}
with open(f'{args.queries}/queries.dev.small.json') as f:
for line in f:
query = json.loads(line)
qid = query.pop('id')
query['analyzed'] = query['analyzed'].split(" ")
query['text'] = query['text_unlemm'].split(" ")
query['text_unlemm'] = query['text_unlemm'].split(" ")
query['text_bert_tok'] = query['text_bert_tok'].split(" ")
queries[qid] = query
return queries
def eval_mrr(dev_data):
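"""
Compute MRR@10 over the reranked results (stable sort by score) and report how often score ties occur.
"""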
score_tie_counter = 0
score_tie_query = set()
MRR = []
for qid, group in tqdm(dev_data.groupby('qid')):
group = group.reset_index()
rank = 0
prev_score = None
assert len(group['pid'].tolist()) == len(set(group['pid'].tolist()))
# stable sort is also used in LightGBM
for t in group.sort_values('score', ascending=False, kind='mergesort').itertuples():
if prev_score is not None and abs(t.score - prev_score) < 1e-8:
score_tie_counter += 1
score_tie_query.add(qid)
prev_score = t.score
rank += 1
if t.rel > 0:
MRR.append(1.0 / rank)
break
elif rank == 10 or rank == len(group):
MRR.append(0.)
break
score_tie = f'score_tie occurs {score_tie_counter} times in {len(score_tie_query)} queries'
print(score_tie)
mrr_10 = np.mean(MRR).item()
print(f'MRR@10:{mrr_10} with {len(MRR)} queries')
return {'score_tie': score_tie, 'mrr_10': mrr_10}
def eval_recall(dev_qrel, dev_data):
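"""
Compute recall at several cutoffs over the reranked results and report how often score ties occur.
"""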
dev_rel_num = dev_qrel[dev_qrel['rel'] > 0].groupby('qid').count()['rel']
score_tie_counter = 0
score_tie_query = set()
recall_point = [10,20,50,100,200,250,300,333,400,500,1000]
recall_curve = {k: [] for k in recall_point}
for qid, group in tqdm(dev_data.groupby('qid')):
group = group.reset_index()
rank = 0
prev_score = None
assert len(group['pid'].tolist()) == len(set(group['pid'].tolist()))
# stable sort is also used in LightGBM
total_rel = dev_rel_num.loc[qid]
query_recall = [0 for k in recall_point]
for t in group.sort_values('score', ascending=False, kind='mergesort').itertuples():
if prev_score is not None and abs(t.score - prev_score) < 1e-8:
score_tie_counter += 1
score_tie_query.add(qid)
prev_score = t.score
rank += 1
if t.rel > 0:
for i, p in enumerate(recall_point):
if rank <= p:
query_recall[i] += 1
for i, p in enumerate(recall_point):
if total_rel > 0:
recall_curve[p].append(query_recall[i] / total_rel)
else:
recall_curve[p].append(0.)
score_tie = f'score_tie occurs {score_tie_counter} times in {len(score_tie_query)} queries'
print(score_tie)
res = {'score_tie': score_tie}
for k, v in recall_curve.items():
avg = np.mean(v)
print(f'recall@{k}:{avg}')
res[f'recall@{k}'] = avg
return res
def output(file, dev_data, format):
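"""
Write the reranked results to file, either as MS MARCO tsv (qid, pid, rank) or as a TREC run.
"""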
score_tie_counter = 0
score_tie_query = set()
output_file = open(file,'w')
for qid, group in tqdm(dev_data.groupby('qid')):
group = group.reset_index()
rank = 0
prev_score = None
assert len(group['pid'].tolist()) == len(set(group['pid'].tolist()))
# stable sort is also used in LightGBM
for t in group.sort_values('score', ascending=False, kind='mergesort').itertuples():
if prev_score is not None and abs(t.score - prev_score) < 1e-8:
score_tie_counter += 1
score_tie_query.add(qid)
prev_score = t.score
rank += 1
if format == 'tsv':
output_file.write(f"{qid}\t{t.pid}\t{rank}\n")
else:
output_file.write(f"{qid}\tQ0\t{t.pid}\t{rank}\t{t.score}\tltr\n")
score_tie = f'score_tie occurs {score_tie_counter} times in {len(score_tie_query)} queries'
print(score_tie)
if __name__ == "__main__":
os.environ["ANSERINI_CLASSPATH"] = "./pyserini/resources/jars"
parser = argparse.ArgumentParser(description='Learning to rank reranking')
parser.add_argument('--input', required=True)
parser.add_argument('--reranking-top', type=int, default=1000)
parser.add_argument('--input-format', required=True)
parser.add_argument('--model', required=True)
parser.add_argument('--index', required=True)
parser.add_argument('--output', required=True)
parser.add_argument('--ibm-model', required=True)
parser.add_argument('--queries', required=True)
parser.add_argument('--data', required=True)
parser.add_argument('--output-format', default='trec')
args = parser.parse_args()
searcher = MsmarcoLtrSearcher(args.model, args.ibm_model, args.index, args.data)
searcher.add_fe()
print("load dev")
dev, dev_qrel = dev_data_loader(args.input, args.input_format, args.data, args.reranking_top)
print("load queries")
queries = query_loader()
batch_info = searcher.search(dev, queries)
del dev, queries
eval_res = eval_mrr(batch_info)
eval_recall(dev_qrel, batch_info)
output(args.output, batch_info, args.output_format)
print('Done!')
| [] | [] | ["ANSERINI_CLASSPATH"] | [] | ["ANSERINI_CLASSPATH"] | python | 1 | 0 |
base/wsgi.py | """
WSGI config for the base project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'base.settings')
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
cni-plugin/tests/calico_cni_k8s_test.go | // Copyright (c) 2015-2021 Tigera, Inc. All rights reserved.
package main_test
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"os"
"os/exec"
"regexp"
"strings"
"syscall"
"time"
"github.com/containernetworking/cni/pkg/types/current"
"github.com/containernetworking/plugins/pkg/ns"
cnitestutils "github.com/containernetworking/plugins/pkg/testutils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vishvananda/netlink"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
api "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
"github.com/projectcalico/api/pkg/lib/numorstring"
"github.com/projectcalico/calico/cni-plugin/internal/pkg/testutils"
"github.com/projectcalico/calico/cni-plugin/internal/pkg/utils"
"github.com/projectcalico/calico/cni-plugin/pkg/types"
libapi "github.com/projectcalico/calico/libcalico-go/lib/apis/v3"
k8sconversion "github.com/projectcalico/calico/libcalico-go/lib/backend/k8s/conversion"
client "github.com/projectcalico/calico/libcalico-go/lib/clientv3"
"github.com/projectcalico/calico/libcalico-go/lib/ipam"
"github.com/projectcalico/calico/libcalico-go/lib/names"
cnet "github.com/projectcalico/calico/libcalico-go/lib/net"
"github.com/projectcalico/calico/libcalico-go/lib/options"
)
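// ensureNamespace creates the given namespace, tolerating the case where it already exists.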
func ensureNamespace(clientset *kubernetes.Clientset, name string) {
ns := &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{Name: name},
}
_, err := clientset.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
if errors.IsAlreadyExists(err) {
return
}
Expect(err).NotTo(HaveOccurred())
}
func ensurePodCreated(clientset *kubernetes.Clientset, namespace string, pod *v1.Pod) *v1.Pod {
pod, err := clientset.CoreV1().Pods(namespace).Create(context.Background(), pod, metav1.CreateOptions{})
ExpectWithOffset(1, err).NotTo(HaveOccurred())
// Immediately try to get the pod, and retry until we do. This prevents race
// conditions where the API Server has accepted the create, but isn't ready
// to find the pod on a get. These races can cause the tests to be flaky.
EventuallyWithOffset(1, func() error {
_, err := clientset.CoreV1().Pods(namespace).Get(context.Background(), pod.Name, metav1.GetOptions{})
return err
}, 2*time.Second, 100*time.Millisecond).ShouldNot(HaveOccurred())
return pod
}
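// ensurePodDeleted deletes the named pod, if present, and waits until the API server no longer returns it.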
func ensurePodDeleted(clientset *kubernetes.Clientset, ns string, podName string) {
// Check if pod exists first.
_, err := clientset.CoreV1().Pods(ns).Get(context.Background(), podName, metav1.GetOptions{})
if kerrors.IsNotFound(err) {
// Pod has been deleted already. Do nothing.
return
}
Expect(err).NotTo(HaveOccurred())
// Delete pod immediately.
fg := metav1.DeletePropagationForeground
zero := int64(0)
err = clientset.CoreV1().Pods(ns).Delete(context.Background(),
podName,
metav1.DeleteOptions{
PropagationPolicy: &fg,
GracePeriodSeconds: &zero})
Expect(err).NotTo(HaveOccurred())
// Wait for pod to disappear.
Eventually(func() error {
_, err := clientset.CoreV1().Pods(ns).Get(context.Background(), podName, metav1.GetOptions{})
if kerrors.IsNotFound(err) {
return nil
}
if err != nil {
return err
}
return fmt.Errorf("Pod %s.%s still exists", ns, podName)
}, "5s", "200ms").Should(BeNil())
}
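// ensureNodeDeleted deletes the named node, if present, and waits until the API server no longer returns it.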
func ensureNodeDeleted(clientset *kubernetes.Clientset, nodeName string) {
// Check if node exists first.
_, err := clientset.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{})
if kerrors.IsNotFound(err) {
// Node has been deleted already. Do nothing.
return
}
Expect(err).NotTo(HaveOccurred())
// Delete node immediately.
fg := metav1.DeletePropagationForeground
zero := int64(0)
err = clientset.CoreV1().Nodes().Delete(context.Background(),
nodeName,
metav1.DeleteOptions{
PropagationPolicy: &fg,
GracePeriodSeconds: &zero})
Expect(err).NotTo(HaveOccurred())
// Wait for node to disappear.
Eventually(func() error {
_, err := clientset.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{})
if kerrors.IsNotFound(err) {
return nil
}
if err != nil {
return err
}
return fmt.Errorf("Node %s still exists", nodeName)
}, "5s", "200ms").Should(BeNil())
}
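// getKubernetesClient builds a Kubernetes clientset from the kubeconfig referenced by the KUBECONFIG environment variable.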
func getKubernetesClient() *kubernetes.Clientset {
kubeconfig := os.Getenv("KUBECONFIG")
config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
panic(err)
}
clientset, err := kubernetes.NewForConfig(config)
ExpectWithOffset(1, err).NotTo(HaveOccurred())
return clientset
}
var _ = Describe("Kubernetes CNI tests", func() {
// Create a random seed
rand.Seed(time.Now().UTC().UnixNano())
hostname, _ := names.Hostname()
ctx := context.Background()
calicoClient, err := client.NewFromEnv()
Expect(err).NotTo(HaveOccurred())
k8sClient := getKubernetesClient()
BeforeEach(func() {
testutils.WipeDatastore()
// Create the node for these tests. The IPAM code requires a corresponding Calico node to exist.
name, err := names.Hostname()
Expect(err).NotTo(HaveOccurred())
err = testutils.AddNode(calicoClient, k8sClient, name)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
// Delete the node.
name, err := names.Hostname()
Expect(err).NotTo(HaveOccurred())
err = testutils.DeleteNode(calicoClient, k8sClient, name)
Expect(err).NotTo(HaveOccurred())
})
cniVersion := os.Getenv("CNI_SPEC_VERSION")
Context("using host-local IPAM", func() {
netconf := fmt.Sprintf(`
{
"cniVersion": "%s",
"name": "net1",
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"datastore_type": "%s",
"ipam": {
"type": "host-local",
"subnet": "10.0.0.0/8"
},
"kubernetes": {
"kubeconfig": "/home/user/certs/kubeconfig"
},
"policy": {"type": "k8s"},
"nodename_file_optional": true,
"log_level":"debug"
}`, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"))
It("successfully networks the namespace", func() {
clientset := getKubernetesClient()
ensureNamespace(clientset, testutils.K8S_TEST_NS)
// Create a K8s pod w/o any special params
name := fmt.Sprintf("run%d", rand.Uint32())
ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: name},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
defer ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name)
containerID, result, contVeth, contAddresses, contRoutes, contNs, err := testutils.CreateContainer(netconf, name, testutils.K8S_TEST_NS, "")
Expect(err).ShouldNot(HaveOccurred())
mac := contVeth.Attrs().HardwareAddr
Expect(len(result.IPs)).Should(Equal(1))
ip := result.IPs[0].Address.IP.String()
result.IPs[0].Address.IP = result.IPs[0].Address.IP.To4() // Make sure the IP is represented as 4 bytes
Expect(result.IPs[0].Address.Mask.String()).Should(Equal("ffffffff"))
// datastore things:
// TODO Make sure the profile doesn't exist
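// Compute the expected WorkloadEndpoint name and host-side veth name for this pod so they can be checked against the datastore below.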
ids := names.WorkloadEndpointIdentifiers{
Node: hostname,
Orchestrator: api.OrchestratorKubernetes,
Endpoint: "eth0",
Pod: name,
ContainerID: containerID,
}
wrkload, err := ids.CalculateWorkloadEndpointName(false)
Expect(err).NotTo(HaveOccurred())
interfaceName := k8sconversion.NewConverter().VethNameForWorkload(testutils.K8S_TEST_NS, name)
// The endpoint is created
endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
if os.Getenv("DATASTORE_TYPE") == "kubernetes" {
// Unlike the etcd datastore, a WorkloadEndpoint backed by a Kubernetes pod does not store the MAC address.
// Put it back manually for the comparison below.
endpoints.Items[0].Spec.MAC = mac.String()
}
Expect(endpoints.Items[0].Name).Should(Equal(wrkload))
Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
"projectcalico.org/namespace": "test",
"projectcalico.org/orchestrator": api.OrchestratorKubernetes,
"projectcalico.org/serviceaccount": "default",
}))
Expect(endpoints.Items[0].Spec).Should(Equal(libapi.WorkloadEndpointSpec{
Pod: name,
InterfaceName: interfaceName,
IPNetworks: []string{result.IPs[0].Address.String()},
ServiceAccountName: "default",
MAC: mac.String(),
Profiles: []string{"kns.test", "ksa.test.default"},
Node: hostname,
Endpoint: "eth0",
Workload: "",
ContainerID: containerID,
Orchestrator: api.OrchestratorKubernetes,
}))
// Routes and interface on host - there is nothing to assert on the routes since felix adds those.
// fmt.Println(Cmd("ip link show")) // Useful for debugging
hostVeth, err := netlink.LinkByName(interfaceName)
Expect(err).ToNot(HaveOccurred())
Expect(hostVeth.Attrs().Flags.String()).Should(ContainSubstring("up"))
Expect(hostVeth.Attrs().MTU).Should(Equal(1500))
// Assert hostVeth sysctl values are set to what we expect for IPv4.
err = testutils.CheckSysctlValue(fmt.Sprintf("/proc/sys/net/ipv4/conf/%s/proxy_arp", interfaceName), "1")
Expect(err).ShouldNot(HaveOccurred())
err = testutils.CheckSysctlValue(fmt.Sprintf("/proc/sys/net/ipv4/neigh/%s/proxy_delay", interfaceName), "0")
Expect(err).ShouldNot(HaveOccurred())
err = testutils.CheckSysctlValue(fmt.Sprintf("/proc/sys/net/ipv4/conf/%s/forwarding", interfaceName), "1")
Expect(err).ShouldNot(HaveOccurred())
// Assert sysctl values are set for what we would expect for an endpoint.
err = checkInterfaceConfig(interfaceName, "4")
Expect(err).ShouldNot(HaveOccurred())
// Assert if the host side route is programmed correctly.
hostRoutes, err := netlink.RouteList(hostVeth, syscall.AF_INET)
Expect(err).ShouldNot(HaveOccurred())
Expect(hostRoutes[0]).Should(Equal(netlink.Route{
LinkIndex: hostVeth.Attrs().Index,
Scope: netlink.SCOPE_LINK,
Dst: &result.IPs[0].Address,
Protocol: syscall.RTPROT_BOOT,
Table: syscall.RT_TABLE_MAIN,
Type: syscall.RTN_UNICAST,
Family: syscall.AF_INET,
}))
// Routes and interface in netns
Expect(contVeth.Attrs().Flags.String()).Should(ContainSubstring("up"))
// Assume the first IP is the IPv4 address
Expect(contAddresses[0].IP.String()).Should(Equal(ip))
Expect(contRoutes).Should(SatisfyAll(
ContainElement(netlink.Route{
LinkIndex: contVeth.Attrs().Index,
Gw: net.IPv4(169, 254, 1, 1).To4(),
Protocol: syscall.RTPROT_BOOT,
Table: syscall.RT_TABLE_MAIN,
Type: syscall.RTN_UNICAST,
Family: syscall.AF_INET,
}),
ContainElement(netlink.Route{
LinkIndex: contVeth.Attrs().Index,
Scope: netlink.SCOPE_LINK,
Dst: &net.IPNet{IP: net.IPv4(169, 254, 1, 1).To4(), Mask: net.CIDRMask(32, 32)},
Protocol: syscall.RTPROT_BOOT,
Table: syscall.RT_TABLE_MAIN,
Type: syscall.RTN_UNICAST,
Family: syscall.AF_INET,
})))
// Delete container
_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
// Make sure there are no endpoints anymore
endpoints, err = calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(0))
}
// Make sure the interface has been removed from the namespace
targetNs, _ := ns.GetNS(contNs.Path())
err = targetNs.Do(func(_ ns.NetNS) error {
_, err = netlink.LinkByName("eth0")
return err
})
Expect(err).Should(HaveOccurred())
Expect(err.Error()).Should(Equal("Link not found"))
// Make sure the interface has been removed from the host
_, err = netlink.LinkByName("cali" + containerID)
Expect(err).Should(HaveOccurred())
Expect(err.Error()).Should(Equal("Link not found"))
})
Context("when a named port is set", func() {
It("it is added to the workload endpoint", func() {
clientset := getKubernetesClient()
name := fmt.Sprintf("run%d", rand.Uint32())
// Create a K8s pod w/o any special params
ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: name},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: fmt.Sprintf("container-%s", name),
Image: "ignore",
Ports: []v1.ContainerPort{{
Name: "anamedport",
ContainerPort: 555,
}},
}},
NodeName: hostname,
},
})
defer ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name)
containerID, result, contVeth, _, _, contNs, err := testutils.CreateContainer(netconf, name, testutils.K8S_TEST_NS, "")
Expect(err).ShouldNot(HaveOccurred())
mac := contVeth.Attrs().HardwareAddr
Expect(len(result.IPs)).Should(Equal(1))
result.IPs[0].Address.IP = result.IPs[0].Address.IP.To4() // Make sure the IP is represented as 4 bytes
Expect(result.IPs[0].Address.Mask.String()).Should(Equal("ffffffff"))
// datastore things:
// TODO Make sure the profile doesn't exist
ids := names.WorkloadEndpointIdentifiers{
Node: hostname,
Orchestrator: api.OrchestratorKubernetes,
Endpoint: "eth0",
Pod: name,
ContainerID: containerID,
}
wrkload, err := ids.CalculateWorkloadEndpointName(false)
interfaceName := k8sconversion.NewConverter().VethNameForWorkload(testutils.K8S_TEST_NS, name)
Expect(err).NotTo(HaveOccurred())
// The endpoint is created
endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
if os.Getenv("DATASTORE_TYPE") == "kubernetes" {
// Unlike the etcd datastore, a WorkloadEndpoint backed by a Kubernetes pod does not store the MAC address.
// Put it back manually for the comparison below.
endpoints.Items[0].Spec.MAC = mac.String()
}
Expect(endpoints.Items[0].Name).Should(Equal(wrkload))
Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
"projectcalico.org/namespace": "test",
"projectcalico.org/orchestrator": api.OrchestratorKubernetes,
"projectcalico.org/serviceaccount": "default",
}))
Expect(endpoints.Items[0].Spec).Should(Equal(libapi.WorkloadEndpointSpec{
Pod: name,
InterfaceName: interfaceName,
IPNetworks: []string{result.IPs[0].Address.String()},
MAC: mac.String(),
Profiles: []string{"kns.test", "ksa.test.default"},
Node: hostname,
ServiceAccountName: "default",
Endpoint: "eth0",
Workload: "",
ContainerID: containerID,
Orchestrator: api.OrchestratorKubernetes,
Ports: []libapi.WorkloadEndpointPort{{
Name: "anamedport",
Protocol: numorstring.ProtocolFromString("TCP"),
Port: 555,
}},
}))
// Assert sysctl values are set for what we would expect for an endpoint.
err = checkInterfaceConfig(interfaceName, "4")
Expect(err).ShouldNot(HaveOccurred())
// Delete container
_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
// Make sure there are no endpoints anymore
endpoints, err = calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(0))
}
// Make sure the interface has been removed from the namespace
targetNs, _ := ns.GetNS(contNs.Path())
err = targetNs.Do(func(_ ns.NetNS) error {
_, err = netlink.LinkByName("eth0")
return err
})
Expect(err).Should(HaveOccurred())
Expect(err.Error()).Should(Equal("Link not found"))
// Make sure the interface has been removed from the host
_, err = netlink.LinkByName("cali" + containerID)
Expect(err).Should(HaveOccurred())
Expect(err.Error()).Should(Equal("Link not found"))
})
})
Context("when the same hostVeth exists", func() {
It("successfully networks the namespace", func() {
clientset := getKubernetesClient()
name := fmt.Sprintf("run%d", rand.Uint32())
// Create a K8s pod w/o any special params
ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: name},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
defer ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name)
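// Pre-create the host-side veth so the CNI ADD below runs against an already-existing interface.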
if err := testutils.CreateHostVeth("", name, testutils.K8S_TEST_NS, hostname); err != nil {
panic(err)
}
_, _, _, _, _, contNs, err := testutils.CreateContainer(netconf, name, testutils.K8S_TEST_NS, "")
Expect(err).ShouldNot(HaveOccurred())
_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
})
})
Context("when /var/lib/calico/mtu file exists", func() {
mtuNetconfTemplate := `
{
"cniVersion": "%s",
"name": "net1",
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"datastore_type": "%s",
"ipam": {
"type": "host-local",
"subnet": "10.0.0.0/8"
},
"kubernetes": {
"kubeconfig": "/home/user/certs/kubeconfig"
},
"policy": {"type": "k8s"},
"nodename_file_optional": true,
"log_level":"info"
}`
It("should create pods with the right MTU", func() {
clientset := getKubernetesClient()
// Create the /var/lib/calico/mtu file with MTU 3000
err = os.MkdirAll("/var/lib/calico", os.ModePerm)
Expect(err).NotTo(HaveOccurred())
err = ioutil.WriteFile("/var/lib/calico/mtu", []byte("3000"), 0644)
Expect(err).NotTo(HaveOccurred())
defer os.Remove("/var/lib/calico/mtu")
// Create a K8s pod/container
name1 := fmt.Sprintf("mtutest%d", rand.Uint32())
mtuNetconf1 := fmt.Sprintf(mtuNetconfTemplate, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"))
ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: name1},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name1,
Image: "ignore",
}},
NodeName: hostname,
},
})
defer ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name1)
// Run CNI plugin, expect MTU to match the value from file.
_, _, contVeth1, _, _, contNs1, err := testutils.CreateContainer(mtuNetconf1, name1, testutils.K8S_TEST_NS, "")
Expect(err).ShouldNot(HaveOccurred())
Expect(contVeth1.Attrs().MTU).Should(Equal(3000))
// Cleanup
_, err = testutils.DeleteContainer(mtuNetconf1, contNs1.Path(), name1, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
})
})
Context("when calico-config contains a custom mtu", func() {
mtuNetconfTemplate := `
{
"cniVersion": "%s",
"name": "net1",
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"datastore_type": "%s",
"mtu": %d,
"ipam": {
"type": "host-local",
"subnet": "10.0.0.0/8"
},
"kubernetes": {
"kubeconfig": "/home/user/certs/kubeconfig"
},
"policy": {"type": "k8s"},
"nodename_file_optional": true,
"log_level":"info"
}`
It("creates pods with the new mtu", func() {
clientset := getKubernetesClient()
// Create a K8s pod/container with non-default MTU
name1 := fmt.Sprintf("mtutest%d", rand.Uint32())
mtuNetconf1 := fmt.Sprintf(mtuNetconfTemplate, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"), 3000)
ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: name1},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name1,
Image: "ignore",
}},
NodeName: hostname,
},
})
defer ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name1)
_, _, contVeth1, _, _, contNs1, err := testutils.CreateContainer(mtuNetconf1, name1, testutils.K8S_TEST_NS, "")
Expect(err).ShouldNot(HaveOccurred())
Expect(contVeth1.Attrs().MTU).Should(Equal(3000))
// Create another K8s pod/container with a different non-default MTU
name2 := fmt.Sprintf("mtutest2%d", rand.Uint32())
mtuNetconf2 := fmt.Sprintf(mtuNetconfTemplate, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"), 4000)
ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: name2},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name2,
Image: "ignore",
}},
NodeName: hostname,
},
})
defer ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name2)
_, _, contVeth2, _, _, contNs2, err := testutils.CreateContainer(mtuNetconf2, name2, testutils.K8S_TEST_NS, "")
Expect(err).ShouldNot(HaveOccurred())
Expect(contVeth2.Attrs().MTU).Should(Equal(4000))
// Cleanup
_, err = testutils.DeleteContainer(mtuNetconf1, contNs1.Path(), name1, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
_, err = testutils.DeleteContainer(mtuNetconf2, contNs2.Path(), name2, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
})
})
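// Table-driven host-local IPAM configurations exercised below, both with a single node PodCIDR and with dual-stack PodCIDRs;
// each entry records the expected routes and the number of addresses per family.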
hostLocalIPAMConfigs := []struct {
description, cniVersion, config, unexpectedRoute string
expectedV4Routes, expectedV6Routes []string
numIPv4IPs, numIPv6IPs int
}{
{
description: "old-style inline subnet",
cniVersion: cniVersion,
config: `
{
"cniVersion": "%s",
"name": "net6",
"nodename_file_optional": true,
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"datastore_type": "%s",
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"kubernetes": {
"kubeconfig": "/home/user/certs/kubeconfig"
},
"policy": {"type": "k8s"},
"log_level":"info"
}`,
expectedV4Routes: []string{
regexp.QuoteMeta("default via 169.254.1.1 dev eth0"),
regexp.QuoteMeta("169.254.1.1 dev eth0 scope link"),
},
unexpectedRoute: regexp.QuoteMeta("10."),
numIPv4IPs: 1,
numIPv6IPs: 0,
},
{
// This scenario tests IPv4+IPv6 without specifying any routes.
description: "new-style with IPv4 and IPv6 ranges, no routes",
cniVersion: "0.3.0",
config: `
{
"cniVersion": "%s",
"name": "net6",
"nodename_file_optional": true,
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"datastore_type": "%s",
"ipam": {
"type": "host-local",
"ranges": [
[
{
"subnet": "usePodCidr"
}
],
[
{
"subnet": "dead:beef::/96"
}
]
]
},
"kubernetes": {
"kubeconfig": "/home/user/certs/kubeconfig"
},
"policy": {"type": "k8s"},
"log_level":"info"
}`,
expectedV4Routes: []string{
regexp.QuoteMeta("default via 169.254.1.1 dev eth0"),
regexp.QuoteMeta("169.254.1.1 dev eth0 scope link"),
},
expectedV6Routes: []string{
"dead:beef::[0-9a-f]* dev eth0 proto kernel metric 256 pref medium",
"fe80::/64 dev eth0 proto kernel metric 256 pref medium",
"default via fe80::ecee:eeff:feee:eeee dev eth0 metric 1024",
},
unexpectedRoute: regexp.QuoteMeta("10."),
numIPv4IPs: 1,
numIPv6IPs: 1,
},
{
// This scenario tests IPv4+IPv6 without specifying any routes.
description: "new-style with IPv4 and IPv6 both using usePodCidr, no routes",
cniVersion: "0.3.0",
config: `
{
"cniVersion": "%s",
"name": "net6",
"nodename_file_optional": true,
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"datastore_type": "%s",
"ipam": {
"type": "host-local",
"ranges": [
[
{
"subnet": "usePodCidr"
}
],
[
{
"subnet": "usePodCidrIPv6"
}
]
]
},
"kubernetes": {
"kubeconfig": "/home/user/certs/kubeconfig"
},
"policy": {"type": "k8s"},
"log_level":"info"
}`,
expectedV4Routes: []string{
regexp.QuoteMeta("default via 169.254.1.1 dev eth0"),
regexp.QuoteMeta("169.254.1.1 dev eth0 scope link"),
},
expectedV6Routes: []string{
"dead:beef::[0-9a-f]* dev eth0 proto kernel metric 256 pref medium",
"fe80::/64 dev eth0 proto kernel metric 256 pref medium",
"default via fe80::ecee:eeff:feee:eeee dev eth0 metric 1024",
},
unexpectedRoute: regexp.QuoteMeta("10."),
numIPv4IPs: 1,
numIPv6IPs: 1,
},
{
// In this scenario, we use a lot more of the host-local IPAM plugin. Namely:
// - we use multiple ranges: one uses the podCIDR, one is a static IPv4 range, and one is IPv6
// - we add custom routes, which override our default 0/0 and ::/0 routes.
// This configuration is only supported for CNI version >= 0.3.0 since we assign multiple
// addresses per family.
description: "new-style with IPv4 and IPv6 ranges and routes",
cniVersion: "0.3.0",
config: `
{
"cniVersion": "%s",
"name": "net6",
"nodename_file_optional": true,
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"datastore_type": "%s",
"ipam": {
"type": "host-local",
"ranges": [
[
{
"subnet": "usePodCidr"
}
],
[
{
"subnet": "10.100.0.0/24"
}
],
[
{
"subnet": "dead:beef::/96"
}
]
],
"routes": [
{"dst": "10.123.0.0/16", "gw": "10.123.0.1"},
{"dst": "10.124.0.0/16"},
{"dst": "dead:beef::/96"}
]
},
"kubernetes": {
"kubeconfig": "/home/user/certs/kubeconfig"
},
"policy": {"type": "k8s"},
"log_level":"info"
}`,
expectedV4Routes: []string{
regexp.QuoteMeta("10.123.0.0/16 via 169.254.1.1 dev eth0"),
regexp.QuoteMeta("10.124.0.0/16 via 169.254.1.1 dev eth0"),
regexp.QuoteMeta("169.254.1.1 dev eth0 scope link"),
},
expectedV6Routes: []string{
"dead:beef::. dev eth0 proto kernel metric 256 pref medium",
"dead:beef::/96 via fe80::ecee:eeff:feee:eeee dev eth0 metric 1024",
"fe80::/64 dev eth0 proto kernel metric 256 pref medium",
},
unexpectedRoute: "default",
numIPv4IPs: 2,
numIPv6IPs: 1,
},
{
// In this scenario, we use a lot more of the host-local IPAM plugin. Namely:
// - we use multiple ranges: one uses the podCIDR, one is a static IPv4 range, and one is IPv6
// - we add custom routes, but configure the plugin to also include our default routes.
description: "new-style with IPv4 and IPv6 ranges and routes and Calico default routes",
cniVersion: "0.3.0",
config: `
{
"cniVersion": "%s",
"name": "net6",
"nodename_file_optional": true,
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"include_default_routes": true,
"datastore_type": "%s",
"ipam": {
"type": "host-local",
"ranges": [
[
{
"subnet": "usePodCidr"
}
],
[
{
"subnet": "10.100.0.0/24"
}
],
[
{
"subnet": "dead:beef::/96"
}
]
],
"routes": [
{"dst": "10.123.0.0/16", "gw": "10.123.0.1"},
{"dst": "10.124.0.0/16"},
{"dst": "dead:beef::/96"}
]
},
"kubernetes": {
"kubeconfig": "/home/user/certs/kubeconfig"
},
"policy": {"type": "k8s"},
"log_level":"info"
}`,
expectedV4Routes: []string{
regexp.QuoteMeta("default via 169.254.1.1 dev eth0"),
regexp.QuoteMeta("10.123.0.0/16 via 169.254.1.1 dev eth0"),
regexp.QuoteMeta("10.124.0.0/16 via 169.254.1.1 dev eth0"),
regexp.QuoteMeta("169.254.1.1 dev eth0 scope link"),
},
expectedV6Routes: []string{
"dead:beef::. dev eth0 proto kernel metric 256 pref medium",
"dead:beef::/96 via fe80::ecee:eeff:feee:eeee dev eth0 metric 1024",
"fe80::/64 dev eth0 proto kernel metric 256 pref medium",
},
numIPv4IPs: 2,
numIPv6IPs: 1,
},
}
// Run tests with PodCIDR
for _, c := range hostLocalIPAMConfigs {
c := c // Make sure we get a fresh variable on each loop.
// Dual-stack configurations require node PodCIDRs, so skip them in this single-PodCIDR loop.
if strings.Contains(c.config, "usePodCidrIPv6") {
continue
}
Context("Using host-local IPAM with one PodCIDR ("+c.description+"): request an IP then release it, and then request it again", func() {
It("should successfully assign IP both times and successfully release it in the middle", func() {
netconfHostLocalIPAM := fmt.Sprintf(c.config, c.cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"))
clientset := getKubernetesClient()
ensureNamespace(clientset, testutils.K8S_TEST_NS)
ensureNodeDeleted(clientset, hostname)
// Create a K8s Node object with PodCIDR and name equal to hostname.
_, err = clientset.CoreV1().Nodes().Create(context.Background(), &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: hostname},
Spec: v1.NodeSpec{
PodCIDR: "10.0.0.0/24",
},
}, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
defer ensureNodeDeleted(clientset, hostname)
By("Creating a pod with a specific IP address")
name := fmt.Sprintf("run%d", rand.Uint32())
ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: name},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
defer ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name)
requestedIP := "10.0.0.42"
expectedIP := net.IPv4(10, 0, 0, 42).To4()
_, _, _, contAddresses, _, contNs, err := testutils.CreateContainer(netconfHostLocalIPAM, name, testutils.K8S_TEST_NS, requestedIP)
Expect(err).NotTo(HaveOccurred())
podIP := contAddresses[0].IP
Expect(podIP).Should(Equal(expectedIP))
By("Deleting the pod we created earlier")
_, err = testutils.DeleteContainer(netconfHostLocalIPAM, contNs.Path(), name, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
By("Creating a second pod with the same IP address as the first pod")
name2 := fmt.Sprintf("run2%d", rand.Uint32())
ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: name2},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: fmt.Sprintf("container-%s", name2),
Image: "ignore",
}},
NodeName: hostname,
},
})
defer ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name2)
_, _, _, contAddresses, _, contNs, err = testutils.CreateContainer(netconfHostLocalIPAM, name2, testutils.K8S_TEST_NS, requestedIP)
Expect(err).NotTo(HaveOccurred())
pod2IP := contAddresses[0].IP
Expect(pod2IP).Should(Equal(expectedIP))
err = contNs.Do(func(_ ns.NetNS) error {
defer GinkgoRecover()
out, err := exec.Command("ip", "route", "show").Output()
Expect(err).NotTo(HaveOccurred())
for _, r := range c.expectedV4Routes {
Expect(string(out)).To(MatchRegexp(r))
}
if c.unexpectedRoute != "" {
Expect(string(out)).NotTo(ContainSubstring(c.unexpectedRoute))
}
out, err = exec.Command("ip", "-6", "route", "show").Output()
Expect(err).NotTo(HaveOccurred())
for _, r := range c.expectedV6Routes {
Expect(string(out)).To(MatchRegexp(r))
}
if c.numIPv6IPs > 0 {
err := testutils.CheckSysctlValue("/proc/sys/net/ipv6/conf/eth0/accept_dad", "0")
Expect(err).NotTo(HaveOccurred())
}
out, err = exec.Command("ip", "addr", "show").Output()
Expect(err).NotTo(HaveOccurred())
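// Count the global-scope IPv4 and IPv6 addresses on the interface and compare them with the expected totals for this config.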
inet := regexp.MustCompile(` {4}inet .*scope global`)
Expect(inet.FindAll(out, -1)).To(HaveLen(c.numIPv4IPs))
inetv6 := regexp.MustCompile(` {4}inet6 .*scope global`)
Expect(inetv6.FindAll(out, -1)).To(HaveLen(c.numIPv6IPs))
Expect(out).NotTo(ContainSubstring("scope global tentative"),
"Some IPv6 addresses marked as tentative; disabling DAD must have failed.")
return nil
})
Expect(err).ShouldNot(HaveOccurred())
_, err = testutils.DeleteContainer(netconfHostLocalIPAM, contNs.Path(), name2, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
})
})
}
// Run tests with PodCIDRs defining a dual-stack deployment
for _, c := range hostLocalIPAMConfigs {
c := c // Make sure we get a fresh variable on each loop.
Context("Using host-local IPAM with two PodCIDRs ("+c.description+"): request an IP then release it, and then request it again", func() {
It("should successfully assign IP both times and successfully release it in the middle", func() {
netconfHostLocalIPAM := fmt.Sprintf(c.config, c.cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"))
clientset := getKubernetesClient()
ensureNamespace(clientset, testutils.K8S_TEST_NS)
ensureNodeDeleted(clientset, hostname)
// Create a K8s Node object with PodCIDR and name equal to hostname.
_, err = clientset.CoreV1().Nodes().Create(context.Background(), &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: hostname},
Spec: v1.NodeSpec{
PodCIDRs: []string{"10.10.0.0/24", "dead:beef::/96"},
},
}, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
defer ensureNodeDeleted(clientset, hostname)
By("Creating a pod with a specific IP address")
name := fmt.Sprintf("run%d", rand.Uint32())
ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: name},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
defer ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name)
requestedIP := "10.10.0.42"
expectedIP := net.IPv4(10, 10, 0, 42).To4()
_, _, _, contAddresses, _, contNs, err := testutils.CreateContainer(netconfHostLocalIPAM, name, testutils.K8S_TEST_NS, requestedIP)
Expect(err).NotTo(HaveOccurred())
podIP := contAddresses[0].IP
Expect(podIP).Should(Equal(expectedIP))
By("Deleting the pod we created earlier")
_, err = testutils.DeleteContainer(netconfHostLocalIPAM, contNs.Path(), name, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
By("Creating a second pod with the same IP address as the first pod")
name2 := fmt.Sprintf("run2%d", rand.Uint32())
ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: name2},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: fmt.Sprintf("container-%s", name2),
Image: "ignore",
}},
NodeName: hostname,
},
})
defer ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name2)
_, _, _, contAddresses, _, contNs, err = testutils.CreateContainer(netconfHostLocalIPAM, name2, testutils.K8S_TEST_NS, requestedIP)
Expect(err).NotTo(HaveOccurred())
pod2IP := contAddresses[0].IP
Expect(pod2IP).Should(Equal(expectedIP))
err = contNs.Do(func(_ ns.NetNS) error {
defer GinkgoRecover()
out, err := exec.Command("ip", "route", "show").Output()
Expect(err).NotTo(HaveOccurred())
for _, r := range c.expectedV4Routes {
Expect(string(out)).To(MatchRegexp(r))
}
if c.unexpectedRoute != "" {
Expect(string(out)).NotTo(ContainSubstring(c.unexpectedRoute))
}
out, err = exec.Command("ip", "-6", "route", "show").Output()
Expect(err).NotTo(HaveOccurred())
for _, r := range c.expectedV6Routes {
Expect(string(out)).To(MatchRegexp(r))
}
if c.numIPv6IPs > 0 {
err := testutils.CheckSysctlValue("/proc/sys/net/ipv6/conf/eth0/accept_dad", "0")
Expect(err).NotTo(HaveOccurred())
}
out, err = exec.Command("ip", "addr", "show").Output()
Expect(err).NotTo(HaveOccurred())
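// Count the global-scope IPv4 and IPv6 addresses on the interface and compare them with the expected totals for this config.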
inet := regexp.MustCompile(` {4}inet .*scope global`)
Expect(inet.FindAll(out, -1)).To(HaveLen(c.numIPv4IPs))
inetv6 := regexp.MustCompile(` {4}inet6 .*scope global`)
Expect(inetv6.FindAll(out, -1)).To(HaveLen(c.numIPv6IPs))
Expect(out).NotTo(ContainSubstring("scope global tentative"),
"Some IPv6 addresses marked as tentative; disabling DAD must have failed.")
return nil
})
Expect(err).ShouldNot(HaveOccurred())
_, err = testutils.DeleteContainer(netconfHostLocalIPAM, contNs.Path(), name2, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
})
})
}
})
Context("using calico-ipam with a Namespace annotation only", func() {
var nc types.NetConf
var netconf string
var pool1CIDR, pool2CIDR *net.IPNet
var pool1 = "50.60.0.0/28"
var pool2 = "60.70.0.0/28"
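// Each /28 pool contains 16 addresses, so allocating numAddrsInPool addresses exhausts a pool.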
var numAddrsInPool = 16
var clientset *kubernetes.Clientset
var name string
var testNS string
BeforeEach(func() {
// Build the network config for this set of tests.
nc = types.NetConf{
CNIVersion: cniVersion,
Name: "calico-uts",
Type: "calico",
EtcdEndpoints: fmt.Sprintf("http://%s:2379", os.Getenv("ETCD_IP")),
DatastoreType: os.Getenv("DATASTORE_TYPE"),
Kubernetes: types.Kubernetes{Kubeconfig: "/home/user/certs/kubeconfig"},
Policy: types.Policy{PolicyType: "k8s"},
NodenameFileOptional: true,
LogLevel: "info",
}
nc.IPAM.Type = "calico-ipam"
ncb, err := json.Marshal(nc)
Expect(err).NotTo(HaveOccurred())
netconf = string(ncb)
// Create IP Pools.
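// Use a /29 block size because the default /26 IPAM block would be larger than these /28 pools.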
testutils.MustCreateNewIPPoolBlockSize(calicoClient, pool1, false, false, true, 29)
_, pool1CIDR, err = net.ParseCIDR(pool1)
Expect(err).NotTo(HaveOccurred())
testutils.MustCreateNewIPPoolBlockSize(calicoClient, pool2, false, false, true, 29)
_, pool2CIDR, err = net.ParseCIDR(pool2)
Expect(err).NotTo(HaveOccurred())
// Create client set.
clientset = getKubernetesClient()
})
AfterEach(func() {
// Delete pod
ensurePodDeleted(clientset, testNS, name)
// Delete the IP Pools.
testutils.MustDeleteIPPool(calicoClient, pool1)
testutils.MustDeleteIPPool(calicoClient, pool2)
})
It("successfully assigns an IP address from an IP Pool specified on a Namespace", func() {
// Create the Namespace.
testNS = fmt.Sprintf("run%d", rand.Uint32())
_, err = clientset.CoreV1().Namespaces().Create(context.Background(), &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: testNS,
Annotations: map[string]string{
"cni.projectcalico.org/ipv4pools": "[\"50.60.0.0/28\"]",
},
},
}, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
// Now create a K8s pod.
name = fmt.Sprintf("run%d", rand.Uint32())
ensurePodCreated(clientset, testNS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
_, _, _, contAddresses, _, contNs, err := testutils.CreateContainer(netconf, name, testNS, "")
Expect(err).NotTo(HaveOccurred())
podIP := contAddresses[0].IP
Expect(pool1CIDR.Contains(podIP)).To(BeTrue())
// Delete the container.
_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testNS)
Expect(err).ShouldNot(HaveOccurred())
})
It("should fail to assign from an IP pool that doesn't exist", func() {
// Create the Namespace.
testNS = fmt.Sprintf("run%d", rand.Uint32())
_, err = clientset.CoreV1().Namespaces().Create(context.Background(), &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: testNS,
Annotations: map[string]string{
"cni.projectcalico.org/ipv4pools": "[\"100.0.0.0/16\"]",
},
},
}, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
// Now create a K8s pod.
name = fmt.Sprintf("run%d", rand.Uint32())
ensurePodCreated(clientset, testNS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
// Expect an error when invoking the CNI plugin.
_, _, _, _, _, contNs, err := testutils.CreateContainer(netconf, name, testNS, "")
Expect(err).To(HaveOccurred())
// Delete the container.
_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testNS)
Expect(err).ShouldNot(HaveOccurred())
})
It("should fail to assign an IP when the provided IP Pool is full", func() {
// Create the Namespace.
testNS = fmt.Sprintf("run%d", rand.Uint32())
_, err = clientset.CoreV1().Namespaces().Create(context.Background(), &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: testNS,
Annotations: map[string]string{
"cni.projectcalico.org/ipv4pools": "[\"50.60.0.0/28\"]",
},
},
}, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
// Now create a K8s pod.
name = fmt.Sprintf("run%d", rand.Uint32())
ensurePodCreated(clientset, testNS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
// Allocate all the addresses in the IP pool.
handle := "test-handle"
v4ia, _, err := calicoClient.IPAM().AutoAssign(
context.Background(),
ipam.AutoAssignArgs{
Num4: numAddrsInPool,
HandleID: &handle,
IPv4Pools: []cnet.IPNet{{IPNet: *pool1CIDR}},
IntendedUse: api.IPPoolAllowedUseWorkload,
},
)
Expect(err).NotTo(HaveOccurred())
Expect(v4ia).ToNot(BeNil())
Expect(len(v4ia.IPs)).To(Equal(numAddrsInPool))
// Expect an error when invoking the CNI plugin.
_, _, _, _, _, contNs, err := testutils.CreateContainer(netconf, name, testNS, "")
Expect(err).To(HaveOccurred())
// Delete the container.
_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testNS)
Expect(err).ShouldNot(HaveOccurred())
// Release all the IPs assigned above.
err = calicoClient.IPAM().ReleaseByHandle(context.Background(), handle)
Expect(err).ShouldNot(HaveOccurred())
})
It("should assign an IP from the second pool when the first IP Pool is full", func() {
// Create the Namespace.
testNS = fmt.Sprintf("run%d", rand.Uint32())
_, err = clientset.CoreV1().Namespaces().Create(context.Background(), &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: testNS,
Annotations: map[string]string{
"cni.projectcalico.org/ipv4pools": "[\"50.60.0.0/28\", \"60.70.0.0/28\"]",
},
},
}, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
// Now create a K8s pod.
name = fmt.Sprintf("run%d", rand.Uint32())
ensurePodCreated(clientset, testNS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
// Allocate all the addresses in the first IP pool.
handle := "test-handle"
v4ia, _, err := calicoClient.IPAM().AutoAssign(
context.Background(),
ipam.AutoAssignArgs{
Num4: numAddrsInPool,
HandleID: &handle,
IPv4Pools: []cnet.IPNet{{IPNet: *pool1CIDR}},
IntendedUse: api.IPPoolAllowedUseWorkload,
},
)
Expect(err).NotTo(HaveOccurred())
Expect(v4ia).ToNot(BeNil())
Expect(len(v4ia.IPs)).To(Equal(numAddrsInPool))
// Invoke the CNI plugin.
_, r, _, _, _, contNs, err := testutils.CreateContainer(netconf, name, testNS, "")
Expect(err).NotTo(HaveOccurred())
// Expect the assigned IP address in the second IP pool.
Expect(len(r.IPs)).To(Equal(1))
Expect(pool2CIDR.Contains(r.IPs[0].Address.IP)).To(BeTrue(), "IP assigned from wrong pool")
// Delete the container.
_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testNS)
Expect(err).ShouldNot(HaveOccurred())
// Release all the IPs assigned above.
err = calicoClient.IPAM().ReleaseByHandle(context.Background(), handle)
Expect(err).ShouldNot(HaveOccurred())
})
})
Context("using calico-ipam with Namespace annotation and pod annotation", func() {
var nc types.NetConf
var netconf string
var ipPoolCIDR *net.IPNet
var pool1 = "50.70.0.0/16"
var clientset *kubernetes.Clientset
var name string
var testNS string
BeforeEach(func() {
// Build the network config for this set of tests.
nc = types.NetConf{
CNIVersion: cniVersion,
Name: "calico-uts",
Type: "calico",
EtcdEndpoints: fmt.Sprintf("http://%s:2379", os.Getenv("ETCD_IP")),
DatastoreType: os.Getenv("DATASTORE_TYPE"),
Kubernetes: types.Kubernetes{Kubeconfig: "/home/user/certs/kubeconfig"},
Policy: types.Policy{PolicyType: "k8s"},
NodenameFileOptional: true,
LogLevel: "info",
}
nc.IPAM.Type = "calico-ipam"
ncb, err := json.Marshal(nc)
Expect(err).NotTo(HaveOccurred())
netconf = string(ncb)
// Create a new IP Pool.
testutils.MustCreateNewIPPool(calicoClient, pool1, false, false, true)
_, ipPoolCIDR, err = net.ParseCIDR(pool1)
Expect(err).NotTo(HaveOccurred())
// Create clients.
clientset = getKubernetesClient()
})
AfterEach(func() {
// Delete pod
ensurePodDeleted(clientset, testNS, name)
// Delete the IP Pool.
testutils.MustDeleteIPPool(calicoClient, pool1)
})
It("should prefer pod annotations to namespace annotations if both are present", func() {
// Create the Namespace.
testNS = fmt.Sprintf("run%d", rand.Uint32())
_, err = clientset.CoreV1().Namespaces().Create(context.Background(), &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: testNS,
Annotations: map[string]string{
"cni.projectcalico.org/ipv4pools": "[\"50.55.0.0/16\"]",
},
},
}, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
// Now create a K8s pod passing in an IP pool.
name = fmt.Sprintf("run%d", rand.Uint32())
ensurePodCreated(clientset, testNS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{
"cni.projectcalico.org/ipv4pools": "[\"50.70.0.0/16\"]",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
// Run the CNI plugin.
_, _, _, contAddresses, _, contNs, err := testutils.CreateContainer(netconf, name, testNS, "")
Expect(err).NotTo(HaveOccurred())
podIP := contAddresses[0].IP
Expect(ipPoolCIDR.Contains(podIP)).To(BeTrue())
// Delete the container.
_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testNS)
Expect(err).ShouldNot(HaveOccurred())
})
})
Context("using calico-ipam specifying IP pools via pod annotation", func() {
var nc types.NetConf
var netconf string
var pool1 = "172.16.0.0/16"
var pool2 = "172.17.0.0/16"
var pool1CIDR, pool2CIDR *net.IPNet
var pool2Name string
var clientset *kubernetes.Clientset
var name string
BeforeEach(func() {
// Build the network config for this set of tests.
nc = types.NetConf{
CNIVersion: cniVersion,
Name: "calico-uts",
Type: "calico",
EtcdEndpoints: fmt.Sprintf("http://%s:2379", os.Getenv("ETCD_IP")),
DatastoreType: os.Getenv("DATASTORE_TYPE"),
Kubernetes: types.Kubernetes{Kubeconfig: "/home/user/certs/kubeconfig"},
Policy: types.Policy{PolicyType: "k8s"},
NodenameFileOptional: true,
LogLevel: "info",
}
nc.IPAM.Type = "calico-ipam"
ncb, err := json.Marshal(nc)
Expect(err).NotTo(HaveOccurred())
netconf = string(ncb)
// Create two IP pools.
testutils.MustCreateNewIPPool(calicoClient, pool1, false, false, true)
_, pool1CIDR, err = net.ParseCIDR(pool1)
Expect(err).NotTo(HaveOccurred())
pool2Name = testutils.MustCreateNewIPPool(calicoClient, pool2, false, false, true)
_, pool2CIDR, err = net.ParseCIDR(pool2)
Expect(err).NotTo(HaveOccurred())
// Create a kubernetes clientset.
clientset = getKubernetesClient()
// Ensure a namespace exists.
ensureNamespace(clientset, testutils.K8S_TEST_NS)
})
AfterEach(func() {
// Delete pod
ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name)
// Delete the IP Pools.
testutils.MustDeleteIPPool(calicoClient, pool1)
testutils.MustDeleteIPPool(calicoClient, pool2)
})
It("successfully assigns an IP address from the annotated IP Pool (by cidr)", func() {
// Create a K8s pod passing in an IP pool.
name = fmt.Sprintf("run%d", rand.Uint32())
ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{
"cni.projectcalico.org/ipv4pools": "[\"172.16.0.0/16\"]",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
_, _, _, contAddresses, _, contNs, err := testutils.CreateContainer(netconf, name, testutils.K8S_TEST_NS, "")
Expect(err).NotTo(HaveOccurred())
podIP := contAddresses[0].IP
Expect(pool1CIDR.Contains(podIP)).To(BeTrue())
// Delete the container.
_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
})
It("successfully assigns an IP address from the annotated IP Pool (by name)", func() {
// Create a K8s pod passing in an IP pool.
name = fmt.Sprintf("run%d", rand.Uint32())
ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{
"cni.projectcalico.org/ipv4pools": fmt.Sprintf("[\"%s\"]", pool2Name),
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
_, _, _, contAddresses, _, contNs, err := testutils.CreateContainer(netconf, name, testutils.K8S_TEST_NS, "")
Expect(err).NotTo(HaveOccurred())
podIP := contAddresses[0].IP
Expect(pool2CIDR.Contains(podIP)).To(BeTrue())
// Delete the container.
_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
})
})
Context("using floatingIPs annotation to assign a DNAT", func() {
var netconf types.NetConf
var clientset *kubernetes.Clientset
var name string
BeforeEach(func() {
netconf = types.NetConf{
CNIVersion: cniVersion,
Name: "calico-network-name",
Type: "calico",
EtcdEndpoints: fmt.Sprintf("http://%s:2379", os.Getenv("ETCD_IP")),
DatastoreType: os.Getenv("DATASTORE_TYPE"),
Kubernetes: types.Kubernetes{Kubeconfig: "/home/user/certs/kubeconfig"},
Policy: types.Policy{PolicyType: "k8s"},
NodenameFileOptional: true,
LogLevel: "info",
FeatureControl: types.FeatureControl{FloatingIPs: true},
}
netconf.IPAM.Type = "calico-ipam"
// Create an IP pool for the pod IP as well as a floating IP range.
for _, ipPool := range []string{"172.16.0.0/16", "1.1.1.0/24"} {
testutils.MustCreateNewIPPool(calicoClient, ipPool, false, false, true)
_, _, err := net.ParseCIDR(ipPool)
Expect(err).NotTo(HaveOccurred())
}
// Build kubernetes clients.
clientset = getKubernetesClient()
// Now create a K8s pod passing in a floating IP.
ensureNamespace(clientset, testutils.K8S_TEST_NS)
name = fmt.Sprintf("run%d", rand.Uint32())
ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{
"cni.projectcalico.org/floatingIPs": "[\"1.1.1.1\"]",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
})
AfterEach(func() {
// Delete pod
ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name)
// Delete IPPools.
for _, ipPool := range []string{"172.16.0.0/16", "1.1.1.0/24"} {
testutils.MustDeleteIPPool(calicoClient, ipPool)
}
})
It("successfully assigns a DNAT IP address from the annotated floatingIP", func() {
// Resolve the config struct.
confBytes, err := json.Marshal(netconf)
Expect(err).NotTo(HaveOccurred())
// Invoke the CNI plugin
_, _, _, contAddresses, _, contNs, err := testutils.CreateContainer(string(confBytes), name, testutils.K8S_TEST_NS, "")
Expect(err).NotTo(HaveOccurred())
// Assert that the endpoint is created
endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
// Assert that the endpoint contains the appropriate DNAT
podIP := contAddresses[0].IP
Expect(endpoints.Items[0].Spec.IPNATs).Should(HaveLen(1))
Expect(endpoints.Items[0].Spec.IPNATs).Should(Equal([]libapi.IPNAT{{InternalIP: podIP.String(), ExternalIP: "1.1.1.1"}}))
// Delete the container.
_, err = testutils.DeleteContainer(string(confBytes), contNs.Path(), name, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
})
It("fails when the floating_ip feature is not enabled", func() {
// Resolve the config struct, disabling the feature.
netconf.FeatureControl.FloatingIPs = false
confBytes, err := json.Marshal(netconf)
Expect(err).NotTo(HaveOccurred())
// Invoke the CNI plugin, expect it to fail.
_, _, _, _, _, contNs, err := testutils.CreateContainer(string(confBytes), name, testutils.K8S_TEST_NS, "")
Expect(err).To(HaveOccurred())
if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
// No WEP should be created with an etcd datastore.
endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(0))
}
// Delete the container.
_, err = testutils.DeleteContainer(string(confBytes), contNs.Path(), name, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
})
})
Context("using ipAddrsNoIpam annotation to assign IP address to a pod, bypassing IPAM", func() {
var clientset *kubernetes.Clientset
var netconf string
var nc types.NetConf
var name string
BeforeEach(func() {
// Set up clients.
clientset = getKubernetesClient()
// Create a network config.
nc = types.NetConf{
CNIVersion: cniVersion,
Name: "calico-uts",
Type: "calico",
EtcdEndpoints: fmt.Sprintf("http://%s:2379", os.Getenv("ETCD_IP")),
DatastoreType: os.Getenv("DATASTORE_TYPE"),
Kubernetes: types.Kubernetes{Kubeconfig: "/home/user/certs/kubeconfig"},
Policy: types.Policy{PolicyType: "k8s"},
NodenameFileOptional: true,
LogLevel: "info",
FeatureControl: types.FeatureControl{IPAddrsNoIpam: true},
}
nc.IPAM.Type = "calico-ipam"
ncb, err := json.Marshal(nc)
Expect(err).NotTo(HaveOccurred())
netconf = string(ncb)
})
AfterEach(func() {
// Delete pod
ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name)
})
It("should successfully assigns the annotated IP address", func() {
assignIP := net.IPv4(10, 0, 0, 1).To4()
// Now create a K8s pod passing in an IP address.
name = fmt.Sprintf("run%d", rand.Uint32())
ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{
"cni.projectcalico.org/ipAddrsNoIpam": "[\"10.0.0.1\"]",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
containerID, _, contVeth, contAddresses, _, contNs, err := testutils.CreateContainer(netconf, name, testutils.K8S_TEST_NS, "")
Expect(err).NotTo(HaveOccurred())
mac := contVeth.Attrs().HardwareAddr
podIP := contAddresses[0].IP
Expect(podIP).Should(Equal(assignIP))
ids := names.WorkloadEndpointIdentifiers{
Node: hostname,
Orchestrator: api.OrchestratorKubernetes,
Endpoint: "eth0",
Pod: name,
ContainerID: containerID,
}
wrkload, err := ids.CalculateWorkloadEndpointName(false)
Expect(err).NotTo(HaveOccurred())
interfaceName := k8sconversion.NewConverter().VethNameForWorkload(testutils.K8S_TEST_NS, name)
// The endpoint is created
endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
if os.Getenv("DATASTORE_TYPE") == "kubernetes" {
// Unlike the etcd datastore, a WorkloadEndpoint backed by a Kubernetes pod does not store the MAC address.
// Put it back manually for the comparison below.
endpoints.Items[0].Spec.MAC = mac.String()
}
Expect(endpoints.Items[0].Name).Should(Equal(wrkload))
Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
"projectcalico.org/namespace": "test",
"projectcalico.org/orchestrator": api.OrchestratorKubernetes,
"projectcalico.org/serviceaccount": "default",
}))
Expect(endpoints.Items[0].Spec).Should(Equal(libapi.WorkloadEndpointSpec{
Pod: name,
InterfaceName: interfaceName,
IPNetworks: []string{assignIP.String() + "/32"},
ServiceAccountName: "default",
MAC: mac.String(),
Profiles: []string{"kns.test", "ksa.test.default"},
Node: hostname,
Endpoint: "eth0",
Workload: "",
ContainerID: containerID,
Orchestrator: api.OrchestratorKubernetes,
}))
// Assert sysctl values are set for what we would expect for an endpoint.
err = checkInterfaceConfig(interfaceName, "4")
Expect(err).ShouldNot(HaveOccurred())
// Delete the container.
_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
})
It("should fail if ipAddrsNoIpam is not enabled", func() {
// Disable the feature
nc.FeatureControl.IPAddrsNoIpam = false
ncb, err := json.Marshal(nc)
Expect(err).NotTo(HaveOccurred())
netconf = string(ncb)
clientset := getKubernetesClient()
// Now create a K8s pod passing in an IP address.
name = fmt.Sprintf("run%d", rand.Uint32())
ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{
"cni.projectcalico.org/ipAddrsNoIpam": "[\"10.0.0.1\"]",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
_, _, _, _, _, contNs, err := testutils.CreateContainer(netconf, name, testutils.K8S_TEST_NS, "")
Expect(err).To(HaveOccurred())
_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
})
It("should return an error if multiple addresses are requested using ipAddrsNoIpam", func() {
// Now create a K8s pod passing in more than one IPv4 address.
name = fmt.Sprintf("run%d", rand.Uint32())
ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{
"cni.projectcalico.org/ipAddrsNoIpam": "[\"10.0.0.1\", \"10.0.0.2\"]",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
_, _, _, _, _, contNs, err := testutils.CreateContainer(netconf, name, testutils.K8S_TEST_NS, "")
Expect(err).To(HaveOccurred())
if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
// No WEP should be created with an etcd datastore.
endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(0))
}
// Delete the container.
_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
})
})
Context("using ipAddrs annotation to assign IP address to a pod from IPAM", func() {
var clientset *kubernetes.Clientset
BeforeEach(func() {
// Set up clients.
clientset = getKubernetesClient()
})
It("should successfully assign the annotated IP address", func() {
netconfCalicoIPAM := fmt.Sprintf(`
{
"cniVersion": "%s",
"name": "net4",
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"datastore_type": "%s",
"nodename_file_optional": true,
"ipam": {
"type": "calico-ipam",
"assign_ipv4": "true",
"assign_ipv6": "true"
},
"kubernetes": {
"kubeconfig": "/home/user/certs/kubeconfig"
},
"policy": {"type": "k8s"},
"log_level":"info"
}`, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"))
assignIP := net.IPv4(20, 0, 0, 111).To4()
// Create a new ipPool.
ipPool := "20.0.0.0/24"
testutils.MustCreateNewIPPool(calicoClient, ipPool, false, false, true)
_, _, err := net.ParseCIDR(ipPool)
Expect(err).NotTo(HaveOccurred())
// Now create a K8s pod passing in an IP address.
name := fmt.Sprintf("run%d", rand.Uint32())
ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{
"cni.projectcalico.org/ipAddrs": "[\"20.0.0.111\"]",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
defer ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name)
containerID, _, contVeth, contAddresses, _, netNS, err := testutils.CreateContainer(netconfCalicoIPAM, name, testutils.K8S_TEST_NS, "")
Expect(err).NotTo(HaveOccurred())
mac := contVeth.Attrs().HardwareAddr
podIP := contAddresses[0].IP
Expect(podIP).Should(Equal(assignIP))
ids := names.WorkloadEndpointIdentifiers{
Node: hostname,
Orchestrator: api.OrchestratorKubernetes,
Endpoint: "eth0",
Pod: name,
ContainerID: containerID,
}
wrkload, err := ids.CalculateWorkloadEndpointName(false)
Expect(err).NotTo(HaveOccurred())
interfaceName := k8sconversion.NewConverter().VethNameForWorkload(testutils.K8S_TEST_NS, name)
// Make sure WorkloadEndpoint is created and has the requested IP in the datastore.
endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
if os.Getenv("DATASTORE_TYPE") == "kubernetes" {
// Unlike the etcd datastore, a WorkloadEndpoint backed by a Kubernetes pod does not store the MAC address.
// Put it back manually for the comparison below.
endpoints.Items[0].Spec.MAC = mac.String()
}
Expect(endpoints.Items[0].Name).Should(Equal(wrkload))
Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
"projectcalico.org/namespace": "test",
"projectcalico.org/orchestrator": api.OrchestratorKubernetes,
"projectcalico.org/serviceaccount": "default",
}))
Expect(endpoints.Items[0].Spec).Should(Equal(libapi.WorkloadEndpointSpec{
Pod: name,
InterfaceName: interfaceName,
IPNetworks: []string{assignIP.String() + "/32"},
ServiceAccountName: "default",
MAC: mac.String(),
Profiles: []string{"kns.test", "ksa.test.default"},
Node: hostname,
Endpoint: "eth0",
Workload: "",
ContainerID: containerID,
Orchestrator: api.OrchestratorKubernetes,
}))
// Check the pod's IP annotations.
checkPodIPAnnotations(clientset, testutils.K8S_TEST_NS, name, "20.0.0.111/32", "20.0.0.111/32")
// Assert sysctl values are set for what we would expect for an endpoint.
err = checkInterfaceConfig(interfaceName, "4")
Expect(err).ShouldNot(HaveOccurred())
// Delete the container.
_, err = testutils.DeleteContainer(netconfCalicoIPAM, netNS.Path(), name, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
})
})
Context("with dual stack IP allocations", func() {
var clientset *kubernetes.Clientset
var ipPool4 string = "20.0.0.0/24"
var ipPool6 string = "fd80:20::/96"
BeforeEach(func() {
// Set up clients.
clientset = getKubernetesClient()
testutils.MustCreateNewIPPool(calicoClient, ipPool4, false, false, true)
testutils.MustCreateNewIPPool(calicoClient, ipPool6, false, false, true)
})
AfterEach(func() {
testutils.MustDeleteIPPool(calicoClient, ipPool4)
testutils.MustDeleteIPPool(calicoClient, ipPool6)
})
It("should allocate IPv4 and IPv6 addresses and handle dual stack floating IPs", func() {
netconfCalicoIPAM := fmt.Sprintf(`
{
"feature_control": {
"floating_ips": true
},
"cniVersion": "%s",
"name": "net4",
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"datastore_type": "%s",
"nodename_file_optional": true,
"ipam": {
"type": "calico-ipam",
"assign_ipv4": "true",
"assign_ipv6": "true"
},
"kubernetes": {
"kubeconfig": "/home/user/certs/kubeconfig"
},
"policy": {"type": "k8s"},
"log_level":"info"
}`, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"))
// Now create a K8s pod (without any pod IP annotations).
name := fmt.Sprintf("run%d", rand.Uint32())
ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{
"cni.projectcalico.org/floatingIPs": "[\"1.1.1.1\", \"2001:647f::21\"]",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
defer ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name)
containerID, _, contVeth, contAddresses, _, netNS, err := testutils.CreateContainer(netconfCalicoIPAM, name, testutils.K8S_TEST_NS, "")
Expect(err).NotTo(HaveOccurred())
mac := contVeth.Attrs().HardwareAddr
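// With assign_ipv4 and assign_ipv6 both enabled, the result should contain exactly one IPv4 and one IPv6 address.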
Expect(contAddresses).To(HaveLen(2))
podIPv4 := contAddresses[0].IP
Expect(podIPv4.To4()).NotTo(BeNil())
podIPv6 := contAddresses[1].IP
Expect(podIPv6.To16()).NotTo(BeNil())
ids := names.WorkloadEndpointIdentifiers{
Node: hostname,
Orchestrator: api.OrchestratorKubernetes,
Endpoint: "eth0",
Pod: name,
ContainerID: containerID,
}
wrkload, err := ids.CalculateWorkloadEndpointName(false)
Expect(err).NotTo(HaveOccurred())
interfaceName := k8sconversion.NewConverter().VethNameForWorkload(testutils.K8S_TEST_NS, name)
// Make sure WorkloadEndpoint is created and has the requested IP in the datastore.
endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
if os.Getenv("DATASTORE_TYPE") == "kubernetes" {
// Unlike the etcd datastore, a WorkloadEndpoint backed by a Kubernetes pod does not store the MAC address.
// Put it back manually for the comparison below.
endpoints.Items[0].Spec.MAC = mac.String()
}
Expect(endpoints.Items[0].Name).Should(Equal(wrkload))
Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
"projectcalico.org/namespace": "test",
"projectcalico.org/orchestrator": api.OrchestratorKubernetes,
"projectcalico.org/serviceaccount": "default",
}))
Expect(endpoints.Items[0].Spec).Should(Equal(libapi.WorkloadEndpointSpec{
Pod: name,
InterfaceName: interfaceName,
ServiceAccountName: "default",
IPNetworks: []string{podIPv4.String() + "/32", podIPv6.String() + "/128"},
MAC: mac.String(),
Profiles: []string{"kns.test", "ksa.test.default"},
Node: hostname,
Endpoint: "eth0",
Workload: "",
IPNATs: []libapi.IPNAT{
{
InternalIP: podIPv4.String(),
ExternalIP: "1.1.1.1",
},
{
InternalIP: podIPv6.String(),
ExternalIP: "2001:647f::21",
},
},
ContainerID: containerID,
Orchestrator: api.OrchestratorKubernetes,
}))
// Check the pod's IP annotations.
checkPodIPAnnotations(clientset, testutils.K8S_TEST_NS, name, podIPv4.String()+"/32", podIPv4.String()+"/32,"+podIPv6.String()+"/128")
// Delete the container.
_, err = testutils.DeleteContainer(netconfCalicoIPAM, netNS.Path(), name, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
})
})
// This context contains test cases meant to simulate specific scenarios seen when running the plugin
// in a Kubernetes cluster.
Context("Kubernetes-specific race condition tests", func() {
var clientset *kubernetes.Clientset
var cniContainerIDX string = "container-id-00x"
var cniContainerIDY string = "container-id-00y"
var ipPool string = "10.0.0.0/24"
var name string
var nc types.NetConf
var netconf string
BeforeEach(func() {
// Set up clients.
clientset = getKubernetesClient()
// Create a network config.
nc = types.NetConf{
CNIVersion: cniVersion,
Name: "calico-uts",
Type: "calico",
EtcdEndpoints: fmt.Sprintf("http://%s:2379", os.Getenv("ETCD_IP")),
DatastoreType: os.Getenv("DATASTORE_TYPE"),
Kubernetes: types.Kubernetes{Kubeconfig: "/home/user/certs/kubeconfig"},
Policy: types.Policy{PolicyType: "k8s"},
NodenameFileOptional: true,
LogLevel: "info",
}
nc.IPAM.Type = "calico-ipam"
ncb, err := json.Marshal(nc)
Expect(err).NotTo(HaveOccurred())
netconf = string(ncb)
// Create a new ipPool.
testutils.MustCreateNewIPPool(calicoClient, ipPool, false, false, true)
// Now create a K8s pod.
name = fmt.Sprintf("pod-%d", rand.Uint32())
ensurePodCreated(clientset, testutils.K8S_TEST_NS,
&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
})
AfterEach(func() {
// Delete pod
ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name)
// Delete IP pools.
testutils.MustDeleteIPPool(calicoClient, ipPool)
})
// This specific test case is for an issue where k8s would aggressively send extra DELs. See: https://github.com/kubernetes/kubernetes/issues/44100
// Specifically, what this test simulates is:
// - CNI ADD for containerIDX
// - CNI DEL for containerIDX
// - CNI ADD for containerIDY
// - CNI DEL for containerIDX (Spurious delete for old container ID)
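// As a rough sketch, the CNI inputs corresponding to that sequence look like the
// following (values are illustrative only, mirroring the IDs used below):
//   ADD  CNI_CONTAINERID=container-id-00x  -> WEP created for "x"
//   DEL  CNI_CONTAINERID=container-id-00x  -> WEP for "x" removed
//   ADD  CNI_CONTAINERID=container-id-00y  -> WEP created for "y"
//   DEL  CNI_CONTAINERID=container-id-00x  -> spurious; must leave the "y" WEP intact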
It("Use different container IDs to ADD and DEL the container", func() {
// ADD the container with passing a container ID "X".
_, _, _, _, _, contNs, err := testutils.CreateContainerWithId(netconf, name, testutils.K8S_TEST_NS, "", cniContainerIDX)
Expect(err).ShouldNot(HaveOccurred())
// Assert that the endpoint is created in the backend datastore with ContainerID "X".
endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
idsX := names.WorkloadEndpointIdentifiers{
Node: hostname,
Orchestrator: api.OrchestratorKubernetes,
Endpoint: "eth0",
Pod: name,
ContainerID: cniContainerIDX,
}
wrkloadX, err := idsX.CalculateWorkloadEndpointName(false)
Expect(err).NotTo(HaveOccurred())
Expect(endpoints.Items[0].Name).Should(Equal(wrkloadX))
Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
"projectcalico.org/namespace": "test",
"projectcalico.org/orchestrator": api.OrchestratorKubernetes,
"projectcalico.org/serviceaccount": "default",
}))
if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
Expect(endpoints.Items[0].Spec.ContainerID).Should(Equal(cniContainerIDX))
}
// Delete the container with container ID "X".
exitCode, err := testutils.DeleteContainerWithId(netconf, contNs.Path(), name, testutils.K8S_TEST_NS, cniContainerIDX)
Expect(err).ShouldNot(HaveOccurred())
Expect(exitCode).Should(Equal(0))
if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
// The endpoint for ContainerID "X" should not exist in the backend datastore.
endpoints, err = calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(0))
}
// ADD a new container, passing a container ID "Y".
_, _, _, _, _, contNs, err = testutils.CreateContainerWithId(netconf, name, testutils.K8S_TEST_NS, "", cniContainerIDY)
Expect(err).ShouldNot(HaveOccurred())
// Assert that the endpoint is created in the backend datastore with ContainerID "Y".
endpoints, err = calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
idsY := names.WorkloadEndpointIdentifiers{
Node: hostname,
Orchestrator: api.OrchestratorKubernetes,
Endpoint: "eth0",
Pod: name,
ContainerID: cniContainerIDY,
}
wrkloadY, err := idsY.CalculateWorkloadEndpointName(false)
Expect(err).NotTo(HaveOccurred())
Expect(endpoints.Items[0].Name).Should(Equal(wrkloadY))
Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
"projectcalico.org/namespace": "test",
"projectcalico.org/orchestrator": api.OrchestratorKubernetes,
"projectcalico.org/serviceaccount": "default",
}))
if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
Expect(endpoints.Items[0].Spec.ContainerID).Should(Equal(cniContainerIDY))
}
// Delete the container with container ID "X" again.
exitCode, err = testutils.DeleteContainerWithId(netconf, contNs.Path(), name, testutils.K8S_TEST_NS, cniContainerIDX)
Expect(err).ShouldNot(HaveOccurred())
Expect(exitCode).Should(Equal(0))
// Assert that the endpoint with container ID "Y" is still in the datastore.
endpoints, err = calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
Expect(endpoints.Items[0].Name).Should(Equal(wrkloadY))
Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
"projectcalico.org/namespace": "test",
"projectcalico.org/orchestrator": api.OrchestratorKubernetes,
"projectcalico.org/serviceaccount": "default",
}))
if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
Expect(endpoints.Items[0].Spec.ContainerID).Should(Equal(cniContainerIDY))
}
// Finally, delete the container with container ID "Y".
exitCode, err = testutils.DeleteContainerWithId(netconf, contNs.Path(), name, testutils.K8S_TEST_NS, cniContainerIDY)
Expect(err).ShouldNot(HaveOccurred())
Expect(exitCode).Should(Equal(0))
})
// Specifically, this test simulates the following:
// - CNI ADD using containerIDX
// - CNI ADD using containerIDY
// - CNI DEL using containerIDX (should be a no-op)
// - CNI DEL using containerIDY (should actually delete the container)
It("should handle deletes for stale container IDs", func() {
// ADD the container with passing a CNI_CONTAINERID of "X".
_, _, _, _, _, _, err := testutils.CreateContainerWithId(netconf, name, testutils.K8S_TEST_NS, "", cniContainerIDX)
Expect(err).ShouldNot(HaveOccurred())
// Assert that the endpoint is created in the backend datastore with ContainerID "X".
endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
idsX := names.WorkloadEndpointIdentifiers{
Node: hostname,
Orchestrator: api.OrchestratorKubernetes,
Endpoint: "eth0",
Pod: name,
ContainerID: cniContainerIDX,
}
wrkloadX, err := idsX.CalculateWorkloadEndpointName(false)
Expect(err).NotTo(HaveOccurred())
Expect(endpoints.Items[0].Name).Should(Equal(wrkloadX))
Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
"projectcalico.org/namespace": "test",
"projectcalico.org/orchestrator": api.OrchestratorKubernetes,
"projectcalico.org/serviceaccount": "default",
}))
if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
Expect(endpoints.Items[0].Spec.ContainerID).Should(Equal(cniContainerIDX))
}
// ADD the container with passing a CNI_CONTAINERID of "Y"
_, _, _, _, _, contNs, err := testutils.CreateContainerWithId(netconf, name, testutils.K8S_TEST_NS, "", cniContainerIDY)
Expect(err).ShouldNot(HaveOccurred())
// Assert that the endpoint is created in the backend datastore with ContainerID "Y".
endpoints, err = calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
idsY := names.WorkloadEndpointIdentifiers{
Node: hostname,
Orchestrator: api.OrchestratorKubernetes,
Endpoint: "eth0",
Pod: name,
ContainerID: cniContainerIDY,
}
wrkloadY, err := idsY.CalculateWorkloadEndpointName(false)
Expect(err).NotTo(HaveOccurred())
Expect(endpoints.Items[0].Name).Should(Equal(wrkloadY))
Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
"projectcalico.org/namespace": "test",
"projectcalico.org/orchestrator": api.OrchestratorKubernetes,
"projectcalico.org/serviceaccount": "default",
}))
if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
Expect(endpoints.Items[0].Spec.ContainerID).Should(Equal(cniContainerIDY))
}
// Delete the container with the CNI_CONTAINERID "X".
exitCode, err := testutils.DeleteContainerWithId(netconf, contNs.Path(), name, testutils.K8S_TEST_NS, cniContainerIDX)
Expect(err).ShouldNot(HaveOccurred())
Expect(exitCode).Should(Equal(0))
// Assert that the endpoint in the backend datastore still has ContainerID "Y".
endpoints, err = calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
Expect(endpoints.Items[0].Name).Should(Equal(wrkloadY))
Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
"projectcalico.org/namespace": "test",
"projectcalico.org/orchestrator": api.OrchestratorKubernetes,
"projectcalico.org/serviceaccount": "default",
}))
if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
Expect(endpoints.Items[0].Spec.ContainerID).Should(Equal(cniContainerIDY))
}
// Delete the container with the CNI_CONTAINERID "Y".
exitCode, err = testutils.DeleteContainerWithId(netconf, contNs.Path(), name, testutils.K8S_TEST_NS, cniContainerIDY)
Expect(err).ShouldNot(HaveOccurred())
Expect(exitCode).Should(Equal(0))
if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
// Assert that the endpoint in the backend datastore is now gone.
endpoints, err = calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(0))
}
})
})
Context("after a pod has already been networked once", func() {
var nc types.NetConf
var netconf string
var clientset *kubernetes.Clientset
var workloadName, containerID, name string
var endpointSpec libapi.WorkloadEndpointSpec
var contNs ns.NetNS
var result *current.Result
checkIPAMReservation := func() {
// IPAM reservation should still be in place.
handleID := utils.GetHandleID("calico-uts", containerID, workloadName)
ipamIPs, err := calicoClient.IPAM().IPsByHandle(context.Background(), handleID)
ExpectWithOffset(1, err).NotTo(HaveOccurred(), "error getting IPs")
ExpectWithOffset(1, ipamIPs).To(HaveLen(1),
"There should be an IPAM handle for endpoint")
Expect(endpointSpec.IPNetworks).To(HaveLen(1))
ExpectWithOffset(1, ipamIPs[0].String()+"/32").To(Equal(endpointSpec.IPNetworks[0]))
}
BeforeEach(func() {
// Create a new ipPool.
testutils.MustCreateNewIPPool(calicoClient, "10.0.0.0/24", false, false, true)
// Create a network config.
nc = types.NetConf{
CNIVersion: cniVersion,
Name: "calico-uts",
Type: "calico",
EtcdEndpoints: fmt.Sprintf("http://%s:2379", os.Getenv("ETCD_IP")),
DatastoreType: os.Getenv("DATASTORE_TYPE"),
Kubernetes: types.Kubernetes{Kubeconfig: "/home/user/certs/kubeconfig"},
Policy: types.Policy{PolicyType: "k8s"},
NodenameFileOptional: true,
LogLevel: "info",
}
nc.IPAM.Type = "calico-ipam"
ncb, err := json.Marshal(nc)
Expect(err).NotTo(HaveOccurred())
netconf = string(ncb)
// Now create a K8s pod.
clientset = getKubernetesClient()
name = fmt.Sprintf("run%d", rand.Uint32())
ensurePodCreated(clientset, testutils.K8S_TEST_NS,
&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
// Run the CNI plugin.
containerID, result, _, _, _, contNs, err = testutils.CreateContainer(netconf, name, testutils.K8S_TEST_NS, "")
Expect(err).ShouldNot(HaveOccurred())
// The endpoint is created in etcd
endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
ids := names.WorkloadEndpointIdentifiers{
Node: hostname,
Orchestrator: api.OrchestratorKubernetes,
Endpoint: "eth0",
Pod: name,
ContainerID: containerID,
}
workloadName, err = ids.CalculateWorkloadEndpointName(false)
Expect(err).NotTo(HaveOccurred())
Expect(endpoints.Items[0].Name).Should(Equal(workloadName))
Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
"projectcalico.org/namespace": "test",
"projectcalico.org/orchestrator": api.OrchestratorKubernetes,
"projectcalico.org/serviceaccount": "default",
}))
endpointSpec = endpoints.Items[0].Spec
if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
Expect(endpointSpec.ContainerID).Should(Equal(containerID))
}
checkIPAMReservation()
})
AfterEach(func() {
_, err = testutils.DeleteContainerWithId(netconf, contNs.Path(), name, testutils.K8S_TEST_NS, containerID)
Expect(err).ShouldNot(HaveOccurred())
ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name)
})
It("a second ADD for the same container should work, assigning a new IP", func() {
// Try to create the same pod with a different container (so CNI receives the ADD for the same endpoint again)
resultSecondAdd, _, _, _, err := testutils.RunCNIPluginWithId(netconf, name, testutils.K8S_TEST_NS, "", "new-container-id", "eth0", contNs)
Expect(err).NotTo(HaveOccurred())
// The IP addresses shouldn't be the same, since we'll reassign one.
Expect(resultSecondAdd.IPs).ShouldNot(Equal(result.IPs))
// Otherwise, they should be the same.
resultSecondAdd.IPs = nil
result.IPs = nil
Expect(resultSecondAdd).Should(Equal(result))
// IPAM reservation should still be in place.
checkIPAMReservation()
})
Context("with networking rigged to fail", func() {
renameVeth := func(from, to string) {
output, err := exec.Command("ip", "link", "set", from, "down").CombinedOutput()
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Output: %s", output))
output, err = exec.Command("ip", "link", "set", from, "name", to).CombinedOutput()
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Output: %s", output))
output, err = exec.Command("ip", "link", "set", to, "up").CombinedOutput()
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Output: %s", output))
}
var realVethName, tweakedVethName string
BeforeEach(func() {
// To prevent the networking attempt from succeeding, rename the old veth.
// This leaves a route and an eth0 in place that the plugin will struggle with.
realVethName = endpointSpec.InterfaceName
tweakedVethName = strings.Replace(realVethName, "cali", "sali", 1)
renameVeth(realVethName, tweakedVethName)
})
It("a second ADD should fail, but not clean up the original IPAM allocation", func() {
// Try to create the same container (so CNI receives the ADD for the same endpoint again)
// Use a different container ID but the same Pod Name/Namespace
_, _, _, _, err := testutils.RunCNIPluginWithId(netconf, name, testutils.K8S_TEST_NS, "", "new-container-id", "eth0", contNs)
Expect(err).Should(HaveOccurred())
// IPAM reservation should still be in place.
checkIPAMReservation()
})
AfterEach(func() {
// So that the tear-down succeeds, put the veth back.
renameVeth(tweakedVethName, realVethName)
})
})
})
Context("Create a container then send another ADD for the same container but with a different interface", func() {
netconf := fmt.Sprintf(`
{
"cniVersion": "%s",
"name": "net10",
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"datastore_type": "%s",
"nodename_file_optional": true,
"log_level": "info",
"ipam": {
"type": "calico-ipam"
},
"kubernetes": {
"kubeconfig": "/home/user/certs/kubeconfig"
},
"policy": {"type": "k8s"}
}`, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"))
It("should successfully execute both ADDs but for second ADD will return the same result as the first time but it won't network the container", func() {
// Create a new ipPool.
testutils.MustCreateNewIPPool(calicoClient, "10.0.0.0/24", false, false, true)
clientset := getKubernetesClient()
// Now create a K8s pod.
name := "mypod-1"
ensurePodCreated(clientset, testutils.K8S_TEST_NS,
&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
// Create the container, which will call CNI and by default it will create the container with interface name 'eth0'.
containerID, _, _, _, _, contNs, err := testutils.CreateContainer(netconf, name, testutils.K8S_TEST_NS, "")
Expect(err).ShouldNot(HaveOccurred())
// Make sure the pod gets cleaned up, whether we fail or not.
expectedIfaceName := "eth0"
defer func() {
_, err := testutils.DeleteContainerWithIdAndIfaceName(netconf, contNs.Path(), name, testutils.K8S_TEST_NS, containerID, expectedIfaceName)
Expect(err).ShouldNot(HaveOccurred())
ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name)
}()
// The endpoint is created in etcd
endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
ids := names.WorkloadEndpointIdentifiers{
Node: hostname,
Orchestrator: api.OrchestratorKubernetes,
Endpoint: "eth0",
Pod: name,
ContainerID: containerID,
}
wepName, err := ids.CalculateWorkloadEndpointName(false)
Expect(err).NotTo(HaveOccurred())
Expect(endpoints.Items[0].Name).Should(Equal(wepName))
Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
"projectcalico.org/namespace": "test",
"projectcalico.org/orchestrator": api.OrchestratorKubernetes,
"projectcalico.org/serviceaccount": "default",
}))
if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
Expect(endpoints.Items[0].Spec.ContainerID).Should(Equal(containerID))
}
// Try to create the same container but with a different endpoint (container interface name 'eth1'),
// so CNI receives the ADD for the same containerID but different endpoint.
_, _, _, _, err = testutils.RunCNIPluginWithId(netconf, name, testutils.K8S_TEST_NS, "", containerID, "eth1", contNs)
Expect(err).ShouldNot(HaveOccurred())
// If the above command succeeds, the CNI plugin will have renamed the container side of the
// veth to "eth1". We need to clean it up under the correct name, or we'll leak it.
expectedIfaceName = "eth1"
// The endpoint is created in etcd
endpoints, err = calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
// Returned endpoint should still have the same fields even after calling the CNI plugin with a different interface name.
// Calico CNI currently only supports one endpoint (interface) per pod.
Expect(endpoints.Items[0].Name).Should(Equal(wepName))
Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
"projectcalico.org/namespace": "test",
"projectcalico.org/orchestrator": api.OrchestratorKubernetes,
"projectcalico.org/serviceaccount": "default",
}))
// Explicitly assert that endpoint name is still 'eth0' (which was the case from the first ADD)
Expect(endpoints.Items[0].Spec.Endpoint).Should(Equal("eth0"))
if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
Expect(endpoints.Items[0].Spec.ContainerID).Should(Equal(containerID))
}
// Now we create another pod with a very similar name.
name2 := "mypod"
ensurePodCreated(clientset, testutils.K8S_TEST_NS,
&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name2,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name2,
Image: "ignore",
}},
NodeName: hostname,
},
})
// Now since we can't use the same container namespace for the second container, we need to create a new one.
contNs2, err := cnitestutils.NewNS()
Expect(err).NotTo(HaveOccurred())
containerID2 := "random-cid"
defer func() {
_, err := testutils.DeleteContainerWithId(netconf, contNs2.Path(), name2, testutils.K8S_TEST_NS, containerID2)
Expect(err).ShouldNot(HaveOccurred())
ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name2)
}()
err = contNs2.Do(func(_ ns.NetNS) error {
lo, err := netlink.LinkByName("lo")
if err != nil {
return err
}
return netlink.LinkSetUp(lo)
})
Expect(err).NotTo(HaveOccurred())
// Create the container, which will call CNI and by default it will create the container with interface name 'eth0'.
_, _, _, _, err = testutils.RunCNIPluginWithId(netconf, name2, testutils.K8S_TEST_NS, "", containerID2, "eth0", contNs2)
Expect(err).ShouldNot(HaveOccurred())
// Make sure BOTH of the endpoints are there in etcd
endpoints, err = calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(2))
// Construct the workloadendpoint name for the second pod.
ids2 := names.WorkloadEndpointIdentifiers{
Node: hostname,
Orchestrator: api.OrchestratorKubernetes,
Endpoint: "eth0",
Pod: name2,
ContainerID: containerID2,
}
wrkload2, err := ids2.CalculateWorkloadEndpointName(false)
Expect(err).NotTo(HaveOccurred())
// Explicitly Get the second workloadendpoint and make sure it exists and has all the right fields.
ep, err := calicoClient.WorkloadEndpoints().Get(ctx, testutils.K8S_TEST_NS, wrkload2, options.GetOptions{})
Expect(err).ShouldNot(HaveOccurred())
// Returned endpoint should still have the same fields even after calling the CNI plugin with a different interface name.
// Calico CNI currently only supports one endpoint (interface) per pod.
Expect(ep.Name).Should(Equal(wrkload2))
Expect(ep.Namespace).Should(Equal(testutils.K8S_TEST_NS))
Expect(ep.Labels).Should(Equal(map[string]string{
"projectcalico.org/namespace": "test",
"projectcalico.org/orchestrator": api.OrchestratorKubernetes,
"projectcalico.org/serviceaccount": "default",
}))
if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
// Assert this WEP has the new containerID for the second pod.
Expect(ep.Spec.ContainerID).Should(Equal(containerID2))
}
})
})
Context("when pod has a service account", func() {
var nc types.NetConf
var netconf string
var clientset *kubernetes.Clientset
var name string
var pool string = "172.24.0.0/24"
BeforeEach(func() {
// Build the network config for this set of tests.
nc = types.NetConf{
CNIVersion: cniVersion,
Name: "calico-uts",
Type: "calico",
EtcdEndpoints: fmt.Sprintf("http://%s:2379", os.Getenv("ETCD_IP")),
DatastoreType: os.Getenv("DATASTORE_TYPE"),
Kubernetes: types.Kubernetes{Kubeconfig: "/home/user/certs/kubeconfig"},
Policy: types.Policy{PolicyType: "k8s"},
NodenameFileOptional: true,
LogLevel: "info",
}
nc.IPAM.Type = "calico-ipam"
ncb, err := json.Marshal(nc)
Expect(err).NotTo(HaveOccurred())
netconf = string(ncb)
// Create an IPPool for the test.
testutils.MustCreateNewIPPool(calicoClient, pool, false, false, true)
Expect(err).NotTo(HaveOccurred())
clientset = getKubernetesClient()
})
AfterEach(func() {
// Delete pod
ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name)
testutils.MustDeleteIPPool(calicoClient, pool)
})
It("should add a service account profile to the workload endpoint", func() {
name = fmt.Sprintf("run%d", rand.Uint32())
// Make sure the namespace exists.
ensureNamespace(clientset, testutils.K8S_TEST_NS)
// Create a K8s service account
saName := "testserviceaccount"
_, err = clientset.CoreV1().ServiceAccounts(testutils.K8S_TEST_NS).Create(context.Background(), &v1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{Name: saName},
}, metav1.CreateOptions{})
if err != nil {
panic(err)
}
defer func() {
fg := metav1.DeletePropagationForeground
zero := int64(0)
err = clientset.CoreV1().ServiceAccounts(testutils.K8S_TEST_NS).Delete(context.Background(),
saName,
metav1.DeleteOptions{
PropagationPolicy: &fg,
GracePeriodSeconds: &zero})
Expect(err).NotTo(HaveOccurred())
}()
// Create a K8s pod with the service account
ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: name},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: fmt.Sprintf("container-%s", name),
Image: "ignore",
Ports: []v1.ContainerPort{{
Name: "anamedport",
ContainerPort: 555,
}},
}},
ServiceAccountName: saName,
NodeName: hostname,
},
})
containerID, result, contVeth, _, _, contNs, err := testutils.CreateContainer(netconf, name, testutils.K8S_TEST_NS, "")
Expect(err).ShouldNot(HaveOccurred())
mac := contVeth.Attrs().HardwareAddr
Expect(len(result.IPs)).Should(Equal(1))
ids := names.WorkloadEndpointIdentifiers{
Node: hostname,
Orchestrator: api.OrchestratorKubernetes,
Endpoint: "eth0",
Pod: name,
ContainerID: containerID,
}
wrkload, err := ids.CalculateWorkloadEndpointName(false)
interfaceName := k8sconversion.NewConverter().VethNameForWorkload(testutils.K8S_TEST_NS, name)
Expect(err).NotTo(HaveOccurred())
// The endpoint is created
endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
if os.Getenv("DATASTORE_TYPE") == "kubernetes" {
// Unlike etcd datastore, WEP based on a kubernetes pod does not store values for the MAC.
// Put it back manually for later comparison.
endpoints.Items[0].Spec.MAC = mac.String()
}
Expect(endpoints.Items[0].Name).Should(Equal(wrkload))
Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
"projectcalico.org/namespace": "test",
"projectcalico.org/serviceaccount": saName,
"projectcalico.org/orchestrator": api.OrchestratorKubernetes,
}))
Expect(endpoints.Items[0].Spec).Should(Equal(libapi.WorkloadEndpointSpec{
Pod: name,
InterfaceName: interfaceName,
IPNetworks: []string{result.IPs[0].Address.String()},
MAC: mac.String(),
Profiles: []string{"kns.test", "ksa.test." + saName},
Node: hostname,
Endpoint: "eth0",
ServiceAccountName: saName,
Workload: "",
ContainerID: containerID,
Orchestrator: api.OrchestratorKubernetes,
Ports: []libapi.WorkloadEndpointPort{{
Name: "anamedport",
Protocol: numorstring.ProtocolFromString("TCP"),
Port: 555,
}},
}))
// Assert sysctl values are set for what we would expect for an endpoint.
err = checkInterfaceConfig(interfaceName, "4")
Expect(err).ShouldNot(HaveOccurred())
_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
// Make sure there are no endpoints anymore
endpoints, err = calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(0))
}
// Make sure the interface has been removed from the namespace
targetNs, _ := ns.GetNS(contNs.Path())
err = targetNs.Do(func(_ ns.NetNS) error {
_, err = netlink.LinkByName("eth0")
return err
})
Expect(err).Should(HaveOccurred())
Expect(err.Error()).Should(Equal("Link not found"))
// Make sure the interface has been removed from the host
_, err = netlink.LinkByName("cali" + containerID)
Expect(err).Should(HaveOccurred())
Expect(err.Error()).Should(Equal("Link not found"))
})
})
Context("when pod has a GenerateName", func() {
var nc types.NetConf
var netconf string
var clientset *kubernetes.Clientset
var name string
var pool string = "172.24.0.0/24"
BeforeEach(func() {
// Build the network config for this set of tests.
nc = types.NetConf{
CNIVersion: cniVersion,
Name: "calico-uts",
Type: "calico",
EtcdEndpoints: fmt.Sprintf("http://%s:2379", os.Getenv("ETCD_IP")),
DatastoreType: os.Getenv("DATASTORE_TYPE"),
Kubernetes: types.Kubernetes{Kubeconfig: "/home/user/certs/kubeconfig"},
Policy: types.Policy{PolicyType: "k8s"},
NodenameFileOptional: true,
LogLevel: "info",
}
nc.IPAM.Type = "calico-ipam"
ncb, err := json.Marshal(nc)
Expect(err).NotTo(HaveOccurred())
netconf = string(ncb)
// Create an IPPool for the test.
testutils.MustCreateNewIPPool(calicoClient, pool, false, false, true)
Expect(err).NotTo(HaveOccurred())
clientset = getKubernetesClient()
})
AfterEach(func() {
// Delete pod
ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name)
testutils.MustDeleteIPPool(calicoClient, pool)
})
It("should add a workload endpoint with the GenerateName", func() {
// Make sure the namespace exists.
ensureNamespace(clientset, testutils.K8S_TEST_NS)
// Create a K8s pod with GenerateName
name = fmt.Sprintf("run%d", rand.Uint32())
generateName := "test-gen-name"
ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: name,
GenerateName: generateName},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: fmt.Sprintf("container-%s", name),
Image: "ignore",
Ports: []v1.ContainerPort{{
Name: "anamedport",
ContainerPort: 555,
}},
}},
NodeName: hostname,
},
})
containerID, result, contVeth, _, _, contNs, err := testutils.CreateContainer(netconf, name, testutils.K8S_TEST_NS, "")
Expect(err).ShouldNot(HaveOccurred())
ids := names.WorkloadEndpointIdentifiers{
Node: hostname,
Orchestrator: api.OrchestratorKubernetes,
Endpoint: "eth0",
Pod: name,
ContainerID: containerID,
}
wrkload, err := ids.CalculateWorkloadEndpointName(false)
Expect(err).NotTo(HaveOccurred())
mac := contVeth.Attrs().HardwareAddr
interfaceName := k8sconversion.NewConverter().VethNameForWorkload(testutils.K8S_TEST_NS, name)
// The endpoint is created
endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
if os.Getenv("DATASTORE_TYPE") == "kubernetes" {
// Unlike etcd datastore, WEP based on a kubernetes pod does not store values for the MAC.
// Put it back manually for later comparison.
endpoints.Items[0].Spec.MAC = mac.String()
}
Expect(endpoints.Items[0].Name).Should(Equal(wrkload))
Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
"projectcalico.org/namespace": "test",
"projectcalico.org/orchestrator": api.OrchestratorKubernetes,
"projectcalico.org/serviceaccount": "default",
}))
// Make sure that the GenerateName is there.
Expect(endpoints.Items[0].GenerateName).Should(Equal(generateName))
// Let's just check that the Spec is good too.
Expect(endpoints.Items[0].Spec).Should(Equal(libapi.WorkloadEndpointSpec{
Pod: name,
InterfaceName: interfaceName,
ServiceAccountName: "default",
IPNetworks: []string{result.IPs[0].Address.String()},
MAC: mac.String(),
Profiles: []string{"kns.test", "ksa.test.default"},
Node: hostname,
Endpoint: "eth0",
Workload: "",
ContainerID: containerID,
Orchestrator: api.OrchestratorKubernetes,
Ports: []libapi.WorkloadEndpointPort{{
Name: "anamedport",
Protocol: numorstring.ProtocolFromString("TCP"),
Port: 555,
}},
}))
// Assert sysctl values are set for what we would expect for an endpoint.
err = checkInterfaceConfig(interfaceName, "4")
Expect(err).ShouldNot(HaveOccurred())
_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
})
})
Describe("testConnection tests", func() {
It("successfully connects to the datastore", func(done Done) {
netconf := fmt.Sprintf(`
{
"cniVersion": "%s",
"name": "net1",
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"datastore_type": "%s",
"ipam": {
"type": "host-local",
"subnet": "10.0.0.0/8"
},
"kubernetes": {
"kubeconfig": "/home/user/certs/kubeconfig"
},
"policy": {"type": "k8s"},
"nodename_file_optional": true,
"log_level":"info"
}`, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"))
pluginPath := fmt.Sprintf("%s/%s", os.Getenv("BIN"), os.Getenv("PLUGIN"))
c := exec.Command(pluginPath, "-t")
stdin, err := c.StdinPipe()
Expect(err).ToNot(HaveOccurred())
go func() {
defer stdin.Close()
_, _ = io.WriteString(stdin, netconf)
}()
_, err = c.CombinedOutput()
Expect(err).ToNot(HaveOccurred())
close(done)
}, 10)
It("reports it cannot connect to the datastore", func(done Done) {
// wrong port(s).
netconf := fmt.Sprintf(`
{
"cniVersion": "%s",
"name": "net1",
"type": "calico",
"etcd_endpoints": "http://%s:2370",
"datastore_type": "%s",
"ipam": {
"type": "host-local",
"subnet": "10.0.0.0/8"
},
"kubernetes": {
"kubeconfig": "/home/user/certs/kubeconfig",
"k8s_api_root": "https://127.0.0.1:6446"
},
"policy": {"type": "k8s"},
"nodename_file_optional": true,
"log_level":"info"
}`, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"))
pluginPath := fmt.Sprintf("%s/%s", os.Getenv("BIN"), os.Getenv("PLUGIN"))
c := exec.Command(pluginPath, "-t")
stdin, err := c.StdinPipe()
Expect(err).ToNot(HaveOccurred())
go func() {
defer stdin.Close()
_, _ = io.WriteString(stdin, netconf)
}()
_, err = c.CombinedOutput()
Expect(err).To(HaveOccurred())
close(done)
}, 10)
})
})
func checkPodIPAnnotations(clientset *kubernetes.Clientset, ns, name, expectedIP, expectedIPs string) {
if os.Getenv("DATASTORE_TYPE") == "kubernetes" {
pod, err := clientset.CoreV1().Pods(testutils.K8S_TEST_NS).Get(context.Background(), name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(pod.Annotations["cni.projectcalico.org/podIP"]).To(Equal(expectedIP))
Expect(pod.Annotations["cni.projectcalico.org/podIPs"]).To(Equal(expectedIPs))
}
}
func checkInterfaceConfig(name, ipVersion string) error {
err := testutils.CheckSysctlValue(fmt.Sprintf("/proc/sys/net/ipv6/conf/%s/accept_ra", name), "0")
if err != nil {
return err
}
if ipVersion == "4" {
err = testutils.CheckSysctlValue(fmt.Sprintf("/proc/sys/net/ipv4/conf/%s/route_localnet", name), "1")
if err != nil {
return err
}
err = testutils.CheckSysctlValue(fmt.Sprintf("/proc/sys/net/ipv4/neigh/%s/proxy_delay", name), "0")
if err != nil {
return err
}
err = testutils.CheckSysctlValue(fmt.Sprintf("/proc/sys/net/ipv4/conf/%s/proxy_arp", name), "1")
if err != nil {
return err
}
err = testutils.CheckSysctlValue(fmt.Sprintf("/proc/sys/net/ipv4/conf/%s/forwarding", name), "1")
if err != nil {
return err
}
} else if ipVersion == "6" {
err = testutils.CheckSysctlValue(fmt.Sprintf("/proc/sys/net/ipv6/conf/%s/proxy_ndp", name), "1")
if err != nil {
return err
}
err = testutils.CheckSysctlValue(fmt.Sprintf("/proc/sys/net/ipv6/conf/%s/forwarding", name), "1")
if err != nil {
return err
}
}
return nil
}
| [
"\"KUBECONFIG\"",
"\"CNI_SPEC_VERSION\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"BIN\"",
"\"PLUGIN\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"BIN\"",
"\"PLUGIN\"",
"\"DATASTORE_TYPE\""
] | [] | [
"CNI_SPEC_VERSION",
"BIN",
"KUBECONFIG",
"PLUGIN",
"DATASTORE_TYPE",
"ETCD_IP"
] | [] | ["CNI_SPEC_VERSION", "BIN", "KUBECONFIG", "PLUGIN", "DATASTORE_TYPE", "ETCD_IP"] | go | 6 | 0 | |
api/smtp_email_notification.go | package main
import (
"bytes"
"fmt"
ht "html/template"
"os"
tt "text/template"
)
type emailNotificationText struct {
emailNotification
Html ht.HTML
}
type emailNotificationPlugs struct {
Origin string
Kind string
Subject string
UnsubscribeSecretHex string
Notifications []emailNotificationText
}
func smtpEmailNotification(to string, toName string, unsubscribeSecretHex string, notifications []emailNotificationText, kind string) error {
var subject string
if kind == "reply" {
var verb string
if len(notifications) > 1 {
verb = "replies"
} else {
verb = "reply"
}
subject = fmt.Sprintf("%d new comment %s", len(notifications), verb)
} else {
var verb string
if len(notifications) > 1 {
verb = "comments"
} else {
verb = "comment"
}
if kind == "pending-moderation" {
subject = fmt.Sprintf("%d new %s pending moderation", len(notifications), verb)
} else {
subject = fmt.Sprintf("%d new %s on your website", len(notifications), verb)
}
}
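// For illustration, the subject computed above reads, for example:
//   kind="reply", 3 notifications              -> "3 new comment replies"
//   kind="pending-moderation", 1 notification  -> "1 new comment pending moderation"
//   any other kind, 2 notifications            -> "2 new comments on your website"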
h, err := tt.New("header").Parse(`MIME-Version: 1.0
From: Commento <{{.FromAddress}}>
To: {{.ToName}} <{{.ToAddress}}>
Content-Type: text/html; charset=UTF-8
Subject: {{.Subject}}
`)
if err != nil {
logger.Errorf("cannot parse email notification header template: %v", err)
return errorMalformedTemplate
}
var header bytes.Buffer
if err = h.Execute(&header, &headerPlugs{FromAddress: os.Getenv("SMTP_FROM_ADDRESS"), ToAddress: to, ToName: toName, Subject: "[Commento] " + subject}); err != nil {
logger.Errorf("cannot execute email notification header template: %v", err)
return err
}
t, err := ht.ParseFiles(fmt.Sprintf("%s/templates/email-notification.txt", os.Getenv("STATIC")))
if err != nil {
logger.Errorf("cannot parse %s/templates/email-notification.txt: %v", os.Getenv("STATIC"), err)
return errorMalformedTemplate
}
var body bytes.Buffer
err = t.Execute(&body, &emailNotificationPlugs{
Origin: os.Getenv("ORIGIN"),
Kind: kind,
Subject: subject,
UnsubscribeSecretHex: unsubscribeSecretHex,
Notifications: notifications,
})
if err != nil {
logger.Errorf("error generating templated HTML for email notification: %v", err)
return err
}
err = sendMail([]string{to}, concat(header, body))
if err != nil {
logger.Errorf("cannot send email notification: %v", err)
return errorCannotSendEmail
}
return nil
}
| [
"\"SMTP_FROM_ADDRESS\"",
"\"STATIC\"",
"\"STATIC\"",
"\"ORIGIN\""
] | [] | [
"ORIGIN",
"SMTP_FROM_ADDRESS",
"STATIC"
] | [] | ["ORIGIN", "SMTP_FROM_ADDRESS", "STATIC"] | go | 3 | 0 | |
skbuild/setuptools_wrap.py | """This module provides functionality for wrapping key infrastructure components
from distutils and setuptools.
"""
from __future__ import print_function
import argparse
import copy
import json
import os
import os.path
import platform
import stat
import sys
import warnings
from contextlib import contextmanager
# pylint: disable-next=wrong-import-order
from distutils.errors import DistutilsArgError, DistutilsError, DistutilsGetoptError
from glob import glob
from shutil import copyfile, copymode
# Must be imported before distutils
import setuptools
if sys.version_info >= (3, 0):
from io import StringIO
else:
from StringIO import StringIO
if sys.version_info >= (3, 3):
from shutil import which
else:
from .compat import which
from packaging.requirements import Requirement
from packaging.version import parse as parse_version
from setuptools.dist import Distribution as upstream_Distribution
from . import cmaker
from .command import (
bdist,
bdist_wheel,
build,
build_ext,
build_py,
clean,
egg_info,
generate_source_manifest,
install,
install_lib,
install_scripts,
sdist,
test,
)
from .constants import (
CMAKE_DEFAULT_EXECUTABLE,
CMAKE_INSTALL_DIR,
CMAKE_SPEC_FILE,
set_skbuild_plat_name,
skbuild_plat_name,
)
from .exceptions import SKBuildError, SKBuildGeneratorNotFoundError
from .utils import (
PythonModuleFinder,
mkdir_p,
parse_manifestin,
to_platform_path,
to_unix_path,
)
def create_skbuild_argparser():
"""Create and return a scikit-build argument parser."""
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
"--build-type", default="Release", metavar="", help="specify the CMake build type (e.g. Debug or Release)"
)
parser.add_argument("-G", "--generator", metavar="", help="specify the CMake build system generator")
parser.add_argument("-j", metavar="N", type=int, dest="jobs", help="allow N build jobs at once")
parser.add_argument("--cmake-executable", default=None, metavar="", help="specify the path to the cmake executable")
parser.add_argument(
"--install-target",
default=None,
metavar="",
help="specify the CMake target performing the install. " "If not provided, uses the target ``install``",
)
parser.add_argument(
"--skip-generator-test",
action="store_true",
help="skip generator test when a generator is explicitly selected using --generator",
)
return parser
def _is_cmake_configure_argument(arg):
"""Return True if ``arg`` is a relevant argument to pass to cmake when configuring a project."""
for cmake_arg in (
"-C", # initial-cache
"-D", # <var>[:<type>]=<value>
):
if arg.startswith(cmake_arg):
return True
return False
def parse_skbuild_args(args, cmake_args, build_tool_args):
"""
Parse arguments in the scikit-build argument set. Convert specified
arguments to proper format and append to cmake_args and build_tool_args.
Returns the tuple ``(remaining arguments, cmake executable, skip_generator_test)``.
"""
parser = create_skbuild_argparser()
# Consider CMake arguments passed as global setuptools options
cmake_args.extend([arg for arg in args if _is_cmake_configure_argument(arg)])
# ... and remove them from the list
args = [arg for arg in args if not _is_cmake_configure_argument(arg)]
namespace, remaining_args = parser.parse_known_args(args)
# Construct CMake argument list
cmake_args.append("-DCMAKE_BUILD_TYPE:STRING=" + namespace.build_type)
if namespace.generator is not None:
cmake_args.extend(["-G", namespace.generator])
# Construct build tool argument list
build_tool_args.extend(["--config", namespace.build_type])
if namespace.jobs is not None:
build_tool_args.extend(["-j", str(namespace.jobs)])
if namespace.install_target is not None:
build_tool_args.extend(["--install-target", namespace.install_target])
if namespace.generator is None and namespace.skip_generator_test is True:
sys.exit("ERROR: Specifying --skip-generator-test requires --generator to also be specified.")
return remaining_args, namespace.cmake_executable, namespace.skip_generator_test
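# Illustrative sketch (not part of the original module) of how parse_skbuild_args()
# splits its input; the values below are hypothetical:
#
#   args = ["--build-type", "Debug", "-G", "Ninja", "-j", "4", "-DFOO:BOOL=ON"]
#
# would extend cmake_args with ["-DFOO:BOOL=ON", "-DCMAKE_BUILD_TYPE:STRING=Debug", "-G", "Ninja"]
# and build_tool_args with ["--config", "Debug", "-j", "4"].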
def parse_args():
"""This function parses the command-line arguments ``sys.argv`` and returns
the tuple ``(setuptools_args, cmake_executable, skip_generator_test, cmake_args, build_tool_args)``
where each ``*_args`` element corresponds to a set of arguments separated by ``--``."""
dutils = []
cmake = []
make = []
argsets = [dutils, cmake, make]
i = 0
separator = "--"
for arg in sys.argv:
if arg == separator:
i += 1
if i >= len(argsets):
sys.exit(
'ERROR: Too many "{}" separators provided '
"(expected at most {}).".format(separator, len(argsets) - 1)
)
else:
argsets[i].append(arg)
dutils, cmake_executable, skip_generator_test = parse_skbuild_args(dutils, cmake, make)
return dutils, cmake_executable, skip_generator_test, cmake, make
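# Hypothetical invocation showing the two "--" separators handled above
# (illustrative only; the flags are examples, not defaults):
#
#   python setup.py bdist_wheel -- -DSOME_FEATURE:BOOL=ON -- -j8
#
# Everything before the first "--" goes to setuptools/scikit-build, the middle
# block is passed to CMake at configure time, and the final block goes to the
# underlying build tool.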
@contextmanager
def _capture_output():
oldout, olderr = sys.stdout, sys.stderr
try:
out = [StringIO(), StringIO()]
sys.stdout, sys.stderr = out
yield out
finally:
sys.stdout, sys.stderr = oldout, olderr
out[0] = out[0].getvalue()
out[1] = out[1].getvalue()
def _parse_setuptools_arguments(setup_attrs):
"""This function instantiates a Distribution object and
parses the command line arguments.
It returns the tuple ``(display_only, help_commands, commands, hide_listing, force_cmake, skip_cmake, plat_name, build_ext_inplace)``
where
- display_only is a boolean indicating if an argument like '--help',
'--help-commands' or '--author' was passed.
- help_commands is a boolean indicating if argument '--help-commands'
was passed.
- commands contains the list of commands that were passed.
- hide_listing is a boolean indicating if the list of files being included
in the distribution is displayed or not.
- force_cmake a boolean indicating that CMake should always be executed.
- skip_cmake is a boolean indicating if the execution of CMake should
explicitly be skipped.
- plat_name is a string identifying the platform name to embed in generated
filenames. It defaults to :func:`skbuild.constants.skbuild_plat_name()`.
- build_ext_inplace is a boolean indicating if ``build_ext`` command was
specified along with the --inplace argument.
Otherwise it raises a DistutilsArgError exception if there are
any errors on the command line, and it raises DistutilsGetoptError
if there is any error in the command 'options' attribute.
The code has been adapted from the setup() function available
in distutils/core.py.
"""
setup_attrs = dict(setup_attrs)
setup_attrs["script_name"] = os.path.basename(sys.argv[0])
dist = upstream_Distribution(setup_attrs)
# Update class attribute to also ensure the argument is processed
# when ``setuptools.setup`` is called.
upstream_Distribution.global_options.extend(
[
("hide-listing", None, "do not display list of files being " "included in the distribution"),
("force-cmake", None, "always run CMake"),
("skip-cmake", None, "do not run CMake"),
]
)
# Find and parse the config file(s): they will override options from
# the setup script, but be overridden by the command line.
dist.parse_config_files()
# Parse the command line and override config files; any
# command-line errors are the end user's fault, so turn them into
# SystemExit to suppress tracebacks.
with _capture_output():
result = dist.parse_command_line()
display_only = not result
if not hasattr(dist, "hide_listing"):
dist.hide_listing = False
if not hasattr(dist, "force_cmake"):
dist.force_cmake = False
if not hasattr(dist, "skip_cmake"):
dist.skip_cmake = False
plat_names = set()
for cmd in [dist.get_command_obj(command) for command in dist.commands]:
if getattr(cmd, "plat_name", None) is not None:
plat_names.add(cmd.plat_name)
if not plat_names:
plat_names.add(None)
elif len(plat_names) > 1:
raise SKBuildError("--plat-name is ambiguous: %s" % ", ".join(plat_names))
plat_name = list(plat_names)[0]
build_ext_inplace = dist.get_command_obj("build_ext").inplace
return (
display_only,
dist.help_commands,
dist.commands,
dist.hide_listing,
dist.force_cmake,
dist.skip_cmake,
plat_name,
build_ext_inplace,
)
def _check_skbuild_parameters(skbuild_kw):
cmake_install_dir = skbuild_kw["cmake_install_dir"]
if os.path.isabs(cmake_install_dir):
raise SKBuildError(
(
"\n setup parameter 'cmake_install_dir' is set to "
"an absolute path. A relative path is expected.\n"
" Project Root : {}\n"
" CMake Install Directory: {}\n"
).format(os.getcwd(), cmake_install_dir)
)
cmake_source_dir = skbuild_kw["cmake_source_dir"]
if not os.path.exists(os.path.abspath(cmake_source_dir)):
raise SKBuildError(
(
"\n setup parameter 'cmake_source_dir' set to "
"a nonexistent directory.\n"
" Project Root : {}\n"
" CMake Source Directory: {}\n"
).format(os.getcwd(), cmake_source_dir)
)
def strip_package(package_parts, module_file):
"""Given ``package_parts`` (e.g. ``['foo', 'bar']``) and a
``module_file`` (e.g. ``foo/bar/jaz/rock/roll.py``), starting
from the left, this function will strip the parts of the path
matching the package parts and return a new string
(e.g. ``jaz/rock/roll.py``).
The function will work as expected for either Windows or Unix-style
``module_file`` and this independently of the platform.
"""
if not package_parts or os.path.isabs(module_file):
return module_file
package = "/".join(package_parts)
module_dir = os.path.dirname(module_file.replace("\\", "/"))
module_dir = module_dir[: len(package)]
return module_file[len(package) + 1 :] if package != "" and module_dir.startswith(package) else module_file
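# Doctest-style sketch of the behaviour documented above (not part of the
# original module):
#   >>> strip_package(["foo", "bar"], "foo/bar/jaz/rock/roll.py")
#   'jaz/rock/roll.py'
#   >>> strip_package(["foo"], "other/pkg/mod.py")
#   'other/pkg/mod.py'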
def _package_data_contain_module(module, package_data):
"""Return True if the ``module`` is contained
in the ``package_data``.
``module`` is a tuple of the form
``(package, modulename, module_file)``.
"""
(package, _, module_file) = module
if package not in package_data:
return False
# We need to strip the package because a module entry
# usually looks like this:
#
# ('foo.bar', 'module', 'foo/bar/module.py')
#
# and the entry in package_data would look like this:
#
# {'foo.bar' : ['module.py']}
if strip_package(package.split("."), module_file) in package_data[package]:
return True
return False
def _should_run_cmake(commands, cmake_with_sdist):
"""Return True if at least one command requiring ``cmake`` to run
is found in ``commands``."""
for expected_command in [
"build",
"build_ext",
"develop",
"install",
"install_lib",
"bdist",
"bdist_dumb",
"bdist_egg",
"bdist_rpm",
"bdist_wininst",
"bdist_wheel",
"test",
]:
if expected_command in commands:
return True
if "sdist" in commands and cmake_with_sdist:
return True
return False
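# Illustrative sketch (not part of the original module):
#   _should_run_cmake(["bdist_wheel"], False)  -> True
#   _should_run_cmake(["sdist"], False)        -> False
#   _should_run_cmake(["sdist"], True)         -> True   (cmake_with_sdist enabled)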
def _save_cmake_spec(args):
"""Save the CMake spec to disk"""
# We use JSON here because readability is more important than performance
try:
os.makedirs(os.path.dirname(CMAKE_SPEC_FILE()))
except OSError:
pass
with open(CMAKE_SPEC_FILE(), "w+") as fp:
json.dump(args, fp)
def _load_cmake_spec():
"""Load and return the CMake spec from disk"""
try:
with open(CMAKE_SPEC_FILE()) as fp:
return json.load(fp)
except (OSError, IOError, ValueError):
return None
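# For reference, the spec saved by _save_cmake_spec() is a small JSON document.
# A hypothetical example (values are illustrative only):
#
#   {"args": ["/usr/bin/cmake", "-DCMAKE_BUILD_TYPE:STRING=Release"],
#    "version": "3.22.1",
#    "environment": {"PYTHONNOUSERSITE": null, "PYTHONPATH": null}}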
# pylint:disable=too-many-locals, too-many-branches
def setup(*args, **kw): # noqa: C901
"""This function wraps setup() so that we can run cmake, make,
CMake build, then proceed as usual with setuptools, appending the
CMake-generated output as necessary.
The CMake project is re-configured only if needed. This is achieved by (1) retrieving the environment mapping
associated with the generator set in the ``CMakeCache.txt`` file, (2) saving the CMake configure arguments and
version in :func:`skbuild.constants.CMAKE_SPEC_FILE()`: and (3) re-configuring only if either the generator or
the CMake specs change.
"""
# If any, strip ending slash from each package directory
# Regular setuptools does not support this
# TODO: will become an error in the future
if "package_dir" in kw:
for package, prefix in kw["package_dir"].items():
if prefix.endswith("/"):
msg = "package_dir={{{!r}: {!r}}} ends with a trailing slash, which is not supported by setuptools.".format(
package, prefix
)
warnings.warn(msg, FutureWarning, stacklevel=2)
kw["package_dir"][package] = prefix[:-1]
sys.argv, cmake_executable, skip_generator_test, cmake_args, make_args = parse_args()
# work around https://bugs.python.org/issue1011113
# (patches provided, but no updates since 2014)
cmdclass = kw.get("cmdclass", {})
cmdclass["build"] = cmdclass.get("build", build.build)
cmdclass["build_py"] = cmdclass.get("build_py", build_py.build_py)
cmdclass["build_ext"] = cmdclass.get("build_ext", build_ext.build_ext)
cmdclass["install"] = cmdclass.get("install", install.install)
cmdclass["install_lib"] = cmdclass.get("install_lib", install_lib.install_lib)
cmdclass["install_scripts"] = cmdclass.get("install_scripts", install_scripts.install_scripts)
cmdclass["clean"] = cmdclass.get("clean", clean.clean)
cmdclass["sdist"] = cmdclass.get("sdist", sdist.sdist)
cmdclass["bdist"] = cmdclass.get("bdist", bdist.bdist)
cmdclass["bdist_wheel"] = cmdclass.get("bdist_wheel", bdist_wheel.bdist_wheel)
cmdclass["egg_info"] = cmdclass.get("egg_info", egg_info.egg_info)
cmdclass["generate_source_manifest"] = cmdclass.get(
"generate_source_manifest", generate_source_manifest.generate_source_manifest
)
cmdclass["test"] = cmdclass.get("test", test.test)
kw["cmdclass"] = cmdclass
# Extract setup keywords specific to scikit-build and remove them from kw.
# Removing the keywords from kw needs to be done here; otherwise, the
# following call to _parse_setuptools_arguments would complain about
# unknown setup options.
parameters = {
"cmake_args": [],
"cmake_install_dir": "",
"cmake_source_dir": "",
"cmake_with_sdist": False,
"cmake_languages": ("C", "CXX"),
"cmake_minimum_required_version": None,
"cmake_process_manifest_hook": None,
"cmake_install_target": "install",
}
skbuild_kw = {param: kw.pop(param, value) for param, value in parameters.items()}
# ... and validate them
try:
_check_skbuild_parameters(skbuild_kw)
except SKBuildError as ex:
import traceback # pylint: disable=import-outside-toplevel
print("Traceback (most recent call last):")
traceback.print_tb(sys.exc_info()[2])
print("")
sys.exit(ex)
# Convert source dir to a path relative to the root
# of the project
cmake_source_dir = skbuild_kw["cmake_source_dir"]
if cmake_source_dir == ".":
cmake_source_dir = ""
if os.path.isabs(cmake_source_dir):
cmake_source_dir = os.path.relpath(cmake_source_dir)
# Skip running CMake in the following cases:
# * flag "--skip-cmake" is provided
# * "display only" argument is provided (e.g '--help', '--author', ...)
# * no command-line arguments or invalid ones are provided
# * no command requiring cmake is provided
# * no CMakeLists.txt is found
display_only = has_invalid_arguments = help_commands = False
force_cmake = skip_cmake = False
commands = []
try:
(
display_only,
help_commands,
commands,
hide_listing,
force_cmake,
skip_cmake,
plat_name,
build_ext_inplace,
) = _parse_setuptools_arguments(kw)
except (DistutilsArgError, DistutilsGetoptError):
has_invalid_arguments = True
has_cmakelists = os.path.exists(os.path.join(cmake_source_dir, "CMakeLists.txt"))
if not has_cmakelists:
print("skipping skbuild (no CMakeLists.txt found)")
skip_skbuild = (
display_only
or has_invalid_arguments
or not _should_run_cmake(commands, skbuild_kw["cmake_with_sdist"])
or not has_cmakelists
)
if skip_skbuild and not force_cmake:
if help_commands:
# Prepend scikit-build help. Generate option descriptions using
# argparse.
skbuild_parser = create_skbuild_argparser()
arg_descriptions = [line for line in skbuild_parser.format_help().split("\n") if line.startswith(" ")]
print("scikit-build options:")
print("\n".join(arg_descriptions))
print("")
print('Arguments following a "--" are passed directly to CMake ' "(e.g. -DMY_VAR:BOOL=TRUE).")
print('Arguments following a second "--" are passed directly to ' " the build tool.")
print("")
return setuptools.setup(*args, **kw)
developer_mode = "develop" in commands or "test" in commands or build_ext_inplace
packages = kw.get("packages", [])
package_dir = kw.get("package_dir", {})
package_data = copy.deepcopy(kw.get("package_data", {}))
py_modules = kw.get("py_modules", [])
new_py_modules = {py_module: False for py_module in py_modules}
scripts = kw.get("scripts", [])
new_scripts = {script: False for script in scripts}
data_files = {(parent_dir or "."): set(file_list) for parent_dir, file_list in kw.get("data_files", [])}
# Since CMake arguments provided through the command line have more
# weight and, when CMake is given the same argument multiple times, only the last
# one is considered, let's prepend the ones provided in the setup call.
cmake_args = skbuild_kw["cmake_args"] + cmake_args
# Handle cmake_install_target
# get the target (next item after '--install-target') or return '' if no --install-target
cmake_install_target_from_command = next(
(make_args[index + 1] for index, item in enumerate(make_args) if item == "--install-target"), ""
)
cmake_install_target_from_setup = skbuild_kw["cmake_install_target"]
# Setting target from command takes precedence
# cmake_install_target_from_setup has the default 'install',
# so cmake_install_target would never be empty.
if cmake_install_target_from_command:
cmake_install_target = cmake_install_target_from_command
else:
cmake_install_target = cmake_install_target_from_setup
if sys.platform == "darwin":
# If no ``--plat-name`` argument was passed, set default value.
if plat_name is None:
plat_name = skbuild_plat_name()
(_, version, machine) = plat_name.split("-")
# The loop here allows for CMAKE_OSX_* command line arguments to overload
# values passed with either the ``--plat-name`` command-line argument
# or the ``cmake_args`` setup option.
for cmake_arg in cmake_args:
if "CMAKE_OSX_DEPLOYMENT_TARGET" in cmake_arg:
version = cmake_arg.split("=")[1]
if "CMAKE_OSX_ARCHITECTURES" in cmake_arg:
machine = cmake_arg.split("=")[1]
if set(machine.split(";")) == {"x86_64", "arm64"}:
machine = "universal2"
set_skbuild_plat_name("macosx-{}-{}".format(version, machine))
# Set platform env. variable so that commands (e.g. bdist_wheel)
# use this information. The _PYTHON_HOST_PLATFORM env. variable is
# used in the distutils.util.get_platform() function.
os.environ.setdefault("_PYTHON_HOST_PLATFORM", skbuild_plat_name())
# Set CMAKE_OSX_DEPLOYMENT_TARGET and CMAKE_OSX_ARCHITECTURES if not already
# specified
(_, version, machine) = skbuild_plat_name().split("-")
if not cmaker.has_cmake_cache_arg(cmake_args, "CMAKE_OSX_DEPLOYMENT_TARGET"):
cmake_args.append("-DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=%s" % version)
if not cmaker.has_cmake_cache_arg(cmake_args, "CMAKE_OSX_ARCHITECTURES"):
machine_archs = "x86_64;arm64" if machine == "universal2" else machine
cmake_args.append("-DCMAKE_OSX_ARCHITECTURES:STRING=%s" % machine_archs)
# Install cmake if listed in `setup_requires`
for package in kw.get("setup_requires", []):
if Requirement(package).name == "cmake":
setup_requires = [package]
dist = upstream_Distribution({"setup_requires": setup_requires})
dist.fetch_build_eggs(setup_requires)
# Considering that packages associated with the "setup_requires" keyword are
# installed in the .eggs subdirectory without honoring the setuptools "console_scripts"
# entry_points and without setting the expected executable permissions, we are
# taking care of it below.
import cmake # pylint: disable=import-outside-toplevel
for executable in ["cmake", "cpack", "ctest"]:
executable = os.path.join(cmake.CMAKE_BIN_DIR, executable)
if platform.system().lower() == "windows":
executable += ".exe"
st = os.stat(executable)
permissions = st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
os.chmod(executable, permissions)
cmake_executable = os.path.join(cmake.CMAKE_BIN_DIR, "cmake")
break
# Languages are used to determine a working generator
cmake_languages = skbuild_kw["cmake_languages"]
try:
if cmake_executable is None:
cmake_executable = CMAKE_DEFAULT_EXECUTABLE
cmkr = cmaker.CMaker(cmake_executable)
if not skip_cmake:
cmake_minimum_required_version = skbuild_kw["cmake_minimum_required_version"]
if cmake_minimum_required_version is not None:
if parse_version(cmkr.cmake_version) < parse_version(cmake_minimum_required_version):
raise SKBuildError(
"CMake version {} or higher is required. CMake version {} is being used".format(
cmake_minimum_required_version, cmkr.cmake_version
)
)
# Used to confirm that the cmake executable is the same, and that the environment
# didn't change
cmake_spec = {
"args": [which(CMAKE_DEFAULT_EXECUTABLE)] + cmake_args,
"version": cmkr.cmake_version,
"environment": {
"PYTHONNOUSERSITE": os.environ.get("PYTHONNOUSERSITE"),
"PYTHONPATH": os.environ.get("PYTHONPATH"),
},
}
# skip the configure step for a cached build
env = cmkr.get_cached_generator_env()
if env is None or cmake_spec != _load_cmake_spec():
env = cmkr.configure(
cmake_args,
skip_generator_test=skip_generator_test,
cmake_source_dir=cmake_source_dir,
cmake_install_dir=skbuild_kw["cmake_install_dir"],
languages=cmake_languages,
)
_save_cmake_spec(cmake_spec)
cmkr.make(make_args, install_target=cmake_install_target, env=env)
except SKBuildGeneratorNotFoundError as ex:
sys.exit(ex)
except SKBuildError as ex:
import traceback # pylint: disable=import-outside-toplevel
print("Traceback (most recent call last):")
traceback.print_tb(sys.exc_info()[2])
print("")
sys.exit(ex)
# If needed, set reasonable defaults for package_dir
for package in packages:
if package not in package_dir:
package_dir[package] = package.replace(".", "/")
if "" in package_dir:
package_dir[package] = to_unix_path(os.path.join(package_dir[""], package_dir[package]))
kw["package_dir"] = package_dir
package_prefixes = _collect_package_prefixes(package_dir, packages)
# This hook enables custom processing of the cmake manifest
cmake_manifest = cmkr.install()
process_manifest = skbuild_kw.get("cmake_process_manifest_hook")
if process_manifest is not None:
if callable(process_manifest):
cmake_manifest = process_manifest(cmake_manifest)
else:
raise SKBuildError("The cmake_process_manifest_hook argument should be callable.")
_classify_installed_files(
cmake_manifest,
package_data,
package_prefixes,
py_modules,
new_py_modules,
scripts,
new_scripts,
data_files,
cmake_source_dir,
skbuild_kw["cmake_install_dir"],
)
original_manifestin_data_files = []
if kw.get("include_package_data", False):
original_manifestin_data_files = parse_manifestin(os.path.join(os.getcwd(), "MANIFEST.in"))
for path in original_manifestin_data_files:
_classify_file(
path, package_data, package_prefixes, py_modules, new_py_modules, scripts, new_scripts, data_files
)
if developer_mode:
# Copy packages
for package, package_file_list in package_data.items():
for package_file in package_file_list:
package_file = os.path.join(package_dir[package], package_file)
cmake_file = os.path.join(CMAKE_INSTALL_DIR(), package_file)
if os.path.exists(cmake_file):
_copy_file(cmake_file, package_file, hide_listing)
# Copy modules
for py_module in py_modules:
package_file = py_module + ".py"
cmake_file = os.path.join(CMAKE_INSTALL_DIR(), package_file)
if os.path.exists(cmake_file):
_copy_file(cmake_file, package_file, hide_listing)
else:
_consolidate_package_modules(cmake_source_dir, packages, package_dir, py_modules, package_data, hide_listing)
original_package_data = kw.get("package_data", {}).copy()
_consolidate_package_data_files(original_package_data, package_prefixes, hide_listing)
for data_file in original_manifestin_data_files:
dest_data_file = os.path.join(CMAKE_INSTALL_DIR(), data_file)
_copy_file(data_file, dest_data_file, hide_listing)
kw["package_data"] = package_data
kw["package_dir"] = {
package: (
os.path.join(CMAKE_INSTALL_DIR(), prefix)
if os.path.exists(os.path.join(CMAKE_INSTALL_DIR(), prefix))
else prefix
)
for prefix, package in package_prefixes
}
kw["scripts"] = [
os.path.join(CMAKE_INSTALL_DIR(), script) if mask else script for script, mask in new_scripts.items()
]
kw["data_files"] = [(parent_dir, list(file_set)) for parent_dir, file_set in data_files.items()]
if "zip_safe" not in kw:
kw["zip_safe"] = False
# Adapted from espdev/ITKPythonInstaller/setup.py.in
class BinaryDistribution(upstream_Distribution): # pylint: disable=missing-class-docstring
def has_ext_modules(self): # pylint: disable=no-self-use,missing-function-docstring
return has_cmakelists
kw["distclass"] = BinaryDistribution
print("")
return setuptools.setup(*args, **kw)
def _collect_package_prefixes(package_dir, packages):
"""
Collect the list of prefixes for all packages
The list is used to match paths in the install manifest to packages
specified in the setup.py script.
The list is sorted in decreasing order of prefix length so that paths are
matched with their immediate parent package, instead of any of that
package's ancestors.
For example, consider the project structure below. Assume that the
setup call was made with a package list featuring "top" and "top.bar", but
not "top.not_a_subpackage".
::
top/ -> top/
__init__.py -> top/__init__.py (parent: top)
foo.py -> top/foo.py (parent: top)
bar/ -> top/bar/ (parent: top)
__init__.py -> top/bar/__init__.py (parent: top.bar)
not_a_subpackage/ -> top/not_a_subpackage/ (parent: top)
data_0.txt -> top/not_a_subpackage/data_0.txt (parent: top)
data_1.txt -> top/not_a_subpackage/data_1.txt (parent: top)
The paths in the generated install manifest are matched to packages
according to the parents indicated on the right. Only packages that are
specified in the setup() call are considered. Because of the sort order,
the data files at the bottom would have been mapped to
"top.not_a_subpackage" instead of "top" proper, had such a package been
specified.
"""
return list(
sorted(
((package_dir[package].replace(".", "/"), package) for package in packages),
key=lambda tup: len(tup[0]),
reverse=True,
)
)
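# Minimal sketch of the expected ordering (hypothetical layout): given
# package_dir == {"top": "top", "top.bar": "top/bar"} and
# packages == ["top", "top.bar"], the function returns
# [("top/bar", "top.bar"), ("top", "top")]: longer prefixes sort first, so
# "top/bar/__init__.py" is matched to "top.bar" before it can match "top".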
def _classify_installed_files(
install_paths,
package_data,
package_prefixes,
py_modules,
new_py_modules,
scripts,
new_scripts,
data_files,
cmake_source_dir,
_cmake_install_dir,
):
assert not os.path.isabs(cmake_source_dir)
assert cmake_source_dir != "."
install_root = os.path.join(os.getcwd(), CMAKE_INSTALL_DIR())
for path in install_paths:
# if this installed file is not within the project root, complain and
# exit
if not to_platform_path(path).startswith(CMAKE_INSTALL_DIR()):
raise SKBuildError(
(
"\n CMake-installed files must be within the project root.\n"
" Project Root : {}\n"
" Violating File: {}\n"
).format(install_root, to_platform_path(path))
)
# peel off the 'skbuild' prefix
path = to_unix_path(os.path.relpath(path, CMAKE_INSTALL_DIR()))
_classify_file(
path, package_data, package_prefixes, py_modules, new_py_modules, scripts, new_scripts, data_files
)
def _classify_file(path, package_data, package_prefixes, py_modules, new_py_modules, scripts, new_scripts, data_files):
found_package = False
found_module = False
found_script = False
path = to_unix_path(path)
# check to see if path is part of a package
for prefix, package in package_prefixes:
if path.startswith(prefix + "/"):
# peel off the package prefix
path = to_unix_path(os.path.relpath(path, prefix))
package_file_list = package_data.get(package, [])
package_file_list.append(path)
package_data[package] = package_file_list
found_package = True
break
if found_package:
return
# If control reaches this point, then this installed file is not part of
# a package.
# check if path is a module
for module in py_modules:
if path.replace("/", ".") == ".".join((module, "py")):
new_py_modules[module] = True
found_module = True
break
if found_module:
return
# If control reaches this point, then this installed file is not a
# module
# if the file is a script, mark the corresponding script
for script in scripts:
if path == script:
new_scripts[script] = True
found_script = True
break
if found_script:
return
# If control reaches this point, then this installed file is not a
# script
# If control reaches this point, then we have installed files that are
# not part of a package, not a module, nor a script. Without any other
# information, we can only treat it as a generic data file.
parent_dir = os.path.dirname(path)
file_set = data_files.get(parent_dir)
if file_set is None:
file_set = set()
data_files[parent_dir] = file_set
file_set.add(os.path.join(CMAKE_INSTALL_DIR(), path))
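# Classification example (hypothetical paths): with package_prefixes containing
# ("mypkg", "mypkg"), an installed file "mypkg/data.bin" is appended to
# package_data["mypkg"] as "data.bin"; a file "tools/helper.txt" that matches
# no package, module, or script is recorded in data_files under its parent
# directory "tools".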
def _copy_file(src_file, dest_file, hide_listing=True):
"""Copy ``src_file`` to ``dest_file`` ensuring parent directory exists.
By default (``hide_listing=True``), nothing is printed. Setting
``hide_listing`` to False displays messages like
`creating directory /path/to/package` and
`copying /src/path/to/package -> path/to/package` on standard output.
"""
# Create directory if needed
dest_dir = os.path.dirname(dest_file)
if dest_dir != "" and not os.path.exists(dest_dir):
if not hide_listing:
print("creating directory {}".format(dest_dir))
mkdir_p(dest_dir)
# Copy file
if not hide_listing:
print("copying {} -> {}".format(src_file, dest_file))
copyfile(src_file, dest_file)
copymode(src_file, dest_file)
def _consolidate_package_modules(cmake_source_dir, packages, package_dir, py_modules, package_data, hide_listing):
"""This function consolidates packages having modules located in
both the source tree and the CMake install tree into one location.
The one location is the CMake install tree
(see :func:`.constants.CMAKE_INSTALL_DIR()`).
Why? This is a necessary evil because ``Setuptools`` keeps track of the
package and module files to install using a dictionary of lists, where
the keys are package names (e.g. ``foo.bar``) and the values are lists of
module files (e.g. ``['__init__.py', 'baz.py']``). Since this doesn't allow
the files associated with a given package to be split across multiple
locations, one location is selected and the files are copied over.
How? It currently searches for modules across both locations using
the :class:`.utils.PythonModuleFinder`. Then, with the help
of :func:`_package_data_contain_module`, it identifies which
ones are already included in, or missing from, the distribution.
Once a module has been identified as ``missing``, it is both copied
into the :func:`.constants.CMAKE_INSTALL_DIR()` and added to the
``package_data`` dictionary so that it can be considered by
the upstream setup function.
"""
try:
# Search for python modules in both the current directory
# and cmake install tree.
modules = PythonModuleFinder(
packages, package_dir, py_modules, alternative_build_base=CMAKE_INSTALL_DIR()
).find_all_modules()
except DistutilsError as msg:
raise SystemExit("error: {}".format(str(msg)))
print("")
for entry in modules:
# Check if module file should be copied into the CMake install tree.
if _package_data_contain_module(entry, package_data):
continue
(package, _, src_module_file) = entry
# Copy missing module file
if os.path.exists(src_module_file):
dest_module_file = os.path.join(CMAKE_INSTALL_DIR(), src_module_file)
_copy_file(src_module_file, dest_module_file, hide_listing)
# Since the mapping in package_data expects the package to be associated
# with a list of files relative to the directory containing the package,
# the following section makes sure to strip the redundant part of the
# module file path.
# The redundant part should be stripped for both cmake_source_dir and
# the package.
package_parts = []
if cmake_source_dir:
package_parts = cmake_source_dir.split(os.path.sep)
package_parts += package.split(".")
stripped_module_file = strip_package(package_parts, src_module_file)
# Update list of files associated with the corresponding package
try:
package_data[package].append(stripped_module_file)
except KeyError:
package_data[package] = [stripped_module_file]
def _consolidate_package_data_files(original_package_data, package_prefixes, hide_listing):
"""This function copies package data files specified using the ``package_data`` keyword
into :func:`.constants.CMAKE_INSTALL_DIR()`.
::
setup(...,
packages=['mypkg'],
package_dir={'mypkg': 'src/mypkg'},
package_data={'mypkg': ['data/*.dat']},
)
Considering that (1) the packages associated with modules located in both the source tree and
the CMake install tree are consolidated into the CMake install tree, and (2) the consolidated
package path set in the ``package_dir`` dictionary and later used by setuptools to package
(or install) modules and data files is :func:`.constants.CMAKE_INSTALL_DIR()`, copying the data files
is required to ensure setuptools can find them when it uses the package directory.
"""
project_root = os.getcwd()
for prefix, package in package_prefixes:
if package not in original_package_data:
continue
raw_patterns = original_package_data[package]
for pattern in raw_patterns:
expanded_package_dir = os.path.join(project_root, prefix, pattern)
for src_data_file in glob(expanded_package_dir):
full_prefix_length = len(os.path.join(project_root, prefix)) + 1
data_file = src_data_file[full_prefix_length:]
dest_data_file = os.path.join(CMAKE_INSTALL_DIR(), prefix, data_file)
_copy_file(src_data_file, dest_data_file, hide_listing)
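# Example (hypothetical project): with package_data={'mypkg': ['data/*.dat']}
# and prefix 'src/mypkg', a matching file src/mypkg/data/a.dat is copied to
# <CMAKE_INSTALL_DIR>/src/mypkg/data/a.dat, which is where setuptools will
# look for it once package_dir points at the CMake install tree.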
| [] | [] | [
"PYTHONNOUSERSITE",
"PYTHONPATH"
] | [] | ["PYTHONNOUSERSITE", "PYTHONPATH"] | python | 2 | 0 | |
coap-gateway/test/test.go | package service
import (
"context"
"os"
"sync"
"testing"
"time"
"github.com/plgd-dev/hub/coap-gateway/service"
"github.com/plgd-dev/hub/pkg/log"
"github.com/plgd-dev/hub/test/config"
"github.com/stretchr/testify/require"
)
func MakeConfig(t *testing.T) service.Config {
var cfg service.Config
cfg.Log.DumpCoapMessages = true
cfg.TaskQueue.GoPoolSize = 1600
cfg.TaskQueue.Size = 2 * 1024 * 1024
cfg.APIs.COAP.Addr = config.GW_HOST
cfg.APIs.COAP.ExternalAddress = config.GW_HOST
cfg.APIs.COAP.MaxMessageSize = 256 * 1024
cfg.APIs.COAP.OwnerCacheExpiration = time.Minute
cfg.APIs.COAP.SubscriptionBufferSize = 1000
cfg.APIs.COAP.GoroutineSocketHeartbeat = time.Millisecond * 300
cfg.APIs.COAP.KeepAlive.Timeout = time.Second * 20
cfg.APIs.COAP.BlockwiseTransfer.Enabled = false
cfg.APIs.COAP.BlockwiseTransfer.SZX = "1024"
cfg.APIs.COAP.TLS.Embedded = config.MakeTLSServerConfig()
cfg.APIs.COAP.TLS.Enabled = true
cfg.APIs.COAP.TLS.Embedded.ClientCertificateRequired = false
cfg.APIs.COAP.TLS.Embedded.CertFile = os.Getenv("TEST_COAP_GW_CERT_FILE")
cfg.APIs.COAP.TLS.Embedded.KeyFile = os.Getenv("TEST_COAP_GW_KEY_FILE")
cfg.APIs.COAP.Authorization = service.AuthorizationConfig{
OwnerClaim: config.OWNER_CLAIM,
Providers: []service.ProvidersConfig{
{
Name: config.DEVICE_PROVIDER,
Config: config.MakeDeviceAuthorization(),
},
},
}
cfg.Clients.IdentityStore.Connection = config.MakeGrpcClientConfig(config.IDENTITY_STORE_HOST)
cfg.Clients.ResourceAggregate.Connection = config.MakeGrpcClientConfig(config.RESOURCE_AGGREGATE_HOST)
cfg.Clients.ResourceDirectory.Connection = config.MakeGrpcClientConfig(config.RESOURCE_DIRECTORY_HOST)
cfg.Clients.Eventbus.NATS = config.MakeSubscriberConfig()
err := cfg.Validate()
require.NoError(t, err)
return cfg
}
func SetUp(t *testing.T) (TearDown func()) {
return New(t, MakeConfig(t))
}
// New creates test coap-gateway.
func New(t *testing.T, cfg service.Config) func() {
ctx := context.Background()
logger, err := log.NewLogger(cfg.Log.Embedded)
require.NoError(t, err)
s, err := service.New(ctx, cfg, logger)
require.NoError(t, err)
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
_ = s.Serve()
}()
return func() {
_ = s.Close()
wg.Wait()
}
}
| [
"\"TEST_COAP_GW_CERT_FILE\"",
"\"TEST_COAP_GW_KEY_FILE\""
] | [] | [
"TEST_COAP_GW_KEY_FILE",
"TEST_COAP_GW_CERT_FILE"
] | [] | ["TEST_COAP_GW_KEY_FILE", "TEST_COAP_GW_CERT_FILE"] | go | 2 | 0 | |
addrmgr/knownaddress_test.go | // Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package addrmgr_test
import (
"math"
"testing"
"time"
"github.com/aBitcoinDiamond/addrmgr"
"github.com/aBitcoinDiamond/wire"
)
func TestChance(t *testing.T) {
now := time.Unix(time.Now().Unix(), 0)
var tests = []struct {
addr *addrmgr.KnownAddress
expected float64
}{
{
//Test normal case
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
0, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
1.0,
}, {
//Test case in which lastseen < 0
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(20 * time.Second)},
0, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
1.0,
}, {
//Test case in which lastattempt < 0
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
0, time.Now().Add(30*time.Minute), time.Now(), false, 0),
1.0 * .01,
}, {
//Test case in which lastattempt < ten minutes
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
0, time.Now().Add(-5*time.Minute), time.Now(), false, 0),
1.0 * .01,
}, {
//Test case with several failed attempts.
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
2, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
1 / 1.5 / 1.5,
},
}
err := .0001
for i, test := range tests {
chance := addrmgr.TstKnownAddressChance(test.addr)
if math.Abs(test.expected-chance) >= err {
t.Errorf("case %d: got %f, expected %f", i, chance, test.expected)
}
}
}
func TestIsBad(t *testing.T) {
now := time.Unix(time.Now().Unix(), 0)
future := now.Add(35 * time.Minute)
monthOld := now.Add(-43 * time.Hour * 24)
secondsOld := now.Add(-2 * time.Second)
minutesOld := now.Add(-27 * time.Minute)
hoursOld := now.Add(-5 * time.Hour)
zeroTime := time.Time{}
futureNa := &wire.NetAddress{Timestamp: future}
minutesOldNa := &wire.NetAddress{Timestamp: minutesOld}
monthOldNa := &wire.NetAddress{Timestamp: monthOld}
currentNa := &wire.NetAddress{Timestamp: secondsOld}
//Test addresses that have been tried in the last minute.
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(futureNa, 3, secondsOld, zeroTime, false, 0)) {
t.Errorf("test case 1: addresses that have been tried in the last minute are not bad.")
}
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(monthOldNa, 3, secondsOld, zeroTime, false, 0)) {
t.Errorf("test case 2: addresses that have been tried in the last minute are not bad.")
}
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 3, secondsOld, zeroTime, false, 0)) {
t.Errorf("test case 3: addresses that have been tried in the last minute are not bad.")
}
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 3, secondsOld, monthOld, true, 0)) {
t.Errorf("test case 4: addresses that have been tried in the last minute are not bad.")
}
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 2, secondsOld, secondsOld, true, 0)) {
t.Errorf("test case 5: addresses that have been tried in the last minute are not bad.")
}
//Test address that claims to be from the future.
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(futureNa, 0, minutesOld, hoursOld, true, 0)) {
t.Errorf("test case 6: addresses that claim to be from the future are bad.")
}
//Test address that has not been seen in over a month.
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(monthOldNa, 0, minutesOld, hoursOld, true, 0)) {
t.Errorf("test case 7: addresses more than a month old are bad.")
}
//It has failed at least three times and never succeeded.
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 3, minutesOld, zeroTime, true, 0)) {
t.Errorf("test case 8: addresses that have never succeeded are bad.")
}
//It has failed ten times in the last week
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 10, minutesOld, monthOld, true, 0)) {
t.Errorf("test case 9: addresses that have not succeeded in too long are bad.")
}
//Test an address that should work.
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 2, minutesOld, hoursOld, true, 0)) {
t.Errorf("test case 10: This should be a valid address.")
}
}
| [] | [] | [] | [] | [] | go | null | null | null |
generate_thumbnails.py | import os
import tempfile
from pathlib import Path
from PIL import Image
from google.cloud import storage
from retrying import retry
THUMBNAIL_SIZE = int(os.getenv('THUMBNAIL_SIZE', '128'))
THUMBNAIL_MAX_DIM = THUMBNAIL_SIZE, THUMBNAIL_SIZE
THUMBNAIL_SUFFIX = f'_thumb{THUMBNAIL_SIZE}'
SUPPORTED_FILE_EXTENSIONS = {'jpg', 'jpeg', 'png'}
def receive_event(event, context):
"""Triggered by a change to a Cloud Storage bucket.
Args:
event (dict): Event payload.
context (google.cloud.functions.Context): Metadata for the event.
"""
file_extension = Path(event['name']).suffix.lstrip('.')
if not is_event_supported_image(event, file_extension):
return
print(event, context)
bucket = storage.Client().get_bucket(event['bucket'])
with tempfile.NamedTemporaryFile() as temp_image_file, tempfile.NamedTemporaryFile() as temp_thumb_file:
get_image_file(event, temp_image_file, bucket)
image_format = generate_and_save_thumbnail(temp_image_file.name, temp_thumb_file.name)
upload_thumbnail_to_bucket(bucket, temp_thumb_file, get_thumbnail_name(event['name'], file_extension),
image_format)
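# Example trigger payload (abridged, hypothetical values): only 'bucket',
# 'name' and 'contentType' are used above, e.g.
# {'bucket': 'my-bucket', 'name': 'photos/cat.jpg', 'contentType': 'image/jpeg'}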
def is_file_extension_supported(file_extension):
return file_extension.lower() in SUPPORTED_FILE_EXTENSIONS
def is_event_supported_image(event, file_extension):
return (event['contentType'].startswith('image') and
THUMBNAIL_SUFFIX not in event['name'] and
is_file_extension_supported(file_extension))
def get_thumbnail_name(image_name, file_extension):
return f'{Path(image_name).stem}{THUMBNAIL_SUFFIX}.{file_extension}'
def generate_and_save_thumbnail(image_file_name, thumbnail_file_name):
image = Image.open(image_file_name)
image_format = image.format
image.thumbnail(THUMBNAIL_MAX_DIM, Image.ANTIALIAS)
image.save(thumbnail_file_name, format=image_format)
return image_format
@retry(wait_exponential_multiplier=1000, wait_exponential_max=10000)
def upload_thumbnail_to_bucket(bucket, temp_thumb_file, thumbnail_filename, image_format):
bucket.blob(thumbnail_filename).upload_from_filename(temp_thumb_file.name,
content_type=f'image/{image_format.lower()}')
@retry(wait_exponential_multiplier=1000, wait_exponential_max=10000)
def get_image_file(event, destination_file, bucket):
blob = bucket.get_blob(event['name'])
blob.download_to_file(destination_file)
| [] | [] | [
"THUMBNAIL_SIZE"
] | [] | ["THUMBNAIL_SIZE"] | python | 1 | 0 | |
run.py | # /usr/bin/env python3
"""Benchmark of handling PDB files comparing multiple libraries."""
import argparse
import glob
import os
import re
import subprocess
import sys
from pathlib import Path
def gather_libs(selected_libs):
libs = []
for path in sorted(glob.iglob("bench/*")):
lib = os.path.basename(path)
if not os.path.isdir(path) or (selected_libs and lib not in selected_libs):
continue
libs.append(lib)
return libs
def gather_tests(libs, selected_tests):
tests = []
for lib in libs:
for filepath in sorted(glob.iglob(os.path.join("bench", lib, "*"))):
test, _ = os.path.splitext(os.path.basename(filepath))
if test in tests or (selected_tests and test not in selected_tests):
continue
tests.append(test)
return tests
def parse_args(argv):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-t", "--tests", help="Test names to run.")
parser.add_argument("-l", "--libraries", help="Library names to test.")
opts = parser.parse_args()
if opts.tests:
opts.tests = opts.tests.split(",")
if opts.libraries:
opts.libraries = opts.libraries.split(",")
return vars(opts)
def run_test(filepath, pdbfile, repeats=10):
*_, dirname, filename = Path(filepath).parts
basename, _ = os.path.splitext(filename)
pdbid, _ = os.path.splitext(os.path.basename(pdbfile))
print(format(f"{dirname}/{basename}/{pdbid}", "<40"), end="", flush=True)
if "schrodinger" in filepath:
cmd = [
os.path.join(os.environ["SCHRODINGER"], "run"),
filepath,
pdbfile,
str(repeats),
]
elif filepath.endswith(".py"):
cmd = ["python3", filepath, pdbfile, str(repeats)]
elif filepath.endswith(".cr"):
cmd = ["crystal", "run", "--release", filepath, "--", pdbfile, str(repeats)]
elif filepath.endswith(".tcl"):
cmd = [
"vmd",
"-dispdev",
"none",
"-e",
filepath,
"-args",
pdbfile,
str(repeats),
]
try:
output = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
output = output.decode(sys.stdout.encoding).strip()
try:
elapsed = float(output)
except ValueError:
elapsed = float(re.findall(r"elapsed *= *([\d\.e\-]+)", output)[0])
print(format(elapsed, ".6f"))
except subprocess.CalledProcessError:
print("failed")
opts = parse_args(sys.argv[1:])
libs = gather_libs(opts["libraries"])
tests = gather_tests(libs, opts["tests"])
pdbs = list(map(os.path.abspath, glob.glob("data/*.pdb")))
for test in tests:
for pdbfile in pdbs if test.startswith("parse") else ["data/1ake.pdb"]:
for lib in libs:
paths = glob.glob(f"bench/{lib}/{test}.*")
if not paths:
continue
run_test(paths[0], pdbfile, repeats=10 if "1htq" not in pdbfile else 3)
print("")
| [] | [] | [
"SCHRODINGER"
] | [] | ["SCHRODINGER"] | python | 1 | 0 | |
server_scripts/odb.py | #!/usr/bin/env python
import sys
import traceback
import argparse
import os
from subprocess import Popen, PIPE, call
#
# This script is a thin wrapper around Midas's odbedit for getting, setting,
# and testing ODB keys from the command line or from other Python code
#
def odb(odbcmd, odbkey, val):
#d = os.environ.copy()
#d['MIDASSYS'] = '/home1/aroberts/cdms_midas.git'
#d['MIDAS_EXPTAB'] = 'home1/aroberts/test2_expt/online/exptab'
#d['MIDAS_ONLINE'] = 'home1/aroberts/test2_expt/online'
#print d
if odbcmd == "get":
command = "odbedit -c 'ls -l \""+ odbkey +"\"'"
print command
elif odbcmd == "set":
command = "odbedit -c 'set \"" + odbkey + "\" \"" + val + "\"'"
elif odbcmd == "test":
command = "odbedit -c 'ls -v \""+ odbkey +"\"'"
p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)#, env=d)
stdout, stderr = p.communicate()
print stderr
return stdout
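# Example (hypothetical ODB key): odb('set', '/Equipment/Trigger/Enabled', 'y')
# runs: odbedit -c 'set "/Equipment/Trigger/Enabled" "y"' and returns its stdout.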
#the stuff below is so this functionality can be used as a script
########################################################################
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('cmd', choices=['get', 'set', 'test'])
parser.add_argument('key')
parser.add_argument('val', nargs='*')
args = parser.parse_args()
print args
print odb(args.cmd, args.key, args.val)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
sm_project/asgi.py | """
ASGI config for sm_project project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sm_project.settings')
application = get_asgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
venv/lib/python3.8/site-packages/pyls/plugins/flake8_lint.py | /home/runner/.cache/pip/pool/e9/21/a8/ddaeea548c916834422e78d57a0db4b7d09a1722dc1ac04eac30a6fdbd | [] | [] | [] | [] | [] | python | null | null | null |
test/unit/test_s3_utils.py | # Copyright 2017-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
from mock import MagicMock, patch
from sagemaker_tensorflow_container import s3_utils
BUCKET_REGION = 'us-west-2'
JOB_REGION = 'us-west-1'
JOB_BUKCET = 'sagemaker-us-west-2-000-00-1'
PREFIX = 'sagemaker/something'
MODEL_DIR = 's3://{}/{}'.format(JOB_BUKCET, PREFIX)
@patch('boto3.client')
def test_configure(client):
s3 = MagicMock()
client.return_value = s3
loc = {'LocationConstraint': BUCKET_REGION}
s3.get_bucket_location.return_value = loc
s3_utils.configure(MODEL_DIR, JOB_REGION)
assert os.environ['S3_REGION'] == BUCKET_REGION
assert os.environ['TF_CPP_MIN_LOG_LEVEL'] == '1'
assert os.environ['S3_USE_HTTPS'] == '1'
def test_configure_local_dir():
s3_utils.configure('/opt/ml/model', JOB_REGION)
assert os.environ['S3_REGION'] == JOB_REGION
assert os.environ['TF_CPP_MIN_LOG_LEVEL'] == '1'
assert os.environ['S3_USE_HTTPS'] == '1'
| [] | [] | [
"S3_USE_HTTPS",
"S3_REGION",
"TF_CPP_MIN_LOG_LEVEL"
] | [] | ["S3_USE_HTTPS", "S3_REGION", "TF_CPP_MIN_LOG_LEVEL"] | python | 3 | 0 | |
pkg/gds/secret_test.go | package gds_test
import (
"context"
"crypto/x509"
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/require"
"github.com/trisacrypto/directory/pkg/gds"
"github.com/trisacrypto/directory/pkg/gds/config"
"github.com/trisacrypto/trisa/pkg/trust"
)
// Six environment variables are required to run this test:
// $GDS_TEST_GOOGLE_SECRETS
// Required; can be included when the tests are run, e.g.
// GDS_TEST_GOOGLE_SECRETS=1 go test ./pkg/gds/secret_test.go
// $GOOGLE_APPLICATION_CREDENTIALS
// Required; must be a valid google secret manager service
// credentials JSON (absolute path)
// $GOOGLE_PROJECT_NAME
// Required; must be a valid google secret manager project name
// $TRISA_TEST_PASSWORD
// Required; must be a valid pkcs12password that matches the TRISA_TEST_CERT
// $TRISA_TEST_CERT
// Required; must be a valid TRISA certificate that matches the TRISA_TEST_PASSWORD
// $TRISA_TEST_FILE
// Required; absolute path for an intermediate tempfile to write the retrieved cert
// TODO: Temp file write & delete can be removed once trust serializer can unzip raw bytes, see:
// https://github.com/trisacrypto/trisa/issues/51
// Note: tests execute against live secret manager API, so use caution!
func TestSecrets(t *testing.T) {
if os.Getenv("GDS_TEST_GOOGLE_SECRETS") == "" {
t.Skip("skip Google SecretManager API connection test")
}
testConf := config.SecretsConfig{
Credentials: os.Getenv("GOOGLE_APPLICATION_CREDENTIALS"),
Project: os.Getenv("GOOGLE_PROJECT_NAME"),
}
// Prep test fixtures
testRequestId := "123"
testSecretType := "testCert"
testContext := context.Background()
tempFile := os.Getenv("TRISA_TEST_FILE")
testPassword := os.Getenv("TRISA_TEST_PASSWORD")
testPayload, err := ioutil.ReadFile(os.Getenv("TRISA_TEST_CERT"))
require.NoError(t, err)
// Create secret manager
sm, err := gds.NewSecretManager(testConf)
require.NoError(t, err)
// Create a secret for the testSecretType
require.NoError(t, sm.With(testRequestId).CreateSecret(testContext, testSecretType))
// Create a version to hold the testPayload
require.NoError(t, sm.With(testRequestId).AddSecretVersion(testContext, testSecretType, testPayload))
// Retrieve the testPayload
testResult, err := sm.With(testRequestId).GetLatestVersion(testContext, testSecretType)
require.Equal(t, testResult, testPayload)
require.NoError(t, err)
// Create a serializer to read in the retrieved payload
var archive *trust.Serializer
archive, err = trust.NewSerializer(true, testPassword, trust.CompressionZIP)
require.NoError(t, err)
// Write the cert to a temp file
require.NoError(t, ioutil.WriteFile(tempFile, testResult, 0777))
// Create provider to read in the bytes of the zipfile
var provider *trust.Provider
provider, err = archive.ReadFile(tempFile)
require.NoError(t, err)
// Delete the temporary file now that we're done with it
require.NoError(t, os.Remove(tempFile))
// Verify that the leaves of the retrieved cert can be extracted
var cert *x509.Certificate
cert, err = provider.GetLeafCertificate()
require.NoError(t, err)
require.NotNil(t, cert)
// Delete the secret
require.NoError(t, sm.With(testRequestId).DeleteSecret(testContext, testSecretType))
}
| [
"\"GDS_TEST_GOOGLE_SECRETS\"",
"\"GOOGLE_APPLICATION_CREDENTIALS\"",
"\"GOOGLE_PROJECT_NAME\"",
"\"TRISA_TEST_FILE\"",
"\"TRISA_TEST_PASSWORD\"",
"\"TRISA_TEST_CERT\""
] | [] | [
"GOOGLE_PROJECT_NAME",
"GOOGLE_APPLICATION_CREDENTIALS",
"GDS_TEST_GOOGLE_SECRETS",
"TRISA_TEST_PASSWORD",
"TRISA_TEST_CERT",
"TRISA_TEST_FILE"
] | [] | ["GOOGLE_PROJECT_NAME", "GOOGLE_APPLICATION_CREDENTIALS", "GDS_TEST_GOOGLE_SECRETS", "TRISA_TEST_PASSWORD", "TRISA_TEST_CERT", "TRISA_TEST_FILE"] | go | 6 | 0 | |
4.30/code/three_nodes_bw.py | #!/usr/bin/python
import os
import sys
import glob
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.link import TCLink
from mininet.cli import CLI
script_deps = [ 'ethtool' ]
def check_scripts():
dir = os.path.abspath(os.path.dirname(sys.argv[0]))
for fname in glob.glob(dir + '/' + 'scripts/*.sh'):
if not os.access(fname, os.X_OK):
print '%s should be set executable by using `chmod +x $script_name`' % (fname)
sys.exit(1)
for program in script_deps:
found = False
for path in os.environ['PATH'].split(os.pathsep):
exe_file = os.path.join(path, program)
if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK):
found = True
break
if not found:
print '`%s` is required but missing, which could be installed via `apt` or `aptitude`' % (program)
sys.exit(2)
# Mininet automatically assigns an IP address to each interface of a node,
# but a hub or switch does not need an IP address.
def clearIP(n):
for iface in n.intfList():
n.cmd('ifconfig %s 0.0.0.0' % (iface))
class BroadcastTopo(Topo):
def build(self):
h1 = self.addHost('h1')
h2 = self.addHost('h2')
h3 = self.addHost('h3')
s1 = self.addHost('s1')
self.addLink(h1, s1, bw=20)
self.addLink(h2, s1, bw=10)
self.addLink(h3, s1, bw=10)
if __name__ == '__main__':
check_scripts()
topo = BroadcastTopo()
net = Mininet(topo = topo, link = TCLink, controller = None)
h1, h2, h3, s1 = net.get('h1', 'h2', 'h3', 's1')
h1.cmd('ifconfig h1-eth0 10.0.0.1/8')
h2.cmd('ifconfig h2-eth0 10.0.0.2/8')
h3.cmd('ifconfig h3-eth0 10.0.0.3/8')
clearIP(s1)
for h in [ h1, h2, h3, s1 ]:
h.cmd('./scripts/disable_offloading.sh')
h.cmd('./scripts/disable_ipv6.sh')
net.start()
# s1.cmd('./switch-reference &')
# h2.cmd('iperf -s &')
# h3.cmd('iperf -s &')
CLI(net)
net.stop()
| [] | [] | [
"PATH"
] | [] | ["PATH"] | python | 1 | 0 | |
neutronclient/tests/unit/test_shell.py | # Copyright (C) 2013 Yahoo! Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import logging
import os
import re
import sys
import fixtures
from keystoneauth1 import session
import mock
import six
import testtools
from testtools import matchers
from neutronclient.common import clientmanager
from neutronclient.neutron.v2_0 import network
from neutronclient import shell as openstack_shell
DEFAULT_USERNAME = 'username'
DEFAULT_PASSWORD = 'password'
DEFAULT_TENANT_ID = 'tenant_id'
DEFAULT_TENANT_NAME = 'tenant_name'
DEFAULT_AUTH_URL = 'http://127.0.0.1:5000/v2.0/'
DEFAULT_TOKEN = '3bcc3d3a03f44e3d8377f9247b0ad155'
DEFAULT_URL = 'http://quantum.example.org:9696/'
DEFAULT_REGION = 'regionOne'
DEFAULT_ENDPOINT_TYPE = 'public'
DEFAULT_API_VERSION = '2.0'
DEFAULT_SERVICE_TYPE = 'network'
DEFAULT_SERVICE_NAME = 'neutron'
DEFAULT_RETRIES = 3
DEFAULT_TIMEOUT = 3.0
class ShellTest(testtools.TestCase):
FAKE_ENV = {
'OS_USERNAME': DEFAULT_USERNAME,
'OS_PASSWORD': DEFAULT_PASSWORD,
'OS_TENANT_ID': DEFAULT_TENANT_ID,
'OS_TENANT_NAME': DEFAULT_TENANT_NAME,
'OS_AUTH_URL': DEFAULT_AUTH_URL,
'OS_REGION_NAME': None,
'HTTP_PROXY': None,
'http_proxy': None,
}
# Patch os.environ to avoid required auth info.
def setUp(self):
super(ShellTest, self).setUp()
for var in self.FAKE_ENV:
self.useFixture(
fixtures.EnvironmentVariable(
var, self.FAKE_ENV[var]))
def shell(self, argstr, check=False, expected_val=0):
# expected_val is the expected return value after executing
# the command in NeutronShell
orig = (sys.stdout, sys.stderr)
clean_env = {}
_old_env, os.environ = os.environ, clean_env.copy()
try:
sys.stdout = six.moves.cStringIO()
sys.stderr = six.moves.cStringIO()
_shell = openstack_shell.NeutronShell('2.0')
_shell.run(argstr.split())
except SystemExit:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.assertEqual(expected_val, exc_value.code)
finally:
stdout = sys.stdout.getvalue()
stderr = sys.stderr.getvalue()
sys.stdout.close()
sys.stderr.close()
sys.stdout, sys.stderr = orig
os.environ = _old_env
return stdout, stderr
def test_run_unknown_command(self):
self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
stdout, stderr = self.shell('fake', check=True)
self.assertFalse(stdout)
self.assertIn("Unknown command ['fake']", stderr.strip())
def test_help(self):
required = 'usage:'
help_text, stderr = self.shell('help')
self.assertThat(
help_text,
matchers.MatchesRegex(required))
def test_bash_completion(self):
required = '.*os_user_domain_id.*'
bash_completion, stderr = self.shell('bash-completion')
self.assertThat(
bash_completion,
matchers.MatchesRegex(required))
def test_help_on_subcommand(self):
required = [
'.*?^usage: .* quota-list']
stdout, stderr = self.shell('help quota-list')
for r in required:
self.assertThat(
stdout,
matchers.MatchesRegex(r, re.DOTALL | re.MULTILINE))
def test_help_command(self):
required = 'usage:'
help_text, stderr = self.shell('help network-create')
self.assertThat(
help_text,
matchers.MatchesRegex(required))
def test_bash_completion_in_outputs_of_help_command(self):
help_text, stderr = self.shell('help')
completion_cmd = "bash-completion"
completion_help_str = ("Prints all of the commands and options "
"for bash-completion.")
self.assertIn(completion_cmd, help_text)
self.assertIn(completion_help_str, help_text)
def test_bash_completion_command(self):
# just check we have some output
required = [
'.*--tenant_id',
'.*help',
'.*--dns-nameserver']
help_text, stderr = self.shell('neutron bash-completion')
for r in required:
self.assertThat(help_text,
matchers.MatchesRegex(r, re.DOTALL | re.MULTILINE))
def test_build_option_parser(self):
neutron_shell = openstack_shell.NeutronShell('2.0')
result = neutron_shell.build_option_parser('descr', '2.0')
self.assertIsInstance(result, argparse.ArgumentParser)
@mock.patch.object(openstack_shell.NeutronShell, 'run')
def test_main_with_unicode(self, fake_shell):
unicode_text = u'\u7f51\u7edc'
argv = ['net-list', unicode_text, unicode_text]
fake_shell.return_value = 0
ret = openstack_shell.main(argv=argv)
fake_shell.assert_called_once_with([u'net-list', unicode_text,
unicode_text])
self.assertEqual(0, ret)
def test_endpoint_option(self):
shell = openstack_shell.NeutronShell('2.0')
parser = shell.build_option_parser('descr', '2.0')
# Neither $OS_ENDPOINT_TYPE nor --os-endpoint-type
namespace = parser.parse_args([])
self.assertEqual('public', namespace.os_endpoint_type)
# --os-endpoint-type but not $OS_ENDPOINT_TYPE
namespace = parser.parse_args(['--os-endpoint-type=admin'])
self.assertEqual('admin', namespace.os_endpoint_type)
def test_endpoint_environment_variable(self):
fixture = fixtures.EnvironmentVariable("OS_ENDPOINT_TYPE",
"public")
self.useFixture(fixture)
shell = openstack_shell.NeutronShell('2.0')
parser = shell.build_option_parser('descr', '2.0')
# $OS_ENDPOINT_TYPE but not --endpoint-type
namespace = parser.parse_args([])
self.assertEqual("public", namespace.os_endpoint_type)
# --endpoint-type and $OS_ENDPOINT_TYPE
namespace = parser.parse_args(['--endpoint-type=admin'])
self.assertEqual('admin', namespace.endpoint_type)
def test_timeout_option(self):
shell = openstack_shell.NeutronShell('2.0')
parser = shell.build_option_parser('descr', '2.0')
# Neither $OS_NETWORK_TIMEOUT nor --http-timeout
namespace = parser.parse_args([])
self.assertIsNone(namespace.http_timeout)
# --http-timeout but not $OS_NETWORK_TIMEOUT
namespace = parser.parse_args(['--http-timeout=50'])
self.assertEqual(50, namespace.http_timeout)
def test_timeout_environment_variable(self):
fixture = fixtures.EnvironmentVariable("OS_NETWORK_TIMEOUT",
"50")
self.useFixture(fixture)
shell = openstack_shell.NeutronShell('2.0')
parser = shell.build_option_parser('descr', '2.0')
namespace = parser.parse_args([])
self.assertEqual(50, namespace.http_timeout)
def test_run_incomplete_command(self):
self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
cmd = (
'--os-username test --os-password test --os-project-id test '
'--os-auth-strategy keystone --os-auth-url '
'%s port-create' %
DEFAULT_AUTH_URL)
stdout, stderr = self.shell(cmd, check=True, expected_val=2)
search_str = "Try 'neutron help port-create' for more information"
self.assertTrue(any(search_str in string for string
in stderr.split('\n')))
def _test_authenticate_user(self, expect_verify, expect_insecure,
**options):
base_options = {'os_cloud': None,
'http_timeout': DEFAULT_TIMEOUT,
'region_name': DEFAULT_REGION,
'network_service_name': DEFAULT_SERVICE_NAME,
'neutron_service_type': DEFAULT_SERVICE_TYPE}
options.update(base_options)
if options.get('os_token'):
options.update({'auth_type': 'token'})
options.update({'os_token': 'token', 'os_url': 'url'})
else:
options.update({'os_token': None, 'os_url': None})
with mock.patch.object(openstack_shell.NeutronShell,
'run_subcommand'), \
mock.patch.object(session, 'Session') as session_mock, \
mock.patch.object(clientmanager, 'ClientManager') as cmgr_mock:
shell = openstack_shell.NeutronShell(DEFAULT_API_VERSION)
shell.options = mock.Mock(spec=options.keys())
for k, v in options.items():
setattr(shell.options, k, v)
shell.options.os_endpoint_type = DEFAULT_ENDPOINT_TYPE
shell.options.retries = DEFAULT_RETRIES
if not (options.get('os_token') and options.get('os_url')):
auth = mock.ANY
auth_session = mock.sentinel.session
session_mock.return_value = auth_session
else:
auth = None
auth_session = None
shell.authenticate_user()
if not (options.get('os_token') and options.get('os_url')):
session_mock.assert_called_once_with(
auth=mock.ANY, verify=expect_verify,
cert=options.get('cert'),
timeout=DEFAULT_TIMEOUT)
else:
self.assertFalse(session_mock.called)
cmgr_mock.assert_called_once_with(
retries=DEFAULT_RETRIES,
raise_errors=False,
session=auth_session,
url=options.get('os_url'),
token=options.get('os_token'),
region_name=DEFAULT_REGION,
api_version=DEFAULT_API_VERSION,
service_type=DEFAULT_SERVICE_TYPE,
service_name=DEFAULT_SERVICE_NAME,
endpoint_type=DEFAULT_ENDPOINT_TYPE,
auth=auth,
insecure=expect_insecure,
log_credentials=True)
def test_authenticate_secure_with_cacert_with_cert(self):
self._test_authenticate_user(
insecure=False, cacert='cacert', cert='cert',
expect_verify='cacert', expect_insecure=False)
def test_authenticate_secure_with_cacert_with_cert_with_token(self):
self._test_authenticate_user(
os_token='token',
insecure=False, cacert='cacert', cert='cert',
expect_verify='cacert', expect_insecure=False)
def test_authenticate_insecure_with_cacert_with_cert(self):
self._test_authenticate_user(
insecure=True, cacert='cacert', cert='cert',
expect_verify=False, expect_insecure=True)
def test_authenticate_insecure_with_cacert_with_cert_with_token(self):
self._test_authenticate_user(
os_token='token',
insecure=True, cacert='cacert', cert='cert',
expect_verify=False, expect_insecure=True)
def test_authenticate_secure_without_cacert_with_cert(self):
self._test_authenticate_user(
insecure=False, cert='cert',
expect_verify=True, expect_insecure=False)
def test_authenticate_secure_without_cacert_with_cert_with_token(self):
self._test_authenticate_user(
os_token='token',
insecure=False, cert='cert',
expect_verify=True, expect_insecure=False)
def test_authenticate_insecure_without_cacert_with_cert(self):
self._test_authenticate_user(
insecure=True, cert='cert',
expect_verify=False, expect_insecure=True)
def test_authenticate_insecure_without_cacert_with_cert_with_token(self):
self._test_authenticate_user(
os_token='token',
insecure=True, cert='cert',
expect_verify=False, expect_insecure=True)
def test_authenticate_secure_with_cacert_without_cert(self):
self._test_authenticate_user(
insecure=False, cacert='cacert',
expect_verify='cacert', expect_insecure=False)
def test_authenticate_secure_with_cacert_without_cert_with_token(self):
self._test_authenticate_user(
os_token='token',
insecure=False, cacert='cacert',
expect_verify='cacert', expect_insecure=False)
def test_authenticate_insecure_with_cacert_without_cert(self):
self._test_authenticate_user(
insecure=True, cacert='cacert',
expect_verify=False, expect_insecure=True)
def test_authenticate_insecure_with_cacert_without_cert_with_token(self):
self._test_authenticate_user(
os_token='token',
insecure=True, cacert='cacert',
expect_verify=False, expect_insecure=True)
def test_commands_dict_populated(self):
# neutron.shell.COMMANDS is populated once NeutronShell is initialized.
# To check COMMANDS during NeutronShell initialization,
# reset COMMANDS to some dummy value before calling NeutronShell().
self.useFixture(fixtures.MockPatchObject(openstack_shell,
'COMMANDS', None))
openstack_shell.NeutronShell('2.0')
self.assertDictContainsSubset(
{'net-create': network.CreateNetwork,
'net-delete': network.DeleteNetwork,
'net-list': network.ListNetwork,
'net-show': network.ShowNetwork,
'net-update': network.UpdateNetwork},
openstack_shell.COMMANDS['2.0'])
| [] | [] | [] | [] | [] | python | 0 | 0 | |
server.py | from flask import Flask, request
import json
import cloudinary
import cloudinary.uploader
import string
import random
import luckyface
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
# cloudinary.config(
# cloud_name=os.environ['CLOUDINARY_CLOUD_NAME'],
# api_key=os.environ['CLOUDINARY_API_KEY'],
# api_secret=os.environ['CLOUDINARY_API_SECRET']
# )
def write_cache(father, mother, image_url, image_filename):
filename = 'combinations.json'
key = "%s and %s" % (father, mother)
with open(filename, 'r') as jsonFile:
data = json.load(jsonFile)
try:
data[key]
except KeyError:
data[key] = {
'url': image_url,
'filename': image_filename
}
with open(filename, 'w') as jsonFile:
json.dump(data, jsonFile)
def get_cached_url(father, mother):
filename = 'combinations.json'
key = "%s and %s" % (father, mother)
with open(filename, 'r') as jsonFile:
data = json.load(jsonFile)
try:
dictionary = data[key]
except:
return None
try:
url = dictionary['url']
except:
return None
return url
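# Cache file layout (illustrative): combinations.json maps "father and mother"
# keys to {"url": <cloudinary url>, "filename": <generated image file>}; only
# the "url" field is read back here.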
def bad():
return str({
'status': 'error'
}), 400
def good(url):
return str(url), 200
def generate_child(father, mother):
filename = 'imgz' + ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20)) + '.jpg'
luckyface.make_baby(father, mother, filename)
return filename
def upload_image(filename):
try:
url = cloudinary.uploader.upload(filename)['url']
return url
except Exception as e:
return None
@app.route('/get-child')
def get_child():
try:
father, mother = request.args.get('father'), request.args.get('mother')
except:
return bad()
if not mother or not father:
return bad()
url = get_cached_url(father, mother)
if url:
return good(url)
filename = generate_child(father, mother)
url = upload_image(filename)
if url:
write_cache(father, mother, url, filename)
print(url)
return good(url)
return bad()
| [] | [] | [
"CLOUDINARY_API_KEY",
"CLOUDINARY_API_SECRET",
"CLOUDINARY_CLOUD_NAME"
] | [] | ["CLOUDINARY_API_KEY", "CLOUDINARY_API_SECRET", "CLOUDINARY_CLOUD_NAME"] | python | 3 | 0 | |
loadgen/generate_load.py | import barnum, random, time, json, requests, math, os
from mysql.connector import connect, Error
from kafka import KafkaProducer
# CONFIG
userSeedCount = 10000
itemSeedCount = 1000
purchaseGenCount = 500000
purchaseGenEveryMS = 100
pageviewMultiplier = 75 # Translates to 75x purchases, currently 750/sec or 65M/day
itemInventoryMin = 1000
itemInventoryMax = 5000
itemPriceMin = 5
itemPriceMax = 500
mysqlHost = 'mysql'
mysqlPort = '3306'
mysqlUser = 'root'
mysqlPass = 'debezium'
kafkaHostPort = os.getenv('KAFKA_ADDR', 'kafka:9092')
kafkaTopic = 'pageviews'
debeziumHostPort = 'debezium:8083'
channels = ['organic search', 'paid search', 'referral', 'social', 'display']
categories = ['widgets', 'gadgets', 'doodads', 'clearance']
# INSERT TEMPLATES
item_insert = "INSERT INTO shop.items (name, category, price, inventory) VALUES ( %s, %s, %s, %s )"
user_insert = "INSERT INTO shop.users (email, is_vip) VALUES ( %s, %s )"
purchase_insert = "INSERT INTO shop.purchases (user_id, item_id, quantity, purchase_price) VALUES ( %s, %s, %s, %s )"
#Initialize Debezium (Kafka Connect Component)
requests.post(('http://%s/connectors' % debeziumHostPort),
json={
"name": "mysql-connector",
"config": {
"connector.class": "io.debezium.connector.mysql.MySqlConnector",
"database.hostname": mysqlHost,
"database.port": mysqlPort,
"database.user": mysqlUser,
"database.password": mysqlPass,
"database.server.name": mysqlHost,
"database.server.id": '1234',
"database.history.kafka.bootstrap.servers": kafkaHostPort,
"database.history.kafka.topic": "mysql-history",
"time.precision.mode": "connect"
}
}
)
#Initialize Kafka
producer = KafkaProducer(bootstrap_servers=[kafkaHostPort],
value_serializer=lambda x:
json.dumps(x).encode('utf-8'))
def generatePageview(viewer_id, target_id, page_type):
return {
"user_id": viewer_id,
"url": f'/{page_type}/{target_id}',
"channel": random.choice(channels),
"received_at": int(time.time())
}
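# Example message produced to the 'pageviews' topic (hypothetical values):
# {"user_id": 42, "url": "/products/7", "channel": "social", "received_at": 1600000000}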
try:
with connect(
host=mysqlHost,
user=mysqlUser,
password=mysqlPass,
) as connection:
with connection.cursor() as cursor:
print("Initializing shop database...")
cursor.execute('CREATE DATABASE IF NOT EXISTS shop;')
cursor.execute(
"""CREATE TABLE IF NOT EXISTS shop.users
(
id SERIAL PRIMARY KEY,
email VARCHAR(255),
is_vip BOOLEAN DEFAULT FALSE,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
);"""
)
cursor.execute(
"""CREATE TABLE IF NOT EXISTS shop.items
(
id SERIAL PRIMARY KEY,
name VARCHAR(100),
category VARCHAR(100),
price DECIMAL(7,2),
inventory INT,
inventory_updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
);"""
)
cursor.execute(
"""CREATE TABLE IF NOT EXISTS shop.purchases
(
id SERIAL PRIMARY KEY,
user_id BIGINT UNSIGNED REFERENCES users(id),
item_id BIGINT UNSIGNED REFERENCES items(id),
status TINYINT UNSIGNED DEFAULT 1,
quantity INT UNSIGNED DEFAULT 1,
purchase_price DECIMAL(12,2),
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
);"""
)
connection.commit()
print("Seeding data...")
cursor.executemany(
item_insert,
[
(
barnum.create_nouns(),
random.choice(categories),
random.randint(itemPriceMin*100,itemPriceMax*100)/100,
random.randint(itemInventoryMin,itemInventoryMax)
) for i in range(itemSeedCount)
]
)
cursor.executemany(
user_insert,
[
(
barnum.create_email(),
(random.randint(0,10) > 8)
) for i in range(userSeedCount)
]
)
connection.commit()
print("Getting item ID and PRICEs...")
cursor.execute("SELECT id, price FROM shop.items")
item_prices = [(row[0], row[1]) for row in cursor]
print("Preparing to loop + seed kafka pageviews and purchases")
for i in range(purchaseGenCount):
# Get a user and item to purchase
purchase_item = random.choice(item_prices)
purchase_user = random.randint(0,userSeedCount-1)
purchase_quantity = random.randint(1,5)
# Write purchaser pageview
producer.send(kafkaTopic, key=str(purchase_user).encode('ascii'), value=generatePageview(purchase_user, purchase_item[0], 'products'))
# Write random pageviews to products or profiles
pageviewOscillator = int(pageviewMultiplier + (math.sin(time.time()/1000)*50))
for i in range(pageviewOscillator):
rand_user = random.randint(0,userSeedCount)
rand_page_type = random.choice(['products', 'profiles'])
target_id_max_range = itemSeedCount if rand_page_type == 'products' else userSeedCount
producer.send(kafkaTopic, key=str(rand_user).encode('ascii'), value=generatePageview(rand_user, random.randint(0,target_id_max_range), rand_page_type))
# Write purchase row
cursor.execute(
purchase_insert,
(
purchase_user,
purchase_item[0],
purchase_quantity,
purchase_item[1] * purchase_quantity
)
)
connection.commit()
#Pause
time.sleep(purchaseGenEveryMS/1000)
connection.close()
except Error as e:
print(e)
| [] | [] | [
"KAFKA_ADDR"
] | [] | ["KAFKA_ADDR"] | python | 1 | 0 | |
server/src/test/java/umm3601/note/NoteControllerSpec.java | package umm3601.note;
import static com.mongodb.client.model.Filters.eq;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.collect.ImmutableMap;
import com.mockrunner.mock.web.MockHttpServletRequest;
import com.mockrunner.mock.web.MockHttpServletResponse;
import com.mongodb.BasicDBObject;
import com.mongodb.MongoClientSettings;
import com.mongodb.ServerAddress;
import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.MongoDatabase;
import org.bson.Document;
import org.bson.types.ObjectId;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.mockito.Spy;
import io.javalin.http.BadRequestResponse;
import io.javalin.http.ConflictResponse;
import io.javalin.http.Context;
import io.javalin.http.NotFoundResponse;
import io.javalin.http.util.ContextUtil;
import io.javalin.plugin.json.JavalinJson;
import umm3601.UnprocessableResponse;
public class NoteControllerSpec {
MockHttpServletRequest mockReq = new MockHttpServletRequest();
MockHttpServletResponse mockRes = new MockHttpServletResponse();
@Mock(name = "dt")
DeathTimer dtMock;
private ObjectId samsNoteId;
static MongoClient mongoClient;
@Spy
static MongoDatabase db;
// This indirection is only here so that Mockito can construct the controller
// and inject dtMock into it.
@InjectMocks
NoteController noteController;
static ObjectMapper jsonMapper = new ObjectMapper();
@BeforeAll
public static void setupAll() {
String mongoAddr = System.getenv().getOrDefault("MONGO_ADDR", "localhost");
mongoClient = MongoClients.create(MongoClientSettings.builder()
.applyToClusterSettings(builder -> builder.hosts(Arrays.asList(new ServerAddress(mongoAddr)))).build());
db = mongoClient.getDatabase("test");
}
@BeforeEach
public void setupEach() throws IOException {
MockitoAnnotations.initMocks(this);
// Reset our mock objects
mockReq.resetAll();
mockRes.resetAll();
when(dtMock.updateTimerStatus(any(Note.class))).thenReturn(true);
MongoCollection<Document> noteDocuments = db.getCollection("notes");
noteDocuments.drop();
List<Document> testNotes = new ArrayList<>();
testNotes.add(Document
.parse("{ " + "ownerID: \"owner1_ID\", " + "body: \"I am running 5 minutes late to my non-existent office\", "
+ "addDate: \"2020-03-07T22:03:38+0000\", " + "expireDate: \"2021-03-20T22:03:38+0000\", "
+ "status: \"active\"" + "}"));
testNotes.add(Document.parse("{ " + "ownerID: \"owner1_ID\", " + "body: \"I am never coming to my office again\", "
+ "addDate: \"2020-03-07T22:03:38+0000\", " + "expireDate: \"2099-03-07T22:03:38+0000\", "
+ "status: \"active\"" + "}"));
testNotes.add(Document.parse("{ " + "ownerID: \"owner2_ID\", " + "body: \"I am on sabbatical no office hours\", "
+ "addDate: \"2020-03-07T22:03:38+0000\", " + "expireDate: \"2021-03-07T22:03:38+0000\", "
+ "status: \"active\"" + "}"));
testNotes.add(Document.parse("{ " + "ownerID: \"owner2_ID\", " + "body: \"Go to owner3's office\", "
+ "addDate: \"2020-03-07T22:03:38+0000\", " + "expireDate: \"2020-03-21T22:03:38+0000\", "
+ "status: \"active\"" + "}"));
testNotes.add(Document.parse("{ " + "ownerID: \"owner3_ID\", "
+ "body: \"Not many come to my office I offer donuts\", " + "addDate: \"2020-03-07T22:03:38+0000\", "
+ "expireDate: \"2021-03-07T22:03:38+0000\", " + "status: \"active\"" + "}"));
samsNoteId = new ObjectId();
BasicDBObject sam = new BasicDBObject("_id", samsNoteId);
sam = sam.append("ownerID", "owner3_ID").append("body", "I am sam").append("addDate", "2020-03-07T22:03:38+0000")
.append("expireDate", "2100-03-07T22:03:38+0000").append("status", "active");
noteDocuments.insertMany(testNotes);
noteDocuments.insertOne(Document.parse(sam.toJson()));
noteController = new NoteController(db, dtMock);
}
@AfterAll
public static void teardown() {
db.drop();
mongoClient.close();
}
@Test
public void getAllNotesForOwner1() {
mockReq.setQueryString("ownerid=owner1_ID");
Context ctx = ContextUtil.init(mockReq, mockRes, "api/notes");
noteController.getNotesByOwner(ctx);
assertEquals(200, mockRes.getStatus());
String result = ctx.resultString();
Note[] resultNotes = JavalinJson.fromJson(result, Note[].class);
assertEquals(2, resultNotes.length);
for (Note note : resultNotes) {
assertEquals("owner1_ID", note.ownerID, "Incorrect ID");
}
}
@Test
public void addNote() throws IOException {
ArgumentCaptor<Note> noteCaptor = ArgumentCaptor.forClass(Note.class);
String testNewNote = "{ " + "\"ownerID\": \"e7fd674c72b76596c75d9f1e\", " + "\"body\": \"Test Body\", "
+ "\"addDate\": \"2020-03-07T22:03:38+0000\", " + "\"expireDate\": \"2021-03-07T22:03:38+0000\", "
+ "\"status\": \"active\" }";
mockReq.setBodyContent(testNewNote);
mockReq.setMethod("POST");
Context ctx = ContextUtil.init(mockReq, mockRes, "api/notes/new");
noteController.addNewNote(ctx);
assertEquals(201, mockRes.getStatus());
String result = ctx.resultString();
String id = jsonMapper.readValue(result, ObjectNode.class).get("id").asText();
assertNotEquals("", id);
assertEquals(1, db.getCollection("notes").countDocuments(eq("_id", new ObjectId(id))));
Document addedNote = db.getCollection("notes").find(eq("_id", new ObjectId(id))).first();
assertNotNull(addedNote);
assertEquals("e7fd674c72b76596c75d9f1e", addedNote.getString("ownerID"));
assertEquals("Test Body", addedNote.getString("body"));
assertEquals("2020-03-07T22:03:38+0000", addedNote.getString("addDate"));
assertEquals("2021-03-07T22:03:38+0000", addedNote.getString("expireDate"));
assertEquals("active", addedNote.getString("status"));
verify(dtMock).updateTimerStatus(noteCaptor.capture());
Note newNote = noteCaptor.getValue();
assertEquals(id, newNote._id);
assertEquals("e7fd674c72b76596c75d9f1e", newNote.ownerID);
assertEquals("Test Body", newNote.body);
assertEquals("2020-03-07T22:03:38+0000", newNote.addDate);
assertEquals("2021-03-07T22:03:38+0000", newNote.expireDate);
assertEquals("active", newNote.status);
}
@Test
public void editSingleField() throws IOException {
String reqBody = "{\"body\": \"I am not sam anymore\"}";
mockReq.setBodyContent(reqBody);
mockReq.setMethod("PATCH");
// Because we're partially altering an object, we make a body with just the
// alteration and use the PATCH (not PUT) method
Context ctx = ContextUtil.init(mockReq, mockRes, "api/notes/:id", ImmutableMap.of("id", samsNoteId.toHexString()));
noteController.editNote(ctx);
assertEquals(204, mockRes.getStatus());
// We don't have a good way to return just the edited object right now,
// so we return nothing in the body and show that with a 204 response.
assertEquals(1, db.getCollection("notes").countDocuments(eq("_id", samsNoteId)));
// There should still be exactly one note per id, and the id shouldn't have
// changed.
Document editedNote = db.getCollection("notes").find(eq("_id", samsNoteId)).first();
assertNotNull(editedNote);
// The note should still actually exist
assertEquals("I am not sam anymore", editedNote.getString("body"));
// The edited field should show the new value
assertEquals("owner3_ID", editedNote.getString("ownerID"));
assertEquals("active", editedNote.getString("status"));
assertEquals("2020-03-07T22:03:38+0000", editedNote.getString("addDate"));
assertEquals("2100-03-07T22:03:38+0000", editedNote.getString("expireDate"));
// all other fields should be untouched
verify(dtMock).updateTimerStatus(any(Note.class));
}
@Test
public void editMultipleFields() throws IOException {
ArgumentCaptor<Note> noteCaptor = ArgumentCaptor.forClass(Note.class);
String reqBody = "{\"body\": \"I am still sam\", \"expireDate\": \"2025-03-07T22:03:38+0000\"}";
mockReq.setBodyContent(reqBody);
mockReq.setMethod("PATCH");
Context ctx = ContextUtil.init(mockReq, mockRes, "api/notes/:id", ImmutableMap.of("id", samsNoteId.toHexString()));
noteController.editNote(ctx);
assertEquals(204, mockRes.getStatus());
assertEquals(1, db.getCollection("notes").countDocuments(eq("_id", samsNoteId)));
Document editedNote = db.getCollection("notes").find(eq("_id", samsNoteId)).first();
assertNotNull(editedNote);
assertEquals("I am still sam", editedNote.getString("body"));
assertEquals("2025-03-07T22:03:38+0000", editedNote.getString("expireDate"));
assertEquals("active", editedNote.getString("status"));
assertEquals("owner3_ID", editedNote.getString("ownerID"));
assertEquals("2020-03-07T22:03:38+0000", editedNote.getString("addDate"));
// Since the expireDate was changed, the timer's status should have been updated
verify(dtMock).updateTimerStatus(noteCaptor.capture());
Note updatedNote = noteCaptor.getValue();
assertEquals(samsNoteId.toHexString(), updatedNote._id);
assertEquals("I am still sam", updatedNote.body);
assertEquals("2025-03-07T22:03:38+0000", updatedNote.expireDate);
assertEquals("active", updatedNote.status);
assertEquals("owner3_ID", updatedNote.ownerID);
assertEquals("2020-03-07T22:03:38+0000", updatedNote.addDate);
}
@Test
public void editMissingId() throws IOException {
String reqBody = "{\"body\": \"I am not sam anymore\"}";
mockReq.setBodyContent(reqBody);
mockReq.setMethod("PATCH");
Context ctx = ContextUtil.init(mockReq, mockRes, "api/notes/:id",
ImmutableMap.of("id", "58af3a600343927e48e87335"));
assertThrows(NotFoundResponse.class, () -> {
noteController.editNote(ctx);
});
}
@Test
public void editBadId() throws IOException {
String reqBody = "{\"body\": \"I am not sam anymore\"}";
mockReq.setBodyContent(reqBody);
mockReq.setMethod("PATCH");
Context ctx = ContextUtil.init(mockReq, mockRes, "api/notes/:id",
ImmutableMap.of("id", "this garbage isn't an id!"));
assertThrows(BadRequestResponse.class, () -> {
noteController.editNote(ctx);
});
}
@Test
public void editIdWithMalformedBody() throws IOException {
mockReq.setBodyContent("This isn't parsable as a document");
mockReq.setMethod("PATCH");
Context ctx = ContextUtil.init(mockReq, mockRes, "api/notes/:id", ImmutableMap.of("id", samsNoteId.toHexString()));
assertThrows(BadRequestResponse.class, () -> {
noteController.editNote(ctx);
});
}
@Test
public void editIdWithInvalidValue() throws IOException {
mockReq.setBodyContent("{\"expireDate\": \"not actually a date\"}");
mockReq.setMethod("PATCH");
Context ctx = ContextUtil.init(mockReq, mockRes, "api/notes/:id", ImmutableMap.of("id", samsNoteId.toHexString()));
assertThrows(UnprocessableResponse.class, () -> {
noteController.editNote(ctx);
});
// HTTP 422 Unprocessable Entity: the entity could be parsed syntactically but
// was semantically invalid. In this case, a non-date string was supplied for a
// field that requires a date string.
}
@Test
public void editIdWithBadKeys() throws IOException {
mockReq.setBodyContent("{\"badKey\": \"irrelevant value\"}");
mockReq.setMethod("PATCH");
Context ctx = ContextUtil.init(mockReq, mockRes, "api/notes/:id", ImmutableMap.of("id", samsNoteId.toHexString()));
assertThrows(ConflictResponse.class, () -> {
noteController.editNote(ctx);
});
// ConflictResponse represents a 409 error, in this case an attempt to edit a
// nonexistent field.
}
@Test
public void editIdWithIllegalKeys() throws IOException {
mockReq.setBodyContent("{\"ownerID\": \"Charlie\"}");
mockReq.setMethod("PATCH");
Context ctx = ContextUtil.init(mockReq, mockRes, "api/notes/:id", ImmutableMap.of("id", samsNoteId.toHexString()));
assertThrows(BadRequestResponse.class, () -> {
noteController.editNote(ctx);
});
}
// The 422 and 409 errors could arguably be swapped between these conditions,
// or possibly both should be 409.
// Additionally, should attempting to edit a non-editable field (id, ownerID, or
// addDate) throw a 422, 409, 400, or 403?
@Test
public void AddNoteWithoutExpiration() throws IOException {
String testNewNote = "{ " + "\"ownerID\": \"e7fd674c72b76596c75d9f1e\", " + "\"body\": \"Test Body\", "
+ "\"addDate\": \"2020-03-07T22:03:38+0000\", " + "\"status\": \"active\" }";
mockReq.setBodyContent(testNewNote);
mockReq.setMethod("POST");
Context ctx = ContextUtil.init(mockReq, mockRes, "api/notes/new");
noteController.addNewNote(ctx);
assertEquals(201, mockRes.getStatus());
String result = ctx.resultString();
String id = jsonMapper.readValue(result, ObjectNode.class).get("id").asText();
assertNotEquals("", id);
assertEquals(1, db.getCollection("notes").countDocuments(eq("_id", new ObjectId(id))));
Document addedNote = db.getCollection("notes").find(eq("_id", new ObjectId(id))).first();
assertNotNull(addedNote);
assertEquals("e7fd674c72b76596c75d9f1e", addedNote.getString("ownerID"));
assertEquals("Test Body", addedNote.getString("body"));
assertEquals("2020-03-07T22:03:38+0000", addedNote.getString("addDate"));
assertNull(addedNote.getString("expireDate"));
assertEquals("active", addedNote.getString("status"));
verify(dtMock, never()).updateTimerStatus(any(Note.class));
}
@Test
public void RemoveExpirationFromNote() throws IOException {
ArgumentCaptor<Note> noteCaptor = ArgumentCaptor.forClass(Note.class);
mockReq.setBodyContent("{\"expireDate\": null}");
mockReq.setMethod("PATCH");
Context ctx = ContextUtil.init(mockReq, mockRes, "api/notes/:id", ImmutableMap.of("id", samsNoteId.toHexString()));
noteController.editNote(ctx);
assertEquals(204, mockRes.getStatus());
assertEquals(1, db.getCollection("notes").countDocuments(eq("_id", samsNoteId)));
Document editedNote = db.getCollection("notes").find(eq("_id", samsNoteId)).first();
assertNotNull(editedNote);
assertNull(editedNote.getString("expireDate"));
assertEquals("active", editedNote.getString("status"));
assertEquals("I am sam", editedNote.getString("body"));
assertEquals("owner3_ID", editedNote.getString("ownerID"));
assertEquals("2020-03-07T22:03:38+0000", editedNote.getString("addDate"));
verify(dtMock).updateTimerStatus(noteCaptor.capture());
Note updatedNote = noteCaptor.getValue();
assertEquals("active", updatedNote.status);
assertEquals("I am sam", updatedNote.body);
assertEquals("owner3_ID", updatedNote.ownerID);
assertEquals("2020-03-07T22:03:38+0000", updatedNote.addDate);
}
@Test
public void AddExpirationToNote() throws IOException {
// This is a little ugly and relies on addNewNote working, but there isn't a
// better way to get the ID of another note without an expiration date.
ArgumentCaptor<Note> noteCaptor = ArgumentCaptor.forClass(Note.class);
String testNewNote = "{ " + "\"ownerID\": \"e7fd674c72b76596c75d9f1e\", " + "\"body\": \"Test Body\", "
+ "\"addDate\": \"2020-03-07T22:03:38+0000\", " + "\"status\": \"active\" }";
mockReq.setBodyContent(testNewNote);
mockReq.setMethod("POST");
Context ctx = ContextUtil.init(mockReq, mockRes, "api/notes/new");
noteController.addNewNote(ctx);
String id = jsonMapper.readValue(ctx.resultString(), ObjectNode.class).get("id").asText();
mockRes.resetAll();
mockReq.setBodyContent("{\"expireDate\": \"2021-03-07T22:03:38+0000\"}");
mockReq.setMethod("PATCH");
ctx = ContextUtil.init(mockReq, mockRes, "api/notes/:id", ImmutableMap.of("id", new ObjectId(id).toHexString()));
noteController.editNote(ctx);
assertEquals(1, db.getCollection("notes").countDocuments(eq("_id", new ObjectId(id))));
Document addedNote = db.getCollection("notes").find(eq("_id", new ObjectId(id))).first();
assertNotNull(addedNote);
assertEquals("e7fd674c72b76596c75d9f1e", addedNote.getString("ownerID"));
assertEquals("Test Body", addedNote.getString("body"));
assertEquals("2020-03-07T22:03:38+0000", addedNote.getString("addDate"));
assertEquals("2021-03-07T22:03:38+0000", addedNote.getString("expireDate"));
assertEquals("active", addedNote.getString("status"));
verify(dtMock).updateTimerStatus(noteCaptor.capture());
Note editedNote = noteCaptor.getValue();
assertEquals(id, editedNote._id);
assertEquals("e7fd674c72b76596c75d9f1e", editedNote.ownerID);
assertEquals("Test Body", editedNote.body);
assertEquals("2020-03-07T22:03:38+0000", editedNote.addDate);
assertEquals("2021-03-07T22:03:38+0000", editedNote.expireDate);
assertEquals("active", editedNote.status);
}
@Test
public void ChangingStatusRemovesExpiration() throws IOException {
String reqBody = "{\"status\": \"draft\"}";
mockReq.setBodyContent(reqBody);
mockReq.setMethod("PATCH");
Context ctx = ContextUtil.init(mockReq, mockRes, "api/notes/:id", ImmutableMap.of("id", samsNoteId.toHexString()));
noteController.editNote(ctx);
assertEquals(204, mockRes.getStatus());
assertEquals(1, db.getCollection("notes").countDocuments(eq("_id", samsNoteId)));
Document editedNote = db.getCollection("notes").find(eq("_id", samsNoteId)).first();
assertNotNull(editedNote);
assertEquals("draft", editedNote.getString("status"));
assertNull(editedNote.getString("expireDate"));
assertEquals("I am sam", editedNote.getString("body"));
assertEquals("owner3_ID", editedNote.getString("ownerID"));
assertEquals("2020-03-07T22:03:38+0000", editedNote.getString("addDate"));
verify(dtMock).updateTimerStatus(any(Note.class));
}
@Test
public void AddNewInactiveWithExpiration() throws IOException {
String testNewNote = "{ " + "\"ownerID\": \"e7fd674c72b76596c75d9f1e\", " + "\"body\": \"Test Body\", "
+ "\"addDate\": \"2020-03-07T22:03:38+0000\", " + "\"expireDate\": \"2021-03-07T22:03:38+0000\", "
+ "\"status\": \"draft\" }";
mockReq.setBodyContent(testNewNote);
mockReq.setMethod("POST");
Context ctx = ContextUtil.init(mockReq, mockRes, "api/notes/new");
assertThrows(ConflictResponse.class, () -> {
noteController.addNewNote(ctx);
});
}
@Test
public void AddExpirationToInactive() throws IOException {
String testNewNote = "{ " + "\"ownerID\": \"e7fd674c72b76596c75d9f1e\", " + "\"body\": \"Test Body\", "
+ "\"addDate\": \"2020-03-07T22:03:38+0000\", " + "\"status\": \"template\" }";
mockReq.setBodyContent(testNewNote);
mockReq.setMethod("POST");
Context ctx = ContextUtil.init(mockReq, mockRes, "api/notes/new");
noteController.addNewNote(ctx);
String id = jsonMapper.readValue(ctx.resultString(), ObjectNode.class).get("id").asText();
mockRes.resetAll();
mockReq.setBodyContent("{\"expireDate\": \"2021-03-07T22:03:38+0000\"}");
mockReq.setMethod("PATCH");
Context ctx2 = ContextUtil.init(mockReq, mockRes, "api/notes/:id",
ImmutableMap.of("id", new ObjectId(id).toHexString()));
assertThrows(ConflictResponse.class, () -> {
noteController.editNote(ctx2);
});
}
@Test
public void AddExpirationAndDeactivate() throws IOException {
mockReq.setBodyContent("{\"expireDate\": \"2021-03-07T22:03:38+0000\", \"status\": \"draft\"}");
mockReq.setMethod("PATCH");
Context ctx = ContextUtil.init(mockReq, mockRes, "api/notes/:id", ImmutableMap.of("id", samsNoteId.toHexString()));
assertThrows(ConflictResponse.class, () -> {
noteController.editNote(ctx);
});
}
// Internal helper functions
@Test
public void FlagSinglePost() throws IOException {
noteController.flagOneForDeletion(samsNoteId.toHexString());
assertEquals("deleted", db.getCollection("notes").find(eq("_id", samsNoteId)).first().getString("status"));
verify(dtMock).updateTimerStatus(any(Note.class));
}
@Test
public void PurgeSinglePost() throws IOException {
noteController.singleDelete(samsNoteId.toHexString());
assertEquals(0, db.getCollection("notes").countDocuments(eq("_id", samsNoteId)));
verify(dtMock).clearKey(samsNoteId.toHexString());
}
}
| [] | [] | [] | [] | [] | java | 0 | 0 | |
libbeat/outputs/elasticsearch/client_proxy_test.go | // Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// This file contains tests to confirm that elasticsearch.Client uses proxy
// settings following the intended precedence.
package elasticsearch
import (
"bytes"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"os"
"os/exec"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/elastic/beats/v7/libbeat/common/atomic"
"github.com/elastic/beats/v7/libbeat/common/transport/httpcommon"
"github.com/elastic/beats/v7/libbeat/esleg/eslegclient"
"github.com/elastic/beats/v7/libbeat/outputs/outil"
)
// These constants are inserted into client http request headers and confirmed
// by the server listeners.
const (
headerTestField = "X-Test-Value"
headerTestValue = "client_proxy_test test value"
)
// TestClientPing is a placeholder test that does nothing on a standard run,
// but starts up a client and sends a ping when the environment variable
// TEST_START_CLIENT is set to 1 (as in execClient).
func TestClientPing(t *testing.T) {
// If this is the child process, start up the client, otherwise do nothing.
if os.Getenv("TEST_START_CLIENT") == "1" {
doClientPing(t)
return
}
}
// TestBaseline makes sure we can have a client process ping the server that
// we start, with no changes to the proxy settings. (This is really a
// meta-test for the helpers that create the servers / client.)
func TestBaseline(t *testing.T) {
servers, teardown := startServers(t)
defer teardown()
// Start a bare client with no proxy settings, pointed at the main server.
execClient(t, "TEST_SERVER_URL="+servers.serverURL)
// We expect one server request and 0 proxy requests
assert.Equal(t, 1, servers.serverRequestCount())
assert.Equal(t, 0, servers.proxyRequestCount())
}
// TestClientSettingsProxy confirms that we can control the proxy of a client
// by setting its ClientSettings.Proxy value on creation. (The child process
// uses the TEST_PROXY_URL environment variable to initialize the flag.)
func TestClientSettingsProxy(t *testing.T) {
servers, teardown := startServers(t)
defer teardown()
// Start a client with ClientSettings.Proxy set to the proxy listener.
execClient(t,
"TEST_SERVER_URL="+servers.serverURL,
"TEST_PROXY_URL="+servers.proxyURL)
// We expect one proxy request and 0 server requests
assert.Equal(t, 0, servers.serverRequestCount())
assert.Equal(t, 1, servers.proxyRequestCount())
}
// TestEnvironmentProxy confirms that we can control the proxy of a client by
// setting the HTTP_PROXY environment variable (see
// https://golang.org/pkg/net/http/#ProxyFromEnvironment).
func TestEnvironmentProxy(t *testing.T) {
servers, teardown := startServers(t)
defer teardown()
// Start a client with HTTP_PROXY set to the proxy listener.
// The server is set to a nonexistent URL because ProxyFromEnvironment
// always returns a nil proxy for local destination URLs.
execClient(t,
"TEST_SERVER_URL=http://fakeurl.fake.not-real",
"HTTP_PROXY="+servers.proxyURL)
// We expect one proxy request and 0 server requests
assert.Equal(t, 0, servers.serverRequestCount())
assert.Equal(t, 1, servers.proxyRequestCount())
}
// TestClientSettingsOverrideEnvironmentProxy confirms that when both
// ClientSettings.Proxy and HTTP_PROXY are set, ClientSettings takes precedence.
func TestClientSettingsOverrideEnvironmentProxy(t *testing.T) {
servers, teardown := startServers(t)
defer teardown()
// Start a client with ClientSettings.Proxy set to the proxy listener and
// HTTP_PROXY set to the server listener. We expect that the former will
// override the latter and thus we will only see a ping to the proxy.
// As above, the fake URL is needed to ensure ProxyFromEnvironment gives a
// non-nil result.
execClient(t,
"TEST_SERVER_URL=http://fakeurl.fake.not-real",
"TEST_PROXY_URL="+servers.proxyURL,
"HTTP_PROXY="+servers.serverURL)
// We expect one proxy request and 0 server requests
assert.Equal(t, 0, servers.serverRequestCount())
assert.Equal(t, 1, servers.proxyRequestCount())
}
// TestProxyDisableOverridesProxySettings confirms that setting
// ClientSettings.ProxyDisable disables the proxy even if both HTTP_PROXY
// and ClientSettings.Proxy are set.
// This test is less robust than the others: when golang derives proxy settings
// from HTTP[S]_PROXY, it still returns nil if it can detect that
// a request will be routed to localhost (this is why many tests in this file
// use invalid target URLs, so golang doesn't skip the proxy). In this test,
// where we want to confirm that a proxy is *not* used, we still need to
// use a remote URL (or we aren't really testing anything), but that means we
// can't listen on the remote endpoint. Instead, we just have to listen on the
// proxy endpoint and verify that it *doesn't* receive a request.
// I'm not entirely satisfied with this approach, but given the nature of
// golang's proxy handling it seems we can't do better without a multi-machine
// integration test.
func TestProxyDisableOverridesProxySettings(t *testing.T) {
servers, teardown := startServers(t)
defer teardown()
// Start a client with both ClientSettings.Proxy and HTTP_PROXY set to the
// proxy listener and ClientSettings.ProxyDisable set to true. We expect that
// ProxyDisable should override both the other proxy settings and the proxy
// should get zero requests.
execClient(t,
"TEST_SERVER_URL=http://fakeurl.fake.not-real",
"TEST_PROXY_URL="+servers.proxyURL,
"HTTP_PROXY="+servers.proxyURL,
"TEST_PROXY_DISABLE=true")
assert.Equal(t, 0, servers.proxyRequestCount())
}
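// The comment above TestProxyDisableOverridesProxySettings notes that Go skips
// HTTP[S]_PROXY for requests routed to localhost. The helper below is a minimal
// illustrative sketch of that behavior (hypothetical, not used by the tests in
// this file): it reports whether net/http's ProxyFromEnvironment would pick a
// proxy for the given target URL.
func proxyWouldBeUsed(targetURL string) (bool, error) {
req, err := http.NewRequest(http.MethodGet, targetURL, nil)
if err != nil {
return false, err
}
// ProxyFromEnvironment returns a non-nil URL for remote hosts when HTTP_PROXY is
// set, and nil for loopback destinations such as the httptest listeners above.
proxyURL, err := http.ProxyFromEnvironment(req)
if err != nil {
return false, err
}
return proxyURL != nil, nil
}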
// runClientTest executes the current test binary as a child process,
// running only the TestClientPing, and calling it with the environment variable
// TEST_START_CLIENT=1 (so the test can recognize that it is the child process),
// and any additional environment settings specified in env.
// This is helpful for testing proxy settings, since we need to have both a
// proxy / server-side listener and a client that communicates with the server
// using various proxy settings.
func execClient(t *testing.T, env ...string) {
// The child process always runs only the TestClientPing test, which pings
// the server at TEST_SERVER_URL and then terminates.
cmd := exec.Command(os.Args[0], "-test.run=TestClientPing")
cmd.Env = append(append(os.Environ(),
"TEST_START_CLIENT=1"),
env...)
cmdOutput := new(bytes.Buffer)
cmd.Stderr = cmdOutput
cmd.Stdout = cmdOutput
err := cmd.Run()
if err != nil {
t.Error("Error executing client:\n" + cmdOutput.String())
}
}
func doClientPing(t *testing.T) {
serverURL := os.Getenv("TEST_SERVER_URL")
require.NotEqual(t, serverURL, "")
proxy := os.Getenv("TEST_PROXY_URL")
// if TEST_PROXY_DISABLE is nonempty, set ClientSettings.ProxyDisable.
proxyDisable := os.Getenv("TEST_PROXY_DISABLE")
clientSettings := ClientSettings{
ConnectionSettings: eslegclient.ConnectionSettings{
URL: serverURL,
Headers: map[string]string{headerTestField: headerTestValue},
Transport: httpcommon.HTTPTransportSettings{
Proxy: httpcommon.HTTPClientProxySettings{
Disable: proxyDisable != "",
},
},
},
Index: outil.MakeSelector(outil.ConstSelectorExpr("test", outil.SelectorLowerCase)),
}
if proxy != "" {
u, err := url.Parse(proxy)
require.NoError(t, err)
proxyURL := httpcommon.ProxyURI(*u)
clientSettings.Transport.Proxy.URL = &proxyURL
}
client, err := NewClient(clientSettings, nil)
require.NoError(t, err)
// This ping won't succeed; we aren't testing end-to-end communication
// (which would require a lot more setup work), we just want to make sure
// the client is pointed at the right server or proxy.
client.Connect()
}
// serverState contains the state of the http listeners for proxy tests,
// including the endpoint URLs and the observed request count for each one.
type serverState struct {
serverURL string
proxyURL string
_serverRequestCount atomic.Int // Requests directly to the server
_proxyRequestCount atomic.Int // Requests via the proxy
}
// Convenience functions to unwrap the atomic primitives
func (s serverState) serverRequestCount() int {
return s._serverRequestCount.Load()
}
func (s serverState) proxyRequestCount() int {
return s._proxyRequestCount.Load()
}
// startServers starts endpoints representing a backend server and a proxy,
// and returns the corresponding serverState and a teardown function that
// should be called to shut them down at the end of the test.
func startServers(t *testing.T) (*serverState, func()) {
state := serverState{}
server := httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
assert.Equal(t, headerTestValue, r.Header.Get(headerTestField))
fmt.Fprintln(w, "Hello, client")
state._serverRequestCount.Inc()
}))
proxy := httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
assert.Equal(t, headerTestValue, r.Header.Get(headerTestField))
fmt.Fprintln(w, "Hello, client")
state._proxyRequestCount.Inc()
}))
state.serverURL = server.URL
state.proxyURL = proxy.URL
return &state, func() {
server.Close()
proxy.Close()
}
}
| ["\"TEST_START_CLIENT\"", "\"TEST_SERVER_URL\"", "\"TEST_PROXY_URL\"", "\"TEST_PROXY_DISABLE\""] | [] | ["TEST_PROXY_URL", "TEST_START_CLIENT", "TEST_PROXY_DISABLE", "TEST_SERVER_URL"] | [] | ["TEST_PROXY_URL", "TEST_START_CLIENT", "TEST_PROXY_DISABLE", "TEST_SERVER_URL"] | go | 4 | 0 |
python/paddle/fluid/framework.py | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import collections
from collections import defaultdict
from collections import Iterable
import contextlib
from .wrapped_decorator import signature_safe_contextmanager, wrap_decorator
import os
import re
import traceback
import six
import numpy as np
import subprocess
import multiprocessing
import sys
import logging
from .. import compat as cpt
from .proto import framework_pb2
from . import core
from . import unique_name
import paddle.version as fluid_version
import warnings
import functools
__all__ = [
'Program',
'default_startup_program',
'default_main_program',
'program_guard',
'name_scope',
'cuda_places',
'cpu_places',
'cuda_pinned_places',
'in_dygraph_mode',
'is_compiled_with_cuda',
'is_compiled_with_xpu',
'Variable',
'ComplexVariable',
'load_op_library',
'require_version',
'device_guard',
'set_flags',
'get_flags',
]
EMPTY_VAR_NAME = core.kEmptyVarName()
TEMP_VAR_NAME = core.kTempVarName()
GRAD_VAR_SUFFIX = core.kGradVarSuffix()
ZERO_VAR_SUFFIX = core.kZeroVarSuffix()
CONTROL_DEP_VAR_PREFIX = core.kControlDepVarName()
_dygraph_tracer_ = None
_global_expected_place_ = None
_current_device = None
global_prog_seed = 0
def require_version(min_version, max_version=None):
"""
Check if the installed version of PaddlePaddle is in [min_version, max_version].
If the installed version is lower than ``min_version`` or higher than ``max_version``,
an exception will be thrown. Nothing is returned if the installed version is satisfied.
Args:
min_version (str): the minimum version required (like '1.4.0').
max_version (str, optional): the max version required (like '1.6.0'), default is None,
meaning any version equal or higher than ``min_version`` is acceptable.
Returns:
None.
Raises:
TypeError: if the type of ``min_version`` is not str.
TypeError: if the type of ``max_version`` is not str or type(None).
ValueError: if the value of ``min_version`` is not in version format.
ValueError: if the value of ``max_version`` is not in version format or None.
Exception: if the installed version is lower than ``min_version`` or higher than ``max_version``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# any version >= 0.1.0 is acceptable.
fluid.require_version('0.1.0')
# if 0.1.0 <= version <= 10.0.0, it is acceptable.
fluid.require_version(min_version='0.1.0', max_version='10.0.0')
"""
if not isinstance(min_version, str):
raise TypeError(
"The type of 'min_version' in require_version must be str, but received %s."
% (type(min_version)))
if not isinstance(max_version, (str, type(None))):
raise TypeError(
"The type of 'max_version' in require_version must be str or type(None), but received %s."
% (type(max_version)))
check_format = re.match(r'\d+(\.\d+){0,3}', min_version)
if check_format is None or check_format.group() != min_version:
raise ValueError(
"The value of 'min_version' in require_version must be in format '\\d+(\\.\\d+){0,3}', "
"like '1.5.2.0', but received %s" % min_version)
if max_version is not None:
check_format = re.match(r'\d+(\.\d+){0,3}', max_version)
if check_format is None or check_format.group() != max_version:
raise ValueError(
"The value of 'max_version' in require_version must be in format '\\d+(\\.\\d+){0,3}', "
"like '1.5.2.0', but received %s" % max_version)
version_installed = [
fluid_version.major, fluid_version.minor, fluid_version.patch,
fluid_version.rc
]
zero_version = ['0', '0', '0', '0']
def version_cmp(ver_a, ver_b):
for i in six.moves.range(len(ver_a)):
if int(ver_a[i]) > int(ver_b[i]):
return 1
elif int(ver_a[i]) < int(ver_b[i]):
return -1
return 0
if version_cmp(version_installed, zero_version) == 0:
if max_version is not None:
warnings.warn(
"PaddlePaddle version in [%s, %s] required, but %s installed. "
"Maybe you are using a develop version, "
"please make sure the version is good with your code." %
(min_version, max_version, fluid_version.full_version))
else:
warnings.warn(
"PaddlePaddle version %s or higher is required, but %s installed, "
"Maybe you are using a develop version, "
"please make sure the version is good with your code." %
(min_version, fluid_version.full_version))
return
min_version_split = min_version.split('.')
min_version_to_check = min_version_split + zero_version[len(
min_version_split):]
if max_version is not None:
max_version_split = max_version.split('.')
max_version_to_check = max_version_split + zero_version[len(
max_version_split):]
if version_cmp(version_installed,
max_version_to_check) > 0 or version_cmp(
version_installed, min_version_to_check) < 0:
raise Exception(
"VersionError: PaddlePaddle version in [%s, %s] required, but %s installed."
% (min_version, max_version, fluid_version.full_version))
else:
if version_cmp(version_installed, min_version_to_check) < 0:
raise Exception(
"VersionError: PaddlePaddle version %s or higher is required, but %s installed, "
"please upgrade your PaddlePaddle to %s or other higher version."
% (min_version, fluid_version.full_version, min_version))
def in_dygraph_mode():
"""
:alias_main: paddle.in_dygraph_mode
:alias: paddle.in_dygraph_mode
:old_api: paddle.fluid.framework.in_dygraph_mode
This function checks whether the program runs in dynamic graph mode or not.
You can enter dynamic graph mode with :ref:`api_fluid_dygraph_guard` api,
or enable and disable dynamic graph mode with :ref:`api_fluid_dygraph_enable`
and :ref:`api_fluid_dygraph_disable` api .
Returns:
bool: Whether the program is running in dynamic graph mode.
Examples:
.. code-block:: python
import paddle.fluid as fluid
fluid.enable_dygraph() # Now we are in dygraph mode
print(fluid.in_dygraph_mode()) # True
fluid.disable_dygraph()
print(fluid.in_dygraph_mode()) # False
"""
return _dygraph_tracer_ is not None
def _dygraph_not_support_(func):
def __impl__(*args, **kwargs):
assert not in_dygraph_mode(
), "We don't support %s in imperative mode" % func.__name__
return func(*args, **kwargs)
return __impl__
def _dygraph_only_(func):
def __impl__(*args, **kwargs):
assert in_dygraph_mode(
), "We Only support %s in dynamic mode, please call 'paddle.disable_static()' to enter dynamic mode." % func.__name__
return func(*args, **kwargs)
return __impl__
# NOTE(zhiqiu): This decorator is used for APIs of Variable that exist only
# to give Variable and VarBase the same interfaces, like numpy. Since VarBase is not exposed in our
# official documents, logically we want to keep VarBase and Variable consistent. In practice, however,
# some of these APIs are not supported, like numpy, because Variable contains the desc.
# So, those APIs are listed under class Variable to generate docs only.
# TODO(zhiqiu): We should make VarBase consistent with Variable in the future, for example, by inheriting
# from the same base class.
def _fake_interface_only_(func):
def __impl__(*args, **kwargs):
raise AssertionError(
"'%s' should be called by imperative Varible in imperative mode, please use fluid.dygraph.guard() as context to run it in imperative mode"
% func.__name__)
return __impl__
# NOTE(chenweihang): There is an argument name typo (stat_dict; the correct name is state_dict)
# in the fluid APIs Layer.set_dict and Optimizer.load. In order to correct the argument without
# introducing compatibility issues, add this decorator.
# NOTE(chenweihang): `wrap_decorator` is not used here because `wrap_decorator` would
# move kwargs to args, which doesn't work in this decorator case.
def deprecate_stat_dict(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if 'stat_dict' in kwargs:
warnings.warn(
"The argument `stat_dict` has deprecated, please change it to `state_dict`.",
DeprecationWarning)
kwargs['state_dict'] = kwargs['stat_dict']
kwargs.pop('stat_dict')
return func(*args, **kwargs)
return wrapper
dygraph_not_support = wrap_decorator(_dygraph_not_support_)
dygraph_only = wrap_decorator(_dygraph_only_)
fake_interface_only = wrap_decorator(_fake_interface_only_)
def _dygraph_tracer():
return _dygraph_tracer_
def _current_expected_place():
global _global_expected_place_
if _global_expected_place_ is None:
if core.is_compiled_with_cuda():
_global_expected_place_ = core.CUDAPlace(0)
else:
_global_expected_place_ = core.CPUPlace()
return _global_expected_place_
def _set_dygraph_tracer_expected_place(place):
global _dygraph_tracer_
if _dygraph_tracer_ is not None:
_dygraph_tracer_._expected_place = place
def _set_expected_place(place):
global _global_expected_place_
_global_expected_place_ = place
_set_dygraph_tracer_expected_place(place)
# TODO(zhiqiu): remove this function.
def _var_base_to_np(var_base):
"""
convert a VarBase to numpy
Args:
var_base(VarBase) : the VarBase to convert
Returns (np.ndarray): the np.ndarray containing the value of the VarBase
"""
warnings.warn(
"paddle.fluid.framework._var_base_to_np is deprecated, please use var_base.numpy() instead of _var_base_to_np(var_base)."
)
return var_base.numpy()
def _cpu_num():
if "CPU_NUM" not in os.environ.keys():
if multiprocessing.cpu_count() > 1:
sys.stderr.write(
'!!! CPU_NUM is not specified; you should set CPU_NUM in the environment variable list.\n'
'CPU_NUM indicates how many CPUPlace instances are used in the current task.\n'
'If this parameter is set to N (equal to the number of physical CPU cores), the program may run faster.\n\n'
'export CPU_NUM={} # for example, set CPU_NUM to the number of physical CPU cores, which is {}.\n\n'
'!!! The default number of CPU_NUM=1.\n'.format(
multiprocessing.cpu_count(), multiprocessing.cpu_count()))
os.environ['CPU_NUM'] = str(1)
cpu_num = os.environ.get('CPU_NUM')
return int(cpu_num)
def _cuda_ids():
gpus_env = os.getenv("FLAGS_selected_gpus")
if gpus_env:
device_ids = [int(s) for s in gpus_env.split(",")]
else:
device_ids = six.moves.range(core.get_cuda_device_count())
return device_ids
def is_compiled_with_xpu():
"""
Whether this whl package can be used to run the model on XPU.
Returns (bool): whether XPU is supported or not.
Examples:
.. code-block:: python
import paddle.fluid as fluid
support_xpu = fluid.is_compiled_with_xpu()
"""
return core.is_compiled_with_xpu()
def is_compiled_with_cuda():
"""
Whether this whl package can be used to run the model on GPU.
Returns (bool): whether GPU is supported or not.
Examples:
.. code-block:: python
import paddle.fluid as fluid
support_gpu = fluid.is_compiled_with_cuda()
"""
return core.is_compiled_with_cuda()
def cuda_places(device_ids=None):
"""
**Note**:
For multi-card tasks, please use `FLAGS_selected_gpus` environment variable to set the visible GPU device.
The next version will fix the problem with `CUDA_VISIBLE_DEVICES` environment variable.
This function creates a list of :code:`fluid.CUDAPlace` objects.
If :code:`device_ids` is None, environment variable of
:code:`FLAGS_selected_gpus` would be checked first. For example, if
:code:`FLAGS_selected_gpus=0,1,2`, the returned list would
be [fluid.CUDAPlace(0), fluid.CUDAPlace(1), fluid.CUDAPlace(2)].
If :code:`FLAGS_selected_gpus` is not set, all visible
gpu places would be returned according to the :code:`CUDA_VISIBLE_DEVICES` environment variable.
If :code:`device_ids` is not None, it should be the device
ids of GPUs. For example, if :code:`device_ids=[0,1,2]`,
the returned list would be
[fluid.CUDAPlace(0), fluid.CUDAPlace(1), fluid.CUDAPlace(2)].
Parameters:
device_ids (list or tuple of int, optional): list of GPU device ids.
Returns:
list of fluid.CUDAPlace: Created GPU place list.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cuda_places = fluid.cuda_places()
"""
assert core.is_compiled_with_cuda(), \
"Not compiled with CUDA"
if device_ids is None:
device_ids = _cuda_ids()
elif not isinstance(device_ids, (list, tuple)):
device_ids = [device_ids]
return [core.CUDAPlace(dev_id) for dev_id in device_ids]
def cpu_places(device_count=None):
"""
This function creates a list of :code:`fluid.CPUPlace` objects, and returns the created list.
If :code:`device_count` is None, the device count would
be determined by environment variable :code:`CPU_NUM`.
If :code:`CPU_NUM` is not set, the default value is 1,
i.e. CPU_NUM=1.
:code:`CPU_NUM` indicates the number of devices used in the current task.
The running of the program can be accelerated if :code:`CPU_NUM` is the same as the number of physical cores.
Parameters:
device_count (int, optional): device number. Default: None.
Returns:
list of fluid.CPUPlace: Created list of CPU places.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cpu_places = fluid.cpu_places()
"""
if device_count is None:
device_count = _cpu_num()
return [core.CPUPlace()] * device_count
def cuda_pinned_places(device_count=None):
"""
This function creates a list of :code:`fluid.CUDAPinnedPlace` objects.
If :code:`device_count` is None, the device count would
be determined by environment variable :code:`CPU_NUM`.
If :code:`CPU_NUM` is not set, the default value is 1,
i.e. CPU_NUM=1.
:code:`CPU_NUM` indicates the number of devices used in the current task.
The running of the program can be accelerated if :code:`CPU_NUM` is the same as the number of physical cores.
Parameters:
device_count (int, optional): device number. Default: None.
Returns:
list of fluid.CUDAPinnedPlace: Created list of CUDA pinned places.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cuda_pinned_places_cpu_num = fluid.cuda_pinned_places()
# or
cuda_pinned_places = fluid.cuda_pinned_places(1)
"""
assert core.is_compiled_with_cuda(), \
"Not compiled with CUDA"
if device_count is None:
device_count = len(_cuda_ids())
return [core.CUDAPinnedPlace()] * device_count
class NameScope(object):
def __init__(self, name="", parent=None):
self._children = dict()
self._name = name
self._parent = parent
def child(self, prefix):
if prefix not in self._children:
new_child = NameScope(prefix, self)
self._children[prefix] = [new_child]
else:
new_child = NameScope(prefix + "_%d" % len(self._children[prefix]),
self)
self._children[prefix].append(new_child)
return new_child
def parent(self):
return self._parent
def name(self):
return self._name
_name_scope = NameScope()
@signature_safe_contextmanager
def name_scope(prefix=None):
"""
:api_attr: Static Graph
Generate hierarchical name prefix for the operators.
Note:
This should only be used for debugging and visualization purposes.
Don't use it for serious analysis such as graph/program transformations.
Args:
prefix(str, optional): prefix. Default is none.
Examples:
.. code-block:: python
import paddle.fluid as fluid
with fluid.name_scope("s1"):
a = fluid.data(name='data', shape=[None, 1], dtype='int32')
b = a + 1
with fluid.name_scope("s2"):
c = b * 1
with fluid.name_scope("s3"):
d = c / 1
with fluid.name_scope("s1"):
f = fluid.layers.pow(d, 2.0)
with fluid.name_scope("s4"):
g = f - 1
# Ops are created in the default main program.
for op in fluid.default_main_program().block(0).ops:
# elementwise_add is created in /s1/
if op.type == 'elementwise_add':
assert op.desc.attr("op_namescope") == '/s1/'
# elementwise_mul is created in '/s1/s2'
elif op.type == 'elementwise_mul':
assert op.desc.attr("op_namescope") == '/s1/s2/'
# elementwise_div is created in '/s1/s3'
elif op.type == 'elementwise_div':
assert op.desc.attr("op_namescope") == '/s1/s3/'
# elementwise_sub is created in '/s4'
elif op.type == 'elementwise_sub':
assert op.desc.attr("op_namescope") == '/s4/'
# pow is created in /s1_1/
elif op.type == 'pow':
assert op.desc.attr("op_namescope") == '/s1_1/'
"""
# TODO(panyx0718): Only [0-9a-z].
# in dygraph mode we don't need name_scope since it would cause a memory leak
if in_dygraph_mode():
yield
else:
assert prefix, "namescope prefix can not be empty."
global _name_scope
_name_scope = _name_scope.child(prefix)
try:
yield
finally:
_name_scope = _name_scope.parent()
def _full_name_scope():
global _name_scope
scope = _name_scope
name = ""
while scope:
name = scope.name() + "/" + name
scope = scope.parent()
return name
def generate_control_dev_var_name():
import random
return CONTROL_DEP_VAR_PREFIX + "@" + str(random.random())
def grad_var_name(var_name):
"""
Returns:
str: gradient name for a certain var name
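Examples:
.. code-block:: python
# Illustrative sketch of this internal helper: the gradient variable name
# is the forward variable name plus GRAD_VAR_SUFFIX.
from paddle.fluid import framework
print(framework.grad_var_name("fc_0.w_0"))  # fc_0.w_0@GRAD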
"""
return var_name + GRAD_VAR_SUFFIX
def convert_np_dtype_to_dtype_(np_dtype):
"""
Convert the data type in numpy to the data type in Paddle
Args:
np_dtype(np.dtype): the data type in numpy.
Returns:
core.VarDesc.VarType: the data type in Paddle.
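Examples:
.. code-block:: python
# Illustrative sketch of this internal helper (not part of the public API).
import numpy as np
from paddle.fluid import core, framework
assert framework.convert_np_dtype_to_dtype_(np.float32) == core.VarDesc.VarType.FP32
assert framework.convert_np_dtype_to_dtype_('int64') == core.VarDesc.VarType.INT64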
"""
dtype = np.dtype(np_dtype)
if dtype == np.float32:
return core.VarDesc.VarType.FP32
elif dtype == np.float64:
return core.VarDesc.VarType.FP64
elif dtype == np.float16:
return core.VarDesc.VarType.FP16
elif dtype == np.int32:
return core.VarDesc.VarType.INT32
elif dtype == np.int16:
return core.VarDesc.VarType.INT16
elif dtype == np.int64:
return core.VarDesc.VarType.INT64
elif dtype == np.bool:
return core.VarDesc.VarType.BOOL
elif dtype == np.uint16:
return core.VarDesc.VarType.INT16
elif dtype == np.uint8:
return core.VarDesc.VarType.UINT8
elif dtype == np.int8:
return core.VarDesc.VarType.INT8
else:
raise ValueError("Not supported numpy dtype %s" % dtype)
def dtype_is_floating(dtype):
"""
Check whether the data type is a floating-point type or not.
Args:
dtype(np.dtype|core.VarDesc.VarType): data type.
Could be numpy format or Paddle format
Returns(bool): True if data type is a float value
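Examples:
.. code-block:: python
# Illustrative sketch of this internal helper.
import numpy as np
from paddle.fluid import core, framework
assert framework.dtype_is_floating(np.float32)
assert not framework.dtype_is_floating(core.VarDesc.VarType.INT32)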
"""
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
return dtype in [
core.VarDesc.VarType.FP16, core.VarDesc.VarType.FP32,
core.VarDesc.VarType.FP64
]
def _debug_string_(proto, throw_on_error=True):
"""
Get the debug string of a protobuf message. The message may be
uninitialized.
Args:
proto(google.protobuf.message.Message): The protobuf message
throw_on_error(bool): True to raise an error when the protobuf message
is not initialized.
Returns(str): The debug string of the protobuf message
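Examples:
.. code-block:: python
# Illustrative sketch of this internal helper, which backs the to_string()
# methods in this module.
import paddle.fluid as fluid
from paddle.fluid import framework
from paddle.fluid.proto import framework_pb2
var = fluid.default_main_program().current_block().create_var(name='x', shape=[1], dtype='float32')
proto = framework_pb2.VarDesc.FromString(var.desc.serialize_to_string())
print(framework._debug_string_(proto, throw_on_error=True))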
"""
error_fields = list()
if not proto.IsInitialized(error_fields) and throw_on_error:
raise ValueError("{0} are not initialized.\nThe message is {1}:\n".
format(error_fields, proto))
return proto.__str__()
def _varbase_creator(type=core.VarDesc.VarType.LOD_TENSOR,
name=None,
shape=None,
dtype=None,
persistable=None,
**kwargs):
if dtype is not None:
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
return core.VarBase(dtype if dtype else core.VarDesc.VarType.FP32,
list(shape) if shape else [], name, type
if type else core.VarDesc.VarType.LOD_TENSOR, True
if persistable else False)
class VariableMetaClass(type):
@classmethod
def __instancecheck__(cls, instance):
t = type(instance)
if in_dygraph_mode():
return issubclass(t, core.VarBase)
else:
return issubclass(t, Variable)
class ParameterMetaClass(VariableMetaClass):
@classmethod
def __instancecheck__(cls, instance):
t = type(instance)
if in_dygraph_mode():
return issubclass(t, ParamBase)
else:
return issubclass(t, Parameter)
def _getitem_impl_(var, item):
"""
Slice the variable.
Args:
item(int/slice/tuple) : the index.
Returns:
Sliced variable
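Examples:
.. code-block:: python
# Illustrative sketch: static-graph Variable.__getitem__ dispatches to this
# helper, so ordinary indexing syntax exercises it.
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[3, 4, 5], dtype='float32')
y = x[0, 1:3]   # slice with a decreased leading axis
z = x[::-1]     # reverse along axis 0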
"""
if not isinstance(item, tuple):
item = [item]
decrease_axis = []
slice_axis = []
slice_start = []
slice_end = []
slice_step = []
use_strided_slice = False
reverse_axis = []
target_block = default_main_program().current_block()
def fill_constant(shape, value, force_cpu=False, out=None):
var.block.append_op(
type='fill_constant',
inputs={},
outputs={'Out': [out]},
attrs={
'shape': shape,
'dtype': out.dtype,
'value': float(value),
'force_cpu': force_cpu
})
out.stop_gradient = True
return out
for dim, slice_item in enumerate(item):
if isinstance(slice_item, slice):
start = slice_item.start
end = slice_item.stop
step = slice_item.step
if start is None and end is None and step is None:
continue
if step is None:
step = 1
if start is None and end is None:
assert (step == -1)
reverse_axis.append(dim)
continue
if start is None:
start = 0
if end is None:
end = 10000000
if step != 1:
use_strided_slice = True
slice_axis.append(dim)
slice_start.append(start)
slice_end.append(end)
slice_step.append(step)
else:
decrease_axis.append(dim)
slice_axis.append(dim)
slice_start.append(slice_item)
slice_step.append(1)
if isinstance(slice_item, Variable):
temp_1 = var.block.create_var(dtype=slice_item.dtype)
fill_constant([1], 1, force_cpu=True, out=temp_1)
temp_end = target_block.create_var(dtype=slice_item.dtype)
target_block.append_op(
type='elementwise_add',
inputs={'X': slice_item,
'Y': temp_1},
outputs={'Out': temp_end},
attrs={'axis': -1})
slice_end.append(temp_end)
else:
slice_end.append(slice_item + 1
if slice_item != -1 else 10000000)
def contain_var(one_list):
for ele in one_list:
if isinstance(ele, Variable):
return True
return False
def get_new_list_tensor(old_list):
new_list_tensor = []
for dim in old_list:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_list_tensor.append(dim)
else:
assert (isinstance(dim, int))
temp_out = var.block.create_var(dtype='int32')
fill_constant([1], dim, force_cpu=True, out=temp_out)
new_list_tensor.append(temp_out)
return new_list_tensor
inputs = {'Input': [var]}
attrs = {
'axes': slice_axis,
'starts': [],
'ends': [],
'decrease_axis': decrease_axis
}
if (use_strided_slice == True):
attrs['strides'] = []
infer_flags = list(1 for i in range(len(slice_axis)))
# starts
if contain_var(slice_start):
inputs['StartsTensorList'] = get_new_list_tensor(slice_start)
for i, dim in enumerate(slice_start):
if isinstance(dim, Variable):
attrs['starts'].append(-1)
infer_flags[i] = -1
else:
attrs['starts'].append(dim)
else:
attrs['starts'] = slice_start
# ends
if contain_var(slice_end):
inputs['EndsTensorList'] = get_new_list_tensor(slice_end)
for i, dim in enumerate(slice_end):
if isinstance(dim, Variable):
attrs['ends'].append(-1)
infer_flags[i] = -1
else:
attrs['ends'].append(dim)
else:
attrs['ends'] = slice_end
# strides
if use_strided_slice == True:
if contain_var(slice_step):
inputs['StridesTensorList'] = get_new_list_tensor(slice_step)
for i, dim in enumerate(slice_step):
if isinstance(dim, Variable):
attrs['strides'].append(-1)
infer_flags[i] = -1
else:
attrs['strides'].append(dim)
else:
attrs['strides'] = slice_step
# infer_flags
attrs['infer_flags'] = infer_flags
out = var
if use_strided_slice == False and len(slice_axis) > 0:
# append slice_op here
slice_out_var = target_block.create_var(
name=unique_name.generate_with_ignorable_key(var.name + "_slice"),
dtype=var.dtype)
target_block.append_op(
type="slice",
inputs=inputs,
outputs={'Out': [slice_out_var]},
attrs=attrs)
out = slice_out_var
elif use_strided_slice == True and len(slice_axis) > 0:
strided_slice_out_var = target_block.create_var(
name=unique_name.generate_with_ignorable_key(var.name +
"_strided_slice"),
dtype=var.dtype)
target_block.append_op(
type="strided_slice",
inputs=inputs,
outputs={'Out': [strided_slice_out_var]},
attrs=attrs)
out = strided_slice_out_var
if len(reverse_axis) > 0:
reverse_out_var = target_block.create_var(
name=unique_name.generate_with_ignorable_key(var.name +
"_slice_reverse"),
dtype=var.dtype)
target_block.append_op(
type="reverse",
inputs={'X': out},
outputs={'Out': [reverse_out_var]},
attrs={'axis': reverse_axis})
out = reverse_out_var
return out
@six.add_metaclass(VariableMetaClass)
class Variable(object):
"""
**Notes**:
**The constructor of Variable should not be invoked directly.**
**In Static Graph Mode: Please use** `Block.create_var` **to create a Static variable which has no data until being fed.**
**In Dygraph Mode: Please use** :ref:`api_fluid_dygraph_to_variable` **to create a dygraph variable with real data**
In Fluid, every input and output of an OP is a variable. In most
cases, variables are used for holding different kinds of data or training
labels. A variable belongs to a :ref:`api_guide_Block_en` . Every variable has its own name and
two variables in different :ref:`api_guide_Block_en` could have the same name.
There are many kinds of variables. Each kind of them has its own attributes
and usages. Please refer to the `framework.proto <https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/framework.proto>`_ for details.
Most of a Variable's member variables can be set to None. This means
it is not available or will be specified later.
Examples:
In Static Graph Mode:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
In `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ Mode:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
new_variable = fluid.dygraph.to_variable(np.arange(10))
"""
def __init__(self,
block,
type=core.VarDesc.VarType.LOD_TENSOR,
name=None,
shape=None,
dtype=None,
lod_level=None,
capacity=None,
persistable=None,
error_clip=None,
stop_gradient=False,
is_data=False,
need_check_feed=False,
belong_to_optimizer=False,
**kwargs):
self.block = block
if name is None:
name = unique_name.generate('_generated_var')
if dtype is not None:
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
self.belong_to_optimizer = belong_to_optimizer
self.error_clip = error_clip
is_new_var = False
name = cpt.to_text(name)
self.desc = self.block.desc.find_var(cpt.to_bytes(name))
if self.desc is None:
self.desc = self.block.desc.var(cpt.to_bytes(name))
is_new_var = True
if is_new_var:
self.desc.set_type(type)
elif self.desc.type() != type:
raise ValueError("Variable {0} has been created before. The "
"previous type is {1}; the new type is {2}. They"
" are not matched".format(self.name,
self.desc.type(), type))
if shape is not None:
if is_new_var:
self.desc.set_shape(shape)
else:
old_shape = self.shape
shape = tuple(shape)
if shape != old_shape:
raise ValueError(
"Variable {0} has been created before. the previous "
"shape is {1}; the new shape is {2}. They are not "
"matched.".format(self.name, old_shape, shape))
if dtype is not None:
if is_new_var:
self.desc.set_dtype(dtype)
else:
old_dtype = self.dtype
if dtype != old_dtype:
raise ValueError("Variable {0} has been created before. "
"The previous data type is {1}; the new "
"data type is {2}. They are not "
"matched.".format(self.name, old_dtype,
dtype))
if lod_level is not None:
if is_new_var:
self.desc.set_lod_level(lod_level)
else:
if lod_level != self.lod_level:
raise ValueError("Variable {0} has been created before. "
"The previous lod_level is {1}; the new "
"lod_level is {2}. They are not "
"matched".format(self.name, self.lod_level,
lod_level))
if persistable is not None:
if is_new_var:
self.desc.set_persistable(persistable)
else:
if persistable != self.persistable:
raise ValueError(
"Variable {0} has been created before."
"The previous persistable is {1}; the new "
"persistable is {2}. They are not matched".format(
self.name, self.persistable, persistable))
if need_check_feed and is_new_var:
self.desc.set_need_check_feed(need_check_feed)
if capacity is not None:
if is_new_var:
self.desc.set_capacity(capacity)
else:
# TODO(abhinavarora) : Compare with set capacity once,
# get_capacity is implemented
pass
self.block.vars[name] = self
self.op = None
self._stop_gradient = stop_gradient
self.is_data = is_data
@fake_interface_only
def detach(self):
"""
**Notes**:
**This API is ONLY available in Dygraph mode**
Returns a new Variable, detached from the current graph.
Returns:
( :ref:`api_guide_Variable_en` | dtype is same as current Variable): The detached Variable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph import Linear
import numpy as np
data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32')
with fluid.dygraph.guard():
linear = Linear(32, 64)
data = to_variable(data)
x = linear(data)
y = x.detach()
"""
pass
@fake_interface_only
def numpy(self):
"""
**Notes**:
**This API is ONLY available in Dygraph mode**
Returns a numpy array that shows the value of the current :ref:`api_guide_Variable_en`
Returns:
ndarray: The numpy value of current Variable.
Returns type:
ndarray: dtype is same as current Variable
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph import Linear
import numpy as np
data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32')
with fluid.dygraph.guard():
linear = Linear(32, 64)
data = to_variable(data)
x = linear(data)
print(x.numpy())
"""
pass
@fake_interface_only
def set_value(self, value):
"""
**Notes**:
**This API is ONLY available in Dygraph mode**
Set a new value for this Variable.
Args:
value (Variable|np.ndarray): the new value.
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph import Linear
import numpy as np
data = np.ones([3, 1024], dtype='float32')
with fluid.dygraph.guard():
linear = fluid.dygraph.Linear(1024, 4)
t = to_variable(data)
linear(t) # call with default weight
custom_weight = np.random.randn(1024, 4).astype("float32")
linear.weight.set_value(custom_weight) # change existing weight
out = linear(t) # call with different weight
"""
pass
@fake_interface_only
def backward(self, retain_graph=False):
"""
**Notes**:
**This API is ONLY available in Dygraph mode**
Run backward of current Graph which starts from current Tensor.
Args:
retain_graph(bool, optional): If False, the graph used to compute grads will be freed. If you would
like to add more ops to the built graph after calling this method( :code:`backward` ), set the parameter
:code:`retain_graph` to True, then the grads will be retained. Thus, setting it to False is much more memory-efficient.
Defaults to False.
Returns:
NoneType: None
Examples:
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
x = np.ones([2, 2], np.float32)
inputs = []
for _ in range(10):
tmp = paddle.to_tensor(x)
# If we don't set tmp's stop_gradient to False, then no path to the loss will have a gradient,
# since nothing on the path requires a gradient.
tmp.stop_gradient=False
inputs.append(tmp)
ret = paddle.sums(inputs)
loss = paddle.reduce_sum(ret)
loss.backward()
"""
pass
@fake_interface_only
def gradient(self):
"""
**Notes**:
**This API is ONLY available in Dygraph mode**
Get the Gradient of Current Variable
Returns:
ndarray or tuple of ndarray: if the Variable's type is LoDTensor, return the numpy value of the gradient of the current Variable; if the Variable's type is SelectedRows, return a tuple of ndarray, where the first element is the numpy value of the gradient of the current Variable and the second element is the numpy value of the rows of the current Variable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# example1: return ndarray
x = np.ones([2, 2], np.float32)
with fluid.dygraph.guard():
inputs2 = []
for _ in range(10):
tmp = fluid.dygraph.base.to_variable(x)
tmp.stop_gradient=False
inputs2.append(tmp)
ret2 = fluid.layers.sums(inputs2)
loss2 = fluid.layers.reduce_sum(ret2)
loss2.backward()
print(loss2.gradient())
# example2: return tuple of ndarray
with fluid.dygraph.guard():
embedding = fluid.dygraph.Embedding(
size=[20, 32],
param_attr='emb.w',
is_sparse=True)
x_data = np.arange(12).reshape(4, 3).astype('int64')
x_data = x_data.reshape((-1, 3, 1))
x = fluid.dygraph.base.to_variable(x_data)
out = embedding(x)
out.backward()
print(embedding.weight.gradient())
"""
pass
@fake_interface_only
def clear_gradient(self):
"""
**Notes**:
**1. This API is ONLY available in Dygraph mode**
**2. Use it only when the Variable has a gradient. Normally we use this for Parameters, since other temporary Variables will be deleted by Python's GC**
Clear (set to ``0`` ) the Gradient of Current Variable
Returns: None
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
x = np.ones([2, 2], np.float32)
with fluid.dygraph.guard():
inputs2 = []
for _ in range(10):
tmp = fluid.dygraph.base.to_variable(x)
tmp.stop_gradient=False
inputs2.append(tmp)
ret2 = fluid.layers.sums(inputs2)
loss2 = fluid.layers.reduce_sum(ret2)
loss2.backward()
print(loss2.gradient())
loss2.clear_gradient()
print("After clear {}".format(loss2.gradient()))
"""
pass
def __str__(self):
return self._to_readable_code()
def _to_readable_code(self):
"""
Get readable debug string of Variable.
.. note::
If you want to get the debug string in protobuf format,
please use :code:`to_string` method.
Returns:
string: The formatted Variable string.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print(new_variable._to_readable_code())
"""
if self.type == core.VarDesc.VarType.SELECTED_ROWS or self.type == core.VarDesc.VarType.LOD_TENSOR:
var_str = "{name} : fluid.{type}.shape{shape}.astype({dtype})".\
format(i="{", e="}", name=self.name, type=self.type, shape=self.shape, dtype=self.dtype)
else:
var_str = "{name} : fluid.{type})".\
format(i="{", e="}", name=self.name, type=self.type)
if type(self) == Parameter:
if self.trainable:
var_str = "trainable param " + var_str
else:
var_str = "param " + var_str
else:
var_str = "var " + var_str
if self.persistable:
var_str = "persist " + var_str
return var_str
def to_string(self, throw_on_error, with_details=False):
"""
Get debug string.
Args:
throw_on_error (bool): True means an exception is raised when self is not initialized.
with_details (bool): more details about variables and parameters (e.g. trainable, optimize_attr, ...) will be printed when with_details is True. Default value is False.
Returns:
str: The debug string.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print(new_variable.to_string(True))
print("=============with detail===============")
print(new_variable.to_string(True, True))
"""
assert isinstance(throw_on_error, bool) and isinstance(with_details,
bool)
protostr = self.desc.serialize_to_string()
proto = framework_pb2.VarDesc.FromString(six.binary_type(protostr))
res_str = _debug_string_(proto, throw_on_error)
if with_details:
additional_attr = ("error_clip", "stop_gradient")
for attr_name in additional_attr:
res_str += "%s: %s\n" % (attr_name,
cpt.to_text(getattr(self, attr_name)))
return res_str
__repr__ = __str__
@property
def stop_gradient(self):
"""
Indicating if gradient should be stopped at current Variable
**Notes: This Property has a default value of** ``True`` **in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode, while a Parameter's default value is False. However, in Static Graph Mode all Variables' default stop_gradient value is** ``False``
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
value0 = np.arange(26).reshape(2, 13).astype("float32")
value1 = np.arange(6).reshape(2, 3).astype("float32")
value2 = np.arange(10).reshape(2, 5).astype("float32")
linear = fluid.Linear(13, 5, dtype="float32")
linear2 = fluid.Linear(3, 3, dtype="float32")
a = fluid.dygraph.to_variable(value0)
b = fluid.dygraph.to_variable(value1)
c = fluid.dygraph.to_variable(value2)
out1 = linear(a)
out2 = linear2(b)
out1.stop_gradient = True
out = fluid.layers.concat(input=[out1, out2, c], axis=1)
out.backward()
assert linear.weight.gradient() is None
assert (out1.gradient() == 0).all()
"""
return self._stop_gradient
@stop_gradient.setter
def stop_gradient(self, s):
self._stop_gradient = s
@property
def persistable(self):
"""
Indicating if current Variable should be long-term alive
**Notes: This Property will be deprecated and this API is just to help users understand the concept**
**1. All Variable's persistable is** ``False`` **except Parameters.**
**2. In** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode, this property should not be changed**
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print("persistable of current Var is: {}".format(new_variable.persistable))
"""
return self.desc.persistable()
@persistable.setter
def persistable(self, p):
self.desc.set_persistable(p)
@property
def name(self):
"""
Indicating name of current Variable
**Notes: If two or more Variables share the same name in the same** :ref:`api_guide_Block_en` **, it means these Variables will share content in non-** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode. This is how we achieve Parameter sharing**
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print("name of current Var is: {}".format(new_variable.name))
"""
return cpt.to_text(self.desc.name())
@property
def grad_name(self):
"""
Indicating name of the gradient Variable of current Variable.
**Notes: This is a read-only property. It simply returns the name of
the gradient Variable following a naming convention, but doesn't guarantee
that the gradient exists.**
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name="x", shape=[-1, 23, 48], dtype='float32')
print(x.grad_name) # output is "x@GRAD"
"""
return self.name + "@GRAD"
@name.setter
def name(self, new_name):
self.desc.set_name(new_name)
@property
def shape(self):
"""
Indicating shape of current Variable
**Notes: This is a read-only property**
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print("shape of current Var is: {}".format(new_variable.shape))
"""
# convert to tuple, make it as same as numpy API.
return tuple(self.desc.shape())
@property
def dtype(self):
"""
Indicating data type of current Variable
**Notes: This is a read-only property**
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print("Dtype of current Var is: {}".format(new_variable.dtype))
"""
return self.desc.dtype()
@property
def lod_level(self):
"""
Indicating ``LoD`` info of current Variable, please refer to :ref:`api_fluid_LoDTensor_en` to check the meaning
of ``LoD``
**Notes**:
**1. This is a read-only property**
**2. This property is not supported in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode; its value should be** ``0(int)``
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print("LoD Level of current Var is: {}".format(new_variable.lod_level))
"""
if self.type == core.VarDesc.VarType.SELECTED_ROWS:
raise Exception("SelectedRows DO NOT support lod")
return self.desc.lod_level()
@property
def type(self):
"""
Indicating Type of current Variable
**Notes: This is a read-only property**
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print("Type of current Var is: {}".format(new_variable.type))
"""
return self.desc.type()
def _set_error_clip(self, error_clip):
"""
Set the error_clip.
Args:
error_clip(BaseErrorClipAttr) : The new error_clip.
Returns:
None
"""
self.error_clip = error_clip
def _set_info(self, key, value):
"""
Set key-value information for this variable.
Args:
key(str): Key for this information.
value(object): The value associated to the key.
Returns:
None
"""
if not hasattr(self, "_info"):
self._info = {}
self._info[key] = value
def _get_info(self, key):
"""
Get the information of this variable corresponding to key.
Args:
key(str): Key for this information.
Returns:
object
"""
if hasattr(self, "_info") and key in self._info:
return self._info[key]
return None
def _slice_indices(self, slice, length):
"""
Reference implementation for the slice.indices method.
"""
# Compute step and length as integers.
step = 1 if slice.step is None else slice.step
# Raise ValueError for negative length or zero step.
if length < 0:
raise ValueError("length should not be negative")
if step == 0:
raise ValueError("slice step can not be zero")
# Find lower and upper bounds for start and stop.
lower = -1 if step < 0 else 0
upper = length - 1 if step < 0 else length
# Compute start.
if slice.start is None:
start = upper if step < 0 else lower
else:
start = slice.start
start = max(start + length, lower) if start < 0 else min(start,
upper)
# Compute stop.
if slice.stop is None:
stop = lower if step < 0 else upper
else:
stop = slice.stop
stop = max(stop + length, lower) if stop < 0 else min(stop, upper)
return start, stop, step
def _detectEllipsis(self, item):
has_ellipsis = False
start = 0
end = len(self.shape)
for index, o in enumerate(item):
if o is Ellipsis:
if has_ellipsis:
raise ValueError("Index can have one ellipsis only.")
has_ellipsis = True
start = index
else:
if has_ellipsis:
end = index
return has_ellipsis, start, end
def _reconstructSliceinfo(self, item):
has_ellipsis, start, end = self._detectEllipsis(item)
if has_ellipsis:
newitem = []
for i in range(start):
newitem.append(item[i])
for i in range(start, end):
newitem.append(slice(None, None, None))
for i in range(end, len(item)):
newitem.append(item[i])
return newitem
else:
return None
def _detectContinuesSlice(self, item):
starts = []
ends = []
for index, o in enumerate(item):
if isinstance(o, int):
start = int(o)
if (start > 0 and start >= self.shape[index]) \
or (start < 0 and (start + self.shape[index]) < 0):
raise IndexError("invalid index")
start = max(start + self.shape[index], 0) if start < 0 else min(
start, self.shape[index])
starts.append(start)
ends.append(start + 1)
elif isinstance(o, slice):
start, stop, step = self._slice_indices(o, self.shape[index])
if step == 1 or step == -1:
starts.append(start)
ends.append(stop)
else:
return False, None
else:
raise IndexError("Valid index accepts int, slice or ellipsis")
return True, [starts, ends]
def _cloneVar(self, copy=False):
if not copy:
return self.block.create_var(
name=unique_name.generate_with_ignorable_key(self.name),
dtype=self.dtype)
else:
return self
def _sliceVar(self, axes, starts, ends):
new_var = self._cloneVar()
self.block.append_op(
type="slice",
inputs={'Input': [self]},
outputs={'Out': [new_var]},
attrs={'axes': axes,
'starts': starts,
'ends': ends})
return new_var
def _concatVar(self, inputs, axis):
new_var = self._cloneVar()
self.block.append_op(
type="concat",
inputs={'X': inputs},
outputs={'Out': [new_var]},
attrs={'axis': axis, })
return new_var
def _sliceAndConcatVar(self, item, axis):
if isinstance(item, slice):
if self.shape[axis] < 0:
return self._cloneVar(True)
start, stop, step = self._slice_indices(item, self.shape[axis])
if step == 1:
return self._sliceVar([axis], [start], [stop])
else:
vars = []
if step > 0:
while start < stop:
vars.append(
self._sliceVar([axis], [start], [start + 1]))
start += step
else:
while start > stop:
vars.append(
self._sliceVar([axis], [start], [start + 1]))
start += step
return self._concatVar(vars, axis)
elif isinstance(item, int):
if self.shape[axis] < 0:
return self._cloneVar(True)
index = int(item)
if (index > 0 and index >= self.shape[axis]) \
or (index < 0 and (index + self.shape[axis]) < 0):
raise IndexError("invalid index")
return self._sliceVar([axis], [index], [index + 1])
else:
raise IndexError("Valid index accepts int, slice or tuple")
def __getitem__(self, item):
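# Descriptive note (added): supports indexing/slicing on a static-graph
# Variable (e.g. var[1:3]) by delegating to _getitem_impl_.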
return _getitem_impl_(self, item)
def get_all_op_protos():
"""
Get all registered op proto from PaddlePaddle C++ end.
Returns:
list: list of OpProto.
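Examples:
A minimal illustrative sketch (not part of the original docstring) that lists a few registered operator types; the import path assumes this module is paddle.fluid.framework.
.. code-block:: python
from paddle.fluid.framework import get_all_op_protos
protos = get_all_op_protos()
print([p.type for p in protos[:5]])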
"""
protostrs = core.get_all_op_protos()
ret_values = []
for pbstr in protostrs:
op_proto = framework_pb2.OpProto.FromString(six.binary_type(pbstr))
ret_values.append(op_proto)
return ret_values
class ComplexVariable(object):
"""
The ComplexTensor is defined on the complex number domain. It contains two
real-number Tensors as its members, :attr:`real` and :attr:`imag`,
holding the real part and imaginary part of the complex numbers respectively.
**Notes**:
**The constructor of ComplexTensor should not be invoked directly.**
**Only dygraph mode is supported at present. Please use** :ref:`api_fluid_dygraph_to_variable` **to create a dygraph ComplexTensor with complex number data.**
Args:
real (Tensor): The Tensor holding real-part data.
imag (Tensor): The Tensor holding imaginary-part data.
Examples:
.. code-block:: python
import paddle
import numpy as np
paddle.enable_imperative()
x = paddle.to_tensor([1.0+2.0j, 0.2])
print(x.name, x.dtype, x.shape)
# ({'real': 'generated_tensor_0.real', 'imag': 'generated_tensor_0.imag'}, 'complex128', [2L])
print(x.numpy())
# [1. +2.j 0.2+0.j]
print(type(x))
# <class 'paddle.ComplexTensor'>
"""
def __new__(cls, *arg, **kwargs):
cls.__module__ = "paddle"
cls.__name__ = "ComplexTensor"
return super(ComplexVariable, cls).__new__(cls)
def __init__(self, real, imag):
assert real.shape == imag.shape, "The real part and imaginary part " \
"of a ComplexVariable should have the same shape!"
assert real.dtype == imag.dtype, "The real part and imaginary part " \
"of a ComplexVariable should have the same data type!"
self.real = real
self.imag = imag
if self.real.dtype in [
core.VarDesc.VarType.FP16, core.VarDesc.VarType.FP32
]:
self._dtype = "complex64"
else:
self._dtype = "complex128"
self._shape = self.real.shape
@property
def dtype(self):
return self._dtype
@property
def shape(self):
return self._shape
@property
def name(self):
return {"real": self.real.name, "imag": self.imag.name}
@name.setter
def name(self, name):
# rename
if isinstance(name, str):
self.real.name = name + ".real"
self.imag.name = name + ".imag"
elif (isinstance(name, tuple) or isinstance(name,
list)) and len(name) == 2:
self.real.name, self.imag.name = name[0], name[1]
else:
raise ValueError(
"An invalid name assigned to the ComplexVariable, "
"which must be a string, or a tuple or a list with length 2!")
def numpy(self):
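# Combine the real and imaginary parts into a single complex ndarray.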
return self.real.numpy() + 1j * self.imag.numpy()
def __str__(self):
return "ComplexTensor[real]: %s\n%s\nComplexTensor[imag]: %s\n%s" % (
self.real.name, str(self.real.value().get_tensor()), self.imag.name,
str(self.imag.value().get_tensor()))
__repr__ = __str__
class OpProtoHolder(object):
"""
A global variable to hold all OpProtos from C++ as a map
"""
@classmethod
def instance(cls):
if not hasattr(cls, '_instance'):
cls._instance = cls()
return cls._instance
def __init__(self):
assert not hasattr(
self.__class__,
'_instance'), 'Please use `instance()` to get OpProtoHolder object!'
op_protos = get_all_op_protos()
self.op_proto_map = {}
for proto in op_protos:
self.op_proto_map[proto.type] = proto
def get_op_proto(self, type):
"""
Get OpProto by a type string.
Args:
type(str): The type that operator registered in C++ side.
Returns(framework_pb2.OpProto): The OpProto
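Examples:
A minimal illustrative sketch (not part of the original docstring); it assumes the "sum" operator is registered, which holds for standard Paddle builds.
.. code-block:: python
from paddle.fluid.framework import OpProtoHolder
proto = OpProtoHolder.instance().get_op_proto("sum")
print(proto.type)  # 'sum'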
"""
if type not in self.op_proto_map:
raise ValueError("Operator \"%s\" has not been registered." % type)
return self.op_proto_map[type]
def update_op_proto(self):
op_protos = get_all_op_protos()
for proto in op_protos:
if proto.type not in self.op_proto_map:
self.op_proto_map[proto.type] = proto
@staticmethod
def generated_op_attr_names():
return {
core.op_proto_and_checker_maker.kOpRoleAttrName(),
core.op_proto_and_checker_maker.kOpRoleVarAttrName(),
core.op_proto_and_checker_maker.kOpNameScopeAttrName(),
core.op_proto_and_checker_maker.kOpCreationCallstackAttrName(),
core.op_proto_and_checker_maker.kOpDeviceAttrName()
}
class Operator(object):
"""
In Fluid, all operations are represented by Operators, and an Operator
is regarded as a built-in instruction of a Block. Users can use these
built-in instructions to describe their neural networks.
Args:
block(Block): The block has the current operator.
desc(core.OpDesc): The protobuf description of Operator.
type(str): The type of operator. Default None.
inputs(dict): The input of this Operator. It is a dictionary; for every
element, the key is the input parameter name, and the value is a list of
variables. Default None.
outputs(dict): The output of this Operator. It is a dictionary; for
every element, the key is the output parameter name, and the value is a list
of variables. Default None.
attrs(dict): The attributes of this Operator. It is a dictionary; for
every element, the key is the attribute name, and the value is the attribute value.
The attribute type should be the same as the type registered in the C++ side.
Default None.
Returns:
Operator: The initialized Operator.
Raises:
ValueError: If the passed inputs, outputs and attrs don't match those of
the Operator registered in the C++ side.
Notes:
The constructor of operator should not be invoked directly. Use
Block.append_op or Block._prepend_op instead.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
# define the inputs first so the example is self-contained
var1 = cur_block.create_var(name="var1", shape=[-1, 1], dtype='float32')
var2 = cur_block.create_var(name="var2", shape=[-1, 1], dtype='float32')
var3 = cur_block.create_var(name="var3", shape=[-1, 1], dtype='float32')
# var1 += var2 + var3
cur_block.append_op(type="sum",
inputs={"X": [var1, var2, var3]},
outputs={"Out": [var1]})
"""
OP_WITHOUT_KERNEL_SET = {
'feed', 'fetch', 'recurrent', 'go', 'rnn_memory_helper_grad',
'conditional_block', 'while', 'send', 'recv', 'listen_and_serv',
'fl_listen_and_serv', 'ncclInit', 'select', 'checkpoint_notify',
'gen_nccl_id', 'c_gen_nccl_id', 'c_comm_init', 'c_sync_calc_stream',
'c_sync_comm_stream', 'queue_generator', 'dequeue', 'enqueue'
}
def __init__(self,
block,
desc,
type=None,
inputs=None,
outputs=None,
attrs=None):
if in_dygraph_mode():
if type is None:
raise ValueError(
"`type` to initialized an Operator can not be None.")
self._type = type
self.attrs = attrs if attrs else {}
else:
self.block = block
self.desc = desc
# note: not add self.attrs here:
# https://github.com/PaddlePaddle/Paddle/pull/12583#pullrequestreview-145093173
op_attrs = attrs
if op_attrs is None:
op_attrs = dict()
del attrs
op_maker = core.op_proto_and_checker_maker
if op_maker.kOpRoleAttrName() not in op_attrs:
op_attrs[op_maker.kOpRoleAttrName(
)] = self.block.program._op_role
role_var_name = op_maker.kOpRoleVarAttrName()
if len(self.block.program.
_op_role_var) != 0 and role_var_name not in op_attrs:
op_attrs[role_var_name] = self.block.program._op_role_var
if role_var_name in op_attrs and len(op_attrs[role_var_name]) == 0:
del op_attrs[role_var_name]
if len(self.desc.type()) != 0:
return
if type is None:
raise ValueError(
"`type` to initialized an Operator can not be None.")
else:
callstack_var_name = op_maker.kOpCreationCallstackAttrName()
op_attrs[callstack_var_name] = []
for frame in traceback.extract_stack():
op_attrs[callstack_var_name].append(
' File "{}", line {}, in {}'.format(frame[0], frame[1],
frame[2]))
op_attrs[callstack_var_name].append(' {}'.format(frame[
3]))
self.desc.set_type(type)
proto = OpProtoHolder.instance().get_op_proto(type)
namescope_var_name = op_maker.kOpNameScopeAttrName()
op_attrs[namescope_var_name] = _full_name_scope()
# set device for op with kernels, give warning for op without kernels
# when force_cpu and device_guard are used at the same time, a warning will be given.
# TODO(zhangting2020): when force_cpu is removed, clear warning below.
if _current_device is not None:
if self._has_kernel(type):
op_device = op_maker.kOpDeviceAttrName()
op_attrs[op_device] = _current_device
else:
warnings.warn("The Op(%s) does not support setting a device." % type)
if 'force_cpu' in op_attrs:
if (type == 'less_than' and op_attrs['force_cpu'] is not None
) or op_attrs['force_cpu'] != False:
warnings.warn(
"The Attr(force_cpu) of Op(%s) will be deprecated in the future, "
"please use 'device_guard' instead. 'device_guard' has higher priority when they are "
"used at the same time." % type)
def find_name(var_list, name):
for var_name in var_list:
if var_list[var_name] is not None and var_name == name:
return True
return False
if inputs is not None:
for in_proto in proto.inputs:
found = find_name(inputs, in_proto.name)
assert found or in_proto.dispensable, "Input {} not found".format(
in_proto.name)
if found:
in_args = inputs[in_proto.name]
if not isinstance(in_args, (list, tuple)):
in_args = [in_args]
if not in_proto.duplicable and len(in_args) > 1:
raise ValueError(
"Input %s expects only one input, but %d are given."
% (in_proto.name, len(in_args)))
in_arg_names = []
for index, arg in enumerate(in_args):
if isinstance(arg, six.string_types):
in_arg_names.append(arg)
elif isinstance(arg, six.binary_type):
in_arg_names.append(arg.decode())
elif isinstance(arg, (Variable, core.VarBase)):
in_arg_names.append(cpt.to_text(arg.name))
else:
raise TypeError(
"The type of '%s' in operator %s should be "
"one of [basestring(), str, Variable] in python2, "
"or one of [str, bytes, Variable] in python3, "
"but received: %s" %
(in_proto.name, type, arg))
self.desc.set_input(in_proto.name, in_arg_names)
else:
self.desc.set_input(in_proto.name, [])
if outputs is not None:
for m in proto.outputs:
if (m.name not in outputs) and m.dispensable:
continue
if not ((m.name in outputs) or m.dispensable):
raise ValueError(("Incorrect setting for output(s) of "
"operator \"%s\", should set: [%s].")
% (type, m.name))
for out_proto in proto.outputs:
if out_proto.name not in outputs:
continue
out_args = outputs[out_proto.name]
if not isinstance(out_args, list):
out_args = [out_args]
if not out_proto.duplicable and len(out_args) > 1:
raise ValueError(
"Output %s expects only one output, but %d are given."
% (out_proto.name, len(out_args)))
out_arg_names = []
for arg in out_args:
out_arg_names.append(cpt.to_text(arg.name))
# TODO(minqiyang): could we remove variable's op in static mode?
if not in_dygraph_mode():
arg.op = self
self.desc.set_output(out_proto.name, out_arg_names)
if op_attrs is not None:
if not isinstance(op_attrs, dict):
raise TypeError("'attrs' should be a dict.")
for attr in proto.attrs:
attr_name = attr.name
if (attr_name not in op_attrs) or (
op_attrs[attr_name] is None):
continue
attr_val = op_attrs[attr_name]
self._update_desc_attr(attr_name, attr_val)
self.desc.check_attrs()
if self._has_kernel(type):
self.desc.infer_var_type(self.block.desc)
self.desc.infer_shape(self.block.desc)
def _has_kernel(self, op_type):
return op_type not in self.OP_WITHOUT_KERNEL_SET
def to_string(self, throw_on_error):
"""
Get debug string.
Args:
throw_on_error(bool): Whether to raise exception if self is not
initialized.
Returns:
str: The debug string.
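Examples:
An illustrative sketch (not part of the original docstring), mirroring the _to_readable_code example below.
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
var = cur_block.create_var(name="X", shape=[-1, 23, 48], dtype='float32')
new_op = cur_block.append_op(type="abs", inputs={"X": [var]}, outputs={"Out": [var]})
print(new_op.to_string(True))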
"""
protostr = self.desc.serialize_to_string()
proto = framework_pb2.OpDesc.FromString(six.binary_type(protostr))
return _debug_string_(proto, throw_on_error)
def _to_readable_code(self, skip_op_callstack=True):
"""
Get readable debug string of Operator.
.. note::
If you want to get the debug string in protobuf format,
please use :code:`to_string` method.
Args:
skip_op_callstack(bool): whether to skip parsing Operator's attribute
op_callstack, default value is True
Returns:
string: The formatted Operator string.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
var = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
new_op = cur_block.append_op(type="abs",
inputs={"X": [var]},
outputs={"Out": [var]})
print(new_op._to_readable_code())
"""
assert isinstance(
skip_op_callstack, bool
), "skip_op_callstack parameter's type is wrong, expect bool, received {}".format(
type(skip_op_callstack))
outputs_str = "{"
for i in range(0, len(self.output_names)):
outputs_str += "{name}=".format(name=self.output_names[i])
o = self.output(self.output_names[i])
outputs_str += "{value}".format(value=o)
if i != len(self.output_names) - 1:
outputs_str += ", "
outputs_str += "}"
inputs_str = "{"
for i in range(0, len(self.input_names)):
inputs_str += "{name}=".format(name=self.input_names[i])
o = self.input(self.input_names[i])
inputs_str += "{value}".format(value=o)
if i != len(self.input_names) - 1:
inputs_str += ", "
inputs_str += "}"
attr_names = sorted(self.attr_names)
attrs_str = ""
for i in range(0, len(attr_names)):
name = attr_names[i]
if skip_op_callstack and name == "op_callstack":
continue
attr_type = self.desc.attr_type(name)
if attr_type == core.AttrType.BLOCK:
a = "{name} = block[{value}]".format(
name=name, type=attr_type, value=self._block_attr_id(name))
attrs_str += a
if i != len(attr_names) - 1:
attrs_str += ", "
continue
if attr_type == core.AttrType.BLOCKS:
a = "{name} = blocks{value}".format(
name=name,
type=attr_type,
value=self._blocks_attr_ids(name))
attrs_str += a
if i != len(attr_names) - 1:
attrs_str += ", "
continue
a = "{name} = {value}".format(
name=name, type=attr_type, value=self.desc.attr(name))
attrs_str += a
if i != len(attr_names) - 1:
attrs_str += ", "
if outputs_str != "{}":
op_str = "{outputs} = {op_type}(inputs={inputs}, {attrs})".\
format(outputs = outputs_str, op_type=self.type, inputs=inputs_str, attrs=attrs_str)
else:
op_str = "{op_type}(inputs={inputs}, {attrs})".\
format(op_type=self.type, inputs=inputs_str, attrs=attrs_str)
return op_str
def __str__(self):
return self._to_readable_code()
__repr__ = __str__
@property
def type(self):
return self.desc.type()
def input(self, name):
"""
Get the input arguments according to the input parameter name.
Args:
name(str): The input parameter name.
Returns:
list: return the list of argument names that are associated with \
the specific parameter name.
"""
return self.desc.input(name)
def _rename_input(self, old_name, new_name):
"""
Rename the `old_name` to `new_name`.
Args:
old_name(str): The old name of the Operator's input.
new_name(str): The new name of the Operator's input.
Returns:
None
"""
self.desc._rename_input(old_name, new_name)
def _rename_output(self, old_name, new_name):
"""
Rename the `old_name` to `new_name`.
Args:
old_name(str): The old name of the Operator's output.
new_name(str): The new name of the Operator's output.
Returns:
None
"""
self.desc._rename_output(old_name, new_name)
@property
def input_names(self):
return self.desc.input_names()
@property
def input_arg_names(self):
return self.desc.input_arg_names()
@property
def output_arg_names(self):
return self.desc.output_arg_names()
def output(self, name):
"""
Get output arguments by the output parameter name.
Args:
name(str): The output parameter name.
Returns:
list: return the list of argument names associated with \
the specific parameter name.
"""
return self.desc.output(name)
@property
def output_names(self):
return self.desc.output_names()
@property
def idx(self):
for i, op in enumerate(self.block.ops):
if op == self:
return i
raise ValueError(
"Can't find op itself in its block. It could be a bug of Paddle.")
def has_attr(self, name):
"""
Whether this Operator has the attribute with name or not.
Args:
name(str): the attribute name.
Returns:
bool: True if has this attribute.
"""
return self.desc.has_attr(name)
def attr_type(self, name):
"""
Get the type of attribute by attribute's name.
Args:
name(str): the attribute name.
Returns:
core.AttrType: the attribute type.
"""
return self.desc.attr_type(name)
def _set_attr(self, name, val):
"""
Set the value of attribute by attribute's name.
Args:
name(str): the attribute name.
val(bool|int|str|float|list): the value of the attribute.
Raises:
ValueError: If the type of value doesn't match with desc.attr_type(name).
"""
self._update_desc_attr(name, val)
def _remove_attr(self, name):
self.desc.remove_attr(name)
def _update_desc_attr(self, name, val):
"""
Update the value of desc's attribute by attribute's name.
Args:
name(str): the attribute name.
val(bool|int|str|float|list): the value of the attribute.
Raises:
ValueError: If the type of value doesn't match with desc.attr_type(name).
"""
if isinstance(val, Block):
self.desc.set_block_attr(name, val.desc)
elif isinstance(val, list) and val and all(
isinstance(v, Block) for v in val):
self.desc.set_blocks_attr(name, [v.desc for v in val])
elif isinstance(val, core.BlockDesc) or \
isinstance(val, core.ProgramDesc):
self.desc.set_serialized_attr(name, val.serialize_to_string())
else:
self.desc._set_attr(name, val)
@property
def attr_names(self):
return self.desc.attr_names()
def attr(self, name):
"""
Get the attribute by name.
Args:
name(str): the attribute name.
Returns:
bool|int|str|float|list: The attribute value. The return value
can be any valid attribute type.
"""
return self.desc.attr(name)
def _block_attr_id(self, name):
"""
Get the block attribute's id by name.
Args:
name(str): the attribute name.
Returns:
int: the block index.
"""
return self.desc._block_attr_id(name)
def _block_attr(self, name):
"""
Get the block attribute by name.
Args:
name(str): the attribute name.
Returns:
block: the block attribute.
"""
id = self._block_attr_id(name)
assert (id >= 0 and id < len(self.block.program.blocks))
return self.block.program.blocks[id]
def _blocks_attr(self, name):
"""
Get the blocks attribute by name.
Args:
name(str): the attribute name.
Returns:
list: list of the blocks attribute.
"""
attrs = []
for i in self._blocks_attr_ids(name):
assert (i >= 0 and i < len(self.block.program.blocks))
attrs.append(self.block.program.blocks[i])
return attrs
def _blocks_attr_ids(self, name):
"""
Get the blocks attribute's ids by name.
Args:
name(str): the attribute name.
Returns:
list: list of the blocks ids.
"""
return self.desc._blocks_attr_ids(name)
def all_attrs(self):
"""
Get the attribute dict.
Returns:
dict: The Operator's attribute dict, name->attr.
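Examples:
An illustrative sketch (not part of the original docstring), reusing the Program/Block pattern from the other examples in this file.
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
var = cur_block.create_var(name="X", shape=[-1, 23, 48], dtype='float32')
new_op = cur_block.append_op(type="abs", inputs={"X": [var]}, outputs={"Out": [var]})
print(new_op.all_attrs())  # dict mapping attribute names to their values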
"""
attr_names = self.attr_names
attr_map = {}
for n in attr_names:
attr_type = self.desc.attr_type(n)
if attr_type == core.AttrType.BLOCK:
attr_map[n] = self._block_attr(n)
continue
if attr_type == core.AttrType.BLOCKS:
attr_map[n] = self._blocks_attr(n)
continue
attr_map[n] = self.attr(n)
return attr_map
def _is_optimize_op(self):
op_maker = core.op_proto_and_checker_maker
OPTIMIZE = core.op_proto_and_checker_maker.OpRole.Optimize
if not self.desc.has_attr(op_maker.kOpRoleAttrName()):
return False
op_role = self.desc.attr(op_maker.kOpRoleAttrName())
if op_role & int(OPTIMIZE):
return True
return False
def _is_backward_op(self):
op_maker = core.op_proto_and_checker_maker
BACKWARD = core.op_proto_and_checker_maker.OpRole.Backward
if not self.desc.has_attr(op_maker.kOpRoleAttrName()):
return False
op_role = self.desc.attr(op_maker.kOpRoleAttrName())
if op_role & int(BACKWARD):
return True
return False
class Block(object):
"""
In Fluid, a Program consists of multiple Blocks, and a Block stores
VarDescs and OpDescs. In a specific Block, a VarDesc has a unique name.
One block could have some child blocks, and a child block's name scopes
should inherit the parent's so that an OpDesc in the child block can reference
a VarDesc that is stored in the parent block.
Please reference the framework.proto for details.
Args:
program(Program): The Program that the Block belongs to.
idx(int): The block's id in the Program.
Notes:
The constructor of Block should not be invoked directly. Please
use `Program._create_block()` to create a block.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
var = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
cur_block.append_op(type="abs",
inputs={"X": [var]},
outputs={"Out": [var]})
"""
def __init__(self, program, idx):
self.desc = program.desc.block(idx)
self.vars = collections.OrderedDict() # var_name --> var
self.ops = list() # operator list
self.program = program
self.removed_vars = collections.OrderedDict()
def __str__(self):
return self._to_readable_code()
def _to_readable_code(self, skip_op_callstack=True):
"""
Get readable debug string of Block.
.. note::
If you want to get the debug string in protobuf format,
please use :code:`to_string` method.
Args:
skip_op_callstack(bool): whether to skip parsing Operator's attribute
op_callstack, default value is True
Returns:
string: The formatted Block string.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_var = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
new_op = cur_block.append_op(type="abs",
inputs={"X": [new_var]},
outputs={"Out": [new_var]})
print(cur_block._to_readable_code())
"""
assert isinstance(
skip_op_callstack, bool
), "skip_op_callstack parameter's type is wrong, expect bool, received {}".format(
type(skip_op_callstack))
block_str = "{ // block "
block_str += "{}\n".format(self.idx)
for var in list(self.vars.values()):
block_str += " {}\n".format(var._to_readable_code())
block_str += "\n"
for op in self.ops:
block_str += " {}\n".format(
op._to_readable_code(skip_op_callstack))
block_str += "}"
return block_str
def to_string(self, throw_on_error, with_details=False):
"""
Get debug string.
Args:
throw_on_error(bool): raise an exception when self is not initialized
and throw_on_error is True.
with_details(bool): more details about variables and parameters
(e.g. trainable, optimize_attr, ...) will be printed when
with_details is True. Default False.
Returns:
str: The debug string.
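Examples:
An illustrative sketch (not part of the original docstring), following the same Program/Block pattern used elsewhere in this file.
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
cur_block.create_var(name="X", shape=[-1, 23, 48], dtype='float32')
print(cur_block.to_string(True))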
"""
assert isinstance(throw_on_error, bool) and isinstance(with_details,
bool)
if with_details:
re_add_indent = re.compile(r"\n(.)")
res_str = "blocks {\n idx: %d\n parent_idx: %d" % (
self.idx, self.parent_idx)
for var in list(self.vars.values()):
res_str += "\n vars {\n %s }" % re_add_indent.sub(
r"\n \1", var.to_string(throw_on_error, with_details))
for op in self.ops:
res_str += "\n ops {\n %s }" % re_add_indent.sub(
r"\n \1", op.to_string(throw_on_error))
res_str += "\n}"
else:
protostr = self.desc.serialize_to_string()
proto = framework_pb2.BlockDesc.FromString(
six.binary_type(protostr))
res_str = _debug_string_(proto, throw_on_error)
return res_str
__repr__ = __str__
@property
def parent_idx(self):
return self.desc.parent
@property
def forward_block_idx(self):
return self.desc.get_forward_block_idx()
def _set_forward_block_idx(self, idx):
"""
Set the forward block Idx.
Args:
idx(int): the block index.
Returns:
None
"""
self.desc._set_forward_block_idx(idx)
@property
def backward_block_idx(self):
cur_block_idx = self.idx
for block in self.program.blocks:
if block.forward_block_idx == cur_block_idx:
return block.idx
return -1
@property
def idx(self):
return self.desc.id
def var(self, name):
"""
Get a Variable by name from this block.
Args:
name(str): the Variable's name.
Raises:
ValueError: If the input's type is not str, or this block
doesn't have a Variable with the given name.
Returns:
Variable: the Variable with the given name.
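Examples:
An illustrative sketch (not part of the original docstring): retrieve a Variable previously created in this block.
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
cur_block.create_var(name="X", shape=[-1, 23, 48], dtype='float32')
x = cur_block.var("X")
print(x.name)  # 'X'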
"""
if not isinstance(name, six.string_types):
raise TypeError(
"var require string as parameter, but get %s instead." %
(type(name)))
v = self.vars.get(name, None)
if v is None:
raise ValueError("var %s not in this block" % name)
return v
def _find_var_recursive(self, name):
"""
Get a Variable by name from this block recursively.
Args:
name(str): the Variable's name.
Returns:
Variable: the Variable with the given name, or None if not found.
"""
frontier = list()
visited = set()
frontier.append(self)
prog = self.program
while len(frontier) != 0: # BFS
cur = frontier[0]
frontier = frontier[1:]
if id(cur) in visited:
continue
if cur.has_var(name):
return cur.var(name)
if cur.parent_idx != -1:
frontier.append(prog.block(cur.parent_idx))
if cur.forward_block_idx != -1:
frontier.append(prog.block(cur.forward_block_idx))
visited.add(id(cur))
return None
def _var_recursive(self, name):
"""
Get a Variable by name from this block recursively.
Args:
name(str): the Variable's name.
Raises:
ValueError: this block and its parent blocks don't
have a Variable with the given name.
Returns:
Variable: the Variable with the given name.
"""
var = self._find_var_recursive(name)
if var:
return var
else:
raise ValueError("Var {0} is not found recursively".format(name))
def all_parameters(self):
return list(self.iter_parameters())
def iter_parameters(self):
return (item[1] for item in six.iteritems(self.vars)
if isinstance(item[1], Parameter))
def create_var(self, *args, **kwargs):
if in_dygraph_mode():
var = _varbase_creator(*args, **kwargs)
else:
var = Variable(block=self, *args, **kwargs)
if 'initializer' in kwargs:
kwargs['initializer'](var, self)
return var
def has_var(self, name):
return name in self.vars
def _rename_var(self, name, new_name):
"""
Rename variable in vars and ops' inputs and outputs
Args:
name(str): the name that needs to be renamed.
new_name(str): the new name to rename to.
Raises:
ValueError: If this block doesn't have a var with the given name,
or the type of the var with the given name is not Parameter
or Variable.
Returns:
Variable: the Variable with the given name.
"""
name = cpt.to_text(name)
new_name = cpt.to_text(new_name)
if not self.has_var(name):
raise ValueError("var %s is not in current block" % name)
v = self.var(name)
if type(v) == Parameter:
var_type = "Parameter"
stop_gradient = v.stop_gradient
trainable = v.trainable
optimize_attr = v.optimize_attr
regularizer = v.regularizer
error_clip = v.error_clip
elif type(v) == Variable:
var_type = "Variable"
error_clip = v.error_clip
stop_gradient = v.stop_gradient
else:
raise ValueError("unsupported var type: %s", type(v))
orig_var_type = v.type
self.desc._rename_var(cpt.to_bytes(name), cpt.to_bytes(new_name))
# NOTE: v is destroyed by C++ after calling _rename_var.
d = self.desc.find_var(cpt.to_bytes(new_name))
if var_type == "Parameter":
if in_dygraph_mode():
var = ParamBase(
d.shape(),
d.dtype(),
type=orig_var_type,
name=new_name,
stop_gradient=stop_gradient,
trainable=trainable,
optimize_attr=optimize_attr,
regularizer=regularizer,
error_clip=error_clip)
else:
var = Parameter(
self,
d.shape(),
d.dtype(),
type=orig_var_type,
name=new_name,
stop_gradient=stop_gradient,
trainable=trainable,
optimize_attr=optimize_attr,
regularizer=regularizer,
error_clip=error_clip)
elif var_type == "Variable":
var = Variable(
self,
type=orig_var_type,
name=new_name,
error_clip=error_clip,
stop_gradient=stop_gradient)
# rename the python side, _sync_with_cpp will only add
# new vars/ops to python side.
self.vars[new_name] = var
del self.vars[name]
self._sync_with_cpp()
return var
def _remove_var(self, name):
self._sync_with_cpp()
self.desc._remove_var(cpt.to_bytes(name))
del self.vars[name]
def create_parameter(self, *args, **kwargs):
global_block = self.program.global_block()
param = None
if in_dygraph_mode():
param = ParamBase(*args, **kwargs)
else:
param = Parameter(global_block, *args, **kwargs)
if 'initializer' in kwargs:
def _is_inited_by(block, var):
init_ops = []
for op in block.ops:
if var.name in op.output_arg_names:
# In startup_program, "c_broadcast" and "c_sync_comm_stream"
# would otherwise be treated as initialization ops and cause a spurious error.
# Treat "c_broadcast" and "c_sync_comm_stream" as a special case here.
if op.type in ["c_broadcast", "c_sync_comm_stream"]:
continue
init_ops.append(op)
return init_ops
initializer = kwargs['initializer']
init_ops = _is_inited_by(global_block, param)
init_ops_len = len(init_ops)
if init_ops_len > 1:
raise RuntimeError("param " + param.name +
" is inited by multiple init ops " + str(
init_ops))
elif init_ops_len == 1:
# TODO already inited, do nothing, should log a warning
pass
else:
initializer(param, self)
param.stop_gradient = False
return param
def append_op(self, *args, **kwargs):
"""
Appends a new Operator according to the given arguments.
Returns:
Operator: the appended Operator.
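Examples:
An illustrative sketch (not part of the original docstring), mirroring the example in the Block class docstring above.
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
var = cur_block.create_var(name="X", shape=[-1, 23, 48], dtype='float32')
new_op = cur_block.append_op(type="abs", inputs={"X": [var]}, outputs={"Out": [var]})
print(new_op.type)  # 'abs'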
"""
if in_dygraph_mode():
attrs = kwargs.get("attrs", {})
type = kwargs.get("type", None)
op = Operator(
block=self,
desc=None,
type=type,
inputs=None,
outputs=None,
attrs=attrs)
# record ops in tracer rather than blocks
#
# TODO(minqiyang): add op stop_gradient support in static mode too.
# currently, we only support stop_gradient in dygraph mode.
_dygraph_tracer().trace_op(type,
kwargs.get("inputs", {}),
kwargs.get("outputs", {}), attrs
if attrs else {},
kwargs.get("stop_gradient", False))
else:
op_desc = self.desc.append_op()
op = Operator(
block=self,
desc=op_desc,
type=kwargs.get("type", None),
inputs=kwargs.get("inputs", None),
outputs=kwargs.get("outputs", None),
attrs=kwargs.get("attrs", None))
self.ops.append(op)
return op
def _insert_op(self, index, *args, **kwargs):
"""
Insert an Operator according to the given arguments.
Args:
index(int): the index at which to insert the operator.
Returns:
Operator: the inserted Operator.
"""
self._sync_with_cpp()
op_desc = self.desc._insert_op(index)
op = Operator(block=self, desc=op_desc, *args, **kwargs)
self.ops.insert(index, op)
return op
def _remove_op(self, index):
"""
Remove the operator at the specified position.
Args:
index(int): the position of the operator to remove.
Returns:
None
"""
self._sync_with_cpp()
self.desc._remove_op(index, index + 1)
del self.ops[index]
def _slice_ops(self, start, end):
"""
Return the Operators between start and end.
Args:
start(int): the start position.
end(int): the end position.
Returns:
list: the Operators between start and end.
"""
return self.ops[start:end]
def _prepend_op(self, *args, **kwargs):
if in_dygraph_mode():
type = kwargs.get("type", None)
attrs = kwargs.get("attrs", {})
op = Operator(
self, None, type=type, inputs=None, outputs=None, attrs=attrs)
_dygraph_tracer().trace_op(type,
kwargs.get("inputs", {}),
kwargs.get("outputs", {}), attrs
if attrs else {},
kwargs.get("stop_gradient", False))
else:
op_desc = self.desc._prepend_op()
op = Operator(
self,
op_desc,
type=kwargs.get("type", None),
inputs=kwargs.get("inputs", None),
outputs=kwargs.get("outputs", None),
attrs=kwargs.get("attrs", None))
self.ops.insert(0, op)
return op
def _sync_with_cpp(self):
"""
Sync from the desc on the c++ end. This method is used to synchronize
the c++ desc instance generated by backward.
"""
# sync variables from cpp
for var in self.desc.all_vars():
if not self.has_var(var.name()):
self.create_var(name=var.name(), desc=var, type=var.type())
# sync variables removed from c++ end
for var in list(self.vars.keys()):
if not self.desc.find_var(cpt.to_bytes(var)):
self.vars.pop(var)
# sync operators from cpp
ops_in_cpp = []
for op_idx in range(0, self.desc.op_size()):
ops_in_cpp.append(self.desc.op(op_idx))
if len(self.ops) != 0:
first_op_in_python = self.ops[0].desc
last_op_in_python = self.ops[len(self.ops) - 1].desc
start_index = None
end_index = None
for index in range(len(ops_in_cpp)):
if first_op_in_python == ops_in_cpp[index]:
start_index = index
if last_op_in_python == ops_in_cpp[index]:
end_index = index
assert start_index is not None
assert end_index is not None
assert start_index <= end_index
else:
start_index = 0
end_index = -1
# sync ops append to the head of cpp_ops
for index in range((start_index - 1 - 1), -1, -1):
op_desc = ops_in_cpp[index]
op = Operator(self, op_desc)
self.ops.insert(0, op)
# sync ops append to the end of cpp_ops
for index in range((end_index + 1), len(ops_in_cpp)):
op_desc = ops_in_cpp[index]
op = Operator(self, op_desc)
self.ops.append(op)
# sync ops removed from c++ end
if end_index != -1 and end_index < len(self.ops):
ops_in_cpp_index = 0
ops_in_python_index = 0
while ops_in_python_index < len(
self.ops) and ops_in_cpp_index < len(ops_in_cpp):
if self.ops[ops_in_python_index].desc != ops_in_cpp[
ops_in_cpp_index]:
del self.ops[ops_in_python_index]
else:
ops_in_cpp_index += 1
ops_in_python_index += 1
assert len(self.ops) == len(ops_in_cpp)
for index in range(len(self.ops)):
assert self.ops[index].desc == ops_in_cpp[index]
def _copy_param_info_from(self, other):
"""
Copy the information of parameters from the other block.
Args:
other(Block): the other block.
Raises:
ValueError: If type of input is not Block, or the `other` and this
block is not in the same topology.
Returns:
None
"""
if not isinstance(other, Block):
raise TypeError(
"_copy_param_info_from should be invoked with Block")
for p in other.iter_parameters():
assert isinstance(p, Parameter)
v = self.vars.get(p.name, None)
if v is None:
# if the Parameter is pruned, v may be None
continue
assert isinstance(v, Variable)
new_p = None
if in_dygraph_mode():
new_p = ParamBase(
shape=v.shape,
dtype=v.dtype,
type=v.type,
lod_level=v.lod_level,
stop_gradient=p.stop_gradient,
trainable=p.trainable,
optimize_attr=p.optimize_attr,
regularizer=p.regularizer,
error_clip=p.error_clip,
name=v.name)
else:
new_p = Parameter(
block=self,
shape=v.shape,
dtype=v.dtype,
type=v.type,
lod_level=v.lod_level
if v.type == core.VarDesc.VarType.LOD_TENSOR else None,
stop_gradient=p.stop_gradient,
trainable=p.trainable,
optimize_attr=p.optimize_attr,
regularizer=p.regularizer,
error_clip=p.error_clip,
name=v.name)
self.vars[new_p.name] = new_p
def _clone_variable(self, var, force_persistable=True):
"""
Clone a variable into current block.
Args:
var: the variable to be cloned.
force_persistable(bool): True means the result variable is set to be persistable.
False means its persistable attribute is kept the same as that of the input var.
default: True.
Returns:
Variable: the new variable cloned from 'var' in current block.
"""
assert isinstance(var, Variable)
ret_var = None
# make STEP_SCOPES var can be safely cloned.
if var.type == core.VarDesc.VarType.STEP_SCOPES:
ret_var = self.create_var(
name=var.name, persistable=var.persistable, type=var.type)
elif var.type == core.VarDesc.VarType.RAW:
ret_var = self.create_var(
name=var.name, persistable=var.persistable, type=var.type)
elif var.type == core.VarDesc.VarType.SELECTED_ROWS:
ret_var = self.create_var(
name=var.name,
shape=var.shape,
dtype=var.dtype,
type=var.type,
persistable=True if force_persistable else var.persistable,
is_data=var.is_data,
need_check_feed=var.desc.need_check_feed())
else:
ret_var = self.create_var(
name=var.name,
shape=var.shape,
dtype=var.dtype,
type=var.type,
lod_level=var.lod_level,
persistable=True if force_persistable else var.persistable,
is_data=var.is_data,
need_check_feed=var.desc.need_check_feed())
return ret_var
class IrNode(object):
"""
Python IrNode. Beneath it is a core.Node, which is used for Ir Pass.
"""
def __init__(self, node):
"""
Construct an IrNode using core.Node.
Args:
node(core.Node): C++ Node.
"""
assert isinstance(node,
core.Node), 'node must be the instance of core.Node.'
self.node = node
def name(self):
"""
Return the node name.
Returns:
str: node name.
"""
return self.node.name()
def node_type(self):
"""
Return the node type.
Returns:
core.Node.Type: node type(core.Node.Type.Operation or core.Node.Type.Variable).
"""
return self.node.node_type()
def var(self):
"""
Return the node variable description.
Returns:
core.VarDesc: node variable description.
"""
return self.node.var()
def op(self):
"""
Return the node operator description.
Returns:
core.OpDesc: node operator description.
"""
return self.node.op()
def id(self):
"""
Return the node id.
Returns:
int: node id.
"""
return self.node.id()
def is_op(self):
"""
If the node is an operator, then return true.
Returns:
bool: indicate whether the node is an operator.
"""
return self.node.is_op()
def is_var(self):
"""
If the node is a variable, then return true.
Returns:
bool: indicate whether the node is a variable.
"""
return self.node.is_var()
def is_ctrl_var(self):
"""
If the node is a control dependence variable, then return true.
Returns:
bool: indicate whether the node is a control dependence variable.
"""
return self.node.is_ctrl_var()
def clear_inputs(self):
"""
Clear the node inputs. After executing the `clear_inputs` function,
the node inputs will be empty.
"""
self.node.clear_inputs()
def remove_input_by_id(self, node_id):
"""
Remove a node from inputs by the given node id.
Args:
node_id(int): the given node id.
"""
self.node.remove_input(node_id)
def remove_input(self, node):
"""
Remove a node from inputs.
Args:
node(IrNode): the node being removed.
"""
self.node.remove_input(node.node)
def append_input(self, node):
"""
Append a node in inputs.
Args:
node(IrNode): the node being appended.
"""
self.node.append_input(node.node)
def clear_outputs(self):
"""
Clear the node outputs. After executing the `clear_outputs` function,
the node outputs will be empty.
"""
self.node.clear_outputs()
def remove_output_by_id(self, node_id):
"""
Remove a node from outputs by the given node id.
Args:
node_id(int): the given node id.
"""
self.node.remove_output(node_id)
def remove_output(self, node):
"""
Remove a node from outputs.
Args:
node(IrNode): the node being removed.
"""
self.node.remove_output(node.node)
def append_output(self, node):
"""
Append a node in outputs.
Args:
node(IrNode): the node being appended.
"""
self.node.append_output(node.node)
@property
def inputs(self):
"""
Return the node inputs.
Returns:
list(IrNode): node inputs wrapped by IrNode.
"""
return [IrNode(n) for n in self.node.inputs]
@property
def outputs(self):
"""
Return the node outputs.
Returns:
list(IrNode): node outputs wrapped by IrNode.
"""
return [IrNode(n) for n in self.node.outputs]
class IrVarNode(IrNode):
"""
Python IrVarNode. Beneath it is a core.Node, it inherits from IrNode.
"""
def __init__(self, node):
"""
Construct an IrVarNode using core.Node.
Args:
node(core.Node): C++ Node.
"""
assert isinstance(node, core.Node) and node.is_var(), \
'node must be the instance of core.Node and it must be a variable node.'
super(IrVarNode, self).__init__(node)
self.node = node
def set_shape(self, shape):
"""
Set the node variable shape.
Args:
shape(list): shape to be set.
"""
assert self.node.var() is not None, \
"The node variable description can not be None."
self.node.var().set_shape(shape)
def persistable(self):
"""
If the variable node is a persistable variable, then return true.
Returns:
bool: indicate whether the variable is persistable.
"""
assert self.node.var() is not None, \
"The node variable description can not be None."
return self.node.var().persistable()
def type(self):
"""
Return the variable type.
Returns:
core.VarDesc.VarType: the variable type.
"""
assert self.node.var() is not None, \
"The node variable description can not be None."
return self.node.var().type()
def dtype(self):
"""
Return the variable data type.
Returns:
core.VarDesc.VarType: the variable data type.
"""
assert self.node.var() is not None, \
"The node variable description can not be None."
return self.node.var().dtype()
def shape(self):
"""
Return the variable shape.
Returns:
list: the variable shape.
"""
assert self.node.var() is not None, \
"The node variable description can not be None."
return self.node.var().shape()
@property
def inputs(self):
"""
Return the node inputs.
Returns:
list(IrOpNode): node inputs wrapped by IrOpNode.
"""
return [IrOpNode(n) for n in self.node.inputs]
@property
def outputs(self):
"""
Return the node outputs.
Returns:
list(IrOpNode): node outputs wrapped by IrOpNode.
"""
return [IrOpNode(n) for n in self.node.outputs]
class IrOpNode(IrNode):
"""
Python IrOpNode. Beneath it is a core.Node, it inherits from IrNode.
"""
def __init__(self, node):
"""
Construct an IrOpNode using core.Node.
Args:
node(core.Node): C++ Node.
"""
assert isinstance(node, core.Node) and node.is_op(), \
'node must be the instance of core.Node and it must be an operator node.'
super(IrOpNode, self).__init__(node)
self.node = node
def rename_input(self, old_input_name, new_input_name):
"""
Rename the input of this node.
Args:
old_input_name(str): the old input name.
new_input_name(str): the new input name.
"""
assert self.node.op() is not None, \
"The node operator description can not be None."
self.node.op()._rename_input(old_input_name, new_input_name)
def rename_output(self, old_output_name, new_output_name):
"""
Rename the output of this node.
Args:
old_output_name(str): the old output name.
new_output_name(str): the new output name.
"""
assert self.node.op() is not None, \
"The node operator description can not be None."
self.node.op()._rename_output(old_output_name, new_output_name)
def input(self, name):
"""
Get the argument name list by the parameter name for input.
Args:
name(str): the parameter name.
Returns:
list(str): the argument name list.
"""
assert self.node.op() is not None, \
"The node operator description can not be None."
return self.node.op().input(name)
def output(self, name):
"""
Get the argument name list by the parameter name for output.
Args:
name(str): the parameter name.
Returns:
list(str): the argument name list.
"""
assert self.node.op() is not None, \
"The node operator description can not be None."
return self.node.op().output(name)
def set_type(self, new_type):
"""
Change the operator type into new type.
Args:
new_type(str): new operator type to be set.
"""
assert self.node.op() is not None, \
"The node operator description can not be None."
return self.node.op().set_type(new_type)
def set_attr(self, name, val):
"""
Set the value of attribute by attribute's name.
Args:
name(str): the attribute name.
val(bool|int|str|float|list): the value of the attribute.
"""
self._update_desc_attr(name, val)
def _update_desc_attr(self, name, val):
"""
Update the value of the op desc's attribute by attribute's name.
"""
assert self.node.op() is not None, \
"The node operator description can not be None."
desc = self.node.op()
if isinstance(val, Block):
desc.set_block_attr(name, val.desc)
elif isinstance(val, list) and val and \
all(isinstance(v, Block) for v in val):
desc.set_blocks_attr(name, [v.desc for v in val])
elif isinstance(val, core.BlockDesc) or \
isinstance(val, core.ProgramDesc):
desc.set_serialized_attr(name, val.serialize_to_string())
else:
desc._set_attr(name, val)
def input_arg_names(self):
"""
Return input arguments' names of this op node.
Returns:
list(str): input arguments' names of this op node.
"""
assert self.node.op() is not None, \
"The node operator description can not be None."
return self.node.op().input_arg_names()
def output_arg_names(self):
"""
Return output arguments' names of this op node.
Returns:
list(str): output arguments' names of this op node.
"""
assert self.node.op() is not None, \
"The node operator description can not be None."
return self.node.op().output_arg_names()
@property
def inputs(self):
"""
Return the node inputs.
Returns:
list(IrVarNode): node inputs wrapped by IrVarNode.
"""
return [IrVarNode(n) for n in self.node.inputs]
@property
def outputs(self):
"""
Return the node outputs.
Returns:
list(IrVarNode): node outputs wrapped by IrVarNode.
"""
return [IrVarNode(n) for n in self.node.outputs]
class IrGraph(object):
"""
Python IrGraph. Beneath it is a core.Graph, which is used for
creating a c++ Ir Pass Graph. An IrGraph is just a graph view of
a Program. In an IrGraph, both Variables and Operators are graph
nodes.
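Examples:
A minimal illustrative sketch (not part of the original docstring); it assumes the usual pattern of building the core.Graph from a Program desc, as in Paddle's own IR passes.
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid.framework import IrGraph
program = fluid.Program()
graph = IrGraph(core.Graph(program.desc), for_test=False)
print(len(graph.all_op_nodes()))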
"""
def __init__(self, graph, for_test=False):
"""
Construct an IrGraph using core.Graph.
Args:
graph(core.Graph): C++ Graph.
for_test(bool): True for the test graph and false for the train graph.
"""
assert isinstance(
graph, core.Graph), 'graph must be the instance of core.Graph.'
self.graph = graph
self._for_test = for_test
def clone(self):
"""
Create a new and duplicated IrGraph.
Warns:
The method only clones the graph structure, not its attributes.
Returns:
IrGraph: A new and duplicated graph.
"""
g = self.graph.clone()
return IrGraph(g, self._for_test)
def is_test(self):
"""
If the graph is used for testing, the function returns true. Otherwise, returns false.
"""
return self._for_test
def all_nodes(self):
"""
Return all nodes included in the graph as a set.
"""
return {IrNode(node) for node in self.graph.nodes()}
def all_var_nodes(self):
"""
Return all variable nodes included in the graph as a set.
"""
return {IrVarNode(node) for node in self.graph.nodes() if node.is_var()}
def all_persistable_nodes(self):
"""
Return all persistable variable nodes included in the graph as a set.
"""
persistable_nodes = set()
for node in self.graph.nodes():
if node.is_var() and node.var() is not None and node.var(
).persistable():
persistable_nodes.add(node)
return {IrVarNode(p) for p in persistable_nodes}
def all_op_nodes(self):
"""
Return all operator nodes included in the graph as a set.
"""
return {IrOpNode(node) for node in self.graph.nodes() if node.is_op()}
def create_persistable_node(self, name, var_type, shape, var_dtype):
"""
Create a persistable variable node in the graph. In IrGraph,
it can not distinguish between persistable variables and parameters.
Args:
name(str): the name of the persistable variable node.
            var_type(core.VarDesc.VarType): the type of the persistable variable node.
shape(list): the shape of the persistable variable node.
var_dtype(core.VarDesc.VarType): the data type of the persistable variable node.
Returns:
IrVarNode: the created persistable variable node.
"""
var_desc = core.VarDesc(name)
var_desc.set_type(var_type)
var_desc.set_shape(shape)
var_desc.set_dtype(var_dtype)
var_desc.set_persistable(True)
return IrVarNode(self.graph.create_var_node(var_desc))
def create_var_node(self, name, var_type, shape, var_dtype):
"""
Create a variable node in the graph. The created variable node is
not persistable.
Args:
name(str): the name of the variable node.
            var_type(core.VarDesc.VarType): the type of the variable node.
shape(list): the shape of the variable node.
var_dtype(core.VarDesc.VarType): the data type of the variable node.
Returns:
IrVarNode: the created variable node.
"""
var_desc = core.VarDesc(name)
var_desc.set_type(var_type)
var_desc.set_shape(shape)
var_desc.set_dtype(var_dtype)
return IrVarNode(self.graph.create_var_node(var_desc))
def create_control_dep_var(self):
"""
        Create a control dependency variable node in the graph.
"""
return IrVarNode(self.graph.create_control_dep_var())
def create_var_node_from_desc(self, var_desc):
"""
Create a variable node by using an existing VarDesc in the graph.
        Depending on the given VarDesc, the created variable node may be persistable.
        Args:
            var_desc(core.VarDesc): the given variable description.
Returns:
IrVarNode: the created variable node.
"""
return IrVarNode(self.graph.create_var_node(var_desc))
def create_op_node(self, op_type, attrs, inputs, outputs):
"""
        Create an operator node in the graph.
Args:
op_type(str): the type of the operator node.
attrs(dict): the attributes of the operator node.
inputs(dict): the inputs of the operator node.
outputs(dict): the outputs of the operator node.
Returns:
IrOpNode: the created operator node.
"""
op_desc = core.OpDesc()
op_desc.set_type(op_type)
for attr, value in six.iteritems(attrs):
self._update_desc_attr(op_desc, attr, value)
for input_name, var_nodes in six.iteritems(inputs):
if not isinstance(var_nodes, list):
var_nodes = [var_nodes]
op_desc.set_input(input_name,
[var_node.name() for var_node in var_nodes])
for output_name, var_nodes in six.iteritems(outputs):
if not isinstance(var_nodes, list):
var_nodes = [var_nodes]
op_desc.set_output(output_name,
[var_node.name() for var_node in var_nodes])
return IrOpNode(self.graph.create_op_node(op_desc))
def create_op_node_from_desc(self, op_desc):
"""
        Create an operator node by using an existing OpDesc in the graph.
        Args:
            op_desc(core.OpDesc): the given operator description.
Returns:
IrOpNode: the created operator node.
"""
return IrOpNode(self.graph.create_op_node(op_desc))
def update_input_link(self, old_input_node, new_input_node, op_node):
"""
        Update the input link of an operator node.
        Args:
            old_input_node(IrNode): the old input node of the given op_node.
            new_input_node(IrNode): the new input node of the given op_node.
            op_node(IrOpNode): the operator node whose input link needs to be updated.
"""
assert old_input_node.node in self.graph.nodes() and new_input_node.node in \
self.graph.nodes() and op_node.node in self.graph.nodes(), \
'The three arguments(old_input_node&new_input_node&op_node) must be in the graph nodes.'
old_input_node.remove_output(op_node)
op_node.remove_input(old_input_node)
new_input_node.append_output(op_node)
op_node.append_input(new_input_node)
op_node.rename_input(old_input_node.name(), new_input_node.name())
def update_output_link(self, old_output_node, new_output_node, op_node):
"""
        Update the output link of an operator node.
        Args:
            old_output_node(IrNode): the old output node of the given op_node.
            new_output_node(IrNode): the new output node of the given op_node.
            op_node(IrOpNode): the operator node whose output link needs to be updated.
"""
assert old_output_node.node in self.graph.nodes() and new_output_node.node in \
self.graph.nodes() and op_node.node in self.graph.nodes(), \
            'The three arguments(old_output_node&new_output_node&op_node) must be in the graph nodes.'
old_output_node.remove_input(op_node)
op_node.remove_output(old_output_node)
new_output_node.append_input(op_node)
op_node.append_output(new_output_node)
op_node.rename_output(old_output_node.name(), new_output_node.name())
def link_to(self, node_in, node_out):
"""
Connect two nodes.
Args:
node_in(IrNode): the input node.
node_out(IrNode): the output node.
"""
assert node_in.node in self.graph.nodes() and node_out.node in self.graph.nodes(), \
'The two arguments(node_in&node_out) must be in the graph nodes.'
node_in.append_output(node_out)
node_out.append_input(node_in)
def safe_remove_nodes(self, remove_nodes):
"""
        Remove nodes safely; the links connected to these removed nodes are
        also removed.
Args:
remove_nodes(set): the nodes prepared to be removed.
"""
if not isinstance(remove_nodes, set):
if isinstance(remove_nodes, Iterable):
remove_nodes = set(remove_nodes)
else:
remove_nodes = {remove_nodes}
original_nodes = {n.node for n in remove_nodes}
core.graph_safe_remove_nodes(self.graph, original_nodes)
def resolve_hazard(self):
ordered_nodes = core.topology_sort(self.graph)
var_nodes = dict()
for node in ordered_nodes:
if node.is_op() and node.op() is not None:
for each_var_name in node.op().input_arg_names():
if each_var_name not in var_nodes:
var_nodes[each_var_name] = [
self._find_node_by_name(node.inputs, each_var_name)
]
for each_var_name in node.op().output_arg_names():
if each_var_name not in var_nodes:
var_nodes[each_var_name] = [
self._find_node_by_name(node.outputs, each_var_name)
]
else:
var_nodes[each_var_name].append(
self._find_node_by_name(node.outputs,
each_var_name))
self.graph.resolve_hazard(var_nodes)
def has_circle(self):
"""
Check if the graph has a circle.
Returns:
bool: True if the graph has a circle else False.
"""
return core.has_circle(self.graph)
def graph_num(self):
"""
Count the number of unconnected graphs in this graph.
Returns:
int: the number of unconnected graphs.
"""
return core.graph_num(self.graph)
def topology_sort(self):
"""
Perform the topology sort operation on the graph.
Notes: the `graph` can not contain a circle.
Returns:
list(IrNode): nodes in topology order.
"""
ordered_nodes = core.topology_sort(self.graph)
return [IrNode(n) for n in ordered_nodes]
def build_adjacency_list(self):
"""
Build an adjacency list of operations for the `graph`.
Returns:
dict{IrNode: set(IrNode)}: the adjacency list.
"""
adj_list = core.build_adjacency_list(self.graph)
wrapped_adj_list = dict()
for k, v in six.iteritems(adj_list):
wrapped_adj_list[IrNode(k)] = {IrNode(n) for n in v}
return wrapped_adj_list
def draw(self, save_path, name, marked_nodes=None, remove_ctr_var=True):
"""
        Draw the graph. If the `dot` command is installed, the drawn graph
        will be saved as a PDF file; otherwise a DOT file is produced.
Args:
save_path(str): the save path of drawn graph.
name(str): the name of drawn graph.
marked_nodes(set(IrNode)): nodes that are needed to be marked.
Default value is None.
remove_ctr_var(bool): If it is set True, all control variable nodes
in the graph will be removed. Default value is True.
"""
def _convert_to_pdf(dot_file_path):
pdf_save_path = os.path.splitext(dot_file_path)[0] + '.pdf'
exited_code = subprocess.call('dot -Tpdf ' + dot_file_path \
+ ' -o ' + pdf_save_path, shell=True)
if exited_code != 0:
print('The dot command is needed for creating pdf files.')
print('The {} is saved as the dot filetype.'.format(
dot_file_path))
remove_ctr_vars = set()
if remove_ctr_var:
for node in self.all_var_nodes():
if node.is_ctrl_var():
remove_ctr_vars.add(node)
self.safe_remove_nodes(remove_ctr_vars)
print('Total ops num = {}.'.format(len(self.all_op_nodes())))
if marked_nodes is not None:
if not isinstance(marked_nodes, set):
if isinstance(marked_nodes, Iterable):
marked_nodes = set(marked_nodes)
else:
marked_nodes = {marked_nodes}
marked_nodes = {n.node for n in marked_nodes}
remove_ctr_vars = {n.node for n in remove_ctr_vars}
marked_nodes = marked_nodes - remove_ctr_vars
if self.graph.has('__graphviz__marked_node__'):
self.graph.erase('__graphviz__marked_node__')
self.graph.set('__graphviz__marked_node__', marked_nodes)
if not os.path.exists(save_path):
os.makedirs(save_path)
viz_dot_path = os.path.join(save_path, name) + '.dot'
viz_pass = core.get_pass('graph_viz_pass')
viz_pass.set('graph_viz_path', viz_dot_path)
viz_pass.apply(self.graph)
_convert_to_pdf(viz_dot_path)
def to_program(self):
"""
Convert the graph into a Program.
        WARN: When the graph includes backward operator nodes, the
        conversion process may fail. Usually, this function is
        only used to convert a test graph.
Returns:
Program: a program converted from the graph.
"""
convert_pass = core.get_pass('graph_to_program_pass')
desc = core.ProgramDesc()
convert_pass.set_not_owned('program', desc)
convert_pass.apply(self.graph)
program = Program._construct_from_desc(desc)
return program
def _find_node_by_name(self, nodes, node_name):
"""
        Find a node in the given node set by its name.
"""
target_node = None
for n in nodes:
if n.name() == node_name:
target_node = n
        assert target_node is not None, "Cannot find the target node in the given set."
return target_node
def _update_desc_attr(self, desc, name, val):
"""
Update the value of desc's attribute by attribute's name.
"""
if isinstance(val, Block):
desc.set_block_attr(name, val.desc)
elif isinstance(val, list) and val and all(
isinstance(v, Block) for v in val):
desc.set_blocks_attr(name, [v.desc for v in val])
elif isinstance(val, core.BlockDesc) or \
isinstance(val, core.ProgramDesc):
desc.set_serialized_attr(name, val.serialize_to_string())
else:
desc._set_attr(name, val)
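# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Shows one common way an IrGraph is built from an existing Program and then
# inspected. Constructing the graph via core.Graph(program.desc) follows the
# pattern used by the quantization passes; the helper name and the save path
# are assumptions for demonstration only.
def _example_inspect_ir_graph(program, save_path='/tmp/ir_graph'):
    graph = IrGraph(core.Graph(program.desc), for_test=True)
    print('ops: {}, vars: {}'.format(
        len(graph.all_op_nodes()), len(graph.all_var_nodes())))
    # Walk the op nodes in topological order, e.g. as a custom pass would.
    for node in graph.topology_sort():
        if node.is_op():
            pass  # inspect node.name(), node.op(), ...
    # Dump a graphviz visualization and convert the graph back to a Program.
    graph.draw(save_path, 'example')
    return graph.to_program()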
class Program(object):
"""
    Create a Python Program. It has at least one :ref:`api_guide_Block_en`; when a
    control flow op such as conditional_block or :ref:`api_fluid_layers_While` is
    included, it will contain nested blocks.
    Please reference the
    `framework.proto <https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/framework.proto>`_
    for details.
    A set of Programs usually contains a startup program and a main program.
    The startup program holds the initialization work, e.g. initializing the ``Parameter``,
    while the main program holds the network structure and variables used for training.
    A set of Programs can be used for training or testing. For training, Paddle keeps
    all the content needed to build the training network; for testing, Paddle prunes
    the content that is irrelevant to testing, e.g. backward ops and variables.
    **Notes**:
        **we have** :ref:`api_fluid_default_startup_program` **and** :ref:`api_fluid_default_main_program`
        **by default; the pair shares the parameters. The** :ref:`api_fluid_default_startup_program` **only runs once to initialize parameters,**
        :ref:`api_fluid_default_main_program` **runs in every mini batch and adjusts the weights.**
Returns:
Program: An empty Program.
Examples:
.. code-block:: python
import paddle.fluid as fluid
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program=main_program, startup_program=startup_program):
x = fluid.layers.data(name="x", shape=[-1, 784], dtype='float32')
y = fluid.layers.data(name="y", shape=[-1, 1], dtype='int32')
z = fluid.layers.fc(name="fc", input=x, size=10, act="relu")
print("main program is: {}".format(main_program))
print("start up program is: {}".format(startup_program))
"""
def __init__(self):
self.desc = core.ProgramDesc()
self.blocks = [Block(self, 0)]
self.current_block_idx = 0
global global_prog_seed
self._seed = global_prog_seed
self._current_role = core.op_proto_and_checker_maker.OpRole.Forward
self.__op_role_var = []
# for distribute training
# _is_distributed = True if under distributed training
self._is_distributed = False
# _is_chief = True if the trainer is the first one, usually No.0
self._is_chief = False
# _parameters_on_pservers records all the parameters distributed on parameter servers.
self._parameters_on_pservers = None
# _endpoints is a list about parameter servers ip:port, such as ["ip:port","ip:port"]
self._endpoints = []
# if current role is parameter server, the _ps_endpoint is its "ip:port"
self._ps_endpoint = None
# trainers_endpoints, it is used for distribution.
self._trainers_endpoints = []
# the distributed lookup table names
self._distributed_lookup_table = None
        # use Deep Gradient Compression or not
self._enable_dgc = False
self._use_lamb = False
self._nccl_comm_num = 1
self._use_hierarchical_allreduce = False
self._hierarchical_allreduce_inter_nranks = 0
# if this program has been optimized by distributed optimizer
# fleet_opt will be given a value
self._fleet_opt = None
self._program_config = None
# assigned if this program has been parsed by a pipeline optimizer
self._pipeline_opt = None
# appending gradients times
self._appending_grad_times = 0
# identifier for auto checkpoint
self._auto_checkpoint_name = unique_name.generate(
"__auto_checkpoint_program__")
# compiled program, i.e. Graph
self._graph = None
def global_seed(self, seed=0):
"""
Set global seed for Program
Returns:
None.
Examples:
.. code-block:: python
import paddle.fluid as fluid
prog = fluid.default_main_program()
print(prog.random_seed)
## 0
## the default random seed is 0
prog.global_seed(102)
prog1 = fluid.default_main_program()
print(prog1.random_seed)
## 102
## the random seed is 102
"""
global global_prog_seed
global_prog_seed = seed
self._seed = global_prog_seed
@property
def _op_role(self):
"""
        The operator role. It is an enum {Forward, Backward, Optimize}.
        Notes: this is a low level API. It is used only for ParallelExecutor to
        duplicate or schedule operators to devices.
For example, the forward operator should be executed on every device.
The backward operator should be executed on every device and the
parameter gradient of backward (use :code:`_op_role_var` to get this
variable) operator should be merged to one device. The optimization
operators should be executed on only one device and broadcast the
optimization result, i.e., the new parameter, to every other device.
"""
return self._current_role
@_op_role.setter
def _op_role(self, role):
self._current_role = role
@property
def _op_role_var(self):
"""
The auxiliary variables for :code:`_op_role` property.
See Also: :code:`Program._op_role`'s documentation for details.
Notes: This is a very low-level API. Users should not use it directly.
"""
return self.__op_role_var
@signature_safe_contextmanager
def _backward_role_guard(self):
tmp_role = self._current_role
OpRole = core.op_proto_and_checker_maker.OpRole
self._current_role = OpRole.Backward
try:
yield
finally:
self._current_role = tmp_role
@signature_safe_contextmanager
def _optimized_guard(self, param_and_grads):
"""
A with guard to set :code:`Optimization` :code:`OpRole` and
:code:`OpRoleVar` automatically.
Notes: This is a very low level API. Users should not use it directly.
Args:
param_and_grads(list): The variables (names) to be optimized.
Examples:
>>> import paddle.fluid as fluid
>>> p, g = backward(...)
>>> with program._optimized_guard([p,g]):
>>> p = p - 0.001 * g
"""
tmp_role = self._current_role
tmp_var = self.__op_role_var
OpRole = core.op_proto_and_checker_maker.OpRole
self._current_role = OpRole.Optimize
self.__op_role_var = [
var.name if isinstance(var, Variable) else var
for var in param_and_grads
]
try:
yield
finally:
self.__op_role_var = tmp_var
self._current_role = tmp_role
@signature_safe_contextmanager
def _lr_schedule_guard(self, is_with_opt=False):
"""
A with guard to set :code:`LRSched` :code:`OpRole` and
:code:`OpRoleVar` automatically. The :code:`OpRoleVar` is
set to the target learning rate.
Notes: This is a very low level API. Users should not use it directly.
Args:
            is_with_opt: Only set to true if these ops are in the middle
                of a bunch of optimize ops so that it can be treated
                correctly. For example, sgd->lr_op->sgd->lr_op->sgd.
Examples:
>>> import paddle.fluid as fluid
>>> p, g = backward(...)
            >>> with program._lr_schedule_guard():
>>> lr = lr * decay
"""
tmp_role = self._current_role
tmp_var = self.__op_role_var
OpRole = core.op_proto_and_checker_maker.OpRole
self._current_role = OpRole.LRSched
if is_with_opt:
self._current_role = int(OpRole.LRSched) | int(OpRole.Optimize)
# TODO(typhoonzero): how to set target learning rate var
self.__op_role_var = []
try:
yield
finally:
self.__op_role_var = tmp_var
self._current_role = tmp_role
def __str__(self):
"""
Get the protobuf debug string of this Program.
Returns:
(str): The protobuf debug string.
Raises:
ValueError: If any of required fields is not set.
"""
return self._to_readable_code()
def _to_readable_code(self, skip_op_callstack=True):
"""
Get readable debug string of Program.
.. note::
If you want to get the debug string in protobuf format,
please use :code:`to_string` method.
Args:
skip_op_callstack(bool): whether to skip parsing Operator's attribute
op_callstack, default value is True
Returns:
string: The formatted Program string.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_var = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
new_op = cur_block.append_op(type="abs",
inputs={"X": [new_var]},
outputs={"Out": [new_var]})
print(cur_program._to_readable_code())
"""
        assert isinstance(
            skip_op_callstack, bool
        ), "The type of skip_op_callstack parameter is wrong, expected bool, but received {}.".format(
            type(skip_op_callstack))
program_str = ""
for block in self.blocks:
program_str += block._to_readable_code(skip_op_callstack)
program_str += '\n'
return program_str
def to_string(self, throw_on_error, with_details=False):
"""
To debug string.
Args:
throw_on_error (bool): raise Value error when any of required fields is not set.
            with_details (bool): True if more details about variables and parameters, e.g., :code:`trainable`, :code:`optimize_attr`, need to be printed.
Returns:
str: The debug string describe current Program.
Raises:
ValueError: If any of required fields is not set and throw_on_error is True.
Examples:
.. code-block:: python
import paddle.fluid as fluid
prog = fluid.default_main_program()
x = fluid.layers.data(name="X", shape=[2,3], dtype="float32", append_batch_size=False)
pred = fluid.layers.fc(x, size=3)
prog_string = prog.to_string(throw_on_error=True, with_details=False)
prog_string_with_details = prog.to_string(throw_on_error=False, with_details=True)
print("program string without detail: {}".format(prog_string))
print("program string with detail: {}".format(prog_string_with_details))
"""
assert isinstance(
throw_on_error, bool
), "The type of throw_on_error parameter is wrong, expected bool, but received {}.".format(
type(throw_on_error))
assert isinstance(
with_details, bool
), "The type of with_details parameter is wrong, expected bool, but received {}.".format(
type(with_details))
if with_details:
res_str = ""
for block in self.blocks:
res_str += block.to_string(throw_on_error, with_details)
else:
protostr = self.desc.serialize_to_string()
proto = framework_pb2.ProgramDesc.FromString(
six.binary_type(protostr))
res_str = _debug_string_(proto, throw_on_error)
return res_str
def _get_desc(self):
"""
Get the C++ side of `ProgramDesc` object pointer. The C++ object is
exposed by :code:`pybind`.
Notes: This is a very low level API. Users should not use this API
directly.
"""
return self.desc
def _version(self):
return self.desc._version()
def clone(self, for_test=False):
"""
**Notes**:
**1.** :code:`Program.clone()` **method DOES NOT clone** :ref:`api_fluid_io_DataLoader` .
        **2. We recommend using** :code:`clone` **before using** :code:`Optimizer.minimize`.
**3. This API has no effect in Dygraph Mode**
        Create a new Program with the forward content of the original one when ``for_test=True``.
        Create a new Program that is the same as the original one when ``for_test=False``.
Some operators, e.g., :ref:`api_fluid_layers_batch_norm` , behave differently between
training and testing. They have an attribute, :code:`is_test`, to
control this behaviour. This method will change the :code:`is_test`
attribute of them to :code:`True` when :code:`for_test=True`.
* Set for_test to False when you want to clone the program for training.
* Set for_test to True when you want to clone the program for testing.
        We will prune the backward and optimize parts of the program when you
        use :code:`clone` after :code:`Optimizer.minimize`, but we still
        recommend you use :code:`clone` before calling :code:`Optimizer.minimize`.
For Example:
::
import paddle.fluid as fluid
img = fluid.layers.data(name='image', shape=[784])
pred = fluid.layers.fc(input=img, size=10, act='relu')
loss = fluid.layers.mean(pred)
# Here we use clone before Momentum
test_program = fluid.default_main_program().clone(for_test=True)
optimizer = fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9)
optimizer.minimize(loss)
Args:
            for_test (bool): True to change the :code:`is_test` attribute of operators to :code:`True`
                and prune the backward and optimize parts of the program. The default value is :code:`False` .
Returns:
            Program: A new Program with the forward content of the original one when ``for_test=True``. A new Program that is the same as the original one when ``for_test=False``
Examples:
        **Notes: The Program's order may be different after** :code:`clone` **and
        this will not affect your training or testing progress. In the following
        example we give you a simple method** :code:`print_prog(program)` **to
        print Program Descs in order to make sure you have the same print result
        after** :code:`clone`:
.. code-block:: python
import paddle.fluid as fluid
import six
def print_prog(prog):
for name, value in sorted(six.iteritems(prog.block(0).vars)):
print(value)
for op in prog.block(0).ops:
print("op type is {}".format(op.type))
print("op inputs are {}".format(op.input_arg_names))
print("op outputs are {}".format(op.output_arg_names))
for key, value in sorted(six.iteritems(op.all_attrs())):
if key not in ['op_callstack', 'op_role_var']:
print(" [ attrs: {}: {} ]".format(key, value))
1. To clone a test program, the sample code is:
.. code-block:: python
import paddle.fluid as fluid
import six
def print_prog(prog):
for name, value in sorted(six.iteritems(prog.block(0).vars)):
print(value)
for op in prog.block(0).ops:
print("op type is {}".format(op.type))
print("op inputs are {}".format(op.input_arg_names))
print("op outputs are {}".format(op.output_arg_names))
for key, value in sorted(six.iteritems(op.all_attrs())):
if key not in ['op_callstack', 'op_role_var']:
print(" [ attrs: {}: {} ]".format(key, value))
train_program = fluid.Program()
startup_program = fluid.Program()
# startup_program is used to do some parameter init work,
# and main program is used to hold the network
with fluid.program_guard(train_program, startup_program):
with fluid.unique_name.guard():
img = fluid.layers.data(name='image', shape=[784])
hidden = fluid.layers.fc(input=img, size=200, act='relu')
hidden = fluid.layers.dropout(hidden, dropout_prob=0.5)
loss = fluid.layers.cross_entropy(
input=fluid.layers.fc(hidden, size=10, act='softmax'),
label=fluid.layers.data(name='label', shape=[1], dtype='int64'))
avg_loss = fluid.layers.mean(loss)
test_program = train_program.clone(for_test=True)
print_prog(test_program)
                # Because parameters are shared between train and test, we need to use the
                # startup program of train instead of the test startup program, which is empty.
                # In Paddle Fluid, weights are shared by using the same Variable name: in the train
                # and test programs all parameters have the same name, which makes the two programs
                # share parameters. That's why we use the startup program of train; the startup
                # program of test contains nothing, since it is a new program.
with fluid.program_guard(train_program, startup_program):
with fluid.unique_name.guard():
sgd = fluid.optimizer.SGD(learning_rate=1e-3)
sgd.minimize(avg_loss)
        2. The clone method can be avoided if you create the programs for training and testing individually.
.. code-block:: python
import paddle.fluid as fluid
import six
def print_prog(prog):
for name, value in sorted(six.iteritems(prog.block(0).vars)):
print(value)
for op in prog.block(0).ops:
print("op type is {}".format(op.type))
print("op inputs are {}".format(op.input_arg_names))
print("op outputs are {}".format(op.output_arg_names))
for key, value in sorted(six.iteritems(op.all_attrs())):
if key not in ['op_callstack', 'op_role_var']:
print(" [ attrs: {}: {} ]".format(key, value))
def network():
img = fluid.layers.data(name='image', shape=[784])
hidden = fluid.layers.fc(input=img, size=200, act='relu')
hidden = fluid.layers.dropout(hidden, dropout_prob=0.5)
loss = fluid.layers.cross_entropy(
input=fluid.layers.fc(hidden, size=10, act='softmax'),
label=fluid.layers.data(name='label', shape=[1], dtype='int64'))
avg_loss = fluid.layers.mean(loss)
return avg_loss
train_program_2 = fluid.Program()
startup_program_2 = fluid.Program()
test_program_2 = fluid.Program()
with fluid.program_guard(train_program_2, startup_program_2):
with fluid.unique_name.guard():
avg_loss = network()
sgd = fluid.optimizer.SGD(learning_rate=1e-3)
sgd.minimize(avg_loss)
# the test startup program is not used.
with fluid.program_guard(test_program_2, startup_program_2):
with fluid.unique_name.guard():
avg_loss = network()
print_prog(test_program_2)
        The two code snippets above will generate and print the same programs.
"""
#NOTE(zhiqiu): we sync the original program first, since its program may diff with
# its desc due to modifying desc in c++ space. E.g. save op will add kLookupTablePath in desc.
self._sync_with_cpp()
pruned_origin_block_id_map = None
if for_test:
forward_prog = Program()
forward_prog.desc, pruned_origin_block_id_map = core.prune_backward(
self.desc)
forward_prog.blocks = [
Block(forward_prog, i)
for i in six.moves.range(forward_prog.desc.num_blocks())
]
forward_prog._sync_with_cpp()
p = forward_prog._inference_optimize(prune_read_op=False)
else:
p = Program()
p.current_block_idx = self.current_block_idx
p._seed = self._seed
p.desc = core.ProgramDesc(self.desc)
p.blocks = [
Block(p, i) for i in six.moves.range(self.desc.num_blocks())
]
p._current_role = self._current_role
p.__op_role_var = self.__op_role_var
p._appending_grad_times = self._appending_grad_times
if hasattr(self, 'lr_sheduler'):
p.lr_sheduler = self.lr_sheduler
#NOTE(zhiqiu): we sync the cloned program, to update its program by
# its desc.
p._sync_with_cpp()
p._copy_param_info_from(self)
p._copy_data_info_from(self, pruned_origin_block_id_map)
p._copy_dist_param_info_from(self)
return p
def _prune(self, targets):
"""
Prune operators and variables which are not needed to generate
:code:`targets`.
Notes: This is a very low level API. Users should not use this API
directly. This API is in flux and not stable.
Args:
            targets(list|Variable|Operator): A list of variables, operators, or variable names
                that the pruned program needs to generate
Returns:
Program: A new, pruned program.
"""
return self._prune_with_input([], targets)
def _prune_with_input(self, feeded_var_names, targets):
"""
        Prune operators and variables which are not needed to generate
        :code:`targets`. Operators and variables that are needed only to
        generate the fed variables are pruned as well.
Notes: This is a very low level API. Users should not use this API
directly. This API is in flux and not stable.
Args:
feeded_var_names(list|str): A list of variable names from where
pruning start. If it is set as [], this API works just like _prune()
            targets(list|Variable|Operator): A list of variables, operators, or variable names
                that the pruned program needs to generate
Returns:
Program: A new, pruned program.
"""
#NOTE(zhiqiu): we sync the original program first, since its program may diff with
# its desc due to modifying desc in c++ space. E.g. save op will add kLookupTablePath in desc.
self._sync_with_cpp()
if not isinstance(feeded_var_names, list):
feeded_var_names = [feeded_var_names]
if not isinstance(targets, list):
targets = [targets]
for var in feeded_var_names:
if not isinstance(var, six.string_types):
raise ValueError(
"All feeded_var_names of Program._prune_with_input() can only be "
"str, but received %s." % type(var))
targets_idx = []
for t in targets:
if not isinstance(t, Operator):
if isinstance(t, Variable):
name = t.name
elif isinstance(t, six.string_types):
name = str(t)
else:
raise ValueError(
"All targets of Program._prune_with_input() can only be "
"Variable or Operator, but received %s." % type(t))
                # NOTE(zhiqiu): For a variable to be fed in fetch_list, there are two cases:
                # (1) the variable is a leaf, and no op generates it;
                # (2) the variable is not a leaf, and we need to prune the op that generates it.
                # In both cases, we can simply skip its target_op.
if name in feeded_var_names:
continue
# After transpiler processing, the op that output this
# variable maybe has been changed, so t.op is not reliable
# and we need to find the current op that generate this
# variable here.
target_op = None
global_block = self.global_block()
for idx, op in enumerate(global_block.ops):
if name in op.output_arg_names:
# NOTE(zhiqiu): Find op that generate target name.
# Skip optimize op except for optimize op in targets,
# since optimize op generates parameters.
if op._is_optimize_op() and op not in targets:
continue
else:
target_op = op
break
if target_op is None:
raise ValueError(
"The target variable used for pruning should have an "
"associated operator that generates it.")
else:
targets_idx.append([target_op.block.idx, target_op.idx])
else:
targets_idx.append([t.block.idx, t.idx])
res = Program()
res.desc, pruned_origin_block_id_map = core.prune(self.desc,
set(feeded_var_names),
targets_idx)
res.blocks = [
Block(res, i) for i in six.moves.range(res.desc.num_blocks())
]
res._sync_with_cpp()
res._copy_param_info_from(self)
res._copy_data_info_from(self, pruned_origin_block_id_map)
res._copy_dist_param_info_from(self)
return res
def _inference_optimize(self, prune_read_op=True):
"""
This method will create a new program and do following adjustments on it:
        1. Remove all reader variables and their creator ops if they exist.
        2. Remove the :code:`read_op` if it exists.
        3. Change the :code:`is_test`
        attribute of operators to :code:`True`. All the :code:`Parameter`
        information will be lost.
Args:
prune_read_op(bool): remove the read ops that are added by py_reader
for cpp inference library
Notes: This API is a very low level API. Use
:code:`Program.clone(for_test=True)` instead.
Returns:
Program: The new program.
"""
res = Program()
res.desc = core.ProgramDesc(self.desc)
# remove all readers and the read_op if exist
read_op_idx = 0
root_block = res.desc.block(0)
if prune_read_op:
while True:
if read_op_idx >= root_block.op_size() or root_block.op(
read_op_idx).type() == 'read':
break
read_op_idx += 1
if read_op_idx < root_block.op_size():
root_block._remove_op(0, read_op_idx + 1)
for var in root_block.all_vars():
if var.type() == core.VarDesc.VarType.READER:
root_block._remove_var(cpt.to_bytes(var.name()))
# change all `is_test` attributes to True
for i in six.moves.range(res.desc.num_blocks()):
block = res.desc.block(i)
for j in six.moves.range(block.op_size()):
op = block.op(j)
if op.has_attr('is_test'):
op._set_attr('is_test', True)
res.blocks = [
Block(res, i) for i in six.moves.range(res.desc.num_blocks())
]
res._sync_with_cpp()
return res
@staticmethod
def parse_from_string(binary_str):
"""
**Notes**:
**1. All information about parameters will be lost after serialization**
**2. This API has no effect in Dygraph mode**
        Deserialize a Program from a `protobuf <https://en.wikipedia.org/wiki/Protocol_Buffers>`_ binary string.
        This method is usually used when saving and loading models.
        Args:
            binary_str (str): the binary protobuf string.
Returns:
Program: A deserialized Program.
Examples:
.. code-block:: python
import paddle.fluid as fluid
startup_prog = fluid.Program()
main_prog = fluid.Program()
with fluid.program_guard(startup_prog, main_prog):
x = fluid.layers.data(
name='X', shape=[1000, 784], dtype='float32', append_batch_size=False)
y = fluid.layers.data(
name='Y', shape=[784, 100], dtype='float32', append_batch_size=False)
z = fluid.layers.mul(x=x, y=y)
binary_str = fluid.default_main_program().desc.serialize_to_string()
prog_restored = fluid.default_main_program().parse_from_string(binary_str)
print(fluid.default_main_program())
print(prog_restored)
"""
p = Program()
p.desc = core.ProgramDesc(binary_str)
p.blocks = [Block(p, i) for i in six.moves.range(p.desc.num_blocks())]
p._sync_with_cpp()
return p
@staticmethod
def _construct_from_desc(desc):
"""
Construct a program from program desc.
Args:
desc(core.ProgramDesc): The program desc for constructing.
Returns:
Program: A program.
"""
p = Program()
p.desc = desc
p.blocks = [Block(p, i) for i in six.moves.range(p.desc.num_blocks())]
p._sync_with_cpp()
return p
@property
def random_seed(self):
"""
The default random seed for random operators in Program. ``0`` means get
the random seed from random device.
**Notes: It must be set before the operators have been added.**
Returns:
int64: Random seed in current Program
Examples:
.. code-block:: python
import paddle.fluid as fluid
prog = fluid.default_main_program()
random_seed = prog.random_seed
x_var = fluid.layers.data(name="X", shape=[3,3], dtype="float32", append_batch_size=False)
print(random_seed)
## 0
## the default random seed is 0
# Here we need to set random seed before we use fluid.layers.dropout
prog.random_seed = 1
z_var = fluid.layers.dropout(x_var, 0.7)
print(prog.random_seed)
## 1
                ## the random seed is changed to 1
"""
return self._seed
@property
def num_blocks(self):
"""
The number of :ref:`api_guide_Block_en` in this Program.
**Notes: This API has no effect in Dygraph mode**
Returns:
            int: the number of :ref:`api_guide_Block_en` in the current Program
Examples:
.. code-block:: python
import paddle.fluid as fluid
prog = fluid.default_main_program()
num_blocks = prog.num_blocks
print(num_blocks)
"""
return self.desc.num_blocks()
@random_seed.setter
def random_seed(self, seed):
if not isinstance(seed, int):
raise ValueError(
"Program.random_seed's input seed must be an integer, but received %s."
% type(seed))
self._seed = seed
def __repr__(self):
return self.__str__()
def global_block(self):
"""
**Notes**:
**This API has no effect in Dygraph mode**
Get the first :ref:`api_guide_Block_en` of this Program.
Returns:
:ref:`api_guide_Block_en`: The first :ref:`api_guide_Block_en` of this Program.
Examples:
.. code-block:: python
import paddle.fluid as fluid
prog = fluid.default_main_program()
gb_block = prog.global_block()
print(gb_block)
"""
return self.blocks[0]
def block(self, index):
"""
**Notes**:
**This API has no effect in Dygraph mode**
Get the :code:`index` :ref:`api_guide_Block_en` of this Program
Args:
index (int) - The index of :ref:`api_guide_Block_en` to get
Returns:
:ref:`api_guide_Block_en`: The :code:`index` block
Examples:
.. code-block:: python
import paddle.fluid as fluid
prog = fluid.default_main_program()
block_0 = prog.block(0)
print(block_0)
"""
return self.blocks[index]
def current_block(self):
"""
**Notes**:
**This API has no effect in Dygraph mode**
        Get the current :ref:`api_guide_Block_en` . The :code:`current` :ref:`api_guide_Block_en`
        is the :ref:`api_guide_Block_en` to which new operators are appended.
        Returns:
            :ref:`api_guide_Block_en`: The current :ref:`api_guide_Block_en`
Examples:
.. code-block:: python
import paddle.fluid as fluid
prog = fluid.default_main_program()
current_blk = prog.current_block()
print(current_blk)
"""
return self.blocks[self.current_block_idx]
def _create_block(self, parent_idx=None):
"""
Create a new block with the :code:`parent_idx` and change the current block
to new block.
Args:
parent_idx(int): The parent block index.
Returns:
Block: The new block.
"""
new_block_idx = len(self.blocks)
parent = self.current_block() if parent_idx is None else self.block(
parent_idx)
self.desc.append_block(parent.desc)
self.current_block_idx = new_block_idx
self.blocks.append(Block(self, self.current_block_idx))
return self.current_block()
def _rollback(self):
"""
Exit a code block, i.e., roll back to the parent block.
Returns:
None
"""
self.current_block_idx = self.current_block().parent_idx
def _sync_with_cpp(self):
"""
Synchronize Python instance to its binding C++ object instance.
If the program is modified in C++ space, this method should be invoked.
Notes: This is a very low level API. Users should not invoke it
directly.
Returns:
None
"""
for block_idx in range(len(self.blocks), self.desc.num_blocks()):
self.blocks.append(Block(self, block_idx))
for block in self.blocks:
block._sync_with_cpp()
def _copy_param_info_from(self, other):
"""
Copy the information of parameters from other program.
Notes: This is a very low level API. Users should not invoke it
directly.
Args:
other(Program): Other program
Returns:
None
"""
if not isinstance(other, Program):
raise TypeError(
"Function Program._copy_param_info_from() needs to pass in a source Program, but received %s"
% type(other))
self.global_block()._copy_param_info_from(other.global_block())
def _copy_dist_param_info_from(self, other):
"""
        Copy the distributed training information from another program.
Args:
other(Program): Other program
Returns:
None
"""
if not isinstance(other, Program):
raise TypeError(
"Function Program._copy_param_info_from() needs to pass in a source Program, but received %s"
% type(other))
self._is_distributed = other._is_distributed
self._is_chief = other._is_chief
self._parameters_on_pservers = other._parameters_on_pservers
self._endpoints = other._endpoints
self._ps_endpoint = other._ps_endpoint
self._distributed_lookup_table = other._distributed_lookup_table
def _copy_data_info_from(self, other, pruned_origin_block_id_map=None):
"""
Copy the information of data variables from other program.
Notes: This is a very low level API. Users should not invoke it
directly.
Args:
other(Program): Other program
pruned_origin_block_id_map(dict{int:int}): A dict which maps the block id in program
self to the block id in program other. For example, {0:0, 1:1, 2:3} means block 0 in self is
cloned from block 0 in other, etc. Default is None, which means default mapped,
{0:0, 1:1,..., n:n}.
Returns:
None
"""
if not isinstance(other, Program):
raise TypeError(
"Function Program._copy_param_info_from() needs to pass in a source Program, but received %s"
% type(other))
if not pruned_origin_block_id_map:
pruned_origin_block_id_map = {
i: i
for i in six.moves.range(self.desc.num_blocks())
}
# NOTE(zhiqiu): All vars in cloned program exist in original program.
# The reverse is not true, due to backward pruning.
for i, block in enumerate(self.blocks):
other_block = other.blocks[pruned_origin_block_id_map[i]]
for var in list(block.vars.values()):
other_var = other_block.var(var.name)
if other_var.is_data:
var.is_data = True
if other_var.desc.need_check_feed():
var.desc.set_need_check_feed(True)
if other_var.stop_gradient:
var.stop_gradient = True
def list_vars(self):
"""
        Get all :ref:`api_guide_Variable_en` from this Program. An iterable object is returned.
Returns:
iterable :ref:`api_guide_Variable_en`: The Generator will yield every variable in this program.
Examples:
.. code-block:: python
import paddle.fluid as fluid
prog = fluid.default_main_program()
img = fluid.layers.data(name='img', shape=[1,28,28], dtype='float32')
label = fluid.layers.data(name='label', shape=[128,1], dtype='int64')
for var in prog.list_vars():
print(var)
"""
for each_block in self.blocks:
for each_var in list(each_block.vars.values()):
yield each_var
def all_parameters(self):
"""
Get all :ref:`api_guide_parameter_en` from this Program. A list object is returned.
Returns:
            list[ :ref:`api_guide_parameter_en` ]: The list contains all parameters in this program.
Examples:
.. code-block:: python
import paddle.fluid as fluid
program = fluid.default_main_program()
data = fluid.data(name='x', shape=[None, 13], dtype='float32')
hidden = fluid.layers.fc(input=data, size=10)
loss = fluid.layers.mean(hidden)
fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)
for param in program.all_parameters():
print(param)
# Here will print all parameters in current program, in this example,
# the result is like:
#
# name: "fc_0.w_0"
# type {
# type: LOD_TENSOR
# lod_tensor {
# tensor {
# data_type: FP32
# dims: 13
# dims: 10
# }
# }
# }
# persistable: true
#
# name: "fc_0.b_0"
# type {
# type: LOD_TENSOR
# lod_tensor {
# tensor {
# data_type: FP32
# dims: 10
# }
# }
# }
# persistable: true
#
# Here print(param) will print out all the properties of a parameter,
# including name, type and persistable, you can access to specific
# property of a parameter, such as param.name, param.type
"""
parameters = []
for each_block in self.blocks:
parameters.extend(each_block.all_parameters())
return parameters
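# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Demonstrates how the private block-management helpers pair up when a nested
# block is created, mirroring what control-flow layers do internally. The
# helper name is an assumption for demonstration only; _create_block() and
# _rollback() are low-level APIs and normally not called by users directly.
def _example_nested_block(program):
    parent = program.current_block()
    child = program._create_block()   # switch the current block to a new child
    assert child.parent_idx == parent.idx
    program._rollback()               # return to the parent block
    assert program.current_block() is parent
    return program.num_blocks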
@six.add_metaclass(ParameterMetaClass)
class Parameter(Variable):
"""
Parameter is derived from Variable. A parameter is a persistable
Variable, and will be updated by optimizers after each iteration.
The training of a neural network is essentially the updating of
its parameters.
    Relative to a general Variable, a Parameter has several member
    variables of its own:
    Args:
        trainable(bool): True if the parameter needs to be updated after
            iterations.
optimize_attr(map): Parameter attributes related with optimizing.
Currently, it only contains 'learning_rate'.
Default: {'learning_rate': 1.0}
regularizer(WeightDecayRegularizer): The Regularizer which will
be applied on the parameter. Default: None
do_model_average(bool): True if the model average strategy will
be applied on this parameter.
"""
def __init__(self,
block,
shape,
dtype,
type=core.VarDesc.VarType.LOD_TENSOR,
**kwargs):
if shape is None:
raise ValueError("The shape of Parameter should not be None")
if dtype is None:
raise ValueError("The dtype of Parameter should not be None")
if len(shape) == 0:
raise ValueError(
"The dimensions of shape for Parameter must be greater than 0")
for each in shape:
if each < 0:
raise ValueError(
"Each dimension of shape for Parameter must be greater than 0, but received %s"
% list(shape))
Variable.__init__(
self,
block,
persistable=True,
shape=shape,
dtype=dtype,
type=type,
**kwargs)
self.trainable = kwargs.get('trainable', True)
self.optimize_attr = kwargs.get('optimize_attr', {'learning_rate': 1.0})
self.regularizer = kwargs.get('regularizer', None)
self.do_model_average = kwargs.get('do_model_average', None)
self.is_distributed = False
def __str__(self):
return self._to_readable_code()
def to_string(self, throw_on_error, with_details=False):
"""
To debug string.
Args:
throw_on_error(bool): raise exception when self is not initialized
when throw_on_error is True
with_details(bool): more details about variables and parameters
(e.g. trainable, optimize_attr, ...) will be printed when with_details is True
Returns(str): The debug string.
Examples:
.. code-block:: python
import paddle.fluid as fluid
prog = fluid.default_main_program()
rlt = fluid.layers.data("fake_data", shape=[1,1], dtype='float32')
debug_str = prog.to_string(throw_on_error=True, with_details=False)
print(debug_str)
"""
assert isinstance(throw_on_error, bool) and isinstance(with_details,
bool)
if with_details:
res_str = Variable.to_string(self, throw_on_error, True)
additional_attr = ("trainable", "optimize_attr", "regularizer",
"do_model_average")
for attr_name in additional_attr:
res_str += "%s: %s\n" % (attr_name,
cpt.to_text(getattr(self, attr_name)))
else:
res_str = Variable.to_string(self, throw_on_error, False)
return res_str
__repr__ = __str__
class ParamBase(core.VarBase):
"""
    ParamBase is derived from Tensor (which is the concept in Dygraph Mode).
A ParamBase is a persistable Tensor, and will be updated by optimizers
after each iteration.
The training of a neural network is essentially the updating of
its ParamBase.
    Relative to a general Tensor, a ParamBase has several member
    variables of its own:
    Args:
        trainable(bool): True if the ParamBase needs to be updated after
            iterations.
optimize_attr(map): ParamBase attributes related with optimizing.
Currently, it only contains 'learning_rate'.
Default: {'learning_rate': 1.0}
regularizer(WeightDecayRegularizer): The Regularizer which will
be applied on the ParamBase. Default: None
do_model_average(bool): True if the model average strategy will
be applied on this ParamBase.
"""
@dygraph_only
def __init__(self, shape, dtype, **kwargs):
if shape is None:
raise ValueError("The shape of Parameter should not be None")
if dtype is None:
raise ValueError("The dtype of Parameter should not be None")
if len(shape) == 0:
raise ValueError(
"The dimensions of shape for Parameter must be greater than 0")
for each in shape:
if each < 0:
raise ValueError(
"Each dimension of shape for Parameter must be greater than 0, but received %s"
% list(shape))
if dtype is not None:
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
name = kwargs.get('name', unique_name.generate('_param_base'))
super(ParamBase, self).__init__(dtype
if dtype else core.VarDesc.VarType.FP32,
list(shape) if shape else [], name,
core.VarDesc.VarType.LOD_TENSOR, True)
trainable = kwargs.get('trainable', True)
self.stop_gradient = not trainable
self.optimize_attr = kwargs.get('optimize_attr', {'learning_rate': 1.0})
self.regularizer = kwargs.get('regularizer', None)
self.do_model_average = kwargs.get('do_model_average', None)
self.is_distributed = False
# self.block = default_main_program().global_block()
@property
def trainable(self):
return not self.stop_gradient
@trainable.setter
def trainable(self, trainable):
if isinstance(trainable, bool):
self.stop_gradient = not trainable
else:
raise ValueError(
"The type of trainable MUST be bool, but the type is ",
type(trainable))
def __str__(self):
"""
Convert a ParamBase object to a readable string.
Returns(str): A readable string.
Examples:
.. code-block:: python
import paddle
paddle.disable_static()
conv = paddle.nn.Conv2D(3, 3, 5)
print(conv.weight)
# Parameter: conv2d_0.w_0
# - place: CUDAPlace(0)
# - shape: [3, 3, 5, 5]
# - layout: NCHW
# - dtype: float
# - data: [...]
paddle.enable_static()
"""
return "Parameter containing:\n {}\n - stop_gradient: {}".format(
super(ParamBase, self).__str__(), self.stop_gradient)
__repr__ = __str__
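# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Shows how ParamBase.trainable mirrors the underlying stop_gradient flag.
# It assumes dygraph mode has been enabled by the caller (ParamBase is
# @dygraph_only); the shape, dtype and name are made up for demonstration.
def _example_param_trainable():
    param = ParamBase(shape=[2, 3], dtype='float32', name='example_w_0')
    param.trainable = False           # equivalent to param.stop_gradient = True
    assert param.stop_gradient is True
    param.trainable = True            # re-enable gradient tracking
    assert param.stop_gradient is False
    return param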
# program is a global instance.
_main_program_ = Program()
_startup_program_ = Program()
def default_startup_program():
"""
Get default/global startup program.
    The layer functions in :ref:`api_fluid_layers` create parameters, :ref:`api_paddle_data_reader_reader` ,
    and `NCCL <https://developer.nvidia.com/nccl>`_ handles as global variables. The :code:`startup_program`
    initializes them by running the OPs in the startup :ref:`api_fluid_Program` . The :ref:`api_fluid_layers`
    functions append these initialization operators to the startup program.
    This method will return the :code:`default` or the :code:`current` startup
    program. Users can use :ref:`api_fluid_program_guard` to switch :ref:`api_fluid_Program` .
    Returns: current default startup :ref:`api_fluid_Program`
    Returns type: :ref:`api_fluid_Program`
Examples:
.. code-block:: python
import paddle.fluid as fluid
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program=main_program, startup_program=startup_program):
x = fluid.layers.data(name="x", shape=[-1, 784], dtype='float32')
y = fluid.layers.data(name="y", shape=[-1, 1], dtype='int32')
z = fluid.layers.fc(name="fc", input=x, size=10, act="relu")
print("main program is: {}".format(fluid.default_main_program()))
print("start up program is: {}".format(fluid.default_startup_program()))
"""
return _startup_program_
def default_main_program():
"""
    This API can be used to get the ``default main program``, which stores the
    descriptions of ``op`` and ``variable``.
For example ``z = fluid.layers.elementwise_add(x, y)`` will create a new ``elementwise_add``
``op`` and a new ``z`` ``variable``, and they will be recorded in ``default main program``
The ``default_main_program`` is the default value for ``Program`` parameter in
a lot of ``fluid`` APIs. For example, the :code:`Executor.run()` will execute the
:code:`default_main_program` when the program is not specified.
If you want to replace the ``default main program``, you can use :ref:`api_fluid_program_guard`
Returns:
:ref:`api_fluid_Program`: a ``Program`` which holding the descriptions of ops and variables in the network.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# Sample Network:
data = fluid.data(name='image', shape=[None, 3, 224, 224], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
conv1 = fluid.layers.conv2d(data, 4, 5, 1, act=None)
bn1 = fluid.layers.batch_norm(conv1, act='relu')
pool1 = fluid.layers.pool2d(bn1, 2, 'max', 2)
conv2 = fluid.layers.conv2d(pool1, 16, 5, 1, act=None)
bn2 = fluid.layers.batch_norm(conv2, act='relu')
pool2 = fluid.layers.pool2d(bn2, 2, 'max', 2)
fc1 = fluid.layers.fc(pool2, size=50, act='relu')
fc2 = fluid.layers.fc(fc1, size=102, act='softmax')
loss = fluid.layers.cross_entropy(input=fc2, label=label)
loss = fluid.layers.mean(loss)
opt = fluid.optimizer.Momentum(
learning_rate=0.1,
momentum=0.9,
regularization=fluid.regularizer.L2Decay(1e-4))
opt.minimize(loss)
#print the number of blocks in the program, 1 in this case
print(fluid.default_main_program().num_blocks)
#print the description of variable 'image'
print(fluid.default_main_program().blocks[0].var('image'))
"""
return _main_program_
def switch_main_program(program):
"""
Switch the main program to a new program.
Args:
program(Program): The new main program
Returns:
Program: The previous main program
"""
global _main_program_
prev_program = _main_program_
_main_program_ = program
return prev_program
def switch_startup_program(program):
"""
Switch the startup program to a new program
Args:
program(Program): The new startup program
Returns:
Program: The previous startup program
"""
global _startup_program_
prev_program = _startup_program_
_startup_program_ = program
return prev_program
@signature_safe_contextmanager
def program_guard(main_program, startup_program=None):
"""
:api_attr: Static Graph
Change the global main program and startup program with `"with"` statement.
Layer functions in the Python `"with"` block will append operators and
variables to the new main programs.
Args:
main_program(Program): New main program inside `"with"` statement.
startup_program(Program, optional): New startup program inside `"with"`
statement. :code:`None` means not changing startup program,
default_startup_program is still used.
Default: None.
Examples:
.. code-block:: python
import paddle.fluid as fluid
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program, startup_program):
data = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
hidden = fluid.layers.fc(input=data, size=10, act='relu')
    Notes: A temporary :code:`Program` can be used if the user does not need
    to construct either the startup program or the main program.
Examples:
.. code-block:: python
import paddle.fluid as fluid
main_program = fluid.Program()
# does not care about startup program. Just pass a temporary value.
with fluid.program_guard(main_program, fluid.Program()):
data = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
"""
from .data_feeder import check_type
check_type(main_program, 'main_program', Program, 'fluid.program_guard')
main_program = switch_main_program(main_program)
if startup_program is not None:
check_type(startup_program, 'startup_program', Program,
'fluid.program_guard')
startup_program = switch_startup_program(startup_program)
try:
yield
finally:
switch_main_program(main_program)
if startup_program is not None:
switch_startup_program(startup_program)
def _get_var(name, program=None):
"""
Get a variable by name from the global block of a program.
Args:
name(str): name of the variable
program(Program|None): program object.
If None, default_global_program() will be used.
Returns:
Variable
"""
if program is None:
program = default_main_program()
assert isinstance(name, str)
assert isinstance(program, Program)
return program.global_block().var(name)
@signature_safe_contextmanager
def _dygraph_guard(tracer):
global _dygraph_tracer_
tmp_trace = _dygraph_tracer_
_dygraph_tracer_ = tracer
core._switch_tracer(tracer)
try:
yield
finally:
core._switch_tracer(tmp_trace)
_dygraph_tracer_ = tmp_trace
@signature_safe_contextmanager
def _dygraph_place_guard(place):
global _global_expected_place_
tmp_place = _global_expected_place_
_global_expected_place_ = place
try:
yield
finally:
_global_expected_place_ = tmp_place
def load_op_library(lib_filename):
"""
:api_attr: Static Graph
Load a dynamic library, including custom operators and kernels.
When library is loaded, ops and kernels registered in the library
will be available in PaddlePaddle main process.
    Please note that custom operators can't have the same type as the
    existing operators in the framework.
Args:
lib_filename (str): name of dynamic library.
Examples:
.. code-block:: python
import paddle.fluid as fluid
#fluid.load_op_library('custom_op.so')
"""
core.load_op_library(lib_filename)
OpProtoHolder.instance().update_op_proto()
def switch_device(device):
global _current_device
pre_device = _current_device
_current_device = device
return pre_device
@signature_safe_contextmanager
def device_guard(device=None):
"""
**Notes**:
**The API only supports static mode.**
A context manager that specifies the device on which the OP will be placed.
Args:
        device(str|None): Specify the device to use in the context. It should be 'cpu' or 'gpu'.
            When it is set to 'cpu' or 'gpu', all OPs created in the context will be
placed on CPUPlace or CUDAPlace. When 'gpu' is set and the program runs on
single-card, the device index will be the same as the device on which the
executor runs. Default: None, OPs in this context will be automatically
assigned devices.
Examples:
.. code-block:: python
import paddle.fluid as fluid
support_gpu = fluid.is_compiled_with_cuda()
place = fluid.CPUPlace()
if support_gpu:
place = fluid.CUDAPlace(0)
# if GPU is supported, the three OPs below will be automatically assigned to CUDAPlace(0)
data1 = fluid.layers.fill_constant(shape=[1, 3, 8, 8], value=0.5, dtype='float32')
data2 = fluid.layers.fill_constant(shape=[1, 3, 5, 5], value=0.5, dtype='float32')
shape = fluid.layers.shape(data2)
with fluid.device_guard("cpu"):
# Ops created here will be placed on CPUPlace
shape = fluid.layers.slice(shape, axes=[0], starts=[0], ends=[4])
with fluid.device_guard('gpu'):
# if GPU is supported, OPs created here will be placed on CUDAPlace(0), otherwise on CPUPlace
out = fluid.layers.crop_tensor(data1, shape=shape)
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
result = exe.run(fetch_list=[out])
"""
index = None
if device and ':' in device:
device, index = device.split(':')
if device == 'cpu':
raise ValueError("Should not set device id for cpu.")
if device not in ['cpu', 'gpu', '', None]:
raise ValueError(
"The Attr(device) should be 'cpu' or 'gpu', and it can also be empty string or None "
"when there is no need to specify device. But received %s" % device)
if index:
device = ":".join([device, index])
pre_device = switch_device(device)
try:
yield
finally:
switch_device(pre_device)
def set_flags(flags):
"""
This function sets the GFlags value in Paddle.
Args:
flags (dict): A dict contains flags and its value.
Examples:
.. code-block:: python
import paddle.fluid as fluid
fluid.set_flags({'FLAGS_eager_delete_tensor_gb': 1.0})
"""
if not isinstance(flags, dict):
raise TypeError('flags in set_flags should be a dict')
for key, value in flags.items():
if core.globals().is_public(key):
core.globals()[key] = value
else:
raise ValueError(
"Flag %s cannot set its value through this function." % (key))
def get_flags(flags):
"""
This function gets the GFlags value in Paddle.
Args:
flags(list|tuple|str): A list/tuple of string or a string which is the flag's name.
Returns:
flag's value in Paddle.
Examples:
.. code-block:: python
import paddle.fluid as fluid
flags = ['FLAGS_eager_delete_tensor_gb', 'FLAGS_check_nan_inf']
res = fluid.get_flags(flags)
print(res)
# {'FLAGS_eager_delete_tensor_gb': 0.0, 'FLAGS_check_nan_inf': False}
"""
flags_value = {}
if isinstance(flags, (list, tuple)):
for key in flags:
if (core.globals().is_public(key)):
value = core.globals()[key]
temp = {key: value}
flags_value.update(temp)
else:
raise ValueError(
'Flag %s cannot get its value through this function.' %
(key))
elif isinstance(flags, str):
if (core.globals().is_public(flags)):
value = core.globals()[flags]
temp = {flags: value}
flags_value.update(temp)
else:
raise ValueError(
'Flag %s cannot get its value through this function.' % (flags))
else:
raise TypeError('Flags in get_flags should be a list, tuple or string.')
return flags_value
| [] | [] | [
"FLAGS_selected_gpus",
"CPU_NUM"
] | [] | ["FLAGS_selected_gpus", "CPU_NUM"] | python | 2 | 0 | |
request_handler/appconfig.py | #!/usr/bin/env python
# coding: utf-8
import logging.config
import os
# Database configuration
DB_CONFIG = {
'username': 'root',
'password': os.environ.get('MYSQL_TRADING_PASS'),
'host': '127.0.0.1',
'dbname': 'trading_db',
}
# Logging configuration
LOGGING = {
'version': 1,
    'formatters': { # Message formatting
'main': {
'format': '[%(asctime)s] %(levelname)s %(module)s %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S'
},
},
    'handlers': { # Message handlers
'file_handler': {
'class': 'logging.FileHandler',
'filename': '/tmp/trading.log',
'formatter': 'main',
},
'streamlogger': {
'class': 'logging.StreamHandler',
'formatter': 'main',
},
},
    'loggers': { # Loggers
'prod_logger': {
'handlers': ['file_handler', 'streamlogger'],
'level': 'INFO',
},
'devel_logger': {
'handlers': ['file_handler', 'streamlogger'],
'level': 'DEBUG',
},
},
}
logging.config.dictConfig(LOGGING)
# Base configuration
class Config(object):
DEBUG = False
CSRF_ENABLED = True
SQLALCHEMY_DATABASE_URI = f"mysql+pymysql://{DB_CONFIG['username']}:{DB_CONFIG['password']}" \
f"@{DB_CONFIG['host']}/{DB_CONFIG['dbname']}?charset=utf8"
SQLALCHEMY_TRACK_MODIFICATIONS = False
LOGGER_NAME = 'devel_logger'
MAIL_SERVER = 'smtp.yandex.com'
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USE_TSL = False
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
MAIL_DEFAULT_SENDER = os.environ.get('MAIL_USERNAME')
CELERY_BROKER_URL = 'redis://0.0.0.0:6379/'
CELERY_RESULT_BACKEND = 'redis://0.0.0.0:6379/'
CELERY_DEFAULT_QUEUE = 'request_handler_queue'
# Production configuration
class ProductionConfig(Config):
DEBUG = False
LOGGER_NAME = 'prod_logger'
# Development configuration
class DevelopmentConfig(Config):
DEVELOPMENT = True
DEBUG = True
LOGGER_NAME = 'devel_logger'
# Test configuration
class TestConfig(Config):
DEBUG = True
TESTING = True
WTF_CSRF_ENABLED = False
LOGGER_NAME = 'devel_logger'
test_db_name = "test_trading_db"
SQLALCHEMY_DATABASE_URI = f"mysql+pymysql://{DB_CONFIG['username']}:{DB_CONFIG['password']}" \
f"@{DB_CONFIG['host']}/{test_db_name}?charset=utf8"
# Current configuration
# --------------------------------------------------
_currentConfig = DevelopmentConfig
def getConfig():
return _currentConfig
def setConfig(config):
global _currentConfig
_currentConfig = config
# --------------------------------------------------
# Size of the data chunk loaded into the database
chunkSize = 30000
| [] | [] | [
"MYSQL_TRADING_PASS",
"MAIL_USERNAME",
"MAIL_PASSWORD"
] | [] | ["MYSQL_TRADING_PASS", "MAIL_USERNAME", "MAIL_PASSWORD"] | python | 3 | 0 | |
scripts/aws/create-kubernetes-cluster.go | package main
import (
"bytes"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
"runtime"
"strings"
"time"
)
import (
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudformation"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/eks"
"github.com/aws/aws-sdk-go/service/iam"
)
type OutputsMap struct {
SecurityGroups *string
VpcId *string
SubnetIds *string
}
var _, currentFilePath, _, _ = runtime.Caller(0)
var currentPath = path.Dir(currentFilePath)
func strp(str string) *string{
return &str
}
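// checkError treats "already exists"/"in use" style AWS errors as warnings and aborts the program on any other error.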
func checkError(err error) {
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case "EntityAlreadyExists":
case "AlreadyExistsException":
case "ResourceInUseException":
case "InvalidKeyPair.Duplicate":
default:
log.Fatalf("Error (%s): %s\n", aerr.Code(), aerr.Message())
}
log.Printf("Warning (%s): %s\n", aerr.Code(), aerr.Message())
} else {
log.Fatalf("Error: %s\n", err.Error())
}
}
}
func OutputsToMap(outputs []*cloudformation.Output) *OutputsMap {
res := &OutputsMap{}
for _, v := range outputs {
switch *v.OutputKey {
case "SecurityGroups":
res.SecurityGroups = v.OutputValue
case "VpcId":
res.VpcId = v.OutputValue
case "SubnetIds":
res.SubnetIds = v.OutputValue
}
}
return res
}
func CreateEksRole(iamClient *iam.IAM, eksRoleName *string) *string {
log.Printf("Creating EKS service role \"%s\"...\n", *eksRoleName)
roleDescription := "Allows EKS to manage clusters on your behalf."
rpf, err := ioutil.ReadFile(path.Join(currentPath, "amazon-eks-role-policy.json"))
checkError(err)
rps := string(rpf)
_, err = iamClient.CreateRole(&iam.CreateRoleInput{
RoleName: eksRoleName,
Description: &roleDescription,
AssumeRolePolicyDocument: &rps,
})
checkError(err)
policyArn := "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
_, err = iamClient.AttachRolePolicy(&iam.AttachRolePolicyInput{
RoleName: eksRoleName,
PolicyArn: &policyArn,
})
checkError(err)
policyArn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy"
_, err = iamClient.AttachRolePolicy(&iam.AttachRolePolicyInput{
RoleName: eksRoleName,
PolicyArn: &policyArn,
})
checkError(err)
result, err := iamClient.GetRole(&iam.GetRoleInput{
RoleName: eksRoleName,
})
checkError(err)
log.Printf("Role \"%s\"(%s) successfully created!\n" , *eksRoleName, *result.Role.Arn)
return result.Role.Arn
}
func CreateEksClusterVpc(cfClient *cloudformation.CloudFormation, clusterStackName *string) *OutputsMap {
log.Printf("Creating Amazon EKS Cluster VPC \"%s\"...\n", *clusterStackName)
sf, err := ioutil.ReadFile(path.Join(currentPath, "amazon-eks-vpc.yaml"))
checkError(err)
s := string(sf)
_, err = cfClient.CreateStack(&cloudformation.CreateStackInput{
StackName: clusterStackName,
TemplateBody: &s,
})
checkError(err)
for {
resp, err := cfClient.DescribeStacks(&cloudformation.DescribeStacksInput{
StackName: clusterStackName,
})
checkError(err)
switch *resp.Stacks[0].StackStatus {
case "CREATE_COMPLETE":
log.Printf("Cluster VPC \"%s\" successfully created!\n", *clusterStackName)
return OutputsToMap(resp.Stacks[0].Outputs)
case "CREATE_IN_PROGRESS":
time.Sleep(time.Second)
default:
log.Fatalf("Error: Unexpected stack status: %s\n", *resp.Stacks[0].StackStatus)
}
}
}
func CreateEksCluster(eksClient *eks.EKS, clusterName *string, eksRoleArn *string, clusterStackOutputs *OutputsMap) *eks.Cluster {
log.Printf("Creating Amazon EKS Cluster \"%s\"...\n", *clusterName)
subnetIdsTemp := strings.Split(*clusterStackOutputs.SubnetIds, ",")
var subnetIds []*string
for i := range subnetIdsTemp {
subnetIds = append(subnetIds, &subnetIdsTemp[i])
}
endpointPrivateAccess := false
endpointPublicAccess := true
_, err := eksClient.CreateCluster(&eks.CreateClusterInput{
Name: clusterName,
RoleArn: eksRoleArn,
ResourcesVpcConfig: &eks.VpcConfigRequest{
SubnetIds: subnetIds,
SecurityGroupIds: []*string{
clusterStackOutputs.SecurityGroups,
},
EndpointPrivateAccess: &endpointPrivateAccess,
EndpointPublicAccess: &endpointPublicAccess,
},
})
checkError(err)
for {
resp, err := eksClient.DescribeCluster(&eks.DescribeClusterInput{
Name: clusterName,
})
checkError(err)
switch *resp.Cluster.Status {
case "ACTIVE":
log.Printf("EKS Cluster \"%s\" successfully created!\n", *clusterName)
return resp.Cluster
case "CREATING":
time.Sleep(time.Second)
default:
log.Fatalf("Error: Unexpected cluster status: %s\n", *resp.Cluster.Status)
}
}
}
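// CreateKubeConfigFile renders kube-config-template with the new cluster's CA data, endpoint and ARN, and writes the result to $KUBECONFIG (defaulting to $HOME/.kube/config).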
func CreateKubeConfigFile(cluster *eks.Cluster) {
kubeconfigFile := os.Getenv("KUBECONFIG")
if len(kubeconfigFile) == 0 {
kubeconfigFile = os.Getenv("HOME") + "/.kube/config"
}
kc, err := ioutil.ReadFile(path.Join(currentPath, "kube-config-template"))
checkError(err)
kubeconfig := string(kc)
kubeconfig = strings.Replace(kubeconfig, "<CERT>", *cluster.CertificateAuthority.Data, -1)
kubeconfig = strings.Replace(kubeconfig, "<SERVER_ENDPOINT>", *cluster.Endpoint, -1)
kubeconfig = strings.Replace(kubeconfig, "<SERVER_NAME>", *cluster.Arn, -1)
err = ioutil.WriteFile(kubeconfigFile, []byte(kubeconfig), 0644)
checkError(err)
log.Printf("Updated context %s in %s\n", *cluster.Arn, kubeconfigFile)
}
func CreateEksEc2KeyPair(ec2Client *ec2.EC2, keyPairName *string) {
log.Printf("Creating Amazon EC2 key pair \"%s\"...\n", *keyPairName)
_, err := ec2Client.CreateKeyPair(&ec2.CreateKeyPairInput{
KeyName: keyPairName,
})
checkError(err)
log.Printf("Amazon EC2 key pair \"%s\" successfully created!\n", *keyPairName)
}
func createEksWorkerNodes(cfClient *cloudformation.CloudFormation, nodesStackName *string, nodeGroupName *string, clusterName *string, keyPairName *string, clusterStackOutputs *OutputsMap) *string {
log.Printf("Creating Amazon EKS Worker Nodes on cluster \"%s\"...\n", *clusterName)
sf, err := ioutil.ReadFile(path.Join(currentPath, "amazon-eks-nodegroup.yaml"))
checkError(err)
s := string(sf)
_, err = cfClient.CreateStack(&cloudformation.CreateStackInput{
StackName: nodesStackName,
TemplateBody: &s,
Capabilities: []*string{strp("CAPABILITY_IAM")},
Parameters: []*cloudformation.Parameter{
{
ParameterKey: strp("KeyName"),
ParameterValue: keyPairName,
},
{
ParameterKey: strp("NodeImageId"),
ParameterValue: strp("ami-0484545fe7d3da96f"),
},
{
ParameterKey: strp("ClusterName"),
ParameterValue: clusterName,
},
{
ParameterKey: strp("NodeGroupName"),
ParameterValue: nodeGroupName,
},
{
ParameterKey: strp("ClusterControlPlaneSecurityGroup"),
ParameterValue: clusterStackOutputs.SecurityGroups,
},
{
ParameterKey: strp("VpcId"),
ParameterValue: clusterStackOutputs.VpcId,
},
{
ParameterKey: strp("Subnets"),
ParameterValue: clusterStackOutputs.SubnetIds,
},
},
})
checkError(err)
for {
resp, err := cfClient.DescribeStacks(&cloudformation.DescribeStacksInput{
StackName: nodesStackName,
})
checkError(err)
switch *resp.Stacks[0].StackStatus {
case "CREATE_COMPLETE":
log.Printf("EKS Worker Nodes \"%s\" successfully created!\n", *nodesStackName)
for _, output := range resp.Stacks[0].Outputs {
if *output.OutputKey == "NodeInstanceRole" {
return output.OutputValue
}
}
return nil
case "CREATE_IN_PROGRESS":
time.Sleep(time.Second)
default:
log.Fatalf("Error: Unexpected stack status: %s\n", *resp.Stacks[0].StackStatus)
}
}
}
func createAWSKubernetesCluster() {
sess := session.Must(session.NewSession())
iamClient := iam.New(sess)
eksClient := eks.New(sess)
cfClient := cloudformation.New(sess)
ec2Client := ec2.New(sess)
// Creating Amazon EKS Role
serviceSuffix := os.Getenv("NSM_AWS_SERVICE_SUFFIX")
eksRoleName := "nsm-role" + serviceSuffix
eksRoleArn := CreateEksRole(iamClient, &eksRoleName)
// Creating Amazon EKS Cluster VPC
clusterStackName := "nsm-srv" + serviceSuffix
clusterStackOutputs := CreateEksClusterVpc(cfClient, &clusterStackName)
// Creating Amazon EKS Cluster
clusterName := "nsm" + serviceSuffix
cluster := CreateEksCluster(eksClient, &clusterName, eksRoleArn, clusterStackOutputs)
// Creating kubeconfig file
CreateKubeConfigFile(cluster)
// Creating Amazon EKS Worker Nodes
keyPairName := "nsm-key-pair" + serviceSuffix
CreateEksEc2KeyPair(ec2Client, &keyPairName)
nodesStackName := "nsm-nodes" + serviceSuffix
nodeGroupName := "nsm-node-group" + serviceSuffix
nodeInstanceRole := createEksWorkerNodes(cfClient, &nodesStackName, &nodeGroupName, &clusterName, &keyPairName, clusterStackOutputs)
// Enable worker nodes to join the cluster
sf, err := ioutil.ReadFile(path.Join(currentPath, "aws-auth-cm-temp.yaml"))
checkError(err)
f, err := ioutil.TempFile(os.TempDir(),"aws-auth-cm-temp-*.yaml")
checkError(err)
s := string(sf)
_, err = f.Write([]byte(strings.Replace(s, "<NodeInstanceRole>", *nodeInstanceRole, -1)))
checkError(err)
_ = f.Close()
log.Printf("> kubectl %s %s %s","apply","-f", f.Name())
cmd := exec.Command("kubectl","apply","-f", f.Name())
var out bytes.Buffer
cmd.Stdout = &out
err = cmd.Run()
checkError(err)
log.Printf(out.String())
_ = os.Remove(f.Name())
}
| [
"\"KUBECONFIG\"",
"\"HOME\"",
"\"NSM_AWS_SERVICE_SUFFIX\""
] | [] | [
"HOME",
"NSM_AWS_SERVICE_SUFFIX",
"KUBECONFIG"
] | [] | ["HOME", "NSM_AWS_SERVICE_SUFFIX", "KUBECONFIG"] | go | 3 | 0 | |
HDP/HDP/asgi.py | """
ASGI config for HDP project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'HDP.settings')
application = get_asgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
playbooks/roles/createresources/molecule/fiaas01-debian10/tests/test_phppools.py | import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
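# Each vhost pool rendered by the role must use the expected process manager and run as www-data.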
def test_pool_config(host):
config = host.file('/etc/php/7.3/fpm/pool.d/vhost1.conf')
assert config.exists
assert config.contains('pm = dynamic')
assert config.contains('user = www-data')
assert config.contains('group = www-data')
config = host.file('/etc/php/7.3/fpm/pool.d/vhost2.conf')
assert config.exists
assert config.contains('pm = ondemand')
assert config.contains('user = www-data')
assert config.contains('group = www-data')
config = host.file('/etc/php/7.3/fpm/pool.d/vhost3.conf')
assert config.exists
assert config.contains('pm = ondemand')
assert config.contains('user = www-data')
assert config.contains('group = www-data')
| [] | [] | [
"MOLECULE_INVENTORY_FILE"
] | [] | ["MOLECULE_INVENTORY_FILE"] | python | 1 | 0 | |
build/android/run_tests.py | #!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs all the native unit tests.
1. Copy over test binary to /data/local on device.
2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak)
to be deployed to the device. We use the device's $EXTERNAL_STORAGE as the
base dir (which maps to Context.getExternalFilesDir()).
3. Environment:
3.1. chrome/unit_tests requires (via chrome_paths.cc) a directory named:
$EXTERNAL_STORAGE + /chrome/test/data
4. Run the binary in the device and stream the log to the host.
4.1. Optionally, filter specific tests.
4.2. If we're running a single test suite and we have multiple devices
connected, we'll shard the tests.
5. Clean up the device.
Suppressions:
Individual tests in a test binary can be suppressed by listing it in
the gtest_filter directory in a file of the same name as the test binary,
one test per line. Here is an example:
$ cat gtest_filter/base_unittests_disabled
DataPackTest.Load
ReadOnlyFileUtilTest.ContentsEqual
This file is generated by the tests running on devices. If running on emulator,
an additional filter file, which lists the tests that fail only on the emulator, will be
loaded. We don't care about the rare test cases that succeed on the emulator but
fail on a device.
"""
import copy
import fnmatch
import logging
import optparse
import os
import signal
import subprocess
import sys
import time
import emulator
from pylib import android_commands
from pylib import buildbot_report
from pylib import cmd_helper
from pylib import debug_info
from pylib import ports
from pylib import run_tests_helper
from pylib import test_options_parser
from pylib.base_test_sharder import BaseTestSharder
from pylib.single_test_runner import SingleTestRunner
_TEST_SUITES = ['base_unittests',
'cc_unittests',
'content_unittests',
'gpu_unittests',
'ipc_tests',
'media_unittests',
'net_unittests',
'sql_unittests',
'sync_unit_tests',
'ui_unittests',
'unit_tests',
'webkit_compositor_bindings_unittests',
]
def FullyQualifiedTestSuites(exe, option_test_suite, build_type):
"""Get a list of absolute paths to test suite targets.
Args:
exe: if True, use the executable-based test runner.
option_test_suite: the test_suite specified as an option.
build_type: 'Release' or 'Debug'.
"""
test_suite_dir = os.path.join(cmd_helper.OutDirectory.get(), build_type)
if option_test_suite:
all_test_suites = [option_test_suite]
else:
all_test_suites = _TEST_SUITES
if exe:
qualified_test_suites = [os.path.join(test_suite_dir, t)
for t in all_test_suites]
else:
# out/(Debug|Release)/$SUITE_apk/$SUITE-debug.apk
qualified_test_suites = [os.path.join(test_suite_dir,
t + '_apk',
t + '-debug.apk')
for t in all_test_suites]
for t, q in zip(all_test_suites, qualified_test_suites):
if not os.path.exists(q):
raise Exception('Test suite %s not found in %s.\n'
'Supported test suites:\n %s\n'
'Ensure it has been built.\n' %
(t, q, _TEST_SUITES))
return qualified_test_suites
class TimeProfile(object):
"""Class for simple profiling of action, with logging of cost."""
def __init__(self, description):
self._description = description
self.Start()
def Start(self):
self._starttime = time.time()
def Stop(self):
"""Stop profiling and dump a log."""
if self._starttime:
stoptime = time.time()
logging.info('%fsec to perform %s',
stoptime - self._starttime, self._description)
self._starttime = None
class Xvfb(object):
"""Class to start and stop Xvfb if relevant. Nop if not Linux."""
def __init__(self):
self._pid = 0
def _IsLinux(self):
"""Return True if on Linux; else False."""
return sys.platform.startswith('linux')
def Start(self):
"""Start Xvfb and set an appropriate DISPLAY environment. Linux only.
Copied from tools/code_coverage/coverage_posix.py
"""
if not self._IsLinux():
return
proc = subprocess.Popen(['Xvfb', ':9', '-screen', '0', '1024x768x24',
'-ac'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
self._pid = proc.pid
if not self._pid:
raise Exception('Could not start Xvfb')
os.environ['DISPLAY'] = ':9'
# Now confirm, giving a chance for it to start if needed.
for _ in range(10):
proc = subprocess.Popen('xdpyinfo >/dev/null', shell=True)
_, retcode = os.waitpid(proc.pid, 0)
if retcode == 0:
break
time.sleep(0.25)
if retcode != 0:
raise Exception('Could not confirm Xvfb happiness')
def Stop(self):
"""Stop Xvfb if needed. Linux only."""
if self._pid:
try:
os.kill(self._pid, signal.SIGKILL)
except:
pass
del os.environ['DISPLAY']
self._pid = 0
class TestSharder(BaseTestSharder):
"""Responsible for sharding the tests on the connected devices."""
def __init__(self, attached_devices, test_suite, gtest_filter,
test_arguments, timeout, cleanup_test_files, tool,
log_dump_name, fast_and_loose, build_type, in_webkit_checkout):
BaseTestSharder.__init__(self, attached_devices, build_type)
self.test_suite = test_suite
self.test_suite_basename = os.path.basename(test_suite)
self.gtest_filter = gtest_filter or ''
self.test_arguments = test_arguments
self.timeout = timeout
self.cleanup_test_files = cleanup_test_files
self.tool = tool
self.log_dump_name = log_dump_name
self.fast_and_loose = fast_and_loose
self.in_webkit_checkout = in_webkit_checkout
self.all_tests = []
if not self.gtest_filter:
# No filter has been specified, let's add all tests then.
self.all_tests, self.attached_devices = self._GetAllEnabledTests()
self.tests = self.all_tests
def _GetAllEnabledTests(self):
"""Get all enabled tests and available devices.
Obtains a list of enabled tests from the test package on the device,
    then filters it again using the disabled list on the host.
Returns:
Tuple of (all enabled tests, available devices).
Raises Exception if all devices failed.
"""
# TODO(frankf): This method is doing too much in a non-systematic way.
# If the intention is to drop flaky devices, why not go through all devices
    # instead of breaking on the first successful run?
available_devices = list(self.attached_devices)
while available_devices:
try:
return (self._GetTestsFromDevice(available_devices[-1]),
available_devices)
except Exception as e:
logging.warning('Failed obtaining tests from %s %s',
available_devices[-1], e)
available_devices.pop()
raise Exception('No device available to get the list of tests.')
def _GetTestsFromDevice(self, device):
logging.info('Obtaining tests from %s', device)
test_runner = SingleTestRunner(
device,
self.test_suite,
self.gtest_filter,
self.test_arguments,
self.timeout,
self.cleanup_test_files,
self.tool,
0,
not not self.log_dump_name,
self.fast_and_loose,
self.build_type,
self.in_webkit_checkout)
# The executable/apk needs to be copied before we can call GetAllTests.
test_runner.test_package.StripAndCopyExecutable()
all_tests = test_runner.test_package.GetAllTests()
disabled_list = test_runner.GetDisabledTests()
# Only includes tests that do not have any match in the disabled list.
all_tests = filter(lambda t:
not any([fnmatch.fnmatch(t, disabled_pattern)
for disabled_pattern in disabled_list]),
all_tests)
return all_tests
def CreateShardedTestRunner(self, device, index):
"""Creates a suite-specific test runner.
Args:
device: Device serial where this shard will run.
index: Index of this device in the pool.
Returns:
A SingleTestRunner object.
"""
device_num = len(self.attached_devices)
shard_size = (len(self.tests) + device_num - 1) / device_num
shard_test_list = self.tests[index * shard_size : (index + 1) * shard_size]
test_filter = ':'.join(shard_test_list) + self.gtest_filter
return SingleTestRunner(
device,
self.test_suite,
test_filter,
self.test_arguments,
self.timeout,
self.cleanup_test_files, self.tool, index,
not not self.log_dump_name,
self.fast_and_loose,
self.build_type,
self.in_webkit_checkout)
def OnTestsCompleted(self, test_runners, test_results):
"""Notifies that we completed the tests."""
test_results.LogFull('Unit test', os.path.basename(self.test_suite),
self.build_type, self.all_tests)
test_results.PrintAnnotation()
if self.log_dump_name:
# Zip all debug info outputs into a file named by log_dump_name.
debug_info.GTestDebugInfo.ZipAndCleanResults(
os.path.join(
cmd_helper.OutDirectory.get(), self.build_type,
'debug_info_dumps'),
self.log_dump_name)
def _RunATestSuite(options):
"""Run a single test suite.
Helper for Dispatch() to allow stop/restart of the emulator across
test bundles. If using the emulator, we start it on entry and stop
it on exit.
Args:
options: options for running the tests.
Returns:
0 if successful, number of failing tests otherwise.
"""
step_name = os.path.basename(options.test_suite).replace('-debug.apk', '')
buildbot_report.PrintNamedStep(step_name)
attached_devices = []
buildbot_emulators = []
if options.use_emulator:
for n in range(options.emulator_count):
t = TimeProfile('Emulator launch %d' % n)
avd_name = None
if n > 0:
# Creates a temporary AVD for the extra emulators.
avd_name = 'run_tests_avd_%d' % n
buildbot_emulator = emulator.Emulator(avd_name, options.fast_and_loose)
buildbot_emulator.Launch(kill_all_emulators=n == 0)
t.Stop()
buildbot_emulators.append(buildbot_emulator)
attached_devices.append(buildbot_emulator.device)
# Wait for all emulators to boot completed.
map(lambda buildbot_emulator: buildbot_emulator.ConfirmLaunch(True),
buildbot_emulators)
elif options.test_device:
attached_devices = [options.test_device]
else:
attached_devices = android_commands.GetAttachedDevices()
if not attached_devices:
logging.critical('A device must be attached and online.')
buildbot_report.PrintError()
return 1
# Reset the test port allocation. It's important to do it before starting
# to dispatch any tests.
if not ports.ResetTestServerPortAllocation():
raise Exception('Failed to reset test server port.')
if options.gtest_filter:
logging.warning('Sharding is not possible with these configurations.')
attached_devices = [attached_devices[0]]
sharder = TestSharder(
attached_devices,
options.test_suite,
options.gtest_filter,
options.test_arguments,
options.timeout,
options.cleanup_test_files,
options.tool,
options.log_dump,
options.fast_and_loose,
options.build_type,
options.webkit)
test_results = sharder.RunShardedTests()
for buildbot_emulator in buildbot_emulators:
buildbot_emulator.Shutdown()
return len(test_results.failed)
def Dispatch(options):
"""Dispatches the tests, sharding if possible.
If options.use_emulator is True, all tests will be run in new emulator
instance.
Args:
options: options for running the tests.
Returns:
0 if successful, number of failing tests otherwise.
"""
if options.test_suite == 'help':
ListTestSuites()
return 0
if options.use_xvfb:
xvfb = Xvfb()
xvfb.Start()
all_test_suites = FullyQualifiedTestSuites(options.exe, options.test_suite,
options.build_type)
failures = 0
for suite in all_test_suites:
# Give each test suite its own copy of options.
test_options = copy.deepcopy(options)
test_options.test_suite = suite
failures += _RunATestSuite(test_options)
if options.use_xvfb:
xvfb.Stop()
return failures
def ListTestSuites():
"""Display a list of available test suites."""
print 'Available test suites are:'
for test_suite in _TEST_SUITES:
print test_suite
def main(argv):
option_parser = optparse.OptionParser()
test_options_parser.AddTestRunnerOptions(option_parser, default_timeout=0)
option_parser.add_option('-s', '--suite', dest='test_suite',
help='Executable name of the test suite to run '
'(use -s help to list them)')
option_parser.add_option('--out-directory', dest='out_directory',
help='Path to the out/ directory, irrespective of '
'the build type. Only for non-Chromium uses.')
option_parser.add_option('-d', '--device', dest='test_device',
help='Target device the test suite to run ')
option_parser.add_option('-f', '--gtest_filter', dest='gtest_filter',
help='gtest filter')
option_parser.add_option('-a', '--test_arguments', dest='test_arguments',
help='Additional arguments to pass to the test')
option_parser.add_option('-L', dest='log_dump',
help='file name of log dump, which will be put in '
'subfolder debug_info_dumps under the same '
'directory in where the test_suite exists.')
option_parser.add_option('-e', '--emulator', dest='use_emulator',
action='store_true',
help='Run tests in a new instance of emulator')
option_parser.add_option('-n', '--emulator_count',
type='int', default=1,
help='Number of emulators to launch for running the '
'tests.')
option_parser.add_option('-x', '--xvfb', dest='use_xvfb',
action='store_true',
help='Use Xvfb around tests (ignored if not Linux)')
option_parser.add_option('--webkit', action='store_true',
help='Run the tests from a WebKit checkout.')
option_parser.add_option('--fast', '--fast_and_loose', dest='fast_and_loose',
action='store_true',
help='Go faster (but be less stable), '
'for quick testing. Example: when tracking down '
'tests that hang to add to the disabled list, '
'there is no need to redeploy the test binary '
'or data to the device again. '
'Don\'t use on bots by default!')
option_parser.add_option('--repeat', dest='repeat', type='int',
default=2,
help='Repeat count on test timeout')
option_parser.add_option('--exit_code', action='store_true',
help='If set, the exit code will be total number '
'of failures.')
option_parser.add_option('--exe', action='store_true',
help='If set, use the exe test runner instead of '
'the APK.')
options, args = option_parser.parse_args(argv)
if len(args) > 1:
print 'Unknown argument:', args[1:]
option_parser.print_usage()
sys.exit(1)
run_tests_helper.SetLogLevel(options.verbose_count)
if options.out_directory:
cmd_helper.OutDirectory.set(options.out_directory)
if options.use_emulator:
emulator.DeleteAllTempAVDs()
failed_tests_count = Dispatch(options)
# Failures of individual test suites are communicated by printing a
# STEP_FAILURE message.
# Returning a success exit status also prevents the buildbot from incorrectly
# marking the last suite as failed if there were failures in other suites in
# the batch (this happens because the exit status is a sum of all failures
# from all suites, but the buildbot associates the exit status only with the
# most recent step).
if options.exit_code:
return failed_tests_count
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| [] | [] | [
"DISPLAY"
] | [] | ["DISPLAY"] | python | 1 | 0 | |
Shortener/wsgi.py | """
WSGI config for Shortener project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Shortener.settings')
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
dcr/scripts/orchestrator/set_environment.py | import json
import os.path
from dcr.scenario_utils.logging_utils import get_logger
logger = get_logger("dcr.script.orchestrator.set_environment")
add_variable_to_pipeline = '##vso[task.setvariable variable={name};]{value}'
def _check_if_file_in_scenario_and_set_variable(file_name: str, name: str, true_value: str, false_val: str = None):
"""
We have certain scenarios in the tests where we determine what type of test to run based on the availability of the file.
Check if file is present in the current scenario, and if so, set the variable name.
Syntax for setting the variable : https://docs.microsoft.com/en-us/azure/devops/pipelines/scripts/logging-commands?view=azure-devops&tabs=bash#setvariable-initialize-or-modify-the-value-of-a-variable
Eg: echo "##vso[task.setvariable variable=<VariableName>;]<Variable value>"
"""
file_path = os.path.join(scenario_path, file_name)
if os.path.exists(file_path):
logger.info(f"Found file: {file_path}, setting variable: {name}")
print(add_variable_to_pipeline.format(name=name, value=true_value))
elif false_val is not None:
print(add_variable_to_pipeline.format(name=name, value=false_val))
def _override_config():
"""
This function reads the config.json file present in the scenario and makes all the variables available to the whole
job as environment variables.
It also overrides existing variables with the same name if available.
Note: This function expects config.json to be a flat JSON
"""
config_path = os.path.join(scenario_path, "config.json")
if not os.path.exists(config_path):
logger.info(f"Config file: {config_path} not available")
return
with open(config_path, encoding="utf-8") as config_fh:
config_data = json.load(config_fh)
for key, val in config_data.items():
print(add_variable_to_pipeline.format(name=key, value=val))
if __name__ == '__main__':
"""
This script sets the environment for the current job.
    It determines which files should be run and which should not.
    E.g. whether we're supposed to run run.host.py or run.py
"""
__dcr_dir = os.path.join(os.environ.get("BUILD_SOURCESDIRECTORY"), "dcr")
scenario_path = os.path.join(__dcr_dir, "scenario")
template_dir = os.path.join(__dcr_dir, "templates")
_check_if_file_in_scenario_and_set_variable(file_name="run.py", name="runPy", true_value="true")
_check_if_file_in_scenario_and_set_variable(file_name="run.host.py", name="runHost", true_value="true")
_check_if_file_in_scenario_and_set_variable(file_name="setup.sh", name="runScenarioSetup", true_value="true")
_check_if_file_in_scenario_and_set_variable(file_name="template.json", name="templateFile",
true_value=os.path.join(scenario_path, "template.json"),
false_val=os.path.join(template_dir, "deploy-linux-vm.json"))
_check_if_file_in_scenario_and_set_variable(file_name="parameters.json", name="parametersFile",
true_value=os.path.join(scenario_path, "parameters.json"),
false_val=os.path.join(template_dir, "deploy-linux-vm-params.json"))
# Check if config.json exists and add to environment
_override_config()
| [] | [] | [
"BUILD_SOURCESDIRECTORY"
] | [] | ["BUILD_SOURCESDIRECTORY"] | python | 1 | 0 | |
src/main/java/net/sourceforge/myvd/test/util/StartOpenLDAP.java | /*
* Copyright 2008 Marc Boorshtein
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.sourceforge.myvd.test.util;
import java.io.*;
import java.util.HashMap;
import com.novell.ldap.LDAPConnection;
import com.novell.ldap.LDAPException;
import com.novell.ldap.LDAPLocalException;
import com.novell.ldap.LDAPMessage;
import com.novell.ldap.LDAPSearchResult;
import com.novell.ldap.LDAPSearchResults;
import com.novell.ldap.util.LDIFReader;
public class StartOpenLDAP {
static HashMap<Integer,StartOpenLDAP> servers = new HashMap<Integer,StartOpenLDAP>();
Process process;
int port;
private String adminDN;
private String adminPass;
private String fullPath;
public void reloadAllData() throws Exception {
LDAPConnection con = new LDAPConnection();
con.connect("127.0.0.1", this.port);
con.bind(this.adminDN, this.adminPass);
LDIFReader reader = new LDIFReader(new FileInputStream(fullPath + "/data.ldif"));
LDAPMessage msg = reader.readMessage();
this.deleteNode(((LDAPSearchResult) msg).getEntry().getDN(), con,true);
con.disconnect();
this.loadLDIF(fullPath,adminDN,adminPass,port,true);
}
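	// Recursively deletes the subtree rooted at dn; when childrenOnly is true the entry itself is kept.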
void deleteNode(String dn,LDAPConnection con,boolean childrenOnly) throws Exception {
LDAPSearchResults children = con.search(dn, 1, "(objectClass=*)", new String[]{"1.1"}, false);
while (children.hasMore()) {
deleteNode(children.next().getDN(),con,false);
}
System.err.println("Deleting " + dn);
if (!childrenOnly) {
con.delete(dn);
}
}
public void stopServer() {
if (this.process != null) {
this.process.destroy();
}
for (int i=0,m=100;i<m;i++) {
try {
LDAPConnection con = new LDAPConnection();
con.connect("127.0.0.1",port);
try {
Thread.sleep(100);
} catch (InterruptedException e) {
}
} catch (LDAPException e) {
servers.remove(port);
break;
}
}
}
private void clearData(String path) {
String dataPath = path + "/data";
File dir = new File(dataPath);
if (dir.exists()) {
File[] files = dir.listFiles();
if (files != null) {
for (int i=0,m=files.length;i<m;i++) {
files[i].delete();
}
}
} else {
dir.mkdir();
}
}
private void loadLDIF(String path,String adminDN,String adminPass,int port, boolean skipFirst) throws LDAPException, FileNotFoundException, IOException {
try {
this.port = port;
LDAPConnection con = new LDAPConnection();
con.connect("localhost",port);
con.bind(3,adminDN,adminPass.getBytes());
//System.out.println(path + "/data.ldif");
LDIFReader reader = new LDIFReader(new FileInputStream(path + "/data.ldif"));
LDAPMessage msg;
if (skipFirst) {
reader.readMessage();
}
while ((msg = reader.readMessage()) != null) {
System.err.println("Msg : " + msg);
con.add(((LDAPSearchResult) msg).getEntry());
}
con.disconnect();
} catch (LDAPLocalException e) {
// TODO Auto-generated catch block
e.printStackTrace();
} catch (FileNotFoundException e) {
// TODO Auto-generated catch block
e.printStackTrace();
} catch (LDAPException e) {
// TODO Auto-generated catch block
e.printStackTrace();
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
public boolean startServer(String fullPath,int port,String adminDN,String adminPass) throws IOException,Exception {
return this.startServer(fullPath, port, adminDN, adminPass,0);
}
public boolean startServer(String fullPath,int port,String adminDN,String adminPass,int sslPort) throws IOException,Exception {
this.adminDN = adminDN;
this.adminPass = adminPass;
this.fullPath = fullPath;
LDAPConnection con = new LDAPConnection();
try {
con.connect("127.0.0.1",port);
con.disconnect();
for (int i=0;i<10;i++) {
Thread.sleep(5000);
try {
con.connect("127.0.0.1",port);
con.disconnect();
} catch (Exception e) {
throw e;
}
}
if (! servers.containsKey(port)) {
				throw new Exception("Server on port " + port + " not stopped");
} else {
servers.get(port).stopServer();
}
} catch (LDAPException e) {
} catch (Exception e) {
throw e;
}
clearData(fullPath);
this.createTestConf(fullPath);
String exec = System.getenv("SLAPD_PATH") + "/slapd -d 1 -h 'ldap://:" + port + "/" + (sslPort > 0 ? " ldaps://:" + sslPort + "/" : "") + "' -f " + fullPath + "/slapd-gen.conf";
String[] execa = new String[] {System.getenv("SLAPD_PATH") + "/slapd","-d","1","-h","ldap://:" + port + "/" + (sslPort > 0 ? " ldaps://:" + sslPort + "/" : ""),"-f",fullPath + "/slapd-gen.conf"};
System.out.println(exec);
process = Runtime.getRuntime().exec(execa);
boolean debug = Boolean.valueOf(System.getenv("DEBUG_SERVER"));
StreamReader reader = new StreamReader(process.getInputStream(), debug);
StreamReader errReader = new StreamReader(process.getErrorStream(), debug);
reader.start();
errReader.start();
for (int i=0,m=10;i<m;i++) {
con = new LDAPConnection();
try {
//System.out.println("Try : " + i);
con.connect("127.0.0.1",port);
con.disconnect();
if (sslPort > 0) {
con.connect("127.0.0.1", sslPort);
con.disconnect();
}
this.loadLDIF(fullPath,adminDN,adminPass,port,false);
servers.put(port, this);
return true;
} catch (LDAPException e) {
try {
Thread.sleep(5000);
} catch (InterruptedException e1) {
//do nothing
}
}
}
return false;
}
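	// Generates slapd-gen.conf from slapd.conf by substituting the %PROJ_DIR%, %SCHEMA_DIR% and %TMP_DIR% placeholders from the environment.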
private void createTestConf(String fullPath) throws IOException {
BufferedReader in = new BufferedReader(new InputStreamReader(new FileInputStream(fullPath + "/slapd.conf")));
StringBuffer buf = new StringBuffer();
String line;
while ((line = in.readLine()) != null) {
buf.append(line).append('\n');
}
String tmpDir = System.getenv("TMP_DIR");
if (tmpDir != null && ! tmpDir.isEmpty()) {
this.clearData(tmpDir);
}
String tmp = buf.toString().replaceAll("[%]PROJ_DIR[%]", System.getenv("PROJ_DIR"));
tmp = tmp.replaceAll("[%]SCHEMA_DIR[%]", System.getenv("SCHEMA_DIR"));
tmp = tmp.replaceAll("[%]TMP_DIR[%]", System.getenv("TMP_DIR"));
PrintWriter out = new PrintWriter(new FileWriter(fullPath + "/slapd-gen.conf"));
out.println("# GENERATED FILE - DO NOT EDIT");
out.print(tmp);
out.close();
}
public static final void main(String[] args) throws Exception {
StartOpenLDAP start = new StartOpenLDAP();
boolean isStarted = start.startServer(System.getenv("PROJ_DIR") + "/test/startopenldap",10983,"cn=admin,dc=domain,dc=com","manager");
if (isStarted) {
//System.out.println("Server started on 10983....");
BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
in.readLine();
start.stopServer();
}
}
}
| [
"\"SLAPD_PATH\"",
"\"SLAPD_PATH\"",
"\"DEBUG_SERVER\"",
"\"TMP_DIR\"",
"\"PROJ_DIR\"",
"\"SCHEMA_DIR\"",
"\"TMP_DIR\"",
"\"PROJ_DIR\""
] | [] | [
"DEBUG_SERVER",
"TMP_DIR",
"SLAPD_PATH",
"PROJ_DIR",
"SCHEMA_DIR"
] | [] | ["DEBUG_SERVER", "TMP_DIR", "SLAPD_PATH", "PROJ_DIR", "SCHEMA_DIR"] | java | 5 | 0 | |
tests/unit/modules/test_cmdmod.py | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Nicole Thomas <[email protected]>`
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
import tempfile
# Import Salt Libs
import salt.utils.platform
import salt.modules.cmdmod as cmdmod
from salt.exceptions import CommandExecutionError
from salt.log import LOG_LEVELS
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
mock_open,
Mock,
MagicMock,
NO_MOCK,
NO_MOCK_REASON,
patch
)
DEFAULT_SHELL = 'foo/bar'
MOCK_SHELL_FILE = '# List of acceptable shells\n' \
'\n'\
'/bin/bash\n'
@skipIf(NO_MOCK, NO_MOCK_REASON)
class CMDMODTestCase(TestCase, LoaderModuleMockMixin):
'''
Unit tests for the salt.modules.cmdmod module
'''
def setup_loader_modules(self):
return {cmdmod: {}}
@classmethod
def setUpClass(cls):
cls.mock_loglevels = {'info': 'foo', 'all': 'bar', 'critical': 'bar',
'trace': 'bar', 'garbage': 'bar', 'error': 'bar',
'debug': 'bar', 'warning': 'bar', 'quiet': 'bar'}
@classmethod
def tearDownClass(cls):
del cls.mock_loglevels
def test_render_cmd_no_template(self):
'''
Tests return when template=None
'''
self.assertEqual(cmdmod._render_cmd('foo', 'bar', None),
('foo', 'bar'))
def test_render_cmd_unavailable_engine(self):
'''
Tests CommandExecutionError raised when template isn't in the
template registry
'''
self.assertRaises(CommandExecutionError,
cmdmod._render_cmd,
'boo', 'bar', 'baz')
def test_check_loglevel_bad_level(self):
'''
Tests return of providing an invalid loglevel option
'''
with patch.dict(LOG_LEVELS, self.mock_loglevels):
self.assertEqual(cmdmod._check_loglevel(level='bad_loglevel'), 'foo')
def test_check_loglevel_bad_level_not_str(self):
'''
Tests the return of providing an invalid loglevel option that is not a string
'''
with patch.dict(LOG_LEVELS, self.mock_loglevels):
self.assertEqual(cmdmod._check_loglevel(level=1000), 'foo')
def test_check_loglevel_quiet(self):
'''
Tests the return of providing a loglevel of 'quiet'
'''
with patch.dict(LOG_LEVELS, self.mock_loglevels):
self.assertEqual(cmdmod._check_loglevel(level='quiet'), None)
def test_parse_env_not_env(self):
'''
Tests the return of an env that is not an env
'''
self.assertEqual(cmdmod._parse_env(None), {})
def test_parse_env_list(self):
'''
Tests the return of an env that is a list
'''
ret = {'foo': None, 'bar': None}
self.assertEqual(ret, cmdmod._parse_env(['foo', 'bar']))
def test_parse_env_dict(self):
'''
Test the return of an env that is not a dict
'''
self.assertEqual(cmdmod._parse_env('test'), {})
def test_run_shell_is_not_file(self):
'''
Tests error raised when shell is not available after _is_valid_shell error msg
and os.path.isfile returns False
'''
with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)):
with patch('salt.utils.platform.is_windows', MagicMock(return_value=False)):
with patch('os.path.isfile', MagicMock(return_value=False)):
self.assertRaises(CommandExecutionError, cmdmod._run, 'foo', 'bar')
def test_run_shell_file_no_access(self):
'''
Tests error raised when shell is not available after _is_valid_shell error msg,
os.path.isfile returns True, but os.access returns False
'''
with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)):
with patch('salt.utils.platform.is_windows', MagicMock(return_value=False)):
with patch('os.path.isfile', MagicMock(return_value=True)):
with patch('os.access', MagicMock(return_value=False)):
self.assertRaises(CommandExecutionError, cmdmod._run, 'foo', 'bar')
def test_run_runas_with_windows(self):
'''
Tests error raised when runas is passed on windows
'''
with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)):
with patch('salt.utils.platform.is_windows', MagicMock(return_value=True)):
with patch.dict(cmdmod.__grains__, {'os': 'fake_os'}):
self.assertRaises(CommandExecutionError,
cmdmod._run,
'foo', 'bar', runas='baz')
def test_run_user_not_available(self):
'''
Tests return when runas user is not available
'''
with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)):
with patch('os.path.isfile', MagicMock(return_value=True)):
with patch('os.access', MagicMock(return_value=True)):
self.assertRaises(CommandExecutionError, cmdmod._run, 'foo', 'bar', runas='baz')
def test_run_zero_umask(self):
'''
Tests error raised when umask is set to zero
'''
with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)):
with patch('salt.utils.platform.is_windows', MagicMock(return_value=False)):
with patch('os.path.isfile', MagicMock(return_value=True)):
with patch('os.access', MagicMock(return_value=True)):
self.assertRaises(CommandExecutionError, cmdmod._run, 'foo', 'bar', umask=0)
def test_run_invalid_umask(self):
'''
Tests error raised when an invalid umask is given
'''
with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)):
with patch('salt.utils.platform.is_windows', MagicMock(return_value=False)):
with patch('os.path.isfile', MagicMock(return_value=True)):
with patch('os.access', MagicMock(return_value=True)):
self.assertRaises(CommandExecutionError, cmdmod._run, 'foo', 'bar', umask='baz')
def test_run_invalid_cwd_not_abs_path(self):
'''
Tests error raised when cwd is not an absolute path
'''
with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)):
with patch('salt.utils.platform.is_windows', MagicMock(return_value=False)):
with patch('os.path.isfile', MagicMock(return_value=True)):
with patch('os.access', MagicMock(return_value=True)):
self.assertRaises(CommandExecutionError, cmdmod._run, 'foo', 'bar')
def test_run_invalid_cwd_not_dir(self):
'''
Tests error raised when cwd is not a dir
'''
with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)):
with patch('salt.utils.platform.is_windows', MagicMock(return_value=False)):
with patch('os.path.isfile', MagicMock(return_value=True)):
with patch('os.access', MagicMock(return_value=True)):
with patch('os.path.isabs', MagicMock(return_value=True)):
self.assertRaises(CommandExecutionError, cmdmod._run, 'foo', 'bar')
def test_run_no_vt_os_error(self):
'''
        Tests error raised when not using vt and OSError is raised
'''
with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)):
with patch('salt.utils.platform.is_windows', MagicMock(return_value=False)):
with patch('os.path.isfile', MagicMock(return_value=True)):
with patch('os.access', MagicMock(return_value=True)):
with patch('salt.utils.timed_subprocess.TimedProc', MagicMock(side_effect=OSError)):
self.assertRaises(CommandExecutionError, cmdmod._run, 'foo')
def test_run_no_vt_io_error(self):
'''
        Tests error raised when not using vt and IOError is raised
'''
with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)):
with patch('salt.utils.platform.is_windows', MagicMock(return_value=False)):
with patch('os.path.isfile', MagicMock(return_value=True)):
with patch('os.access', MagicMock(return_value=True)):
with patch('salt.utils.timed_subprocess.TimedProc', MagicMock(side_effect=IOError)):
self.assertRaises(CommandExecutionError, cmdmod._run, 'foo')
@skipIf(salt.utils.platform.is_windows(), 'Do not run on Windows')
def test_run(self):
'''
Tests end result when a command is not found
'''
with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)):
with patch('salt.utils.platform.is_windows', MagicMock(return_value=False)):
with patch('os.path.isfile', MagicMock(return_value=True)):
with patch('os.access', MagicMock(return_value=True)):
ret = cmdmod._run('foo', cwd=os.getcwd(), use_vt=True).get('stderr')
self.assertIn('foo', ret)
def test_is_valid_shell_windows(self):
'''
Tests return if running on windows
'''
with patch('salt.utils.platform.is_windows', MagicMock(return_value=True)):
self.assertTrue(cmdmod._is_valid_shell('foo'))
@skipIf(salt.utils.platform.is_windows(), 'Do not run on Windows')
def test_is_valid_shell_none(self):
'''
        Tests that None is returned when /etc/shells does not exist
'''
with patch('os.path.exists', MagicMock(return_value=False)):
self.assertIsNone(cmdmod._is_valid_shell('foo'))
def test_is_valid_shell_available(self):
'''
Tests return when provided shell is available
'''
with patch('os.path.exists', MagicMock(return_value=True)):
with patch('salt.utils.files.fopen', mock_open(read_data=MOCK_SHELL_FILE)):
self.assertTrue(cmdmod._is_valid_shell('/bin/bash'))
@skipIf(salt.utils.platform.is_windows(), 'Do not run on Windows')
def test_is_valid_shell_unavailable(self):
'''
Tests return when provided shell is not available
'''
with patch('os.path.exists', MagicMock(return_value=True)):
with patch('salt.utils.files.fopen', mock_open(read_data=MOCK_SHELL_FILE)):
self.assertFalse(cmdmod._is_valid_shell('foo'))
@skipIf(salt.utils.platform.is_windows(), 'Do not run on Windows')
def test_os_environment_remains_intact(self):
'''
Make sure the OS environment is not tainted after running a command
that specifies runas.
'''
with patch('pwd.getpwnam') as getpwnam_mock:
with patch('subprocess.Popen') as popen_mock:
environment = os.environ.copy()
popen_mock.return_value = Mock(
communicate=lambda *args, **kwags: ['{}', None],
pid=lambda: 1,
retcode=0
)
with patch.dict(cmdmod.__grains__, {'os': 'Darwin', 'os_family': 'Solaris'}):
if sys.platform.startswith(('freebsd', 'openbsd')):
shell = '/bin/sh'
else:
shell = '/bin/bash'
cmdmod._run('ls',
cwd=tempfile.gettempdir(),
runas='foobar',
shell=shell)
environment2 = os.environ.copy()
self.assertEqual(environment, environment2)
getpwnam_mock.assert_called_with('foobar')
def test_run_cwd_doesnt_exist_issue_7154(self):
'''
cmd.run should fail and raise
salt.exceptions.CommandExecutionError if the cwd dir does not
exist
'''
cmd = 'echo OHAI'
cwd = '/path/to/nowhere'
try:
cmdmod.run_all(cmd, cwd=cwd)
except CommandExecutionError:
pass
else:
raise RuntimeError
| [] | [] | [] | [] | [] | python | 0 | 0 | |
store/fuzz/fuzz.go | package fuzz
import (
"bytes"
"encoding/binary"
"fmt"
"os"
"strconv"
"sync"
"github.com/coinexchain/randsrc"
"github.com/smartbch/moeingads"
"github.com/smartbch/moeingads/datatree"
"github.com/smartbch/moeingads/store"
"github.com/smartbch/moeingads/store/rabbit"
storetypes "github.com/smartbch/moeingads/store/types"
)
// go test -tags debug -c -coverpkg github.com/smartbch/moeingads/... .
// RANDFILE=~/Downloads/goland-2019.1.3.dmg RANDCOUNT=2000 ./fuzz.test -test.coverprofile a.out
func allTests() {
//cfg1 := &FuzzConfig {
// MaxReadCountInTx: 10,
// MaxCheckProofCountInTx: 2,
// MaxWriteCountInTx: 10,
// MaxDeleteCountInTx: 10,
// MaxTxCountInEpoch: 100,
// MaxEpochCountInBlock: 5,
// EffectiveBits: 0xFFF00000_00000FFF,
// MaxActiveCount: -1,
// MaxValueLength: 256,
// TxSucceedRatio: 0.85,
// BlockSucceedRatio: 0.95,
// BlockPanicRatio: 0.02,
// RootType: "Real",
// ConsistencyEveryNBlock: 200,
// PruneEveryNBlock: 100,
// KeepRecentNBlock: 100,
// ReloadEveryNBlock: 500,
//}
//runTest(cfg1)
cfg2 := &FuzzConfig{
MaxReadCountInTx: 20,
MaxCheckProofCountInTx: 20,
MaxWriteCountInTx: 10,
MaxDeleteCountInTx: 10,
		MaxTxCountInEpoch:      100, // For rabbit, we cannot avoid inter-tx dependencies beforehand, but they seldom happen
MaxEpochCountInBlock: 900,
EffectiveBits: 0xFF000000_000003FF,
MaxActiveCount: 128 * 1024,
MaxValueLength: 256,
TxSucceedRatio: 0.85,
BlockSucceedRatio: 0.95,
BlockPanicRatio: 0.03,
RootType: "Real", //"MockDataTree", //"MockRoot",
ConsistencyEveryNBlock: 10,
PruneEveryNBlock: 100,
KeepRecentNBlock: 100,
ReloadEveryNBlock: 500,
}
runTest(cfg2)
}
// ==============================
const (
FirstByteOfCacheableKey = byte(105)
)
var (
GuardStart = []byte{0, 0, 0, 0, 0, 0, 0, 0}
GuardEnd = []byte{255, 255, 255, 255, 255, 255, 255, 255}
)
var DBG bool
type FuzzConfig struct {
MaxReadCountInTx uint32
MaxCheckProofCountInTx uint32
MaxWriteCountInTx uint32
MaxDeleteCountInTx uint32
MaxTxCountInEpoch uint32
MaxEpochCountInBlock uint32
EffectiveBits uint64
MaxValueLength int
MaxActiveCount int
TxSucceedRatio float32
BlockSucceedRatio float32
BlockPanicRatio float32
RootType string //MockRoot MockDataTree Real
ConsistencyEveryNBlock int
PruneEveryNBlock int
KeepRecentNBlock int
ReloadEveryNBlock int
}
func runTest(cfg *FuzzConfig) {
DBG = false
randFilename := os.Getenv("RANDFILE")
if len(randFilename) == 0 {
fmt.Printf("No RANDFILE specified. Exiting...")
return
}
roundCount, err := strconv.Atoi(os.Getenv("RANDCOUNT"))
if err != nil {
panic(err)
}
rs := randsrc.NewRandSrcFromFileWithSeed(randFilename, []byte{0})
var root storetypes.RootStoreI
var mads *moeingads.MoeingADS
initRootAndMads := func() {
var err error
mads, err = moeingads.NewMoeingADS("./moeingads4test", false, [][]byte{GuardStart, GuardEnd})
if err != nil {
panic(err)
}
root = store.NewRootStore(mads, func(k []byte) bool {
return k[0] > FirstByteOfCacheableKey
})
}
if cfg.RootType == "MockRoot" {
root = store.NewMockRootStore()
} else if cfg.RootType == "MockDataTree" {
os.RemoveAll("./rocksdb.db")
mads := moeingads.NewMoeingADS4Mock([][]byte{GuardStart, GuardEnd})
root = store.NewRootStore(mads, func(k []byte) bool {
return k[0] > FirstByteOfCacheableKey
})
} else if cfg.RootType == "Real" {
os.RemoveAll("./moeingads4test")
initRootAndMads()
} else {
panic("Invalid RootType " + cfg.RootType)
}
ref := NewRefL1()
fmt.Printf("Initialized\n")
block := GenerateRandBlock(0, ref, rs, cfg)
madsHeight := 0
for height := 0; height < roundCount; height++ {
//if height > 66 {DBG = true}
fmt.Printf("Block %d success %v\n", height, block.Succeed)
//retry := ExecuteBlock(height, mads, root, block, cfg, false) //not in parallel
retry := ExecuteBlock(height, mads, root, block, cfg, true) //in parallel
if retry {
initRootAndMads()
if int64(madsHeight) != mads.GetCurrHeight() {
panic(fmt.Sprintf("Height mismatch %d %d", madsHeight, mads.GetCurrHeight()))
}
height--
continue // retry with the same height and same block
}
if block.Succeed && !retry {
madsHeight = height
}
if block.Succeed && int64(height) != mads.GetCurrHeight() {
panic(fmt.Sprintf("Height mismatch %d %d", height, mads.GetCurrHeight()))
}
if (height+1)%cfg.ReloadEveryNBlock == 0 {
mads.Close()
initRootAndMads()
}
block = GenerateRandBlock(height, ref, rs, cfg)
}
root.Close()
if cfg.RootType == "MockDataTree" {
os.RemoveAll("./rocksdb.db")
} else if cfg.RootType == "Real" {
os.RemoveAll("./moeingads4test")
}
}
type Pair struct {
Key, Value []byte
}
const (
OpRead = 8
OpCheckProof = 4
OpWrite = 1
OpDelete = 0
)
type Operation struct {
opType int
key [8]byte
value []byte
}
type Tx struct {
OpList []Operation
Succeed bool
}
type Epoch struct {
TxList []*Tx
}
type Block struct {
EpochList []Epoch
Succeed bool
PanicNumber int
}
func getRandValue(rs randsrc.RandSrc, cfg *FuzzConfig) []byte {
length := 1 + int(rs.GetUint32())%(cfg.MaxValueLength-1) //no zero-length value
if float32(rs.GetUint32()%0x10000)/float32(0x10000) < 0.15 { //some corner cases for large value
length = 1 + int(rs.GetUint32())%(moeingads.HPFileBufferSize/2)
}
bz := rs.GetBytes(length)
if len(bz) < 16 {
return bz
}
for i := 0; i < 3; i++ {
if float32(rs.GetUint32()%0x10000)/float32(0x10000) < 0.1 {
continue
}
pos := int(rs.GetUint32()) % (length - 8)
copy(bz[pos:], datatree.MagicBytes[:])
}
return bz
}
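// getRand8Bytes returns a random little-endian 8-byte key masked by EffectiveBits; when touchedKeys is non-nil it retries until the key has not been touched yet.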
func getRand8Bytes(rs randsrc.RandSrc, cfg *FuzzConfig, touchedKeys map[uint64]struct{}) (res [8]byte) {
if touchedKeys == nil {
i := rs.GetUint64() & cfg.EffectiveBits
binary.LittleEndian.PutUint64(res[:], i)
return
}
for {
i := rs.GetUint64() & cfg.EffectiveBits
if _, ok := touchedKeys[i]; ok {
continue
} else {
binary.LittleEndian.PutUint64(res[:], i)
break
}
}
return
}
func GenerateRandTx(ref *RefL1, rs randsrc.RandSrc, cfg *FuzzConfig, touchedKeys map[uint64]struct{}) *Tx {
ref2 := NewRefL2(ref)
var readCount, checkProofCount, writeCount, deleteCount uint32
maxReadCount := rs.GetUint32() % (cfg.MaxReadCountInTx + 1)
maxCheckProofCount := rs.GetUint32() % (cfg.MaxCheckProofCountInTx + 1)
maxWriteCount := rs.GetUint32() % (cfg.MaxWriteCountInTx + 1)
if cfg.MaxActiveCount > 0 && ref.Size() > cfg.MaxActiveCount {
		maxWriteCount = 0 // if we have too many active entries in the ref store, stop writing
}
maxDeleteCount := rs.GetUint32() % (cfg.MaxDeleteCountInTx + 1)
tx := Tx{
OpList: make([]Operation, 0, maxReadCount+maxWriteCount+maxDeleteCount),
Succeed: float32(rs.GetUint32()%0x10000)/float32(0x10000) < cfg.TxSucceedRatio,
}
for readCount != maxReadCount || checkProofCount != maxCheckProofCount ||
writeCount != maxWriteCount || deleteCount != maxDeleteCount {
if rs.GetUint32()%4 == 0 && readCount < maxReadCount {
key := getRand8Bytes(rs, cfg, touchedKeys)
tx.OpList = append(tx.OpList, Operation{
opType: OpRead,
key: key,
value: ref2.Get(key[:]),
})
readCount++
}
if rs.GetUint32()%4 == 1 && checkProofCount < maxCheckProofCount {
key := getRand8Bytes(rs, cfg, touchedKeys)
tx.OpList = append(tx.OpList, Operation{
opType: OpCheckProof,
key: key,
})
checkProofCount++
}
if rs.GetUint32()%4 == 2 && writeCount < maxWriteCount {
op := Operation{
opType: OpWrite,
key: getRand8Bytes(rs, cfg, touchedKeys),
value: getRandValue(rs, cfg),
}
ref2.Set(op.key[:], op.value[:])
tx.OpList = append(tx.OpList, op)
writeCount++
}
if rs.GetUint32()%4 == 3 && deleteCount < maxDeleteCount {
op := Operation{
opType: OpDelete,
key: getRand8Bytes(rs, cfg, touchedKeys),
}
ref2.Delete(op.key[:])
tx.OpList = append(tx.OpList, op)
deleteCount++
}
}
if tx.Succeed { // to prevent inter-tx dependency
for _, op := range tx.OpList {
if op.opType == OpRead || op.opType == OpWrite || op.opType == OpDelete {
touchedKeys[binary.LittleEndian.Uint64(op.key[:])] = struct{}{}
}
}
}
ref2.Close(tx.Succeed)
return &tx
}
func GenerateRandEpoch(height, epochNum int, ref *RefL1, rs randsrc.RandSrc, cfg *FuzzConfig, blkSuc bool) Epoch {
keyCountEstimated := cfg.MaxTxCountInEpoch * (cfg.MaxReadCountInTx +
cfg.MaxWriteCountInTx + cfg.MaxDeleteCountInTx) / 2
touchedKeys := make(map[uint64]struct{}, keyCountEstimated)
txCount := rs.GetUint32() % (cfg.MaxTxCountInEpoch + 1)
epoch := Epoch{TxList: make([]*Tx, int(txCount))}
for i := range epoch.TxList {
// Transactions in an epoch must be independent of each other. We use touchedKeys to ensure this.
tx := GenerateRandTx(ref, rs, cfg, touchedKeys)
if DBG {
fmt.Printf("FinishGeneration h:%d (%v) epoch %d tx %d (%v) of %d\n", height, blkSuc, epochNum, i, tx.Succeed, txCount)
for j, op := range tx.OpList {
fmt.Printf("See operation %d of %d\n", j, len(tx.OpList))
fmt.Printf("%#v\n", op)
}
}
epoch.TxList[i] = tx
}
return epoch
}
func GenerateRandBlock(height int, ref *RefL1, rs randsrc.RandSrc, cfg *FuzzConfig) *Block {
epochCount := rs.GetUint32() % (cfg.MaxEpochCountInBlock + 1)
block := &Block{EpochList: make([]Epoch, epochCount)}
block.Succeed = float32(rs.GetUint32()%0x10000)/float32(0x10000) < cfg.BlockSucceedRatio
withPanic := float32(rs.GetUint32()%0x10000)/float32(0x10000) < cfg.BlockPanicRatio
if block.Succeed && withPanic {
block.PanicNumber = int(1 + rs.GetUint32()%4)
}
for i := range block.EpochList {
if DBG {
fmt.Printf("Generating h:%d epoch %d of %d\n", height, i, epochCount)
}
block.EpochList[i] = GenerateRandEpoch(height, i, ref, rs, cfg, block.Succeed)
}
if block.Succeed {
ref.FlushCache()
}
ref.ClearCache()
return block
}
func MyGet(rbt rabbit.RabbitStore, key []byte) []byte {
res := rbt.Get(key)
if findIt := rbt.Has(key); findIt != (len(res) > 0) {
panic(fmt.Sprintf("Bug in Has: %v %#v", findIt, res))
}
return res
}
func CheckTx(height, epochNum, txNum int, mads *moeingads.MoeingADS, rbt rabbit.RabbitStore, tx *Tx, cfg *FuzzConfig, blkSuc bool) {
for i, op := range tx.OpList {
if DBG {
fmt.Printf("Check %d-%d (%v) tx %d (%v) operation %d of %d\n", height, epochNum, blkSuc, txNum, tx.Succeed, i, len(tx.OpList))
fmt.Printf("%#v\n", op)
}
if op.opType == OpRead {
bz := MyGet(rbt, op.key[:])
if !bytes.Equal(op.value[:], bz) {
panic(fmt.Sprintf("Error in Get %#v real %#v expected %#v", op.key[:], bz, op.value[:]))
}
} else if op.opType == OpCheckProof {
path, ok := rbt.GetShortKeyPath(op.key[:])
if ok && len(path) == 1 {
entry := mads.GetEntry(path[0][:])
if entry != nil {
_, _, err := mads.GetProof(path[0][:])
if err != nil {
fmt.Printf("Why %#v err %#v\n", entry, err)
panic(err)
}
}
}
} else if op.opType == OpWrite {
rbt.Set(op.key[:], op.value[:])
} else if op.opType == OpDelete {
rbt.Delete(op.key[:])
}
}
}
func ExecuteBlock(height int, mads *moeingads.MoeingADS, root storetypes.RootStoreI, block *Block, cfg *FuzzConfig, inParallel bool) (retry bool) {
root.SetHeight(int64(height))
trunk := root.GetTrunkStore(1000).(*store.TrunkStore)
for i, epoch := range block.EpochList {
if DBG {
fmt.Printf("Check h:%d (%v) epoch %d of %d\n", height, block.Succeed, i, len(block.EpochList))
}
dbList := make([]rabbit.RabbitStore, len(epoch.TxList))
var wg sync.WaitGroup
for j, tx := range epoch.TxList {
dbList[j] = rabbit.NewRabbitStore(trunk)
if DBG {
fmt.Printf("Check h:%d (%v) epoch %d tx %d (%v) of %d\n", height, block.Succeed, i, j, tx.Succeed, len(epoch.TxList))
}
if inParallel {
wg.Add(1)
go func(tx *Tx, j int) {
CheckTx(height, i, j, mads, dbList[j], tx, cfg, block.Succeed)
wg.Done()
}(tx, j)
} else {
CheckTx(height, i, j, mads, dbList[j], tx, cfg, block.Succeed)
}
}
if inParallel {
wg.Wait()
}
for j, tx := range epoch.TxList {
if DBG {
fmt.Printf("WriteBack %d-%d tx %d : %v\n", height, i, j, tx.Succeed)
}
dbList[j].Close()
if tx.Succeed {
dbList[j].WriteBack()
}
}
}
defer func() {
if err := recover(); err != nil {
fmt.Printf("RECOVER %#v\n", err)
moeingads.DebugPanicNumber = 0
root.Close()
retry = true
}
}()
if block.PanicNumber != 0 {
moeingads.DebugPanicNumber = block.PanicNumber
block.PanicNumber = 0
}
trunk.Close(block.Succeed)
if (height+1)%cfg.ConsistencyEveryNBlock == 0 {
fmt.Printf("Now Check Consistency\n")
mads.CheckConsistency()
mads.CheckHashConsistency()
}
if (height+1)%cfg.PruneEveryNBlock == 0 && height > cfg.KeepRecentNBlock {
mads.PruneBeforeHeight(int64(height - cfg.KeepRecentNBlock))
}
return false
}
| ["\"RANDFILE\"", "\"RANDCOUNT\""] | [] | ["RANDCOUNT", "RANDFILE"] | [] | ["RANDCOUNT", "RANDFILE"] | go | 2 | 0 | |
ReefberryPi/wsgi.py | """
WSGI config for ReefberryPi project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ReefberryPi.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
go/kbfs/libkbfs/init.go | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"context"
"errors"
"flag"
"fmt"
"os"
"os/signal"
"path/filepath"
"runtime/pprof"
"strings"
"syscall"
"time"
"github.com/keybase/client/go/kbconst"
"github.com/keybase/client/go/kbfs/data"
"github.com/keybase/client/go/kbfs/kbfscrypto"
"github.com/keybase/client/go/kbfs/kbfsmd"
"github.com/keybase/client/go/kbfs/libkey"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/logger"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/go-framed-msgpack-rpc/rpc"
)
const (
// InitDefaultString is the normal mode for when KBFS data will be
// read and written.
InitDefaultString string = "default"
// InitMinimalString is for when KBFS will only be used as an MD
// lookup layer (e.g., for chat on mobile).
InitMinimalString = "minimal"
// InitSingleOpString is for when KBFS will only be used for a
// single logical operation (e.g., as a git remote helper).
InitSingleOpString = "singleOp"
// InitConstrainedString is for when KBFS will use constrained
// resources.
InitConstrainedString = "constrained"
// InitMemoryLimitedString is for when KBFS will use memory limited
// resources.
InitMemoryLimitedString = "memoryLimited"
// InitTestSearchString is for when KBFS will index synced TLFs.
InitTestSearchString = "testSearch"
// InitSingleOpWithQRString is for when KBFS will only be used for
// a single logical operation (e.g., as a git remote helper), with
// QR enabled.
InitSingleOpWithQRString = "singleOpQR"
)
// CtxInitTagKey is the type used for unique context tags for KBFS init.
type CtxInitTagKey int
const (
// CtxInitKey is the type of the tag for unique operation IDs
// for KBFS init.
CtxInitKey CtxInitTagKey = iota
)
// CtxInitID is the display name for the unique operation
// init ID tag.
const CtxInitID = "KBFSINIT"
// AdditionalProtocolCreator creates an additional protocol.
type AdditionalProtocolCreator func(Context, Config) (rpc.Protocol, error)
const (
configModeStr = "kbfs.mode"
configBlockCacheMemMaxBytesStr = "kbfs.block_cache.mem_max_bytes"
configBlockCacheDiskMaxFracStr = "kbfs.block_cache.disk_max_fraction"
configBlockCacheSyncMaxFracStr = "kbfs.block_cache.sync_max_fraction"
)
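// Note (illustrative, not taken from this repo's docs): these dotted paths are
// resolved against the local Keybase config file by getInitMode and
// getCacheFrac below. A hypothetical config fragment such as
//
//	{"kbfs": {"mode": "constrained", "block_cache": {"disk_max_fraction": 0.10}}}
//
// would select constrained mode and a 10% disk block cache whenever the
// corresponding command-line flags are left unset.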
// InitParams contains the initialization parameters for Init(). It is
// usually filled in by the flags parser passed into AddFlags().
type InitParams struct {
// Whether to print debug messages.
Debug bool
// If non-empty, the host:port of the block server. If empty,
// a default value is used depending on the run mode. Can also
// be "memory" for an in-memory test server or
// "dir:/path/to/dir" for an on-disk test server.
BServerAddr string
// If non-empty the host:port of the metadata server. If
// empty, a default value is used depending on the run mode.
// Can also be "memory" for an in-memory test server or
// "dir:/path/to/dir" for an on-disk test server.
MDServerAddr string
// If non-zero, specifies the capacity (in bytes) of the block cache. If
// zero, the capacity is set using getDefaultBlockCacheCapacity().
CleanBlockCacheCapacity uint64
// Fake local user name.
LocalUser string
// Where to put favorites. Has an effect only when LocalUser
// is non-empty, in which case it must be either "memory" or
// "dir:/path/to/dir".
LocalFavoriteStorage string
// TLFValidDuration is the duration that TLFs are valid
// before marked for lazy revalidation.
TLFValidDuration time.Duration
// MetadataVersion is the default version of metadata to use
// when creating new metadata.
MetadataVersion kbfsmd.MetadataVer
// BlockCryptVersion is the encryption version to use when
// encrypting new blocks.
BlockCryptVersion kbfscrypto.EncryptionVer
// LogToFile if true, logs to a default file location.
LogToFile bool
// LogFileConfig tells us where to log and rotation config.
LogFileConfig logger.LogFileConfig
// TLFJournalBackgroundWorkStatus is the status to use to
// pass into JournalManager.enableJournaling. Only has an effect when
// EnableJournal is non-empty.
TLFJournalBackgroundWorkStatus TLFJournalBackgroundWorkStatus
// AdditionalProtocolCreators are for adding additional protocols that we
// should handle for service to call in.
AdditionalProtocolCreators []AdditionalProtocolCreator
// EnableJournal enables journaling.
EnableJournal bool
// DiskCacheMode specifies which mode to start the disk cache.
DiskCacheMode DiskCacheMode
// StorageRoot, if non-empty, points to a local directory to put its local
// databases for things like the journal or disk cache.
StorageRoot string
// BGFlushPeriod indicates how long to wait for a batch to fill up
// before syncing a set of changes on a TLF to the servers.
BGFlushPeriod time.Duration
// BGFlushDirOpBatchSize indicates how many directory operations
// in a TLF should be batched together in a single background
// flush.
BGFlushDirOpBatchSize int
// Mode describes how KBFS should initialize itself.
Mode string
// DiskBlockCacheFraction indicates what fraction of free space on the disk
// is allowed to be occupied by the KBFS disk block cache.
DiskBlockCacheFraction float64
// SyncBlockCacheFraction indicates what fraction of free space on the disk
// is allowed to be occupied by the KBFS sync block cache for offline use.
SyncBlockCacheFraction float64
}
// defaultBServer returns the default value for the -bserver flag.
func defaultBServer(ctx Context) string {
switch ctx.GetRunMode() {
case kbconst.DevelRunMode:
return memoryAddr
case kbconst.StagingRunMode:
return `
bserver-0.dev.keybase.io:443,bserver-1.dev.keybase.io:443`
case kbconst.ProductionRunMode:
return `
bserver-0.kbfs.keybaseapi.com:443,bserver-1.kbfs.keybaseapi.com:443;
bserver-0.kbfs.keybase.io:443,bserver-1.kbfs.keybase.io:443`
default:
return ""
}
}
// defaultMDServer returns the default value for the -mdserver flag.
func defaultMDServer(ctx Context) string {
switch ctx.GetRunMode() {
case kbconst.DevelRunMode:
return memoryAddr
case kbconst.StagingRunMode:
return `
mdserver-0.dev.keybase.io:443,mdserver-1.dev.keybase.io:443`
case kbconst.ProductionRunMode:
return `
mdserver-0.kbfs.keybaseapi.com:443,mdserver-1.kbfs.keybaseapi.com:443;
mdserver-0.kbfs.keybase.io:443,mdserver-1.kbfs.keybase.io:443`
default:
return ""
}
}
// defaultMetadataVersion returns the default metadata version per run mode.
func defaultMetadataVersion(ctx Context) kbfsmd.MetadataVer {
switch ctx.GetRunMode() {
case kbconst.DevelRunMode:
return kbfsmd.ImplicitTeamsVer
case kbconst.StagingRunMode:
return kbfsmd.ImplicitTeamsVer
case kbconst.ProductionRunMode:
return kbfsmd.ImplicitTeamsVer
default:
return kbfsmd.ImplicitTeamsVer
}
}
func defaultLogPath(ctx Context) string {
return filepath.Join(ctx.GetLogDir(), libkb.KBFSLogFileName)
}
// DefaultInitParams returns default init params
func DefaultInitParams(ctx Context) InitParams {
journalEnv := os.Getenv("KBFS_DEFAULT_ENABLE_JOURNAL_VALUE")
if journalEnv == "" {
journalEnv = "true"
}
return InitParams{
Debug: BoolForString(os.Getenv("KBFS_DEBUG")),
BServerAddr: defaultBServer(ctx),
MDServerAddr: defaultMDServer(ctx),
TLFValidDuration: tlfValidDurationDefault,
MetadataVersion: defaultMetadataVersion(ctx),
BlockCryptVersion: kbfscrypto.EncryptionSecretboxWithKeyNonce,
LogFileConfig: logger.LogFileConfig{
MaxAge: 30 * 24 * time.Hour,
MaxSize: 128 * 1024 * 1024,
MaxKeepFiles: 3,
},
TLFJournalBackgroundWorkStatus: TLFJournalBackgroundWorkEnabled,
StorageRoot: ctx.GetDataDir(),
BGFlushPeriod: bgFlushPeriodDefault,
BGFlushDirOpBatchSize: bgFlushDirOpBatchSizeDefault,
EnableJournal: BoolForString(journalEnv),
DiskCacheMode: DiskCacheModeLocal,
Mode: "",
}
}
// AddFlagsWithDefaults adds libkbfs flags to the given FlagSet, given
// a set of default flags. Returns an InitParams that will be filled
// in once the given FlagSet is parsed.
func AddFlagsWithDefaults(
flags *flag.FlagSet, defaultParams InitParams,
defaultLogPath string) *InitParams {
var params InitParams
flags.BoolVar(¶ms.Debug, "debug", defaultParams.Debug,
"Print debug messages")
flags.StringVar(¶ms.BServerAddr, "bserver", defaultParams.BServerAddr,
"host:port of the block server, 'memory', or 'dir:/path/to/dir'")
flags.StringVar(¶ms.MDServerAddr, "mdserver",
defaultParams.MDServerAddr,
"host:port of the metadata server, 'memory', or 'dir:/path/to/dir'")
flags.StringVar(¶ms.LocalUser, "localuser", defaultParams.LocalUser,
"fake local user")
flags.StringVar(¶ms.LocalFavoriteStorage, "local-fav-storage",
defaultParams.LocalFavoriteStorage,
"where to put favorites; used only when -localuser is set, then must "+
"either be 'memory' or 'dir:/path/to/dir'")
flags.DurationVar(¶ms.TLFValidDuration, "tlf-valid",
defaultParams.TLFValidDuration,
"time tlfs are valid before redoing identification")
flags.BoolVar(¶ms.LogToFile, "log-to-file", defaultParams.LogToFile,
fmt.Sprintf("Log to default file: %s", defaultLogPath))
flags.StringVar(¶ms.LogFileConfig.Path, "log-file", "",
"Path to log file")
flags.DurationVar(¶ms.LogFileConfig.MaxAge, "log-file-max-age",
defaultParams.LogFileConfig.MaxAge,
"Maximum age of a log file before rotation")
params.LogFileConfig.MaxSize = defaultParams.LogFileConfig.MaxSize
flags.Var(SizeFlag{¶ms.LogFileConfig.MaxSize}, "log-file-max-size",
"Maximum size of a log file before rotation")
// The default is to *DELETE* old log files for kbfs.
flags.IntVar(¶ms.LogFileConfig.MaxKeepFiles, "log-file-max-keep-files",
defaultParams.LogFileConfig.MaxKeepFiles, "Maximum number of log "+
"files for this service, older ones are deleted. 0 for infinite.")
flags.Uint64Var(¶ms.CleanBlockCacheCapacity, "clean-bcache-cap",
defaultParams.CleanBlockCacheCapacity,
"If non-zero, specify the capacity of clean block cache. If zero, "+
"the capacity is set based on system RAM.")
flags.StringVar(¶ms.StorageRoot, "storage-root",
defaultParams.StorageRoot, "Specifies where Keybase will store its "+
"local databases for the journal and disk cache.")
params.DiskCacheMode = defaultParams.DiskCacheMode
flags.Var(¶ms.DiskCacheMode, "disk-cache-mode",
"Sets the mode for the disk cache. If 'local', then it uses a "+
"subdirectory of -storage-root to store the cache. If 'remote', "+
"then it connects to the local KBFS instance and delegates disk "+
"cache operations to it.")
flags.BoolVar(¶ms.EnableJournal, "enable-journal",
defaultParams.EnableJournal, "Enables write journaling for TLFs.")
// No real need to enable setting
// params.TLFJournalBackgroundWorkStatus via a flag.
params.TLFJournalBackgroundWorkStatus =
defaultParams.TLFJournalBackgroundWorkStatus
flags.DurationVar(¶ms.BGFlushPeriod, "sync-batch-period",
defaultParams.BGFlushPeriod,
"The amount of time to wait before syncing data in a TLF, if the "+
"batch size doesn't fill up.")
flags.IntVar(¶ms.BGFlushDirOpBatchSize, "sync-batch-size",
defaultParams.BGFlushDirOpBatchSize,
"The number of unflushed directory operations in a TLF that will "+
"trigger an immediate data sync.")
flags.IntVar((*int)(¶ms.MetadataVersion), "md-version",
int(defaultParams.MetadataVersion),
"Metadata version to use when creating new metadata")
flags.IntVar((*int)(¶ms.BlockCryptVersion), "block-crypt-version",
int(defaultParams.BlockCryptVersion),
"Encryption version to use when encrypting new blocks")
flags.StringVar(¶ms.Mode, "mode", defaultParams.Mode,
fmt.Sprintf("Overall initialization mode for KBFS, indicating how "+
"heavy-weight it can be (%s, %s, %s, %s or %s)", InitDefaultString,
InitMinimalString, InitSingleOpString, InitConstrainedString,
InitMemoryLimitedString))
flags.Float64Var(¶ms.DiskBlockCacheFraction,
"disk-block-cache-fraction", defaultParams.DiskBlockCacheFraction,
"The portion of the free disk space that KBFS will use for caching ")
flags.Float64Var(¶ms.SyncBlockCacheFraction,
"sync-block-cache-fraction", defaultParams.SyncBlockCacheFraction,
"The portion of the free disk space that KBFS will use for offline storage")
return ¶ms
}
// AddFlags adds libkbfs flags to the given FlagSet. Returns an
// InitParams that will be filled in once the given FlagSet is parsed.
func AddFlags(flags *flag.FlagSet, ctx Context) *InitParams {
return AddFlagsWithDefaults(
flags, DefaultInitParams(ctx), defaultLogPath(ctx))
}
// GetRemoteUsageString returns a string describing the flags to use
// to run against remote KBFS servers.
func GetRemoteUsageString() string {
return ` [-debug]
[-bserver=host:port] [-mdserver=host:port]
[-log-to-file] [-log-file=path/to/file] [-clean-bcache-cap=0]`
}
// GetLocalUsageString returns a string describing the flags to use to
// run in a local testing environment.
func GetLocalUsageString() string {
return ` [-debug]
[-bserver=(memory | dir:/path/to/dir | host:port)]
[-mdserver=(memory | dir:/path/to/dir | host:port)]
[-localuser=<user>]
[-local-fav-storage=(memory | dir:/path/to/dir)]
[-log-to-file] [-log-file=path/to/file] [-clean-bcache-cap=0]`
}
// GetDefaultsUsageString returns a string describing the default
// values of flags based on the run mode.
func GetDefaultsUsageString(ctx Context) string {
runMode := ctx.GetRunMode()
defaultBServer := defaultBServer(ctx)
defaultMDServer := defaultMDServer(ctx)
return fmt.Sprintf(` (KEYBASE_RUN_MODE=%s)
-bserver=%s
-mdserver=%s`,
runMode, defaultBServer, defaultMDServer)
}
const memoryAddr = "memory"
const dirAddrPrefix = "dir:"
func parseRootDir(addr string) (string, bool) {
if !strings.HasPrefix(addr, dirAddrPrefix) {
return "", false
}
serverRootDir := addr[len(dirAddrPrefix):]
if len(serverRootDir) == 0 {
return "", false
}
return serverRootDir, true
}
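// For example, following the parsing logic above:
//
//	parseRootDir("dir:/tmp/kbfs_bserver") // -> "/tmp/kbfs_bserver", true
//	parseRootDir("dir:")                  // -> "", false
//	parseRootDir("memory")                // -> "", false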
func makeMDServer(kbCtx Context, config Config, mdserverAddr string,
rpcLogFactory rpc.LogFactory, log logger.Logger) (
MDServer, error) {
if mdserverAddr == memoryAddr {
log.Debug("Using in-memory mdserver")
// local in-memory MD server
return NewMDServerMemory(mdServerLocalConfigAdapter{config})
}
if len(mdserverAddr) == 0 {
return nil, errors.New("Empty MD server address")
}
if serverRootDir, ok := parseRootDir(mdserverAddr); ok {
log.Debug("Using on-disk mdserver at %s", serverRootDir)
// local persistent MD server
return MakeDiskMDServer(config, serverRootDir)
}
remote, err := rpc.ParsePrioritizedRoundRobinRemote(mdserverAddr)
if err != nil {
return nil, err
}
// Remote MD server. This can't fail; reconnection attempts
// will be automatic.
log.Debug("Using remote mdserver %s", remote)
mdServer := NewMDServerRemote(kbCtx, config, remote, rpcLogFactory)
return mdServer, nil
}
func makeKeyServer(
config Config, keyserverAddr string, log logger.Logger) (
libkey.KeyServer, error) {
keyOpsConfig := keyOpsConfigWrapper{config}
if keyserverAddr == memoryAddr {
log.Debug("Using in-memory keyserver")
// local in-memory key server
return libkey.NewKeyServerMemory(keyOpsConfig, log)
}
if len(keyserverAddr) == 0 {
return nil, errors.New("Empty key server address")
}
if serverRootDir, ok := parseRootDir(keyserverAddr); ok {
log.Debug("Using on-disk keyserver at %s", serverRootDir)
// local persistent key server
keyPath := filepath.Join(serverRootDir, "kbfs_key")
return libkey.NewKeyServerDir(keyOpsConfig, log, keyPath)
}
log.Debug("Using remote keyserver %s (same as mdserver)", keyserverAddr)
// currently the MD server also acts as the key server.
keyServer, ok := config.MDServer().(libkey.KeyServer)
if !ok {
return nil, errors.New("MD server is not a key server")
}
return keyServer, nil
}
func makeBlockServer(kbCtx Context, config Config, bserverAddr string,
rpcLogFactory rpc.LogFactory,
log logger.Logger) (BlockServer, error) {
if bserverAddr == memoryAddr {
log.Debug("Using in-memory bserver")
bserverLog := config.MakeLogger("BSM")
// local in-memory block server
return NewBlockServerMemory(bserverLog), nil
}
if len(bserverAddr) == 0 {
return nil, errors.New("Empty block server address")
}
if serverRootDir, ok := parseRootDir(bserverAddr); ok {
log.Debug("Using on-disk bserver at %s", serverRootDir)
// local persistent block server
return MakeDiskBlockServer(config, serverRootDir), nil
}
remote, err := rpc.ParsePrioritizedRoundRobinRemote(bserverAddr)
if err != nil {
return nil, err
}
log.Debug("Using remote bserver %s", remote)
return NewBlockServerRemote(kbCtx, config, remote, rpcLogFactory), nil
}
// InitLogWithPrefix sets up logging, switching to a log file if
// necessary, given a prefix and a default log path. It returns a valid
// logger even on error; errors are non-fatal and may safely be ignored.
// Any such errors are logged to the returned logger.
func InitLogWithPrefix(
params InitParams, ctx Context, prefix string,
defaultLogPath string) (logger.Logger, error) {
var err error
// Set log file to default if log-to-file was specified
if params.LogToFile {
if params.LogFileConfig.Path != "" {
return nil, fmt.Errorf(
"log-to-file and log-file flags can't be specified together")
}
params.LogFileConfig.Path = defaultLogPath
}
if params.LogFileConfig.Path != "" {
err = logger.SetLogFileConfig(¶ms.LogFileConfig)
}
log := logger.New(prefix)
log.Configure("", params.Debug, "")
log.Info("KBFS version %s", VersionString())
if err != nil {
log.Warning("Failed to setup log file %q: %+v",
params.LogFileConfig.Path, err)
}
return log, err
}
// InitLog sets up logging, switching to a log file if necessary.
// It returns a valid logger even on error; errors are non-fatal and may
// safely be ignored. Any such errors are logged to the returned logger.
func InitLog(params InitParams, ctx Context) (logger.Logger, error) {
return InitLogWithPrefix(params, ctx, "kbfs", defaultLogPath(ctx))
}
// InitWithLogPrefix initializes a config and returns it, given a prefix.
//
// onInterruptFn is called whenever an interrupt signal is received
// (e.g., if the user hits Ctrl-C).
//
// Init should be called at the beginning of main. Shutdown (see
// below) should then be called at the end of main (usually via
// defer).
//
// The keybaseServiceCn argument specifies a custom service and crypto
// implementation (for non-RPC environments such as mobile). If it is nil,
// the default RPC implementation is used.
func InitWithLogPrefix(
ctx context.Context, kbCtx Context, params InitParams,
keybaseServiceCn KeybaseServiceCn, onInterruptFn func() error,
log logger.Logger, logPrefix string) (cfg Config, err error) {
done := make(chan struct{})
interruptChan := make(chan os.Signal, 1)
SIGINT := os.Interrupt
signal.Notify(interruptChan, SIGINT, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGABRT)
if SIGPWR != NonexistentSignal {
signal.Notify(interruptChan, SIGPWR)
}
var initInterruptSignal os.Signal
var interruptErr error
go func() {
closed := false
for sig := range interruptChan {
initInterruptSignal = sig
if !closed {
close(done)
closed = true
}
// Restore stacktraces of signals that are supposed to print them
// https://golang.org/pkg/os/signal/#hdr-Default_behavior_of_signals_in_Go_programs
switch sig {
case syscall.SIGQUIT, syscall.SIGILL, syscall.SIGTRAP, syscall.SIGABRT:
_ = pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
}
if onInterruptFn != nil {
interruptErr = onInterruptFn()
}
// Continue loop only on SIGINT; exit immediately on other codes
// even if unmount has failed.
switch sig {
case SIGINT:
default:
if interruptErr != nil {
log.Info("Failed to unmount before exit: %s", interruptErr)
os.Exit(1)
} else {
// Do not return 128 + signal since kbfsfuse is not a shell command
os.Exit(0)
}
}
}
}()
// Spawn a new goroutine for `doInit` so that we can `select` on
// `done` and `errCh` below. This is particularly for the
// situation where a SIGINT comes in while `doInit` is still not
// finished (because e.g. service daemon is not up), where the
// process can fail to exit while being stuck in `doInit`. This
// allows us to not call `os.Exit()` in the interrupt handler.
errCh := make(chan error)
go func() {
var er error
cfg, er = doInit(ctx, kbCtx, params, keybaseServiceCn, log, logPrefix)
errCh <- er
}()
select {
case <-done:
return nil, fmt.Errorf("kbfsfuse received signal=<%s>", initInterruptSignal)
case err = <-errCh:
return cfg, err
}
}
// Init initializes a config and returns it.
//
// onInterruptFn is called whenever an interrupt signal is received
// (e.g., if the user hits Ctrl-C).
//
// Init should be called at the beginning of main. Shutdown (see
// below) should then be called at the end of main (usually via
// defer).
//
// The keybaseServiceCn argument specifies a custom service and crypto
// implementation (for non-RPC environments such as mobile). If it is nil,
// the default RPC implementation is used.
func Init(
ctx context.Context, kbCtx Context, params InitParams,
keybaseServiceCn KeybaseServiceCn, onInterruptFn func() error,
log logger.Logger) (cfg Config, err error) {
return InitWithLogPrefix(
ctx, kbCtx, params, keybaseServiceCn, onInterruptFn, log, "kbfs")
}
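// Illustrative caller-side sketch of the pattern described above (variable
// names are hypothetical and error handling is elided):
//
//	params := libkbfs.DefaultInitParams(kbCtx)
//	config, err := libkbfs.Init(ctx, kbCtx, params, nil, onUnmountFn, log)
//	if err != nil {
//		// abort startup
//	}
//	defer libkbfs.Shutdown()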
func getInitMode(
ctx context.Context, kbCtx Context, params InitParams,
log logger.Logger) (InitMode, error) {
mode := InitDefault
// Use the KBFS mode from the config file if none is provided on
// the command line.
modeString := params.Mode
if modeString == "" {
config := kbCtx.GetEnv().GetConfig()
configModeString, ok := config.GetStringAtPath(configModeStr)
if ok {
log.CDebugf(
ctx, "Using mode from config file: %s", configModeString)
modeString = configModeString
} else {
modeString = InitDefaultString
}
}
switch modeString {
case InitDefaultString:
// Already the default
case InitMinimalString:
mode = InitMinimal
case InitSingleOpString:
mode = InitSingleOp
case InitConstrainedString:
mode = InitConstrained
case InitMemoryLimitedString:
mode = InitMemoryLimited
case InitTestSearchString:
mode = InitTestSearch
case InitSingleOpWithQRString:
mode = InitSingleOpWithQR
default:
return nil, fmt.Errorf("Unexpected mode: %s", params.Mode)
}
log.CDebugf(ctx, "Initializing in %s mode", modeString)
kbCtx.GetPerfLog().CDebugf(
ctx, "KBFS initializing in %s mode, version %s", modeString,
VersionString())
return NewInitModeFromType(mode), nil
}
func getCleanBlockCacheCapacity(
ctx context.Context, kbCtx Context, params InitParams,
log logger.Logger) uint64 {
cap := params.CleanBlockCacheCapacity
// Use the capacity from the config file if none is provided on
// the command line.
if cap == 0 {
config := kbCtx.GetEnv().GetConfig()
capInt, ok := config.GetIntAtPath(configBlockCacheMemMaxBytesStr)
if ok {
log.CDebugf(
ctx, "Using block cache capacity from config file: %d", capInt)
cap = uint64(capInt)
}
}
return cap
}
func getCacheFrac(
ctx context.Context, kbCtx Context, param, defaultVal float64,
configKey string, log logger.Logger) float64 {
frac := param
// Use the fraction from the config file if none is provided on
// the command line.
if frac == 0 {
config := kbCtx.GetEnv().GetConfig()
configFrac, ok := config.GetFloatAtPath(configKey)
if ok {
log.CDebugf(
ctx, "Using %s value from config file: %f", configKey,
configFrac)
frac = configFrac
}
}
if frac == 0 {
frac = defaultVal
}
return frac
}
func doInit(
ctx context.Context, kbCtx Context, params InitParams,
keybaseServiceCn KeybaseServiceCn, log logger.Logger,
logPrefix string) (Config, error) {
ctx = CtxWithRandomIDReplayable(ctx, CtxInitKey, CtxInitID, log)
initMode, err := getInitMode(ctx, kbCtx, params, log)
if err != nil {
return nil, err
}
config := NewConfigLocal(initMode,
func(module string) logger.Logger {
mname := logPrefix
if module != "" {
mname += fmt.Sprintf("(%s)", module)
}
lg := logger.New(mname)
if params.Debug {
// Turn on debugging. TODO: allow a proper log file and
// style to be specified.
lg.Configure("", true, "")
}
return lg
}, params.StorageRoot, params.DiskCacheMode, kbCtx)
config.SetVLogLevel(kbCtx.GetVDebugSetting())
if cap := getCleanBlockCacheCapacity(ctx, kbCtx, params, log); cap > 0 {
log.CDebugf(
ctx, "Overriding default clean block cache capacity from %d to %d",
config.BlockCache().GetCleanBytesCapacity(), cap)
config.BlockCache().SetCleanBytesCapacity(cap)
}
workers := config.Mode().BlockWorkers()
prefetchWorkers := config.Mode().PrefetchWorkers()
throttledPrefetchPeriod := config.Mode().ThrottledPrefetchPeriod()
config.SetBlockOps(NewBlockOpsStandard(
config, workers, prefetchWorkers, throttledPrefetchPeriod))
bsplitter, err := data.NewBlockSplitterSimple(
data.MaxBlockSizeBytesDefault, 8*1024, config.Codec())
if err != nil {
return nil, err
}
err = bsplitter.SetMaxDirEntriesByBlockSize(config.Codec())
if err != nil {
return nil, err
}
config.SetBlockSplitter(bsplitter)
if registry := config.MetricsRegistry(); registry != nil {
keyCache := config.KeyCache()
keyCache = NewKeyCacheMeasured(keyCache, registry)
config.SetKeyCache(keyCache)
keyBundleCache := config.KeyBundleCache()
keyBundleCache = libkey.NewKeyBundleCacheMeasured(
keyBundleCache, registry)
config.SetKeyBundleCache(keyBundleCache)
}
config.SetMetadataVersion(params.MetadataVersion)
config.SetBlockCryptVersion(params.BlockCryptVersion)
config.SetTLFValidDuration(params.TLFValidDuration)
config.SetBGFlushPeriod(params.BGFlushPeriod)
kbfsLog := config.MakeLogger("")
// Initialize Keybase service connection
if keybaseServiceCn == nil {
keybaseServiceCn = keybaseDaemon{}
}
service, err := keybaseServiceCn.NewKeybaseService(
config, params, kbCtx, kbfsLog)
if err != nil {
return nil, fmt.Errorf("problem creating service: %s", err)
}
// Initialize KBPKI client (needed for KBFSOps, MD Server, and Chat).
k := NewKBPKIClient(config, kbfsLog)
config.SetKBPKI(k)
// Initialize Chat client (for file edit notifications).
chat, err := keybaseServiceCn.NewChat(config, params, kbCtx, kbfsLog)
if err != nil {
return nil, fmt.Errorf("problem creating chat: %s", err)
}
config.SetChat(chat)
initDoneCh := make(chan struct{})
kbfsOps := NewKBFSOpsStandard(kbCtx, config, initDoneCh)
defer close(initDoneCh)
config.SetKBFSOps(kbfsOps)
config.SetNotifier(kbfsOps)
config.SetKeyManager(NewKeyManagerStandard(config))
config.SetMDOps(NewMDOpsStandard(config))
// Enable the disk limiter before the keybase service, since if
// that service receives a logged-in event it will create a disk
// block cache, which requires the disk limiter.
config.SetDiskBlockCacheFraction(getCacheFrac(
ctx, kbCtx, params.DiskBlockCacheFraction,
defaultDiskBlockCacheFraction, configBlockCacheDiskMaxFracStr, log))
config.SetSyncBlockCacheFraction(getCacheFrac(
ctx, kbCtx, params.SyncBlockCacheFraction,
defaultSyncBlockCacheFraction, configBlockCacheSyncMaxFracStr, log))
err = config.EnableDiskLimiter(params.StorageRoot)
if err != nil {
log.CWarningf(ctx, "Could not enable disk limiter: %+v", err)
return nil, err
}
if registry := config.MetricsRegistry(); registry != nil {
service = NewKeybaseServiceMeasured(service, registry)
}
config.SetKeybaseService(service)
kbfsOps.favs.Initialize(ctx)
config.SetReporter(NewReporterKBPKI(config, 10, 1000))
// Initialize Crypto client (needed for MD and Block servers).
crypto, err := keybaseServiceCn.NewCrypto(config, params, kbCtx, kbfsLog)
if err != nil {
return nil, fmt.Errorf("problem creating crypto: %s", err)
}
config.SetCrypto(crypto)
// Initialize MDServer connection.
mdServer, err := makeMDServer(kbCtx, config, params.MDServerAddr, kbCtx.NewRPCLogFactory(), log)
if err != nil {
return nil, fmt.Errorf("problem creating MD server: %+v", err)
}
config.SetMDServer(mdServer)
// Must do this after setting the md server, since it depends on
// being able to fetch MDs.
go kbfsOps.initSyncedTlfs()
// Initialize KeyServer connection. MDServer is the KeyServer at the
// moment.
keyServer, err := makeKeyServer(config, params.MDServerAddr, log)
if err != nil {
return nil, fmt.Errorf("problem creating key server: %+v", err)
}
if registry := config.MetricsRegistry(); registry != nil {
keyServer = libkey.NewKeyServerMeasured(keyServer, registry)
}
config.SetKeyServer(keyServer)
// Initialize BlockServer connection.
bserv, err := makeBlockServer(
kbCtx, config, params.BServerAddr, kbCtx.NewRPCLogFactory(), log)
if err != nil {
return nil, fmt.Errorf("cannot open block database: %+v", err)
}
if registry := config.MetricsRegistry(); registry != nil {
bserv = NewBlockServerMeasured(bserv, registry)
}
config.SetBlockServer(bserv)
// Don't trigger cache initialization warnings in single-op mode
// (e.g., during a git pull), because KBFS might be running in
// constrained mode and the cache remote proxy wouldn't be
// available (which is fine).
doSendWarnings := !config.Mode().IsSingleOp()
err = config.MakeDiskBlockCacheIfNotExists()
if err != nil {
log.CWarningf(ctx, "Could not initialize disk cache: %+v", err)
notification := &keybase1.FSNotification{
StatusCode: keybase1.FSStatusCode_ERROR,
NotificationType: keybase1.FSNotificationType_INITIALIZED,
ErrorType: keybase1.FSErrorType_DISK_CACHE_ERROR_LOG_SEND,
}
if doSendWarnings {
defer config.Reporter().Notify(ctx, notification)
}
} else {
log.CDebugf(ctx, "Disk cache of type \"%s\" enabled",
params.DiskCacheMode.String())
}
err = config.MakeDiskMDCacheIfNotExists()
if err != nil {
log.CWarningf(ctx, "Could not initialize MD cache: %+v", err)
notification := &keybase1.FSNotification{
StatusCode: keybase1.FSStatusCode_ERROR,
NotificationType: keybase1.FSNotificationType_INITIALIZED,
ErrorType: keybase1.FSErrorType_DISK_CACHE_ERROR_LOG_SEND,
}
if doSendWarnings {
defer config.Reporter().Notify(ctx, notification)
}
} else {
log.CDebugf(ctx, "Disk MD cache enabled")
}
err = config.MakeDiskQuotaCacheIfNotExists()
if err != nil {
log.CWarningf(ctx, "Could not initialize disk quota cache: %+v", err)
notification := &keybase1.FSNotification{
StatusCode: keybase1.FSStatusCode_ERROR,
NotificationType: keybase1.FSNotificationType_INITIALIZED,
ErrorType: keybase1.FSErrorType_DISK_CACHE_ERROR_LOG_SEND,
}
if doSendWarnings {
defer config.Reporter().Notify(ctx, notification)
}
} else {
log.CDebugf(ctx, "Disk quota cache enabled")
}
err = config.MakeBlockMetadataStoreIfNotExists()
if err != nil {
log.CWarningf(ctx,
"Could not initialize block metadata store: %+v", err)
/*
notification := &keybase1.FSNotification{
StatusCode: keybase1.FSStatusCode_ERROR,
NotificationType: keybase1.FSNotificationType_INITIALIZED,
ErrorType: keybase1.FSErrorType_DISK_CACHE_ERROR_LOG_SEND,
}
defer config.Reporter().Notify(ctx, notification)
*/
}
log.CDebugf(ctx, "Disk block metadata store cache enabled")
if config.Mode().KBFSServiceEnabled() {
// Initialize kbfsService only when we run a full KBFS process.
// This requires the disk block cache to have been initialized, if it
// should be initialized.
kbfsService, err := NewKBFSService(kbCtx, config)
if err != nil {
// This error shouldn't be fatal
log.CWarningf(ctx, "Error starting RPC server for KBFS: %+v", err)
} else {
config.SetKBFSService(kbfsService)
log.CDebugf(ctx, "Started RPC server for KBFS")
}
}
ctx60s, cancel := context.WithTimeout(ctx, 60*time.Second)
defer cancel()
// TODO: Don't turn on journaling if either -bserver or
// -mdserver points to a local implementation.
if params.EnableJournal && config.Mode().JournalEnabled() {
journalRoot := filepath.Join(params.StorageRoot, "kbfs_journal")
err = config.EnableJournaling(ctx60s, journalRoot,
params.TLFJournalBackgroundWorkStatus)
if err != nil {
log.CWarningf(ctx, "Could not initialize journal server: %+v", err)
}
log.CDebugf(ctx, "Journaling enabled")
}
if params.BGFlushDirOpBatchSize < 1 {
return nil, fmt.Errorf(
"Illegal sync batch size: %d", params.BGFlushDirOpBatchSize)
}
log.CDebugf(ctx, "Enabling a dir op batch size of %d",
params.BGFlushDirOpBatchSize)
config.SetBGFlushDirOpBatchSize(params.BGFlushDirOpBatchSize)
if config.Mode().OldStorageRootCleaningEnabled() {
go cleanOldTempStorageRoots(config)
}
return config, nil
}
// Shutdown does any necessary shutdown tasks for libkbfs. Shutdown
// should be called at the end of main.
func Shutdown() {}
| ["\"KBFS_DEFAULT_ENABLE_JOURNAL_VALUE\"", "\"KBFS_DEBUG\""] | [] | ["KBFS_DEBUG", "KBFS_DEFAULT_ENABLE_JOURNAL_VALUE"] | [] | ["KBFS_DEBUG", "KBFS_DEFAULT_ENABLE_JOURNAL_VALUE"] | go | 2 | 0 | |
backend/conf/conf.go | package conf
import (
"fmt"
"github.com/joho/godotenv"
"os"
)
func Init() {
_ = godotenv.Load("conf/dev.env")
fmt.Println("name: ", os.Getenv("name"))
fmt.Println("age: ", os.Getenv("age"))
fmt.Println("city: ", os.Getenv("city"))
}
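// Illustrative only: Init expects a dotenv file at conf/dev.env. A
// hypothetical file matching the keys read above would look like:
//
//	name=alice
//	age=30
//	city=Amsterdam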
| ["\"name\"", "\"age\"", "\"city\""] | [] | ["age", "city", "name"] | [] | ["age", "city", "name"] | go | 3 | 0 | |
tutorials/assets/tensorflow_to_onnx_example.py | # SPDX-License-Identifier: Apache-2.0
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A deep MNIST classifier using convolutional layers.
See extensive documentation at
https://www.tensorflow.org/get_started/mnist/pros
"""
# Disable linter warnings to maintain consistency with tutorial.
# pylint: disable=invalid-name
# pylint: disable=g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import os
import shutil
import tempfile
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
def add(x, y):
return tf.nn.bias_add(x, y, data_format="NHWC")
def deepnn(x):
"""deepnn builds the graph for a deep net for classifying digits.
Args:
x: an input tensor with the dimensions (N_examples, 784), where 784 is the
number of pixels in a standard MNIST image.
Returns:
y, a tensor of shape (N_examples, 10), with values equal to the logits of
classifying the digit into one of 10 classes (the digits 0-9). Unlike the
upstream tutorial, this variant applies no dropout, so no keep_prob is returned.
"""
# Reshape to use within a convolutional neural net.
# Last dimension is for "features" - there is only one here, since images are
# grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
with tf.name_scope('reshape'):
x_image = tf.reshape(x, [-1, 1, 28, 28])
x_image = tf.transpose(x_image, [0, 2, 3, 1])
# First convolutional layer - maps one grayscale image to 32 feature maps.
with tf.name_scope('conv1'):
w_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(add(conv2d(x_image, w_conv1), b_conv1))
# Pooling layer - downsamples by 2X.
with tf.name_scope('pool1'):
h_pool1 = max_pool_2x2(h_conv1)
# Second convolutional layer -- maps 32 feature maps to 64.
with tf.name_scope('conv2'):
w_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(add(conv2d(h_pool1, w_conv2), b_conv2))
# Second pooling layer.
with tf.name_scope('pool2'):
h_pool2 = max_pool_2x2(h_conv2)
# Fully connected layer 1 -- after 2 rounds of downsampling, our 28x28 image
# is down to 7x7x64 feature maps -- maps this to 1024 features.
with tf.name_scope('fc1'):
w_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)
# Map the 1024 features to 10 classes, one for each digit
with tf.name_scope('fc2'):
w_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.matmul(h_fc1, w_fc2) + b_fc2
return y_conv
def conv2d(x, w):
"""conv2d returns a 2d convolution layer with full stride."""
return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME', data_format="NHWC")
def max_pool_2x2(x):
"""max_pool_2x2 downsamples a feature map by 2X."""
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME', data_format="NHWC")
def weight_variable(shape):
"""weight_variable generates a weight variable of a given shape."""
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
"""bias_variable generates a bias variable of a given shape."""
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def create_and_train_mnist():
tf.logging.set_verbosity(tf.logging.ERROR)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Import data
data_dir = r"/tmp/tensorflow/mnist/input_data"
mnist = input_data.read_data_sets(data_dir)
# Create the model
tf.reset_default_graph()
input_tensor = tf.placeholder(tf.float32, [None, 784], name="input")
# Build the graph for the deep net
y_conv = deepnn(input_tensor)
output_tensor = tf.identity(y_conv, "result")
with open("./output/graph.proto", "wb") as file:
graph = tf.get_default_graph().as_graph_def(add_shapes=True)
file.write(graph.SerializeToString())
# Define loss and optimizer
y_ = tf.placeholder(tf.int64, [None])
with tf.name_scope('loss'):
cross_entropy = tf.losses.sparse_softmax_cross_entropy(
labels=y_, logits=y_conv)
cross_entropy = tf.reduce_mean(cross_entropy)
with tf.name_scope('adam_optimizer'):
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
with tf.name_scope('accuracy'):
correct_prediction = tf.equal(tf.argmax(y_conv, 1), y_)
correct_prediction = tf.cast(correct_prediction, tf.float32)
accuracy = tf.reduce_mean(correct_prediction)
saver = tf.train.Saver()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
for i in range(5000):
batch = mnist.train.next_batch(50)
if i % 1000 == 0:
train_accuracy = accuracy.eval(session=sess, feed_dict={input_tensor: batch[0], y_: batch[1]})
print('step %d, training accuracy %g' % (i, train_accuracy))
train_step.run(session=sess, feed_dict={input_tensor: batch[0], y_: batch[1]})
print('test accuracy %g' % accuracy.eval(session=sess, feed_dict={input_tensor: mnist.test.images[:1000], y_: mnist.test.labels[:1000]}))
return sess, saver, input_tensor, output_tensor
def save_model_to_checkpoint(saver, sess):
print("save model to checkpoint")
save_path = saver.save(sess, "./output/ckpt/model.ckpt")
def save_model_to_frozen_proto(sess):
print('save model to frozen graph')
frozen_graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, ["result"])
with open("./output/mnist_frozen.pb", "wb") as file:
file.write(frozen_graph.SerializeToString())
def save_model_to_saved_model(sess, input_tensor, output_tensor):
print('save model to saved_model')
from tensorflow.saved_model import simple_save
save_path = r"./output/saved_model"
if os.path.exists(save_path):
shutil.rmtree(save_path)
simple_save(sess, save_path, {input_tensor.name: input_tensor}, {output_tensor.name: output_tensor})
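# Illustrative follow-up (an assumption about the intended next step, not part
# of this script): the SavedModel written by save_model_to_saved_model() can be
# converted with the tf2onnx command-line tool, e.g.
#
#   python -m tf2onnx.convert --saved-model ./output/saved_model --output ./output/mnist.onnx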
| [] | [] | ["TF_CPP_MIN_LOG_LEVEL"] | [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
test/e2e/save_test.go | package integration
import (
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"github.com/containers/podman/v2/pkg/rootless"
. "github.com/containers/podman/v2/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Podman save", func() {
var (
tempdir string
err error
podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
tempdir, err = CreateTempDirInTempDir()
if err != nil {
os.Exit(1)
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
podmanTest.RestoreAllArtifacts()
})
AfterEach(func() {
podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
processTestResult(f)
})
It("podman save output flag", func() {
outfile := filepath.Join(podmanTest.TempDir, "alpine.tar")
save := podmanTest.PodmanNoCache([]string{"save", "-o", outfile, ALPINE})
save.WaitWithDefaultTimeout()
Expect(save.ExitCode()).To(Equal(0))
})
It("podman save oci flag", func() {
outfile := filepath.Join(podmanTest.TempDir, "alpine.tar")
save := podmanTest.PodmanNoCache([]string{"save", "-o", outfile, "--format", "oci-archive", ALPINE})
save.WaitWithDefaultTimeout()
Expect(save.ExitCode()).To(Equal(0))
})
It("podman save with stdout", func() {
Skip("Pipe redirection in ginkgo probably won't work")
outfile := filepath.Join(podmanTest.TempDir, "alpine.tar")
save := podmanTest.PodmanNoCache([]string{"save", ALPINE, ">", outfile})
save.WaitWithDefaultTimeout()
Expect(save.ExitCode()).To(Equal(0))
})
It("podman save quiet flag", func() {
outfile := filepath.Join(podmanTest.TempDir, "alpine.tar")
save := podmanTest.PodmanNoCache([]string{"save", "-q", "-o", outfile, ALPINE})
save.WaitWithDefaultTimeout()
Expect(save.ExitCode()).To(Equal(0))
})
It("podman save bogus image", func() {
outfile := filepath.Join(podmanTest.TempDir, "alpine.tar")
save := podmanTest.PodmanNoCache([]string{"save", "-o", outfile, "FOOBAR"})
save.WaitWithDefaultTimeout()
Expect(save).To(ExitWithError())
})
It("podman save to directory with oci format", func() {
if rootless.IsRootless() && podmanTest.RemoteTest {
Skip("Requires a fix in containers image for chown/lchown")
}
outdir := filepath.Join(podmanTest.TempDir, "save")
save := podmanTest.PodmanNoCache([]string{"save", "--format", "oci-dir", "-o", outdir, ALPINE})
save.WaitWithDefaultTimeout()
Expect(save.ExitCode()).To(Equal(0))
})
It("podman save to directory with v2s2 docker format", func() {
if rootless.IsRootless() && podmanTest.RemoteTest {
Skip("Requires a fix in containers image for chown/lchown")
}
outdir := filepath.Join(podmanTest.TempDir, "save")
save := podmanTest.PodmanNoCache([]string{"save", "--format", "docker-dir", "-o", outdir, ALPINE})
save.WaitWithDefaultTimeout()
Expect(save.ExitCode()).To(Equal(0))
})
It("podman save to directory with docker format and compression", func() {
if rootless.IsRootless() && podmanTest.RemoteTest {
Skip("Requires a fix in containers image for chown/lchown")
}
outdir := filepath.Join(podmanTest.TempDir, "save")
save := podmanTest.PodmanNoCache([]string{"save", "--compress", "--format", "docker-dir", "-o", outdir, ALPINE})
save.WaitWithDefaultTimeout()
Expect(save.ExitCode()).To(Equal(0))
})
It("podman save bad filename", func() {
outdir := filepath.Join(podmanTest.TempDir, "save:colon")
save := podmanTest.PodmanNoCache([]string{"save", "--compress", "--format", "docker-dir", "-o", outdir, ALPINE})
save.WaitWithDefaultTimeout()
Expect(save).To(ExitWithError())
})
It("podman save remove signature", func() {
SkipIfRootless("FIXME: Need get in rootless push sign")
if podmanTest.Host.Arch == "ppc64le" {
Skip("No registry image for ppc64le")
}
tempGNUPGHOME := filepath.Join(podmanTest.TempDir, "tmpGPG")
err := os.Mkdir(tempGNUPGHOME, os.ModePerm)
Expect(err).To(BeNil())
origGNUPGHOME := os.Getenv("GNUPGHOME")
err = os.Setenv("GNUPGHOME", tempGNUPGHOME)
Expect(err).To(BeNil())
defer os.Setenv("GNUPGHOME", origGNUPGHOME)
port := 5000
session := podmanTest.Podman([]string{"run", "-d", "--name", "registry", "-p", strings.Join([]string{strconv.Itoa(port), strconv.Itoa(port)}, ":"), "docker.io/registry:2.6"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
if !WaitContainerReady(podmanTest, "registry", "listening on", 20, 1) {
Skip("Cannot start docker registry.")
}
cmd := exec.Command("gpg", "--import", "sign/secret-key.asc")
err = cmd.Run()
Expect(err).To(BeNil())
cmd = exec.Command("cp", "/etc/containers/registries.d/default.yaml", "default.yaml")
if err = cmd.Run(); err != nil {
Skip("no signature store to verify")
}
defer func() {
cmd = exec.Command("cp", "default.yaml", "/etc/containers/registries.d/default.yaml")
cmd.Run()
}()
cmd = exec.Command("cp", "sign/key.gpg", "/tmp/key.gpg")
Expect(cmd.Run()).To(BeNil())
sigstore := `
default-docker:
sigstore: file:///var/lib/containers/sigstore
sigstore-staging: file:///var/lib/containers/sigstore
`
Expect(ioutil.WriteFile("/etc/containers/registries.d/default.yaml", []byte(sigstore), 0755)).To(BeNil())
session = podmanTest.Podman([]string{"tag", ALPINE, "localhost:5000/alpine"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
session = podmanTest.Podman([]string{"push", "--tls-verify=false", "--sign-by", "[email protected]", "localhost:5000/alpine"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
session = podmanTest.Podman([]string{"rmi", ALPINE, "localhost:5000/alpine"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
session = podmanTest.Podman([]string{"pull", "--tls-verify=false", "--signature-policy=sign/policy.json", "localhost:5000/alpine"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
outfile := filepath.Join(podmanTest.TempDir, "temp.tar")
save := podmanTest.Podman([]string{"save", "remove-signatures=true", "-o", outfile, "localhost:5000/alpine"})
save.WaitWithDefaultTimeout()
Expect(save).To(ExitWithError())
})
It("podman save image with digest reference", func() {
// pull a digest reference
session := podmanTest.PodmanNoCache([]string{"pull", ALPINELISTDIGEST})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
// save a digest reference should exit without error.
outfile := filepath.Join(podmanTest.TempDir, "temp.tar")
save := podmanTest.PodmanNoCache([]string{"save", "-o", outfile, ALPINELISTDIGEST})
save.WaitWithDefaultTimeout()
Expect(save.ExitCode()).To(Equal(0))
})
It("podman save --multi-image-archive (tagged images)", func() {
multiImageSave(podmanTest, RESTORE_IMAGES)
})
It("podman save --multi-image-archive (untagged images)", func() {
// Refer to images via ID instead of tag.
session := podmanTest.PodmanNoCache([]string{"images", "--format", "{{.ID}}"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
ids := session.OutputToStringArray()
Expect(len(ids)).To(Equal(len(RESTORE_IMAGES)))
multiImageSave(podmanTest, ids)
})
})
// Create a multi-image archive, remove all images, load it and
// make sure that all images are (again) present.
func multiImageSave(podmanTest *PodmanTestIntegration, images []string) {
// Create the archive.
outfile := filepath.Join(podmanTest.TempDir, "temp.tar")
session := podmanTest.PodmanNoCache(append([]string{"save", "-o", outfile, "--multi-image-archive"}, images...))
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
// Remove all images.
session = podmanTest.PodmanNoCache([]string{"rmi", "-af"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
// Now load the archive.
session = podmanTest.PodmanNoCache([]string{"load", "-i", outfile})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
// Grep for each image in the `podman load` output.
for _, image := range images {
found, _ := session.GrepString(image)
Expect(found).Should(BeTrue())
}
// Make sure that each image has really been loaded.
for _, image := range images {
session = podmanTest.PodmanNoCache([]string{"image", "exists", image})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
}
}
| ["\"GNUPGHOME\""] | [] | ["GNUPGHOME"] | [] | ["GNUPGHOME"] | go | 1 | 0 | |
tests/disputes_test.go | package tests
import (
"os"
"testing"
"github.com/HRInnovationLab/adyen-go-api-library/v5/src/adyen"
"github.com/HRInnovationLab/adyen-go-api-library/v5/src/checkout"
"github.com/HRInnovationLab/adyen-go-api-library/v5/src/common"
"github.com/HRInnovationLab/adyen-go-api-library/v5/src/disputes"
"github.com/HRInnovationLab/adyen-go-api-library/v5/src/payments"
"github.com/joho/godotenv"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func Test_Disputes(t *testing.T) {
godotenv.Load("./../.env")
var (
MerchantAccount = os.Getenv("ADYEN_MERCHANT")
APIKey = os.Getenv("ADYEN_API_KEY")
)
client := adyen.NewClient(&common.Config{
ApiKey: APIKey,
Environment: "TEST",
})
createTestPayment := func() string {
res, _, _ := client.Checkout.Payments(&checkout.PaymentRequest{
Amount: checkout.Amount{
Currency: "EUR",
Value: 1000,
},
Reference: "DISPUTES_CHARGEBACK",
PaymentMethod: map[string]interface{}{
"type": "scheme",
"number": "4111111111111111",
"expiryMonth": "03",
"expiryYear": "2030",
"holderName": "chargeback:10.4",
"cvc": "737",
},
ReturnUrl: "https://adyen.com",
MerchantAccount: MerchantAccount,
})
captureRes, _, _ := client.Payments.Capture(&payments.ModificationRequest{
OriginalReference: res.PspReference,
ModificationAmount: &payments.Amount{
Currency: "EUR",
Value: 1000,
},
Reference: "MODIFICATION_REFERENCE",
MerchantAccount: MerchantAccount,
})
return captureRes.PspReference
}
t.Run("Disputes", func(t *testing.T) {
t.Run("Retrieve applicable defense reasons", func(t *testing.T) {
pspReference := createTestPayment()
res, httpRes, err := client.Disputes.RetrieveApplicableDefenseReasons(&disputes.DefenseReasonsRequest{
DisputePspReference: pspReference,
MerchantAccountCode: MerchantAccount,
})
require.Nil(t, err)
assert.Equal(t, 200, httpRes.StatusCode)
assert.NotNil(t, res)
assert.Equal(t, "Dispute not found.", res.DisputeServiceResult.ErrorMessage)
})
t.Run("Supply defense document", func(t *testing.T) {
pspReference := createTestPayment()
res, httpRes, err := client.Disputes.SupplyDefenseDocument(&disputes.SupplyDefenseDocumentRequest{
DefenseDocuments: []disputes.DefenseDocument{
{
Content: "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8BQDwAEhQGAhKmMIQAAAABJRU5ErkJggg==",
ContentType: "image/jpg",
DefenseDocumentTypeCode: "DefenseMaterial",
},
},
DisputePspReference: pspReference,
MerchantAccountCode: MerchantAccount,
})
require.Nil(t, err)
assert.Equal(t, 200, httpRes.StatusCode)
assert.NotNil(t, res)
assert.Equal(t, "Unknown dispute", res.DisputeServiceResult.ErrorMessage)
})
t.Run("Delete dispute defense document", func(t *testing.T) {
pspReference := createTestPayment()
res, httpRes, err := client.Disputes.DeleteDisputeDefenseDocument(&disputes.DeleteDefenseDocumentRequest{
DefenseDocumentType: "DefenseMaterial",
DisputePspReference: pspReference,
MerchantAccountCode: MerchantAccount,
})
require.Nil(t, err)
assert.Equal(t, 200, httpRes.StatusCode)
assert.NotNil(t, res)
assert.Equal(t, "Unknown dispute", res.DisputeServiceResult.ErrorMessage)
})
t.Run("Defend dispute", func(t *testing.T) {
pspReference := createTestPayment()
res, httpRes, err := client.Disputes.DefendDispute(&disputes.DefendDisputeRequest{
DefenseReasonCode: "DuplicateChargeback",
DisputePspReference: pspReference,
MerchantAccountCode: MerchantAccount,
})
require.Nil(t, err)
assert.Equal(t, 200, httpRes.StatusCode)
assert.NotNil(t, res)
assert.Equal(t, "Dispute not found.", res.DisputeServiceResult.ErrorMessage)
})
t.Run("Download dispute defense document", func(t *testing.T) {
pspReference := createTestPayment()
_, httpRes, err := client.Disputes.DownloadDisputeDefenseDocument(&disputes.DownloadDefenseDocumentRequest{
DefenseDocumentType: "DefenseMaterial",
DisputePspReference: pspReference,
MerchantAccountCode: MerchantAccount,
})
require.NotNil(t, err)
assert.Equal(t, 403, httpRes.StatusCode)
})
})
}
| ["\"ADYEN_MERCHANT\"", "\"ADYEN_API_KEY\""] | [] | ["ADYEN_MERCHANT", "ADYEN_API_KEY"] | [] | ["ADYEN_MERCHANT", "ADYEN_API_KEY"] | go | 2 | 0 | |
vendor/github.com/hashicorp/go-getter/get_git_test.go | package getter
import (
"encoding/base64"
"io/ioutil"
"net/url"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"testing"
)
var testHasGit bool
func init() {
if _, err := exec.LookPath("git"); err == nil {
testHasGit = true
}
}
func TestGitGetter_impl(t *testing.T) {
var _ Getter = new(GitGetter)
}
func TestGitGetter(t *testing.T) {
if !testHasGit {
t.Log("git not found, skipping")
t.Skip()
}
g := new(GitGetter)
dst := tempDir(t)
repo := testGitRepo(t, "basic")
repo.commitFile("foo.txt", "hello")
// With a dir that doesn't exist
if err := g.Get(dst, repo.url); err != nil {
t.Fatalf("err: %s", err)
}
// Verify the main file exists
mainPath := filepath.Join(dst, "foo.txt")
if _, err := os.Stat(mainPath); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestGitGetter_branch(t *testing.T) {
if !testHasGit {
t.Log("git not found, skipping")
t.Skip()
}
g := new(GitGetter)
dst := tempDir(t)
repo := testGitRepo(t, "branch")
repo.git("checkout", "-b", "test-branch")
repo.commitFile("branch.txt", "branch")
q := repo.url.Query()
q.Add("ref", "test-branch")
repo.url.RawQuery = q.Encode()
if err := g.Get(dst, repo.url); err != nil {
t.Fatalf("err: %s", err)
}
// Verify the main file exists
mainPath := filepath.Join(dst, "branch.txt")
if _, err := os.Stat(mainPath); err != nil {
t.Fatalf("err: %s", err)
}
// Get again should work
if err := g.Get(dst, repo.url); err != nil {
t.Fatalf("err: %s", err)
}
// Verify the main file exists
mainPath = filepath.Join(dst, "branch.txt")
if _, err := os.Stat(mainPath); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestGitGetter_branchUpdate(t *testing.T) {
if !testHasGit {
t.Log("git not found, skipping")
t.Skip()
}
g := new(GitGetter)
dst := tempDir(t)
// First setup the state with a fresh branch
repo := testGitRepo(t, "branch-update")
repo.git("checkout", "-b", "test-branch")
repo.commitFile("branch.txt", "branch")
// Get the "test-branch" branch
q := repo.url.Query()
q.Add("ref", "test-branch")
repo.url.RawQuery = q.Encode()
if err := g.Get(dst, repo.url); err != nil {
t.Fatalf("err: %s", err)
}
// Verify the main file exists
mainPath := filepath.Join(dst, "branch.txt")
if _, err := os.Stat(mainPath); err != nil {
t.Fatalf("err: %s", err)
}
// Commit an update to the branch
repo.commitFile("branch-update.txt", "branch-update")
// Get again should work
if err := g.Get(dst, repo.url); err != nil {
t.Fatalf("err: %s", err)
}
// Verify the main file exists
mainPath = filepath.Join(dst, "branch-update.txt")
if _, err := os.Stat(mainPath); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestGitGetter_tag(t *testing.T) {
if !testHasGit {
t.Log("git not found, skipping")
t.Skip()
}
g := new(GitGetter)
dst := tempDir(t)
repo := testGitRepo(t, "tag")
repo.commitFile("tag.txt", "tag")
repo.git("tag", "v1.0")
q := repo.url.Query()
q.Add("ref", "v1.0")
repo.url.RawQuery = q.Encode()
if err := g.Get(dst, repo.url); err != nil {
t.Fatalf("err: %s", err)
}
// Verify the main file exists
mainPath := filepath.Join(dst, "tag.txt")
if _, err := os.Stat(mainPath); err != nil {
t.Fatalf("err: %s", err)
}
// Get again should work
if err := g.Get(dst, repo.url); err != nil {
t.Fatalf("err: %s", err)
}
// Verify the main file exists
mainPath = filepath.Join(dst, "tag.txt")
if _, err := os.Stat(mainPath); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestGitGetter_GetFile(t *testing.T) {
if !testHasGit {
t.Log("git not found, skipping")
t.Skip()
}
g := new(GitGetter)
dst := tempFile(t)
repo := testGitRepo(t, "file")
repo.commitFile("file.txt", "hello")
// Download the file
repo.url.Path = filepath.Join(repo.url.Path, "file.txt")
if err := g.GetFile(dst, repo.url); err != nil {
t.Fatalf("err: %s", err)
}
// Verify the main file exists
if _, err := os.Stat(dst); err != nil {
t.Fatalf("err: %s", err)
}
assertContents(t, dst, "hello")
}
func TestGitGetter_gitVersion(t *testing.T) {
dir, err := ioutil.TempDir("", "go-getter")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
script := filepath.Join(dir, "git")
err = ioutil.WriteFile(
script,
[]byte("#!/bin/sh\necho \"git version 2.0 (Some Metadata Here)\n\""),
0700)
if err != nil {
t.Fatal(err)
}
defer func(v string) {
os.Setenv("PATH", v)
}(os.Getenv("PATH"))
os.Setenv("PATH", dir)
// Asking for a higher version throws an error
if err := checkGitVersion("2.3"); err == nil {
t.Fatal("expect git version error")
}
// Passes when version is satisfied
if err := checkGitVersion("1.9"); err != nil {
t.Fatal(err)
}
}
func TestGitGetter_sshKey(t *testing.T) {
if !testHasGit {
t.Log("git not found, skipping")
t.Skip()
}
g := new(GitGetter)
dst := tempDir(t)
encodedKey := base64.StdEncoding.EncodeToString([]byte(testGitToken))
u, err := url.Parse("ssh://[email protected]/hashicorp/test-private-repo" +
"?sshkey=" + encodedKey)
if err != nil {
t.Fatal(err)
}
if err := g.Get(dst, u); err != nil {
t.Fatalf("err: %s", err)
}
readmePath := filepath.Join(dst, "README.md")
if _, err := os.Stat(readmePath); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestGitGetter_submodule(t *testing.T) {
if !testHasGit {
t.Log("git not found, skipping")
t.Skip()
}
g := new(GitGetter)
dst := tempDir(t)
// Set up the grandchild
gc := testGitRepo(t, "grandchild")
gc.commitFile("grandchild.txt", "grandchild")
// Set up the child
c := testGitRepo(t, "child")
c.commitFile("child.txt", "child")
c.git("submodule", "add", gc.dir)
c.git("commit", "-m", "Add grandchild submodule")
// Set up the parent
p := testGitRepo(t, "parent")
p.commitFile("parent.txt", "parent")
p.git("submodule", "add", c.dir)
p.git("commit", "-m", "Add child submodule")
// Clone the root repository
if err := g.Get(dst, p.url); err != nil {
t.Fatalf("err: %s", err)
}
// Check that the files exist
for _, path := range []string{
filepath.Join(dst, "parent.txt"),
filepath.Join(dst, "child", "child.txt"),
filepath.Join(dst, "child", "grandchild", "grandchild.txt"),
} {
if _, err := os.Stat(path); err != nil {
t.Fatalf("err: %s", err)
}
}
}
func TestGitGetter_setupGitEnv_sshKey(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skipf("skipping on windows since the test requires sh")
return
}
cmd := exec.Command("/bin/sh", "-c", "echo $GIT_SSH_COMMAND")
setupGitEnv(cmd, "/tmp/foo.pem")
out, err := cmd.Output()
if err != nil {
t.Fatal(err)
}
actual := strings.TrimSpace(string(out))
if actual != "ssh -i /tmp/foo.pem" {
t.Fatalf("unexpected GIT_SSH_COMMAND: %q", actual)
}
}
func TestGitGetter_setupGitEnvWithExisting_sshKey(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skipf("skipping on windows since the test requires sh")
return
}
// start with an existing ssh command configuration
os.Setenv("GIT_SSH_COMMAND", "ssh -o StrictHostKeyChecking=no")
defer os.Setenv("GIT_SSH_COMMAND", "")
cmd := exec.Command("/bin/sh", "-c", "echo $GIT_SSH_COMMAND")
setupGitEnv(cmd, "/tmp/foo.pem")
out, err := cmd.Output()
if err != nil {
t.Fatal(err)
}
actual := strings.TrimSpace(string(out))
if actual != "ssh -o StrictHostKeyChecking=no -i /tmp/foo.pem" {
t.Fatalf("unexpected GIT_SSH_COMMAND: %q", actual)
}
}
// gitRepo is a helper struct which controls a single temp git repo.
type gitRepo struct {
t *testing.T
url *url.URL
dir string
}
// testGitRepo creates a new test git repository.
func testGitRepo(t *testing.T, name string) *gitRepo {
dir, err := ioutil.TempDir("", "go-getter")
if err != nil {
t.Fatal(err)
}
dir = filepath.Join(dir, name)
if err := os.Mkdir(dir, 0700); err != nil {
t.Fatal(err)
}
r := &gitRepo{
t: t,
dir: dir,
}
url, err := url.Parse("file://" + r.dir)
if err != nil {
t.Fatal(err)
}
r.url = url
r.git("init")
r.git("config", "user.name", "go-getter")
r.git("config", "user.email", "[email protected]")
return r
}
// git runs a git command against the repo.
func (r *gitRepo) git(args ...string) {
cmd := exec.Command("git", args...)
cmd.Dir = r.dir
if err := cmd.Run(); err != nil {
r.t.Fatal(err)
}
}
// commitFile writes and commits a text file to the repo.
func (r *gitRepo) commitFile(file, content string) {
path := filepath.Join(r.dir, file)
if err := ioutil.WriteFile(path, []byte(content), 0600); err != nil {
r.t.Fatal(err)
}
r.git("add", file)
r.git("commit", "-m", "Adding "+file)
}
// This is a read-only deploy key for an empty test repository.
// Note: This is split over multiple lines to avoid being disabled by key
// scanners automatically.
var testGitToken = `-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEA9cHsxCl3Jjgu9DHpwvmfFOl1XEdY+ShHDR/cMnzJ5ddk5/oV
Wy6EWatvyHZfRSZMwzv4PtKeUPm6iXjqWp4xdWU9khlPzozyj+U9Fq70TRVUW9E5
T1XdQVwJE421yffr4VMMwu60wBqjI1epapH2i2inYvw9Zl9X2MXq0+jTvFvDerbT
mDtfStDPljenELAIZtWVETSvbI46gALwbxbM2292ZUIL4D6jRz0aZMmyy/twYv8r
9WGJLwmYzU518Ie7zqKW/mCTdTrV0WRiDj0MeRaPgrGY9amuHE4r9iG/cJkwpKAO
Ccz0Hs6i89u9vZnTqZU9V7weJqRAQcMjXXR6yQIDAQABAoIBAQDBzICKnGxiTlHw
rd+6qqChnAy5jWYDbZjCJ8q8YZ3RS08+g/8NXZxvHftTqM0uOaq1FviHig3gq15H
hHvCpBc6jXDFYoKFzq6FfO/0kFkE5HoWweIgxwRow0xBCDJAJ+ryUEyy+Ay/pQHb
IAjwilRS0V+WdnVw4mTjBAhPvb4jPOo97Yfy3PYUyx2F3newkqXOZy+zx3G/ANoa
ncypfMGyy76sfCWKqw4J1gVkVQLwbB6gQkXUFGYwY9sRrxbG93kQw76Flc/E/s52
62j4v1IM0fq0t/St+Y/+s6Lkw` + `aqt3ft1nsqWcRaVDdqvMfkzgJGXlw0bGzJG5MEQ
AIBq3dHRAoGBAP8OeG/DKG2Z1VmSfzuz1pas1fbZ+F7venOBrjez3sKlb3Pyl2aH
mt2wjaTUi5v10VrHgYtOEdqyhQeUSYydWXIBKNMag0NLLrfFUKZK+57wrHWFdFjn
VgpsdkLSNTOZpC8gA5OaJ+36IcOPfGqyyP9wuuRoaYnVT1KEzqLa9FEFAoGBAPaq
pglwhil2rxjJE4zq0afQLNpAfi7Xqcrepij+xvJIcIj7nawxXuPxqRFxONE/h3yX
zkybO8wLdbHX9Iw/wc1j50Uf1Z5gHdLf7/hQJoWKpz1RnkWRy6CYON8v1tpVp0tb
OAajR/kZnzebq2mfa7pyy5zDCX++2kp/dcFwHf31AoGAE8oupBVTZLWj7TBFuP8q
LkS40U92Sv9v09iDCQVmylmFvUxcXPM2m+7f/qMTNgWrucxzC7kB/6MMWVszHbrz
vrnCTibnemgx9sZTjKOSxHFOIEw7i85fSa3Cu0qOIDPSnmlwfZpfcMKQrhjLAYhf
uhooFiLX1X78iZ2OXup4PHUCgYEAsmBrm83sp1V1gAYBBlnVbXakyNv0pCk/Vz61
iFXeRt1NzDGxLxGw3kQnED8BaIh5kQcyn8Fud7sdzJMv/LAqlT4Ww60mzNYTGyjo
H3jOsqm3ESfRvduWFreeAQBWbiOczGjV1i8D4EbAFfWT+tjXjchwKBf+6Yt5zn/o
Bw/uEHUCgYAFs+JPOR25oRyBs7ujrMo/OY1z/eXTVVgZxY+tYGe1FJqDeFyR7ytK
+JBB1MuDwQKGm2wSIXdCzTNoIx2B9zTseiPTwT8G7vqNFhXoIaTBp4P2xIQb45mJ
7GkTsMBHwpSMOXgX9Weq3v5xOJ2WxVtjENmd6qzxcYCO5lP15O17hA==
-----END RSA PRIVATE KEY-----`
| [
"\"PATH\""
] | [] | [
"PATH"
] | [] | ["PATH"] | go | 1 | 0 | |
bin/sonnet_sad_multi.py | #!/usr/bin/env python
# Copyright 2017 Calico LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from optparse import OptionParser
import glob
import os
import pickle
import shutil
import subprocess
import sys
import h5py
import numpy as np
import slurm
"""
sonnet_sad_multi.py
Compute SNP expression difference scores for variants in a VCF file,
using multiple processes.
"""
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <model> <vcf_file>'
parser = OptionParser(usage)
# sad
parser.add_option('-b', dest='batch_size',
default=4, type='int',
help='Batch size [Default: %default]')
parser.add_option('-c', dest='slice_center',
default=None, type='int',
help='Slice center positions [Default: %default]')
parser.add_option('-f', dest='genome_fasta',
default='%s/data/hg19.fa' % os.environ['BASENJIDIR'],
help='Genome FASTA for sequences [Default: %default]')
parser.add_option('-o',dest='out_dir',
default='sad',
help='Output directory for tables and plots [Default: %default]')
parser.add_option('--pseudo', dest='log_pseudo',
default=1, type='float',
help='Log2 pseudocount [Default: %default]')
parser.add_option('--rc', dest='rc',
default=False, action='store_true',
help='Average forward and reverse complement predictions [Default: %default]')
parser.add_option('--shifts', dest='shifts',
default='0', type='str',
help='Ensemble prediction shifts [Default: %default]')
parser.add_option('--species', dest='species',
default='human')
parser.add_option('--stats', dest='sad_stats',
default='SAD',
help='Comma-separated list of stats to save. [Default: %default]')
parser.add_option('-t', dest='targets_file',
default=None, type='str',
help='File specifying target indexes and labels in table format')
# multi
parser.add_option('-e', dest='conda_env',
default='tf2.6',
help='Anaconda environment [Default: %default]')
parser.add_option('--name', dest='name',
default='sad', help='SLURM name prefix [Default: %default]')
parser.add_option('--max_proc', dest='max_proc',
default=None, type='int',
help='Maximum concurrent processes [Default: %default]')
parser.add_option('-p', dest='processes',
default=None, type='int',
help='Number of processes, passed by multi script')
parser.add_option('-q', dest='queue',
default='gtx1080ti',
help='SLURM queue on which to run the jobs [Default: %default]')
parser.add_option('-r', dest='restart',
default=False, action='store_true',
help='Restart a partially completed job [Default: %default]')
(options, args) = parser.parse_args()
if len(args) != 2:
parser.error('Must provide model and VCF file')
else:
model_file = args[0]
vcf_file = args[1]
#######################################################
# prep work
# output directory
if not options.restart:
if os.path.isdir(options.out_dir):
print('Please remove %s' % options.out_dir, file=sys.stderr)
exit(1)
os.mkdir(options.out_dir)
# pickle options
options_pkl_file = '%s/options.pkl' % options.out_dir
options_pkl = open(options_pkl_file, 'wb')
pickle.dump(options, options_pkl)
options_pkl.close()
#######################################################
# launch worker threads
jobs = []
for pi in range(options.processes):
if not options.restart or not job_completed(options, pi):
cmd = '. /home/drk/anaconda3/etc/profile.d/conda.sh;'
cmd += ' conda activate %s;' % options.conda_env
cmd += ' sonnet_sad.py %s %s %d' % (
options_pkl_file, ' '.join(args), pi)
name = '%s_p%d' % (options.name, pi)
outf = '%s/job%d.out' % (options.out_dir, pi)
errf = '%s/job%d.err' % (options.out_dir, pi)
j = slurm.Job(cmd, name,
outf, errf,
queue=options.queue, gpu=1,
mem=22000, time='14-0:0:0')
jobs.append(j)
slurm.multi_run(jobs, max_proc=options.max_proc, verbose=True,
launch_sleep=10, update_sleep=60)
#######################################################
# collect output
collect_h5('sad.h5', options.out_dir, options.processes)
# for pi in range(options.processes):
# shutil.rmtree('%s/job%d' % (options.out_dir,pi))
def collect_h5(file_name, out_dir, num_procs):
# count variants
num_variants = 0
for pi in range(num_procs):
# open job
job_h5_file = '%s/job%d/%s' % (out_dir, pi, file_name)
job_h5_open = h5py.File(job_h5_file, 'r')
num_variants += len(job_h5_open['snp'])
job_h5_open.close()
# initialize final h5
final_h5_file = '%s/%s' % (out_dir, file_name)
final_h5_open = h5py.File(final_h5_file, 'w')
# keep dict for string values
final_strings = {}
job0_h5_file = '%s/job0/%s' % (out_dir, file_name)
job0_h5_open = h5py.File(job0_h5_file, 'r')
for key in job0_h5_open.keys():
if key in ['percentiles', 'target_ids', 'target_labels']:
# copy
final_h5_open.create_dataset(key, data=job0_h5_open[key])
elif key[-4:] == '_pct':
values = np.zeros(job0_h5_open[key].shape)
final_h5_open.create_dataset(key, data=values)
elif job0_h5_open[key].dtype.char == 'S':
final_strings[key] = []
elif job0_h5_open[key].ndim == 1:
final_h5_open.create_dataset(key, shape=(num_variants,), dtype=job0_h5_open[key].dtype)
else:
num_targets = job0_h5_open[key].shape[1]
final_h5_open.create_dataset(key, shape=(num_variants, num_targets), dtype=job0_h5_open[key].dtype)
job0_h5_open.close()
# set values
vi = 0
for pi in range(num_procs):
# open job
job_h5_file = '%s/job%d/%s' % (out_dir, pi, file_name)
job_h5_open = h5py.File(job_h5_file, 'r')
# append to final
for key in job_h5_open.keys():
if key in ['percentiles', 'target_ids', 'target_labels']:
# once is enough
pass
elif key[-4:] == '_pct':
# average
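# (editorial note, not in the original source) The update below is a running
# mean: after job pi the dataset holds the mean of jobs 0..pi, because
# new_mean = old_mean + (x - old_mean) / (number of jobs seen so far),
# and the dataset was initialised to zeros so the first job simply stores x.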
u_k1 = np.array(final_h5_open[key])
x_k = np.array(job_h5_open[key])
final_h5_open[key][:] = u_k1 + (x_k - u_k1) / (pi+1)
else:
if job_h5_open[key].dtype.char == 'S':
final_strings[key] += list(job_h5_open[key])
else:
job_variants = job_h5_open[key].shape[0]
final_h5_open[key][vi:vi+job_variants] = job_h5_open[key]
vi += job_variants
job_h5_open.close()
# create final string datasets
for key in final_strings:
final_h5_open.create_dataset(key,
data=np.array(final_strings[key], dtype='S'))
final_h5_open.close()
def job_completed(options, pi):
"""Check whether a specific job has generated its
output file."""
out_file = '%s/job%d/sad.h5' % (options.out_dir, pi)
return os.path.isfile(out_file) or os.path.isdir(out_file)
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
| [] | [] | [
"BASENJIDIR"
] | [] | ["BASENJIDIR"] | python | 1 | 0 | |
samples/go/main.go | package main
import (
"encoding/json"
"fmt"
"log"
rand2 "math/rand"
"net/http"
"os"
)
func main() {
port := "8080"
if v := os.Getenv("PORT"); v != "" {
port = v
}
http.HandleFunc("/", handler)
log.Printf("starting server on port :%s", port)
err := http.ListenAndServe(":"+port, nil)
log.Fatalf("http listen error: %v", err)
}
func handler(w http.ResponseWriter, req *http.Request) {
if req.Method == http.MethodGet {
fmt.Fprint(w, "Let the battle begin!")
return
}
var v ArenaUpdate
defer req.Body.Close()
d := json.NewDecoder(req.Body)
d.DisallowUnknownFields()
if err := d.Decode(&v); err != nil {
log.Printf("WARN: failed to decode ArenaUpdate in response body: %v", err)
w.WriteHeader(http.StatusInternalServerError)
return
}
resp := play(v)
fmt.Fprint(w, resp)
}
func play(input ArenaUpdate) (response string) {
log.Printf("IN: %#v", input)
commands := []string{"F", "R", "L", "T"}
rand := rand2.Intn(4)
return commands[rand]
}
| [
"\"PORT\""
] | [] | [
"PORT"
] | [] | ["PORT"] | go | 1 | 0 | |
warrior_modules/warrior_netconf/warriornetconf/ProductDrivers/netconf_driver.py | '''
Copyright 2017, Fujitsu Network Communications, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
"""" NetConf driver """
from warrior.WarriorCore import kw_driver
import warriornetconf.Actions.NetconfActions
def main(keyword, data_repository, args_repository):
"""Import all actions related to netconf driver
and call the driver Utils to execute a keyword """
# Declare a list of packages to be used by this driver,
# if you want to add more packages import them outside the main function
# and then add them to the package_list below
package_list = [warriornetconf.Actions.NetconfActions]
return kw_driver.execute_keyword(keyword, data_repository, args_repository, package_list)
| [] | [] | [] | [] | [] | python | null | null | null |
core/src/main/java/io/questdb/log/LogFactory.java | /*******************************************************************************
* ___ _ ____ ____
* / _ \ _ _ ___ ___| |_| _ \| __ )
* | | | | | | |/ _ \/ __| __| | | | _ \
* | |_| | |_| | __/\__ \ |_| |_| | |_) |
* \__\_\\__,_|\___||___/\__|____/|____/
*
* Copyright (c) 2014-2019 Appsicle
* Copyright (c) 2019-2022 QuestDB
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************************/
package io.questdb.log;
import io.questdb.mp.*;
import io.questdb.std.*;
import io.questdb.std.datetime.microtime.MicrosecondClock;
import io.questdb.std.datetime.microtime.MicrosecondClockImpl;
import java.io.*;
import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
import java.util.Comparator;
import java.util.Properties;
public class LogFactory implements Closeable {
public static final LogFactory INSTANCE = new LogFactory();
public static final String DEBUG_TRIGGER = "ebug";
public static final String DEBUG_TRIGGER_ENV = "QDB_DEBUG";
public static final String CONFIG_SYSTEM_PROPERTY = "out";
private static final int DEFAULT_QUEUE_DEPTH = 1024;
private static final int DEFAULT_MSG_SIZE = 4 * 1024;
private static final String DEFAULT_CONFIG = "/log-stdout.conf";
private static final String EMPTY_STR = "";
private static final CharSequenceHashSet reserved = new CharSequenceHashSet();
private static final LengthDescendingComparator LDC = new LengthDescendingComparator();
private final CharSequenceObjHashMap<ScopeConfiguration> scopeConfigMap = new CharSequenceObjHashMap<>();
private final ObjList<ScopeConfiguration> scopeConfigs = new ObjList<>();
private final ObjHashSet<LogWriter> jobs = new ObjHashSet<>();
private final MicrosecondClock clock;
private WorkerPool workerPool;
private boolean configured = false;
private int queueDepth = DEFAULT_QUEUE_DEPTH;
private int recordLength = DEFAULT_MSG_SIZE;
static boolean envEnabled = true;
public LogFactory() {
this(MicrosecondClockImpl.INSTANCE);
}
public LogFactory(MicrosecondClock clock) {
this.clock = clock;
}
public static void configureFromProperties(LogFactory factory, Properties properties, WorkerPool workerPool) {
factory.workerPool = workerPool;
String writers = getProperty(properties, "writers");
if (writers == null) {
factory.configured = true;
return;
}
String s;
s = getProperty(properties, "queueDepth");
if (s != null && s.length() > 0) {
try {
factory.setQueueDepth(Numbers.parseInt(s));
} catch (NumericException e) {
throw new LogError("Invalid value for queueDepth");
}
}
s = getProperty(properties, "recordLength");
if (s != null && s.length() > 0) {
try {
factory.setRecordLength(Numbers.parseInt(s));
} catch (NumericException e) {
throw new LogError("Invalid value for recordLength");
}
}
for (String w : writers.split(",")) {
LogWriterConfig conf = createWriter(properties, w.trim());
if (conf != null) {
factory.add(conf);
}
}
factory.bind();
}
public static void configureFromSystemProperties(LogFactory factory) {
configureFromSystemProperties(factory, null);
}
public static void configureFromSystemProperties(LogFactory factory, WorkerPool workerPool) {
String conf = System.getProperty(CONFIG_SYSTEM_PROPERTY);
if (conf == null) {
conf = DEFAULT_CONFIG;
}
try (InputStream is = LogFactory.class.getResourceAsStream(conf)) {
if (is != null) {
Properties properties = new Properties();
properties.load(is);
configureFromProperties(factory, properties, workerPool);
} else {
File f = new File(conf);
if (f.canRead()) {
try (FileInputStream fis = new FileInputStream(f)) {
Properties properties = new Properties();
properties.load(fis);
configureFromProperties(factory, properties, workerPool);
}
} else {
factory.configureDefaultWriter();
}
}
} catch (IOException e) {
if (!DEFAULT_CONFIG.equals(conf)) {
throw new LogError("Cannot read " + conf, e);
} else {
factory.configureDefaultWriter();
}
}
factory.startThread();
}
public static void configureFromSystemProperties(WorkerPool workerPool) {
configureFromSystemProperties(INSTANCE, workerPool);
}
@SuppressWarnings("rawtypes")
public static Log getLog(Class clazz) {
return getLog(clazz.getName());
}
public static Log getLog(CharSequence key) {
if (!INSTANCE.configured) {
configureFromSystemProperties(INSTANCE, null);
}
return INSTANCE.create(key);
}
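// Illustrative usage sketch (editorial note, not part of the original file):
// callers typically obtain a logger once per class, e.g.
//   private static final Log LOG = LogFactory.getLog(MyClass.class);
// where MyClass is a placeholder name; getLog() lazily configures the factory
// from system properties on first use.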
public void add(final LogWriterConfig config) {
final int index = scopeConfigMap.keyIndex(config.getScope());
ScopeConfiguration scopeConf;
if (index > -1) {
scopeConfigMap.putAt(index, config.getScope(), scopeConf = new ScopeConfiguration(LogLevel.LOG_LEVEL_MAX));
scopeConfigs.add(scopeConf);
} else {
scopeConf = scopeConfigMap.valueAtQuick(index);
}
scopeConf.add(config);
}
public void assign(WorkerPool workerPool) {
for (int i = 0, n = jobs.size(); i < n; i++) {
workerPool.assign(jobs.get(i));
}
if (this.workerPool == null) {
this.workerPool = workerPool;
}
}
public void bind() {
if (configured) {
return;
}
configured = true;
for (int i = 0, n = scopeConfigs.size(); i < n; i++) {
ScopeConfiguration conf = scopeConfigs.get(i);
conf.bind(jobs, queueDepth, recordLength);
}
scopeConfigMap.sortKeys(LDC);
for (int i = 0, n = jobs.size(); i < n; i++) {
jobs.get(i).bindProperties();
}
if (workerPool != null) {
assign(workerPool);
}
}
@Override
public void close() {
haltThread();
for (int i = 0, n = jobs.size(); i < n; i++) {
Misc.free(jobs.get(i));
}
for (int i = 0, n = scopeConfigs.size(); i < n; i++) {
Misc.free(scopeConfigs.getQuick(i));
}
}
public Log create(CharSequence key) {
if (!configured) {
throw new LogError("Not configured");
}
ScopeConfiguration scopeConfiguration = find(key);
if (scopeConfiguration == null) {
return new Logger(
clock,
compressScope(key),
null,
null,
null,
null,
null,
null,
null,
null
);
}
final Holder inf = scopeConfiguration.getHolder(Numbers.msb(LogLevel.LOG_LEVEL_INFO));
final Holder dbg = scopeConfiguration.getHolder(Numbers.msb(LogLevel.LOG_LEVEL_DEBUG));
final Holder err = scopeConfiguration.getHolder(Numbers.msb(LogLevel.LOG_LEVEL_ERROR));
final Holder adv = scopeConfiguration.getHolder(Numbers.msb(LogLevel.LOG_LEVEL_ADVISORY));
return new Logger(
clock,
compressScope(key),
dbg == null ? null : dbg.ring,
dbg == null ? null : dbg.lSeq,
inf == null ? null : inf.ring,
inf == null ? null : inf.lSeq,
err == null ? null : err.ring,
err == null ? null : err.lSeq,
adv == null ? null : adv.ring,
adv == null ? null : adv.lSeq
);
}
public ObjHashSet<LogWriter> getJobs() {
return jobs;
}
public int getQueueDepth() {
return queueDepth;
}
private void setQueueDepth(int queueDepth) {
this.queueDepth = queueDepth;
}
public int getRecordLength() {
return recordLength;
}
private void setRecordLength(int recordLength) {
this.recordLength = recordLength;
}
public void haltThread() {
if (workerPool != null) {
workerPool.halt();
workerPool = null;
}
}
public void startThread() {
if (this.workerPool != null) {
return;
}
this.workerPool = new WorkerPool(new WorkerPoolConfiguration() {
@Override
public int[] getWorkerAffinity() {
return new int[]{-1};
}
@Override
public int getWorkerCount() {
return 1;
}
@Override
public boolean haltOnError() {
return false;
}
@Override
public boolean isDaemonPool() {
return true;
}
});
assign(workerPool);
workerPool.start(null);
}
private static String getProperty(final Properties properties, String key) {
if (envEnabled) {
final String envValue = System.getenv("QDB_LOG_" + key.replace('.', '_').toUpperCase());
if (envValue == null) {
return properties.getProperty(key);
}
return envValue;
}
return properties.getProperty(key);
}
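// For example (editorial note, not in the original source): with envEnabled,
// the property key "w.stdout.level" is first looked up as the environment
// variable QDB_LOG_W_STDOUT_LEVEL and only falls back to the Properties value
// when that variable is not set. The writer name "stdout" here is illustrative.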
@SuppressWarnings("rawtypes")
private static LogWriterConfig createWriter(final Properties properties, String w) {
final String writer = "w." + w + '.';
final String clazz = getProperty(properties, writer + "class");
final String levelStr = getProperty(properties, writer + "level");
final String scope = getProperty(properties, writer + "scope");
if (clazz == null) {
return null;
}
final Class<?> cl;
final Constructor constructor;
try {
cl = Class.forName(clazz);
constructor = cl.getDeclaredConstructor(RingQueue.class, SCSequence.class, int.class);
} catch (ClassNotFoundException e) {
throw new LogError("Class not found " + clazz, e);
} catch (NoSuchMethodException e) {
throw new LogError("Constructor(RingQueue, Sequence, int) expected: " + clazz, e);
}
int level = 0;
if (levelStr != null) {
for (String s : levelStr.split(",")) {
switch (s.toUpperCase()) {
case "DEBUG":
level |= LogLevel.LOG_LEVEL_DEBUG;
break;
case "INFO":
level |= LogLevel.LOG_LEVEL_INFO;
break;
case "ERROR":
level |= LogLevel.LOG_LEVEL_ERROR;
break;
case "ADVISORY":
level |= LogLevel.LOG_LEVEL_ADVISORY;
break;
default:
throw new LogError("Unknown level: " + s);
}
}
}
if (isForcedDebug()) {
level = level | LogLevel.LOG_LEVEL_DEBUG;
}
// enable all LOG levels above the minimum set one
// ((-1 >>> (msb-1)) << msb) | level
final int msb = Numbers.msb(level);
level = (((-1 >>> (msb - 1)) << msb) | level) & LogLevel.LOG_LEVEL_MASK;
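// Worked example (editorial note, assuming the bit values DEBUG=0x1, INFO=0x2,
// ERROR=0x4, ADVISORY=0x8): level = INFO (0b0010) gives msb = 1, so
// ((-1 >>> 0) << 1) | 0b0010, masked down to the defined level bits, yields
// 0b1110, i.e. INFO, ERROR and ADVISORY are enabled while DEBUG stays off.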
return new LogWriterConfig(scope == null ? EMPTY_STR : scope, level, (ring, seq, level1) -> {
try {
LogWriter w1 = (LogWriter) constructor.newInstance(ring, seq, level1);
for (String n : properties.stringPropertyNames()) {
if (n.startsWith(writer)) {
String p = n.substring(writer.length());
if (reserved.contains(p)) {
continue;
}
try {
Field f = cl.getDeclaredField(p);
if (f.getType() == String.class) {
Unsafe.getUnsafe().putObject(w1, Unsafe.getUnsafe().objectFieldOffset(f), getProperty(properties, n));
}
} catch (Exception e) {
throw new LogError("Unknown property: " + n, e);
}
}
}
return w1;
} catch (Exception e) {
throw new LogError("Error creating log writer", e);
}
});
}
private static boolean isForcedDebug() {
return System.getProperty(DEBUG_TRIGGER) != null || System.getenv().containsKey(DEBUG_TRIGGER_ENV);
}
/**
* Converts a fully qualified class name into an abbreviated form:
* com.questdb.mp.Sequence -> c.q.m.Sequence
*
* @param key typically class name
* @return abbreviated form of key
*/
private static CharSequence compressScope(CharSequence key) {
StringBuilder builder = new StringBuilder();
char c = 0;
boolean pick = true;
int z = 0;
for (int i = 0, n = key.length(); i < n; i++) {
char a = key.charAt(i);
if (a == '.') {
if (!pick) {
builder.append(c).append('.');
pick = true;
}
} else if (pick) {
c = a;
z = i;
pick = false;
}
}
for (; z < key.length(); z++) {
builder.append(key.charAt(z));
}
builder.append(' ');
return builder;
}
private void configureDefaultWriter() {
int level = LogLevel.LOG_LEVEL_INFO | LogLevel.LOG_LEVEL_ERROR | LogLevel.LOG_LEVEL_ADVISORY;
if (isForcedDebug()) {
level = level | LogLevel.LOG_LEVEL_DEBUG;
}
add(new LogWriterConfig(level, LogConsoleWriter::new));
bind();
}
private ScopeConfiguration find(CharSequence key) {
ObjList<CharSequence> keys = scopeConfigMap.keys();
CharSequence k = null;
for (int i = 0, n = keys.size(); i < n; i++) {
CharSequence s = keys.getQuick(i);
if (Chars.startsWith(key, s)) {
k = s;
break;
}
}
if (k == null) {
return null;
}
return scopeConfigMap.get(k);
}
private static class ScopeConfiguration implements Closeable {
private final int[] channels;
private final ObjList<LogWriterConfig> writerConfigs = new ObjList<>();
private final IntObjHashMap<Holder> holderMap = new IntObjHashMap<>();
private final ObjList<Holder> holderList = new ObjList<>();
private int ci = 0;
public ScopeConfiguration(int levels) {
this.channels = new int[levels];
}
public void bind(ObjHashSet<LogWriter> jobs, int queueDepth, int recordLength) {
// create queues for processed channels
for (int i = 0, n = channels.length; i < n; i++) {
int index = channels[i];
if (index > 0) {
int keyIndex = holderMap.keyIndex(index);
if (keyIndex > -1) {
Holder h = new Holder(queueDepth, recordLength);
holderMap.putAt(keyIndex, index, h);
holderList.add(h);
}
}
}
for (int i = 0, n = writerConfigs.size(); i < n; i++) {
LogWriterConfig c = writerConfigs.getQuick(i);
// the channels array has a guarantee that
// all bits in level mask will point to the same queue
// so we just get most significant bit number
// and dereference queue on its index
Holder h = holderMap.get(channels[Numbers.msb(c.getLevel())]);
// check if this queue was used by another writer
if (h.wSeq != null) {
// yes, it was
if (h.fanOut == null) {
h.fanOut = FanOut.to(h.wSeq).and(h.wSeq = new SCSequence());
} else {
h.fanOut.and(h.wSeq = new SCSequence());
}
} else {
// we are here first!
h.wSeq = new SCSequence();
}
// now h.wSeq contains our writer's sequence
jobs.add(c.getFactory().createLogWriter(h.ring, h.wSeq, c.getLevel()));
}
// and the last step is to link dependent sequences
for (int i = 0, n = holderList.size(); i < n; i++) {
Holder h = holderList.getQuick(i);
if (h.fanOut != null) {
h.lSeq.then(h.fanOut).then(h.lSeq);
} else {
h.lSeq.then(h.wSeq).then(h.lSeq);
}
}
}
@Override
public void close() {
for (int i = 0, n = holderList.size(); i < n; i++) {
Misc.free(holderList.getQuick(i));
}
}
/**
* Aggregates channels into a set of queues. Consumer interest is represented by
* level, where each consumer sets the bits corresponding to the channel indexes it is interested in.
* <p>
* Consumer 1 requires channels D & E. So its interest looks like {1,0,1}
* Consumer 2 requires channel I, so its interest is {0,1,0}
* <p>
* This method combines these interests as follows:
* <p>
* channels = {1,2,1}
* <p>
* which means that there will need to be 2 queues (1 and 2), and that Consumer 1
* will be using queue 1 and Consumer 2 will be using queue 2.
* <p>
* More complex scenario where consumer interests overlap, for example:
* <p>
* consumer 1 {1,1,0}
* consumer 2 {0,1,1}
* <p>
* these interests will be combined as follows:
* <p>
* channels = {1,1,1}
* <p>
* which means that both consumers will be sharing the same queue and will have to
* filter for applicable messages as they receive them.
* <p>
* The algorithm iterates over the set of bits in "level" twice. The first pass establishes
* the minimum value among the channel[] elements whose bit in level is set.
* Additionally, this pass sets any channel[] element that is zero to the current consumer index.
* <p>
* The second pass sets those channel[] elements to the minimum value found on the first pass.
*
* @param conf LogWriterConfig
*/
private void add(LogWriterConfig conf) {
int mask = conf.getLevel();
int min = Integer.MAX_VALUE;
int q = ++ci;
for (int i = 0, n = channels.length; i < n; i++) {
if (((mask >> i) & 1) == 1) {
int that = channels[i];
if (that == 0) {
channels[i] = q;
}
if (that > 0 && that < min) {
min = that;
}
}
}
if (mask > 1 && min < Integer.MAX_VALUE) {
for (int i = 0, n = channels.length; i < n; i++) {
if (((mask >> i) & 1) == 1) {
channels[i] = min;
}
}
}
writerConfigs.add(conf);
}
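// Trace of the overlapping example from the javadoc above (editorial note):
// consumer 1 with mask {1,1,0} claims q=1, leaving channels = {1,1,0};
// consumer 2 with mask {0,1,1} gets q=2, first writing channels = {1,1,2},
// then the second loop collapses its bits to the existing minimum 1,
// giving channels = {1,1,1}, so both consumers end up sharing queue 1.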
private Holder getHolder(int index) {
return holderMap.get(channels[index]);
}
}
private static class LengthDescendingComparator implements Comparator<CharSequence>, Serializable {
@Override
public int compare(CharSequence o1, CharSequence o2) {
int l1, l2;
if ((l1 = o1.length()) < (l2 = o2.length())) {
return 1;
}
if (l1 > l2) {
return -1;
}
return 0;
}
}
private static class Holder implements Closeable {
private final RingQueue<LogRecordSink> ring;
private final Sequence lSeq;
private SCSequence wSeq;
private FanOut fanOut;
public Holder(int queueDepth, final int recordLength) {
this.ring = new RingQueue<>(
LogRecordSink::new,
Numbers.ceilPow2(recordLength),
queueDepth,
MemoryTag.NATIVE_DEFAULT
);
this.lSeq = new MPSequence(queueDepth);
}
@Override
public void close() {
Misc.free(ring);
}
}
static {
reserved.add("scope");
reserved.add("class");
reserved.add("level");
Os.init();
}
}
| [
"\"QDB_LOG_\" + key.replace('.', '_'"
] | [] | [
"QDB_LOG_\" + key.replace('.', '_"
] | [] | ["QDB_LOG_\" + key.replace('.', '_"] | java | 1 | 0 | |
venv/lib/python2.7/site-packages/pyramid/authentication.py | import binascii
from codecs import utf_8_decode
from codecs import utf_8_encode
from collections import namedtuple
import hashlib
import base64
import re
import time as time_mod
import warnings
from zope.interface import implementer
from webob.cookies import CookieProfile
from pyramid.compat import (
long,
text_type,
binary_type,
url_unquote,
url_quote,
bytes_,
ascii_native_,
native_,
)
from pyramid.interfaces import (
IAuthenticationPolicy,
IDebugLogger,
)
from pyramid.security import (
Authenticated,
Everyone,
)
from pyramid.util import strings_differ
VALID_TOKEN = re.compile(r"^[A-Za-z][A-Za-z0-9+_-]*$")
class CallbackAuthenticationPolicy(object):
""" Abstract class """
debug = False
callback = None
def _log(self, msg, methodname, request):
logger = request.registry.queryUtility(IDebugLogger)
if logger:
cls = self.__class__
classname = cls.__module__ + '.' + cls.__name__
methodname = classname + '.' + methodname
logger.debug(methodname + ': ' + msg)
def _clean_principal(self, princid):
if princid in (Authenticated, Everyone):
princid = None
return princid
def authenticated_userid(self, request):
""" Return the authenticated userid or ``None``.
If no callback is registered, this will be the same as
``unauthenticated_userid``.
If a ``callback`` is registered, this will return the userid if
and only if the callback returns a value that is not ``None``.
"""
debug = self.debug
userid = self.unauthenticated_userid(request)
if userid is None:
debug and self._log(
'call to unauthenticated_userid returned None; returning None',
'authenticated_userid',
request)
return None
if self._clean_principal(userid) is None:
debug and self._log(
('use of userid %r is disallowed by any built-in Pyramid '
'security policy, returning None' % userid),
'authenticated_userid',
request)
return None
if self.callback is None:
debug and self._log(
'there was no groupfinder callback; returning %r' % (userid,),
'authenticated_userid',
request)
return userid
callback_ok = self.callback(userid, request)
if callback_ok is not None: # is not None!
debug and self._log(
'groupfinder callback returned %r; returning %r' % (
callback_ok, userid),
'authenticated_userid',
request
)
return userid
debug and self._log(
'groupfinder callback returned None; returning None',
'authenticated_userid',
request
)
def effective_principals(self, request):
""" A list of effective principals derived from request.
This will return a list of principals including, at least,
:data:`pyramid.security.Everyone`. If there is no authenticated
userid, or the ``callback`` returns ``None``, this will be the
only principal:
.. code-block:: python
return [Everyone]
If the ``callback`` does not return ``None`` and an authenticated
userid is found, then the principals will include
:data:`pyramid.security.Authenticated`, the ``authenticated_userid``
and the list of principals returned by the ``callback``:
.. code-block:: python
extra_principals = callback(userid, request)
return [Everyone, Authenticated, userid] + extra_principals
"""
debug = self.debug
effective_principals = [Everyone]
userid = self.unauthenticated_userid(request)
if userid is None:
debug and self._log(
'unauthenticated_userid returned %r; returning %r' % (
userid, effective_principals),
'effective_principals',
request
)
return effective_principals
if self._clean_principal(userid) is None:
debug and self._log(
('unauthenticated_userid returned disallowed %r; returning %r '
'as if it was None' % (userid, effective_principals)),
'effective_principals',
request
)
return effective_principals
if self.callback is None:
debug and self._log(
'groupfinder callback is None, so groups is []',
'effective_principals',
request)
groups = []
else:
groups = self.callback(userid, request)
debug and self._log(
'groupfinder callback returned %r as groups' % (groups,),
'effective_principals',
request)
if groups is None: # is None!
debug and self._log(
'returning effective principals: %r' % (
effective_principals,),
'effective_principals',
request
)
return effective_principals
effective_principals.append(Authenticated)
effective_principals.append(userid)
effective_principals.extend(groups)
debug and self._log(
'returning effective principals: %r' % (
effective_principals,),
'effective_principals',
request
)
return effective_principals
@implementer(IAuthenticationPolicy)
class RepozeWho1AuthenticationPolicy(CallbackAuthenticationPolicy):
""" A :app:`Pyramid` :term:`authentication policy` which
obtains data from the :mod:`repoze.who` 1.X WSGI 'API' (the
``repoze.who.identity`` key in the WSGI environment).
Constructor Arguments
``identifier_name``
Default: ``auth_tkt``. The :mod:`repoze.who` plugin name that
performs remember/forget. Optional.
``callback``
Default: ``None``. A callback passed the :mod:`repoze.who` identity
and the :term:`request`, expected to return ``None`` if the user
represented by the identity doesn't exist or a sequence of principal
identifiers (possibly empty) representing groups if the user does
exist. If ``callback`` is None, the userid will be assumed to exist
with no group principals.
Objects of this class implement the interface described by
:class:`pyramid.interfaces.IAuthenticationPolicy`.
"""
def __init__(self, identifier_name='auth_tkt', callback=None):
self.identifier_name = identifier_name
self.callback = callback
def _get_identity(self, request):
return request.environ.get('repoze.who.identity')
def _get_identifier(self, request):
plugins = request.environ.get('repoze.who.plugins')
if plugins is None:
return None
identifier = plugins[self.identifier_name]
return identifier
def authenticated_userid(self, request):
""" Return the authenticated userid or ``None``.
If no callback is registered, this will be the same as
``unauthenticated_userid``.
If a ``callback`` is registered, this will return the userid if
and only if the callback returns a value that is not ``None``.
"""
identity = self._get_identity(request)
if identity is None:
self.debug and self._log(
'repoze.who identity is None, returning None',
'authenticated_userid',
request)
return None
userid = identity['repoze.who.userid']
if userid is None:
self.debug and self._log(
'repoze.who.userid is None, returning None',
'authenticated_userid',
request)
return None
if self._clean_principal(userid) is None:
self.debug and self._log(
('use of userid %r is disallowed by any built-in Pyramid '
'security policy, returning None' % userid),
'authenticated_userid',
request)
return None
if self.callback is None:
return userid
if self.callback(identity, request) is not None: # is not None!
return userid
def unauthenticated_userid(self, request):
""" Return the ``repoze.who.userid`` key from the detected identity."""
identity = self._get_identity(request)
if identity is None:
return None
return identity['repoze.who.userid']
def effective_principals(self, request):
""" A list of effective principals derived from the identity.
This will return a list of principals including, at least,
:data:`pyramid.security.Everyone`. If there is no identity, or
the ``callback`` returns ``None``, this will be the only principal.
If the ``callback`` does not return ``None`` and an identity is
found, then the principals will include
:data:`pyramid.security.Authenticated`, the ``authenticated_userid``
and the list of principals returned by the ``callback``.
"""
effective_principals = [Everyone]
identity = self._get_identity(request)
if identity is None:
self.debug and self._log(
('repoze.who identity was None; returning %r' %
effective_principals),
'effective_principals',
request
)
return effective_principals
if self.callback is None:
groups = []
else:
groups = self.callback(identity, request)
if groups is None: # is None!
self.debug and self._log(
('security policy groups callback returned None; returning %r' %
effective_principals),
'effective_principals',
request
)
return effective_principals
userid = identity['repoze.who.userid']
if userid is None:
self.debug and self._log(
('repoze.who.userid was None; returning %r' %
effective_principals),
'effective_principals',
request
)
return effective_principals
if self._clean_principal(userid) is None:
self.debug and self._log(
('unauthenticated_userid returned disallowed %r; returning %r '
'as if it was None' % (userid, effective_principals)),
'effective_principals',
request
)
return effective_principals
effective_principals.append(Authenticated)
effective_principals.append(userid)
effective_principals.extend(groups)
return effective_principals
def remember(self, request, userid, **kw):
""" Store the ``userid`` as ``repoze.who.userid``.
The identity to authenticated to :mod:`repoze.who`
will contain the given userid as ``userid``, and
provide all keyword arguments as additional identity
keys. Useful keys could be ``max_age`` or ``userdata``.
"""
identifier = self._get_identifier(request)
if identifier is None:
return []
environ = request.environ
identity = kw
identity['repoze.who.userid'] = userid
return identifier.remember(environ, identity)
def forget(self, request):
""" Forget the current authenticated user.
Return headers that, if included in a response, will delete the
cookie responsible for tracking the current user.
"""
identifier = self._get_identifier(request)
if identifier is None:
return []
identity = self._get_identity(request)
return identifier.forget(request.environ, identity)
@implementer(IAuthenticationPolicy)
class RemoteUserAuthenticationPolicy(CallbackAuthenticationPolicy):
""" A :app:`Pyramid` :term:`authentication policy` which
obtains data from the ``REMOTE_USER`` WSGI environment variable.
Constructor Arguments
``environ_key``
Default: ``REMOTE_USER``. The key in the WSGI environ which
provides the userid.
``callback``
Default: ``None``. A callback passed the userid and the request,
expected to return None if the userid doesn't exist or a sequence of
principal identifiers (possibly empty) representing groups if the
user does exist. If ``callback`` is None, the userid will be assumed
to exist with no group principals.
``debug``
Default: ``False``. If ``debug`` is ``True``, log messages to the
Pyramid debug logger about the results of various authentication
steps. The output from debugging is useful for reporting to the mailing list
or IRC channels when asking for support.
Objects of this class implement the interface described by
:class:`pyramid.interfaces.IAuthenticationPolicy`.
"""
def __init__(self, environ_key='REMOTE_USER', callback=None, debug=False):
self.environ_key = environ_key
self.callback = callback
self.debug = debug
def unauthenticated_userid(self, request):
""" The ``REMOTE_USER`` value found within the ``environ``."""
return request.environ.get(self.environ_key)
def remember(self, request, userid, **kw):
""" A no-op. The ``REMOTE_USER`` does not provide a protocol for
remembering the user. This will be application-specific and can
be done somewhere else or in a subclass."""
return []
def forget(self, request):
""" A no-op. The ``REMOTE_USER`` does not provide a protocol for
forgetting the user. This will be application-specific and can
be done somewhere else or in a subclass."""
return []
@implementer(IAuthenticationPolicy)
class AuthTktAuthenticationPolicy(CallbackAuthenticationPolicy):
"""A :app:`Pyramid` :term:`authentication policy` which
obtains data from a Pyramid "auth ticket" cookie.
Constructor Arguments
``secret``
The secret (a string) used for auth_tkt cookie signing. This value
should be unique across all values provided to Pyramid for various
subsystem secrets (see :ref:`admonishment_against_secret_sharing`).
Required.
``callback``
Default: ``None``. A callback passed the userid and the
request, expected to return ``None`` if the userid doesn't
exist or a sequence of principal identifiers (possibly empty) if
the user does exist. If ``callback`` is ``None``, the userid
will be assumed to exist with no principals. Optional.
``cookie_name``
Default: ``auth_tkt``. The cookie name used
(string). Optional.
``secure``
Default: ``False``. Only send the cookie back over a secure
conn. Optional.
``include_ip``
Default: ``False``. Make the requesting IP address part of
the authentication data in the cookie. Optional.
For IPv6 this option is not recommended. The ``mod_auth_tkt``
specification does not specify how to handle IPv6 addresses, so using
this option in combination with IPv6 addresses may cause an
incompatible cookie. It ties the authentication ticket to that
individual's IPv6 address.
``timeout``
Default: ``None``. Maximum number of seconds which a newly
issued ticket will be considered valid. After this amount of
time, the ticket will expire (effectively logging the user
out). If this value is ``None``, the ticket never expires.
Optional.
``reissue_time``
Default: ``None``. If this parameter is set, it represents the number
of seconds that must pass before an authentication token cookie is
automatically reissued as the result of a request which requires
authentication. The duration is measured as the number of seconds
since the last auth_tkt cookie was issued and 'now'. If this value is
``0``, a new ticket cookie will be reissued on every request which
requires authentication.
A good rule of thumb: if you want auto-expired cookies based on
inactivity: set the ``timeout`` value to 1200 (20 mins) and set the
``reissue_time`` value to perhaps a tenth of the ``timeout`` value
(120 or 2 mins). It's nonsensical to set the ``timeout`` value lower
than the ``reissue_time`` value, as the ticket will never be reissued
if so. However, such a configuration is not explicitly prevented.
Optional.
``max_age``
Default: ``None``. The max age of the auth_tkt cookie, in
seconds. This differs from ``timeout`` inasmuch as ``timeout``
represents the lifetime of the ticket contained in the cookie,
while this value represents the lifetime of the cookie itself.
When this value is set, the cookie's ``Max-Age`` and
``Expires`` settings will be set, allowing the auth_tkt cookie
to last between browser sessions. It is typically nonsensical
to set this to a value that is lower than ``timeout`` or
``reissue_time``, although it is not explicitly prevented.
Optional.
``path``
Default: ``/``. The path for which the auth_tkt cookie is valid.
May be desirable if the application only serves part of a domain.
Optional.
``http_only``
Default: ``False``. Hide cookie from JavaScript by setting the
HttpOnly flag. Not honored by all browsers.
Optional.
``wild_domain``
Default: ``True``. An auth_tkt cookie will be generated for the
wildcard domain. If your site is hosted as ``example.com`` this
will make the cookie available for sites underneath ``example.com``
such as ``www.example.com``.
Optional.
``parent_domain``
Default: ``False``. An auth_tkt cookie will be generated for the
parent domain of the current site. For example if your site is
hosted under ``www.example.com`` a cookie will be generated for
``.example.com``. This can be useful if you have multiple sites
sharing the same domain. This option supersedes the ``wild_domain``
option.
Optional.
This option is available as of :app:`Pyramid` 1.5.
``domain``
Default: ``None``. If provided the auth_tkt cookie will only be
set for this domain. This option is not compatible with ``wild_domain``
and ``parent_domain``.
Optional.
This option is available as of :app:`Pyramid` 1.5.
``hashalg``
Default: ``sha512`` (the literal string).
Any hash algorithm supported by Python's ``hashlib.new()`` function
can be used as the ``hashalg``.
Cookies generated by different instances of AuthTktAuthenticationPolicy
using different ``hashalg`` options are not compatible. Switching the
``hashalg`` will imply that all existing users with a valid cookie will
be required to re-login.
This option is available as of :app:`Pyramid` 1.4.
Optional.
``debug``
Default: ``False``. If ``debug`` is ``True``, log messages to the
Pyramid debug logger about the results of various authentication
steps. The output from debugging is useful for reporting to the mailing list
or IRC channels when asking for support.
Objects of this class implement the interface described by
:class:`pyramid.interfaces.IAuthenticationPolicy`.
"""
def __init__(self,
secret,
callback=None,
cookie_name='auth_tkt',
secure=False,
include_ip=False,
timeout=None,
reissue_time=None,
max_age=None,
path="/",
http_only=False,
wild_domain=True,
debug=False,
hashalg='sha512',
parent_domain=False,
domain=None,
):
self.cookie = AuthTktCookieHelper(
secret,
cookie_name=cookie_name,
secure=secure,
include_ip=include_ip,
timeout=timeout,
reissue_time=reissue_time,
max_age=max_age,
http_only=http_only,
path=path,
wild_domain=wild_domain,
hashalg=hashalg,
parent_domain=parent_domain,
domain=domain,
)
self.callback = callback
self.debug = debug
def unauthenticated_userid(self, request):
""" The userid key within the auth_tkt cookie."""
result = self.cookie.identify(request)
if result:
return result['userid']
def remember(self, request, userid, **kw):
""" Accepts the following kw args: ``max_age=<int-seconds>,
``tokens=<sequence-of-ascii-strings>``.
Return a list of headers which will set appropriate cookies on
the response.
"""
return self.cookie.remember(request, userid, **kw)
def forget(self, request):
""" A list of headers which will delete appropriate cookies."""
return self.cookie.forget(request)
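# Illustrative configuration sketch (editorial note, not part of this module).
# The policy is normally wired up in application setup code, for example:
#
#   from pyramid.config import Configurator
#   authn_policy = AuthTktAuthenticationPolicy(
#       'seekrit', hashalg='sha512', timeout=1200, reissue_time=120)
#   config = Configurator(authentication_policy=authn_policy)
#
# where 'seekrit' stands in for a real per-deployment secret.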
def b64encode(v):
return base64.b64encode(bytes_(v)).strip().replace(b'\n', b'')
def b64decode(v):
return base64.b64decode(bytes_(v))
# this class licensed under the MIT license (stolen from Paste)
class AuthTicket(object):
"""
This class represents an authentication token. You must pass in
the shared secret, the userid, and the IP address. Optionally you
can include tokens (a list of strings, representing role names),
'user_data', which is arbitrary data available for your own use in
later scripts. Lastly, you can override the cookie name and
timestamp.
Once you provide all the arguments, use .cookie_value() to
generate the appropriate authentication ticket.
Usage::
token = AuthTicket('sharedsecret', 'username',
os.environ['REMOTE_ADDR'], tokens=['admin'])
val = token.cookie_value()
"""
def __init__(self, secret, userid, ip, tokens=(), user_data='',
time=None, cookie_name='auth_tkt', secure=False,
hashalg='md5'):
self.secret = secret
self.userid = userid
self.ip = ip
self.tokens = ','.join(tokens)
self.user_data = user_data
if time is None:
self.time = time_mod.time()
else:
self.time = time
self.cookie_name = cookie_name
self.secure = secure
self.hashalg = hashalg
def digest(self):
return calculate_digest(
self.ip, self.time, self.secret, self.userid, self.tokens,
self.user_data, self.hashalg)
def cookie_value(self):
v = '%s%08x%s!' % (self.digest(), int(self.time),
url_quote(self.userid))
if self.tokens:
v += self.tokens + '!'
v += self.user_data
return v
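# (editorial note) The resulting cookie value is laid out as:
#   <hex digest><8 hex chars of timestamp><url-quoted userid>!<tokens>!<user_data>
# with the tokens segment (and its trailing '!') present only when tokens were
# supplied; parse_ticket() below reverses this layout.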
# this class licensed under the MIT license (stolen from Paste)
class BadTicket(Exception):
"""
Exception raised when a ticket can't be parsed. If we get far enough to
determine what the expected digest should have been, expected is set.
This should not be shown by default, but can be useful for debugging.
"""
def __init__(self, msg, expected=None):
self.expected = expected
Exception.__init__(self, msg)
# this function licensed under the MIT license (stolen from Paste)
def parse_ticket(secret, ticket, ip, hashalg='md5'):
"""
Parse the ticket, returning (timestamp, userid, tokens, user_data).
If the ticket cannot be parsed, a ``BadTicket`` exception will be raised
with an explanation.
"""
ticket = native_(ticket).strip('"')
digest_size = hashlib.new(hashalg).digest_size * 2
digest = ticket[:digest_size]
try:
timestamp = int(ticket[digest_size:digest_size + 8], 16)
except ValueError as e:
raise BadTicket('Timestamp is not a hex integer: %s' % e)
try:
userid, data = ticket[digest_size + 8:].split('!', 1)
except ValueError:
raise BadTicket('userid is not followed by !')
userid = url_unquote(userid)
if '!' in data:
tokens, user_data = data.split('!', 1)
else: # pragma: no cover (never generated)
# @@: Is this the right order?
tokens = ''
user_data = data
expected = calculate_digest(ip, timestamp, secret,
userid, tokens, user_data, hashalg)
# Avoid timing attacks (see
# http://seb.dbzteam.org/crypto/python-oauth-timing-hmac.pdf)
if strings_differ(expected, digest):
raise BadTicket('Digest signature is not correct',
expected=(expected, digest))
tokens = tokens.split(',')
return (timestamp, userid, tokens, user_data)
# this function licensed under the MIT license (stolen from Paste)
def calculate_digest(ip, timestamp, secret, userid, tokens, user_data,
hashalg='md5'):
secret = bytes_(secret, 'utf-8')
userid = bytes_(userid, 'utf-8')
tokens = bytes_(tokens, 'utf-8')
user_data = bytes_(user_data, 'utf-8')
hash_obj = hashlib.new(hashalg)
# Check to see if this is an IPv6 address
if ':' in ip:
ip_timestamp = ip + str(int(timestamp))
ip_timestamp = bytes_(ip_timestamp)
else:
# encode_ip_timestamp not required, left in for backwards compatibility
ip_timestamp = encode_ip_timestamp(ip, timestamp)
hash_obj.update(ip_timestamp + secret + userid + b'\0' +
tokens + b'\0' + user_data)
digest = hash_obj.hexdigest()
hash_obj2 = hashlib.new(hashalg)
hash_obj2.update(bytes_(digest) + secret)
return hash_obj2.hexdigest()
# this function licensed under the MIT license (stolen from Paste)
def encode_ip_timestamp(ip, timestamp):
ip_chars = ''.join(map(chr, map(int, ip.split('.'))))
t = int(timestamp)
ts = ((t & 0xff000000) >> 24,
(t & 0xff0000) >> 16,
(t & 0xff00) >> 8,
t & 0xff)
ts_chars = ''.join(map(chr, ts))
return bytes_(ip_chars + ts_chars)
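# (editorial note) encode_ip_timestamp packs an IPv4 address and timestamp into
# 8 bytes: one byte per dotted octet followed by the timestamp as 4 big-endian
# bytes. For example, ip='127.0.0.1' with timestamp=0x01020304 becomes
# b'\x7f\x00\x00\x01\x01\x02\x03\x04'.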
class AuthTktCookieHelper(object):
"""
A helper class for use in third-party authentication policy
implementations. See
:class:`pyramid.authentication.AuthTktAuthenticationPolicy` for the
meanings of the constructor arguments.
"""
parse_ticket = staticmethod(parse_ticket) # for tests
AuthTicket = AuthTicket # for tests
BadTicket = BadTicket # for tests
now = None # for tests
userid_type_decoders = {
'int':int,
'unicode':lambda x: utf_8_decode(x)[0], # bw compat for old cookies
'b64unicode': lambda x: utf_8_decode(b64decode(x))[0],
'b64str': lambda x: b64decode(x),
}
userid_type_encoders = {
int: ('int', str),
long: ('int', str),
text_type: ('b64unicode', lambda x: b64encode(utf_8_encode(x)[0])),
binary_type: ('b64str', lambda x: b64encode(x)),
}
def __init__(self, secret, cookie_name='auth_tkt', secure=False,
include_ip=False, timeout=None, reissue_time=None,
max_age=None, http_only=False, path="/", wild_domain=True,
hashalg='md5', parent_domain=False, domain=None):
serializer = _SimpleSerializer()
self.cookie_profile = CookieProfile(
cookie_name=cookie_name,
secure=secure,
max_age=max_age,
httponly=http_only,
path=path,
serializer=serializer
)
self.secret = secret
self.cookie_name = cookie_name
self.secure = secure
self.include_ip = include_ip
self.timeout = timeout if timeout is None else int(timeout)
self.reissue_time = reissue_time if reissue_time is None else int(reissue_time)
self.max_age = max_age if max_age is None else int(max_age)
self.wild_domain = wild_domain
self.parent_domain = parent_domain
self.domain = domain
self.hashalg = hashalg
def _get_cookies(self, request, value, max_age=None):
cur_domain = request.domain
domains = []
if self.domain:
domains.append(self.domain)
else:
if self.parent_domain and cur_domain.count('.') > 1:
domains.append('.' + cur_domain.split('.', 1)[1])
else:
domains.append(None)
domains.append(cur_domain)
if self.wild_domain:
domains.append('.' + cur_domain)
profile = self.cookie_profile(request)
kw = {}
kw['domains'] = domains
if max_age is not None:
kw['max_age'] = max_age
headers = profile.get_headers(value, **kw)
return headers
def identify(self, request):
""" Return a dictionary with authentication information, or ``None``
if no valid auth_tkt is attached to ``request``"""
environ = request.environ
cookie = request.cookies.get(self.cookie_name)
if cookie is None:
return None
if self.include_ip:
remote_addr = environ['REMOTE_ADDR']
else:
remote_addr = '0.0.0.0'
try:
timestamp, userid, tokens, user_data = self.parse_ticket(
self.secret, cookie, remote_addr, self.hashalg)
except self.BadTicket:
return None
now = self.now # service tests
if now is None:
now = time_mod.time()
if self.timeout and ( (timestamp + self.timeout) < now ):
# the auth_tkt data has expired
return None
userid_typename = 'userid_type:'
user_data_info = user_data.split('|')
for datum in filter(None, user_data_info):
if datum.startswith(userid_typename):
userid_type = datum[len(userid_typename):]
decoder = self.userid_type_decoders.get(userid_type)
if decoder:
userid = decoder(userid)
reissue = self.reissue_time is not None
if reissue and not hasattr(request, '_authtkt_reissued'):
if ( (now - timestamp) > self.reissue_time ):
# See https://github.com/Pylons/pyramid/issues#issue/108
tokens = list(filter(None, tokens))
headers = self.remember(request, userid, max_age=self.max_age,
tokens=tokens)
def reissue_authtkt(request, response):
if not hasattr(request, '_authtkt_reissue_revoked'):
for k, v in headers:
response.headerlist.append((k, v))
request.add_response_callback(reissue_authtkt)
request._authtkt_reissued = True
environ['REMOTE_USER_TOKENS'] = tokens
environ['REMOTE_USER_DATA'] = user_data
environ['AUTH_TYPE'] = 'cookie'
identity = {}
identity['timestamp'] = timestamp
identity['userid'] = userid
identity['tokens'] = tokens
identity['userdata'] = user_data
return identity
def forget(self, request):
""" Return a set of expires Set-Cookie headers, which will destroy
any existing auth_tkt cookie when attached to a response"""
request._authtkt_reissue_revoked = True
return self._get_cookies(request, None)
def remember(self, request, userid, max_age=None, tokens=()):
""" Return a set of Set-Cookie headers; when set into a response,
these headers will represent a valid authentication ticket.
``max_age``
The max age of the auth_tkt cookie, in seconds. When this value is
set, the cookie's ``Max-Age`` and ``Expires`` settings will be set,
allowing the auth_tkt cookie to last between browser sessions. If
this value is ``None``, the ``max_age`` value provided to the
helper itself will be used as the ``max_age`` value. Default:
``None``.
``tokens``
A sequence of strings that will be placed into the auth_tkt tokens
field. Each string in the sequence must be of the Python ``str``
type and must match the regex ``^[A-Za-z][A-Za-z0-9+_-]*$``.
Tokens are available in the returned identity when an auth_tkt is
found in the request and unpacked. Default: ``()``.
"""
max_age = self.max_age if max_age is None else int(max_age)
environ = request.environ
if self.include_ip:
remote_addr = environ['REMOTE_ADDR']
else:
remote_addr = '0.0.0.0'
user_data = ''
encoding_data = self.userid_type_encoders.get(type(userid))
if encoding_data:
encoding, encoder = encoding_data
else:
warnings.warn(
"userid is of type {}, and is not supported by the "
"AuthTktAuthenticationPolicy. Explicitly converting to string "
"and storing as base64. Subsequent requests will receive a "
"string as the userid, it will not be decoded back to the type "
"provided.".format(type(userid)), RuntimeWarning
)
encoding, encoder = self.userid_type_encoders.get(text_type)
userid = str(userid)
userid = encoder(userid)
user_data = 'userid_type:%s' % encoding
new_tokens = []
for token in tokens:
if isinstance(token, text_type):
try:
token = ascii_native_(token)
except UnicodeEncodeError:
raise ValueError("Invalid token %r" % (token,))
if not (isinstance(token, str) and VALID_TOKEN.match(token)):
raise ValueError("Invalid token %r" % (token,))
new_tokens.append(token)
tokens = tuple(new_tokens)
if hasattr(request, '_authtkt_reissued'):
request._authtkt_reissue_revoked = True
ticket = self.AuthTicket(
self.secret,
userid,
remote_addr,
tokens=tokens,
user_data=user_data,
cookie_name=self.cookie_name,
secure=self.secure,
hashalg=self.hashalg
)
cookie_value = ticket.cookie_value()
return self._get_cookies(request, cookie_value, max_age)
@implementer(IAuthenticationPolicy)
class SessionAuthenticationPolicy(CallbackAuthenticationPolicy):
""" A :app:`Pyramid` authentication policy which gets its data from the
configured :term:`session`. For this authentication policy to work, you
will have to follow the instructions in the :ref:`sessions_chapter` to
configure a :term:`session factory`.
Constructor Arguments
``prefix``
A prefix used when storing the authentication parameters in the
session. Defaults to 'auth.'. Optional.
``callback``
Default: ``None``. A callback passed the userid and the
request, expected to return ``None`` if the userid doesn't
exist or a sequence of principal identifiers (possibly empty) if
the user does exist. If ``callback`` is ``None``, the userid
will be assumed to exist with no principals. Optional.
``debug``
Default: ``False``. If ``debug`` is ``True``, log messages to the
Pyramid debug logger about the results of various authentication
steps. The output from debugging is useful for reporting to maillist
or IRC channels when asking for support.
"""
def __init__(self, prefix='auth.', callback=None, debug=False):
self.callback = callback
self.prefix = prefix or ''
self.userid_key = prefix + 'userid'
self.debug = debug
def remember(self, request, userid, **kw):
""" Store a userid in the session."""
request.session[self.userid_key] = userid
return []
def forget(self, request):
""" Remove the stored userid from the session."""
if self.userid_key in request.session:
del request.session[self.userid_key]
return []
def unauthenticated_userid(self, request):
return request.session.get(self.userid_key)
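# A minimal configuration sketch (not part of the original module), assuming a
# session factory is already configured and that `settings` and `groupfinder`
# are application-supplied:
#
#   from pyramid.config import Configurator
#   from pyramid.authorization import ACLAuthorizationPolicy
#
#   config = Configurator(settings=settings)
#   config.set_authentication_policy(SessionAuthenticationPolicy(callback=groupfinder))
#   config.set_authorization_policy(ACLAuthorizationPolicy())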
@implementer(IAuthenticationPolicy)
class BasicAuthAuthenticationPolicy(CallbackAuthenticationPolicy):
""" A :app:`Pyramid` authentication policy which uses HTTP standard basic
authentication protocol to authenticate users. To use this policy you will
need to provide a callback which checks the supplied user credentials
against your source of login data.
Constructor Arguments
``check``
A callback function passed a username, password and request, in that
order as positional arguments. Expected to return ``None`` if the
userid doesn't exist or a sequence of principal identifiers (possibly
empty) if the user does exist.
``realm``
Default: ``"Realm"``. The Basic Auth Realm string. Usually displayed to
the user by the browser in the login dialog.
``debug``
Default: ``False``. If ``debug`` is ``True``, log messages to the
Pyramid debug logger about the results of various authentication
steps. The output from debugging is useful for reporting to maillist
or IRC channels when asking for support.
**Issuing a challenge**
Regular browsers will not send username/password credentials unless they
first receive a challenge from the server. The following recipe will
register a view that will send a Basic Auth challenge to the user whenever
there is an attempt to call a view which results in a Forbidden response::
from pyramid.httpexceptions import HTTPUnauthorized
from pyramid.security import forget
from pyramid.view import forbidden_view_config
@forbidden_view_config()
def basic_challenge(request):
response = HTTPUnauthorized()
response.headers.update(forget(request))
return response
"""
def __init__(self, check, realm='Realm', debug=False):
self.check = check
self.realm = realm
self.debug = debug
def unauthenticated_userid(self, request):
""" The userid parsed from the ``Authorization`` request header."""
credentials = extract_http_basic_credentials(request)
if credentials:
return credentials.username
def remember(self, request, userid, **kw):
""" A no-op. Basic authentication does not provide a protocol for
remembering the user. Credentials are sent on every request.
"""
return []
def forget(self, request):
""" Returns challenge headers. This should be attached to a response
to indicate that credentials are required."""
return [('WWW-Authenticate', 'Basic realm="%s"' % self.realm)]
def callback(self, username, request):
# Username arg is ignored. Unfortunately
# extract_http_basic_credentials winds up getting called twice when
# authenticated_userid is called. Avoiding that, however,
# winds up duplicating logic from the superclass.
credentials = extract_http_basic_credentials(request)
if credentials:
username, password = credentials
return self.check(username, password, request)
class _SimpleSerializer(object):
def loads(self, bstruct):
return native_(bstruct)
def dumps(self, appstruct):
return bytes_(appstruct)
HTTPBasicCredentials = namedtuple(
'HTTPBasicCredentials', ['username', 'password'])
def extract_http_basic_credentials(request):
""" A helper function for extraction of HTTP Basic credentials
from a given :term:`request`.
Returns a :class:`.HTTPBasicCredentials` 2-tuple with ``username`` and
``password`` attributes or ``None`` if no credentials could be found.
"""
authorization = request.headers.get('Authorization')
if not authorization:
return None
try:
authmeth, auth = authorization.split(' ', 1)
except ValueError: # not enough values to unpack
return None
if authmeth.lower() != 'basic':
return None
try:
authbytes = b64decode(auth.strip())
except (TypeError, binascii.Error): # can't decode
return None
# try utf-8 first, then latin-1; see discussion in
# https://github.com/Pylons/pyramid/issues/898
try:
auth = authbytes.decode('utf-8')
except UnicodeDecodeError:
auth = authbytes.decode('latin-1')
try:
username, password = auth.split(':', 1)
except ValueError: # not enough values to unpack
return None
return HTTPBasicCredentials(username, password)
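# Worked example (not part of the original module): a request carrying the
# header "Authorization: Basic dXNlcjpwYXNz" (base64 of "user:pass") yields
# HTTPBasicCredentials(username='user', password='pass'), while a missing or
# non-Basic Authorization header yields None.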
| [] | [] | ["REMOTE_ADDR"] | [] | ["REMOTE_ADDR"] | python | 1 | 0 | |
ppo/main.py | import copy
import glob
import os
import sys

# os, sys, numpy and torch are used throughout this script; import them
# explicitly rather than relying on the wildcard import from usb_cam_env.
import numpy as np
import torch
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler

from arguments import get_args
from model import CNNPolicy, MLPPolicy
from storage import RolloutStorage
from usb_cam_env import *
ENV_IMG_W = 640
ENV_IMG_H = 480
# done_reward: if the env obtains a reward >= done_reward, the env returns done
env_done_reward = 45
# search_done_reward: if the env obtains a reward >= search_done_reward, PPO stops searching
search_done_reward = 48
args = get_args()
assert args.num_processes * args.num_steps % args.batch_size == 0
num_updates = int(args.num_frames) // args.num_steps // args.num_processes
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
try:
os.makedirs(args.log_dir)
except OSError:
files = glob.glob(os.path.join(args.log_dir, '*.monitor.json'))
for f in files:
os.remove(f)
def main():
os.environ['OMP_NUM_THREADS'] = '1'
envs = UsbCamEnv(ENV_IMG_W, ENV_IMG_H, env_done_reward)
obs_shape = envs.observation_space.shape
obs_shape = (obs_shape[0] * args.num_stack, *obs_shape[1:])
actor_critic = MLPPolicy(obs_shape[0], envs.action_space)
action_shape = envs.action_space.shape[0]
print('+++++++++++++++++++++++++++++++++++++')
print('obs_shape:', obs_shape)
print('action_shape:', action_shape)
print('+++++++++++++++++++++++++++++++++++++')
if args.cuda:
actor_critic.cuda()
optimizer = optim.Adam(actor_critic.parameters(), args.lr, eps=args.eps)
rollouts = RolloutStorage(args.num_steps, args.num_processes, obs_shape, envs.action_space)
current_state = torch.zeros(args.num_processes, *obs_shape)
def update_current_state(state):
shape_dim0 = envs.observation_space.shape[0]
state = torch.from_numpy(state).float()
if args.num_stack > 1:
current_state[:, :-shape_dim0] = current_state[:, shape_dim0:]
current_state[:, -shape_dim0:] = state
state = envs.reset()
update_current_state(state)
rollouts.states[0].copy_(current_state)
# These variables are used to compute average rewards for all processes.
episode_rewards = torch.zeros([args.num_processes, 1])
final_rewards = torch.zeros([args.num_processes, 1])
if args.cuda:
current_state = current_state.cuda()
rollouts.cuda()
old_model = copy.deepcopy(actor_critic)
for j in range(num_updates):
for step in range(args.num_steps):
# Sample actions
value, action = actor_critic.act(Variable(rollouts.states[step], volatile=True))
cpu_actions = action.data.cpu().numpy()
# Observe reward and next state
state, reward, done, info = envs.step(cpu_actions)
print('%3d [%3d %3d %3d %3d] %3d' % (step,
int(envs.convert_2_real_action(cpu_actions)[0, 0]),
int(envs.convert_2_real_action(cpu_actions)[0, 1]),
int(envs.convert_2_real_action(cpu_actions)[0, 2]),
int(envs.convert_2_real_action(cpu_actions)[0, 3]),
reward[0]))
if reward[0] >= search_done_reward:
sys.exit()
reward = torch.from_numpy(np.expand_dims(np.stack(reward), 1)).float()
episode_rewards += reward
# If done then clean the history of observations.
masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done])
final_rewards *= masks
final_rewards += (1 - masks) * episode_rewards
episode_rewards *= masks
if args.cuda:
masks = masks.cuda()
if current_state.dim() == 4:
current_state *= masks.unsqueeze(2).unsqueeze(2)
else:
current_state *= masks
update_current_state(state)
rollouts.insert(step, current_state, action.data, value.data, reward, masks)
next_value = actor_critic(Variable(rollouts.states[-1], volatile=True))[0].data
if hasattr(actor_critic, 'obs_filter'):
actor_critic.obs_filter.update(rollouts.states[:-1].view(-1, *obs_shape))
rollouts.compute_returns(next_value, args.use_gae, args.gamma, args.tau)
advantages = rollouts.returns[:-1] - rollouts.value_preds[:-1]
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-5)
old_model.load_state_dict(actor_critic.state_dict())
if hasattr(actor_critic, 'obs_filter'):
old_model.obs_filter = actor_critic.obs_filter
for _ in range(args.ppo_epoch):
sampler = BatchSampler(SubsetRandomSampler(range(args.num_processes * args.num_steps)), args.batch_size * args.num_processes, drop_last=False)
for indices in sampler:
indices = torch.LongTensor(indices)
if args.cuda:
indices = indices.cuda()
states_batch = rollouts.states[:-1].view(-1, *obs_shape)[indices]
actions_batch = rollouts.actions.view(-1, action_shape)[indices]
return_batch = rollouts.returns[:-1].view(-1, 1)[indices]
# Reshape to do in a single forward pass for all steps
values, action_log_probs, dist_entropy = actor_critic.evaluate_actions(Variable(states_batch), Variable(actions_batch))
_, old_action_log_probs, _ = old_model.evaluate_actions(Variable(states_batch, volatile=True), Variable(actions_batch, volatile=True))
ratio = torch.exp(action_log_probs - Variable(old_action_log_probs.data))
adv_targ = Variable(advantages.view(-1, 1)[indices])
surr1 = ratio * adv_targ
surr2 = torch.clamp(ratio, 1.0 - args.clip_param, 1.0 + args.clip_param) * adv_targ
action_loss = -torch.min(surr1, surr2).mean() # PPO's pessimistic surrogate (L^CLIP)
value_loss = (Variable(return_batch) - values).pow(2).mean()
optimizer.zero_grad()
(value_loss + action_loss - dist_entropy * args.entropy_coef).backward()
optimizer.step()
rollouts.states[0].copy_(rollouts.states[-1])
if j % args.save_interval == 0 and args.save_dir != "":
save_path = os.path.join(args.save_dir, args.algo)
try:
os.makedirs(save_path)
except OSError:
pass
# A really ugly way to save a model to CPU
save_model = actor_critic
if args.cuda:
save_model = copy.deepcopy(actor_critic).cpu()
torch.save(save_model, os.path.join(save_path, args.env_name + ".pt"))
if j % args.log_interval == 0:
print("Updates {}, num frames {}, mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}, entropy {:.5f}, value loss {:.5f}, policy loss {:.5f}".
format(j, j * args.num_processes * args.num_steps,
final_rewards.mean(),
final_rewards.median(),
final_rewards.min(),
final_rewards.max(), -dist_entropy.data[0],
value_loss.data[0], action_loss.data[0]))
if __name__ == "__main__":
main()
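# Illustrative sketch (not part of the original script): a toy check of the
# clipped surrogate computed in the update loop above, assuming clip_param=0.2.
#
#   import torch
#   ratio = torch.tensor([1.5])                      # new/old action probability ratio
#   adv_targ = torch.tensor([2.0])                   # positive advantage estimate
#   surr1 = ratio * adv_targ                         # tensor([3.0])
#   surr2 = torch.clamp(ratio, 0.8, 1.2) * adv_targ  # tensor([2.4])
#   action_loss = -torch.min(surr1, surr2).mean()    # tensor(-2.4000)
#
# so increasing the ratio beyond 1 + clip_param cannot improve the objective.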
| [] | [] | ["OMP_NUM_THREADS"] | [] | ["OMP_NUM_THREADS"] | python | 1 | 0 | |
src/runtime/pprof/pprof_test.go | // Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !nacl,!js
package pprof
import (
"bytes"
"context"
"fmt"
"internal/testenv"
"io"
"io/ioutil"
"math/big"
"os"
"os/exec"
"regexp"
"runtime"
"runtime/pprof/internal/profile"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
)
func cpuHogger(f func(x int) int, y *int, dur time.Duration) {
// We only need to get one 100 Hz clock tick, so we've got
// a large safety buffer.
// But do at least 500 iterations (which should take about 100ms),
// otherwise TestCPUProfileMultithreaded can fail if only one
// thread is scheduled during the testing period.
t0 := time.Now()
accum := *y
for i := 0; i < 500 || time.Since(t0) < dur; i++ {
accum = f(accum)
}
*y = accum
}
var (
salt1 = 0
salt2 = 0
)
// The actual CPU hogging function.
// Must not call other functions nor access heap/globals in the loop,
// otherwise under race detector the samples will be in the race runtime.
func cpuHog1(x int) int {
foo := x
for i := 0; i < 1e5; i++ {
if foo > 0 {
foo *= foo
} else {
foo *= foo + 1
}
}
return foo
}
func cpuHog2(x int) int {
foo := x
for i := 0; i < 1e5; i++ {
if foo > 0 {
foo *= foo
} else {
foo *= foo + 2
}
}
return foo
}
func TestCPUProfile(t *testing.T) {
testCPUProfile(t, []string{"runtime/pprof.cpuHog1"}, func(dur time.Duration) {
cpuHogger(cpuHog1, &salt1, dur)
})
}
func TestCPUProfileMultithreaded(t *testing.T) {
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
testCPUProfile(t, []string{"runtime/pprof.cpuHog1", "runtime/pprof.cpuHog2"}, func(dur time.Duration) {
c := make(chan int)
go func() {
cpuHogger(cpuHog1, &salt1, dur)
c <- 1
}()
cpuHogger(cpuHog2, &salt2, dur)
<-c
})
}
func TestCPUProfileInlining(t *testing.T) {
testCPUProfile(t, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}, func(dur time.Duration) {
cpuHogger(inlinedCaller, &salt1, dur)
})
}
func inlinedCaller(x int) int {
x = inlinedCallee(x)
return x
}
func inlinedCallee(x int) int {
// We could just use cpuHog1, but for loops prevent inlining
// right now. :(
foo := x
i := 0
loop:
if foo > 0 {
foo *= foo
} else {
foo *= foo + 1
}
if i++; i < 1e5 {
goto loop
}
return foo
}
func parseProfile(t *testing.T, valBytes []byte, f func(uintptr, []*profile.Location, map[string][]string)) {
p, err := profile.Parse(bytes.NewReader(valBytes))
if err != nil {
t.Fatal(err)
}
for _, sample := range p.Sample {
count := uintptr(sample.Value[0])
f(count, sample.Location, sample.Label)
}
}
func testCPUProfile(t *testing.T, need []string, f func(dur time.Duration)) {
switch runtime.GOOS {
case "darwin":
switch runtime.GOARCH {
case "arm", "arm64":
// nothing
default:
out, err := exec.Command("uname", "-a").CombinedOutput()
if err != nil {
t.Fatal(err)
}
vers := string(out)
t.Logf("uname -a: %v", vers)
}
case "plan9":
t.Skip("skipping on plan9")
}
const maxDuration = 5 * time.Second
// If we're running a long test, start with a long duration
// for tests that try to make sure something *doesn't* happen.
duration := 5 * time.Second
if testing.Short() {
duration = 200 * time.Millisecond
}
// Profiling tests are inherently flaky, especially on a
// loaded system, such as when this test is running with
// several others under go test std. If a test fails in a way
// that could mean it just didn't run long enough, try with a
// longer duration.
for duration <= maxDuration {
var prof bytes.Buffer
if err := StartCPUProfile(&prof); err != nil {
t.Fatal(err)
}
f(duration)
StopCPUProfile()
if profileOk(t, need, prof, duration) {
return
}
duration *= 2
if duration <= maxDuration {
t.Logf("retrying with %s duration", duration)
}
}
switch runtime.GOOS {
case "darwin", "dragonfly", "netbsd", "solaris":
t.Skipf("ignoring failure on %s; see golang.org/issue/13841", runtime.GOOS)
}
// Ignore the failure if the tests are running in a QEMU-based emulator,
// QEMU is not perfect at emulating everything.
// IN_QEMU environmental variable is set by some of the Go builders.
// IN_QEMU=1 indicates that the tests are running in QEMU. See issue 9605.
if os.Getenv("IN_QEMU") == "1" {
t.Skip("ignore the failure in QEMU; see golang.org/issue/9605")
}
t.FailNow()
}
func contains(slice []string, s string) bool {
for i := range slice {
if slice[i] == s {
return true
}
}
return false
}
func profileOk(t *testing.T, need []string, prof bytes.Buffer, duration time.Duration) (ok bool) {
ok = true
// Check that profile is well formed and contains need.
have := make([]uintptr, len(need))
var samples uintptr
var buf bytes.Buffer
parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, labels map[string][]string) {
fmt.Fprintf(&buf, "%d:", count)
fprintStack(&buf, stk)
samples += count
for i, name := range need {
if semi := strings.Index(name, ";"); semi > -1 {
kv := strings.SplitN(name[semi+1:], "=", 2)
if len(kv) != 2 || !contains(labels[kv[0]], kv[1]) {
continue
}
name = name[:semi]
}
for _, loc := range stk {
for _, line := range loc.Line {
if strings.Contains(line.Function.Name, name) {
have[i] += count
}
}
}
}
fmt.Fprintf(&buf, "\n")
})
t.Logf("total %d CPU profile samples collected:\n%s", samples, buf.String())
if samples < 10 && runtime.GOOS == "windows" {
// On some windows machines we end up with
// not enough samples due to coarse timer
// resolution. Let it go.
t.Log("too few samples on Windows (golang.org/issue/10842)")
return false
}
// Check that we got a reasonable number of samples.
// We used to always require at least ideal/4 samples,
// but that is too hard to guarantee on a loaded system.
// Now we accept 10 or more samples, which we take to be
// enough to show that at least some profiling is occurring.
if ideal := uintptr(duration * 100 / time.Second); samples == 0 || (samples < ideal/4 && samples < 10) {
t.Logf("too few samples; got %d, want at least %d, ideally %d", samples, ideal/4, ideal)
ok = false
}
if len(need) == 0 {
return ok
}
var total uintptr
for i, name := range need {
total += have[i]
t.Logf("%s: %d\n", name, have[i])
}
if total == 0 {
t.Logf("no samples in expected functions")
ok = false
}
// We'd like to check a reasonable minimum, like
// total / len(have) / smallconstant, but this test is
// pretty flaky (see bug 7095). So we'll just test to
// make sure we got at least one sample.
min := uintptr(1)
for i, name := range need {
if have[i] < min {
t.Logf("%s has %d samples out of %d, want at least %d, ideally %d", name, have[i], total, min, total/uintptr(len(have)))
ok = false
}
}
return ok
}
// Fork can hang if preempted with signals frequently enough (see issue 5517).
// Ensure that we do not do this.
func TestCPUProfileWithFork(t *testing.T) {
testenv.MustHaveExec(t)
heap := 1 << 30
if runtime.GOOS == "android" {
// Use smaller size for Android to avoid crash.
heap = 100 << 20
}
if testing.Short() {
heap = 100 << 20
}
// This makes fork slower.
garbage := make([]byte, heap)
// Need to touch the slice, otherwise it won't be paged in.
done := make(chan bool)
go func() {
for i := range garbage {
garbage[i] = 42
}
done <- true
}()
<-done
var prof bytes.Buffer
if err := StartCPUProfile(&prof); err != nil {
t.Fatal(err)
}
defer StopCPUProfile()
for i := 0; i < 10; i++ {
exec.Command(os.Args[0], "-h").CombinedOutput()
}
}
// Test that profiler does not observe runtime.gogo as "user" goroutine execution.
// If it did, it would see inconsistent state and would either record an incorrect stack
// or crash because the stack was malformed.
func TestGoroutineSwitch(t *testing.T) {
// How much to try. These defaults take about 1 second
// on a 2012 MacBook Pro. The ones in short mode take
// about 0.1 seconds.
tries := 10
count := 1000000
if testing.Short() {
tries = 1
}
for try := 0; try < tries; try++ {
var prof bytes.Buffer
if err := StartCPUProfile(&prof); err != nil {
t.Fatal(err)
}
for i := 0; i < count; i++ {
runtime.Gosched()
}
StopCPUProfile()
// Read profile to look for entries for runtime.gogo with an attempt at a traceback.
// The special entry
parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, _ map[string][]string) {
// An entry with two frames with 'System' in its top frame
// exists to record a PC without a traceback. Those are okay.
if len(stk) == 2 {
name := stk[1].Line[0].Function.Name
if name == "runtime._System" || name == "runtime._ExternalCode" || name == "runtime._GC" {
return
}
}
// Otherwise, should not see runtime.gogo.
// The place we'd see it would be the inner most frame.
name := stk[0].Line[0].Function.Name
if name == "runtime.gogo" {
var buf bytes.Buffer
fprintStack(&buf, stk)
t.Fatalf("found profile entry for runtime.gogo:\n%s", buf.String())
}
})
}
}
func fprintStack(w io.Writer, stk []*profile.Location) {
for _, loc := range stk {
fmt.Fprintf(w, " %#x", loc.Address)
fmt.Fprintf(w, " (")
for i, line := range loc.Line {
if i > 0 {
fmt.Fprintf(w, " ")
}
fmt.Fprintf(w, "%s:%d", line.Function.Name, line.Line)
}
fmt.Fprintf(w, ")")
}
fmt.Fprintf(w, "\n")
}
// Test that profiling of division operations is okay, especially on ARM. See issue 6681.
func TestMathBigDivide(t *testing.T) {
testCPUProfile(t, nil, func(duration time.Duration) {
t := time.After(duration)
pi := new(big.Int)
for {
for i := 0; i < 100; i++ {
n := big.NewInt(2646693125139304345)
d := big.NewInt(842468587426513207)
pi.Div(n, d)
}
select {
case <-t:
return
default:
}
}
})
}
func TestBlockProfile(t *testing.T) {
type TestCase struct {
name string
f func()
stk []string
re string
}
tests := [...]TestCase{
{
name: "chan recv",
f: blockChanRecv,
stk: []string{
"runtime.chanrecv1",
"runtime/pprof.blockChanRecv",
"runtime/pprof.TestBlockProfile",
},
re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
# 0x[0-9a-f]+ runtime\.chanrecv1\+0x[0-9a-f]+ .*/src/runtime/chan.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.blockChanRecv\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
{
name: "chan send",
f: blockChanSend,
stk: []string{
"runtime.chansend1",
"runtime/pprof.blockChanSend",
"runtime/pprof.TestBlockProfile",
},
re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
# 0x[0-9a-f]+ runtime\.chansend1\+0x[0-9a-f]+ .*/src/runtime/chan.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.blockChanSend\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
{
name: "chan close",
f: blockChanClose,
stk: []string{
"runtime.chanrecv1",
"runtime/pprof.blockChanClose",
"runtime/pprof.TestBlockProfile",
},
re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
# 0x[0-9a-f]+ runtime\.chanrecv1\+0x[0-9a-f]+ .*/src/runtime/chan.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.blockChanClose\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
{
name: "select recv async",
f: blockSelectRecvAsync,
stk: []string{
"runtime.selectgo",
"runtime/pprof.blockSelectRecvAsync",
"runtime/pprof.TestBlockProfile",
},
re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
# 0x[0-9a-f]+ runtime\.selectgo\+0x[0-9a-f]+ .*/src/runtime/select.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.blockSelectRecvAsync\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
{
name: "select send sync",
f: blockSelectSendSync,
stk: []string{
"runtime.selectgo",
"runtime/pprof.blockSelectSendSync",
"runtime/pprof.TestBlockProfile",
},
re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
# 0x[0-9a-f]+ runtime\.selectgo\+0x[0-9a-f]+ .*/src/runtime/select.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.blockSelectSendSync\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
{
name: "mutex",
f: blockMutex,
stk: []string{
"sync.(*Mutex).Lock",
"runtime/pprof.blockMutex",
"runtime/pprof.TestBlockProfile",
},
re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
# 0x[0-9a-f]+ sync\.\(\*Mutex\)\.Lock\+0x[0-9a-f]+ .*/src/sync/mutex\.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.blockMutex\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
{
name: "cond",
f: blockCond,
stk: []string{
"sync.(*Cond).Wait",
"runtime/pprof.blockCond",
"runtime/pprof.TestBlockProfile",
},
re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
# 0x[0-9a-f]+ sync\.\(\*Cond\)\.Wait\+0x[0-9a-f]+ .*/src/sync/cond\.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.blockCond\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
}
// Generate block profile
runtime.SetBlockProfileRate(1)
defer runtime.SetBlockProfileRate(0)
for _, test := range tests {
test.f()
}
t.Run("debug=1", func(t *testing.T) {
var w bytes.Buffer
Lookup("block").WriteTo(&w, 1)
prof := w.String()
if !strings.HasPrefix(prof, "--- contention:\ncycles/second=") {
t.Fatalf("Bad profile header:\n%v", prof)
}
if strings.HasSuffix(prof, "#\t0x0\n\n") {
t.Errorf("Useless 0 suffix:\n%v", prof)
}
for _, test := range tests {
if !regexp.MustCompile(strings.Replace(test.re, "\t", "\t+", -1)).MatchString(prof) {
t.Errorf("Bad %v entry, expect:\n%v\ngot:\n%v", test.name, test.re, prof)
}
}
})
t.Run("proto", func(t *testing.T) {
// proto format
var w bytes.Buffer
Lookup("block").WriteTo(&w, 0)
p, err := profile.Parse(&w)
if err != nil {
t.Fatalf("failed to parse profile: %v", err)
}
t.Logf("parsed proto: %s", p)
if err := p.CheckValid(); err != nil {
t.Fatalf("invalid profile: %v", err)
}
stks := stacks(p)
for _, test := range tests {
if !containsStack(stks, test.stk) {
t.Errorf("No matching stack entry for %v, want %+v", test.name, test.stk)
}
}
})
}
func stacks(p *profile.Profile) (res [][]string) {
for _, s := range p.Sample {
var stk []string
for _, l := range s.Location {
for _, line := range l.Line {
stk = append(stk, line.Function.Name)
}
}
res = append(res, stk)
}
return res
}
func containsStack(got [][]string, want []string) bool {
for _, stk := range got {
if len(stk) < len(want) {
continue
}
for i, f := range want {
if f != stk[i] {
break
}
if i == len(want)-1 {
return true
}
}
}
return false
}
const blockDelay = 10 * time.Millisecond
func blockChanRecv() {
c := make(chan bool)
go func() {
time.Sleep(blockDelay)
c <- true
}()
<-c
}
func blockChanSend() {
c := make(chan bool)
go func() {
time.Sleep(blockDelay)
<-c
}()
c <- true
}
func blockChanClose() {
c := make(chan bool)
go func() {
time.Sleep(blockDelay)
close(c)
}()
<-c
}
func blockSelectRecvAsync() {
const numTries = 3
c := make(chan bool, 1)
c2 := make(chan bool, 1)
go func() {
for i := 0; i < numTries; i++ {
time.Sleep(blockDelay)
c <- true
}
}()
for i := 0; i < numTries; i++ {
select {
case <-c:
case <-c2:
}
}
}
func blockSelectSendSync() {
c := make(chan bool)
c2 := make(chan bool)
go func() {
time.Sleep(blockDelay)
<-c
}()
select {
case c <- true:
case c2 <- true:
}
}
func blockMutex() {
var mu sync.Mutex
mu.Lock()
go func() {
time.Sleep(blockDelay)
mu.Unlock()
}()
// Note: Unlock releases mu before recording the mutex event,
// so it's theoretically possible for this to proceed and
// capture the profile before the event is recorded. As long
// as this is blocked before the unlock happens, it's okay.
mu.Lock()
}
func blockCond() {
var mu sync.Mutex
c := sync.NewCond(&mu)
mu.Lock()
go func() {
time.Sleep(blockDelay)
mu.Lock()
c.Signal()
mu.Unlock()
}()
c.Wait()
mu.Unlock()
}
func TestMutexProfile(t *testing.T) {
// Generate mutex profile
old := runtime.SetMutexProfileFraction(1)
defer runtime.SetMutexProfileFraction(old)
if old != 0 {
t.Fatalf("need MutexProfileRate 0, got %d", old)
}
blockMutex()
t.Run("debug=1", func(t *testing.T) {
var w bytes.Buffer
Lookup("mutex").WriteTo(&w, 1)
prof := w.String()
t.Logf("received profile: %v", prof)
if !strings.HasPrefix(prof, "--- mutex:\ncycles/second=") {
t.Errorf("Bad profile header:\n%v", prof)
}
prof = strings.Trim(prof, "\n")
lines := strings.Split(prof, "\n")
if len(lines) != 6 {
t.Errorf("expected 6 lines, got %d %q\n%s", len(lines), prof, prof)
}
if len(lines) < 6 {
return
}
// checking that the line is like "35258904 1 @ 0x48288d 0x47cd28 0x458931"
r2 := `^\d+ 1 @(?: 0x[[:xdigit:]]+)+`
//r2 := "^[0-9]+ 1 @ 0x[0-9a-f x]+$"
if ok, err := regexp.MatchString(r2, lines[3]); err != nil || !ok {
t.Errorf("%q didn't match %q", lines[3], r2)
}
r3 := "^#.*runtime/pprof.blockMutex.*$"
if ok, err := regexp.MatchString(r3, lines[5]); err != nil || !ok {
t.Errorf("%q didn't match %q", lines[5], r3)
}
t.Logf(prof)
})
t.Run("proto", func(t *testing.T) {
// proto format
var w bytes.Buffer
Lookup("mutex").WriteTo(&w, 0)
p, err := profile.Parse(&w)
if err != nil {
t.Fatalf("failed to parse profile: %v", err)
}
t.Logf("parsed proto: %s", p)
if err := p.CheckValid(); err != nil {
t.Fatalf("invalid profile: %v", err)
}
stks := stacks(p)
for _, want := range [][]string{
{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutex.func1"},
} {
if !containsStack(stks, want) {
t.Errorf("No matching stack entry for %+v", want)
}
}
})
}
func func1(c chan int) { <-c }
func func2(c chan int) { <-c }
func func3(c chan int) { <-c }
func func4(c chan int) { <-c }
func TestGoroutineCounts(t *testing.T) {
// Setting GOMAXPROCS to 1 ensures we can force all goroutines to the
// desired blocking point.
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
c := make(chan int)
for i := 0; i < 100; i++ {
switch {
case i%10 == 0:
go func1(c)
case i%2 == 0:
go func2(c)
default:
go func3(c)
}
// Let goroutines block on channel
for j := 0; j < 5; j++ {
runtime.Gosched()
}
}
var w bytes.Buffer
goroutineProf := Lookup("goroutine")
// Check debug profile
goroutineProf.WriteTo(&w, 1)
prof := w.String()
if !containsInOrder(prof, "\n50 @ ", "\n40 @", "\n10 @", "\n1 @") {
t.Errorf("expected sorted goroutine counts:\n%s", prof)
}
// Check proto profile
w.Reset()
goroutineProf.WriteTo(&w, 0)
p, err := profile.Parse(&w)
if err != nil {
t.Errorf("error parsing protobuf profile: %v", err)
}
if err := p.CheckValid(); err != nil {
t.Errorf("protobuf profile is invalid: %v", err)
}
if !containsCounts(p, []int64{50, 40, 10, 1}) {
t.Errorf("expected count profile to contain goroutines with counts %v, got %v",
[]int64{50, 40, 10, 1}, p)
}
close(c)
time.Sleep(10 * time.Millisecond) // let goroutines exit
}
func containsInOrder(s string, all ...string) bool {
for _, t := range all {
i := strings.Index(s, t)
if i < 0 {
return false
}
s = s[i+len(t):]
}
return true
}
func containsCounts(prof *profile.Profile, counts []int64) bool {
m := make(map[int64]int)
for _, c := range counts {
m[c]++
}
for _, s := range prof.Sample {
// The count is the single value in the sample
if len(s.Value) != 1 {
return false
}
m[s.Value[0]]--
}
for _, n := range m {
if n > 0 {
return false
}
}
return true
}
// Issue 18836.
func TestEmptyCallStack(t *testing.T) {
t.Parallel()
var buf bytes.Buffer
p := NewProfile("test18836")
p.Add("foo", 47674)
p.WriteTo(&buf, 1)
p.Remove("foo")
got := buf.String()
prefix := "test18836 profile: total 1\n"
if !strings.HasPrefix(got, prefix) {
t.Fatalf("got:\n\t%q\nwant prefix:\n\t%q\n", got, prefix)
}
lostevent := "lostProfileEvent"
if !strings.Contains(got, lostevent) {
t.Fatalf("got:\n\t%q\ndoes not contain:\n\t%q\n", got, lostevent)
}
}
func TestCPUProfileLabel(t *testing.T) {
testCPUProfile(t, []string{"runtime/pprof.cpuHogger;key=value"}, func(dur time.Duration) {
Do(context.Background(), Labels("key", "value"), func(context.Context) {
cpuHogger(cpuHog1, &salt1, dur)
})
})
}
func TestLabelRace(t *testing.T) {
// Test the race detector annotations for synchronization
// between settings labels and consuming them from the
// profile.
testCPUProfile(t, []string{"runtime/pprof.cpuHogger;key=value"}, func(dur time.Duration) {
start := time.Now()
var wg sync.WaitGroup
for time.Since(start) < dur {
var salts [10]int
for i := 0; i < 10; i++ {
wg.Add(1)
go func(j int) {
Do(context.Background(), Labels("key", "value"), func(context.Context) {
cpuHogger(cpuHog1, &salts[j], time.Millisecond)
})
wg.Done()
}(i)
}
wg.Wait()
}
})
}
// Check that there is no deadlock when the program receives SIGPROF while in
// 64bit atomics' critical section. Used to happen on mips{,le}. See #20146.
func TestAtomicLoadStore64(t *testing.T) {
f, err := ioutil.TempFile("", "profatomic")
if err != nil {
t.Fatalf("TempFile: %v", err)
}
defer os.Remove(f.Name())
defer f.Close()
if err := StartCPUProfile(f); err != nil {
t.Fatal(err)
}
defer StopCPUProfile()
var flag uint64
done := make(chan bool, 1)
go func() {
for atomic.LoadUint64(&flag) == 0 {
runtime.Gosched()
}
done <- true
}()
time.Sleep(50 * time.Millisecond)
atomic.StoreUint64(&flag, 1)
<-done
}
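// Illustrative sketch (not part of the original test file): the minimal
// pattern the tests above exercise, written as an ordinary program outside
// this package would use it (the file name "cpu.prof" is an arbitrary choice
// for this example):
//
//	f, err := os.Create("cpu.prof")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer f.Close()
//	if err := pprof.StartCPUProfile(f); err != nil {
//		log.Fatal(err)
//	}
//	defer pprof.StopCPUProfile()
//	// ... run the workload to be profiled ...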
| ["\"IN_QEMU\""] | [] | ["IN_QEMU"] | [] | ["IN_QEMU"] | go | 1 | 0 | |
setup.py | import os
import re
import sys
from setuptools import setup
# Utility function to read from file.
def fread(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
def get_version():
VERSIONFILE="curator/_version.py"
verstrline = fread(VERSIONFILE).strip()
vsre = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(vsre, verstrline, re.M)
if mo:
VERSION = mo.group(1)
else:
raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))
build_number = os.environ.get('CURATOR_BUILD_NUMBER', None)
if build_number:
return VERSION + "b{}".format(build_number)
return VERSION
def get_install_requires():
res = ['elasticsearch>=2.4.0,<3.0.0' ]
res.append('click>=6.0')
res.append('pyyaml>=3.10')
res.append('voluptuous>=0.9.3')
res.append('certifi>=2017.4.17')
return res
try:
### cx_Freeze ###
from cx_Freeze import setup, Executable
try:
import certifi
cert_file = certifi.where()
except ImportError:
cert_file = ''
# Dependencies are automatically detected, but it might need
# fine tuning.
buildOptions = dict(
packages = [],
excludes = [],
include_files = [cert_file],
)
base = 'Console'
icon = None
if os.path.exists('Elastic.ico'):
icon = 'Elastic.ico'
curator_exe = Executable(
"run_curator.py",
base=base,
targetName = "curator",
)
curator_cli_exe = Executable(
"run_singleton.py",
base=base,
targetName = "curator_cli",
)
repomgr_exe = Executable(
"run_es_repo_mgr.py",
base=base,
targetName = "es_repo_mgr",
)
if sys.platform == "win32":
curator_exe = Executable(
"run_curator.py",
base=base,
targetName = "curator.exe",
icon = icon
)
curator_cli_exe = Executable(
"run_singleton.py",
base=base,
targetName = "curator_cli.exe",
icon = icon
)
repomgr_exe = Executable(
"run_es_repo_mgr.py",
base=base,
targetName = "es_repo_mgr.exe",
icon = icon
)
setup(
name = "elasticsearch-curator",
version = get_version(),
author = "Elastic",
author_email = "[email protected]",
description = "Tending your Elasticsearch indices",
long_description=fread('README.rst'),
url = "http://github.com/elastic/curator",
download_url = "https://github.com/elastic/curator/tarball/v" + get_version(),
license = "Apache License, Version 2.0",
install_requires = get_install_requires(),
keywords = "elasticsearch time-series indexed index-expiry",
packages = ["curator"],
include_package_data=True,
entry_points = {
"console_scripts" : [
"curator = curator.cli:cli",
"curator_cli = curator.curator_cli:main",
"es_repo_mgr = curator.repomgrcli:repo_mgr_cli",
]
},
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
],
test_suite = "test.run_tests.run_all",
tests_require = ["mock", "nose", "coverage", "nosexcover"],
options = {"build_exe" : buildOptions},
executables = [curator_exe,curator_cli_exe,repomgr_exe]
)
### end cx_Freeze ###
except ImportError:
setup(
name = "elasticsearch-curator",
version = get_version(),
author = "Elastic",
author_email = "[email protected]",
description = "Tending your Elasticsearch indices",
long_description=fread('README.rst'),
url = "http://github.com/elastic/curator",
download_url = "https://github.com/elastic/curator/tarball/v" + get_version(),
license = "Apache License, Version 2.0",
install_requires = get_install_requires(),
keywords = "elasticsearch time-series indexed index-expiry",
packages = ["curator"],
include_package_data=True,
entry_points = {
"console_scripts" : [
"curator = curator.cli:cli",
"curator_cli = curator.curator_cli:main",
"es_repo_mgr = curator.repomgrcli:repo_mgr_cli",
]
},
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
],
test_suite = "test.run_tests.run_all",
tests_require = ["mock", "nose", "coverage", "nosexcover"]
)
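# Worked example (not part of the original setup script): with a hypothetical
# curator/_version.py containing __version__ = "5.5.1" and the environment
# variable CURATOR_BUILD_NUMBER=7 set, get_version() returns "5.5.1b7";
# without CURATOR_BUILD_NUMBER set it returns plain "5.5.1".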
| [] | [] | ["CURATOR_BUILD_NUMBER"] | [] | ["CURATOR_BUILD_NUMBER"] | python | 1 | 0 | |
grl/rl_apps/psro/poker_utils.py | import os
from typing import Dict, Callable, List, Tuple, Union
import deepdish
import numpy as np
import pyspiel
import ray
from open_spiel.python import rl_environment
from open_spiel.python.algorithms.exploitability import exploitability
from open_spiel.python.policy import Policy as OpenSpielPolicy, tabular_policy_from_callable
from pyspiel import Game as OpenSpielGame
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.policy import Policy
from ray.rllib.utils import try_import_torch
from ray.rllib.utils.typing import TensorType
from grl.envs.poker_multi_agent_env import PokerMultiAgentEnv
from grl.algos.p2sro.p2sro_manager.utils import get_latest_metanash_strategies
from grl.algos.p2sro.payoff_table import PayoffTable
from grl.envs.oshi_zumo_multi_agent_env import get_oshi_zumo_obs
torch, _ = try_import_torch()
CACHE_PSRO_TABULAR_POLICIES = bool(os.getenv("CACHE_PSRO_TABULAR_POLICIES", True))
_psro_tabular_policies_cache = {}
def softmax(x):
"""
Compute softmax values for each sets of scores in x.
https://stackoverflow.com/questions/34968722/how-to-implement-the-softmax-function-in-python
"""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
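# Worked example (not part of the original module): softmax(np.array([1.0, 2.0, 3.0]))
# first subtracts the maximum (3.0) for numerical stability and returns
# approximately array([0.0900, 0.2447, 0.6652]), which sums to 1.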
def _callable_tabular_policy(tabular_policy):
"""Turns a tabular policy into a callable.
Args:
tabular_policy: A dictionary mapping information state key to a dictionary
of action probabilities (action -> prob).
Returns:
A function `state` -> list of (action, prob)
"""
def wrap(state):
infostate_key = state.information_state_string(state.current_player())
assert infostate_key in tabular_policy, f"tabular_policy: {tabular_policy}, key: {infostate_key}"
ap_list = []
for action in state.legal_actions():
assert action in tabular_policy[infostate_key]
ap_list.append((action, tabular_policy[infostate_key][action]))
return ap_list
return wrap
def _policy_dict_at_state(callable_policy, state):
"""Turns a policy function into a dictionary at a specific state.
Args:
callable_policy: A function from `state` -> list of (action, prob),
state: the specific state to extract the policy from.
Returns:
A dictionary of action -> prob at this state.
"""
infostate_policy_list = callable_policy(state)
if isinstance(infostate_policy_list, dict):
infostate_policy = infostate_policy_list
legal_actions = state.legal_actions()
for action in infostate_policy.keys():
assert action in legal_actions
for action in legal_actions:
if action not in infostate_policy:
infostate_policy[action] = 0.0
else:
infostate_policy = {}
for ap in infostate_policy_list:
infostate_policy[ap[0]] = ap[1]
return infostate_policy
def _recursively_update_average_policies(state, avg_reach_probs, br_reach_probs,
avg_policies,
best_responses,
alpha,
avg_policy_tables,
delta_tolerance=1e-10):
"""Recursive implementation of the average strategy update."""
if state.is_terminal():
return
elif state.is_chance_node():
for action, _ in state.chance_outcomes():
new_state = state.clone()
new_state.apply_action(action)
_recursively_update_average_policies(state=new_state,
avg_reach_probs=avg_reach_probs,
br_reach_probs=br_reach_probs,
avg_policies=avg_policies,
best_responses=best_responses,
alpha=alpha,
avg_policy_tables=avg_policy_tables,
delta_tolerance=delta_tolerance)
else:
player = state.current_player()
# print(f"avg policy: {avg_policies[player]}, state: {state}")
avg_policy = _policy_dict_at_state(avg_policies[player], state)
br_policy = _policy_dict_at_state(best_responses[player], state)
legal_actions = state.legal_actions()
infostate_key = state.information_state_string(player)
# First traverse the subtrees.
for action in legal_actions:
assert action in br_policy, f"action is {action}, br policy is {br_policy}"
assert action in avg_policy
new_state = state.clone()
new_state.apply_action(action)
new_avg_reach = np.copy(avg_reach_probs)
new_avg_reach[player] *= avg_policy[action]
new_br_reach = np.copy(br_reach_probs)
new_br_reach[player] *= br_policy[action]
_recursively_update_average_policies(state=new_state,
avg_reach_probs=new_avg_reach,
br_reach_probs=new_br_reach,
avg_policies=avg_policies,
best_responses=best_responses,
alpha=alpha,
avg_policy_tables=avg_policy_tables,
delta_tolerance=delta_tolerance)
# Now, do the updates.
if infostate_key not in avg_policy_tables[player]:
avg_policy_tables[player][infostate_key] = {}
pr_sum = 0.0
if avg_reach_probs[player] + br_reach_probs[player] > 0.0:
for action in legal_actions:
assert isinstance(br_policy[action], float), f"br policy action : {br_policy[action]}"
assert isinstance(avg_policy[action], float), f"avg policy action : {avg_policy[action]}"
assert isinstance(alpha[player], float), f"alpha[player]: {alpha[player]}"
assert isinstance(br_reach_probs[player],
float), f"br_reach_probs[player]: {br_reach_probs[player]}"
pr = (
avg_policy[action] + (alpha[player] * br_reach_probs[player] *
(br_policy[action] - avg_policy[action])) /
((1.0 - alpha[player]) * avg_reach_probs[player] +
alpha[player] * br_reach_probs[player]))
assert not np.isnan(pr)
assert isinstance(pr, float), f"pr is {pr}"
avg_policy_tables[player][infostate_key][action] = pr
pr_sum += pr
assert (1.0 - delta_tolerance <= pr_sum <= 1.0 + delta_tolerance)
else:
for action in legal_actions:
avg_policy_tables[player][infostate_key][action] = 1.0 / len(legal_actions)
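# Note added for clarity (not part of the original module): per infostate, the
# update applied above is
#   avg[a] <- avg[a] + (alpha * br_reach * (br[a] - avg[a]))
#                      / ((1 - alpha) * avg_reach + alpha * br_reach)
# for the acting player, falling back to a uniform distribution over legal
# actions when both reach probabilities are zero.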
def tabular_policies_from_weighted_policies(game: OpenSpielGame,
policy_iterable,
weights: List[Tuple[float, float]]):
"""Converts multiple Policy instances into a weighted, averaged TabularPolicy.
Args:
game: The game for which we want a TabularPolicy.
policy_iterable: an iterable that yields, per entry, a tuple of OpenSpiel policies (one per player).
weights: for each entry of policy_iterable, a tuple of per-player probabilities with which that entry is weighted.
Returns:
An averaged OpenSpiel Policy over the policy_iterable.
"""
num_players = game.num_players()
# A set of callables that take in a state and return a list of
# (action, probability) tuples.
avg_policies = [None, None]
total_weights_added = np.zeros(num_players)
for index, (best_responses, weights_for_each_br) in enumerate(zip(policy_iterable, weights)):
weights_for_each_br = np.asarray(weights_for_each_br, dtype=np.float64)
total_weights_added += weights_for_each_br
if index == 0:
for i in range(num_players):
avg_policies[i] = tabular_policy_from_callable(game=game, callable_policy=best_responses[i])
else:
br_reach_probs = np.ones(num_players)
avg_reach_probs = np.ones(num_players)
average_policy_tables = [{} for _ in range(num_players)]
_recursively_update_average_policies(state=game.new_initial_state(),
avg_reach_probs=avg_reach_probs,
br_reach_probs=br_reach_probs,
avg_policies=avg_policies,
best_responses=best_responses,
alpha=weights_for_each_br / total_weights_added,
avg_policy_tables=average_policy_tables)
for i in range(num_players):
avg_policies[i] = _callable_tabular_policy(average_policy_tables[i])
for i in range(num_players):
avg_policies[i] = tabular_policy_from_callable(game=game, callable_policy=avg_policies[i], players=[i])
return avg_policies
def openspiel_policy_from_nonlstm_rllib_policy(openspiel_game: OpenSpielGame,
game_version: str,
game_parameters: dict,
rllib_policy: Policy):
if openspiel_game.get_type().short_name == "universal_poker":
print("Converting universal_poker rllib policy to tabular. This will take a while...")
def policy_callable(state: pyspiel.State):
valid_actions_mask = state.legal_actions_mask()
legal_actions_list = state.legal_actions()
# assert np.array_equal(valid_actions, np.ones_like(valid_actions)) # should be always true for Kuhn Poker
try:
info_state_vector = state.information_state_tensor()
except pyspiel.SpielError:
assert openspiel_game.get_type().short_name == "turn_based_simultaneous_game"
assert game_version == "oshi_zumo", game_version
info_state_vector = state.observation_tensor(state.current_player())[4:]
info_state_vector = get_oshi_zumo_obs(openspiel_observation_tensor=info_state_vector,
starting_coins=int(str(game_parameters["coins"])))
if game_version in ["leduc_poker", "oshi_zumo", "oshi_zumo_tiny", "universal_poker"]:
obs = np.concatenate(
(np.asarray(info_state_vector, dtype=np.float32), np.asarray(valid_actions_mask, dtype=np.float32)),
axis=0)
else:
obs = np.asarray(info_state_vector, dtype=np.float32)
_, _, action_info = rllib_policy.compute_single_action(obs=obs, state=[], explore=False)
action_probs = None
for key in ['policy_targets', 'action_probs']:
if key in action_info:
action_probs = action_info[key]
break
if action_probs is None:
action_logits = action_info['behaviour_logits']
action_probs = softmax(action_logits)
if len(action_probs) > len(valid_actions_mask) and len(action_probs) % len(valid_actions_mask) == 0:
# we may be using a dummy action variant of poker
dummy_action_probs = action_probs.copy()
action_probs = np.zeros_like(valid_actions_mask, dtype=np.float64)
for i, action_prob in enumerate(dummy_action_probs):
action_probs[i % len(valid_actions_mask)] += action_prob
assert np.isclose(sum(action_probs), 1.0), sum(action_probs)
assert np.isclose(sum(action_probs), 1.0)
legal_action_probs = []
valid_action_prob_sum = 0.0
for idx in range(len(valid_actions_mask)):
if valid_actions_mask[idx] == 1.0:
legal_action_probs.append(action_probs[idx])
valid_action_prob_sum += action_probs[idx]
assert np.isclose(valid_action_prob_sum, 1.0), (
action_probs, valid_actions_mask, action_info.get('behaviour_logits'))
# avoid triggering any downstream assertions due to tiny near-zero amounts
for i in range(len(legal_action_probs)):
if np.isclose(legal_action_probs[i], 0.0):
legal_action_probs[i] = 0.0
return {action_name: action_prob for action_name, action_prob in zip(legal_actions_list, legal_action_probs)}
# defensive copy to tabular policy in case the rllib policy changes after this function is called
return tabular_policy_from_callable(game=openspiel_game, callable_policy=policy_callable)
class JointPlayerPolicy(OpenSpielPolicy):
"""Joint policy to be evaluated."""
def __init__(self, game, policies):
player_ids = [0, 1]
super(JointPlayerPolicy, self).__init__(game, player_ids)
self._policies = policies
self._obs = {"info_state": [None, None], "legal_actions": [None, None]}
def action_probabilities(self, state, player_id=None):
cur_player = state.current_player()
player_policy: OpenSpielPolicy = self._policies[cur_player]
return player_policy.action_probabilities(state=state, player_id=cur_player)
def psro_measure_exploitability_nonlstm(br_checkpoint_path_tuple_list: List[Tuple[str, str]],
metanash_weights: List[Tuple[float, float]],
set_policy_weights_fn: Callable,
rllib_policies: List[Policy],
poker_game_version: str,
open_spiel_env_config: dict = None):
if open_spiel_env_config is None:
if poker_game_version in ["kuhn_poker", "leduc_poker"]:
open_spiel_env_config = {
"players": pyspiel.GameParameter(2)
}
else:
open_spiel_env_config = {}
open_spiel_env_config = {k: pyspiel.GameParameter(v) if not isinstance(v, pyspiel.GameParameter) else v for k, v in
open_spiel_env_config.items()}
openspiel_game = pyspiel.load_game(poker_game_version, open_spiel_env_config)
if poker_game_version == "oshi_zumo":
openspiel_game = pyspiel.convert_to_turn_based(openspiel_game)
def policy_iterable():
for checkpoint_path_tuple in br_checkpoint_path_tuple_list:
openspiel_policies = []
for player, player_rllib_policy in enumerate(rllib_policies):
checkpoint_path = checkpoint_path_tuple[player]
if checkpoint_path not in _psro_tabular_policies_cache:
set_policy_weights_fn(player_rllib_policy, checkpoint_path=checkpoint_path)
single_openspiel_policy = openspiel_policy_from_nonlstm_rllib_policy(openspiel_game=openspiel_game,
rllib_policy=player_rllib_policy,
game_version=poker_game_version,
game_parameters=open_spiel_env_config,
)
if CACHE_PSRO_TABULAR_POLICIES:
_psro_tabular_policies_cache[checkpoint_path] = single_openspiel_policy
else:
single_openspiel_policy = _psro_tabular_policies_cache[checkpoint_path]
openspiel_policies.append(single_openspiel_policy)
yield openspiel_policies
avg_policies = tabular_policies_from_weighted_policies(game=openspiel_game,
policy_iterable=policy_iterable(),
weights=metanash_weights)
joint_player_policy = JointPlayerPolicy(game=openspiel_game, policies=avg_policies)
# Exploitability is NashConv / num_players
if poker_game_version == "universal_poker":
print("Measuring exploitability for universal_poker policy. This will take a while...")
exploitability_result = exploitability(game=openspiel_game, policy=joint_player_policy)
return exploitability_result
def get_exploitability_from_cache(cache_dir: str, policy_num: int) -> Union[float, None]:
cache_file_path = os.path.join(cache_dir, f"policy_{policy_num}_exploitability.txt")
if os.path.exists(cache_file_path):
with open(cache_file_path, "r") as f:
return float(f.readline())
return None
def write_exploitability_to_cache(cache_dir: str, policy_num: int, exploitability: float):
cache_file_path = os.path.join(cache_dir, f"policy_{policy_num}_exploitability.txt")
with open(cache_file_path, "w+") as f:
f.write(str(exploitability))
def get_stats_for_single_payoff_table(payoff_table: PayoffTable, highest_policy_num: int, poker_env_config,
policy_class, policy_config, cache_dir: str, eval_every_nth_entry=1, is_symmetric_two_player=False):
ray.init(ignore_reinit_error=True, local_mode=True, num_cpus=1, num_gpus=0)
poker_game_version = poker_env_config["version"]
temp_env = PokerMultiAgentEnv(env_config=poker_env_config)
openspiel_env_config = temp_env.open_spiel_env_config
# def fetch_logits(policy):
# return {
# "behaviour_logits": policy.model.last_output(),
# }
#
# policy_class = policy_class.with_updates(
# extra_action_fetches_fn=fetch_logits
# )
def extra_action_out_fn(policy: Policy, input_dict, state_batches, model,
action_dist: ActionDistribution) -> Dict[str, TensorType]:
action = action_dist.deterministic_sample()
action_probs = torch.zeros_like(policy.q_values).long()
action_probs[0][action[0]] = 1.0
return {"q_values": policy.q_values, "action_probs": action_probs}
policy_class = policy_class.with_updates(
extra_action_out_fn=extra_action_out_fn
)
policies = [policy_class(
obs_space=temp_env.observation_space,
action_space=temp_env.action_space,
config=policy_config) for _ in range(2)]
def set_policy_weights(policy: Policy, checkpoint_path: str):
checkpoint_data = deepdish.io.load(path=checkpoint_path)
weights = checkpoint_data["weights"]
weights = {k.replace("_dot_", "."): v for k, v in weights.items()}
policy.set_weights(weights)
exploitability_per_generation = []
total_steps_per_generation = []
total_episodes_per_generation = []
num_policies_per_generation = []
for i, n_policies in enumerate(range(1, highest_policy_num + 1)):
exploitability_cached = get_exploitability_from_cache(cache_dir=cache_dir, policy_num=i)
if exploitability_cached is None and i % eval_every_nth_entry == 0:
metanash_probs_0 = get_latest_metanash_strategies(payoff_table=payoff_table,
as_player=1,
as_policy_num=n_policies,
fictitious_play_iters=2000,
mix_with_uniform_dist_coeff=0.0,
print_matrix=False)[0].probabilities_for_each_strategy()
if is_symmetric_two_player:
metanash_probs_1 = metanash_probs_0
else:
metanash_probs_1 = get_latest_metanash_strategies(payoff_table=payoff_table,
as_player=0,
as_policy_num=n_policies,
fictitious_play_iters=2000,
mix_with_uniform_dist_coeff=0.0,
print_matrix=False)[1].probabilities_for_each_strategy()
policy_specs_0 = payoff_table.get_ordered_spec_list_for_player(player=0)[:n_policies]
policy_specs_1 = payoff_table.get_ordered_spec_list_for_player(player=1)[:n_policies]
assert len(metanash_probs_1) == len(
policy_specs_1), f"len(metanash_probs_1): {len(metanash_probs_1)}, len(policy_specs_1): {len(policy_specs_1)}"
assert len(metanash_probs_0) == len(policy_specs_0)
assert len(policy_specs_0) == len(policy_specs_1)
br_checkpoint_paths = []
metanash_weights = []
for spec_0, prob_0, spec_1, prob_1 in zip(policy_specs_0, metanash_probs_0, policy_specs_1,
metanash_probs_1):
br_checkpoint_paths.append((spec_0.metadata["checkpoint_path"], spec_1.metadata["checkpoint_path"]))
metanash_weights.append((prob_0, prob_1))
exploitability_this_gen = psro_measure_exploitability_nonlstm(
br_checkpoint_path_tuple_list=br_checkpoint_paths,
metanash_weights=metanash_weights,
set_policy_weights_fn=set_policy_weights,
rllib_policies=policies,
poker_game_version=poker_game_version,
open_spiel_env_config=openspiel_env_config
)
write_exploitability_to_cache(cache_dir=cache_dir, policy_num=i, exploitability=exploitability_this_gen)
else:
exploitability_this_gen = exploitability_cached
print(f"{n_policies} policies, {exploitability_this_gen} exploitability")
policy_spec_added_this_gen = [payoff_table.get_spec_for_player_and_pure_strat_index(
player=p, pure_strat_index=i) for p in range(2)]
latest_policy_steps = sum(policy_spec_added_this_gen[p].metadata["timesteps_training_br"] for p in range(2))
latest_policy_episodes = sum(policy_spec_added_this_gen[p].metadata["episodes_training_br"] for p in range(2))
if i > 0:
total_steps_this_generation = latest_policy_steps + total_steps_per_generation[i - 1]
total_episodes_this_generation = latest_policy_episodes + total_episodes_per_generation[i - 1]
else:
total_steps_this_generation = latest_policy_steps
total_episodes_this_generation = latest_policy_episodes
exploitability_per_generation.append(exploitability_this_gen)
total_steps_per_generation.append(total_steps_this_generation)
total_episodes_per_generation.append(total_episodes_this_generation)
num_policies_per_generation.append(n_policies)
stats_out = {'num_policies': num_policies_per_generation, 'exploitability': exploitability_per_generation,
'timesteps': total_steps_per_generation, 'episodes': total_episodes_per_generation}
return stats_out
| [] | [] | ["CACHE_PSRO_TABULAR_POLICIES"] | [] | ["CACHE_PSRO_TABULAR_POLICIES"] | python | 1 | 0 |
pkg/sources/resolver.go | package sources
import (
"os"
"github.com/JonathanNasc/toroburro/pkg/sources/sources_domain"
"github.com/JonathanNasc/toroburro/pkg/sources/sources_services"
)
type SourcesByCode map[string]sources_domain.Source
var inMemorySources SourcesByCode
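// Resolve returns the Source registered under sourceName, lazily populating the
// in-memory registry on first use, and panics for unknown names. A minimal usage
// sketch (assuming the alpha_vantage source registered below):
//
//	src := sources.Resolve(alpha_vantage.SourceName)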
func Resolve(sourceName string) sources_domain.Source {
	setInMemorySourcesIfItIsNotAlreadySet()
if source, ok := inMemorySources[sourceName]; ok {
return source
}
panic("Source name not allowed: " + sourceName)
}
func SubscribeSourceForTests(source sources_domain.Source) {
if os.Getenv("GO_ENV") == "test" {
inMemorySources = make(SourcesByCode)
inMemorySources[source.GetName()] = source
}
}
func setInMemorySourcesIfItIsNotAlreadySet() {
	if inMemorySources == nil {
inMemorySources = make(SourcesByCode)
inMemorySources[alpha_vantage.SourceName] = alpha_vantage.New()
}
}
| ["\"GO_ENV\""] | [] | ["GO_ENV"] | [] | ["GO_ENV"] | go | 1 | 0 |
api/cmd/auth-proxy/main.go | package main
import (
"bytes"
"context"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"fmt"
"github.com/coreos/go-oidc"
"github.com/go-logr/logr"
"github.com/kalmhq/kalm/api/auth_proxy"
"github.com/kalmhq/kalm/api/log"
"github.com/kalmhq/kalm/api/server"
"github.com/kalmhq/kalm/api/utils"
"github.com/kalmhq/kalm/controller/controllers"
"github.com/labstack/echo/v4"
"golang.org/x/net/http2"
"golang.org/x/oauth2"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"sync"
"time"
)
var logger logr.Logger
var oauth2Config *oauth2.Config
var oauth2ConfigMut = &sync.Mutex{}
var oidcVerifier *oidc.IDTokenVerifier
var authProxyURL string
var clientSecret string
const KALM_TOKEN_KEY_NAME = "kalm-sso"
const ENVOY_EXT_AUTH_PATH_PREFIX = "ext_authz"
// CSRF protection and pass payload
type OauthState struct {
Nonce string
OriginalURL string
}
func getOauth2Config() *oauth2.Config {
if oauth2Config != nil {
return oauth2Config
}
oauth2ConfigMut.Lock()
defer oauth2ConfigMut.Unlock()
if oauth2Config != nil {
return oauth2Config
}
clientID := os.Getenv("KALM_OIDC_CLIENT_ID")
clientSecret = os.Getenv("KALM_OIDC_CLIENT_SECRET")
oidcProviderUrl := os.Getenv("KALM_OIDC_PROVIDER_URL")
authProxyURL = os.Getenv("KALM_OIDC_AUTH_PROXY_URL")
logger.V(1).Info(fmt.Sprintf("ClientID: %s", clientID))
logger.V(1).Info(fmt.Sprintf("oidcProviderUrl: %s", oidcProviderUrl))
logger.V(1).Info(fmt.Sprintf("authProxyURL: %s", authProxyURL))
if clientID == "" || clientSecret == "" || oidcProviderUrl == "" || authProxyURL == "" {
logger.Error(nil, "KALM OIDC ENVs are not configured")
return nil
}
auth_proxy.InitEncrypteKey(sha256.Sum256([]byte(clientSecret)))
provider, err := oidc.NewProvider(context.Background(), oidcProviderUrl)
if err != nil {
logger.Error(err, "KALM new provider failed.")
return nil
}
oidcVerifier = provider.Verifier(&oidc.Config{ClientID: clientID})
scopes := []string{}
scopes = append(scopes, oidc.ScopeOpenID, "profile", "email", "groups", "offline_access")
oauth2Config = &oauth2.Config{
ClientID: clientID,
ClientSecret: clientSecret,
Endpoint: provider.Endpoint(),
Scopes: scopes,
RedirectURL: authProxyURL + "/oidc/callback",
}
return oauth2Config
}
func removeExtAuthPathPrefix(path string) string {
if strings.HasPrefix(path, "/"+ENVOY_EXT_AUTH_PATH_PREFIX) {
// remove prefix "/" + ENVOY_EXT_AUTH_PATH_PREFIX
path = path[len(ENVOY_EXT_AUTH_PATH_PREFIX)+1:]
}
return path
}
func getOriginalURL(c echo.Context) string {
// If the request is rewrite at route level, use the original path
requestURI := c.Request().Header.Get("X-Envoy-Original-Path")
if requestURI == "" {
requestURI = removeExtAuthPathPrefix(c.Request().RequestURI)
}
if requestURI == "" {
requestURI = "/"
}
ur := fmt.Sprintf("%s://%s%s", c.Scheme(), c.Request().Host, requestURI)
logger.V(1).Info(fmt.Sprintf("original url %s", ur))
return ur
}
func getStringSignature(data string) string {
signBytes := sha256.Sum256(append([]byte(data), []byte(clientSecret)...))
signString := base64.RawStdEncoding.EncodeToString(signBytes[:])
return signString
}
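// The redirect built below carries original_url, now and
// sign = base64(sha256(original_url + now + clientSecret)); handleOIDCLogin
// recomputes the same signature to verify that the redirect came from this proxy.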
func redirectToAuthProxyUrl(c echo.Context) error {
originalURL := getOriginalURL(c)
uri, err := url.Parse(authProxyURL + "/oidc/login")
if err != nil {
logger.Error(err, "parse auth proxy url error.")
return err
}
now := strconv.FormatInt(time.Now().UTC().UnixNano(), 10)
params := uri.Query()
params.Add("original_url", originalURL)
params.Add("now", now)
params.Add("sign", getStringSignature(originalURL+now))
uri.RawQuery = params.Encode()
return c.Redirect(302, uri.String())
}
///////////////////////////////////
// Run as Envoy ext_authz filter //
///////////////////////////////////
type ClaimsWithGroups struct {
Groups []string `json:"groups"`
}
func handleExtAuthz(c echo.Context) error {
contextLog := logger.WithValues("clientIP", c.RealIP(), "host", c.Request().Host, "path", c.Request().URL.Path)
contextLog.V(1).Info("handleExtAuthz", "tls", c.Request().TLS != nil)
for k, v := range c.Request().Header {
contextLog.V(1).Info("handleExtAuthz", "header", k, "value", v)
}
if getOauth2Config() == nil {
return c.String(503, "Please configure KALM OIDC environments.")
}
if c.QueryParam(KALM_TOKEN_KEY_NAME) != "" {
thinToken := new(auth_proxy.ThinToken)
if err := thinToken.Decode(c.QueryParam(KALM_TOKEN_KEY_NAME)); err != nil {
contextLog.Info(err.Error())
return c.String(401, err.Error())
}
		// Only check that the token itself is valid here;
		// group permissions are not checked at this point.
if _, err := oidcVerifier.Verify(context.Background(), thinToken.IDTokenString); err != nil {
contextLog.Info(err.Error())
return c.String(401, err.Error())
}
contextLog.Info("valid jwt token")
return handleSetIDToken(c)
}
	// Allow traffic to pass only if both conditions hold:
	// - the `let-pass-if-has-bearer-token` header is explicitly declared
	// - the request carries a bearer token
if shouldLetPass(c) {
return c.NoContent(200)
}
token, err := getTokenFromRequest(c)
if err != nil {
contextLog.Info("No auth cookie, redirect to auth proxy")
return redirectToAuthProxyUrl(c)
}
idToken, err := oidcVerifier.Verify(context.Background(), token.IDTokenString)
if err != nil {
		// A hacky way to check whether the error is an expiration error
if strings.Contains(strings.ToLower(err.Error()), "expire") {
// use refresh token to fetch the id_token
idToken, err = refreshIDToken(token)
if err != nil {
logger.Error(err, "refresh token error")
clearTokenInCookie(c)
return c.JSON(401, "The jwt token is invalid, expired, revoked, or was issued to another client. (After refresh)")
}
encodedToken, _ := token.Encode()
			// ext_authz doesn't allow setting a response header for the client when auth is successful.
			// Kalm puts the new cookie in a payload header, which is picked up by an Envoy filter and set as a response header to the client.
c.Response().Header().Set(
controllers.KALM_SSO_SET_COOKIE_PAYLOAD_HEADER,
newTokenCookie(encodedToken).String(),
)
logger.V(1).Info("[refresh] Set Kalm-Set-Cookie payload.", "X-Request-Id", c.Request().Header.Get("X-Request-Id"))
} else {
clearTokenInCookie(c)
return c.JSON(401, "The jwt token is invalid, expired, revoked, or was issued to another client.")
}
}
if !inGrantedGroups(c, idToken) {
clearTokenInCookie(c)
return c.JSON(401, "You don't in any granted groups. Contact you admin please.")
}
// Set user info in meta header
// if the verify returns no error. It's safe to get claims in this way
parts := strings.Split(token.IDTokenString, ".")
c.Response().Header().Set(controllers.KALM_SSO_USERINFO_HEADER, parts[1])
return c.NoContent(200)
}
// When a user's id_token has expired but the refresh_token is still valid, multiple requests may arrive in a short time window.
// A refresh_token must not be used twice, so we can't let all of these requests refresh the token at the same time.
// A condition variable ensures that only one goroutine sends the refresh request while the others wait for its result.
// This is sufficient as long as the auth-proxy service has only one replica.
// If you scale auth-proxy out, make sure to use a sticky load-balancing strategy.
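//
// Rough shape of the flow (illustrative only):
//
//	ctx, isProducer := auth_proxy.GetRefreshTokenCond(refreshToken)
//	if isProducer {
//	    // perform the OIDC refresh, publish the result, broadcast
//	} else {
//	    // wait on ctx.Cond until ctx.IDToken or ctx.Error is set
//	}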
func refreshIDToken(token *auth_proxy.ThinToken) (idToken *oidc.IDToken, err error) {
refreshContext, isProducer := auth_proxy.GetRefreshTokenCond(token.RefreshToken)
if isProducer {
logger.V(1).Info("[refresh] Producer. Do refresh. ", "token", token.RefreshToken[:10])
err = doRefresh(token, refreshContext)
logger.V(1).Info(fmt.Sprintf("[refresh] Producer. Done. error: %+v", err))
auth_proxy.RemoveRefreshTokenCond(token.RefreshToken, 60)
} else {
refreshContext.Cond.L.Lock()
logger.V(1).Info("[refresh] Consumer. Wait. ", "token", token.RefreshToken[:10])
for refreshContext.IDToken == nil && refreshContext.Error == nil {
refreshContext.Cond.Wait()
}
err = refreshContext.Error
logger.V(1).Info(fmt.Sprintf("[refresh] Consumer. Got result. error: %+v", err))
refreshContext.Cond.L.Unlock()
}
if err != nil {
return nil, err
}
token.IDTokenString = refreshContext.IDTokenString
token.RefreshToken = refreshContext.RefreshToken
return refreshContext.IDToken, nil
}
func doRefresh(token *auth_proxy.ThinToken, refreshContext *auth_proxy.RefreshContext) (err error) {
// Tell other blocked routines the data is ready
defer func() {
if err != nil {
refreshContext.Error = err
}
refreshContext.Cond.Broadcast()
}()
t := &oauth2.Token{
RefreshToken: token.RefreshToken,
Expiry: time.Now().Add(-time.Hour),
}
newOauth2Token, err := oauth2Config.TokenSource(context.Background(), t).Token()
if err != nil {
logger.Error(err, "Refresh token error")
return err
}
rawIDToken, ok := newOauth2Token.Extra("id_token").(string)
if !ok {
return fmt.Errorf("no id_token in refresh token response")
}
IDToken, err := oidcVerifier.Verify(context.Background(), rawIDToken)
if err != nil {
logger.Error(err, "refreshed token verify error")
return fmt.Errorf("The jwt token is invalid, expired, revoked, or was issued to another client. (After refresh)")
}
	// Hold the lock while publishing the result so waiting consumers see a consistent state.
	refreshContext.Cond.L.Lock()
	refreshContext.IDToken = IDToken
	refreshContext.IDTokenString = rawIDToken
	refreshContext.RefreshToken = newOauth2Token.RefreshToken
	refreshContext.Cond.L.Unlock()
return nil
}
func getTokenFromRequest(c echo.Context) (*auth_proxy.ThinToken, error) {
var tokenString string
const prefix = "Bearer "
if strings.HasPrefix(c.Request().Header.Get(echo.HeaderAuthorization), prefix) {
tokenString = c.Request().Header.Get(echo.HeaderAuthorization)[len(prefix):]
}
if tokenString == "" {
cookie, err := c.Cookie(KALM_TOKEN_KEY_NAME)
if err != nil {
return nil, fmt.Errorf("No auth token in cookie")
}
if cookie.Value == "" {
return nil, fmt.Errorf("Auth token in cookie empty")
}
tokenString = cookie.Value
}
if tokenString == "" {
return nil, fmt.Errorf("No auth in cookie or authorization header")
}
token := new(auth_proxy.ThinToken)
if err := token.Decode(tokenString); err != nil {
return nil, err
}
return token, nil
}
func shouldLetPass(c echo.Context) bool {
return c.Request().Header.Get(controllers.KALM_ALLOW_TO_PASS_IF_HAS_BEARER_TOKEN_HEADER) == "true" &&
strings.HasPrefix(c.Request().Header.Get("Authorization"), "Bearer ")
}
func inGrantedGroups(c echo.Context, idToken *oidc.IDToken) bool {
grantedGroups := c.Request().Header.Get(controllers.KALM_SSO_GRANTED_GROUPS_HEADER)
if grantedGroups == "" {
return true
}
groups := strings.Split(grantedGroups, "|")
var claim ClaimsWithGroups
_ = idToken.Claims(&claim)
gm := make(map[string]struct{}, len(groups))
for _, g := range groups {
gm[g] = struct{}{}
}
for _, g := range claim.Groups {
if _, ok := gm[g]; ok {
return true
}
}
return false
}
func clearTokenInCookie(c echo.Context) {
c.SetCookie(&http.Cookie{
Name: KALM_TOKEN_KEY_NAME,
Value: "",
Path: "/",
Expires: time.Unix(0, 0),
HttpOnly: true,
SameSite: http.SameSiteLaxMode,
})
}
func newTokenCookie(token string) *http.Cookie {
cookie := new(http.Cookie)
cookie.Name = KALM_TOKEN_KEY_NAME
cookie.Expires = time.Now().Add(24 * 7 * time.Hour)
cookie.HttpOnly = true
cookie.SameSite = http.SameSiteLaxMode
cookie.Path = "/"
cookie.Value = token
return cookie
}
func handleSetIDToken(c echo.Context) error {
c.SetCookie(newTokenCookie(c.QueryParam(KALM_TOKEN_KEY_NAME)))
requestURI := c.Request().Header.Get("X-Envoy-Original-Path")
logger.V(1).Info("[Set ID Token]", "X-Envoy-Original-Path", requestURI)
if requestURI == "" {
requestURI = removeExtAuthPathPrefix(c.Request().RequestURI)
logger.V(1).Info("[Set ID Token]", "RawRequestURI", c.Request().RequestURI, "removeExtAuthPathPrefix", requestURI)
}
uri, err := url.Parse(requestURI)
if err != nil {
return err
}
params := uri.Query()
params.Del(KALM_TOKEN_KEY_NAME)
uri.RawQuery = params.Encode()
if uri.Path == "" {
uri.Path = "/"
}
return c.Redirect(302, uri.String())
}
///////////////////////
// Run as Auth proxy //
///////////////////////
func handleOIDCLogin(c echo.Context) error {
if getOauth2Config() == nil {
return c.String(503, "Please configure KALM OIDC environments.")
}
// verify request
originalURL := c.QueryParam("original_url")
now := c.QueryParam("now")
if originalURL == "" {
return c.String(400, "Require param original_url.")
}
if now == "" {
return c.String(400, "Require param original_url.")
}
sign := c.QueryParam("sign")
if sign == "" {
return c.String(400, "Require sign.")
}
if sign != getStringSignature(originalURL+now) {
logger.Error(nil, "wrong sign", "receive", sign, "expected", getStringSignature(originalURL+now))
return c.String(400, "Wrong sign")
}
state := &OauthState{
Nonce: utils.RandString(16),
OriginalURL: originalURL,
}
stateBytes := new(bytes.Buffer)
err := json.NewEncoder(stateBytes).Encode(state)
if err != nil {
return err
}
encryptedState, err := auth_proxy.AesEncrypt(stateBytes.Bytes())
if err != nil {
return err
}
return c.Redirect(
302,
oauth2Config.AuthCodeURL(
base64.RawStdEncoding.EncodeToString(encryptedState),
),
)
}
// This handler runs under the dashboard API domain.
func handleOIDCCallback(c echo.Context) error {
if getOauth2Config() == nil {
return c.String(503, "Please configure KALM OIDC environments.")
}
code := c.QueryParam("code")
stateStr := c.QueryParam("state")
if stateStr == "" {
logger.Error(nil, "missing state")
return c.String(400, "Missing state")
}
stateBytes, err := base64.RawStdEncoding.DecodeString(stateStr)
if err != nil {
logger.Error(err, "Base64 decode state failed")
return c.String(400, "Base64 decode state failed")
}
stateJsonBytes, err := auth_proxy.AesDecrypt(stateBytes)
if err != nil {
logger.Error(err, "Aes decrypted state failed")
return c.String(400, "State mismatch")
}
var state OauthState
err = json.Unmarshal(stateJsonBytes, &state)
if err != nil {
logger.Error(err, "json decode state failed")
return c.String(400, "json decode state failed")
}
uri, err := url.Parse(state.OriginalURL)
if err != nil {
logger.Error(err, "parse original url failed. ", "OriginalURL", state.OriginalURL)
return c.String(400, "parse original url failed.")
}
oauth2Token, err := oauth2Config.Exchange(
context.Background(),
code,
)
if err != nil {
logger.Error(err, "Exchange oauth2Token error")
return c.String(400, "Exchange oauth2Token error")
}
rawIDToken, ok := oauth2Token.Extra("id_token").(string)
if !ok {
logger.Error(nil, "no id_token in token response")
return c.String(400, "no id_token in token response")
}
_, err = oidcVerifier.Verify(context.Background(), rawIDToken)
if err != nil {
logger.Error(err, "jwt verify failed")
return c.String(400, "jwt verify failed")
}
thinToken := &auth_proxy.ThinToken{
RefreshToken: oauth2Token.RefreshToken,
IDTokenString: rawIDToken,
}
encryptedThinToken, err := thinToken.Encode()
if err != nil {
logger.Error(err, "thin token encode error")
return c.String(400, err.Error())
}
params := uri.Query()
params.Add(KALM_TOKEN_KEY_NAME, encryptedThinToken)
uri.RawQuery = params.Encode()
return c.Redirect(302, uri.String())
}
func handleLog(c echo.Context) error {
level := c.QueryParam("level")
switch level {
case "debug":
logger = log.NewLogger("debug")
default:
logger = log.NewLogger("info")
}
return c.NoContent(200)
}
func main() {
logger = log.NewLogger("info")
e := server.NewEchoInstance()
// oidc auth proxy handlers
e.GET("/oidc/login", handleOIDCLogin)
e.GET("/oidc/callback", handleOIDCCallback)
// envoy ext_authz handlers
e.Any("/"+ENVOY_EXT_AUTH_PATH_PREFIX+"/*", handleExtAuthz)
e.Any("/"+ENVOY_EXT_AUTH_PATH_PREFIX, handleExtAuthz)
e.POST("/log", handleLog)
err := e.StartH2CServer("0.0.0.0:3002", &http2.Server{
MaxConcurrentStreams: 250,
MaxReadFrameSize: 1048576,
IdleTimeout: 60 * time.Second,
})
if err != nil {
panic(err)
}
}
| ["\"KALM_OIDC_CLIENT_ID\"", "\"KALM_OIDC_CLIENT_SECRET\"", "\"KALM_OIDC_PROVIDER_URL\"", "\"KALM_OIDC_AUTH_PROXY_URL\""] | [] | ["KALM_OIDC_CLIENT_ID", "KALM_OIDC_AUTH_PROXY_URL", "KALM_OIDC_PROVIDER_URL", "KALM_OIDC_CLIENT_SECRET"] | [] | ["KALM_OIDC_CLIENT_ID", "KALM_OIDC_AUTH_PROXY_URL", "KALM_OIDC_PROVIDER_URL", "KALM_OIDC_CLIENT_SECRET"] | go | 4 | 0 |
docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.join(os.path.dirname(__name__), '..'))
# -- Project information -----------------------------------------------------
project = 'disksurf'
copyright = '2021, Richard Teague'
author = 'Richard Teague, Jane Huang, Charles Law'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.napoleon',
'sphinx.ext.imgmath',
'nbsphinx',
]
# Is this really necessary...
autodoc_mock_imports = [
'astropy',
'scipy',
'argparse',
'numpy',
'tqdm',
'matplotlib',
'gofish',
]
# Make sure all docstrings are in source order.
autodoc_member_order = "bysource"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
master_doc = "index"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Readthedocs.
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
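# (Read the Docs sets READTHEDOCS="True" in its build environment, so the
# sphinx_rtd_theme is only configured explicitly for local builds.)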
if not on_rtd:
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_logo = "disksurf.png"
html_theme_options = {"logo_only": True}
| [] | [] | ["READTHEDOCS"] | [] | ["READTHEDOCS"] | python | 1 | 0 |
integration/client/client_windows_test.go | /*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package containerd
import (
"fmt"
"os"
"path/filepath"
"github.com/Microsoft/hcsshim/osversion"
_ "github.com/Microsoft/hcsshim/test/functional/manifest" // For rsrc_amd64.syso
)
const (
defaultAddress = `\\.\pipe\containerd-containerd-test`
)
var (
defaultRoot = filepath.Join(os.Getenv("programfiles"), "containerd", "root-test")
defaultState = filepath.Join(os.Getenv("programfiles"), "containerd", "state-test")
testImage string
shortCommand = withTrue()
longCommand = withProcessArgs("ping", "-t", "localhost")
)
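// defaultRoot and defaultState resolve under %programfiles%, e.g.
// C:\Program Files\containerd\root-test on a default install (illustrative path).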
func init() {
b := osversion.Build()
switch b {
case osversion.RS1:
testImage = "mcr.microsoft.com/windows/nanoserver:sac2016"
case osversion.RS3:
testImage = "mcr.microsoft.com/windows/nanoserver:1709"
case osversion.RS4:
testImage = "mcr.microsoft.com/windows/nanoserver:1803"
case osversion.RS5:
testImage = "mcr.microsoft.com/windows/nanoserver:1809"
case osversion.V19H1:
testImage = "mcr.microsoft.com/windows/nanoserver:1903"
case osversion.V19H2:
testImage = "mcr.microsoft.com/windows/nanoserver:1909"
case osversion.V20H1:
testImage = "mcr.microsoft.com/windows/nanoserver:2004"
case osversion.V20H2:
testImage = "mcr.microsoft.com/windows/nanoserver:20H2"
default:
fmt.Println("No test image defined for Windows build version:", b)
panic("No windows test image found for this Windows build")
}
fmt.Println("Windows test image:", testImage, ", Windows build version:", b)
}
| ["\"programfiles\"", "\"programfiles\""] | [] | ["programfiles"] | [] | ["programfiles"] | go | 1 | 0 |
src/util/helper/libaws/dynamo.go | package libaws
import (
"encoding/json"
"log"
"os"
"sync"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
"github.com/aws/aws-sdk-go/service/dynamodb/expression"
dynamoEntyty "github.com/sofyan48/cimol/src/entity/http/v1"
)
// GetDynamoDB builds a DynamoDB service client from the AWS session configuration
// and returns it as a *dynamodb.DynamoDB.
func (aw *Aws) GetDynamoDB() *dynamodb.DynamoDB {
cfg := aw.Sessions()
dynamo := dynamodb.New(session.New(), cfg)
return dynamo
}
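// All of the helpers below read the target table name from the AWS_DYNAMO_TABLE
// environment variable, e.g. (illustrative value only):
//
//	export AWS_DYNAMO_TABLE=cimol-messages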
// InputDynamo ...
func (aw *Aws) InputDynamo(itemDynamo *dynamoEntyty.DynamoItem, wg *sync.WaitGroup) (*dynamodb.PutItemOutput, error) {
dynamoLibs := aw.GetDynamoDB()
result := &dynamodb.PutItemOutput{}
mItem, err := dynamodbattribute.MarshalMap(itemDynamo)
if err != nil {
log.Println("ERROR: ", err)
return result, err
}
inputDynamo := &dynamodb.PutItemInput{
Item: mItem,
TableName: aws.String(os.Getenv("AWS_DYNAMO_TABLE")),
}
result, err = dynamoLibs.PutItem(inputDynamo)
if err != nil {
return result, err
}
wg.Done()
return result, nil
}
// InputDynamoEmail ...
func (aw *Aws) InputDynamoEmail(itemDynamo *dynamoEntyty.DynamoItemEmail, wg *sync.WaitGroup) (*dynamodb.PutItemOutput, error) {
dynamoLibs := aw.GetDynamoDB()
result := &dynamodb.PutItemOutput{}
mItem, err := dynamodbattribute.MarshalMap(itemDynamo)
if err != nil {
log.Println("ERROR: ", err)
return result, err
}
inputDynamo := &dynamodb.PutItemInput{
Item: mItem,
TableName: aws.String(os.Getenv("AWS_DYNAMO_TABLE")),
}
result, err = dynamoLibs.PutItem(inputDynamo)
if err != nil {
return result, err
}
wg.Done()
return result, nil
}
// UpdateDynamo ...
func (aw *Aws) UpdateDynamo(ID, status, data string, history *dynamoEntyty.HistoryItem) (*dynamodb.UpdateItemOutput, error) {
dynamoLibs := aw.GetDynamoDB()
	historyReformat, err := json.Marshal(history)
	if err != nil {
		return nil, err
	}
	input := &dynamodb.UpdateItemInput{
Key: map[string]*dynamodb.AttributeValue{
"id": {
S: aws.String(ID),
},
},
ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
":valhistory": {
SS: aws.StringSlice([]string{
string(historyReformat),
data,
}),
},
":valstatusText": {
S: aws.String(status),
},
},
TableName: aws.String(os.Getenv("AWS_DYNAMO_TABLE")),
ReturnValues: aws.String("ALL_NEW"),
UpdateExpression: aws.String("SET history = :valhistory, statusText = :valstatusText"),
}
result, err := dynamoLibs.UpdateItem(input)
if err != nil {
return result, err
}
return result, nil
}
// CallbackSendUpdate ...
func (aw *Aws) CallbackSendUpdate(ID, status, oldHistory string, newHistory string) (*dynamodb.UpdateItemOutput, error) {
dynamoLibs := aw.GetDynamoDB()
input := &dynamodb.UpdateItemInput{
Key: map[string]*dynamodb.AttributeValue{
"id": {
S: aws.String(ID),
},
},
ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
":valhistory": {
SS: aws.StringSlice([]string{
newHistory,
oldHistory,
}),
},
":valstatusText": {
S: aws.String(status),
},
},
TableName: aws.String(os.Getenv("AWS_DYNAMO_TABLE")),
ReturnValues: aws.String("ALL_NEW"),
UpdateExpression: aws.String("SET history = :valhistory, statusText = :valstatusText"),
}
result, err := dynamoLibs.UpdateItem(input)
if err != nil {
return result, err
}
return result, nil
}
// GetDynamoData ..
func (aw *Aws) GetDynamoData(ID string) (*dynamodb.GetItemOutput, error) {
dynamoLibs := aw.GetDynamoDB()
result, err := dynamoLibs.GetItem(&dynamodb.GetItemInput{
TableName: aws.String(os.Getenv("AWS_DYNAMO_TABLE")),
Key: map[string]*dynamodb.AttributeValue{
"id": {
S: aws.String(ID),
},
},
// ProjectionExpression: expr.Projection(),
})
return result, err
}
// GetDynamoHistory ..
func (aw *Aws) GetDynamoHistory(receiverAddress string) (*dynamodb.ScanOutput, error) {
dynamoLibs := aw.GetDynamoDB()
filter := expression.Name("receiverAddress").Equal(expression.Value(receiverAddress))
proj := expression.NamesList(
expression.Name("id"),
expression.Name("history"),
expression.Name("data"),
expression.Name("receiverAddress"),
expression.Name("createdAt"),
expression.Name("statusText"),
expression.Name("type"),
expression.Name("updatedAt"),
)
expr, err := expression.NewBuilder().WithFilter(filter).WithProjection(proj).Build()
if err != nil {
return nil, err
}
params := &dynamodb.ScanInput{
ExpressionAttributeNames: expr.Names(),
ExpressionAttributeValues: expr.Values(),
FilterExpression: expr.Filter(),
ProjectionExpression: expr.Projection(),
TableName: aws.String(os.Getenv("AWS_DYNAMO_TABLE")),
}
result, err := dynamoLibs.Scan(params)
if err != nil {
return nil, err
}
return result, nil
}
| ["\"AWS_DYNAMO_TABLE\"", "\"AWS_DYNAMO_TABLE\"", "\"AWS_DYNAMO_TABLE\"", "\"AWS_DYNAMO_TABLE\"", "\"AWS_DYNAMO_TABLE\"", "\"AWS_DYNAMO_TABLE\""] | [] | ["AWS_DYNAMO_TABLE"] | [] | ["AWS_DYNAMO_TABLE"] | go | 1 | 0 |
vendor/github.com/aws/aws-sdk-go/aws/version.go | // Package aws provides core functionality for making requests to AWS services.
package aws
// SDKName is the name of this AWS SDK
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.22.3"
| [] | [] | [] | [] | [] | go | null | null | null |
vendor/cmd/go/internal/work/security.go | // Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Checking of compiler and linker flags.
// We must avoid flags like -fplugin=, which can allow
// arbitrary code execution during the build.
// Do not make changes here without carefully
// considering the implications.
// (That's why the code is isolated in a file named security.go.)
//
// Note that -Wl,foo means split foo on commas and pass to
// the linker, so that -Wl,-foo,bar means pass -foo bar to
// the linker. Similarly -Wa,foo for the assembler and so on.
// If any of these are permitted, the wildcard portion must
// disallow commas.
//
// Note also that GNU binutils accept any argument @foo
// as meaning "read more flags from the file foo", so we must
// guard against any command-line argument beginning with @,
// even things like "-I @foo".
// We use load.SafeArg (which is even more conservative)
// to reject these.
//
// Even worse, gcc -I@foo (one arg) turns into cc1 -I @foo (two args),
// so although gcc doesn't expand the @foo, cc1 will.
// So out of paranoia, we reject @ at the beginning of every
// flag argument that might be split into its own argument.
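//
// As a concrete illustration: -fplugin=evil.so matches none of the allow-list
// patterns below and is rejected, while -Wl,-rpath,/usr/lib matches the
// -Wl,-rpath pattern in validLinkerFlags and is allowed (example flags only).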
package work
import (
"cmd/go/internal/load"
"fmt"
"os"
"regexp"
"strings"
)
var re = regexp.MustCompile
var validCompilerFlags = []*regexp.Regexp{
re(`-D([A-Za-z_].*)`),
re(`-F([^@\-].*)`),
re(`-I([^@\-].*)`),
re(`-O`),
re(`-O([^@\-].*)`),
re(`-W`),
re(`-W([^@,]+)`), // -Wall but not -Wa,-foo.
re(`-Wa,-mbig-obj`),
re(`-Wp,-D([A-Za-z_].*)`),
re(`-ansi`),
re(`-f(no-)?asynchronous-unwind-tables`),
re(`-f(no-)?blocks`),
re(`-f(no-)builtin-[a-zA-Z0-9_]*`),
re(`-f(no-)?common`),
re(`-f(no-)?constant-cfstrings`),
re(`-fdiagnostics-show-note-include-stack`),
re(`-f(no-)?eliminate-unused-debug-types`),
re(`-f(no-)?exceptions`),
re(`-f(no-)?fast-math`),
re(`-f(no-)?inline-functions`),
re(`-finput-charset=([^@\-].*)`),
re(`-f(no-)?fat-lto-objects`),
re(`-f(no-)?keep-inline-dllexport`),
re(`-f(no-)?lto`),
re(`-fmacro-backtrace-limit=(.+)`),
re(`-fmessage-length=(.+)`),
re(`-f(no-)?modules`),
re(`-f(no-)?objc-arc`),
re(`-f(no-)?objc-nonfragile-abi`),
re(`-f(no-)?objc-legacy-dispatch`),
re(`-f(no-)?omit-frame-pointer`),
re(`-f(no-)?openmp(-simd)?`),
re(`-f(no-)?permissive`),
re(`-f(no-)?(pic|PIC|pie|PIE)`),
re(`-f(no-)?plt`),
re(`-f(no-)?rtti`),
re(`-f(no-)?split-stack`),
re(`-f(no-)?stack-(.+)`),
re(`-f(no-)?strict-aliasing`),
re(`-f(un)signed-char`),
re(`-f(no-)?use-linker-plugin`), // safe if -B is not used; we don't permit -B
re(`-f(no-)?visibility-inlines-hidden`),
re(`-fsanitize=(.+)`),
re(`-ftemplate-depth-(.+)`),
re(`-fvisibility=(.+)`),
re(`-g([^@\-].*)?`),
re(`-m32`),
re(`-m64`),
re(`-m(abi|arch|cpu|fpu|tune)=([^@\-].*)`),
re(`-marm`),
re(`-mfloat-abi=([^@\-].*)`),
re(`-mfpmath=[0-9a-z,+]*`),
re(`-m(no-)?avx[0-9a-z.]*`),
re(`-m(no-)?ms-bitfields`),
re(`-m(no-)?stack-(.+)`),
re(`-mmacosx-(.+)`),
re(`-mios-simulator-version-min=(.+)`),
re(`-miphoneos-version-min=(.+)`),
re(`-mnop-fun-dllimport`),
re(`-m(no-)?sse[0-9.]*`),
re(`-mthumb(-interwork)?`),
re(`-mthreads`),
re(`-mwindows`),
re(`--param=ssp-buffer-size=[0-9]*`),
re(`-pedantic(-errors)?`),
re(`-pipe`),
re(`-pthread`),
re(`-?-std=([^@\-].*)`),
re(`-?-stdlib=([^@\-].*)`),
re(`--sysroot=([^@\-].*)`),
re(`-w`),
re(`-x([^@\-].*)`),
re(`-v`),
}
var validCompilerFlagsWithNextArg = []string{
"-arch",
"-D",
"-I",
"-framework",
"-isysroot",
"-isystem",
"--sysroot",
"-target",
"-x",
}
var validLinkerFlags = []*regexp.Regexp{
re(`-F([^@\-].*)`),
re(`-l([^@\-].*)`),
re(`-L([^@\-].*)`),
re(`-O`),
re(`-O([^@\-].*)`),
re(`-f(no-)?(pic|PIC|pie|PIE)`),
re(`-f(no-)?openmp(-simd)?`),
re(`-fsanitize=([^@\-].*)`),
re(`-flat_namespace`),
re(`-g([^@\-].*)?`),
re(`-headerpad_max_install_names`),
re(`-m(abi|arch|cpu|fpu|tune)=([^@\-].*)`),
re(`-mfloat-abi=([^@\-].*)`),
re(`-mmacosx-(.+)`),
re(`-mios-simulator-version-min=(.+)`),
re(`-miphoneos-version-min=(.+)`),
re(`-mthreads`),
re(`-mwindows`),
re(`-(pic|PIC|pie|PIE)`),
re(`-pthread`),
re(`-rdynamic`),
re(`-shared`),
re(`-?-static([-a-z0-9+]*)`),
re(`-?-stdlib=([^@\-].*)`),
re(`-v`),
// Note that any wildcards in -Wl need to exclude comma,
// since -Wl splits its argument at commas and passes
// them all to the linker uninterpreted. Allowing comma
// in a wildcard would allow tunnelling arbitrary additional
// linker arguments through one of these.
re(`-Wl,--(no-)?allow-multiple-definition`),
re(`-Wl,--(no-)?allow-shlib-undefined`),
re(`-Wl,--(no-)?as-needed`),
re(`-Wl,-Bdynamic`),
re(`-Wl,-Bstatic`),
re(`-WL,-O([^@,\-][^,]*)?`),
re(`-Wl,-d[ny]`),
re(`-Wl,--disable-new-dtags`),
re(`-Wl,-e[=,][a-zA-Z0-9]*`),
re(`-Wl,--enable-new-dtags`),
re(`-Wl,--end-group`),
re(`-Wl,-framework,[^,@\-][^,]+`),
re(`-Wl,-headerpad_max_install_names`),
re(`-Wl,--no-undefined`),
re(`-Wl,-rpath(-link)?[=,]([^,@\-][^,]+)`),
re(`-Wl,-s`),
re(`-Wl,-search_paths_first`),
re(`-Wl,-sectcreate,([^,@\-][^,]+),([^,@\-][^,]+),([^,@\-][^,]+)`),
re(`-Wl,--start-group`),
re(`-Wl,-?-static`),
re(`-Wl,-?-subsystem,(native|windows|console|posix|xbox)`),
re(`-Wl,-syslibroot[=,]([^,@\-][^,]+)`),
re(`-Wl,-undefined[=,]([^,@\-][^,]+)`),
re(`-Wl,-?-unresolved-symbols=[^,]+`),
re(`-Wl,--(no-)?warn-([^,]+)`),
re(`-Wl,-z,(no)?execstack`),
re(`-Wl,-z,relro`),
re(`[a-zA-Z0-9_/].*\.(a|o|obj|dll|dylib|so)`), // direct linker inputs: x.o or libfoo.so (but not -foo.o or @foo.o)
re(`\./.*\.(a|o|obj|dll|dylib|so)`),
}
var validLinkerFlagsWithNextArg = []string{
"-arch",
"-F",
"-l",
"-L",
"-framework",
"-isysroot",
"--sysroot",
"-target",
"-Wl,-framework",
"-Wl,-rpath",
"-Wl,-undefined",
}
func checkCompilerFlags(name, source string, list []string) error {
return checkFlags(name, source, list, validCompilerFlags, validCompilerFlagsWithNextArg)
}
func checkLinkerFlags(name, source string, list []string) error {
return checkFlags(name, source, list, validLinkerFlags, validLinkerFlagsWithNextArg)
}
func checkFlags(name, source string, list []string, valid []*regexp.Regexp, validNext []string) error {
// Let users override rules with $CGO_CFLAGS_ALLOW, $CGO_CFLAGS_DISALLOW, etc.
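	// For example, CGO_CFLAGS_ALLOW='-mspecial-.*' (an illustrative pattern) would
	// permit compiler flags matching that regexp even though they are not on the
	// allow list above, while CGO_CFLAGS_DISALLOW works the same way in reverse.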
var (
allow *regexp.Regexp
disallow *regexp.Regexp
)
if env := os.Getenv("CGO_" + name + "_ALLOW"); env != "" {
r, err := regexp.Compile(env)
if err != nil {
return fmt.Errorf("parsing $CGO_%s_ALLOW: %v", name, err)
}
allow = r
}
if env := os.Getenv("CGO_" + name + "_DISALLOW"); env != "" {
r, err := regexp.Compile(env)
if err != nil {
return fmt.Errorf("parsing $CGO_%s_DISALLOW: %v", name, err)
}
disallow = r
}
Args:
for i := 0; i < len(list); i++ {
arg := list[i]
if disallow != nil && disallow.FindString(arg) == arg {
goto Bad
}
if allow != nil && allow.FindString(arg) == arg {
continue Args
}
for _, re := range valid {
if re.FindString(arg) == arg { // must be complete match
continue Args
}
}
for _, x := range validNext {
if arg == x {
if i+1 < len(list) && load.SafeArg(list[i+1]) {
i++
continue Args
}
// Permit -Wl,-framework -Wl,name.
if i+1 < len(list) &&
strings.HasPrefix(arg, "-Wl,") &&
strings.HasPrefix(list[i+1], "-Wl,") &&
load.SafeArg(list[i+1][4:]) &&
!strings.Contains(list[i+1][4:], ",") {
i++
continue Args
}
if i+1 < len(list) {
return fmt.Errorf("invalid flag in %s: %s %s (see https://golang.org/s/invalidflag)", source, arg, list[i+1])
}
return fmt.Errorf("invalid flag in %s: %s without argument (see https://golang.org/s/invalidflag)", source, arg)
}
}
Bad:
return fmt.Errorf("invalid flag in %s: %s", source, arg)
}
return nil
}
| ["\"CGO_\" + name + \"_ALLOW\"", "\"CGO_\" + name + \"_DISALLOW\""] | [] | ["CGO_\" + name + \"_ALLOW", "CGO_\" + name + \"_DISALLOW"] | [] | ["CGO_\" + name + \"_ALLOW", "CGO_\" + name + \"_DISALLOW"] | go | 2 | 0 |
2021/quals/hw-parking/src/game.py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import time
import string
from PIL import Image, ImageDraw
from matplotlib import pyplot as plt
if sys.platform == 'darwin':
import matplotlib
matplotlib.use('TkAgg')
import os
WALL = -1
blocks = []
board = {}
boardfile = open(sys.argv[1]).read()
header, boardfile = boardfile.split("\n", 1)
W, H = [int(x) for x in header.split()]
flagblocks = {}
target = -1
SZ = 4
target_start = None
def putcar(x,y,w,h,m):
x0 = x*SZ
x1 = (x+w)*SZ
y0 = y*SZ
y1 = (y+h)*SZ
if m == -1:
color = (0, 128, 0)
elif m == -2:
color = (128, 0, 0)
else:
color = (128, 128, 128)
draw.rectangle((x0+1,y0+1,x1-2,y1-2), fill=color)
def putwall(x,y,w,h):
x0 = x*SZ
x1 = (x+w)*SZ
y0 = y*SZ
y1 = (y+h)*SZ
draw.rectangle((x0,y0,x1-1,y1-1), fill=(48,24,0))
walls = []
for line in boardfile.splitlines():
if not line.strip(): continue
x,y,w,h,movable = [int(x) for x in line.split()]
if movable == -1:
flagblocks[len(blocks)] = (x,y)
elif movable == -2:
target = len(blocks)
target_start = x, y
for i in range(x, x+w):
for j in range(y, y+h):
if movable != 0:
if (i,j) in board:
print("Car overlap at %d, %d" % (i,j))
#assert False
board[(i,j)] = len(blocks)
else:
if (i,j) in board and board[i,j] != WALL:
print("Wall-car overlap at %d, %d" % (i,j))
#assert False
board[(i,j)] = WALL
if movable:
blocks.append([x,y,w,h, movable])
else:
walls.append([x,y,w,h])
def printflag():
if os.environ.get("FLAG") == "0":
print("<flag would be here if on real level>")
return
bits = ""
for fl in flagblocks:
orig_xy = flagblocks[fl]
new_xy = tuple(blocks[fl][:2])
bit = 0 if orig_xy == new_xy else 1
bits += str(bit)
flag = b"CTF{"
while bits:
byte, bits = bits[:8], bits[8:]
flag += bytes([ int(byte[::-1], 2) ])
flag += b"}"
print(flag)
def check_win():
x, y = blocks[target][:2]
if target_start is not None and target_start != (x,y):
print("Congratulations, here's your flag:")
printflag()
sys.exit(0)
print("Here's the parking. Can you move the red car?")
print("Green cars' final position will encode the flag.")
print("Move by clicking a car, then clicking a near empty space to move to. Feel free to zoom in using the magnifying glass if necessary!")
#print_board()
def can_move(which, dirx, diry):
x,y,w,h,mv = blocks[which]
if w == 1 and dirx != 0:
print("This car moves only vertically...")
return False
if h == 1 and diry != 0:
print("This car only moves horizontally...")
return False
bad = False
for i in range(x+dirx, x+dirx+w):
for j in range(y+diry, y+diry+h):
if (i,j) in board and board[(i,j)] != which:
bad = True
if bad:
print("Such a move would cause collision...")
return False
return True
def do_move(which, dirx, diry):
x,y,w,h,mv = blocks[which]
for i in range(x, x+w):
for j in range(y, y+h):
del board[(i,j)]
x += dirx
y += diry
blocks[which] = [x, y, w, h, mv]
for i in range(x, x+w):
for j in range(y, y+h):
board[(i,j)] = which
if mv == -1:
print("This setting corresponds to the following flag:")
printflag()
def onclick(event):
global which
global xy
if which is None:
x, y = event.xdata, event.ydata
x, y = int(x), int(y)
xy = (x, y)
try:
which = board[x//SZ, y//SZ]
if which == WALL:
which = None
print("Selected wall...")
return
print("Car %d selected." % which)
except KeyError:
print("Selected empty space...")
which = None
return
dirx, diry = event.xdata - xy[0], event.ydata - xy[1]
if abs(dirx) > abs(diry):
dirx, diry = (1, 0) if dirx > 0 else (-1, 0)
else:
dirx, diry = (0, 1) if diry > 0 else (0, -1)
if not can_move(which, dirx, diry):
which = None
return
do_move(which, dirx, diry)
which = None
redraw()
check_win()
DRAW = True
if os.environ.get("DRAW") == "0":
DRAW = False
first = True
def redraw():
print("Redrawing...")
global draw, first, ax
im = Image.new("RGB", (W*SZ,H*SZ), (255,255,255))
draw = ImageDraw.Draw(im)
for wall in walls:
putwall(*wall)
for block in blocks:
putcar(*block)
print("Redrawn.")
if first:
print("Saving...")
im.save("initial.png")
print("Saved.")
first = False
ax = fig.add_subplot(111)
ax = ax.imshow(im)
ax.set_data(im)
plt.draw()
if DRAW:
fig = plt.figure()
plt.ion()
plt.show()
cid = fig.canvas.mpl_connect('button_press_event', onclick)
redraw()
which = None
xy = None
# Alternative API, you don't have to click ;)
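# Each input line is "<block index> <dx> <dy>"; e.g. "3 0 -1" asks block 3 to move
# one cell in the negative-y direction (whether it succeeds depends on the board).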
while True:
i = input()
which, dirx, diry = i.strip().split()
which, dirx, diry = int(which), int(dirx), int(diry)
if can_move(which, dirx, diry):
do_move(which, dirx, diry)
print("Moved")
if DRAW:
redraw()
check_win()
else:
print("Invalid")
| [] | [] | ["DRAW", "FLAG"] | [] | ["DRAW", "FLAG"] | python | 2 | 0 |
tests/jobs/test_scheduler_job.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import os
import shutil
import unittest
from datetime import timedelta
from tempfile import NamedTemporaryFile, mkdtemp
import mock
import psutil
import pytest
import six
from freezegun import freeze_time
from mock import MagicMock, patch
from parameterized import parameterized
import airflow.example_dags
import airflow.smart_sensor_dags
from airflow import settings
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.executors.base_executor import BaseExecutor
from airflow.jobs.backfill_job import BackfillJob
from airflow.jobs.scheduler_job import DagFileProcessor, SchedulerJob
from airflow.models import DAG, DagBag, DagModel, Pool, SlaMiss, TaskInstance, errors
from airflow.models.dagrun import DagRun
from airflow.models.taskinstance import SimpleTaskInstance, TaskInstanceKey
from airflow.operators.bash import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.serialization.serialized_objects import SerializedDAG
from airflow.utils import timezone
from airflow.utils.dag_processing import FailureCallbackRequest, SimpleDagBag
from airflow.utils.dates import days_ago
from airflow.utils.file import list_py_file_paths
from airflow.utils.session import create_session, provide_session
from airflow.utils.state import State
from airflow.utils.types import DagRunType
from tests.test_utils.asserts import assert_queries_count
from tests.test_utils.config import conf_vars, env_vars
from tests.test_utils.db import (
clear_db_dags, clear_db_errors, clear_db_jobs, clear_db_pools, clear_db_runs, clear_db_sla_miss,
set_default_pool_slots,
)
from tests.test_utils.mock_executor import MockExecutor
ROOT_FOLDER = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
)
PERF_DAGS_FOLDER = os.path.join(ROOT_FOLDER, "tests", "utils", "perf", "dags")
ELASTIC_DAG_FILE = os.path.join(PERF_DAGS_FOLDER, "elastic_dag.py")
TEST_DAG_FOLDER = os.environ['AIRFLOW__CORE__DAGS_FOLDER']
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TRY_NUMBER = 1
# Include the words "airflow" and "dag" in the file contents,
# tricking airflow into thinking these
# files contain a DAG (otherwise Airflow will skip them)
PARSEABLE_DAG_FILE_CONTENTS = '"airflow DAG"'
UNPARSEABLE_DAG_FILE_CONTENTS = 'airflow DAG'
# Filename to be used for dags that are created in an ad-hoc manner and can be removed/
# created at runtime
TEMP_DAG_FILENAME = "temp_dag.py"
@pytest.fixture(scope="class")
def disable_load_example():
with conf_vars({('core', 'load_examples'): 'false'}):
with env_vars({('core', 'load_examples'): 'false'}):
yield
@pytest.mark.usefixtures("disable_load_example")
class TestDagFileProcessor(unittest.TestCase):
@staticmethod
def clean_db():
clear_db_runs()
clear_db_pools()
clear_db_dags()
clear_db_sla_miss()
clear_db_errors()
clear_db_jobs()
def setUp(self):
self.clean_db()
# Speed up some tests by not running the tasks, just look at what we
# enqueue!
self.null_exec = MockExecutor()
def tearDown(self) -> None:
self.clean_db()
def create_test_dag(self, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE + timedelta(hours=1), **kwargs):
dag = DAG(
dag_id='test_scheduler_reschedule',
start_date=start_date,
# Make sure it only creates a single DAG Run
end_date=end_date)
dag.clear()
dag.is_subdag = False
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
return dag
@classmethod
@patch("airflow.models.dagbag.settings.STORE_SERIALIZED_DAGS", True)
def setUpClass(cls):
# Ensure the DAGs we are looking at from the DB are up-to-date
non_serialized_dagbag = DagBag(read_dags_from_db=False, include_examples=False)
non_serialized_dagbag.sync_to_db()
cls.dagbag = DagBag(read_dags_from_db=True)
def test_dag_file_processor_sla_miss_callback(self):
"""
Test that the dag file processor calls the sla miss callback
"""
session = settings.Session()
sla_callback = MagicMock()
# Create dag with a start of 1 day ago, but an sla of 0
# so we'll already have an sla_miss on the books.
test_start_date = days_ago(1)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date,
'sla': datetime.timedelta()})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow')
session.merge(TaskInstance(task=task, execution_date=test_start_date, state='success'))
session.merge(SlaMiss(task_id='dummy', dag_id='test_sla_miss', execution_date=test_start_date))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag_file_processor.manage_slas(dag=dag, session=session)
assert sla_callback.called
def test_dag_file_processor_sla_miss_callback_invalid_sla(self):
"""
Test that the dag file processor does not call the sla miss callback when
given an invalid sla
"""
session = settings.Session()
sla_callback = MagicMock()
# Create dag with a start of 1 day ago, but an sla of 0
# so we'll already have an sla_miss on the books.
# Pass anything besides a timedelta object to the sla argument.
test_start_date = days_ago(1)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date,
'sla': None})
task = DummyOperator(task_id='dummy', dag=dag, owner='airflow')
session.merge(TaskInstance(task=task, execution_date=test_start_date, state='success'))
session.merge(SlaMiss(task_id='dummy', dag_id='test_sla_miss', execution_date=test_start_date))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag_file_processor.manage_slas(dag=dag, session=session)
sla_callback.assert_not_called()
def test_scheduler_executor_overflow(self):
"""
Test that tasks that are set back to scheduled and removed from the executor
queue in the case of an overflow.
"""
executor = MockExecutor(do_update=True, parallelism=3)
with create_session() as session:
dagbag = DagBag(dag_folder=os.path.join(settings.DAGS_FOLDER, "no_dags.py"),
include_examples=False,
include_smart_sensor=False)
dag = self.create_test_dag()
dag.clear()
dagbag.bag_dag(dag=dag, root_dag=dag)
dag = self.create_test_dag()
dag.clear()
task = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
tis = []
for i in range(1, 10):
ti = TaskInstance(task, DEFAULT_DATE + timedelta(days=i))
ti.state = State.SCHEDULED
tis.append(ti)
session.merge(ti)
# scheduler._process_dags(simple_dag_bag)
@mock.patch('airflow.jobs.scheduler_job.DagBag', return_value=dagbag)
@mock.patch('airflow.jobs.scheduler_job.SchedulerJob._change_state_for_tis_without_dagrun')
def do_schedule(mock_dagbag, mock_change_state):
# Use a empty file since the above mock will return the
# expected DAGs. Also specify only a single file so that it doesn't
# try to schedule the above DAG repeatedly.
with conf_vars({('core', 'mp_start_method'): 'fork'}):
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(settings.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
do_schedule() # pylint: disable=no-value-for-parameter
for ti in tis:
ti.refresh_from_db()
self.assertEqual(len(executor.queued_tasks), 0)
successful_tasks = [ti for ti in tis if ti.state == State.SUCCESS]
scheduled_tasks = [ti for ti in tis if ti.state == State.SCHEDULED]
self.assertEqual(3, len(successful_tasks))
self.assertEqual(6, len(scheduled_tasks))
def test_dag_file_processor_sla_miss_callback_sent_notification(self):
"""
Test that the dag file processor does not call the sla_miss_callback when a
notification has already been sent
"""
session = settings.Session()
# Mock the callback function so we can verify that it was not called
sla_callback = MagicMock()
# Create dag with a start of 2 days ago, but an sla of 1 day
# ago so we'll already have an sla_miss on the books
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
task = DummyOperator(task_id='dummy', dag=dag, owner='airflow')
# Create a TaskInstance for two days ago
session.merge(TaskInstance(task=task, execution_date=test_start_date, state='success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date,
email_sent=False,
notification_sent=True))
# Now call manage_slas and see if the sla_miss callback gets called
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag_file_processor.manage_slas(dag=dag, session=session)
sla_callback.assert_not_called()
def test_dag_file_processor_sla_miss_callback_exception(self):
"""
Test that the dag file processor gracefully logs an exception if there is a problem
calling the sla_miss_callback
"""
session = settings.Session()
sla_callback = MagicMock(side_effect=RuntimeError('Could not call function'))
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow',
sla=datetime.timedelta(hours=1))
session.merge(TaskInstance(task=task, execution_date=test_start_date, state='Success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date))
# Now call manage_slas and see if the sla_miss callback gets called
mock_log = mock.MagicMock()
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock_log)
dag_file_processor.manage_slas(dag=dag, session=session)
assert sla_callback.called
mock_log.exception.assert_called_once_with(
'Could not call sla_miss_callback for DAG %s',
'test_sla_miss')
@mock.patch('airflow.jobs.scheduler_job.send_email')
def test_dag_file_processor_only_collect_emails_from_sla_missed_tasks(self, mock_send_email):
session = settings.Session()
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
email1 = '[email protected]'
task = DummyOperator(task_id='sla_missed',
dag=dag,
owner='airflow',
email=email1,
sla=datetime.timedelta(hours=1))
session.merge(TaskInstance(task=task, execution_date=test_start_date, state='Success'))
email2 = '[email protected]'
DummyOperator(task_id='sla_not_missed',
dag=dag,
owner='airflow',
email=email2)
session.merge(SlaMiss(task_id='sla_missed', dag_id='test_sla_miss', execution_date=test_start_date))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag_file_processor.manage_slas(dag=dag, session=session)
self.assertTrue(len(mock_send_email.call_args_list), 1)
send_email_to = mock_send_email.call_args_list[0][0][0]
self.assertIn(email1, send_email_to)
self.assertNotIn(email2, send_email_to)
@mock.patch('airflow.jobs.scheduler_job.Stats.incr')
@mock.patch("airflow.utils.email.send_email")
def test_dag_file_processor_sla_miss_email_exception(self, mock_send_email, mock_stats_incr):
"""
Test that the dag file processor gracefully logs an exception if there is a problem
sending an email
"""
session = settings.Session()
# Mock the callback function so we can verify that it was not called
mock_send_email.side_effect = RuntimeError('Could not send an email')
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow',
email='[email protected]',
sla=datetime.timedelta(hours=1))
session.merge(TaskInstance(task=task, execution_date=test_start_date, state='Success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(SlaMiss(task_id='dummy', dag_id='test_sla_miss', execution_date=test_start_date))
mock_log = mock.MagicMock()
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock_log)
dag_file_processor.manage_slas(dag=dag, session=session)
mock_log.exception.assert_called_once_with(
'Could not send SLA Miss email notification for DAG %s',
'test_sla_miss')
mock_stats_incr.assert_called_once_with('sla_email_notification_failure')
def test_dag_file_processor_sla_miss_deleted_task(self):
"""
Test that the dag file processor will not crash when trying to send
sla miss notification for a deleted task
"""
session = settings.Session()
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow',
email='[email protected]',
sla=datetime.timedelta(hours=1))
session.merge(TaskInstance(task=task, execution_date=test_start_date, state='Success'))
# Create an SlaMiss for a task that has since been removed from the DAG
session.merge(SlaMiss(task_id='dummy_deleted', dag_id='test_sla_miss',
execution_date=test_start_date))
mock_log = mock.MagicMock()
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock_log)
dag_file_processor.manage_slas(dag=dag, session=session)
def test_dag_file_processor_dagrun_once(self):
"""
Test that the dag file processor does not create multiple dagruns
if a dag is scheduled with @once and a start_date
"""
dag = DAG(
'test_scheduler_dagrun_once',
start_date=timezone.datetime(2015, 1, 1),
schedule_interval="@once")
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
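# Round-trip the DAG through serialization, mimicking how the scheduler works with serialized DAGs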
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag.clear()
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNone(dr)
@freeze_time(timezone.datetime(2020, 1, 5))
def test_dag_file_processor_dagrun_with_timedelta_schedule_and_catchup_false(self):
"""
Test that the dag file processor does not create multiple dagruns
if a dag is scheduled with 'timedelta' and catchup=False
"""
dag = DAG(
'test_scheduler_dagrun_once_with_timedelta_and_catchup_false',
start_date=timezone.datetime(2015, 1, 1),
schedule_interval=timedelta(days=1),
catchup=False)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear()
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEqual(dr.execution_date, timezone.datetime(2020, 1, 4))
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNone(dr)
@freeze_time(timezone.datetime(2020, 5, 4))
def test_dag_file_processor_dagrun_with_timedelta_schedule_and_catchup_true(self):
"""
Test that the dag file processor creates multiple dagruns
if a dag is scheduled with 'timedelta' and catchup=True
"""
dag = DAG(
'test_scheduler_dagrun_once_with_timedelta_and_catchup_true',
start_date=timezone.datetime(2020, 5, 1),
schedule_interval=timedelta(days=1),
catchup=True)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear()
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEqual(dr.execution_date, timezone.datetime(2020, 5, 1))
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEqual(dr.execution_date, timezone.datetime(2020, 5, 2))
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEqual(dr.execution_date, timezone.datetime(2020, 5, 3))
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNone(dr)
@parameterized.expand([
[State.NONE, None, None],
[State.UP_FOR_RETRY, timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15)],
[State.UP_FOR_RESCHEDULE, timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15)],
])
def test_dag_file_processor_process_task_instances(self, state, start_date, end_date):
"""
Test if _process_task_instances puts the right task instances into the
mock_list.
"""
dag = DAG(
dag_id='test_scheduler_process_execute_task',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear()
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
with create_session() as session:
tis = dr.get_task_instances(session=session)
for ti in tis:
ti.state = state
ti.start_date = start_date
ti.end_date = end_date
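# Task instances in the parameterized states should be picked up again by _process_task_instances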
mock_list = dag_file_processor._process_task_instances(dag, dag_runs=[dr])
self.assertEqual(
[(dag.dag_id, dag_task1.task_id, DEFAULT_DATE, TRY_NUMBER)],
mock_list
)
@parameterized.expand([
[State.NONE, None, None],
[State.UP_FOR_RETRY, timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15)],
[State.UP_FOR_RESCHEDULE, timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15)],
])
def test_dag_file_processor_process_task_instances_with_task_concurrency(
self, state, start_date, end_date,
):
"""
Test that _process_task_instances schedules the right task instances
when task_concurrency is set on the task.
"""
dag = DAG(
dag_id='test_scheduler_process_execute_task_with_task_concurrency',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
task_concurrency=2,
dag=dag,
owner='airflow')
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear()
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
with create_session() as session:
tis = dr.get_task_instances(session=session)
for ti in tis:
ti.state = state
ti.start_date = start_date
ti.end_date = end_date
ti_to_schedule = dag_file_processor._process_task_instances(dag, dag_runs=[dr])
assert ti_to_schedule == [
(dag.dag_id, dag_task1.task_id, DEFAULT_DATE, TRY_NUMBER),
]
@parameterized.expand([
[State.NONE, None, None],
[State.UP_FOR_RETRY, timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15)],
[State.UP_FOR_RESCHEDULE, timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15)],
])
def test_dag_file_processor_process_task_instances_depends_on_past(self, state, start_date, end_date):
"""
Test that _process_task_instances schedules the right task instances
when depends_on_past is set.
"""
dag = DAG(
dag_id='test_scheduler_process_execute_task_depends_on_past',
start_date=DEFAULT_DATE,
default_args={
'depends_on_past': True,
},
)
dag_task1 = DummyOperator(
task_id='dummy1',
dag=dag,
owner='airflow')
dag_task2 = DummyOperator(
task_id='dummy2',
dag=dag,
owner='airflow')
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear()
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
with create_session() as session:
tis = dr.get_task_instances(session=session)
for ti in tis:
ti.state = state
ti.start_date = start_date
ti.end_date = end_date
ti_to_schedule = dag_file_processor._process_task_instances(dag, dag_runs=[dr])
assert sorted(ti_to_schedule) == [
(dag.dag_id, dag_task1.task_id, DEFAULT_DATE, TRY_NUMBER),
(dag.dag_id, dag_task2.task_id, DEFAULT_DATE, TRY_NUMBER),
]
def test_dag_file_processor_do_not_schedule_removed_task(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear()
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = DagRun.find(run_id=dr.run_id)[0]
# Re-create the DAG, but remove the task
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
mock_list = dag_file_processor._process_task_instances(dag, dag_runs=[dr])
self.assertEqual([], mock_list)
def test_dag_file_processor_do_not_schedule_too_early(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_too_early',
start_date=timezone.datetime(2200, 1, 1))
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear()
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNone(dr)
mock_list = dag_file_processor._process_task_instances(dag, dag_runs=[])
self.assertEqual([], mock_list)
def test_dag_file_processor_do_not_schedule_without_tasks(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_without_tasks',
start_date=DEFAULT_DATE)
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear(session=session)
dag.start_date = None
dr = dag_file_processor.create_dag_run(dag, session=session)
self.assertIsNone(dr)
def test_dag_file_processor_do_not_run_finished(self):
dag = DAG(
dag_id='test_scheduler_do_not_run_finished',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear()
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
tis = dr.get_task_instances(session=session)
for ti in tis:
ti.state = State.SUCCESS
session.commit()
session.close()
mock_list = dag_file_processor._process_task_instances(dag, dag_runs=[dr])
self.assertEqual([], mock_list)
def test_dag_file_processor_add_new_task(self):
"""
Test if a task instance will be added if the dag is updated
"""
dag = DAG(
dag_id='test_scheduler_add_new_task',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear()
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
tis = dr.get_task_instances()
self.assertEqual(len(tis), 1)
DummyOperator(
task_id='dummy2',
dag=dag,
owner='airflow')
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor._process_task_instances(dag, dag_runs=[dr])
tis = dr.get_task_instances()
self.assertEqual(len(tis), 2)
def test_dag_file_processor_verify_max_active_runs(self):
"""
Test that a dagrun will not be scheduled if max_active_runs has been reached
"""
dag = DAG(
dag_id='test_scheduler_verify_max_active_runs',
start_date=DEFAULT_DATE)
dag.max_active_runs = 1
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear()
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNone(dr)
def test_dag_file_processor_fail_dagrun_timeout(self):
"""
Test that a dagrun will be set to failed if it exceeds dagrun_timeout
"""
dag = DAG(
dag_id='test_scheduler_fail_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.dagrun_timeout = datetime.timedelta(seconds=60)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear()
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
session.merge(dr)
session.commit()
dr2 = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr2)
dr.refresh_from_db(session=session)
self.assertEqual(dr.state, State.FAILED)
def test_dag_file_processor_verify_max_active_runs_and_dagrun_timeout(self):
"""
Test that a dagrun will not be scheduled if max_active_runs
has been reached and dagrun_timeout has not been reached.
Test that a dagrun will be scheduled if max_active_runs has
been reached but dagrun_timeout has also been reached.
"""
dag = DAG(
dag_id='test_scheduler_verify_max_active_runs_and_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.max_active_runs = 1
dag.dagrun_timeout = datetime.timedelta(seconds=60)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear()
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
# Should not be scheduled as the DagRun has not timed out and max_active_runs is reached
new_dr = dag_file_processor.create_dag_run(dag)
self.assertIsNone(new_dr)
# Should be scheduled as dagrun_timeout has passed
dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
session.merge(dr)
session.commit()
new_dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(new_dr)
def test_dag_file_processor_max_active_runs_respected_after_clear(self):
"""
Test if _process_task_instances only schedules ti's up to max_active_runs
(related to issue AIRFLOW-137)
"""
dag = DAG(
dag_id='test_scheduler_max_active_runs_respected_after_clear',
start_date=DEFAULT_DATE)
dag.max_active_runs = 3
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear()
# First create up to 3 dagruns in RUNNING state.
dr1 = dag_file_processor.create_dag_run(dag)
assert dr1 is not None
dr2 = dag_file_processor.create_dag_run(dag)
assert dr2 is not None
dr3 = dag_file_processor.create_dag_run(dag)
assert dr3 is not None
assert len(DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)) == 3
# Reduce max_active_runs to 1
dag.max_active_runs = 1
# and schedule them in, so we can check how many
# tasks are put on the task_instances_list (should be one, not 3)
task_instances_list = dag_file_processor._process_task_instances(dag, dag_runs=[dr1, dr2, dr3])
self.assertEqual([(dag.dag_id, dag_task1.task_id, DEFAULT_DATE, TRY_NUMBER)], task_instances_list)
def test_find_dags_to_run_includes_subdags(self):
dag = self.dagbag.get_dag('test_subdag_operator')
self.assertGreater(len(dag.subdags), 0)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dags = dag_file_processor._find_dags_to_process(self.dagbag.dags.values())
self.assertIn(dag, dags)
for subdag in dag.subdags:
self.assertIn(subdag, dags)
def test_dag_catchup_option(self):
"""
Test to check that a DAG with catchup = False only schedules beginning now, not back to the start date
"""
def setup_dag(dag_id, schedule_interval, start_date, catchup):
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': start_date
}
dag = DAG(dag_id,
schedule_interval=schedule_interval,
max_active_runs=1,
catchup=catchup,
default_args=default_args)
op1 = DummyOperator(task_id='t1', dag=dag)
op2 = DummyOperator(task_id='t2', dag=dag)
op2.set_upstream(op1)
op3 = DummyOperator(task_id='t3', dag=dag)
op3.set_upstream(op2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
return SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
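# Reference times, relative to now, used to check which execution dates get created below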
now = timezone.utcnow()
six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(
minute=0, second=0, microsecond=0)
half_an_hour_ago = now - datetime.timedelta(minutes=30)
two_hours_ago = now - datetime.timedelta(hours=2)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag1 = setup_dag(dag_id='dag_with_catchup',
schedule_interval='* * * * *',
start_date=six_hours_ago_to_the_hour,
catchup=True)
default_catchup = conf.getboolean('scheduler', 'catchup_by_default')
self.assertEqual(default_catchup, True)
self.assertEqual(dag1.catchup, True)
dag2 = setup_dag(dag_id='dag_without_catchup_ten_minute',
schedule_interval='*/10 * * * *',
start_date=six_hours_ago_to_the_hour,
catchup=False)
dr = dag_file_processor.create_dag_run(dag2)
# We had better get a dag run
self.assertIsNotNone(dr)
# The DR should be scheduled in the last half an hour, not 6 hours ago
self.assertGreater(dr.execution_date, half_an_hour_ago)
# The DR should be scheduled BEFORE now
self.assertLess(dr.execution_date, timezone.utcnow())
dag3 = setup_dag(dag_id='dag_without_catchup_hourly',
schedule_interval='@hourly',
start_date=six_hours_ago_to_the_hour,
catchup=False)
dr = dag_file_processor.create_dag_run(dag3)
# We had better get a dag run
self.assertIsNotNone(dr)
# The DR should be scheduled in the last 2 hours, not 6 hours ago
self.assertGreater(dr.execution_date, two_hours_ago)
# The DR should be scheduled BEFORE now
self.assertLess(dr.execution_date, timezone.utcnow())
dag4 = setup_dag(dag_id='dag_without_catchup_once',
schedule_interval='@once',
start_date=six_hours_ago_to_the_hour,
catchup=False)
dr = dag_file_processor.create_dag_run(dag4)
self.assertIsNotNone(dr)
def test_dag_file_processor_auto_align(self):
"""
Test that the schedule_interval is auto-aligned with the start_date:
if the start_date coincides with the schedule, the first
execution_date will be the start_date; otherwise it will be the
first schedule point after the start_date.
"""
dag = DAG(
dag_id='test_scheduler_auto_align_1',
start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="4 5 * * *"
)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEqual(dr.execution_date, timezone.datetime(2016, 1, 2, 5, 4))
dag = DAG(
dag_id='test_scheduler_auto_align_2',
start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="10 10 * * *"
)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag.clear()
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEqual(dr.execution_date, timezone.datetime(2016, 1, 1, 10, 10))
def test_process_dags_not_create_dagrun_for_subdags(self):
dag = self.dagbag.get_dag('test_subdag_operator')
scheduler = DagFileProcessor(dag_ids=[dag.dag_id], log=mock.MagicMock())
scheduler._process_task_instances = mock.MagicMock()
scheduler.manage_slas = mock.MagicMock()
scheduler._process_dags([dag] + dag.subdags)
with create_session() as session:
sub_dagruns = (
session.query(DagRun).filter(DagRun.dag_id == dag.subdags[0].dag_id).count()
)
self.assertEqual(0, sub_dagruns)
parent_dagruns = (
session.query(DagRun).filter(DagRun.dag_id == dag.dag_id).count()
)
self.assertGreater(parent_dagruns, 0)
@patch.object(TaskInstance, 'handle_failure')
def test_execute_on_failure_callbacks(self, mock_ti_handle_failure):
dagbag = DagBag(dag_folder="/dev/null", include_examples=True)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
with create_session() as session:
session.query(TaskInstance).delete()
dag = dagbag.get_dag('example_branch_operator')
task = dag.get_task(task_id='run_this_first')
ti = TaskInstance(task, DEFAULT_DATE, State.RUNNING)
session.add(ti)
session.commit()
requests = [
FailureCallbackRequest(
full_filepath="A",
simple_task_instance=SimpleTaskInstance(ti),
msg="Message"
)
]
dag_file_processor.execute_on_failure_callbacks(dagbag, requests)
mock_ti_handle_failure.assert_called_once_with(
"Message",
conf.getboolean('core', 'unit_test_mode'),
mock.ANY
)
def test_process_file_should_failure_callback(self):
dag_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)), '../dags/test_on_failure_callback.py'
)
dagbag = DagBag(dag_folder=dag_file, include_examples=False)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
with create_session() as session, NamedTemporaryFile(delete=False) as callback_file:
session.query(TaskInstance).delete()
dag = dagbag.get_dag('test_om_failure_callback_dag')
task = dag.get_task(task_id='test_om_failure_callback_task')
ti = TaskInstance(task, DEFAULT_DATE, State.RUNNING)
session.add(ti)
session.commit()
requests = [
FailureCallbackRequest(
full_filepath=dag.full_filepath,
simple_task_instance=SimpleTaskInstance(ti),
msg="Message"
)
]
callback_file.close()
with mock.patch.dict("os.environ", {"AIRFLOW_CALLBACK_FILE": callback_file.name}):
dag_file_processor.process_file(dag_file, requests)
with open(callback_file.name) as callback_file2:
content = callback_file2.read()
self.assertEqual("Callback fired", content)
os.remove(callback_file.name)
def test_should_parse_only_unpaused_dags(self):
dag_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)), '../dags/test_multiple_dags.py'
)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dagbag = DagBag(dag_folder=dag_file, include_examples=False)
dagbag.sync_to_db()
with create_session() as session:
session.query(TaskInstance).delete()
(
session.query(DagModel)
.filter(DagModel.dag_id == "test_multiple_dags__dag_1")
.update({DagModel.is_paused: True}, synchronize_session=False)
)
serialized_dags, import_errors_count = dag_file_processor.process_file(
file_path=dag_file, failure_callback_requests=[]
)
dags = [SerializedDAG.from_dict(serialized_dag) for serialized_dag in serialized_dags]
with create_session() as session:
tis = session.query(TaskInstance).all()
self.assertEqual(0, import_errors_count)
self.assertEqual(['test_multiple_dags__dag_2'], [dag.dag_id for dag in dags])
self.assertEqual({'test_multiple_dags__dag_2'}, {ti.dag_id for ti in tis})
def test_should_mark_dummy_task_as_success(self):
dag_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)), '../dags/test_only_dummy_tasks.py'
)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
with create_session() as session:
session.query(TaskInstance).delete()
session.query(DagModel).delete()
dagbag = DagBag(dag_folder=dag_file, include_examples=False)
dagbag.sync_to_db()
serialized_dags, import_errors_count = dag_file_processor.process_file(
file_path=dag_file, failure_callback_requests=[]
)
dags = [SerializedDAG.from_dict(serialized_dag) for serialized_dag in serialized_dags]
with create_session() as session:
tis = session.query(TaskInstance).all()
self.assertEqual(0, import_errors_count)
self.assertEqual(['test_only_dummy_tasks'], [dag.dag_id for dag in dags])
self.assertEqual(5, len(tis))
self.assertEqual({
('test_task_a', 'success'),
('test_task_b', None),
('test_task_c', 'success'),
('test_task_on_execute', 'scheduled'),
('test_task_on_success', 'scheduled'),
}, {(ti.task_id, ti.state) for ti in tis})
for state, start_date, end_date, duration in [(ti.state, ti.start_date, ti.end_date, ti.duration) for
ti in tis]:
if state == 'success':
self.assertIsNotNone(start_date)
self.assertIsNotNone(end_date)
self.assertEqual(0.0, duration)
else:
self.assertIsNone(start_date)
self.assertIsNone(end_date)
self.assertIsNone(duration)
dag_file_processor.process_file(
file_path=dag_file, failure_callback_requests=[]
)
with create_session() as session:
tis = session.query(TaskInstance).all()
self.assertEqual(5, len(tis))
self.assertEqual({
('test_task_a', 'success'),
('test_task_b', 'success'),
('test_task_c', 'success'),
('test_task_on_execute', 'scheduled'),
('test_task_on_success', 'scheduled'),
}, {(ti.task_id, ti.state) for ti in tis})
for state, start_date, end_date, duration in [(ti.state, ti.start_date, ti.end_date, ti.duration) for
ti in tis]:
if state == 'success':
self.assertIsNotNone(start_date)
self.assertIsNotNone(end_date)
self.assertEqual(0.0, duration)
else:
self.assertIsNone(start_date)
self.assertIsNone(end_date)
self.assertIsNone(duration)
@pytest.mark.heisentests
class TestDagFileProcessorQueriesCount(unittest.TestCase):
"""
These tests are designed to detect changes in the number of queries executed for different DAG files.
Each test has its expected query counts saved in the parameterized tables below. If you make a change
that affects the number of queries, please update those tables.
These tests make it easy to detect changes that affect the performance of the DagFileProcessor.
"""
def setUp(self) -> None:
clear_db_runs()
clear_db_pools()
clear_db_dags()
clear_db_sla_miss()
clear_db_errors()
@parameterized.expand(
[
# pylint: disable=bad-whitespace
# expected, dag_count, task_count, start_ago, schedule_interval, shape
# One DAG with one task per DAG file
([ 1, 1, 1, 1], 1, 1, "1d", "None", "no_structure"), # noqa
([ 1, 1, 1, 1], 1, 1, "1d", "None", "linear"), # noqa
([ 9, 5, 5, 5], 1, 1, "1d", "@once", "no_structure"), # noqa
([ 9, 5, 5, 5], 1, 1, "1d", "@once", "linear"), # noqa
([ 9, 12, 15, 18], 1, 1, "1d", "30m", "no_structure"), # noqa
([ 9, 12, 15, 18], 1, 1, "1d", "30m", "linear"), # noqa
([ 9, 12, 15, 18], 1, 1, "1d", "30m", "binary_tree"), # noqa
([ 9, 12, 15, 18], 1, 1, "1d", "30m", "star"), # noqa
([ 9, 12, 15, 18], 1, 1, "1d", "30m", "grid"), # noqa
# One DAG with five tasks per DAG file
([ 1, 1, 1, 1], 1, 5, "1d", "None", "no_structure"), # noqa
([ 1, 1, 1, 1], 1, 5, "1d", "None", "linear"), # noqa
([ 9, 5, 5, 5], 1, 5, "1d", "@once", "no_structure"), # noqa
([10, 6, 6, 6], 1, 5, "1d", "@once", "linear"), # noqa
([ 9, 12, 15, 18], 1, 5, "1d", "30m", "no_structure"), # noqa
([10, 14, 18, 22], 1, 5, "1d", "30m", "linear"), # noqa
([10, 14, 18, 22], 1, 5, "1d", "30m", "binary_tree"), # noqa
([10, 14, 18, 22], 1, 5, "1d", "30m", "star"), # noqa
([10, 14, 18, 22], 1, 5, "1d", "30m", "grid"), # noqa
# 10 DAGs with 10 tasks per DAG file
([ 1, 1, 1, 1], 10, 10, "1d", "None", "no_structure"), # noqa
([ 1, 1, 1, 1], 10, 10, "1d", "None", "linear"), # noqa
([81, 41, 41, 41], 10, 10, "1d", "@once", "no_structure"), # noqa
([91, 51, 51, 51], 10, 10, "1d", "@once", "linear"), # noqa
([81, 111, 111, 111], 10, 10, "1d", "30m", "no_structure"), # noqa
([91, 131, 131, 131], 10, 10, "1d", "30m", "linear"), # noqa
([91, 131, 131, 131], 10, 10, "1d", "30m", "binary_tree"), # noqa
([91, 131, 131, 131], 10, 10, "1d", "30m", "star"), # noqa
([91, 131, 131, 131], 10, 10, "1d", "30m", "grid"), # noqa
# pylint: enable=bad-whitespace
]
)
def test_process_dags_queries_count(
self, expected_query_counts, dag_count, task_count, start_ago, schedule_interval, shape
):
with mock.patch.dict("os.environ", {
"PERF_DAGS_COUNT": str(dag_count),
"PERF_TASKS_COUNT": str(task_count),
"PERF_START_AGO": start_ago,
"PERF_SCHEDULE_INTERVAL": schedule_interval,
"PERF_SHAPE": shape,
}), conf_vars({
('scheduler', 'use_job_schedule'): 'True',
}):
dagbag = DagBag(dag_folder=ELASTIC_DAG_FILE,
include_examples=False,
include_smart_sensor=False)
processor = DagFileProcessor([], mock.MagicMock())
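# _process_dags is run once per expected count; query counts may change between consecutive
# runs as dag runs and task instances accumulate in the database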
for expected_query_count in expected_query_counts:
with assert_queries_count(expected_query_count):
processor._process_dags(dagbag.dags.values())
@parameterized.expand(
[
# pylint: disable=bad-whitespace
# expected, dag_count, task_count, start_ago, schedule_interval, shape
# One DAG with one task per DAG file
([ 5, 5, 5, 5], 1, 1, "1d", "None", "no_structure"), # noqa
([ 5, 5, 5, 5], 1, 1, "1d", "None", "linear"), # noqa
([15, 9, 9, 9], 1, 1, "1d", "@once", "no_structure"), # noqa
([15, 9, 9, 9], 1, 1, "1d", "@once", "linear"), # noqa
([15, 18, 21, 24], 1, 1, "1d", "30m", "no_structure"), # noqa
([15, 18, 21, 24], 1, 1, "1d", "30m", "linear"), # noqa
# One DAG with five tasks per DAG file
([ 5, 5, 5, 5], 1, 5, "1d", "None", "no_structure"), # noqa
([ 5, 5, 5, 5], 1, 5, "1d", "None", "linear"), # noqa
([15, 9, 9, 9], 1, 5, "1d", "@once", "no_structure"), # noqa
([16, 10, 10, 10], 1, 5, "1d", "@once", "linear"), # noqa
([15, 18, 21, 24], 1, 5, "1d", "30m", "no_structure"), # noqa
([16, 20, 24, 28], 1, 5, "1d", "30m", "linear"), # noqa
# 10 DAGs with 10 tasks per DAG file
([ 5, 5, 5, 5], 10, 10, "1d", "None", "no_structure"), # noqa
([ 5, 5, 5, 5], 10, 10, "1d", "None", "linear"), # noqa
([87, 45, 45, 45], 10, 10, "1d", "@once", "no_structure"), # noqa
([97, 55, 55, 55], 10, 10, "1d", "@once", "linear"), # noqa
([87, 117, 117, 117], 10, 10, "1d", "30m", "no_structure"), # noqa
([97, 137, 137, 137], 10, 10, "1d", "30m", "linear"), # noqa
# pylint: enable=bad-whitespace
]
)
def test_process_file_queries_count(
self, expected_query_counts, dag_count, task_count, start_ago, schedule_interval, shape
):
with mock.patch.dict("os.environ", {
"PERF_DAGS_COUNT": str(dag_count),
"PERF_TASKS_COUNT": str(task_count),
"PERF_START_AGO": start_ago,
"PERF_SCHEDULE_INTERVAL": schedule_interval,
"PERF_SHAPE": shape,
}), conf_vars({
('scheduler', 'use_job_schedule'): 'True'
}):
processor = DagFileProcessor([], mock.MagicMock())
for expected_query_count in expected_query_counts:
with assert_queries_count(expected_query_count):
processor.process_file(ELASTIC_DAG_FILE, [])
@pytest.mark.usefixtures("disable_load_example")
class TestSchedulerJob(unittest.TestCase):
def setUp(self):
clear_db_runs()
clear_db_pools()
clear_db_dags()
clear_db_sla_miss()
clear_db_errors()
# Speed up some tests by not running the tasks, just look at what we
# enqueue!
self.null_exec = MockExecutor()
@classmethod
@patch("airflow.models.dagbag.settings.STORE_SERIALIZED_DAGS", True)
def setUpClass(cls):
# Ensure the DAGs we are looking at from the DB are up-to-date
non_serialized_dagbag = DagBag(read_dags_from_db=False, include_examples=False)
non_serialized_dagbag.sync_to_db()
cls.dagbag = DagBag(read_dags_from_db=True)
def test_is_alive(self):
job = SchedulerJob(None, heartrate=10, state=State.RUNNING)
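# A RUNNING job with a sufficiently recent heartbeat counts as alive; a stale heartbeat
# or a non-RUNNING state makes is_alive() return False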
self.assertTrue(job.is_alive())
job.latest_heartbeat = timezone.utcnow() - datetime.timedelta(seconds=20)
self.assertTrue(job.is_alive())
job.latest_heartbeat = timezone.utcnow() - datetime.timedelta(seconds=31)
self.assertFalse(job.is_alive())
# test because .seconds was used before instead of total_seconds
# internal repr of timedelta is (days, seconds)
job.latest_heartbeat = timezone.utcnow() - datetime.timedelta(days=1)
self.assertFalse(job.is_alive())
job.state = State.SUCCESS
job.latest_heartbeat = timezone.utcnow() - datetime.timedelta(seconds=10)
self.assertFalse(job.is_alive(), "Completed jobs even with recent heartbeat should not be alive")
def run_single_scheduler_loop_with_no_dags(self, dags_folder):
"""
Utility function that runs a single scheduler loop without actually
changing/scheduling any dags. This is useful to simulate the other side effects of
running a scheduler loop, e.g. to see what parse errors there are in the
dags_folder.
:param dags_folder: the directory to traverse
:type dags_folder: str
"""
scheduler = SchedulerJob(
executor=self.null_exec,
dag_id='this_dag_doesnt_exist', # We don't want to actually run anything
num_runs=1,
subdir=os.path.join(dags_folder))
scheduler.heartrate = 0
scheduler.run()
def _make_simple_dag_bag(self, dags):
return SimpleDagBag([SerializedDAG.from_dict(SerializedDAG.to_dict(dag)) for dag in dags])
def test_no_orphan_process_will_be_left(self):
empty_dir = mkdtemp()
current_process = psutil.Process()
old_children = current_process.children(recursive=True)
scheduler = SchedulerJob(subdir=empty_dir,
num_runs=1,
executor=MockExecutor(do_update=False))
scheduler.run()
shutil.rmtree(empty_dir)
# Remove potential noise created by previous tests.
current_children = set(current_process.children(recursive=True)) - set(
old_children)
self.assertFalse(current_children)
@mock.patch('airflow.jobs.scheduler_job.Stats.incr')
def test_process_executor_events(self, mock_stats_incr):
dag_id = "test_process_executor_events"
dag_id2 = "test_process_executor_events_2"
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, full_filepath="/test_path1/")
dag2 = DAG(dag_id=dag_id2, start_date=DEFAULT_DATE, full_filepath="/test_path1/")
task1 = DummyOperator(dag=dag, task_id=task_id_1)
DummyOperator(dag=dag2, task_id=task_id_1)
dag.fileloc = "/test_path1/"
dag2.fileloc = "/test_path1/"
dagbag1 = self._make_simple_dag_bag([dag])
dagbag2 = self._make_simple_dag_bag([dag2])
scheduler = SchedulerJob()
session = settings.Session()
ti1 = TaskInstance(task1, DEFAULT_DATE)
ti1.state = State.QUEUED
session.merge(ti1)
session.commit()
executor = MockExecutor(do_update=False)
executor.event_buffer[ti1.key] = State.FAILED, None
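# The executor reports a FAILED result for a task instance the scheduler still considers QUEUED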
scheduler.executor = executor
scheduler.processor_agent = mock.MagicMock()
# dag bag does not contain dag_id
scheduler._process_executor_events(simple_dag_bag=dagbag2)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.QUEUED)
scheduler.processor_agent.send_callback_to_execute.assert_not_called()
# dag bag does contain dag_id
scheduler._process_executor_events(simple_dag_bag=dagbag1)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.QUEUED)
scheduler.processor_agent.send_callback_to_execute.assert_called_once_with(
full_filepath='/test_path1/',
task_instance=mock.ANY,
msg='Executor reports task instance '
'<TaskInstance: test_process_executor_events.dummy_task 2016-01-01 00:00:00+00:00 [queued]> '
'finished (failed) although the task says its queued. (Info: None) '
'Was the task killed externally?'
)
scheduler.processor_agent.reset_mock()
# ti in success state
ti1.state = State.SUCCESS
session.merge(ti1)
session.commit()
executor.event_buffer[ti1.key] = State.SUCCESS, None
scheduler._process_executor_events(simple_dag_bag=dagbag1)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.SUCCESS)
scheduler.processor_agent.send_callback_to_execute.assert_not_called()
mock_stats_incr.assert_called_once_with('scheduler.tasks.killed_externally')
def test_process_executor_events_uses_inmemory_try_number(self):
execution_date = DEFAULT_DATE
dag_id = "dag_id"
task_id = "task_id"
try_number = 42
scheduler = SchedulerJob()
executor = MagicMock()
event_buffer = {
TaskInstanceKey(dag_id, task_id, execution_date, try_number): (State.SUCCESS, None)
}
executor.get_event_buffer.return_value = event_buffer
scheduler.executor = executor
processor_agent = MagicMock()
scheduler.processor_agent = processor_agent
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task = DummyOperator(dag=dag, task_id=task_id)
with create_session() as session:
ti = TaskInstance(task, DEFAULT_DATE)
ti.state = State.SUCCESS
session.merge(ti)
scheduler._process_executor_events(simple_dag_bag=MagicMock())
# Assert that the event_buffer is empty, so the task was popped using the right
# task instance key
self.assertEqual(event_buffer, {})
def test_execute_task_instances_is_paused_wont_execute(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_is_paused_wont_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dr1 = dag_file_processor.create_dag_run(dag)
ti1 = TaskInstance(task1, DEFAULT_DATE)
ti1.state = State.SCHEDULED
dr1.state = State.RUNNING
dagmodel = DagModel()
dagmodel.dag_id = dag_id
dagmodel.is_paused = True
session.merge(ti1)
session.merge(dr1)
session.add(dagmodel)
session.commit()
scheduler._execute_task_instances(dagbag)
ti1.refresh_from_db()
self.assertEqual(State.SCHEDULED, ti1.state)
def test_execute_task_instances_no_dagrun_task_will_execute(self):
"""
Tests that tasks without dagrun still get executed.
"""
dag_id = 'SchedulerJobTest.test_execute_task_instances_no_dagrun_task_will_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dag_file_processor.create_dag_run(dag)
ti1 = TaskInstance(task1, DEFAULT_DATE)
ti1.state = State.SCHEDULED
ti1.execution_date = ti1.execution_date + datetime.timedelta(days=1)
session.merge(ti1)
session.commit()
scheduler._execute_task_instances(dagbag)
ti1.refresh_from_db()
self.assertEqual(State.QUEUED, ti1.state)
def test_execute_task_instances_backfill_tasks_wont_execute(self):
"""
Tests that backfill tasks won't get executed.
"""
dag_id = 'SchedulerJobTest.test_execute_task_instances_backfill_tasks_wont_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dr1 = dag_file_processor.create_dag_run(dag)
dr1.run_type = DagRunType.BACKFILL_JOB.value
ti1 = TaskInstance(task1, dr1.execution_date)
ti1.refresh_from_db()
ti1.state = State.SCHEDULED
session.merge(ti1)
session.merge(dr1)
session.commit()
self.assertTrue(dr1.is_backfill)
scheduler._execute_task_instances(dagbag)
ti1.refresh_from_db()
self.assertEqual(State.SCHEDULED, ti1.state)
def test_find_executable_task_instances_backfill_nodagrun(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_backfill_nodagrun'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dr1 = dag_file_processor.create_dag_run(dag)
dr2 = dag_file_processor.create_dag_run(dag)
dr2.run_type = DagRunType.BACKFILL_JOB.value
ti_no_dagrun = TaskInstance(task1, DEFAULT_DATE - datetime.timedelta(days=1))
ti_backfill = TaskInstance(task1, dr2.execution_date)
ti_with_dagrun = TaskInstance(task1, dr1.execution_date)
# ti_with_paused
ti_no_dagrun.state = State.SCHEDULED
ti_backfill.state = State.SCHEDULED
ti_with_dagrun.state = State.SCHEDULED
session.merge(dr2)
session.merge(ti_no_dagrun)
session.merge(ti_backfill)
session.merge(ti_with_dagrun)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
session=session)
self.assertEqual(2, len(res))
res_keys = map(lambda x: x.key, res)
self.assertIn(ti_no_dagrun.key, res_keys)
self.assertIn(ti_with_dagrun.key, res_keys)
def test_find_executable_task_instances_pool(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_pool'
task_id_1 = 'dummy'
task_id_2 = 'dummydummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1, pool='a')
task2 = DummyOperator(dag=dag, task_id=task_id_2, pool='b')
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dr1 = dag_file_processor.create_dag_run(dag)
dr2 = dag_file_processor.create_dag_run(dag)
tis = ([
TaskInstance(task1, dr1.execution_date),
TaskInstance(task2, dr1.execution_date),
TaskInstance(task1, dr2.execution_date),
TaskInstance(task2, dr2.execution_date)
])
for ti in tis:
ti.state = State.SCHEDULED
session.merge(ti)
pool = Pool(pool='a', slots=1, description='haha')
pool2 = Pool(pool='b', slots=100, description='haha')
session.add(pool)
session.add(pool2)
session.commit()
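# Pool 'a' has only one slot, so just one of task1's two instances can be picked up;
# pool 'b' has room for both of task2's instances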
res = scheduler._find_executable_task_instances(
dagbag,
session=session)
session.commit()
self.assertEqual(3, len(res))
res_keys = []
for ti in res:
res_keys.append(ti.key)
self.assertIn(tis[0].key, res_keys)
self.assertIn(tis[1].key, res_keys)
self.assertIn(tis[3].key, res_keys)
def test_find_executable_task_instances_in_default_pool(self):
set_default_pool_slots(1)
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_in_default_pool'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
op1 = DummyOperator(dag=dag, task_id='dummy1')
op2 = DummyOperator(dag=dag, task_id='dummy2')
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
executor = MockExecutor(do_update=True)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob(executor=executor)
dr1 = dag_file_processor.create_dag_run(dag)
dr2 = dag_file_processor.create_dag_run(dag)
ti1 = TaskInstance(task=op1, execution_date=dr1.execution_date)
ti2 = TaskInstance(task=op2, execution_date=dr2.execution_date)
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session = settings.Session()
session.merge(ti1)
session.merge(ti2)
session.commit()
# Two tasks w/o pool up for execution and our default pool size is 1
res = scheduler._find_executable_task_instances(
dagbag,
session=session)
self.assertEqual(1, len(res))
ti2.state = State.RUNNING
session.merge(ti2)
session.commit()
# One task w/o pool up for execution and one task running
res = scheduler._find_executable_task_instances(
dagbag,
session=session)
self.assertEqual(0, len(res))
session.close()
def test_nonexistent_pool(self):
dag_id = 'SchedulerJobTest.test_nonexistent_pool'
task_id = 'dummy_wrong_pool'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task = DummyOperator(dag=dag, task_id=task_id, pool="this_pool_doesnt_exist")
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dr = dag_file_processor.create_dag_run(dag)
ti = TaskInstance(task, dr.execution_date)
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
session=session)
session.commit()
self.assertEqual(0, len(res))
def test_find_executable_task_instances_none(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_none'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
DummyOperator(dag=dag, task_id=task_id_1)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dag_file_processor.create_dag_run(dag)
session.commit()
self.assertEqual(0, len(scheduler._find_executable_task_instances(
dagbag,
session=session)))
def test_find_executable_task_instances_concurrency(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dr1 = dag_file_processor.create_dag_run(dag)
dr2 = dag_file_processor.create_dag_run(dag)
dr3 = dag_file_processor.create_dag_run(dag)
ti1 = TaskInstance(task1, dr1.execution_date)
ti2 = TaskInstance(task1, dr2.execution_date)
ti3 = TaskInstance(task1, dr3.execution_date)
ti1.state = State.RUNNING
ti2.state = State.SCHEDULED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
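# DAG concurrency is 2 and one task instance is already running, so only one more can be selected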
res = scheduler._find_executable_task_instances(
dagbag,
session=session)
self.assertEqual(1, len(res))
res_keys = map(lambda x: x.key, res)
self.assertIn(ti2.key, res_keys)
ti2.state = State.RUNNING
session.merge(ti2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
session=session)
self.assertEqual(0, len(res))
def test_find_executable_task_instances_concurrency_queued(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency_queued'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
task1 = DummyOperator(dag=dag, task_id='dummy1')
task2 = DummyOperator(dag=dag, task_id='dummy2')
task3 = DummyOperator(dag=dag, task_id='dummy3')
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dag_run = dag_file_processor.create_dag_run(dag)
ti1 = TaskInstance(task1, dag_run.execution_date)
ti2 = TaskInstance(task2, dag_run.execution_date)
ti3 = TaskInstance(task3, dag_run.execution_date)
ti1.state = State.RUNNING
ti2.state = State.QUEUED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
session=session)
self.assertEqual(1, len(res))
self.assertEqual(res[0].key, ti3.key)
def test_find_executable_task_instances_task_concurrency(self): # pylint: disable=too-many-statements
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_task_concurrency'
task_id_1 = 'dummy'
task_id_2 = 'dummy2'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1, task_concurrency=2)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
executor = MockExecutor(do_update=True)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob(executor=executor)
session = settings.Session()
dr1 = dag_file_processor.create_dag_run(dag)
dr2 = dag_file_processor.create_dag_run(dag)
dr3 = dag_file_processor.create_dag_run(dag)
ti1_1 = TaskInstance(task1, dr1.execution_date)
ti2 = TaskInstance(task2, dr1.execution_date)
ti1_1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
session=session)
self.assertEqual(2, len(res))
ti1_1.state = State.RUNNING
ti2.state = State.RUNNING
ti1_2 = TaskInstance(task1, dr2.execution_date)
ti1_2.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti2)
session.merge(ti1_2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
session=session)
self.assertEqual(1, len(res))
ti1_2.state = State.RUNNING
ti1_3 = TaskInstance(task1, dr3.execution_date)
ti1_3.state = State.SCHEDULED
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
session=session)
self.assertEqual(0, len(res))
ti1_1.state = State.SCHEDULED
ti1_2.state = State.SCHEDULED
ti1_3.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
session=session)
self.assertEqual(2, len(res))
ti1_1.state = State.RUNNING
ti1_2.state = State.SCHEDULED
ti1_3.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
session=session)
self.assertEqual(1, len(res))
def test_change_state_for_executable_task_instances_no_tis(self):
scheduler = SchedulerJob()
session = settings.Session()
res = scheduler._change_state_for_executable_task_instances(
[], session)
self.assertEqual(0, len(res))
def test_change_state_for_executable_task_instances_no_tis_with_state(self):
dag_id = 'SchedulerJobTest.test_change_state_for__no_tis_with_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dr1 = dag_file_processor.create_dag_run(dag)
dr2 = dag_file_processor.create_dag_run(dag)
dr3 = dag_file_processor.create_dag_run(dag)
ti1 = TaskInstance(task1, dr1.execution_date)
ti2 = TaskInstance(task1, dr2.execution_date)
ti3 = TaskInstance(task1, dr3.execution_date)
ti1.state = State.RUNNING
ti2.state = State.RUNNING
ti3.state = State.RUNNING
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._change_state_for_executable_task_instances(
[ti1, ti2, ti3],
session)
self.assertEqual(0, len(res))
def test_enqueue_task_instances_with_queued_state(self):
dag_id = 'SchedulerJobTest.test_enqueue_task_instances_with_queued_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dr1 = dag_file_processor.create_dag_run(dag)
ti1 = TaskInstance(task1, dr1.execution_date)
session.merge(ti1)
session.commit()
with patch.object(BaseExecutor, 'queue_command') as mock_queue_command:
scheduler._enqueue_task_instances_with_queued_state(dagbag, [ti1])
assert mock_queue_command.called
def test_execute_task_instances_nothing(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_nothing'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = SimpleDagBag([])
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dr1 = dag_file_processor.create_dag_run(dag)
ti1 = TaskInstance(task1, dr1.execution_date)
ti1.state = State.SCHEDULED
session.merge(ti1)
session.commit()
self.assertEqual(0, scheduler._execute_task_instances(dagbag))
def test_execute_task_instances(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances'
task_id_1 = 'dummy_task'
task_id_2 = 'dummy_task_nonexistent_queue'
# It is important that len(tasks) is less than concurrency,
# because previously scheduler._execute_task_instances would only
# check the number of tasks once, so if concurrency was 3
# we could execute arbitrarily many tasks in the second run
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
# create first dag run with 1 running and 1 queued
dr1 = dag_file_processor.create_dag_run(dag)
ti1 = TaskInstance(task1, dr1.execution_date)
ti2 = TaskInstance(task2, dr1.execution_date)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = State.RUNNING
ti2.state = State.RUNNING
session.merge(ti1)
session.merge(ti2)
session.commit()
self.assertEqual(State.RUNNING, dr1.state)
self.assertEqual(
2,
DAG.get_num_task_instances(
dag_id, dag.task_ids, states=[State.RUNNING], session=session
)
)
# create second dag run
dr2 = dag_file_processor.create_dag_run(dag)
ti3 = TaskInstance(task1, dr2.execution_date)
ti4 = TaskInstance(task2, dr2.execution_date)
ti3.refresh_from_db()
ti4.refresh_from_db()
# manually set to scheduled so we can pick them up
ti3.state = State.SCHEDULED
ti4.state = State.SCHEDULED
session.merge(ti3)
session.merge(ti4)
session.commit()
self.assertEqual(State.RUNNING, dr2.state)
res = scheduler._execute_task_instances(dagbag)
# check that concurrency is respected
ti1.refresh_from_db()
ti2.refresh_from_db()
ti3.refresh_from_db()
ti4.refresh_from_db()
self.assertEqual(
3,
DAG.get_num_task_instances(
dag_id, dag.task_ids, states=[State.RUNNING, State.QUEUED], session=session
)
)
self.assertEqual(State.RUNNING, ti1.state)
self.assertEqual(State.RUNNING, ti2.state)
six.assertCountEqual(self, [State.QUEUED, State.SCHEDULED], [ti3.state, ti4.state])
self.assertEqual(1, res)
def test_execute_task_instances_limit(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_limit'
task_id_1 = 'dummy_task'
task_id_2 = 'dummy_task_2'
# It is important that len(tasks) is less than concurrency,
# because previously scheduler._execute_task_instances would only
# check the number of tasks once, so if concurrency was 3
# we could execute arbitrarily many tasks in the second run
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
scheduler.max_tis_per_query = 3
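# max_tis_per_query limits how many task instances are handled per query, but the
# scheduler still queues all eight eligible task instances across multiple batches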
session = settings.Session()
tis = []
for _ in range(0, 4):
dr = dag_file_processor.create_dag_run(dag)
ti1 = TaskInstance(task1, dr.execution_date)
ti2 = TaskInstance(task2, dr.execution_date)
tis.append(ti1)
tis.append(ti2)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.commit()
res = scheduler._execute_task_instances(dagbag)
self.assertEqual(8, res)
for ti in tis:
ti.refresh_from_db()
self.assertEqual(State.QUEUED, ti.state)
@pytest.mark.quarantined
def test_change_state_for_tis_without_dagrun(self):
dag1 = DAG(dag_id='test_change_state_for_tis_without_dagrun', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag1, owner='airflow')
DummyOperator(task_id='dummy_b', dag=dag1, owner='airflow')
dag1 = SerializedDAG.from_dict(SerializedDAG.to_dict(dag1))
dag2 = DAG(dag_id='test_change_state_for_tis_without_dagrun_dont_change', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag2, owner='airflow')
dag2 = SerializedDAG.from_dict(SerializedDAG.to_dict(dag2))
dag3 = DAG(dag_id='test_change_state_for_tis_without_dagrun_no_dagrun', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag3, owner='airflow')
dag3 = SerializedDAG.from_dict(SerializedDAG.to_dict(dag3))
session = settings.Session()
dr1 = dag1.create_dagrun(run_type=DagRunType.SCHEDULED,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
dr2 = dag2.create_dagrun(run_type=DagRunType.SCHEDULED,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti1a = dr1.get_task_instance(task_id='dummy', session=session)
ti1a.state = State.SCHEDULED
ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
ti1b.state = State.SUCCESS
session.commit()
ti2 = dr2.get_task_instance(task_id='dummy', session=session)
ti2.state = State.SCHEDULED
session.commit()
ti3 = TaskInstance(dag3.get_task('dummy'), DEFAULT_DATE)
ti3.state = State.SCHEDULED
session.merge(ti3)
session.commit()
dagbag = self._make_simple_dag_bag([dag1, dag2, dag3])
scheduler = SchedulerJob(num_runs=0)
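# Only ti3, which has no DagRun at all, should be reset to NONE; TIs belonging to
# RUNNING dag runs keep their state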
scheduler._change_state_for_tis_without_dagrun(
simple_dag_bag=dagbag,
old_states=[State.SCHEDULED, State.QUEUED],
new_state=State.NONE,
session=session)
ti1a = dr1.get_task_instance(task_id='dummy', session=session)
ti1a.refresh_from_db(session=session)
self.assertEqual(ti1a.state, State.SCHEDULED)
ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
ti1b.refresh_from_db(session=session)
self.assertEqual(ti1b.state, State.SUCCESS)
ti2 = dr2.get_task_instance(task_id='dummy', session=session)
ti2.refresh_from_db(session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
ti3.refresh_from_db(session=session)
self.assertEqual(ti3.state, State.NONE)
dr1.refresh_from_db(session=session)
dr1.state = State.FAILED
# persist the dag run state change before re-running the check
session.merge(dr1)
session.commit()
scheduler._change_state_for_tis_without_dagrun(
simple_dag_bag=dagbag,
old_states=[State.SCHEDULED, State.QUEUED],
new_state=State.NONE,
session=session)
ti1a.refresh_from_db(session=session)
self.assertEqual(ti1a.state, State.SCHEDULED)
# don't touch ti1b
ti1b.refresh_from_db(session=session)
self.assertEqual(ti1b.state, State.SUCCESS)
# don't touch ti2
ti2.refresh_from_db(session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
def test_change_state_for_tasks_failed_to_execute(self):
dag = DAG(
dag_id='dag_id',
start_date=DEFAULT_DATE)
task = DummyOperator(
task_id='task_id',
dag=dag,
owner='airflow')
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
# If there's no left over task in executor.queued_tasks, nothing happens
session = settings.Session()
scheduler_job = SchedulerJob()
mock_logger = mock.MagicMock()
test_executor = MockExecutor(do_update=False)
scheduler_job.executor = test_executor
scheduler_job._logger = mock_logger
scheduler_job._change_state_for_tasks_failed_to_execute() # pylint: disable=no-value-for-parameter
mock_logger.info.assert_not_called()
# Tasks failed to execute with QUEUED state will be set to SCHEDULED state.
session.query(TaskInstance).delete()
session.commit()
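# Build a queued-task key in the shape the executor uses:
# (dag_id, task_id, execution_date, try_number).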
key = 'dag_id', 'task_id', DEFAULT_DATE, 1
test_executor.queued_tasks[key] = 'value'
ti = TaskInstance(task, DEFAULT_DATE)
ti.state = State.QUEUED
session.merge(ti)
session.commit()
scheduler_job._change_state_for_tasks_failed_to_execute() # pylint: disable=no-value-for-parameter
ti.refresh_from_db()
self.assertEqual(State.SCHEDULED, ti.state)
# Tasks failed to execute with RUNNING state will not be set to SCHEDULED state.
session.query(TaskInstance).delete()
session.commit()
ti.state = State.RUNNING
session.merge(ti)
session.commit()
scheduler_job._change_state_for_tasks_failed_to_execute() # pylint: disable=no-value-for-parameter
ti.refresh_from_db()
self.assertEqual(State.RUNNING, ti.state)
def test_reset_state_for_orphaned_tasks(self):
session = settings.Session()
dag = DAG(
'test_execute_helper_reset_orphaned_tasks',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag.clear()
dr = dag.create_dagrun(run_type=DagRunType.SCHEDULED,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
dr2 = dag.create_dagrun(run_type=DagRunType.BACKFILL_JOB,
state=State.RUNNING,
execution_date=DEFAULT_DATE + datetime.timedelta(1),
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.SCHEDULED
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
ti2.state = State.SCHEDULED
session.commit()
processor = mock.MagicMock()
scheduler = SchedulerJob(num_runs=0)
scheduler.processor_agent = processor
scheduler.reset_state_for_orphaned_tasks()
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti.state, State.NONE)
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
@parameterized.expand([
[State.UP_FOR_RETRY, State.FAILED],
[State.QUEUED, State.NONE],
[State.SCHEDULED, State.NONE],
[State.UP_FOR_RESCHEDULE, State.NONE],
])
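# Each case pairs an initial task state with the state the task is expected to
# end up in after the scheduler loop sees a dag run that is already FAILED.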
def test_scheduler_loop_should_change_state_for_tis_without_dagrun(self,
initial_task_state,
expected_task_state):
session = settings.Session()
dag = DAG(
'test_execute_helper_should_change_state_for_tis_without_dagrun',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
# Create DAG run with FAILED state
dag.clear()
dr = dag.create_dagrun(run_type=DagRunType.SCHEDULED,
state=State.FAILED,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = initial_task_state
session.commit()
# Create scheduler and mock calls to the processor. Poll interval is 0 to
# avoid sleeping. The processor's done flag is set to True to exit the
# loop immediately.
scheduler = SchedulerJob(num_runs=0, processor_poll_interval=0)
executor = MockExecutor(do_update=False)
executor.queued_tasks  # no-op attribute access; the mock executor's queue starts out empty
scheduler.executor = executor
processor = mock.MagicMock()
processor.harvest_serialized_dags.return_value = [
SerializedDAG.from_dict(SerializedDAG.to_dict(dag))]
processor.done = True
scheduler.processor_agent = processor
scheduler._run_scheduler_loop()
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti.state, expected_task_state)
@provide_session
def evaluate_dagrun(
self,
dag_id,
expected_task_states, # dict of task_id: state
dagrun_state,
run_kwargs=None,
advance_execution_date=False,
session=None): # pylint: disable=unused-argument
"""
Helper for testing DagRun states with simple two-task DAGS.
This is hackish: a dag run is created but its tasks are
run by a backfill.
"""
if run_kwargs is None:
run_kwargs = {}
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag = self.dagbag.get_dag(dag_id)
dr = dag_file_processor.create_dag_run(dag)
if advance_execution_date:
# run a second time to schedule a dagrun after the start_date
dr = dag_file_processor.create_dag_run(dag)
ex_date = dr.execution_date
for tid, state in expected_task_states.items():
if state != State.FAILED:
continue
self.null_exec.mock_task_fail(dag_id, tid, ex_date)
try:
# This needs a _REAL_ dag, not the serialized version
dag.run(start_date=ex_date, end_date=ex_date, executor=self.null_exec, **run_kwargs)
except AirflowException:
pass
# test tasks
for task_id, expected_state in expected_task_states.items():
task = dag.get_task(task_id)
ti = TaskInstance(task, ex_date)
ti.refresh_from_db()
self.assertEqual(ti.state, expected_state)
# load dagrun
dr = DagRun.find(dag_id=dag_id, execution_date=ex_date)
dr = dr[0]
dr.dag = dag
self.assertEqual(dr.state, dagrun_state)
def test_dagrun_fail(self):
"""
DagRuns with one failed and one incomplete root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_fail',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.UPSTREAM_FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_success(self):
"""
DagRuns with one failed and one successful root task -> SUCCESS
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_success',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.SUCCESS,
},
dagrun_state=State.SUCCESS)
def test_dagrun_root_fail(self):
"""
DagRuns with one successful and one failed root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_root_fail',
expected_task_states={
'test_dagrun_succeed': State.SUCCESS,
'test_dagrun_fail': State.FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_root_fail_unfinished(self):
"""
DagRuns with one unfinished and one failed root task -> RUNNING
"""
# TODO: this should live in test_dagrun.py
# Run both the failed and successful tasks
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag_id = 'test_dagrun_states_root_fail_unfinished'
dag = self.dagbag.get_dag(dag_id)
dr = dag_file_processor.create_dag_run(dag)
self.null_exec.mock_task_fail(dag_id, 'test_dagrun_fail', DEFAULT_DATE)
with self.assertRaises(AirflowException):
dag.run(start_date=dr.execution_date, end_date=dr.execution_date, executor=self.null_exec)
# Mark the successful task as never having run since we want to see if the
# dagrun will be in a running state despite having an unfinished task.
with create_session() as session:
ti = dr.get_task_instance('test_dagrun_unfinished', session=session)
ti.state = State.NONE
session.commit()
dr.update_state()
self.assertEqual(dr.state, State.RUNNING)
def test_dagrun_root_after_dagrun_unfinished(self):
"""
DagRuns with one successful and one future root task -> SUCCESS
Note: the DagRun could still be in the RUNNING state during CI.
"""
dag_id = 'test_dagrun_states_root_future'
dag = self.dagbag.get_dag(dag_id)
scheduler = SchedulerJob(
dag_id,
num_runs=1,
executor=self.null_exec,
subdir=dag.fileloc)
scheduler.run()
first_run = DagRun.find(dag_id=dag_id, execution_date=DEFAULT_DATE)[0]
ti_ids = [(ti.task_id, ti.state) for ti in first_run.get_task_instances()]
self.assertEqual(ti_ids, [('current', State.SUCCESS)])
self.assertIn(first_run.state, [State.SUCCESS, State.RUNNING])
def test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date(self):
"""
DagRun is marked a success if ignore_first_depends_on_past=True.
Test that an otherwise-deadlocked dagrun is marked as a success
if ignore_first_depends_on_past=True and the dagrun execution_date
is after the start_date.
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
advance_execution_date=True,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_dagrun_deadlock_ignore_depends_on_past(self):
"""
Test that ignore_first_depends_on_past doesn't affect results
(this is the same test as
test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date except
that start_date == execution_date so depends_on_past is irrelevant).
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_scheduler_start_date(self):
"""
Test that the scheduler respects start_dates, even when DAGS have run
"""
with create_session() as session:
dag_id = 'test_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertGreater(dag.start_date, datetime.datetime.now(timezone.utc))
scheduler = SchedulerJob(dag_id,
executor=self.null_exec,
subdir=dag.fileloc,
num_runs=1)
scheduler.run()
# zero tasks ran
self.assertEqual(
len(session.query(TaskInstance).filter(TaskInstance.dag_id == dag_id).all()), 0)
session.commit()
self.assertListEqual([], self.null_exec.sorted_tasks)
# Previously, running this backfill would kick off the scheduler because
# it would take the most recent run and start from there. That behavior
# still exists, but now it only does so if the run is after the start
# date.
bf_exec = MockExecutor()
backfill = BackfillJob(
executor=bf_exec,
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
backfill.run()
# one task ran
self.assertEqual(
len(session.query(TaskInstance).filter(TaskInstance.dag_id == dag_id).all()), 1)
self.assertListEqual(
[
(TaskInstanceKey(dag.dag_id, 'dummy', DEFAULT_DATE, 1), (State.SUCCESS, None)),
],
bf_exec.sorted_tasks
)
session.commit()
scheduler = SchedulerJob(dag_id,
executor=self.null_exec,
subdir=dag.fileloc,
num_runs=1)
scheduler.run()
# still one task
self.assertEqual(
len(session.query(TaskInstance).filter(TaskInstance.dag_id == dag_id).all()), 1)
session.commit()
self.assertListEqual([], self.null_exec.sorted_tasks)
def test_scheduler_task_start_date(self):
"""
Test that the scheduler respects task start dates that are different from DAG start dates
"""
dag_id = 'test_task_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_id,
executor=self.null_exec,
subdir=os.path.join(TEST_DAG_FOLDER, 'test_scheduler_dags.py'),
num_runs=2)
scheduler.run()
session = settings.Session()
tiq = session.query(TaskInstance).filter(TaskInstance.dag_id == dag_id)
ti1s = tiq.filter(TaskInstance.task_id == 'dummy1').all()
ti2s = tiq.filter(TaskInstance.task_id == 'dummy2').all()
self.assertEqual(len(ti1s), 0)
self.assertEqual(len(ti2s), 2)
for task in ti2s:
self.assertEqual(task.state, State.SUCCESS)
def test_scheduler_multiprocessing(self):
"""
Test that the scheduler can successfully queue multiple dags in parallel
"""
dag_ids = ['test_start_date_scheduling', 'test_dagrun_states_success']
for dag_id in dag_ids:
dag = self.dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
executor=self.null_exec,
subdir=os.path.join(TEST_DAG_FOLDER, 'test_scheduler_dags.py'),
num_runs=1)
scheduler.run()
# zero tasks ran
dag_id = 'test_start_date_scheduling'
session = settings.Session()
self.assertEqual(
len(session.query(TaskInstance).filter(TaskInstance.dag_id == dag_id).all()), 0)
@conf_vars({("core", "mp_start_method"): "spawn"})
def test_scheduler_multiprocessing_with_spawn_method(self):
"""
Test that the scheduler can successfully queue multiple dags in parallel
when using "spawn" mode of multiprocessing. (Fork is default on Linux and older OSX)
"""
dag_ids = ['test_start_date_scheduling', 'test_dagrun_states_success']
for dag_id in dag_ids:
dag = self.dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
executor=self.null_exec,
subdir=os.path.join(
TEST_DAG_FOLDER, 'test_scheduler_dags.py'),
num_runs=1)
scheduler.run()
# zero tasks ran
dag_id = 'test_start_date_scheduling'
with create_session() as session:
self.assertEqual(
session.query(TaskInstance).filter(TaskInstance.dag_id == dag_id).count(), 0)
def test_scheduler_verify_pool_full(self):
"""
Test task instances not queued when pool is full
"""
dag = DAG(
dag_id='test_scheduler_verify_pool_full',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow',
pool='test_scheduler_verify_pool_full')
session = settings.Session()
pool = Pool(pool='test_scheduler_verify_pool_full', slots=1)
session.add(pool)
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob(executor=self.null_exec)
# Create 2 dagruns, which will create 2 task instances.
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEqual(dr.execution_date, DEFAULT_DATE)
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
dag_runs = DagRun.find(dag_id="test_scheduler_verify_pool_full")
task_instances_list = dag_file_processor._process_task_instances(dag, dag_runs=dag_runs)
self.assertEqual(len(task_instances_list), 2)
dagbag = self._make_simple_dag_bag([dag])
# Recreated part of the scheduler here, to kick off tasks -> executor
for ti_key in task_instances_list:
task = dag.get_task(ti_key[1])
ti = TaskInstance(task, ti_key[2])
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
session.merge(ti)
session.commit()
self.assertEqual(len(scheduler.executor.queued_tasks), 0, "Check test pre-condition")
scheduler._execute_task_instances(dagbag, session=session)
self.assertEqual(len(scheduler.executor.queued_tasks), 1)
def test_scheduler_verify_pool_full_2_slots_per_task(self):
"""
Test task instances not queued when pool is full.
Variation with non-default pool_slots
"""
dag = DAG(
dag_id='test_scheduler_verify_pool_full_2_slots_per_task',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow',
pool='test_scheduler_verify_pool_full_2_slots_per_task',
pool_slots=2,
)
session = settings.Session()
pool = Pool(pool='test_scheduler_verify_pool_full_2_slots_per_task', slots=6)
session.add(pool)
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob(executor=self.null_exec)
# Create 5 dagruns, which will create 5 task instances.
for _ in range(5):
dag_file_processor.create_dag_run(dag)
dag_runs = DagRun.find(dag_id="test_scheduler_verify_pool_full_2_slots_per_task")
task_instances_list = dag_file_processor._process_task_instances(dag, dag_runs=dag_runs)
self.assertEqual(len(task_instances_list), 5)
dagbag = self._make_simple_dag_bag([dag])
# Recreated part of the scheduler here, to kick off tasks -> executor
for ti_key in task_instances_list:
task = dag.get_task(ti_key[1])
ti = TaskInstance(task, ti_key[2])
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
session.merge(ti)
session.commit()
self.assertEqual(len(scheduler.executor.queued_tasks), 0, "Check test pre-condition")
scheduler._execute_task_instances(dagbag, session=session)
# As tasks require 2 slots, only 3 can fit into 6 available
self.assertEqual(len(scheduler.executor.queued_tasks), 3)
def test_scheduler_verify_priority_and_slots(self):
"""
Test task instances with higher priority are not queued
when pool does not have enough slots.
Though tasks with lower priority might be executed.
"""
dag = DAG(
dag_id='test_scheduler_verify_priority_and_slots',
start_date=DEFAULT_DATE)
# Medium priority, not enough slots
DummyOperator(
task_id='test_scheduler_verify_priority_and_slots_t0',
dag=dag,
owner='airflow',
pool='test_scheduler_verify_priority_and_slots',
pool_slots=2,
priority_weight=2,
)
# High priority, occupies first slot
DummyOperator(
task_id='test_scheduler_verify_priority_and_slots_t1',
dag=dag,
owner='airflow',
pool='test_scheduler_verify_priority_and_slots',
pool_slots=1,
priority_weight=3,
)
# Low priority, occupies second slot
DummyOperator(
task_id='test_scheduler_verify_priority_and_slots_t2',
dag=dag,
owner='airflow',
pool='test_scheduler_verify_priority_and_slots',
pool_slots=1,
priority_weight=1,
)
session = settings.Session()
pool = Pool(pool='test_scheduler_verify_priority_and_slots', slots=2)
session.add(pool)
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob(executor=self.null_exec)
dag_file_processor.create_dag_run(dag)
dag_runs = DagRun.find(dag_id="test_scheduler_verify_priority_and_slots")
task_instances_list = dag_file_processor._process_task_instances(dag, dag_runs=dag_runs)
self.assertEqual(len(task_instances_list), 3)
dagbag = self._make_simple_dag_bag([dag])
# Recreated part of the scheduler here, to kick off tasks -> executor
for ti_key in task_instances_list:
task = dag.get_task(ti_key[1])
ti = TaskInstance(task, ti_key[2])
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
session.merge(ti)
session.commit()
self.assertEqual(len(scheduler.executor.queued_tasks), 0, "Check test pre-condition")
scheduler._execute_task_instances(dagbag, session=session)
# Only second and third
self.assertEqual(len(scheduler.executor.queued_tasks), 2)
ti0 = session.query(TaskInstance)\
.filter(TaskInstance.task_id == 'test_scheduler_verify_priority_and_slots_t0').first()
self.assertEqual(ti0.state, State.SCHEDULED)
ti1 = session.query(TaskInstance)\
.filter(TaskInstance.task_id == 'test_scheduler_verify_priority_and_slots_t1').first()
self.assertEqual(ti1.state, State.QUEUED)
ti2 = session.query(TaskInstance)\
.filter(TaskInstance.task_id == 'test_scheduler_verify_priority_and_slots_t2').first()
self.assertEqual(ti2.state, State.QUEUED)
def test_scheduler_reschedule(self):
"""
Checks if tasks that are not taken up by the executor
get rescheduled
"""
executor = MockExecutor(do_update=False)
dagbag = DagBag(dag_folder=os.path.join(settings.DAGS_FOLDER, "no_dags.py"))
dagbag.dags.clear()
dag = DAG(
dag_id='test_scheduler_reschedule',
start_date=DEFAULT_DATE)
dummy_task = BashOperator(
task_id='dummy',
dag=dag,
owner='airflow',
bash_command='echo 1',
)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag.clear()
dag.is_subdag = False
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
dagbag.bag_dag(dag=dag, root_dag=dag)
@mock.patch('airflow.jobs.scheduler_job.DagBag', return_value=dagbag)
def do_schedule(mock_dagbag):
# Use an empty file since the above mock will return the expected DAGs.
# Also specify only a single file so that it doesn't try to schedule the
# above DAG repeatedly.
with conf_vars({('core', 'mp_start_method'): 'fork'}):
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(settings.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
do_schedule() # pylint: disable=no-value-for-parameter
with create_session() as session:
ti = session.query(TaskInstance).filter(TaskInstance.dag_id == dag.dag_id,
TaskInstance.task_id == dummy_task.task_id).first()
self.assertEqual(0, len(executor.queued_tasks))
self.assertEqual(State.SCHEDULED, ti.state)
executor.do_update = True
do_schedule() # pylint: disable=no-value-for-parameter
self.assertEqual(0, len(executor.queued_tasks))
ti.refresh_from_db()
self.assertEqual(State.SUCCESS, ti.state)
def test_retry_still_in_executor(self):
"""
Checks if the scheduler does not put a task in limbo, when a task is retried
but is still present in the executor.
"""
executor = MockExecutor(do_update=False)
dagbag = DagBag(dag_folder=os.path.join(settings.DAGS_FOLDER, "no_dags.py"))
dagbag.dags.clear()
dag = DAG(
dag_id='test_retry_still_in_executor',
start_date=DEFAULT_DATE,
schedule_interval="@once")
dag_task1 = BashOperator(
task_id='test_retry_handling_op',
bash_command='exit 1',
retries=1,
dag=dag,
owner='airflow')
dag.clear()
dag.is_subdag = False
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
dagbag.bag_dag(dag=dag, root_dag=dag)
@mock.patch('airflow.jobs.scheduler_job.DagBag', return_value=dagbag)
def do_schedule(mock_dagbag):
# Use an empty file since the above mock will return the expected DAGs.
# Also specify only a single file so that it doesn't try to schedule the
# above DAG repeatedly.
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(settings.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
do_schedule() # pylint: disable=no-value-for-parameter
with create_session() as session:
ti = session.query(TaskInstance).filter(TaskInstance.dag_id == 'test_retry_still_in_executor',
TaskInstance.task_id == 'test_retry_handling_op').first()
ti.task = dag_task1
# Nothing should be left in queued_tasks, as we don't do updates in
# MockExecutor yet and queued_tasks will be cleared by the scheduler job.
self.assertEqual(0, len(executor.queued_tasks))
def run_with_error(ti, ignore_ti_state=False):
try:
ti.run(ignore_ti_state=ignore_ti_state)
except AirflowException:
pass
self.assertEqual(ti.try_number, 1)
# At this point the scheduler has tried to schedule the task once and
# heartbeated the executor once, which moved the task's state from
# SCHEDULED to QUEUED and then back to SCHEDULED. To fail the task
# execution we need to ignore the TaskInstance state, as SCHEDULED is not
# a valid state to start executing a task from.
run_with_error(ti, ignore_ti_state=True)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti.try_number, 2)
with create_session() as session:
ti.refresh_from_db(lock_for_update=True, session=session)
ti.state = State.SCHEDULED
session.merge(ti)
# do schedule
do_schedule() # pylint: disable=no-value-for-parameter
# MockExecutor is not aware of the TaskInstance since we don't do updates
# yet, so no trace of this TaskInstance is left in the executor.
self.assertFalse(executor.has_task(ti))
self.assertEqual(ti.state, State.SCHEDULED)
# To verify that task does get re-queued.
executor.do_update = True
do_schedule() # pylint: disable=no-value-for-parameter
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
@pytest.mark.quarantined
def test_retry_handling_job(self):
"""
Integration test of the scheduler not accidentally resetting
the try_numbers for a task
"""
dag = self.dagbag.get_dag('test_retry_handling_job')
dag_task1 = dag.get_task("test_retry_handling_op")
dag.clear()
scheduler = SchedulerJob(dag_id=dag.dag_id,
num_runs=1)
scheduler.heartrate = 0
scheduler.run()
session = settings.Session()
ti = session.query(TaskInstance).filter(TaskInstance.dag_id == dag.dag_id,
TaskInstance.task_id == dag_task1.task_id).first()
# make sure the counter has increased
self.assertEqual(ti.try_number, 2)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
def test_dag_with_system_exit(self):
"""
Test to check that a DAG calling sys.exit() doesn't break the scheduler.
"""
dag_id = 'exit_test_dag'
dag_ids = [dag_id]
dag_directory = os.path.join(settings.DAGS_FOLDER, "..", "dags_with_system_exit")
dag_file = os.path.join(dag_directory, 'b_test_scheduler_dags.py')
dagbag = DagBag(dag_folder=dag_file)
for dag_id in dag_ids:
dag = dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
executor=self.null_exec,
subdir=dag_directory,
num_runs=1)
scheduler.run()
with create_session() as session:
tis = session.query(TaskInstance).filter(TaskInstance.dag_id == dag_id).all()
# Since this dag has no end date, there is a chance that we start and
# finish two dag parsing processes in one loop, so at least one task
# instance should have been created.
self.assertGreaterEqual(
len(tis), 1,
repr(tis))
def test_dag_get_active_runs(self):
"""
Test to check that a DAG returns its active runs
"""
now = timezone.utcnow()
six_hours_ago_to_the_hour = \
(now - datetime.timedelta(hours=6)).replace(minute=0, second=0, microsecond=0)
start_date = six_hours_ago_to_the_hour
dag_name1 = 'get_active_runs_test'
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': start_date
}
dag1 = DAG(dag_name1,
schedule_interval='* * * * *',
max_active_runs=1,
default_args=default_args
)
run_this_1 = DummyOperator(task_id='run_this_1', dag=dag1)
run_this_2 = DummyOperator(task_id='run_this_2', dag=dag1)
run_this_2.set_upstream(run_this_1)
run_this_3 = DummyOperator(task_id='run_this_3', dag=dag1)
run_this_3.set_upstream(run_this_2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag1.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag1.clear()
dr = dag_file_processor.create_dag_run(dag1)
# We had better get a dag run
self.assertIsNotNone(dr)
execution_date = dr.execution_date
running_dates = dag1.get_active_runs()
try:
running_date = running_dates[0]
except Exception: # pylint: disable=broad-except
running_date = 'Except'
self.assertEqual(execution_date, running_date, 'Running Date must match Execution Date')
def test_add_unparseable_file_before_sched_start_creates_import_error(self):
dags_folder = mkdtemp()
try:
with env_vars({('core', 'dags_folder'): dags_folder}):
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_add_unparseable_file_after_sched_start_creates_import_error(self):
dags_folder = mkdtemp()
try:
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_no_import_errors_with_parseable_dag(self):
try:
dags_folder = mkdtemp()
parseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(parseable_filename, 'w') as parseable_file:
parseable_file.writelines(PARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_new_import_error_replaces_old(self):
try:
dags_folder = mkdtemp()
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
# Generate replacement import error (the error will be on the second line now)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(
PARSEABLE_DAG_FILE_CONTENTS +
os.linesep +
UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 2)".format(TEMP_DAG_FILENAME))
def test_remove_error_clears_import_error(self):
try:
dags_folder = mkdtemp()
filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
# Remove the import error from the file
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(
PARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_remove_file_clears_import_error(self):
try:
dags_folder = mkdtemp()
filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
# Rerun the scheduler once the dag file has been removed
self.run_single_scheduler_loop_with_no_dags(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_list_py_file_paths(self):
"""
[JIRA-1357] Test the 'list_py_file_paths' function used by the
scheduler to list and load DAGs.
"""
detected_files = set()
expected_files = set()
# No_dags is empty, _invalid_ is ignored by .airflowignore
ignored_files = {
'no_dags.py',
'test_invalid_cron.py',
'test_zip_invalid_cron.zip',
'test_ignore_this.py',
}
for root, _, files in os.walk(TEST_DAG_FOLDER): # pylint: disable=too-many-nested-blocks
for file_name in files:
if file_name.endswith('.py') or file_name.endswith('.zip'):
if file_name not in ignored_files:
expected_files.add(
'{}/{}'.format(root, file_name))
for file_path in list_py_file_paths(TEST_DAG_FOLDER, include_examples=False):
detected_files.add(file_path)
self.assertEqual(detected_files, expected_files)
ignored_files = {
'helper.py',
}
example_dag_folder = airflow.example_dags.__path__[0]
for root, _, files in os.walk(example_dag_folder): # pylint: disable=too-many-nested-blocks
for file_name in files:
if file_name.endswith('.py') or file_name.endswith('.zip'):
if file_name not in ['__init__.py'] and file_name not in ignored_files:
expected_files.add(os.path.join(root, file_name))
detected_files.clear()
for file_path in list_py_file_paths(TEST_DAG_FOLDER, include_examples=True):
detected_files.add(file_path)
self.assertEqual(detected_files, expected_files)
smart_sensor_dag_folder = airflow.smart_sensor_dags.__path__[0]
for root, _, files in os.walk(smart_sensor_dag_folder):
for file_name in files:
if (file_name.endswith('.py') or file_name.endswith('.zip')) and \
file_name not in ['__init__.py']:
expected_files.add(os.path.join(root, file_name))
detected_files.clear()
for file_path in list_py_file_paths(TEST_DAG_FOLDER,
include_examples=True,
include_smart_sensor=True):
detected_files.add(file_path)
self.assertEqual(detected_files, expected_files)
def test_reset_orphaned_tasks_nothing(self):
"""Try with nothing. """
scheduler = SchedulerJob()
session = settings.Session()
self.assertEqual(
0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_external_triggered_dag(self):
dag_id = 'test_reset_orphaned_tasks_external_triggered_dag'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dr1 = dag_file_processor.create_dag_run(dag, session=session)
ti = dr1.get_task_instances(session=session)[0]
dr1.state = State.RUNNING
ti.state = State.SCHEDULED
dr1.external_trigger = True
session.merge(ti)
session.merge(dr1)
session.commit()
reset_tis = scheduler.reset_state_for_orphaned_tasks(session=session)
self.assertEqual(1, len(reset_tis))
def test_reset_orphaned_tasks_backfill_dag(self):
dag_id = 'test_reset_orphaned_tasks_backfill_dag'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dr1 = dag_file_processor.create_dag_run(dag, session=session)
ti = dr1.get_task_instances(session=session)[0]
ti.state = State.SCHEDULED
dr1.state = State.RUNNING
dr1.run_type = DagRunType.BACKFILL_JOB.value
session.merge(ti)
session.merge(dr1)
session.commit()
self.assertTrue(dr1.is_backfill)
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_specified_dagrun(self):
"""Try to reset when we specify a dagrun and ensure nothing else is."""
dag_id = 'test_reset_orphaned_tasks_specified_dagrun'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
# make two dagruns, only reset for one
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dr1 = dag_file_processor.create_dag_run(dag)
dr2 = dag_file_processor.create_dag_run(dag)
dr1.state = State.SUCCESS
dr2.state = State.RUNNING
ti1 = dr1.get_task_instances(session=session)[0]
ti2 = dr2.get_task_instances(session=session)[0]
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(dr1)
session.merge(dr2)
session.commit()
reset_tis = scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr2, session=session)
self.assertEqual(1, len(reset_tis))
ti1.refresh_from_db(session=session)
ti2.refresh_from_db(session=session)
self.assertEqual(State.SCHEDULED, ti1.state)
self.assertEqual(State.NONE, ti2.state)
def test_reset_orphaned_tasks_nonexistent_dagrun(self):
"""Make sure a task in an orphaned state is not reset if it has no dagrun. """
dag_id = 'test_reset_orphaned_tasks_nonexistent_dagrun'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
task = DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
session.add(ti)
session.commit()
ti.refresh_from_db()
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_no_orphans(self):
dag_id = 'test_reset_orphaned_tasks_no_orphans'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dr1 = dag_file_processor.create_dag_run(dag)
dr1.state = State.RUNNING
tis = dr1.get_task_instances(session=session)
tis[0].state = State.RUNNING
session.merge(dr1)
session.merge(tis[0])
session.commit()
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
tis[0].refresh_from_db()
self.assertEqual(State.RUNNING, tis[0].state)
def test_reset_orphaned_tasks_non_running_dagruns(self):
"""Ensure orphaned tasks with non-running dagruns are not reset."""
dag_id = 'test_reset_orphaned_tasks_non_running_dagruns'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dr1 = dag_file_processor.create_dag_run(dag)
dr1.state = State.SUCCESS
tis = dr1.get_task_instances(session=session)
self.assertEqual(1, len(tis))
tis[0].state = State.SCHEDULED
session.merge(dr1)
session.merge(tis[0])
session.commit()
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_with_orphans(self):
"""Create dagruns and esnure only ones with correct states are reset."""
prefix = 'scheduler_job_test_test_reset_orphaned_tasks'
states = [State.QUEUED, State.SCHEDULED, State.NONE, State.RUNNING, State.SUCCESS]
states_to_reset = [State.QUEUED, State.SCHEDULED, State.NONE]
dag = DAG(dag_id=prefix,
start_date=DEFAULT_DATE,
schedule_interval="@daily")
tasks = []
for i in range(len(states)):
task_id = "{}_task_{}".format(prefix, i)
task = DummyOperator(task_id=task_id, dag=dag)
tasks.append(task)
scheduler = SchedulerJob()
session = settings.Session()
# create dagruns
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dr1 = dag_file_processor.create_dag_run(dag)
dr2 = dag_file_processor.create_dag_run(dag)
dr1.state = State.RUNNING
dr2.state = State.SUCCESS
session.merge(dr1)
session.merge(dr2)
session.commit()
# create taskinstances and set states
dr1_tis = []
dr2_tis = []
for i, (task, state) in enumerate(zip(tasks, states)):
ti1 = TaskInstance(task, dr1.execution_date)
ti2 = TaskInstance(task, dr2.execution_date)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = state
ti2.state = state
dr1_tis.append(ti1)
dr2_tis.append(ti2)
session.merge(ti1)
session.merge(ti2)
session.commit()
self.assertEqual(2, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
for ti in dr1_tis + dr2_tis:
ti.refresh_from_db()
# running dagrun should be reset
for state, ti in zip(states, dr1_tis):
if state in states_to_reset:
self.assertIsNone(ti.state)
else:
self.assertEqual(state, ti.state)
# otherwise not
for state, ti in zip(states, dr2_tis):
self.assertEqual(state, ti.state)
for state, ti in zip(states, dr1_tis):
ti.state = state
session.commit()
scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr1, session=session)
# check same for dag_run version
for state, ti in zip(states, dr2_tis):
self.assertEqual(state, ti.state)
session.close()
def test_task_with_upstream_skip_process_task_instances():
"""
Test if _process_task_instances puts a task instance into SKIPPED state if any of its
upstream tasks are skipped according to TriggerRuleDep.
"""
clear_db_runs()
with DAG(
dag_id='test_task_with_upstream_skip_dag',
start_date=DEFAULT_DATE,
schedule_interval=None
) as dag:
dummy1 = DummyOperator(task_id='dummy1')
dummy2 = DummyOperator(task_id="dummy2")
dummy3 = DummyOperator(task_id="dummy3")
[dummy1, dummy2] >> dummy3
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear()
dr = dag.create_dagrun(run_type=DagRunType.MANUAL,
state=State.RUNNING,
execution_date=DEFAULT_DATE)
assert dr is not None
with create_session() as session:
tis = {ti.task_id: ti for ti in dr.get_task_instances(session=session)}
# Set dummy1 to skipped and dummy2 to success. dummy3 remains as none.
tis[dummy1.task_id].state = State.SKIPPED
tis[dummy2.task_id].state = State.SUCCESS
assert tis[dummy3.task_id].state == State.NONE
dag_runs = DagRun.find(dag_id='test_task_with_upstream_skip_dag')
dag_file_processor._process_task_instances(dag, dag_runs=dag_runs)
with create_session() as session:
tis = {ti.task_id: ti for ti in dr.get_task_instances(session=session)}
assert tis[dummy1.task_id].state == State.SKIPPED
assert tis[dummy2.task_id].state == State.SUCCESS
# dummy3 should be skipped because dummy1 is skipped.
assert tis[dummy3.task_id].state == State.SKIPPED
class TestSchedulerJobQueriesCount(unittest.TestCase):
"""
These tests are designed to detect changes in the number of queries for
different DAG files. These tests allow easy detection when a change is
made that affects the performance of the SchedulerJob.
"""
def setUp(self) -> None:
clear_db_runs()
clear_db_pools()
clear_db_dags()
clear_db_sla_miss()
clear_db_errors()
@parameterized.expand(
[
# pylint: disable=bad-whitespace
# expected, dag_count, task_count
# One DAG with one task per DAG file
(13, 1, 1), # noqa
# One DAG with five tasks per DAG file
(17, 1, 5), # noqa
# 10 DAGs with 10 tasks per DAG file
(46, 10, 10), # noqa
]
)
def test_execute_queries_count_with_harvested_dags(self, expected_query_count, dag_count, task_count):
with mock.patch.dict("os.environ", {
"PERF_DAGS_COUNT": str(dag_count),
"PERF_TASKS_COUNT": str(task_count),
"PERF_START_AGO": "1d",
"PERF_SCHEDULE_INTERVAL": "30m",
"PERF_SHAPE": "no_structure",
}), conf_vars({
('scheduler', 'use_job_schedule'): 'True',
('core', 'load_examples'): 'False',
}):
dagbag = DagBag(dag_folder=ELASTIC_DAG_FILE, include_examples=False)
for i, dag in enumerate(dagbag.dags.values()):
dr = dag.create_dagrun(state=State.RUNNING, run_id=f"{DagRunType.MANUAL.value}__{i}")
for ti in dr.get_task_instances():
ti.set_state(state=State.SCHEDULED)
mock_agent = mock.MagicMock()
mock_agent.harvest_serialized_dags.return_value = [
SerializedDAG.from_dict(SerializedDAG.to_dict(d)) for d in dagbag.dags.values()]
job = SchedulerJob(subdir=PERF_DAGS_FOLDER)
job.executor = MockExecutor()
job.heartbeat = mock.MagicMock()
job.processor_agent = mock_agent
with assert_queries_count(expected_query_count):
job._run_scheduler_loop()
@parameterized.expand(
[
# pylint: disable=bad-whitespace
# expected, dag_count, task_count
# One DAG with one task per DAG file
(2, 1, 1), # noqa
# One DAG with five tasks per DAG file
(2, 1, 5), # noqa
# 10 DAGs with 10 tasks per DAG file
(2, 10, 10), # noqa
]
)
def test_execute_queries_count_no_harvested_dags(self, expected_query_count, dag_count, task_count):
with mock.patch.dict("os.environ", {
"PERF_DAGS_COUNT": str(dag_count),
"PERF_TASKS_COUNT": str(task_count),
"PERF_START_AGO": "1d",
"PERF_SCHEDULE_INTERVAL": "30m",
"PERF_SHAPE": "no_structure",
}), conf_vars({
('scheduler', 'use_job_schedule'): 'True',
('core', 'load_examples'): 'False',
}):
dagbag = DagBag(dag_folder=ELASTIC_DAG_FILE, include_examples=False)
for i, dag in enumerate(dagbag.dags.values()):
dr = dag.create_dagrun(state=State.RUNNING, run_id=f"{DagRunType.MANUAL.value}__{i}")
for ti in dr.get_task_instances():
ti.set_state(state=State.SCHEDULED)
mock_agent = mock.MagicMock()
mock_agent.harvest_serialized_dags.return_value = []
job = SchedulerJob(subdir=PERF_DAGS_FOLDER)
job.executor = MockExecutor()
job.heartbeat = mock.MagicMock()
job.processor_agent = mock_agent
with assert_queries_count(expected_query_count):
job._run_scheduler_loop()
| [] | [] | ["AIRFLOW__CORE__DAGS_FOLDER"] | [] | ["AIRFLOW__CORE__DAGS_FOLDER"] | python | 1 | 0 |
geoextent/__main__.py | import argparse
import logging
import os
import sys
import zipfile
from . import __version__ as current_version
from .lib import extent
from .lib import helpfunctions as hf
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("geoextent")
help_description = '''
geoextent is a Python library for extracting the geospatial and temporal extents of a file
or a directory containing multiple geospatial data formats.
'''
help_epilog = '''
Examples:
geoextent -b path/to/directory_with_geospatial_data
geoextent -t path/to/file_with_temporal_extent
geoextent -b -t path/to/geospatial_files
geoextent -b -t --details path/to/zipfile_with_geospatial_data
'''
supported_formats = '''
Supported formats:
- GeoJSON (.geojson)
- Tabular data (.csv)
- GeoTIFF (.geotiff, .tif)
- Shapefile (.shp)
- GeoPackage (.gpkg)
- GPS Exchange Format (.gpx)
- Geography Markup Language (.gml)
- Keyhole Markup Language (.kml)
'''
# custom action, see e.g. https://stackoverflow.com/questions/11415570/directory-path-types-with-argparse
class readable_file_or_dir(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
for candidate in values:
if (hf.doi_regexp.match(candidate) is not None) or (hf.zenodo_regexp.match(candidate) is not None):
logger.debug("The format of the URL or DOI is correct. Geoextent is going to try to download "
"this repository from {} ".format(candidate))
setattr(namespace, self.dest, candidate)
else:
if not (os.path.isdir(candidate) or os.path.isfile(candidate) or zipfile.is_zipfile(candidate)):
raise argparse.ArgumentTypeError("{0} is not a valid directory or file".format(candidate))
if os.access(candidate, os.R_OK):
setattr(namespace, self.dest, candidate)
else:
raise argparse.ArgumentTypeError("{0} is not a readable directory or file".format(candidate))
def get_arg_parser():
"""Get arguments to extract geoextent """
parser = argparse.ArgumentParser(
add_help=False,
prog='geoextent',
formatter_class=argparse.RawDescriptionHelpFormatter,
usage="geoextent [-h] [--formats] [--version] [--debug] [--details] [--output] [output file] [-b] [-t] [input "
"file]'] "
)
parser.add_argument(
'-h', '--help',
action='store_true',
help='show help message and exit'
)
parser.add_argument(
'--formats',
action='store_true',
help='show supported formats'
)
parser.add_argument(
'--version',
action='store_true',
help='show installed version'
)
parser.add_argument(
'--debug',
help='turn on debug logging, alternatively set environment variable GEOEXTENT_DEBUG=1',
action='store_true'
)
parser.add_argument(
'--details',
action='store_true',
default=False,
help='return details of the geoextent extraction for folders and zip files',
)
parser.add_argument(
'--output',
action='store',
default=None,
help="Creates geopackage with geoextent output",
)
parser.add_argument(
'-b', '--bounding-box',
action='store_true',
default=False,
help='extract spatial extent (bounding box)'
)
parser.add_argument(
'-t', '--time-box',
action='store_true',
default=False,
help='extract temporal extent (%%Y-%%m-%%d)'
)
parser.add_argument(
'files',
action=readable_file_or_dir,
nargs=argparse.REMAINDER,
help="input file or path"
)
return parser
def print_help():
print(help_description)
arg_parser.print_help()
print(help_epilog)
print_supported_formats()
def print_supported_formats():
print(supported_formats)
def print_version():
print(current_version)
arg_parser = get_arg_parser()
def main():
# If there are no arguments, print help and exit
if len(sys.argv[1:]) == 0:
print_help()
arg_parser.exit()
# version, help, and formats must be checked before parsing, because the
# parser would otherwise error out on the missing (required) files argument
if "--help" in sys.argv or "-h" in sys.argv:
print_help()
arg_parser.exit()
if "--version" in sys.argv:
print_version()
arg_parser.exit()
if "--formats" in sys.argv:
print_supported_formats()
arg_parser.exit()
args = vars(arg_parser.parse_args())
files = args['files']
if files is None:
raise Exception("Invalid command, input file missing")
multiple_files = True
logger.debug('Extracting from inputs %s', files)
# Set logging level
if args['debug']:
logging.getLogger('geoextent').setLevel(logging.DEBUG)
if os.environ.get('GEOEXTENT_DEBUG', None) == "1":
logging.getLogger('geoextent').setLevel(logging.DEBUG)
# Identify local file source
is_file = os.path.isfile(os.path.join(os.getcwd(), files))
is_zipfile = zipfile.is_zipfile(os.path.join(os.getcwd(), files))
is_directory = os.path.isdir(os.path.join(os.getcwd(), files))
# Identify URL
is_url = hf.https_regexp.match(files) is not None
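# For DOI/URL inputs the filesystem checks above are all False, so is_url
# decides whether the input is treated as a remote repository.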
# Check output path
export = args['output'] is not None
try:
if export:
filename = hf.path_output(args['output'])
except ValueError as e:
raise ValueError(e)
output = None
try:
if is_file and not is_zipfile:
output = extent.fromFile(files, bbox=args['bounding_box'], tbox=args['time_box'])
multiple_files = False
if is_directory or is_zipfile:
output = extent.fromDirectory(files, bbox=args['bounding_box'], tbox=args['time_box'], details=True)
if is_url:
output = extent.from_repository(files, bbox=args['bounding_box'], tbox=args['time_box'], details=True)
except Exception as e:
if logger.getEffectiveLevel() >= logging.DEBUG:
logger.exception(e)
sys.exit(1)
if output is None:
raise Exception("Did not find supported files at {}".format(files))
else:
if export and not multiple_files:
logger.warning("Exporting result does not apply to single files")
elif export and multiple_files:
logger.warning("Exporting result into: {}".format(args['output']))
df = hf.extract_output(output, files, current_version)
hf.create_geopackage(df, filename)
if not args['details']:
output.pop('details', None)
if type(output) == list:
print(str(output))
elif type(output) == dict:
print(str(output))
else:
print(output)
if __name__ == '__main__':
main()
| [] | [] | ["GEOEXTENT_DEBUG"] | [] | ["GEOEXTENT_DEBUG"] | python | 1 | 0 |
utils/TrainNetwork.py | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
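# TF_CPP_MIN_LOG_LEVEL='3' silences INFO, WARNING and ERROR output from
# TensorFlow's C++ backend.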
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import cv2
import time
from datetime import datetime
import sys
from .plotters import *
from .readers import *
from .transformer import *
from .models import *
from .datasets import *
from .training import *
def trainEllregNetwork(arg_dict):
sess = tf.Session()
##########################################
with tf.variable_scope('Input_Variables'):
image_placeholder = tf.placeholder(tf.float32, [arg_dict['batch_size'], arg_dict['input_size'], arg_dict['input_size'], 1])
label_placeholder = tf.placeholder(tf.float32, [arg_dict['batch_size'], 6])
is_training = tf.placeholder(tf.bool, [], name='is_training')
##########################################
with tf.variable_scope('Network'):
print('Constructing model...')
network_eval_batch, _ = arg_dict['model_construct_function'](image_placeholder, is_training)
with tf.variable_scope('Loss'):
print('Adding loss function...')
loss, errors, angle_errs = gen_loss_ellreg(network_eval_batch, label_placeholder)
##########################################
with tf.variable_scope('Input_Decoding'):
print('Populating input queues...')
image_valid_batch, label_valid_batch = get_eval_batch_ellreg(arg_dict['dataset'], arg_dict['n_reader_threads'], arg_dict['batch_size'], arg_dict['input_size'])
image_train_batch, label_train_batch = get_train_batch_ellreg_v2(arg_dict['dataset'], arg_dict['n_reader_threads'], arg_dict['batch_size'], arg_dict['input_size'], max_rot = arg_dict['aug_rot_max'], max_trans = arg_dict['aug_trans_max'])
print('Starting input threads...')
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
##########################################
global_step = tf.Variable(0, name='global_step', trainable=False)
with tf.variable_scope('Optimizer'):
print('Initializing optimizer...')
learn_rate, train_op = arg_dict['learn_function'](loss, arg_dict['dataset'].train_size, arg_dict['batch_size'], global_step, arg_dict['start_learn_rate'], arg_dict['epocs_per_lr_decay'], const_learn_rate=arg_dict['const_learn_rate'])
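# learn_function is expected to build the optimizer and, unless
# const_learn_rate is set, a decayed learning rate driven by
# epocs_per_lr_decay (an assumption about the helper in utils.training).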
##########################################
with tf.variable_scope('Saver'):
print('Generating summaries and savers...')
training_summary, validation_summary = gen_summary_ellreg(loss, errors, angle_errs, learn_rate)
summary_writer = tf.summary.FileWriter(arg_dict['log_dir'], sess.graph)
saver = tf.train.Saver(slim.get_variables_to_restore(), max_to_keep=2)
##########################################
print('Initializing model...')
sess.run(tf.global_variables_initializer())
if 'network_to_restore' in arg_dict.keys() and arg_dict['network_to_restore'] is not None:
saver.restore(sess,arg_dict['network_to_restore'])
print('Beginning training...')
for step in range(0, arg_dict['num_steps']):
start_time = time.time()
img_batch, label_batch = sess.run([image_train_batch, label_train_batch])
_, train_loss, summary_output, cur_step = sess.run(fetches=[train_op, loss, training_summary, global_step], feed_dict={image_placeholder: img_batch, label_placeholder: label_batch, is_training: True})
duration = time.time() - start_time
if (step+1) % 10 == 0: # CMDline updates every 10 steps
examples_per_sec = arg_dict['batch_size'] / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.4f (%.1f examples/sec; %.3f sec/batch)')
print (format_str % (datetime.now(), cur_step, train_loss, examples_per_sec, sec_per_batch))
if (step+1) % 100 == 0: # Tensorboard updates values every 100 steps
summary_writer.add_summary(summary_output, cur_step)
img_batch, label_batch = sess.run([image_valid_batch, label_valid_batch])
summary_output = sess.run(fetches=[validation_summary], feed_dict={image_placeholder: img_batch, label_placeholder: label_batch, is_training: False})[0]
summary_writer.add_summary(summary_output, cur_step)
if (step+1) % 1000 == 0: # Save model every 1k steps
checkpoint_path = os.path.join(arg_dict['log_dir'], 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=cur_step)
# Save model after training is terminated...
checkpoint_path = os.path.join(arg_dict['log_dir'], 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=cur_step)
def trainSegEllfitNetwork(arg_dict):
sess = tf.Session()
##########################################
with tf.variable_scope('Input_Variables'):
image_placeholder = tf.placeholder(tf.float32, [arg_dict['batch_size'], arg_dict['input_size'], arg_dict['input_size'], 1])
seg_label_placeholder = tf.placeholder(tf.float32, [arg_dict['batch_size'], arg_dict['input_size'], arg_dict['input_size'], 1])
ellfit_label_placeholder = tf.placeholder(tf.float32, [arg_dict['batch_size'], 6])
is_training = tf.placeholder(tf.bool, [], name='is_training')
##########################################
with tf.variable_scope('Network'):
print('Constructing model...')
network_eval_batch, ellfit_eval_batch, angle_fix_batch = arg_dict['model_construct_function'](image_placeholder, is_training)
with tf.variable_scope('Loss'):
print('Adding loss function...')
loss1, errors1 = gen_loss_seg(network_eval_batch, seg_label_placeholder)
# Alternative loss if we don't want morphological filtering
#loss1, errors1 = gen_loss_seg_nomorph(network_eval_batch, seg_label_placeholder)
loss2, errors2, angle_errs = gen_loss_ellreg(ellfit_eval_batch, ellfit_label_placeholder)
loss3 = gen_loss_anglequadrant(angle_fix_batch, ellfit_label_placeholder)
loss = tf.reduce_mean([loss1, loss2, loss3])
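# Combined objective: mean of the segmentation loss, the ellipse-regression
# loss and the angle-quadrant loss.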
##########################################
with tf.variable_scope('Input_Decoding'):
print('Populating input queues...')
image_valid_batch, seg_valid_batch, label_valid_batch = get_valid_batch_segellreg(arg_dict['dataset'], arg_dict['n_reader_threads'], arg_dict['batch_size'], arg_dict['input_size'])
image_train_batch, seg_train_batch, label_train_batch = get_train_batch_segellreg(arg_dict['dataset'], arg_dict['n_reader_threads'], arg_dict['batch_size'], arg_dict['input_size'], max_trans = arg_dict['aug_trans_max'], max_rot = arg_dict['aug_rot_max'])
print('Starting input threads...')
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
##########################################
global_step = tf.Variable(0, name='global_step', trainable=False)
with tf.variable_scope('Optimizer'):
print('Initializing optimizer...')
learn_rate, train_op = arg_dict['learn_function'](loss, arg_dict['dataset'].train_size, arg_dict['batch_size'], global_step, arg_dict['start_learn_rate'], arg_dict['epocs_per_lr_decay'], const_learn_rate=arg_dict['const_learn_rate'])
##########################################
with tf.variable_scope('Saver'):
print('Generating summaries and savers...')
training_summary1, validation_summary1 = gen_summary_seg(loss1, errors1, learn_rate)
training_summary2, validation_summary2 = gen_summary_ellreg(loss2, errors2, angle_errs, learn_rate)
training_summary = tf.summary.merge([training_summary1, training_summary2])
validation_summary = tf.summary.merge([validation_summary1, validation_summary2])
summary_writer = tf.summary.FileWriter(arg_dict['log_dir'], sess.graph)
saver = tf.train.Saver(slim.get_variables_to_restore(), max_to_keep=2)
##########################################
print('Initializing model...')
sess.run(tf.global_variables_initializer())
if 'network_to_restore' in arg_dict and arg_dict['network_to_restore'] is not None:
saver.restore(sess, arg_dict['network_to_restore'])
print('Beginning training...')
for step in range(0, arg_dict['num_steps']):
start_time = time.time()
img_batch, seg_label_batch, ellfit_label_batch = sess.run([image_train_batch, seg_train_batch, label_train_batch])
_, train_loss, summary_output, cur_step, ellfit_errs = sess.run(fetches=[train_op, loss, training_summary, global_step, errors2], feed_dict={image_placeholder: img_batch, seg_label_placeholder: seg_label_batch, ellfit_label_placeholder: ellfit_label_batch, is_training: True})
duration = time.time() - start_time
if (step+1) % 10 == 0: # CMDline updates every 10 steps
examples_per_sec = arg_dict['batch_size'] / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss=%.4f, xerr=%.2f, yerr=%.2f (%.1f examples/sec)')
print(format_str % (datetime.now(), cur_step, train_loss, ellfit_errs[0], ellfit_errs[1], examples_per_sec))
if (step+1) % 100 == 0: # Tensorboard updates values every 100 steps
summary_writer.add_summary(summary_output, cur_step)
img_batch, seg_label_batch, ellfit_label_batch = sess.run([image_valid_batch, seg_valid_batch, label_valid_batch])
summary_output = sess.run(fetches=[validation_summary], feed_dict={image_placeholder: img_batch, seg_label_placeholder: seg_label_batch, ellfit_label_placeholder: ellfit_label_batch, is_training: False})[0]
summary_writer.add_summary(summary_output, cur_step)
if (step+1) % 1000 == 0: # Save model every 1k steps
checkpoint_path = os.path.join(arg_dict['log_dir'], 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=cur_step)
# Save model after training is terminated...
checkpoint_path = os.path.join(arg_dict['log_dir'], 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=cur_step)
def trainBinnedNetwork(arg_dict):
sess = tf.Session()
##########################################
with tf.variable_scope('Input_Variables'):
image_placeholder = tf.placeholder(tf.float32, [arg_dict['batch_size'], arg_dict['input_size'], arg_dict['input_size'], 1])
label_placeholder = tf.placeholder(tf.float32, [arg_dict['batch_size'], 6])
is_training = tf.placeholder(tf.bool, [], name='is_training')
##########################################
with tf.variable_scope('Network'):
print('Constructing model...')
network_eval_batch, _ = arg_dict['model_construct_function'](image_placeholder, is_training, int(arg_dict['input_size']*arg_dict['bin_per_px']))
with tf.variable_scope('Loss'):
print('Adding loss function...')
loss, errors = gen_loss_xyhot(network_eval_batch, label_placeholder, arg_dict['input_size'], int(arg_dict['input_size']*arg_dict['bin_per_px']))
##########################################
with tf.variable_scope('Input_Decoding'):
print('Populating input queues...')
image_valid_batch, label_valid_batch = get_eval_batch_ellreg(arg_dict['dataset'], arg_dict['n_reader_threads'], arg_dict['batch_size'], arg_dict['input_size'])
image_train_batch, label_train_batch = get_train_batch_ellreg_v2(arg_dict['dataset'], arg_dict['n_reader_threads'], arg_dict['batch_size'], arg_dict['input_size'], max_rot=arg_dict['aug_rot_max'], max_trans=arg_dict['aug_trans_max'])
print('Starting input threads...')
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
##########################################
global_step = tf.Variable(0, name='global_step', trainable=False)
with tf.variable_scope('Optimizer'):
print('Initializing optimizer...')
learn_rate, train_op = arg_dict['learn_function'](loss, arg_dict['dataset'].train_size, arg_dict['batch_size'], global_step, arg_dict['start_learn_rate'], arg_dict['epocs_per_lr_decay'], const_learn_rate=arg_dict['const_learn_rate'])
##########################################
with tf.variable_scope('Saver'):
print('Generating summaries and savers...')
training_summary, validation_summary = gen_summary_xyhot(loss, errors, learn_rate)
summary_writer = tf.summary.FileWriter(arg_dict['log_dir'], sess.graph)
saver = tf.train.Saver(slim.get_variables_to_restore(), max_to_keep=2)
##########################################
print('Initializing model...')
sess.run(tf.global_variables_initializer())
if 'network_to_restore' in arg_dict and arg_dict['network_to_restore'] is not None:
saver.restore(sess, arg_dict['network_to_restore'])
print('Beginning training...')
for step in range(0, arg_dict['num_steps']):
start_time = time.time()
img_batch, label_batch = sess.run([image_train_batch, label_train_batch])
_, train_loss, summary_output, cur_step, bin_errs = sess.run(fetches=[train_op, loss, training_summary, global_step, errors], feed_dict={image_placeholder: img_batch, label_placeholder: label_batch, is_training: True})
duration = time.time() - start_time
if (step+1) % 10 == 0: # CMDline updates every 10 steps
examples_per_sec = arg_dict['batch_size'] / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss=%.4f, xerr=%.2f, yerr=%.2f (%.1f examples/sec)')
print(format_str % (datetime.now(), cur_step, train_loss, bin_errs[0], bin_errs[1], examples_per_sec))
if (step+1) % 100 == 0: # Tensorboard updates values every 100 steps
summary_writer.add_summary(summary_output, cur_step)
img_batch, label_batch = sess.run([image_valid_batch, label_valid_batch])
summary_output = sess.run(fetches=[validation_summary], feed_dict={image_placeholder: img_batch, label_placeholder: label_batch, is_training: False})[0]
summary_writer.add_summary(summary_output, cur_step)
if (step+1) % 1000 == 0: # Save model every 1k steps
checkpoint_path = os.path.join(arg_dict['log_dir'], 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=cur_step)
# Save model after training is terminated...
checkpoint_path = os.path.join(arg_dict['log_dir'], 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=cur_step)
def trainSegSoftNetwork(arg_dict):
config = tf.ConfigProto(allow_soft_placement=True)
# Other ConfigProto options that can be enabled here if needed:
# log_device_placement=True
sess = tf.Session(config=config)
##########################################
with tf.variable_scope('Input_Variables'):
image_placeholder = tf.placeholder(tf.float32, [arg_dict['batch_size'], arg_dict['input_size'], arg_dict['input_size'], 1])
label_placeholder = tf.placeholder(tf.float32, [arg_dict['batch_size'], arg_dict['input_size'], arg_dict['input_size'], 1])
is_training = tf.placeholder(tf.bool, [], name='is_training')
##########################################
with tf.variable_scope('Network'):
print('Constructing model...')
network_eval_batch = arg_dict['model_construct_function'](image_placeholder, is_training)
with tf.variable_scope('Loss'):
print('Adding loss function...')
loss, errors = gen_loss_seg_nomorph(network_eval_batch, label_placeholder)
##########################################
with tf.variable_scope('Input_Decoding'):
print('Populating input queues...')
image_valid_batch, label_valid_batch = get_eval_batch_seg(arg_dict['dataset'], arg_dict['n_reader_threads'], arg_dict['batch_size'], arg_dict['input_size'])
image_train_batch, label_train_batch = get_train_batch_seg(arg_dict['dataset'], arg_dict['n_reader_threads'], arg_dict['batch_size'], arg_dict['input_size'], max_trans=arg_dict['aug_trans_max'], max_rot=arg_dict['aug_rot_max'])
print('Starting input threads...')
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
##########################################
global_step = tf.Variable(0, name='global_step', trainable=False)
with tf.variable_scope('Optimizer'):
print('Initializing optimizer...')
learn_rate, train_op = arg_dict['learn_function'](loss, arg_dict['dataset'].train_size, arg_dict['batch_size'], global_step, arg_dict['start_learn_rate'], arg_dict['epocs_per_lr_decay'], const_learn_rate=arg_dict['const_learn_rate'])
##########################################
with tf.variable_scope('Saver'):
print('Generating summaries and savers...')
training_summary, validation_summary = gen_summary_seg(loss, errors, learn_rate)
summary_writer = tf.summary.FileWriter(arg_dict['log_dir'], sess.graph)
saver = tf.train.Saver(slim.get_variables_to_restore(), max_to_keep=2)
##########################################
print('Initializing model...')
sess.run(tf.global_variables_initializer())
if 'network_to_restore' in arg_dict and arg_dict['network_to_restore'] is not None:
saver.restore(sess, arg_dict['network_to_restore'])
for step in range(0, arg_dict['num_steps']):
start_time = time.time()
img_batch, label_batch = sess.run([image_train_batch, label_train_batch])
_, train_loss, summary_output, cur_step = sess.run(fetches=[train_op, loss, training_summary, global_step], feed_dict={image_placeholder: img_batch, label_placeholder: label_batch, is_training: True})
duration = time.time() - start_time
if (step+1) % 50 == 0: # CMDline updates every 50 steps
examples_per_sec = arg_dict['batch_size'] / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.4f (%.1f examples/sec; %.3f sec/batch)')
print(format_str % (datetime.now(), cur_step, train_loss, examples_per_sec, sec_per_batch))
if (step+1) % 100 == 0: # Tensorboard updates values every 100 steps
summary_writer.add_summary(summary_output, cur_step)
img_batch, label_batch = sess.run([image_valid_batch, label_valid_batch])
summary_output = sess.run(fetches=[validation_summary], feed_dict={image_placeholder: img_batch, label_placeholder: label_batch, is_training: False})[0]
summary_writer.add_summary(summary_output, cur_step)
if (step+1) % 1000 == 0: # Save model every 1k steps
checkpoint_path = os.path.join(arg_dict['log_dir'], 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=cur_step)
# Save model after training is terminated...
checkpoint_path = os.path.join(arg_dict['log_dir'], 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=cur_step)
def trainNetwork(arg_dict):
if arg_dict['net_type'] == 'segellreg':
trainSegEllfitNetwork(arg_dict)
elif arg_dict['net_type'] == 'ellreg':
trainEllregNetwork(arg_dict)
elif arg_dict['net_type'] == 'binned':
trainBinnedNetwork(arg_dict)
elif arg_dict['net_type'] == 'seg':
trainSegSoftNetwork(arg_dict)
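# Illustrative usage sketch (not part of the original script): the arg_dict keys below are
# inferred from the training functions above; my_model_fn, my_learn_fn and my_dataset are
# hypothetical placeholders, and the values are arbitrary examples.
# arg_dict = {
#     'net_type': 'seg',                       # one of: 'segellreg', 'ellreg', 'binned', 'seg'
#     'model_construct_function': my_model_fn,
#     'learn_function': my_learn_fn,
#     'dataset': my_dataset,
#     'batch_size': 32,
#     'input_size': 128,
#     'n_reader_threads': 4,
#     'num_steps': 10000,
#     'start_learn_rate': 1e-4,
#     'epocs_per_lr_decay': 10,
#     'const_learn_rate': False,
#     'aug_trans_max': 10,
#     'aug_rot_max': 15,
#     'log_dir': '/tmp/logs',
#     'network_to_restore': None,
# }
# (the 'binned' net type additionally requires a 'bin_per_px' key)
# trainNetwork(arg_dict)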
| [] | [] | [
"TF_CPP_MIN_LOG_LEVEL"
] | [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
services/datastore/endpoints/endpoints.go | package endpoints
import (
"compress/gzip"
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"strconv"
"time"
"github.com/IamCathal/neo/services/datastore/app"
"github.com/IamCathal/neo/services/datastore/configuration"
"github.com/IamCathal/neo/services/datastore/controller"
"github.com/IamCathal/neo/services/datastore/datastructures"
"github.com/IamCathal/neo/services/datastore/dbmonitor"
"github.com/gorilla/mux"
"github.com/gorilla/websocket"
influxdb2 "github.com/influxdata/influxdb-client-go"
"github.com/neosteamfriendgraphing/common"
"github.com/neosteamfriendgraphing/common/dtos"
"github.com/neosteamfriendgraphing/common/util"
"github.com/segmentio/ksuid"
"go.mongodb.org/mongo-driver/mongo"
"go.uber.org/zap"
)
var (
upgrader = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
}
authRequiredEndpoints map[string]bool
)
type Endpoints struct {
Cntr controller.CntrInterface
}
func init() {
authRequiredEndpoints = make(map[string]bool)
authRequiredEndpoints["saveuser"] = true
authRequiredEndpoints["insertgame"] = true
authRequiredEndpoints["getuser"] = true
authRequiredEndpoints["getdetailsforgames"] = true
authRequiredEndpoints["savecrawlingstats"] = true
authRequiredEndpoints["getgraphabledata"] = true
authRequiredEndpoints["getusernamesfromsteamids"] = true
authRequiredEndpoints["saveprocessedgraphdata"] = true
}
// SetupRouter registers every HTTP API route and websocket route for the
// datastore service and attaches the auth and logging middleware.
func (endpoints *Endpoints) SetupRouter() *mux.Router {
r := mux.NewRouter()
apiRouter := r.PathPrefix("/api").Subrouter()
apiRouter.HandleFunc("/status", endpoints.Status).Methods("POST")
apiRouter.HandleFunc("/saveuser", endpoints.SaveUser).Methods("POST")
apiRouter.HandleFunc("/insertgame", endpoints.InsertGame).Methods("POST")
apiRouter.HandleFunc("/getuser/{steamid}", endpoints.GetUser).Methods("GET")
apiRouter.HandleFunc("/getdetailsforgames", endpoints.GetDetailsForGames).Methods("POST")
apiRouter.HandleFunc("/savecrawlingstats", endpoints.SaveCrawlingStatsToDB).Methods("POST")
apiRouter.HandleFunc("/getcrawlinguser/{crawlid}", endpoints.GetCrawlingUser).Methods("GET", "OPTIONS")
apiRouter.HandleFunc("/hasbeencrawledbefore", endpoints.HasBeenCrawledBefore).Methods("POST", "OPTIONS")
apiRouter.HandleFunc("/getcrawlingstatus/{crawlid}", endpoints.GetCrawlingStatus).Methods("GET", "OPTIONS")
apiRouter.HandleFunc("/getgraphabledata/{steamid}", endpoints.GetGraphableData).Methods("GET")
apiRouter.HandleFunc("/getusernamesfromsteamids", endpoints.GetUsernamesFromSteamIDs).Methods("POST")
apiRouter.HandleFunc("/saveprocessedgraphdata/{crawlid}", endpoints.SaveProcessedGraphData).Methods("POST")
apiRouter.HandleFunc("/getprocessedgraphdata/{crawlid}", endpoints.GetProcessedGraphData).Methods("POST", "OPTIONS")
apiRouter.HandleFunc("/doesprocessedgraphdataexist/{crawlid}", endpoints.DoesProcessedGraphDataExist).Methods("POST", "OPTIONS")
apiRouter.HandleFunc("/calculateshortestdistanceinfo", endpoints.CalculateShortestDistanceInfo).Methods("POST", "OPTIONS")
apiRouter.HandleFunc("/getshortestdistanceinfo", endpoints.GetShortestDistanceInfo).Methods("POST", "OPTIONS")
apiRouter.HandleFunc("/getfinishedcrawlsaftertimestamp", endpoints.GetFinishedCrawlsAfterTimestamp).Methods("GET", "OPTIONS")
apiRouter.HandleFunc("/getfinishedshortestdistancecrawlsaftertimestamp", endpoints.GetFinishedShortestDistanceCrawlsAfterTimestamp).Methods("GET", "OPTIONS")
apiRouter.HandleFunc("/gettotalusersindb", endpoints.GetTotalUsersInDB).Methods("GET", "OPTIONS")
apiRouter.HandleFunc("/gettotalcrawlscompleted", endpoints.GetTotalCrawlsCompleted).Methods("GET", "OPTIONS")
apiRouter.Use(endpoints.AuthMiddleware)
apiRouter.Use(endpoints.LoggingMiddleware)
wsRouter := r.PathPrefix("/ws").Subrouter()
wsRouter.HandleFunc("/newuserstream", endpoints.NewUserStream).Methods("GET")
wsRouter.HandleFunc("/crawlingstatstream/{crawlid}", endpoints.CrawlingStatsUpdateStream).Methods("GET")
return r
}
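// Illustrative wiring sketch (not part of the original service): one way the router returned
// by SetupRouter might be served. The concrete CntrInterface implementation and the listen
// address are placeholders.
//
//	var cntr controller.CntrInterface // any concrete implementation
//	endpoints := &Endpoints{Cntr: cntr}
//	router := endpoints.SetupRouter()
//	log.Fatal(http.ListenAndServe(":8080", router))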
// AuthMiddleware rejects calls to endpoints listed in authRequiredEndpoints
// unless the request's "Authentication" header matches the AUTH_KEY
// environment variable.
func (endpoints *Endpoints) AuthMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
baseURLPath := util.GetBaseURLPath(r)
if _, requiresAuth := authRequiredEndpoints[baseURLPath]; requiresAuth {
if r.Header.Get("Authentication") != os.Getenv("AUTH_KEY") {
configuration.Logger.Sugar().Infof("ip: %s with user-agent: %s wasn't authorized to access %s",
r.RemoteAddr, r.Header.Get("User-Agent"), r.URL.Path)
w.WriteHeader(http.StatusForbidden)
response := struct {
Error string `json:"error"`
}{
"You are not authorized to access this endpoint",
}
json.NewEncoder(w).Encode(response)
return
}
}
next.ServeHTTP(w, r)
})
}
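// Illustrative request sketch (an assumption, not taken from the original docs): endpoints
// listed in authRequiredEndpoints must carry the AUTH_KEY value in the "Authentication"
// header. Host, port and the steam ID below are placeholders.
//
//	curl http://localhost:8080/api/getuser/76561198000000000 \
//	     -H "Authentication: $AUTH_KEY"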
// LoggingMiddleware sets up CORS, short-circuits OPTIONS preflight requests,
// recovers from handler panics, logs each request and records its latency
// to InfluxDB.
func (endpoints *Endpoints) LoggingMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
util.SetupCORS(&w, r)
if r.Method == "OPTIONS" {
return
}
defer func() {
if err := recover(); err != nil {
vars := mux.Vars(r)
w.WriteHeader(http.StatusInternalServerError)
response := struct {
Error string `json:"error"`
}{
fmt.Sprintf("Give the code monkeys this ID: '%s'", vars["requestID"]),
}
json.NewEncoder(w).Encode(response)
requestStartTime, timeParseErr := strconv.ParseInt(vars["requestStartTime"], 10, 64)
if timeParseErr != nil {
configuration.Logger.Fatal(fmt.Sprintf("%v", err),
zap.String("requestID", vars["requestID"]),
zap.Int("status", http.StatusInternalServerError),
zap.Int64("duration", util.GetCurrentTimeInMs()-requestStartTime),
zap.String("path", r.URL.EscapedPath()),
zap.String("shortPath", util.GetBaseURLPath(r)),
)
panic(timeParseErr)
}
configuration.Logger.Error(fmt.Sprintf("%v", err),
zap.String("requestID", vars["requestID"]),
zap.Int("status", http.StatusInternalServerError),
zap.Int64("duration", util.GetCurrentTimeInMs()-requestStartTime),
zap.String("path", r.URL.EscapedPath()),
zap.String("shortPath", util.GetBaseURLPath(r)),
)
}
}()
vars := mux.Vars(r)
identifier := ksuid.New()
vars["requestID"] = identifier.String()
requestStartTime := time.Now().UnixNano() / int64(time.Millisecond)
vars["requestStartTime"] = strconv.Itoa(int(requestStartTime))
wrapped := wrapResponseWriter(w)
next.ServeHTTP(wrapped, r)
configuration.Logger.Info("served content",
zap.String("requestID", vars["requestID"]),
zap.Int("status", wrapped.status),
zap.Int64("duration", util.GetCurrentTimeInMs()-requestStartTime),
zap.String("path", r.URL.EscapedPath()),
zap.String("shortPath", util.GetBaseURLPath(r)),
)
point := influxdb2.NewPointWithMeasurement("endpointLatencies").
AddTag("path", util.GetBaseURLPath(r)).
AddTag("service", "datastore").
AddField("latency", util.GetCurrentTimeInMs()-requestStartTime).
SetTime(time.Now())
configuration.EndpointWriteAPI.WritePoint(point)
})
}
func (endpoints *Endpoints) SaveUser(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
saveUserDTO := dtos.SaveUserDTO{}
err := json.NewDecoder(r.Body).Decode(&saveUserDTO)
if err != nil {
util.SendBasicInvalidResponse(w, r, "Invalid input", vars, http.StatusBadRequest)
return
}
crawlingStats := common.CrawlingStatus{
CrawlID: saveUserDTO.CrawlID,
OriginalCrawlTarget: saveUserDTO.OriginalCrawlTarget,
MaxLevel: saveUserDTO.MaxLevel,
TotalUsersToCrawl: len(saveUserDTO.User.FriendIDs),
}
err = app.SaveCrawlingStatsToDB(endpoints.Cntr, saveUserDTO.CurrentLevel, crawlingStats)
if err != nil {
logMsg := fmt.Sprintf("failed to save crawling stats to DB: %+v", err)
configuration.Logger.Sugar().Error(logMsg)
panic(logMsg)
}
err = app.SaveUserToDB(endpoints.Cntr, saveUserDTO.User)
if err != nil {
logMsg := fmt.Sprintf("failed to save user to DB: %+v", err)
configuration.Logger.Error(logMsg)
panic(logMsg)
}
configuration.Logger.Sugar().Infof("successfully saved user %s to db", saveUserDTO.User.AccDetails.SteamID)
response := struct {
Status string `json:"status"`
Message string `json:"message"`
}{
"success",
"very good",
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(response)
}
func (endpoints *Endpoints) InsertGame(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
bareGameInfo := common.BareGameInfo{}
err := json.NewDecoder(r.Body).Decode(&bareGameInfo)
if err != nil {
util.SendBasicInvalidResponse(w, r, "Invalid input", vars, http.StatusBadRequest)
return
}
success, err := endpoints.Cntr.InsertGame(context.TODO(), bareGameInfo)
if err != nil || !success {
logMsg := fmt.Sprintf("failed to insert game: %+v", err)
configuration.Logger.Sugar().Error(logMsg)
panic(logMsg)
}
response := struct {
Status string `json:"status"`
Message string `json:"message"`
}{
"success",
"very good",
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(response)
}
func (endpoints *Endpoints) SaveCrawlingStatsToDB(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
crawlingStatusInput := dtos.SaveCrawlingStatsDTO{}
err := json.NewDecoder(r.Body).Decode(&crawlingStatusInput)
if err != nil {
util.SendBasicInvalidResponse(w, r, "Invalid input", vars, http.StatusBadRequest)
return
}
err = app.SaveCrawlingStatsToDB(endpoints.Cntr, crawlingStatusInput.CurrentLevel, crawlingStatusInput.CrawlingStatus)
if err != nil {
logMsg := fmt.Sprintf("failed to save crawling stats: %+v", err)
configuration.Logger.Sugar().Error(logMsg)
panic(logMsg)
}
response := struct {
Status string `json:"status"`
Message string `json:"message"`
}{
"success",
"very good",
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(response)
}
func (endpoints *Endpoints) GetCrawlingUser(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
// Validate crawlid
_, err := ksuid.Parse(vars["crawlid"])
if err != nil {
util.SendBasicInvalidResponse(w, r, "invalid crawlid", vars, http.StatusNotFound)
return
}
// If the user is a crawlTarget and crawling is in progress
isCurrentlyBeingCrawled, steamID, err := app.IsCurrentlyBeingCrawled(endpoints.Cntr, vars["crawlid"])
if err != nil {
logMsg := fmt.Sprintf("could not check crawling progress: %+v", err)
configuration.Logger.Sugar().Error(logMsg)
panic(logMsg)
}
if !isCurrentlyBeingCrawled {
util.SendBasicInvalidResponse(w, r, "User is not currently being crawled", vars, http.StatusNotFound)
return
}
user, err := app.GetUserFromDB(endpoints.Cntr, steamID)
if err == mongo.ErrNoDocuments {
util.SendBasicInvalidResponse(w, r, "user does not exist", vars, http.StatusNotFound)
return
}
if err != nil {
logMsg := fmt.Sprintf("couldn't get user: %+v", err)
configuration.Logger.Sugar().Error(logMsg)
panic(logMsg)
}
response := struct {
Status string `json:"status"`
User common.UserDocument `json:"user"`
}{
"success",
user,
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(response)
}
func (endpoints *Endpoints) HasBeenCrawledBefore(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
crawlDetails := dtos.HasBeenCrawledBeforeInputDTO{}
err := json.NewDecoder(r.Body).Decode(&crawlDetails)
if err != nil {
util.SendBasicInvalidResponse(w, r, "Invalid input", vars, http.StatusBadRequest)
return
}
if isValidFormat := util.IsValidFormatSteamID(crawlDetails.SteamID); !isValidFormat {
util.SendBasicInvalidResponse(w, r, "Invalid input", vars, http.StatusBadRequest)
return
}
if crawlDetails.Level < 1 || crawlDetails.Level > 3 {
util.SendBasicInvalidResponse(w, r, "Invalid input", vars, http.StatusBadRequest)
return
}
crawlID, err := endpoints.Cntr.HasUserBeenCrawledBeforeAtLevel(context.TODO(), crawlDetails.Level, crawlDetails.SteamID)
if err != nil {
logMsg := fmt.Sprintf("couldn't lookup has user been crawled before: %+v", err)
configuration.Logger.Error(logMsg)
panic(err)
}
response := struct {
Status string `json:"status"`
Message string `json:"message"`
}{
"success",
crawlID,
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(response)
}
func (endpoints *Endpoints) GetUser(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
// Validate steamid
if isValid := util.IsValidFormatSteamID(vars["steamid"]); !isValid {
util.SendBasicInvalidResponse(w, r, "Invalid input", vars, http.StatusBadRequest)
return
}
user, err := app.GetUserFromDB(endpoints.Cntr, vars["steamid"])
if err == mongo.ErrNoDocuments {
util.SendBasicInvalidResponse(w, r, "user does not exist", vars, http.StatusNotFound)
return
}
if err != nil {
logMsg := fmt.Sprintf("couldn't get user: %+v", err)
configuration.Logger.Error(logMsg)
panic(logMsg)
}
response := struct {
Status string `json:"status"`
User common.UserDocument `json:"user"`
}{
"success",
user,
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(response)
}
func (endpoints *Endpoints) GetDetailsForGames(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
gamesInput := dtos.GetDetailsForGamesInputDTO{}
err := json.NewDecoder(r.Body).Decode(&gamesInput)
if err != nil {
util.SendBasicInvalidResponse(w, r, "Invalid input", vars, http.StatusBadRequest)
return
}
if len(gamesInput.GameIDs) == 0 || len(gamesInput.GameIDs) > 20 {
util.SendBasicInvalidResponse(w, r, "Can only request 1-20 games in a request", vars, http.StatusBadRequest)
return
}
gameDetails, err := endpoints.Cntr.GetDetailsForGames(context.TODO(), gamesInput.GameIDs)
if err != nil {
logMsg := fmt.Sprintf("error retrieving games: %+v", err)
configuration.Logger.Error(logMsg)
panic(logMsg)
}
if len(gameDetails) == 0 {
gameDetails = []common.BareGameInfo{}
}
response := dtos.GetDetailsForGamesDTO{
Status: "success",
Games: gameDetails,
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(response)
}
func (endpoints *Endpoints) GetCrawlingStatus(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
_, err := ksuid.Parse(vars["crawlid"])
if err != nil {
util.SendBasicInvalidResponse(w, r, "invalid crawlid", vars, http.StatusNotFound)
return
}
crawlingStatus, err := app.GetCrawlingStatsFromDBFromCrawlID(endpoints.Cntr, vars["crawlid"])
if err != nil {
logMsg := fmt.Sprintf("couldn't get crawling status: %+v", err)
configuration.Logger.Error(logMsg)
panic(logMsg)
}
response := dtos.GetCrawlingStatusDTO{
Status: "success",
CrawlingStatus: crawlingStatus,
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(response)
}
func (endpoints *Endpoints) GetGraphableData(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
// Validate steamid
if isValid := util.IsValidFormatSteamID(vars["steamid"]); !isValid {
util.SendBasicInvalidResponse(w, r, "Invalid input", vars, http.StatusBadRequest)
return
}
user, err := endpoints.Cntr.GetUser(context.TODO(), vars["steamid"])
if err != nil {
util.SendBasicInvalidResponse(w, r, "couldn't get user", vars, http.StatusNotFound)
return
}
graphableDataForUser := dtos.GetGraphableDataForUserDTO{
Username: user.AccDetails.Personaname,
SteamID: user.AccDetails.SteamID,
FriendIDs: user.FriendIDs,
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(graphableDataForUser)
}
func (endpoints *Endpoints) GetUsernamesFromSteamIDs(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
steamIDsInput := dtos.GetUsernamesFromSteamIDsInputDTO{}
err := json.NewDecoder(r.Body).Decode(&steamIDsInput)
if err != nil {
util.SendBasicInvalidResponse(w, r, "Invalid input", vars, http.StatusBadRequest)
return
}
// Validate all given steamids
for _, steamID := range steamIDsInput.SteamIDs {
if isValid := util.IsValidFormatSteamID(steamID); !isValid {
util.SendBasicInvalidResponse(w, r, "Invalid input", vars, http.StatusBadRequest)
return
}
}
steamIDsToUsernames, err := endpoints.Cntr.GetUsernames(context.TODO(), steamIDsInput.SteamIDs)
if err != nil {
configuration.Logger.Sugar().Errorf("couldn't get usernames: %+v", err)
util.SendBasicInvalidResponse(w, r, "couldn't get usernames", vars, http.StatusBadRequest)
return
}
returnJSON := dtos.GetUsernamesFromSteamIDsDTO{}
for key, val := range steamIDsToUsernames {
currentMapping := dtos.SteamIDAndUsername{
SteamID: key,
Username: val,
}
returnJSON.SteamIDAndUsername = append(returnJSON.SteamIDAndUsername, currentMapping)
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(returnJSON)
}
func (endpoints *Endpoints) SaveProcessedGraphData(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
_, err := ksuid.Parse(vars["crawlid"])
if err != nil {
util.SendBasicInvalidResponse(w, r, "invalid input", vars, http.StatusBadRequest)
return
}
graphData := common.UsersGraphData{}
reqBodyBytes, err := gunzip(r.Body)
if err != nil {
util.SendBasicInvalidResponse(w, r, "invalid input", vars, http.StatusBadRequest)
return
}
err = json.Unmarshal(reqBodyBytes, &graphData)
if err != nil {
util.SendBasicInvalidResponse(w, r, "Invalid input", vars, http.StatusBadRequest)
return
}
success, err := endpoints.Cntr.SaveProcessedGraphData(vars["crawlid"], graphData)
if err != nil || !success {
configuration.Logger.Sugar().Errorf("could not save graph data: %+v", err)
util.SendBasicInvalidResponse(w, r, "could not save graph data", vars, http.StatusBadRequest)
return
}
response := struct {
Status string `json:"status"`
}{
"success",
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(response)
}
func (endpoints *Endpoints) GetProcessedGraphData(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
_, err := ksuid.Parse(vars["crawlid"])
if err != nil {
util.SendBasicInvalidResponse(w, r, "invalid input", vars, http.StatusBadRequest)
return
}
responseType := r.URL.Query().Get("responsetype")
if responseType != "json" && responseType != "" {
util.SendBasicInvalidResponse(w, r, "invalid input", vars, http.StatusBadRequest)
return
}
usersProcessedGraphData, err := endpoints.Cntr.GetProcessedGraphData(vars["crawlid"])
if err != nil {
configuration.Logger.Sugar().Errorf("failed to get processed graph data: %+v", err)
util.SendBasicInvalidResponse(w, r, "failed to get processed graph data", vars, http.StatusBadRequest)
return
}
response := datastructures.GetProcessedGraphDataDTO{
Status: "success",
UserGraphData: usersProcessedGraphData,
}
jsonResponse, err := json.Marshal(response)
if err != nil {
configuration.Logger.Sugar().Errorf("failed to marshal processedgraphdata: %+v", err)
}
switch responseType {
case "json":
w.Header().Set("Content-Length", strconv.Itoa(len(string(jsonResponse))+1))
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(response)
default:
w.Header().Set("Content-Encoding", "gzip")
w.Header().Set("Content-Type", "application/javascript")
gz := gzip.NewWriter(w)
defer gz.Close()
gz.Write(jsonResponse)
}
}
func (endpoints *Endpoints) DoesProcessedGraphDataExist(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
_, err := ksuid.Parse(vars["crawlid"])
if err != nil {
util.SendBasicInvalidResponse(w, r, "invalid input", vars, http.StatusBadRequest)
return
}
exists, err := endpoints.Cntr.DoesProcessedGraphDataExist(vars["crawlid"])
if err != nil {
util.SendBasicInvalidResponse(w, r, "failed to get processed graph data", vars, http.StatusBadRequest)
configuration.Logger.Sugar().Errorf("failed to get processed graph data: %+v", err)
return
}
response := dtos.DoesProcessedGraphDataExistDTO{
Status: "success",
}
if exists {
response.Exists = "yes"
} else {
response.Exists = "no"
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(response)
}
func (endpoints *Endpoints) CalculateShortestDistanceInfo(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
crawlIDsInput := datastructures.GetShortestDistanceInfoDataInputDTO{}
err := json.NewDecoder(r.Body).Decode(&crawlIDsInput)
if err != nil {
util.SendBasicInvalidResponse(w, r, "Invalid input", vars, http.StatusBadRequest)
return
}
for _, crawlID := range crawlIDsInput.CrawlIDs {
_, err := ksuid.Parse(crawlID)
if err != nil {
util.SendBasicInvalidResponse(w, r, "invalid crawlid", vars, http.StatusBadRequest)
return
}
}
if len(crawlIDsInput.CrawlIDs) != 2 {
util.SendBasicInvalidResponse(w, r, "two crawl IDS must be given", vars, http.StatusBadRequest)
return
}
exists, existingShortestDistanceInfo, err := endpoints.Cntr.GetShortestDistanceInfo(context.TODO(), crawlIDsInput.CrawlIDs)
if err != nil {
util.SendBasicInvalidResponse(w, r, "could not find shortest distance", vars, http.StatusBadRequest)
configuration.Logger.Sugar().Errorf("failed to retrieve existing shortestDistanceInfo: %+v", err)
return
}
if exists {
response := struct {
Status string `json:"status"`
Data datastructures.ShortestDistanceInfo `json:"shortestdistanceinfo"`
}{
"success",
existingShortestDistanceInfo,
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(response)
return
}
_, shortestDistanceInfo, err := app.CalulateShortestDistanceInfo(endpoints.Cntr, crawlIDsInput.CrawlIDs[0], crawlIDsInput.CrawlIDs[1])
if err != nil {
util.SendBasicInvalidResponse(w, r, "could not find shortest distance", vars, http.StatusBadRequest)
configuration.Logger.Sugar().Errorf("failed to get shortest distance: %s", err.Error())
return
}
success, err := endpoints.Cntr.SaveShortestDistance(context.TODO(), shortestDistanceInfo)
if err != nil || !success {
util.SendBasicInvalidResponse(w, r, "could not save shortest distance", vars, http.StatusBadRequest)
configuration.Logger.Sugar().Errorf("could not save shortest distance: %s", err.Error())
return
}
response := struct {
Status string `json:"status"`
Data datastructures.ShortestDistanceInfo `json:"shortestdistanceinfo"`
}{
"success",
shortestDistanceInfo,
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(response)
}
func (endpoints *Endpoints) GetShortestDistanceInfo(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
crawlIDsInput := datastructures.GetShortestDistanceInfoDataInputDTO{}
err := json.NewDecoder(r.Body).Decode(&crawlIDsInput)
if err != nil {
util.SendBasicInvalidResponse(w, r, "Invalid input", vars, http.StatusBadRequest)
return
}
for _, crawlID := range crawlIDsInput.CrawlIDs {
_, err := ksuid.Parse(crawlID)
if err != nil {
util.SendBasicInvalidResponse(w, r, "invalid crawlid", vars, http.StatusBadRequest)
return
}
}
if len(crawlIDsInput.CrawlIDs) != 2 {
util.SendBasicInvalidResponse(w, r, "two crawl IDS must be given", vars, http.StatusBadRequest)
return
}
_, shortestDistanceInfo, err := endpoints.Cntr.GetShortestDistanceInfo(context.TODO(), crawlIDsInput.CrawlIDs)
if err != nil {
util.SendBasicInvalidResponse(w, r, "could not get shortest distance", vars, http.StatusBadRequest)
configuration.Logger.Sugar().Errorf("could not get shortest distance: %s", err.Error())
return
}
response := struct {
Status string `json:"status"`
Data datastructures.ShortestDistanceInfo `json:"shortestdistanceinfo"`
}{
"success",
shortestDistanceInfo,
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(response)
}
func (endpoints *Endpoints) GetFinishedCrawlsAfterTimestamp(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
timestampString := r.URL.Query().Get("timestamp")
timeStampint64, err := strconv.ParseInt(timestampString, 10, 64)
if err != nil {
util.SendBasicInvalidResponse(w, r, "invalid timestamp", vars, http.StatusBadRequest)
return
}
crawlsFinishedAfterTimeStamp := dbmonitor.GetRecentFinishedCrawlsAfterTimestamp(timeStampint64)
response := datastructures.GetFinishedCrawlsDTO{
Status: "success",
AllFinishedCrawlsWithUsers: crawlsFinishedAfterTimeStamp,
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(response)
}
func (endpoints *Endpoints) GetFinishedShortestDistanceCrawlsAfterTimestamp(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
timestampString := r.URL.Query().Get("timestamp")
timeStampint64, err := strconv.ParseInt(timestampString, 10, 64)
if err != nil {
util.SendBasicInvalidResponse(w, r, "invalid timestamp", vars, http.StatusBadRequest)
return
}
crawlsFinishedAfterTimeStamp := dbmonitor.GetRecentFinishedShortestDistanceCrawlsAfterTimestamp(timeStampint64)
response := datastructures.GetFinishedShortestDistanceCrawlsDTO{
Status: "success",
CrawlingStatus: crawlsFinishedAfterTimeStamp,
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(response)
}
func (endpoints *Endpoints) GetTotalUsersInDB(w http.ResponseWriter, r *http.Request) {
response := struct {
Status string `json:"status"`
Usersindb int64 `json:"usersindb"`
}{
"success",
dbmonitor.GetTotalUsersInDB(),
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(response)
}
func (endpoints *Endpoints) GetTotalCrawlsCompleted(w http.ResponseWriter, r *http.Request) {
response := struct {
Status string `json:"status"`
Totalcrawls int64 `json:"totalcrawls"`
}{
"success",
dbmonitor.GetTotalCrawlsCompleted(),
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(response)
}
func (endpoints *Endpoints) Status(w http.ResponseWriter, r *http.Request) {
req := common.UptimeResponse{
Uptime: time.Since(configuration.ApplicationStartUpTime),
Status: "operational",
}
jsonObj, err := json.Marshal(req)
if err != nil {
log.Fatal(err)
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
fmt.Fprint(w, string(jsonObj))
}
| [
"\"AUTH_KEY\""
] | [] | [
"AUTH_KEY"
] | [] | ["AUTH_KEY"] | go | 1 | 0 | |
scripts/get_repos.py | import os
from github_client.client import GitHubClient
from utils.painter import paint
if __name__ == '__main__':
user = os.environ['user_name']
password = os.environ['user_password']
client = GitHubClient(user, password)
client.connect()
repositories = client.get_repositories()
user = client.get_user()
paint([user.login], [repositories], 500, 10000)
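# Illustrative invocation sketch (not part of the original script): the script reads its
# credentials from the user_name / user_password environment variables, so a run might
# look like the following (values are placeholders):
#   user_name=<github-login> user_password=<token-or-password> python scripts/get_repos.py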
| [] | [] | [
"user_password",
"user_name"
] | [] | ["user_password", "user_name"] | python | 2 | 0 | |
Drivers/Community/rp_linux.l2.switch.driver/linux.switch.driver.py | import sys
import os
import paramiko
import time
import logging
import json
import re
# create logger and set debugging level
logger = logging.getLogger()
logger.setLevel(logging.WARNING)
#logger.setLevel(logging.DEBUG)
# create console handler and set level
ch = logging.StreamHandler()
# create formatter
formatter = logging.Formatter("%(asctime)s %(levelname)-8s %(message)s")
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
class sshSession:
def __init__(self, address, username, password):
# connect to linux system via ssh
self.client = paramiko.client.SSHClient()
self.client.set_missing_host_key_policy(paramiko.client.AutoAddPolicy())
self.client.connect(address, username=username, password=password, look_for_keys=False)
def open(self):
# invoke a shell on the linux system
self.shell = self.client.invoke_shell()
def send(self, command):
# send a command to the linux system
if self.shell:
self.shell.send(command + '\n')
def recv(self, prompt):
# receive a response from the linux system
timeout = time.time() + 10
response = ''
while not response.endswith(prompt):
time.sleep(.10)
if time.time() > timeout:
break
else:
while self.shell.recv_ready():
response += self.shell.recv(4096).decode('utf-8').encode('utf-8')
logger.debug("Full Response: " + response)
# trim the first line (the command echoed back) and the last line (the prompt)
trimmedResponse = response.split('\r\n')[1:-1]
logger.debug("Trimmed RX Response: " + str(trimmedResponse))
# return the trimmed response
return trimmedResponse
def getPorts(self):
# get the ports on the system
portList = []
self.send("ls -l /sys/class/net/ |grep --color=never -v /devices/virtual/net |awk '{if(NR>1)print $9}'")
netList = self.recv(prompt)
for net in netList:
# get the link status of each port
self.send('cat /sys/class/net/'+ net +'/carrier')
carrier = self.recv(prompt)[0]
if carrier == '1':
linkState = 'online'
else:
linkState = 'offline'
# create a port properties dictionary for every port that not excluded
if net != excludedInterface:
portProperties = {"name" : net, "status" : linkState, "container" : "System"}
logger.debug("Port Properties: " + str(portProperties))
# add the port port properties to the port list
portList.append(portProperties)
# response
returnDictionary = {'ports' : portList}
return returnDictionary
def getProperties(self, args):
# get the system hostname
self.send('hostname')
response1 = self.recv(prompt)
# get the system make
self.send('cat /sys/devices/virtual/dmi/id/board_vendor')
response2 = self.recv(prompt)
# get the system model
self.send('cat /sys/devices/virtual/dmi/id/product_name')
response3 = self.recv(prompt)
# create resource properties dictionary
resourceProperties = {"Hostname" : response1[0], "Make" : response2[0], "Model" : response3[0]}
# response
returnDictionary = { 'properties' : resourceProperties }
# include ports in response if argument is true
if args[0] == 'true':
portList = self.getPorts()
returnDictionary['ports'] = portList['ports']
return returnDictionary
def createVlan(self, args):
# assign the VLAN Id argument
vlanId = args[0]
# create the bridge
self.send('sudo brctl addbr br-' + vlanId)
response = self.recv(prompt)
# enable STP
self.send('sudo brctl stp br-' + vlanId + ' on')
response = self.recv(prompt)
# verify the bridge was created with STP enabled
self.send("brctl show br-" + vlanId + " | grep --color=never br-" + vlanId + " | awk '{print $1 $3}'")
assert self.recv(prompt)[0] == 'br-' + vlanId + 'yes'
# bring up the bridge interface on the system
self.send('sudo ip link set dev br-' + vlanId + ' up')
response = self.recv(prompt)
# verify the bridge interface is up on the system
self.send('ip link show br-' + vlanId + ' | grep --color=never -o UP')
assert self.recv(prompt)[0] == 'UP'
def addToVlan(self, args):
# assign the arguments
vlanId = args[0]
portId = args[1]
tagSetting = args[2]
# create a tagged interface if necessary
if tagSetting == 'tagged':
# create the tagged interface on the system
self.send('sudo ip link add link ' + portId + ' name ' + portId + '.' + vlanId + ' type vlan id ' + vlanId)
response = self.recv(prompt)
portId = portId+'.'+vlanId
# verify the tagged interface was added to the system
self.send('ip link show ' + portId)
assert not re.match('.*does not exist',self.recv(prompt)[0])
# bring up the interface on the system
self.send('sudo ip link set dev ' + portId + ' up')
response = self.recv(prompt)
# verify the interface is up on the system
self.send('ip link show ' + portId + ' | grep --color=never -o UP')
assert self.recv(prompt)[0] == 'UP'
# add the interface to the bridge
self.send('sudo brctl addif br-' + vlanId + ' ' + portId)
response = self.recv(prompt)
# verify the interface was added to the bridge
self.send('brctl show br-' + vlanId + ' | grep --color=never -o ' + portId + '$')
assert self.recv(prompt)[0] == portId
def removeFromVlan(self, args):
# assign the arguments
vlanId = args[0]
portId = args[1]
tagSetting = args[2]
# determine if the interface is tagged
if tagSetting == 'tagged':
portId = portId+'.'+vlanId
# remove the interface from the bridge
self.send('sudo brctl delif br-' + vlanId + ' ' + portId)
response = self.recv(prompt)
# verify the interface was removed from the bridge
self.send('brctl show br-' + vlanId + ' | grep --color=never ' + portId + '$')
assert len(self.recv(prompt)) == 0
# remove the interface if it's tagged
if tagSetting == 'tagged':
self.send('sudo ip link delete ' + portId)
response = self.recv(prompt)
# verify the tagged interface was removed from the system
self.send('ip link show ' + portId)
assert re.match('.*does not exist',self.recv(prompt)[0])
def destroyVlan(self, args):
# assign the VLAN Id argument
vlanId = args[0]
# take down the bridge interface on the system
self.send('sudo ip link set dev br-' + vlanId + ' down')
response = self.recv(prompt)
# verify the bridge interface is down on the system
self.send('ip link show br-' + vlanId + ' | grep --color=never -o DOWN')
assert self.recv(prompt)[0] == 'DOWN'
# delete the bridge
self.send('sudo brctl delbr br-' + vlanId)
response = self.recv(prompt)
# verify the bridge was removed
self.send("brctl show | grep -o --color=never br-" + vlanId)
assert len(self.recv(prompt)) == 0
def close(self):
if self.client is not None:
self.client.close()
# get the driver call count from the external environment variables when running on a live agent
# otherwise use hard-coded values below for development
if 'VELOCITY_PARAM_call_count' not in os.environ:
# development area
callBlock = '2'
if callBlock == '1':
os.environ["VELOCITY_PARAM_call_count"] = '1'
os.environ["VELOCITY_PARAM_call_0"] = 'getProperties true'
elif callBlock == '2':
os.environ["VELOCITY_PARAM_call_count"] = '1'
os.environ["VELOCITY_PARAM_call_0"] = 'getPorts'
elif callBlock == '3':
os.environ["VELOCITY_PARAM_call_count"] = '3'
os.environ["VELOCITY_PARAM_call_0"] = 'createVlan 301'
os.environ["VELOCITY_PARAM_call_1"] = 'addToVlan 301 eth1 untagged'
os.environ["VELOCITY_PARAM_call_2"] = 'addToVlan 301 eth3 tagged'
elif callBlock == '4':
os.environ["VELOCITY_PARAM_call_count"] = '3'
os.environ["VELOCITY_PARAM_call_0"] = 'removeFromVlan 301 eth1 untagged'
os.environ["VELOCITY_PARAM_call_1"] = 'removeFromVlan 301 eth3 tagged'
os.environ["VELOCITY_PARAM_call_2"] = 'destroyVlan 301'
# hard-coded credentials and linux host for dev
sshUsername = 'spirent'
sshPassword = 'spirent'
sshServer = '10.18.36.168'
else:
# derive the credentials from the os environment when running on live agent
sshServer = os.environ["VELOCITY_PARAM_property_ipAddress"]
sshUsername = os.environ["VELOCITY_PARAM_property_username"]
sshPassword = os.environ["VELOCITY_PARAM_property_password"]
# the prompt to expect after each command
prompt = '$ '
# the interface to exclude from discovery
# normally this is the management interface of the system
# which is not a good choice as a bridge interface
excludedInterface = 'eth0'
# open the SSH session to the linux system
c = sshSession(sshServer, sshUsername, sshPassword)
c.open()
c.recv(prompt)
# perform each driver call
for callNumber in range(int(os.environ['VELOCITY_PARAM_call_count'])):
envVar = os.environ['VELOCITY_PARAM_call_' + str(callNumber)]
# set the call name and arguments
callName = envVar.split()[0]
callArgs = envVar.split()[1:]
# invoke each driver call with arguments and send output to stdout
retVal = getattr(c, callName)() if len(callArgs) < 1 else getattr(c, callName)(callArgs)
print(json.dumps(retVal))
# close the ssh session
c.close()
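# Illustrative live-agent invocation sketch (values are placeholders, inferred only from the
# VELOCITY_PARAM_* environment variables read above):
#   VELOCITY_PARAM_property_ipAddress=10.18.36.168 \
#   VELOCITY_PARAM_property_username=spirent \
#   VELOCITY_PARAM_property_password=spirent \
#   VELOCITY_PARAM_call_count=1 \
#   VELOCITY_PARAM_call_0='getPorts' \
#   python linux.switch.driver.py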
| [] | [] | [
"VELOCITY_PARAM_property_username",
"VELOCITY_PARAM_call_' + str(callNumber",
"VELOCITY_PARAM_call_count",
"VELOCITY_PARAM_call_2",
"VELOCITY_PARAM_property_ipAddress",
"VELOCITY_PARAM_call_1",
"VELOCITY_PARAM_property_password",
"VELOCITY_PARAM_call_0"
] | [] | ["VELOCITY_PARAM_property_username", "VELOCITY_PARAM_call_' + str(callNumber", "VELOCITY_PARAM_call_count", "VELOCITY_PARAM_call_2", "VELOCITY_PARAM_property_ipAddress", "VELOCITY_PARAM_call_1", "VELOCITY_PARAM_property_password", "VELOCITY_PARAM_call_0"] | python | 8 | 0 | |
vendor/github.com/CapChrisCap/go-color/color.go | package color
import (
"fmt"
"io"
"os"
"strconv"
"strings"
"sync"
"github.com/mattn/go-colorable"
"github.com/mattn/go-isatty"
)
var (
// NoColor defines if the output is colorized or not. It's dynamically set to
// false or true based on the stdout's file descriptor referring to a terminal
// or not. This is a global option and affects all colors. For more control
// over each color block use the methods DisableColor() individually.
NoColor = os.Getenv("FORCE_COLOR") != "true" && (os.Getenv("TERM") == "dumb" ||
(!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd())))
// Output defines the standard output of the print functions. By default
// os.Stdout is used.
Output = colorable.NewColorableStdout()
// Error defines a color supporting writer for os.Stderr.
Error = colorable.NewColorableStderr()
// colorsCache is used to reduce the count of created Color objects and
// allows to reuse already created objects with required Attribute.
colorsCache = make(map[Attribute]*Color)
colorsCacheMu sync.Mutex // protects colorsCache
)
// Color defines a custom color object which is defined by SGR parameters.
type Color struct {
params []Attribute
noColor *bool
}
// Attribute defines a single SGR Code
type Attribute int
const escape = "\x1b"
// Base attributes
const (
Reset Attribute = iota
Bold
Faint
Italic
Underline
BlinkSlow
BlinkRapid
ReverseVideo
Concealed
CrossedOut
)
// Foreground text colors
const (
FgBlack Attribute = iota + 30
FgRed
FgGreen
FgYellow
FgBlue
FgMagenta
FgCyan
FgWhite
)
// Foreground Hi-Intensity text colors
const (
FgHiBlack Attribute = iota + 90
FgHiRed
FgHiGreen
FgHiYellow
FgHiBlue
FgHiMagenta
FgHiCyan
FgHiWhite
)
// Background text colors
const (
BgBlack Attribute = iota + 40
BgRed
BgGreen
BgYellow
BgBlue
BgMagenta
BgCyan
BgWhite
)
// Background Hi-Intensity text colors
const (
BgHiBlack Attribute = iota + 100
BgHiRed
BgHiGreen
BgHiYellow
BgHiBlue
BgHiMagenta
BgHiCyan
BgHiWhite
)
// New returns a newly created color object.
func New(value ...Attribute) *Color {
c := &Color{params: make([]Attribute, 0)}
c.Add(value...)
return c
}
// Set sets the given parameters immediately. It will change the color of
// output with the given SGR parameters until color.Unset() is called.
func Set(p ...Attribute) *Color {
c := New(p...)
c.Set()
return c
}
// Unset resets all escape attributes and clears the output. Usually should
// be called after Set().
func Unset() {
if NoColor {
return
}
fmt.Fprintf(Output, "%s[%dm", escape, Reset)
}
// Set sets the SGR sequence.
func (c *Color) Set() *Color {
if c.isNoColorSet() {
return c
}
fmt.Fprintf(Output, c.format())
return c
}
func (c *Color) unset() {
if c.isNoColorSet() {
return
}
Unset()
}
func (c *Color) setWriter(w io.Writer) *Color {
if c.isNoColorSet() {
return c
}
fmt.Fprintf(w, c.format())
return c
}
func (c *Color) unsetWriter(w io.Writer) {
if c.isNoColorSet() {
return
}
if NoColor {
return
}
fmt.Fprintf(w, "%s[%dm", escape, Reset)
}
// Add is used to chain SGR parameters. Use as many as parameters to combine
// and create custom color objects. Example: Add(color.FgRed, color.Underline).
func (c *Color) Add(value ...Attribute) *Color {
c.params = append(c.params, value...)
return c
}
func (c *Color) prepend(value Attribute) {
c.params = append(c.params, 0)
copy(c.params[1:], c.params[0:])
c.params[0] = value
}
// Fprint formats using the default formats for its operands and writes to w.
// Spaces are added between operands when neither is a string.
// It returns the number of bytes written and any write error encountered.
// On Windows, users should wrap w with colorable.NewColorable() if w is of
// type *os.File.
func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
c.setWriter(w)
defer c.unsetWriter(w)
return fmt.Fprint(w, a...)
}
// Print formats using the default formats for its operands and writes to
// standard output. Spaces are added between operands when neither is a
// string. It returns the number of bytes written and any write error
// encountered. This is the standard fmt.Print() method wrapped with the given
// color.
func (c *Color) Print(a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprint(Output, a...)
}
// Fprintf formats according to a format specifier and writes to w.
// It returns the number of bytes written and any write error encountered.
// On Windows, users should wrap w with colorable.NewColorable() if w is of
// type *os.File.
func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
c.setWriter(w)
defer c.unsetWriter(w)
return fmt.Fprintf(w, format, a...)
}
// Printf formats according to a format specifier and writes to standard output.
// It returns the number of bytes written and any write error encountered.
// This is the standard fmt.Printf() method wrapped with the given color.
func (c *Color) Printf(format string, a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprintf(Output, format, a...)
}
// Fprintln formats using the default formats for its operands and writes to w.
// Spaces are always added between operands and a newline is appended.
// On Windows, users should wrap w with colorable.NewColorable() if w is of
// type *os.File.
func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
c.setWriter(w)
defer c.unsetWriter(w)
return fmt.Fprintln(w, a...)
}
// Println formats using the default formats for its operands and writes to
// standard output. Spaces are always added between operands and a newline is
// appended. It returns the number of bytes written and any write error
// encountered. This is the standard fmt.Print() method wrapped with the given
// color.
func (c *Color) Println(a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprintln(Output, a...)
}
// Sprint is just like Print, but returns a string instead of printing it.
func (c *Color) Sprint(a ...interface{}) string {
return c.wrap(fmt.Sprint(a...))
}
// Sprintln is just like Println, but returns a string instead of printing it.
func (c *Color) Sprintln(a ...interface{}) string {
return c.wrap(fmt.Sprintln(a...))
}
// Sprintf is just like Printf, but returns a string instead of printing it.
func (c *Color) Sprintf(format string, a ...interface{}) string {
return c.wrap(fmt.Sprintf(format, a...))
}
// FprintFunc returns a new function that prints the passed arguments as
// colorized with color.Fprint().
func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) {
return func(w io.Writer, a ...interface{}) {
c.Fprint(w, a...)
}
}
// PrintFunc returns a new function that prints the passed arguments as
// colorized with color.Print().
func (c *Color) PrintFunc() func(a ...interface{}) {
return func(a ...interface{}) {
c.Print(a...)
}
}
// FprintfFunc returns a new function that prints the passed arguments as
// colorized with color.Fprintf().
func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) {
return func(w io.Writer, format string, a ...interface{}) {
c.Fprintf(w, format, a...)
}
}
// PrintfFunc returns a new function that prints the passed arguments as
// colorized with color.Printf().
func (c *Color) PrintfFunc() func(format string, a ...interface{}) {
return func(format string, a ...interface{}) {
c.Printf(format, a...)
}
}
// FprintlnFunc returns a new function that prints the passed arguments as
// colorized with color.Fprintln().
func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) {
return func(w io.Writer, a ...interface{}) {
c.Fprintln(w, a...)
}
}
// PrintlnFunc returns a new function that prints the passed arguments as
// colorized with color.Println().
func (c *Color) PrintlnFunc() func(a ...interface{}) {
return func(a ...interface{}) {
c.Println(a...)
}
}
// SprintFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprint(). Useful to put into or mix into other
// string. Windows users should use this in conjunction with color.Output, example:
//
// put := New(FgYellow).SprintFunc()
// fmt.Fprintf(color.Output, "This is a %s", put("warning"))
func (c *Color) SprintFunc() func(a ...interface{}) string {
return func(a ...interface{}) string {
return c.wrap(fmt.Sprint(a...))
}
}
// SprintfFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprintf(). Useful to put into or mix into other
// string. Windows users should use this in conjunction with color.Output.
func (c *Color) SprintfFunc() func(format string, a ...interface{}) string {
return func(format string, a ...interface{}) string {
return c.wrap(fmt.Sprintf(format, a...))
}
}
// SprintlnFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprintln(). Useful to put into or mix into other
// string. Windows users should use this in conjunction with color.Output.
func (c *Color) SprintlnFunc() func(a ...interface{}) string {
return func(a ...interface{}) string {
return c.wrap(fmt.Sprintln(a...))
}
}
// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m"
// an example output might be: "1;36" -> bold cyan
func (c *Color) sequence() string {
format := make([]string, len(c.params))
for i, v := range c.params {
format[i] = strconv.Itoa(int(v))
}
return strings.Join(format, ";")
}
// wrap wraps the s string with the colors attributes. The string is ready to
// be printed.
func (c *Color) wrap(s string) string {
if c.isNoColorSet() {
return s
}
return c.format() + s + c.unformat()
}
func (c *Color) format() string {
return fmt.Sprintf("%s[%sm", escape, c.sequence())
}
func (c *Color) unformat() string {
return fmt.Sprintf("%s[%dm", escape, Reset)
}
// DisableColor disables the color output. Useful to not change any existing
// code and still being able to output. Can be used for flags like
// "--no-color". To enable back use EnableColor() method.
func (c *Color) DisableColor() {
c.noColor = boolPtr(true)
}
// EnableColor enables the color output. Use it in conjunction with
// DisableColor(). Otherwise this method has no side effects.
func (c *Color) EnableColor() {
c.noColor = boolPtr(false)
}
func (c *Color) isNoColorSet() bool {
// check first if a per-color option was explicitly set
if c.noColor != nil {
return *c.noColor
}
// if not return the global option, which is disabled by default
return NoColor
}
// Equals returns a boolean value indicating whether two colors are equal.
func (c *Color) Equals(c2 *Color) bool {
if len(c.params) != len(c2.params) {
return false
}
for _, attr := range c.params {
if !c2.attrExists(attr) {
return false
}
}
return true
}
func (c *Color) attrExists(a Attribute) bool {
for _, attr := range c.params {
if attr == a {
return true
}
}
return false
}
func boolPtr(v bool) *bool {
return &v
}
func getCachedColor(p Attribute) *Color {
colorsCacheMu.Lock()
defer colorsCacheMu.Unlock()
c, ok := colorsCache[p]
if !ok {
c = New(p)
colorsCache[p] = c
}
return c
}
func colorPrint(format string, p Attribute, a ...interface{}) {
c := getCachedColor(p)
if !strings.HasSuffix(format, "\n") {
format += "\n"
}
if len(a) == 0 {
c.Print(format)
} else {
c.Printf(format, a...)
}
}
func colorString(format string, p Attribute, a ...interface{}) string {
c := getCachedColor(p)
if len(a) == 0 {
return c.SprintFunc()(format)
}
return c.SprintfFunc()(format, a...)
}
// Black is a convenient helper function to print with black foreground. A
// newline is appended to format by default.
func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) }
// Red is a convenient helper function to print with red foreground. A
// newline is appended to format by default.
func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) }
// Green is a convenient helper function to print with green foreground. A
// newline is appended to format by default.
func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) }
// Yellow is a convenient helper function to print with yellow foreground.
// A newline is appended to format by default.
func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) }
// Blue is a convenient helper function to print with blue foreground. A
// newline is appended to format by default.
func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) }
// Magenta is a convenient helper function to print with magenta foreground.
// A newline is appended to format by default.
func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) }
// Cyan is a convenient helper function to print with cyan foreground. A
// newline is appended to format by default.
func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) }
// White is a convenient helper function to print with white foreground. A
// newline is appended to format by default.
func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) }
// BlackString is a convenient helper function to return a string with black
// foreground.
func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) }
// RedString is a convenient helper function to return a string with red
// foreground.
func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) }
// GreenString is a convenient helper function to return a string with green
// foreground.
func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) }
// YellowString is a convenient helper function to return a string with yellow
// foreground.
func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) }
// BlueString is a convenient helper function to return a string with blue
// foreground.
func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) }
// MagentaString is a convenient helper function to return a string with magenta
// foreground.
func MagentaString(format string, a ...interface{}) string {
return colorString(format, FgMagenta, a...)
}
// CyanString is a convenient helper function to return a string with cyan
// foreground.
func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) }
// WhiteString is a convenient helper function to return a string with white
// foreground.
func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) }
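// A minimal usage sketch for the helpers above; the format strings and values
// are illustrative. The print helpers append a trailing newline automatically,
// while the *String variants return the colorized string instead of printing:
//
//	Red("failed after %d retries", 3)
//	msg := GreenString("all %d checks passed", 10)
//	fmt.Fprint(color.Output, msg)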
// HiBlack is a convenient helper function to print with hi-intensity black foreground. A
// newline is appended to format by default.
func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) }
// HiRed is a convenient helper function to print with hi-intensity red foreground. A
// newline is appended to format by default.
func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) }
// HiGreen is a convenient helper function to print with hi-intensity green foreground. A
// newline is appended to format by default.
func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) }
// HiYellow is a convenient helper function to print with hi-intensity yellow foreground.
// A newline is appended to format by default.
func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) }
// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A
// newline is appended to format by default.
func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) }
// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground.
// A newline is appended to format by default.
func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) }
// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A
// newline is appended to format by default.
func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) }
// HiWhite is a convenient helper function to print with hi-intensity white foreground. A
// newline is appended to format by default.
func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) }
// HiBlackString is a convenient helper function to return a string with hi-intensity black
// foreground.
func HiBlackString(format string, a ...interface{}) string {
return colorString(format, FgHiBlack, a...)
}
// HiRedString is a convenient helper function to return a string with hi-intensity red
// foreground.
func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) }
// HiGreenString is a convenient helper function to return a string with hi-intensity green
// foreground.
func HiGreenString(format string, a ...interface{}) string {
return colorString(format, FgHiGreen, a...)
}
// HiYellowString is a convenient helper function to return a string with hi-intensity yellow
// foreground.
func HiYellowString(format string, a ...interface{}) string {
return colorString(format, FgHiYellow, a...)
}
// HiBlueString is a convenient helper function to return a string with hi-intensity blue
// foreground.
func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) }
// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta
// foreground.
func HiMagentaString(format string, a ...interface{}) string {
return colorString(format, FgHiMagenta, a...)
}
// HiCyanString is a convenient helper function to return a string with hi-intensity cyan
// foreground.
func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) }
// HiWhiteString is a convenient helper function to return a string with hi-intensity white
// foreground.
func HiWhiteString(format string, a ...interface{}) string {
return colorString(format, FgHiWhite, a...)
}
| [
"\"FORCE_COLOR\"",
"\"TERM\""
] | [] | [
"TERM",
"FORCE_COLOR"
] | [] | ["TERM", "FORCE_COLOR"] | go | 2 | 0 | |
test/integration_test.go | package test
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math"
"math/rand"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"testing"
"time"
"cloud.google.com/go/storage"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/sts"
terraws "github.com/gruntwork-io/terratest/modules/aws"
"github.com/gruntwork-io/terratest/modules/git"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/api/iterator"
"github.com/gruntwork-io/terragrunt/aws_helper"
"github.com/gruntwork-io/terragrunt/cli"
"github.com/gruntwork-io/terragrunt/cli/tfsource"
"github.com/gruntwork-io/terragrunt/codegen"
"github.com/gruntwork-io/terragrunt/config"
"github.com/gruntwork-io/terragrunt/configstack"
terragruntDynamoDb "github.com/gruntwork-io/terragrunt/dynamodb"
"github.com/gruntwork-io/terragrunt/errors"
"github.com/gruntwork-io/terragrunt/options"
"github.com/gruntwork-io/terragrunt/remote"
"github.com/gruntwork-io/terragrunt/shell"
"github.com/gruntwork-io/terragrunt/util"
)
// hard-code this to match the test fixture for now
const (
TERRAFORM_REMOTE_STATE_S3_REGION = "us-west-2"
TERRAFORM_REMOTE_STATE_GCP_REGION = "eu"
TEST_FIXTURE_PATH = "fixture/"
TEST_FIXTURE_CODEGEN_PATH = "fixture-codegen"
TEST_FIXTURE_GCS_PATH = "fixture-gcs/"
TEST_FIXTURE_GCS_BYO_BUCKET_PATH = "fixture-gcs-byo-bucket/"
TEST_FIXTURE_INCLUDE_PATH = "fixture-include/"
TEST_FIXTURE_INCLUDE_CHILD_REL_PATH = "qa/my-app"
TEST_FIXTURE_STACK = "fixture-stack/"
TEST_FIXTURE_GRAPH_DEPENDENCIES = "fixture-graph-dependencies"
TEST_FIXTURE_OUTPUT_ALL = "fixture-output-all"
TEST_FIXTURE_STDOUT = "fixture-download/stdout-test"
TEST_FIXTURE_EXTRA_ARGS_PATH = "fixture-extra-args/"
TEST_FIXTURE_ENV_VARS_BLOCK_PATH = "fixture-env-vars-block/"
TEST_FIXTURE_SKIP = "fixture-skip/"
TEST_FIXTURE_CONFIG_SINGLE_JSON_PATH = "fixture-config-files/single-json-config"
TEST_FIXTURE_LOCAL_DOWNLOAD_PATH = "fixture-download/local"
TEST_FIXTURE_CUSTOM_LOCK_FILE = "fixture-download/custom-lock-file"
TEST_FIXTURE_REMOTE_DOWNLOAD_PATH = "fixture-download/remote"
TEST_FIXTURE_OVERRIDE_DOWNLOAD_PATH = "fixture-download/override"
TEST_FIXTURE_LOCAL_RELATIVE_DOWNLOAD_PATH = "fixture-download/local-relative"
TEST_FIXTURE_REMOTE_RELATIVE_DOWNLOAD_PATH = "fixture-download/remote-relative"
TEST_FIXTURE_LOCAL_WITH_BACKEND = "fixture-download/local-with-backend"
TEST_FIXTURE_LOCAL_WITH_EXCLUDE_DIR = "fixture-download/local-with-exclude-dir"
TEST_FIXTURE_LOCAL_WITH_INCLUDE_DIR = "fixture-download/local-with-include-dir"
TEST_FIXTURE_REMOTE_WITH_BACKEND = "fixture-download/remote-with-backend"
TEST_FIXTURE_REMOTE_MODULE_IN_ROOT = "fixture-download/remote-module-in-root"
TEST_FIXTURE_LOCAL_MISSING_BACKEND = "fixture-download/local-with-missing-backend"
TEST_FIXTURE_LOCAL_WITH_HIDDEN_FOLDER = "fixture-download/local-with-hidden-folder"
TEST_FIXTURE_PREVENT_DESTROY_OVERRIDE = "fixture-prevent-destroy-override/child"
TEST_FIXTURE_PREVENT_DESTROY_NOT_SET = "fixture-prevent-destroy-not-set/child"
TEST_FIXTURE_LOCAL_PREVENT_DESTROY = "fixture-download/local-with-prevent-destroy"
TEST_FIXTURE_LOCAL_PREVENT_DESTROY_DEPENDENCIES = "fixture-download/local-with-prevent-destroy-dependencies"
TEST_FIXTURE_LOCAL_INCLUDE_PREVENT_DESTROY_DEPENDENCIES = "fixture-download/local-include-with-prevent-destroy-dependencies"
TEST_FIXTURE_EXTERNAL_DEPENDENCIE = "fixture-external-dependencies"
TEST_FIXTURE_GET_OUTPUT = "fixture-get-output"
TEST_FIXTURE_HOOKS_BEFORE_ONLY_PATH = "fixture-hooks/before-only"
TEST_FIXTURE_HOOKS_ALL_PATH = "fixture-hooks/all"
TEST_FIXTURE_HOOKS_AFTER_ONLY_PATH = "fixture-hooks/after-only"
TEST_FIXTURE_HOOKS_BEFORE_AND_AFTER_PATH = "fixture-hooks/before-and-after"
TEST_FIXTURE_HOOKS_BEFORE_AND_AFTER_MERGE_PATH = "fixture-hooks/before-and-after-merge"
TEST_FIXTURE_HOOKS_SKIP_ON_ERROR_PATH = "fixture-hooks/skip-on-error"
TEST_FIXTURE_HOOKS_ONE_ARG_ACTION_PATH = "fixture-hooks/one-arg-action"
TEST_FIXTURE_HOOKS_EMPTY_STRING_COMMAND_PATH = "fixture-hooks/bad-arg-action/empty-string-command"
TEST_FIXTURE_HOOKS_EMPTY_COMMAND_LIST_PATH = "fixture-hooks/bad-arg-action/empty-command-list"
TEST_FIXTURE_HOOKS_INTERPOLATIONS_PATH = "fixture-hooks/interpolations"
TEST_FIXTURE_HOOKS_INIT_ONCE_NO_SOURCE_NO_BACKEND = "fixture-hooks/init-once/no-source-no-backend"
TEST_FIXTURE_HOOKS_INIT_ONCE_NO_SOURCE_WITH_BACKEND = "fixture-hooks/init-once/no-source-with-backend"
TEST_FIXTURE_HOOKS_INIT_ONCE_WITH_SOURCE_NO_BACKEND = "fixture-hooks/init-once/with-source-no-backend"
TEST_FIXTURE_HOOKS_INIT_ONCE_WITH_SOURCE_WITH_BACKEND = "fixture-hooks/init-once/with-source-with-backend"
TEST_FIXTURE_FAILED_TERRAFORM = "fixture-failure"
TEST_FIXTURE_EXIT_CODE = "fixture-exit-code"
TEST_FIXTURE_AUTO_RETRY_RERUN = "fixture-auto-retry/re-run"
TEST_FIXTURE_AUTO_RETRY_EXHAUST = "fixture-auto-retry/exhaust"
TEST_FIXTURE_AUTO_RETRY_CUSTOM_ERRORS = "fixture-auto-retry/custom-errors"
TEST_FIXTURE_AUTO_RETRY_CUSTOM_ERRORS_NOT_SET = "fixture-auto-retry/custom-errors-not-set"
TEST_FIXTURE_AUTO_RETRY_APPLY_ALL_RETRIES = "fixture-auto-retry/apply-all"
TEST_FIXTURE_AUTO_RETRY_CONFIGURABLE_RETRIES = "fixture-auto-retry/configurable-retries"
TEST_FIXTURE_AUTO_RETRY_CONFIGURABLE_RETRIES_ERROR_1 = "fixture-auto-retry/configurable-retries-incorrect-retry-attempts"
TEST_FIXTURE_AUTO_RETRY_CONFIGURABLE_RETRIES_ERROR_2 = "fixture-auto-retry/configurable-retries-incorrect-sleep-interval"
TEST_FIXTURE_AWS_PROVIDER_PATCH = "fixture-aws-provider-patch"
TEST_FIXTURE_INPUTS = "fixture-inputs"
TEST_FIXTURE_LOCALS_ERROR_UNDEFINED_LOCAL = "fixture-locals-errors/undefined-local"
TEST_FIXTURE_LOCALS_ERROR_UNDEFINED_LOCAL_BUT_INPUT = "fixture-locals-errors/undefined-local-but-input"
TEST_FIXTURE_LOCALS_CANONICAL = "fixture-locals/canonical"
TEST_FIXTURE_LOCALS_IN_INCLUDE = "fixture-locals/local-in-include"
TEST_FIXTURE_LOCALS_IN_INCLUDE_CHILD_REL_PATH = "qa/my-app"
TEST_FIXTURE_READ_CONFIG = "fixture-read-config"
TEST_FIXTURE_AWS_GET_CALLER_IDENTITY = "fixture-get-aws-caller-identity"
TEST_FIXTURE_GET_PLATFORM = "fixture-get-platform"
TEST_FIXTURE_GET_TERRAGRUNT_SOURCE_HCL = "fixture-get-terragrunt-source-hcl"
TEST_FIXTURE_GET_TERRAGRUNT_SOURCE_CLI = "fixture-get-terragrunt-source-cli"
TEST_FIXTURE_REGRESSIONS = "fixture-regressions"
TEST_FIXTURE_DIRS_PATH = "fixture-dirs"
TEST_FIXTURE_PARALLELISM = "fixture-parallelism"
TEST_FIXTURE_SOPS = "fixture-sops"
TERRAFORM_BINARY = "terraform"
TERRAFORM_FOLDER = ".terraform"
TERRAFORM_STATE = "terraform.tfstate"
TERRAFORM_STATE_BACKUP = "terraform.tfstate.backup"
TERRAGRUNT_CACHE = ".terragrunt-cache"
)
func init() {
rand.Seed(time.Now().UnixNano())
}
func TestTerragruntInitHookNoSourceNoBackend(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_HOOKS_INIT_ONCE_NO_SOURCE_NO_BACKEND)
tmpEnvPath := copyEnvironment(t, "fixture-hooks/init-once")
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_HOOKS_INIT_ONCE_NO_SOURCE_NO_BACKEND)
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr)
output := stderr.String()
if err != nil {
t.Errorf("Did not expect to get error: %s", err.Error())
}
assert.Equal(t, 1, strings.Count(output, "AFTER_INIT_ONLY_ONCE"), "Hooks on init command executed more than once")
// With no source, `init-from-module` should not execute
assert.NotContains(t, output, "AFTER_INIT_FROM_MODULE_ONLY_ONCE", "Hooks on init-from-module command executed when no source was specified")
}
func TestTerragruntInitHookNoSourceWithBackend(t *testing.T) {
t.Parallel()
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
cleanupTerraformFolder(t, TEST_FIXTURE_HOOKS_INIT_ONCE_NO_SOURCE_WITH_BACKEND)
tmpEnvPath := copyEnvironment(t, "fixture-hooks/init-once")
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_HOOKS_INIT_ONCE_NO_SOURCE_WITH_BACKEND)
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
rootTerragruntConfigPath := util.JoinPath(rootPath, config.DefaultTerragruntConfigPath)
copyTerragruntConfigAndFillPlaceholders(t, rootTerragruntConfigPath, rootTerragruntConfigPath, s3BucketName, "not-used", TERRAFORM_REMOTE_STATE_S3_REGION)
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr)
output := stderr.String()
if err != nil {
t.Errorf("Did not expect to get error: %s", err.Error())
}
assert.Equal(t, 1, strings.Count(output, "AFTER_INIT_ONLY_ONCE"), "Hooks on init command executed more than once")
// With no source, `init-from-module` should not execute
assert.NotContains(t, output, "AFTER_INIT_FROM_MODULE_ONLY_ONCE", "Hooks on init-from-module command executed when no source was specified")
}
func TestTerragruntInitHookWithSourceNoBackend(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_HOOKS_INIT_ONCE_WITH_SOURCE_NO_BACKEND)
tmpEnvPath := copyEnvironment(t, "fixture-hooks/init-once")
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_HOOKS_INIT_ONCE_WITH_SOURCE_NO_BACKEND)
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s --terragrunt-log-level debug", rootPath), &stdout, &stderr)
logBufferContentsLineByLine(t, stdout, "apply stdout")
logBufferContentsLineByLine(t, stderr, "apply stderr")
output := stderr.String()
if err != nil {
t.Errorf("Did not expect to get error: %s", err.Error())
}
assert.Equal(t, 1, strings.Count(output, "AFTER_INIT_ONLY_ONCE\n"), "Hooks on init command executed more than once")
assert.Equal(t, 1, strings.Count(output, "AFTER_INIT_FROM_MODULE_ONLY_ONCE\n"), "Hooks on init-from-module command executed more than once")
}
func TestTerragruntInitHookWithSourceWithBackend(t *testing.T) {
t.Parallel()
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
cleanupTerraformFolder(t, TEST_FIXTURE_HOOKS_INIT_ONCE_WITH_SOURCE_WITH_BACKEND)
tmpEnvPath := copyEnvironment(t, "fixture-hooks/init-once")
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_HOOKS_INIT_ONCE_WITH_SOURCE_WITH_BACKEND)
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
rootTerragruntConfigPath := util.JoinPath(rootPath, config.DefaultTerragruntConfigPath)
copyTerragruntConfigAndFillPlaceholders(t, rootTerragruntConfigPath, rootTerragruntConfigPath, s3BucketName, "not-used", TERRAFORM_REMOTE_STATE_S3_REGION)
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr)
output := stderr.String()
if err != nil {
t.Errorf("Did not expect to get error: %s", err.Error())
}
// `init` hook should execute only once
assert.Equal(t, 1, strings.Count(output, "AFTER_INIT_ONLY_ONCE"), "Hooks on init command executed more than once")
// `init-from-module` hook should execute only once
assert.Equal(t, 1, strings.Count(output, "AFTER_INIT_FROM_MODULE_ONLY_ONCE"), "Hooks on init-from-module command executed more than once")
}
func TestTerragruntHookRunAllApply(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_HOOKS_ALL_PATH)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_HOOKS_ALL_PATH)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_HOOKS_ALL_PATH)
beforeOnlyPath := util.JoinPath(rootPath, "before-only")
afterOnlyPath := util.JoinPath(rootPath, "after-only")
runTerragrunt(t, fmt.Sprintf("terragrunt run-all apply -auto-approve --terragrunt-log-level debug --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
_, beforeErr := ioutil.ReadFile(beforeOnlyPath + "/file.out")
assert.NoError(t, beforeErr)
_, afterErr := ioutil.ReadFile(afterOnlyPath + "/file.out")
assert.NoError(t, afterErr)
}
func TestTerragruntHookApplyAll(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_HOOKS_ALL_PATH)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_HOOKS_ALL_PATH)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_HOOKS_ALL_PATH)
beforeOnlyPath := util.JoinPath(rootPath, "before-only")
afterOnlyPath := util.JoinPath(rootPath, "after-only")
runTerragrunt(t, fmt.Sprintf("terragrunt apply-all -auto-approve --terragrunt-log-level debug --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
_, beforeErr := ioutil.ReadFile(beforeOnlyPath + "/file.out")
assert.NoError(t, beforeErr)
_, afterErr := ioutil.ReadFile(afterOnlyPath + "/file.out")
assert.NoError(t, afterErr)
}
func TestTerragruntBeforeHook(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_HOOKS_BEFORE_ONLY_PATH)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_HOOKS_BEFORE_ONLY_PATH)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_HOOKS_BEFORE_ONLY_PATH)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
_, exception := ioutil.ReadFile(rootPath + "/file.out")
assert.NoError(t, exception)
}
func TestTerragruntHookWorkingDir(t *testing.T) {
t.Parallel()
fixturePath := "fixture-hooks/working_dir"
cleanupTerraformFolder(t, fixturePath)
tmpEnvPath := copyEnvironment(t, fixturePath)
rootPath := util.JoinPath(tmpEnvPath, fixturePath)
runTerragrunt(t, fmt.Sprintf("terragrunt validate --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
}
func TestTerragruntAfterHook(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_HOOKS_AFTER_ONLY_PATH)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_HOOKS_AFTER_ONLY_PATH)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_HOOKS_AFTER_ONLY_PATH)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
_, exception := ioutil.ReadFile(rootPath + "/file.out")
assert.NoError(t, exception)
}
func TestTerragruntBeforeAndAfterHook(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_HOOKS_BEFORE_AND_AFTER_PATH)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_HOOKS_BEFORE_AND_AFTER_PATH)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_HOOKS_BEFORE_AND_AFTER_PATH)
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr)
_, beforeException := ioutil.ReadFile(rootPath + "/before.out")
_, afterException := ioutil.ReadFile(rootPath + "/after.out")
output := stderr.String()
if err != nil {
t.Errorf("Did not expect to get error: %s", err.Error())
}
assert.Equal(t, 0, strings.Count(output, "BEFORE_TERRAGRUNT_READ_CONFIG"), "terragrunt-read-config before_hook should not be triggered")
t.Logf("output: %s", output)
assert.Equal(t, 1, strings.Count(output, "AFTER_TERRAGRUNT_READ_CONFIG"), "Hooks on terragrunt-read-config command executed more than once")
assert.NoError(t, beforeException)
assert.NoError(t, afterException)
}
func TestTerragruntBeforeAndAfterMergeHook(t *testing.T) {
t.Parallel()
childPath := util.JoinPath(TEST_FIXTURE_HOOKS_BEFORE_AND_AFTER_MERGE_PATH, TEST_FIXTURE_INCLUDE_CHILD_REL_PATH)
cleanupTerraformFolder(t, childPath)
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
t.Logf("bucketName: %s", s3BucketName)
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
tmpTerragruntConfigPath := createTmpTerragruntConfigWithParentAndChild(t, TEST_FIXTURE_HOOKS_BEFORE_AND_AFTER_MERGE_PATH, TEST_FIXTURE_INCLUDE_CHILD_REL_PATH, s3BucketName, config.DefaultTerragruntConfigPath, config.DefaultTerragruntConfigPath)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-config %s --terragrunt-working-dir %s", tmpTerragruntConfigPath, childPath))
_, beforeException := ioutil.ReadFile(childPath + "/before.out")
_, beforeChildException := ioutil.ReadFile(childPath + "/before-child.out")
_, beforeOverriddenParentException := ioutil.ReadFile(childPath + "/before-parent.out")
_, afterException := ioutil.ReadFile(childPath + "/after.out")
_, afterParentException := ioutil.ReadFile(childPath + "/after-parent.out")
assert.NoError(t, beforeException)
assert.NoError(t, beforeChildException)
assert.NoError(t, afterException)
assert.NoError(t, afterParentException)
	// Expect a PathError because the overridden parent hook should not have created this file
assert.Error(t, beforeOverriddenParentException)
}
func TestTerragruntSkipOnError(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_HOOKS_SKIP_ON_ERROR_PATH)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_HOOKS_SKIP_ON_ERROR_PATH)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_HOOKS_SKIP_ON_ERROR_PATH)
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr)
output := stderr.String()
if err != nil {
assert.Contains(t, output, "BEFORE_SHOULD_DISPLAY")
assert.NotContains(t, output, "BEFORE_NODISPLAY")
assert.Contains(t, output, "AFTER_SHOULD_DISPLAY")
assert.NotContains(t, output, "AFTER_NODISPLAY")
} else {
t.Error("Expected NO terragrunt execution due to previous errors but it did run.")
}
}
func TestTerragruntBeforeOneArgAction(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_HOOKS_ONE_ARG_ACTION_PATH)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_HOOKS_ONE_ARG_ACTION_PATH)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_HOOKS_ONE_ARG_ACTION_PATH)
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s --terragrunt-log-level debug", rootPath), &stdout, &stderr)
output := stderr.String()
if err != nil {
t.Error("Expected successful execution of terragrunt with 1 before hook execution.")
} else {
assert.Contains(t, output, "Running command: date")
}
}
func TestTerragruntEmptyStringCommandHook(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_HOOKS_EMPTY_STRING_COMMAND_PATH)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_HOOKS_EMPTY_STRING_COMMAND_PATH)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_HOOKS_EMPTY_STRING_COMMAND_PATH)
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr)
if err != nil {
assert.Contains(t, err.Error(), "Need at least one non-empty argument in 'execute'.")
} else {
t.Error("Expected an Error with message: 'Need at least one argument'")
}
}
func TestTerragruntEmptyCommandListHook(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_HOOKS_EMPTY_COMMAND_LIST_PATH)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_HOOKS_EMPTY_COMMAND_LIST_PATH)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_HOOKS_EMPTY_COMMAND_LIST_PATH)
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr)
if err != nil {
assert.Contains(t, err.Error(), "Need at least one non-empty argument in 'execute'.")
} else {
t.Error("Expected an Error with message: 'Need at least one argument'")
}
}
func TestTerragruntHookInterpolation(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_HOOKS_INTERPOLATIONS_PATH)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_HOOKS_INTERPOLATIONS_PATH)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_HOOKS_INTERPOLATIONS_PATH)
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr)
erroutput := stderr.String()
homePath := os.Getenv("HOME")
if homePath == "" {
homePath = "HelloWorld"
}
if err != nil {
t.Errorf("Did not expect to get error: %s", err.Error())
}
assert.Contains(t, erroutput, homePath)
}
func TestTerragruntWorksWithLocalTerraformVersion(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_PATH)
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
lockTableName := fmt.Sprintf("terragrunt-test-locks-%s", strings.ToLower(uniqueId()))
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
defer cleanupTableForTest(t, lockTableName, TERRAFORM_REMOTE_STATE_S3_REGION)
tmpTerragruntConfigPath := createTmpTerragruntConfig(t, TEST_FIXTURE_PATH, s3BucketName, lockTableName, config.DefaultTerragruntConfigPath)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-config %s --terragrunt-working-dir %s", tmpTerragruntConfigPath, TEST_FIXTURE_PATH))
var expectedS3Tags = map[string]string{
"owner": "terragrunt integration test",
"name": "Terraform state storage"}
validateS3BucketExistsAndIsTagged(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName, expectedS3Tags)
var expectedDynamoDBTableTags = map[string]string{
"owner": "terragrunt integration test",
"name": "Terraform lock table"}
validateDynamoDBTableExistsAndIsTagged(t, TERRAFORM_REMOTE_STATE_S3_REGION, lockTableName, expectedDynamoDBTableTags)
}
// Regression test to ensure that `accesslogging_bucket_name` and `accesslogging_target_prefix` are taken into account
// & the TargetLogs bucket is set to a new S3 bucket, different from the origin S3 bucket
// & the logs objects are prefixed with the `accesslogging_target_prefix` value
func TestTerragruntSetsAccessLoggingForTfStateS3BucketToADifferentBucketWithGivenTargetPrefix(t *testing.T) {
t.Parallel()
examplePath := filepath.Join(TEST_FIXTURE_REGRESSIONS, "accesslogging-bucket/with-target-prefix-input")
cleanupTerraformFolder(t, examplePath)
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
s3BucketLogsName := fmt.Sprintf("%s-tf-state-logs", s3BucketName)
s3BucketLogsTargetPrefix := "logs/"
lockTableName := fmt.Sprintf("terragrunt-test-locks-%s", strings.ToLower(uniqueId()))
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
defer cleanupTableForTest(t, lockTableName, TERRAFORM_REMOTE_STATE_S3_REGION)
tmpTerragruntConfigPath := createTmpTerragruntConfig(
t,
examplePath,
s3BucketName,
lockTableName,
"remote_terragrunt.hcl",
)
runTerragrunt(t, fmt.Sprintf("terragrunt validate --terragrunt-non-interactive --terragrunt-config %s --terragrunt-working-dir %s", tmpTerragruntConfigPath, examplePath))
targetLoggingBucket := terraws.GetS3BucketLoggingTarget(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
targetLoggingBucketPrefix := terraws.GetS3BucketLoggingTargetPrefix(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
assert.Equal(t, s3BucketLogsName, targetLoggingBucket)
assert.Equal(t, s3BucketLogsTargetPrefix, targetLoggingBucketPrefix)
}
// Regression test to ensure that `accesslogging_bucket_name` is taken into account
// & when no `accesslogging_target_prefix` provided, then **default** value is used for TargetPrefix
func TestTerragruntSetsAccessLoggingForTfStateS3BucketToADifferentBucketWithDefaultTargetPrefix(t *testing.T) {
t.Parallel()
examplePath := filepath.Join(TEST_FIXTURE_REGRESSIONS, "accesslogging-bucket/no-target-prefix-input")
cleanupTerraformFolder(t, examplePath)
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
s3BucketLogsName := fmt.Sprintf("%s-tf-state-logs", s3BucketName)
lockTableName := fmt.Sprintf("terragrunt-test-locks-%s", strings.ToLower(uniqueId()))
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
defer cleanupTableForTest(t, lockTableName, TERRAFORM_REMOTE_STATE_S3_REGION)
tmpTerragruntConfigPath := createTmpTerragruntConfig(
t,
examplePath,
s3BucketName,
lockTableName,
"remote_terragrunt.hcl",
)
runTerragrunt(t, fmt.Sprintf("terragrunt validate --terragrunt-non-interactive --terragrunt-config %s --terragrunt-working-dir %s", tmpTerragruntConfigPath, examplePath))
targetLoggingBucket := terraws.GetS3BucketLoggingTarget(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
targetLoggingBucketPrefix := terraws.GetS3BucketLoggingTargetPrefix(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
assert.Equal(t, s3BucketLogsName, targetLoggingBucket)
assert.Equal(t, remote.DefaultS3BucketAccessLoggingTargetPrefix, targetLoggingBucketPrefix)
}
func TestTerragruntWorksWithGCSBackend(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_GCS_PATH)
// We need a project to create the bucket in, so we pull one from the recommended environment variable.
project := os.Getenv("GOOGLE_CLOUD_PROJECT")
gcsBucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
defer deleteGCSBucket(t, gcsBucketName)
tmpTerragruntGCSConfigPath := createTmpTerragruntGCSConfig(t, TEST_FIXTURE_GCS_PATH, project, TERRAFORM_REMOTE_STATE_GCP_REGION, gcsBucketName, config.DefaultTerragruntConfigPath)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-config %s --terragrunt-working-dir %s", tmpTerragruntGCSConfigPath, TEST_FIXTURE_GCS_PATH))
var expectedGCSLabels = map[string]string{
"owner": "terragrunt_test",
"name": "terraform_state_storage"}
validateGCSBucketExistsAndIsLabeled(t, TERRAFORM_REMOTE_STATE_GCP_REGION, gcsBucketName, expectedGCSLabels)
}
func TestTerragruntWorksWithExistingGCSBucket(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_GCS_BYO_BUCKET_PATH)
// We need a project to create the bucket in, so we pull one from the recommended environment variable.
project := os.Getenv("GOOGLE_CLOUD_PROJECT")
gcsBucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
defer deleteGCSBucket(t, gcsBucketName)
// manually create the GCS bucket outside the US (default) to test Terragrunt works correctly with an existing bucket.
location := TERRAFORM_REMOTE_STATE_GCP_REGION
createGCSBucket(t, project, location, gcsBucketName)
tmpTerragruntGCSConfigPath := createTmpTerragruntGCSConfig(t, TEST_FIXTURE_GCS_BYO_BUCKET_PATH, project, TERRAFORM_REMOTE_STATE_GCP_REGION, gcsBucketName, config.DefaultTerragruntConfigPath)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-config %s --terragrunt-working-dir %s", tmpTerragruntGCSConfigPath, TEST_FIXTURE_GCS_BYO_BUCKET_PATH))
validateGCSBucketExistsAndIsLabeled(t, location, gcsBucketName, nil)
}
func TestTerragruntWorksWithIncludes(t *testing.T) {
t.Parallel()
childPath := util.JoinPath(TEST_FIXTURE_INCLUDE_PATH, TEST_FIXTURE_INCLUDE_CHILD_REL_PATH)
cleanupTerraformFolder(t, childPath)
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
tmpTerragruntConfigPath := createTmpTerragruntConfigWithParentAndChild(t, TEST_FIXTURE_INCLUDE_PATH, TEST_FIXTURE_INCLUDE_CHILD_REL_PATH, s3BucketName, config.DefaultTerragruntConfigPath, config.DefaultTerragruntConfigPath)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-config %s --terragrunt-working-dir %s", tmpTerragruntConfigPath, childPath))
}
func TestTerragruntWorksWithSingleJsonConfig(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_CONFIG_SINGLE_JSON_PATH)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_CONFIG_SINGLE_JSON_PATH)
rootTerragruntConfigPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_CONFIG_SINGLE_JSON_PATH)
runTerragrunt(t, fmt.Sprintf("terragrunt plan --terragrunt-non-interactive --terragrunt-working-dir %s", rootTerragruntConfigPath))
}
func TestTerragruntReportsTerraformErrorsWithPlanAll(t *testing.T) {
cleanupTerraformFolder(t, TEST_FIXTURE_FAILED_TERRAFORM)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_FAILED_TERRAFORM)
rootTerragruntConfigPath := util.JoinPath(tmpEnvPath, "fixture-failure")
cmd := fmt.Sprintf("terragrunt plan-all --terragrunt-non-interactive --terragrunt-working-dir %s", rootTerragruntConfigPath)
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
// Call runTerragruntCommand directly because this command contains failures (which causes runTerragruntRedirectOutput to abort) but we don't care.
if err := runTerragruntCommand(t, cmd, &stdout, &stderr); err == nil {
t.Fatalf("Failed to properly fail command: %v. The terraform should be bad", cmd)
}
output := stdout.String()
errOutput := stderr.String()
fmt.Printf("STDERR is %s.\n STDOUT is %s", errOutput, output)
assert.True(t, strings.Contains(errOutput, "missingvar1") || strings.Contains(output, "missingvar1"))
assert.True(t, strings.Contains(errOutput, "missingvar2") || strings.Contains(output, "missingvar2"))
}
func TestTerragruntGraphDependenciesCommand(t *testing.T) {
t.Parallel()
	// this test doesn't even run plan; it exits right after the stack is created
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_GRAPH_DEPENDENCIES)
rootTerragruntConfigPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_GRAPH_DEPENDENCIES, config.DefaultTerragruntConfigPath)
copyTerragruntConfigAndFillPlaceholders(t, rootTerragruntConfigPath, rootTerragruntConfigPath, s3BucketName, "not-used", "not-used")
environmentPath := fmt.Sprintf("%s/%s/root", tmpEnvPath, TEST_FIXTURE_GRAPH_DEPENDENCIES)
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
runTerragruntRedirectOutput(t, fmt.Sprintf("terragrunt graph-dependencies --terragrunt-working-dir %s", environmentPath), &stdout, &stderr)
output := stdout.String()
assert.True(t, strings.Contains(output, strings.TrimSpace(`
digraph {
"backend-app" ;
"backend-app" -> "mysql";
"backend-app" -> "redis";
"backend-app" -> "vpc";
"frontend-app" ;
"frontend-app" -> "backend-app";
"frontend-app" -> "vpc";
"mysql" ;
"mysql" -> "vpc";
"redis" ;
"redis" -> "vpc";
"vpc" ;
}
`)))
}
func TestTerragruntRunAllCommand(t *testing.T) {
t.Parallel()
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_OUTPUT_ALL)
rootTerragruntConfigPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_OUTPUT_ALL, config.DefaultTerragruntConfigPath)
copyTerragruntConfigAndFillPlaceholders(t, rootTerragruntConfigPath, rootTerragruntConfigPath, s3BucketName, "not-used", "not-used")
environmentPath := fmt.Sprintf("%s/%s/env1", tmpEnvPath, TEST_FIXTURE_OUTPUT_ALL)
runTerragrunt(t, fmt.Sprintf("terragrunt run-all init --terragrunt-non-interactive --terragrunt-working-dir %s", environmentPath))
}
func TestTerragruntOutputAllCommand(t *testing.T) {
t.Parallel()
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_OUTPUT_ALL)
rootTerragruntConfigPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_OUTPUT_ALL, config.DefaultTerragruntConfigPath)
copyTerragruntConfigAndFillPlaceholders(t, rootTerragruntConfigPath, rootTerragruntConfigPath, s3BucketName, "not-used", "not-used")
environmentPath := fmt.Sprintf("%s/%s/env1", tmpEnvPath, TEST_FIXTURE_OUTPUT_ALL)
runTerragrunt(t, fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-working-dir %s", environmentPath))
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
runTerragruntRedirectOutput(t, fmt.Sprintf("terragrunt output-all --terragrunt-non-interactive --terragrunt-working-dir %s", environmentPath), &stdout, &stderr)
output := stdout.String()
assert.True(t, strings.Contains(output, "app1 output"))
assert.True(t, strings.Contains(output, "app2 output"))
assert.True(t, strings.Contains(output, "app3 output"))
assert.True(t, (strings.Index(output, "app3 output") < strings.Index(output, "app1 output")) && (strings.Index(output, "app1 output") < strings.Index(output, "app2 output")))
}
func TestTerragruntValidateAllCommand(t *testing.T) {
t.Parallel()
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_OUTPUT_ALL)
rootTerragruntConfigPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_OUTPUT_ALL, config.DefaultTerragruntConfigPath)
copyTerragruntConfigAndFillPlaceholders(t, rootTerragruntConfigPath, rootTerragruntConfigPath, s3BucketName, "not-used", "not-used")
environmentPath := fmt.Sprintf("%s/%s/env1", tmpEnvPath, TEST_FIXTURE_OUTPUT_ALL)
runTerragrunt(t, fmt.Sprintf("terragrunt validate-all --terragrunt-non-interactive --terragrunt-working-dir %s", environmentPath))
}
// Check that Terragrunt does not pollute stdout with anything
func TestTerragruntStdOut(t *testing.T) {
t.Parallel()
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_STDOUT))
runTerragruntRedirectOutput(t, fmt.Sprintf("terragrunt output foo --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_STDOUT), &stdout, &stderr)
output := stdout.String()
assert.Equal(t, "\"foo\"\n", output)
}
func TestTerragruntOutputAllCommandSpecificVariableIgnoreDependencyErrors(t *testing.T) {
t.Parallel()
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_OUTPUT_ALL)
rootTerragruntConfigPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_OUTPUT_ALL, config.DefaultTerragruntConfigPath)
copyTerragruntConfigAndFillPlaceholders(t, rootTerragruntConfigPath, rootTerragruntConfigPath, s3BucketName, "not-used", "not-used")
environmentPath := fmt.Sprintf("%s/%s/env1", tmpEnvPath, TEST_FIXTURE_OUTPUT_ALL)
runTerragrunt(t, fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-working-dir %s", environmentPath))
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
// Call runTerragruntCommand directly because this command contains failures (which causes runTerragruntRedirectOutput to abort) but we don't care.
runTerragruntCommand(t, fmt.Sprintf("terragrunt output-all app2_text --terragrunt-ignore-dependency-errors --terragrunt-non-interactive --terragrunt-working-dir %s", environmentPath), &stdout, &stderr)
output := stdout.String()
logBufferContentsLineByLine(t, stdout, "output-all stdout")
logBufferContentsLineByLine(t, stderr, "output-all stderr")
// Without --terragrunt-ignore-dependency-errors, app2 never runs because its dependencies have "errors" since they don't have the output "app2_text".
assert.True(t, strings.Contains(output, "app2 output"))
}
func testRemoteFixtureParallelism(t *testing.T, parallelism int, numberOfModules int, timeToDeployEachModule time.Duration) (string, int, error) {
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
// folders inside the fixture
//fixtureTemplate := path.Join(TEST_FIXTURE_PARALLELISM, "template")
//fixtureApp := path.Join(TEST_FIXTURE_PARALLELISM, "app")
// copy the template `numberOfModules` times into the app
tmpEnvPath, err := ioutil.TempDir("", "terragrunt-test")
if err != nil {
t.Fatalf("Failed to create temp dir due to error: %v", err)
}
for i := 0; i < numberOfModules; i++ {
err := util.CopyFolderContents(TEST_FIXTURE_PARALLELISM, tmpEnvPath, ".terragrunt-test")
if err != nil {
return "", 0, err
}
err = os.Rename(
path.Join(tmpEnvPath, "template"),
path.Join(tmpEnvPath, "app"+strconv.Itoa(i)))
if err != nil {
return "", 0, err
}
}
rootTerragruntConfigPath := util.JoinPath(tmpEnvPath, config.DefaultTerragruntConfigPath)
copyTerragruntConfigAndFillPlaceholders(t, rootTerragruntConfigPath, rootTerragruntConfigPath, s3BucketName, "not-used", "not-used")
	environmentPath := tmpEnvPath
// forces plugin download & initialization (no parallelism control)
runTerragrunt(t, fmt.Sprintf("terragrunt plan-all --terragrunt-non-interactive --terragrunt-working-dir %s -var sleep_seconds=%d", environmentPath, timeToDeployEachModule/time.Second))
// apply all with parallelism set
// NOTE: we can't run just apply-all and not plan-all because the time to initialize the plugins skews the results of the test
testStart := int(time.Now().Unix())
t.Logf("apply-all start time = %d, %s", testStart, time.Now().Format(time.RFC3339))
runTerragrunt(t, fmt.Sprintf("terragrunt apply-all --terragrunt-parallelism %d --terragrunt-non-interactive --terragrunt-working-dir %s -var sleep_seconds=%d", parallelism, environmentPath, timeToDeployEachModule/time.Second))
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
// Call runTerragruntCommand directly because this command contains failures (which causes runTerragruntRedirectOutput to abort) but we don't care.
err = runTerragruntCommand(t, fmt.Sprintf("terragrunt output-all --terragrunt-non-interactive --terragrunt-working-dir %s", environmentPath), &stdout, &stderr)
if err != nil {
return "", 0, err
}
return stdout.String(), testStart, nil
}
func testTerragruntParallelism(t *testing.T, parallelism int, numberOfModules int, timeToDeployEachModule time.Duration, expectedTimings []int) {
output, testStart, err := testRemoteFixtureParallelism(t, parallelism, numberOfModules, timeToDeployEachModule)
require.NoError(t, err)
	// parse the output and sort the times; the regex captures a string in the time.RFC3339 format emitted by terraform's timestamp() function
r, err := regexp.Compile(`out = "([-:\w]+)"`)
require.NoError(t, err)
matches := r.FindAllStringSubmatch(output, -1)
require.True(t, len(matches) == numberOfModules)
var times []int
for _, v := range matches {
// timestamp() is parsed
parsed, err := time.Parse(time.RFC3339, v[1])
require.NoError(t, err)
times = append(times, int(parsed.Unix())-testStart)
}
sort.Slice(times, func(i, j int) bool {
return times[i] < times[j]
})
// the reported times are skewed (running terragrunt/terraform apply adds a little bit of overhead)
// we apply a simple scaling algorithm on the times based on the last expected time and the last actual time
k := float64(times[len(times)-1]) / float64(expectedTimings[len(expectedTimings)-1])
scaledTimes := make([]float64, len(times))
for i := 0; i < len(times); i += 1 {
scaledTimes[i] = float64(times[i]) / k
}
t.Logf("Parallelism test numberOfModules=%d p=%d expectedTimes=%v times=%v scaledTimes=%v scaleFactor=%f", numberOfModules, parallelism, expectedTimings, times, scaledTimes, k)
maxDiffInSeconds := 3.0
isEqual := func(x, y float64) bool {
return math.Abs(x-y) <= maxDiffInSeconds
}
for i := 0; i < len(times); i += 1 {
		// it's impossible to know when the first test will finish; however, once a test finishes,
		// we know that all the other times are relative to the first one
assert.True(t, isEqual(scaledTimes[i], float64(expectedTimings[i])))
}
}
func TestTerragruntParallelism(t *testing.T) {
t.Parallel()
testCases := []struct {
parallelism int
numberOfModules int
timeToDeployEachModule time.Duration
expectedTimings []int
}{
{1, 10, 5 * time.Second, []int{5, 10, 15, 20, 25, 30, 35, 40, 45, 50}},
{3, 10, 5 * time.Second, []int{5, 5, 5, 10, 10, 10, 15, 15, 15, 20}},
{5, 10, 5 * time.Second, []int{5, 5, 5, 5, 5, 5, 5, 5, 5, 5}},
}
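	// Worked check of the expected timings: with parallelism 3 and 10 modules at
	// 5s each, the modules should finish in waves of three at roughly 5s, 10s and
	// 15s, with the last module at about 20s, which is what the second case encodes.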
for _, tc := range testCases {
tc := tc // shadow and force execution with this case
t.Run(fmt.Sprintf("parallelism=%d numberOfModules=%d timeToDeployEachModule=%v expectedTimings=%v", tc.parallelism, tc.numberOfModules, tc.timeToDeployEachModule, tc.expectedTimings), func(t *testing.T) {
// t.Parallel()
testTerragruntParallelism(t, tc.parallelism, tc.numberOfModules, tc.timeToDeployEachModule, tc.expectedTimings)
})
}
}
func TestTerragruntStackCommands(t *testing.T) {
t.Parallel()
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
lockTableName := fmt.Sprintf("terragrunt-test-locks-%s", strings.ToLower(uniqueId()))
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
defer cleanupTableForTest(t, lockTableName, TERRAFORM_REMOTE_STATE_S3_REGION)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_STACK)
rootTerragruntConfigPath := util.JoinPath(tmpEnvPath, "fixture-stack", config.DefaultTerragruntConfigPath)
copyTerragruntConfigAndFillPlaceholders(t, rootTerragruntConfigPath, rootTerragruntConfigPath, s3BucketName, lockTableName, "not-used")
mgmtEnvironmentPath := fmt.Sprintf("%s/fixture-stack/mgmt", tmpEnvPath)
stageEnvironmentPath := fmt.Sprintf("%s/fixture-stack/stage", tmpEnvPath)
runTerragrunt(t, fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-working-dir %s", mgmtEnvironmentPath))
runTerragrunt(t, fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-working-dir %s", stageEnvironmentPath))
runTerragrunt(t, fmt.Sprintf("terragrunt output-all --terragrunt-non-interactive --terragrunt-working-dir %s", mgmtEnvironmentPath))
runTerragrunt(t, fmt.Sprintf("terragrunt output-all --terragrunt-non-interactive --terragrunt-working-dir %s", stageEnvironmentPath))
runTerragrunt(t, fmt.Sprintf("terragrunt destroy-all --terragrunt-non-interactive --terragrunt-working-dir %s", stageEnvironmentPath))
runTerragrunt(t, fmt.Sprintf("terragrunt destroy-all --terragrunt-non-interactive --terragrunt-working-dir %s", mgmtEnvironmentPath))
}
func TestTerragruntStackCommandsWithPlanFile(t *testing.T) {
t.Parallel()
disjointEnvironmentPath := "fixture-stack/disjoint"
cleanupTerraformFolder(t, disjointEnvironmentPath)
runTerragrunt(t, fmt.Sprintf("terragrunt plan-all -out=plan.tfplan --terragrunt-log-level info --terragrunt-non-interactive --terragrunt-working-dir %s", disjointEnvironmentPath))
runTerragrunt(t, fmt.Sprintf("terragrunt apply-all plan.tfplan --terragrunt-log-level info --terragrunt-non-interactive --terragrunt-working-dir %s", disjointEnvironmentPath))
}
func TestLocalDownload(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_LOCAL_DOWNLOAD_PATH)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_LOCAL_DOWNLOAD_PATH))
// As of Terraform 0.14.0 we should be copying the lock file from .terragrunt-cache to the working directory
assert.FileExists(t, util.JoinPath(TEST_FIXTURE_LOCAL_DOWNLOAD_PATH, util.TerraformLockFile))
// Run a second time to make sure the temporary folder can be reused without errors
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_LOCAL_DOWNLOAD_PATH))
}
// As of Terraform 0.14.0, if there's already a lock file in the working directory, we should be copying it into
// .terragrunt-cache
func TestCustomLockFile(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_CUSTOM_LOCK_FILE)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_CUSTOM_LOCK_FILE))
source := "../custom-lock-file-module"
downloadDir := util.JoinPath(TEST_FIXTURE_CUSTOM_LOCK_FILE, TERRAGRUNT_CACHE)
result, err := tfsource.NewTerraformSource(source, downloadDir, TEST_FIXTURE_CUSTOM_LOCK_FILE, util.CreateLogEntry("", options.DEFAULT_LOG_LEVEL))
require.NoError(t, err)
lockFilePath := util.JoinPath(result.WorkingDir, util.TerraformLockFile)
require.FileExists(t, lockFilePath)
readFile, err := ioutil.ReadFile(lockFilePath)
require.NoError(t, err)
// In our lock file, we intentionally have hashes for an older version of the AWS provider. If the lock file
// copying works, then Terraform will stick with this older version. If there is a bug, Terraform will end up
// installing a newer version (since the version is not pinned in the .tf code, only in the lock file).
assert.Contains(t, string(readFile), `version = "3.0.0"`)
}
func TestLocalDownloadWithHiddenFolder(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_LOCAL_WITH_HIDDEN_FOLDER)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_LOCAL_WITH_HIDDEN_FOLDER))
// Run a second time to make sure the temporary folder can be reused without errors
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_LOCAL_WITH_HIDDEN_FOLDER))
}
func TestLocalDownloadWithRelativePath(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_LOCAL_RELATIVE_DOWNLOAD_PATH)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_LOCAL_RELATIVE_DOWNLOAD_PATH))
// Run a second time to make sure the temporary folder can be reused without errors
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_LOCAL_RELATIVE_DOWNLOAD_PATH))
}
func TestRemoteDownload(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_REMOTE_DOWNLOAD_PATH)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_REMOTE_DOWNLOAD_PATH))
// Run a second time to make sure the temporary folder can be reused without errors
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_REMOTE_DOWNLOAD_PATH))
}
func TestRemoteDownloadWithRelativePath(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_REMOTE_RELATIVE_DOWNLOAD_PATH)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_REMOTE_RELATIVE_DOWNLOAD_PATH))
// Run a second time to make sure the temporary folder can be reused without errors
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_REMOTE_RELATIVE_DOWNLOAD_PATH))
}
func TestRemoteDownloadOverride(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_OVERRIDE_DOWNLOAD_PATH)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s --terragrunt-source %s", TEST_FIXTURE_OVERRIDE_DOWNLOAD_PATH, "../hello-world"))
// Run a second time to make sure the temporary folder can be reused without errors
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s --terragrunt-source %s", TEST_FIXTURE_OVERRIDE_DOWNLOAD_PATH, "../hello-world"))
}
func TestLocalWithBackend(t *testing.T) {
t.Parallel()
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
lockTableName := fmt.Sprintf("terragrunt-lock-table-%s", strings.ToLower(uniqueId()))
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
defer cleanupTableForTest(t, lockTableName, TERRAFORM_REMOTE_STATE_S3_REGION)
tmpEnvPath := copyEnvironment(t, "fixture-download")
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_LOCAL_WITH_BACKEND)
rootTerragruntConfigPath := util.JoinPath(rootPath, config.DefaultTerragruntConfigPath)
copyTerragruntConfigAndFillPlaceholders(t, rootTerragruntConfigPath, rootTerragruntConfigPath, s3BucketName, lockTableName, "not-used")
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
// Run a second time to make sure the temporary folder can be reused without errors
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
}
func TestLocalWithMissingBackend(t *testing.T) {
t.Parallel()
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
lockTableName := fmt.Sprintf("terragrunt-lock-table-%s", strings.ToLower(uniqueId()))
tmpEnvPath := copyEnvironment(t, "fixture-download")
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_LOCAL_MISSING_BACKEND)
rootTerragruntConfigPath := util.JoinPath(rootPath, config.DefaultTerragruntConfigPath)
copyTerragruntConfigAndFillPlaceholders(t, rootTerragruntConfigPath, rootTerragruntConfigPath, s3BucketName, lockTableName, "not-used")
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), os.Stdout, os.Stderr)
if assert.Error(t, err) {
underlying := errors.Unwrap(err)
assert.IsType(t, cli.BackendNotDefined{}, underlying)
}
}
func TestRemoteWithBackend(t *testing.T) {
t.Parallel()
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
lockTableName := fmt.Sprintf("terragrunt-lock-table-%s", strings.ToLower(uniqueId()))
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
defer cleanupTableForTest(t, lockTableName, TERRAFORM_REMOTE_STATE_S3_REGION)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_REMOTE_WITH_BACKEND)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_REMOTE_WITH_BACKEND)
rootTerragruntConfigPath := util.JoinPath(rootPath, config.DefaultTerragruntConfigPath)
copyTerragruntConfigAndFillPlaceholders(t, rootTerragruntConfigPath, rootTerragruntConfigPath, s3BucketName, lockTableName, "not-used")
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
// Run a second time to make sure the temporary folder can be reused without errors
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
}
func TestRemoteWithModuleInRoot(t *testing.T) {
t.Parallel()
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_REMOTE_MODULE_IN_ROOT)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_REMOTE_MODULE_IN_ROOT)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
// Run a second time to make sure the temporary folder can be reused without errors
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
}
// Run terragrunt plan -detailed-exitcode on a folder with some uncreated resources and make sure that you get an exit
// code of "2", which means there are changes to apply.
func TestExitCode(t *testing.T) {
t.Parallel()
rootPath := copyEnvironment(t, TEST_FIXTURE_EXIT_CODE)
modulePath := util.JoinPath(rootPath, TEST_FIXTURE_EXIT_CODE)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt plan -detailed-exitcode --terragrunt-non-interactive --terragrunt-working-dir %s", modulePath), os.Stdout, os.Stderr)
exitCode, exitCodeErr := shell.GetExitCode(err)
assert.Nil(t, exitCodeErr)
assert.Equal(t, 2, exitCode)
}
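// TestAutoRetryBasicRerun verifies that auto-retry re-runs terraform after a retryable error, so the apply
// completes successfully.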
func TestAutoRetryBasicRerun(t *testing.T) {
t.Parallel()
out := new(bytes.Buffer)
rootPath := copyEnvironment(t, TEST_FIXTURE_AUTO_RETRY_RERUN)
modulePath := util.JoinPath(rootPath, TEST_FIXTURE_AUTO_RETRY_RERUN)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", modulePath), out, os.Stderr)
assert.Nil(t, err)
assert.Contains(t, out.String(), "Apply complete!")
}
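// TestAutoRetrySkip verifies that --terragrunt-no-auto-retry disables auto-retry, so the same fixture fails to apply.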
func TestAutoRetrySkip(t *testing.T) {
t.Parallel()
out := new(bytes.Buffer)
rootPath := copyEnvironment(t, TEST_FIXTURE_AUTO_RETRY_RERUN)
modulePath := util.JoinPath(rootPath, TEST_FIXTURE_AUTO_RETRY_RERUN)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-no-auto-retry --terragrunt-non-interactive --terragrunt-working-dir %s", modulePath), out, os.Stderr)
assert.NotNil(t, err)
assert.NotContains(t, out.String(), "Apply complete!")
}
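// TestAutoRetryExhaustRetries verifies that the apply still fails once all retries are exhausted.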
func TestAutoRetryExhaustRetries(t *testing.T) {
t.Parallel()
out := new(bytes.Buffer)
rootPath := copyEnvironment(t, TEST_FIXTURE_AUTO_RETRY_EXHAUST)
modulePath := util.JoinPath(rootPath, TEST_FIXTURE_AUTO_RETRY_EXHAUST)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", modulePath), out, os.Stderr)
assert.NotNil(t, err)
assert.Contains(t, out.String(), "Failed to load backend")
assert.NotContains(t, out.String(), "Apply complete!")
}
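// TestAutoRetryCustomRetryableErrors verifies that custom retryable errors configured in the fixture are retried
// until the apply succeeds.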
func TestAutoRetryCustomRetryableErrors(t *testing.T) {
t.Parallel()
out := new(bytes.Buffer)
rootPath := copyEnvironment(t, TEST_FIXTURE_AUTO_RETRY_CUSTOM_ERRORS)
modulePath := util.JoinPath(rootPath, TEST_FIXTURE_AUTO_RETRY_CUSTOM_ERRORS)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply --auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", modulePath), out, os.Stderr)
assert.Nil(t, err)
assert.Contains(t, out.String(), "My own little error")
assert.Contains(t, out.String(), "Apply complete!")
}
func TestAutoRetryCustomRetryableErrorsFailsWhenRetryableErrorsNotSet(t *testing.T) {
t.Parallel()
out := new(bytes.Buffer)
rootPath := copyEnvironment(t, TEST_FIXTURE_AUTO_RETRY_CUSTOM_ERRORS_NOT_SET)
modulePath := util.JoinPath(rootPath, TEST_FIXTURE_AUTO_RETRY_CUSTOM_ERRORS_NOT_SET)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply --auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", modulePath), out, os.Stderr)
assert.NotNil(t, err)
assert.Contains(t, out.String(), "My own little error")
assert.NotContains(t, out.String(), "Apply complete!")
}
func TestAutoRetryFlagWithRecoverableError(t *testing.T) {
t.Parallel()
out := new(bytes.Buffer)
rootPath := copyEnvironment(t, TEST_FIXTURE_AUTO_RETRY_RERUN)
modulePath := util.JoinPath(rootPath, TEST_FIXTURE_AUTO_RETRY_RERUN)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-no-auto-retry --terragrunt-non-interactive --terragrunt-working-dir %s", modulePath), out, os.Stderr)
assert.NotNil(t, err)
assert.NotContains(t, out.String(), "Apply complete!")
}
func TestAutoRetryEnvVarWithRecoverableError(t *testing.T) {
os.Setenv("TERRAGRUNT_AUTO_RETRY", "false")
defer os.Unsetenv("TERRAGRUNT_AUTO_RETRY")
out := new(bytes.Buffer)
rootPath := copyEnvironment(t, TEST_FIXTURE_AUTO_RETRY_RERUN)
modulePath := util.JoinPath(rootPath, TEST_FIXTURE_AUTO_RETRY_RERUN)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", modulePath), out, os.Stderr)
assert.NotNil(t, err)
assert.NotContains(t, out.String(), "Apply complete!")
}
func TestAutoRetryApplyAllDependentModuleRetries(t *testing.T) {
t.Parallel()
out := new(bytes.Buffer)
rootPath := copyEnvironment(t, TEST_FIXTURE_AUTO_RETRY_APPLY_ALL_RETRIES)
modulePath := util.JoinPath(rootPath, TEST_FIXTURE_AUTO_RETRY_APPLY_ALL_RETRIES)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply-all -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", modulePath), out, os.Stderr)
assert.Nil(t, err)
s := out.String()
assert.Contains(t, s, "app1 output")
assert.Contains(t, s, "app2 output")
assert.Contains(t, s, "app3 output")
assert.Contains(t, s, "Apply complete!")
}
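// TestAutoRetryConfigurableRetries verifies that a configured retry count is honored: five attempts produce four
// retry sleeps before the apply succeeds.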
func TestAutoRetryConfigurableRetries(t *testing.T) {
t.Parallel()
stdout := new(bytes.Buffer)
stderr := new(bytes.Buffer)
rootPath := copyEnvironment(t, TEST_FIXTURE_AUTO_RETRY_CONFIGURABLE_RETRIES)
modulePath := util.JoinPath(rootPath, TEST_FIXTURE_AUTO_RETRY_CONFIGURABLE_RETRIES)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", modulePath), stdout, stderr)
sleeps := regexp.MustCompile(regexp.QuoteMeta("Sleeping 0s before retrying.")).FindAllStringIndex(stderr.String(), -1)
assert.Nil(t, err)
assert.Equal(t, 4, len(sleeps)) // 5 retries, so 4 sleeps
assert.Contains(t, stdout.String(), "Apply complete!")
}
func TestAutoRetryConfigurableRetriesErrors(t *testing.T) {
t.Parallel()
testCases := []struct {
fixture string
errorMessage string
}{
{TEST_FIXTURE_AUTO_RETRY_CONFIGURABLE_RETRIES_ERROR_1, "Cannot have less than 1 max retry"},
{TEST_FIXTURE_AUTO_RETRY_CONFIGURABLE_RETRIES_ERROR_2, "Cannot sleep for less than 0 seconds"},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.fixture, func(t *testing.T) {
t.Parallel()
stdout := new(bytes.Buffer)
stderr := new(bytes.Buffer)
rootPath := copyEnvironment(t, tc.fixture)
modulePath := util.JoinPath(rootPath, tc.fixture)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", modulePath), stdout, stderr)
assert.NotNil(t, err)
assert.NotContains(t, stdout.String(), "Apply complete!")
assert.Contains(t, err.Error(), tc.errorMessage)
})
}
}
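// TestAwsProviderPatch runs the aws-provider-patch command with a region override and verifies that the provider
// block is patched and the resulting terraform code still validates.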
func TestAwsProviderPatch(t *testing.T) {
t.Parallel()
stderr := new(bytes.Buffer)
rootPath := copyEnvironment(t, TEST_FIXTURE_AWS_PROVIDER_PATCH)
modulePath := util.JoinPath(rootPath, TEST_FIXTURE_AWS_PROVIDER_PATCH)
mainTFFile := filepath.Join(modulePath, "main.tf")
// Fill in the branch name so we can test against updates to the test case file
mainContents, err := util.ReadFileAsString(mainTFFile)
require.NoError(t, err)
mainContents = strings.Replace(mainContents, "__BRANCH_NAME__", git.GetCurrentBranchName(t), -1)
require.NoError(t, ioutil.WriteFile(mainTFFile, []byte(mainContents), 0444))
assert.NoError(
t,
runTerragruntCommand(t, fmt.Sprintf("terragrunt aws-provider-patch --terragrunt-override-attr region=eu-west-1 --terragrunt-working-dir %s --terragrunt-log-level debug", modulePath), os.Stdout, stderr),
)
t.Log(stderr.String())
assert.Regexp(t, "Patching AWS provider in .+test/fixture-aws-provider-patch/example-module/main.tf", stderr.String())
// Make sure the resulting terraform code is still valid
assert.NoError(
t,
runTerragruntCommand(t, fmt.Sprintf("terragrunt validate --terragrunt-working-dir %s", modulePath), os.Stdout, os.Stderr),
)
}
// This tests that terragrunt properly passes through terraform commands and any number of specified args
func TestTerraformCommandCliArgs(t *testing.T) {
t.Parallel()
testCases := []struct {
command []string
expected string
}{
{
[]string{"version"},
"terraform version",
},
{
[]string{"version", "foo"},
"terraform version foo",
},
{
[]string{"version", "foo", "bar", "baz"},
"terraform version foo bar baz",
},
{
[]string{"version", "foo", "bar", "baz", "foobar"},
"terraform version foo bar baz foobar",
},
}
for _, testCase := range testCases {
cmd := fmt.Sprintf("terragrunt %s --terragrunt-non-interactive --terragrunt-log-level debug --terragrunt-working-dir %s", strings.Join(testCase.command, " "), TEST_FIXTURE_EXTRA_ARGS_PATH)
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
runTerragruntRedirectOutput(t, cmd, &stdout, &stderr)
output := stdout.String()
errOutput := stderr.String()
assert.True(t, strings.Contains(errOutput, testCase.expected) || strings.Contains(output, testCase.expected))
}
}
// This tests that terragrunt properly passes through terraform commands with subcommands
// and any number of specified args
func TestTerraformSubcommandCliArgs(t *testing.T) {
t.Parallel()
testCases := []struct {
command []string
expected string
}{
{
[]string{"force-unlock"},
"terraform force-unlock",
},
{
[]string{"force-unlock", "foo"},
"terraform force-unlock foo",
},
{
[]string{"force-unlock", "foo", "bar", "baz"},
"terraform force-unlock foo bar baz",
},
{
[]string{"force-unlock", "foo", "bar", "baz", "foobar"},
"terraform force-unlock foo bar baz foobar",
},
}
for _, testCase := range testCases {
cmd := fmt.Sprintf("terragrunt %s --terragrunt-non-interactive --terragrunt-log-level debug --terragrunt-working-dir %s", strings.Join(testCase.command, " "), TEST_FIXTURE_EXTRA_ARGS_PATH)
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
// Call runTerragruntCommand directly because this command is expected to fail (which would cause runTerragruntRedirectOutput to abort the test); we only care that the args are passed through.
if err := runTerragruntCommand(t, cmd, &stdout, &stderr); err == nil {
t.Fatalf("Failed to properly fail command: %v.", cmd)
}
output := stdout.String()
errOutput := stderr.String()
assert.True(t, strings.Contains(errOutput, testCase.expected) || strings.Contains(output, testCase.expected))
}
}
func TestPreventDestroyOverride(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_PREVENT_DESTROY_OVERRIDE)
assert.NoError(t, runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-working-dir %s", TEST_FIXTURE_PREVENT_DESTROY_OVERRIDE), os.Stdout, os.Stderr))
assert.NoError(t, runTerragruntCommand(t, fmt.Sprintf("terragrunt destroy -auto-approve --terragrunt-working-dir %s", TEST_FIXTURE_PREVENT_DESTROY_OVERRIDE), os.Stdout, os.Stderr))
}
func TestPreventDestroyNotSet(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_PREVENT_DESTROY_NOT_SET)
assert.NoError(t, runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-working-dir %s", TEST_FIXTURE_PREVENT_DESTROY_NOT_SET), os.Stdout, os.Stderr))
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt destroy -auto-approve --terragrunt-working-dir %s", TEST_FIXTURE_PREVENT_DESTROY_NOT_SET), os.Stdout, os.Stderr)
if assert.Error(t, err) {
underlying := errors.Unwrap(err)
assert.IsType(t, cli.ModuleIsProtected{}, underlying)
}
}
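// TestPreventDestroy verifies that destroy fails with a cli.ModuleIsProtected error for the prevent_destroy fixture.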
func TestPreventDestroy(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_LOCAL_PREVENT_DESTROY)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_LOCAL_PREVENT_DESTROY))
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt destroy -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_LOCAL_PREVENT_DESTROY), os.Stdout, os.Stderr)
if assert.Error(t, err) {
underlying := errors.Unwrap(err)
assert.IsType(t, cli.ModuleIsProtected{}, underlying)
}
}
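// TestPreventDestroyDependencies runs apply-all and then destroy-all across five modules and verifies that
// protected modules A and B survive while modules C, D, and E are destroyed.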
func TestPreventDestroyDependencies(t *testing.T) {
t.Parallel()
// Populate module paths.
moduleNames := []string{
"module-a",
"module-b",
"module-c",
"module-d",
"module-e",
}
modulePaths := make(map[string]string, len(moduleNames))
for _, moduleName := range moduleNames {
modulePaths[moduleName] = util.JoinPath(TEST_FIXTURE_LOCAL_PREVENT_DESTROY_DEPENDENCIES, moduleName)
}
// Cleanup all modules directories.
cleanupTerraformFolder(t, TEST_FIXTURE_LOCAL_PREVENT_DESTROY_DEPENDENCIES)
for _, modulePath := range modulePaths {
cleanupTerraformFolder(t, modulePath)
}
var (
applyAllStdout bytes.Buffer
applyAllStderr bytes.Buffer
)
// Apply and destroy all modules.
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_LOCAL_PREVENT_DESTROY_DEPENDENCIES), &applyAllStdout, &applyAllStderr)
logBufferContentsLineByLine(t, applyAllStdout, "apply-all stdout")
logBufferContentsLineByLine(t, applyAllStderr, "apply-all stderr")
if err != nil {
t.Fatalf("apply-all in TestPreventDestroyDependencies failed with error: %v. Full std", err)
}
var (
destroyAllStdout bytes.Buffer
destroyAllStderr bytes.Buffer
)
err = runTerragruntCommand(t, fmt.Sprintf("terragrunt destroy-all --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_LOCAL_PREVENT_DESTROY_DEPENDENCIES), &destroyAllStdout, &destroyAllStderr)
logBufferContentsLineByLine(t, destroyAllStdout, "destroy-all stdout")
logBufferContentsLineByLine(t, destroyAllStderr, "destroy-all stderr")
if assert.Error(t, err) {
underlying := errors.Unwrap(err)
assert.IsType(t, configstack.MultiError{}, underlying)
}
// Check that modules C, D and E were deleted and modules A and B weren't.
for moduleName, modulePath := range modulePaths {
var (
showStdout bytes.Buffer
showStderr bytes.Buffer
)
err = runTerragruntCommand(t, fmt.Sprintf("terragrunt show --terragrunt-non-interactive --terragrunt-working-dir %s", modulePath), &showStdout, &showStderr)
logBufferContentsLineByLine(t, showStdout, fmt.Sprintf("show stdout for %s", modulePath))
logBufferContentsLineByLine(t, showStderr, fmt.Sprintf("show stderr for %s", modulePath))
assert.NoError(t, err)
output := showStdout.String()
switch moduleName {
case "module-a":
assert.Contains(t, output, "Hello, Module A")
case "module-b":
assert.Contains(t, output, "Hello, Module B")
case "module-c":
assert.NotContains(t, output, "Hello, Module C")
case "module-d":
assert.NotContains(t, output, "Hello, Module D")
case "module-e":
assert.NotContains(t, output, "Hello, Module E")
}
}
}
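// validateInputs asserts that inputs of every type (bool, lists, maps, number, object, string, and an
// env-derived value) are passed through to terraform outputs unchanged.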
func validateInputs(t *testing.T, outputs map[string]TerraformOutput) {
assert.Equal(t, outputs["bool"].Value, true)
assert.Equal(t, outputs["list_bool"].Value, []interface{}{true, false})
assert.Equal(t, outputs["list_number"].Value, []interface{}{1.0, 2.0, 3.0})
assert.Equal(t, outputs["list_string"].Value, []interface{}{"a", "b", "c"})
assert.Equal(t, outputs["map_bool"].Value, map[string]interface{}{"foo": true, "bar": false, "baz": true})
assert.Equal(t, outputs["map_number"].Value, map[string]interface{}{"foo": 42.0, "bar": 12345.0})
assert.Equal(t, outputs["map_string"].Value, map[string]interface{}{"foo": "bar"})
assert.Equal(t, outputs["number"].Value, 42.0)
assert.Equal(t, outputs["object"].Value, map[string]interface{}{"list": []interface{}{1.0, 2.0, 3.0}, "map": map[string]interface{}{"foo": "bar"}, "num": 42.0, "str": "string"})
assert.Equal(t, outputs["string"].Value, "string")
assert.Equal(t, outputs["from_env"].Value, "default")
}
func TestInputsPassedThroughCorrectly(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_INPUTS)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_INPUTS)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_INPUTS)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt output -no-color -json --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr)
require.NoError(t, err)
outputs := map[string]TerraformOutput{}
require.NoError(t, json.Unmarshal([]byte(stdout.String()), &outputs))
validateInputs(t, outputs)
}
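// TestNoAutoInit verifies that --terragrunt-no-auto-init skips the automatic init, so apply fails because the
// module is not yet installed.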
func TestNoAutoInit(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_REGRESSIONS)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_REGRESSIONS)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_REGRESSIONS, "skip-init")
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply --terragrunt-no-auto-init --terragrunt-log-level debug --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr)
logBufferContentsLineByLine(t, stdout, "no force apply stdout")
logBufferContentsLineByLine(t, stderr, "no force apply stderr")
require.Error(t, err)
require.Contains(t, stderr.String(), "This module is not yet installed.")
}
func TestLocalsParsing(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_LOCALS_CANONICAL)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_LOCALS_CANONICAL))
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt output -no-color -json --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_LOCALS_CANONICAL), &stdout, &stderr)
require.NoError(t, err)
outputs := map[string]TerraformOutput{}
require.NoError(t, json.Unmarshal([]byte(stdout.String()), &outputs))
assert.Equal(t, outputs["data"].Value, "Hello world\n")
assert.Equal(t, outputs["answer"].Value, float64(42))
}
func TestLocalsInInclude(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_LOCALS_IN_INCLUDE)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_LOCALS_IN_INCLUDE)
childPath := filepath.Join(tmpEnvPath, TEST_FIXTURE_LOCALS_IN_INCLUDE, TEST_FIXTURE_LOCALS_IN_INCLUDE_CHILD_REL_PATH)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve -no-color --terragrunt-non-interactive --terragrunt-working-dir %s", childPath))
// Check the outputs of the dir functions referenced in locals to make sure they return what is expected
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
require.NoError(
t,
runTerragruntCommand(t, fmt.Sprintf("terragrunt output -no-color -json --terragrunt-non-interactive --terragrunt-working-dir %s", childPath), &stdout, &stderr),
)
outputs := map[string]TerraformOutput{}
require.NoError(t, json.Unmarshal([]byte(stdout.String()), &outputs))
assert.Equal(
t,
filepath.Join(tmpEnvPath, TEST_FIXTURE_LOCALS_IN_INCLUDE),
outputs["parent_terragrunt_dir"].Value,
)
assert.Equal(
t,
childPath,
outputs["terragrunt_dir"].Value,
)
assert.Equal(
t,
"apply",
outputs["terraform_command"].Value,
)
assert.Equal(
t,
"[\"apply\",\"-auto-approve\",\"-no-color\"]",
outputs["terraform_cli_args"].Value,
)
}
func TestUndefinedLocalsReferenceBreaks(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_LOCALS_ERROR_UNDEFINED_LOCAL)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_LOCALS_ERROR_UNDEFINED_LOCAL), os.Stdout, os.Stderr)
assert.Error(t, err)
}
func TestUndefinedLocalsReferenceToInputsBreaks(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_LOCALS_ERROR_UNDEFINED_LOCAL_BUT_INPUT)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_LOCALS_ERROR_UNDEFINED_LOCAL_BUT_INPUT), os.Stdout, os.Stderr)
assert.Error(t, err)
}
type TerraformOutput struct {
Sensitive bool
Type interface{}
Value interface{}
}
func TestPreventDestroyDependenciesIncludedConfig(t *testing.T) {
t.Parallel()
// Populate module paths.
moduleNames := []string{
"module-a",
"module-b",
"module-c",
}
modulePaths := make(map[string]string, len(moduleNames))
for _, moduleName := range moduleNames {
modulePaths[moduleName] = util.JoinPath(TEST_FIXTURE_LOCAL_INCLUDE_PREVENT_DESTROY_DEPENDENCIES, moduleName)
}
// Cleanup all modules directories.
cleanupTerraformFolder(t, TEST_FIXTURE_LOCAL_INCLUDE_PREVENT_DESTROY_DEPENDENCIES)
for _, modulePath := range modulePaths {
cleanupTerraformFolder(t, modulePath)
}
var (
applyAllStdout bytes.Buffer
applyAllStderr bytes.Buffer
)
// Apply and destroy all modules.
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_LOCAL_INCLUDE_PREVENT_DESTROY_DEPENDENCIES), &applyAllStdout, &applyAllStderr)
logBufferContentsLineByLine(t, applyAllStdout, "apply-all stdout")
logBufferContentsLineByLine(t, applyAllStderr, "apply-all stderr")
if err != nil {
t.Fatalf("apply-all in TestPreventDestroyDependenciesIncludedConfig failed with error: %v. Full std", err)
}
var (
destroyAllStdout bytes.Buffer
destroyAllStderr bytes.Buffer
)
err = runTerragruntCommand(t, fmt.Sprintf("terragrunt destroy-all --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_LOCAL_INCLUDE_PREVENT_DESTROY_DEPENDENCIES), &destroyAllStdout, &destroyAllStderr)
logBufferContentsLineByLine(t, destroyAllStdout, "destroy-all stdout")
logBufferContentsLineByLine(t, destroyAllStderr, "destroy-all stderr")
if assert.Error(t, err) {
underlying := errors.Unwrap(err)
assert.IsType(t, configstack.MultiError{}, underlying)
}
// Check that module C was deleted and modules A and B weren't.
for moduleName, modulePath := range modulePaths {
var (
showStdout bytes.Buffer
showStderr bytes.Buffer
)
err = runTerragruntCommand(t, fmt.Sprintf("terragrunt show --terragrunt-non-interactive --terragrunt-working-dir %s", modulePath), &showStdout, &showStderr)
logBufferContentsLineByLine(t, showStdout, fmt.Sprintf("show stdout for %s", modulePath))
logBufferContentsLineByLine(t, showStderr, fmt.Sprintf("show stderr for %s", modulePath))
assert.NoError(t, err)
output := showStdout.String()
switch moduleName {
case "module-a":
assert.Contains(t, output, "Hello, Module A")
case "module-b":
assert.Contains(t, output, "Hello, Module B")
case "module-c":
assert.NotContains(t, output, "Hello, Module C")
}
}
}
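// TestExcludeDirs runs apply-all with various --terragrunt-exclude-dir patterns and checks that the excluded
// modules produce no output.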
func TestExcludeDirs(t *testing.T) {
t.Parallel()
// Populate module paths.
moduleNames := []string{
"integration-env/aws/module-aws-a",
"integration-env/gce/module-gce-b",
"integration-env/gce/module-gce-c",
"production-env/aws/module-aws-d",
"production-env/gce/module-gce-e",
}
testCases := []struct {
workingDir string
excludeArgs string
excludedModuleOutputs []string
}{
{TEST_FIXTURE_LOCAL_WITH_EXCLUDE_DIR, "--terragrunt-exclude-dir **/gce/**/*", []string{"Module GCE B", "Module GCE C", "Module GCE E"}},
{TEST_FIXTURE_LOCAL_WITH_EXCLUDE_DIR, "--terragrunt-exclude-dir production-env/**/* --terragrunt-exclude-dir **/module-gce-c", []string{"Module GCE C", "Module AWS D", "Module GCE E"}},
{TEST_FIXTURE_LOCAL_WITH_EXCLUDE_DIR, "--terragrunt-exclude-dir integration-env/gce/module-gce-b --terragrunt-exclude-dir integration-env/gce/module-gce-c --terragrunt-exclude-dir **/module-aws*", []string{"Module AWS A", "Module GCE B", "Module GCE C", "Module AWS D"}},
}
modulePaths := make(map[string]string, len(moduleNames))
for _, moduleName := range moduleNames {
modulePaths[moduleName] = util.JoinPath(TEST_FIXTURE_LOCAL_WITH_EXCLUDE_DIR, moduleName)
}
for _, testCase := range testCases {
applyAllStdout := bytes.Buffer{}
applyAllStderr := bytes.Buffer{}
// Cleanup all modules directories.
cleanupTerragruntFolder(t, TEST_FIXTURE_LOCAL_WITH_EXCLUDE_DIR)
for _, modulePath := range modulePaths {
cleanupTerragruntFolder(t, modulePath)
}
// Apply modules according to test cases
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-working-dir %s %s", testCase.workingDir, testCase.excludeArgs), &applyAllStdout, &applyAllStderr)
logBufferContentsLineByLine(t, applyAllStdout, "apply-all stdout")
logBufferContentsLineByLine(t, applyAllStderr, "apply-all stderr")
if err != nil {
t.Fatalf("apply-all in TestExcludeDirs failed with error: %v. Full std", err)
}
// Check that the excluded module output is not present
for _, modulePath := range modulePaths {
showStdout := bytes.Buffer{}
showStderr := bytes.Buffer{}
err = runTerragruntCommand(t, fmt.Sprintf("terragrunt show --terragrunt-non-interactive --terragrunt-working-dir %s", modulePath), &showStdout, &showStderr)
logBufferContentsLineByLine(t, showStdout, fmt.Sprintf("show stdout for %s", modulePath))
logBufferContentsLineByLine(t, showStderr, fmt.Sprintf("show stderr for %s", modulePath))
assert.NoError(t, err)
output := showStdout.String()
for _, excludedModuleOutput := range testCase.excludedModuleOutputs {
assert.NotContains(t, output, excludedModuleOutput)
}
}
}
}
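// TestIncludeDirs runs apply-all with various --terragrunt-include-dir patterns and checks that modules outside
// the include set produce no output.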
func TestIncludeDirs(t *testing.T) {
t.Parallel()
// Populate module paths.
moduleNames := []string{
"integration-env/aws/module-aws-a",
"integration-env/gce/module-gce-b",
"integration-env/gce/module-gce-c",
"production-env/aws/module-aws-d",
"production-env/gce/module-gce-e",
}
testCases := []struct {
workingDir string
includeArgs string
// Outputs of modules that should NOT have been applied, given the include args.
notIncludedModuleOutputs []string
}{
{TEST_FIXTURE_LOCAL_WITH_INCLUDE_DIR, "--terragrunt-include-dir */aws", []string{"Module GCE B", "Module GCE C", "Module GCE E"}},
{TEST_FIXTURE_LOCAL_WITH_INCLUDE_DIR, "--terragrunt-include-dir production-env --terragrunt-include-dir **/module-gce-c", []string{"Module GCE B", "Module AWS A"}},
{TEST_FIXTURE_LOCAL_WITH_INCLUDE_DIR, "--terragrunt-include-dir integration-env/gce/module-gce-b --terragrunt-include-dir integration-env/gce/module-gce-c --terragrunt-include-dir **/module-aws*", []string{"Module GCE E"}},
}
modulePaths := make(map[string]string, len(moduleNames))
for _, moduleName := range moduleNames {
modulePaths[moduleName] = util.JoinPath(TEST_FIXTURE_LOCAL_WITH_INCLUDE_DIR, moduleName)
}
for _, testCase := range testCases {
applyAllStdout := bytes.Buffer{}
applyAllStderr := bytes.Buffer{}
// Cleanup all modules directories.
cleanupTerragruntFolder(t, TEST_FIXTURE_LOCAL_WITH_INCLUDE_DIR)
for _, modulePath := range modulePaths {
cleanupTerragruntFolder(t, modulePath)
}
// Apply modules according to test cases
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-working-dir %s %s", testCase.workingDir, testCase.includeArgs), &applyAllStdout, &applyAllStderr)
logBufferContentsLineByLine(t, applyAllStdout, "apply-all stdout")
logBufferContentsLineByLine(t, applyAllStderr, "apply-all stderr")
if err != nil {
t.Fatalf("apply-all in TestExcludeDirs failed with error: %v. Full std", err)
}
// Check that modules not covered by the include args produced no output
for _, modulePath := range modulePaths {
showStdout := bytes.Buffer{}
showStderr := bytes.Buffer{}
err = runTerragruntCommand(t, fmt.Sprintf("terragrunt show --terragrunt-non-interactive --terragrunt-working-dir %s", modulePath), &showStdout, &showStderr)
logBufferContentsLineByLine(t, showStdout, fmt.Sprintf("show stdout for %s", modulePath))
logBufferContentsLineByLine(t, showStderr, fmt.Sprintf("show stderr for %s", modulePath))
assert.NoError(t, err)
output := showStdout.String()
for _, notIncludedModuleOutput := range testCase.notIncludedModuleOutputs {
assert.NotContains(t, output, notIncludedModuleOutput)
}
}
}
}
func TestIncludeDirsDependencyConsistencyRegression(t *testing.T) {
t.Parallel()
modulePaths := []string{
"amazing-app/k8s",
"clusters/eks",
"testapp/k8s",
}
testPath := filepath.Join(TEST_FIXTURE_REGRESSIONS, "exclude-dependency")
cleanupTerragruntFolder(t, testPath)
for _, modulePath := range modulePaths {
cleanupTerragruntFolder(t, filepath.Join(testPath, modulePath))
}
includedModulesWithNone := runValidateAllWithIncludeAndGetIncludedModules(t, testPath, []string{}, false)
assert.Greater(t, len(includedModulesWithNone), 0)
includedModulesWithAmzApp := runValidateAllWithIncludeAndGetIncludedModules(t, testPath, []string{"amazing-app/k8s"}, false)
assert.Equal(t, includedModulesWithAmzApp, []string{"amazing-app/k8s", "clusters/eks"})
includedModulesWithTestApp := runValidateAllWithIncludeAndGetIncludedModules(t, testPath, []string{"testapp/k8s"}, false)
assert.Equal(t, includedModulesWithTestApp, []string{"clusters/eks", "testapp/k8s"})
}
func TestIncludeDirsStrict(t *testing.T) {
t.Parallel()
modulePaths := []string{
"amazing-app/k8s",
"clusters/eks",
"testapp/k8s",
}
testPath := filepath.Join(TEST_FIXTURE_REGRESSIONS, "exclude-dependency")
cleanupTerragruntFolder(t, testPath)
for _, modulePath := range modulePaths {
cleanupTerragruntFolder(t, filepath.Join(testPath, modulePath))
}
includedModulesWithNone := runValidateAllWithIncludeAndGetIncludedModules(t, testPath, []string{}, true)
assert.Equal(t, includedModulesWithNone, []string{})
includedModulesWithAmzApp := runValidateAllWithIncludeAndGetIncludedModules(t, testPath, []string{"amazing-app/k8s"}, true)
assert.Equal(t, includedModulesWithAmzApp, []string{"amazing-app/k8s"})
includedModulesWithTestApp := runValidateAllWithIncludeAndGetIncludedModules(t, testPath, []string{"testapp/k8s"}, true)
assert.Equal(t, includedModulesWithTestApp, []string{"testapp/k8s"})
}
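// TestTerragruntExternalDependencies verifies that apply-all with --terragrunt-include-external-dependencies also
// applies the external dependency module.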
func TestTerragruntExternalDependencies(t *testing.T) {
t.Parallel()
modules := []string{
"module-a",
"module-b",
}
cleanupTerraformFolder(t, TEST_FIXTURE_EXTERNAL_DEPENDENCIE)
for _, module := range modules {
cleanupTerraformFolder(t, util.JoinPath(TEST_FIXTURE_EXTERNAL_DEPENDENCIE, module))
}
var (
applyAllStdout bytes.Buffer
applyAllStderr bytes.Buffer
)
rootPath := copyEnvironment(t, TEST_FIXTURE_EXTERNAL_DEPENDENCIE)
modulePath := util.JoinPath(rootPath, TEST_FIXTURE_EXTERNAL_DEPENDENCIE, "module-b")
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-include-external-dependencies --terragrunt-working-dir %s", modulePath), &applyAllStdout, &applyAllStderr)
logBufferContentsLineByLine(t, applyAllStdout, "apply-all stdout")
logBufferContentsLineByLine(t, applyAllStderr, "apply-all stderr")
applyAllStdoutString := applyAllStdout.String()
if err != nil {
t.Errorf("Did not expect to get error: %s", err.Error())
}
for _, module := range modules {
assert.Contains(t, applyAllStdoutString, fmt.Sprintf("Hello World, %s", module))
}
}
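// TestTerragruntExcludeExternalDependencies verifies that apply-all with --terragrunt-ignore-external-dependencies
// applies only the target module and skips its external dependency.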
func TestTerragruntExcludeExternalDependencies(t *testing.T) {
t.Parallel()
excludedModule := "module-a"
includedModule := "module-b"
modules := []string{
excludedModule,
includedModule,
}
cleanupTerraformFolder(t, TEST_FIXTURE_EXTERNAL_DEPENDENCIE)
for _, module := range modules {
cleanupTerraformFolder(t, util.JoinPath(TEST_FIXTURE_EXTERNAL_DEPENDENCIE, module))
}
var (
applyAllStdout bytes.Buffer
applyAllStderr bytes.Buffer
)
rootPath := copyEnvironment(t, TEST_FIXTURE_EXTERNAL_DEPENDENCIE)
modulePath := util.JoinPath(rootPath, TEST_FIXTURE_EXTERNAL_DEPENDENCIE, includedModule)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-ignore-external-dependencies --terragrunt-working-dir %s", modulePath), &applyAllStdout, &applyAllStderr)
logBufferContentsLineByLine(t, applyAllStdout, "apply-all stdout")
logBufferContentsLineByLine(t, applyAllStderr, "apply-all stderr")
applyAllStdoutString := applyAllStdout.String()
if err != nil {
t.Errorf("Did not expect to get error: %s", err.Error())
}
assert.Contains(t, applyAllStdoutString, fmt.Sprintf("Hello World, %s", includedModule))
assert.NotContains(t, applyAllStdoutString, fmt.Sprintf("Hello World, %s", excludedModule))
}
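// TestApplySkipTrue verifies that a module with skip = true is skipped and its output is not produced.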
func TestApplySkipTrue(t *testing.T) {
t.Parallel()
rootPath := copyEnvironment(t, TEST_FIXTURE_SKIP)
rootPath = util.JoinPath(rootPath, TEST_FIXTURE_SKIP, "skip-true")
showStdout := bytes.Buffer{}
showStderr := bytes.Buffer{}
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-log-level info --terragrunt-non-interactive --terragrunt-working-dir %s --var person=Hobbs", rootPath), &showStdout, &showStderr)
logBufferContentsLineByLine(t, showStdout, "show stdout")
logBufferContentsLineByLine(t, showStderr, "show stderr")
stdout := showStdout.String()
stderr := showStderr.String()
assert.Nil(t, err)
assert.Regexp(t, regexp.MustCompile("Skipping terragrunt module .*fixture-skip/skip-true/terragrunt.hcl due to skip = true."), stderr)
assert.NotContains(t, stdout, "hello, Hobbs")
}
func TestApplySkipFalse(t *testing.T) {
t.Parallel()
rootPath := copyEnvironment(t, TEST_FIXTURE_SKIP)
rootPath = util.JoinPath(rootPath, TEST_FIXTURE_SKIP, "skip-false")
showStdout := bytes.Buffer{}
showStderr := bytes.Buffer{}
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &showStdout, &showStderr)
logBufferContentsLineByLine(t, showStdout, "show stdout")
logBufferContentsLineByLine(t, showStderr, "show stderr")
stderr := showStderr.String()
stdout := showStdout.String()
assert.Nil(t, err)
assert.Contains(t, stdout, "hello, Hobbs")
assert.NotContains(t, stderr, "Skipping terragrunt module")
}
func TestApplyAllSkipTrue(t *testing.T) {
t.Parallel()
rootPath := copyEnvironment(t, TEST_FIXTURE_SKIP)
rootPath = util.JoinPath(rootPath, TEST_FIXTURE_SKIP, "skip-true")
showStdout := bytes.Buffer{}
showStderr := bytes.Buffer{}
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-working-dir %s --terragrunt-log-level info", rootPath), &showStdout, &showStderr)
logBufferContentsLineByLine(t, showStdout, "show stdout")
logBufferContentsLineByLine(t, showStderr, "show stderr")
stdout := showStdout.String()
stderr := showStderr.String()
assert.Nil(t, err)
assert.Regexp(t, regexp.MustCompile("Skipping terragrunt module .*fixture-skip/skip-true/terragrunt.hcl due to skip = true."), stderr)
assert.Contains(t, stdout, "hello, Ernie")
assert.Contains(t, stdout, "hello, Bert")
}
func TestApplyAllSkipFalse(t *testing.T) {
t.Parallel()
rootPath := copyEnvironment(t, TEST_FIXTURE_SKIP)
rootPath = util.JoinPath(rootPath, TEST_FIXTURE_SKIP, "skip-false")
showStdout := bytes.Buffer{}
showStderr := bytes.Buffer{}
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &showStdout, &showStderr)
logBufferContentsLineByLine(t, showStdout, "show stdout")
logBufferContentsLineByLine(t, showStderr, "show stderr")
stdout := showStdout.String()
stderr := showStderr.String()
assert.Nil(t, err)
assert.Contains(t, stdout, "hello, Hobbs")
assert.Contains(t, stdout, "hello, Ernie")
assert.Contains(t, stdout, "hello, Bert")
assert.NotContains(t, stderr, "Skipping terragrunt module")
}
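// TestTerragruntInfo checks that the terragrunt-info command emits JSON containing the expected download dir,
// terraform binary, and (empty) IAM role.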
func TestTerragruntInfo(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_HOOKS_INIT_ONCE_WITH_SOURCE_NO_BACKEND)
tmpEnvPath := copyEnvironment(t, "fixture-hooks/init-once")
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_HOOKS_INIT_ONCE_WITH_SOURCE_NO_BACKEND)
showStdout := bytes.Buffer{}
showStderr := bytes.Buffer{}
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt terragrunt-info --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &showStdout, &showStderr)
assert.Nil(t, err)
logBufferContentsLineByLine(t, showStdout, "show stdout")
var dat cli.TerragruntInfoGroup
errUnmarshal := json.Unmarshal(showStdout.Bytes(), &dat)
assert.Nil(t, errUnmarshal)
assert.Equal(t, dat.DownloadDir, fmt.Sprintf("%s/%s", rootPath, TERRAGRUNT_CACHE))
assert.Equal(t, dat.TerraformBinary, TERRAFORM_BINARY)
assert.Equal(t, dat.IamRole, "")
}
// Test case for yamldecode bug: https://github.com/gruntwork-io/terragrunt/issues/834
func TestYamlDecodeRegressions(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_REGRESSIONS)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_REGRESSIONS)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_REGRESSIONS, "yamldecode")
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
// Check the output of yamldecode and make sure it doesn't parse the string incorrectly
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
require.NoError(
t,
runTerragruntCommand(t, fmt.Sprintf("terragrunt output -no-color -json --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr),
)
outputs := map[string]TerraformOutput{}
require.NoError(t, json.Unmarshal([]byte(stdout.String()), &outputs))
assert.Equal(t, outputs["test1"].Value, "003")
assert.Equal(t, outputs["test2"].Value, "1.00")
}
// We test the path with remote_state blocks by:
// - Applying all modules initially
// - Deleting the local state of the nested deep dependency
// - Running apply on the root module
// If output optimization is working, we should still get the same correct output even though the state of the
// deepest dependency has been destroyed.
func TestDependencyOutputOptimization(t *testing.T) {
expectOutputLogs := []string{
`Running command: terraform init -get=false prefix=\[.*fixture-get-output/nested-optimization/dep\]`,
}
dependencyOutputOptimizationTest(t, "nested-optimization", true, expectOutputLogs)
}
func TestDependencyOutputOptimizationSkipInit(t *testing.T) {
expectOutputLogs := []string{
`Detected module .*nested-optimization/dep/terragrunt.hcl is already init-ed. Retrieving outputs directly from working directory. prefix=\[.*fixture-get-output/nested-optimization/dep\]`,
}
dependencyOutputOptimizationTest(t, "nested-optimization", false, expectOutputLogs)
}
func TestDependencyOutputOptimizationNoGenerate(t *testing.T) {
expectOutputLogs := []string{
`Running command: terraform init -get=false prefix=\[.*fixture-get-output/nested-optimization-nogen/dep\]`,
}
dependencyOutputOptimizationTest(t, "nested-optimization-nogen", true, expectOutputLogs)
}
func dependencyOutputOptimizationTest(t *testing.T, moduleName string, forceInit bool, expectedOutputLogs []string) {
t.Parallel()
expectedOutput := `They said, "No, The answer is 42"`
generatedUniqueId := uniqueId()
cleanupTerraformFolder(t, TEST_FIXTURE_GET_OUTPUT)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_GET_OUTPUT)
rootPath := filepath.Join(tmpEnvPath, TEST_FIXTURE_GET_OUTPUT, moduleName)
rootTerragruntConfigPath := filepath.Join(rootPath, config.DefaultTerragruntConfigPath)
livePath := filepath.Join(rootPath, "live")
deepDepPath := filepath.Join(rootPath, "deepdep")
depPath := filepath.Join(rootPath, "dep")
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(generatedUniqueId))
lockTableName := fmt.Sprintf("terragrunt-test-locks-%s", strings.ToLower(generatedUniqueId))
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
defer cleanupTableForTest(t, lockTableName, TERRAFORM_REMOTE_STATE_S3_REGION)
copyTerragruntConfigAndFillPlaceholders(t, rootTerragruntConfigPath, rootTerragruntConfigPath, s3BucketName, lockTableName, TERRAFORM_REMOTE_STATE_S3_REGION)
runTerragrunt(t, fmt.Sprintf("terragrunt apply-all --terragrunt-log-level debug --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
// We need to bust the output cache that stores the dependency outputs so that the second run pulls the outputs.
// This is only a problem during testing, where the process is shared across terragrunt runs.
config.ClearOutputCache()
// verify expected output
stdout, _, err := runTerragruntCommandWithOutput(t, fmt.Sprintf("terragrunt output -no-color -json --terragrunt-log-level debug --terragrunt-non-interactive --terragrunt-working-dir %s", livePath))
require.NoError(t, err)
outputs := map[string]TerraformOutput{}
require.NoError(t, json.Unmarshal([]byte(stdout), &outputs))
assert.Equal(t, expectedOutput, outputs["output"].Value)
// If we want to force reinit, delete the relevant .terraform directories
if forceInit {
cleanupTerraformFolder(t, depPath)
}
// Now delete the deepdep state and verify still works (note we need to bust the cache again)
config.ClearOutputCache()
require.NoError(t, os.Remove(filepath.Join(deepDepPath, "terraform.tfstate")))
reout, reerr, err := runTerragruntCommandWithOutput(t, fmt.Sprintf("terragrunt output -no-color -json --terragrunt-log-level debug --terragrunt-non-interactive --terragrunt-working-dir %s", livePath))
require.NoError(t, err)
require.NoError(t, json.Unmarshal([]byte(reout), &outputs))
assert.Equal(t, expectedOutput, outputs["output"].Value)
for _, logRegexp := range expectedOutputLogs {
re, err := regexp.Compile(logRegexp)
require.NoError(t, err)
matches := re.FindAllString(reerr, -1)
assert.Greater(t, len(matches), 0)
}
}
func TestDependencyOutputOptimizationDisableTest(t *testing.T) {
t.Parallel()
expectedOutput := `They said, "No, The answer is 42"`
generatedUniqueId := uniqueId()
cleanupTerraformFolder(t, TEST_FIXTURE_GET_OUTPUT)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_GET_OUTPUT)
rootPath := filepath.Join(tmpEnvPath, TEST_FIXTURE_GET_OUTPUT, "nested-optimization-disable")
rootTerragruntConfigPath := filepath.Join(rootPath, config.DefaultTerragruntConfigPath)
livePath := filepath.Join(rootPath, "live")
deepDepPath := filepath.Join(rootPath, "deepdep")
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(generatedUniqueId))
lockTableName := fmt.Sprintf("terragrunt-test-locks-%s", strings.ToLower(generatedUniqueId))
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
defer cleanupTableForTest(t, lockTableName, TERRAFORM_REMOTE_STATE_S3_REGION)
copyTerragruntConfigAndFillPlaceholders(t, rootTerragruntConfigPath, rootTerragruntConfigPath, s3BucketName, lockTableName, TERRAFORM_REMOTE_STATE_S3_REGION)
runTerragrunt(t, fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
// We need to bust the output cache that stores the dependency outputs so that the second run pulls the outputs.
// This is only a problem during testing, where the process is shared across terragrunt runs.
config.ClearOutputCache()
// verify expected output
stdout, _, err := runTerragruntCommandWithOutput(t, fmt.Sprintf("terragrunt output -no-color -json --terragrunt-non-interactive --terragrunt-working-dir %s", livePath))
require.NoError(t, err)
outputs := map[string]TerraformOutput{}
require.NoError(t, json.Unmarshal([]byte(stdout), &outputs))
assert.Equal(t, expectedOutput, outputs["output"].Value)
// Now delete the deepdep state and verify it no longer works, because it tries to fetch the deepdep dependency
config.ClearOutputCache()
require.NoError(t, os.Remove(filepath.Join(deepDepPath, "terraform.tfstate")))
require.NoError(t, os.RemoveAll(filepath.Join(deepDepPath, ".terraform")))
_, _, err = runTerragruntCommandWithOutput(t, fmt.Sprintf("terragrunt output -no-color -json --terragrunt-non-interactive --terragrunt-working-dir %s", livePath))
require.Error(t, err)
}
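// TestDependencyOutput verifies that the output of a dependency is wired through to the dependent module
// (app3 reports z = 42).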
func TestDependencyOutput(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_GET_OUTPUT)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_GET_OUTPUT)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_GET_OUTPUT, "integration")
runTerragrunt(t, fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
// verify expected output 42
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
app3Path := util.JoinPath(rootPath, "app3")
require.NoError(
t,
runTerragruntCommand(t, fmt.Sprintf("terragrunt output -no-color -json --terragrunt-non-interactive --terragrunt-working-dir %s", app3Path), &stdout, &stderr),
)
outputs := map[string]TerraformOutput{}
require.NoError(t, json.Unmarshal([]byte(stdout.String()), &outputs))
assert.Equal(t, int(outputs["z"].Value.(float64)), 42)
}
func TestDependencyOutputErrorBeforeApply(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_GET_OUTPUT)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_GET_OUTPUT)
rootPath := filepath.Join(tmpEnvPath, TEST_FIXTURE_GET_OUTPUT, "integration")
app3Path := filepath.Join(rootPath, "app3")
showStdout := bytes.Buffer{}
showStderr := bytes.Buffer{}
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt plan --terragrunt-non-interactive --terragrunt-working-dir %s", app3Path), &showStdout, &showStderr)
assert.Error(t, err)
// Verify that we fail because the dependency is not applied yet
assert.Contains(t, err.Error(), "has not been applied yet")
logBufferContentsLineByLine(t, showStdout, "show stdout")
logBufferContentsLineByLine(t, showStderr, "show stderr")
}
func TestDependencyOutputSkipOutputs(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_GET_OUTPUT)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_GET_OUTPUT)
rootPath := filepath.Join(tmpEnvPath, TEST_FIXTURE_GET_OUTPUT, "integration")
emptyPath := filepath.Join(rootPath, "empty")
showStdout := bytes.Buffer{}
showStderr := bytes.Buffer{}
// Test that even if the dependency (app1) is not applied, using skip_outputs will skip pulling the outputs so there
// will be no errors.
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt plan --terragrunt-non-interactive --terragrunt-working-dir %s", emptyPath), &showStdout, &showStderr)
assert.NoError(t, err)
logBufferContentsLineByLine(t, showStdout, "show stdout")
logBufferContentsLineByLine(t, showStderr, "show stderr")
}
func TestDependencyOutputSkipOutputsWithMockOutput(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_GET_OUTPUT)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_GET_OUTPUT)
rootPath := filepath.Join(tmpEnvPath, TEST_FIXTURE_GET_OUTPUT, "mock-outputs")
dependent3Path := filepath.Join(rootPath, "dependent3")
showStdout := bytes.Buffer{}
showStderr := bytes.Buffer{}
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", dependent3Path), &showStdout, &showStderr)
assert.NoError(t, err)
logBufferContentsLineByLine(t, showStdout, "show stdout")
logBufferContentsLineByLine(t, showStderr, "show stderr")
// verify expected output when mocks are used: The answer is 0
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
require.NoError(
t,
runTerragruntCommand(t, fmt.Sprintf("terragrunt output -no-color -json --terragrunt-non-interactive --terragrunt-working-dir %s", dependent3Path), &stdout, &stderr),
)
outputs := map[string]TerraformOutput{}
require.NoError(t, json.Unmarshal([]byte(stdout.String()), &outputs))
assert.Equal(t, outputs["truth"].Value, "The answer is 0")
// Now apply-all so that the dependency is applied, and verify it still uses the mock output
err = runTerragruntCommand(t, fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &showStdout, &showStderr)
assert.NoError(t, err)
logBufferContentsLineByLine(t, showStdout, "show stdout")
logBufferContentsLineByLine(t, showStderr, "show stderr")
// verify expected output when mocks are used: The answer is 0
stdout = bytes.Buffer{}
stderr = bytes.Buffer{}
require.NoError(
t,
runTerragruntCommand(t, fmt.Sprintf("terragrunt output -no-color -json --terragrunt-non-interactive --terragrunt-working-dir %s", dependent3Path), &stdout, &stderr),
)
outputs = map[string]TerraformOutput{}
require.NoError(t, json.Unmarshal([]byte(stdout.String()), &outputs))
assert.Equal(t, outputs["truth"].Value, "The answer is 0")
}
// Test that when you have mock_outputs on a dependency block, the dependent module will use the mocks as the
// dependency outputs instead of erroring out.
func TestDependencyMockOutput(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_GET_OUTPUT)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_GET_OUTPUT)
rootPath := filepath.Join(tmpEnvPath, TEST_FIXTURE_GET_OUTPUT, "mock-outputs")
dependent1Path := filepath.Join(rootPath, "dependent1")
showStdout := bytes.Buffer{}
showStderr := bytes.Buffer{}
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", dependent1Path), &showStdout, &showStderr)
assert.NoError(t, err)
logBufferContentsLineByLine(t, showStdout, "show stdout")
logBufferContentsLineByLine(t, showStderr, "show stderr")
// verify expected output when mocks are used: The answer is 0
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
require.NoError(
t,
runTerragruntCommand(t, fmt.Sprintf("terragrunt output -no-color -json --terragrunt-non-interactive --terragrunt-working-dir %s", dependent1Path), &stdout, &stderr),
)
outputs := map[string]TerraformOutput{}
require.NoError(t, json.Unmarshal([]byte(stdout.String()), &outputs))
assert.Equal(t, outputs["truth"].Value, "The answer is 0")
// We need to bust the output cache that stores the dependency outputs so that the second run pulls the outputs.
// This is only a problem during testing, where the process is shared across terragrunt runs.
config.ClearOutputCache()
// Now apply-all so that the dependency is applied, and verify it uses the dependency output
err = runTerragruntCommand(t, fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &showStdout, &showStderr)
assert.NoError(t, err)
logBufferContentsLineByLine(t, showStdout, "show stdout")
logBufferContentsLineByLine(t, showStderr, "show stderr")
// verify expected output when mocks are used: The answer is 0
stdout = bytes.Buffer{}
stderr = bytes.Buffer{}
require.NoError(
t,
runTerragruntCommand(t, fmt.Sprintf("terragrunt output -no-color -json --terragrunt-non-interactive --terragrunt-working-dir %s", dependent1Path), &stdout, &stderr),
)
outputs = map[string]TerraformOutput{}
require.NoError(t, json.Unmarshal([]byte(stdout.String()), &outputs))
assert.Equal(t, outputs["truth"].Value, "The answer is 42")
}
// Test that when you have mock_outputs on a dependency block, the dependent module will use the mocks as the
// dependency outputs instead of erroring out when running an allowed command.
func TestDependencyMockOutputRestricted(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_GET_OUTPUT)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_GET_OUTPUT)
rootPath := filepath.Join(tmpEnvPath, TEST_FIXTURE_GET_OUTPUT, "mock-outputs")
dependent2Path := filepath.Join(rootPath, "dependent2")
showStdout := bytes.Buffer{}
showStderr := bytes.Buffer{}
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", dependent2Path), &showStdout, &showStderr)
assert.Error(t, err)
// Verify that we fail because the dependency is not applied yet
assert.Contains(t, err.Error(), "has not been applied yet")
logBufferContentsLineByLine(t, showStdout, "show stdout")
logBufferContentsLineByLine(t, showStderr, "show stderr")
// Verify we can run when using one of the allowed commands
showStdout.Reset()
showStderr.Reset()
err = runTerragruntCommand(t, fmt.Sprintf("terragrunt validate --terragrunt-non-interactive --terragrunt-working-dir %s", dependent2Path), &showStdout, &showStderr)
assert.NoError(t, err)
logBufferContentsLineByLine(t, showStdout, "show stdout")
logBufferContentsLineByLine(t, showStderr, "show stderr")
// Verify that validate-all works as well.
showStdout.Reset()
showStderr.Reset()
err = runTerragruntCommand(t, fmt.Sprintf("terragrunt validate-all --terragrunt-non-interactive --terragrunt-working-dir %s", dependent2Path), &showStdout, &showStderr)
assert.NoError(t, err)
logBufferContentsLineByLine(t, showStdout, "show stdout")
logBufferContentsLineByLine(t, showStderr, "show stderr")
showStdout.Reset()
showStderr.Reset()
err = runTerragruntCommand(t, fmt.Sprintf("terragrunt validate-all --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &showStdout, &showStderr)
assert.NoError(t, err)
logBufferContentsLineByLine(t, showStdout, "show stdout")
logBufferContentsLineByLine(t, showStderr, "show stderr")
}
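// TestDependencyOutputTypeConversion verifies that values of every type pass through dependency outputs without
// losing their types.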
func TestDependencyOutputTypeConversion(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_GET_OUTPUT)
cleanupTerraformFolder(t, TEST_FIXTURE_INPUTS)
tmpEnvPath := copyEnvironment(t, ".")
inputsPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_INPUTS)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_GET_OUTPUT, "type-conversion")
// First apply the inputs module
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", inputsPath))
// Then apply the outputs module
showStdout := bytes.Buffer{}
showStderr := bytes.Buffer{}
assert.NoError(
t,
runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &showStdout, &showStderr),
)
logBufferContentsLineByLine(t, showStdout, "show stdout")
logBufferContentsLineByLine(t, showStderr, "show stderr")
// Now check the outputs to make sure they are as expected
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
require.NoError(
t,
runTerragruntCommand(t, fmt.Sprintf("terragrunt output -no-color -json --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr),
)
outputs := map[string]TerraformOutput{}
require.NoError(t, json.Unmarshal([]byte(stdout.String()), &outputs))
assert.Equal(t, outputs["bool"].Value, true)
assert.Equal(t, outputs["list_bool"].Value, []interface{}{true, false})
assert.Equal(t, outputs["list_number"].Value, []interface{}{1.0, 2.0, 3.0})
assert.Equal(t, outputs["list_string"].Value, []interface{}{"a", "b", "c"})
assert.Equal(t, outputs["map_bool"].Value, map[string]interface{}{"foo": true, "bar": false, "baz": true})
assert.Equal(t, outputs["map_number"].Value, map[string]interface{}{"foo": 42.0, "bar": 12345.0})
assert.Equal(t, outputs["map_string"].Value, map[string]interface{}{"foo": "bar"})
assert.Equal(t, outputs["number"].Value, 42.0)
assert.Equal(t, outputs["object"].Value, map[string]interface{}{"list": []interface{}{1.0, 2.0, 3.0}, "map": map[string]interface{}{"foo": "bar"}, "num": 42.0, "str": "string"})
assert.Equal(t, outputs["string"].Value, "string")
assert.Equal(t, outputs["from_env"].Value, "default")
}
// Regression testing for https://github.com/gruntwork-io/terragrunt/issues/1102: map keys must be ordered to
// avoid random placement when the terraform file is generated.
func TestOrderedMapOutputRegressions1102(t *testing.T) {
t.Parallel()
generateTestCase := filepath.Join(TEST_FIXTURE_GET_OUTPUT, "regression-1102")
cleanupTerraformFolder(t, generateTestCase)
cleanupTerragruntFolder(t, generateTestCase)
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
command := fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", generateTestCase)
path := filepath.Join(generateTestCase, "backend.tf")
// runs terragrunt for the first time and checks the output "backend.tf" file.
require.NoError(
t,
runTerragruntCommand(t, command, &stdout, &stderr),
)
expected, err := ioutil.ReadFile(path)
require.NoError(t, err)
require.Contains(t, string(expected), "local")
// runs terragrunt again. All the outputs must be
// equal to the first run.
for i := 0; i < 20; i++ {
require.NoError(
t,
runTerragruntCommand(t, command, &stdout, &stderr),
)
actual, err := ioutil.ReadFile(path)
require.NoError(t, err)
require.Equal(t, expected, actual)
}
}
// Test that we get the expected error message about dependency cycles when there is a cycle in the dependency chain
func TestDependencyOutputCycleHandling(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_GET_OUTPUT)
testCases := []string{
"aa",
"aba",
"abca",
"abcda",
}
for _, testCase := range testCases {
// Capture the range variable in the loop so that the binding is consistent across the parallel subtests.
testCase := testCase
t.Run(testCase, func(t *testing.T) {
t.Parallel()
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_GET_OUTPUT)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_GET_OUTPUT, "cycle", testCase)
fooPath := util.JoinPath(rootPath, "foo")
planStdout := bytes.Buffer{}
planStderr := bytes.Buffer{}
err := runTerragruntCommand(
t,
fmt.Sprintf("terragrunt plan --terragrunt-non-interactive --terragrunt-working-dir %s", fooPath),
&planStdout,
&planStderr,
)
logBufferContentsLineByLine(t, planStdout, "plan stdout")
logBufferContentsLineByLine(t, planStderr, "plan stderr")
assert.Error(t, err)
assert.True(t, strings.Contains(err.Error(), "Found a dependency cycle between modules"))
})
}
}
// Regression testing for https://github.com/gruntwork-io/terragrunt/issues/854: Referencing a dependency that is a
// subdirectory of the current config, which includes an `include` block has problems resolving the correct relative
// path.
func TestDependencyOutputRegression854(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_GET_OUTPUT)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_GET_OUTPUT)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_GET_OUTPUT, "regression-854", "root")
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
err := runTerragruntCommand(
t,
fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath),
&stdout,
&stderr,
)
logBufferContentsLineByLine(t, stdout, "stdout")
logBufferContentsLineByLine(t, stderr, "stderr")
require.NoError(t, err)
}
// Regression testing for https://github.com/gruntwork-io/terragrunt/issues/906
func TestDependencyOutputSameOutputConcurrencyRegression(t *testing.T) {
t.Parallel()
// Use func to isolate each test run to a single s3 bucket that is deleted. We run the test multiple times
// because the underlying error we are trying to test against is nondeterministic, and thus may not always work
// the first time.
testCase := func() {
cleanupTerraformFolder(t, TEST_FIXTURE_GET_OUTPUT)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_GET_OUTPUT)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_GET_OUTPUT, "regression-906")
		// Make sure to fill in the s3 bucket in the config. Also ensure the bucket is deleted before the next
		// loop iteration.
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s%s", strings.ToLower(uniqueId()), strings.ToLower(uniqueId()))
defer deleteS3BucketWithRetry(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
commonDepConfigPath := util.JoinPath(rootPath, "common-dep", "terragrunt.hcl")
copyTerragruntConfigAndFillPlaceholders(t, commonDepConfigPath, commonDepConfigPath, s3BucketName, "not-used", "not-used")
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
err := runTerragruntCommand(
t,
fmt.Sprintf("terragrunt apply-all --terragrunt-source-update --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath),
&stdout,
&stderr,
)
logBufferContentsLineByLine(t, stdout, "stdout")
logBufferContentsLineByLine(t, stderr, "stderr")
require.NoError(t, err)
}
for i := 0; i < 3; i++ {
testCase()
// We need to bust the output cache that stores the dependency outputs so that the second run pulls the outputs.
// This is only a problem during testing, where the process is shared across terragrunt runs.
config.ClearOutputCache()
}
}
// Regression testing for a bug where the terragrunt output runs triggered by dependency blocks were done in the
// terragrunt-cache of the child, not the parent.
func TestDependencyOutputCachePathBug(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_GET_OUTPUT)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_GET_OUTPUT)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_GET_OUTPUT, "localstate", "live")
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
err := runTerragruntCommand(
t,
fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath),
&stdout,
&stderr,
)
logBufferContentsLineByLine(t, stdout, "stdout")
logBufferContentsLineByLine(t, stderr, "stderr")
require.NoError(t, err)
}
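// Test that dependency outputs are resolved correctly when the module source is overridden with
// --terragrunt-source (regression-1124 fixture).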
func TestDependencyOutputWithTerragruntSource(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_GET_OUTPUT)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_GET_OUTPUT)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_GET_OUTPUT, "regression-1124", "live")
modulePath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_GET_OUTPUT, "regression-1124", "modules")
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
err := runTerragruntCommand(
t,
fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-working-dir %s --terragrunt-source %s", rootPath, modulePath),
&stdout,
&stderr,
)
logBufferContentsLineByLine(t, stdout, "stdout")
logBufferContentsLineByLine(t, stderr, "stderr")
require.NoError(t, err)
}
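// Test that hooks in a dependency are not re-run when only the dependent module is planned: after the initial
// apply-all, deleting the dependency's file.out and planning the main module must not recreate it
// (regression-1273 fixture).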
func TestDependencyOutputWithHooks(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_GET_OUTPUT)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_GET_OUTPUT)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_GET_OUTPUT, "regression-1273")
depPathFileOut := util.JoinPath(rootPath, "dep", "file.out")
mainPath := util.JoinPath(rootPath, "main")
mainPathFileOut := util.JoinPath(mainPath, "file.out")
runTerragrunt(t, fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
// We need to bust the output cache that stores the dependency outputs so that the second run pulls the outputs.
// This is only a problem during testing, where the process is shared across terragrunt runs.
config.ClearOutputCache()
// The file should exist in the first run.
assert.True(t, util.FileExists(depPathFileOut))
assert.False(t, util.FileExists(mainPathFileOut))
// Now delete file and run just main again. It should NOT create file.out.
require.NoError(t, os.Remove(depPathFileOut))
runTerragrunt(t, fmt.Sprintf("terragrunt plan --terragrunt-non-interactive --terragrunt-working-dir %s", mainPath))
assert.False(t, util.FileExists(depPathFileOut))
assert.False(t, util.FileExists(mainPathFileOut))
}
func TestDeepDependencyOutputWithMock(t *testing.T) {
	// Test that the terraform command flows through to mock output retrieval for deeper dependencies. Previously the
	// terraform command was being overwritten, so by the time the deep dependency retrieval ran, it had been replaced
	// with "output" instead of the original command.
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_GET_OUTPUT)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_GET_OUTPUT)
rootPath := filepath.Join(tmpEnvPath, TEST_FIXTURE_GET_OUTPUT, "nested-mocks", "live")
// Since we haven't applied anything, this should only succeed if mock outputs are used.
runTerragrunt(t, fmt.Sprintf("terragrunt validate --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
}
func TestAWSGetCallerIdentityFunctions(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_AWS_GET_CALLER_IDENTITY)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_AWS_GET_CALLER_IDENTITY)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_AWS_GET_CALLER_IDENTITY)
runTerragrunt(t, fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
// verify expected outputs are not empty
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
require.NoError(
t,
runTerragruntCommand(t, fmt.Sprintf("terragrunt output -no-color -json --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr),
)
// Get values from STS
sess, err := session.NewSession()
if err != nil {
t.Fatalf("Error while creating AWS session: %v", err)
}
identity, err := sts.New(sess).GetCallerIdentity(nil)
if err != nil {
t.Fatalf("Error while getting AWS caller identity: %v", err)
}
outputs := map[string]TerraformOutput{}
require.NoError(t, json.Unmarshal([]byte(stdout.String()), &outputs))
assert.Equal(t, outputs["account"].Value, *identity.Account)
assert.Equal(t, outputs["arn"].Value, *identity.Arn)
assert.Equal(t, outputs["user_id"].Value, *identity.UserId)
}
func TestGetPlatform(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_GET_PLATFORM)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_GET_PLATFORM)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_GET_PLATFORM)
runTerragrunt(t, fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
// verify expected outputs are not empty
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
require.NoError(
t,
runTerragruntCommand(t, fmt.Sprintf("terragrunt output -no-color -json --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr),
)
outputs := map[string]TerraformOutput{}
require.NoError(t, json.Unmarshal([]byte(stdout.String()), &outputs))
platform, hasPlatform := outputs["platform"]
require.True(t, hasPlatform)
require.Equal(t, platform.Value, runtime.GOOS)
}
func TestDataDir(t *testing.T) {
	// Cannot be run in parallel with other tests as it modifies the process's environment.
cleanupTerraformFolder(t, TEST_FIXTURE_DIRS_PATH)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_DIRS_PATH)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_DIRS_PATH)
os.Setenv("TF_DATA_DIR", util.JoinPath(tmpEnvPath, "data_dir"))
defer os.Unsetenv("TF_DATA_DIR")
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt plan --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr)
erroutput := stderr.String()
if err != nil {
t.Errorf("Did not expect to get an error: %s", err.Error())
}
assert.Contains(t, erroutput, "Initializing provider plugins")
var (
stdout2 bytes.Buffer
stderr2 bytes.Buffer
)
err = runTerragruntCommand(t, fmt.Sprintf("terragrunt plan --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout2, &stderr2)
erroutput2 := stderr2.String()
if err != nil {
t.Errorf("Did not expect to get an error: %s", err.Error())
}
assert.NotContains(t, erroutput2, "Initializing provider plugins")
}
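// Test reading a terragrunt config that declares a dependency: the inputs fixture is applied first, then the
// with_dependency fixture is applied and its outputs are verified.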
func TestReadTerragruntConfigWithDependency(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_READ_CONFIG)
cleanupTerraformFolder(t, TEST_FIXTURE_INPUTS)
tmpEnvPath := copyEnvironment(t, ".")
inputsPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_INPUTS)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_READ_CONFIG, "with_dependency")
// First apply the inputs module
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", inputsPath))
// Then apply the read config module
showStdout := bytes.Buffer{}
showStderr := bytes.Buffer{}
assert.NoError(
t,
runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &showStdout, &showStderr),
)
logBufferContentsLineByLine(t, showStdout, "show stdout")
logBufferContentsLineByLine(t, showStderr, "show stderr")
// Now check the outputs to make sure they are as expected
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
require.NoError(
t,
runTerragruntCommand(t, fmt.Sprintf("terragrunt output -no-color -json --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr),
)
outputs := map[string]TerraformOutput{}
require.NoError(t, json.Unmarshal([]byte(stdout.String()), &outputs))
assert.Equal(t, outputs["bool"].Value, true)
assert.Equal(t, outputs["list_bool"].Value, []interface{}{true, false})
assert.Equal(t, outputs["list_number"].Value, []interface{}{1.0, 2.0, 3.0})
assert.Equal(t, outputs["list_string"].Value, []interface{}{"a", "b", "c"})
assert.Equal(t, outputs["map_bool"].Value, map[string]interface{}{"foo": true, "bar": false, "baz": true})
assert.Equal(t, outputs["map_number"].Value, map[string]interface{}{"foo": 42.0, "bar": 12345.0})
assert.Equal(t, outputs["map_string"].Value, map[string]interface{}{"foo": "bar"})
assert.Equal(t, outputs["number"].Value, 42.0)
assert.Equal(t, outputs["object"].Value, map[string]interface{}{"list": []interface{}{1.0, 2.0, 3.0}, "map": map[string]interface{}{"foo": "bar"}, "num": 42.0, "str": "string"})
assert.Equal(t, outputs["string"].Value, "string")
assert.Equal(t, outputs["from_env"].Value, "default")
}
func TestReadTerragruntConfigFromDependency(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_READ_CONFIG)
tmpEnvPath := copyEnvironment(t, ".")
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_READ_CONFIG, "from_dependency")
showStdout := bytes.Buffer{}
showStderr := bytes.Buffer{}
assert.NoError(
t,
runTerragruntCommand(t, fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &showStdout, &showStderr),
)
logBufferContentsLineByLine(t, showStdout, "show stdout")
logBufferContentsLineByLine(t, showStderr, "show stderr")
// Now check the outputs to make sure they are as expected
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
require.NoError(
t,
runTerragruntCommand(t, fmt.Sprintf("terragrunt output -no-color -json --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr),
)
outputs := map[string]TerraformOutput{}
require.NoError(t, json.Unmarshal([]byte(stdout.String()), &outputs))
assert.Equal(t, outputs["bar"].Value, "hello world")
}
func TestReadTerragruntConfigWithDefault(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_READ_CONFIG)
rootPath := util.JoinPath(TEST_FIXTURE_READ_CONFIG, "with_default")
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
// check the outputs to make sure they are as expected
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
require.NoError(
t,
runTerragruntCommand(t, fmt.Sprintf("terragrunt output -no-color -json --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr),
)
outputs := map[string]TerraformOutput{}
require.NoError(t, json.Unmarshal([]byte(stdout.String()), &outputs))
assert.Equal(t, outputs["data"].Value, "default value")
}
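// Test that the terragrunt_dir and original_terragrunt_dir outputs resolve to the expected absolute paths for the
// dependency module, for the root module, and when everything is applied via run-all.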
func TestReadTerragruntConfigWithOriginalTerragruntDir(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_READ_CONFIG)
rootPath := util.JoinPath(TEST_FIXTURE_READ_CONFIG, "with_original_terragrunt_dir")
rootPathAbs, err := filepath.Abs(rootPath)
require.NoError(t, err)
fooPathAbs := filepath.Join(rootPathAbs, "foo")
depPathAbs := filepath.Join(rootPathAbs, "dep")
// Run apply on the dependency module and make sure we get the outputs we expect
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", depPathAbs))
depStdout := bytes.Buffer{}
depStderr := bytes.Buffer{}
require.NoError(
t,
runTerragruntCommand(t, fmt.Sprintf("terragrunt output -no-color -json --terragrunt-non-interactive --terragrunt-working-dir %s", depPathAbs), &depStdout, &depStderr),
)
depOutputs := map[string]TerraformOutput{}
require.NoError(t, json.Unmarshal([]byte(depStdout.String()), &depOutputs))
assert.Equal(t, depPathAbs, depOutputs["terragrunt_dir"].Value)
assert.Equal(t, depPathAbs, depOutputs["original_terragrunt_dir"].Value)
assert.Equal(t, fooPathAbs, depOutputs["bar_terragrunt_dir"].Value)
assert.Equal(t, depPathAbs, depOutputs["bar_original_terragrunt_dir"].Value)
// Run apply on the root module and make sure we get the expected outputs
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
rootStdout := bytes.Buffer{}
rootStderr := bytes.Buffer{}
require.NoError(
t,
runTerragruntCommand(t, fmt.Sprintf("terragrunt output -no-color -json --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &rootStdout, &rootStderr),
)
rootOutputs := map[string]TerraformOutput{}
require.NoError(t, json.Unmarshal([]byte(rootStdout.String()), &rootOutputs))
assert.Equal(t, fooPathAbs, rootOutputs["terragrunt_dir"].Value)
assert.Equal(t, rootPathAbs, rootOutputs["original_terragrunt_dir"].Value)
assert.Equal(t, depPathAbs, rootOutputs["dep_terragrunt_dir"].Value)
assert.Equal(t, depPathAbs, rootOutputs["dep_original_terragrunt_dir"].Value)
assert.Equal(t, fooPathAbs, rootOutputs["dep_bar_terragrunt_dir"].Value)
assert.Equal(t, depPathAbs, rootOutputs["dep_bar_original_terragrunt_dir"].Value)
// Run 'run-all apply' and make sure all the outputs are identical in the root module and the dependency module
runTerragrunt(t, fmt.Sprintf("terragrunt run-all apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
runAllRootStdout := bytes.Buffer{}
runAllRootStderr := bytes.Buffer{}
require.NoError(
t,
runTerragruntCommand(t, fmt.Sprintf("terragrunt output -no-color -json --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &runAllRootStdout, &runAllRootStderr),
)
runAllRootOutputs := map[string]TerraformOutput{}
require.NoError(t, json.Unmarshal([]byte(runAllRootStdout.String()), &runAllRootOutputs))
runAllDepStdout := bytes.Buffer{}
runAllDepStderr := bytes.Buffer{}
require.NoError(
t,
runTerragruntCommand(t, fmt.Sprintf("terragrunt output -no-color -json --terragrunt-non-interactive --terragrunt-working-dir %s", depPathAbs), &runAllDepStdout, &runAllDepStderr),
)
runAllDepOutputs := map[string]TerraformOutput{}
require.NoError(t, json.Unmarshal([]byte(runAllDepStdout.String()), &runAllDepOutputs))
assert.Equal(t, fooPathAbs, runAllRootOutputs["terragrunt_dir"].Value)
assert.Equal(t, rootPathAbs, runAllRootOutputs["original_terragrunt_dir"].Value)
assert.Equal(t, depPathAbs, runAllRootOutputs["dep_terragrunt_dir"].Value)
assert.Equal(t, depPathAbs, runAllRootOutputs["dep_original_terragrunt_dir"].Value)
assert.Equal(t, fooPathAbs, runAllRootOutputs["dep_bar_terragrunt_dir"].Value)
assert.Equal(t, depPathAbs, runAllRootOutputs["dep_bar_original_terragrunt_dir"].Value)
assert.Equal(t, depPathAbs, runAllDepOutputs["terragrunt_dir"].Value)
assert.Equal(t, depPathAbs, runAllDepOutputs["original_terragrunt_dir"].Value)
assert.Equal(t, fooPathAbs, runAllDepOutputs["bar_terragrunt_dir"].Value)
assert.Equal(t, depPathAbs, runAllDepOutputs["bar_original_terragrunt_dir"].Value)
}
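// Test reading a full terragrunt config (full fixture): primitive attributes, simple maps, and complex blocks such
// as generate, remote_state, and terraform must all round-trip into the expected output values.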
func TestReadTerragruntConfigFull(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_READ_CONFIG)
rootPath := util.JoinPath(TEST_FIXTURE_READ_CONFIG, "full")
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
// check the outputs to make sure they are as expected
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
require.NoError(
t,
runTerragruntCommand(t, fmt.Sprintf("terragrunt output -no-color -json --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr),
)
outputs := map[string]TerraformOutput{}
require.NoError(t, json.Unmarshal([]byte(stdout.String()), &outputs))
// Primitive config attributes
assert.Equal(t, outputs["terraform_binary"].Value, "terragrunt")
assert.Equal(t, outputs["terraform_version_constraint"].Value, "= 0.12.20")
assert.Equal(t, outputs["terragrunt_version_constraint"].Value, "= 0.23.18")
assert.Equal(t, outputs["download_dir"].Value, ".terragrunt-cache")
assert.Equal(t, outputs["iam_role"].Value, "TerragruntIAMRole")
assert.Equal(t, outputs["skip"].Value, "true")
assert.Equal(t, outputs["prevent_destroy"].Value, "true")
// Simple maps
localstgOut := map[string]interface{}{}
require.NoError(t, json.Unmarshal([]byte(outputs["localstg"].Value.(string)), &localstgOut))
assert.Equal(t, localstgOut, map[string]interface{}{"the_answer": float64(42)})
inputsOut := map[string]interface{}{}
require.NoError(t, json.Unmarshal([]byte(outputs["inputs"].Value.(string)), &inputsOut))
assert.Equal(t, inputsOut, map[string]interface{}{"doc": "Emmett Brown"})
// Complex blocks
depsOut := map[string]interface{}{}
require.NoError(t, json.Unmarshal([]byte(outputs["dependencies"].Value.(string)), &depsOut))
assert.Equal(
t,
depsOut,
map[string]interface{}{
"paths": []interface{}{"../module-a"},
},
)
generateOut := map[string]interface{}{}
require.NoError(t, json.Unmarshal([]byte(outputs["generate"].Value.(string)), &generateOut))
assert.Equal(
t,
generateOut,
map[string]interface{}{
"provider": map[string]interface{}{
"path": "provider.tf",
"if_exists": "overwrite_terragrunt",
"comment_prefix": "# ",
"disable_signature": false,
"contents": `provider "aws" {
region = "us-east-1"
}
`,
},
},
)
remoteStateOut := map[string]interface{}{}
require.NoError(t, json.Unmarshal([]byte(outputs["remote_state"].Value.(string)), &remoteStateOut))
assert.Equal(
t,
remoteStateOut,
map[string]interface{}{
"backend": "local",
"disable_init": false,
"disable_dependency_optimization": false,
"generate": map[string]interface{}{"path": "backend.tf", "if_exists": "overwrite_terragrunt"},
"config": map[string]interface{}{"path": "foo.tfstate"},
},
)
terraformOut := map[string]interface{}{}
require.NoError(t, json.Unmarshal([]byte(outputs["terraformtg"].Value.(string)), &terraformOut))
assert.Equal(
t,
terraformOut,
map[string]interface{}{
"source": "./delorean",
"extra_arguments": map[string]interface{}{
"var-files": map[string]interface{}{
"name": "var-files",
"commands": []interface{}{"apply", "plan"},
"arguments": nil,
"required_var_files": []interface{}{"extra.tfvars"},
"optional_var_files": []interface{}{"optional.tfvars"},
"env_vars": map[string]interface{}{
"TF_VAR_custom_var": "I'm set in extra_arguments env_vars",
},
},
},
"before_hook": map[string]interface{}{
"before_hook_1": map[string]interface{}{
"name": "before_hook_1",
"commands": []interface{}{"apply", "plan"},
"execute": []interface{}{"touch", "before.out"},
"working_dir": nil,
"run_on_error": true,
},
},
"after_hook": map[string]interface{}{
"after_hook_1": map[string]interface{}{
"name": "after_hook_1",
"commands": []interface{}{"apply", "plan"},
"execute": []interface{}{"touch", "after.out"},
"working_dir": nil,
"run_on_error": true,
},
},
},
)
}
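// logBufferContentsLineByLine writes the full contents of the given buffer to the test log, one line at a time,
// prefixed with the test name and the given label.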
func logBufferContentsLineByLine(t *testing.T, out bytes.Buffer, label string) {
t.Logf("[%s] Full contents of %s:", t.Name(), label)
lines := strings.Split(out.String(), "\n")
for _, line := range lines {
t.Logf("[%s] %s", t.Name(), line)
}
}
func TestTerragruntGenerateBlockSkip(t *testing.T) {
t.Parallel()
generateTestCase := filepath.Join(TEST_FIXTURE_CODEGEN_PATH, "generate-block", "skip")
cleanupTerraformFolder(t, generateTestCase)
cleanupTerragruntFolder(t, generateTestCase)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", generateTestCase))
assert.False(t, fileIsInFolder(t, "foo.tfstate", generateTestCase))
}
func TestTerragruntGenerateBlockOverwrite(t *testing.T) {
t.Parallel()
generateTestCase := filepath.Join(TEST_FIXTURE_CODEGEN_PATH, "generate-block", "overwrite")
cleanupTerraformFolder(t, generateTestCase)
cleanupTerragruntFolder(t, generateTestCase)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", generateTestCase))
// If the state file was written as foo.tfstate, that means it overwrote the local backend config.
assert.True(t, fileIsInFolder(t, "foo.tfstate", generateTestCase))
assert.False(t, fileIsInFolder(t, "bar.tfstate", generateTestCase))
}
func TestTerragruntGenerateAttr(t *testing.T) {
t.Parallel()
generateTestCase := filepath.Join(TEST_FIXTURE_CODEGEN_PATH, "generate-attr")
cleanupTerraformFolder(t, generateTestCase)
cleanupTerragruntFolder(t, generateTestCase)
text := "test-terragrunt-generate-attr-hello-world"
stdout, _, err := runTerragruntCommandWithOutput(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s -var text=\"%s\"", generateTestCase, text))
require.NoError(t, err)
require.Contains(t, stdout, text)
}
func TestTerragruntGenerateBlockOverwriteTerragruntSuccess(t *testing.T) {
t.Parallel()
generateTestCase := filepath.Join(TEST_FIXTURE_CODEGEN_PATH, "generate-block", "overwrite_terragrunt")
cleanupTerraformFolder(t, generateTestCase)
cleanupTerragruntFolder(t, generateTestCase)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", generateTestCase))
// If the state file was written as foo.tfstate, that means it overwrote the local backend config.
assert.True(t, fileIsInFolder(t, "foo.tfstate", generateTestCase))
assert.False(t, fileIsInFolder(t, "bar.tfstate", generateTestCase))
}
func TestTerragruntGenerateBlockOverwriteTerragruntFail(t *testing.T) {
t.Parallel()
generateTestCase := filepath.Join(TEST_FIXTURE_CODEGEN_PATH, "generate-block", "overwrite_terragrunt_error")
cleanupTerraformFolder(t, generateTestCase)
cleanupTerragruntFolder(t, generateTestCase)
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", generateTestCase), &stdout, &stderr)
require.Error(t, err)
_, ok := errors.Unwrap(err).(codegen.GenerateFileExistsError)
assert.True(t, ok)
}
func TestTerragruntGenerateBlockNestedInherit(t *testing.T) {
t.Parallel()
generateTestCase := filepath.Join(TEST_FIXTURE_CODEGEN_PATH, "generate-block", "nested", "child_inherit")
cleanupTerraformFolder(t, generateTestCase)
cleanupTerragruntFolder(t, generateTestCase)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", generateTestCase))
// If the state file was written as foo.tfstate, that means it inherited the config
assert.True(t, fileIsInFolder(t, "foo.tfstate", generateTestCase))
assert.False(t, fileIsInFolder(t, "bar.tfstate", generateTestCase))
// Also check to make sure the child config generate block was included
assert.True(t, fileIsInFolder(t, "random_file.txt", generateTestCase))
}
func TestTerragruntGenerateBlockNestedOverwrite(t *testing.T) {
t.Parallel()
generateTestCase := filepath.Join(TEST_FIXTURE_CODEGEN_PATH, "generate-block", "nested", "child_overwrite")
cleanupTerraformFolder(t, generateTestCase)
cleanupTerragruntFolder(t, generateTestCase)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", generateTestCase))
	// If the state file was written as bar.tfstate, that means it overwrote the parent config
assert.False(t, fileIsInFolder(t, "foo.tfstate", generateTestCase))
assert.True(t, fileIsInFolder(t, "bar.tfstate", generateTestCase))
// Also check to make sure the child config generate block was included
assert.True(t, fileIsInFolder(t, "random_file.txt", generateTestCase))
}
func TestTerragruntGenerateBlockDisableSignature(t *testing.T) {
t.Parallel()
generateTestCase := filepath.Join(TEST_FIXTURE_CODEGEN_PATH, "generate-block", "disable-signature")
cleanupTerraformFolder(t, generateTestCase)
cleanupTerragruntFolder(t, generateTestCase)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", generateTestCase))
// Now check the outputs to make sure they are as expected
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
require.NoError(
t,
runTerragruntCommand(t, fmt.Sprintf("terragrunt output -no-color -json --terragrunt-non-interactive --terragrunt-working-dir %s", generateTestCase), &stdout, &stderr),
)
outputs := map[string]TerraformOutput{}
require.NoError(t, json.Unmarshal([]byte(stdout.String()), &outputs))
assert.Equal(t, outputs["text"].Value, "Hello, World!")
}
func TestTerragruntRemoteStateCodegenGeneratesBackendBlock(t *testing.T) {
t.Parallel()
generateTestCase := filepath.Join(TEST_FIXTURE_CODEGEN_PATH, "remote-state", "base")
cleanupTerraformFolder(t, generateTestCase)
cleanupTerragruntFolder(t, generateTestCase)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", generateTestCase))
// If the state file was written as foo.tfstate, that means it wrote out the local backend config.
assert.True(t, fileIsInFolder(t, "foo.tfstate", generateTestCase))
}
func TestTerragruntRemoteStateCodegenOverwrites(t *testing.T) {
t.Parallel()
generateTestCase := filepath.Join(TEST_FIXTURE_CODEGEN_PATH, "remote-state", "overwrite")
cleanupTerraformFolder(t, generateTestCase)
cleanupTerragruntFolder(t, generateTestCase)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", generateTestCase))
// If the state file was written as foo.tfstate, that means it overwrote the local backend config.
assert.True(t, fileIsInFolder(t, "foo.tfstate", generateTestCase))
assert.False(t, fileIsInFolder(t, "bar.tfstate", generateTestCase))
}
func TestTerragruntRemoteStateCodegenGeneratesBackendBlockS3(t *testing.T) {
t.Parallel()
generateTestCase := filepath.Join(TEST_FIXTURE_CODEGEN_PATH, "remote-state", "s3")
cleanupTerraformFolder(t, generateTestCase)
cleanupTerragruntFolder(t, generateTestCase)
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
lockTableName := fmt.Sprintf("terragrunt-test-locks-%s", strings.ToLower(uniqueId()))
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
defer cleanupTableForTest(t, lockTableName, TERRAFORM_REMOTE_STATE_S3_REGION)
tmpTerragruntConfigPath := createTmpTerragruntConfig(t, generateTestCase, s3BucketName, lockTableName, config.DefaultTerragruntConfigPath)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-config %s --terragrunt-working-dir %s", tmpTerragruntConfigPath, generateTestCase))
}
func TestTerragruntRemoteStateCodegenErrorsIfExists(t *testing.T) {
t.Parallel()
generateTestCase := filepath.Join(TEST_FIXTURE_CODEGEN_PATH, "remote-state", "error")
cleanupTerraformFolder(t, generateTestCase)
cleanupTerragruntFolder(t, generateTestCase)
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", generateTestCase), &stdout, &stderr)
require.Error(t, err)
_, ok := errors.Unwrap(err).(codegen.GenerateFileExistsError)
assert.True(t, ok)
}
func TestTerragruntRemoteStateCodegenDoesNotGenerateWithSkip(t *testing.T) {
t.Parallel()
generateTestCase := filepath.Join(TEST_FIXTURE_CODEGEN_PATH, "remote-state", "skip")
cleanupTerraformFolder(t, generateTestCase)
cleanupTerragruntFolder(t, generateTestCase)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", generateTestCase))
assert.False(t, fileIsInFolder(t, "foo.tfstate", generateTestCase))
}
func TestTerragruntValidateAllWithVersionChecks(t *testing.T) {
t.Parallel()
tmpEnvPath := copyEnvironment(t, "fixture-version-check")
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
err := runTerragruntVersionCommand(t, "v0.23.21", fmt.Sprintf("terragrunt validate-all --terragrunt-non-interactive --terragrunt-working-dir %s", tmpEnvPath), &stdout, &stderr)
logBufferContentsLineByLine(t, stdout, "stdout")
logBufferContentsLineByLine(t, stderr, "stderr")
require.NoError(t, err)
}
func TestTerragruntVersionConstraints(t *testing.T) {
testCases := []struct {
name string
terragruntVersion string
terragruntConstraint string
shouldSucceed bool
}{
{
"version meets constraint equal",
"v0.23.18",
"terragrunt_version_constraint = \">= v0.23.18\"",
true,
},
{
"version meets constriant greater patch",
"v0.23.19",
"terragrunt_version_constraint = \">= v0.23.18\"",
true,
},
{
"version meets constriant greater major",
"v1.0.0",
"terragrunt_version_constraint = \">= v0.23.18\"",
true,
},
{
"version meets constriant less patch",
"v0.23.17",
"terragrunt_version_constraint = \">= v0.23.18\"",
false,
},
{
"version meets constriant less major",
"v0.22.18",
"terragrunt_version_constraint = \">= v0.23.18\"",
false,
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_READ_CONFIG)
rootPath := filepath.Join(tmpEnvPath, TEST_FIXTURE_READ_CONFIG, "with_constraints")
tmpTerragruntConfigPath := createTmpTerragruntConfigContent(t, testCase.terragruntConstraint, config.DefaultTerragruntConfigPath)
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
err := runTerragruntVersionCommand(t, testCase.terragruntVersion, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-config %s --terragrunt-working-dir %s", tmpTerragruntConfigPath, rootPath), &stdout, &stderr)
logBufferContentsLineByLine(t, stdout, "stdout")
logBufferContentsLineByLine(t, stderr, "stderr")
if testCase.shouldSucceed {
require.NoError(t, err)
} else {
require.Error(t, err)
}
})
}
}
func TestTerragruntVersionConstraintsPartialParse(t *testing.T) {
fixturePath := "fixture-partial-parse/terragrunt-version-constraint"
cleanupTerragruntFolder(t, fixturePath)
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
err := runTerragruntVersionCommand(t, "0.21.23", fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", fixturePath), &stdout, &stderr)
logBufferContentsLineByLine(t, stdout, "stdout")
logBufferContentsLineByLine(t, stderr, "stderr")
assert.Error(t, err)
_, isTgVersionError := errors.Unwrap(err).(cli.InvalidTerragruntVersion)
assert.True(t, isTgVersionError)
}
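// cleanupTerraformFolder removes the terraform state, state backup, terragrunt debug file, and terraform folder
// from the given fixture path so each test starts from a clean slate.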
func cleanupTerraformFolder(t *testing.T, templatesPath string) {
removeFile(t, util.JoinPath(templatesPath, TERRAFORM_STATE))
removeFile(t, util.JoinPath(templatesPath, TERRAFORM_STATE_BACKUP))
removeFile(t, util.JoinPath(templatesPath, TERRAGRUNT_DEBUG_FILE))
removeFolder(t, util.JoinPath(templatesPath, TERRAFORM_FOLDER))
}
func cleanupTerragruntFolder(t *testing.T, templatesPath string) {
removeFolder(t, util.JoinPath(templatesPath, TERRAGRUNT_CACHE))
}
func removeFile(t *testing.T, path string) {
if util.FileExists(path) {
if err := os.Remove(path); err != nil {
t.Fatalf("Error while removing %s: %v", path, err)
}
}
}
func removeFolder(t *testing.T, path string) {
if util.FileExists(path) {
if err := os.RemoveAll(path); err != nil {
t.Fatalf("Error while removing %s: %v", path, err)
}
}
}
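// runTerragruntCommand runs the given terragrunt command line with a placeholder version string, writing stdout
// and stderr to the given writers.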
func runTerragruntCommand(t *testing.T, command string, writer io.Writer, errwriter io.Writer) error {
return runTerragruntVersionCommand(t, "TEST", command, writer, errwriter)
}
func runTerragruntVersionCommand(t *testing.T, version string, command string, writer io.Writer, errwriter io.Writer) error {
args := strings.Split(command, " ")
app := cli.CreateTerragruntCli(version, writer, errwriter)
return app.Run(args)
}
func runTerragrunt(t *testing.T, command string) {
runTerragruntRedirectOutput(t, command, os.Stdout, os.Stderr)
}
func runTerragruntCommandWithOutput(t *testing.T, command string) (string, string, error) {
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
err := runTerragruntCommand(t, command, &stdout, &stderr)
logBufferContentsLineByLine(t, stdout, "stdout")
logBufferContentsLineByLine(t, stderr, "stderr")
return stdout.String(), stderr.String(), err
}
func runTerragruntRedirectOutput(t *testing.T, command string, writer io.Writer, errwriter io.Writer) {
if err := runTerragruntCommand(t, command, writer, errwriter); err != nil {
stdout := "(see log output above)"
if stdoutAsBuffer, stdoutIsBuffer := writer.(*bytes.Buffer); stdoutIsBuffer {
stdout = stdoutAsBuffer.String()
}
stderr := "(see log output above)"
if stderrAsBuffer, stderrIsBuffer := errwriter.(*bytes.Buffer); stderrIsBuffer {
stderr = stderrAsBuffer.String()
}
t.Fatalf("Failed to run Terragrunt command '%s' due to error: %s\n\nStdout: %s\n\nStderr: %s", command, err, stdout, stderr)
}
}
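// copyEnvironment copies the given fixture folder into a fresh temp dir so tests can mutate files and state
// without touching the checked-in fixtures; it returns the temp dir root.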
func copyEnvironment(t *testing.T, environmentPath string) string {
tmpDir, err := ioutil.TempDir("", "terragrunt-test")
if err != nil {
t.Fatalf("Failed to create temp dir due to error: %v", err)
}
t.Logf("Copying %s to %s", environmentPath, tmpDir)
require.NoError(t, util.CopyFolderContents(environmentPath, util.JoinPath(tmpDir, environmentPath), ".terragrunt-test"))
return tmpDir
}
func copyFile(srcPath string, destPath string) error {
contents, err := ioutil.ReadFile(srcPath)
if err != nil {
return err
}
return ioutil.WriteFile(destPath, contents, 0644)
}
func createTmpTerragruntConfigWithParentAndChild(t *testing.T, parentPath string, childRelPath string, s3BucketName string, parentConfigFileName string, childConfigFileName string) string {
tmpDir, err := ioutil.TempDir("", "terragrunt-parent-child-test")
if err != nil {
t.Fatalf("Failed to create temp dir due to error: %v", err)
}
childDestPath := util.JoinPath(tmpDir, childRelPath)
if err := os.MkdirAll(childDestPath, 0777); err != nil {
t.Fatalf("Failed to create temp dir %s due to error %v", childDestPath, err)
}
parentTerragruntSrcPath := util.JoinPath(parentPath, parentConfigFileName)
parentTerragruntDestPath := util.JoinPath(tmpDir, parentConfigFileName)
copyTerragruntConfigAndFillPlaceholders(t, parentTerragruntSrcPath, parentTerragruntDestPath, s3BucketName, "not-used", "not-used")
childTerragruntSrcPath := util.JoinPath(util.JoinPath(parentPath, childRelPath), childConfigFileName)
childTerragruntDestPath := util.JoinPath(childDestPath, childConfigFileName)
copyTerragruntConfigAndFillPlaceholders(t, childTerragruntSrcPath, childTerragruntDestPath, s3BucketName, "not-used", "not-used")
return childTerragruntDestPath
}
func createTmpTerragruntConfig(t *testing.T, templatesPath string, s3BucketName string, lockTableName string, configFileName string) string {
tmpFolder, err := ioutil.TempDir("", "terragrunt-test")
if err != nil {
t.Fatalf("Failed to create temp folder due to error: %v", err)
}
tmpTerragruntConfigFile := util.JoinPath(tmpFolder, configFileName)
originalTerragruntConfigPath := util.JoinPath(templatesPath, configFileName)
copyTerragruntConfigAndFillPlaceholders(t, originalTerragruntConfigPath, tmpTerragruntConfigFile, s3BucketName, lockTableName, "not-used")
return tmpTerragruntConfigFile
}
func createTmpTerragruntConfigContent(t *testing.T, contents string, configFileName string) string {
tmpFolder, err := ioutil.TempDir("", "terragrunt-test")
if err != nil {
t.Fatalf("Failed to create temp folder due to error: %v", err)
}
tmpTerragruntConfigFile := util.JoinPath(tmpFolder, configFileName)
if err := ioutil.WriteFile(tmpTerragruntConfigFile, []byte(contents), 0444); err != nil {
t.Fatalf("Error writing temp Terragrunt config to %s: %v", tmpTerragruntConfigFile, err)
}
return tmpTerragruntConfigFile
}
func createTmpTerragruntGCSConfig(t *testing.T, templatesPath string, project string, location string, gcsBucketName string, configFileName string) string {
tmpFolder, err := ioutil.TempDir("", "terragrunt-test")
if err != nil {
t.Fatalf("Failed to create temp folder due to error: %v", err)
}
tmpTerragruntConfigFile := util.JoinPath(tmpFolder, configFileName)
originalTerragruntConfigPath := util.JoinPath(templatesPath, configFileName)
copyTerragruntGCSConfigAndFillPlaceholders(t, originalTerragruntConfigPath, tmpTerragruntConfigFile, project, location, gcsBucketName)
return tmpTerragruntConfigFile
}
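// copyTerragruntConfigAndFillPlaceholders copies a terragrunt config file and substitutes the __FILL_IN_*__
// placeholders with the given S3 bucket, lock table, and region values.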
func copyTerragruntConfigAndFillPlaceholders(t *testing.T, configSrcPath string, configDestPath string, s3BucketName string, lockTableName string, region string) {
contents, err := util.ReadFileAsString(configSrcPath)
if err != nil {
t.Fatalf("Error reading Terragrunt config at %s: %v", configSrcPath, err)
}
contents = strings.Replace(contents, "__FILL_IN_BUCKET_NAME__", s3BucketName, -1)
contents = strings.Replace(contents, "__FILL_IN_LOCK_TABLE_NAME__", lockTableName, -1)
contents = strings.Replace(contents, "__FILL_IN_REGION__", region, -1)
contents = strings.Replace(contents, "__FILL_IN_LOGS_BUCKET_NAME__", s3BucketName+"-tf-state-logs", -1)
if err := ioutil.WriteFile(configDestPath, []byte(contents), 0444); err != nil {
t.Fatalf("Error writing temp Terragrunt config to %s: %v", configDestPath, err)
}
}
func copyTerragruntGCSConfigAndFillPlaceholders(t *testing.T, configSrcPath string, configDestPath string, project string, location string, gcsBucketName string) {
contents, err := util.ReadFileAsString(configSrcPath)
if err != nil {
t.Fatalf("Error reading Terragrunt config at %s: %v", configSrcPath, err)
}
contents = strings.Replace(contents, "__FILL_IN_PROJECT__", project, -1)
contents = strings.Replace(contents, "__FILL_IN_LOCATION__", location, -1)
contents = strings.Replace(contents, "__FILL_IN_BUCKET_NAME__", gcsBucketName, -1)
if err := ioutil.WriteFile(configDestPath, []byte(contents), 0444); err != nil {
t.Fatalf("Error writing temp Terragrunt config to %s: %v", configDestPath, err)
}
}
// Returns a unique (ish) id we can attach to resources and tfstate files so they don't conflict with each other
// Uses base 62 to generate a 6 character string that's unlikely to collide with the handful of tests we run in
// parallel. Based on code here: http://stackoverflow.com/a/9543797/483528
func uniqueId() string {
const BASE_62_CHARS = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
const UNIQUE_ID_LENGTH = 6 // Should be good for 62^6 = 56+ billion combinations
var out bytes.Buffer
for i := 0; i < UNIQUE_ID_LENGTH; i++ {
out.WriteByte(BASE_62_CHARS[rand.Intn(len(BASE_62_CHARS))])
}
return out.String()
}
// Check that the S3 Bucket of the given name and region exists. Terragrunt should create this bucket during the test.
// Also check that the bucket was tagged properly and that public access is completely disabled.
func validateS3BucketExistsAndIsTagged(t *testing.T, awsRegion string, bucketName string, expectedTags map[string]string) {
mockOptions, err := options.NewTerragruntOptionsForTest("integration_test")
if err != nil {
t.Fatalf("Error creating mockOptions: %v", err)
}
sessionConfig := &aws_helper.AwsSessionConfig{
Region: awsRegion,
}
s3Client, err := remote.CreateS3Client(sessionConfig, mockOptions)
if err != nil {
t.Fatalf("Error creating S3 client: %v", err)
}
remoteStateConfig := remote.RemoteStateConfigS3{Bucket: bucketName, Region: awsRegion}
assert.True(t, remote.DoesS3BucketExist(s3Client, &remoteStateConfig.Bucket), "Terragrunt failed to create remote state S3 bucket %s", bucketName)
if expectedTags != nil {
assertS3Tags(expectedTags, bucketName, s3Client, t)
}
assertS3PublicAccessBlocks(t, s3Client, bucketName)
}
// Check that the DynamoDB table of the given name and region exists. Terragrunt should create this table during the test.
// Also check that the table was tagged properly.
func validateDynamoDBTableExistsAndIsTagged(t *testing.T, awsRegion string, tableName string, expectedTags map[string]string) {
client := createDynamoDbClientForTest(t, awsRegion)
var description, err = client.DescribeTable(&dynamodb.DescribeTableInput{TableName: aws.String(tableName)})
if err != nil {
// This is a ResourceNotFoundException in case the table does not exist
t.Fatal(err)
}
var tags, err2 = client.ListTagsOfResource(&dynamodb.ListTagsOfResourceInput{ResourceArn: description.Table.TableArn})
if err2 != nil {
t.Fatal(err2)
}
var actualTags = make(map[string]string)
for _, element := range tags.Tags {
actualTags[*element.Key] = *element.Value
}
assert.Equal(t, expectedTags, actualTags, "Did not find expected tags on dynamo table.")
}
func assertS3Tags(expectedTags map[string]string, bucketName string, client *s3.S3, t *testing.T) {
var in = s3.GetBucketTaggingInput{}
in.SetBucket(bucketName)
var tags, err2 = client.GetBucketTagging(&in)
if err2 != nil {
t.Fatal(err2)
}
var actualTags = make(map[string]string)
for _, element := range tags.TagSet {
actualTags[*element.Key] = *element.Value
}
assert.Equal(t, expectedTags, actualTags, "Did not find expected tags on s3 bucket.")
}
func assertS3PublicAccessBlocks(t *testing.T, client *s3.S3, bucketName string) {
resp, err := client.GetPublicAccessBlock(
&s3.GetPublicAccessBlockInput{Bucket: aws.String(bucketName)},
)
require.NoError(t, err)
publicAccessBlockConfig := resp.PublicAccessBlockConfiguration
assert.True(t, aws.BoolValue(publicAccessBlockConfig.BlockPublicAcls))
assert.True(t, aws.BoolValue(publicAccessBlockConfig.BlockPublicPolicy))
assert.True(t, aws.BoolValue(publicAccessBlockConfig.IgnorePublicAcls))
assert.True(t, aws.BoolValue(publicAccessBlockConfig.RestrictPublicBuckets))
}
// deleteS3BucketWithRetry will attempt to delete the specified S3 bucket, retrying up to 3 times if there are errors to
// handle eventual consistency issues.
func deleteS3BucketWithRetry(t *testing.T, awsRegion string, bucketName string) {
for i := 0; i < 3; i++ {
err := deleteS3BucketE(t, awsRegion, bucketName)
if err == nil {
return
}
t.Logf("Error deleting s3 bucket %s. Sleeping for 10 seconds before retrying.", bucketName)
time.Sleep(10 * time.Second)
}
t.Fatalf("Max retries attempting to delete s3 bucket %s in region %s", bucketName, awsRegion)
}
// Delete the specified S3 bucket to clean up after a test
func deleteS3Bucket(t *testing.T, awsRegion string, bucketName string) {
require.NoError(t, deleteS3BucketE(t, awsRegion, bucketName))
}
func deleteS3BucketE(t *testing.T, awsRegion string, bucketName string) error {
mockOptions, err := options.NewTerragruntOptionsForTest("integration_test")
if err != nil {
t.Logf("Error creating mockOptions: %v", err)
return err
}
sessionConfig := &aws_helper.AwsSessionConfig{
Region: awsRegion,
}
s3Client, err := remote.CreateS3Client(sessionConfig, mockOptions)
if err != nil {
t.Logf("Error creating S3 client: %v", err)
return err
}
t.Logf("Deleting test s3 bucket %s", bucketName)
out, err := s3Client.ListObjectVersions(&s3.ListObjectVersionsInput{Bucket: aws.String(bucketName)})
if err != nil {
t.Logf("Failed to list object versions in s3 bucket %s: %v", bucketName, err)
return err
}
objectIdentifiers := []*s3.ObjectIdentifier{}
for _, version := range out.Versions {
objectIdentifiers = append(objectIdentifiers, &s3.ObjectIdentifier{
Key: version.Key,
VersionId: version.VersionId,
})
}
if len(objectIdentifiers) > 0 {
deleteInput := &s3.DeleteObjectsInput{
Bucket: aws.String(bucketName),
Delete: &s3.Delete{Objects: objectIdentifiers},
}
if _, err := s3Client.DeleteObjects(deleteInput); err != nil {
t.Logf("Error deleting all versions of all objects in bucket %s: %v", bucketName, err)
return err
}
}
if _, err := s3Client.DeleteBucket(&s3.DeleteBucketInput{Bucket: aws.String(bucketName)}); err != nil {
t.Logf("Failed to delete S3 bucket %s: %v", bucketName, err)
return err
}
return nil
}
// Create an authenticated client for DynamoDB
func createDynamoDbClient(awsRegion, awsProfile string, iamRoleArn string) (*dynamodb.DynamoDB, error) {
mockOptions, err := options.NewTerragruntOptionsForTest("integration_test")
if err != nil {
return nil, err
}
sessionConfig := &aws_helper.AwsSessionConfig{
Region: awsRegion,
Profile: awsProfile,
RoleArn: iamRoleArn,
}
session, err := aws_helper.CreateAwsSession(sessionConfig, mockOptions)
if err != nil {
return nil, err
}
return dynamodb.New(session), nil
}
func createDynamoDbClientForTest(t *testing.T, awsRegion string) *dynamodb.DynamoDB {
client, err := createDynamoDbClient(awsRegion, "", "")
if err != nil {
t.Fatal(err)
}
return client
}
func cleanupTableForTest(t *testing.T, tableName string, awsRegion string) {
client := createDynamoDbClientForTest(t, awsRegion)
err := terragruntDynamoDb.DeleteTable(tableName, client)
assert.NoError(t, err)
}
// Check that the GCS Bucket of the given name and location exists. Terragrunt should create this bucket during the test.
// Also check that the bucket was labeled properly.
func validateGCSBucketExistsAndIsLabeled(t *testing.T, location string, bucketName string, expectedLabels map[string]string) {
remoteStateConfig := remote.RemoteStateConfigGCS{Bucket: bucketName}
gcsClient, err := remote.CreateGCSClient(remoteStateConfig)
if err != nil {
t.Fatalf("Error creating GCS client: %v", err)
}
// verify the bucket exists
assert.True(t, remote.DoesGCSBucketExist(gcsClient, &remoteStateConfig), "Terragrunt failed to create remote state GCS bucket %s", bucketName)
// verify the bucket location
ctx := context.Background()
bucket := gcsClient.Bucket(bucketName)
attrs, err := bucket.Attrs(ctx)
if err != nil {
t.Fatal(err)
}
assert.Equal(t, strings.ToUpper(location), attrs.Location, "Did not find GCS bucket in expected location.")
if expectedLabels != nil {
assertGCSLabels(t, expectedLabels, bucketName, gcsClient)
}
}
func assertGCSLabels(t *testing.T, expectedLabels map[string]string, bucketName string, client *storage.Client) {
ctx := context.Background()
bucket := client.Bucket(bucketName)
attrs, err := bucket.Attrs(ctx)
if err != nil {
t.Fatal(err)
}
var actualLabels = make(map[string]string)
for key, value := range attrs.Labels {
actualLabels[key] = value
}
assert.Equal(t, expectedLabels, actualLabels, "Did not find expected labels on GCS bucket.")
}
// Create the specified GCS bucket
func createGCSBucket(t *testing.T, projectID string, location string, bucketName string) {
var gcsConfig remote.RemoteStateConfigGCS
gcsClient, err := remote.CreateGCSClient(gcsConfig)
if err != nil {
t.Fatalf("Error creating GCS client: %v", err)
}
t.Logf("Creating test GCS bucket %s in project %s, location %s", bucketName, projectID, location)
ctx := context.Background()
bucket := gcsClient.Bucket(bucketName)
bucketAttrs := &storage.BucketAttrs{
Location: location,
VersioningEnabled: true,
}
if err := bucket.Create(ctx, projectID, bucketAttrs); err != nil {
t.Fatalf("Failed to create GCS bucket %s: %v", bucketName, err)
}
}
// Delete the specified GCS bucket to clean up after a test
func deleteGCSBucket(t *testing.T, bucketName string) {
var gcsConfig remote.RemoteStateConfigGCS
gcsClient, err := remote.CreateGCSClient(gcsConfig)
if err != nil {
t.Fatalf("Error creating GCS client: %v", err)
}
t.Logf("Deleting test GCS bucket %s", bucketName)
ctx := context.Background()
// List all objects including their versions in the bucket
bucket := gcsClient.Bucket(bucketName)
q := &storage.Query{
Versions: true,
}
it := bucket.Objects(ctx, q)
for {
objectAttrs, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
t.Fatalf("Failed to list objects and versions in GCS bucket %s: %v", bucketName, err)
}
// purge the object version
if err := bucket.Object(objectAttrs.Name).Generation(objectAttrs.Generation).Delete(ctx); err != nil {
t.Fatalf("Failed to delete GCS bucket object %s: %v", objectAttrs.Name, err)
}
}
	// remove the now-empty bucket
if err := bucket.Delete(ctx); err != nil {
t.Fatalf("Failed to delete GCS bucket %s: %v", bucketName, err)
}
}
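// fileIsInFolder walks the given path and reports whether a file with the given name exists anywhere under it.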
func fileIsInFolder(t *testing.T, name string, path string) bool {
found := false
err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error {
assert.NoError(t, err)
if filepath.Base(path) == name {
found = true
}
return nil
})
require.NoError(t, err)
return found
}
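// runValidateAllWithIncludeAndGetIncludedModules runs 'run-all validate' with the given --terragrunt-include-dir
// flags (and optionally --terragrunt-strict-include), then parses the debug log to return the sorted list of
// modules that were not excluded.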
func runValidateAllWithIncludeAndGetIncludedModules(t *testing.T, rootModulePath string, includeModulePaths []string, strictInclude bool) []string {
	cmdParts := []string{
		"terragrunt", "run-all", "validate",
		"--terragrunt-non-interactive",
		"--terragrunt-log-level", "debug",
		"--terragrunt-working-dir", rootModulePath,
	}
	for _, module := range includeModulePaths {
		cmdParts = append(cmdParts, "--terragrunt-include-dir", module)
	}
	if strictInclude {
		cmdParts = append(cmdParts, "--terragrunt-strict-include")
	}
	cmd := strings.Join(cmdParts, " ")
validateAllStdout := bytes.Buffer{}
validateAllStderr := bytes.Buffer{}
err := runTerragruntCommand(
t,
cmd,
&validateAllStdout,
&validateAllStderr,
)
logBufferContentsLineByLine(t, validateAllStdout, "validate-all stdout")
logBufferContentsLineByLine(t, validateAllStderr, "validate-all stderr")
require.NoError(t, err)
currentDir, err := os.Getwd()
require.NoError(t, err)
includedModulesRegexp, err := regexp.Compile(
fmt.Sprintf(
`=> Module %s/%s/(.+) \(excluded: (true|false)`,
currentDir,
rootModulePath,
),
)
require.NoError(t, err)
	matches := includedModulesRegexp.FindAllStringSubmatch(validateAllStderr.String(), -1)
includedModules := []string{}
for _, match := range matches {
if match[2] == "false" {
includedModules = append(includedModules, match[1])
}
}
sort.Strings(includedModules)
return includedModules
}
// Test that sops-encrypted files are decrypted correctly when used for inputs.
func TestSopsDecryptedCorrectly(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_SOPS)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_SOPS)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_SOPS)
runTerragrunt(t, fmt.Sprintf("terragrunt apply -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt output -no-color -json --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr)
require.NoError(t, err)
outputs := map[string]TerraformOutput{}
require.NoError(t, json.Unmarshal([]byte(stdout.String()), &outputs))
assert.Equal(t, outputs["json_bool_array"].Value, []interface{}{true, false})
assert.Equal(t, outputs["json_string_array"].Value, []interface{}{"example_value1", "example_value2"})
assert.Equal(t, outputs["json_number"].Value, 1234.56789)
assert.Equal(t, outputs["json_string"].Value, "example_value")
assert.Equal(t, outputs["json_hello"].Value, "Welcome to SOPS! Edit this file as you please!")
assert.Equal(t, outputs["yaml_bool_array"].Value, []interface{}{true, false})
assert.Equal(t, outputs["yaml_string_array"].Value, []interface{}{"example_value1", "example_value2"})
assert.Equal(t, outputs["yaml_number"].Value, 1234.5679)
assert.Equal(t, outputs["yaml_string"].Value, "example_value")
assert.Equal(t, outputs["yaml_hello"].Value, "Welcome to SOPS! Edit this file as you please!")
}
func TestTerragruntRunAllCommandPrompt(t *testing.T) {
t.Parallel()
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_OUTPUT_ALL)
rootTerragruntConfigPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_OUTPUT_ALL, config.DefaultTerragruntConfigPath)
copyTerragruntConfigAndFillPlaceholders(t, rootTerragruntConfigPath, rootTerragruntConfigPath, s3BucketName, "not-used", "not-used")
environmentPath := fmt.Sprintf("%s/%s/env1", tmpEnvPath, TEST_FIXTURE_OUTPUT_ALL)
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt run-all apply --terragrunt-working-dir %s", environmentPath), &stdout, &stderr)
logBufferContentsLineByLine(t, stdout, "stdout")
logBufferContentsLineByLine(t, stderr, "stderr")
assert.Contains(t, stderr.String(), "Are you sure you want to run 'terragrunt apply' in each folder of the stack described above? (y/n)")
assert.Error(t, err)
}
| [
"\"HOME\"",
"\"GOOGLE_CLOUD_PROJECT\"",
"\"GOOGLE_CLOUD_PROJECT\""
] | [] | [
"HOME",
"GOOGLE_CLOUD_PROJECT"
] | [] | ["HOME", "GOOGLE_CLOUD_PROJECT"] | go | 2 | 0 | |
internal/client.go | /*
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"bytes"
"context"
"errors"
"fmt"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/apache/rocketmq-client-go/internal/remote"
"github.com/apache/rocketmq-client-go/internal/utils"
"github.com/apache/rocketmq-client-go/primitive"
"github.com/apache/rocketmq-client-go/rlog"
)
const (
defaultTraceRegionID = "DefaultRegion"
// tracing message switch
_TranceOff = "false"
	// Interval for pulling topic route information from the name server
_PullNameServerInterval = 30 * time.Second
	// Interval for sending heartbeats to all brokers
_HeartbeatBrokerInterval = 30 * time.Second
	// Interval for persisting consumer offsets
_PersistOffset = 5 * time.Second
// Rebalance interval
_RebalanceInterval = 10 * time.Second
)
var (
	ErrServiceState = errors.New("service is not running, please check")
_VIPChannelEnable = false
)
func init() {
if os.Getenv("com.rocketmq.sendMessageWithVIPChannel") != "" {
value, err := strconv.ParseBool(os.Getenv("com.rocketmq.sendMessageWithVIPChannel"))
if err == nil {
_VIPChannelEnable = value
}
}
}
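// InnerProducer is the view of a producer that the client needs in order to keep publish topic route info up to date.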
type InnerProducer interface {
PublishTopicList() []string
UpdateTopicPublishInfo(topic string, info *TopicPublishInfo)
IsPublishTopicNeedUpdate(topic string) bool
IsUnitMode() bool
}
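// InnerConsumer is the view of a consumer that the client needs for offset persistence, subscription updates, and
// rebalancing.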
type InnerConsumer interface {
PersistConsumerOffset() error
UpdateTopicSubscribeInfo(topic string, mqs []*primitive.MessageQueue)
IsSubscribeTopicNeedUpdate(topic string) bool
SubscriptionDataList() []*SubscriptionData
Rebalance()
IsUnitMode() bool
}
func DefaultClientOptions() ClientOptions {
opts := ClientOptions{
InstanceName: "DEFAULT",
RetryTimes: 3,
ClientIP: utils.LocalIP(),
}
return opts
}
type ClientOptions struct {
GroupName string
NameServerAddrs []string
ClientIP string
InstanceName string
UnitMode bool
UnitName string
VIPChannelEnabled bool
ACLEnabled bool
RetryTimes int
Interceptors []primitive.Interceptor
Credentials primitive.Credentials
}
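// ChangeInstanceNameToPID replaces the default instance name with the current process id so that multiple clients
// on the same host get distinct client IDs.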
func (opt *ClientOptions) ChangeInstanceNameToPID() {
if opt.InstanceName == "DEFAULT" {
		opt.InstanceName = strconv.Itoa(os.Getpid())
}
}
func (opt *ClientOptions) String() string {
return fmt.Sprintf("ClientOption [ClientIP=%s, InstanceName=%s, "+
"UnitMode=%v, UnitName=%s, VIPChannelEnabled=%v, ACLEnabled=%v]", opt.ClientIP,
opt.InstanceName, opt.UnitMode, opt.UnitName, opt.VIPChannelEnabled, opt.ACLEnabled)
}
//go:generate mockgen -source client.go -destination mock_client.go --package internal RMQClient
type RMQClient interface {
Start()
Shutdown()
ClientID() string
RegisterProducer(group string, producer InnerProducer)
InvokeSync(addr string, request *remote.RemotingCommand,
timeoutMillis time.Duration) (*remote.RemotingCommand, error)
InvokeAsync(addr string, request *remote.RemotingCommand,
timeoutMillis time.Duration, f func(*remote.RemotingCommand, error)) error
InvokeOneWay(addr string, request *remote.RemotingCommand,
timeoutMillis time.Duration) error
CheckClientInBroker()
SendHeartbeatToAllBrokerWithLock()
UpdateTopicRouteInfo()
ProcessSendResponse(brokerName string, cmd *remote.RemotingCommand, resp *primitive.SendResult, msgs ...*primitive.Message) error
RegisterConsumer(group string, consumer InnerConsumer) error
UnregisterConsumer(group string)
PullMessage(ctx context.Context, brokerAddrs string, request *PullMessageRequest) (*primitive.PullResult, error)
PullMessageAsync(ctx context.Context, brokerAddrs string, request *PullMessageRequest, f func(result *primitive.PullResult)) error
RebalanceImmediately()
UpdatePublishInfo(topic string, data *TopicRouteData)
}
var _ RMQClient = new(rmqClient)
type rmqClient struct {
option ClientOptions
// group -> InnerProducer
producerMap sync.Map
// group -> InnerConsumer
consumerMap sync.Map
once sync.Once
remoteClient *remote.RemotingClient
hbMutex sync.Mutex
close bool
}
var clientMap sync.Map
func GetOrNewRocketMQClient(option ClientOptions) *rmqClient {
client := &rmqClient{
option: option,
remoteClient: remote.NewRemotingClient(),
}
actual, loaded := clientMap.LoadOrStore(client.ClientID(), client)
if !loaded {
client.remoteClient.RegisterRequestFunc(ReqNotifyConsumerIdsChanged, func(req *remote.RemotingCommand) *remote.RemotingCommand {
rlog.Infof("receive broker's notification, the consumer group: %s", req.ExtFields["consumerGroup"])
client.RebalanceImmediately()
return nil
})
}
return actual.(*rmqClient)
}
func (c *rmqClient) Start() {
//ctx, cancel := context.WithCancel(context.Background())
//c.cancel = cancel
c.close = false
c.once.Do(func() {
// TODO fetchNameServerAddr
if !c.option.Credentials.IsEmpty() {
c.remoteClient.RegisterInterceptor(remote.ACLInterceptor(c.option.Credentials))
}
go func() {}()
// schedule update route info
go func() {
// delay
time.Sleep(50 * time.Millisecond)
for !c.close {
c.UpdateTopicRouteInfo()
time.Sleep(_PullNameServerInterval)
}
}()
// TODO cleanOfflineBroker & sendHeartbeatToAllBrokerWithLock
go func() {
for !c.close {
cleanOfflineBroker()
c.SendHeartbeatToAllBrokerWithLock()
time.Sleep(_HeartbeatBrokerInterval)
}
}()
// schedule persist offset
go func() {
//time.Sleep(10 * time.Second)
for !c.close {
c.consumerMap.Range(func(key, value interface{}) bool {
consumer := value.(InnerConsumer)
err := consumer.PersistConsumerOffset()
if err != nil {
rlog.Errorf("persist offset failed. err: %v", err)
}
return true
})
time.Sleep(_PersistOffset)
}
}()
go func() {
for !c.close {
c.RebalanceImmediately()
time.Sleep(_RebalanceInterval)
}
}()
})
}
func (c *rmqClient) Shutdown() {
c.remoteClient.ShutDown()
c.close = true
}
func (c *rmqClient) ClientID() string {
id := c.option.ClientIP + "@" + c.option.InstanceName
if c.option.UnitName != "" {
id += "@" + c.option.UnitName
}
return id
}
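// For illustration only (the address is hypothetical): ClientID() above yields
// IDs of the form "192.168.0.10@DEFAULT", or "192.168.0.10@DEFAULT@unitA" when
// a unit name is configured.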
func (c *rmqClient) InvokeSync(addr string, request *remote.RemotingCommand,
timeoutMillis time.Duration) (*remote.RemotingCommand, error) {
if c.close {
return nil, ErrServiceState
}
return c.remoteClient.InvokeSync(addr, request, timeoutMillis)
}
func (c *rmqClient) InvokeAsync(addr string, request *remote.RemotingCommand,
timeoutMillis time.Duration, f func(*remote.RemotingCommand, error)) error {
if c.close {
return ErrServiceState
}
return c.remoteClient.InvokeAsync(addr, request, timeoutMillis, func(future *remote.ResponseFuture) {
f(future.ResponseCommand, future.Err)
})
}
func (c *rmqClient) InvokeOneWay(addr string, request *remote.RemotingCommand,
timeoutMillis time.Duration) error {
if c.close {
return ErrServiceState
}
return c.remoteClient.InvokeOneWay(addr, request, timeoutMillis)
}
func (c *rmqClient) CheckClientInBroker() {
}
// TODO
func (c *rmqClient) SendHeartbeatToAllBrokerWithLock() {
c.hbMutex.Lock()
defer c.hbMutex.Unlock()
hbData := &heartbeatData{
ClientId: c.ClientID(),
}
pData := make([]producerData, 0)
c.producerMap.Range(func(key, value interface{}) bool {
pData = append(pData, producerData(key.(string)))
return true
})
cData := make([]consumerData, 0)
c.consumerMap.Range(func(key, value interface{}) bool {
consumer := value.(InnerConsumer)
cData = append(cData, consumerData{
GroupName: key.(string),
CType: "PUSH",
MessageModel: "CLUSTERING",
Where: "CONSUME_FROM_FIRST_OFFSET",
UnitMode: consumer.IsUnitMode(),
SubscriptionDatas: consumer.SubscriptionDataList(),
})
return true
})
hbData.ProducerDatas = pData
hbData.ConsumerDatas = cData
if len(pData) == 0 && len(cData) == 0 {
rlog.Info("sending heartbeat, but no producer and no consumer")
return
}
brokerAddressesMap.Range(func(key, value interface{}) bool {
brokerName := key.(string)
data := value.(*BrokerData)
for id, addr := range data.BrokerAddresses {
cmd := remote.NewRemotingCommand(ReqHeartBeat, nil, hbData.encode())
response, err := c.remoteClient.InvokeSync(addr, cmd, 3*time.Second)
if err != nil {
rlog.Warnf("send heart beat to broker error: %s", err.Error())
return true
}
if response.Code == ResSuccess {
v, exist := brokerVersionMap.Load(brokerName)
var m map[string]int32
if exist {
m = v.(map[string]int32)
} else {
m = make(map[string]int32, 4)
brokerVersionMap.Store(brokerName, m)
}
m[brokerName] = int32(response.Version)
rlog.Infof("send heart beat to broker[%s %d %s] success", brokerName, id, addr)
}
}
return true
})
}
func (c *rmqClient) UpdateTopicRouteInfo() {
publishTopicSet := make(map[string]bool, 0)
c.producerMap.Range(func(key, value interface{}) bool {
producer := value.(InnerProducer)
list := producer.PublishTopicList()
for idx := range list {
publishTopicSet[list[idx]] = true
}
return true
})
for topic := range publishTopicSet {
c.UpdatePublishInfo(topic, UpdateTopicRouteInfo(topic))
}
subscribedTopicSet := make(map[string]bool, 0)
c.consumerMap.Range(func(key, value interface{}) bool {
consumer := value.(InnerConsumer)
list := consumer.SubscriptionDataList()
for idx := range list {
subscribedTopicSet[list[idx].Topic] = true
}
return true
})
for topic := range subscribedTopicSet {
c.updateSubscribeInfo(topic, UpdateTopicRouteInfo(topic))
}
}
// SendMessageAsync sends a batch of messages asynchronously
func (c *rmqClient) SendMessageAsync(ctx context.Context, brokerAddrs, brokerName string, request *SendMessageRequest,
msgs []*primitive.Message, f func(result *primitive.SendResult)) error {
return nil
}
func (c *rmqClient) SendMessageOneWay(ctx context.Context, brokerAddrs string, request *SendMessageRequest,
msgs []*primitive.Message) (*primitive.SendResult, error) {
cmd := remote.NewRemotingCommand(ReqSendBatchMessage, request, encodeMessages(msgs))
err := c.remoteClient.InvokeOneWay(brokerAddrs, cmd, 3*time.Second)
if err != nil {
rlog.Warnf("send messages with oneway error: %v", err)
}
return nil, err
}
func (c *rmqClient) ProcessSendResponse(brokerName string, cmd *remote.RemotingCommand, resp *primitive.SendResult, msgs ...*primitive.Message) error {
var status primitive.SendStatus
switch cmd.Code {
case ResFlushDiskTimeout:
status = primitive.SendFlushDiskTimeout
case ResFlushSlaveTimeout:
status = primitive.SendFlushSlaveTimeout
case ResSlaveNotAvailable:
status = primitive.SendSlaveNotAvailable
case ResSuccess:
status = primitive.SendOK
default:
status = primitive.SendUnknownError
return errors.New(cmd.Remark)
}
msgIDs := make([]string, 0)
for i := 0; i < len(msgs); i++ {
msgIDs = append(msgIDs, msgs[i].Properties[primitive.PropertyUniqueClientMessageIdKeyIndex])
}
regionId := cmd.ExtFields[primitive.PropertyMsgRegion]
trace := cmd.ExtFields[primitive.PropertyTraceSwitch]
if regionId == "" {
regionId = defaultTraceRegionID
}
qId, _ := strconv.Atoi(cmd.ExtFields["queueId"])
off, _ := strconv.ParseInt(cmd.ExtFields["queueOffset"], 10, 64)
resp.Status = status
resp.MsgID = cmd.ExtFields["msgId"]
resp.OffsetMsgID = cmd.ExtFields["msgId"]
resp.MessageQueue = &primitive.MessageQueue{
Topic: msgs[0].Topic,
BrokerName: brokerName,
QueueId: qId,
}
resp.QueueOffset = off
//TransactionID: sendResponse.TransactionId,
resp.RegionID = regionId
resp.TraceOn = trace != "" && trace != _TraceOff
return nil
}
// PullMessage with sync
func (c *rmqClient) PullMessage(ctx context.Context, brokerAddrs string, request *PullMessageRequest) (*primitive.PullResult, error) {
cmd := remote.NewRemotingCommand(ReqPullMessage, request, nil)
res, err := c.remoteClient.InvokeSync(brokerAddrs, cmd, 10*time.Second)
if err != nil {
return nil, err
}
return c.processPullResponse(res)
}
func (c *rmqClient) processPullResponse(response *remote.RemotingCommand) (*primitive.PullResult, error) {
pullResult := &primitive.PullResult{}
switch response.Code {
case ResSuccess:
pullResult.Status = primitive.PullFound
case ResPullNotFound:
pullResult.Status = primitive.PullNoNewMsg
case ResPullRetryImmediately:
pullResult.Status = primitive.PullNoMsgMatched
case ResPullOffsetMoved:
pullResult.Status = primitive.PullOffsetIllegal
default:
return nil, fmt.Errorf("unknown Response Code: %d, remark: %s", response.Code, response.Remark)
}
c.decodeCommandCustomHeader(pullResult, response)
pullResult.SetBody(response.Body)
return pullResult, nil
}
func (c *rmqClient) decodeCommandCustomHeader(pr *primitive.PullResult, cmd *remote.RemotingCommand) {
v, exist := cmd.ExtFields["maxOffset"]
if exist {
pr.MaxOffset, _ = strconv.ParseInt(v, 10, 64)
}
v, exist = cmd.ExtFields["minOffset"]
if exist {
pr.MinOffset, _ = strconv.ParseInt(v, 10, 64)
}
v, exist = cmd.ExtFields["nextBeginOffset"]
if exist {
pr.NextBeginOffset, _ = strconv.ParseInt(v, 10, 64)
}
v, exist = cmd.ExtFields["suggestWhichBrokerId"]
if exist {
pr.SuggestWhichBrokerId, _ = strconv.ParseInt(v, 10, 64)
}
}
// PullMessageAsync pull message async
func (c *rmqClient) PullMessageAsync(ctx context.Context, brokerAddrs string, request *PullMessageRequest, f func(result *primitive.PullResult)) error {
return nil
}
func (c *rmqClient) RegisterConsumer(group string, consumer InnerConsumer) error {
c.consumerMap.Store(group, consumer)
return nil
}
func (c *rmqClient) UnregisterConsumer(group string) {
}
func (c *rmqClient) RegisterProducer(group string, producer InnerProducer) {
c.producerMap.Store(group, producer)
}
func (c *rmqClient) UnregisterProducer(group string) {
}
func (c *rmqClient) SelectProducer(group string) InnerProducer {
return nil
}
func (c *rmqClient) SelectConsumer(group string) InnerConsumer {
return nil
}
func (c *rmqClient) RebalanceImmediately() {
c.consumerMap.Range(func(key, value interface{}) bool {
consumer := value.(InnerConsumer)
consumer.Rebalance()
return true
})
}
func (c *rmqClient) UpdatePublishInfo(topic string, data *TopicRouteData) {
if data == nil {
return
}
if !c.isNeedUpdatePublishInfo(topic) {
return
}
c.producerMap.Range(func(key, value interface{}) bool {
p := value.(InnerProducer)
publishInfo := routeData2PublishInfo(topic, data)
publishInfo.HaveTopicRouterInfo = true
p.UpdateTopicPublishInfo(topic, publishInfo)
return true
})
}
func (c *rmqClient) isNeedUpdatePublishInfo(topic string) bool {
var result bool
c.producerMap.Range(func(key, value interface{}) bool {
p := value.(InnerProducer)
if p.IsPublishTopicNeedUpdate(topic) {
result = true
return false
}
return true
})
return result
}
func (c *rmqClient) updateSubscribeInfo(topic string, data *TopicRouteData) {
if data == nil {
return
}
if !c.isNeedUpdateSubscribeInfo(topic) {
return
}
c.consumerMap.Range(func(key, value interface{}) bool {
consumer := value.(InnerConsumer)
// TODO
consumer.UpdateTopicSubscribeInfo(topic, routeData2SubscribeInfo(topic, data))
return true
})
}
func (c *rmqClient) isNeedUpdateSubscribeInfo(topic string) bool {
var result bool
c.consumerMap.Range(func(key, value interface{}) bool {
consumer := value.(InnerConsumer)
if consumer.IsSubscribeTopicNeedUpdate(topic) {
result = true
return false
}
return true
})
return result
}
func routeData2SubscribeInfo(topic string, data *TopicRouteData) []*primitive.MessageQueue {
list := make([]*primitive.MessageQueue, 0)
for idx := range data.QueueDataList {
qd := data.QueueDataList[idx]
if queueIsReadable(qd.Perm) {
for i := 0; i < qd.ReadQueueNums; i++ {
list = append(list, &primitive.MessageQueue{
Topic: topic,
BrokerName: qd.BrokerName,
QueueId: i,
})
}
}
}
return list
}
func encodeMessages(message []*primitive.Message) []byte {
var buffer bytes.Buffer
index := 0
for index < len(message) {
buffer.Write(message[index].Body)
index++
}
return buffer.Bytes()
}
func brokerVIPChannel(brokerAddr string) string {
if !_VIPChannelEnable {
return brokerAddr
}
var brokerAddrNew strings.Builder
ipAndPort := strings.Split(brokerAddr, ":")
port, err := strconv.Atoi(ipAndPort[1])
if err != nil {
return ""
}
brokerAddrNew.WriteString(ipAndPort[0])
brokerAddrNew.WriteString(":")
brokerAddrNew.WriteString(strconv.Itoa(port - 2))
return brokerAddrNew.String()
}
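// For illustration (the address is hypothetical): with the VIP switch enabled,
// brokerVIPChannel("10.0.0.1:10911") returns "10.0.0.1:10909", i.e. the listen
// port minus two; with the switch off the address is returned unchanged.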
| [
"\"com.rocketmq.sendMessageWithVIPChannel\"",
"\"com.rocketmq.sendMessageWithVIPChannel\""
] | [] | [
"com.rocketmq.sendMessageWithVIPChannel"
] | [] | ["com.rocketmq.sendMessageWithVIPChannel"] | go | 1 | 0 | |
hmsclient/hmsclient_test.go | // Copyright © 2018 Alex Kolbasov
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hmsclient_test
import (
"fmt"
"log"
"os"
"strconv"
"testing"
"github.com/akolb1/gometastore/hmsclient"
)
func ExampleOpen() {
client, err := hmsclient.Open("localhost", 9083)
if err != nil {
log.Fatal(err)
}
fmt.Println(client.GetAllDatabases())
}
func ExampleMetastoreClient_GetAllDatabases() {
client, err := hmsclient.Open("localhost", 9083)
if err != nil {
log.Fatal(err)
}
defer client.Close()
fmt.Println(client.GetAllDatabases())
}
func TestOpenBadHost(t *testing.T) {
t.Log("connecting to fake host")
client, err := hmsclient.Open("foobar", 1)
if err == nil {
t.Error("connection to bad host succeeded")
}
if client != nil {
t.Error("connecting to bad host returned valid client")
}
}
func getClient(t *testing.T) (*hmsclient.MetastoreClient, error) {
host := os.Getenv("HMS_SERVER")
port := os.Getenv("HMS_PORT")
if port == "" {
port = "9083"
}
if host == "" {
host = "localhost"
}
portVal, err := strconv.ParseInt(port, 10, 32)
if err != nil {
t.Error("invalid port", portVal, err)
return nil, err
}
t.Log("connecting to", host)
client, err := hmsclient.Open(host, int(portVal))
if err != nil {
t.Error("failed connection to", host, err)
return nil, err
}
return client, nil
}
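// A hedged example of pointing these tests at a non-default metastore
// (the host name and package path are illustrative):
//
//	HMS_SERVER=metastore.example.com HMS_PORT=9083 go test ./hmsclient/...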
func TestGetDatabases(t *testing.T) {
client, err := getClient(t)
if err != nil {
return
}
defer client.Close()
databases, err := client.GetAllDatabases()
if err != nil {
t.Error("failed to get databases", err)
return
}
if len(databases) == 0 {
t.Error("no databases available")
return
}
}
func TestMetastoreClient_CreateDatabase(t *testing.T) {
dbName := os.Getenv("HMS_TEST_DATABASE")
owner := os.Getenv("HADOOP_USER_NAME")
if dbName == "" {
dbName = "hms_test_database"
}
t.Log("Testing creating database", dbName, "and owner", owner)
client, err := getClient(t)
if err != nil {
return
}
defer client.Close()
description := "test database"
err = client.CreateDatabase(&hmsclient.Database{Name: dbName, Description: description, Owner: owner})
if err != nil {
t.Error("failed to create database:", err)
return
}
db, err := client.GetDatabase(dbName)
if err != nil {
t.Error("failed to get database:", err)
return
}
if db.Name != dbName {
t.Errorf("dbname %s is not equal %s", db.Name, dbName)
}
if description != db.Description {
t.Errorf("description %s is not equal %s", db.Description, description)
}
if owner != db.Owner {
t.Errorf("owner %s is not equal %s", db.Owner, owner)
}
err = client.DropDatabase(dbName, true, false)
if err != nil {
t.Error("failed to drop database", err)
return
}
}
| [
"\"HMS_SERVER\"",
"\"HMS_PORT\"",
"\"HMS_TEST_DATABASE\"",
"\"HADOOP_USER_NAME\""
] | [] | [
"HMS_PORT",
"HADOOP_USER_NAME",
"HMS_TEST_DATABASE",
"HMS_SERVER"
] | [] | ["HMS_PORT", "HADOOP_USER_NAME", "HMS_TEST_DATABASE", "HMS_SERVER"] | go | 4 | 0 | |
modules/python/package/setup.py | import os
import sys
import platform
import setuptools
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
def main():
os.chdir(SCRIPT_DIR)
package_name = 'opencv'
package_version = os.environ.get('OPENCV_VERSION', '4.5.1') # TODO
long_description = 'Open Source Computer Vision Library Python bindings' # TODO
setuptools.setup(
name=package_name,
version=package_version,
url='https://github.com/opencv/opencv',
license='Apache 2.0',
description='OpenCV python bindings',
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
maintainer="OpenCV Team",
install_requires="numpy",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: Apache 2.0 License',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: C++',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
],
)
if __name__ == '__main__':
main()
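# Hedged usage sketch (the version value is illustrative): the package version
# above is taken from the environment, so a pinned build would look like
#   OPENCV_VERSION=4.5.1 python setup.py bdist_wheel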
| [] | [] | [
"OPENCV_VERSION"
] | [] | ["OPENCV_VERSION"] | python | 1 | 0 | |
test/functional/test_framework/util.py | #!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import unhexlify
from decimal import Decimal, ROUND_DOWN
from subprocess import CalledProcessError
import hashlib
import inspect
import json
import logging
import os
import re
import time
import unittest
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
from io import BytesIO
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_approx(v, vexp, vspan=0.00001):
"""Assert that `v` is within `vspan` of `vexp`"""
if v < vexp - vspan:
raise AssertionError("%s < [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
if v > vexp + vspan:
raise AssertionError("%s > [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = round(tx_size * fee_per_kB / 1000, 8)
if fee < target_fee:
raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 2) * fee_per_kB / 1000:
raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except JSONRPCException:
raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError(
"Expected substring not found in error message:\nsubstring: '{}'\nerror message: '{}'.".format(
message, e.error['message']))
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
"""Execute a process and asserts the process return code and output.
Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
and verifies that the return code and output are as expected. Throws AssertionError if
no CalledProcessError was raised or if the return code and output are not as expected.
Args:
returncode (int): the process return code.
output (string): [a substring of] the process output.
fun (function): the function to call. This should execute a process.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
try:
fun(*args, **kwds)
except CalledProcessError as e:
if returncode != e.returncode:
raise AssertionError("Unexpected returncode %i" % e.returncode)
if output not in e.output:
raise AssertionError("Expected substring not found:" + e.output)
else:
raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
and verifies that the error code and message are as expected. Throws AssertionError if
no JSONRPCException was raised or if the error code/message are not as expected.
Args:
code (int), optional: the error code returned by the RPC call (defined
in src/rpc/protocol.h). Set to None if checking the error code is not required.
message (string), optional: [a substring of] the error string returned by the
RPC call. Set to None if checking the error string is not required.
fun (function): the function to call. This should be the name of an RPC.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
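# A hedged usage sketch for the helper above (the error code/message pair is
# illustrative only, and `node` is assumed to be a running test node):
#   assert_raises_rpc_error(-8, "Block height out of range",
#                           node.getblockhash, 1000000000)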
def try_rpc(code, message, fun, *args, **kwds):
"""Tries to run an rpc command.
Test against error code and message if the rpc fails.
Returns whether a JSONRPCException was raised."""
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError(
"Expected substring not found in error message:\nsubstring: '{}'\nerror message: '{}'.".format(
message, e.error['message']))
return True
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
return False
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError("Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError("String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError("String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find:
assert_equal(expected, {})
num_matched = 0
for item in object_array:
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched %s" % (str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def EncodeDecimal(o):
if isinstance(o, Decimal):
return str(o)
raise TypeError(repr(o) + " is not JSON serializable")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
def wait_until_helper(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None, timeout_factor=1.0):
"""Sleep until the predicate resolves to be True.
Warning: Note that this method is not recommended to be used in tests as it is
not aware of the context of the test framework. Using the `wait_until()` members
from `BitcoinTestFramework` or `P2PInterface` class ensures the timeout is
properly scaled. Furthermore, `wait_until()` from `P2PInterface` class in
`p2p.py` has a preset lock.
"""
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
timeout = timeout * timeout_factor
attempt = 0
time_end = time.time() + timeout
while attempt < attempts and time.time() < time_end:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
attempt += 1
time.sleep(0.05)
# Print the cause of the timeout
predicate_source = "''''\n" + inspect.getsource(predicate) + "'''"
logger.error("wait_until() failed. Predicate: {}".format(predicate_source))
if attempt >= attempts:
raise AssertionError("Predicate {} not true after {} attempts".format(predicate_source, attempts))
elif time.time() >= time_end:
raise AssertionError("Predicate {} not true after {} seconds".format(predicate_source, timeout))
raise RuntimeError('Unreachable')
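# A hedged usage sketch (the predicate is illustrative; as warned above, prefer
# the framework's own wait_until() wrappers in tests):
#   wait_until_helper(lambda: node.getblockcount() >= 100, timeout=60)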
def sha256sum_file(filename):
h = hashlib.sha256()
with open(filename, 'rb') as f:
d = f.read(4096)
while len(d) > 0:
h.update(d)
d = f.read(4096)
return h.digest()
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 12
# Don't assign rpc or p2p ports lower than this
PORT_MIN = int(os.getenv('TEST_RUNNER_PORT_MIN', default=11000))
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
def get_rpc_proxy(url, node_number, *, timeout=None, coveragedir=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
coveragedir (str): Directory
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = int(timeout)
print("Proxy: " + url)
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(coveragedir, node_number) if coveragedir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert n <= MAX_NODES
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_url(datadir, i, chain, rpchost):
rpc_u, rpc_p = get_auth_cookie(datadir, chain)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n, chain):
datadir = get_datadir_path(dirname, n)
if not os.path.isdir(datadir):
os.makedirs(datadir)
# Translate chain name to config name
if chain == 'testnet3':
chain_name_conf_arg = 'testnet'
chain_name_conf_section = 'test'
else:
chain_name_conf_arg = chain
chain_name_conf_section = chain
with open(os.path.join(datadir, "dogecoin.conf"), 'w', encoding='utf8') as f:
f.write("{}=1\n".format(chain_name_conf_arg))
f.write("[{}]\n".format(chain_name_conf_section))
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("fallbackfee=0.0002\n")
f.write("server=1\n")
f.write("keypool=1\n")
f.write("discover=0\n")
f.write("dnsseed=0\n")
f.write("listenonion=0\n")
f.write("printtoconsole=0\n")
f.write("upnp=0\n")
f.write("shrinkdebugfile=0\n")
os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True)
os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True)
return datadir
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
def append_config(datadir, options):
with open(os.path.join(datadir, "dogecoin.conf"), 'a', encoding='utf8') as f:
for option in options:
f.write(option + "\n")
def get_auth_cookie(datadir, chain):
user = None
password = None
if os.path.isfile(os.path.join(datadir, "dogecoin.conf")):
with open(os.path.join(datadir, "dogecoin.conf"), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None # Ensure that there is only one rpcuser line
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
try:
with open(os.path.join(datadir, chain, ".cookie"), 'r', encoding="ascii") as f:
userpass = f.read()
split_userpass = userpass.split(':')
user = split_userpass[0]
password = split_userpass[1]
except OSError:
pass
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
# If a cookie file exists in the given datadir, delete it.
def delete_cookie_file(datadir, chain):
if os.path.isfile(os.path.join(datadir, chain, ".cookie")):
logger.debug("Deleting leftover cookie file")
os.remove(os.path.join(datadir, chain, ".cookie"))
def softfork_active(node, key):
"""Return whether a softfork is active."""
return node.getblockchaininfo()['softforks'][key]['active']
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
# Transaction/Block functions
#############################
def find_output(node, txid, amount, *, blockhash=None):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1, blockhash)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
to_generate = int(0.5 * count) + 241
while to_generate > 0:
node.generate(min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for _ in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({"txid": t["txid"], "vout": t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value / 2)
outputs[addr2] = satoshi_round(send_value / 2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransactionwithwallet(raw_tx)["hex"]
node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert len(utxos) >= count
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
for _ in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = []
from .messages import CTxOut
txout = CTxOut()
txout.nValue = 0
txout.scriptPubKey = hex_str_to_bytes(script_pubkey)
for _ in range(128):
txouts.append(txout)
return txouts
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
from .messages import CTransaction
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = satoshi_round(change)
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
tx.deserialize(BytesIO(hex_str_to_bytes(rawtx)))
for txout in txouts:
tx.vout.append(txout)
newtx = tx.serialize().hex()
signresult = node.signrawtransactionwithwallet(newtx, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], 0)
txids.append(txid)
return txids
def mine_large_block(node, utxos=None):
# generate ~66KB transactions;
# 14 of them together come close to the 1MB block limit
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
def find_vout_for_address(node, txid, addr):
"""
Locate the vout index of the given transaction sending to the
given address. Raises runtime error exception if not found.
"""
tx = node.getrawtransaction(txid, True)
for i in range(len(tx["vout"])):
if any([addr == a for a in tx["vout"][i]["scriptPubKey"]["addresses"]]):
return i
raise RuntimeError("Vout not found for address: txid=%s, addr=%s" % (txid, addr))
def modinv(a, n):
"""Compute the modular inverse of a modulo n using the extended Euclidean
Algorithm. See https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm#Modular_integers.
"""
# TODO: Change to pow(a, -1, n) available in Python 3.8
t1, t2 = 0, 1
r1, r2 = n, a
while r2 != 0:
q = r1 // r2
t1, t2 = t2, t1 - q * t2
r1, r2 = r2, r1 - q * r2
if r1 > 1:
return None
if t1 < 0:
t1 += n
return t1
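# Worked example for the function above: modinv(7, 11) == 8, since 7 * 8 == 56
# and 56 % 11 == 1 (this matches the test vectors below).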
class TestFrameworkUtil(unittest.TestCase):
def test_modinv(self):
test_vectors = [
[7, 11],
[11, 29],
[90, 13],
[1891, 3797],
[6003722857, 77695236973],
]
for a, n in test_vectors:
self.assertEqual(modinv(a, n), pow(a, n-2, n))
| [] | [] | [
"TEST_RUNNER_PORT_MIN"
] | [] | ["TEST_RUNNER_PORT_MIN"] | python | 1 | 0 | |
tb/eth_mac_10g/test_eth_mac_10g.py | #!/usr/bin/env python
"""
Copyright (c) 2020 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import itertools
import logging
import os
import pytest
import cocotb_test.simulator
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge
from cocotb.regression import TestFactory
from cocotbext.eth import XgmiiFrame, XgmiiSource, XgmiiSink
from cocotbext.axi import AxiStreamSource, AxiStreamSink
class TB:
def __init__(self, dut):
self.dut = dut
self.log = logging.getLogger("cocotb.tb")
self.log.setLevel(logging.DEBUG)
if len(dut.xgmii_txd) == 64:
cocotb.fork(Clock(dut.rx_clk, 6.4, units="ns").start())
cocotb.fork(Clock(dut.tx_clk, 6.4, units="ns").start())
else:
cocotb.fork(Clock(dut.rx_clk, 3.2, units="ns").start())
cocotb.fork(Clock(dut.tx_clk, 3.2, units="ns").start())
self.xgmii_source = XgmiiSource(dut.xgmii_rxd, dut.xgmii_rxc, dut.rx_clk, dut.rx_rst)
self.xgmii_sink = XgmiiSink(dut.xgmii_txd, dut.xgmii_txc, dut.tx_clk, dut.tx_rst)
self.axis_source = AxiStreamSource(dut, "tx_axis", dut.tx_clk, dut.tx_rst)
self.axis_sink = AxiStreamSink(dut, "rx_axis", dut.rx_clk, dut.rx_rst)
dut.rx_ptp_ts.setimmediatevalue(0)
dut.tx_ptp_ts.setimmediatevalue(0)
async def reset(self):
self.dut.rx_rst.setimmediatevalue(0)
self.dut.tx_rst.setimmediatevalue(0)
await RisingEdge(self.dut.rx_clk)
await RisingEdge(self.dut.rx_clk)
self.dut.rx_rst <= 1
self.dut.tx_rst <= 1
await RisingEdge(self.dut.rx_clk)
await RisingEdge(self.dut.rx_clk)
self.dut.rx_rst <= 0
self.dut.tx_rst <= 0
await RisingEdge(self.dut.rx_clk)
await RisingEdge(self.dut.rx_clk)
async def run_test_rx(dut, payload_lengths=None, payload_data=None, ifg=12):
tb = TB(dut)
tb.xgmii_source.ifg = ifg
tb.dut.ifg_delay <= ifg
await tb.reset()
test_frames = [payload_data(x) for x in payload_lengths()]
for test_data in test_frames:
test_frame = XgmiiFrame.from_payload(test_data)
await tb.xgmii_source.send(test_frame)
for test_data in test_frames:
rx_frame = await tb.axis_sink.recv()
assert rx_frame.tdata == test_data
assert rx_frame.tuser == 0
assert tb.axis_sink.empty()
await RisingEdge(dut.rx_clk)
await RisingEdge(dut.rx_clk)
async def run_test_tx(dut, payload_lengths=None, payload_data=None, ifg=12):
tb = TB(dut)
tb.xgmii_source.ifg = ifg
tb.dut.ifg_delay <= ifg
await tb.reset()
test_frames = [payload_data(x) for x in payload_lengths()]
for test_data in test_frames:
await tb.axis_source.send(test_data)
for test_data in test_frames:
rx_frame = await tb.xgmii_sink.recv()
assert rx_frame.get_payload() == test_data
assert rx_frame.check_fcs()
assert tb.xgmii_sink.empty()
await RisingEdge(dut.tx_clk)
await RisingEdge(dut.tx_clk)
async def run_test_tx_alignment(dut, payload_data=None, ifg=12):
enable_dic = int(os.getenv("PARAM_ENABLE_DIC"))
tb = TB(dut)
byte_width = tb.axis_source.width // 8
tb.xgmii_source.ifg = ifg
tb.dut.ifg_delay <= ifg
for length in range(60, 92):
await tb.reset()
test_frames = [payload_data(length) for k in range(10)]
start_lane = []
for test_data in test_frames:
await tb.axis_source.send(test_data)
for test_data in test_frames:
rx_frame = await tb.xgmii_sink.recv()
assert rx_frame.get_payload() == test_data
assert rx_frame.check_fcs()
assert rx_frame.ctrl is None
start_lane.append(rx_frame.rx_start_lane)
tb.log.info("length: %d", length)
tb.log.info("start_lane: %s", start_lane)
start_lane_ref = []
# compute expected starting lanes
lane = 0
deficit_idle_count = 0
for test_data in test_frames:
if ifg == 0:
lane = 0
start_lane_ref.append(lane)
lane = (lane + len(test_data)+4+ifg) % byte_width
if enable_dic:
offset = lane % 4
if deficit_idle_count+offset >= 4:
offset += 4
lane = (lane - offset) % byte_width
deficit_idle_count = (deficit_idle_count + offset) % 4
else:
offset = lane % 4
if offset > 0:
offset += 4
lane = (lane - offset) % byte_width
tb.log.info("start_lane_ref: %s", start_lane_ref)
assert start_lane_ref == start_lane
await RisingEdge(dut.tx_clk)
assert tb.xgmii_sink.empty()
await RisingEdge(dut.tx_clk)
await RisingEdge(dut.tx_clk)
def size_list():
return list(range(60, 128)) + [512, 1514, 9214] + [60]*10
def incrementing_payload(length):
return bytearray(itertools.islice(itertools.cycle(range(256)), length))
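# For illustration: incrementing_payload(4) == bytearray(b'\x00\x01\x02\x03'),
# and the pattern wraps around after 256 bytes.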
def cycle_en():
return itertools.cycle([0, 0, 0, 1])
if cocotb.SIM_NAME:
for test in [run_test_rx, run_test_tx]:
factory = TestFactory(test)
factory.add_option("payload_lengths", [size_list])
factory.add_option("payload_data", [incrementing_payload])
factory.add_option("ifg", [12, 0])
factory.generate_tests()
factory = TestFactory(run_test_tx_alignment)
factory.add_option("payload_data", [incrementing_payload])
factory.add_option("ifg", [12])
factory.generate_tests()
# cocotb-test
tests_dir = os.path.abspath(os.path.dirname(__file__))
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
lib_dir = os.path.abspath(os.path.join(rtl_dir, '..', 'lib'))
axis_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'axis', 'rtl'))
@pytest.mark.parametrize("enable_dic", [1, 0])
@pytest.mark.parametrize("data_width", [32, 64])
def test_eth_mac_10g(request, data_width, enable_dic):
dut = "eth_mac_10g"
module = os.path.splitext(os.path.basename(__file__))[0]
toplevel = dut
verilog_sources = [
os.path.join(rtl_dir, f"{dut}.v"),
os.path.join(rtl_dir, "axis_xgmii_rx_32.v"),
os.path.join(rtl_dir, "axis_xgmii_rx_64.v"),
os.path.join(rtl_dir, "axis_xgmii_tx_32.v"),
os.path.join(rtl_dir, "axis_xgmii_tx_64.v"),
os.path.join(rtl_dir, "lfsr.v"),
]
parameters = {}
parameters['DATA_WIDTH'] = data_width
parameters['KEEP_WIDTH'] = parameters['DATA_WIDTH'] // 8
parameters['CTRL_WIDTH'] = parameters['DATA_WIDTH'] // 8
parameters['ENABLE_PADDING'] = 1
parameters['ENABLE_DIC'] = enable_dic
parameters['MIN_FRAME_LENGTH'] = 64
parameters['PTP_PERIOD_NS'] = 0x6 if parameters['DATA_WIDTH'] == 64 else 0x3
parameters['PTP_PERIOD_FNS'] = 0x6666 if parameters['DATA_WIDTH'] == 64 else 0x3333
parameters['TX_PTP_TS_ENABLE'] = 0
parameters['TX_PTP_TS_WIDTH'] = 96
parameters['TX_PTP_TAG_ENABLE'] = parameters['TX_PTP_TS_ENABLE']
parameters['TX_PTP_TAG_WIDTH'] = 16
parameters['RX_PTP_TS_ENABLE'] = 0
parameters['RX_PTP_TS_WIDTH'] = 96
parameters['TX_USER_WIDTH'] = (parameters['TX_PTP_TAG_WIDTH'] if parameters['TX_PTP_TAG_ENABLE'] else 0) + 1
parameters['RX_USER_WIDTH'] = (parameters['RX_PTP_TS_WIDTH'] if parameters['RX_PTP_TS_ENABLE'] else 0) + 1
extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}
sim_build = os.path.join(tests_dir, "sim_build",
request.node.name.replace('[', '-').replace(']', ''))
cocotb_test.simulator.run(
python_search=[tests_dir],
verilog_sources=verilog_sources,
toplevel=toplevel,
module=module,
parameters=parameters,
sim_build=sim_build,
extra_env=extra_env,
)
| [] | [] | [
"PARAM_ENABLE_DIC"
] | [] | ["PARAM_ENABLE_DIC"] | python | 1 | 0 | |
GTSRB/train_standard_vgg.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
This is the implement of standard training on GTSRB dataset.
Copyright (c) Yiming Li, 2020
'''
from __future__ import print_function
import argparse
import os
import shutil
import time
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data as data
import torchvision.transforms as transforms
import gtsrb_dataset as dataset
from model import *
from utils import Bar, Logger, AverageMeter, accuracy, mkdir_p, savefig
parser = argparse.ArgumentParser(description='PyTorch GTSRB')
# Datasets
parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
help='number of data loading workers (default: 2)')
# Optimization options
parser.add_argument('--epochs', default=30, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--train-batch', default=128, type=int, metavar='N',
help='train batchsize')
parser.add_argument('--test-batch', default=128, type=int, metavar='N',
help='test batchsize')
parser.add_argument('--lr', '--learning-rate', default=0.01, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--drop', '--dropout', default=0, type=float,
metavar='Dropout', help='Dropout ratio')
parser.add_argument('--schedule', type=int, nargs='+', default=[20],
help='Decrease learning rate at these epochs.')
parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,
metavar='W', help='weight decay (default: 5e-4)')
# Checkpoints
parser.add_argument('-c', '--checkpoint', default='checkpoint/benign', type=str, metavar='PATH',
help='path to save checkpoint (default: checkpoint)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
# Miscs
parser.add_argument('--manualSeed', type=int, default=1, help='manual seed')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
#Device options
parser.add_argument('--gpu-id', default='0', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
args = parser.parse_args()
state = {k: v for k, v in args._get_kwargs()}
# Use CUDA
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
use_cuda = torch.cuda.is_available()
# Random seed
if args.manualSeed is None:
args.manualSeed = random.randint(1, 10000)
random.seed(args.manualSeed)
torch.manual_seed(args.manualSeed)
if use_cuda:
torch.cuda.manual_seed_all(args.manualSeed)
best_acc = 0 # best test accuracy
def main():
global best_acc
start_epoch = args.start_epoch # start from epoch 0 or last checkpoint epoch
if not os.path.isdir(args.checkpoint):
mkdir_p(args.checkpoint)
# Dataset preprocessing
title = 'GTSRB'
print('==> Preparing GTSRB dataset')
transform = transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor()
])
# Create Datasets
trainset = dataset.GTSRB(
root_dir='./data', train=True, transform=transform)
testset = dataset.GTSRB(
root_dir='./data', train=False, transform=transform)
# Load Datasets
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=args.train_batch, shuffle=True, num_workers=args.workers)
testloader = torch.utils.data.DataLoader(
testset, batch_size=args.test_batch, shuffle=False, num_workers=args.workers)
# Model
model = vgg19_bn()
model = torch.nn.DataParallel(model).cuda()
cudnn.benchmark = True
print('Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
# Resume
if args.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'
args.checkpoint = os.path.dirname(args.resume)
checkpoint = torch.load(args.resume)
best_acc = checkpoint['best_acc']
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)
else:
logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
logger.set_names(['Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.', 'Valid Acc.'])
if args.evaluate:
print('\nEvaluation only')
test_loss, test_acc = test(testloader, model, criterion, start_epoch, use_cuda)
print(' Test Loss: %.8f, Test Acc: %.2f' % (test_loss, test_acc))
return
# Train and val
for epoch in range(start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch)
print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, state['lr']))
train_loss, train_acc = train(args, model, trainloader, criterion, optimizer, epoch, use_cuda)
test_loss, test_acc = test(testloader, model, criterion, epoch, use_cuda)
# append logger file
logger.append([state['lr'], train_loss, test_loss, train_acc, test_acc])
# save model
is_best = test_acc > best_acc
best_acc = max(test_acc, best_acc)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'acc': test_acc,
'best_acc': best_acc,
'optimizer' : optimizer.state_dict(),
}, is_best, checkpoint=args.checkpoint)
logger.close()
logger.plot()
savefig(os.path.join(args.checkpoint, 'log.eps'))
print('Best acc:')
print(best_acc)
def train(args, model, trainloader, criterion, optimizer, epoch, use_cuda):
# switch to train mode
model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
end = time.time()
bar = Bar('Processing', max=len(trainloader))
for batch_idx, (image, target) in enumerate(trainloader):
# measure data loading time
data_time.update(time.time() - end)
if use_cuda:
image, target = image.cuda(), target.cuda()
# compute loss and do SGD step
outputs = model(image)
loss = criterion(outputs, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure train accuracy and record loss
prec1, prec5 = accuracy(outputs.data, target.data, topk=(1, 5))
losses.update(loss.item(), image.size(0))
top1.update(prec1.item(), image.size(0))
top5.update(prec5.item(), image.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# plot progress
bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
batch=batch_idx + 1,
size=len(trainloader),
data=data_time.avg,
bt=batch_time.avg,
total=bar.elapsed_td,
eta=bar.eta_td,
loss=losses.avg,
top1=top1.avg,
top5=top5.avg,
)
bar.next()
bar.finish()
return (losses.avg, top1.avg)
def test(testloader, model, criterion, epoch, use_cuda):
global best_acc
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
bar = Bar('Processing', max=len(testloader))
for batch_idx, (inputs, targets) in enumerate(testloader):
# measure data loading time
data_time.update(time.time() - end)
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
# compute output
outputs = model(inputs)
loss = criterion(outputs, targets)
# measure accuracy and record standard loss
prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
losses.update(loss.item(), inputs.size(0))
top1.update(prec1.item(), inputs.size(0))
top5.update(prec5.item(), inputs.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# plot progress
bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
batch=batch_idx + 1,
size=len(testloader),
data=data_time.avg,
bt=batch_time.avg,
total=bar.elapsed_td,
eta=bar.eta_td,
loss=losses.avg,
top1=top1.avg,
top5=top5.avg,
)
bar.next()
bar.finish()
return (losses.avg, top1.avg)
def save_checkpoint(state, is_best, checkpoint='checkpoint', filename='checkpoint.pth.tar'):
filepath = os.path.join(checkpoint, filename)
torch.save(state, filepath)
if is_best:
shutil.copyfile(filepath, os.path.join(checkpoint, 'model_best.pth.tar'))
def adjust_learning_rate(optimizer, epoch):
global state
if epoch in args.schedule:
state['lr'] *= args.gamma
for param_group in optimizer.param_groups:
param_group['lr'] = state['lr']
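# Worked example for the schedule above: with the defaults --lr 0.01,
# --schedule 20 and --gamma 0.1, every parameter group keeps 0.01 for epoch
# indices 0-19 and is multiplied by 0.1 (to 0.001) once the epoch index
# reaches 20, for the remainder of the 30 default epochs.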
if __name__ == '__main__':
main()
| [] | [] | [
"CUDA_VISIBLE_DEVICES"
] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
src/remote/remote_test.go | package remote
import (
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"testing"
"time"
"github.com/bazelbuild/remote-apis-sdks/go/pkg/digest"
pb "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2"
"github.com/golang/protobuf/ptypes"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/thought-machine/please/src/fs"
"github.com/thought-machine/please/src/core"
)
func TestInit(t *testing.T) {
c := newClient()
assert.NoError(t, c.CheckInitialised())
}
func TestBadAPIVersion(t *testing.T) {
// We specify a required API version of v2.0.0, so should fail initialisation if the server
// specifies something incompatible with that.
defer server.Reset()
server.HighAPIVersion.Major = 1
server.LowAPIVersion.Major = 1
c := newClient()
assert.Error(t, c.CheckInitialised())
assert.Contains(t, c.CheckInitialised().Error(), "1.0.0 - 1.1.0")
}
func TestUnsupportedDigest(t *testing.T) {
defer server.Reset()
server.DigestFunction = []pb.DigestFunction_Value{
pb.DigestFunction_MD5,
pb.DigestFunction_SHA384,
pb.DigestFunction_SHA512,
}
c := newClient()
assert.Error(t, c.CheckInitialised())
}
func TestExecuteBuild(t *testing.T) {
c := newClient()
target := core.NewBuildTarget(core.BuildLabel{PackageName: "package", Name: "target2"})
target.AddSource(core.FileLabel{File: "src1.txt", Package: "package"})
target.AddSource(core.FileLabel{File: "src2.txt", Package: "package"})
target.AddOutput("out2.txt")
target.BuildTimeout = time.Minute
// We need to set this to force stdout to be retrieved (it is otherwise unnecessary
// on success).
target.PostBuildFunction = testFunction{}
target.Command = "echo hello && echo test > $OUT"
metadata, err := c.Build(0, target)
assert.NoError(t, err)
assert.Equal(t, []byte("hello\n"), metadata.Stdout)
}
type postBuildFunction func(*core.BuildTarget, string) error //nolint:unused
//nolint:unused
func (f postBuildFunction) Call(target *core.BuildTarget, output string) error {
return f(target, output)
}
//nolint:unused
func (f postBuildFunction) String() string { return "" }
func TestExecutePostBuildFunction(t *testing.T) {
t.Skip("Post-build function currently triggered at a higher level")
c := newClient()
target := core.NewBuildTarget(core.BuildLabel{PackageName: "package", Name: "target5"})
target.BuildTimeout = time.Minute
target.Command = "echo 'wibble wibble wibble' | tee file7"
target.PostBuildFunction = postBuildFunction(func(target *core.BuildTarget, output string) error {
target.AddOutput("somefile")
assert.Equal(t, "wibble wibble wibble", output)
return nil
})
_, err := c.Build(0, target)
assert.NoError(t, err)
assert.Equal(t, []string{"somefile"}, target.Outputs())
}
func TestExecuteFetch(t *testing.T) {
c := newClient()
target := core.NewBuildTarget(core.BuildLabel{PackageName: "package", Name: "remote1"})
target.IsRemoteFile = true
target.AddSource(core.URLLabel("https://get.please.build/linux_amd64/14.2.0/please_14.2.0.tar.gz"))
target.AddOutput("please_14.2.0.tar.gz")
target.Hashes = []string{"ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"}
target.BuildTimeout = time.Minute
_, err := c.Build(0, target)
assert.NoError(t, err)
}
func TestExecuteTest(t *testing.T) {
c := newClientInstance("test")
target := core.NewBuildTarget(core.BuildLabel{PackageName: "package", Name: "target3"})
target.AddOutput("remote_test")
target.TestTimeout = time.Minute
target.TestCommand = "$TEST"
target.IsTest = true
target.IsBinary = true
target.SetState(core.Building)
err := c.Store(target)
assert.NoError(t, err)
c.state.Graph.AddTarget(target)
_, err = c.Test(0, target, 1)
assert.NoError(t, err)
results, err := ioutil.ReadFile(filepath.Join(target.TestDir(1), core.TestResultsFile))
require.NoError(t, err)
assert.Equal(t, testResults, results)
}
func TestExecuteTestWithCoverage(t *testing.T) {
c := newClientInstance("test")
c.state.NeedCoverage = true // bit of a hack but we need to turn this on somehow
target := core.NewBuildTarget(core.BuildLabel{PackageName: "package", Name: "target4"})
target.AddOutput("remote_test")
target.TestTimeout = time.Minute
target.TestCommand = "$TEST"
target.IsTest = true
target.IsBinary = true
err := c.Store(target)
assert.NoError(t, err)
target.SetState(core.Built)
c.state.Graph.AddTarget(target)
_, err = c.Test(0, target, 1)
assert.NoError(t, err)
results, err := ioutil.ReadFile(filepath.Join(target.TestDir(1), core.TestResultsFile))
require.NoError(t, err)
coverage, err := ioutil.ReadFile(filepath.Join(target.TestDir(1), core.CoverageFile))
require.NoError(t, err)
assert.Equal(t, testResults, results)
assert.Equal(t, coverageData, coverage)
}
var testResults = []byte(`<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<testcase name="//src/remote:remote_test">
<test name="testResults" success="true" time="172" type="SUCCESS"/>
</testcase>
`)
var coverageData = []byte(`mode: set
src/core/build_target.go:134.54,143.2 7 0
src/core/build_target.go:159.52,172.2 12 0
src/core/build_target.go:177.44,179.2 1 0
`)
func TestNoAbsolutePaths(t *testing.T) {
c := newClientInstance("test")
tool := core.NewBuildTarget(core.BuildLabel{PackageName: "package", Name: "tool"})
tool.AddOutput("bin")
c.state.Graph.AddTarget(tool)
target := core.NewBuildTarget(core.BuildLabel{PackageName: "package", Name: "target5"})
target.AddOutput("remote_test")
target.AddSource(core.FileLabel{Package: "package", File: "file"})
target.AddTool(tool.Label)
cmd, _ := c.buildCommand(target, &pb.Directory{}, false, false, false)
testDir := os.Getenv("TEST_DIR")
for _, env := range cmd.EnvironmentVariables {
if !strings.HasPrefix(env.Value, "//") {
assert.False(t, path.IsAbs(env.Value), "Env var %s has an absolute path: %s", env.Name, env.Value)
assert.NotContains(t, env.Value, core.OutDir, "Env var %s contains %s: %s", env.Name, core.OutDir, env.Value)
assert.NotContains(t, env.Value, testDir, "Env var %s contains the test dir %s: %s", env.Name, testDir, env.Value)
}
}
}
func TestNoAbsolutePaths2(t *testing.T) {
c := newClientInstance("test")
tool := core.NewBuildTarget(core.BuildLabel{PackageName: "package", Name: "tool"})
tool.AddOutput("bin")
c.state.Graph.AddTarget(tool)
target := core.NewBuildTarget(core.BuildLabel{PackageName: "package", Name: "target5"})
target.AddOutput("remote_test")
target.AddTool(core.SystemPathLabel{Path: []string{os.Getenv("TMP_DIR")}, Name: "remote_test"})
cmd, _ := c.buildCommand(target, &pb.Directory{}, false, false, false)
for _, env := range cmd.EnvironmentVariables {
if !strings.HasPrefix(env.Value, "//") {
assert.False(t, path.IsAbs(env.Value), "Env var %s has an absolute path: %s", env.Name, env.Value)
assert.NotContains(t, env.Value, core.OutDir, "Env var %s contains %s: %s", env.Name, core.OutDir, env.Value)
}
}
}
func TestRemoteFilesHashConsistently(t *testing.T) {
c := newClientInstance("test")
target := core.NewBuildTarget(core.BuildLabel{PackageName: "package", Name: "download"})
target.IsRemoteFile = true
target.AddSource(core.URLLabel("https://localhost/file"))
cmd, digest, err := c.buildAction(target, false, false)
assert.NoError(t, err)
// After we change this path, the rule should still give back the same protos since it is
// not relevant to how we fetch a remote asset.
c.state.Config.Build.Path = []string{"/usr/bin/nope"}
cmd2, digest2, err := c.buildAction(target, false, false)
assert.NoError(t, err)
assert.Equal(t, cmd, cmd2)
assert.Equal(t, digest, digest2)
}
func TestOutDirsSetOutsOnTarget(t *testing.T) {
c := newClientInstance("mock")
foo := []byte("this is the content of foo")
fooDigest := digest.NewFromBlob(foo)
bar := []byte("this is the content of bar")
barDigest := digest.NewFromBlob(bar)
tree := mustMarshal(&pb.Tree{
Root: &pb.Directory{
Files: []*pb.FileNode{
{Name: "foo.txt", Digest: fooDigest.ToProto()},
{Name: "bar.txt", Digest: barDigest.ToProto()},
},
},
})
treeDigest := digest.NewFromBlob(tree)
server.mockActionResult = &pb.ActionResult{
OutputDirectories: []*pb.OutputDirectory{
{
Path: "foo",
TreeDigest: treeDigest.ToProto(),
},
},
ExitCode: 0,
ExecutionMetadata: &pb.ExecutedActionMetadata{
Worker: "kev",
QueuedTimestamp: ptypes.TimestampNow(),
ExecutionStartTimestamp: ptypes.TimestampNow(),
ExecutionCompletedTimestamp: ptypes.TimestampNow(),
},
}
server.blobs[treeDigest.Hash] = tree
server.blobs[fooDigest.Hash] = foo
server.blobs[barDigest.Hash] = bar
outDirTarget := core.NewBuildTarget(core.BuildLabel{
PackageName: "package",
Name: "out_dir_target",
})
c.state.AddOriginalTarget(outDirTarget.Label, true)
c.state.DownloadOutputs = true
require.True(t, c.state.ShouldDownload(outDirTarget))
outDirTarget.AddOutputDirectory("foo")
// Doesn't actually get executed but gives an idea as to how this rule is mocked up
outDirTarget.Command = "touch foo/bar.txt && touch foo/baz.txt"
c.state.Graph.AddTarget(outDirTarget)
_, err := c.Build(0, outDirTarget)
require.NoError(t, err)
assert.Len(t, outDirTarget.Outputs(), 2)
assert.ElementsMatch(t, []string{"foo.txt", "bar.txt"}, outDirTarget.Outputs())
for _, out := range outDirTarget.Outputs() {
assert.True(t, fs.FileExists(filepath.Join(outDirTarget.OutDir(), out)), "output %s doesn't exist in target out folder", out)
}
}
func TestDirectoryMetadataStore(t *testing.T) {
cacheDuration := time.Hour
now := time.Now().UTC()
store := directoryMetadataStore{
directory: storeDirectoryName,
cacheDuration: cacheDuration,
}
mds := map[string]*core.BuildMetadata{
"delete": {
Timestamp: now.Add(-cacheDuration * 2),
},
"keep": {
Timestamp: now,
},
}
for key, value := range mds {
err := store.storeMetadata(key, value)
require.NoError(t, err)
assert.FileExists(t, filepath.Join(storeDirectoryName, key[:2], key))
}
md, err := store.retrieveMetadata("delete")
require.NoError(t, err)
assert.Nil(t, md)
md, err = store.retrieveMetadata("keep")
require.NoError(t, err)
assert.Equal(t, md, mds["keep"])
store.clean()
assert.FileExists(t, filepath.Join(storeDirectoryName, "ke", "keep"))
_, err = os.Lstat(filepath.Join(storeDirectoryName, "de", "delete"))
assert.True(t, os.IsNotExist(err))
}
func TestTargetPlatform(t *testing.T) {
c := newClientInstance("platform_test")
c.platform = convertPlatform(c.state.Config.Remote.Platform) // Bit of a hack but we can't go through the normal path.
target := core.NewBuildTarget(core.BuildLabel{PackageName: "package", Name: "target"})
cmd, err := c.buildCommand(target, &pb.Directory{}, false, false, false)
assert.NoError(t, err)
assert.Equal(t, &pb.Platform{
Properties: []*pb.Platform_Property{
{
Name: "OSFamily",
Value: "linux",
},
},
}, cmd.Platform)
target.Labels = []string{"remote-platform-property:size=chomky"}
cmd, err = c.buildCommand(target, &pb.Directory{}, false, false, false)
assert.NoError(t, err)
assert.Equal(t, &pb.Platform{
Properties: []*pb.Platform_Property{
{
Name: "size",
Value: "chomky",
},
{
Name: "OSFamily",
Value: "linux",
},
},
}, cmd.Platform)
}
// Store is a small hack that stores a target's outputs for testing only.
func (c *Client) Store(target *core.BuildTarget) error {
if err := c.CheckInitialised(); err != nil {
return err
}
return c.uploadLocalTarget(target)
}
| [
"\"TEST_DIR\"",
"\"TMP_DIR\""
] | [] | [
"TMP_DIR",
"TEST_DIR"
] | [] | ["TMP_DIR", "TEST_DIR"] | go | 2 | 0 | |
soc_linux.py | #!/usr/bin/env python3
import os
import subprocess
from migen import *
from litex.soc.interconnect import wishbone
from litex.soc.cores.gpio import GPIOOut, GPIOIn
from litex.soc.cores.spi import SPIMaster
from litex.soc.cores.bitbang import I2CMaster
from litex.soc.cores.xadc import XADC
from litex.soc.cores.pwm import PWM
from litex.soc.cores.icap import ICAPBitstream
from litex.soc.cores.clock import S7MMCM
from litevideo.output import VideoOut
# Predefined values --------------------------------------------------------------------------------
video_resolutions = {
"1920x1080_60Hz" : {
"pix_clk" : 148.5e6,
"h-active" : 1920,
"h-blanking" : 280,
"h-sync" : 44,
"h-front-porch" : 148,
"v-active" : 1080,
"v-blanking" : 45,
"v-sync" : 5,
"v-front-porch" : 36,
},
"1280x720_60Hz" : {
"pix_clk" : 74.25e6,
"h-active" : 1280,
"h-blanking" : 370,
"h-sync" : 40,
"h-front-porch" : 220,
"v-active" : 720,
"v-blanking" : 30,
"v-sync" : 5,
"v-front-porch" : 20,
},
"640x480_75Hz" : {
"pix_clk" : 31.5e6,
"h-active" : 640,
"h-blanking" : 200,
"h-sync" : 64,
"h-front-porch" : 16,
"v-active" : 480,
"v-blanking" : 20,
"v-sync" : 3,
"v-front-porch" : 1,
}
}
# Helpers ------------------------------------------------------------------------------------------
def platform_request_all(platform, name):
from litex.build.generic_platform import ConstraintError
r = []
while True:
try:
r += [platform.request(name, len(r))]
except ConstraintError:
break
if r == []:
raise ValueError
return r
# SoCLinux -----------------------------------------------------------------------------------------
def SoCLinux(soc_cls, **kwargs):
class _SoCLinux(soc_cls):
csr_map = {**soc_cls.csr_map, **{
"ctrl": 0,
"uart": 2,
"timer0": 3,
}}
interrupt_map = {**soc_cls.interrupt_map, **{
"uart": 0,
"timer0": 1,
}}
mem_map = {**soc_cls.mem_map, **{
"ethmac": 0xb0000000,
"spiflash": 0xd0000000,
"csr": 0xf0000000,
}}
def __init__(self, cpu_variant="linux", uart_baudrate=1e6, **kwargs):
# SoC ----------------------------------------------------------------------------------
soc_cls.__init__(self,
cpu_type = "vexriscv",
cpu_variant = cpu_variant,
uart_baudrate = uart_baudrate,
max_sdram_size = 0x40000000, # Limit mapped SDRAM to 1GB.
**kwargs)
# Add linker region for machine mode emulator
self.add_memory_region("emulator", self.mem_map["main_ram"] + 0x01100000, 0x4000,
type="cached+linker")
# Leds -------------------------------------------------------------------------------------
def add_leds(self):
self.submodules.leds = GPIOOut(Cat(platform_request_all(self.platform, "user_led")))
self.add_csr("leds")
# RGB Led ----------------------------------------------------------------------------------
def add_rgb_led(self):
rgb_led_pads = self.platform.request("rgb_led", 0)
for n in "rgb":
setattr(self.submodules, "rgb_led_{}0".format(n), PWM(getattr(rgb_led_pads, n)))
self.add_csr("rgb_led_{}0".format(n))
# Switches ---------------------------------------------------------------------------------
def add_switches(self):
self.submodules.switches = GPIOIn(Cat(platform_request_all(self.platform, "user_sw")))
self.add_csr("switches")
# SPI --------------------------------------------------------------------------------------
def add_spi(self, data_width, clk_freq):
spi_pads = self.platform.request("spi")
self.submodules.spi = SPIMaster(spi_pads, data_width, self.clk_freq, clk_freq)
self.add_csr("spi")
# I2C --------------------------------------------------------------------------------------
def add_i2c(self):
self.submodules.i2c0 = I2CMaster(self.platform.request("i2c", 0))
self.add_csr("i2c0")
# XADC (Xilinx only) -----------------------------------------------------------------------
def add_xadc(self):
self.submodules.xadc = XADC()
self.add_csr("xadc")
# Framebuffer (Xilinx only) ----------------------------------------------------------------
def add_framebuffer(self, video_settings):
platform = self.platform
assert platform.device[:4] == "xc7a"
dram_port = self.sdram.crossbar.get_port(
mode = "read",
data_width = 32,
clock_domain = "pix",
reverse = True)
framebuffer = VideoOut(
device = platform.device,
pads = platform.request("hdmi_out"),
dram_port = dram_port)
self.submodules.framebuffer = framebuffer
self.add_csr("framebuffer")
clocking = framebuffer.driver.clocking
platform.add_period_constraint(clocking.cd_pix.clk, 1e9/video_settings["pix_clk"])
platform.add_period_constraint(clocking.cd_pix5x.clk, 1e9/(5*video_settings["pix_clk"]))
platform.add_false_path_constraints(
self.crg.cd_sys.clk,
framebuffer.driver.clocking.cd_pix.clk,
framebuffer.driver.clocking.cd_pix5x.clk)
self.add_constant("litevideo_pix_clk", video_settings["pix_clk"])
self.add_constant("litevideo_h_active", video_settings["h-active"])
self.add_constant("litevideo_h_blanking", video_settings["h-blanking"])
self.add_constant("litevideo_h_sync", video_settings["h-sync"])
self.add_constant("litevideo_h_front_porch", video_settings["h-front-porch"])
self.add_constant("litevideo_v_active", video_settings["v-active"])
self.add_constant("litevideo_v_blanking", video_settings["v-blanking"])
self.add_constant("litevideo_v_sync", video_settings["v-sync"])
self.add_constant("litevideo_v_front_porch", video_settings["v-front-porch"])
# ICAP Bitstream (Xilinx only) -------------------------------------------------------------
def add_icap_bitstream(self):
            self.submodules.icap_bit = ICAPBitstream()
self.add_csr("icap_bit")
# MMCM (Xilinx only) -----------------------------------------------------------------------
def add_mmcm(self, nclkout):
if (nclkout > 7):
raise ValueError("nclkout cannot be above 7!")
self.cd_mmcm_clkout = []
self.submodules.mmcm = S7MMCM(speedgrade=-1)
self.mmcm.register_clkin(self.crg.cd_sys.clk, self.clk_freq)
for n in range(nclkout):
self.cd_mmcm_clkout += [ClockDomain(name="cd_mmcm_clkout{}".format(n))]
self.mmcm.create_clkout(self.cd_mmcm_clkout[n], self.clk_freq)
self.add_constant("clkout_def_freq", int(self.clk_freq))
self.add_constant("clkout_def_phase", int(0))
self.add_constant("clkout_def_duty_num", int(50))
self.add_constant("clkout_def_duty_den", int(100))
            # We need to export the exponent of clkout_margin so the driver can handle smaller inaccuracies
from math import log10
exp = log10(self.mmcm.clkouts[0][3])
if exp < 0:
self.add_constant("clkout_margin_exp", int(abs(exp)))
self.add_constant("clkout_margin", int(self.mmcm.clkouts[0][3] * 10 ** abs(exp)))
else:
self.add_constant("clkout_margin", int(self.mmcm.clkouts[0][3]))
self.add_constant("clkout_margin_exp", int(0))
self.add_constant("nclkout", int(nclkout))
self.add_constant("mmcm_lock_timeout", int(10))
self.add_constant("mmcm_drdy_timeout", int(10))
self.add_constant("vco_margin", int(self.mmcm.vco_margin))
self.add_constant("vco_freq_range_min", int(self.mmcm.vco_freq_range[0]))
self.add_constant("vco_freq_range_max", int(self.mmcm.vco_freq_range[1]))
self.add_constant("clkfbout_mult_frange_min", int(self.mmcm.clkfbout_mult_frange[0]))
self.add_constant("clkfbout_mult_frange_max", int(self.mmcm.clkfbout_mult_frange[1]))
self.add_constant("divclk_divide_range_min", int(self.mmcm.divclk_divide_range[0]))
self.add_constant("divclk_divide_range_max", int(self.mmcm.divclk_divide_range[1]))
self.add_constant("clkout_divide_range_min", int(self.mmcm.clkout_divide_range[0]))
self.add_constant("clkout_divide_range_max", int(self.mmcm.clkout_divide_range[1]))
self.mmcm.expose_drp()
self.add_csr("mmcm")
self.comb += self.mmcm.reset.eq(self.mmcm.drp_reset.re)
# Ethernet configuration -------------------------------------------------------------------
def configure_ethernet(self, local_ip, remote_ip):
local_ip = local_ip.split(".")
remote_ip = remote_ip.split(".")
self.add_constant("LOCALIP1", int(local_ip[0]))
self.add_constant("LOCALIP2", int(local_ip[1]))
self.add_constant("LOCALIP3", int(local_ip[2]))
self.add_constant("LOCALIP4", int(local_ip[3]))
self.add_constant("REMOTEIP1", int(remote_ip[0]))
self.add_constant("REMOTEIP2", int(remote_ip[1]))
self.add_constant("REMOTEIP3", int(remote_ip[2]))
self.add_constant("REMOTEIP4", int(remote_ip[3]))
# Boot configuration -----------------------------------------------------------------------
def configure_boot(self):
if hasattr(self, "spiflash"):
self.add_constant("FLASH_BOOT_ADDRESS", self.mem_map["spiflash"])
# DTS generation ---------------------------------------------------------------------------
def generate_dts(self, board_name):
json = os.path.join("build", board_name, "csr.json")
dts = os.path.join("build", board_name, "{}.dts".format(board_name))
subprocess.check_call(
"./json2dts.py {} > {}".format(json, dts), shell=True)
# DTS compilation --------------------------------------------------------------------------
def compile_dts(self, board_name):
dts = os.path.join("build", board_name, "{}.dts".format(board_name))
dtb = os.path.join("buildroot", "rv32.dtb")
subprocess.check_call(
"dtc -O dtb -o {} {}".format(dtb, dts), shell=True)
# Emulator compilation ---------------------------------------------------------------------
def compile_emulator(self, board_name):
os.environ["BOARD"] = board_name
subprocess.check_call("cd emulator && make", shell=True)
# Documentation generation -----------------------------------------------------------------
def generate_doc(self, board_name):
from litex.soc.doc import generate_docs
doc_dir = os.path.join("build", board_name, "doc")
generate_docs(self, doc_dir)
os.system("sphinx-build -M html {}/ {}/_build".format(doc_dir, doc_dir))
return _SoCLinux(**kwargs)
| [] | [] | [
"BOARD"
] | [] | ["BOARD"] | python | 1 | 0 | |
cmd/nmi/main.go | package main
import (
goflag "flag"
"net/http"
_ "net/http/pprof"
"os"
"strings"
"github.com/Azure/aad-pod-identity/pkg/metrics"
"github.com/Azure/aad-pod-identity/pkg/nmi"
server "github.com/Azure/aad-pod-identity/pkg/nmi/server"
"github.com/Azure/aad-pod-identity/pkg/probes"
"github.com/Azure/aad-pod-identity/version"
"github.com/spf13/pflag"
"k8s.io/klog"
)
const (
defaultMetadataIP = "169.254.169.254"
defaultMetadataPort = "80"
defaultNmiPort = "2579"
defaultIPTableUpdateTimeIntervalInSeconds = 60
defaultlistPodIDsRetryAttemptsForCreated = 16
defaultlistPodIDsRetryAttemptsForAssigned = 4
defaultlistPodIDsRetryIntervalInSeconds = 5
)
var (
versionInfo = pflag.Bool("version", false, "prints the version information")
nmiPort = pflag.String("nmi-port", defaultNmiPort, "NMI application port")
metadataIP = pflag.String("metadata-ip", defaultMetadataIP, "instance metadata host ip")
	metadataPort = pflag.String("metadata-port", defaultMetadataPort, "instance metadata host port")
nodename = pflag.String("node", "", "node name")
ipTableUpdateTimeIntervalInSeconds = pflag.Int("ipt-update-interval-sec", defaultIPTableUpdateTimeIntervalInSeconds, "update interval of iptables")
	forceNamespaced = pflag.Bool("forceNamespaced", false, "Forces MIC to namespace identities, bindings, and assignments")
micNamespace = pflag.String("MICNamespace", "default", "MIC namespace to short circuit MIC token requests")
httpProbePort = pflag.String("http-probe-port", "8080", "Http health and liveness probe port")
retryAttemptsForCreated = pflag.Int("retry-attempts-for-created", defaultlistPodIDsRetryAttemptsForCreated, "Number of retries in NMI to find assigned identity in CREATED state")
retryAttemptsForAssigned = pflag.Int("retry-attempts-for-assigned", defaultlistPodIDsRetryAttemptsForAssigned, "Number of retries in NMI to find assigned identity in ASSIGNED state")
findIdentityRetryIntervalInSeconds = pflag.Int("find-identity-retry-interval", defaultlistPodIDsRetryIntervalInSeconds, "Retry interval to find assigned identities in seconds")
enableProfile = pflag.Bool("enableProfile", false, "Enable/Disable pprof profiling")
enableScaleFeatures = pflag.Bool("enableScaleFeatures", false, "Enable/Disable features for scale clusters")
blockInstanceMetadata = pflag.Bool("block-instance-metadata", false, "Block instance metadata endpoints")
metadataHeaderRequired = pflag.Bool("metadata-header-required", false, "Metadata header required for querying Azure Instance Metadata service")
prometheusPort = pflag.String("prometheus-port", "9090", "Prometheus port for metrics")
operationMode = pflag.String("operation-mode", "standard", "NMI operation mode")
)
func main() {
klog.InitFlags(nil)
// this is done for glog used by client-go underneath
pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
pflag.Parse()
if *versionInfo {
version.PrintVersionAndExit()
}
klog.Infof("starting nmi process. Version: %v. Build date: %v.", version.NMIVersion, version.BuildDate)
if *enableProfile {
profilePort := "6060"
klog.Infof("starting profiling on port %s", profilePort)
go func() {
addr := "localhost:" + profilePort
if err := http.ListenAndServe(addr, nil); err != nil {
klog.Errorf("failed to listen and serve %s, error: %+v", addr, err)
}
}()
}
if *enableScaleFeatures {
klog.Infof("features for scale clusters enabled")
}
// normalize operation mode
*operationMode = strings.ToLower(*operationMode)
client, err := nmi.GetKubeClient(*nodename, *operationMode, *enableScaleFeatures)
if err != nil {
klog.Fatalf("failed to get kube client, error: %+v", err)
}
exit := make(<-chan struct{})
client.Start(exit)
*forceNamespaced = *forceNamespaced || "true" == os.Getenv("FORCENAMESPACED")
klog.Infof("running NMI in namespaced mode: %v", *forceNamespaced)
s := server.NewServer(*micNamespace, *blockInstanceMetadata, *metadataHeaderRequired)
s.KubeClient = client
s.MetadataIP = *metadataIP
s.MetadataPort = *metadataPort
s.NMIPort = *nmiPort
s.NodeName = *nodename
s.IPTableUpdateTimeIntervalInSeconds = *ipTableUpdateTimeIntervalInSeconds
nmiConfig := nmi.Config{
Mode: strings.ToLower(*operationMode),
RetryAttemptsForCreated: *retryAttemptsForCreated,
RetryAttemptsForAssigned: *retryAttemptsForAssigned,
FindIdentityRetryIntervalInSeconds: *findIdentityRetryIntervalInSeconds,
Namespaced: *forceNamespaced,
}
// Create new token client based on the nmi mode
tokenClient, err := nmi.GetTokenClient(client, nmiConfig)
if err != nil {
klog.Fatalf("failed to initialize token client, error: %+v", err)
}
s.TokenClient = tokenClient
	// The health probe will always report success once it's started. The contents
	// will report "Active" once the iptables rules are set.
probes.InitAndStart(*httpProbePort, &s.Initialized)
// Register and expose metrics views
if err = metrics.RegisterAndExport(*prometheusPort); err != nil {
klog.Fatalf("failed to register and export metrics on port %s, error: %+v", *prometheusPort, err)
}
if err := s.Run(); err != nil {
klog.Fatalf("%s", err)
}
}
| [
"\"FORCENAMESPACED\""
] | [] | [
"FORCENAMESPACED"
] | [] | ["FORCENAMESPACED"] | go | 1 | 0 | |
src/flashkit/api/build/_simulation.py | """Build and compile a FLASH simulation directory."""
# type annotations
from __future__ import annotations
from typing import Any
# standard libraries
import logging
import os
import sys
from pathlib import Path
# internal libraries
from ...core.error import AutoError
from ...core.parallel import safe, single, squash
from ...core.progress import attach_context
from ...core.stream import Instructions, mail
from ...library.build_simulation import build, make
from ...resources import CONFIG
logger = logging.getLogger(__name__)
# define public interface
__all__ = ['simulation', ]
def adapt_arguments(**args: Any) -> dict[str, Any]:
"""Process arguments to implement behaviors; will throw if some defaults missing."""
# ensure arguments provided
if any(arg not in args for arg in {'path', 'name'}):
raise AutoError('Both simulation and directory arguments needed!')
try:
# resolve proper absolute directory paths
args['source'] = Path(args['source']).expanduser().resolve(strict=True)
except FileNotFoundError:
raise AutoError('Cannot resolve path to FLASH source repository!')
logger.debug(f'api -- Fully resolved the FLASH source repository.')
try:
# format options and validation of user input
ndim = max(min(3, args['ndim']), 2)
nxb, nyb, nzb = (args[b] for b in ('nxb', 'nyb', 'nzb'))
grid = {'paramesh': 'pm4dev', 'uniform': 'ug', 'regular': 'rg'}.get(args['grid'], args['grid'].strip('-+'))
sub = args['subpath'].rstrip('/')
sim = args['path'].rstrip('/')
objdir = f"{grid}{args['name']}_{nxb}_{nyb}" + '' if ndim == 2 else f'_{nzb}'
python = './setup' if max(min(3, args['python']), 2) == 2 else './setup3'
flag = args['optimize'].strip('-+')
parallelIO = '' if not args.get('parallelIO', False) else ' +parallelIO'
shortcuts = '' if not args.get('shortcuts', False) else ' +' + ' +'.join(args['shortcuts'])
options = '' if not args.get('flags', False) else ' -' + ' -'.join(args['flags'])
variables = ' '.join([f'-{name}={value}' for name, value in args.get('variables', {}).items()])
site = args['site']
except:
raise AutoError('Failed to understand and validate input!')
    logger.debug(f'api -- Validated user input and formatted options.')
# create setup command
args['setup'] = f'{python} {sub}/{sim}/ +{grid} +hdf5{parallelIO}{shortcuts}' \
f' -auto -{flag}{options} -objdir={objdir} -site={site}' \
f' -{ndim}d -nxb={nxb} -nyb={nyb}{f" -nzb={nzb}" if ndim == 3 else ""}' \
f'{variables}'.split()
args['name'] = objdir
logger.debug(f'api -- Constructed setup command.')
return args
# default constants for handling the argument stream
PACKAGES = {'path', 'name', 'ndim', 'nxb', 'nyb', 'nzb', 'grid', 'python', 'site', 'optimize', 'shortcuts',
'flags', 'variables', 'subpath', 'source', 'jobs', 'parallelIO', 'compile', 'build', 'force'}
ROUTE = ('build', 'simulation')
PRIORITY = {'ignore', 'cmdline'}
CRATES = (adapt_arguments, attach_context)
DROPS = {'ignore', 'ndim', 'nxb', 'nyb', 'nzb', 'grid', 'python', 'site', 'optimize', 'shortcuts',
'flags', 'variables', 'subpath', 'parallelIO'}
INSTRUCTIONS = Instructions(packages=PACKAGES, route=ROUTE, priority=PRIORITY, crates=CRATES, drops=DROPS)
@single
@mail(INSTRUCTIONS)
def process_arguments(**arguments: Any) -> dict[str, Any]:
"""Composition of behaviors intended prior to dispatching to library."""
return arguments
@safe
def simulation(**arguments: Any) -> None:
"""Python application interface for building and compiling FLASH simulation build directories.
    This method creates a FLASH simulation setup command using options and specified defaults,
    and executes the build in a subprocess to create the FLASH simulation build directory.
The build directories can also be compiled (even in parallel; e.g., 'make -j 4'), if desired.
Keyword Arguments:
path (str): Specify a simulation directory contained in the FLASH source/Simulation/SimulationMain.
name (str): Specify a build directory basename; will be determined if not provided.
ndim (int): Number of simulation dimensions (i.e., 2 or 3).
nxb (int): Number of grid points per block in the i direction.
nyb (int): Number of grid points per block in the j direction.
nzb (int): Number of grid points per block in the k direction.
grid (str): Type of FLASH simulation grid used by the Grid Package (e.g., 'regular').
python (int): Python version for the setup script; specifically use either 2 (legacy) or 3 (modern).
site (str): Hostname of the machine on which setup is being run; Makefile should be in sites/SITE.
optimize (str): Flag (e.g., 'debug') for compilation and linking; using either optimized, debugging, or other.
shortcuts (list): Additional setup shortcuts (example: +rg) which begin with a plus symbol.
flags (list): Additional setup options (example: -auto) which begin with a dash.
variables (dict): Additional setup variable pairs (example: -nxb=12) which begin with a dash.
subpath (str): Specify a Simulation directory sub-path to PATH within source/Simulation/SimulationMain.
source (str): Specify a path to the local FLASH source repository.
jobs (int): Number of parallel processes to use when executing the make command.
parallelIO (bool): Use the parallel HDF5 Input/Output library.
compile (bool): Compile the FLASH simulation build directory after it is constructed.
build (bool): Force the building of the simulation directory, even if the directory exists.
force (bool): Force the compilation of the build directory, even if the binary is present.
ignore (bool): Ignore configuration file provided arguments, options, and flags.
"""
args = process_arguments(**arguments)
force_build = args.pop('build', False)
force_make = args.pop('force', False)
comp = args.pop('compile', False)
jobs = args.pop('jobs')
setup = args.pop('setup')
path = args.pop('path')
cmdline = args.pop('cmdline', False)
build(path=path, force=force_build, setup=setup, **args)
if comp: make(force=force_make, jobs=jobs, **args)
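# A minimal invocation sketch (values here are hypothetical; 'Sod' must exist under
# source/Simulation/SimulationMain and a Makefile must exist under sites/<site>):
#
#   simulation(path='Sod', name='sod', ndim=2, nxb=16, nyb=16, nzb=1,
#              grid='regular', python=3, site='my.host.name', optimize='opt',
#              source='~/FLASH', jobs=4, compile=True)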
| [] | [] | [] | [] | [] | python | null | null | null |
venv/lib/python2.7/site-packages/ansible/modules/network/nxos/nxos_acl.py | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_acl
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages access list entries for ACLs.
description:
- Manages access list entries for ACLs.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- C(state=absent) removes the ACE if it exists.
- C(state=delete_acl) deletes the ACL if it exists.
- For idempotency, use port numbers for the src/dest port
params like I(src_port1) and names for the well defined protocols
for the I(proto) param.
- Although this module is idempotent in that if the ace as presented in
the task is identical to the one on the switch, no changes will be made.
If there is any difference, what is in Ansible will be pushed (configured
options will be overridden). This is to improve security, but at the
same time remember an ACE is removed, then re-added, so if there is a
change, the new ACE will be exactly what parameters you are sending to
the module.
options:
seq:
description:
- Sequence number of the entry (ACE).
name:
description:
- Case sensitive name of the access list (ACL).
required: true
action:
description:
- Action of the ACE.
choices: ['permit', 'deny', 'remark']
remark:
description:
- If action is set to remark, this is the description.
proto:
description:
- Port number or protocol (as supported by the switch).
src:
description:
- Source ip and mask using IP/MASK notation and
supports keyword 'any'.
src_port_op:
description:
- Source port operands such as eq, neq, gt, lt, range.
choices: ['any', 'eq', 'gt', 'lt', 'neq', 'range']
src_port1:
description:
- Port/protocol and also first (lower) port when using range
operand.
src_port2:
description:
- Second (end) port when using range operand.
dest:
description:
- Destination ip and mask using IP/MASK notation and supports the
keyword 'any'.
dest_port_op:
description:
- Destination port operands such as eq, neq, gt, lt, range.
choices: ['any', 'eq', 'gt', 'lt', 'neq', 'range']
dest_port1:
description:
- Port/protocol and also first (lower) port when using range
operand.
dest_port2:
description:
- Second (end) port when using range operand.
log:
description:
- Log matches against this entry.
choices: ['enable']
urg:
description:
- Match on the URG bit.
choices: ['enable']
ack:
description:
- Match on the ACK bit.
choices: ['enable']
psh:
description:
- Match on the PSH bit.
choices: ['enable']
rst:
description:
- Match on the RST bit.
choices: ['enable']
syn:
description:
- Match on the SYN bit.
choices: ['enable']
fin:
description:
- Match on the FIN bit.
choices: ['enable']
established:
description:
- Match established connections.
choices: ['enable']
fragments:
description:
- Check non-initial fragments.
choices: ['enable']
time_range:
description:
- Name of time-range to apply.
precedence:
description:
- Match packets with given precedence.
choices: ['critical', 'flash', 'flash-override', 'immediate',
'internet', 'network', 'priority', 'routine']
dscp:
description:
- Match packets with given dscp value.
choices: ['af11', 'af12', 'af13', 'af21', 'af22', 'af23','af31','af32',
'af33', 'af41', 'af42', 'af43', 'cs1', 'cs2', 'cs3', 'cs4',
'cs5', 'cs6', 'cs7', 'default', 'ef']
state:
description:
- Specify desired state of the resource.
default: present
choices: ['present','absent','delete_acl']
'''
EXAMPLES = '''
# configure ACL ANSIBLE
- nxos_acl:
name: ANSIBLE
seq: 10
action: permit
proto: tcp
src: 1.1.1.1/24
dest: any
state: present
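
# a second, hypothetical ACE showing port operands (illustrative values only;
# note the use of port numbers, which the notes above recommend for idempotency)
- nxos_acl:
    name: ANSIBLE
    seq: 20
    action: permit
    proto: tcp
    src: 192.0.2.0/24
    src_port_op: range
    src_port1: '1024'
    src_port2: '2048'
    dest: any
    dest_port_op: eq
    dest_port1: '80'
    state: present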
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["ip access-list ANSIBLE", "10 permit tcp 1.1.1.1/24 any"]
'''
from ansible.module_utils.network.nxos.nxos import load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def execute_show_command(command, module):
command += ' | json'
cmds = [command]
body = run_commands(module, cmds)
return body
def get_acl(module, acl_name, seq_number):
command = 'show ip access-list'
new_acl = []
saveme = {}
acl_body = {}
body = execute_show_command(command, module)[0]
if body:
all_acl_body = body['TABLE_ip_ipv6_mac']['ROW_ip_ipv6_mac']
else:
# no access-lists configured on the device
return {}, []
if isinstance(all_acl_body, dict):
# Only 1 ACL configured.
if all_acl_body.get('acl_name') == acl_name:
acl_body = all_acl_body
else:
for acl in all_acl_body:
if acl.get('acl_name') == acl_name:
acl_body = acl
break
try:
acl_entries = acl_body['TABLE_seqno']['ROW_seqno']
acl_name = acl_body.get('acl_name')
except KeyError: # could be raised if no ACEs are configured for an ACL
return {}, [{'acl': 'no_entries'}]
if isinstance(acl_entries, dict):
acl_entries = [acl_entries]
for each in acl_entries:
temp = {}
options = {}
remark = each.get('remark')
temp['name'] = acl_name
temp['seq'] = str(each.get('seqno'))
if remark:
temp['remark'] = remark
temp['action'] = 'remark'
else:
temp['action'] = each.get('permitdeny')
temp['proto'] = str(each.get('proto', each.get('proto_str', each.get('ip'))))
temp['src'] = each.get('src_any', each.get('src_ip_prefix'))
temp['src_port_op'] = each.get('src_port_op')
temp['src_port1'] = each.get('src_port1_num')
temp['src_port2'] = each.get('src_port2_num')
temp['dest'] = each.get('dest_any', each.get('dest_ip_prefix'))
temp['dest_port_op'] = each.get('dest_port_op')
temp['dest_port1'] = each.get('dest_port1_num')
temp['dest_port2'] = each.get('dest_port2_num')
options['log'] = each.get('log')
options['urg'] = each.get('urg')
options['ack'] = each.get('ack')
options['psh'] = each.get('psh')
options['rst'] = each.get('rst')
options['syn'] = each.get('syn')
options['fin'] = each.get('fin')
options['established'] = each.get('established')
options['dscp'] = each.get('dscp_str')
options['precedence'] = each.get('precedence_str')
options['fragments'] = each.get('fragments')
options['time_range'] = each.get('timerange')
keep = {}
for key, value in temp.items():
if value:
keep[key] = value
options_no_null = {}
for key, value in options.items():
if value is not None:
options_no_null[key] = value
keep['options'] = options_no_null
if keep.get('seq') == seq_number:
saveme = dict(keep)
new_acl.append(keep)
return saveme, new_acl
def _acl_operand(operand, srcp1, sprcp2):
sub_entry = ' ' + operand
if operand == 'range':
sub_entry += ' ' + srcp1 + ' ' + sprcp2
else:
sub_entry += ' ' + srcp1
return sub_entry
def config_core_acl(proposed):
seq = proposed.get('seq')
action = proposed.get('action')
remark = proposed.get('remark')
proto = proposed.get('proto')
src = proposed.get('src')
src_port_op = proposed.get('src_port_op')
src_port1 = proposed.get('src_port1')
src_port2 = proposed.get('src_port2')
dest = proposed.get('dest')
dest_port_op = proposed.get('dest_port_op')
dest_port1 = proposed.get('dest_port1')
dest_port2 = proposed.get('dest_port2')
ace_start_entries = [action, proto, src]
if not remark:
ace = seq + ' ' + ' '.join(ace_start_entries)
if src_port_op:
ace += _acl_operand(src_port_op, src_port1, src_port2)
ace += ' ' + dest
if dest_port_op:
ace += _acl_operand(dest_port_op, dest_port1, dest_port2)
else:
ace = seq + ' remark ' + remark
return ace
def config_acl_options(options):
ENABLE_ONLY = ['psh', 'urg', 'log', 'ack', 'syn',
'established', 'rst', 'fin', 'fragments',
'log']
OTHER = ['dscp', 'precedence', 'time-range']
# packet-length is the only option not currently supported
if options.get('time_range'):
options['time-range'] = options.get('time_range')
options.pop('time_range')
command = ''
for option, value in options.items():
if option in ENABLE_ONLY:
if value == 'enable':
command += ' ' + option
elif option in OTHER:
command += ' ' + option + ' ' + value
if command:
command = command.strip()
return command
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def main():
argument_spec = dict(
seq=dict(required=False, type='str'),
name=dict(required=True, type='str'),
action=dict(required=False, choices=['remark', 'permit', 'deny']),
remark=dict(required=False, type='str'),
proto=dict(required=False, type='str'),
src=dict(required=False, type='str'),
src_port_op=dict(required=False),
src_port1=dict(required=False, type='str'),
src_port2=dict(required=False, type='str'),
dest=dict(required=False, type='str'),
dest_port_op=dict(required=False),
dest_port1=dict(required=False, type='str'),
dest_port2=dict(required=False, type='str'),
log=dict(required=False, choices=['enable']),
urg=dict(required=False, choices=['enable']),
ack=dict(required=False, choices=['enable']),
psh=dict(required=False, choices=['enable']),
rst=dict(required=False, choices=['enable']),
syn=dict(required=False, choices=['enable']),
fragments=dict(required=False, choices=['enable']),
fin=dict(required=False, choices=['enable']),
established=dict(required=False, choices=['enable']),
time_range=dict(required=False),
precedence=dict(required=False, choices=['critical', 'flash',
'flash-override',
'immediate', 'internet',
'network', 'priority',
'routine']),
dscp=dict(required=False, choices=['af11', 'af12', 'af13', 'af21',
'af22', 'af23', 'af31', 'af32',
'af33', 'af41', 'af42', 'af43',
'cs1', 'cs2', 'cs3', 'cs4',
'cs5', 'cs6', 'cs7', 'default',
'ef']),
state=dict(choices=['absent', 'present', 'delete_acl'], default='present')
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
results = dict(changed=False, warnings=warnings)
state = module.params['state']
action = module.params['action']
remark = module.params['remark']
dscp = module.params['dscp']
precedence = module.params['precedence']
seq = module.params['seq']
name = module.params['name']
seq = module.params['seq']
if action == 'remark' and not remark:
        module.fail_json(msg='when action is remark, the remark param is also required')
REQUIRED = ['seq', 'name', 'action', 'proto', 'src', 'dest']
ABSENT = ['name', 'seq']
if state == 'present':
if action and remark and seq:
pass
else:
for each in REQUIRED:
if module.params[each] is None:
module.fail_json(msg="req'd params when state is present:",
params=REQUIRED)
elif state == 'absent':
for each in ABSENT:
if module.params[each] is None:
module.fail_json(msg='require params when state is absent',
params=ABSENT)
elif state == 'delete_acl':
if module.params['name'] is None:
module.fail_json(msg="param name req'd when state is delete_acl")
if dscp and precedence:
module.fail_json(msg='only one of the params dscp/precedence '
'are allowed')
OPTIONS_NAMES = ['log', 'urg', 'ack', 'psh', 'rst', 'syn', 'fin',
'established', 'dscp', 'precedence', 'fragments',
'time_range']
CORE = ['seq', 'name', 'action', 'proto', 'src', 'src_port_op',
'src_port1', 'src_port2', 'dest', 'dest_port_op',
'dest_port1', 'dest_port2', 'remark']
proposed_core = dict((param, value) for (param, value) in
module.params.items()
if param in CORE and value is not None)
proposed_options = dict((param, value) for (param, value) in
module.params.items()
if param in OPTIONS_NAMES and value is not None)
proposed = {}
proposed.update(proposed_core)
proposed.update(proposed_options)
existing_options = {}
# getting existing existing_core=dict, acl=list, seq=list
existing_core, acl = get_acl(module, name, seq)
if existing_core:
existing_options = existing_core.get('options')
existing_core.pop('options')
commands = []
delta_core = {}
delta_options = {}
if not existing_core.get('remark'):
dcore = dict(
set(proposed_core.items()).difference(
existing_core.items())
)
if not dcore:
# check the diff in the other way just in case
dcore = dict(
set(existing_core.items()).difference(
proposed_core.items())
)
delta_core = dcore
if delta_core:
delta_options = proposed_options
else:
doptions = dict(
set(proposed_options.items()).difference(
existing_options.items())
)
# check the diff in the other way just in case
if not doptions:
doptions = dict(
set(existing_options.items()).difference(
proposed_options.items())
)
delta_options = doptions
else:
delta_core = dict(
set(proposed_core.items()).difference(
existing_core.items())
)
if state == 'present':
if delta_core or delta_options:
if existing_core: # if the ace exists already
commands.append(['no {0}'.format(seq)])
if delta_options:
myacl_str = config_core_acl(proposed_core)
myacl_str += ' ' + config_acl_options(proposed_options)
else:
myacl_str = config_core_acl(proposed_core)
command = [myacl_str]
commands.append(command)
elif state == 'absent':
if existing_core:
commands.append(['no {0}'.format(seq)])
elif state == 'delete_acl':
if acl[0].get('acl') != 'no_entries':
commands.append(['no ip access-list {0}'.format(name)])
cmds = []
if commands:
preface = []
if state in ['present', 'absent']:
preface = ['ip access-list {0}'.format(name)]
commands.insert(0, preface)
cmds = flatten_list(commands)
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
load_config(module, cmds)
results['changed'] = True
if 'configure' in cmds:
cmds.pop(0)
results['commands'] = cmds
module.exit_json(**results)
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | null | null | null |
docs/update_req_for_rtd.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""
Whenever the requirements in ../setup.json are updated, run
also this script to update the requirements for Read the Docs.
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import json
import click
@click.command()
@click.option('--pre-commit', is_flag=True)
def update_req_for_rtd(pre_commit):
"""Update the separate requirements file for Read the Docs"""
docs_dir = os.path.abspath(os.path.dirname(__file__))
root_dir = os.path.join(docs_dir, os.pardir)
with open(os.path.join(root_dir, 'setup.json'), 'r') as info:
setup_json = json.load(info)
extras = setup_json['extras_require']
reqs = set(extras['testing'] + extras['docs'] + extras['rest'] + extras['atomic_tools'] +
# To avoid that it requires also the postgres libraries
[p for p in setup_json['install_requires'] if not p.startswith('psycopg2')])
reqs_str = "\n".join(sorted(reqs))
basename = 'requirements_for_rtd.txt'
# pylint: disable=bad-continuation
with open(os.path.join(docs_dir, basename), 'w') as reqs_file:
reqs_file.write(reqs_str)
click.echo("File '{}' written.".format(basename))
if pre_commit:
msg = 'Some requirements for Read the Docs have changed, {}'
local_help = 'please add the changes and commit again'
travis_help = 'please run aiida/docs/update_req_for_rtd.py locally and commit the changes it makes'
help_msg = msg.format(travis_help if os.environ.get('TRAVIS') else local_help)
click.echo(help_msg, err=True)
if __name__ == '__main__':
update_req_for_rtd() # pylint: disable=no-value-for-parameter
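# Illustrative invocations (path follows the hint printed by the pre-commit message above):
#   python docs/update_req_for_rtd.py              # rewrite requirements_for_rtd.txt
#   python docs/update_req_for_rtd.py --pre-commit # also print the "please commit" hint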
| [] | [] | [
"TRAVIS"
] | [] | ["TRAVIS"] | python | 1 | 0 | |
integration/env_test.go | package integration_test
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"testing"
"time"
"github.com/cenkalti/backoff"
"github.com/goccy/go-yaml"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
starportconf "github.com/tendermint/starport/starport/chainconf"
"github.com/tendermint/starport/starport/pkg/availableport"
"github.com/tendermint/starport/starport/pkg/cmdrunner"
"github.com/tendermint/starport/starport/pkg/cmdrunner/step"
"github.com/tendermint/starport/starport/pkg/gocmd"
"github.com/tendermint/starport/starport/pkg/httpstatuschecker"
"github.com/tendermint/starport/starport/pkg/xurl"
)
const (
serveTimeout = time.Minute * 15
)
var isCI, _ = strconv.ParseBool(os.Getenv("CI"))
// env provides an isolated testing environment and what's needed to
// make it possible.
type env struct {
t *testing.T
ctx context.Context
}
// env creates a new testing environment.
func newEnv(t *testing.T) env {
ctx, cancel := context.WithCancel(context.Background())
e := env{
t: t,
ctx: ctx,
}
t.Cleanup(cancel)
return e
}
// Ctx returns parent context for the test suite to use for cancelations.
func (e env) Ctx() context.Context {
return e.ctx
}
type execOptions struct {
ctx context.Context
shouldErr, shouldRetry bool
stdout, stderr io.Writer
}
type execOption func(*execOptions)
// ExecShouldError sets the expectation that the command's execution ends in failure.
func ExecShouldError() execOption {
return func(o *execOptions) {
o.shouldErr = true
}
}
// ExecCtx sets cancelation context for the execution.
func ExecCtx(ctx context.Context) execOption {
return func(o *execOptions) {
o.ctx = ctx
}
}
// ExecStdout captures stdout of an execution.
func ExecStdout(w io.Writer) execOption {
return func(o *execOptions) {
o.stdout = w
}
}
// ExecStderr captures stderr of an execution.
func ExecStderr(w io.Writer) execOption {
return func(o *execOptions) {
o.stderr = w
}
}
// ExecRetry retries the command until it succeeds or the context is canceled.
func ExecRetry() execOption {
return func(o *execOptions) {
o.shouldRetry = true
}
}
// Exec executes a command step with options, where msg describes the expectation from the test.
// Unless called with Must(), Exec() will not exit the test runtime on failure.
func (e env) Exec(msg string, steps step.Steps, options ...execOption) (ok bool) {
opts := &execOptions{
ctx: e.ctx,
stdout: ioutil.Discard,
stderr: ioutil.Discard,
}
for _, o := range options {
o(opts)
}
var (
stdout = &bytes.Buffer{}
stderr = &bytes.Buffer{}
)
copts := []cmdrunner.Option{
cmdrunner.DefaultStdout(io.MultiWriter(stdout, opts.stdout)),
cmdrunner.DefaultStderr(io.MultiWriter(stderr, opts.stderr)),
}
if isCI {
copts = append(copts, cmdrunner.EndSignal(os.Kill))
}
err := cmdrunner.
New(copts...).
Run(opts.ctx, steps...)
if err == context.Canceled {
err = nil
}
if err != nil {
fmt.Fprintln(os.Stderr, err)
if opts.shouldRetry && opts.ctx.Err() == nil {
time.Sleep(time.Second)
return e.Exec(msg, steps, options...)
}
}
if err != nil {
msg = fmt.Sprintf("%s\n\nLogs:\n\n%s\n\nError Logs:\n\n%s\n",
msg,
stdout.String(),
stderr.String())
}
if opts.shouldErr {
return assert.Error(e.t, err, msg)
}
return assert.NoError(e.t, err, msg)
}
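// exampleExec is an illustrative sketch (not part of the original suite) of how the
// functional options above compose with Exec; the "starport version" step is a
// hypothetical command chosen only for demonstration.
func exampleExec(e env) bool { //nolint:unused
	return e.Exec("starport prints its version",
		step.NewSteps(step.New(
			step.Exec("starport", "version"),
		)),
		ExecRetry(),
		ExecStdout(os.Stdout),
	)
}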
const (
Stargate = "stargate"
)
// Scaffold scaffolds an app to a unique appPath and returns it.
func (e env) Scaffold(appName string) (appPath string) {
root := e.TmpDir()
e.Exec("scaffold an app",
step.NewSteps(step.New(
step.Exec(
"starport",
"app",
fmt.Sprintf("github.com/test/%s", appName),
),
step.Workdir(root),
)),
)
// Cleanup the home directory of the app
e.t.Cleanup(func() {
os.RemoveAll(filepath.Join(e.Home(), fmt.Sprintf(".%s", appName)))
})
return filepath.Join(root, appName)
}
// Serve serves an application that lives under path with options, where msg describes the
// expectation from the serving action.
// Unless called with Must(), Serve() will not exit the test runtime on failure.
func (e env) Serve(msg, path, home, configPath string, options ...execOption) (ok bool) {
serveCommand := []string{
"serve",
"-v",
}
if home != "" {
serveCommand = append(serveCommand, "--home", home)
}
if configPath != "" {
serveCommand = append(serveCommand, "--config", configPath)
}
return e.Exec(msg,
step.NewSteps(step.New(
step.Exec("starport", serveCommand...),
step.Workdir(path),
)),
options...,
)
}
// EnsureAppIsSteady ensures that the app living at the path compiles and its tests
// pass.
func (e env) EnsureAppIsSteady(appPath string) {
e.Exec("make sure app is steady",
step.NewSteps(step.New(
step.Exec(gocmd.Name(), "test", "./..."),
step.Workdir(appPath),
)),
)
}
// IsAppServed checks that the app is served properly and its servers have started
// listening before ctx is canceled.
func (e env) IsAppServed(ctx context.Context, host starportconf.Host) error {
checkAlive := func() error {
ok, err := httpstatuschecker.Check(ctx, xurl.HTTP(host.API)+"/node_info")
if err == nil && !ok {
err = errors.New("app is not online")
}
return err
}
return backoff.Retry(checkAlive, backoff.WithContext(backoff.NewConstantBackOff(time.Second), ctx))
}
// TmpDir creates a new temporary directory.
func (e env) TmpDir() (path string) {
path, err := ioutil.TempDir("", "integration")
require.NoError(e.t, err, "create a tmp dir")
e.t.Cleanup(func() { os.RemoveAll(path) })
return path
}
// RandomizeServerPorts randomizes server ports for the app at path, updates
// its config.yml and returns new values.
func (e env) RandomizeServerPorts(path string, configFile string) starportconf.Host {
if configFile == "" {
configFile = "config.yml"
}
// generate random server ports and servers list.
ports, err := availableport.Find(5)
require.NoError(e.t, err)
genAddr := func(port int) string {
return fmt.Sprintf("localhost:%d", port)
}
servers := starportconf.Host{
RPC: genAddr(ports[0]),
P2P: genAddr(ports[1]),
Prof: genAddr(ports[2]),
GRPC: genAddr(ports[3]),
API: genAddr(ports[4]),
}
// update config.yml with the generated servers list.
configyml, err := os.OpenFile(filepath.Join(path, configFile), os.O_RDWR|os.O_CREATE, 0755)
require.NoError(e.t, err)
defer configyml.Close()
var conf starportconf.Config
require.NoError(e.t, yaml.NewDecoder(configyml).Decode(&conf))
conf.Host = servers
require.NoError(e.t, configyml.Truncate(0))
_, err = configyml.Seek(0, 0)
require.NoError(e.t, err)
require.NoError(e.t, yaml.NewEncoder(configyml).Encode(conf))
return servers
}
// SetRandomHomeConfig sets generated temporary directories as the home directories in the blockchain config file.
func (e env) SetRandomHomeConfig(path string, configFile string) {
if configFile == "" {
configFile = "config.yml"
}
// update config.yml with the generated temporary directories
configyml, err := os.OpenFile(filepath.Join(path, configFile), os.O_RDWR|os.O_CREATE, 0755)
require.NoError(e.t, err)
defer configyml.Close()
var conf starportconf.Config
require.NoError(e.t, yaml.NewDecoder(configyml).Decode(&conf))
conf.Init.Home = e.TmpDir()
require.NoError(e.t, configyml.Truncate(0))
_, err = configyml.Seek(0, 0)
require.NoError(e.t, err)
require.NoError(e.t, yaml.NewEncoder(configyml).Encode(conf))
}
// Must fails the test immediately if not ok.
// t.Fail() needs to be called for the failing tests before running Must().
func (e env) Must(ok bool) {
if !ok {
e.t.FailNow()
}
}
// Home returns user's home dir.
func (e env) Home() string {
home, err := os.UserHomeDir()
require.NoError(e.t, err)
return home
}
// AppdHome returns appd's home dir.
func (e env) AppdHome(name string) string {
return filepath.Join(e.Home(), fmt.Sprintf(".%s", name))
}
| [
"\"CI\""
] | [] | [
"CI"
] | [] | ["CI"] | go | 1 | 0 | |
webimg.go | package main
import (
"context"
"encoding/base64"
"io/ioutil"
"log"
"net"
"net/http"
"os"
"strings"
"time"
"golang.org/x/sync/syncmap"
"github.com/PuerkitoBio/goquery"
uuid "github.com/nu7hatch/gouuid"
"github.com/phayes/freeport"
"github.com/vjeantet/chromedp"
"github.com/vjeantet/chromedp/runner"
)
// var cm *chromedp.CDP
// var cs.ctx context.Context
type ChromeShot struct {
cdp *chromedp.CDP
ctx context.Context
cancel context.CancelFunc
Logf func(string, ...interface{})
}
func NewChromeShot(showBrowser bool, logf func(string, ...interface{})) (*ChromeShot, error) {
// var
// create context
ctx, cancel := context.WithCancel(context.Background())
// defer cancel()
// create chrome instance
// cm, err = chromedp.New(cs.ctx, chromedp.WithLogf(l.Printf))
// if err != nil {
// log.Fatal(err)
// }
// Start Webserver
http.Handle("/public/", http.StripPrefix("/public/", http.FileServer(http.Dir(os.Getenv("BF_COMMONS_PATH")+string(os.PathSeparator)+"public"))))
http.HandleFunc("/", helloWorld)
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
panic(err)
}
baseURL = "http://" + listener.Addr().String()
logf("baseURL=%s", baseURL)
go http.Serve(listener, nil)
port, err := freeport.GetFreePort()
if err != nil {
logf(err.Error())
}
cdp, err := chromedp.New(ctx, chromedp.WithRunnerOptions(
runner.Flag("headless", !showBrowser),
runner.Flag("disable-gpu", true),
runner.Flag("no-first-run", true),
runner.Flag("no-default-browser-check", true),
runner.Port(port),
), chromedp.WithLog(log.New(ioutil.Discard, "", 0).Printf))
// ), chromedp.WithLog(log.New(os.Stderr, "", 0).Printf))
if err != nil {
return nil, err
}
return &ChromeShot{
cdp: cdp,
ctx: ctx,
cancel: cancel,
Logf: logf,
}, nil
}
var htmlMap = syncmap.Map{}
var baseURL = ""
func helloWorld(w http.ResponseWriter, r *http.Request) {
hUrl := strings.ToLower(r.URL.Path)
hUrl = strings.TrimLeft(hUrl, "/")
if htmlContent, ok := htmlMap.Load(hUrl); ok {
w.Write(htmlContent.([]byte))
} else {
// cs.Logf("not found - ", hUrl)
w.WriteHeader(404)
w.Write([]byte("Not Found !!!"))
}
}
func (c *ChromeShot) revokeUrl(uid string) {
htmlMap.Delete(uid)
}
func (c *ChromeShot) urlForHtmlContent(content string) (string, string, error) {
// Set URL
uid, err := uuid.NewV4()
if err != nil {
return "", "", err
}
// Set Content for URL
htmlMap.Store(uid.String(), []byte(content))
return baseURL + "/" + uid.String(), uid.String(), nil
}
func (cs *ChromeShot) EmbedImageForDomElements(htmlContent string, selectors []string) (string, error) {
urlStr, uid, err := cs.urlForHtmlContent(htmlContent)
if err != nil {
return "", err
}
cs.Logf("urlStr--> %s", urlStr)
// Check ID presence
doc, err := goquery.NewDocument(urlStr)
if err != nil {
return "", err
}
idsOk := []string{}
for i, id := range selectors {
sel := doc.Find(id)
if len(sel.Nodes) != 1 {
cs.Logf("Unique HTML element [%s] not found in document", id)
} else {
idsOk = append(idsOk, selectors[i])
}
}
cs.cdp.Run(cs.ctx, chromedp.Navigate(urlStr))
cs.cdp.Run(cs.ctx, chromedp.Sleep(2*time.Second))
datas := map[string][]byte{}
cs.Logf("selectors -->%s", idsOk)
for k, id := range idsOk {
cs.Logf("k,selector-->%d,%s", k, id)
var buf []byte
cctx, _ := context.WithTimeout(context.Background(), time.Second*5)
if err := cs.cdp.Run(cctx, chromedp.WaitVisible(id, chromedp.BySearch)); err != nil {
cs.Logf("err-->", err)
continue
}
cs.cdp.Run(cs.ctx, chromedp.Screenshot(id, &buf, chromedp.NodeVisible, chromedp.BySearch))
// cs.cdp.Run(cs.ctx, chromedp.CaptureScreenshot(&buf))
// cs.Logf(string(buf))
datas[id] = buf
}
cs.Logf("--> END")
// doc, err := goquery.NewDocument(urlStr)
if err != nil {
return "", err
}
for i, v := range datas {
// Write To Disk
//ioutil.WriteFile(slugify.Slugify(i)+".png", v, 0644)
// cs.Logf("i=", i)
// Replace HTML
buf64 := base64.StdEncoding.EncodeToString(v)
sel := doc.Find(i)
for k := range sel.Nodes {
single := sel.Eq(k)
single.ReplaceWithHtml(`<img src="data:image/png;base64,` + buf64 + `" />`)
}
}
// cs.Logf("final")
// remove url content form memory
cs.revokeUrl(uid)
return doc.Html()
}
func (cs *ChromeShot) Stop() error {
cs.cdp.Kill()
return nil
// cs.cancel()
// // shutdown chrome
// err := cs.cdp.Shutdown(cs.ctx)
// if err != nil {
// return err
// }
// don't wait for chrome to finish
// cs.cdp.Kill()
// return nil
}
| [
"\"BF_COMMONS_PATH\""
] | [] | [
"BF_COMMONS_PATH"
] | [] | ["BF_COMMONS_PATH"] | go | 1 | 0 | |
chain/sync.go | package chain
import (
"bytes"
"context"
"errors"
"fmt"
"os"
"sort"
"strings"
"sync"
"time"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/Gurpartap/async"
"github.com/hashicorp/go-multierror"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
logging "github.com/ipfs/go-log/v2"
"github.com/libp2p/go-libp2p-core/connmgr"
"github.com/libp2p/go-libp2p-core/peer"
cbg "github.com/whyrusleeping/cbor-gen"
"github.com/whyrusleeping/pubsub"
"go.opencensus.io/stats"
"go.opencensus.io/trace"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
ffi "github.com/filecoin-project/filecoin-ffi"
// named msgarray here to make it clear that these are the types used by
// messages, regardless of specs-actors version.
blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
"github.com/filecoin-project/lotus/api"
bstore "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/beacon"
"github.com/filecoin-project/lotus/chain/exchange"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/lib/sigs"
"github.com/filecoin-project/lotus/metrics"
)
// Blocks that are more than MaxHeightDrift epochs above
// the theoretical max height based on systime are quickly rejected
const MaxHeightDrift = 5
var (
// LocalIncoming is the _local_ pubsub (unrelated to libp2p pubsub) topic
// where the Syncer publishes candidate chain heads to be synced.
LocalIncoming = "incoming"
log = logging.Logger("chain")
concurrentSyncRequests = exchange.ShufflePeersPrefix
syncRequestBatchSize = 8
syncRequestRetries = 5
)
// Syncer is in charge of running the chain synchronization logic. As such, it
// is tasked with these functions, amongst others:
//
// * Fast-forwards the chain as it learns of new TipSets from the network via
// the SyncManager.
// * Applies the fork choice rule to select the correct side when confronted
// with a fork in the network.
// * Requests block headers and messages from other peers when not available
// in our BlockStore.
// * Tracks blocks marked as bad in a cache.
// * Keeps the BlockStore and ChainStore consistent with our view of the world,
// the latter of which in turn informs other components when a reorg has been
// committed.
//
// The Syncer does not run workers itself. It's mainly concerned with
// ensuring a consistent state of chain consensus. The reactive and network-
// interfacing processes are part of other components, such as the SyncManager
// (which owns the sync scheduler and sync workers), ChainExchange, the HELLO
// protocol, and the gossipsub block propagation layer.
//
// {hint/concept} The fork-choice rule as it currently stands is: "pick the
// chain with the heaviest weight, so long as it hasn’t deviated one finality
// threshold from our head (900 epochs, parameter determined by spec-actors)".
type Syncer struct {
// The interface for accessing and putting tipsets into local storage
store *store.ChainStore
// handle to the random beacon for verification
beacon beacon.Schedule
// the state manager handles making state queries
sm *stmgr.StateManager
// The known Genesis tipset
Genesis *types.TipSet
// TipSets known to be invalid
bad *BadBlockCache
// handle to the block sync service
Exchange exchange.Client
self peer.ID
syncmgr SyncManager
connmgr connmgr.ConnManager
incoming *pubsub.PubSub
receiptTracker *blockReceiptTracker
verifier ffiwrapper.Verifier
tickerCtxCancel context.CancelFunc
ds dtypes.MetadataDS
}
type SyncManagerCtor func(syncFn SyncFunc) SyncManager
// NewSyncer creates a new Syncer object.
func NewSyncer(ds dtypes.MetadataDS, sm *stmgr.StateManager, exchange exchange.Client, syncMgrCtor SyncManagerCtor, connmgr connmgr.ConnManager, self peer.ID, beacon beacon.Schedule, verifier ffiwrapper.Verifier) (*Syncer, error) {
gen, err := sm.ChainStore().GetGenesis()
if err != nil {
return nil, xerrors.Errorf("getting genesis block: %w", err)
}
gent, err := types.NewTipSet([]*types.BlockHeader{gen})
if err != nil {
return nil, err
}
s := &Syncer{
ds: ds,
beacon: beacon,
bad: NewBadBlockCache(),
Genesis: gent,
Exchange: exchange,
store: sm.ChainStore(),
sm: sm,
self: self,
receiptTracker: newBlockReceiptTracker(),
connmgr: connmgr,
verifier: verifier,
incoming: pubsub.New(50),
}
if build.InsecurePoStValidation {
log.Warn("*********************************************************************************************")
log.Warn(" [INSECURE-POST-VALIDATION] Insecure test validation is enabled. If you see this outside of a test, it is a severe bug! ")
log.Warn("*********************************************************************************************")
}
s.syncmgr = syncMgrCtor(s.Sync)
return s, nil
}
func (syncer *Syncer) Start() {
tickerCtx, tickerCtxCancel := context.WithCancel(context.Background())
syncer.syncmgr.Start()
syncer.tickerCtxCancel = tickerCtxCancel
go syncer.runMetricsTicker(tickerCtx)
}
func (syncer *Syncer) runMetricsTicker(tickerCtx context.Context) {
genesisTime := time.Unix(int64(syncer.Genesis.MinTimestamp()), 0)
ticker := build.Clock.Ticker(time.Duration(build.BlockDelaySecs) * time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C:
sinceGenesis := build.Clock.Now().Sub(genesisTime)
expectedHeight := int64(sinceGenesis.Seconds()) / int64(build.BlockDelaySecs)
stats.Record(tickerCtx, metrics.ChainNodeHeightExpected.M(expectedHeight))
case <-tickerCtx.Done():
return
}
}
}
func (syncer *Syncer) Stop() {
syncer.syncmgr.Stop()
syncer.tickerCtxCancel()
}
// InformNewHead informs the syncer about a new potential tipset
// This should be called when connecting to new peers, and additionally
// when receiving new blocks from the network
func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool {
defer func() {
if err := recover(); err != nil {
log.Errorf("panic in InformNewHead: ", err)
}
}()
ctx := context.Background()
if fts == nil {
log.Errorf("got nil tipset in InformNewHead")
return false
}
if syncer.IsEpochBeyondCurrMax(fts.TipSet().Height()) {
log.Errorf("Received block with impossibly large height %d", fts.TipSet().Height())
return false
}
for _, b := range fts.Blocks {
if reason, ok := syncer.bad.Has(b.Cid()); ok {
log.Warnf("InformNewHead called on block marked as bad: %s (reason: %s)", b.Cid(), reason)
return false
}
if err := syncer.ValidateMsgMeta(b); err != nil {
log.Warnf("invalid block received: %s", err)
return false
}
}
syncer.incoming.Pub(fts.TipSet().Blocks(), LocalIncoming)
// TODO: IMPORTANT(GARBAGE) this needs to be put in the 'temporary' side of
// the blockstore
if err := syncer.store.PersistBlockHeaders(fts.TipSet().Blocks()...); err != nil {
log.Warn("failed to persist incoming block header: ", err)
return false
}
syncer.Exchange.AddPeer(from)
hts := syncer.store.GetHeaviestTipSet()
bestPweight := hts.ParentWeight()
targetWeight := fts.TipSet().ParentWeight()
if targetWeight.LessThan(bestPweight) {
var miners []string
for _, blk := range fts.TipSet().Blocks() {
miners = append(miners, blk.Miner.String())
}
log.Debugw("incoming tipset does not appear to be better than our best chain, ignoring for now", "miners", miners, "bestPweight", bestPweight, "bestTS", hts.Cids(), "incomingWeight", targetWeight, "incomingTS", fts.TipSet().Cids())
return false
}
syncer.syncmgr.SetPeerHead(ctx, from, fts.TipSet())
return true
}
// IncomingBlocks spawns a goroutine that subscribes to the local eventbus to
// receive new block headers as they arrive from the network, and sends them to
// the returned channel.
//
// These blocks have not necessarily been incorporated to our view of the chain.
func (syncer *Syncer) IncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) {
sub := syncer.incoming.Sub(LocalIncoming)
out := make(chan *types.BlockHeader, 10)
go func() {
defer syncer.incoming.Unsub(sub, LocalIncoming)
for {
select {
case r := <-sub:
hs := r.([]*types.BlockHeader)
for _, h := range hs {
select {
case out <- h:
case <-ctx.Done():
return
}
}
case <-ctx.Done():
return
}
}
}()
return out, nil
}
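// exampleWatchIncoming is an illustrative usage sketch added for clarity (it
// is not part of the original Lotus code): a typical consumer of
// IncomingBlocks receives candidate headers from the returned channel and
// watches the context itself, since the channel is not closed on cancellation.
func exampleWatchIncoming(ctx context.Context, syncer *Syncer) error {
headers, err := syncer.IncomingBlocks(ctx)
if err != nil {
return err
}
for {
select {
case h := <-headers:
log.Infow("incoming block header", "height", h.Height, "cid", h.Cid())
case <-ctx.Done():
return ctx.Err()
}
}
}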
// ValidateMsgMeta performs structural and content hash validation of the
// messages within this block. If validation passes, it stores the messages in
// the underlying IPLD block store.
func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error {
if msgc := len(fblk.BlsMessages) + len(fblk.SecpkMessages); msgc > build.BlockMessageLimit {
return xerrors.Errorf("block %s has too many messages (%d)", fblk.Header.Cid(), msgc)
}
// TODO: IMPORTANT(GARBAGE). These message puts and the msgmeta
// computation need to go into the 'temporary' side of the blockstore when
// we implement that
// We use a temporary bstore here to avoid writing intermediate pieces
// into the blockstore.
blockstore := bstore.NewMemory()
cst := cbor.NewCborStore(blockstore)
var bcids, scids []cid.Cid
for _, m := range fblk.BlsMessages {
c, err := store.PutMessage(blockstore, m)
if err != nil {
return xerrors.Errorf("putting bls message to blockstore after msgmeta computation: %w", err)
}
bcids = append(bcids, c)
}
for _, m := range fblk.SecpkMessages {
c, err := store.PutMessage(blockstore, m)
if err != nil {
return xerrors.Errorf("putting bls message to blockstore after msgmeta computation: %w", err)
}
scids = append(scids, c)
}
// Compute the root CID of the combined message trie.
smroot, err := computeMsgMeta(cst, bcids, scids)
if err != nil {
return xerrors.Errorf("validating msgmeta, compute failed: %w", err)
}
// Check that the message trie root matches with what's in the block.
if fblk.Header.Messages != smroot {
return xerrors.Errorf("messages in full block did not match msgmeta root in header (%s != %s)", fblk.Header.Messages, smroot)
}
// Finally, flush.
return vm.Copy(context.TODO(), blockstore, syncer.store.ChainBlockstore(), smroot)
}
func (syncer *Syncer) LocalPeer() peer.ID {
return syncer.self
}
func (syncer *Syncer) ChainStore() *store.ChainStore {
return syncer.store
}
func (syncer *Syncer) InformNewBlock(from peer.ID, blk *types.FullBlock) bool {
// TODO: search for other blocks that could form a tipset with this block
// and then send that tipset to InformNewHead
fts := &store.FullTipSet{Blocks: []*types.FullBlock{blk}}
return syncer.InformNewHead(from, fts)
}
func copyBlockstore(ctx context.Context, from, to bstore.Blockstore) error {
ctx, span := trace.StartSpan(ctx, "copyBlockstore")
defer span.End()
cids, err := from.AllKeysChan(ctx)
if err != nil {
return err
}
// TODO: should probably expose better methods on the blockstore for this operation
var blks []blocks.Block
for c := range cids {
b, err := from.Get(c)
if err != nil {
return err
}
blks = append(blks, b)
}
if err := to.PutMany(blks); err != nil {
return err
}
return nil
}
// TODO: this function effectively accepts unchecked input from the network,
// either validate it here, or ensure that its validated elsewhere (maybe make
// sure the blocksync code checks it?)
// maybe this code should actually live in blocksync??
func zipTipSetAndMessages(bs cbor.IpldStore, ts *types.TipSet, allbmsgs []*types.Message, allsmsgs []*types.SignedMessage, bmi, smi [][]uint64) (*store.FullTipSet, error) {
if len(ts.Blocks()) != len(smi) || len(ts.Blocks()) != len(bmi) {
return nil, fmt.Errorf("msgincl length didnt match tipset size")
}
fts := &store.FullTipSet{}
for bi, b := range ts.Blocks() {
if msgc := len(bmi[bi]) + len(smi[bi]); msgc > build.BlockMessageLimit {
return nil, fmt.Errorf("block %q has too many messages (%d)", b.Cid(), msgc)
}
var smsgs []*types.SignedMessage
var smsgCids []cid.Cid
for _, m := range smi[bi] {
smsgs = append(smsgs, allsmsgs[m])
smsgCids = append(smsgCids, allsmsgs[m].Cid())
}
var bmsgs []*types.Message
var bmsgCids []cid.Cid
for _, m := range bmi[bi] {
bmsgs = append(bmsgs, allbmsgs[m])
bmsgCids = append(bmsgCids, allbmsgs[m].Cid())
}
mrcid, err := computeMsgMeta(bs, bmsgCids, smsgCids)
if err != nil {
return nil, err
}
if b.Messages != mrcid {
return nil, fmt.Errorf("messages didnt match message root in header for ts %s", ts.Key())
}
fb := &types.FullBlock{
Header: b,
BlsMessages: bmsgs,
SecpkMessages: smsgs,
}
fts.Blocks = append(fts.Blocks, fb)
}
return fts, nil
}
// computeMsgMeta computes the root CID of the combined arrays of message CIDs
// of both types (BLS and Secpk).
func computeMsgMeta(bs cbor.IpldStore, bmsgCids, smsgCids []cid.Cid) (cid.Cid, error) {
// block headers use adt0
store := blockadt.WrapStore(context.TODO(), bs)
bmArr := blockadt.MakeEmptyArray(store)
smArr := blockadt.MakeEmptyArray(store)
for i, m := range bmsgCids {
c := cbg.CborCid(m)
if err := bmArr.Set(uint64(i), &c); err != nil {
return cid.Undef, err
}
}
for i, m := range smsgCids {
c := cbg.CborCid(m)
if err := smArr.Set(uint64(i), &c); err != nil {
return cid.Undef, err
}
}
bmroot, err := bmArr.Root()
if err != nil {
return cid.Undef, err
}
smroot, err := smArr.Root()
if err != nil {
return cid.Undef, err
}
mrcid, err := store.Put(store.Context(), &types.MsgMeta{
BlsMessages: bmroot,
SecpkMessages: smroot,
})
if err != nil {
return cid.Undef, xerrors.Errorf("failed to put msgmeta: %w", err)
}
return mrcid, nil
}
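// exampleEmptyMsgMeta is an illustrative sketch added for clarity (not part
// of the original Lotus code): it shows that computeMsgMeta only needs an
// IpldStore and is deterministic, by deriving the msgmeta root for a block
// that carries no BLS and no secpk messages. An in-memory blockstore is
// enough, mirroring how ValidateMsgMeta uses one above.
func exampleEmptyMsgMeta() (cid.Cid, error) {
cst := cbor.NewCborStore(bstore.NewMemory())
return computeMsgMeta(cst, nil, nil)
}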
// FetchTipSet tries to load the provided tipset from the store, and falls back
// to the network (client) by querying the supplied peer if not found
// locally.
//
// {hint/usage} This is used from the HELLO protocol, to fetch the greeting
// peer's heaviest tipset if we don't have it.
func (syncer *Syncer) FetchTipSet(ctx context.Context, p peer.ID, tsk types.TipSetKey) (*store.FullTipSet, error) {
if fts, err := syncer.tryLoadFullTipSet(tsk); err == nil {
return fts, nil
}
// fall back to the network.
return syncer.Exchange.GetFullTipSet(ctx, p, tsk)
}
// tryLoadFullTipSet queries the tipset in the ChainStore, and returns a full
// representation of it containing FullBlocks. If any of the blocks or their
// messages are not found locally, it errors with blockstore.ErrNotFound.
func (syncer *Syncer) tryLoadFullTipSet(tsk types.TipSetKey) (*store.FullTipSet, error) {
ts, err := syncer.store.LoadTipSet(tsk)
if err != nil {
return nil, err
}
fts := &store.FullTipSet{}
for _, b := range ts.Blocks() {
bmsgs, smsgs, err := syncer.store.MessagesForBlock(b)
if err != nil {
return nil, err
}
fb := &types.FullBlock{
Header: b,
BlsMessages: bmsgs,
SecpkMessages: smsgs,
}
fts.Blocks = append(fts.Blocks, fb)
}
return fts, nil
}
// Sync tries to advance our view of the chain to `maybeHead`. It does nothing
// if our current head is heavier than the requested tipset, or if we're already
// at the requested head, or if the head is the genesis.
//
// Most of the heavy-lifting logic happens in syncer#collectChain. Refer to the
// godocs on that method for a more detailed view.
func (syncer *Syncer) Sync(ctx context.Context, maybeHead *types.TipSet) error {
ctx, span := trace.StartSpan(ctx, "chain.Sync")
defer span.End()
if span.IsRecordingEvents() {
span.AddAttributes(
trace.StringAttribute("tipset", fmt.Sprint(maybeHead.Cids())),
trace.Int64Attribute("height", int64(maybeHead.Height())),
)
}
hts := syncer.store.GetHeaviestTipSet()
if hts.ParentWeight().GreaterThan(maybeHead.ParentWeight()) {
return nil
}
if syncer.Genesis.Equals(maybeHead) || hts.Equals(maybeHead) {
return nil
}
if err := syncer.collectChain(ctx, maybeHead, hts, false); err != nil {
span.AddAttributes(trace.StringAttribute("col_error", err.Error()))
span.SetStatus(trace.Status{
Code: 13,
Message: err.Error(),
})
return xerrors.Errorf("collectChain failed: %w", err)
}
// At this point we have accepted and synced to the new `maybeHead`
// (`StageSyncComplete`).
if err := syncer.store.PutTipSet(ctx, maybeHead); err != nil {
span.AddAttributes(trace.StringAttribute("put_error", err.Error()))
span.SetStatus(trace.Status{
Code: 13,
Message: err.Error(),
})
return xerrors.Errorf("failed to put synced tipset to chainstore: %w", err)
}
peers := syncer.receiptTracker.GetPeers(maybeHead)
if len(peers) > 0 {
syncer.connmgr.TagPeer(peers[0], "new-block", 40)
for _, p := range peers[1:] {
syncer.connmgr.TagPeer(p, "new-block", 25)
}
}
return nil
}
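// exampleHeavierOrEqual is an illustrative helper added for clarity (not part
// of the original Lotus code): it makes the weight comparison behind the
// fork-choice shortcut in Sync and InformNewHead explicit -- a candidate
// tipset is only worth syncing if its parent weight is not below that of our
// current heaviest tipset.
func exampleHeavierOrEqual(candidate, current *types.TipSet) bool {
return !candidate.ParentWeight().LessThan(current.ParentWeight())
}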
func isPermanent(err error) bool {
return !errors.Is(err, ErrTemporal)
}
func (syncer *Syncer) ValidateTipSet(ctx context.Context, fts *store.FullTipSet, useCache bool) error {
ctx, span := trace.StartSpan(ctx, "validateTipSet")
defer span.End()
span.AddAttributes(trace.Int64Attribute("height", int64(fts.TipSet().Height())))
ts := fts.TipSet()
if ts.Equals(syncer.Genesis) {
return nil
}
var futures []async.ErrorFuture
for _, b := range fts.Blocks {
b := b // rebind to a scoped variable
futures = append(futures, async.Err(func() error {
if err := syncer.ValidateBlock(ctx, b, useCache); err != nil {
if isPermanent(err) {
syncer.bad.Add(b.Cid(), NewBadBlockReason([]cid.Cid{b.Cid()}, err.Error()))
}
return xerrors.Errorf("validating block %s: %w", b.Cid(), err)
}
if err := syncer.sm.ChainStore().AddToTipSetTracker(b.Header); err != nil {
return xerrors.Errorf("failed to add validated header to tipset tracker: %w", err)
}
return nil
}))
}
for _, f := range futures {
if err := f.AwaitContext(ctx); err != nil {
return err
}
}
return nil
}
func (syncer *Syncer) minerIsValid(ctx context.Context, maddr address.Address, baseTs *types.TipSet) error {
act, err := syncer.sm.LoadActor(ctx, power.Address, baseTs)
if err != nil {
return xerrors.Errorf("failed to load power actor: %w", err)
}
powState, err := power.Load(syncer.store.ActorStore(ctx), act)
if err != nil {
return xerrors.Errorf("failed to load power actor state: %w", err)
}
_, exist, err := powState.MinerPower(maddr)
if err != nil {
return xerrors.Errorf("failed to look up miner's claim: %w", err)
}
if !exist {
return xerrors.New("miner isn't valid")
}
return nil
}
var ErrTemporal = errors.New("temporal error")
func blockSanityChecks(h *types.BlockHeader) error {
if h.ElectionProof == nil {
return xerrors.Errorf("block cannot have nil election proof")
}
if h.Ticket == nil {
return xerrors.Errorf("block cannot have nil ticket")
}
if h.BlockSig == nil {
return xerrors.Errorf("block had nil signature")
}
if h.BLSAggregate == nil {
return xerrors.Errorf("block had nil bls aggregate signature")
}
if h.Miner.Protocol() != address.ID {
return xerrors.Errorf("block had non-ID miner address")
}
return nil
}
// ValidateBlock should match up with 'Semantical Validation' in validation.md in the spec
func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, useCache bool) (err error) {
defer func() {
// b.Cid() could panic for empty blocks that are used in tests.
if rerr := recover(); rerr != nil {
err = xerrors.Errorf("validate block panic: %w", rerr)
return
}
}()
if useCache {
isValidated, err := syncer.store.IsBlockValidated(ctx, b.Cid())
if err != nil {
return xerrors.Errorf("check block validation cache %s: %w", b.Cid(), err)
}
if isValidated {
return nil
}
}
validationStart := build.Clock.Now()
defer func() {
stats.Record(ctx, metrics.BlockValidationDurationMilliseconds.M(metrics.SinceInMilliseconds(validationStart)))
log.Infow("block validation", "took", time.Since(validationStart), "height", b.Header.Height, "age", time.Since(time.Unix(int64(b.Header.Timestamp), 0)))
}()
ctx, span := trace.StartSpan(ctx, "validateBlock")
defer span.End()
if err := blockSanityChecks(b.Header); err != nil {
return xerrors.Errorf("incoming header failed basic sanity checks: %w", err)
}
h := b.Header
baseTs, err := syncer.store.LoadTipSet(types.NewTipSetKey(h.Parents...))
if err != nil {
return xerrors.Errorf("load parent tipset failed (%s): %w", h.Parents, err)
}
winPoStNv := syncer.sm.GetNtwkVersion(ctx, baseTs.Height())
lbts, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, syncer.sm, baseTs, h.Height)
if err != nil {
return xerrors.Errorf("failed to get lookback tipset for block: %w", err)
}
prevBeacon, err := syncer.store.GetLatestBeaconEntry(baseTs)
if err != nil {
return xerrors.Errorf("failed to get latest beacon entry: %w", err)
}
// fast checks first
if h.Height <= baseTs.Height() {
return xerrors.Errorf("block height not greater than parent height: %d != %d", h.Height, baseTs.Height())
}
nulls := h.Height - (baseTs.Height() + 1)
if tgtTs := baseTs.MinTimestamp() + build.BlockDelaySecs*uint64(nulls+1); h.Timestamp != tgtTs {
return xerrors.Errorf("block has wrong timestamp: %d != %d", h.Timestamp, tgtTs)
}
now := uint64(build.Clock.Now().Unix())
if h.Timestamp > now+build.AllowableClockDriftSecs {
return xerrors.Errorf("block was from the future (now=%d, blk=%d): %w", now, h.Timestamp, ErrTemporal)
}
if h.Timestamp > now {
log.Warn("Got block from the future, but within threshold", h.Timestamp, build.Clock.Now().Unix())
}
msgsCheck := async.Err(func() error {
if b.Cid() == build.WhitelistedBlock {
return nil
}
if err := syncer.checkBlockMessages(ctx, b, baseTs); err != nil {
return xerrors.Errorf("block had invalid messages: %w", err)
}
return nil
})
minerCheck := async.Err(func() error {
if err := syncer.minerIsValid(ctx, h.Miner, baseTs); err != nil {
return xerrors.Errorf("minerIsValid failed: %w", err)
}
return nil
})
baseFeeCheck := async.Err(func() error {
baseFee, err := syncer.store.ComputeBaseFee(ctx, baseTs)
if err != nil {
return xerrors.Errorf("computing base fee: %w", err)
}
if types.BigCmp(baseFee, b.Header.ParentBaseFee) != 0 {
return xerrors.Errorf("base fee doesn't match: %s (header) != %s (computed)",
b.Header.ParentBaseFee, baseFee)
}
return nil
})
pweight, err := syncer.store.Weight(ctx, baseTs)
if err != nil {
return xerrors.Errorf("getting parent weight: %w", err)
}
if types.BigCmp(pweight, b.Header.ParentWeight) != 0 {
return xerrors.Errorf("parrent weight different: %s (header) != %s (computed)",
b.Header.ParentWeight, pweight)
}
stateRootCheck := async.Err(func() error {
stateroot, precp, err := syncer.sm.TipSetState(ctx, baseTs)
if err != nil {
return xerrors.Errorf("get tipsetstate(%d, %s) failed: %w", h.Height, h.Parents, err)
}
if stateroot != h.ParentStateRoot {
msgs, err := syncer.store.MessagesForTipset(baseTs)
if err != nil {
log.Error("failed to load messages for tipset during tipset state mismatch error: ", err)
} else {
log.Warn("Messages for tipset with mismatching state:")
for i, m := range msgs {
mm := m.VMMessage()
log.Warnf("Message[%d]: from=%s to=%s method=%d params=%x", i, mm.From, mm.To, mm.Method, mm.Params)
}
}
return xerrors.Errorf("parent state root did not match computed state (%s != %s)", stateroot, h.ParentStateRoot)
}
if precp != h.ParentMessageReceipts {
return xerrors.Errorf("parent receipts root did not match computed value (%s != %s)", precp, h.ParentMessageReceipts)
}
return nil
})
// Stuff that needs worker address
waddr, err := stmgr.GetMinerWorkerRaw(ctx, syncer.sm, lbst, h.Miner)
if err != nil {
return xerrors.Errorf("GetMinerWorkerRaw failed: %w", err)
}
winnerCheck := async.Err(func() error {
if h.ElectionProof.WinCount < 1 {
return xerrors.Errorf("block is not claiming to be a winner")
}
eligible, err := stmgr.MinerEligibleToMine(ctx, syncer.sm, h.Miner, baseTs, lbts)
if err != nil {
return xerrors.Errorf("determining if miner has min power failed: %w", err)
}
if !eligible {
return xerrors.New("block's miner is ineligible to mine")
}
rBeacon := *prevBeacon
if len(h.BeaconEntries) != 0 {
rBeacon = h.BeaconEntries[len(h.BeaconEntries)-1]
}
buf := new(bytes.Buffer)
if err := h.Miner.MarshalCBOR(buf); err != nil {
return xerrors.Errorf("failed to marshal miner address to cbor: %w", err)
}
vrfBase, err := store.DrawRandomness(rBeacon.Data, crypto.DomainSeparationTag_ElectionProofProduction, h.Height, buf.Bytes())
if err != nil {
return xerrors.Errorf("could not draw randomness: %w", err)
}
if err := VerifyElectionPoStVRF(ctx, waddr, vrfBase, h.ElectionProof.VRFProof); err != nil {
return xerrors.Errorf("validating block election proof failed: %w", err)
}
slashed, err := stmgr.GetMinerSlashed(ctx, syncer.sm, baseTs, h.Miner)
if err != nil {
return xerrors.Errorf("failed to check if block miner was slashed: %w", err)
}
if slashed {
return xerrors.Errorf("received block was from slashed or invalid miner")
}
mpow, tpow, _, err := stmgr.GetPowerRaw(ctx, syncer.sm, lbst, h.Miner)
if err != nil {
return xerrors.Errorf("failed getting power: %w", err)
}
j := h.ElectionProof.ComputeWinCount(mpow.QualityAdjPower, tpow.QualityAdjPower)
if h.ElectionProof.WinCount != j {
return xerrors.Errorf("miner claims wrong number of wins: miner: %d, computed: %d", h.ElectionProof.WinCount, j)
}
return nil
})
blockSigCheck := async.Err(func() error {
if err := sigs.CheckBlockSignature(ctx, h, waddr); err != nil {
return xerrors.Errorf("check block signature failed: %w", err)
}
return nil
})
beaconValuesCheck := async.Err(func() error {
if os.Getenv("LOTUS_IGNORE_DRAND") == "_yes_" {
return nil
}
if err := beacon.ValidateBlockValues(syncer.beacon, h, baseTs.Height(), *prevBeacon); err != nil {
return xerrors.Errorf("failed to validate blocks random beacon values: %w", err)
}
return nil
})
tktsCheck := async.Err(func() error {
buf := new(bytes.Buffer)
if err := h.Miner.MarshalCBOR(buf); err != nil {
return xerrors.Errorf("failed to marshal miner address to cbor: %w", err)
}
if h.Height > build.UpgradeSmokeHeight {
buf.Write(baseTs.MinTicket().VRFProof)
}
beaconBase := *prevBeacon
if len(h.BeaconEntries) != 0 {
beaconBase = h.BeaconEntries[len(h.BeaconEntries)-1]
}
vrfBase, err := store.DrawRandomness(beaconBase.Data, crypto.DomainSeparationTag_TicketProduction, h.Height-build.TicketRandomnessLookback, buf.Bytes())
if err != nil {
return xerrors.Errorf("failed to compute vrf base for ticket: %w", err)
}
err = VerifyElectionPoStVRF(ctx, waddr, vrfBase, h.Ticket.VRFProof)
if err != nil {
return xerrors.Errorf("validating block tickets failed: %w", err)
}
return nil
})
wproofCheck := async.Err(func() error {
if err := syncer.VerifyWinningPoStProof(ctx, winPoStNv, h, *prevBeacon, lbst, waddr); err != nil {
return xerrors.Errorf("invalid election post: %w", err)
}
return nil
})
await := []async.ErrorFuture{
minerCheck,
tktsCheck,
blockSigCheck,
beaconValuesCheck,
wproofCheck,
winnerCheck,
msgsCheck,
baseFeeCheck,
stateRootCheck,
}
var merr error
for _, fut := range await {
if err := fut.AwaitContext(ctx); err != nil {
merr = multierror.Append(merr, err)
}
}
if merr != nil {
mulErr := merr.(*multierror.Error)
mulErr.ErrorFormat = func(es []error) string {
if len(es) == 1 {
return fmt.Sprintf("1 error occurred:\n\t* %+v\n\n", es[0])
}
points := make([]string, len(es))
for i, err := range es {
points[i] = fmt.Sprintf("* %+v", err)
}
return fmt.Sprintf(
"%d errors occurred:\n\t%s\n\n",
len(es), strings.Join(points, "\n\t"))
}
return mulErr
}
if useCache {
if err := syncer.store.MarkBlockAsValidated(ctx, b.Cid()); err != nil {
return xerrors.Errorf("caching block validation %s: %w", b.Cid(), err)
}
}
return nil
}
func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, nv network.Version, h *types.BlockHeader, prevBeacon types.BeaconEntry, lbst cid.Cid, waddr address.Address) error {
if build.InsecurePoStValidation {
if len(h.WinPoStProof) == 0 {
return xerrors.Errorf("[INSECURE-POST-VALIDATION] No winning post proof given")
}
if string(h.WinPoStProof[0].ProofBytes) == "valid proof" {
return nil
}
return xerrors.Errorf("[INSECURE-POST-VALIDATION] winning post was invalid")
}
buf := new(bytes.Buffer)
if err := h.Miner.MarshalCBOR(buf); err != nil {
return xerrors.Errorf("failed to marshal miner address: %w", err)
}
rbase := prevBeacon
if len(h.BeaconEntries) > 0 {
rbase = h.BeaconEntries[len(h.BeaconEntries)-1]
}
rand, err := store.DrawRandomness(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, h.Height, buf.Bytes())
if err != nil {
return xerrors.Errorf("failed to get randomness for verifying winning post proof: %w", err)
}
mid, err := address.IDFromAddress(h.Miner)
if err != nil {
return xerrors.Errorf("failed to get ID from miner address %s: %w", h.Miner, err)
}
sectors, err := stmgr.GetSectorsForWinningPoSt(ctx, nv, syncer.verifier, syncer.sm, lbst, h.Miner, rand)
if err != nil {
return xerrors.Errorf("getting winning post sector set: %w", err)
}
ok, err := ffiwrapper.ProofVerifier.VerifyWinningPoSt(ctx, proof2.WinningPoStVerifyInfo{
Randomness: rand,
Proofs: h.WinPoStProof,
ChallengedSectors: sectors,
Prover: abi.ActorID(mid),
})
if err != nil {
return xerrors.Errorf("failed to verify election post: %w", err)
}
if !ok {
log.Errorf("invalid winning post (block: %s, %x; %v)", h.Cid(), rand, sectors)
return xerrors.Errorf("winning post was invalid")
}
return nil
}
// TODO: We should extract this somewhere else and make the message pool and miner use the same logic
func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock, baseTs *types.TipSet) error {
{
var sigCids []cid.Cid // this is what we get for people not wanting the marshalcbor method on the cid type
var pubks [][]byte
for _, m := range b.BlsMessages {
sigCids = append(sigCids, m.Cid())
pubk, err := syncer.sm.GetBlsPublicKey(ctx, m.From, baseTs)
if err != nil {
return xerrors.Errorf("failed to load bls public to validate block: %w", err)
}
pubks = append(pubks, pubk)
}
if err := syncer.verifyBlsAggregate(ctx, b.Header.BLSAggregate, sigCids, pubks); err != nil {
return xerrors.Errorf("bls aggregate signature was invalid: %w", err)
}
}
nonces := make(map[address.Address]uint64)
stateroot, _, err := syncer.sm.TipSetState(ctx, baseTs)
if err != nil {
return err
}
st, err := state.LoadStateTree(syncer.store.ActorStore(ctx), stateroot)
if err != nil {
return xerrors.Errorf("failed to load base state tree: %w", err)
}
nv := syncer.sm.GetNtwkVersion(ctx, b.Header.Height)
pl := vm.PricelistByEpoch(baseTs.Height())
var sumGasLimit int64
checkMsg := func(msg types.ChainMsg) error {
m := msg.VMMessage()
// Phase 1: syntactic validation, as defined in the spec
minGas := pl.OnChainMessage(msg.ChainLength())
if err := m.ValidForBlockInclusion(minGas.Total(), nv); err != nil {
return err
}
// ValidForBlockInclusion checks if any single message does not exceed BlockGasLimit
// So below is overflow safe
sumGasLimit += m.GasLimit
if sumGasLimit > build.BlockGasLimit {
return xerrors.Errorf("block gas limit exceeded")
}
// Phase 2: (Partial) semantic validation:
// the sender exists and is an account actor, and the nonces make sense
var sender address.Address
if nv >= network.Version13 {
sender, err = st.LookupID(m.From)
if err != nil {
return err
}
} else {
sender = m.From
}
if _, ok := nonces[sender]; !ok {
// `GetActor` does not validate that this is an account actor.
act, err := st.GetActor(sender)
if err != nil {
return xerrors.Errorf("failed to get actor: %w", err)
}
if !builtin.IsAccountActor(act.Code) {
return xerrors.New("Sender must be an account actor")
}
nonces[sender] = act.Nonce
}
if nonces[sender] != m.Nonce {
return xerrors.Errorf("wrong nonce (exp: %d, got: %d)", nonces[sender], m.Nonce)
}
nonces[sender]++
return nil
}
// Validate message arrays in a temporary blockstore.
tmpbs := bstore.NewMemory()
tmpstore := blockadt.WrapStore(ctx, cbor.NewCborStore(tmpbs))
bmArr := blockadt.MakeEmptyArray(tmpstore)
for i, m := range b.BlsMessages {
if err := checkMsg(m); err != nil {
return xerrors.Errorf("block had invalid bls message at index %d: %w", i, err)
}
c, err := store.PutMessage(tmpbs, m)
if err != nil {
return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err)
}
k := cbg.CborCid(c)
if err := bmArr.Set(uint64(i), &k); err != nil {
return xerrors.Errorf("failed to put bls message at index %d: %w", i, err)
}
}
smArr := blockadt.MakeEmptyArray(tmpstore)
for i, m := range b.SecpkMessages {
if err := checkMsg(m); err != nil {
return xerrors.Errorf("block had invalid secpk message at index %d: %w", i, err)
}
// `From` being an account actor is only validated inside the `vm.ResolveToKeyAddr` call
// in `StateManager.ResolveToKeyAddress` here (and not in `checkMsg`).
kaddr, err := syncer.sm.ResolveToKeyAddress(ctx, m.Message.From, baseTs)
if err != nil {
return xerrors.Errorf("failed to resolve key addr: %w", err)
}
if err := sigs.Verify(&m.Signature, kaddr, m.Message.Cid().Bytes()); err != nil {
return xerrors.Errorf("secpk message %s has invalid signature: %w", m.Cid(), err)
}
c, err := store.PutMessage(tmpbs, m)
if err != nil {
return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err)
}
k := cbg.CborCid(c)
if err := smArr.Set(uint64(i), &k); err != nil {
return xerrors.Errorf("failed to put secpk message at index %d: %w", i, err)
}
}
bmroot, err := bmArr.Root()
if err != nil {
return err
}
smroot, err := smArr.Root()
if err != nil {
return err
}
mrcid, err := tmpstore.Put(ctx, &types.MsgMeta{
BlsMessages: bmroot,
SecpkMessages: smroot,
})
if err != nil {
return err
}
if b.Header.Messages != mrcid {
return fmt.Errorf("messages didnt match message root in header")
}
// Finally, flush.
return vm.Copy(ctx, tmpbs, syncer.store.ChainBlockstore(), mrcid)
}
func (syncer *Syncer) verifyBlsAggregate(ctx context.Context, sig *crypto.Signature, msgs []cid.Cid, pubks [][]byte) error {
_, span := trace.StartSpan(ctx, "syncer.verifyBlsAggregate")
defer span.End()
span.AddAttributes(
trace.Int64Attribute("msgCount", int64(len(msgs))),
)
msgsS := make([]ffi.Message, len(msgs))
pubksS := make([]ffi.PublicKey, len(msgs))
for i := 0; i < len(msgs); i++ {
msgsS[i] = msgs[i].Bytes()
copy(pubksS[i][:], pubks[i][:ffi.PublicKeyBytes])
}
sigS := new(ffi.Signature)
copy(sigS[:], sig.Data[:ffi.SignatureBytes])
if len(msgs) == 0 {
return nil
}
valid := ffi.HashVerify(sigS, msgsS, pubksS)
if !valid {
return xerrors.New("bls aggregate signature failed to verify")
}
return nil
}
type syncStateKey struct{}
func extractSyncState(ctx context.Context) *SyncerState {
v := ctx.Value(syncStateKey{})
if v != nil {
return v.(*SyncerState)
}
return nil
}
// collectHeaders collects the headers from the blocks between any two tipsets.
//
// `incoming` is the heaviest/projected/target tipset we have learned about, and
// `known` is usually an anchor tipset we already have in our view of the chain
// (which could be the genesis).
//
// collectHeaders checks if portions of the chain are in our ChainStore; falling
// down to the network to retrieve the missing parts. If during the process, any
// portion we receive is in our denylist (bad list), we short-circuit.
//
// {hint/usage}: This is used by collectChain, which is in turn called from the
// main Sync method (Syncer#Sync), so it's a pretty central method.
//
// {hint/logic}: The logic of this method is as follows:
//
// 1. Check that the from tipset is not linked to a parent block known to be
// bad.
// 2. Check the consistency of beacon entries in the from tipset. We check
// total equality of the BeaconEntries in each block.
// 3. Traverse the chain backwards, for each tipset:
// 3a. Load it from the chainstore; if found, move on to its parent.
// 3b. Query our peers via client in batches, requesting up to a
// maximum of 500 tipsets every time.
//
// Once we've concluded, if we find a mismatching tipset at the height where the
// anchor tipset should be, we are facing a fork, and we invoke Syncer#syncFork
// to resolve it. Refer to the godocs there.
//
// All throughout the process, we keep checking if the received blocks are in
// the deny list, and short-circuit the process if so.
func (syncer *Syncer) collectHeaders(ctx context.Context, incoming *types.TipSet, known *types.TipSet, ignoreCheckpoint bool) ([]*types.TipSet, error) {
ctx, span := trace.StartSpan(ctx, "collectHeaders")
defer span.End()
ss := extractSyncState(ctx)
span.AddAttributes(
trace.Int64Attribute("incomingHeight", int64(incoming.Height())),
trace.Int64Attribute("knownHeight", int64(known.Height())),
)
// Check if the parents of the from block are in the denylist.
// i.e. if a fork of the chain has been requested that we know to be bad.
for _, pcid := range incoming.Parents().Cids() {
if reason, ok := syncer.bad.Has(pcid); ok {
newReason := reason.Linked("linked to %s", pcid)
for _, b := range incoming.Cids() {
syncer.bad.Add(b, newReason)
}
return nil, xerrors.Errorf("chain linked to block marked previously as bad (%s, %s) (reason: %s)", incoming.Cids(), pcid, reason)
}
}
{
// ensure consistency of beacon entries
targetBE := incoming.Blocks()[0].BeaconEntries
sorted := sort.SliceIsSorted(targetBE, func(i, j int) bool {
return targetBE[i].Round < targetBE[j].Round
})
if !sorted {
syncer.bad.Add(incoming.Cids()[0], NewBadBlockReason(incoming.Cids(), "wrong order of beacon entries"))
return nil, xerrors.Errorf("wrong order of beacon entries")
}
for _, bh := range incoming.Blocks()[1:] {
if len(targetBE) != len(bh.BeaconEntries) {
// cannot mark bad, I think @Kubuxu
return nil, xerrors.Errorf("tipset contained different number for beacon entires")
}
for i, be := range bh.BeaconEntries {
if targetBE[i].Round != be.Round || !bytes.Equal(targetBE[i].Data, be.Data) {
// cannot mark bad, I think @Kubuxu
return nil, xerrors.Errorf("tipset contained different beacon entires")
}
}
}
}
blockSet := []*types.TipSet{incoming}
// Parent of the new (possibly better) tipset that we need to fetch next.
at := incoming.Parents()
// we want to sync all the blocks until the height above our
// best tipset so far
untilHeight := known.Height() + 1
ss.SetHeight(blockSet[len(blockSet)-1].Height())
var acceptedBlocks []cid.Cid
loop:
for blockSet[len(blockSet)-1].Height() > untilHeight {
for _, bc := range at.Cids() {
if reason, ok := syncer.bad.Has(bc); ok {
newReason := reason.Linked("change contained %s", bc)
for _, b := range acceptedBlocks {
syncer.bad.Add(b, newReason)
}
return nil, xerrors.Errorf("chain contained block marked previously as bad (%s, %s) (reason: %s)", incoming.Cids(), bc, reason)
}
}
// If, for some reason, we have a suffix of the chain locally, handle that here
ts, err := syncer.store.LoadTipSet(at)
if err == nil {
acceptedBlocks = append(acceptedBlocks, at.Cids()...)
blockSet = append(blockSet, ts)
at = ts.Parents()
continue
}
if !xerrors.Is(err, bstore.ErrNotFound) {
log.Warnf("loading local tipset: %s", err)
}
// NB: GetBlocks validates that the blocks are in-fact the ones we
// requested, and that they are correctly linked to one another. It does
// not validate any state transitions.
window := 500
if gap := int(blockSet[len(blockSet)-1].Height() - untilHeight); gap < window {
window = gap
}
blks, err := syncer.Exchange.GetBlocks(ctx, at, window)
if err != nil {
// Most likely our peers aren't fully synced yet, but forwarded a
// new block message (ideally we'd find better peers)
log.Errorf("failed to get blocks: %+v", err)
span.AddAttributes(trace.StringAttribute("error", err.Error()))
// This error will only be logged above,
return nil, xerrors.Errorf("failed to get blocks: %w", err)
}
log.Info("Got blocks: ", blks[0].Height(), len(blks))
// Check that the fetched segment of the chain matches what we already
// have. Since we fetch from the head backwards our reassembled chain
// is sorted in reverse here: we have a child -> parent order, our last
// tipset then should be child of the first tipset retrieved.
// FIXME: The reassembly logic should be part of the `client`
// service, the consumer should not be concerned with the
// `MaxRequestLength` limitation, it should just be able to request
// a segment of arbitrary length. The same burden is put on
// `syncFork()` which needs to be aware this as well.
// A successful `GetBlocks()` call is guaranteed to fetch at least
// one tipset, so the access `blks[0]` is safe.
if !blockSet[len(blockSet)-1].IsChildOf(blks[0]) {
return nil, xerrors.Errorf("retrieved segments of the chain are not connected at heights %d/%d",
blockSet[len(blockSet)-1].Height(), blks[0].Height())
}
for _, b := range blks {
if b.Height() < untilHeight {
break loop
}
for _, bc := range b.Cids() {
if reason, ok := syncer.bad.Has(bc); ok {
newReason := reason.Linked("change contained %s", bc)
for _, b := range acceptedBlocks {
syncer.bad.Add(b, newReason)
}
return nil, xerrors.Errorf("chain contained block marked previously as bad (%s, %s) (reason: %s)", incoming.Cids(), bc, reason)
}
}
blockSet = append(blockSet, b)
}
acceptedBlocks = append(acceptedBlocks, at.Cids()...)
ss.SetHeight(blks[len(blks)-1].Height())
at = blks[len(blks)-1].Parents()
}
base := blockSet[len(blockSet)-1]
if base.Equals(known) {
blockSet = blockSet[:len(blockSet)-1]
base = blockSet[len(blockSet)-1]
}
if base.IsChildOf(known) {
// common case: receiving blocks that are building on top of our best tipset
return blockSet, nil
}
knownParent, err := syncer.store.LoadTipSet(known.Parents())
if err != nil {
return nil, xerrors.Errorf("failed to load next local tipset: %w", err)
}
if base.IsChildOf(knownParent) {
// common case: receiving a block that's potentially part of the same tipset as our best block
return blockSet, nil
}
// We have now ascertained that this is *not* a 'fast forward'
log.Warnf("(fork detected) synced header chain (%s - %d) does not link to our best block (%s - %d)", incoming.Cids(), incoming.Height(), known.Cids(), known.Height())
fork, err := syncer.syncFork(ctx, base, known, ignoreCheckpoint)
if err != nil {
if xerrors.Is(err, ErrForkTooLong) || xerrors.Is(err, ErrForkCheckpoint) {
// TODO: we're marking this block bad in the same way that we mark invalid blocks bad. Maybe distinguish?
log.Warn("adding forked chain to our bad tipset cache")
for _, b := range incoming.Blocks() {
syncer.bad.Add(b.Cid(), NewBadBlockReason(incoming.Cids(), "fork past finality"))
}
}
return nil, xerrors.Errorf("failed to sync fork: %w", err)
}
blockSet = append(blockSet, fork...)
return blockSet, nil
}
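// exampleWindowSize is an illustrative helper added for clarity (not part of
// the original Lotus code): it restates the batching rule used in
// collectHeaders above -- ask peers for at most 500 tipsets per request, or
// fewer when the remaining gap to the known tipset is smaller.
func exampleWindowSize(currentHeight, untilHeight abi.ChainEpoch) int {
window := 500
if gap := int(currentHeight - untilHeight); gap < window {
window = gap
}
return window
}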
var ErrForkTooLong = fmt.Errorf("fork longer than threshold")
var ErrForkCheckpoint = fmt.Errorf("fork would require us to diverge from checkpointed block")
// syncFork tries to obtain the chain fragment that links a fork into a common
// ancestor in our view of the chain.
//
// If the fork is too long (build.ForkLengthThreshold), or would cause us to diverge from the checkpoint (ErrForkCheckpoint),
// we add the entire subchain to the denylist. Else, we find the common ancestor, and add the missing chain
// fragment until the fork point to the returned []TipSet.
func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, known *types.TipSet, ignoreCheckpoint bool) ([]*types.TipSet, error) {
var chkpt *types.TipSet
if !ignoreCheckpoint {
chkpt = syncer.store.GetCheckpoint()
if known.Equals(chkpt) {
return nil, ErrForkCheckpoint
}
}
// TODO: Does this mean we always ask for ForkLengthThreshold blocks from the network, even if we just need, like, 2? Yes.
// Would it not be better to ask in smaller chunks, given that an ~ForkLengthThreshold is very rare?
tips, err := syncer.Exchange.GetBlocks(ctx, incoming.Parents(), int(build.ForkLengthThreshold))
if err != nil {
return nil, err
}
nts, err := syncer.store.LoadTipSet(known.Parents())
if err != nil {
return nil, xerrors.Errorf("failed to load next local tipset: %w", err)
}
// Track the fork length on our side of the synced chain to enforce
// `ForkLengthThreshold`. Initialized to 1 because we already walked back
// one tipset from `known` (our synced head).
forkLengthInHead := 1
for cur := 0; cur < len(tips); {
if nts.Height() == 0 {
if !syncer.Genesis.Equals(nts) {
return nil, xerrors.Errorf("somehow synced chain that linked back to a different genesis (bad genesis: %s)", nts.Key())
}
return nil, xerrors.Errorf("synced chain forked at genesis, refusing to sync; incoming: %s", incoming.Cids())
}
if nts.Equals(tips[cur]) {
return tips[:cur+1], nil
}
if nts.Height() < tips[cur].Height() {
cur++
} else {
// Walk back one block in our synced chain to try to meet the fork's
// height.
forkLengthInHead++
if forkLengthInHead > int(build.ForkLengthThreshold) {
return nil, ErrForkTooLong
}
// We will be forking away from nts, check that it isn't checkpointed
if nts.Equals(chkpt) {
return nil, ErrForkCheckpoint
}
nts, err = syncer.store.LoadTipSet(nts.Parents())
if err != nil {
return nil, xerrors.Errorf("loading next local tipset: %w", err)
}
}
}
return nil, ErrForkTooLong
}
func (syncer *Syncer) syncMessagesAndCheckState(ctx context.Context, headers []*types.TipSet) error {
ss := extractSyncState(ctx)
ss.SetHeight(headers[len(headers)-1].Height())
return syncer.iterFullTipsets(ctx, headers, func(ctx context.Context, fts *store.FullTipSet) error {
log.Debugw("validating tipset", "height", fts.TipSet().Height(), "size", len(fts.TipSet().Cids()))
if err := syncer.ValidateTipSet(ctx, fts, true); err != nil {
log.Errorf("failed to validate tipset: %+v", err)
return xerrors.Errorf("message processing failed: %w", err)
}
stats.Record(ctx, metrics.ChainNodeWorkerHeight.M(int64(fts.TipSet().Height())))
ss.SetHeight(fts.TipSet().Height())
return nil
})
}
// fills out each of the given tipsets with messages and calls the callback with it
func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipSet, cb func(context.Context, *store.FullTipSet) error) error {
ss := extractSyncState(ctx)
ctx, span := trace.StartSpan(ctx, "iterFullTipsets")
defer span.End()
span.AddAttributes(trace.Int64Attribute("num_headers", int64(len(headers))))
for i := len(headers) - 1; i >= 0; {
fts, err := syncer.store.TryFillTipSet(headers[i])
if err != nil {
return err
}
if fts != nil {
if err := cb(ctx, fts); err != nil {
return err
}
i--
continue
}
batchSize := concurrentSyncRequests * syncRequestBatchSize
if i < batchSize {
batchSize = i + 1
}
ss.SetStage(api.StageFetchingMessages)
startOffset := i + 1 - batchSize
bstout, batchErr := syncer.fetchMessages(ctx, headers[startOffset:startOffset+batchSize], startOffset)
ss.SetStage(api.StageMessages)
if batchErr != nil {
return xerrors.Errorf("failed to fetch messages: %w", batchErr)
}
for bsi := 0; bsi < len(bstout); bsi++ {
// temp storage so we don't persist data we dont want to
bs := bstore.NewMemory()
blks := cbor.NewCborStore(bs)
this := headers[i-bsi]
bstip := bstout[len(bstout)-(bsi+1)]
fts, err := zipTipSetAndMessages(blks, this, bstip.Bls, bstip.Secpk, bstip.BlsIncludes, bstip.SecpkIncludes)
if err != nil {
log.Warnw("zipping failed", "error", err, "bsi", bsi, "i", i,
"height", this.Height(),
"next-height", i+batchSize)
return xerrors.Errorf("message processing failed: %w", err)
}
if err := cb(ctx, fts); err != nil {
return err
}
if err := persistMessages(ctx, bs, bstip); err != nil {
return err
}
if err := copyBlockstore(ctx, bs, syncer.store.ChainBlockstore()); err != nil {
return xerrors.Errorf("message processing failed: %w", err)
}
}
i -= batchSize
}
return nil
}
func (syncer *Syncer) fetchMessages(ctx context.Context, headers []*types.TipSet, startOffset int) ([]*exchange.CompactedMessages, error) {
batchSize := len(headers)
batch := make([]*exchange.CompactedMessages, batchSize)
var wg sync.WaitGroup
var mx sync.Mutex
var batchErr error
start := build.Clock.Now()
for j := 0; j < batchSize; j += syncRequestBatchSize {
wg.Add(1)
go func(j int) {
defer wg.Done()
nreq := syncRequestBatchSize
if j+nreq > batchSize {
nreq = batchSize - j
}
failed := false
for offset := 0; !failed && offset < nreq; {
nextI := j + offset
lastI := j + nreq
var requestErr error
var requestResult []*exchange.CompactedMessages
for retry := 0; requestResult == nil && retry < syncRequestRetries; retry++ {
if retry > 0 {
log.Infof("fetching messages at %d (retry %d)", startOffset+nextI, retry)
} else {
log.Infof("fetching messages at %d", startOffset+nextI)
}
result, err := syncer.Exchange.GetChainMessages(ctx, headers[nextI:lastI])
if err != nil {
requestErr = multierror.Append(requestErr, err)
} else {
requestResult = result
}
}
mx.Lock()
if requestResult != nil {
copy(batch[j+offset:], requestResult)
offset += len(requestResult)
} else {
log.Errorf("error fetching messages at %d: %s", nextI, requestErr)
batchErr = multierror.Append(batchErr, requestErr)
failed = true
}
mx.Unlock()
}
}(j)
}
wg.Wait()
if batchErr != nil {
return nil, batchErr
}
log.Infof("fetching messages for %d tipsets at %d done; took %s", batchSize, startOffset, build.Clock.Since(start))
return batch, nil
}
func persistMessages(ctx context.Context, bs bstore.Blockstore, bst *exchange.CompactedMessages) error {
_, span := trace.StartSpan(ctx, "persistMessages")
defer span.End()
for _, m := range bst.Bls {
//log.Infof("putting BLS message: %s", m.Cid())
if _, err := store.PutMessage(bs, m); err != nil {
log.Errorf("failed to persist messages: %+v", err)
return xerrors.Errorf("BLS message processing failed: %w", err)
}
}
for _, m := range bst.Secpk {
if m.Signature.Type != crypto.SigTypeSecp256k1 {
return xerrors.Errorf("unknown signature type on message %s: %q", m.Cid(), m.Signature.Type)
}
//log.Infof("putting secp256k1 message: %s", m.Cid())
if _, err := store.PutMessage(bs, m); err != nil {
log.Errorf("failed to persist messages: %+v", err)
return xerrors.Errorf("secp256k1 message processing failed: %w", err)
}
}
return nil
}
// collectChain tries to advance our view of the chain to the purported head.
//
// It goes through various stages:
//
// 1. StageHeaders: we proceed in the sync process by requesting block headers
// from our peers, moving back from their heads, until we reach a tipset
// that we have in common (such a common tipset must exist, though it may
// simply be the genesis block).
//
// If the common tipset is our head, we treat the sync as a "fast-forward",
// else we must drop part of our chain to connect to the peer's head
// (referred to as "forking").
//
// 2. StagePersistHeaders: now that we've collected the missing headers,
// augmented by those on the other side of a fork, we persist them to the
// BlockStore.
//
// 3. StageMessages: having acquired the headers and found a common tipset,
// we then move forward, requesting the full blocks, including the messages.
func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet, hts *types.TipSet, ignoreCheckpoint bool) error {
ctx, span := trace.StartSpan(ctx, "collectChain")
defer span.End()
ss := extractSyncState(ctx)
ss.Init(hts, ts)
headers, err := syncer.collectHeaders(ctx, ts, hts, ignoreCheckpoint)
if err != nil {
ss.Error(err)
return err
}
span.AddAttributes(trace.Int64Attribute("syncChainLength", int64(len(headers))))
if !headers[0].Equals(ts) {
log.Errorf("collectChain headers[0] should be equal to sync target. Its not: %s != %s", headers[0].Cids(), ts.Cids())
}
ss.SetStage(api.StagePersistHeaders)
toPersist := make([]*types.BlockHeader, 0, len(headers)*int(build.BlocksPerEpoch))
for _, ts := range headers {
toPersist = append(toPersist, ts.Blocks()...)
}
if err := syncer.store.PersistBlockHeaders(toPersist...); err != nil {
err = xerrors.Errorf("failed to persist synced blocks to the chainstore: %w", err)
ss.Error(err)
return err
}
toPersist = nil
ss.SetStage(api.StageMessages)
if err := syncer.syncMessagesAndCheckState(ctx, headers); err != nil {
err = xerrors.Errorf("collectChain syncMessages: %w", err)
ss.Error(err)
return err
}
ss.SetStage(api.StageSyncComplete)
log.Debugw("new tipset", "height", ts.Height(), "tipset", types.LogCids(ts.Cids()))
return nil
}
func VerifyElectionPoStVRF(ctx context.Context, worker address.Address, rand []byte, evrf []byte) error {
return gen.VerifyVRF(ctx, worker, rand, evrf)
}
func (syncer *Syncer) State() []SyncerStateSnapshot {
return syncer.syncmgr.State()
}
// MarkBad manually adds a block to the "bad blocks" cache.
func (syncer *Syncer) MarkBad(blk cid.Cid) {
syncer.bad.Add(blk, NewBadBlockReason([]cid.Cid{blk}, "manually marked bad"))
}
// UnmarkBad manually removes a block from the "bad blocks" cache.
func (syncer *Syncer) UnmarkBad(blk cid.Cid) {
syncer.bad.Remove(blk)
}
func (syncer *Syncer) UnmarkAllBad() {
syncer.bad.Purge()
}
func (syncer *Syncer) CheckBadBlockCache(blk cid.Cid) (string, bool) {
bbr, ok := syncer.bad.Has(blk)
return bbr.String(), ok
}
func (syncer *Syncer) getLatestBeaconEntry(_ context.Context, ts *types.TipSet) (*types.BeaconEntry, error) {
cur := ts
for i := 0; i < 20; i++ {
cbe := cur.Blocks()[0].BeaconEntries
if len(cbe) > 0 {
return &cbe[len(cbe)-1], nil
}
if cur.Height() == 0 {
return nil, xerrors.Errorf("made it back to genesis block without finding beacon entry")
}
next, err := syncer.store.LoadTipSet(cur.Parents())
if err != nil {
return nil, xerrors.Errorf("failed to load parents when searching back for latest beacon entry: %w", err)
}
cur = next
}
return nil, xerrors.Errorf("found NO beacon entries in the 20 latest tipsets")
}
func (syncer *Syncer) IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool {
if syncer.Genesis == nil {
return false
}
now := uint64(build.Clock.Now().Unix())
return epoch > (abi.ChainEpoch((now-syncer.Genesis.MinTimestamp())/build.BlockDelaySecs) + MaxHeightDrift)
}
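// exampleExpectedEpochAt is an illustrative helper added for clarity (it is
// not part of the original Lotus code): it spells out the wall-clock to
// epoch arithmetic used by IsEpochBeyondCurrMax and runMetricsTicker above,
// assuming `now` and the genesis timestamp are both Unix seconds.
func exampleExpectedEpochAt(genesisTimestamp, now uint64) abi.ChainEpoch {
if now < genesisTimestamp {
return 0
}
return abi.ChainEpoch((now - genesisTimestamp) / build.BlockDelaySecs)
}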
| [
"\"LOTUS_IGNORE_DRAND\""
] | [] | [
"LOTUS_IGNORE_DRAND"
] | [] | ["LOTUS_IGNORE_DRAND"] | go | 1 | 0 | |
nifi-registry/nifi-registry-core/nifi-registry-bootstrap/src/main/java/org/apache/nifi/registry/bootstrap/RunNiFiRegistry.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.registry.bootstrap;
import org.apache.commons.lang3.StringUtils;
import org.apache.nifi.bootstrap.util.OSUtils;
import org.apache.nifi.registry.util.FileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.Reader;
import java.lang.reflect.Method;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.attribute.PosixFilePermission;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
/**
* <p>
* The class which bootstraps Apache NiFi Registry. This class looks for the
* bootstrap.conf file by looking in the following places (in order):</p>
* <ol>
* <li>Java System Property named
* {@code org.apache.nifi.registry.bootstrap.config.file}</li>
* <li>${NIFI_REGISTRY_HOME}/conf/bootstrap.conf, where ${NIFI_REGISTRY_HOME} references an
* environment variable {@code NIFI_REGISTRY_HOME}</li>
* <li>./conf/bootstrap.conf, where {@code ./} represents the working
* directory.</li>
* </ol>
* <p>
* If the {@code bootstrap.conf} file cannot be found, throws a {@code FileNotFoundException}.
*/
public class RunNiFiRegistry {
public static final String DEFAULT_CONFIG_FILE = "./conf/bootstrap.conf";
public static final String DEFAULT_JAVA_CMD = "java";
public static final String DEFAULT_PID_DIR = "bin";
public static final String DEFAULT_LOG_DIR = "./logs";
public static final String DEFAULT_DOCS_DIR = "./docs";
public static final String GRACEFUL_SHUTDOWN_PROP = "graceful.shutdown.seconds";
public static final String DEFAULT_GRACEFUL_SHUTDOWN_VALUE = "20";
public static final String NIFI_REGISTRY_PID_DIR_PROP = "org.apache.nifi.registry.bootstrap.config.pid.dir";
public static final String NIFI_REGISTRY_PID_FILE_NAME = "nifi-registry.pid";
public static final String NIFI_REGISTRY_STATUS_FILE_NAME = "nifi-registry.status";
public static final String NIFI_REGISTRY_LOCK_FILE_NAME = "nifi-registry.lock";
public static final String NIFI_REGISTRY_BOOTSTRAP_SENSITIVE_KEY = "nifi.registry.bootstrap.sensitive.key";
public static final String PID_KEY = "pid";
public static final int STARTUP_WAIT_SECONDS = 60;
public static final String SHUTDOWN_CMD = "SHUTDOWN";
public static final String PING_CMD = "PING";
public static final String DUMP_CMD = "DUMP";
private static final int UNINITIALIZED_CC_PORT = -1;
private volatile boolean autoRestartNiFiRegistry = true;
private volatile int ccPort = UNINITIALIZED_CC_PORT;
private volatile long nifiRegistryPid = -1L;
private volatile String secretKey;
private volatile ShutdownHook shutdownHook;
private volatile boolean nifiRegistryStarted;
private final Lock startedLock = new ReentrantLock();
private final Lock lock = new ReentrantLock();
private final Condition startupCondition = lock.newCondition();
private final File bootstrapConfigFile;
// used for logging initial info; these will be logged to console by default when the app is started
private final Logger cmdLogger = LoggerFactory.getLogger("org.apache.nifi.registry.bootstrap.Command");
// used for logging all info. These by default will be written to the log file
private final Logger defaultLogger = LoggerFactory.getLogger(RunNiFiRegistry.class);
private final ExecutorService loggingExecutor;
private volatile Set<Future<?>> loggingFutures = new HashSet<>(2);
public RunNiFiRegistry(final File bootstrapConfigFile, final boolean verbose) throws IOException {
this.bootstrapConfigFile = bootstrapConfigFile;
loggingExecutor = Executors.newFixedThreadPool(2, new ThreadFactory() {
@Override
public Thread newThread(final Runnable runnable) {
final Thread t = Executors.defaultThreadFactory().newThread(runnable);
t.setDaemon(true);
t.setName("NiFi logging handler");
return t;
}
});
}
private static void printUsage() {
System.out.println("Usage:");
System.out.println();
System.out.println("java org.apache.nifi.bootstrap.RunNiFiRegistry [<-verbose>] <command> [options]");
System.out.println();
System.out.println("Valid commands include:");
System.out.println("");
System.out.println("Start : Start a new instance of Apache NiFi Registry");
System.out.println("Stop : Stop a running instance of Apache NiFi Registry");
System.out.println("Restart : Stop Apache NiFi Registry, if it is running, and then start a new instance");
System.out.println("Status : Determine if there is a running instance of Apache NiFi Registry");
System.out.println("Dump : Write a Thread Dump to the file specified by [options], or to the log if no file is given");
System.out.println("Run : Start a new instance of Apache NiFi Registry and monitor the Process, restarting if the instance dies");
System.out.println();
}
private static String[] shift(final String[] orig) {
return Arrays.copyOfRange(orig, 1, orig.length);
}
public static void main(String[] args) throws IOException, InterruptedException {
if (args.length < 1 || args.length > 3) {
printUsage();
return;
}
File dumpFile = null;
boolean verbose = false;
if (args[0].equals("-verbose")) {
verbose = true;
args = shift(args);
}
final String cmd = args[0];
if (cmd.equals("dump")) {
if (args.length > 1) {
dumpFile = new File(args[1]);
} else {
dumpFile = null;
}
}
switch (cmd.toLowerCase()) {
case "start":
case "run":
case "stop":
case "status":
case "dump":
case "restart":
case "env":
break;
default:
printUsage();
return;
}
final File configFile = getDefaultBootstrapConfFile();
final RunNiFiRegistry runNiFiRegistry = new RunNiFiRegistry(configFile, verbose);
Integer exitStatus = null;
switch (cmd.toLowerCase()) {
case "start":
runNiFiRegistry.start();
break;
case "run":
runNiFiRegistry.start();
break;
case "stop":
runNiFiRegistry.stop();
break;
case "status":
exitStatus = runNiFiRegistry.status();
break;
case "restart":
runNiFiRegistry.stop();
runNiFiRegistry.start();
break;
case "dump":
runNiFiRegistry.dump(dumpFile);
break;
case "env":
runNiFiRegistry.env();
break;
}
if (exitStatus != null) {
System.exit(exitStatus);
}
}
private static File getDefaultBootstrapConfFile() {
String configFilename = System.getProperty("org.apache.nifi.registry.bootstrap.config.file");
if (configFilename == null) {
final String nifiRegistryHome = System.getenv("NIFI_REGISTRY_HOME");
if (nifiRegistryHome != null) {
final File nifiRegistryHomeFile = new File(nifiRegistryHome.trim());
final File configFile = new File(nifiRegistryHomeFile, DEFAULT_CONFIG_FILE);
configFilename = configFile.getAbsolutePath();
}
}
if (configFilename == null) {
configFilename = DEFAULT_CONFIG_FILE;
}
final File configFile = new File(configFilename);
return configFile;
}
protected File getBootstrapFile(final Logger logger, String directory, String defaultDirectory, String fileName) throws IOException {
final File confDir = bootstrapConfigFile.getParentFile();
final File nifiHome = confDir.getParentFile();
String confFileDir = System.getProperty(directory);
final File fileDir;
if (confFileDir != null) {
fileDir = new File(confFileDir.trim());
} else {
fileDir = new File(nifiHome, defaultDirectory);
}
FileUtils.ensureDirectoryExistAndCanAccess(fileDir);
final File statusFile = new File(fileDir, fileName);
logger.debug("Status File: {}", statusFile);
return statusFile;
}
protected File getPidFile(final Logger logger) throws IOException {
return getBootstrapFile(logger, NIFI_REGISTRY_PID_DIR_PROP, DEFAULT_PID_DIR, NIFI_REGISTRY_PID_FILE_NAME);
}
protected File getStatusFile(final Logger logger) throws IOException {
return getBootstrapFile(logger, NIFI_REGISTRY_PID_DIR_PROP, DEFAULT_PID_DIR, NIFI_REGISTRY_STATUS_FILE_NAME);
}
protected File getLockFile(final Logger logger) throws IOException {
return getBootstrapFile(logger, NIFI_REGISTRY_PID_DIR_PROP, DEFAULT_PID_DIR, NIFI_REGISTRY_LOCK_FILE_NAME);
}
protected File getStatusFile() throws IOException {
return getStatusFile(defaultLogger);
}
private Properties loadProperties(final Logger logger) throws IOException {
final Properties props = new Properties();
final File statusFile = getStatusFile(logger);
if (statusFile == null || !statusFile.exists()) {
logger.debug("No status file to load properties from");
return props;
}
try (final FileInputStream fis = new FileInputStream(getStatusFile(logger))) {
props.load(fis);
}
final Map<Object, Object> modified = new HashMap<>(props);
modified.remove("secret.key");
logger.debug("Properties: {}", modified);
return props;
}
private synchronized void savePidProperties(final Properties pidProperties, final Logger logger) throws IOException {
final String pid = pidProperties.getProperty(PID_KEY);
if (!StringUtils.isBlank(pid)) {
writePidFile(pid, logger);
}
final File statusFile = getStatusFile(logger);
if (statusFile.exists() && !statusFile.delete()) {
logger.warn("Failed to delete {}", statusFile);
}
if (!statusFile.createNewFile()) {
throw new IOException("Failed to create file " + statusFile);
}
try {
final Set<PosixFilePermission> perms = new HashSet<>();
perms.add(PosixFilePermission.OWNER_READ);
perms.add(PosixFilePermission.OWNER_WRITE);
Files.setPosixFilePermissions(statusFile.toPath(), perms);
} catch (final Exception e) {
logger.warn("Failed to set permissions so that only the owner can read status file {}; "
+ "this may allows others to have access to the key needed to communicate with NiFi Registry. "
+ "Permissions should be changed so that only the owner can read this file", statusFile);
}
try (final FileOutputStream fos = new FileOutputStream(statusFile)) {
pidProperties.store(fos, null);
fos.getFD().sync();
}
logger.debug("Saved Properties {} to {}", new Object[]{pidProperties, statusFile});
}
private synchronized void writePidFile(final String pid, final Logger logger) throws IOException {
final File pidFile = getPidFile(logger);
if (pidFile.exists() && !pidFile.delete()) {
logger.warn("Failed to delete {}", pidFile);
}
if (!pidFile.createNewFile()) {
throw new IOException("Failed to create file " + pidFile);
}
try {
final Set<PosixFilePermission> perms = new HashSet<>();
perms.add(PosixFilePermission.OWNER_WRITE);
perms.add(PosixFilePermission.OWNER_READ);
perms.add(PosixFilePermission.GROUP_READ);
perms.add(PosixFilePermission.OTHERS_READ);
Files.setPosixFilePermissions(pidFile.toPath(), perms);
} catch (final Exception e) {
logger.warn("Failed to set permissions so that only the owner can read pid file {}; "
+ "this may allows others to have access to the key needed to communicate with NiFi Registry. "
+ "Permissions should be changed so that only the owner can read this file", pidFile);
}
try (final FileOutputStream fos = new FileOutputStream(pidFile)) {
fos.write(pid.getBytes(StandardCharsets.UTF_8));
fos.getFD().sync();
}
logger.debug("Saved Pid {} to {}", new Object[]{pid, pidFile});
}
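    // The bootstrap process and the NiFi Registry child process communicate over a localhost TCP socket:
    // a command such as "PING <secret key>" is written on one line and the peer echoes the command name
    // back on success. isPingSuccessful() returns true only if the "PING" echo is received within the
    // 5 second socket timeout.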
private boolean isPingSuccessful(final int port, final String secretKey, final Logger logger) {
logger.debug("Pinging {}", port);
try (final Socket socket = new Socket("localhost", port)) {
final OutputStream out = socket.getOutputStream();
out.write((PING_CMD + " " + secretKey + "\n").getBytes(StandardCharsets.UTF_8));
out.flush();
logger.debug("Sent PING command");
socket.setSoTimeout(5000);
final InputStream in = socket.getInputStream();
final BufferedReader reader = new BufferedReader(new InputStreamReader(in));
final String response = reader.readLine();
logger.debug("PING response: {}", response);
out.close();
reader.close();
return PING_CMD.equals(response);
} catch (final IOException ioe) {
return false;
}
}
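    // Reads the command/control port from the status file and verifies the instance is still live. If
    // the ping fails but the recorded PID still corresponds to a running process, the port is still
    // returned so callers can distinguish a running-but-unresponsive instance; otherwise null is returned.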
private Integer getCurrentPort(final Logger logger) throws IOException {
final Properties props = loadProperties(logger);
final String portVal = props.getProperty("port");
if (portVal == null) {
logger.debug("No Port found in status file");
return null;
} else {
logger.debug("Port defined in status file: {}", portVal);
}
final int port = Integer.parseInt(portVal);
final boolean success = isPingSuccessful(port, props.getProperty("secret.key"), logger);
if (success) {
logger.debug("Successful PING on port {}", port);
return port;
}
final String pid = props.getProperty(PID_KEY);
logger.debug("PID in status file is {}", pid);
if (pid != null) {
final boolean procRunning = isProcessRunning(pid, logger);
if (procRunning) {
return port;
} else {
return null;
}
}
return null;
}
private boolean isProcessRunning(final String pid, final Logger logger) {
try {
// We use the "ps" command to check if the process is still running.
final ProcessBuilder builder = new ProcessBuilder();
builder.command("ps", "-p", pid);
final Process proc = builder.start();
// Look for the pid in the output of the 'ps' command.
boolean running = false;
String line;
try (final InputStream in = proc.getInputStream();
final Reader streamReader = new InputStreamReader(in);
final BufferedReader reader = new BufferedReader(streamReader)) {
while ((line = reader.readLine()) != null) {
if (line.trim().startsWith(pid)) {
running = true;
}
}
}
// If output of the ps command had our PID, the process is running.
if (running) {
logger.debug("Process with PID {} is running", pid);
} else {
logger.debug("Process with PID {} is not running", pid);
}
return running;
} catch (final IOException ioe) {
System.err.println("Failed to determine if Process " + pid + " is running; assuming that it is not");
return false;
}
}
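    // Combines the information above into a single Status snapshot: the recorded port and PID, whether
    // the instance answered a PING, and whether the operating system still reports the PID as running.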
private Status getStatus(final Logger logger) {
final Properties props;
try {
props = loadProperties(logger);
} catch (final IOException ioe) {
return new Status(null, null, false, false);
}
if (props == null) {
return new Status(null, null, false, false);
}
final String portValue = props.getProperty("port");
final String pid = props.getProperty(PID_KEY);
final String secretKey = props.getProperty("secret.key");
if (portValue == null && pid == null) {
return new Status(null, null, false, false);
}
Integer port = null;
boolean pingSuccess = false;
if (portValue != null) {
try {
port = Integer.parseInt(portValue);
pingSuccess = isPingSuccessful(port, secretKey, logger);
} catch (final NumberFormatException nfe) {
return new Status(null, null, false, false);
}
}
if (pingSuccess) {
return new Status(port, pid, true, true);
}
final boolean alive = pid != null && isProcessRunning(pid, logger);
return new Status(port, pid, pingSuccess, alive);
}
public int status() throws IOException {
final Logger logger = cmdLogger;
final Status status = getStatus(logger);
if (status.isRespondingToPing()) {
logger.info("Apache NiFi Registry is currently running, listening to Bootstrap on port {}, PID={}",
new Object[]{status.getPort(), status.getPid() == null ? "unknown" : status.getPid()});
return 0;
}
if (status.isProcessRunning()) {
logger.info("Apache NiFi Registry is running at PID {} but is not responding to ping requests", status.getPid());
return 4;
}
if (status.getPort() == null) {
logger.info("Apache NiFi Registry is not running");
return 3;
}
if (status.getPid() == null) {
logger.info("Apache NiFi Registry is not responding to Ping requests. The process may have died or may be hung");
} else {
logger.info("Apache NiFi Registry is not running");
}
return 3;
}
public void env() {
final Logger logger = cmdLogger;
final Status status = getStatus(logger);
if (status.getPid() == null) {
logger.info("Apache NiFi Registry is not running");
return;
}
final Class<?> virtualMachineClass;
try {
virtualMachineClass = Class.forName("com.sun.tools.attach.VirtualMachine");
} catch (final ClassNotFoundException cnfe) {
logger.error("Seems tools.jar (Linux / Windows JDK) or classes.jar (Mac OS) is not available in classpath");
return;
}
final Method attachMethod;
final Method detachMethod;
try {
attachMethod = virtualMachineClass.getMethod("attach", String.class);
detachMethod = virtualMachineClass.getDeclaredMethod("detach");
} catch (final Exception e) {
logger.error("Methods required for getting environment not available", e);
return;
}
final Object virtualMachine;
try {
virtualMachine = attachMethod.invoke(null, status.getPid());
} catch (final Throwable t) {
logger.error("Problem attaching to NiFi", t);
return;
}
try {
final Method getSystemPropertiesMethod = virtualMachine.getClass().getMethod("getSystemProperties");
final Properties sysProps = (Properties) getSystemPropertiesMethod.invoke(virtualMachine);
for (Entry<Object, Object> syspropEntry : sysProps.entrySet()) {
logger.info(syspropEntry.getKey().toString() + " = " + syspropEntry.getValue().toString());
}
} catch (Throwable t) {
throw new RuntimeException(t);
} finally {
try {
detachMethod.invoke(virtualMachine);
} catch (final Exception e) {
logger.warn("Caught exception detaching from process", e);
}
}
}
/**
* Writes a NiFi thread dump to the given file; if file is null, logs at
* INFO level instead.
*
* @param dumpFile the file to write the dump content to
* @throws IOException if any issues occur while writing the dump file
*/
public void dump(final File dumpFile) throws IOException {
final Logger logger = defaultLogger; // dump to bootstrap log file by default
final Integer port = getCurrentPort(logger);
if (port == null) {
logger.info("Apache NiFi Registry is not currently running");
return;
}
final Properties nifiRegistryProps = loadProperties(logger);
final String secretKey = nifiRegistryProps.getProperty("secret.key");
final StringBuilder sb = new StringBuilder();
try (final Socket socket = new Socket()) {
logger.debug("Connecting to NiFi Registry instance");
socket.setSoTimeout(60000);
socket.connect(new InetSocketAddress("localhost", port));
logger.debug("Established connection to NiFi Registry instance.");
socket.setSoTimeout(60000);
logger.debug("Sending DUMP Command to port {}", port);
final OutputStream out = socket.getOutputStream();
out.write((DUMP_CMD + " " + secretKey + "\n").getBytes(StandardCharsets.UTF_8));
out.flush();
final InputStream in = socket.getInputStream();
try (final BufferedReader reader = new BufferedReader(new InputStreamReader(in))) {
String line;
while ((line = reader.readLine()) != null) {
sb.append(line).append("\n");
}
}
}
final String dump = sb.toString();
if (dumpFile == null) {
logger.info(dump);
} else {
try (final FileOutputStream fos = new FileOutputStream(dumpFile)) {
fos.write(dump.getBytes(StandardCharsets.UTF_8));
}
// we want to log to the console (by default) that we wrote the thread dump to the specified file
cmdLogger.info("Successfully wrote thread dump to {}", dumpFile.getAbsolutePath());
}
}
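    // notifyStop() only gathers the hostname, timestamp and user name; it appears to be a remnant of a
    // notification feature and currently has no side effects beyond computing those values.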
public void notifyStop() {
final String hostname = getHostname();
final SimpleDateFormat sdf = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss.SSS");
final String now = sdf.format(System.currentTimeMillis());
String user = System.getProperty("user.name");
if (user == null || user.trim().isEmpty()) {
user = "Unknown User";
}
}
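    // Shutdown sequence: create the lock file to mark that a stop is in progress, send the SHUTDOWN
    // command (with the secret key) to the bootstrap port, then wait for the process to exit for up to
    // graceful.shutdown.seconds (from bootstrap.conf, default 20 seconds) before killing the process
    // tree with SIGKILL. The status and PID files are removed on success, and the lock file is always
    // removed in the finally block. If the socket write fails, the process is killed by PID directly
    // when a PID is known.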
public void stop() throws IOException {
final Logger logger = cmdLogger;
final Integer port = getCurrentPort(logger);
if (port == null) {
logger.info("Apache NiFi Registry is not currently running");
return;
}
// indicate that a stop command is in progress
final File lockFile = getLockFile(logger);
if (!lockFile.exists()) {
lockFile.createNewFile();
}
final Properties nifiRegistryProps = loadProperties(logger);
final String secretKey = nifiRegistryProps.getProperty("secret.key");
final String pid = nifiRegistryProps.getProperty(PID_KEY);
final File statusFile = getStatusFile(logger);
final File pidFile = getPidFile(logger);
try (final Socket socket = new Socket()) {
logger.debug("Connecting to NiFi Registry instance");
socket.setSoTimeout(10000);
socket.connect(new InetSocketAddress("localhost", port));
logger.debug("Established connection to NiFi Registry instance.");
socket.setSoTimeout(10000);
logger.debug("Sending SHUTDOWN Command to port {}", port);
final OutputStream out = socket.getOutputStream();
out.write((SHUTDOWN_CMD + " " + secretKey + "\n").getBytes(StandardCharsets.UTF_8));
out.flush();
socket.shutdownOutput();
final InputStream in = socket.getInputStream();
int lastChar;
final StringBuilder sb = new StringBuilder();
while ((lastChar = in.read()) > -1) {
sb.append((char) lastChar);
}
final String response = sb.toString().trim();
logger.debug("Received response to SHUTDOWN command: {}", response);
if (SHUTDOWN_CMD.equals(response)) {
logger.info("Apache NiFi Registry has accepted the Shutdown Command and is shutting down now");
if (pid != null) {
final Properties bootstrapProperties = new Properties();
try (final FileInputStream fis = new FileInputStream(bootstrapConfigFile)) {
bootstrapProperties.load(fis);
}
String gracefulShutdown = bootstrapProperties.getProperty(GRACEFUL_SHUTDOWN_PROP, DEFAULT_GRACEFUL_SHUTDOWN_VALUE);
int gracefulShutdownSeconds;
try {
gracefulShutdownSeconds = Integer.parseInt(gracefulShutdown);
} catch (final NumberFormatException nfe) {
gracefulShutdownSeconds = Integer.parseInt(DEFAULT_GRACEFUL_SHUTDOWN_VALUE);
}
notifyStop();
final long startWait = System.nanoTime();
while (isProcessRunning(pid, logger)) {
logger.info("Waiting for Apache NiFi Registry to finish shutting down...");
final long waitNanos = System.nanoTime() - startWait;
final long waitSeconds = TimeUnit.NANOSECONDS.toSeconds(waitNanos);
if (waitSeconds >= gracefulShutdownSeconds && gracefulShutdownSeconds > 0) {
if (isProcessRunning(pid, logger)) {
logger.warn("NiFi Registry has not finished shutting down after {} seconds. Killing process.", gracefulShutdownSeconds);
try {
killProcessTree(pid, logger);
} catch (final IOException ioe) {
logger.error("Failed to kill Process with PID {}", pid);
}
}
break;
} else {
try {
Thread.sleep(2000L);
} catch (final InterruptedException ie) {
}
}
}
if (statusFile.exists() && !statusFile.delete()) {
logger.error("Failed to delete status file {}; this file should be cleaned up manually", statusFile);
}
if (pidFile.exists() && !pidFile.delete()) {
logger.error("Failed to delete pid file {}; this file should be cleaned up manually", pidFile);
}
logger.info("NiFi Registry has finished shutting down.");
}
} else {
logger.error("When sending SHUTDOWN command to NiFi Registry , got unexpected response {}", response);
}
} catch (final IOException ioe) {
if (pid == null) {
logger.error("Failed to send shutdown command to port {} due to {}. No PID found for the NiFi Registry process, so unable to kill process; "
+ "the process should be killed manually.", new Object[]{port, ioe.toString()});
} else {
logger.error("Failed to send shutdown command to port {} due to {}. Will kill the NiFi Registry Process with PID {}.", port, ioe.toString(), pid);
notifyStop();
killProcessTree(pid, logger);
if (statusFile.exists() && !statusFile.delete()) {
logger.error("Failed to delete status file {}; this file should be cleaned up manually", statusFile);
}
}
} finally {
if (lockFile.exists() && !lockFile.delete()) {
logger.error("Failed to delete lock file {}; this file should be cleaned up manually", lockFile);
}
}
}
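    // Enumerates direct children of a PID with "ps -o pid --no-headers --ppid <pid>". Note that the
    // --no-headers and --ppid flags are procps (Linux-style) options and may not be available on every
    // platform that provides a ps command.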
private static List<String> getChildProcesses(final String ppid) throws IOException {
final Process proc = Runtime.getRuntime().exec(new String[]{"ps", "-o", "pid", "--no-headers", "--ppid", ppid});
final List<String> childPids = new ArrayList<>();
try (final InputStream in = proc.getInputStream();
final BufferedReader reader = new BufferedReader(new InputStreamReader(in))) {
String line;
while ((line = reader.readLine()) != null) {
childPids.add(line.trim());
}
}
return childPids;
}
private void killProcessTree(final String pid, final Logger logger) throws IOException {
logger.debug("Killing Process Tree for PID {}", pid);
final List<String> children = getChildProcesses(pid);
logger.debug("Children of PID {}: {}", new Object[]{pid, children});
for (final String childPid : children) {
killProcessTree(childPid, logger);
}
Runtime.getRuntime().exec(new String[]{"kill", "-9", pid});
}
public static boolean isAlive(final Process process) {
try {
process.exitValue();
return false;
} catch (final IllegalStateException | IllegalThreadStateException itse) {
return true;
}
}
private String getHostname() {
String hostname = "Unknown Host";
String ip = "Unknown IP Address";
try {
final InetAddress localhost = InetAddress.getLocalHost();
hostname = localhost.getHostName();
ip = localhost.getHostAddress();
} catch (final Exception e) {
defaultLogger.warn("Failed to obtain hostname for notification due to:", e);
}
return hostname + " (" + ip + ")";
}
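    // start() performs the full launch sequence: refuse to start if an instance already answers on the
    // recorded port, remove any stale lock file, load bootstrap.conf, resolve the working directory
    // (working.dir property, else NIFI_REGISTRY_HOME, else the parent of the bin directory), build the
    // classpath from the conf directory plus the lib and lib/shared jars (and lib/java11 on Java 11+),
    // start a NiFiRegistryListener and pass its port to the child via system properties, spawn the
    // org.apache.nifi.registry.NiFiRegistry process, persist its PID, register a shutdown hook, and then
    // monitor the process, restarting it automatically unless a shutdown was requested.
    //
    // Keys consulted from bootstrap.conf include the following (the values shown are illustrative only):
    //   java=java
    //   lib.dir=./lib
    //   conf.dir=./conf
    //   docs.dir=./docs
    //   working.dir=.
    //   graceful.shutdown.seconds=20
    //   java.arg.1=-Xms512m     (every key starting with "java.arg" is appended to the JVM command line)
    //   props.file=./conf/nifi-registry.properties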
@SuppressWarnings({"rawtypes", "unchecked"})
public void start() throws IOException, InterruptedException {
final Integer port = getCurrentPort(cmdLogger);
if (port != null) {
cmdLogger.info("Apache NiFi Registry is already running, listening to Bootstrap on port " + port);
return;
}
final File prevLockFile = getLockFile(cmdLogger);
if (prevLockFile.exists() && !prevLockFile.delete()) {
cmdLogger.warn("Failed to delete previous lock file {}; this file should be cleaned up manually", prevLockFile);
}
final ProcessBuilder builder = new ProcessBuilder();
if (!bootstrapConfigFile.exists()) {
throw new FileNotFoundException(bootstrapConfigFile.getAbsolutePath());
}
final Properties properties = new Properties();
try (final FileInputStream fis = new FileInputStream(bootstrapConfigFile)) {
properties.load(fis);
}
final Map<String, String> props = new HashMap<>();
props.putAll((Map) properties);
// Determine the working dir for launching the NiFi Registry process
// The order of precedence is:
// 1) Specified in bootstrap.conf via working.dir
// 2) NIFI_REGISTRY_HOME env variable
// 3) Parent of bootstrap config file's parent
final File workingDir;
final String specifiedWorkingDir = props.get("working.dir");
final String nifiRegistryHome = System.getenv("NIFI_REGISTRY_HOME");
final File bootstrapConfigAbsoluteFile = bootstrapConfigFile.getAbsoluteFile();
if (!StringUtils.isBlank(specifiedWorkingDir)) {
workingDir = new File(specifiedWorkingDir.trim());
} else if (!StringUtils.isBlank(nifiRegistryHome)) {
workingDir = new File(nifiRegistryHome.trim());
} else {
final File binDir = bootstrapConfigAbsoluteFile.getParentFile();
workingDir = binDir.getParentFile();
}
builder.directory(workingDir);
final String nifiRegistryLogDir = replaceNull(System.getProperty("org.apache.nifi.registry.bootstrap.config.log.dir"), DEFAULT_LOG_DIR).trim();
final String nifiRegistryDocsDir = replaceNull(props.get("docs.dir"), DEFAULT_DOCS_DIR).trim();
final String libFilename = replaceNull(props.get("lib.dir"), "./lib").trim();
File libDir = getFile(libFilename, workingDir);
File libSharedDir = getFile(libFilename + "/shared", workingDir);
final String confFilename = replaceNull(props.get("conf.dir"), "./conf").trim();
File confDir = getFile(confFilename, workingDir);
String nifiRegistryPropsFilename = props.get("props.file");
if (nifiRegistryPropsFilename == null) {
if (confDir.exists()) {
nifiRegistryPropsFilename = new File(confDir, "nifi-registry.properties").getAbsolutePath();
} else {
nifiRegistryPropsFilename = DEFAULT_CONFIG_FILE;
}
}
nifiRegistryPropsFilename = nifiRegistryPropsFilename.trim();
final List<String> javaAdditionalArgs = new ArrayList<>();
for (final Map.Entry<String, String> entry : props.entrySet()) {
final String key = entry.getKey();
final String value = entry.getValue();
if (key.startsWith("java.arg")) {
javaAdditionalArgs.add(value);
}
}
final File[] libSharedFiles = libSharedDir.listFiles(new FilenameFilter() {
@Override
public boolean accept(final File dir, final String filename) {
return filename.toLowerCase().endsWith(".jar");
}
});
if (libSharedFiles == null || libSharedFiles.length == 0) {
throw new RuntimeException("Could not find lib shared directory at " + libSharedDir.getAbsolutePath());
}
final File[] libFiles = libDir.listFiles(new FilenameFilter() {
@Override
public boolean accept(final File dir, final String filename) {
return filename.toLowerCase().endsWith(".jar");
}
});
if (libFiles == null || libFiles.length == 0) {
throw new RuntimeException("Could not find lib directory at " + libDir.getAbsolutePath());
}
final File[] confFiles = confDir.listFiles();
if (confFiles == null || confFiles.length == 0) {
throw new RuntimeException("Could not find conf directory at " + confDir.getAbsolutePath());
}
final List<String> cpFiles = new ArrayList<>(confFiles.length + libFiles.length + libSharedFiles.length);
cpFiles.add(confDir.getAbsolutePath());
for (final File file : libSharedFiles) {
cpFiles.add(file.getAbsolutePath());
}
for (final File file : libFiles) {
cpFiles.add(file.getAbsolutePath());
}
final String runtimeJavaVersion = System.getProperty("java.version");
defaultLogger.info("Runtime Java version: {}", runtimeJavaVersion);
        // Derive the major Java version defensively: "java.version" may be of the form "1.8.0_252", "11.0.2" or just "11"
        final int versionDotIndex = runtimeJavaVersion.indexOf('.');
        final String majorJavaVersion = versionDotIndex < 0 ? runtimeJavaVersion : runtimeJavaVersion.substring(0, versionDotIndex);
        if (Integer.parseInt(majorJavaVersion) >= 11) {
// If running on Java 11 or greater, add lib/java11 to the classpath.
// TODO: Once the minimum Java version requirement of NiFi Registry is 11, this processing should be removed.
final String libJava11Filename = replaceNull(props.get("lib.dir"), "./lib").trim() + "/java11";
final File libJava11Dir = getFile(libJava11Filename, workingDir);
if (libJava11Dir.exists()) {
for (final File file : Objects.requireNonNull(libJava11Dir.listFiles((dir, filename) -> filename.toLowerCase().endsWith(".jar")))) {
cpFiles.add(file.getAbsolutePath());
}
}
}
final StringBuilder classPathBuilder = new StringBuilder();
for (int i = 0; i < cpFiles.size(); i++) {
final String filename = cpFiles.get(i);
classPathBuilder.append(filename);
if (i < cpFiles.size() - 1) {
classPathBuilder.append(File.pathSeparatorChar);
}
}
final String classPath = classPathBuilder.toString();
String javaCmd = props.get("java");
if (javaCmd == null) {
javaCmd = DEFAULT_JAVA_CMD;
}
if (javaCmd.equals(DEFAULT_JAVA_CMD)) {
String javaHome = System.getenv("JAVA_HOME");
if (javaHome != null) {
String fileExtension = isWindows() ? ".exe" : "";
File javaFile = new File(javaHome + File.separatorChar + "bin"
+ File.separatorChar + "java" + fileExtension);
if (javaFile.exists() && javaFile.canExecute()) {
javaCmd = javaFile.getAbsolutePath();
}
}
}
final NiFiRegistryListener listener = new NiFiRegistryListener();
final int listenPort = listener.start(this);
final List<String> cmd = new ArrayList<>();
cmd.add(javaCmd);
cmd.add("-classpath");
cmd.add(classPath);
cmd.addAll(javaAdditionalArgs);
cmd.add("-Dnifi.registry.properties.file.path=" + nifiRegistryPropsFilename);
cmd.add("-Dnifi.registry.bootstrap.config.file.path=" + bootstrapConfigFile.getAbsolutePath());
cmd.add("-Dnifi.registry.bootstrap.listen.port=" + listenPort);
cmd.add("-Dnifi.registry.bootstrap.config.docs.dir=" + nifiRegistryDocsDir);
cmd.add("-Dapp=NiFiRegistry");
cmd.add("-Dorg.apache.nifi.registry.bootstrap.config.log.dir=" + nifiRegistryLogDir);
if (runtimeJavaVersion.startsWith("9") || runtimeJavaVersion.startsWith("10")) {
            // running on Java 9 or 10, the internal java.xml.bind module must be made available explicitly
cmd.add("--add-modules=java.xml.bind");
}
cmd.add("org.apache.nifi.registry.NiFiRegistry");
builder.command(cmd);
final StringBuilder cmdBuilder = new StringBuilder();
for (final String s : cmd) {
cmdBuilder.append(s).append(" ");
}
cmdLogger.info("Starting Apache NiFi Registry...");
cmdLogger.info("Working Directory: {}", workingDir.getAbsolutePath());
cmdLogger.info("Command: {}", cmdBuilder.toString());
String gracefulShutdown = props.get(GRACEFUL_SHUTDOWN_PROP);
if (gracefulShutdown == null) {
gracefulShutdown = DEFAULT_GRACEFUL_SHUTDOWN_VALUE;
}
final int gracefulShutdownSeconds;
try {
gracefulShutdownSeconds = Integer.parseInt(gracefulShutdown);
} catch (final NumberFormatException nfe) {
throw new NumberFormatException("The '" + GRACEFUL_SHUTDOWN_PROP + "' property in Bootstrap Config File "
+ bootstrapConfigAbsoluteFile.getAbsolutePath() + " has an invalid value. Must be a non-negative integer");
}
if (gracefulShutdownSeconds < 0) {
throw new NumberFormatException("The '" + GRACEFUL_SHUTDOWN_PROP + "' property in Bootstrap Config File "
+ bootstrapConfigAbsoluteFile.getAbsolutePath() + " has an invalid value. Must be a non-negative integer");
}
Process process = builder.start();
handleLogging(process);
Long pid = OSUtils.getProcessId(process, cmdLogger);
if (pid == null) {
cmdLogger.warn("Launched Apache NiFi Registry but could not determined the Process ID");
} else {
nifiRegistryPid = pid;
final Properties pidProperties = new Properties();
pidProperties.setProperty(PID_KEY, String.valueOf(nifiRegistryPid));
savePidProperties(pidProperties, cmdLogger);
cmdLogger.info("Launched Apache NiFi Registry with Process ID {}", pid);
}
shutdownHook = new ShutdownHook(process, this, secretKey, gracefulShutdownSeconds, loggingExecutor);
final Runtime runtime = Runtime.getRuntime();
runtime.addShutdownHook(shutdownHook);
final String hostname = getHostname();
final SimpleDateFormat sdf = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss.SSS");
String now = sdf.format(System.currentTimeMillis());
String user = System.getProperty("user.name");
if (user == null || user.trim().isEmpty()) {
user = "Unknown User";
}
while (true) {
final boolean alive = isAlive(process);
if (alive) {
try {
Thread.sleep(1000L);
} catch (final InterruptedException ie) {
}
} else {
try {
runtime.removeShutdownHook(shutdownHook);
} catch (final IllegalStateException ise) {
// happens when already shutting down
}
now = sdf.format(System.currentTimeMillis());
if (autoRestartNiFiRegistry) {
final File statusFile = getStatusFile(defaultLogger);
if (!statusFile.exists()) {
defaultLogger.info("Status File no longer exists. Will not restart NiFi Registry ");
return;
}
final File lockFile = getLockFile(defaultLogger);
if (lockFile.exists()) {
defaultLogger.info("A shutdown was initiated. Will not restart NiFi Registry ");
return;
}
final boolean previouslyStarted = getNifiRegistryStarted();
if (!previouslyStarted) {
defaultLogger.info("NiFi Registry never started. Will not restart NiFi Registry ");
return;
} else {
setNiFiRegistryStarted(false);
}
defaultLogger.warn("Apache NiFi Registry appears to have died. Restarting...");
secretKey = null;
process = builder.start();
handleLogging(process);
pid = OSUtils.getProcessId(process, defaultLogger);
if (pid == null) {
cmdLogger.warn("Launched Apache NiFi Registry but could not obtain the Process ID");
} else {
nifiRegistryPid = pid;
final Properties pidProperties = new Properties();
pidProperties.setProperty(PID_KEY, String.valueOf(nifiRegistryPid));
savePidProperties(pidProperties, defaultLogger);
cmdLogger.info("Launched Apache NiFi Registry with Process ID " + pid);
}
shutdownHook = new ShutdownHook(process, this, secretKey, gracefulShutdownSeconds, loggingExecutor);
runtime.addShutdownHook(shutdownHook);
final boolean started = waitForStart();
if (started) {
defaultLogger.info("Successfully started Apache NiFi Registry {}", (pid == null ? "" : " with PID " + pid));
} else {
defaultLogger.error("Apache NiFi Registry does not appear to have started");
}
} else {
return;
}
}
}
}
private void handleLogging(final Process process) {
final Set<Future<?>> existingFutures = loggingFutures;
if (existingFutures != null) {
for (final Future<?> future : existingFutures) {
future.cancel(false);
}
}
final Future<?> stdOutFuture = loggingExecutor.submit(new Runnable() {
@Override
public void run() {
final Logger stdOutLogger = LoggerFactory.getLogger("org.apache.nifi.registry.StdOut");
final InputStream in = process.getInputStream();
try (final BufferedReader reader = new BufferedReader(new InputStreamReader(in))) {
String line;
while ((line = reader.readLine()) != null) {
stdOutLogger.info(line);
}
} catch (IOException e) {
defaultLogger.error("Failed to read from NiFi Registry's Standard Out stream", e);
}
}
});
final Future<?> stdErrFuture = loggingExecutor.submit(new Runnable() {
@Override
public void run() {
final Logger stdErrLogger = LoggerFactory.getLogger("org.apache.nifi.registry.StdErr");
final InputStream in = process.getErrorStream();
try (final BufferedReader reader = new BufferedReader(new InputStreamReader(in))) {
String line;
while ((line = reader.readLine()) != null) {
stdErrLogger.error(line);
}
} catch (IOException e) {
defaultLogger.error("Failed to read from NiFi Registry's Standard Error stream", e);
}
}
});
final Set<Future<?>> futures = new HashSet<>();
futures.add(stdOutFuture);
futures.add(stdErrFuture);
this.loggingFutures = futures;
}
private boolean isWindows() {
final String osName = System.getProperty("os.name");
return osName != null && osName.toLowerCase().contains("win");
}
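    // Blocks until a positive command/control port has been published by setNiFiRegistryCommandControlPort()
    // (typically invoked via the NiFiRegistryListener) or STARTUP_WAIT_SECONDS elapses; returns true only
    // if the port was observed in time.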
private boolean waitForStart() {
lock.lock();
try {
final long startTime = System.nanoTime();
while (ccPort < 1) {
try {
startupCondition.await(1, TimeUnit.SECONDS);
} catch (final InterruptedException ie) {
return false;
}
final long waitNanos = System.nanoTime() - startTime;
final long waitSeconds = TimeUnit.NANOSECONDS.toSeconds(waitNanos);
if (waitSeconds > STARTUP_WAIT_SECONDS) {
return false;
}
}
} finally {
lock.unlock();
}
return true;
}
private File getFile(final String filename, final File workingDir) {
File file = new File(filename);
if (!file.isAbsolute()) {
file = new File(workingDir, filename);
}
return file;
}
private String replaceNull(final String value, final String replacement) {
return (value == null) ? replacement : value;
}
void setAutoRestartNiFiRegistry(final boolean restart) {
this.autoRestartNiFiRegistry = restart;
}
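    // Callback used once the child process reports its command/control port and secret key (the
    // NiFiRegistryListener created in start() hands this object to that machinery). It records both
    // values, updates the shutdown hook, and persists pid/port/secret.key to the status file so that
    // later stop/status/dump invocations can reach the running instance.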
void setNiFiRegistryCommandControlPort(final int port, final String secretKey) throws IOException {
if (this.secretKey != null && this.ccPort != UNINITIALIZED_CC_PORT) {
defaultLogger.warn("Blocking attempt to change NiFi Registry command port and secret after they have already been initialized. requestedPort={}", port);
return;
}
this.ccPort = port;
this.secretKey = secretKey;
if (shutdownHook != null) {
shutdownHook.setSecretKey(secretKey);
}
final File statusFile = getStatusFile(defaultLogger);
final Properties nifiProps = new Properties();
if (nifiRegistryPid != -1) {
nifiProps.setProperty(PID_KEY, String.valueOf(nifiRegistryPid));
}
nifiProps.setProperty("port", String.valueOf(ccPort));
nifiProps.setProperty("secret.key", secretKey);
try {
savePidProperties(nifiProps, defaultLogger);
} catch (final IOException ioe) {
defaultLogger.warn("Apache NiFi Registry has started but failed to persist NiFi Registry Port information to {} due to {}", new Object[]{statusFile.getAbsolutePath(), ioe});
}
defaultLogger.info("Apache NiFi Registry now running and listening for Bootstrap requests on port {}", port);
}
int getNiFiRegistryCommandControlPort() {
return this.ccPort;
}
void setNiFiRegistryStarted(final boolean nifiStarted) {
startedLock.lock();
try {
this.nifiRegistryStarted = nifiStarted;
} finally {
startedLock.unlock();
}
}
boolean getNifiRegistryStarted() {
startedLock.lock();
try {
return nifiRegistryStarted;
} finally {
startedLock.unlock();
}
}
private static class Status {
private final Integer port;
private final String pid;
private final Boolean respondingToPing;
private final Boolean processRunning;
public Status(final Integer port, final String pid, final Boolean respondingToPing, final Boolean processRunning) {
this.port = port;
this.pid = pid;
this.respondingToPing = respondingToPing;
this.processRunning = processRunning;
}
public String getPid() {
return pid;
}
public Integer getPort() {
return port;
}
public boolean isRespondingToPing() {
return Boolean.TRUE.equals(respondingToPing);
}
public boolean isProcessRunning() {
return Boolean.TRUE.equals(processRunning);
}
}
}
| [
"\"NIFI_REGISTRY_HOME\"",
"\"NIFI_REGISTRY_HOME\"",
"\"JAVA_HOME\""
] | [] | [
"JAVA_HOME",
"NIFI_REGISTRY_HOME"
] | [] | ["JAVA_HOME", "NIFI_REGISTRY_HOME"] | java | 2 | 0 | |
src/twisted/conch/test/test_recvline.py | # -*- test-case-name: twisted.conch.test.test_recvline -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.conch.recvline} and fixtures for testing related
functionality.
"""
import os
import sys
from unittest import skipIf
from twisted.conch import recvline
from twisted.conch.insults import insults
from twisted.cred import portal
from twisted.internet import defer, error
from twisted.python import components, filepath, reflect
from twisted.python.compat import iterbytes
from twisted.python.reflect import requireModule
from twisted.test.proto_helpers import StringTransport
from twisted.trial.unittest import SkipTest, TestCase
stdio = requireModule("twisted.conch.stdio")
properEnv = dict(os.environ)
properEnv["PYTHONPATH"] = os.pathsep.join(sys.path)
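# properEnv is handed to reactor.spawnProcess() in _StdioMixin below; exporting the parent's sys.path via
# PYTHONPATH lets the spawned child process import the same Twisted code that is running these tests.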
class ArrowsTests(TestCase):
def setUp(self):
self.underlyingTransport = StringTransport()
self.pt = insults.ServerProtocol()
self.p = recvline.HistoricRecvLine()
self.pt.protocolFactory = lambda: self.p
self.pt.factory = self
self.pt.makeConnection(self.underlyingTransport)
def test_printableCharacters(self):
"""
When L{HistoricRecvLine} receives a printable character,
it adds it to the current line buffer.
"""
self.p.keystrokeReceived(b"x", None)
self.p.keystrokeReceived(b"y", None)
self.p.keystrokeReceived(b"z", None)
self.assertEqual(self.p.currentLineBuffer(), (b"xyz", b""))
def test_horizontalArrows(self):
"""
When L{HistoricRecvLine} receives a LEFT_ARROW or
RIGHT_ARROW keystroke it moves the cursor left or right
in the current line buffer, respectively.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in iterbytes(b"xyz"):
kR(ch)
self.assertEqual(self.p.currentLineBuffer(), (b"xyz", b""))
kR(self.pt.RIGHT_ARROW)
self.assertEqual(self.p.currentLineBuffer(), (b"xyz", b""))
kR(self.pt.LEFT_ARROW)
self.assertEqual(self.p.currentLineBuffer(), (b"xy", b"z"))
kR(self.pt.LEFT_ARROW)
self.assertEqual(self.p.currentLineBuffer(), (b"x", b"yz"))
kR(self.pt.LEFT_ARROW)
self.assertEqual(self.p.currentLineBuffer(), (b"", b"xyz"))
kR(self.pt.LEFT_ARROW)
self.assertEqual(self.p.currentLineBuffer(), (b"", b"xyz"))
kR(self.pt.RIGHT_ARROW)
self.assertEqual(self.p.currentLineBuffer(), (b"x", b"yz"))
kR(self.pt.RIGHT_ARROW)
self.assertEqual(self.p.currentLineBuffer(), (b"xy", b"z"))
kR(self.pt.RIGHT_ARROW)
self.assertEqual(self.p.currentLineBuffer(), (b"xyz", b""))
kR(self.pt.RIGHT_ARROW)
self.assertEqual(self.p.currentLineBuffer(), (b"xyz", b""))
def test_newline(self):
"""
        When L{HistoricRecvLine} receives a newline, it adds the current
line buffer to the end of its history buffer.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in iterbytes(b"xyz\nabc\n123\n"):
kR(ch)
self.assertEqual(self.p.currentHistoryBuffer(), ((b"xyz", b"abc", b"123"), ()))
kR(b"c")
kR(b"b")
kR(b"a")
self.assertEqual(self.p.currentHistoryBuffer(), ((b"xyz", b"abc", b"123"), ()))
kR(b"\n")
self.assertEqual(
self.p.currentHistoryBuffer(), ((b"xyz", b"abc", b"123", b"cba"), ())
)
def test_verticalArrows(self):
"""
        When L{HistoricRecvLine} receives UP_ARROW or DOWN_ARROW
        keystrokes it moves the current index in the history buffer
        up or down, respectively, and resets the current line buffer
        to the previous or next line in history.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in iterbytes(b"xyz\nabc\n123\n"):
kR(ch)
self.assertEqual(self.p.currentHistoryBuffer(), ((b"xyz", b"abc", b"123"), ()))
self.assertEqual(self.p.currentLineBuffer(), (b"", b""))
kR(self.pt.UP_ARROW)
self.assertEqual(self.p.currentHistoryBuffer(), ((b"xyz", b"abc"), (b"123",)))
self.assertEqual(self.p.currentLineBuffer(), (b"123", b""))
kR(self.pt.UP_ARROW)
self.assertEqual(self.p.currentHistoryBuffer(), ((b"xyz",), (b"abc", b"123")))
self.assertEqual(self.p.currentLineBuffer(), (b"abc", b""))
kR(self.pt.UP_ARROW)
self.assertEqual(self.p.currentHistoryBuffer(), ((), (b"xyz", b"abc", b"123")))
self.assertEqual(self.p.currentLineBuffer(), (b"xyz", b""))
kR(self.pt.UP_ARROW)
self.assertEqual(self.p.currentHistoryBuffer(), ((), (b"xyz", b"abc", b"123")))
self.assertEqual(self.p.currentLineBuffer(), (b"xyz", b""))
for i in range(4):
kR(self.pt.DOWN_ARROW)
self.assertEqual(self.p.currentHistoryBuffer(), ((b"xyz", b"abc", b"123"), ()))
def test_home(self):
"""
When L{HistoricRecvLine} receives a HOME keystroke it moves the
cursor to the beginning of the current line buffer.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in iterbytes(b"hello, world"):
kR(ch)
self.assertEqual(self.p.currentLineBuffer(), (b"hello, world", b""))
kR(self.pt.HOME)
self.assertEqual(self.p.currentLineBuffer(), (b"", b"hello, world"))
def test_end(self):
"""
When L{HistoricRecvLine} receives an END keystroke it moves the cursor
to the end of the current line buffer.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in iterbytes(b"hello, world"):
kR(ch)
self.assertEqual(self.p.currentLineBuffer(), (b"hello, world", b""))
kR(self.pt.HOME)
kR(self.pt.END)
self.assertEqual(self.p.currentLineBuffer(), (b"hello, world", b""))
def test_backspace(self):
"""
When L{HistoricRecvLine} receives a BACKSPACE keystroke it deletes
the character immediately before the cursor.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in iterbytes(b"xyz"):
kR(ch)
self.assertEqual(self.p.currentLineBuffer(), (b"xyz", b""))
kR(self.pt.BACKSPACE)
self.assertEqual(self.p.currentLineBuffer(), (b"xy", b""))
kR(self.pt.LEFT_ARROW)
kR(self.pt.BACKSPACE)
self.assertEqual(self.p.currentLineBuffer(), (b"", b"y"))
kR(self.pt.BACKSPACE)
self.assertEqual(self.p.currentLineBuffer(), (b"", b"y"))
def test_delete(self):
"""
When L{HistoricRecvLine} receives a DELETE keystroke, it
        deletes the character immediately after the cursor.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in iterbytes(b"xyz"):
kR(ch)
self.assertEqual(self.p.currentLineBuffer(), (b"xyz", b""))
kR(self.pt.DELETE)
self.assertEqual(self.p.currentLineBuffer(), (b"xyz", b""))
kR(self.pt.LEFT_ARROW)
kR(self.pt.DELETE)
self.assertEqual(self.p.currentLineBuffer(), (b"xy", b""))
kR(self.pt.LEFT_ARROW)
kR(self.pt.DELETE)
self.assertEqual(self.p.currentLineBuffer(), (b"x", b""))
kR(self.pt.LEFT_ARROW)
kR(self.pt.DELETE)
self.assertEqual(self.p.currentLineBuffer(), (b"", b""))
kR(self.pt.DELETE)
self.assertEqual(self.p.currentLineBuffer(), (b"", b""))
def test_insert(self):
"""
When not in INSERT mode, L{HistoricRecvLine} inserts the typed
character at the cursor before the next character.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in iterbytes(b"xyz"):
kR(ch)
kR(self.pt.LEFT_ARROW)
kR(b"A")
self.assertEqual(self.p.currentLineBuffer(), (b"xyA", b"z"))
kR(self.pt.LEFT_ARROW)
kR(b"B")
self.assertEqual(self.p.currentLineBuffer(), (b"xyB", b"Az"))
def test_typeover(self):
"""
When in INSERT mode and upon receiving a keystroke with a printable
character, L{HistoricRecvLine} replaces the character at
the cursor with the typed character rather than inserting before.
Ah, the ironies of INSERT mode.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in iterbytes(b"xyz"):
kR(ch)
kR(self.pt.INSERT)
kR(self.pt.LEFT_ARROW)
kR(b"A")
self.assertEqual(self.p.currentLineBuffer(), (b"xyA", b""))
kR(self.pt.LEFT_ARROW)
kR(b"B")
self.assertEqual(self.p.currentLineBuffer(), (b"xyB", b""))
def test_unprintableCharacters(self):
"""
When L{HistoricRecvLine} receives a keystroke for an unprintable
function key with no assigned behavior, the line buffer is unmodified.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
pt = self.pt
for ch in (
pt.F1,
pt.F2,
pt.F3,
pt.F4,
pt.F5,
pt.F6,
pt.F7,
pt.F8,
pt.F9,
pt.F10,
pt.F11,
pt.F12,
pt.PGUP,
pt.PGDN,
):
kR(ch)
self.assertEqual(self.p.currentLineBuffer(), (b"", b""))
from twisted.conch import telnet
from twisted.conch.insults import helper
from twisted.conch.test.loopback import LoopbackRelay
class EchoServer(recvline.HistoricRecvLine):
def lineReceived(self, line):
self.terminal.write(line + b"\n" + self.ps[self.pn])
# An insults API for this would be nice.
left = b"\x1b[D"
right = b"\x1b[C"
up = b"\x1b[A"
down = b"\x1b[B"
insert = b"\x1b[2~"
home = b"\x1b[1~"
delete = b"\x1b[3~"
end = b"\x1b[4~"
backspace = b"\x7f"
from twisted.cred import checkers
try:
from twisted.conch.manhole_ssh import (
ConchFactory,
TerminalRealm,
TerminalSession,
TerminalSessionTransport,
TerminalUser,
)
from twisted.conch.ssh import (
channel,
connection,
keys,
session,
transport,
userauth,
)
except ImportError:
ssh = False
else:
ssh = True
class SessionChannel(channel.SSHChannel):
name = b"session"
def __init__(
self, protocolFactory, protocolArgs, protocolKwArgs, width, height, *a, **kw
):
channel.SSHChannel.__init__(self, *a, **kw)
self.protocolFactory = protocolFactory
self.protocolArgs = protocolArgs
self.protocolKwArgs = protocolKwArgs
self.width = width
self.height = height
def channelOpen(self, data):
term = session.packRequest_pty_req(
b"vt102", (self.height, self.width, 0, 0), b""
)
self.conn.sendRequest(self, b"pty-req", term)
self.conn.sendRequest(self, b"shell", b"")
self._protocolInstance = self.protocolFactory(
*self.protocolArgs, **self.protocolKwArgs
)
self._protocolInstance.factory = self
self._protocolInstance.makeConnection(self)
def closed(self):
self._protocolInstance.connectionLost(error.ConnectionDone())
def dataReceived(self, data):
self._protocolInstance.dataReceived(data)
class TestConnection(connection.SSHConnection):
def __init__(
self, protocolFactory, protocolArgs, protocolKwArgs, width, height, *a, **kw
):
connection.SSHConnection.__init__(self, *a, **kw)
self.protocolFactory = protocolFactory
self.protocolArgs = protocolArgs
self.protocolKwArgs = protocolKwArgs
self.width = width
self.height = height
def serviceStarted(self):
self.__channel = SessionChannel(
self.protocolFactory,
self.protocolArgs,
self.protocolKwArgs,
self.width,
self.height,
)
self.openChannel(self.__channel)
def write(self, data):
return self.__channel.write(data)
class TestAuth(userauth.SSHUserAuthClient):
def __init__(self, username, password, *a, **kw):
userauth.SSHUserAuthClient.__init__(self, username, *a, **kw)
self.password = password
def getPassword(self):
return defer.succeed(self.password)
class TestTransport(transport.SSHClientTransport):
def __init__(
self,
protocolFactory,
protocolArgs,
protocolKwArgs,
username,
password,
width,
height,
*a,
**kw,
):
self.protocolFactory = protocolFactory
self.protocolArgs = protocolArgs
self.protocolKwArgs = protocolKwArgs
self.username = username
self.password = password
self.width = width
self.height = height
def verifyHostKey(self, hostKey, fingerprint):
return defer.succeed(True)
def connectionSecure(self):
self.__connection = TestConnection(
self.protocolFactory,
self.protocolArgs,
self.protocolKwArgs,
self.width,
self.height,
)
self.requestService(
TestAuth(self.username, self.password, self.__connection)
)
def write(self, data):
return self.__connection.write(data)
class TestSessionTransport(TerminalSessionTransport):
def protocolFactory(self):
return self.avatar.conn.transport.factory.serverProtocol()
class TestSession(TerminalSession):
transportFactory = TestSessionTransport
class TestUser(TerminalUser):
pass
components.registerAdapter(TestSession, TestUser, session.ISession)
class NotifyingExpectableBuffer(helper.ExpectableBuffer):
def __init__(self):
self.onConnection = defer.Deferred()
self.onDisconnection = defer.Deferred()
def connectionMade(self):
helper.ExpectableBuffer.connectionMade(self)
self.onConnection.callback(self)
def connectionLost(self, reason):
self.onDisconnection.errback(reason)
class _BaseMixin:
WIDTH = 80
HEIGHT = 24
def _assertBuffer(self, lines):
receivedLines = self.recvlineClient.__bytes__().splitlines()
expectedLines = lines + ([b""] * (self.HEIGHT - len(lines) - 1))
self.assertEqual(len(receivedLines), len(expectedLines))
for i in range(len(receivedLines)):
self.assertEqual(
receivedLines[i],
expectedLines[i],
b"".join(receivedLines[max(0, i - 1) : i + 1])
+ b" != "
+ b"".join(expectedLines[max(0, i - 1) : i + 1]),
)
def _trivialTest(self, inputLine, output):
done = self.recvlineClient.expect(b"done")
self._testwrite(inputLine)
def finished(ign):
self._assertBuffer(output)
return done.addCallback(finished)
class _SSHMixin(_BaseMixin):
def setUp(self):
if not ssh:
raise SkipTest(
"cryptography requirements missing, can't run historic "
"recvline tests over ssh"
)
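        # Wiring for the loopback SSH test: the server side stacks the configured serverProtocol behind
        # an insults ServerProtocol and a Conch SSH server, the client side stacks a
        # NotifyingExpectableBuffer (the in-memory "screen") behind an insults ClientProtocol and the
        # test SSH client, and the two are joined with LoopbackRelay so all traffic stays in memory.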
u, p = b"testuser", b"testpass"
rlm = TerminalRealm()
rlm.userFactory = TestUser
rlm.chainedProtocolFactory = lambda: insultsServer
checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
checker.addUser(u, p)
ptl = portal.Portal(rlm)
ptl.registerChecker(checker)
sshFactory = ConchFactory(ptl)
sshKey = keys._getPersistentRSAKey(
filepath.FilePath(self.mktemp()), keySize=1024
)
sshFactory.publicKeys[b"ssh-rsa"] = sshKey
sshFactory.privateKeys[b"ssh-rsa"] = sshKey
sshFactory.serverProtocol = self.serverProtocol
sshFactory.startFactory()
recvlineServer = self.serverProtocol()
insultsServer = insults.ServerProtocol(lambda: recvlineServer)
sshServer = sshFactory.buildProtocol(None)
clientTransport = LoopbackRelay(sshServer)
recvlineClient = NotifyingExpectableBuffer()
insultsClient = insults.ClientProtocol(lambda: recvlineClient)
sshClient = TestTransport(
lambda: insultsClient, (), {}, u, p, self.WIDTH, self.HEIGHT
)
serverTransport = LoopbackRelay(sshClient)
sshClient.makeConnection(clientTransport)
sshServer.makeConnection(serverTransport)
self.recvlineClient = recvlineClient
self.sshClient = sshClient
self.sshServer = sshServer
self.clientTransport = clientTransport
self.serverTransport = serverTransport
return recvlineClient.onConnection
def _testwrite(self, data):
self.sshClient.write(data)
from twisted.conch.test import test_telnet
class TestInsultsClientProtocol(insults.ClientProtocol, test_telnet.TestProtocol):
pass
class TestInsultsServerProtocol(insults.ServerProtocol, test_telnet.TestProtocol):
pass
class _TelnetMixin(_BaseMixin):
def setUp(self):
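        # Same loopback arrangement as the SSH mixin, but the insults protocols are carried over a pair
        # of TelnetTransports instead of an SSH connection.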
recvlineServer = self.serverProtocol()
insultsServer = TestInsultsServerProtocol(lambda: recvlineServer)
telnetServer = telnet.TelnetTransport(lambda: insultsServer)
clientTransport = LoopbackRelay(telnetServer)
recvlineClient = NotifyingExpectableBuffer()
insultsClient = TestInsultsClientProtocol(lambda: recvlineClient)
telnetClient = telnet.TelnetTransport(lambda: insultsClient)
serverTransport = LoopbackRelay(telnetClient)
telnetClient.makeConnection(clientTransport)
telnetServer.makeConnection(serverTransport)
serverTransport.clearBuffer()
clientTransport.clearBuffer()
self.recvlineClient = recvlineClient
self.telnetClient = telnetClient
self.clientTransport = clientTransport
self.serverTransport = serverTransport
return recvlineClient.onConnection
def _testwrite(self, data):
self.telnetClient.write(data)
class _StdioMixin(_BaseMixin):
def setUp(self):
# A memory-only terminal emulator, into which the server will
# write things and make other state changes. What ends up
# here is basically what a user would have seen on their
# screen.
testTerminal = NotifyingExpectableBuffer()
# An insults client protocol which will translate bytes
# received from the child process into keystroke commands for
# an ITerminalProtocol.
insultsClient = insults.ClientProtocol(lambda: testTerminal)
# A process protocol which will translate stdout and stderr
# received from the child process to dataReceived calls and
# error reporting on an insults client protocol.
processClient = stdio.TerminalProcessProtocol(insultsClient)
# Run twisted/conch/stdio.py with the name of a class
# implementing ITerminalProtocol. This class will be used to
# handle bytes we send to the child process.
exe = sys.executable
module = stdio.__file__
if module.endswith(".pyc") or module.endswith(".pyo"):
module = module[:-1]
args = [exe, module, reflect.qual(self.serverProtocol)]
from twisted.internet import reactor
clientTransport = reactor.spawnProcess(
processClient, exe, args, env=properEnv, usePTY=True
)
self.recvlineClient = self.testTerminal = testTerminal
self.processClient = processClient
self.clientTransport = clientTransport
# Wait for the process protocol and test terminal to become
# connected before proceeding. The former should always
# happen first, but it doesn't hurt to be safe.
return defer.gatherResults(
filter(None, [processClient.onConnection, testTerminal.expect(b">>> ")])
)
def tearDown(self):
# Kill the child process. We're done with it.
try:
self.clientTransport.signalProcess("KILL")
except (error.ProcessExitedAlready, OSError):
pass
def trap(failure):
failure.trap(error.ProcessTerminated)
self.assertIsNone(failure.value.exitCode)
self.assertEqual(failure.value.status, 9)
return self.testTerminal.onDisconnection.addErrback(trap)
def _testwrite(self, data):
self.clientTransport.write(data)
class RecvlineLoopbackMixin:
serverProtocol = EchoServer
def testSimple(self):
return self._trivialTest(
b"first line\ndone", [b">>> first line", b"first line", b">>> done"]
)
def testLeftArrow(self):
return self._trivialTest(
insert + b"first line" + left * 4 + b"xxxx\ndone",
[b">>> first xxxx", b"first xxxx", b">>> done"],
)
def testRightArrow(self):
return self._trivialTest(
insert + b"right line" + left * 4 + right * 2 + b"xx\ndone",
[b">>> right lixx", b"right lixx", b">>> done"],
)
def testBackspace(self):
return self._trivialTest(
b"second line" + backspace * 4 + b"xxxx\ndone",
[b">>> second xxxx", b"second xxxx", b">>> done"],
)
def testDelete(self):
return self._trivialTest(
b"delete xxxx" + left * 4 + delete * 4 + b"line\ndone",
[b">>> delete line", b"delete line", b">>> done"],
)
def testInsert(self):
return self._trivialTest(
b"third ine" + left * 3 + b"l\ndone",
[b">>> third line", b"third line", b">>> done"],
)
def testTypeover(self):
return self._trivialTest(
b"fourth xine" + left * 4 + insert + b"l\ndone",
[b">>> fourth line", b"fourth line", b">>> done"],
)
def testHome(self):
return self._trivialTest(
insert + b"blah line" + home + b"home\ndone",
[b">>> home line", b"home line", b">>> done"],
)
def testEnd(self):
return self._trivialTest(
b"end " + left * 4 + end + b"line\ndone",
[b">>> end line", b"end line", b">>> done"],
)
class RecvlineLoopbackTelnetTests(_TelnetMixin, TestCase, RecvlineLoopbackMixin):
pass
class RecvlineLoopbackSSHTests(_SSHMixin, TestCase, RecvlineLoopbackMixin):
pass
@skipIf(not stdio, "Terminal requirements missing, can't run recvline tests over stdio")
class RecvlineLoopbackStdioTests(_StdioMixin, TestCase, RecvlineLoopbackMixin):
pass
class HistoricRecvlineLoopbackMixin:
serverProtocol = EchoServer
def testUpArrow(self):
return self._trivialTest(
b"first line\n" + up + b"\ndone",
[
b">>> first line",
b"first line",
b">>> first line",
b"first line",
b">>> done",
],
)
def test_DownArrowToPartialLineInHistory(self):
"""
Pressing down arrow to visit an entry that was added to the
history by pressing the up arrow instead of return does not
raise a L{TypeError}.
@see: U{http://twistedmatrix.com/trac/ticket/9031}
@return: A L{defer.Deferred} that fires when C{b"done"} is
echoed back.
"""
return self._trivialTest(
b"first line\n" + b"partial line" + up + down + b"\ndone",
[
b">>> first line",
b"first line",
b">>> partial line",
b"partial line",
b">>> done",
],
)
def testDownArrow(self):
return self._trivialTest(
b"first line\nsecond line\n" + up * 2 + down + b"\ndone",
[
b">>> first line",
b"first line",
b">>> second line",
b"second line",
b">>> second line",
b"second line",
b">>> done",
],
)
class HistoricRecvlineLoopbackTelnetTests(
_TelnetMixin, TestCase, HistoricRecvlineLoopbackMixin
):
pass
class HistoricRecvlineLoopbackSSHTests(
_SSHMixin, TestCase, HistoricRecvlineLoopbackMixin
):
pass
@skipIf(
not stdio,
"Terminal requirements missing, " "can't run historic recvline tests over stdio",
)
class HistoricRecvlineLoopbackStdioTests(
_StdioMixin, TestCase, HistoricRecvlineLoopbackMixin
):
pass
class TransportSequenceTests(TestCase):
"""
L{twisted.conch.recvline.TransportSequence}
"""
def test_invalidSequence(self):
"""
Initializing a L{recvline.TransportSequence} with no args
raises an assertion.
"""
self.assertRaises(AssertionError, recvline.TransportSequence)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
scripts/devstack_sanity_tests_with_setup.py | # Need to import path to test/fixtures and test/scripts/
# Ex : export PYTHONPATH=$PYTHONPATH:/root/test/fixtures/:/root/test/scripts/
#
# To run all tests, you can do 'python sanity_tests.py'. To run specific tests,
# you can do 'python -m testtools.run -l tests'
# Set the env variable PARAMS_FILE to point to your ini file; otherwise it will try to pick up params.ini in the PWD
# Set the env variable SINGLE_NODE_IP if you are running a single node (no need to populate the json file)
#
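# Illustrative invocation (a sketch; the paths and IP below are placeholders, not values from this repo):
#   export PYTHONPATH=$PYTHONPATH:/root/test/fixtures/:/root/test/scripts/
#   export PARAMS_FILE=/root/test/params.ini      # optional, defaults to params.ini in the PWD
#   export SINGLE_NODE_IP=10.0.0.10               # only for single-node runs
#   python devstack_sanity_tests_with_setup.py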
import os
import time
import unittest
from tests import TestSanityFixture
from tests_with_setup import *
from encap_tests import *
from mx_test import *
from test_perms import *
from util import get_os_env
from NewPolicyTests import *
from policyTrafficTests import *
from sdn_policy_traffic_test_topo import *
from ParamTests import ParametrizedTestCase
from tcutils.contrailtestrunner import ContrailHTMLTestRunner
from performance.sanity_with_setup import PerformanceSanity
from servicechain.firewall.sanity_with_setup import SvcMonSanityFixture
from servicechain.mirror.sanity_with_setup import SvcMirrorSanityFixture
from ecmp.sanity_with_setup import ECMPSanityFixture
from securitygroup.sanity import SecurityGroupSanityTests
from discovery_tests_with_setup import TestDiscoveryFixture
from vdns.vdns_tests import TestVdnsFixture
from analytics_tests_with_setup import AnalyticsTestSanity
from vpc.sanity import VPCSanityTests
from vm_vn_tests import TestVMVN
from evpn.evpn_tests import *
if __name__ == "__main__":
if not get_os_env('SCRIPT_TS'):
os.environ['SCRIPT_TS'] = time.strftime("%Y_%m_%d_%H_%M_%S")
if 'PARAMS_FILE' in os.environ:
ini_file = os.environ.get('PARAMS_FILE')
else:
ini_file = 'params.ini'
inputs = ContrailTestInit(ini_file)
inputs.setUp()
print "\nTest Log File : %s" % (inputs.log_file)
suite = unittest.TestSuite()
test_result = unittest.TestResult()
suite.addTest(TestDiscoveryFixture(
'test_all_publishers_registered_to_discovery_service'))
suite.addTest(
TestDiscoveryFixture('test_agent_gets_control_nodes_from_discovery'))
suite.addTest(
TestDiscoveryFixture('test_control_nodes_subscribed_to_ifmap_service'))
suite.addTest(
TestDiscoveryFixture('test_agents_connected_to_collector_service'))
suite.addTest(TestSanity('test_vn_add_delete'))
suite.addTest(TestSanity('test_vm_add_delete'))
suite.addTest(SecurityGroupSanityTests('test_sec_group_add_delete'))
suite.addTest(SecurityGroupSanityTests('test_vm_with_sec_group'))
suite.addTest(TestSanity('test_floating_ip'))
suite.addTest(TestSanity('test_ping_within_vn'))
suite.addTest(TestSanity('test_ipam_add_delete'))
suite.addTest(TestSanityFixture('test_project_add_delete'))
suite.addTest(TestVMVN('test_vm_file_trf_tftp_tests'))
suite.addTest(TestVMVN('test_vm_file_trf_scp_tests'))
suite.addTest(TestSanity('test_ping_on_broadcast_multicast'))
suite.addTest(
TestSanity('test_ping_within_vn_two_vms_two_different_subnets'))
suite.addTest(TestMxSanityFixture('test_mx_gateway'))
suite.addTest(TestMxSanityFixture('test_change_of_rt_in_vn'))
suite.addTest(TestSanity('test_control_node_switchover'))
suite.addTest(
TestSanityFixture('test_process_restart_with_multiple_vn_vm'))
suite.addTest(TestSanityFixture('test_metadata_service'))
suite.addTest(TestSanityFixture('test_generic_link_local_service'))
suite.addTest(TestVdnsFixture('test_vdns_ping_same_vn'))
# suite.addTest(TestSanity('test_verify_generator_collector_connections'))
suite.addTest(AnalyticsTestSanity('test_verify_object_logs'))
suite.addTest(AnalyticsTestSanity('test_verify_flow_tables'))
'''policy support is missing from devstack.
Let one TC fail for now, will add other TCs after support is provided'''
suite.addTest(TestSanity('test_policy_to_deny'))
''' policy support missing from devstack
suite.addTest(TestSanity('test_policy_to_deny'))
suite.addTest(TestSanity('test_remove_policy_with_ref'))
suite.addTest(NewPolicyTestFixture('test_policy'))
suite.addTest(NewPolicyTestFixture('test_policy_modify_vn_policy'))
suite.addTest(NewPolicyTestFixture('test_repeated_policy_modify'))
suite.addTest(policyTrafficTestFixture('test_multi_vn_repeated_policy_update_with_ping'))
suite.addTest(TestSanity('test_process_restart_in_policy_between_vns'))
suite.addTest(SvcMonSanityFixture('test_svc_monitor_datapath'))
suite.addTest(SvcMonSanityFixture('test_svc_in_network_datapath'))
suite.addTest(SvcMonSanityFixture('test_svc_transparent_with_3_instance'))
suite.addTest(SvcMirrorSanityFixture('test_svc_mirroring'))
suite.addTest(TestEncapsulation('test_apply_policy_fip_on_same_vn_gw_mx'))
# euca commands are missing on devstack (euca-create-vpc/subnet etc)
# failure is aborting the script, so not including any TCs.
suite.addTest(VPCSanityTests('test_create_delete_vpc'))
suite.addTest(VPCSanityTests('test_subnet_create_delete'))
suite.addTest(VPCSanityTests('test_ping_between_instances'))
suite.addTest(VPCSanityTests('test_acl_with_association'))
suite.addTest(VPCSanityTests('test_security_group'))
suite.addTest(VPCSanityTests('test_allocate_floating_ip'))
# has reference to policy commands.
suite.addTest(ECMPSanityFixture('test_ecmp_svc_in_network_with_static_route'))
suite.addTest(ECMPSanityFixture('test_ecmp_svc_in_network_nat_with_3_instance'))
suite.addTest(ECMPSanityFixture('test_ecmp_svc_transparent_with_3_instance'))
'''
if inputs.multi_tenancy == 'True':
suite.addTest(TestPerms('test_all'))
suite.addTest(PerformanceSanity('test_netperf_within_vn'))
# suite.addTest(TestEvpnCases('test_with_vxlan_l2_mode'))
# suite.addTest(TestEvpnCases('test_with_vxlan_encap_agent_restart'))
descr = inputs.get_html_description()
if inputs.generate_html_report:
buf = open(inputs.html_report, 'w')
runner = ContrailHTMLTestRunner(
stream=buf,
title='%s Result %s' % (
inputs.log_scenario, inputs.build_id),
description=descr
)
test_result = runner.run(suite)
buf.close()
print "Test HTML Result : %s " % (inputs.html_report)
inputs.upload_results()
file_to_send = inputs.html_report
else:
test_result = unittest.TextTestRunner(verbosity=2).run(suite)
file_to_send = inputs.log_file
inputs.log_any_issues(test_result)
inputs.send_mail(file_to_send)
print "\nTest Log File : %s" % (inputs.log_file)
| [] | [] | [
"PARAMS_FILE",
"SCRIPT_TS"
] | [] | ["PARAMS_FILE", "SCRIPT_TS"] | python | 2 | 0 | |
main.py | import telebot
import datetime
import os
import telegram
from flask import Flask, request
from random import randint
import codecs
TOKEN = ':'
bot = telebot.TeleBot(TOKEN)
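# Reply pools used by the bot below (Russian-language strings picked at random):
# compliments, chat member names, snarky replies, greetings, meme image paths and supportive phrases.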
COMPLIMENTS = ["лучший", "ТОП", "ТОПЧИК", "огонь", "красавчик", "мыло", "ЛОСК", "классный", "садись на бутылку", "шутник"]
NAMES = ["Миша", "Бодя", "Люся", "Варя", "Егор", "Артём", "Даня", "Саня", "Дима", "Женя", "Андрей", "Макс", "Ваня", "Маша", "Наташа", "Настя", "ЛОСК", "ЛОСК"
,"Иванка", "Серёга"]
WORDS = ["че че","ты че э", "бе да ме", "ща на бутылку посажу", "копать лопать", "ха, а ты смИшной", "Пац, ты не понял?", "Ой ты милаха)", "Цемик", "Лайк и подписочка", "Лайк", "Просто киця"]
GREETS = ["Приветули", "Утро утро добро", "Дароу", "Привет.", "ДАРОВА", "Хай", "А на зарядку?", "Доброе)", "Привет)", "Приветик)"]
MEMES = []
for _ in range(1,30):
MEMES.append( "Memes/"+ str(_) + ".jpg")
CUTES = ["Ты чудо)", "У тебя все выйдет", "Уряяя", "Так держать!", "Поднять щиты!!!", "Ты готов", "Если не ты, то кто?", "Герой", "3 богатыря с тобой", "Оленей больше", "Спасибо что живой"]
QUOTES = []
CUTE_WORDS = dict()
for _ in range(32):
CUTE_WORDS[_] = []
letter = 0
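# cute_words.txt appears to be grouped under single-Cyrillic-letter header lines followed by phrases
# for that letter; CUTE_WORDS maps the letter offset (ord(letter) - 1040, i.e. 'А' -> 0) to those phrases.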
with open("cute_words.txt","r", encoding='utf-8') as f:
for line in f:
# print(line)
# print(len(line))
if len(line) == 2 and line != '\n':
#print('$'*40)
# print("--->" + line + "<------")
letter = ord(line[0])
elif len(line) > 1 and letter != 0:
#print(ord(letter))
#print("----- "+ str(letter))
CUTE_WORDS[letter - 1040].append(line)
with open("support.txt",encoding='utf-8') as f:
for line in f:
ct = line.find("»")
QUOTES.append(line[:ct]+ "<i>©" + line[ct+1:] + "</i>")
global_bots = 0
TIMES_WAKE_UP = 1
boolVAR = True
print(QUOTES)
print(CUTE_WORDS)
server = Flask(__name__)
@server.route('/' + TOKEN, methods=['POST'])
def getMessage():
bot.process_new_updates([telebot.types.Update.de_json(request.stream.read().decode("utf-8"))])
return "!", 200
@server.route("/")
def webhook():
bot.remove_webhook()
bot.set_webhook(url='https://evening-tor-60826.herokuapp.com/' + TOKEN)
return "!", 200
def print2(times, msg):
with open("logs.txt", "a+") as f:
f.write(msg)
f.write(str(times))
print(msg)
print(times)
@bot.message_handler(content_types=['text'])
def get_text_messages(message):
global global_bots
global TIMES_WAKE_UP
global boolVAR
global NAMES
global COMPLIMENTS
global GREETS
global MEMES
global CUTES
global CUTE_WORDS
global QUOTES
global_bots +=1
if message.text == "Цитата":
ct = QUOTES[randint(0,len(QUOTES) - 1)].find("»")
bot.send_message(message.chat.id,
QUOTES[randint(0,len(QUOTES) - 1)],
parse_mode = telegram.ParseMode.HTML
)
if message.text.find("Милость") >= 0 and len(message.text) >= 8:
#print(str(CUTE_WORDS))
try:
getLetter = message.text[message.text.find("Милость") + 7 + 1]
print(message.text)
print(getLetter)
if getLetter != '':
bot.send_message(message.chat.id,
"<i>"+CUTE_WORDS[ord(getLetter) - 1072][randint(0,len(CUTE_WORDS[ord(getLetter) - 1072]) - 1)] + "</i>",
parse_mode=telegram.ParseMode.HTML
)
except ValueError as ve:
print(ve)
if message.text == "Рота":
s = bot.send_message(message.chat.id,
"<b>подъем</b>",
parse_mode=telegram.ParseMode.HTML)
#bot.send_message("435112571", str(s))
bot.pin_chat_message(message.chat.id, s.message_id)
if message.text == "Ну че":
bot.send_message(message.chat.id, WORDS[randint(0, len(WORDS) - 1)])
if message.text == "Привет" or message.text == "привет" or message.text == "Прив":
bot.send_message(message.chat.id, GREETS[randint(0, len(GREETS) - 1)])
if message.text == "Хочу милости" or message.text == "Милость" or message.text == "Поддержки дай" or message.text == "Поддержи":
t = randint(0,2)
if t != 0:
ch = randint(0,28)
cnt = randint(1,3)
if cnt == 1:
bot.send_message(message.chat.id,
"<b>" + CUTE_WORDS[ch][randint(0, len(CUTE_WORDS[ch]) - 1)] + "</b>",
parse_mode=telegram.ParseMode.HTML
)
elif cnt == 2:
bot.send_message(message.chat.id,
"<i>" + CUTE_WORDS[ch][randint(0, len(CUTE_WORDS[ch]) - 1)] + "</i>",
parse_mode = telegram.ParseMode.HTML
)
elif cnt == 3:
bot.send_message(message.chat.id, CUTE_WORDS[ch][randint(0, len(CUTE_WORDS[ch]) - 1)])
else:
bot.send_message(message.chat.id, CUTES[randint(0, len(CUTES) - 1)])
if message.text == "Хочу мем" or message.text == "мем":
if (len(MEMES) == 0):
bot.send_message(message.chat.id, "эх, пусто...")
else:
photo = open(MEMES[randint(0, len(MEMES) - 1)], 'rb')
bot.send_photo(message.chat.id, photo)
if message.text == "на лицо пжлста":
photo = open('food.jpg', 'rb')
bot.send_photo(message.chat.id, photo)
# now = datetime.datetime.now()
# bot.send_message(message.chat.id, str(now.minute))
cnt_wake_up_0 = 1
cnt_wake_up_1 = 0
print(str(global_bots) + " ----BOTS ")
if message.text == "/help":
bot.send_message(message.chat.id,
"*Что можно?* \n1. мем\n2. на лицо пжлста\n3. Ну че\n4. Привет \n5. Прив \n6. Поддержи \n7. Милость \n8. Хочу милости\n9. Поддержки дай \n10. Хочу мем \n 11. Рота \n12. Милость [любую букву алфавита(пример: Милость м)] \n13. Цитатa \n За любыми идеями писать @MikeVernik")
if message.text == "/start" and boolVAR:
i = 0
# bot.delete_message(message.chat.id, message.message_id);
times = [1,1,1,1,1,1,1,1,1,0,0,0]
boolVAR = False
boolT = True
timeBool = True
try:
while True:
now = datetime.datetime.now()
if (int(now.minute) + 1) % 20 == 0:
boolT = True
if int(now.second) % 55 == 0 and timeBool == True:
bot.send_message("435112571", str(global_bots) + ": ----BOTS \n Time: " + str(now))
print(str(global_bots) + ": ----BOTS \n Time: " + str(now))
with open("logs.txt", "a+") as f:
f.write(str(global_bots) + ": ----BOTS \n Time: " + str(now))
print("HAS WRITTEN")
print(times)
f.write(str(times))
timeBool = False
if int(now.second) % 55 != 0 and timeBool == False:
timeBool = True
#bot.send_message(message.chat.id, str(now.hour))
# if (int(now.hour) + 3) % 24 > 7 and int(now.minute) % 50 == 0 and boolT == True and int(now.hour) % 2 == 0:
# print("COMPLIMENTS")
# boolT = False
# bot.send_message(message.chat.id,
# NAMES[randint(0, len(NAMES))] + " " + COMPLIMENTS[randint(0, len(COMPLIMENTS))])
# bot.send_message(message.chat.id,
# NAMES[randint(0, len(NAMES))] + " " + COMPLIMENTS[randint(0, len(COMPLIMENTS))])
# bot.send_message(message.chat.id,
# NAMES[randint(0, len(NAMES))] + " " + COMPLIMENTS[randint(0, len(COMPLIMENTS))])
#
if (int(now.hour) + 3) % 24 == 23 and int(now.minute) == 0 and times[0] == 1:
print("WAKE UP")
for _ in range(TIMES_WAKE_UP):
bot.send_message(message.chat.id, "РОТА ОТБОЙ!")
print2(times, "BEFORE: ")
times[0] = 0
times[1] = 1
print2(times, "AFTER: ")
if (int(now.hour) + 3) % 24 == 7 and int(now.minute) == 50 and times[1] == 1:
print("GO TO BED")
for _ in range(TIMES_WAKE_UP):
bot.send_message(message.chat.id, "РОТА ПОДЪЕМ!")
print2(times, "BEFORE: ")
times[1] = 0
times[2] = 1
print2(times, "AFTER: ")
if (int(now.hour) + 3) % 24 == 9 and int(now.minute) == 0 and times[2] == 1:
print("MORNING FOOD")
for _ in range(TIMES_WAKE_UP):
bot.send_message(message.chat.id, "Утренняя ХАВКА!")
print2(times, "BEFORE: ")
times[2] = 0
times[3] = 1
print2(times, "AFTER: ")
photo = open('food.jpg', 'rb')
bot.send_photo(message.chat.id, photo)
if (int(now.hour) + 3) % 24 == 13 and int(now.minute) == 30 and times[3] == 1:
print("DAILY FOOD")
for _ in range(TIMES_WAKE_UP):
bot.send_message(message.chat.id, "Дневная ХАВКА!")
print2(times, "BEFORE: ")
times[3] = 0
times[4] = 1
print2(times, "AFTER: ")
photo = open('food.jpg', 'rb')
bot.send_photo(message.chat.id, photo)
if (int(now.hour) + 3) % 24 == 16 and int(now.minute) == 30 and times[4] == 1:
print("NOT ENOUGH FOOD")
for _ in range(TIMES_WAKE_UP):
bot.send_message(message.chat.id, "Недо ХАВКА!")
times[4] = 0
times[5] = 1
photo = open('food.jpg', 'rb')
bot.send_photo(message.chat.id, photo)
if (int(now.hour) + 3) % 24 == 19 and int(now.minute) == 0 and times[5] == 1:
print("EVENING FOOD")
for _ in range(TIMES_WAKE_UP):
bot.send_message(message.chat.id, "Вечерняя ХАВКА!")
print2(times, "BEFORE: ")
times[5] = 0
times[6] = 1
print2(times, "AFTER: ")
photo = open('food.jpg', 'rb')
bot.send_photo(message.chat.id, photo)
if (int(now.hour) + 3) % 24 == 21 and int(now.minute) == 0 and times[6] == 1:
print("VERY EVENING FOOD")
for _ in range(TIMES_WAKE_UP):
bot.send_message(message.chat.id, "Очень вечерняя ХАВКА!")
print2(times, "BEFORE: ")
times[6] = 0
times[0] = 1
print2(times, "AFTER: ")
photo = open('food.jpg', 'rb')
bot.send_photo(message.chat.id, photo)
except:
s = bot.send_message(message.chat.id, "/start")
if __name__ == '__main__':
# print()
server.debug = False
server.run(host="0.0.0.0", port=int(os.environ.get('PORT', 5000)))
bot.polling(none_stop=True, interval=0)
| [] | [] | [
"PORT"
] | [] | ["PORT"] | python | 1 | 0 | |
cmd/root.go | /*
Copyright © 2020 Singee <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"bytes"
"fmt"
"github.com/spf13/cobra"
"os"
"text/template"
"github.com/spf13/viper"
)
var cfgFile string
// rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{
Use: "generate",
Short: "A code generator for go",
Long: `Please use 'go:generate' to run this app.
Add this to your source file (struct_name.go, for example):
//go:generate generate getter -t StructName
And run this:
go generate
Then a file "struct_name_getter.go" will be generated
`,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
_ = os.Chdir(viper.GetString("workdir"))
if viper.GetBool("debug") {
info, err := GetBasicInfo()
if err != nil {
return err
} else {
fmt.Println(info)
}
}
return nil
},
PersistentPostRun: func(cmd *cobra.Command, args []string) {
fmt.Println("Done!")
},
}
func GetBasicInfo() (string, error) {
const outputTemplate = `Environments:
$GOARCH: {{ .GOARCH }}
The execution architecture (arm, amd64, etc.)
$GOOS: {{ .GOOS }}
The execution operating system (linux, windows, etc.)
$GOFILE: {{ .GOFILE }}
The base name of the file.
$GOLINE: {{ .GOLINE }}
The line number of the directive in the source file.
$GOPACKAGE: {{ .GOPACKAGE }}
The name of the package of the file containing the directive.
Working Directory:
{{ .WORKDIR }}
Command Args:
{{- range $i, $arg := .ARGS }}
[{{ $i }}] {{ $arg -}}
{{ end }}
Configs:
{{- range $key, $value := .VIPER }}
[{{ $key }}]: {{ $value -}}
{{ end }}
`
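	// Illustrative rendering (assuming go:generate set GOFILE=user.go and GOPACKAGE=models):
	//   $GOFILE: user.go
	//     The base name of the file.
	//   $GOPACKAGE: models
	//     The name of the package of the file containing the directive.
	// plus the working directory, os.Args and all viper settings.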
t, err := template.New("").Parse(outputTemplate)
if err != nil {
return "", err
}
buf := bytes.NewBufferString("")
wd, err := os.Getwd()
if err != nil {
return "", err
}
err = t.Execute(buf, map[string]interface{}{
"GOARCH": os.Getenv("GOARCH"),
"GOOS": os.Getenv("GOOS"),
"GOFILE": os.Getenv("GOFILE"),
"GOLINE": os.Getenv("GOLINE"),
"GOPACKAGE": os.Getenv("GOPACKAGE"),
"WORKDIR": wd,
"ARGS": os.Args,
"VIPER": viper.AllSettings(),
})
s := buf.String()
return s, nil
}
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
if err := rootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}
func init() {
rootCmd.PersistentFlags().StringP("gofile", "", "", "mock Environment value")
rootCmd.PersistentFlags().StringP("gopackage", "", "", "mock Environment value")
rootCmd.PersistentFlags().StringP("workdir", "w", ".", "work directory")
rootCmd.PersistentFlags().StringP("filename", "", "{{ $.struct.LowerName }}_{{ $.type }}.go", "")
rootCmd.PersistentFlags().BoolP("debug", "", false, "debug mode")
_ = viper.BindPFlags(rootCmd.PersistentFlags())
_ = viper.BindEnv("GOARCH")
_ = viper.BindEnv("GOOS")
_ = viper.BindEnv("GOFILE")
_ = viper.BindEnv("GOLINE")
_ = viper.BindEnv("GOPACKAGE")
}
| [
"\"GOARCH\"",
"\"GOOS\"",
"\"GOFILE\"",
"\"GOLINE\"",
"\"GOPACKAGE\""
] | [] | [
"GOLINE",
"GOFILE",
"GOPACKAGE",
"GOARCH",
"GOOS"
] | [] | ["GOLINE", "GOFILE", "GOPACKAGE", "GOARCH", "GOOS"] | go | 5 | 0 | |
tests/providers/hashicorp/secrets/test_vault.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from unittest import TestCase, mock
from hvac.exceptions import InvalidPath, VaultError
from airflow.providers.hashicorp.secrets.vault import VaultBackend
class TestVaultSecrets(TestCase):
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_get_conn_uri(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
mock_client.secrets.kv.v2.read_secret_version.return_value = {
'request_id': '94011e25-f8dc-ec29-221b-1f9c1d9ad2ae',
'lease_id': '',
'renewable': False,
'lease_duration': 0,
'data': {
'data': {'conn_uri': 'postgresql://airflow:airflow@host:5432/airflow'},
'metadata': {
'created_time': '2020-03-16T21:01:43.331126Z',
'deletion_time': '',
'destroyed': False,
'version': 1,
},
},
'wrap_info': None,
'warnings': None,
'auth': None,
}
kwargs = {
"connections_path": "connections",
"mount_point": "airflow",
"auth_type": "token",
"url": "http://127.0.0.1:8200",
"token": "s.7AU0I51yv1Q1lxOIg1F3ZRAS",
}
test_client = VaultBackend(**kwargs)
returned_uri = test_client.get_conn_uri(conn_id="test_postgres")
self.assertEqual('postgresql://airflow:airflow@host:5432/airflow', returned_uri)
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_get_conn_uri_engine_version_1(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
mock_client.secrets.kv.v1.read_secret.return_value = {
'request_id': '182d0673-618c-9889-4cba-4e1f4cfe4b4b',
'lease_id': '',
'renewable': False,
'lease_duration': 2764800,
'data': {'conn_uri': 'postgresql://airflow:airflow@host:5432/airflow'},
'wrap_info': None,
'warnings': None,
'auth': None,
}
kwargs = {
"connections_path": "connections",
"mount_point": "airflow",
"auth_type": "token",
"url": "http://127.0.0.1:8200",
"token": "s.7AU0I51yv1Q1lxOIg1F3ZRAS",
"kv_engine_version": 1,
}
test_client = VaultBackend(**kwargs)
returned_uri = test_client.get_conn_uri(conn_id="test_postgres")
mock_client.secrets.kv.v1.read_secret.assert_called_once_with(
mount_point='airflow', path='connections/test_postgres'
)
self.assertEqual('postgresql://airflow:airflow@host:5432/airflow', returned_uri)
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_get_conn_uri_engine_version_1_custom_auth_mount_point(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
mock_client.secrets.kv.v1.read_secret.return_value = {
'request_id': '182d0673-618c-9889-4cba-4e1f4cfe4b4b',
'lease_id': '',
'renewable': False,
'lease_duration': 2764800,
'data': {'conn_uri': 'postgresql://airflow:airflow@host:5432/airflow'},
'wrap_info': None,
'warnings': None,
'auth': None,
}
kwargs = {
"connections_path": "connections",
"mount_point": "airflow",
"auth_mount_point": "custom",
"auth_type": "token",
"url": "http://127.0.0.1:8200",
"token": "s.7AU0I51yv1Q1lxOIg1F3ZRAS",
"kv_engine_version": 1,
}
test_client = VaultBackend(**kwargs)
self.assertEqual("custom", test_client.vault_client.auth_mount_point)
returned_uri = test_client.get_conn_uri(conn_id="test_postgres")
mock_client.secrets.kv.v1.read_secret.assert_called_once_with(
mount_point='airflow', path='connections/test_postgres'
)
self.assertEqual('postgresql://airflow:airflow@host:5432/airflow', returned_uri)
@mock.patch.dict(
'os.environ',
{
'AIRFLOW_CONN_TEST_MYSQL': 'mysql://airflow:airflow@host:5432/airflow',
},
)
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_get_conn_uri_non_existent_key(self, mock_hvac):
"""
Test that if the key with connection ID is not present in Vault, _VaultClient.get_connections
should return None
"""
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
# Response does not contain the requested key
mock_client.secrets.kv.v2.read_secret_version.side_effect = InvalidPath()
kwargs = {
"connections_path": "connections",
"mount_point": "airflow",
"auth_type": "token",
"url": "http://127.0.0.1:8200",
"token": "s.7AU0I51yv1Q1lxOIg1F3ZRAS",
}
test_client = VaultBackend(**kwargs)
self.assertIsNone(test_client.get_conn_uri(conn_id="test_mysql"))
mock_client.secrets.kv.v2.read_secret_version.assert_called_once_with(
mount_point='airflow', path='connections/test_mysql', version=None
)
self.assertEqual([], test_client.get_connections(conn_id="test_mysql"))
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_get_variable_value(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
mock_client.secrets.kv.v2.read_secret_version.return_value = {
'request_id': '2d48a2ad-6bcb-e5b6-429d-da35fdf31f56',
'lease_id': '',
'renewable': False,
'lease_duration': 0,
'data': {
'data': {'value': 'world'},
'metadata': {
'created_time': '2020-03-28T02:10:54.301784Z',
'deletion_time': '',
'destroyed': False,
'version': 1,
},
},
'wrap_info': None,
'warnings': None,
'auth': None,
}
kwargs = {
"variables_path": "variables",
"mount_point": "airflow",
"auth_type": "token",
"url": "http://127.0.0.1:8200",
"token": "s.7AU0I51yv1Q1lxOIg1F3ZRAS",
}
test_client = VaultBackend(**kwargs)
returned_uri = test_client.get_variable("hello")
self.assertEqual('world', returned_uri)
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_get_variable_value_engine_version_1(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
mock_client.secrets.kv.v1.read_secret.return_value = {
'request_id': '182d0673-618c-9889-4cba-4e1f4cfe4b4b',
'lease_id': '',
'renewable': False,
'lease_duration': 2764800,
'data': {'value': 'world'},
'wrap_info': None,
'warnings': None,
'auth': None,
}
kwargs = {
"variables_path": "variables",
"mount_point": "airflow",
"auth_type": "token",
"url": "http://127.0.0.1:8200",
"token": "s.7AU0I51yv1Q1lxOIg1F3ZRAS",
"kv_engine_version": 1,
}
test_client = VaultBackend(**kwargs)
returned_uri = test_client.get_variable("hello")
mock_client.secrets.kv.v1.read_secret.assert_called_once_with(
mount_point='airflow', path='variables/hello'
)
self.assertEqual('world', returned_uri)
@mock.patch.dict(
'os.environ',
{
'AIRFLOW_VAR_HELLO': 'world',
},
)
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_get_variable_value_non_existent_key(self, mock_hvac):
"""
        Test that if the key with the variable name is not present in Vault, VaultBackend.get_variable
        should return None
"""
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
# Response does not contain the requested key
mock_client.secrets.kv.v2.read_secret_version.side_effect = InvalidPath()
kwargs = {
"variables_path": "variables",
"mount_point": "airflow",
"auth_type": "token",
"url": "http://127.0.0.1:8200",
"token": "s.7AU0I51yv1Q1lxOIg1F3ZRAS",
}
test_client = VaultBackend(**kwargs)
self.assertIsNone(test_client.get_variable("hello"))
mock_client.secrets.kv.v2.read_secret_version.assert_called_once_with(
mount_point='airflow', path='variables/hello', version=None
)
self.assertIsNone(test_client.get_variable("hello"))
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_auth_failure_raises_error(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
mock_client.is_authenticated.return_value = False
kwargs = {
"connections_path": "connections",
"mount_point": "airflow",
"auth_type": "token",
"url": "http://127.0.0.1:8200",
"token": "test_wrong_token",
}
with self.assertRaisesRegex(VaultError, "Vault Authentication Error!"):
VaultBackend(**kwargs).get_connections(conn_id='test')
def test_auth_type_kubernetes_with_unreadable_jwt_raises_error(self):
path = "/var/tmp/this_does_not_exist/334e918ef11987d3ef2f9553458ea09f"
kwargs = {
"auth_type": "kubernetes",
"kubernetes_role": "default",
"kubernetes_jwt_path": path,
"url": "http://127.0.0.1:8200",
}
with self.assertRaisesRegex(FileNotFoundError, path):
VaultBackend(**kwargs).get_connections(conn_id='test')
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_get_config_value(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
mock_client.secrets.kv.v2.read_secret_version.return_value = {
'request_id': '2d48a2ad-6bcb-e5b6-429d-da35fdf31f56',
'lease_id': '',
'renewable': False,
'lease_duration': 0,
'data': {
'data': {'value': 'sqlite:////Users/airflow/airflow/airflow.db'},
'metadata': {
'created_time': '2020-03-28T02:10:54.301784Z',
'deletion_time': '',
'destroyed': False,
'version': 1,
},
},
'wrap_info': None,
'warnings': None,
'auth': None,
}
kwargs = {
"configs_path": "configurations",
"mount_point": "secret",
"auth_type": "token",
"url": "http://127.0.0.1:8200",
"token": "s.FnL7qg0YnHZDpf4zKKuFy0UK",
}
test_client = VaultBackend(**kwargs)
returned_uri = test_client.get_config("sql_alchemy_conn")
self.assertEqual('sqlite:////Users/airflow/airflow/airflow.db', returned_uri)
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_connections_path_none_value(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
kwargs = {
"connections_path": None,
"mount_point": "airflow",
"auth_type": "token",
"url": "http://127.0.0.1:8200",
"token": "s.FnL7qg0YnHZDpf4zKKuFy0UK",
}
test_client = VaultBackend(**kwargs)
self.assertIsNone(test_client.get_conn_uri(conn_id="test"))
mock_hvac.Client.assert_not_called()
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_variables_path_none_value(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
kwargs = {
"variables_path": None,
"mount_point": "airflow",
"auth_type": "token",
"url": "http://127.0.0.1:8200",
"token": "s.FnL7qg0YnHZDpf4zKKuFy0UK",
}
test_client = VaultBackend(**kwargs)
self.assertIsNone(test_client.get_variable("hello"))
mock_hvac.Client.assert_not_called()
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_config_path_none_value(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
kwargs = {
"config_path": None,
"mount_point": "airflow",
"auth_type": "token",
"url": "http://127.0.0.1:8200",
"token": "s.FnL7qg0YnHZDpf4zKKuFy0UK",
}
test_client = VaultBackend(**kwargs)
self.assertIsNone(test_client.get_config("test"))
mock_hvac.Client.assert_not_called()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
security_groups/running_security_groups.go | package security_groups_test
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"strings"
. "github.com/cloudfoundry/cf-acceptance-tests/cats_suite_helpers"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gbytes"
. "github.com/onsi/gomega/gexec"
"github.com/cloudfoundry-incubator/cf-test-helpers/cf"
"github.com/cloudfoundry-incubator/cf-test-helpers/helpers"
"github.com/cloudfoundry-incubator/cf-test-helpers/workflowhelpers"
"github.com/cloudfoundry/cf-acceptance-tests/helpers/app_helpers"
"github.com/cloudfoundry/cf-acceptance-tests/helpers/assets"
"github.com/cloudfoundry/cf-acceptance-tests/helpers/logs"
"github.com/cloudfoundry/cf-acceptance-tests/helpers/random_name"
"github.com/cloudfoundry/cf-acceptance-tests/helpers/skip_messages"
)
type AppsResponse struct {
Resources []struct {
Metadata struct {
Url string
}
}
}
type StatsResponse map[string]struct {
Stats struct {
Host string
Port int
}
}
type CatnipCurlResponse struct {
Stdout string
Stderr string
ReturnCode int `json:"return_code"`
}
func pushApp(appName, buildpack string) {
Expect(cf.Cf("push",
appName,
"--no-start",
"-b", buildpack,
"-m", DEFAULT_MEMORY_LIMIT,
"-p", assets.NewAssets().Catnip,
"-c", "./catnip",
"-d", Config.GetAppsDomain()).Wait(Config.DefaultTimeoutDuration())).To(Exit(0))
}
func getAppHostIpAndPort(appName string) (string, int) {
var appsResponse AppsResponse
cfResponse := cf.Cf("curl", fmt.Sprintf("/v2/apps?q=name:%s", appName)).Wait(Config.DefaultTimeoutDuration()).Out.Contents()
json.Unmarshal(cfResponse, &appsResponse)
serverAppUrl := appsResponse.Resources[0].Metadata.Url
var statsResponse StatsResponse
cfResponse = cf.Cf("curl", fmt.Sprintf("%s/stats", serverAppUrl)).Wait(Config.DefaultTimeoutDuration()).Out.Contents()
json.Unmarshal(cfResponse, &statsResponse)
return statsResponse["0"].Stats.Host, statsResponse["0"].Stats.Port
}
func testAppConnectivity(clientAppName string, privateHost string, privatePort int) CatnipCurlResponse {
var catnipCurlResponse CatnipCurlResponse
curlResponse := helpers.CurlApp(Config, clientAppName, fmt.Sprintf("/curl/%s/%d", privateHost, privatePort))
json.Unmarshal([]byte(curlResponse), &catnipCurlResponse)
return catnipCurlResponse
}
func getAppContainerIpAndPort(appName string) (string, int) {
curlResponse := helpers.CurlApp(Config, appName, "/myip")
containerIp := strings.TrimSpace(curlResponse)
curlResponse = helpers.CurlApp(Config, appName, "/env/VCAP_APPLICATION")
var env map[string]interface{}
err := json.Unmarshal([]byte(curlResponse), &env)
Expect(err).NotTo(HaveOccurred())
containerPort := int(env["port"].(float64))
return containerIp, containerPort
}
type Destination struct {
IP string `json:"destination"`
Port int `json:"ports,string,omitempty"`
Protocol string `json:"protocol"`
}
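// Illustrative only (values are placeholders): a rule such as
//   Destination{IP: "10.0.11.0/24", Port: 80, Protocol: "tcp"}
// marshals, per the json tags above, to
//   {"destination":"10.0.11.0/24","ports":"80","protocol":"tcp"}
// createSecurityGroup below encodes a slice of these into the rules file passed to
// `cf create-security-group`.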
func createSecurityGroup(allowedDestinations ...Destination) string {
file, _ := ioutil.TempFile(os.TempDir(), "CATS-sg-rules")
defer os.Remove(file.Name())
Expect(json.NewEncoder(file).Encode(allowedDestinations)).To(Succeed())
rulesPath := file.Name()
securityGroupName := random_name.CATSRandomName("SG")
workflowhelpers.AsUser(TestSetup.AdminUserContext(), Config.DefaultTimeoutDuration(), func() {
Expect(cf.Cf("create-security-group", securityGroupName, rulesPath).Wait(Config.DefaultTimeoutDuration())).To(Exit(0))
})
return securityGroupName
}
func bindSecurityGroup(securityGroupName, orgName, spaceName string) {
By("Applying security group")
workflowhelpers.AsUser(TestSetup.AdminUserContext(), Config.DefaultTimeoutDuration(), func() {
Expect(cf.Cf("bind-security-group", securityGroupName, orgName, spaceName).Wait(Config.DefaultTimeoutDuration())).To(Exit(0))
})
}
func unbindSecurityGroup(securityGroupName, orgName, spaceName string) {
By("Unapplying security group")
workflowhelpers.AsUser(TestSetup.AdminUserContext(), Config.DefaultTimeoutDuration(), func() {
Expect(cf.Cf("unbind-security-group", securityGroupName, orgName, spaceName).Wait(Config.DefaultTimeoutDuration())).To(Exit(0))
})
}
func deleteSecurityGroup(securityGroupName string) {
workflowhelpers.AsUser(TestSetup.AdminUserContext(), Config.DefaultTimeoutDuration(), func() {
Expect(cf.Cf("delete-security-group", securityGroupName, "-f").Wait(Config.DefaultTimeoutDuration())).To(Exit(0))
})
}
func createDummyBuildpack() string {
buildpack := random_name.CATSRandomName("BPK")
buildpackZip := assets.NewAssets().SecurityGroupBuildpack
workflowhelpers.AsUser(TestSetup.AdminUserContext(), Config.DefaultTimeoutDuration(), func() {
Expect(cf.Cf("create-buildpack", buildpack, buildpackZip, "999").Wait(Config.DefaultTimeoutDuration())).To(Exit(0))
})
return buildpack
}
func deleteBuildpack(buildpack string) {
workflowhelpers.AsUser(TestSetup.AdminUserContext(), Config.DefaultTimeoutDuration(), func() {
Expect(cf.Cf("delete-buildpack", buildpack, "-f").Wait(Config.DefaultTimeoutDuration())).To(Exit(0))
})
}
func getStagingOutput(appName string) func() *Session {
return func() *Session {
appLogsSession := logs.Tail(Config.GetUseLogCache(), appName)
Expect(appLogsSession.Wait(Config.DefaultTimeoutDuration())).To(Exit(0))
return appLogsSession
}
}
func pushServerApp() (serverAppName string, privateHost string, privatePort int) {
serverAppName = random_name.CATSRandomName("APP")
pushApp(serverAppName, Config.GetBinaryBuildpackName())
Expect(cf.Cf("start", serverAppName).Wait(Config.CfPushTimeoutDuration())).To(Exit(0))
privateHost, privatePort = getAppHostIpAndPort(serverAppName)
return
}
func pushClientApp() (clientAppName string) {
clientAppName = random_name.CATSRandomName("APP")
pushApp(clientAppName, Config.GetBinaryBuildpackName())
Expect(cf.Cf("start", clientAppName).Wait(Config.CfPushTimeoutDuration())).To(Exit(0))
return
}
func assertNetworkingPreconditions(clientAppName string, privateHost string, privatePort int) {
By("Asserting default running security group configuration for traffic between containers")
catnipCurlResponse := testAppConnectivity(clientAppName, privateHost, privatePort)
Expect(catnipCurlResponse.ReturnCode).NotTo(Equal(0), "Expected default running security groups not to allow internal communication between app containers. Configure your running security groups to not allow traffic on internal networks, or disable this test by setting 'include_security_groups' to 'false' in '"+os.Getenv("CONFIG")+"'.")
By("Asserting default running security group configuration from a running container to an external destination")
catnipCurlResponse = testAppConnectivity(clientAppName, "www.google.com", 80)
Expect(catnipCurlResponse.ReturnCode).To(Equal(0), "Expected default running security groups to allow external traffic from app containers. Configure your running security groups to not allow traffic on internal networks, or disable this test by setting 'include_security_groups' to 'false' in '"+os.Getenv("CONFIG")+"'.")
}
var _ = SecurityGroupsDescribe("App Instance Networking", func() {
var serverAppName, privateHost string
var privatePort int
Describe("Using container-networking and running security-groups", func() {
var serverAppName, clientAppName, privateHost, orgName, spaceName, securityGroupName string
var privatePort int
BeforeEach(func() {
if !Config.GetIncludeContainerNetworking() {
Skip(skip_messages.SkipContainerNetworkingMessage)
}
orgName = TestSetup.RegularUserContext().Org
spaceName = TestSetup.RegularUserContext().Space
serverAppName, privateHost, privatePort = pushServerApp()
clientAppName = pushClientApp()
assertNetworkingPreconditions(clientAppName, privateHost, privatePort)
})
AfterEach(func() {
app_helpers.AppReport(serverAppName, Config.DefaultTimeoutDuration())
Expect(cf.Cf("delete", serverAppName, "-f", "-r").Wait(Config.CfPushTimeoutDuration())).To(Exit(0))
app_helpers.AppReport(clientAppName, Config.DefaultTimeoutDuration())
Expect(cf.Cf("delete", clientAppName, "-f", "-r").Wait(Config.CfPushTimeoutDuration())).To(Exit(0))
deleteSecurityGroup(securityGroupName)
})
It("correctly configures asgs and c2c policy independent of each other", func() {
By("creating a wide-open ASG")
dest := Destination{
				IP:       "0.0.0.0/0", // allow traffic to all destinations (wide-open ASG)
Protocol: "all",
}
securityGroupName = createSecurityGroup(dest)
privateAddress := Config.GetUnallocatedIPForSecurityGroup()
By("binding new security group")
bindSecurityGroup(securityGroupName, orgName, spaceName)
Expect(cf.Cf("restart", clientAppName).Wait(Config.CfPushTimeoutDuration())).To(Exit(0))
By("Testing that client app cannot connect to the server app using the overlay")
containerIp, containerPort := getAppContainerIpAndPort(serverAppName)
catnipCurlResponse := testAppConnectivity(clientAppName, containerIp, containerPort)
Expect(catnipCurlResponse.ReturnCode).NotTo(Equal(0), "no policy configured but client app can talk to server app using overlay")
By("Testing that external connectivity to a private ip is not refused (but may be unreachable for other reasons)")
catnipCurlResponse = testAppConnectivity(clientAppName, privateAddress, 80)
Expect(catnipCurlResponse.Stderr).To(ContainSubstring("Connection timed out after"), "wide-open ASG configured but app is still refused by private ip")
By("adding policy")
workflowhelpers.AsUser(TestSetup.AdminUserContext(), Config.DefaultTimeoutDuration(), func() {
Expect(cf.Cf("target", "-o", orgName, "-s", spaceName).Wait(Config.DefaultTimeoutDuration())).To(Exit(0))
Expect(string(cf.Cf("network-policies").Wait(Config.DefaultTimeoutDuration()).Out.Contents())).ToNot(ContainSubstring(serverAppName))
Expect(cf.Cf("add-network-policy", clientAppName, "--destination-app", serverAppName, "--port", fmt.Sprintf("%d", containerPort), "--protocol", "tcp").Wait(Config.CfPushTimeoutDuration())).To(Exit(0))
Expect(string(cf.Cf("network-policies").Wait(Config.DefaultTimeoutDuration()).Out.Contents())).To(ContainSubstring(serverAppName))
})
By("Testing that client app can connect to server app using the overlay")
Eventually(func() int {
catnipCurlResponse = testAppConnectivity(clientAppName, containerIp, containerPort)
return catnipCurlResponse.ReturnCode
}, "5s").Should(Equal(0), "policy is configured + wide-open asg but client app cannot talk to server app using overlay")
By("unbinding the wide-open security group")
unbindSecurityGroup(securityGroupName, orgName, spaceName)
Expect(cf.Cf("restart", clientAppName).Wait(Config.CfPushTimeoutDuration())).To(Exit(0))
By("restarting the app")
Expect(cf.Cf("restart", clientAppName).Wait(Config.CfPushTimeoutDuration())).To(Exit(0))
By("Testing that client app can still connect to server app using the overlay")
Eventually(func() int {
catnipCurlResponse = testAppConnectivity(clientAppName, containerIp, containerPort)
return catnipCurlResponse.ReturnCode
}, "5s").Should(Equal(0), "policy is configured, asgs are not but client app cannot talk to server app using overlay")
By("Testing that external connectivity to a private ip is refused")
catnipCurlResponse = testAppConnectivity(clientAppName, privateAddress, 80)
Expect(catnipCurlResponse.Stderr).To(ContainSubstring("refused"))
By("deleting policy")
workflowhelpers.AsUser(TestSetup.AdminUserContext(), Config.DefaultTimeoutDuration(), func() {
Expect(cf.Cf("target", "-o", orgName, "-s", spaceName).Wait(Config.DefaultTimeoutDuration())).To(Exit(0))
Expect(string(cf.Cf("network-policies").Wait(Config.DefaultTimeoutDuration()).Out.Contents())).To(ContainSubstring(serverAppName))
Expect(cf.Cf("remove-network-policy", clientAppName, "--destination-app", serverAppName, "--port", fmt.Sprintf("%d", containerPort), "--protocol", "tcp").Wait(Config.CfPushTimeoutDuration())).To(Exit(0))
Expect(string(cf.Cf("network-policies").Wait(Config.DefaultTimeoutDuration()).Out.Contents())).ToNot(ContainSubstring(serverAppName))
})
By("Testing the client app cannot connect to the server app using the overlay")
Eventually(func() int {
catnipCurlResponse = testAppConnectivity(clientAppName, containerIp, containerPort)
return catnipCurlResponse.ReturnCode
}, "5s").ShouldNot(Equal(0), "no policy is configured but client app can talk to server app using overlay")
})
})
Describe("Using staging security groups", func() {
var testAppName, buildpack string
BeforeEach(func() {
serverAppName, privateHost, privatePort = pushServerApp()
By("Asserting default staging security group configuration")
testAppName = random_name.CATSRandomName("APP")
buildpack = createDummyBuildpack()
pushApp(testAppName, buildpack)
privateUri := fmt.Sprintf("%s:%d", privateHost, privatePort)
Expect(cf.Cf("set-env", testAppName, "TESTURI", privateUri).Wait(Config.DefaultTimeoutDuration())).To(Exit(0))
Expect(cf.Cf("start", testAppName).Wait(Config.CfPushTimeoutDuration())).To(Exit(1))
Eventually(getStagingOutput(testAppName), 5).Should(Say("CURL_EXIT=[^0]"), "Expected staging security groups not to allow internal communication between app containers. Configure your staging security groups to not allow traffic on internal networks, or disable this test by setting 'include_security_groups' to 'false' in '"+os.Getenv("CONFIG")+"'.")
})
AfterEach(func() {
app_helpers.AppReport(serverAppName, Config.DefaultTimeoutDuration())
Expect(cf.Cf("delete", serverAppName, "-f", "-r").Wait(Config.CfPushTimeoutDuration())).To(Exit(0))
app_helpers.AppReport(testAppName, Config.DefaultTimeoutDuration())
Expect(cf.Cf("delete", testAppName, "-f", "-r").Wait(Config.CfPushTimeoutDuration())).To(Exit(0))
deleteBuildpack(buildpack)
})
It("allows external and denies internal traffic during staging based on default staging security rules", func() {
Expect(cf.Cf("set-env", testAppName, "TESTURI", "www.google.com").Wait(Config.DefaultTimeoutDuration())).To(Exit(0))
Expect(cf.Cf("restart", testAppName).Wait(Config.CfPushTimeoutDuration())).To(Exit(1))
Eventually(getStagingOutput(testAppName), 5).Should(Say("CURL_EXIT=0"))
})
})
})
| [
"\"CONFIG\"",
"\"CONFIG\"",
"\"CONFIG\""
] | [] | [
"CONFIG"
] | [] | ["CONFIG"] | go | 1 | 0 | |
BALSAMIC/commands/run/scheduler.py | #!/usr/bin/env python3
# import sys
import os
import re
import subprocess
import json
import argparse
import shutil
from snakemake.utils import read_job_properties
class SbatchScheduler:
'''
Builds sbatch command. Commands map to SLURM sbatch options.
Params:
------
account - -A/--account
dependency - {{dependencies}}
error - -e/--error
mail_type - --mail-type
mail_user - --mail-user
ntasks - -n/--ntasks
output - -o/--output
qos - -q/--qos
time - -t/--time
'''
def __init__(self):
self.account = None
self.dependency = None
self.error = None
self.mail_type = None
self.mail_user = None
self.ntasks = None
self.output = None
self.qos = None
self.script = None
self.time = None
def build_cmd(self):
''' builds sbatch command matching its options '''
sbatch_options = list()
job_attributes = [
'account', 'dependency', 'error', 'output', 'mail_type',
'mail_user', 'ntasks', 'qos', 'time'
]
for attribute in job_attributes:
if getattr(self, attribute):
attribute_value = getattr(self, attribute)
sbatch_options.append('--{} \"{}\"'.format(
attribute.replace("_", "-"), attribute_value))
sbatch_options.append(self.script)
return 'sbatch' + ' ' + ' '.join(sbatch_options)
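# Illustrative usage of SbatchScheduler (a sketch; the values are placeholders):
#   sched = SbatchScheduler()
#   sched.account = "production"
#   sched.dependency = "afterok:12345"
#   sched.ntasks = "4"
#   sched.time = "01:00:00"
#   sched.script = "job_0.sh"
#   sched.build_cmd()
#   # -> 'sbatch --account "production" --dependency "afterok:12345" --ntasks "4" --time "01:00:00" job_0.sh'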
class QsubScheduler:
"""docstring for QsubScheduler"""
def __init__(self):
self.account = None
self.dependency = None
self.error = None
self.resources = None
self.mail_type = None
self.mail_user = None
self.ntasks = None
self.output = None
self.qos = None
self.script = None
self.time = None
def build_cmd(self):
resource_params = ""
depend = ""
qsub_options = list()
# Exclusive node
resource_params += " -l excl=1 "
# if self.time:
# resource_params += " -l \"walltime={}\" ".format(str(self.time))
if self.ntasks:
# resource_params += "nodes=1:ppn={}\" ".format(str(self.ntasks))
resource_params += " -pe mpi {} ".format(str(self.ntasks))
if self.account:
# qsub_options.append(" -A " + str(self.account))
qsub_options.append(" -q " + str(self.account))
if self.error:
qsub_options.append(" -e " + str(self.error))
if self.output:
qsub_options.append(" -o " + str(self.output))
if self.mail_type:
# qsub_options.append(" -m " + str(self.mail_type))
qsub_options.append(" -m s ")# + str(self.mail_type))
if self.mail_user:
qsub_options.append(" -M " + str(self.mail_user))
if self.qos:
qsub_options.append(" -p " + str(self.qos))
if resource_params:
qsub_options.append(resource_params)
if self.dependency:
for jobid in self.dependency:
depend = depend + ":" + jobid
#qsub_options.append(" -W \"depend=afterok" + str(depend) + "\"")
qsub_options.append(" -hold_jid " + ",".join(self.dependency))
if self.script:
qsub_options.append(" {} ".format(self.script))
return "qsub -V -S /bin/bash " + " ".join(qsub_options)
def read_sample_config(input_json):
''' load input sample_config file. Output of balsamic config sample. '''
try:
with open(input_json) as f:
return json.load(f)
except Exception as e:
raise e
def write_sacct_file(sacct_file, job_id):
''' writes a yaml file with job ids '''
try:
with open(sacct_file, 'a') as f:
f.write(job_id + "\n")
except FileNotFoundError as e:
raise e
# def write_scheduler_dump(scheduler_file, cmd):
# ''' writes sbatch dump for debuging purpose '''
# try:
# with open(scheduler_file, 'a') as f:
# f.write(cmd + "\n")
# f.write(sys.executable + "\n")
# except OSError:
# raise
def submit_job(sbatch_cmd, profile):
''' subprocess call for sbatch command '''
# run sbatch cmd
try:
res = subprocess.run(sbatch_cmd,
check=True,
shell=True,
stdout=subprocess.PIPE)
except subprocess.CalledProcessError as e:
raise e
# Get jobid
res = res.stdout.decode()
if profile == "slurm":
m = re.search("Submitted batch job (\d+)", res)
jobid = m.group(1)
elif profile == "qsub":
jobid_tmp = str(res).split()[3]
jobid = jobid_tmp.strip("\"()\"")
print(jobid)
return jobid
# def singularity_param(sample_config, script_dir, jobscript, sbatch_script):
# ''' write a modified sbatch script based on singularity parameters '''
# if 'bind_path' not in sample_config['singularity']:
# raise KeyError("bind_path was not found in sample config.")
# if 'main_env' not in sample_config['singularity']:
# raise KeyError("main_env was not found in sample config.")
# if 'container_path' not in sample_config['singularity']:
# raise KeyError("container_path was not found sample config.")
# try:
# bind_path = sample_config['singularity']['bind_path']
# main_env = sample_config['singularity']['main_env']
# container_path = sample_config['singularity']['container_path']
# with open(sbatch_script, 'a') as f:
# f.write("#!/bin/bash" + "\n")
# f.write(
# f"function balsamic-run {{ singularity exec -B {bind_path} --app {main_env} {container_path} $@; }}"
# + "\n")
# f.write(f"# Snakemake original script {jobscript}" + "\n")
# f.write(f"balsamic-run bash {jobscript}" + "\n")
# sbatch_file = os.path.join(
# script_dir, sample_config["analysis"]["case_id"] + ".sbatch")
# return sbatch_file
# except OSError:
# raise
def get_parser():
''' argument parser '''
parser = argparse.ArgumentParser(description='''
This is an internal script and should be invoked independently.
        This script gets a list of arguments (see --help) and submits a job to slurm as
afterok dependency.
''')
parser.add_argument("dependencies",
nargs="*",
help="{{dependencies}} from snakemake")
parser.add_argument("snakescript", help="Snakemake script")
parser.add_argument("--sample-config",
help='balsamic config sample output')
parser.add_argument("--profile", help="profile to run jobs")
parser.add_argument("--account",
required=True,
help='cluster account name')
parser.add_argument("--qos", default='low', help='cluster job Priority (slurm - QOS)')
parser.add_argument("--mail-type", help='cluster mail type')
parser.add_argument("--mail-user", help='mail user')
parser.add_argument("--log-dir", help="Log directory")
parser.add_argument("--result-dir", help="Result directory")
parser.add_argument("--script-dir", help="Script directory")
return parser
def main(args=None):
''' entry point for scheduler.py '''
parser = get_parser()
args = parser.parse_args(args)
jobscript = args.snakescript
job_properties = read_job_properties(jobscript)
shutil.copy2(jobscript, args.script_dir)
jobscript = os.path.join(args.script_dir, os.path.basename(jobscript))
if args.profile == 'slurm':
jobid = '%j'
scheduler_cmd = SbatchScheduler()
if args.dependencies:
scheduler_cmd.dependency = ','.join(["afterok:%s" % d for d in args.dependencies])
elif args.profile == 'qsub':
jobid = '${JOB_ID}'
scheduler_cmd = QsubScheduler()
scheduler_cmd.dependency = args.dependencies
    mail_type = args.mail_type
    if not mail_type:
        # fall back to the cluster config when --mail-type is not given
        mail_type = job_properties["cluster"]["mail_type"]
sample_config = read_sample_config(input_json=args.sample_config)
sacct_file = os.path.join(args.log_dir,
sample_config["analysis"]["case_id"] + ".sacct")
balsamic_run_mode = os.getenv("BALSAMIC_STATUS", "conda")
# if balsamic_run_mode == 'container' and 'singularity' in sample_config:
# sbatch_script = os.path.join(args.script_dir,
# "sbatch." + os.path.basename(jobscript))
# sbatch_file = singularity_param(sample_config=sample_config,
# script_dir=args.script_dir,
# jobscript=jobscript,
# sbatch_script=sbatch_script)
# jobscript = sbatch_script
scheduler_cmd.account = args.account
scheduler_cmd.mail_type = mail_type
scheduler_cmd.error = os.path.join(args.log_dir,
os.path.basename(jobscript) + "_" + jobid + ".err")
scheduler_cmd.output = os.path.join(args.log_dir,
os.path.basename(jobscript) + "_" + jobid + ".out")
scheduler_cmd.ntasks = job_properties["cluster"]["n"]
scheduler_cmd.time = job_properties["cluster"]["time"]
scheduler_cmd.mail_user = args.mail_user
scheduler_cmd.script = jobscript
jobid = submit_job(scheduler_cmd.build_cmd(), args.profile)
# scheduler_file = os.path.join(args.script_dir, sample_config["analysis"]["case_id"] + ".scheduler_dump")
# if balsamic_run_mode == 'container' and 'singularity' in sample_config:
# write_scheduler_dump(scheduler_file=scheduler_file, cmd=scheduler_cmd.build_cmd())
write_sacct_file(sacct_file=sacct_file, job_id=jobid)
if __name__ == '__main__':
main()
| [] | [] | [
"BALSAMIC_STATUS"
] | [] | ["BALSAMIC_STATUS"] | python | 1 | 0 | |
clients/config/cli/cli_test.go | package cli
import (
"os"
"testing"
"time"
)
// func TestNewConfig(t *testing.T) {
// os.Setenv("LOCALHOST", "192.168.1.1")
// localhost := os.Getenv("LOCALHOST")
// t.Log("ENV LOCALHOST", localhost)
// configMap := make(map[string]*config.AppConfig)
// orderConfig := new(config.AppConfig)
// orderDB := new(config.DBConfig)
// orderDB.Server = "default"
// orderDB.User = "postgres"
// orderDB.Driver = "postgres"
// orderDB.Host = localhost
// orderDB.Port = 5432
// orderDB.MaxIdleConns = 10
// orderDB.MaxOpenConns = 100
// orderDB.Name = "order"
// orderDB.Password = "123456"
// orderDB.EnableLog = true
// orderRedis := new(config.RedisConfig)
// orderRedis.Name = 1
// orderRedis.Server = "default"
// orderRedis.Host = localhost + ":3306"
// orderJaeger := new(config.JaegerConfig)
// orderJaegerSampler := new(config.Sampler)
// orderJaegerSampler.HostPort = localhost + ":6831"
// orderJaegerSampler.RefreshInterval = 10
// orderJaegerReporter := new(config.Reporter)
// orderJaegerReporter.HostPort = localhost + ":6831"
// orderJaegerReporter.FlushInterval = 5
// orderJaegerReporter.QueueSize = 1000
// orderJaegerReporter.LogSpans = true
// orderJaeger.Server = "default"
// orderJaeger.ServiceName = "order"
// orderJaeger.Sampler = orderJaegerSampler
// orderJaeger.Reporter = orderJaegerReporter
// orderConfig.DB = orderDB
// orderConfig.Redis = orderRedis
// orderConfig.Jaeger = orderJaeger
// userConfig := new(config.AppConfig)
// userDB := new(config.DBConfig)
// userDB.Server = "default"
// userDB.User = "postgres"
// userDB.Driver = "postgres"
// userDB.Host = localhost
// userDB.Port = 5432
// userDB.MaxIdleConns = 10
// userDB.MaxOpenConns = 100
// userDB.Name = "user"
// userDB.Password = "123456"
// userDB.EnableLog = true
// userRedis := new(config.RedisConfig)
// userRedis.Name = 0
// userRedis.Server = "default"
// userRedis.Host = localhost + ":3306"
// userJaeger := new(config.JaegerConfig)
// userJaegerSampler := new(config.Sampler)
// userJaegerSampler.HostPort = localhost + ":6831"
// userJaegerSampler.RefreshInterval = 10
// userJaegerReporter := new(config.Reporter)
// userJaegerReporter.HostPort = localhost + ":6831"
// userJaegerReporter.FlushInterval = 5
// userJaegerReporter.QueueSize = 1000
// userJaegerReporter.LogSpans = true
// userJaeger.Server = "default"
// userJaeger.ServiceName = "user"
// userJaeger.Sampler = userJaegerSampler
// userJaeger.Reporter = userJaegerReporter
// userConfig.DB = userDB
// userConfig.Redis = userRedis
// userConfig.Jaeger = userJaeger
// configMap["order"] = orderConfig
// configMap["user"] = userConfig
// t.Log("order", orderConfig.DB)
// t.Log("user", userConfig.DB)
// type args struct {
// configPath string
// }
// tests := []struct {
// name string
// args args
// want map[string]*config.AppConfig
// }{
// {"test read config from dev directory", args{"dev"}, configMap},
// }
// for _, tt := range tests {
// t.Run(tt.name, func(t *testing.T) {
// if got := NewConfig(tt.args.configPath); !reflect.DeepEqual(got, tt.want) {
// for k, v := range got {
// t.Log(k, *v)
// }
// for k, v := range tt.want {
// t.Log(k, *v)
// }
// t.Errorf("NewConfig() = %v, want %v", got, tt.want)
// }
// })
// }
// }
func TestSyncConfig(t *testing.T) {
os.Setenv("LOCALHOST", "192.168.1.21")
type args struct {
etcdServer string
dialTimeout time.Duration
requestTimeout time.Duration
configPath string
}
tests := []struct {
name string
args args
wantErr bool
}{
{"test init config",
args{
dialTimeout: 5 * time.Second,
requestTimeout: 10 * time.Second,
configPath: "dev",
},
false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := SyncConfig(tt.args.dialTimeout, tt.args.requestTimeout, tt.args.configPath); (err != nil) != tt.wantErr {
t.Errorf("SyncConfig() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
| [
"\"LOCALHOST\""
] | [] | [
"LOCALHOST"
] | [] | ["LOCALHOST"] | go | 1 | 0 | |
lib/spack/llnl/util/tty/__init__.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import unicode_literals
import fcntl
import os
import struct
import sys
import termios
import textwrap
import traceback
import six
from datetime import datetime
from six import StringIO
from six.moves import input
from llnl.util.tty.color import cprint, cwrite, cescape, clen
# Globals
_debug = 0
_verbose = False
_stacktrace = False
_timestamp = False
_msg_enabled = True
_warn_enabled = True
_error_enabled = True
indent = " "
def debug_level():
return _debug
def is_verbose():
return _verbose
def is_debug(level=1):
return _debug >= level
def is_stacktrace():
return _stacktrace
def set_debug(level=0):
global _debug
    assert level >= 0, 'Debug level must be a non-negative value'
_debug = level
def set_verbose(flag):
global _verbose
_verbose = flag
def set_timestamp(flag):
global _timestamp
_timestamp = flag
def set_msg_enabled(flag):
global _msg_enabled
_msg_enabled = flag
def set_warn_enabled(flag):
global _warn_enabled
_warn_enabled = flag
def set_error_enabled(flag):
global _error_enabled
_error_enabled = flag
def msg_enabled():
return _msg_enabled
def warn_enabled():
return _warn_enabled
def error_enabled():
return _error_enabled
class SuppressOutput:
"""Class for disabling output in a scope using 'with' keyword"""
def __init__(self,
msg_enabled=True,
warn_enabled=True,
error_enabled=True):
self._msg_enabled_initial = _msg_enabled
self._warn_enabled_initial = _warn_enabled
self._error_enabled_initial = _error_enabled
self._msg_enabled = msg_enabled
self._warn_enabled = warn_enabled
self._error_enabled = error_enabled
def __enter__(self):
set_msg_enabled(self._msg_enabled)
set_warn_enabled(self._warn_enabled)
set_error_enabled(self._error_enabled)
def __exit__(self, exc_type, exc_val, exc_tb):
set_msg_enabled(self._msg_enabled_initial)
set_warn_enabled(self._warn_enabled_initial)
set_error_enabled(self._error_enabled_initial)
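# Illustrative usage of SuppressOutput (not part of the original module;
# ``run_noisy_step`` below is a hypothetical callable):
#
#     with SuppressOutput(warn_enabled=False, error_enabled=False):
#         run_noisy_step()   # warn()/error() calls inside are silenced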
def set_stacktrace(flag):
global _stacktrace
_stacktrace = flag
def process_stacktrace(countback):
"""Gives file and line frame 'countback' frames from the bottom"""
st = traceback.extract_stack()
# Not all entries may be spack files, we have to remove those that aren't.
file_list = []
for frame in st:
# Check that the file is a spack file
if frame[0].find("/spack") >= 0:
file_list.append(frame[0])
# We use commonprefix to find what the spack 'root' directory is.
root_dir = os.path.commonprefix(file_list)
root_len = len(root_dir)
st_idx = len(st) - countback - 1
st_text = "%s:%i " % (st[st_idx][0][root_len:], st[st_idx][1])
return st_text
def show_pid():
return is_debug(2)
def get_timestamp(force=False):
"""Get a string timestamp"""
if _debug or _timestamp or force:
# Note inclusion of the PID is useful for parallel builds.
pid = ', {0}'.format(os.getpid()) if show_pid() else ''
return '[{0}{1}] '.format(
datetime.now().strftime("%Y-%m-%d-%H:%M:%S.%f"), pid)
else:
return ''
def msg(message, *args, **kwargs):
if not msg_enabled():
return
if isinstance(message, Exception):
message = "%s: %s" % (message.__class__.__name__, str(message))
newline = kwargs.get('newline', True)
st_text = ""
if _stacktrace:
st_text = process_stacktrace(2)
if newline:
cprint("@*b{%s==>} %s%s" % (
st_text, get_timestamp(), cescape(message)))
else:
cwrite("@*b{%s==>} %s%s" % (
st_text, get_timestamp(), cescape(message)))
for arg in args:
print(indent + six.text_type(arg))
def info(message, *args, **kwargs):
if isinstance(message, Exception):
message = "%s: %s" % (message.__class__.__name__, str(message))
format = kwargs.get('format', '*b')
stream = kwargs.get('stream', sys.stdout)
wrap = kwargs.get('wrap', False)
break_long_words = kwargs.get('break_long_words', False)
st_countback = kwargs.get('countback', 3)
st_text = ""
if _stacktrace:
st_text = process_stacktrace(st_countback)
cprint("@%s{%s==>} %s%s" % (
format, st_text, get_timestamp(), cescape(six.text_type(message))
), stream=stream)
for arg in args:
if wrap:
lines = textwrap.wrap(
six.text_type(arg), initial_indent=indent,
subsequent_indent=indent, break_long_words=break_long_words)
for line in lines:
stream.write(line + '\n')
else:
stream.write(indent + six.text_type(arg) + '\n')
def verbose(message, *args, **kwargs):
if _verbose:
kwargs.setdefault('format', 'c')
info(message, *args, **kwargs)
def debug(message, *args, **kwargs):
level = kwargs.get('level', 1)
if is_debug(level):
kwargs.setdefault('format', 'g')
kwargs.setdefault('stream', sys.stderr)
info(message, *args, **kwargs)
def error(message, *args, **kwargs):
if not error_enabled():
return
kwargs.setdefault('format', '*r')
kwargs.setdefault('stream', sys.stderr)
info("Error: " + six.text_type(message), *args, **kwargs)
def warn(message, *args, **kwargs):
if not warn_enabled():
return
kwargs.setdefault('format', '*Y')
kwargs.setdefault('stream', sys.stderr)
info("Warning: " + six.text_type(message), *args, **kwargs)
def die(message, *args, **kwargs):
kwargs.setdefault('countback', 4)
error(message, *args, **kwargs)
sys.exit(1)
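# Rough usage sketch for the logging helpers above (not from the original
# source; ``build_package`` is a made-up callable):
#
#     msg("Installing zlib", "stage: fetch", "stage: build")
#     warn("Checksum file missing; continuing anyway")
#     try:
#         build_package()
#     except Exception as e:
#         die(e)   # prints "Error: ..." and exits with status 1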
def get_number(prompt, **kwargs):
default = kwargs.get('default', None)
abort = kwargs.get('abort', None)
if default is not None and abort is not None:
prompt += ' (default is %s, %s to abort) ' % (default, abort)
elif default is not None:
prompt += ' (default is %s) ' % default
elif abort is not None:
prompt += ' (%s to abort) ' % abort
number = None
while number is None:
msg(prompt, newline=False)
ans = input()
if ans == six.text_type(abort):
return None
if ans:
try:
number = int(ans)
if number < 1:
msg("Please enter a valid number.")
number = None
except ValueError:
msg("Please enter a valid number.")
elif default is not None:
number = default
return number
def get_yes_or_no(prompt, **kwargs):
default_value = kwargs.get('default', None)
if default_value is None:
prompt += ' [y/n] '
elif default_value is True:
prompt += ' [Y/n] '
elif default_value is False:
prompt += ' [y/N] '
else:
raise ValueError(
"default for get_yes_no() must be True, False, or None.")
result = None
while result is None:
msg(prompt, newline=False)
ans = input().lower()
if not ans:
result = default_value
if result is None:
print("Please enter yes or no.")
else:
if ans == 'y' or ans == 'yes':
result = True
elif ans == 'n' or ans == 'no':
result = False
return result
def hline(label=None, **kwargs):
"""Draw a labeled horizontal line.
Keyword Arguments:
char (str): Char to draw the line with. Default '-'
max_width (int): Maximum width of the line. Default is 64 chars.
"""
char = kwargs.pop('char', '-')
max_width = kwargs.pop('max_width', 64)
if kwargs:
raise TypeError(
"'%s' is an invalid keyword argument for this function."
            % next(iter(kwargs)))
rows, cols = terminal_size()
if not cols:
cols = max_width
else:
cols -= 2
cols = min(max_width, cols)
label = six.text_type(label)
prefix = char * 2 + " "
suffix = " " + (cols - len(prefix) - clen(label)) * char
out = StringIO()
out.write(prefix)
out.write(label)
out.write(suffix)
print(out.getvalue())
def terminal_size():
"""Gets the dimensions of the console: (rows, cols)."""
def ioctl_gwinsz(fd):
try:
rc = struct.unpack('hh', fcntl.ioctl(
fd, termios.TIOCGWINSZ, '1234'))
except BaseException:
return
return rc
rc = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2)
if not rc:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
rc = ioctl_gwinsz(fd)
os.close(fd)
except BaseException:
pass
if not rc:
rc = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
return int(rc[0]), int(rc[1])
| [] | [] | [
"LINES",
"COLUMNS"
] | [] | ["LINES", "COLUMNS"] | python | 2 | 0 | |
config/wsgi.py | """
WSGI config for Nagoya Rest project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# nagoyalookupservice directory.
app_path = os.path.dirname(os.path.abspath(__file__)).replace('/config', '')
sys.path.append(os.path.join(app_path, 'nagoyalookupservice'))
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
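# Illustrative only (not part of the original file): a WSGI server is pointed
# at the module-level ``application`` object defined above, for example with
# gunicorn:
#
#     gunicorn config.wsgi:application
#
# Django's runserver finds the same object through the WSGI_APPLICATION setting.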
| [] | [] | [
"DJANGO_SETTINGS_MODULE"
] | [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
tensorflow/compiler/plugin/poplar/tests/while_loop_perf_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pva
import numpy as np
import test_utils as tu
from tensorflow.compiler.tests import xla_test
from tensorflow.python import ipu
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
class WhileLoopPerfTest(xla_test.XLATestCase):
def testIpuWhilePerfTest(self):
cfg = ipu.config.IPUConfig()
report_helper = tu.ReportHelper()
report_helper.set_autoreport_options(cfg, output_execution_profile=True)
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
with self.session() as sess:
def cond(i, v):
del v
return math_ops.less(i, 15)
def body(i, v):
v = v + i
i = i + 1
return (i, v)
def my_net(v):
i = constant_op.constant(0)
r = control_flow_ops.while_loop(cond,
body, (i, v),
maximum_iterations=10)
return [r[1]]
with ops.device('cpu'):
v = array_ops.placeholder(np.int32, [500])
with ipu.scopes.ipu_scope("/device:IPU:0"):
r = ipu.ipu_compiler.compile(my_net, inputs=[v])
result = sess.run(r, {v: np.zeros([500], np.int32)})
self.assertAllClose(result[0], np.broadcast_to(45, [500]))
# Check that there is only one real compile
self.assert_num_reports(report_helper, 1)
report = pva.openReport(report_helper.find_report())
# Check that there is only one execute
self.assert_number_of_executions(report, 1)
if __name__ == "__main__":
os.environ['TF_XLA_FLAGS'] = ('--tf_xla_min_cluster_size=1 ' +
os.environ.get('TF_XLA_FLAGS', ''))
googletest.main()
| [] | [] | [
"TF_XLA_FLAGS"
] | [] | ["TF_XLA_FLAGS"] | python | 1 | 0 | |
main.go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"fmt"
"io"
"os"
"path"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
sspv1beta1 "kubevirt.io/ssp-operator/api/v1beta1"
"kubevirt.io/ssp-operator/controllers"
// +kubebuilder:scaffold:imports
)
var (
scheme = runtime.NewScheme()
setupLog = ctrl.Log.WithName("setup")
// Default certificate directory operator-sdk expects to have
sdkTLSDir = fmt.Sprintf("%s/k8s-webhook-server/serving-certs", os.TempDir())
)
const (
// Certificate directory and file names OLM mounts certificates to
olmTLSDir = "/apiserver.local.config/certificates"
olmTLSCrt = "apiserver.crt"
olmTLSKey = "apiserver.key"
// Default cert file names operator-sdk expects to have
sdkTLSCrt = "tls.crt"
sdkTLSKey = "tls.key"
)
func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(sspv1beta1.AddToScheme(scheme))
utilruntime.Must(controllers.InitScheme(scheme))
// +kubebuilder:scaffold:scheme
}
func main() {
var metricsAddr string
var readyProbeAddr string
var enableLeaderElection bool
flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
flag.StringVar(&readyProbeAddr, "ready-probe-addr", ":9440", "The address the readiness probe endpoint binds to.")
flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
"Enable leader election for controller manager. "+
"Enabling this will ensure there is only one active controller manager.")
flag.Parse()
ctrl.SetLogger(zap.New(zap.UseDevMode(true)))
err := copyCertificates()
if err != nil {
setupLog.Error(err, "Error copying certificates")
os.Exit(1)
}
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
HealthProbeBindAddress: readyProbeAddr,
Port: 9443,
LeaderElection: enableLeaderElection,
LeaderElectionID: "734f7229.kubevirt.io",
})
if err != nil {
setupLog.Error(err, "unable to start manager")
os.Exit(1)
}
if err = (&controllers.SSPReconciler{
Client: mgr.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("SSP"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "SSP")
os.Exit(1)
}
if os.Getenv("ENABLE_WEBHOOKS") != "false" {
if err = (&sspv1beta1.SSP{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "SSP")
os.Exit(1)
}
}
err = mgr.AddReadyzCheck("ready", healthz.Ping)
if err != nil {
setupLog.Error(err, "unable to register readiness check")
os.Exit(1)
}
// +kubebuilder:scaffold:builder
setupLog.Info("starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)
}
}
func copyCertificates() error {
olmDir, olmDirErr := os.Stat(olmTLSDir)
_, sdkDirErr := os.Stat(sdkTLSDir)
// If certificates are generated by OLM, we should use OLM certificates mount path
if olmDirErr == nil && olmDir.IsDir() && os.IsNotExist(sdkDirErr) {
// For some reason, OLM maps the cert/key files to apiserver.crt/apiserver.key
// instead of tls.crt/tls.key like the SDK expects, so copying those files to allow
// the operator to find and use them.
setupLog.Info("OLM cert directory found, copying cert files")
err := os.MkdirAll(sdkTLSDir, 0755)
if err != nil {
return fmt.Errorf("failed to create %s: %w", sdkTLSCrt, err)
}
err = copyFile(path.Join(olmTLSDir, olmTLSCrt), path.Join(sdkTLSDir, sdkTLSCrt))
if err != nil {
return fmt.Errorf("failed to copy %s/%s to %s/%s: %w", olmTLSDir, olmTLSCrt, sdkTLSDir, sdkTLSCrt, err)
}
err = copyFile(path.Join(olmTLSDir, olmTLSKey), path.Join(sdkTLSDir, sdkTLSKey))
if err != nil {
return fmt.Errorf("failed to copy %s/%s to %s/%s: %w", olmTLSDir, olmTLSKey, sdkTLSDir, sdkTLSKey, err)
}
} else {
setupLog.Info("OLM cert directory not found, using default")
}
return nil
}
func copyFile(src, dst string) error {
srcFile, err := os.Open(src)
if err != nil {
return err
}
defer srcFile.Close()
dstFile, err := os.Create(dst)
if err != nil {
return err
}
defer dstFile.Close()
_, err = io.Copy(dstFile, srcFile)
return err
}
| [
"\"ENABLE_WEBHOOKS\""
] | [] | [
"ENABLE_WEBHOOKS"
] | [] | ["ENABLE_WEBHOOKS"] | go | 1 | 0 | |
tools/dist/security/_gnupg.py | # Copyright (c) 2008-2014 by Vinay Sajip.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name(s) of the copyright holder(s) may not be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" A wrapper for the 'gpg' command::
Portions of this module are derived from A.M. Kuchling's well-designed
GPG.py, using Richard Jones' updated version 1.3, which can be found
in the pycrypto CVS repository on Sourceforge:
http://pycrypto.cvs.sourceforge.net/viewvc/pycrypto/gpg/GPG.py
This module is *not* forward-compatible with amk's; some of the
old interface has changed. For instance, since I've added decrypt
functionality, I elected to initialize with a 'gnupghome' argument
instead of 'keyring', so that gpg can find both the public and secret
keyrings. I've also altered some of the returned objects in order for
the caller to not have to know as much about the internals of the
result classes.
While the rest of ISconf is released under the GPL, I am releasing
this single file under the same terms that A.M. Kuchling used for
pycrypto.
Steve Traugott, [email protected]
Thu Jun 23 21:27:20 PDT 2005
This version of the module has been modified from Steve Traugott's version
(see http://trac.t7a.org/isconf/browser/trunk/lib/python/isconf/GPG.py) by
Vinay Sajip to make use of the subprocess module (Steve's version uses os.fork()
and so does not work on Windows). Renamed to gnupg.py to avoid confusion with
the previous versions.
Modifications Copyright (C) 2008-2017 Vinay Sajip. All rights reserved.
A unittest harness (test_gnupg.py) has also been added.
"""
__version__ = "0.4.1"
__author__ = "Vinay Sajip"
__date__ = "$07-Jul-2017 15:09:20$"
try:
from io import StringIO
except ImportError: # pragma: no cover
from cStringIO import StringIO
import codecs
import locale
import logging
import os
import re
import socket
from subprocess import Popen
from subprocess import PIPE
import sys
import threading
STARTUPINFO = None
if os.name == 'nt': # pragma: no cover
try:
from subprocess import STARTUPINFO, STARTF_USESHOWWINDOW, SW_HIDE
except ImportError:
STARTUPINFO = None
try:
    from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def handle(self, record):
pass
try:
unicode
_py3k = False
string_types = basestring
text_type = unicode
except NameError:
_py3k = True
string_types = str
text_type = str
logger = logging.getLogger(__name__)
if not logger.handlers:
logger.addHandler(NullHandler())
# We use the test below because it works for Jython as well as CPython
if os.path.__name__ == 'ntpath': # pragma: no cover
# On Windows, we don't need shell quoting, other than worrying about
# paths with spaces in them.
def shell_quote(s):
return '"%s"' % s
else:
# Section copied from sarge
# This regex determines which shell input needs quoting
# because it may be unsafe
UNSAFE = re.compile(r'[^\w%+,./:=@-]')
def shell_quote(s):
"""
Quote text so that it is safe for Posix command shells.
For example, "*.py" would be converted to "'*.py'". If the text is
considered safe it is returned unquoted.
:param s: The value to quote
:type s: str (or unicode on 2.x)
:return: A safe version of the input, from the point of view of Posix
command shells
:rtype: The passed-in type
"""
if not isinstance(s, string_types): # pragma: no cover
raise TypeError('Expected string type, got %s' % type(s))
if not s:
result = "''"
elif not UNSAFE.search(s):
result = s
else:
result = "'%s'" % s.replace("'", r"'\''")
return result
# end of sarge code
# Now that we use shell=False, we shouldn't need to quote arguments.
# Use no_quote instead of shell_quote to remind us of where quoting
# was needed. However, note that we still need, on 2.x, to encode any
# Unicode argument with the file system encoding - see Issue #41 and
# Python issue #1759845 ("subprocess.call fails with unicode strings in
# command line").
# Allows the encoding used to be overridden in special cases by setting
# this module attribute appropriately.
fsencoding = sys.getfilesystemencoding()
def no_quote(s):
if not _py3k and isinstance(s, text_type):
s = s.encode(fsencoding)
return s
def _copy_data(instream, outstream):
# Copy one stream to another
sent = 0
if hasattr(sys.stdin, 'encoding'):
enc = sys.stdin.encoding
else: # pragma: no cover
enc = 'ascii'
while True:
# See issue #39: read can fail when e.g. a text stream is provided
# for what is actually a binary file
try:
data = instream.read(1024)
except UnicodeError:
logger.warning('Exception occurred while reading', exc_info=1)
break
if not data:
break
sent += len(data)
# logger.debug("sending chunk (%d): %r", sent, data[:256])
try:
outstream.write(data)
except UnicodeError: # pragma: no cover
outstream.write(data.encode(enc))
except:
# Can sometimes get 'broken pipe' errors even when the data has all
# been sent
logger.exception('Error sending data')
break
try:
outstream.close()
except IOError: # pragma: no cover
logger.warning('Exception occurred while closing: ignored', exc_info=1)
logger.debug("closed output, %d bytes sent", sent)
def _threaded_copy_data(instream, outstream):
wr = threading.Thread(target=_copy_data, args=(instream, outstream))
wr.setDaemon(True)
logger.debug('data copier: %r, %r, %r', wr, instream, outstream)
wr.start()
return wr
def _write_passphrase(stream, passphrase, encoding):
passphrase = '%s\n' % passphrase
passphrase = passphrase.encode(encoding)
stream.write(passphrase)
logger.debug('Wrote passphrase')
def _is_sequence(instance):
return isinstance(instance, (list, tuple, set, frozenset))
def _make_memory_stream(s):
try:
from io import BytesIO
rv = BytesIO(s)
except ImportError: # pragma: no cover
rv = StringIO(s)
return rv
def _make_binary_stream(s, encoding):
if _py3k:
if isinstance(s, str):
s = s.encode(encoding)
else:
if type(s) is not str:
s = s.encode(encoding)
return _make_memory_stream(s)
class Verify(object):
"Handle status messages for --verify"
TRUST_UNDEFINED = 0
TRUST_NEVER = 1
TRUST_MARGINAL = 2
TRUST_FULLY = 3
TRUST_ULTIMATE = 4
TRUST_LEVELS = {
"TRUST_UNDEFINED" : TRUST_UNDEFINED,
"TRUST_NEVER" : TRUST_NEVER,
"TRUST_MARGINAL" : TRUST_MARGINAL,
"TRUST_FULLY" : TRUST_FULLY,
"TRUST_ULTIMATE" : TRUST_ULTIMATE,
}
def __init__(self, gpg):
self.gpg = gpg
self.valid = False
self.fingerprint = self.creation_date = self.timestamp = None
self.signature_id = self.key_id = None
self.username = None
self.key_status = None
self.status = None
self.pubkey_fingerprint = None
self.expire_timestamp = None
self.sig_timestamp = None
self.trust_text = None
self.trust_level = None
def __nonzero__(self):
return self.valid
__bool__ = __nonzero__
def handle_status(self, key, value):
if key in self.TRUST_LEVELS:
self.trust_text = key
self.trust_level = self.TRUST_LEVELS[key]
elif key in ("WARNING", "ERROR"):
logger.warning('potential problem: %s: %s', key, value)
elif key == "BADSIG": # pragma: no cover
self.valid = False
self.status = 'signature bad'
self.key_id, self.username = value.split(None, 1)
elif key == "ERRSIG": # pragma: no cover
self.valid = False
(self.key_id,
algo, hash_algo,
cls,
self.timestamp) = value.split()[:5]
self.status = 'signature error'
elif key == "EXPSIG": # pragma: no cover
self.valid = False
self.status = 'signature expired'
self.key_id, self.username = value.split(None, 1)
elif key == "GOODSIG":
self.valid = True
self.status = 'signature good'
self.key_id, self.username = value.split(None, 1)
elif key == "VALIDSIG":
(self.fingerprint,
self.creation_date,
self.sig_timestamp,
self.expire_timestamp) = value.split()[:4]
# may be different if signature is made with a subkey
self.pubkey_fingerprint = value.split()[-1]
self.status = 'signature valid'
elif key == "SIG_ID":
(self.signature_id,
self.creation_date, self.timestamp) = value.split()
elif key == "DECRYPTION_FAILED": # pragma: no cover
self.valid = False
self.key_id = value
self.status = 'decryption failed'
elif key == "NO_PUBKEY": # pragma: no cover
self.valid = False
self.key_id = value
self.status = 'no public key'
elif key in ("EXPKEYSIG", "REVKEYSIG"): # pragma: no cover
# signed with expired or revoked key
self.valid = False
self.key_id = value.split()[0]
if key == "EXPKEYSIG":
self.key_status = 'signing key has expired'
else:
self.key_status = 'signing key was revoked'
self.status = self.key_status
elif key in ("UNEXPECTED", "FAILURE"): # pragma: no cover
self.valid = False
self.key_id = value
if key == "UNEXPECTED":
self.status = 'unexpected data'
else:
# N.B. there might be other reasons
if not self.status:
self.status = 'incorrect passphrase'
elif key in ("DECRYPTION_INFO", "PLAINTEXT", "PLAINTEXT_LENGTH",
"NO_SECKEY", "BEGIN_SIGNING"):
pass
else: # pragma: no cover
logger.debug('message ignored: %s, %s', key, value)
class ImportResult(object):
"Handle status messages for --import"
counts = '''count no_user_id imported imported_rsa unchanged
n_uids n_subk n_sigs n_revoc sec_read sec_imported
sec_dups not_imported'''.split()
def __init__(self, gpg):
self.gpg = gpg
self.imported = []
self.results = []
self.fingerprints = []
for result in self.counts:
setattr(self, result, None)
def __nonzero__(self):
if self.not_imported: return False
if not self.fingerprints: return False
return True
__bool__ = __nonzero__
ok_reason = {
'0': 'Not actually changed',
'1': 'Entirely new key',
'2': 'New user IDs',
'4': 'New signatures',
'8': 'New subkeys',
'16': 'Contains private key',
}
problem_reason = {
'0': 'No specific reason given',
'1': 'Invalid Certificate',
'2': 'Issuer Certificate missing',
'3': 'Certificate Chain too long',
'4': 'Error storing certificate',
}
def handle_status(self, key, value):
if key in ("WARNING", "ERROR"):
logger.warning('potential problem: %s: %s', key, value)
elif key in ("IMPORTED", "KEY_CONSIDERED"):
# this duplicates info we already see in import_ok & import_problem
pass
elif key == "NODATA": # pragma: no cover
self.results.append({'fingerprint': None,
'problem': '0', 'text': 'No valid data found'})
elif key == "IMPORT_OK":
reason, fingerprint = value.split()
reasons = []
for code, text in list(self.ok_reason.items()):
if int(reason) | int(code) == int(reason):
reasons.append(text)
reasontext = '\n'.join(reasons) + "\n"
self.results.append({'fingerprint': fingerprint,
'ok': reason, 'text': reasontext})
self.fingerprints.append(fingerprint)
elif key == "IMPORT_PROBLEM": # pragma: no cover
try:
reason, fingerprint = value.split()
except:
reason = value
fingerprint = '<unknown>'
self.results.append({'fingerprint': fingerprint,
'problem': reason, 'text': self.problem_reason[reason]})
elif key == "IMPORT_RES":
import_res = value.split()
for i, count in enumerate(self.counts):
setattr(self, count, int(import_res[i]))
elif key == "KEYEXPIRED": # pragma: no cover
self.results.append({'fingerprint': None,
'problem': '0', 'text': 'Key expired'})
elif key == "SIGEXPIRED": # pragma: no cover
self.results.append({'fingerprint': None,
'problem': '0', 'text': 'Signature expired'})
elif key == "FAILURE": # pragma: no cover
self.results.append({'fingerprint': None,
'problem': '0', 'text': 'Other failure'})
else: # pragma: no cover
logger.debug('message ignored: %s, %s', key, value)
def summary(self):
l = []
l.append('%d imported' % self.imported)
if self.not_imported: # pragma: no cover
l.append('%d not imported' % self.not_imported)
return ', '.join(l)
ESCAPE_PATTERN = re.compile(r'\\x([0-9a-f][0-9a-f])', re.I)
BASIC_ESCAPES = {
r'\n': '\n',
r'\r': '\r',
r'\f': '\f',
r'\v': '\v',
r'\b': '\b',
r'\0': '\0',
}
class SendResult(object):
def __init__(self, gpg):
self.gpg = gpg
def handle_status(self, key, value):
logger.debug('SendResult: %s: %s', key, value)
class SearchKeys(list):
''' Handle status messages for --search-keys.
Handle pub and uid (relating the latter to the former).
Don't care about the rest
'''
UID_INDEX = 1
FIELDS = 'type keyid algo length date expires'.split()
def __init__(self, gpg):
self.gpg = gpg
self.curkey = None
self.fingerprints = []
self.uids = []
def get_fields(self, args):
result = {}
for i, var in enumerate(self.FIELDS):
if i < len(args):
result[var] = args[i]
else:
result[var] = 'unavailable'
result['uids'] = []
result['sigs'] = []
return result
def pub(self, args):
self.curkey = curkey = self.get_fields(args)
self.append(curkey)
def uid(self, args):
uid = args[self.UID_INDEX]
uid = ESCAPE_PATTERN.sub(lambda m: chr(int(m.group(1), 16)), uid)
for k, v in BASIC_ESCAPES.items():
uid = uid.replace(k, v)
self.curkey['uids'].append(uid)
self.uids.append(uid)
def handle_status(self, key, value): # pragma: no cover
pass
class ListKeys(SearchKeys):
''' Handle status messages for --list-keys, --list-sigs.
Handle pub and uid (relating the latter to the former).
Don't care about (info from src/DETAILS):
crt = X.509 certificate
crs = X.509 certificate and private key available
uat = user attribute (same as user id except for field 10).
sig = signature
rev = revocation signature
pkd = public key data (special field format, see below)
grp = reserved for gpgsm
rvk = revocation key
'''
UID_INDEX = 9
FIELDS = 'type trust length algo keyid date expires dummy ownertrust uid sig'.split()
def __init__(self, gpg):
super(ListKeys, self).__init__(gpg)
self.in_subkey = False
self.key_map = {}
def key(self, args):
self.curkey = curkey = self.get_fields(args)
if curkey['uid']:
curkey['uids'].append(curkey['uid'])
del curkey['uid']
curkey['subkeys'] = []
self.append(curkey)
self.in_subkey = False
pub = sec = key
def fpr(self, args):
fp = args[9]
if fp in self.key_map: # pragma: no cover
raise ValueError('Unexpected fingerprint collision: %s' % fp)
if not self.in_subkey:
self.curkey['fingerprint'] = fp
self.fingerprints.append(fp)
self.key_map[fp] = self.curkey
else:
self.curkey['subkeys'][-1].append(fp)
self.key_map[fp] = self.curkey
def sub(self, args):
subkey = [args[4], args[11]] # keyid, type
self.curkey['subkeys'].append(subkey)
self.in_subkey = True
def ssb(self, args):
subkey = [args[4], None] # keyid, type
self.curkey['subkeys'].append(subkey)
self.in_subkey = True
def sig(self, args):
# keyid, uid, sigclass
self.curkey['sigs'].append((args[4], args[9], args[10]))
class ScanKeys(ListKeys):
''' Handle status messages for --with-fingerprint.'''
def sub(self, args):
# --with-fingerprint --with-colons somehow outputs fewer colons,
# use the last value args[-1] instead of args[11]
subkey = [args[4], args[-1]]
self.curkey['subkeys'].append(subkey)
self.in_subkey = True
class TextHandler(object):
def _as_text(self):
return self.data.decode(self.gpg.encoding, self.gpg.decode_errors)
if _py3k:
__str__ = _as_text
else:
__unicode__ = _as_text
def __str__(self):
return self.data
class Crypt(Verify, TextHandler):
"Handle status messages for --encrypt and --decrypt"
def __init__(self, gpg):
Verify.__init__(self, gpg)
self.data = ''
self.ok = False
self.status = ''
def __nonzero__(self):
if self.ok: return True
return False
__bool__ = __nonzero__
def handle_status(self, key, value):
if key in ("WARNING", "ERROR"):
logger.warning('potential problem: %s: %s', key, value)
elif key == "NODATA":
self.status = "no data was provided"
elif key in ("NEED_PASSPHRASE", "BAD_PASSPHRASE", "GOOD_PASSPHRASE",
"MISSING_PASSPHRASE", "DECRYPTION_FAILED",
"KEY_NOT_CREATED", "NEED_PASSPHRASE_PIN"):
self.status = key.replace("_", " ").lower()
elif key == "NEED_PASSPHRASE_SYM":
self.status = 'need symmetric passphrase'
elif key == "BEGIN_DECRYPTION":
self.status = 'decryption incomplete'
elif key == "BEGIN_ENCRYPTION":
self.status = 'encryption incomplete'
elif key == "DECRYPTION_OKAY":
self.status = 'decryption ok'
self.ok = True
elif key == "END_ENCRYPTION":
self.status = 'encryption ok'
self.ok = True
elif key == "INV_RECP": # pragma: no cover
self.status = 'invalid recipient'
elif key == "KEYEXPIRED": # pragma: no cover
self.status = 'key expired'
elif key == "SIG_CREATED": # pragma: no cover
self.status = 'sig created'
elif key == "SIGEXPIRED": # pragma: no cover
self.status = 'sig expired'
elif key in ("ENC_TO", "USERID_HINT", "GOODMDC",
"END_DECRYPTION", "CARDCTRL", "BADMDC",
"SC_OP_FAILURE", "SC_OP_SUCCESS",
"PINENTRY_LAUNCHED", "KEY_CONSIDERED"):
pass
else:
Verify.handle_status(self, key, value)
class GenKey(object):
"Handle status messages for --gen-key"
def __init__(self, gpg):
self.gpg = gpg
self.type = None
self.fingerprint = None
def __nonzero__(self):
if self.fingerprint: return True
return False
__bool__ = __nonzero__
def __str__(self):
return self.fingerprint or ''
def handle_status(self, key, value):
if key in ("WARNING", "ERROR"): # pragma: no cover
logger.warning('potential problem: %s: %s', key, value)
elif key == "KEY_CREATED":
(self.type,self.fingerprint) = value.split()
elif key in ("PROGRESS", "GOOD_PASSPHRASE", "KEY_NOT_CREATED"):
pass
else: # pragma: no cover
logger.debug('message ignored: %s, %s', key, value)
class ExportResult(GenKey):
"""Handle status messages for --export[-secret-key].
For now, just use an existing class to base it on - if needed, we
can override handle_status for more specific message handling.
"""
def handle_status(self, key, value):
if key in ("EXPORTED", "EXPORT_RES"):
pass
else:
super(ExportResult, self).handle_status(key, value)
class DeleteResult(object):
"Handle status messages for --delete-key and --delete-secret-key"
def __init__(self, gpg):
self.gpg = gpg
self.status = 'ok'
def __str__(self):
return self.status
problem_reason = {
'1': 'No such key',
'2': 'Must delete secret key first',
'3': 'Ambiguous specification',
}
def handle_status(self, key, value):
if key == "DELETE_PROBLEM": # pragma: no cover
self.status = self.problem_reason.get(value,
"Unknown error: %r" % value)
else: # pragma: no cover
logger.debug('message ignored: %s, %s', key, value)
def __nonzero__(self):
return self.status == 'ok'
__bool__ = __nonzero__
class Sign(TextHandler):
"Handle status messages for --sign"
def __init__(self, gpg):
self.gpg = gpg
self.type = None
self.hash_algo = None
self.fingerprint = None
self.status = None
def __nonzero__(self):
return self.fingerprint is not None
__bool__ = __nonzero__
def handle_status(self, key, value):
if key in ("WARNING", "ERROR", "FAILURE"): # pragma: no cover
logger.warning('potential problem: %s: %s', key, value)
elif key in ("KEYEXPIRED", "SIGEXPIRED"): # pragma: no cover
self.status = 'key expired'
elif key == "KEYREVOKED": # pragma: no cover
self.status = 'key revoked'
elif key == "SIG_CREATED":
(self.type,
algo, self.hash_algo, cls,
self.timestamp, self.fingerprint
) = value.split()
self.status = 'signature created'
elif key in ("USERID_HINT", "NEED_PASSPHRASE", "GOOD_PASSPHRASE",
"BAD_PASSPHRASE", "BEGIN_SIGNING"):
pass
else: # pragma: no cover
logger.debug('message ignored: %s, %s', key, value)
VERSION_RE = re.compile(r'gpg \(GnuPG\) (\d+(\.\d+)*)'.encode('ascii'), re.I)
HEX_DIGITS_RE = re.compile(r'[0-9a-f]+$', re.I)
class GPG(object):
decode_errors = 'strict'
result_map = {
'crypt': Crypt,
'delete': DeleteResult,
'generate': GenKey,
'import': ImportResult,
'send': SendResult,
'list': ListKeys,
'scan': ScanKeys,
'search': SearchKeys,
'sign': Sign,
'verify': Verify,
'export': ExportResult,
}
"Encapsulate access to the gpg executable"
def __init__(self, gpgbinary='gpg', gnupghome=None, verbose=False,
use_agent=False, keyring=None, options=None,
secret_keyring=None):
"""Initialize a GPG process wrapper. Options are:
gpgbinary -- full pathname for GPG binary.
gnupghome -- full pathname to where we can find the public and
private keyrings. Default is whatever gpg defaults to.
keyring -- name of alternative keyring file to use, or list of such
keyrings. If specified, the default keyring is not used.
        options -- a list of additional options to pass to the GPG binary.
secret_keyring -- name of alternative secret keyring file to use, or
list of such keyrings.
"""
self.gpgbinary = gpgbinary
self.gnupghome = gnupghome
if keyring:
# Allow passing a string or another iterable. Make it uniformly
# a list of keyring filenames
if isinstance(keyring, string_types):
keyring = [keyring]
self.keyring = keyring
if secret_keyring:
# Allow passing a string or another iterable. Make it uniformly
# a list of keyring filenames
if isinstance(secret_keyring, string_types):
secret_keyring = [secret_keyring]
self.secret_keyring = secret_keyring
self.verbose = verbose
self.use_agent = use_agent
if isinstance(options, str): # pragma: no cover
options = [options]
self.options = options
self.on_data = None # or a callable - will be called with data chunks
# Changed in 0.3.7 to use Latin-1 encoding rather than
# locale.getpreferredencoding falling back to sys.stdin.encoding
# falling back to utf-8, because gpg itself uses latin-1 as the default
# encoding.
self.encoding = 'latin-1'
if gnupghome and not os.path.isdir(self.gnupghome):
os.makedirs(self.gnupghome,0x1C0)
try:
p = self._open_subprocess(["--version"])
except OSError:
msg = 'Unable to run gpg - it may not be available.'
logger.exception(msg)
raise OSError(msg)
result = self.result_map['verify'](self) # any result will do for this
self._collect_output(p, result, stdin=p.stdin)
if p.returncode != 0: # pragma: no cover
raise ValueError("Error invoking gpg: %s: %s" % (p.returncode,
result.stderr))
m = VERSION_RE.match(result.data)
if not m: # pragma: no cover
self.version = None
else:
dot = '.'.encode('ascii')
self.version = tuple([int(s) for s in m.groups()[0].split(dot)])
def make_args(self, args, passphrase):
"""
Make a list of command line elements for GPG. The value of ``args``
will be appended. The ``passphrase`` argument needs to be True if
a passphrase will be sent to GPG, else False.
"""
cmd = [self.gpgbinary, '--status-fd', '2', '--no-tty']
cmd.extend(['--debug', 'ipc'])
if passphrase and hasattr(self, 'version'):
if self.version >= (2, 1):
cmd[1:1] = ['--pinentry-mode', 'loopback']
cmd.extend(['--fixed-list-mode', '--batch', '--with-colons'])
if self.gnupghome:
cmd.extend(['--homedir', no_quote(self.gnupghome)])
if self.keyring:
cmd.append('--no-default-keyring')
for fn in self.keyring:
cmd.extend(['--keyring', no_quote(fn)])
if self.secret_keyring:
for fn in self.secret_keyring:
cmd.extend(['--secret-keyring', no_quote(fn)])
if passphrase:
cmd.extend(['--passphrase-fd', '0'])
if self.use_agent: # pragma: no cover
cmd.append('--use-agent')
if self.options:
cmd.extend(self.options)
cmd.extend(args)
return cmd
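    # For orientation (illustrative; the exact flags depend on configuration
    # and the detected gpg version): with the defaults above,
    # make_args(['--list-keys'], passphrase=False) yields roughly
    #
    #     ['gpg', '--status-fd', '2', '--no-tty', '--debug', 'ipc',
    #      '--fixed-list-mode', '--batch', '--with-colons', '--list-keys']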
def _open_subprocess(self, args, passphrase=False):
# Internal method: open a pipe to a GPG subprocess and return
# the file objects for communicating with it.
# def debug_print(cmd):
# result = []
# for c in cmd:
# if ' ' not in c:
# result.append(c)
# else:
# if '"' not in c:
# result.append('"%s"' % c)
# elif "'" not in c:
# result.append("'%s'" % c)
# else:
# result.append(c) # give up
# return ' '.join(cmd)
from subprocess import list2cmdline as debug_print
cmd = self.make_args(args, passphrase)
if self.verbose: # pragma: no cover
print(debug_print(cmd))
if not STARTUPINFO:
si = None
else: # pragma: no cover
si = STARTUPINFO()
si.dwFlags = STARTF_USESHOWWINDOW
si.wShowWindow = SW_HIDE
result = Popen(cmd, shell=False, stdin=PIPE, stdout=PIPE, stderr=PIPE,
startupinfo=si)
logger.debug("%s: %s", result.pid, debug_print(cmd))
return result
def _read_response(self, stream, result):
# Internal method: reads all the stderr output from GPG, taking notice
# only of lines that begin with the magic [GNUPG:] prefix.
#
# Calls methods on the response object for each valid token found,
# with the arg being the remainder of the status line.
lines = []
while True:
line = stream.readline()
if len(line) == 0:
break
lines.append(line)
line = line.rstrip()
if self.verbose: # pragma: no cover
print(line)
logger.debug("%s", line)
if line[0:9] == '[GNUPG:] ':
# Chop off the prefix
line = line[9:]
L = line.split(None, 1)
keyword = L[0]
if len(L) > 1:
value = L[1]
else:
value = ""
result.handle_status(keyword, value)
result.stderr = ''.join(lines)
def _read_data(self, stream, result, on_data=None):
# Read the contents of the file from GPG's stdout
chunks = []
while True:
data = stream.read(1024)
if len(data) == 0:
break
logger.debug("chunk: %r" % data[:256])
chunks.append(data)
if on_data:
on_data(data)
if _py3k:
# Join using b'' or '', as appropriate
result.data = type(data)().join(chunks)
else:
result.data = ''.join(chunks)
def _collect_output(self, process, result, writer=None, stdin=None):
"""
        Drain the subprocess's output streams, writing the collected output
to the result. If a writer thread (writing to the subprocess) is given,
make sure it's joined before returning. If a stdin stream is given,
close it before returning.
"""
stderr = codecs.getreader(self.encoding)(process.stderr)
rr = threading.Thread(target=self._read_response, args=(stderr, result))
rr.setDaemon(True)
logger.debug('stderr reader: %r', rr)
rr.start()
stdout = process.stdout
dr = threading.Thread(target=self._read_data, args=(stdout, result, self.on_data))
dr.setDaemon(True)
logger.debug('stdout reader: %r', dr)
dr.start()
dr.join()
rr.join()
if writer is not None:
writer.join()
process.wait()
if stdin is not None:
try:
stdin.close()
except IOError: # pragma: no cover
pass
stderr.close()
stdout.close()
def _handle_io(self, args, fileobj, result, passphrase=None, binary=False):
"Handle a call to GPG - pass input data, collect output data"
# Handle a basic data call - pass data to GPG, handle the output
# including status information. Garbage In, Garbage Out :)
p = self._open_subprocess(args, passphrase is not None)
if not binary: # pragma: no cover
stdin = codecs.getwriter(self.encoding)(p.stdin)
else:
stdin = p.stdin
if passphrase:
_write_passphrase(stdin, passphrase, self.encoding)
writer = _threaded_copy_data(fileobj, stdin)
self._collect_output(p, result, writer, stdin)
return result
#
# SIGNATURE METHODS
#
def sign(self, message, **kwargs):
"""sign message"""
f = _make_binary_stream(message, self.encoding)
result = self.sign_file(f, **kwargs)
f.close()
return result
def set_output_without_confirmation(self, args, output):
"If writing to a file which exists, avoid a confirmation message."
if os.path.exists(output):
# We need to avoid an overwrite confirmation message
args.extend(['--yes'])
args.extend(['--output', no_quote(output)])
def sign_file(self, file, keyid=None, passphrase=None, clearsign=True,
detach=False, binary=False, output=None, extra_args=None):
"""sign file"""
logger.debug("sign_file: %s", file)
if binary: # pragma: no cover
args = ['-s']
else:
args = ['-sa']
# You can't specify detach-sign and clearsign together: gpg ignores
# the detach-sign in that case.
if detach:
args.append("--detach-sign")
elif clearsign:
args.append("--clearsign")
if keyid:
args.extend(['--default-key', no_quote(keyid)])
if output: # write the output to a file with the specified name
self.set_output_without_confirmation(args, output)
if extra_args:
args.extend(extra_args)
result = self.result_map['sign'](self)
        # We could use _handle_io here except for the fact that if the
        # passphrase is bad, gpg bails and you can't write the message.
p = self._open_subprocess(args, passphrase is not None)
try:
stdin = p.stdin
if passphrase:
_write_passphrase(stdin, passphrase, self.encoding)
writer = _threaded_copy_data(file, stdin)
except IOError: # pragma: no cover
logging.exception("error writing message")
writer = None
self._collect_output(p, result, writer, stdin)
return result
def verify(self, data, **kwargs):
"""Verify the signature on the contents of the string 'data'
>>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg')
>>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome="keys")
>>> input = gpg.gen_key_input(passphrase='foo')
>>> key = gpg.gen_key(input)
>>> assert key
>>> sig = gpg.sign('hello',keyid=key.fingerprint,passphrase='bar')
>>> assert not sig
>>> sig = gpg.sign('hello',keyid=key.fingerprint,passphrase='foo')
>>> assert sig
>>> verify = gpg.verify(sig.data)
>>> assert verify
"""
f = _make_binary_stream(data, self.encoding)
result = self.verify_file(f, **kwargs)
f.close()
return result
def verify_file(self, file, data_filename=None, close_file=True, extra_args=None):
"Verify the signature on the contents of the file-like object 'file'"
logger.debug('verify_file: %r, %r', file, data_filename)
result = self.result_map['verify'](self)
args = ['--verify']
if extra_args:
args.extend(extra_args)
if data_filename is None:
self._handle_io(args, file, result, binary=True)
else:
logger.debug('Handling detached verification')
import tempfile
fd, fn = tempfile.mkstemp(prefix='pygpg')
s = file.read()
if close_file:
file.close()
logger.debug('Wrote to temp file: %r', s)
os.write(fd, s)
os.close(fd)
args.append(no_quote(fn))
args.append(no_quote(data_filename))
try:
p = self._open_subprocess(args)
self._collect_output(p, result, stdin=p.stdin)
finally:
os.unlink(fn)
return result
def verify_data(self, sig_filename, data, extra_args=None):
"Verify the signature in sig_filename against data in memory"
logger.debug('verify_data: %r, %r ...', sig_filename, data[:16])
result = self.result_map['verify'](self)
args = ['--verify']
if extra_args:
args.extend(extra_args)
args.extend([no_quote(sig_filename), '-'])
stream = _make_memory_stream(data)
self._handle_io(args, stream, result, binary=True)
return result
#
# KEY MANAGEMENT
#
def import_keys(self, key_data):
"""
Import the key_data into our keyring.
"""
result = self.result_map['import'](self)
logger.debug('import_keys: %r', key_data[:256])
data = _make_binary_stream(key_data, self.encoding)
self._handle_io(['--import'], data, result, binary=True)
logger.debug('import_keys result: %r', result.__dict__)
data.close()
return result
def recv_keys(self, keyserver, *keyids):
"""Import a key from a keyserver
>>> import shutil
>>> shutil.rmtree("keys", ignore_errors=True)
>>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg')
>>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome="keys")
>>> os.chmod('keys', 0x1C0)
>>> result = gpg.recv_keys('pgp.mit.edu', '92905378')
>>> assert result
"""
result = self.result_map['import'](self)
logger.debug('recv_keys: %r', keyids)
data = _make_binary_stream("", self.encoding)
#data = ""
args = ['--keyserver', no_quote(keyserver), '--recv-keys']
args.extend([no_quote(k) for k in keyids])
self._handle_io(args, data, result, binary=True)
logger.debug('recv_keys result: %r', result.__dict__)
data.close()
return result
def send_keys(self, keyserver, *keyids):
"""Send a key to a keyserver.
Note: it's not practical to test this function without sending
arbitrary data to live keyservers.
"""
result = self.result_map['send'](self)
logger.debug('send_keys: %r', keyids)
data = _make_binary_stream('', self.encoding)
#data = ""
args = ['--keyserver', no_quote(keyserver), '--send-keys']
args.extend([no_quote(k) for k in keyids])
self._handle_io(args, data, result, binary=True)
logger.debug('send_keys result: %r', result.__dict__)
data.close()
return result
def delete_keys(self, fingerprints, secret=False, passphrase=None):
which='key'
if secret: # pragma: no cover
if self.version >= (2, 1) and passphrase is None:
raise ValueError('For GnuPG >= 2.1, deleting secret keys '
'needs a passphrase to be provided')
which='secret-key'
if _is_sequence(fingerprints): # pragma: no cover
fingerprints = [no_quote(s) for s in fingerprints]
else:
fingerprints = [no_quote(fingerprints)]
args = ['--delete-%s' % which]
args.extend(fingerprints)
result = self.result_map['delete'](self)
if not secret or self.version < (2, 1):
p = self._open_subprocess(args)
self._collect_output(p, result, stdin=p.stdin)
else:
# Need to send in a passphrase.
f = _make_binary_stream('', self.encoding)
try:
self._handle_io(args, f, result, passphrase=passphrase,
binary=True)
finally:
f.close()
return result
def export_keys(self, keyids, secret=False, armor=True, minimal=False,
passphrase=None):
"""
Export the indicated keys. A 'keyid' is anything gpg accepts.
Since GnuPG 2.1, you can't export secret keys without providing a
passphrase.
"""
which=''
if secret:
which='-secret-key'
if self.version >= (2, 1) and passphrase is None:
raise ValueError('For GnuPG >= 2.1, exporting secret keys '
'needs a passphrase to be provided')
if _is_sequence(keyids):
keyids = [no_quote(k) for k in keyids]
else:
keyids = [no_quote(keyids)]
args = ['--export%s' % which]
if armor:
args.append('--armor')
if minimal: # pragma: no cover
args.extend(['--export-options','export-minimal'])
args.extend(keyids)
# gpg --export produces no status-fd output; stdout will be
# empty in case of failure
#stdout, stderr = p.communicate()
result = self.result_map['export'](self)
if not secret or self.version < (2, 1):
p = self._open_subprocess(args)
self._collect_output(p, result, stdin=p.stdin)
else:
# Need to send in a passphrase.
f = _make_binary_stream('', self.encoding)
try:
self._handle_io(args, f, result, passphrase=passphrase,
binary=True)
finally:
f.close()
logger.debug('export_keys result: %r', result.data)
# Issue #49: Return bytes if armor not specified, else text
result = result.data
if armor:
result = result.decode(self.encoding, self.decode_errors)
return result
def _get_list_output(self, p, kind):
# Get the response information
result = self.result_map[kind](self)
self._collect_output(p, result, stdin=p.stdin)
lines = result.data.decode(self.encoding,
self.decode_errors).splitlines()
valid_keywords = 'pub uid sec fpr sub ssb sig'.split()
for line in lines:
if self.verbose: # pragma: no cover
print(line)
logger.debug("line: %r", line.rstrip())
if not line: # pragma: no cover
break
L = line.strip().split(':')
if not L: # pragma: no cover
continue
keyword = L[0]
if keyword in valid_keywords:
getattr(result, keyword)(L)
return result
def list_keys(self, secret=False, keys=None, sigs=False):
""" list the keys currently in the keyring
>>> import shutil
>>> shutil.rmtree("keys", ignore_errors=True)
>>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg')
>>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome="keys")
>>> input = gpg.gen_key_input(passphrase='foo')
>>> result = gpg.gen_key(input)
>>> fp1 = result.fingerprint
>>> result = gpg.gen_key(input)
>>> fp2 = result.fingerprint
>>> pubkeys = gpg.list_keys()
>>> assert fp1 in pubkeys.fingerprints
>>> assert fp2 in pubkeys.fingerprints
"""
if sigs:
which = 'sigs'
else: which='keys'
if secret:
which='secret-keys'
args = ['--list-%s' % which,
'--fingerprint', '--fingerprint'] # get subkey FPs, too
if keys:
if isinstance(keys, string_types):
keys = [keys]
args.extend(keys)
p = self._open_subprocess(args)
return self._get_list_output(p, 'list')
def scan_keys(self, filename):
"""
List details of an ascii armored or binary key file
without first importing it to the local keyring.
The function achieves this on modern GnuPG by running:
$ gpg --dry-run --import-options import-show --import
On older versions, it does the *much* riskier:
$ gpg --with-fingerprint --with-colons filename
"""
if self.version >= (2, 1):
args = ['--dry-run', '--import-options', 'import-show', '--import']
else:
logger.warning('Trying to list packets, but if the file is not a '
'keyring, might accidentally decrypt')
args = ['--with-fingerprint', '--with-colons', '--fixed-list-mode']
args.append(no_quote(filename))
p = self._open_subprocess(args)
return self._get_list_output(p, 'scan')
def search_keys(self, query, keyserver='pgp.mit.edu'):
""" search keyserver by query (using --search-keys option)
>>> import shutil
>>> shutil.rmtree('keys', ignore_errors=True)
>>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg')
>>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome='keys')
>>> os.chmod('keys', 0x1C0)
>>> result = gpg.search_keys('<[email protected]>')
>>> assert result, 'Failed using default keyserver'
>>> #keyserver = 'keyserver.ubuntu.com'
>>> #result = gpg.search_keys('<[email protected]>', keyserver)
>>> #assert result, 'Failed using keyserver.ubuntu.com'
"""
query = query.strip()
if HEX_DIGITS_RE.match(query):
query = '0x' + query
args = ['--fingerprint',
'--keyserver', no_quote(keyserver), '--search-keys',
no_quote(query)]
p = self._open_subprocess(args)
# Get the response information
result = self.result_map['search'](self)
self._collect_output(p, result, stdin=p.stdin)
lines = result.data.decode(self.encoding,
self.decode_errors).splitlines()
valid_keywords = ['pub', 'uid']
for line in lines:
if self.verbose: # pragma: no cover
print(line)
logger.debug('line: %r', line.rstrip())
if not line: # sometimes get blank lines on Windows
continue
L = line.strip().split(':')
if not L: # pragma: no cover
continue
keyword = L[0]
if keyword in valid_keywords:
getattr(result, keyword)(L)
return result
def gen_key(self, input):
"""Generate a key; you might use gen_key_input() to create the
control input.
>>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg')
>>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome="keys")
>>> input = gpg.gen_key_input(passphrase='foo')
>>> result = gpg.gen_key(input)
>>> assert result
>>> result = gpg.gen_key('foo')
>>> assert not result
"""
args = ["--gen-key"]
result = self.result_map['generate'](self)
f = _make_binary_stream(input, self.encoding)
self._handle_io(args, f, result, binary=True)
f.close()
return result
def gen_key_input(self, **kwargs):
"""
Generate --gen-key input per gpg doc/DETAILS
"""
parms = {}
for key, val in list(kwargs.items()):
key = key.replace('_','-').title()
if str(val).strip(): # skip empty strings
parms[key] = val
parms.setdefault('Key-Type','RSA')
parms.setdefault('Key-Length',2048)
parms.setdefault('Name-Real', "Autogenerated Key")
logname = (os.environ.get('LOGNAME') or os.environ.get('USERNAME') or
'unspecified')
hostname = socket.gethostname()
parms.setdefault('Name-Email', "%s@%s" % (logname.replace(' ', '_'),
hostname))
out = "Key-Type: %s\n" % parms.pop('Key-Type')
for key, val in list(parms.items()):
out += "%s: %s\n" % (key, val)
out += "%commit\n"
return out
# Key-Type: RSA
# Key-Length: 1024
# Name-Real: ISdlink Server on %s
# Name-Comment: Created by %s
# Name-Email: isdlink@%s
# Expire-Date: 0
# %commit
#
#
# Key-Type: DSA
# Key-Length: 1024
# Subkey-Type: ELG-E
# Subkey-Length: 1024
# Name-Real: Joe Tester
# Name-Comment: with stupid passphrase
# Name-Email: [email protected]
# Expire-Date: 0
# Passphrase: abc
# %pubring foo.pub
# %secring foo.sec
# %commit
#
# ENCRYPTION
#
def encrypt_file(self, file, recipients, sign=None,
always_trust=False, passphrase=None,
armor=True, output=None, symmetric=False, extra_args=None):
"Encrypt the message read from the file-like object 'file'"
args = ['--encrypt']
if symmetric:
# can't be False or None - could be True or a cipher algo value
# such as AES256
args = ['--symmetric']
if symmetric is not True:
args.extend(['--cipher-algo', no_quote(symmetric)])
# else use the default, currently CAST5
else:
if not recipients:
raise ValueError('No recipients specified with asymmetric '
'encryption')
if not _is_sequence(recipients):
recipients = (recipients,)
for recipient in recipients:
args.extend(['--recipient', no_quote(recipient)])
if armor: # create ascii-armored output - False for binary output
args.append('--armor')
if output: # write the output to a file with the specified name
self.set_output_without_confirmation(args, output)
if sign is True: # pragma: no cover
args.append('--sign')
elif sign: # pragma: no cover
args.extend(['--sign', '--default-key', no_quote(sign)])
if always_trust: # pragma: no cover
args.append('--always-trust')
if extra_args:
args.extend(extra_args)
result = self.result_map['crypt'](self)
self._handle_io(args, file, result, passphrase=passphrase, binary=True)
logger.debug('encrypt result: %r', result.data)
return result
def encrypt(self, data, recipients, **kwargs):
"""Encrypt the message contained in the string 'data'
>>> import shutil
>>> if os.path.exists("keys"):
... shutil.rmtree("keys", ignore_errors=True)
>>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg')
>>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome="keys")
>>> input = gpg.gen_key_input(name_email='user1@test', passphrase='pp1')
>>> result = gpg.gen_key(input)
>>> fp1 = result.fingerprint
>>> input = gpg.gen_key_input(name_email='user2@test', passphrase='pp2')
>>> result = gpg.gen_key(input)
>>> fp2 = result.fingerprint
>>> result = gpg.encrypt("hello",fp2)
>>> message = str(result)
>>> assert message != 'hello'
>>> result = gpg.decrypt(message, passphrase='pp2')
>>> assert result
>>> str(result)
'hello'
>>> result = gpg.encrypt("hello again", fp1)
>>> message = str(result)
>>> result = gpg.decrypt(message, passphrase='bar')
>>> result.status in ('decryption failed', 'bad passphrase')
True
>>> assert not result
>>> result = gpg.decrypt(message, passphrase='pp1')
>>> result.status == 'decryption ok'
True
>>> str(result)
'hello again'
>>> result = gpg.encrypt("signed hello", fp2, sign=fp1, passphrase='pp1')
>>> result.status == 'encryption ok'
True
>>> message = str(result)
>>> result = gpg.decrypt(message, passphrase='pp2')
>>> result.status == 'decryption ok'
True
>>> assert result.fingerprint == fp1
"""
data = _make_binary_stream(data, self.encoding)
result = self.encrypt_file(data, recipients, **kwargs)
data.close()
return result
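
# Illustrative sketch (not covered by the doctest above): symmetric
# encryption uses the same entry point, with no recipients and a passphrase.
# 'AES256' is only an example value for gpg's --cipher-algo option.
#
#   crypt = gpg.encrypt("secret words", None, symmetric='AES256',
#                       passphrase='pp')
#   plain = gpg.decrypt(str(crypt), passphrase='pp')
#   assert str(plain) == "secret words"
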
def decrypt(self, message, **kwargs):
data = _make_binary_stream(message, self.encoding)
result = self.decrypt_file(data, **kwargs)
data.close()
return result
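
# decrypt_file reads ciphertext from the file-like object 'file'. 'output'
# names a file the plaintext is written to (without a confirmation prompt),
# 'always_trust' adds --always-trust, 'passphrase' is passed through to gpg,
# and 'extra_args' is appended verbatim to the command line.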
def decrypt_file(self, file, always_trust=False, passphrase=None,
output=None, extra_args=None):
args = ["--decrypt"]
if output: # write the output to a file with the specified name
self.set_output_without_confirmation(args, output)
if always_trust: # pragma: no cover
args.append("--always-trust")
if extra_args:
args.extend(extra_args)
result = self.result_map['crypt'](self)
self._handle_io(args, file, result, passphrase, binary=True)
logger.debug('decrypt result: %r', result.data)
return result
| [] | [] | ["GPGBINARY", "LOGNAME", "USERNAME"] | [] | ["GPGBINARY", "LOGNAME", "USERNAME"] | python | 3 | 0 |
cmd/common_test.go | // Copyright © 2017-2019 Aqua Security Software Ltd. <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"testing"
"time"
"github.com/aquasecurity/kube-bench/check"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
)

func TestNewRunFilter(t *testing.T) {
type TestCase struct {
Name string
FilterOpts FilterOpts
Group *check.Group
Check *check.Check
Expected bool
}
testCases := []TestCase{
{
Name: "Should return true when scored flag is enabled and check is scored",
FilterOpts: FilterOpts{Scored: true, Unscored: false},
Group: &check.Group{},
Check: &check.Check{Scored: true},
Expected: true,
},
{
Name: "Should return false when scored flag is enabled and check is not scored",
FilterOpts: FilterOpts{Scored: true, Unscored: false},
Group: &check.Group{},
Check: &check.Check{Scored: false},
Expected: false,
},
{
Name: "Should return true when unscored flag is enabled and check is not scored",
FilterOpts: FilterOpts{Scored: false, Unscored: true},
Group: &check.Group{},
Check: &check.Check{Scored: false},
Expected: true,
},
{
Name: "Should return false when unscored flag is enabled and check is scored",
FilterOpts: FilterOpts{Scored: false, Unscored: true},
Group: &check.Group{},
Check: &check.Check{Scored: true},
Expected: false,
},
{
Name: "Should return true when group flag contains group's ID",
FilterOpts: FilterOpts{Scored: true, Unscored: true, GroupList: "G1,G2,G3"},
Group: &check.Group{ID: "G2"},
Check: &check.Check{},
Expected: true,
},
{
Name: "Should return false when group flag doesn't contain group's ID",
FilterOpts: FilterOpts{GroupList: "G1,G3"},
Group: &check.Group{ID: "G2"},
Check: &check.Check{},
Expected: false,
},
{
Name: "Should return true when check flag contains check's ID",
FilterOpts: FilterOpts{Scored: true, Unscored: true, CheckList: "C1,C2,C3"},
Group: &check.Group{},
Check: &check.Check{ID: "C2"},
Expected: true,
},
{
Name: "Should return false when check flag doesn't contain check's ID",
FilterOpts: FilterOpts{CheckList: "C1,C3"},
Group: &check.Group{},
Check: &check.Check{ID: "C2"},
Expected: false,
},
}
for _, testCase := range testCases {
t.Run(testCase.Name, func(t *testing.T) {
filter, _ := NewRunFilter(testCase.FilterOpts)
assert.Equal(t, testCase.Expected, filter(testCase.Group, testCase.Check))
})
}
t.Run("Should return error when both group and check flags are used", func(t *testing.T) {
// given
opts := FilterOpts{GroupList: "G1", CheckList: "C1"}
// when
_, err := NewRunFilter(opts)
// then
assert.EqualError(t, err, "group option and check option can't be used together")
})
}

func TestIsMaster(t *testing.T) {
testCases := []struct {
name string
cfgFile string
getBinariesFunc func(*viper.Viper, check.NodeType) (map[string]string, error)
isMaster bool
}{
{
name: "valid config, is master and all components are running",
cfgFile: "../cfg/config.yaml",
getBinariesFunc: func(viper *viper.Viper, nt check.NodeType) (strings map[string]string, i error) {
return map[string]string{"apiserver": "kube-apiserver"}, nil
},
isMaster: true,
},
{
name: "valid config, is master but not all components are running",
cfgFile: "../cfg/config.yaml",
getBinariesFunc: func(viper *viper.Viper, nt check.NodeType) (strings map[string]string, i error) {
return map[string]string{}, nil
},
isMaster: false,
},
{
name: "valid config, is master, not all components are running and fails to find all binaries",
cfgFile: "../cfg/config.yaml",
getBinariesFunc: func(viper *viper.Viper, nt check.NodeType) (strings map[string]string, i error) {
return map[string]string{}, errors.New("failed to find binaries")
},
isMaster: false,
},
{
name: "valid config, does not include master",
cfgFile: "../cfg/node_only.yaml",
isMaster: false,
},
}
cfgDirOld := cfgDir
cfgDir = "../cfg"
defer func() {
cfgDir = cfgDirOld
}()
execCode := `#!/bin/sh
echo "Server Version: v1.13.10"
`
restore, err := fakeExecutableInPath("kubectl", execCode)
if err != nil {
t.Fatal("Failed when calling fakeExecutableInPath ", err)
}
defer restore()
for _, tc := range testCases {
cfgFile = tc.cfgFile
initConfig()
oldGetBinariesFunc := getBinariesFunc
getBinariesFunc = tc.getBinariesFunc
defer func() {
getBinariesFunc = oldGetBinariesFunc
cfgFile = ""
}()
assert.Equal(t, tc.isMaster, isMaster(), tc.name)
}
}

func TestMapToCISVersion(t *testing.T) {
viperWithData, err := loadConfigForTest()
if err != nil {
t.Fatalf("Unable to load config file %v", err)
}
kubeToBenchmarkMap, err := loadVersionMapping(viperWithData)
if err != nil {
t.Fatalf("Unable to load config file %v", err)
}
cases := []struct {
kubeVersion string
succeed bool
exp string
expErr string
}{
{kubeVersion: "1.9", succeed: false, exp: "", expErr: "unable to find a matching Benchmark Version match for kubernetes version: 1.9"},
{kubeVersion: "1.11", succeed: true, exp: "cis-1.3"},
{kubeVersion: "1.12", succeed: true, exp: "cis-1.3"},
{kubeVersion: "1.13", succeed: true, exp: "cis-1.4"},
{kubeVersion: "1.14", succeed: true, exp: "cis-1.4"},
{kubeVersion: "1.15", succeed: true, exp: "cis-1.5"},
{kubeVersion: "1.16", succeed: true, exp: "cis-1.5"},
{kubeVersion: "1.17", succeed: true, exp: "cis-1.5"},
{kubeVersion: "1.18", succeed: true, exp: "cis-1.5"},
{kubeVersion: "gke-1.0", succeed: true, exp: "gke-1.0"},
{kubeVersion: "ocp-3.10", succeed: true, exp: "rh-0.7"},
{kubeVersion: "ocp-3.11", succeed: true, exp: "rh-0.7"},
{kubeVersion: "unknown", succeed: false, exp: "", expErr: "unable to find a matching Benchmark Version match for kubernetes version: unknown"},
}
for _, c := range cases {
rv, err := mapToBenchmarkVersion(kubeToBenchmarkMap, c.kubeVersion)
if c.succeed {
if err != nil {
t.Errorf("[%q]-Unexpected error: %v", c.kubeVersion, err)
}
if len(rv) == 0 {
t.Errorf("[%q]-missing return value", c.kubeVersion)
}
if c.exp != rv {
t.Errorf("[%q]- expected %q but Got %q", c.kubeVersion, c.exp, rv)
}
} else {
if c.exp != rv {
t.Errorf("[%q]-mapToBenchmarkVersion kubeversion: %q Got %q expected %s", c.kubeVersion, c.kubeVersion, rv, c.exp)
}
if c.expErr != err.Error() {
t.Errorf("[%q]-mapToBenchmarkVersion expected Error: %q instead Got %q", c.kubeVersion, c.expErr, err.Error())
}
}
}
}

func TestLoadVersionMapping(t *testing.T) {
setDefault := func(v *viper.Viper, key string, value interface{}) *viper.Viper {
v.SetDefault(key, value)
return v
}
viperWithData, err := loadConfigForTest()
if err != nil {
t.Fatalf("Unable to load config file %v", err)
}
cases := []struct {
n string
v *viper.Viper
succeed bool
}{
{n: "empty", v: viper.New(), succeed: false},
{
n: "novals",
v: setDefault(viper.New(), "version_mapping", "novals"),
succeed: false,
},
{
n: "good",
v: viperWithData,
succeed: true,
},
}
for _, c := range cases {
rv, err := loadVersionMapping(c.v)
if c.succeed {
if err != nil {
t.Errorf("[%q]-Unexpected error: %v", c.n, err)
}
if len(rv) == 0 {
t.Errorf("[%q]-missing mapping value", c.n)
}
} else {
if err == nil {
t.Errorf("[%q]-Expected error but got none", c.n)
}
}
}
}

func TestGetBenchmarkVersion(t *testing.T) {
viperWithData, err := loadConfigForTest()
if err != nil {
t.Fatalf("Unable to load config file %v", err)
}
type getBenchmarkVersionFnToTest func(kubeVersion, benchmarkVersion string, v *viper.Viper) (string, error)
withFakeKubectl := func(kubeVersion, benchmarkVersion string, v *viper.Viper, fn getBenchmarkVersionFnToTest) (string, error) {
execCode := `#!/bin/sh
echo "Server Version: v1.13.10"
`
restore, err := fakeExecutableInPath("kubectl", execCode)
if err != nil {
t.Fatal("Failed when calling fakeExecutableInPath ", err)
}
defer restore()
return fn(kubeVersion, benchmarkVersion, v)
}
withNoPath := func(kubeVersion, benchmarkVersion string, v *viper.Viper, fn getBenchmarkVersionFnToTest) (string, error) {
restore, err := prunePath()
if err != nil {
t.Fatal("Failed when calling prunePath ", err)
}
defer restore()
return fn(kubeVersion, benchmarkVersion, v)
}
type getBenchmarkVersionFn func(string, string, *viper.Viper, getBenchmarkVersionFnToTest) (string, error)
cases := []struct {
n string
kubeVersion string
benchmarkVersion string
v *viper.Viper
callFn getBenchmarkVersionFn
exp string
succeed bool
}{
{n: "both versions", kubeVersion: "1.11", benchmarkVersion: "cis-1.3", exp: "cis-1.3", callFn: withNoPath, v: viper.New(), succeed: false},
{n: "no version-missing-kubectl", kubeVersion: "", benchmarkVersion: "", v: viperWithData, exp: "", callFn: withNoPath, succeed: false},
{n: "no version-fakeKubectl", kubeVersion: "", benchmarkVersion: "", v: viperWithData, exp: "cis-1.4", callFn: withFakeKubectl, succeed: true},
{n: "kubeVersion", kubeVersion: "1.11", benchmarkVersion: "", v: viperWithData, exp: "cis-1.3", callFn: withNoPath, succeed: true},
{n: "ocpVersion310", kubeVersion: "ocp-3.10", benchmarkVersion: "", v: viperWithData, exp: "rh-0.7", callFn: withNoPath, succeed: true},
{n: "ocpVersion311", kubeVersion: "ocp-3.11", benchmarkVersion: "", v: viperWithData, exp: "rh-0.7", callFn: withNoPath, succeed: true},
{n: "gke10", kubeVersion: "gke-1.0", benchmarkVersion: "", v: viperWithData, exp: "gke-1.0", callFn: withNoPath, succeed: true},
}
for _, c := range cases {
rv, err := c.callFn(c.kubeVersion, c.benchmarkVersion, c.v, getBenchmarkVersion)
if c.succeed {
if err != nil {
t.Errorf("[%q]-Unexpected error: %v", c.n, err)
}
if len(rv) == 0 {
t.Errorf("[%q]-missing return value", c.n)
}
if c.exp != rv {
t.Errorf("[%q]- expected %q but Got %q", c.n, c.exp, rv)
}
} else {
if err == nil {
t.Errorf("[%q]-Expected error but got none", c.n)
}
}
}
}

func TestValidTargets(t *testing.T) {
cases := []struct {
name string
benchmark string
targets []string
expected bool
}{
{
name: "cis-1.3 no etcd",
benchmark: "cis-1.3",
targets: []string{"master", "etcd"},
expected: false,
},
{
name: "cis-1.4 valid",
benchmark: "cis-1.4",
targets: []string{"master", "node"},
expected: true,
},
{
name: "cis-1.5 no dummy",
benchmark: "cis-1.5",
targets: []string{"master", "node", "controlplane", "etcd", "dummy"},
expected: false,
},
{
name: "cis-1.5 valid",
benchmark: "cis-1.5",
targets: []string{"master", "node", "controlplane", "etcd", "policies"},
expected: true,
},
{
name: "gke-1.0 valid",
benchmark: "gke-1.0",
targets: []string{"master", "node", "controlplane", "etcd", "policies", "managedservices"},
expected: true,
},
{
name: "eks-1.0 valid",
benchmark: "eks-1.0",
targets: []string{"node", "policies", "controlplane", "managedservices"},
expected: true,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
ret := validTargets(c.benchmark, c.targets)
if ret != c.expected {
t.Fatalf("Expected %t, got %t", c.expected, ret)
}
})
}
}

func TestIsEtcd(t *testing.T) {
testCases := []struct {
name string
cfgFile string
getBinariesFunc func(*viper.Viper, check.NodeType) (map[string]string, error)
isEtcd bool
}{
{
name: "valid config, is etcd and all components are running",
cfgFile: "../cfg/config.yaml",
getBinariesFunc: func(viper *viper.Viper, nt check.NodeType) (strings map[string]string, i error) {
return map[string]string{"etcd": "etcd"}, nil
},
isEtcd: true,
},
{
name: "valid config, is etcd but not all components are running",
cfgFile: "../cfg/config.yaml",
getBinariesFunc: func(viper *viper.Viper, nt check.NodeType) (strings map[string]string, i error) {
return map[string]string{}, nil
},
isEtcd: false,
},
{
name: "valid config, is etcd, not all components are running and fails to find all binaries",
cfgFile: "../cfg/config.yaml",
getBinariesFunc: func(viper *viper.Viper, nt check.NodeType) (strings map[string]string, i error) {
return map[string]string{}, errors.New("failed to find binaries")
},
isEtcd: false,
},
{
name: "valid config, does not include etcd",
cfgFile: "../cfg/node_only.yaml",
isEtcd: false,
},
}
cfgDirOld := cfgDir
cfgDir = "../cfg"
defer func() {
cfgDir = cfgDirOld
}()
execCode := `#!/bin/sh
echo "Server Version: v1.15.03"
`
restore, err := fakeExecutableInPath("kubectl", execCode)
if err != nil {
t.Fatal("Failed when calling fakeExecutableInPath ", err)
}
defer restore()
for _, tc := range testCases {
cfgFile = tc.cfgFile
initConfig()
oldGetBinariesFunc := getBinariesFunc
getBinariesFunc = tc.getBinariesFunc
defer func() {
getBinariesFunc = oldGetBinariesFunc
cfgFile = ""
}()
assert.Equal(t, tc.isEtcd, isEtcd(), tc.name)
}
}

func TestWriteResultToJsonFile(t *testing.T) {
defer func() {
controlsCollection = []*check.Controls{}
jsonFmt = false
outputFile = ""
}()
var err error
jsonFmt = true
outputFile = path.Join(os.TempDir(), fmt.Sprintf("%d", time.Now().UnixNano()))
controlsCollection, err = parseControlsJsonFile("./testdata/controlsCollection.json")
if err != nil {
t.Error(err)
}
writeOutput(controlsCollection)
var expect []*check.Controls
var result []*check.Controls
result, err = parseControlsJsonFile(outputFile)
if err != nil {
t.Error(err)
}
expect, err = parseControlsJsonFile("./testdata/result.json")
if err != nil {
t.Error(err)
}
assert.Equal(t, expect, result)
}

func parseControlsJsonFile(filepath string) ([]*check.Controls, error) {
var result []*check.Controls
d, err := ioutil.ReadFile(filepath)
if err != nil {
return nil, err
}
err = json.Unmarshal(d, &result)
if err != nil {
return nil, err
}
return result, nil
}

func loadConfigForTest() (*viper.Viper, error) {
viperWithData := viper.New()
viperWithData.SetConfigFile(filepath.Join("..", cfgDir, "config.yaml"))
if err := viperWithData.ReadInConfig(); err != nil {
return nil, err
}
return viperWithData, nil
}

type restoreFn func()
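
// fakeExecutableInPath drops a stub named execFile into a fresh temp
// directory (writing execCode into it when non-empty), prepends that
// directory to PATH, and returns a restoreFn that removes the temp
// directory and restores PATH and the working directory. Typical use,
// as in the tests above:
//
//	restore, err := fakeExecutableInPath("kubectl", "#!/bin/sh\necho ok")
//	if err != nil {
//		t.Fatal(err)
//	}
//	defer restore()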
func fakeExecutableInPath(execFile, execCode string) (restoreFn, error) {
pathenv := os.Getenv("PATH")
tmp, err := ioutil.TempDir("", "TestfakeExecutableInPath")
if err != nil {
return nil, err
}
wd, err := os.Getwd()
if err != nil {
return nil, err
}
if len(execCode) > 0 {
ioutil.WriteFile(filepath.Join(tmp, execFile), []byte(execCode), 0700)
} else {
f, err := os.OpenFile(execFile, os.O_CREATE|os.O_EXCL, 0700)
if err != nil {
return nil, err
}
err = f.Close()
if err != nil {
return nil, err
}
}
err = os.Setenv("PATH", fmt.Sprintf("%s:%s", tmp, pathenv))
if err != nil {
return nil, err
}
restorePath := func() {
os.RemoveAll(tmp)
os.Chdir(wd)
os.Setenv("PATH", pathenv)
}
return restorePath, nil
}

func prunePath() (restoreFn, error) {
pathenv := os.Getenv("PATH")
err := os.Setenv("PATH", "")
if err != nil {
return nil, err
}
restorePath := func() {
os.Setenv("PATH", pathenv)
}
return restorePath, nil
}
| ["\"PATH\"", "\"PATH\""] | [] | ["PATH"] | [] | ["PATH"] | go | 1 | 0 |