filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence
---|---|---|---|---|---|---|---|---|---|---
s3/pkg/service.go | package pkg
import (
"context"
"github.com/opensds/go-panda/s3/pkg/db"
"github.com/micro/go-log"
pb "github.com/opensds/go-panda/s3/proto"
"os"
. "github.com/opensds/go-panda/dataflow/pkg/utils"
. "github.com/opensds/go-panda/s3/pkg/exception"
)
type s3Service struct{}
func (b *s3Service) ListBuckets(ctx context.Context, in *pb.BaseRequest, out *pb.ListBucketsResponse) error {
log.Log("ListBuckets is called in s3 service.")
return nil
}
func (b *s3Service) CreateBucket(ctx context.Context, in *pb.Bucket, out *pb.BaseResponse) error {
log.Log("CreateBucket is called in s3 service.")
err := db.DbAdapter.CreateBucket(in)
if err.Code != ERR_OK {
return err.Error()
}
out.Msg = "Create bucket successfully."
return nil
}
func (b *s3Service) GetBucket(ctx context.Context, in *pb.Bucket, out *pb.Bucket) error {
log.Logf("GetBucket %s is called in s3 service.", in.Name)
err := db.DbAdapter.GetBucketByName(in.Name,out)
if err.Code != ERR_OK {
return err.Error()
}
return nil
}
func (b *s3Service) DeleteBucket(ctx context.Context, in *pb.Bucket, out *pb.BaseResponse) error {
log.Log("DeleteBucket is called in s3 service.")
out.Msg = "Delete bucket successfully."
return nil
}
func (b *s3Service) ListObjects(ctx context.Context, in *pb.Object, out *pb.ListObjectResponse) error {
log.Log("PutObject is called in s3 service.")
return nil
}
func (b *s3Service) PutObject(ctx context.Context, in *pb.Object, out *pb.BaseResponse) error {
log.Log("PutObject is called in s3 service.")
out.Msg = "Create bucket successfully."
return nil
}
func (b *s3Service) GetObject(ctx context.Context, in *pb.Object, out *pb.Object) error {
log.Log("GetObject is called in s3 service.")
out.ObjectKey = in.ObjectKey
out.BucketName = in.BucketName
return nil
}
func (b *s3Service) DeleteObject(ctx context.Context, in *pb.Object, out *pb.BaseResponse) error {
log.Log("PutObject is called in s3 service.")
out.Msg = "Create bucket successfully."
return nil
}
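// NewS3Service reads the DB_HOST environment variable, initializes the
// database adapter (MongoDB driver), and returns the S3 gRPC handler.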
func NewS3Service() pb.S3Handler {
host := os.Getenv("DB_HOST")
dbstor := Database{Credential:"unknown", Driver:"mongodb", Endpoint:host}
db.Init(&dbstor)
return &s3Service{}
}
| ["\"DB_HOST\""] | [] | ["DB_HOST"] | [] | ["DB_HOST"] | go | 1 | 0 | |
pkg/helmexec/exec.go | package helmexec
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
const (
command = "helm"
)
type execer struct {
helmBinary string
runner Runner
logger *zap.SugaredLogger
kubeContext string
extra []string
decryptionMutex sync.Mutex
}
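// NewLogger builds a console-encoded zap SugaredLogger that writes to the
// given writer at the given level; it panics if the level string is invalid.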
func NewLogger(writer io.Writer, logLevel string) *zap.SugaredLogger {
var cfg zapcore.EncoderConfig
cfg.MessageKey = "message"
out := zapcore.AddSync(writer)
var level zapcore.Level
err := level.Set(logLevel)
if err != nil {
panic(err)
}
core := zapcore.NewCore(
zapcore.NewConsoleEncoder(cfg),
out,
level,
)
return zap.New(core).Sugar()
}
// New for running helm commands
func New(logger *zap.SugaredLogger, kubeContext string) *execer {
return &execer{
helmBinary: command,
logger: logger,
kubeContext: kubeContext,
runner: &ShellRunner{
logger: logger,
},
}
}
func (helm *execer) SetExtraArgs(args ...string) {
helm.extra = args
}
func (helm *execer) SetHelmBinary(bin string) {
helm.helmBinary = bin
}
func (helm *execer) AddRepo(name, repository, certfile, keyfile, username, password string) error {
var args []string
args = append(args, "repo", "add", name, repository)
if certfile != "" && keyfile != "" {
args = append(args, "--cert-file", certfile, "--key-file", keyfile)
}
if username != "" && password != "" {
args = append(args, "--username", username, "--password", password)
}
helm.logger.Infof("Adding repo %v %v", name, repository)
out, err := helm.exec(args, map[string]string{})
helm.info(out)
return err
}
func (helm *execer) UpdateRepo() error {
helm.logger.Info("Updating repo")
out, err := helm.exec([]string{"repo", "update"}, map[string]string{})
helm.info(out)
return err
}
func (helm *execer) UpdateDeps(chart string) error {
helm.logger.Infof("Updating dependency %v", chart)
out, err := helm.exec([]string{"dependency", "update", chart}, map[string]string{})
helm.info(out)
return err
}
func (helm *execer) BuildDeps(chart string) error {
helm.logger.Infof("Building dependency %v", chart)
out, err := helm.exec([]string{"dependency", "build", chart}, map[string]string{})
helm.info(out)
return err
}
func (helm *execer) SyncRelease(context HelmContext, name, chart string, flags ...string) error {
helm.logger.Infof("Upgrading %v", chart)
preArgs := context.GetTillerlessArgs(helm.helmBinary)
env := context.getTillerlessEnv()
out, err := helm.exec(append(append(preArgs, "upgrade", "--install", "--reset-values", name, chart), flags...), env)
helm.write(out)
return err
}
func (helm *execer) ReleaseStatus(context HelmContext, name string, flags ...string) error {
helm.logger.Infof("Getting status %v", name)
preArgs := context.GetTillerlessArgs(helm.helmBinary)
env := context.getTillerlessEnv()
out, err := helm.exec(append(append(preArgs, "status", name), flags...), env)
helm.write(out)
return err
}
func (helm *execer) List(context HelmContext, filter string, flags ...string) (string, error) {
helm.logger.Infof("Listing releases matching %v", filter)
preArgs := context.GetTillerlessArgs(helm.helmBinary)
env := context.getTillerlessEnv()
out, err := helm.exec(append(append(preArgs, "list", filter), flags...), env)
helm.write(out)
return string(out), err
}
func (helm *execer) DecryptSecret(context HelmContext, name string, flags ...string) (string, error) {
// Prevents https://github.com/roboll/helmfile/issues/258
helm.decryptionMutex.Lock()
defer helm.decryptionMutex.Unlock()
absPath, err := filepath.Abs(name)
if err != nil {
return "", err
}
helm.logger.Infof("Decrypting secret %v", absPath)
preArgs := context.GetTillerlessArgs(helm.helmBinary)
env := context.getTillerlessEnv()
out, err := helm.exec(append(append(preArgs, "secrets", "dec", absPath), flags...), env)
helm.info(out)
if err != nil {
return "", err
}
tmpFile, err := ioutil.TempFile("", "secret")
if err != nil {
return "", err
}
defer tmpFile.Close()
// HELM_SECRETS_DEC_SUFFIX is used by the helm-secrets plugin to define the output file
decSuffix := os.Getenv("HELM_SECRETS_DEC_SUFFIX")
if len(decSuffix) == 0 {
decSuffix = ".yaml.dec"
}
decFilename := strings.Replace(absPath, ".yaml", decSuffix, 1)
// os.Rename seems to result in "cross-device link" errors in some cases
// Instead of moving, copy it to the destination temp file as a work-around
// See https://github.com/roboll/helmfile/issues/251#issuecomment-417166296f
decFile, err := os.Open(decFilename)
if err != nil {
return "", err
}
defer decFile.Close()
_, err = io.Copy(tmpFile, decFile)
if err != nil {
return "", err
}
if err := decFile.Close(); err != nil {
return "", err
}
if err := os.Remove(decFilename); err != nil {
return "", err
}
return tmpFile.Name(), err
}
func (helm *execer) TemplateRelease(chart string, flags ...string) error {
out, err := helm.exec(append([]string{"template", chart}, flags...), map[string]string{})
helm.write(out)
return err
}
func (helm *execer) DiffRelease(context HelmContext, name, chart string, flags ...string) error {
helm.logger.Infof("Comparing %v %v", name, chart)
preArgs := context.GetTillerlessArgs(helm.helmBinary)
env := context.getTillerlessEnv()
out, err := helm.exec(append(append(preArgs, "diff", "upgrade", "--reset-values", "--allow-unreleased", name, chart), flags...), env)
// Do our best to write to STDOUT only when a diff exists
// Unfortunately, this works only when you run helmfile with `--detailed-exitcode`
detailedExitcodeEnabled := false
for _, f := range flags {
if strings.Contains(f, "detailed-exitcode") {
detailedExitcodeEnabled = true
break
}
}
if detailedExitcodeEnabled {
switch e := err.(type) {
case ExitError:
if e.ExitStatus() == 2 {
helm.write(out)
return err
}
}
} else {
helm.write(out)
}
return err
}
func (helm *execer) Lint(chart string, flags ...string) error {
helm.logger.Infof("Linting %v", chart)
out, err := helm.exec(append([]string{"lint", chart}, flags...), map[string]string{})
helm.write(out)
return err
}
func (helm *execer) Fetch(chart string, flags ...string) error {
helm.logger.Infof("Fetching %v", chart)
out, err := helm.exec(append([]string{"fetch", chart}, flags...), map[string]string{})
helm.info(out)
return err
}
func (helm *execer) DeleteRelease(context HelmContext, name string, flags ...string) error {
helm.logger.Infof("Deleting %v", name)
preArgs := context.GetTillerlessArgs(helm.helmBinary)
env := context.getTillerlessEnv()
out, err := helm.exec(append(append(preArgs, "delete", name), flags...), env)
helm.write(out)
return err
}
func (helm *execer) TestRelease(context HelmContext, name string, flags ...string) error {
helm.logger.Infof("Testing %v", name)
preArgs := context.GetTillerlessArgs(helm.helmBinary)
env := context.getTillerlessEnv()
out, err := helm.exec(append(append(preArgs, "test", name), flags...), env)
helm.write(out)
return err
}
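// exec appends any extra args and the --kube-context flag, logs the full
// command at debug level, and runs the helm binary through the runner.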
func (helm *execer) exec(args []string, env map[string]string) ([]byte, error) {
cmdargs := args
if len(helm.extra) > 0 {
cmdargs = append(cmdargs, helm.extra...)
}
if helm.kubeContext != "" {
cmdargs = append(cmdargs, "--kube-context", helm.kubeContext)
}
cmd := fmt.Sprintf("exec: %s %s", helm.helmBinary, strings.Join(cmdargs, " "))
helm.logger.Debug(cmd)
bytes, err := helm.runner.Execute(helm.helmBinary, cmdargs, env)
helm.logger.Debugf("%s: %s", cmd, bytes)
return bytes, err
}
func (helm *execer) info(out []byte) {
if len(out) > 0 {
helm.logger.Infof("%s", out)
}
}
func (helm *execer) write(out []byte) {
if len(out) > 0 {
fmt.Printf("%s\n", out)
}
}
| ["\"HELM_SECRETS_DEC_SUFFIX\""] | [] | ["HELM_SECRETS_DEC_SUFFIX"] | [] | ["HELM_SECRETS_DEC_SUFFIX"] | go | 1 | 0 | |
airflow/executors/celery_executor.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import subprocess
import os
from celery import Celery
from airflow.config_templates.default_celery import DEFAULT_CELERY_CONFIG
from airflow.exceptions import AirflowException
from airflow import configuration
from xTool.utils.log.logging_mixin import LoggingMixin
from xTool.utils.module_loading import import_string
from xTool.executors.celery_executor import CeleryExecutor
'''
To start the celery worker, run the command:
airflow worker
'''
# Get the path of the configuration file and import the default celery config
if configuration.conf.has_option('celery', 'celery_config_options'):
celery_configuration = import_string(
configuration.conf.get('celery', 'celery_config_options')
)
else:
celery_configuration = DEFAULT_CELERY_CONFIG
# Create a celery client
celery_app_name = configuration.conf.get('celery', 'CELERY_APP_NAME')
app = Celery(
celery_app_name,
config_source=celery_configuration)
@app.task
def execute_command(command):
"""airflow worker 执行shell命令 ."""
log = LoggingMixin().log
log.info("Executing command in Celery: %s", command)
env = os.environ.copy()
try:
# After the celery worker receives the message, execute the shell command it contains
subprocess.check_call(command, shell=True, stderr=subprocess.STDOUT,
close_fds=True, env=env)
except subprocess.CalledProcessError as e:
log.exception('execute_command encountered a CalledProcessError')
log.error(e.output)
raise AirflowException('Celery command failed')
| [] | [] | [] | [] | [] | python | 0 | 0 | |
spades/config.py | # -*- coding: utf-8 -*-
# copyright: (c) 2020 by Jesse Johnson.
# license: Apache 2.0, see LICENSE for more details.
'''Provide book package.'''
import os
from passlib.pwd import genword
from redis import Redis
# logging
loglevel: str = os.getenv('LOGLEVEL', 'INFO').upper()
# spades settings
player_max: int = 2
winning_score: int = 50
def set_player_max(count: int) -> None:
'''Set player count.'''
global player_max
player_max = count
# db settings
db_type: str = os.getenv('DB_TYPE', 'postgres')
db_host: str = os.getenv('POSTGRESQL_HOST', 'localhost')
db_port: int = int(os.getenv('POSTGRESQL_PORT', 5432))
db_user: str = os.getenv('POSTGRESQL_USER', 'spades')
db_pass: str = os.getenv('POSTGRESQL_PASSWORD', 'password')
db_name: str = os.getenv('POSTGRESQL_DATABASE', 'spades')
session_type: str = os.getenv('SESSION_TYPE', 'redis')
# f"redis://:{session_password}@{session_host}:{session_port}"
session = {
'host': os.getenv('REDIS_HOST', 'localhost'),
'port': int(os.getenv('REDIS_PORT', 6379)),
'password': os.getenv('REDIS_PASSWORD', 'password'),
'socket_timeout': int(os.getenv('REDIS_TIMEOUT', 30)),
}
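# Extra SQLAlchemy engine options; connect_args are forwarded to the
# database driver when the engine is created.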
sqlalchemy_engine_options = {
'connect_args': {
'connection_timeout': 30
}
}
class Config:
'''Provide Flask configuration.'''
SQLALCHEMY_DATABASE_URI: str = os.getenv(
'SQLALCHEMY_DATABASE_URI',
f"{db_type}://{db_user}:{db_pass}@{db_host}:{db_port}/{db_name}"
)
SQLALCHEMY_ECHO: bool = bool(os.getenv('SQLALCHEMY_ECHO', True))
SQLALCHEMY_TRACK_MODIFICATIONS: bool = bool(
os.getenv('SQLALCHEMY_TRACK_MODIFICATIONS', False)
)
SESSION_COOKIE_SECURE: bool = True
SESSION_COOKIE_HTTPONLY: bool = True
SESSION_COOKIE_SAMESITE: str = 'Lax'
SECRET_KEY: str = genword(entropy=56, length=128)
WTF_CSRF_SECRET_KEY: str = genword(entropy=56, length=128)
SESSION_TYPE: str = session_type
SESSION_PERMANENT: bool = True
SESSION_USE_SIGNER: bool = True
SESSION_REDIS = Redis(**session) # type: ignore
SSE_REDIS_URL = "redis://:{p}@{h}:{s}".format(
p=session['password'], h=session['host'], s=session['port']
)
| [] | [] | ["SQLALCHEMY_ECHO", "SQLALCHEMY_TRACK_MODIFICATIONS", "REDIS_PORT", "POSTGRESQL_HOST", "SESSION_TYPE", "REDIS_HOST", "POSTGRESQL_PORT", "REDIS_PASSWORD", "POSTGRESQL_DATABASE", "LOGLEVEL", "REDIS_TIMEOUT", "POSTGRESQL_PASSWORD", "SQLALCHEMY_DATABASE_URI", "POSTGRESQL_USER", "DB_TYPE"] | [] | ["SQLALCHEMY_ECHO", "SQLALCHEMY_TRACK_MODIFICATIONS", "REDIS_PORT", "POSTGRESQL_HOST", "SESSION_TYPE", "REDIS_HOST", "POSTGRESQL_PORT", "REDIS_PASSWORD", "POSTGRESQL_DATABASE", "LOGLEVEL", "REDIS_TIMEOUT", "POSTGRESQL_PASSWORD", "SQLALCHEMY_DATABASE_URI", "POSTGRESQL_USER", "DB_TYPE"] | python | 15 | 0 | |
source/git/gitsource.go | package git
import (
"bytes"
"context"
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"net/url"
"os"
"os/exec"
"os/user"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/secrets"
"github.com/moby/buildkit/session/sshforward"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/source"
srctypes "github.com/moby/buildkit/source/types"
"github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/progress/logs"
"github.com/moby/buildkit/util/urlutil"
"github.com/moby/locker"
"github.com/pkg/errors"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
var validHex = regexp.MustCompile(`^[a-f0-9]{40}$`)
var defaultBranch = regexp.MustCompile(`refs/heads/(\S+)`)
type Opt struct {
CacheAccessor cache.Accessor
}
type gitSource struct {
cache cache.Accessor
locker *locker.Locker
}
// Supported returns nil if the system supports Git source
func Supported() error {
if err := exec.Command("git", "version").Run(); err != nil {
return errors.Wrap(err, "failed to find git binary")
}
return nil
}
func NewSource(opt Opt) (source.Source, error) {
gs := &gitSource{
cache: opt.CacheAccessor,
locker: locker.New(),
}
return gs, nil
}
func (gs *gitSource) ID() string {
return srctypes.GitScheme
}
// needs to be called with repo lock
func (gs *gitSource) mountRemote(ctx context.Context, remote string, auth []string, g session.Group) (target string, release func(), retErr error) {
sis, err := searchGitRemote(ctx, gs.cache, remote)
if err != nil {
return "", nil, errors.Wrapf(err, "failed to search metadata for %s", urlutil.RedactCredentials(remote))
}
var remoteRef cache.MutableRef
for _, si := range sis {
remoteRef, err = gs.cache.GetMutable(ctx, si.ID())
if err != nil {
if errors.Is(err, cache.ErrLocked) {
// should never really happen as no other function should access this metadata, but let's be graceful
bklog.G(ctx).Warnf("mutable ref for %s %s was locked: %v", urlutil.RedactCredentials(remote), si.ID(), err)
continue
}
return "", nil, errors.Wrapf(err, "failed to get mutable ref for %s", urlutil.RedactCredentials(remote))
}
break
}
initializeRepo := false
if remoteRef == nil {
remoteRef, err = gs.cache.New(ctx, nil, g, cache.CachePolicyRetain, cache.WithDescription(fmt.Sprintf("shared git repo for %s", urlutil.RedactCredentials(remote))))
if err != nil {
return "", nil, errors.Wrapf(err, "failed to create new mutable for %s", urlutil.RedactCredentials(remote))
}
initializeRepo = true
}
releaseRemoteRef := func() {
remoteRef.Release(context.TODO())
}
defer func() {
if retErr != nil && remoteRef != nil {
releaseRemoteRef()
}
}()
mount, err := remoteRef.Mount(ctx, false, g)
if err != nil {
return "", nil, err
}
lm := snapshot.LocalMounter(mount)
dir, err := lm.Mount()
if err != nil {
return "", nil, err
}
defer func() {
if retErr != nil {
lm.Unmount()
}
}()
if initializeRepo {
if _, err := gitWithinDir(ctx, dir, "", "", "", auth, "init", "--bare"); err != nil {
return "", nil, errors.Wrapf(err, "failed to init repo at %s", dir)
}
if _, err := gitWithinDir(ctx, dir, "", "", "", auth, "remote", "add", "origin", remote); err != nil {
return "", nil, errors.Wrapf(err, "failed add origin repo at %s", dir)
}
// save new remote metadata
md := cacheRefMetadata{remoteRef}
if err := md.setGitRemote(remote); err != nil {
return "", nil, err
}
}
return dir, func() {
lm.Unmount()
releaseRemoteRef()
}, nil
}
type gitSourceHandler struct {
*gitSource
src source.GitIdentifier
cacheKey string
sm *session.Manager
auth []string
}
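// shaToCacheKey derives the content cache key for a commit SHA, appending
// ".git" when the git directory is kept and the subdir when one is set.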
func (gs *gitSourceHandler) shaToCacheKey(sha string) string {
key := sha
if gs.src.KeepGitDir {
key += ".git"
}
if gs.src.Subdir != "" {
key += ":" + gs.src.Subdir
}
return key
}
func (gs *gitSource) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager, _ solver.Vertex) (source.SourceInstance, error) {
gitIdentifier, ok := id.(*source.GitIdentifier)
if !ok {
return nil, errors.Errorf("invalid git identifier %v", id)
}
return &gitSourceHandler{
src: *gitIdentifier,
gitSource: gs,
sm: sm,
}, nil
}
type authSecret struct {
token bool
name string
}
func (gs *gitSourceHandler) authSecretNames() (sec []authSecret, _ error) {
u, err := url.Parse(gs.src.Remote)
if err != nil {
return nil, err
}
if gs.src.AuthHeaderSecret != "" {
sec = append(sec, authSecret{name: gs.src.AuthHeaderSecret + "." + u.Host})
}
if gs.src.AuthTokenSecret != "" {
sec = append(sec, authSecret{name: gs.src.AuthTokenSecret + "." + u.Host, token: true})
}
if gs.src.AuthHeaderSecret != "" {
sec = append(sec, authSecret{name: gs.src.AuthHeaderSecret})
}
if gs.src.AuthTokenSecret != "" {
sec = append(sec, authSecret{name: gs.src.AuthTokenSecret, token: true})
}
return sec, nil
}
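// getAuthToken resolves the configured auth secrets through the session
// manager and caches them as git "-c http.<scope>.extraheader" arguments.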
func (gs *gitSourceHandler) getAuthToken(ctx context.Context, g session.Group) error {
if gs.auth != nil {
return nil
}
sec, err := gs.authSecretNames()
if err != nil {
return err
}
return gs.sm.Any(ctx, g, func(ctx context.Context, _ string, caller session.Caller) error {
for _, s := range sec {
dt, err := secrets.GetSecret(ctx, caller, s.name)
if err != nil {
if errors.Is(err, secrets.ErrNotFound) {
continue
}
return err
}
if s.token {
dt = []byte("basic " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("x-access-token:%s", dt))))
}
gs.auth = []string{"-c", "http." + tokenScope(gs.src.Remote) + ".extraheader=Authorization: " + string(dt)}
break
}
return nil
})
}
func (gs *gitSourceHandler) mountSSHAuthSock(ctx context.Context, sshID string, g session.Group) (string, func() error, error) {
var caller session.Caller
err := gs.sm.Any(ctx, g, func(ctx context.Context, _ string, c session.Caller) error {
if err := sshforward.CheckSSHID(ctx, c, sshID); err != nil {
if st, ok := status.FromError(err); ok && st.Code() == codes.Unimplemented {
return errors.Errorf("no SSH key %q forwarded from the client", sshID)
}
return err
}
caller = c
return nil
})
if err != nil {
return "", nil, err
}
usr, err := user.Current()
if err != nil {
return "", nil, err
}
// best effort, default to root
uid, _ := strconv.Atoi(usr.Uid)
gid, _ := strconv.Atoi(usr.Gid)
sock, cleanup, err := sshforward.MountSSHSocket(ctx, caller, sshforward.SocketOpt{
ID: sshID,
UID: uid,
GID: gid,
Mode: 0700,
})
if err != nil {
return "", nil, err
}
return sock, cleanup, nil
}
func (gs *gitSourceHandler) mountKnownHosts(ctx context.Context) (string, func() error, error) {
if gs.src.KnownSSHHosts == "" {
return "", nil, errors.Errorf("no configured known hosts forwarded from the client")
}
knownHosts, err := ioutil.TempFile("", "")
if err != nil {
return "", nil, err
}
cleanup := func() error {
return os.Remove(knownHosts.Name())
}
_, err = knownHosts.Write([]byte(gs.src.KnownSSHHosts))
if err != nil {
cleanup()
return "", nil, err
}
err = knownHosts.Close()
if err != nil {
cleanup()
return "", nil, err
}
return knownHosts.Name(), cleanup, nil
}
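// CacheKey resolves the source to a commit SHA: a full SHA ref is used
// directly, otherwise the ref (or default branch) is resolved with
// "git ls-remote" against the shared bare repository.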
func (gs *gitSourceHandler) CacheKey(ctx context.Context, g session.Group, index int) (string, string, solver.CacheOpts, bool, error) {
remote := gs.src.Remote
gs.locker.Lock(remote)
defer gs.locker.Unlock(remote)
if ref := gs.src.Ref; ref != "" && isCommitSHA(ref) {
cacheKey := gs.shaToCacheKey(ref)
gs.cacheKey = cacheKey
return cacheKey, ref, nil, true, nil
}
gs.getAuthToken(ctx, g)
gitDir, unmountGitDir, err := gs.mountRemote(ctx, remote, gs.auth, g)
if err != nil {
return "", "", nil, false, err
}
defer unmountGitDir()
var sock string
if gs.src.MountSSHSock != "" {
var unmountSock func() error
sock, unmountSock, err = gs.mountSSHAuthSock(ctx, gs.src.MountSSHSock, g)
if err != nil {
return "", "", nil, false, err
}
defer unmountSock()
}
var knownHosts string
if gs.src.KnownSSHHosts != "" {
var unmountKnownHosts func() error
knownHosts, unmountKnownHosts, err = gs.mountKnownHosts(ctx)
if err != nil {
return "", "", nil, false, err
}
defer unmountKnownHosts()
}
ref := gs.src.Ref
if ref == "" {
ref, err = getDefaultBranch(ctx, gitDir, "", sock, knownHosts, gs.auth, gs.src.Remote)
if err != nil {
return "", "", nil, false, err
}
}
// TODO: should we assume that remote tag is immutable? add a timer?
buf, err := gitWithinDir(ctx, gitDir, "", sock, knownHosts, gs.auth, "ls-remote", "origin", ref)
if err != nil {
return "", "", nil, false, errors.Wrapf(err, "failed to fetch remote %s", urlutil.RedactCredentials(remote))
}
out := buf.String()
idx := strings.Index(out, "\t")
if idx == -1 {
return "", "", nil, false, errors.Errorf("repository does not contain ref %s, output: %q", ref, string(out))
}
sha := string(out[:idx])
if !isCommitSHA(sha) {
return "", "", nil, false, errors.Errorf("invalid commit sha %q", sha)
}
cacheKey := gs.shaToCacheKey(sha)
gs.cacheKey = cacheKey
return cacheKey, sha, nil, true, nil
}
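// Snapshot returns a cached checkout for the resolved key when one exists;
// otherwise it fetches into the shared bare repo, checks out the ref
// (keeping .git or extracting a subdir as configured), updates submodules,
// and records the new snapshot in the cache metadata.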
func (gs *gitSourceHandler) Snapshot(ctx context.Context, g session.Group) (out cache.ImmutableRef, retErr error) {
cacheKey := gs.cacheKey
if cacheKey == "" {
var err error
cacheKey, _, _, _, err = gs.CacheKey(ctx, g, 0)
if err != nil {
return nil, err
}
}
gs.getAuthToken(ctx, g)
snapshotKey := cacheKey + ":" + gs.src.Subdir
gs.locker.Lock(snapshotKey)
defer gs.locker.Unlock(snapshotKey)
sis, err := searchGitSnapshot(ctx, gs.cache, snapshotKey)
if err != nil {
return nil, errors.Wrapf(err, "failed to search metadata for %s", snapshotKey)
}
if len(sis) > 0 {
return gs.cache.Get(ctx, sis[0].ID())
}
gs.locker.Lock(gs.src.Remote)
defer gs.locker.Unlock(gs.src.Remote)
gitDir, unmountGitDir, err := gs.mountRemote(ctx, gs.src.Remote, gs.auth, g)
if err != nil {
return nil, err
}
defer unmountGitDir()
var sock string
if gs.src.MountSSHSock != "" {
var unmountSock func() error
sock, unmountSock, err = gs.mountSSHAuthSock(ctx, gs.src.MountSSHSock, g)
if err != nil {
return nil, err
}
defer unmountSock()
}
var knownHosts string
if gs.src.KnownSSHHosts != "" {
var unmountKnownHosts func() error
knownHosts, unmountKnownHosts, err = gs.mountKnownHosts(ctx)
if err != nil {
return nil, err
}
defer unmountKnownHosts()
}
ref := gs.src.Ref
if ref == "" {
ref, err = getDefaultBranch(ctx, gitDir, "", sock, knownHosts, gs.auth, gs.src.Remote)
if err != nil {
return nil, err
}
}
doFetch := true
if isCommitSHA(ref) {
// skip fetch if commit already exists
if _, err := gitWithinDir(ctx, gitDir, "", sock, knownHosts, nil, "cat-file", "-e", ref+"^{commit}"); err == nil {
doFetch = false
}
}
if doFetch {
// make sure no old lock files have leaked
os.RemoveAll(filepath.Join(gitDir, "shallow.lock"))
args := []string{"fetch"}
if !isCommitSHA(ref) { // TODO: find a branch from ls-remote?
args = append(args, "--depth=1", "--no-tags")
} else {
if _, err := os.Lstat(filepath.Join(gitDir, "shallow")); err == nil {
args = append(args, "--unshallow")
}
}
args = append(args, "origin")
if !isCommitSHA(ref) {
args = append(args, "--force", ref+":tags/"+ref)
// local refs are needed so they would be advertised on next fetches. Force is used
// in case the ref is a branch and it now points to a different commit sha
// TODO: is there a better way to do this?
}
if _, err := gitWithinDir(ctx, gitDir, "", sock, knownHosts, gs.auth, args...); err != nil {
return nil, errors.Wrapf(err, "failed to fetch remote %s", urlutil.RedactCredentials(gs.src.Remote))
}
_, err = gitWithinDir(ctx, gitDir, "", sock, knownHosts, nil, "reflog", "expire", "--all", "--expire=now")
if err != nil {
return nil, errors.Wrapf(err, "failed to expire reflog for remote %s", urlutil.RedactCredentials(gs.src.Remote))
}
}
checkoutRef, err := gs.cache.New(ctx, nil, g, cache.WithRecordType(client.UsageRecordTypeGitCheckout), cache.WithDescription(fmt.Sprintf("git snapshot for %s#%s", gs.src.Remote, ref)))
if err != nil {
return nil, errors.Wrapf(err, "failed to create new mutable for %s", urlutil.RedactCredentials(gs.src.Remote))
}
defer func() {
if retErr != nil && checkoutRef != nil {
checkoutRef.Release(context.TODO())
}
}()
mount, err := checkoutRef.Mount(ctx, false, g)
if err != nil {
return nil, err
}
lm := snapshot.LocalMounter(mount)
checkoutDir, err := lm.Mount()
if err != nil {
return nil, err
}
defer func() {
if retErr != nil && lm != nil {
lm.Unmount()
}
}()
subdir := path.Clean(gs.src.Subdir)
if subdir == "/" {
subdir = "."
}
if gs.src.KeepGitDir && subdir == "." {
checkoutDirGit := filepath.Join(checkoutDir, ".git")
if err := os.MkdirAll(checkoutDir, 0711); err != nil {
return nil, err
}
_, err = gitWithinDir(ctx, checkoutDirGit, "", sock, knownHosts, nil, "init")
if err != nil {
return nil, err
}
_, err = gitWithinDir(ctx, checkoutDirGit, "", sock, knownHosts, nil, "remote", "add", "origin", gitDir)
if err != nil {
return nil, err
}
gitCatFileBuf, err := gitWithinDir(ctx, gitDir, "", sock, knownHosts, gs.auth, "cat-file", "-t", ref)
if err != nil {
return nil, err
}
isAnnotatedTag := strings.TrimSpace(gitCatFileBuf.String()) == "tag"
pullref := ref
if isAnnotatedTag {
pullref += ":refs/tags/" + pullref
} else if isCommitSHA(ref) {
pullref = "refs/buildkit/" + identity.NewID()
_, err = gitWithinDir(ctx, gitDir, "", sock, knownHosts, gs.auth, "update-ref", pullref, ref)
if err != nil {
return nil, err
}
} else {
pullref += ":" + pullref
}
_, err = gitWithinDir(ctx, checkoutDirGit, "", sock, knownHosts, gs.auth, "fetch", "-u", "--depth=1", "origin", pullref)
if err != nil {
return nil, err
}
_, err = gitWithinDir(ctx, checkoutDirGit, checkoutDir, sock, knownHosts, nil, "checkout", "FETCH_HEAD")
if err != nil {
return nil, errors.Wrapf(err, "failed to checkout remote %s", urlutil.RedactCredentials(gs.src.Remote))
}
_, err = gitWithinDir(ctx, checkoutDirGit, "", sock, knownHosts, nil, "remote", "set-url", "origin", urlutil.RedactCredentials(gs.src.Remote))
if err != nil {
return nil, errors.Wrapf(err, "failed to set remote origin to %s", urlutil.RedactCredentials(gs.src.Remote))
}
_, err = gitWithinDir(ctx, checkoutDirGit, "", sock, knownHosts, nil, "reflog", "expire", "--all", "--expire=now")
if err != nil {
return nil, errors.Wrapf(err, "failed to expire reflog for remote %s", urlutil.RedactCredentials(gs.src.Remote))
}
if err := os.Remove(filepath.Join(checkoutDirGit, "FETCH_HEAD")); err != nil && !errors.Is(err, os.ErrNotExist) {
return nil, errors.Wrapf(err, "failed to remove FETCH_HEAD for remote %s", urlutil.RedactCredentials(gs.src.Remote))
}
gitDir = checkoutDirGit
} else {
cd := checkoutDir
if subdir != "." {
cd, err = ioutil.TempDir(cd, "checkout")
if err != nil {
return nil, errors.Wrapf(err, "failed to create temporary checkout dir")
}
}
_, err = gitWithinDir(ctx, gitDir, cd, sock, knownHosts, nil, "checkout", ref, "--", ".")
if err != nil {
return nil, errors.Wrapf(err, "failed to checkout remote %s", urlutil.RedactCredentials(gs.src.Remote))
}
if subdir != "." {
d, err := os.Open(filepath.Join(cd, subdir))
if err != nil {
return nil, errors.Wrapf(err, "failed to open subdir %v", subdir)
}
defer func() {
if d != nil {
d.Close()
}
}()
names, err := d.Readdirnames(0)
if err != nil {
return nil, err
}
for _, n := range names {
if err := os.Rename(filepath.Join(cd, subdir, n), filepath.Join(checkoutDir, n)); err != nil {
return nil, err
}
}
if err := d.Close(); err != nil {
return nil, err
}
d = nil // reset defer
if err := os.RemoveAll(cd); err != nil {
return nil, err
}
}
}
_, err = gitWithinDir(ctx, gitDir, checkoutDir, sock, knownHosts, gs.auth, "submodule", "update", "--init", "--recursive", "--depth=1")
if err != nil {
return nil, errors.Wrapf(err, "failed to update submodules for %s", urlutil.RedactCredentials(gs.src.Remote))
}
if idmap := mount.IdentityMapping(); idmap != nil {
u := idmap.RootPair()
err := filepath.Walk(gitDir, func(p string, f os.FileInfo, err error) error {
return os.Lchown(p, u.UID, u.GID)
})
if err != nil {
return nil, errors.Wrap(err, "failed to remap git checkout")
}
}
lm.Unmount()
lm = nil
snap, err := checkoutRef.Commit(ctx)
if err != nil {
return nil, err
}
checkoutRef = nil
defer func() {
if retErr != nil {
snap.Release(context.TODO())
}
}()
md := cacheRefMetadata{snap}
if err := md.setGitSnapshot(snapshotKey); err != nil {
return nil, err
}
return snap, nil
}
func isCommitSHA(str string) bool {
return validHex.MatchString(str)
}
func gitWithinDir(ctx context.Context, gitDir, workDir, sshAuthSock, knownHosts string, auth []string, args ...string) (*bytes.Buffer, error) {
a := append([]string{"--git-dir", gitDir}, auth...)
if workDir != "" {
a = append(a, "--work-tree", workDir)
}
return git(ctx, workDir, sshAuthSock, knownHosts, append(a, args...)...)
}
func getGitSSHCommand(knownHosts string) string {
gitSSHCommand := "ssh -F /dev/null"
if knownHosts != "" {
gitSSHCommand += " -o UserKnownHostsFile=" + knownHosts
} else {
gitSSHCommand += " -o StrictHostKeyChecking=no"
}
return gitSSHCommand
}
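// git runs the git binary with a minimal environment (PATH, prompt and SSH
// settings only) and retries with --depth=1 removed when the error output
// indicates the remote rejects shallow operations.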
func git(ctx context.Context, dir, sshAuthSock, knownHosts string, args ...string) (_ *bytes.Buffer, err error) {
for {
stdout, stderr, flush := logs.NewLogStreams(ctx, false)
defer stdout.Close()
defer stderr.Close()
defer func() {
if err != nil {
flush()
}
}()
cmd := exec.Command("git", args...)
cmd.Dir = dir // some commands like submodule require this
buf := bytes.NewBuffer(nil)
errbuf := bytes.NewBuffer(nil)
cmd.Stdin = nil
cmd.Stdout = io.MultiWriter(stdout, buf)
cmd.Stderr = io.MultiWriter(stderr, errbuf)
cmd.Env = []string{
"PATH=" + os.Getenv("PATH"),
"GIT_TERMINAL_PROMPT=0",
"GIT_SSH_COMMAND=" + getGitSSHCommand(knownHosts),
// "GIT_TRACE=1",
}
if sshAuthSock != "" {
cmd.Env = append(cmd.Env, "SSH_AUTH_SOCK="+sshAuthSock)
}
// remote git commands spawn helper processes that inherit FDs and don't
// handle parent death signal so exec.CommandContext can't be used
err := runProcessGroup(ctx, cmd)
if err != nil {
if strings.Contains(errbuf.String(), "--depth") || strings.Contains(errbuf.String(), "shallow") {
if newArgs := argsNoDepth(args); len(args) > len(newArgs) {
args = newArgs
continue
}
}
}
return buf, err
}
}
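// argsNoDepth returns a copy of args with every "--depth=1" flag removed.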
func argsNoDepth(args []string) []string {
out := make([]string, 0, len(args))
for _, a := range args {
if a != "--depth=1" {
out = append(out, a)
}
}
return out
}
func tokenScope(remote string) string {
// Generally we can only use the token for fetching the main remote, but for github.com we make a best
// effort to reuse the same token for all github.com remotes. This is the same behavior actions/checkout uses.
for _, pfx := range []string{"https://github.com/", "https://www.github.com/"} {
if strings.HasPrefix(remote, pfx) {
return pfx
}
}
return remote
}
// getDefaultBranch gets the default branch of a repository using ls-remote
func getDefaultBranch(ctx context.Context, gitDir, workDir, sshAuthSock, knownHosts string, auth []string, remoteURL string) (string, error) {
buf, err := gitWithinDir(ctx, gitDir, workDir, sshAuthSock, knownHosts, auth, "ls-remote", "--symref", remoteURL, "HEAD")
if err != nil {
return "", errors.Wrapf(err, "error fetching default branch for repository %s", urlutil.RedactCredentials(remoteURL))
}
ss := defaultBranch.FindAllStringSubmatch(buf.String(), -1)
if len(ss) == 0 || len(ss[0]) != 2 {
return "", errors.Errorf("could not find default branch for repository: %s", urlutil.RedactCredentials(remoteURL))
}
return ss[0][1], nil
}
const keyGitRemote = "git-remote"
const gitRemoteIndex = keyGitRemote + "::"
const keyGitSnapshot = "git-snapshot"
const gitSnapshotIndex = keyGitSnapshot + "::"
func search(ctx context.Context, store cache.MetadataStore, key string, idx string) ([]cacheRefMetadata, error) {
var results []cacheRefMetadata
mds, err := store.Search(ctx, idx+key)
if err != nil {
return nil, err
}
for _, md := range mds {
results = append(results, cacheRefMetadata{md})
}
return results, nil
}
func searchGitRemote(ctx context.Context, store cache.MetadataStore, remote string) ([]cacheRefMetadata, error) {
return search(ctx, store, remote, gitRemoteIndex)
}
func searchGitSnapshot(ctx context.Context, store cache.MetadataStore, key string) ([]cacheRefMetadata, error) {
return search(ctx, store, key, gitSnapshotIndex)
}
type cacheRefMetadata struct {
cache.RefMetadata
}
func (md cacheRefMetadata) setGitSnapshot(key string) error {
return md.SetString(keyGitSnapshot, key, gitSnapshotIndex+key)
}
func (md cacheRefMetadata) setGitRemote(key string) error {
return md.SetString(keyGitRemote, key, gitRemoteIndex+key)
}
| ["\"PATH\""] | [] | ["PATH"] | [] | ["PATH"] | go | 1 | 0 | |
cmd/pillbox/config.go | package main
import (
"fmt"
"os"
"github.com/fteem/pbox/launchd"
)
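// agentConfig describes the launchd agent for the pillbox daemon; the
// Program path is built from the GOPATH environment variable.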
var (
agentConfig = &launchd.Config{
ExecutableName: "pillboxd",
Program: fmt.Sprintf("%s/bin/pillboxd", os.Getenv("GOPATH")),
WorkingDirectory: "/Users/ie/projects/go/bin",
Name: "com.ieftimov.pillbox",
DisplayName: "Pillbox",
LongDescription: "Pillbox reminders agent",
LogLocation: "/tmp",
RunAtLoad: true,
KeepAlive: true,
Disabled: false,
}
)
| ["\"GOPATH\""] | [] | ["GOPATH"] | [] | ["GOPATH"] | go | 1 | 0 | |
google/appengine/tools/devappserver2/devappserver2.py | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""The main entry point for the new development server."""
import argparse
import errno
import getpass
import itertools
import logging
import os
import sys
import tempfile
import time
from google.appengine.api import appinfo
from google.appengine.datastore import datastore_stub_util
from google.appengine.tools import boolean_action
from google.appengine.tools.devappserver2 import api_server
from google.appengine.tools.devappserver2 import application_configuration
from google.appengine.tools.devappserver2 import dispatcher
from google.appengine.tools.devappserver2 import gcd_application
from google.appengine.tools.devappserver2 import login
from google.appengine.tools.devappserver2 import runtime_config_pb2
from google.appengine.tools.devappserver2 import shutdown
from google.appengine.tools.devappserver2 import update_checker
from google.appengine.tools.devappserver2 import wsgi_request_info
from google.appengine.tools.devappserver2.admin import admin_server
# Initialize logging early -- otherwise some library packages may
# pre-empt our log formatting. NOTE: the level is provisional; it may
# be changed in main() based on the --debug flag.
logging.basicConfig(
level=logging.INFO,
format='%(levelname)-8s %(asctime)s %(filename)s:%(lineno)s] %(message)s')
# Valid choices for --log_level and their corresponding constants in
# runtime_config_pb2.Config.stderr_log_level.
_LOG_LEVEL_TO_RUNTIME_CONSTANT = {
'debug': 0,
'info': 1,
'warning': 2,
'error': 3,
'critical': 4,
}
# Valid choices for --dev_appserver_log_level and their corresponding Python
# logging levels
_LOG_LEVEL_TO_PYTHON_CONSTANT = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
# The default encoding used by the production interpreter.
_PROD_DEFAULT_ENCODING = 'ascii'
def _generate_storage_paths(app_id):
"""Yield an infinite sequence of possible storage paths."""
if sys.platform == 'win32':
# The temp directory is per-user on Windows so there is no reason to add
# the username to the generated directory name.
user_format = ''
else:
try:
user_name = getpass.getuser()
except Exception: # The possible set of exceptions is not documented.
user_format = ''
else:
user_format = '.%s' % user_name
tempdir = tempfile.gettempdir()
yield os.path.join(tempdir, 'appengine.%s%s' % (app_id, user_format))
for i in itertools.count(1):
yield os.path.join(tempdir, 'appengine.%s%s.%d' % (app_id, user_format, i))
def _get_storage_path(path, app_id):
"""Returns a path to the directory where stub data can be stored."""
_, _, app_id = app_id.replace(':', '_').rpartition('~')
if path is None:
for path in _generate_storage_paths(app_id):
try:
os.mkdir(path, 0700)
except OSError, e:
if e.errno == errno.EEXIST:
# Check that the directory is only accessible by the current user to
# protect against an attacker creating the directory in advance in
# order to access any created files. Windows has per-user temporary
# directories and st_mode does not include per-user permission
# information so assume that it is safe.
if sys.platform == 'win32' or (
(os.stat(path).st_mode & 0777) == 0700 and os.path.isdir(path)):
return path
else:
continue
raise
else:
return path
elif not os.path.exists(path):
os.mkdir(path)
return path
elif not os.path.isdir(path):
raise IOError('the given storage path %r is a file, a directory was '
'expected' % path)
else:
return path
def _get_default_php_path():
"""Returns the path to the siloed php-cgi binary or None if not present."""
default_php_executable_path = None
if sys.platform == 'win32':
default_php_executable_path = os.path.abspath(
os.path.join(os.path.dirname(sys.argv[0]),
'php/php-5.4-Win32-VC9-x86/php-cgi.exe'))
elif sys.platform == 'darwin':
# The Cloud SDK uses symlinks in its packaging of the Mac Launcher. First
# try to find PHP relative to the absolute path of this executable. If that
# doesn't work, try using the path without dereferencing all symlinks.
base_paths = [os.path.realpath(sys.argv[0]), sys.argv[0]]
for base_path in base_paths:
default_php_executable_path = os.path.abspath(
os.path.join(os.path.dirname(os.path.dirname(base_path)), 'php-cgi'))
if os.path.exists(default_php_executable_path):
break
if (default_php_executable_path and
os.path.exists(default_php_executable_path)):
return default_php_executable_path
return None
class PortParser(object):
"""A parser for ints that represent ports."""
def __init__(self, allow_port_zero=True):
self._min_port = 0 if allow_port_zero else 1
def __call__(self, value):
try:
port = int(value)
except ValueError:
raise argparse.ArgumentTypeError('Invalid port: %r' % value)
if port < self._min_port or port >= (1 << 16):
raise argparse.ArgumentTypeError('Invalid port: %d' % port)
return port
def parse_per_module_option(
value, value_type, value_predicate,
single_bad_type_error, single_bad_predicate_error,
multiple_bad_type_error, multiple_bad_predicate_error,
multiple_duplicate_module_error):
"""Parses command line options that may be specified per-module.
Args:
value: A str containing the flag value to parse. Two formats are supported:
1. A universal value (may not contain a colon as that is used to
indicate a per-module value).
2. Per-module values. One or more comma separated module-value pairs.
Each pair is a module_name:value. An empty module-name is shorthand
for "default" to match how not specifying a module name in the yaml
is the same as specifying "module: default".
value_type: a callable that converts the string representation of the value
to the actual value. Should raise ValueError if the string can not
be converted.
value_predicate: a predicate to call on the converted value to validate
the converted value. Use "lambda _: True" if all values are valid.
single_bad_type_error: the message to use if a universal value is provided
and value_type throws a ValueError. The message must consume a single
format parameter (the provided value).
single_bad_predicate_error: the message to use if a universal value is
provided and value_predicate returns False. The message does not
get any format parameters.
multiple_bad_type_error: the message to use if a per-module value
either does not have two values separated by a single colon or if
value_types throws a ValueError on the second string. The message must
consume a single format parameter (the module_name:value pair).
multiple_bad_predicate_error: the message to use if a per-module value if
value_predicate returns False. The message must consume a single format
parameter (the module name).
multiple_duplicate_module_error: the message to use if the same module is
repeated. The message must consume a single format parameter (the
module name).
Returns:
Either a single value of value_type for universal values or a dict of
str->value_type for per-module values.
Raises:
argparse.ArgumentTypeError: the value is invalid.
"""
if ':' not in value:
try:
single_value = value_type(value)
except ValueError:
raise argparse.ArgumentTypeError(single_bad_type_error % value)
else:
if not value_predicate(single_value):
raise argparse.ArgumentTypeError(single_bad_predicate_error)
return single_value
else:
module_to_value = {}
for module_value in value.split(','):
try:
module_name, single_value = module_value.split(':')
single_value = value_type(single_value)
except ValueError:
raise argparse.ArgumentTypeError(multiple_bad_type_error % module_value)
else:
module_name = module_name.strip()
if not module_name:
module_name = appinfo.DEFAULT_MODULE
if module_name in module_to_value:
raise argparse.ArgumentTypeError(
multiple_duplicate_module_error % module_name)
if not value_predicate(single_value):
raise argparse.ArgumentTypeError(
multiple_bad_predicate_error % module_name)
module_to_value[module_name] = single_value
return module_to_value
def parse_max_module_instances(value):
"""Returns the parsed value for the --max_module_instances flag.
Args:
value: A str containing the flag value for parse. The format should follow
one of the following examples:
1. "5" - All modules are limited to 5 instances.
2. "default:3,backend:20" - The default module can have 3 instances,
"backend" can have 20 instances and all other modules are
unaffected. An empty name (i.e. ":3") is shorthand for default
to match how not specifying a module name in the yaml is the
same as specifying "module: default".
Returns:
The parsed value of the max_module_instances flag. May either be an int
(for values of the form "5") or a dict of str->int (for values of the
form "default:3,backend:20").
Raises:
argparse.ArgumentTypeError: the value is invalid.
"""
return parse_per_module_option(
value, int, lambda instances: instances > 0,
'Invalid max instance count: %r',
'Max instance count must be greater than zero',
'Expected "module:max_instance_count": %r',
'Max instance count for module %s must be greater than zero',
'Duplicate max instance count for module %s')
def parse_threadsafe_override(value):
"""Returns the parsed value for the --threadsafe_override flag.
Args:
value: A str containing the flag value for parse. The format should follow
one of the following examples:
1. "False" - All modules override the YAML threadsafe configuration
as if the YAML contained False.
2. "default:False,backend:True" - The default module overrides the
YAML threadsafe configuration as if the YAML contained False, the
"backend" module overrides with a value of True and all other
modules use the value in the YAML file. An empty name (i.e.
":True") is shorthand for default to match how not specifying a
module name in the yaml is the same as specifying
"module: default".
Returns:
The parsed value of the threadsafe_override flag. May either be a bool
(for values of the form "False") or a dict of str->bool (for values of the
form "default:False,backend:True").
Raises:
argparse.ArgumentTypeError: the value is invalid.
"""
return parse_per_module_option(
value, boolean_action.BooleanParse, lambda _: True,
'Invalid threadsafe override: %r',
None,
'Expected "module:threadsafe_override": %r',
None,
'Duplicate threadsafe override value for module %s')
def parse_path(value):
"""Returns the given path with ~ and environment variables expanded."""
return os.path.expanduser(os.path.expandvars(value))
def create_command_line_parser():
"""Returns an argparse.ArgumentParser to parse command line arguments."""
# TODO: Add more robust argument validation. Consider what flags
# are actually needed.
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
arg_name = 'yaml_path'
arg_help = 'Path to a yaml file, or a directory containing yaml files'
if application_configuration.java_supported():
arg_name = 'yaml_or_war_path'
arg_help += ', or a directory containing WEB-INF/web.xml'
parser.add_argument(
'config_paths', metavar=arg_name, nargs='+', help=arg_help)
common_group = parser.add_argument_group('Common')
common_group.add_argument(
'-A', '--application', action='store', dest='app_id',
help='Set the application, overriding the application value from the '
'app.yaml file.')
common_group.add_argument(
'--host', default='localhost',
help='host name to which application modules should bind')
common_group.add_argument(
'--port', type=PortParser(), default=8080,
help='lowest port to which application modules should bind')
common_group.add_argument(
'--admin_host', default='localhost',
help='host name to which the admin server should bind')
common_group.add_argument(
'--admin_port', type=PortParser(), default=8000,
help='port to which the admin server should bind')
common_group.add_argument(
'--auth_domain', default='gmail.com',
help='name of the authorization domain to use')
common_group.add_argument(
'--storage_path', metavar='PATH',
type=parse_path,
help='path to the data (datastore, blobstore, etc.) associated with the '
'application.')
common_group.add_argument(
'--log_level', default='info',
choices=_LOG_LEVEL_TO_RUNTIME_CONSTANT.keys(),
help='the log level below which logging messages generated by '
'application code will not be displayed on the console')
common_group.add_argument(
'--max_module_instances',
type=parse_max_module_instances,
help='the maximum number of runtime instances that can be started for a '
'particular module - the value can be an integer, in which case all '
'modules are limited to that number of instances, or a comma-separated '
'list of module:max_instances e.g. "default:5,backend:3"')
common_group.add_argument(
'--use_mtime_file_watcher',
action=boolean_action.BooleanAction,
const=True,
default=False,
help='use mtime polling for detecting source code changes - useful if '
'modifying code from a remote machine using a distributed file system')
common_group.add_argument(
'--threadsafe_override',
type=parse_threadsafe_override,
help='override the application\'s threadsafe configuration - the value '
'can be a boolean, in which case all modules threadsafe setting will '
'be overridden or a comma-separated list of module:threadsafe_override '
'e.g. "default:False,backend:True"')
common_group.add_argument('--docker_daemon_url', help=argparse.SUPPRESS)
# PHP
php_group = parser.add_argument_group('PHP')
php_group.add_argument('--php_executable_path', metavar='PATH',
type=parse_path,
default=_get_default_php_path(),
help='path to the PHP executable')
php_group.add_argument('--php_remote_debugging',
action=boolean_action.BooleanAction,
const=True,
default=False,
help='enable XDebug remote debugging')
# Dart
dart_group = parser.add_argument_group('Dart')
dart_group.add_argument('--dart_sdk', help=argparse.SUPPRESS)
dart_group.add_argument('--dart_dev_mode',
choices=['dev', 'deploy'],
help=argparse.SUPPRESS)
dart_group.add_argument('--dart_pub_serve_host', help=argparse.SUPPRESS)
dart_group.add_argument('--dart_pub_serve_port',
type=PortParser(), help=argparse.SUPPRESS)
# App Identity
appidentity_group = parser.add_argument_group('Application Identity')
appidentity_group.add_argument(
'--appidentity_email_address',
help='email address associated with a service account that has a '
'downloadable key. May be None for no local application identity.')
appidentity_group.add_argument(
'--appidentity_private_key_path',
help='path to private key file associated with service account '
'(.pem format). Must be set if appidentity_email_address is set.')
# Python
python_group = parser.add_argument_group('Python')
python_group.add_argument(
'--python_startup_script',
help='the script to run at the startup of new Python runtime instances '
'(useful for tools such as debuggers).')
python_group.add_argument(
'--python_startup_args',
help='the arguments made available to the script specified in '
'--python_startup_script.')
# Java
java_group = parser.add_argument_group('Java')
java_group.add_argument(
'--jvm_flag', action='append',
help='additional arguments to pass to the java command when launching '
'an instance of the app. May be specified more than once. Example: '
'--jvm_flag=-Xmx1024m --jvm_flag=-Xms256m')
# Blobstore
blobstore_group = parser.add_argument_group('Blobstore API')
blobstore_group.add_argument(
'--blobstore_path',
type=parse_path,
help='path to directory used to store blob contents '
'(defaults to a subdirectory of --storage_path if not set)',
default=None)
# Cloud SQL
cloud_sql_group = parser.add_argument_group('Cloud SQL')
cloud_sql_group.add_argument(
'--mysql_host',
default='localhost',
help='host name of a running MySQL server used for simulated Google '
'Cloud SQL storage')
cloud_sql_group.add_argument(
'--mysql_port', type=PortParser(allow_port_zero=False),
default=3306,
help='port number of a running MySQL server used for simulated Google '
'Cloud SQL storage')
cloud_sql_group.add_argument(
'--mysql_user',
default='',
help='username to use when connecting to the MySQL server specified in '
'--mysql_host and --mysql_port or --mysql_socket')
cloud_sql_group.add_argument(
'--mysql_password',
default='',
help='password to use when connecting to the MySQL server specified in '
'--mysql_host and --mysql_port or --mysql_socket')
cloud_sql_group.add_argument(
'--mysql_socket',
help='path to a Unix socket file to use when connecting to a running '
'MySQL server used for simulated Google Cloud SQL storage')
# Datastore
datastore_group = parser.add_argument_group('Datastore API')
datastore_group.add_argument(
'--datastore_path',
type=parse_path,
default=None,
help='path to a file used to store datastore contents '
'(defaults to a file in --storage_path if not set)',)
datastore_group.add_argument('--clear_datastore',
action=boolean_action.BooleanAction,
const=True,
default=False,
help='clear the datastore on startup')
datastore_group.add_argument(
'--datastore_consistency_policy',
default='time',
choices=['consistent', 'random', 'time'],
help='the policy to apply when deciding whether a datastore write should '
'appear in global queries')
datastore_group.add_argument(
'--require_indexes',
action=boolean_action.BooleanAction,
const=True,
default=False,
help='generate an error on datastore queries that '
'requires a composite index not found in index.yaml')
datastore_group.add_argument(
'--auto_id_policy',
default=datastore_stub_util.SCATTERED,
choices=[datastore_stub_util.SEQUENTIAL,
datastore_stub_util.SCATTERED],
help='the type of sequence from which the datastore stub '
'assigns automatic IDs. NOTE: Sequential IDs are '
'deprecated. This flag will be removed in a future '
'release. Please do not rely on sequential IDs in your '
'tests.')
datastore_group.add_argument(
'--enable_cloud_datastore',
action=boolean_action.BooleanAction,
const=True,
default=False,
help=argparse.SUPPRESS #'enable the Google Cloud Datastore API.'
)
# Logs
logs_group = parser.add_argument_group('Logs API')
logs_group.add_argument(
'--logs_path', default=None,
help='path to a file used to store request logs (defaults to a file in '
'--storage_path if not set)',)
# Mail
mail_group = parser.add_argument_group('Mail API')
mail_group.add_argument(
'--show_mail_body',
action=boolean_action.BooleanAction,
const=True,
default=False,
help='logs the contents of e-mails sent using the Mail API')
mail_group.add_argument(
'--enable_sendmail',
action=boolean_action.BooleanAction,
const=True,
default=False,
help='use the "sendmail" tool to transmit e-mail sent '
'using the Mail API (ignored if --smtp_host is set)')
mail_group.add_argument(
'--smtp_host', default='',
help='host name of an SMTP server to use to transmit '
'e-mail sent using the Mail API')
mail_group.add_argument(
'--smtp_port', default=25,
type=PortParser(allow_port_zero=False),
help='port number of an SMTP server to use to transmit '
'e-mail sent using the Mail API (ignored if --smtp_host '
'is not set)')
mail_group.add_argument(
'--smtp_user', default='',
help='username to use when connecting to the SMTP server '
'specified in --smtp_host and --smtp_port')
mail_group.add_argument(
'--smtp_password', default='',
help='password to use when connecting to the SMTP server '
'specified in --smtp_host and --smtp_port')
mail_group.add_argument(
'--smtp_allow_tls',
action=boolean_action.BooleanAction,
const=True,
default=True,
help='Allow TLS to be used when the SMTP server announces TLS support '
'(ignored if --smtp_host is not set)')
# Matcher
prospective_search_group = parser.add_argument_group('Prospective Search API')
prospective_search_group.add_argument(
'--prospective_search_path', default=None,
type=parse_path,
help='path to a file used to store the prospective '
'search subscription index (defaults to a file in '
'--storage_path if not set)')
prospective_search_group.add_argument(
'--clear_prospective_search',
action=boolean_action.BooleanAction,
const=True,
default=False,
help='clear the prospective search subscription index')
# Search
search_group = parser.add_argument_group('Search API')
search_group.add_argument(
'--search_indexes_path', default=None,
type=parse_path,
help='path to a file used to store search indexes '
'(defaults to a file in --storage_path if not set)',)
search_group.add_argument(
'--clear_search_indexes',
action=boolean_action.BooleanAction,
const=True,
default=False,
help='clear the search indexes')
# Taskqueue
taskqueue_group = parser.add_argument_group('Task Queue API')
taskqueue_group.add_argument(
'--enable_task_running',
action=boolean_action.BooleanAction,
const=True,
default=True,
help='run "push" tasks created using the taskqueue API automatically')
# Misc
misc_group = parser.add_argument_group('Miscellaneous')
misc_group.add_argument(
'--allow_skipped_files',
action=boolean_action.BooleanAction,
const=True,
default=False,
help='make files specified in the app.yaml "skip_files" or "static" '
'handlers readable by the application.')
# No help to avoid lengthening help message for rarely used feature:
# host name to which the server for API calls should bind.
misc_group.add_argument(
'--api_host', default='localhost',
help=argparse.SUPPRESS)
misc_group.add_argument(
'--api_port', type=PortParser(), default=0,
help='port to which the server for API calls should bind')
misc_group.add_argument(
'--automatic_restart',
action=boolean_action.BooleanAction,
const=True,
default=True,
help=('restart instances automatically when files relevant to their '
'module are changed'))
misc_group.add_argument(
'--dev_appserver_log_level', default='info',
choices=_LOG_LEVEL_TO_PYTHON_CONSTANT.keys(),
help='the log level below which logging messages generated by '
'the development server will not be displayed on the console (this '
'flag is more useful for diagnosing problems in dev_appserver.py rather '
'than in application code)')
misc_group.add_argument(
'--skip_sdk_update_check',
action=boolean_action.BooleanAction,
const=True,
default=False,
help='skip checking for SDK updates (if false, use .appcfg_nag to '
'decide)')
misc_group.add_argument(
'--default_gcs_bucket_name', default=None,
help='default Google Cloud Storage bucket name')
return parser
PARSER = create_command_line_parser()
def _clear_datastore_storage(datastore_path):
"""Delete the datastore storage file at the given path."""
# lexists() returns True for broken symlinks, where exists() returns False.
if os.path.lexists(datastore_path):
try:
os.remove(datastore_path)
except OSError, e:
logging.warning('Failed to remove datastore file %r: %s',
datastore_path,
e)
def _clear_prospective_search_storage(prospective_search_path):
"""Delete the perspective search storage file at the given path."""
# lexists() returns True for broken symlinks, where exists() returns False.
if os.path.lexists(prospective_search_path):
try:
os.remove(prospective_search_path)
except OSError, e:
logging.warning('Failed to remove prospective search file %r: %s',
prospective_search_path,
e)
def _clear_search_indexes_storage(search_index_path):
"""Delete the search indexes storage file at the given path."""
# lexists() returns True for broken symlinks, where exists() returns False.
if os.path.lexists(search_index_path):
try:
os.remove(search_index_path)
except OSError, e:
logging.warning('Failed to remove search indexes file %r: %s',
search_index_path,
e)
def _setup_environ(app_id):
"""Sets up the os.environ dictionary for the front-end server and API server.
This function should only be called once.
Args:
app_id: The id of the application.
"""
os.environ['APPLICATION_ID'] = app_id
class DevelopmentServer(object):
"""Encapsulates the logic for the development server.
Only a single instance of the class may be created per process. See
_setup_environ.
"""
def __init__(self):
# A list of servers that are currently running.
self._running_modules = []
self._module_to_port = {}
self._dispatcher = None
def module_to_address(self, module_name, instance=None):
"""Returns the address of a module."""
if module_name is None:
return self._dispatcher.dispatch_address
return self._dispatcher.get_hostname(
module_name,
self._dispatcher.get_default_version(module_name),
instance)
def start(self, options):
"""Start devappserver2 servers based on the provided command line arguments.
Args:
options: An argparse.Namespace containing the command line arguments.
"""
logging.getLogger().setLevel(
_LOG_LEVEL_TO_PYTHON_CONSTANT[options.dev_appserver_log_level])
configuration = application_configuration.ApplicationConfiguration(
options.config_paths, options.app_id)
if options.enable_cloud_datastore:
# This requires the oauth server stub to return that the logged in user
# is in fact an admin.
os.environ['OAUTH_IS_ADMIN'] = '1'
gcd_module = application_configuration.ModuleConfiguration(
gcd_application.generate_gcd_app(configuration.app_id.split('~')[1]))
configuration.modules.append(gcd_module)
if options.skip_sdk_update_check:
logging.info('Skipping SDK update check.')
else:
update_checker.check_for_updates(configuration)
# There is no good way to set the default encoding from application code
# (it needs to be done during interpreter initialization in site.py or
# sitecustomize.py) so just warn developers if they have a different
# encoding than production.
if sys.getdefaultencoding() != _PROD_DEFAULT_ENCODING:
logging.warning(
'The default encoding of your local Python interpreter is set to %r '
'while App Engine\'s production environment uses %r; as a result '
'your code may behave differently when deployed.',
sys.getdefaultencoding(), _PROD_DEFAULT_ENCODING)
if options.port == 0:
logging.warn('DEFAULT_VERSION_HOSTNAME will not be set correctly with '
'--port=0')
_setup_environ(configuration.app_id)
self._dispatcher = dispatcher.Dispatcher(
configuration,
options.host,
options.port,
options.auth_domain,
_LOG_LEVEL_TO_RUNTIME_CONSTANT[options.log_level],
self._create_php_config(options),
self._create_python_config(options),
self._create_java_config(options),
self._create_cloud_sql_config(options),
self._create_vm_config(options),
self._create_module_to_setting(options.max_module_instances,
configuration, '--max_module_instances'),
options.use_mtime_file_watcher,
options.automatic_restart,
options.allow_skipped_files,
self._create_module_to_setting(options.threadsafe_override,
configuration, '--threadsafe_override'))
request_data = wsgi_request_info.WSGIRequestInfo(self._dispatcher)
storage_path = _get_storage_path(options.storage_path, configuration.app_id)
apis = self._create_api_server(
request_data, storage_path, options, configuration)
apis.start()
self._running_modules.append(apis)
self._dispatcher.start(options.api_host, apis.port, request_data)
xsrf_path = os.path.join(storage_path, 'xsrf')
admin = admin_server.AdminServer(options.admin_host, options.admin_port,
self._dispatcher, configuration, xsrf_path)
admin.start()
self._running_modules.append(admin)
def stop(self):
"""Stops all running devappserver2 modules."""
while self._running_modules:
self._running_modules.pop().quit()
if self._dispatcher:
self._dispatcher.quit()
@staticmethod
def _create_api_server(request_data, storage_path, options, configuration):
datastore_path = options.datastore_path or os.path.join(storage_path,
'datastore.db')
logs_path = options.logs_path or os.path.join(storage_path, 'logs.db')
search_index_path = options.search_indexes_path or os.path.join(
storage_path, 'search_indexes')
prospective_search_path = options.prospective_search_path or os.path.join(
storage_path, 'prospective-search')
blobstore_path = options.blobstore_path or os.path.join(storage_path,
'blobs')
if options.clear_datastore:
_clear_datastore_storage(datastore_path)
if options.clear_prospective_search:
_clear_prospective_search_storage(prospective_search_path)
if options.clear_search_indexes:
_clear_search_indexes_storage(search_index_path)
if options.auto_id_policy == datastore_stub_util.SEQUENTIAL:
logging.warn("--auto_id_policy='sequential' is deprecated. This option "
"will be removed in a future release.")
application_address = '%s' % options.host
if options.port and options.port != 80:
application_address += ':' + str(options.port)
user_login_url = '/%s?%s=%%s' % (login.LOGIN_URL_RELATIVE,
login.CONTINUE_PARAM)
user_logout_url = '%s&%s=%s' % (user_login_url, login.ACTION_PARAM,
login.LOGOUT_ACTION)
if options.datastore_consistency_policy == 'time':
consistency = datastore_stub_util.TimeBasedHRConsistencyPolicy()
elif options.datastore_consistency_policy == 'random':
consistency = datastore_stub_util.PseudoRandomHRConsistencyPolicy()
elif options.datastore_consistency_policy == 'consistent':
consistency = datastore_stub_util.PseudoRandomHRConsistencyPolicy(1.0)
else:
assert 0, ('unknown consistency policy: %r' %
options.datastore_consistency_policy)
api_server.maybe_convert_datastore_file_stub_data_to_sqlite(
configuration.app_id, datastore_path)
api_server.setup_stubs(
request_data=request_data,
app_id=configuration.app_id,
application_root=configuration.modules[0].application_root,
# The "trusted" flag is only relevant for Google administrative
# applications.
trusted=getattr(options, 'trusted', False),
appidentity_email_address=options.appidentity_email_address,
appidentity_private_key_path=os.path.abspath(
options.appidentity_private_key_path)
if options.appidentity_private_key_path else None,
blobstore_path=blobstore_path,
datastore_path=datastore_path,
datastore_consistency=consistency,
datastore_require_indexes=options.require_indexes,
datastore_auto_id_policy=options.auto_id_policy,
images_host_prefix='http://%s' % application_address,
logs_path=logs_path,
mail_smtp_host=options.smtp_host,
mail_smtp_port=options.smtp_port,
mail_smtp_user=options.smtp_user,
mail_smtp_password=options.smtp_password,
mail_enable_sendmail=options.enable_sendmail,
mail_show_mail_body=options.show_mail_body,
mail_allow_tls=options.smtp_allow_tls,
matcher_prospective_search_path=prospective_search_path,
search_index_path=search_index_path,
taskqueue_auto_run_tasks=options.enable_task_running,
taskqueue_default_http_server=application_address,
user_login_url=user_login_url,
user_logout_url=user_logout_url,
default_gcs_bucket_name=options.default_gcs_bucket_name)
return api_server.APIServer(options.api_host, options.api_port,
configuration.app_id)
@staticmethod
def _create_php_config(options):
php_config = runtime_config_pb2.PhpConfig()
if options.php_executable_path:
php_config.php_executable_path = os.path.abspath(
options.php_executable_path)
php_config.enable_debugger = options.php_remote_debugging
return php_config
@staticmethod
def _create_python_config(options):
python_config = runtime_config_pb2.PythonConfig()
if options.python_startup_script:
python_config.startup_script = os.path.abspath(
options.python_startup_script)
if options.python_startup_args:
python_config.startup_args = options.python_startup_args
return python_config
@staticmethod
def _create_java_config(options):
java_config = runtime_config_pb2.JavaConfig()
if options.jvm_flag:
java_config.jvm_args.extend(options.jvm_flag)
return java_config
@staticmethod
def _create_cloud_sql_config(options):
cloud_sql_config = runtime_config_pb2.CloudSQL()
cloud_sql_config.mysql_host = options.mysql_host
cloud_sql_config.mysql_port = options.mysql_port
cloud_sql_config.mysql_user = options.mysql_user
cloud_sql_config.mysql_password = options.mysql_password
if options.mysql_socket:
cloud_sql_config.mysql_socket = options.mysql_socket
return cloud_sql_config
@staticmethod
def _create_vm_config(options):
vm_config = runtime_config_pb2.VMConfig()
if options.docker_daemon_url:
vm_config.docker_daemon_url = options.docker_daemon_url
if options.dart_sdk:
vm_config.dart_config.dart_sdk = os.path.abspath(options.dart_sdk)
if options.dart_dev_mode:
vm_config.dart_config.dart_dev_mode = options.dart_dev_mode
if options.dart_pub_serve_host:
vm_config.dart_config.dart_pub_serve_host = options.dart_pub_serve_host
if options.dart_pub_serve_port:
vm_config.dart_config.dart_pub_serve_port = options.dart_pub_serve_port
return vm_config
@staticmethod
def _create_module_to_setting(setting, configuration, option):
"""Create a per-module dictionary configuration.
Creates a dictionary that maps a module name to a configuration
setting. Used in conjunction with parse_per_module_option.
Args:
setting: a value that can be None, a dict of str->type or a single value.
configuration: an ApplicationConfiguration object.
option: the option name the setting came from.
Returns:
A dict of str->type.
"""
if setting is None:
return {}
module_names = [module_configuration.module_name
for module_configuration in configuration.modules]
if isinstance(setting, dict):
# Warn and remove a setting if the module name is unknown.
module_to_setting = {}
for module_name, value in setting.items():
if module_name in module_names:
module_to_setting[module_name] = value
else:
logging.warning('Unknown module %r for %r', module_name, option)
return module_to_setting
# Create a dict with an entry for every module.
return {module_name: setting for module_name in module_names}
def main():
shutdown.install_signal_handlers()
# The timezone must be set in the devappserver2 process rather than just in
# the runtime so printed log timestamps are consistent and the taskqueue stub
# expects the timezone to be UTC. The runtime inherits the environment.
os.environ['TZ'] = 'UTC'
if hasattr(time, 'tzset'):
# time.tzset() should be called on Unix, but doesn't exist on Windows.
time.tzset()
options = PARSER.parse_args()
dev_server = DevelopmentServer()
try:
dev_server.start(options)
shutdown.wait_until_shutdown()
finally:
dev_server.stop()
if __name__ == '__main__':
main()
| [] | [] | [
"OAUTH_IS_ADMIN",
"TZ",
"APPLICATION_ID"
] | [] | ["OAUTH_IS_ADMIN", "TZ", "APPLICATION_ID"] | python | 3 | 0 | |
images/wallarm-api-proxy/proxy.go | /*
Copyright 2019 Wallarm Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net/http"
"net/http/httputil"
"net/url"
"os"
"strconv"
)
var apiURL *url.URL
func main() {
useSSL, err := strconv.ParseBool(os.Getenv("WALLARM_API_USE_SSL"))
if err != nil {
panic(err)
}
if useSSL {
apiURL, err = url.Parse(fmt.Sprintf("https://%s:%s", os.Getenv("WALLARM_API_HOST"), os.Getenv("WALLARM_API_PORT")))
} else {
apiURL, err = url.Parse(fmt.Sprintf("http://%s:%s", os.Getenv("WALLARM_API_HOST"), os.Getenv("WALLARM_API_PORT")))
}
if err != nil {
panic(err)
}
apiProxy := httputil.NewSingleHostReverseProxy(apiURL)
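// If a ca.pem file is present next to the binary, trust it as the root CA
// for TLS connections to the upstream Wallarm API; otherwise the default
// transport (with the system roots) is used.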
certs, err := ioutil.ReadFile("ca.pem")
if err == nil {
rootCAs := x509.NewCertPool()
rootCAs.AppendCertsFromPEM(certs)
config := &tls.Config{
RootCAs: rootCAs,
}
apiProxy.Transport = &http.Transport{TLSClientConfig: config}
}
http.HandleFunc("/", proxyHandler(apiProxy))
srv := &http.Server{Addr: ":8080"}
srv.ListenAndServe()
}
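// proxyHandler rewrites the request Host header to the API host before
// handing the request to the reverse proxy.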
func proxyHandler(p *httputil.ReverseProxy) func(http.ResponseWriter, *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
r.Host = apiURL.Host
p.ServeHTTP(w, r)
}
}
| [
"\"WALLARM_API_USE_SSL\"",
"\"WALLARM_API_HOST\"",
"\"WALLARM_API_PORT\"",
"\"WALLARM_API_HOST\"",
"\"WALLARM_API_PORT\""
] | [] | [
"WALLARM_API_PORT",
"WALLARM_API_USE_SSL",
"WALLARM_API_HOST"
] | [] | ["WALLARM_API_PORT", "WALLARM_API_USE_SSL", "WALLARM_API_HOST"] | go | 3 | 0 | |
pkg/params/info/pac.go | package info
import (
"io/ioutil"
"os"
"strings"
"github.com/spf13/cobra"
)
type PacOpts struct {
LogURL string
ApplicationName string // the Application Name for example "Pipelines as Code"
SecretAutoCreation bool // secret auto creation in target namespace
ProviderToken string
ProviderURL string
ProviderUser string
ProviderInfoFromRepo bool // whether the provider info comes from the repository
WebhookType string
PayloadFile string
TektonDashboardURL string
HubURL string
RemoteTasks bool
}
func (p *PacOpts) AddFlags(cmd *cobra.Command) error {
cmd.PersistentFlags().StringVarP(&p.WebhookType, "git-provider-type", "",
os.Getenv("PAC_GIT_PROVIDER_TYPE"),
"Webhook type")
providerToken := os.Getenv("PAC_GIT_PROVIDER_TOKEN")
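// PAC_GIT_PROVIDER_TOKEN may contain either the token itself or a path to a
// file holding it; when the value points at an existing file, the token is
// read from that file.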
if providerToken != "" {
if _, err := os.Stat(providerToken); !os.IsNotExist(err) {
data, err := ioutil.ReadFile(providerToken)
if err != nil {
return err
}
providerToken = string(data)
}
}
cmd.PersistentFlags().StringVarP(&p.ProviderToken, "git-provider-token", "", providerToken,
"Git Provider Token")
cmd.PersistentFlags().StringVarP(&p.ProviderURL, "git-provider-api-url", "",
os.Getenv("PAC_GIT_PROVIDER_APIURL"),
"Git Provider API URL")
cmd.PersistentFlags().StringVarP(&p.PayloadFile,
"payload-file", "", os.Getenv("PAC_PAYLOAD_FILE"), "A file containing the webhook payload")
applicationName := os.Getenv("PAC_APPLICATION_NAME")
cmd.Flags().StringVar(&p.ApplicationName,
"application-name", applicationName,
"The name of the application.")
secretAutoCreation := false
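// Treat "true", "yes" or "1" (case-insensitive) in PAC_SECRET_AUTO_CREATE as
// enabling secret auto-creation.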
secretAutoCreationEnv := os.Getenv("PAC_SECRET_AUTO_CREATE")
if strings.ToLower(secretAutoCreationEnv) == "true" ||
strings.ToLower(secretAutoCreationEnv) == "yes" || secretAutoCreationEnv == "1" {
secretAutoCreation = true
}
cmd.Flags().BoolVar(&p.SecretAutoCreation,
"secret-auto-creation",
secretAutoCreation,
"Wether to create automatically secrets.")
return nil
}
| [
"\"PAC_GIT_PROVIDER_TYPE\"",
"\"PAC_GIT_PROVIDER_TOKEN\"",
"\"PAC_GIT_PROVIDER_APIURL\"",
"\"PAC_PAYLOAD_FILE\"",
"\"PAC_APPLICATION_NAME\"",
"\"PAC_SECRET_AUTO_CREATE\""
] | [] | [
"PAC_GIT_PROVIDER_APIURL",
"PAC_SECRET_AUTO_CREATE",
"PAC_GIT_PROVIDER_TOKEN",
"PAC_GIT_PROVIDER_TYPE",
"PAC_PAYLOAD_FILE",
"PAC_APPLICATION_NAME"
] | [] | ["PAC_GIT_PROVIDER_APIURL", "PAC_SECRET_AUTO_CREATE", "PAC_GIT_PROVIDER_TOKEN", "PAC_GIT_PROVIDER_TYPE", "PAC_PAYLOAD_FILE", "PAC_APPLICATION_NAME"] | go | 6 | 0 | |
data_conversion_subsystem/test/data_converter/test_data_converter.py | import datetime
from copy import deepcopy
from os import environ
from unittest import TestCase, mock
from unittest.mock import Mock
from pytz import UTC
from requests import Timeout
import data_conversion_subsystem.data_converter.data_converter as data_converter
from data_conversion_subsystem.config.config import DCS_CONFIG
from data_conversion_subsystem.data_converter.data_converter import ABORTED, CONFIG, CREATED, DATA_CONVERTED, \
DATA_SAVED, EXECUTION_CHECKED, FINISHED, INITIALIZED, PENDING_WORK_CHECKED, STATE_RESTORED, STATE_SAVED
from data_conversion_subsystem.supervisor.supervisor import DataConverterSupervisor
ENVIRON = deepcopy(environ)
ENVIRON['API_IP'] = 'test_ip'
ENVIRON['POSTGRES_IP'] = 'test_ip'
PAGE_SIZE = 100
MIN_UPDATE_FREQUENCY = {'value': 30, 'units': 'min'}
MAX_UPDATE_FREQUENCY = {'value': 1, 'units': 'day'}
DEPENDENCIES_UNSATISFIED_UPDATE_FREQUENCY = {'value': 10, 'units': 'min'}
DATA_COLLECTION_MIN_UPDATE_FREQUENCY = {'value': 1, 'units': 'min'}
DCS_CONFIG['MAX_DATA_CALLS_PER_MODULE_AND_EXECUTION'] = 4
_CONFIG = deepcopy(data_converter.CONFIG)
_CONFIG['STATE_STRUCT']['last_request'] = None
_CONFIG['PAGE_SIZE'] = PAGE_SIZE
_CONFIG['MAX_UPDATE_FREQUENCY'] = MAX_UPDATE_FREQUENCY
_CONFIG['MIN_UPDATE_FREQUENCY'] = MIN_UPDATE_FREQUENCY
_CONFIG['DATA_COLLECTION_MIN_UPDATE_FREQUENCY'] = DATA_COLLECTION_MIN_UPDATE_FREQUENCY
_CONFIG['DEPENDENCIES_UNSATISFIED_UPDATE_FREQUENCY'] = DEPENDENCIES_UNSATISFIED_UPDATE_FREQUENCY
def _get_config(*args, **kwargs):
return deepcopy(_CONFIG)
class SimpleDataConverter(data_converter.DataConverter):
"""
This DataConverter does not perform actual work. Failure or success are fully configurable, and so are the
state variables and inner attributes.
With this class, all possible scenarios (transitions, failures, etc...) can be tested.
"""
def __init__(self, fail_on='', pending_work=True, elements_to_convert=None, data_converted=None, data_inserted=None,
update_frequency={'value': 0, 'units': 's'}, restart_required=False, backoff_time=None,
log_to_file=True, log_to_stdout=False, log_to_telegram=False, dependencies_satisfied=True):
fake_file_path = DCS_CONFIG['DATA_CONVERTERS_PATH'] + 'test/simple_data_converter/simple_data_converter.py'
super().__init__(fake_file_path, log_to_file=log_to_file, log_to_stdout=log_to_stdout,
log_to_telegram=log_to_telegram)
self.fail_on = fail_on
self._elements_to_convert = elements_to_convert
self._pending_work = pending_work
self._update_frequency = update_frequency
self._data_converted = data_converted
self._data_inserted = data_inserted
self._restart_required = restart_required
self._backoff_time = backoff_time
self._dependencies_satisfied = dependencies_satisfied
self.method_name = '__init__'
def _restore_state(self):
super()._restore_state()
self.state['update_frequency'] = self._update_frequency
if self.fail_on == '_restore_state':
raise Exception('Fail!')
self.state['restart_required'] = self._restart_required
if self._backoff_time:
self._pending_work = False
self.state['backoff_time']['value'] = self._backoff_time
def _has_pending_work(self):
super()._has_pending_work()
self.pending_work = self._pending_work
if self.fail_on == '_has_pending_work':
raise Exception('Fail!')
def _convert_data(self):
super()._convert_data()
if self.fail_on == '_convert_data':
self.state['elements_to_convert'] = self.elements_to_convert
self.state['converted_elements'] = None
self.state['inserted_elements'] = None
raise Exception('Fail!')
self.state['elements_to_convert'] = self._elements_to_convert
self.state['converted_elements'] = self._data_converted
self.state['last_request'] = datetime.datetime.now(tz=UTC)
def _perform_data_conversion(self):
if self.fail_on == '_perform_data_conversion':
raise Exception('Fail!')
self.data = []
for i in range(self._data_converted if self._data_converted is not None else 0):
self.data.append(i)
def _check_dependencies_satisfied(self):
self.dependencies_satisfied = self._dependencies_satisfied
if self.fail_on == '_check_dependencies_satisfied':
raise Exception('Fail!')
def _save_data(self):
super()._save_data()
if self.fail_on == '_save_data':
raise Exception('Fail!')
self.state['inserted_elements'] = self._data_inserted
def _check_execution(self):
super()._check_execution()
if self.fail_on == '_check_execution':
raise Exception('Fail!')
def _save_state(self):
if self.fail_on == '_save_state':
raise Exception('Fail!')
super()._save_state()
@mock.patch('os.environ', ENVIRON)
@mock.patch('data_conversion_subsystem.data_converter.data_converter.DCS_CONFIG', DCS_CONFIG)
class TestDataConverter(TestCase):
def create_run(self, fail_on='', pending_work=True, data_converted=None, data_inserted=None,
elements_to_convert=None, update_frequency={'value': 0, 'units': 's'}, dependencies_satisfied=True):
with mock.patch('data_conversion_subsystem.data_converter.data_converter.get_config') as mock_config:
mock_config.side_effect = _get_config
self.data_converter = SimpleDataConverter(fail_on=fail_on, pending_work=pending_work,
elements_to_convert=elements_to_convert, data_converted=data_converted, data_inserted=data_inserted,
update_frequency=update_frequency, dependencies_satisfied=dependencies_satisfied, log_to_stdout=False)
self.data_converter.run()
@classmethod
def setUpClass(cls):
SimpleDataConverter().remove_files()
def setUp(self):
self.data_converter = None
def tearDown(self):
if self.data_converter:
self.data_converter.remove_files()
def test_create_fails(self):
# Since '.config' file hasn't been defined, DataConverter isn't runnable.
self.data_converter = SimpleDataConverter()
self.assertFalse(self.data_converter.is_runnable())
@mock.patch('data_conversion_subsystem.data_converter.data_converter.get_config')
def test_invalid_state_struct(self, mock):
config = deepcopy(CONFIG)
config['STATE_STRUCT'] = {'update_frequency': {'value': 1, 'units': 's'}, 'last_request': None}
# Since 'STATE_STRUCT' has less fields than expected, DataConverter isn't runnable.
mock.return_value = config
self.data_converter = SimpleDataConverter()
self.assertFalse(self.data_converter.is_runnable())
@mock.patch('data_conversion_subsystem.data_converter.data_converter.get_config')
def test_invalid_config(self, mock):
# Since 'STATE_STRUCT' has less fields than expected, DataConverter isn't runnable.
mock.return_value = {}
self.data_converter = SimpleDataConverter()
self.assertFalse(self.data_converter.is_runnable())
@mock.patch('data_conversion_subsystem.data_converter.data_converter.get_config')
def test_correct_creation(self, mock):
mock.return_value = CONFIG
self.data_converter = SimpleDataConverter()
self.assertTrue(self.data_converter.is_runnable())
@mock.patch('data_conversion_subsystem.data_converter.data_converter.get_config')
def test_expose_inner_data(self, mock):
class Thief: # isinstance(o, Supervisor) is False
pass
class FakeSupervisor(DataConverterSupervisor): # isinstance(o, Supervisor) is True
pass
# Negative cases
mock.return_value = CONFIG
self.data_converter = SimpleDataConverter()
thief = Thief()
elegant_thief = FakeSupervisor(None, None)
self.assertIsNone(self.data_converter.expose_transition_states(who=thief))
self.assertIsNone(self.data_converter.expose_transition_states(who=elegant_thief))
self.assertFalse(self.data_converter.execute_actions(CREATED, who=thief))
self.assertFalse(self.data_converter.execute_actions(CREATED, who=elegant_thief))
# Positive case
supervisor = DataConverterSupervisor(None, None)
expected = [CREATED, INITIALIZED]
result = self.data_converter.expose_transition_states(who=supervisor)
self.assertIsNotNone(result)
self.assertEqual(expected, result)
self.assertTrue(self.data_converter.execute_actions(CREATED, who=supervisor))
@mock.patch('requests.get')
def test_remove_files(self, mock_requests):
mock_requests.return_value = response = Mock()
response.status_code = 200
response.content = '{"data": [{"foo": true}, {"baz": false}]}'.encode()
from os.path import exists
from global_config.config import GLOBAL_CONFIG
from utilities.util import map_data_collector_path_to_state_file_path
mock.return_value = CONFIG
self.create_run(data_converted=1000, data_inserted=1000)
self.data_converter.remove_files()
self.assertFalse(
exists(map_data_collector_path_to_state_file_path(__file__, GLOBAL_CONFIG['ROOT_PROJECT_FOLDER'])))
self.assertFalse(
exists(GLOBAL_CONFIG['ROOT_LOG_FOLDER'] + 'test/simple_data_converter/simple_data_converter.py'))
@mock.patch('requests.get')
def test_positive_execution_with_pending_work(self, mock_requests):
mock_requests.return_value = response = Mock()
response.status_code = 200
response.content = '{"data": [{"foo": true}, {"baz": false}]}'.encode()
supervisor = DataConverterSupervisor(None, None)
self.create_run(elements_to_convert=2, data_converted=2, data_inserted=2)
transitions = self.data_converter.expose_transition_states(supervisor)
expected = [CREATED, INITIALIZED, STATE_RESTORED, PENDING_WORK_CHECKED, DATA_CONVERTED, DATA_SAVED,
EXECUTION_CHECKED, STATE_SAVED, FINISHED]
self.assertTrue(self.data_converter.pending_work)
self.assertTrue(self.data_converter.check_result)
self.assertIsNone(self.data_converter.state['error'])
self.assertTrue(self.data_converter.finished_execution())
self.assertTrue(self.data_converter.successful_execution())
self.assertEqual(2, self.data_converter.state['elements_to_convert'])
self.assertEqual(2, self.data_converter.state['converted_elements'])
self.assertEqual(2, self.data_converter.state['inserted_elements'])
self.assertListEqual(expected, transitions)
def test_normal_execution_with_no_pending_work(self):
supervisor = DataConverterSupervisor(None, None)
self.create_run(pending_work=False)
transitions = self.data_converter.expose_transition_states(supervisor)
expected = [CREATED, INITIALIZED, STATE_RESTORED, PENDING_WORK_CHECKED, EXECUTION_CHECKED, STATE_SAVED,
FINISHED]
self.assertFalse(self.data_converter.pending_work)
self.assertTrue(self.data_converter.check_result)
self.assertIsNone(self.data_converter.state['error'])
self.assertTrue(self.data_converter.finished_execution())
self.assertTrue(self.data_converter.successful_execution())
self.assertIsNone(self.data_converter.state['elements_to_convert'])
self.assertIsNone(self.data_converter.state['converted_elements'])
self.assertIsNone(self.data_converter.state['inserted_elements'])
self.assertListEqual(expected, transitions)
@mock.patch('requests.get')
def test_abnormal_execution_pending_work_less_saved_than_converted(self, mock_requests):
mock_requests.return_value = response = Mock()
response.status_code = 200
response.content = '{"data": [{"foo": true}, {"baz": false}]}'.encode()
supervisor = DataConverterSupervisor(None, None)
self.create_run(elements_to_convert=2, data_converted=2, data_inserted=1)
transitions = self.data_converter.expose_transition_states(supervisor)
expected = [CREATED, INITIALIZED, STATE_RESTORED, PENDING_WORK_CHECKED, DATA_CONVERTED, DATA_SAVED,
EXECUTION_CHECKED, STATE_SAVED, FINISHED]
self.assertTrue(self.data_converter.pending_work)
self.assertFalse(self.data_converter.check_result)
self.assertIsNone(self.data_converter.state['error'])
self.assertTrue(self.data_converter.finished_execution())
self.assertFalse(self.data_converter.successful_execution())
self.assertEqual(2, self.data_converter.state['elements_to_convert'])
self.assertEqual(2, self.data_converter.state['converted_elements'])
self.assertEqual(1, self.data_converter.state['inserted_elements'])
self.assertGreater(self.data_converter.state['converted_elements'],
self.data_converter.state['inserted_elements'])
self.assertListEqual(expected, transitions)
@mock.patch('requests.get')
def test_abnormal_execution_pending_work_less_converted_than_saved(self, mock_requests):
mock_requests.return_value = response = Mock()
response.status_code = 200
response.content = '{"data": [{"foo": true}, {"baz": false}]}'.encode()
supervisor = DataConverterSupervisor(None, None)
self.create_run(elements_to_convert=2, data_converted=1, data_inserted=2)
transitions = self.data_converter.expose_transition_states(supervisor)
expected = [CREATED, INITIALIZED, STATE_RESTORED, PENDING_WORK_CHECKED, DATA_CONVERTED, DATA_SAVED,
EXECUTION_CHECKED, STATE_SAVED, FINISHED]
self.assertTrue(self.data_converter.pending_work)
self.assertFalse(self.data_converter.check_result)
self.assertIsNone(self.data_converter.state['error'])
self.assertTrue(self.data_converter.finished_execution())
self.assertFalse(self.data_converter.successful_execution())
self.assertEqual(2, self.data_converter.state['elements_to_convert'])
self.assertEqual(1, self.data_converter.state['converted_elements'])
self.assertEqual(2, self.data_converter.state['inserted_elements'])
self.assertLess(self.data_converter.state['converted_elements'], self.data_converter.state['inserted_elements'])
self.assertListEqual(expected, transitions)
@mock.patch('requests.get')
def test_abnormal_execution_pending_work_not_converted(self, mock_requests):
mock_requests.return_value = response = Mock()
response.status_code = 200
response.content = '{"data": [{"foo": true}, {"baz": false}]}'.encode()
supervisor = DataConverterSupervisor(None, None)
self.create_run(elements_to_convert=2, data_converted=0, data_inserted=0)
transitions = self.data_converter.expose_transition_states(supervisor)
expected = [CREATED, INITIALIZED, STATE_RESTORED, PENDING_WORK_CHECKED, DATA_CONVERTED, DATA_SAVED,
EXECUTION_CHECKED, STATE_SAVED, FINISHED]
self.assertTrue(self.data_converter.pending_work)
self.assertFalse(self.data_converter.check_result)
self.assertIsNone(self.data_converter.state['error'])
self.assertTrue(self.data_converter.finished_execution())
self.assertFalse(self.data_converter.successful_execution())
self.assertEqual(2, self.data_converter.state['elements_to_convert'])
self.assertEqual(0, self.data_converter.state['converted_elements'])
self.assertEqual(0, self.data_converter.state['inserted_elements'])
self.assertListEqual(expected, transitions)
@mock.patch('requests.get')
def test_abnormal_execution_pending_work_converted_not_saved(self, mock_requests):
mock_requests.return_value = response = Mock()
response.status_code = 200
response.content = '{"data": [{"foo": true}, {"baz": false}]}'.encode()
supervisor = DataConverterSupervisor(None, None)
self.create_run(elements_to_convert=2, data_converted=2, data_inserted=0)
transitions = self.data_converter.expose_transition_states(supervisor)
expected = [CREATED, INITIALIZED, STATE_RESTORED, PENDING_WORK_CHECKED, DATA_CONVERTED, DATA_SAVED,
EXECUTION_CHECKED, STATE_SAVED, FINISHED]
self.assertTrue(self.data_converter.pending_work)
self.assertFalse(self.data_converter.check_result)
self.assertIsNone(self.data_converter.state['error'])
self.assertTrue(self.data_converter.finished_execution())
self.assertFalse(self.data_converter.successful_execution())
self.assertEqual(2, self.data_converter.state['elements_to_convert'])
self.assertEqual(2, self.data_converter.state['converted_elements'])
self.assertEqual(0, self.data_converter.state['inserted_elements'])
self.assertListEqual(expected, transitions)
def test_abnormal_execution_failure_INITIALIZED(self):
supervisor = DataConverterSupervisor(None, None)
self.create_run(fail_on='_restore_state')
transitions = self.data_converter.expose_transition_states(supervisor)
expected = [CREATED, INITIALIZED, ABORTED]
self.assertIsNone(self.data_converter.pending_work)
self.assertIsNone(self.data_converter.check_result)
self.assertIsNone(self.data_converter.state['error'])
self.assertTrue(self.data_converter.finished_execution())
self.assertFalse(self.data_converter.successful_execution())
self.assertIsNone(self.data_converter.state['converted_elements'])
self.assertIsNone(self.data_converter.state['inserted_elements'])
self.assertListEqual(expected, transitions)
def test_abnormal_execution_failure_STATE_RESTORED(self):
supervisor = DataConverterSupervisor(None, None)
self.create_run(fail_on='_has_pending_work')
transitions = self.data_converter.expose_transition_states(supervisor)
expected = [CREATED, INITIALIZED, STATE_RESTORED, ABORTED]
self.assertTrue(self.data_converter.pending_work)
self.assertIsNone(self.data_converter.check_result)
self.assertIsNotNone(self.data_converter.state['error'])
self.assertTrue(self.data_converter.finished_execution())
self.assertFalse(self.data_converter.successful_execution())
self.assertIsNone(self.data_converter.state['converted_elements'])
self.assertIsNone(self.data_converter.state['inserted_elements'])
self.assertListEqual(expected, transitions)
@mock.patch('requests.get')
def test_abnormal_execution_failure_PENDING_WORK_CHECKED(self, mock_requests):
mock_requests.return_value = response = Mock()
response.status_code = 200
response.content = '{"data": [{"foo": true}, {"baz": false}]}'.encode()
supervisor = DataConverterSupervisor(None, None)
self.create_run(fail_on='_convert_data')
transitions = self.data_converter.expose_transition_states(supervisor)
expected = [CREATED, INITIALIZED, STATE_RESTORED, PENDING_WORK_CHECKED, ABORTED]
self.assertTrue(self.data_converter.pending_work)
self.assertIsNone(self.data_converter.check_result)
self.assertIsNotNone(self.data_converter.state['error'])
self.assertTrue(self.data_converter.finished_execution())
self.assertFalse(self.data_converter.successful_execution())
self.assertIsNone(self.data_converter.state['converted_elements'])
self.assertIsNone(self.data_converter.state['inserted_elements'])
self.assertListEqual(expected, transitions)
@mock.patch('requests.get')
def test_abnormal_execution_failure_DATA_COLLECTED(self, mock_requests):
mock_requests.return_value = response = Mock()
response.status_code = 200
response.content = '{"data": [{"foo": true}, {"baz": false}]}'.encode()
supervisor = DataConverterSupervisor(None, None)
self.create_run(fail_on='_save_data', elements_to_convert=2, data_converted=2)
transitions = self.data_converter.expose_transition_states(supervisor)
expected = [CREATED, INITIALIZED, STATE_RESTORED, PENDING_WORK_CHECKED, DATA_CONVERTED, ABORTED]
self.assertTrue(self.data_converter.pending_work)
self.assertIsNone(self.data_converter.check_result)
self.assertIsNotNone(self.data_converter.state['error'])
self.assertTrue(self.data_converter.finished_execution())
self.assertFalse(self.data_converter.successful_execution())
self.assertEqual(2, self.data_converter.state['elements_to_convert'])
self.assertEqual(2, self.data_converter.state['converted_elements'])
self.assertIsNone(self.data_converter.state['inserted_elements'])
self.assertListEqual(expected, transitions)
@mock.patch('requests.get')
def test_abnormal_execution_failure_DATA_SAVED(self, mock_requests):
mock_requests.return_value = response = Mock()
response.status_code = 200
response.content = '{"data": [{"foo": true}, {"baz": false}]}'.encode()
supervisor = DataConverterSupervisor(None, None)
self.create_run(fail_on='_check_execution', elements_to_convert=2, data_converted=2, data_inserted=2)
transitions = self.data_converter.expose_transition_states(supervisor)
expected = [CREATED, INITIALIZED, STATE_RESTORED, PENDING_WORK_CHECKED, DATA_CONVERTED, DATA_SAVED, ABORTED]
self.assertTrue(self.data_converter.pending_work)
self.assertTrue(self.data_converter.check_result)
self.assertIsNotNone(self.data_converter.state['error'])
self.assertTrue(self.data_converter.finished_execution())
self.assertFalse(self.data_converter.successful_execution())
self.assertEqual(2, self.data_converter.state['elements_to_convert'])
self.assertEqual(2, self.data_converter.state['converted_elements'])
self.assertEqual(2, self.data_converter.state['inserted_elements'])
self.assertListEqual(expected, transitions)
@mock.patch('requests.get')
def test_abnormal_execution_failure_EXECUTION_CHECKED(self, mock_requests):
mock_requests.return_value = response = Mock()
response.status_code = 200
response.content = '{"data": [{"foo": true}, {"baz": false}]}'.encode()
supervisor = DataConverterSupervisor(None, None)
self.create_run(fail_on='_save_state', elements_to_convert=2, data_converted=2, data_inserted=2)
transitions = self.data_converter.expose_transition_states(supervisor)
expected = [CREATED, INITIALIZED, STATE_RESTORED, PENDING_WORK_CHECKED, DATA_CONVERTED, DATA_SAVED,
EXECUTION_CHECKED, ABORTED]
self.assertTrue(self.data_converter.pending_work)
self.assertTrue(self.data_converter.check_result)
self.assertIsNotNone(self.data_converter.state['error'])
self.assertTrue(self.data_converter.finished_execution())
self.assertFalse(self.data_converter.successful_execution())
self.assertEqual(2, self.data_converter.state['elements_to_convert'])
self.assertEqual(2, self.data_converter.state['converted_elements'])
self.assertEqual(2, self.data_converter.state['inserted_elements'])
self.assertListEqual(expected, transitions)
def test_dependencies_unsatisfied(self):
supervisor = DataConverterSupervisor(None, None)
self.create_run(dependencies_satisfied=False)
transitions = self.data_converter.expose_transition_states(supervisor)
expected = [CREATED, INITIALIZED, STATE_RESTORED, PENDING_WORK_CHECKED, DATA_CONVERTED, DATA_SAVED,
EXECUTION_CHECKED, STATE_SAVED, FINISHED]
self.assertTrue(self.data_converter.pending_work)
self.assertTrue(self.data_converter.check_result)
self.assertIsNone(self.data_converter.state['error'])
self.assertTrue(self.data_converter.finished_execution())
self.assertTrue(self.data_converter.successful_execution())
self.assertFalse(self.data_converter.dependencies_satisfied)
self.assertIsNone(self.data_converter.state['elements_to_convert'])
self.assertIsNone(self.data_converter.state['converted_elements'])
self.assertIsNone(self.data_converter.state['inserted_elements'])
self.assertListEqual(expected, transitions)
self.assertDictEqual(DEPENDENCIES_UNSATISFIED_UPDATE_FREQUENCY, self.data_converter.state['update_frequency'])
@mock.patch('requests.get', Mock(side_effect=Timeout('Test error to verify requests timeout. This is OK.')))
def test_requests_timed_out(self):
supervisor = DataConverterSupervisor(None, None)
self.create_run(dependencies_satisfied=True)
transitions = self.data_converter.expose_transition_states(supervisor)
expected = [CREATED, INITIALIZED, STATE_RESTORED, PENDING_WORK_CHECKED, ABORTED]
self.assertTrue(self.data_converter.pending_work)
self.assertIsNotNone(self.data_converter.state['error'])
self.assertTrue(self.data_converter.finished_execution())
self.assertFalse(self.data_converter.successful_execution())
self.assertTrue(self.data_converter.dependencies_satisfied)
self.assertIsNone(self.data_converter.state['elements_to_convert'])
self.assertIsNone(self.data_converter.state['converted_elements'])
self.assertIsNone(self.data_converter.state['inserted_elements'])
self.assertListEqual(expected, transitions)
@mock.patch('requests.get')
def test_API_responses_unparseable_json_content_error_code(self, mock_requests):
mock_requests.return_value = response = Mock()
response.status_code = 500
response.content = '<html><h1>Internal Error</h1></html>'.encode()
supervisor = DataConverterSupervisor(None, None)
self.create_run(dependencies_satisfied=True)
transitions = self.data_converter.expose_transition_states(supervisor)
expected = [CREATED, INITIALIZED, STATE_RESTORED, PENDING_WORK_CHECKED, ABORTED]
self.assertTrue(self.data_converter.pending_work)
self.assertFalse(self.data_converter.check_result)
self.assertIsNotNone(self.data_converter.state['error'])
self.assertTrue(self.data_converter.finished_execution())
self.assertFalse(self.data_converter.successful_execution())
self.assertTrue(self.data_converter.dependencies_satisfied)
self.assertIsNone(self.data_converter.state['elements_to_convert'])
self.assertIsNone(self.data_converter.state['converted_elements'])
self.assertIsNone(self.data_converter.state['inserted_elements'])
self.assertListEqual(expected, transitions)
@mock.patch('requests.get')
def test_API_responses_unparseable_content_at_all_error_code(self, mock_requests):
mock_requests.return_value = response = Mock()
response.status_code = 500
response.content = '\xf9291\x32'.encode()
supervisor = DataConverterSupervisor(None, None)
self.create_run(dependencies_satisfied=True)
transitions = self.data_converter.expose_transition_states(supervisor)
expected = [CREATED, INITIALIZED, STATE_RESTORED, PENDING_WORK_CHECKED, ABORTED]
self.assertTrue(self.data_converter.pending_work)
self.assertFalse(self.data_converter.check_result)
self.assertIsNotNone(self.data_converter.state['error'])
self.assertTrue(self.data_converter.finished_execution())
self.assertFalse(self.data_converter.successful_execution())
self.assertTrue(self.data_converter.dependencies_satisfied)
self.assertIsNone(self.data_converter.state['elements_to_convert'])
self.assertIsNone(self.data_converter.state['converted_elements'])
self.assertIsNone(self.data_converter.state['inserted_elements'])
self.assertListEqual(expected, transitions)
@mock.patch('requests.get')
def test_correct_data_conversion_no_data_to_convert(self, mock_requests):
mock_requests.return_value = response = Mock()
response.status_code = 200
response.content = '{"data": []}'.encode()
supervisor = DataConverterSupervisor(None, None)
self.create_run(dependencies_satisfied=True)
transitions = self.data_converter.expose_transition_states(supervisor)
expected = [CREATED, INITIALIZED, STATE_RESTORED, PENDING_WORK_CHECKED, DATA_CONVERTED, DATA_SAVED,
EXECUTION_CHECKED, STATE_SAVED, FINISHED]
self.assertTrue(self.data_converter.pending_work)
self.assertTrue(self.data_converter.check_result)
self.assertIsNone(self.data_converter.state['error'])
self.assertTrue(self.data_converter.finished_execution())
self.assertTrue(self.data_converter.successful_execution())
self.assertTrue(self.data_converter.advisedly_no_data_converted)
self.assertIsNone(self.data_converter.state['elements_to_convert'])
self.assertIsNone(self.data_converter.state['converted_elements'])
self.assertIsNone(self.data_converter.state['inserted_elements'])
self.assertListEqual(expected, transitions)
self.assertDictEqual(MIN_UPDATE_FREQUENCY, self.data_converter.state['update_frequency'])
@mock.patch('requests.get')
def test_correct_data_conversion_data_to_convert_last_data(self, mock_requests):
mock_requests.return_value = response = Mock()
response.status_code = 200
response.content = '{"data": [{"foo": true}, {"baz": false}]}'.encode()
supervisor = DataConverterSupervisor(None, None)
self.create_run(dependencies_satisfied=True, elements_to_convert=2, data_converted=2, data_inserted=2)
transitions = self.data_converter.expose_transition_states(supervisor)
expected = [CREATED, INITIALIZED, STATE_RESTORED, PENDING_WORK_CHECKED, DATA_CONVERTED, DATA_SAVED,
EXECUTION_CHECKED, STATE_SAVED, FINISHED]
self.assertTrue(self.data_converter.pending_work)
self.assertTrue(self.data_converter.check_result)
self.assertIsNone(self.data_converter.state['error'])
self.assertTrue(self.data_converter.finished_execution())
self.assertTrue(self.data_converter.successful_execution())
self.assertEqual(2, self.data_converter.state['start_index'])
self.assertEqual(2, self.data_converter.state['elements_to_convert'])
self.assertEqual(2, self.data_converter.state['converted_elements'])
self.assertEqual(2, self.data_converter.state['inserted_elements'])
self.assertListEqual(expected, transitions)
self.assertDictEqual(MAX_UPDATE_FREQUENCY, self.data_converter.state['update_frequency'])
@mock.patch('requests.get')
def test_correct_data_conversion_data_to_convert_more_data(self, mock_requests):
mock_requests.return_value = response = Mock()
response.status_code = 200
response.content.decode = Mock(side_effect=['{"data": [{"foo": true}, {"baz": false}], "next_start_index": 2}',
'{"data": [{"foo": true}, {"baz": false}], "next_start_index": 4}',
'{"data": [{"foo": true}, {"baz": false}], "next_start_index": 6}',
'{"data": [{"foo": true}, {"baz": false}], "next_start_index": 8}'])
supervisor = DataConverterSupervisor(None, None)
self.create_run(dependencies_satisfied=True, elements_to_convert=8, data_converted=8, data_inserted=8)
transitions = self.data_converter.expose_transition_states(supervisor)
expected = [CREATED, INITIALIZED, STATE_RESTORED, PENDING_WORK_CHECKED, DATA_CONVERTED, DATA_SAVED,
EXECUTION_CHECKED, STATE_SAVED, FINISHED]
self.assertTrue(self.data_converter.pending_work)
self.assertTrue(self.data_converter.check_result)
self.assertIsNone(self.data_converter.state['error'])
self.assertTrue(self.data_converter.finished_execution())
self.assertTrue(self.data_converter.successful_execution())
self.assertEqual(8, self.data_converter.state['start_index'])
self.assertEqual(8, self.data_converter.state['elements_to_convert'])
self.assertEqual(8, self.data_converter.state['converted_elements'])
self.assertEqual(8, self.data_converter.state['inserted_elements'])
self.assertListEqual(expected, transitions)
self.assertDictEqual(DATA_COLLECTION_MIN_UPDATE_FREQUENCY, self.data_converter.state['update_frequency'])
@mock.patch('requests.get')
@mock.patch('data_conversion_subsystem.data_converter.data_converter.get_config', Mock(return_value=CONFIG))
def test_exponential_backoff_mechanism(self, mock_requests):
mock_requests.return_value = response = Mock()
response.status_code = 200
response.content = '{"data": [{"foo": true}, {"baz": false}]}'.encode()
supervisor = DataConverterSupervisor(None, None)
dc = SimpleDataConverter(fail_on='_restore_state')
dc.run()
# Checking exponential backoff update, and error saving
self.assertIsNone(dc.state['error'])
dc = SimpleDataConverter(fail_on='_convert_data')
dc.run()
self.assertIsNotNone(dc.state['error'])
dc.execute_actions(EXECUTION_CHECKED, supervisor)
self.assertEqual('Exception', dc.state['error'])
self.assertDictEqual({'Exception': 1}, dc.state['errors'])
exponential_backoff_value = dc.state['backoff_time']['value']
config = dc.config
dc2 = SimpleDataConverter(fail_on='_save_data')
dc2.run()
dc2.config = config
dc2.execute_actions(EXECUTION_CHECKED, supervisor)
self.assertGreater(dc2.state['backoff_time']['value'], exponential_backoff_value) # Next exponential backoff
self.assertEqual('Exception', dc2.state['error'])
self.assertDictEqual({'Exception': 2}, dc2.state['errors'])
exponential_backoff_value = dc2.state['backoff_time']['value']
config = dc2.config
dc3 = SimpleDataConverter(fail_on='_save_data')
dc3.run()
dc3.config = config
dc3.state['error'] = {'class': 'ValueError', 'message': 'Fail!'}
dc3.execute_actions(EXECUTION_CHECKED, supervisor)
self.assertLess(dc3.state['backoff_time']['value'], exponential_backoff_value) # Exponential backoff restarted
self.assertEqual('ValueError', dc3.state['error'])
self.assertDictEqual({'Exception': 2, 'ValueError': 1}, dc3.state['errors'])
# Checking a module with restart_required and big exponential_backoff won't work
self.data_converter = SimpleDataConverter(pending_work=True, restart_required=True, backoff_time=2000)
self.data_converter.run()
transitions = self.data_converter.expose_transition_states(supervisor)
expected = [CREATED, INITIALIZED, STATE_RESTORED, PENDING_WORK_CHECKED, EXECUTION_CHECKED, STATE_SAVED,
FINISHED]
self.assertListEqual(expected, transitions)
# Checking a module with restart_required and short exponential_backoff will work as usual
self.data_converter = SimpleDataConverter(pending_work=True, restart_required=True, backoff_time=0,
data_converted=1, data_inserted=1)
self.data_converter.run()
transitions = self.data_converter.expose_transition_states(supervisor)
expected = [CREATED, INITIALIZED, STATE_RESTORED, PENDING_WORK_CHECKED, DATA_CONVERTED, DATA_SAVED,
EXECUTION_CHECKED, STATE_SAVED, FINISHED]
self.assertListEqual(expected, transitions)
# Checking a module with big exponential backoff and successful execution restarts backoff.
self.data_converter = SimpleDataConverter(pending_work=True, restart_required=False, backoff_time=2000,
data_converted=1, data_inserted=1)
self.data_converter.run()
self.assertTrue(self.data_converter.successful_execution())
self.assertDictEqual(data_converter.MIN_BACKOFF, self.data_converter.state['backoff_time'])
| [] | [] | [] | [] | [] | python | 0 | 0 | |
main.go | package main
import (
"crypto/tls"
"encoding/json"
"flag"
"fmt"
"log"
"os"
"strconv"
"github.com/jle35/proxmox-api-go/proxmox"
)
func main() {
var insecure *bool
insecure = flag.Bool("insecure", false, "TLS insecure mode")
proxmox.Debug = flag.Bool("debug", false, "debug mode")
taskTimeout := flag.Int("timeout", 300, "api task timeout in seconds")
fvmid := flag.Int("vmid", -1, "custom vmid (instead of auto)")
flag.Parse()
tlsconf := &tls.Config{InsecureSkipVerify: true}
if !*insecure {
tlsconf = nil
}
c, _ := proxmox.NewClient(os.Getenv("PM_API_URL"), nil, tlsconf, *taskTimeout)
err := c.Login(os.Getenv("PM_USER"), os.Getenv("PM_PASS"), os.Getenv("PM_OTP"))
if err != nil {
log.Fatal(err)
}
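// Use the explicit -vmid flag when given; otherwise fall back to the last
// command line argument (0 when it is not numeric or for "idstatus").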
vmid := *fvmid
if vmid < 0 {
if len(flag.Args()) > 1 {
vmid, err = strconv.Atoi(flag.Args()[len(flag.Args())-1])
if err != nil {
vmid = 0
}
} else if len(flag.Args()) > 0 && flag.Args()[0] == "idstatus" {
vmid = 0
}
}
var jbody interface{}
var vmr *proxmox.VmRef
if len(flag.Args()) == 0 {
fmt.Printf("Missing action, try start|stop vmid\n")
os.Exit(0)
}
switch flag.Args()[0] {
case "start":
vmr = proxmox.NewVmRef(vmid)
jbody, _ = c.StartVm(vmr)
case "stop":
vmr = proxmox.NewVmRef(vmid)
jbody, _ = c.StopVm(vmr)
case "destroy":
vmr = proxmox.NewVmRef(vmid)
jbody, err = c.StopVm(vmr)
failError(err)
jbody, _ = c.DeleteVm(vmr)
case "getConfig":
vmr = proxmox.NewVmRef(vmid)
c.CheckVmRef(vmr)
vmType := vmr.GetVmType()
var config interface{}
var err error
if vmType == "qemu" {
config, err = proxmox.NewConfigQemuFromApi(vmr, c)
} else if vmType == "lxc" {
config, err = proxmox.NewConfigLxcFromApi(vmr, c)
}
failError(err)
cj, err := json.MarshalIndent(config, "", " ")
log.Println(string(cj))
case "getNetworkInterfaces":
vmr = proxmox.NewVmRef(vmid)
c.CheckVmRef(vmr)
networkInterfaces, err := c.GetVmAgentNetworkInterfaces(vmr)
failError(err)
networkInterfaceJson, err := json.Marshal(networkInterfaces)
fmt.Println(string(networkInterfaceJson))
case "createQemu":
config, err := proxmox.NewConfigQemuFromJson(os.Stdin)
failError(err)
vmr = proxmox.NewVmRef(vmid)
vmr.SetNode(flag.Args()[2])
failError(config.CreateVm(vmr, c))
log.Println("Complete")
case "createLxc":
config, err := proxmox.NewConfigLxcFromJson(os.Stdin)
failError(err)
vmr = proxmox.NewVmRef(vmid)
vmr.SetNode(flag.Args()[2])
failError(config.CreateLxc(vmr, c))
log.Println("Complete")
case "installQemu":
config, err := proxmox.NewConfigQemuFromJson(os.Stdin)
failError(err)
if vmid > 0 {
vmr = proxmox.NewVmRef(vmid)
} else {
nextid, err := c.GetNextID(0)
failError(err)
vmr = proxmox.NewVmRef(nextid)
}
vmr.SetNode(flag.Args()[1])
log.Print("Creating node: ")
log.Println(vmr)
failError(config.CreateVm(vmr, c))
_, err = c.StartVm(vmr)
failError(err)
sshPort, err := proxmox.SshForwardUsernet(vmr, c)
failError(err)
log.Println("Waiting for CDRom install shutdown (at least 5 minutes)")
failError(proxmox.WaitForShutdown(vmr, c))
log.Println("Restarting")
_, err = c.StartVm(vmr)
failError(err)
sshPort, err = proxmox.SshForwardUsernet(vmr, c)
failError(err)
log.Println("SSH Portforward on:" + sshPort)
log.Println("Complete")
case "idstatus":
maxid, err := proxmox.MaxVmId(c)
failError(err)
nextid, err := c.GetNextID(vmid)
failError(err)
log.Println("---")
log.Printf("MaxID: %d\n", maxid)
log.Printf("NextID: %d\n", nextid)
log.Println("---")
case "cloneQemu":
config, err := proxmox.NewConfigQemuFromJson(os.Stdin)
failError(err)
log.Println("Looking for template: " + flag.Args()[1])
sourceVmr, err := c.GetVmRefByName(flag.Args()[1])
failError(err)
if sourceVmr == nil {
log.Fatal("Can't find template")
return
}
if vmid == 0 {
vmid, err = c.GetNextID(0)
failError(err)
}
vmr = proxmox.NewVmRef(vmid)
vmr.SetNode(flag.Args()[2])
log.Print("Creating node: ")
log.Println(vmr)
failError(config.CloneVm(sourceVmr, vmr, c))
failError(config.UpdateConfig(vmr, c))
log.Println("Complete")
case "rollbackQemu":
vmr = proxmox.NewVmRef(vmid)
jbody, err = c.RollbackQemuVm(vmr, flag.Args()[2])
failError(err)
case "sshforward":
vmr = proxmox.NewVmRef(vmid)
sshPort, err := proxmox.SshForwardUsernet(vmr, c)
failError(err)
log.Println("SSH Portforward on:" + sshPort)
case "sshbackward":
vmr = proxmox.NewVmRef(vmid)
err = proxmox.RemoveSshForwardUsernet(vmr, c)
failError(err)
log.Println("SSH Portforward off")
case "sendstring":
vmr = proxmox.NewVmRef(vmid)
err = proxmox.SendKeysString(vmr, c, flag.Args()[2])
failError(err)
log.Println("Keys sent")
case "nextid":
id, err := c.NextId()
failError(err)
log.Printf("Getting Next Free ID: %d\n", id)
case "checkid":
i, err := strconv.Atoi(flag.Args()[1])
failError(err)
id, err := c.VMIdExists(i)
failError(err)
log.Printf("Selected ID is free: %d\n", id)
case "migrate":
vmr := proxmox.NewVmRef(vmid)
c.GetVmInfo(vmr)
args := flag.Args()
if len(args) <= 1 {
fmt.Printf("Missing target node\n")
os.Exit(1)
}
_, err := c.MigrateNode(vmr, args[1], true)
if err != nil {
log.Printf("Error to move %+v\n", err)
os.Exit(1)
}
log.Printf("VM %d is moved on %s\n", vmid, args[1])
default:
fmt.Printf("unknown action, try start|stop vmid\n")
}
if jbody != nil {
log.Println(jbody)
}
//log.Println(vmr)
}
func failError(err error) {
if err != nil {
log.Fatal(err)
}
return
}
| [
"\"PM_API_URL\"",
"\"PM_USER\"",
"\"PM_PASS\"",
"\"PM_OTP\""
] | [] | [
"PM_PASS",
"PM_USER",
"PM_OTP",
"PM_API_URL"
] | [] | ["PM_PASS", "PM_USER", "PM_OTP", "PM_API_URL"] | go | 4 | 0 | |
run_lualatex.py | #!/usr/bin/env python
import glob
import os
import subprocess
import sys
# Walk through all externals. If they start with the special prefix
# texlive_{extra,texmf}__ prefix, it means they should be part of the
# texmf directory. LaTeX utilities don't seem to like the use of
# symlinks, so move the externals into the texmf directory.
#
# Externals that do not start with the special prefix should be added to
# TEXINPUTS, so that inclusions of external resources works.
texinputs = [""] + glob.glob("bazel-out/*/bin")
for external in sorted(os.listdir("external")):
src = os.path.abspath(os.path.join("external", external))
if external.startswith("texlive_extra__") or external.startswith("texlive_texmf__"):
dst = os.path.join("texmf", "/".join(external.split("__")[1:]))
try:
os.makedirs(os.path.dirname(dst))
except OSError:
pass
os.rename(src, dst)
else:
texinputs.append(src)
(
kpsewhich_file,
luatex_file,
latexrun_file,
job_name,
main_file,
output_file,
) = sys.argv[1:]
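# Point the TeX toolchain at the assembled texmf tree and the collected
# external inputs; SOURCE_DATE_EPOCH=0 keeps the generated PDF reproducible.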
env = dict(os.environ)
env["OPENTYPEFONTS"] = ":".join(texinputs)
env["PATH"] = "%s:%s" % (os.path.abspath("bin"), env["PATH"])
env["SOURCE_DATE_EPOCH"] = "0"
env["TEXINPUTS"] = ":".join(texinputs)
env["TEXMF"] = os.path.abspath("texmf/texmf-dist")
env["TEXMFCNF"] = os.path.abspath("texmf/texmf-dist/web2c")
env["TEXMFROOT"] = os.path.abspath("texmf")
env["TTFONTS"] = ":".join(texinputs)
os.mkdir("bin")
os.link(kpsewhich_file, "bin/kpsewhich")
os.link(luatex_file, "bin/lualatex")
os.link(luatex_file, "bin/luatex")
os.link("texmf/texmf-dist/scripts/texlive/fmtutil.pl", "bin/mktexfmt")
return_code = subprocess.call(
args=[
latexrun_file,
"--latex-args=-jobname=" + job_name,
"--latex-cmd=lualatex",
"-Wall",
main_file,
],
env=env,
)
if return_code != 0:
sys.exit(return_code)
os.rename(job_name + ".pdf", output_file)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
config/db.go | package config
import (
"database/sql"
_ "github.com/go-sql-driver/mysql"
"log"
"os"
)
var DB *sql.DB
func init() {
	// Set Variables
dbName := os.Getenv("dbName")
dbPass := os.Getenv("dbPass")
dbHost := os.Getenv("dbHost")
dbUser := os.Getenv("dbUser")
var err error
	// Open DB Connection
dsn := dbUser + ":" + dbPass + "@tcp(" + dbHost + ":3306)/" + dbName
DB, err = sql.Open("mysql", dsn)
if err != nil {
log.Fatalln(err)
}
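	// sql.Open only validates the DSN; Ping actually connects and verifies the credentials.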
err = DB.Ping()
if err != nil {
log.Fatalln(err)
}
}
| [
"\"dbName\"",
"\"dbPass\"",
"\"dbHost\"",
"\"dbUser\""
] | [] | [
"dbUser",
"dbName",
"dbPass",
"dbHost"
] | [] | ["dbUser", "dbName", "dbPass", "dbHost"] | go | 4 | 0 | |
inference/models/encoders/ordered_memory.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
class Distribution(nn.Module):
def __init__(self, nslot, hidden_size, dropout):
super(Distribution, self).__init__()
self.query = nn.Sequential(
nn.Dropout(dropout),
nn.Linear(hidden_size, hidden_size),
nn.LayerNorm(hidden_size),
)
self.key = nn.Sequential(
nn.Dropout(dropout),
nn.Linear(hidden_size, hidden_size),
nn.LayerNorm(hidden_size),
)
self.beta = nn.Sequential(
nn.ReLU(),
nn.Dropout(dropout),
nn.Linear(hidden_size, 1),
)
self.hidden_size = hidden_size
def init_p(self, bsz, nslot):
return None
@staticmethod
def process_softmax(beta, prev_p):
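        # Softmax over memory slots, masked by the previous step's cumulative distribution;
        # returns the cumulative (cp), reverse-cumulative (rcp) and per-slot (p) probabilities.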
if prev_p is None:
return torch.zeros_like(beta), torch.ones_like(beta), torch.zeros_like(beta)
beta_normalized = beta - beta.max(dim=-1)[0][:, None]
x = torch.exp(beta_normalized)
prev_cp = torch.cumsum(prev_p, dim=1)
mask = prev_cp[:, 1:]
mask = mask.masked_fill(mask < 1e-5, 0.)
mask = F.pad(mask, (0, 1), value=1)
x_masked = x * mask
p = F.normalize(x_masked, p=1)
cp = torch.cumsum(p, dim=1)
rcp = torch.cumsum(p.flip([1]), dim=1).flip([1])
return cp, rcp, p
def forward(self, in_val, prev_out_M, prev_p):
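        # Additive attention between the projected input (query) and each memory slot (key),
        # scaled by sqrt(hidden_size) before the masked softmax.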
query = self.query(in_val)
key = self.key(prev_out_M)
beta = self.beta(query[:, None, :] + key).squeeze(dim=2)
beta = beta / math.sqrt(self.hidden_size)
cp, rcp, p = self.process_softmax(beta, prev_p)
return cp, rcp, p
class Cell(nn.Module):
def __init__(self, hidden_size, dropout, activation=None):
super(Cell, self).__init__()
self.hidden_size = hidden_size
self.cell_hidden_size = 4 * hidden_size
self.input_t = nn.Sequential(
nn.Dropout(dropout),
nn.Linear(hidden_size * 2, self.cell_hidden_size),
nn.ReLU(),
nn.Dropout(dropout),
nn.Linear(self.cell_hidden_size, hidden_size * 4),
)
self.gates = nn.Sequential(
nn.Sigmoid(),
)
assert activation is not None
self.activation = activation
self.drop = nn.Dropout(dropout)
def forward(self, vi, hi):
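        # Gated combination of the input value (vi), the slot content (hi) and a fresh
        # candidate cell state, followed by the shared LayerNorm activation.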
input = torch.cat([vi, hi], dim=-1)
g_input, cell = self.input_t(input).split(
(self.hidden_size * 3, self.hidden_size),
dim=-1
)
gates = self.gates(g_input)
vg, hg, cg = gates.chunk(3, dim=1)
output = self.activation(vg * vi + hg * hi + cg * cell)
return output
class OrderedMemoryRecurrent(nn.Module):
def __init__(self, input_size, slot_size, nslot,
dropout=0.2, dropoutm=0.2):
super(OrderedMemoryRecurrent, self).__init__()
self.activation = nn.LayerNorm(slot_size)
self.input_projection = nn.Sequential(
nn.Linear(input_size, slot_size),
self.activation
)
self.distribution = Distribution(nslot, slot_size, dropoutm)
self.cell = Cell(slot_size, dropout, activation=self.activation)
self.nslot = nslot
self.slot_size = slot_size
self.input_size = input_size
def init_hidden(self, bsz):
weight = next(self.parameters()).data
zeros = weight.new(bsz, self.nslot, self.slot_size).zero_()
p = self.distribution.init_p(bsz, self.nslot)
return (zeros, zeros, p)
def omr_step(self, in_val, prev_M, prev_out_M, prev_p):
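        # One time step of the Ordered Memory update: commit the previous candidate memory
        # into slots selected by rcp, then rebuild candidate slots bottom-up, gating each
        # cell by the cumulative write probability cp.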
batch_size, nslot, slot_size = prev_M.size()
_batch_size, slot_size = in_val.size()
assert self.slot_size == slot_size
assert self.nslot == nslot
assert batch_size == _batch_size
cp, rcp, p = self.distribution(in_val, prev_out_M, prev_p)
curr_M = prev_M * (1 - rcp)[:, :, None] + prev_out_M * rcp[:, :, None]
M_list = []
h = in_val
for i in range(nslot):
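            # Skip the cell when no example in the batch can write at this slot
            # (cp for the next slot is all zero); the top slot is always computed.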
if i == nslot - 1 or cp[:, i + 1].max() > 0:
h = self.cell(h, curr_M[:, i, :])
h = in_val * (1 - cp)[:, i, None] + h * cp[:, i, None]
M_list.append(h)
out_M = torch.stack(M_list, dim=1)
output = out_M[:, -1]
return output, curr_M, out_M, p
def forward(self, X, hidden, mask=None):
prev_M, prev_memory_output, prev_p = hidden
output_list = []
p_list = []
X_projected = self.input_projection(X)
if mask is not None:
padded = ~mask
for t in range(X_projected.size(0)):
output, prev_M, prev_memory_output, prev_p = self.omr_step(
X_projected[t], prev_M, prev_memory_output, prev_p)
if mask is not None:
padded_1 = padded[t, :, None]
padded_2 = padded[t, :, None, None]
output = output.masked_fill(padded_1, 0.)
prev_p = prev_p.masked_fill(padded_1, 0.)
prev_M = prev_M.masked_fill(padded_2, 0.)
prev_memory_output = prev_memory_output.masked_fill(padded_2, 0.)
output_list.append(output)
p_list.append(prev_p)
output = torch.stack(output_list)
probs = torch.stack(p_list)
return (output,
probs,
(prev_M, prev_memory_output, prev_p))
class ordered_memory(nn.Module):
def __init__(self, config):
super(ordered_memory, self).__init__()
input_size = config["hidden_size"]
slot_size = config["hidden_size"]
nslot = config["memory_slots"]
if not self.training:
if "double_slots_during_val" in config:
if config["double_slots_during_val"]:
nslot = 2 * nslot
dropoutm = config["memory_dropout"]
dropout = config["dropout"]
bidirection = config["bidirection"]
self.output_last = config["output_last"]
self.OM_forward = OrderedMemoryRecurrent(input_size, slot_size, nslot,
dropout=dropout, dropoutm=dropoutm)
if bidirection:
self.OM_backward = OrderedMemoryRecurrent(input_size, slot_size, nslot,
dropout=dropout, dropoutm=dropoutm)
self.bidirection = bidirection
def init_hidden(self, bsz):
return self.OM_forward.init_hidden(bsz)
def forward(self, X, mask, **kwargs):
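        # Inputs arrive batch-first; the recurrence below expects (seq_len, batch, hidden).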
X = X.permute(1, 0, 2).contiguous()
mask = mask.permute(1, 0).contiguous()
mask = mask.bool()
output_last = self.output_last
bsz = X.size(1)
lengths = mask.sum(0)
init_hidden = self.init_hidden(bsz)
output_list = []
prob_list = []
om_output_forward, prob_forward, _ = self.OM_forward(X, init_hidden, mask)
if output_last:
output_list.append(om_output_forward[-1])
else:
output_list.append(om_output_forward[lengths - 1, torch.arange(bsz).long()])
prob_list.append(prob_forward)
if self.bidirection:
om_output_backward, prob_backward, _ = self.OM_backward(X.flip([0]), init_hidden, mask.flip([0]))
output_list.append(om_output_backward[-1])
prob_list.append(prob_backward.flip([0]))
output = torch.cat(output_list, dim=-1)
self.probs = prob_list[0]
return {"global_state": output, "sequence": output}
| [] | [] | [] | [] | [] | python | null | null | null |
attacks/momentum_iterative_attack.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from advertorch.context import ctx_noparamgrad_and_eval
from advertorch.attacks import LinfMomentumIterativeAttack, L2MomentumIterativeAttack
import argparse
import os
import ipdb
import json
import sys
import wandb  # used for logging when the --wandb flag is enabled
from __init__ import data_and_model_setup, load_data, eval
sys.path.append("..") # Adds higher directory to python modules path.
from utils.utils import create_loaders, load_unk_model, test_classifier
from classifiers import load_all_classifiers, load_list_classifiers, load_dict_classifiers
from cnn_models import LeNet as Net
from cnn_models import ResNet18
from cnn_models.mnist_ensemble_adv_train_models import *
from eval import baseline_transfer, baseline_eval_classifier
from defenses.ensemble_adver_train_mnist import *
def batch(iterable, n=1):
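    # Yield successive chunks of size n from iterable.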
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx:min(ndx + n, l)]
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='cifar')
parser.add_argument('--start', type=int, default=0)
parser.add_argument('--end', type=int, default=100)
parser.add_argument('--n_iter', type=int, default=1000)
parser.add_argument('--query_step', type=int, default=1)
parser.add_argument('--transfer', action='store_true')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--sweep', action='store_true')
parser.add_argument("--wandb", action="store_true", default=False, help='Use wandb for logging')
parser.add_argument('--ensemble_adv_trained', action='store_true')
parser.add_argument('--noise', type=float, default=0.3)
parser.add_argument('--batch_size', type=int, default=256, metavar='S')
parser.add_argument('--test_batch_size', type=int, default=32, metavar='S')
parser.add_argument('--train_set', default='test',
choices=['train_and_test','test','train'],
help='add the test set in the training set')
parser.add_argument('--modelIn', type=str,
default='../pretrained_classifiers/cifar/res18/model_0.pt')
parser.add_argument('--robust_model_path', type=str,
default="../madry_challenge_models/mnist/adv_trained/mnist_lenet5_advtrained.pt")
parser.add_argument('--dir_test_models', type=str,
default="../",
help="The path to the directory containing the classifier models for evaluation.")
parser.add_argument("--max_test_model", type=int, default=2,
help="The maximum number of pretrained classifiers to use for testing.")
parser.add_argument('--train_on_madry', default=False, action='store_true',
help='Train using Madry tf grad')
parser.add_argument('--train_on_list', default=False, action='store_true',
help='train on a list of classifiers')
parser.add_argument('--attack_ball', type=str, default="Linf",
choices= ['L2','Linf'])
parser.add_argument('--source_arch', default="res18",
help="The architecture we want to attack on CIFAR.")
parser.add_argument('--target_arch', default=None,
help="The architecture we want to blackbox transfer to on CIFAR.")
parser.add_argument('--split', type=int, default=None,
help="Which subsplit to use.")
parser.add_argument('--epsilon', type=float, default=0.1, metavar='M',
help='Epsilon for Delta (default: 0.1)')
parser.add_argument('--num_test_samples', default=None, type=int,
help="The number of samples used to train and test the attacker.")
parser.add_argument('--train_with_critic_path', type=str, default=None,
help='Train generator with saved critic model')
parser.add_argument('--model', help='path to model')
parser.add_argument('--adv_models', nargs='*', help='path to adv model(s)')
parser.add_argument('--type', type=int, default=0, help='Model type (default: 0)')
parser.add_argument('--namestr', type=str, default='NoBox', \
help='additional info in output filename to describe experiments')
args = parser.parse_args()
args.dev = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
train_loader, test_loader, split_train_loader, split_test_loader = create_loaders(args,
root='../data', split=args.split)
if args.split is not None:
train_loader = split_train_loader
test_loader = split_test_loader
if os.path.isfile("../settings.json"):
with open('../settings.json') as f:
data = json.load(f)
args.wandb_apikey = data.get("wandbapikey")
if args.wandb:
os.environ['WANDB_API_KEY'] = args.wandb_apikey
wandb.init(project='NoBox-sweeps', name='MI-Attack-{}'.format(args.dataset))
model, adv_models, l_test_classif_paths, model_type = data_and_model_setup(args)
model.to(args.dev)
model.eval()
print("Testing on %d Test Classifiers with Source Model %s" %(len(l_test_classif_paths), args.source_arch))
if args.attack_ball == 'Linf':
attacker = LinfMomentumIterativeAttack(model,
loss_fn=nn.CrossEntropyLoss(reduction="sum"),
eps=args.epsilon,
nb_iter=args.n_iter,
decay_factor=1., eps_iter=0.01,
clip_min=0., clip_max=1.,
targeted=False)
elif args.attack_ball == 'L2':
attacker = L2MomentumIterativeAttack(model,
loss_fn=nn.CrossEntropyLoss(reduction="sum"),
eps=args.epsilon,
nb_iter=args.n_iter,
decay_factor=1., eps_iter=0.01,
clip_min=0., clip_max=1.,
targeted=False)
else:
raise NotImplementedError
eval_helpers = [model, model_type, adv_models, l_test_classif_paths, test_loader]
total_fool_rate = eval(args, attacker, "MI-Attack", eval_helpers)
if __name__ == '__main__':
main()
| [] | [] | [
"WANDB_API_KEY"
] | [] | ["WANDB_API_KEY"] | python | 1 | 0 | |
parser_test.go | package swag
import (
"encoding/json"
goparser "go/parser"
"go/token"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
const defaultParseDepth = 100
const mainAPIFile = "main.go"
func TestNew(t *testing.T) {
swagMode = test
New()
}
func TestSetMarkdownFileDirectory(t *testing.T) {
expected := "docs/markdown"
p := New(SetMarkdownFileDirectory(expected))
assert.Equal(t, expected, p.markdownFileDir)
}
func TestSetCodeExamplesDirectory(t *testing.T) {
expected := "docs/examples"
p := New(SetCodeExamplesDirectory(expected))
assert.Equal(t, expected, p.codeExampleFilesDir)
}
func TestParser_ParseGeneralApiInfo(t *testing.T) {
expected := `{
"schemes": [
"http",
"https"
],
"swagger": "2.0",
"info": {
"description": "This is a sample server Petstore server.\nIt has a lot of beautiful features.",
"title": "Swagger Example API",
"termsOfService": "http://swagger.io/terms/",
"contact": {
"name": "API Support",
"url": "http://www.swagger.io/support",
"email": "[email protected]"
},
"license": {
"name": "Apache 2.0",
"url": "http://www.apache.org/licenses/LICENSE-2.0.html"
},
"version": "1.0",
"x-logo": {
"altText": "Petstore logo",
"backgroundColor": "#FFFFFF",
"url": "https://redocly.github.io/redoc/petstore-logo.png"
}
},
"host": "petstore.swagger.io",
"basePath": "/v2",
"paths": {},
"securityDefinitions": {
"ApiKeyAuth": {
"type": "apiKey",
"name": "Authorization",
"in": "header"
},
"BasicAuth": {
"type": "basic"
},
"OAuth2AccessCode": {
"type": "oauth2",
"flow": "accessCode",
"authorizationUrl": "https://example.com/oauth/authorize",
"tokenUrl": "https://example.com/oauth/token",
"scopes": {
"admin": " Grants read and write access to administrative information"
},
"x-tokenName": "id_token"
},
"OAuth2Application": {
"type": "oauth2",
"flow": "application",
"tokenUrl": "https://example.com/oauth/token",
"scopes": {
"admin": " Grants read and write access to administrative information",
"write": " Grants write access"
}
},
"OAuth2Implicit": {
"type": "oauth2",
"flow": "implicit",
"authorizationUrl": "https://example.com/oauth/authorize",
"scopes": {
"admin": " Grants read and write access to administrative information",
"write": " Grants write access"
}
},
"OAuth2Password": {
"type": "oauth2",
"flow": "password",
"tokenUrl": "https://example.com/oauth/token",
"scopes": {
"admin": " Grants read and write access to administrative information",
"read": " Grants read access",
"write": " Grants write access"
}
}
},
"x-google-endpoints": [
{
"allowCors": true,
"name": "name.endpoints.environment.cloud.goog"
}
],
"x-google-marks": "marks values"
}`
gopath := os.Getenv("GOPATH")
assert.NotNil(t, gopath)
p := New()
err := p.ParseGeneralAPIInfo("testdata/main.go")
assert.NoError(t, err)
b, _ := json.MarshalIndent(p.swagger, "", " ")
assert.Equal(t, expected, string(b))
}
func TestParser_ParseGeneralApiInfoTemplated(t *testing.T) {
expected := `{
"swagger": "2.0",
"info": {
"termsOfService": "http://swagger.io/terms/",
"contact": {
"name": "API Support",
"url": "http://www.swagger.io/support",
"email": "[email protected]"
},
"license": {
"name": "Apache 2.0",
"url": "http://www.apache.org/licenses/LICENSE-2.0.html"
}
},
"paths": {},
"securityDefinitions": {
"ApiKeyAuth": {
"type": "apiKey",
"name": "Authorization",
"in": "header"
},
"BasicAuth": {
"type": "basic"
},
"OAuth2AccessCode": {
"type": "oauth2",
"flow": "accessCode",
"authorizationUrl": "https://example.com/oauth/authorize",
"tokenUrl": "https://example.com/oauth/token",
"scopes": {
"admin": " Grants read and write access to administrative information"
}
},
"OAuth2Application": {
"type": "oauth2",
"flow": "application",
"tokenUrl": "https://example.com/oauth/token",
"scopes": {
"admin": " Grants read and write access to administrative information",
"write": " Grants write access"
}
},
"OAuth2Implicit": {
"type": "oauth2",
"flow": "implicit",
"authorizationUrl": "https://example.com/oauth/authorize",
"scopes": {
"admin": " Grants read and write access to administrative information",
"write": " Grants write access"
}
},
"OAuth2Password": {
"type": "oauth2",
"flow": "password",
"tokenUrl": "https://example.com/oauth/token",
"scopes": {
"admin": " Grants read and write access to administrative information",
"read": " Grants read access",
"write": " Grants write access"
}
}
},
"x-google-endpoints": [
{
"allowCors": true,
"name": "name.endpoints.environment.cloud.goog"
}
],
"x-google-marks": "marks values"
}`
gopath := os.Getenv("GOPATH")
assert.NotNil(t, gopath)
p := New()
err := p.ParseGeneralAPIInfo("testdata/templated.go")
assert.NoError(t, err)
b, _ := json.MarshalIndent(p.swagger, "", " ")
assert.Equal(t, expected, string(b))
}
func TestParser_ParseGeneralApiInfoExtensions(t *testing.T) {
	// should return an error because the extension value is not valid JSON
func() {
expected := "annotation @x-google-endpoints need a valid json value"
gopath := os.Getenv("GOPATH")
assert.NotNil(t, gopath)
p := New()
err := p.ParseGeneralAPIInfo("testdata/extensionsFail1.go")
if assert.Error(t, err) {
assert.Equal(t, expected, err.Error())
}
}()
	// should return an error because the extension doesn't have a value
func() {
expected := "annotation @x-google-endpoints need a value"
gopath := os.Getenv("GOPATH")
assert.NotNil(t, gopath)
p := New()
err := p.ParseGeneralAPIInfo("testdata/extensionsFail2.go")
if assert.Error(t, err) {
assert.Equal(t, expected, err.Error())
}
}()
}
func TestParser_ParseGeneralApiInfoWithOpsInSameFile(t *testing.T) {
expected := `{
"swagger": "2.0",
"info": {
"description": "This is a sample server Petstore server.\nIt has a lot of beautiful features.",
"title": "Swagger Example API",
"termsOfService": "http://swagger.io/terms/",
"contact": {},
"version": "1.0"
},
"paths": {}
}`
gopath := os.Getenv("GOPATH")
assert.NotNil(t, gopath)
p := New()
err := p.ParseGeneralAPIInfo("testdata/single_file_api/main.go")
assert.NoError(t, err)
b, _ := json.MarshalIndent(p.swagger, "", " ")
assert.Equal(t, expected, string(b))
}
func TestParser_ParseGeneralAPIInfoMarkdown(t *testing.T) {
p := New(SetMarkdownFileDirectory("testdata"))
mainAPIFile := "testdata/markdown.go"
err := p.ParseGeneralAPIInfo(mainAPIFile)
assert.NoError(t, err)
expected := `{
"swagger": "2.0",
"info": {
"description": "Swagger Example API Markdown Description",
"title": "Swagger Example API",
"termsOfService": "http://swagger.io/terms/",
"contact": {},
"version": "1.0"
},
"paths": {},
"tags": [
{
"description": "Users Tag Markdown Description",
"name": "users"
}
]
}`
b, _ := json.MarshalIndent(p.swagger, "", " ")
assert.Equal(t, expected, string(b))
p = New()
err = p.ParseGeneralAPIInfo(mainAPIFile)
assert.Error(t, err)
}
func TestParser_ParseGeneralApiInfoFailed(t *testing.T) {
gopath := os.Getenv("GOPATH")
assert.NotNil(t, gopath)
p := New()
assert.Error(t, p.ParseGeneralAPIInfo("testdata/noexist.go"))
}
func TestGetAllGoFileInfo(t *testing.T) {
searchDir := "testdata/pet"
p := New()
err := p.getAllGoFileInfo("testdata", searchDir)
assert.NoError(t, err)
assert.Equal(t, 2, len(p.packages.files))
}
func TestParser_ParseType(t *testing.T) {
searchDir := "testdata/simple/"
p := New()
err := p.getAllGoFileInfo("testdata", searchDir)
assert.NoError(t, err)
_, err = p.packages.ParseTypes()
assert.NoError(t, err)
assert.NotNil(t, p.packages.uniqueDefinitions["api.Pet3"])
assert.NotNil(t, p.packages.uniqueDefinitions["web.Pet"])
assert.NotNil(t, p.packages.uniqueDefinitions["web.Pet2"])
}
func TestGetSchemes(t *testing.T) {
schemes := getSchemes("@schemes http https")
expectedSchemes := []string{"http", "https"}
assert.Equal(t, expectedSchemes, schemes)
}
func TestParseSimpleApi1(t *testing.T) {
expected, err := ioutil.ReadFile("testdata/simple/expected.json")
assert.NoError(t, err)
searchDir := "testdata/simple"
p := New()
p.PropNamingStrategy = PascalCase
err = p.ParseAPI(searchDir, mainAPIFile, defaultParseDepth)
assert.NoError(t, err)
b, _ := json.MarshalIndent(p.swagger, "", " ")
assert.Equal(t, string(expected), string(b))
}
func TestParseSimpleApi_ForSnakecase(t *testing.T) {
expected := `{
"swagger": "2.0",
"info": {
"description": "This is a sample server Petstore server.",
"title": "Swagger Example API",
"termsOfService": "http://swagger.io/terms/",
"contact": {
"name": "API Support",
"url": "http://www.swagger.io/support",
"email": "[email protected]"
},
"license": {
"name": "Apache 2.0",
"url": "http://www.apache.org/licenses/LICENSE-2.0.html"
},
"version": "1.0"
},
"host": "petstore.swagger.io",
"basePath": "/v2",
"paths": {
"/file/upload": {
"post": {
"description": "Upload file",
"consumes": [
"multipart/form-data"
],
"produces": [
"application/json"
],
"summary": "Upload file",
"operationId": "file.upload",
"parameters": [
{
"type": "file",
"description": "this is a test file",
"name": "file",
"in": "formData",
"required": true
}
],
"responses": {
"200": {
"description": "ok",
"schema": {
"type": "string"
}
},
"400": {
"description": "We need ID!!",
"schema": {
"$ref": "#/definitions/web.APIError"
}
},
"404": {
"description": "Can not find ID",
"schema": {
"$ref": "#/definitions/web.APIError"
}
}
}
}
},
"/testapi/get-string-by-int/{some_id}": {
"get": {
"description": "get string by ID",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"summary": "Add a new pet to the store",
"operationId": "get-string-by-int",
"parameters": [
{
"type": "integer",
"format": "int64",
"description": "Some ID",
"name": "some_id",
"in": "path",
"required": true
},
{
"description": "Some ID",
"name": "some_id",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/web.Pet"
}
}
],
"responses": {
"200": {
"description": "ok",
"schema": {
"type": "string"
}
},
"400": {
"description": "We need ID!!",
"schema": {
"$ref": "#/definitions/web.APIError"
}
},
"404": {
"description": "Can not find ID",
"schema": {
"$ref": "#/definitions/web.APIError"
}
}
}
}
},
"/testapi/get-struct-array-by-string/{some_id}": {
"get": {
"security": [
{
"ApiKeyAuth": []
},
{
"BasicAuth": []
},
{
"OAuth2Application": [
"write"
]
},
{
"OAuth2Implicit": [
"read",
"admin"
]
},
{
"OAuth2AccessCode": [
"read"
]
},
{
"OAuth2Password": [
"admin"
]
}
],
"description": "get struct array by ID",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"operationId": "get-struct-array-by-string",
"parameters": [
{
"type": "string",
"description": "Some ID",
"name": "some_id",
"in": "path",
"required": true
},
{
"enum": [
1,
2,
3
],
"type": "integer",
"description": "Category",
"name": "category",
"in": "query",
"required": true
},
{
"minimum": 0,
"type": "integer",
"default": 0,
"description": "Offset",
"name": "offset",
"in": "query",
"required": true
},
{
"maximum": 50,
"type": "integer",
"default": 10,
"description": "Limit",
"name": "limit",
"in": "query",
"required": true
},
{
"maxLength": 50,
"minLength": 1,
"type": "string",
"default": "\"\"",
"description": "q",
"name": "q",
"in": "query",
"required": true
}
],
"responses": {
"200": {
"description": "ok",
"schema": {
"type": "string"
}
},
"400": {
"description": "We need ID!!",
"schema": {
"$ref": "#/definitions/web.APIError"
}
},
"404": {
"description": "Can not find ID",
"schema": {
"$ref": "#/definitions/web.APIError"
}
}
}
}
}
},
"definitions": {
"web.APIError": {
"type": "object",
"properties": {
"created_at": {
"type": "string"
},
"error_code": {
"type": "integer"
},
"error_message": {
"type": "string"
}
}
},
"web.Pet": {
"type": "object",
"required": [
"price"
],
"properties": {
"birthday": {
"type": "integer"
},
"category": {
"type": "object",
"properties": {
"id": {
"type": "integer",
"example": 1
},
"name": {
"type": "string",
"example": "category_name"
},
"photo_urls": {
"type": "array",
"format": "url",
"items": {
"type": "string"
},
"example": [
"http://test/image/1.jpg",
"http://test/image/2.jpg"
]
},
"small_category": {
"type": "object",
"required": [
"name"
],
"properties": {
"id": {
"type": "integer",
"example": 1
},
"name": {
"type": "string",
"example": "detail_category_name"
},
"photo_urls": {
"type": "array",
"items": {
"type": "string"
},
"example": [
"http://test/image/1.jpg",
"http://test/image/2.jpg"
]
}
}
}
}
},
"coeffs": {
"type": "array",
"items": {
"type": "number"
}
},
"custom_string": {
"type": "string"
},
"custom_string_arr": {
"type": "array",
"items": {
"type": "string"
}
},
"data": {
"type": "object"
},
"decimal": {
"type": "number"
},
"id": {
"type": "integer",
"format": "int64",
"example": 1
},
"is_alive": {
"type": "boolean",
"example": true
},
"name": {
"type": "string",
"example": "poti"
},
"null_int": {
"type": "integer"
},
"pets": {
"type": "array",
"items": {
"$ref": "#/definitions/web.Pet2"
}
},
"pets2": {
"type": "array",
"items": {
"$ref": "#/definitions/web.Pet2"
}
},
"photo_urls": {
"type": "array",
"items": {
"type": "string"
},
"example": [
"http://test/image/1.jpg",
"http://test/image/2.jpg"
]
},
"price": {
"type": "number",
"example": 3.25
},
"status": {
"type": "string"
},
"tags": {
"type": "array",
"items": {
"$ref": "#/definitions/web.Tag"
}
},
"uuid": {
"type": "string"
}
}
},
"web.Pet2": {
"type": "object",
"properties": {
"deleted_at": {
"type": "string"
},
"id": {
"type": "integer"
},
"middle_name": {
"type": "string"
}
}
},
"web.RevValue": {
"type": "object",
"properties": {
"data": {
"type": "integer"
},
"err": {
"type": "integer"
},
"status": {
"type": "boolean"
}
}
},
"web.Tag": {
"type": "object",
"properties": {
"id": {
"type": "integer",
"format": "int64"
},
"name": {
"type": "string"
},
"pets": {
"type": "array",
"items": {
"$ref": "#/definitions/web.Pet"
}
}
}
}
},
"securityDefinitions": {
"ApiKeyAuth": {
"type": "apiKey",
"name": "Authorization",
"in": "header"
},
"BasicAuth": {
"type": "basic"
},
"OAuth2AccessCode": {
"type": "oauth2",
"flow": "accessCode",
"authorizationUrl": "https://example.com/oauth/authorize",
"tokenUrl": "https://example.com/oauth/token",
"scopes": {
"admin": " Grants read and write access to administrative information"
}
},
"OAuth2Application": {
"type": "oauth2",
"flow": "application",
"tokenUrl": "https://example.com/oauth/token",
"scopes": {
"admin": " Grants read and write access to administrative information",
"write": " Grants write access"
}
},
"OAuth2Implicit": {
"type": "oauth2",
"flow": "implicit",
"authorizationUrl": "https://example.com/oauth/authorize",
"scopes": {
"admin": " Grants read and write access to administrative information",
"write": " Grants write access"
}
},
"OAuth2Password": {
"type": "oauth2",
"flow": "password",
"tokenUrl": "https://example.com/oauth/token",
"scopes": {
"admin": " Grants read and write access to administrative information",
"read": " Grants read access",
"write": " Grants write access"
}
}
}
}`
searchDir := "testdata/simple2"
p := New()
p.PropNamingStrategy = SnakeCase
err := p.ParseAPI(searchDir, mainAPIFile, defaultParseDepth)
assert.NoError(t, err)
b, _ := json.MarshalIndent(p.swagger, "", " ")
assert.Equal(t, expected, string(b))
}
func TestParseSimpleApi_ForLowerCamelcase(t *testing.T) {
expected := `{
"swagger": "2.0",
"info": {
"description": "This is a sample server Petstore server.",
"title": "Swagger Example API",
"termsOfService": "http://swagger.io/terms/",
"contact": {
"name": "API Support",
"url": "http://www.swagger.io/support",
"email": "[email protected]"
},
"license": {
"name": "Apache 2.0",
"url": "http://www.apache.org/licenses/LICENSE-2.0.html"
},
"version": "1.0"
},
"host": "petstore.swagger.io",
"basePath": "/v2",
"paths": {
"/file/upload": {
"post": {
"description": "Upload file",
"consumes": [
"multipart/form-data"
],
"produces": [
"application/json"
],
"summary": "Upload file",
"operationId": "file.upload",
"parameters": [
{
"type": "file",
"description": "this is a test file",
"name": "file",
"in": "formData",
"required": true
}
],
"responses": {
"200": {
"description": "ok",
"schema": {
"type": "string"
}
},
"400": {
"description": "We need ID!!",
"schema": {
"$ref": "#/definitions/web.APIError"
}
},
"404": {
"description": "Can not find ID",
"schema": {
"$ref": "#/definitions/web.APIError"
}
}
}
}
},
"/testapi/get-string-by-int/{some_id}": {
"get": {
"description": "get string by ID",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"summary": "Add a new pet to the store",
"operationId": "get-string-by-int",
"parameters": [
{
"type": "integer",
"format": "int64",
"description": "Some ID",
"name": "some_id",
"in": "path",
"required": true
},
{
"description": "Some ID",
"name": "some_id",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/web.Pet"
}
}
],
"responses": {
"200": {
"description": "ok",
"schema": {
"type": "string"
}
},
"400": {
"description": "We need ID!!",
"schema": {
"$ref": "#/definitions/web.APIError"
}
},
"404": {
"description": "Can not find ID",
"schema": {
"$ref": "#/definitions/web.APIError"
}
}
}
}
},
"/testapi/get-struct-array-by-string/{some_id}": {
"get": {
"security": [
{
"ApiKeyAuth": []
},
{
"BasicAuth": []
},
{
"OAuth2Application": [
"write"
]
},
{
"OAuth2Implicit": [
"read",
"admin"
]
},
{
"OAuth2AccessCode": [
"read"
]
},
{
"OAuth2Password": [
"admin"
]
}
],
"description": "get struct array by ID",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"operationId": "get-struct-array-by-string",
"parameters": [
{
"type": "string",
"description": "Some ID",
"name": "some_id",
"in": "path",
"required": true
},
{
"enum": [
1,
2,
3
],
"type": "integer",
"description": "Category",
"name": "category",
"in": "query",
"required": true
},
{
"minimum": 0,
"type": "integer",
"default": 0,
"description": "Offset",
"name": "offset",
"in": "query",
"required": true
},
{
"maximum": 50,
"type": "integer",
"default": 10,
"description": "Limit",
"name": "limit",
"in": "query",
"required": true
},
{
"maxLength": 50,
"minLength": 1,
"type": "string",
"default": "\"\"",
"description": "q",
"name": "q",
"in": "query",
"required": true
}
],
"responses": {
"200": {
"description": "ok",
"schema": {
"type": "string"
}
},
"400": {
"description": "We need ID!!",
"schema": {
"$ref": "#/definitions/web.APIError"
}
},
"404": {
"description": "Can not find ID",
"schema": {
"$ref": "#/definitions/web.APIError"
}
}
}
}
}
},
"definitions": {
"web.APIError": {
"type": "object",
"properties": {
"createdAt": {
"type": "string"
},
"errorCode": {
"type": "integer"
},
"errorMessage": {
"type": "string"
}
}
},
"web.Pet": {
"type": "object",
"properties": {
"category": {
"type": "object",
"properties": {
"id": {
"type": "integer",
"example": 1
},
"name": {
"type": "string",
"example": "category_name"
},
"photoURLs": {
"type": "array",
"format": "url",
"items": {
"type": "string"
},
"example": [
"http://test/image/1.jpg",
"http://test/image/2.jpg"
]
},
"smallCategory": {
"type": "object",
"properties": {
"id": {
"type": "integer",
"example": 1
},
"name": {
"type": "string",
"example": "detail_category_name"
},
"photoURLs": {
"type": "array",
"items": {
"type": "string"
},
"example": [
"http://test/image/1.jpg",
"http://test/image/2.jpg"
]
}
}
}
}
},
"data": {
"type": "object"
},
"decimal": {
"type": "number"
},
"id": {
"type": "integer",
"format": "int64",
"example": 1
},
"isAlive": {
"type": "boolean",
"example": true
},
"name": {
"type": "string",
"example": "poti"
},
"pets": {
"type": "array",
"items": {
"$ref": "#/definitions/web.Pet2"
}
},
"pets2": {
"type": "array",
"items": {
"$ref": "#/definitions/web.Pet2"
}
},
"photoURLs": {
"type": "array",
"items": {
"type": "string"
},
"example": [
"http://test/image/1.jpg",
"http://test/image/2.jpg"
]
},
"price": {
"type": "number",
"example": 3.25
},
"status": {
"type": "string"
},
"tags": {
"type": "array",
"items": {
"$ref": "#/definitions/web.Tag"
}
},
"uuid": {
"type": "string"
}
}
},
"web.Pet2": {
"type": "object",
"properties": {
"deletedAt": {
"type": "string"
},
"id": {
"type": "integer"
},
"middleName": {
"type": "string"
}
}
},
"web.RevValue": {
"type": "object",
"properties": {
"data": {
"type": "integer"
},
"err": {
"type": "integer"
},
"status": {
"type": "boolean"
}
}
},
"web.Tag": {
"type": "object",
"properties": {
"id": {
"type": "integer",
"format": "int64"
},
"name": {
"type": "string"
},
"pets": {
"type": "array",
"items": {
"$ref": "#/definitions/web.Pet"
}
}
}
}
},
"securityDefinitions": {
"ApiKeyAuth": {
"type": "apiKey",
"name": "Authorization",
"in": "header"
},
"BasicAuth": {
"type": "basic"
},
"OAuth2AccessCode": {
"type": "oauth2",
"flow": "accessCode",
"authorizationUrl": "https://example.com/oauth/authorize",
"tokenUrl": "https://example.com/oauth/token",
"scopes": {
"admin": " Grants read and write access to administrative information"
}
},
"OAuth2Application": {
"type": "oauth2",
"flow": "application",
"tokenUrl": "https://example.com/oauth/token",
"scopes": {
"admin": " Grants read and write access to administrative information",
"write": " Grants write access"
}
},
"OAuth2Implicit": {
"type": "oauth2",
"flow": "implicit",
"authorizationUrl": "https://example.com/oauth/authorize",
"scopes": {
"admin": " Grants read and write access to administrative information",
"write": " Grants write access"
}
},
"OAuth2Password": {
"type": "oauth2",
"flow": "password",
"tokenUrl": "https://example.com/oauth/token",
"scopes": {
"admin": " Grants read and write access to administrative information",
"read": " Grants read access",
"write": " Grants write access"
}
}
}
}`
searchDir := "testdata/simple3"
p := New()
err := p.ParseAPI(searchDir, mainAPIFile, defaultParseDepth)
assert.NoError(t, err)
b, _ := json.MarshalIndent(p.swagger, "", " ")
assert.Equal(t, expected, string(b))
}
func TestParseStructComment(t *testing.T) {
expected := `{
"swagger": "2.0",
"info": {
"description": "This is a sample server Petstore server.",
"title": "Swagger Example API",
"contact": {},
"version": "1.0"
},
"host": "localhost:4000",
"basePath": "/api",
"paths": {
"/posts/{post_id}": {
"get": {
"description": "get string by ID",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"summary": "Add a new pet to the store",
"parameters": [
{
"type": "integer",
"format": "int64",
"description": "Some ID",
"name": "post_id",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "string"
}
},
"400": {
"description": "We need ID!!",
"schema": {
"$ref": "#/definitions/web.APIError"
}
},
"404": {
"description": "Can not find ID",
"schema": {
"$ref": "#/definitions/web.APIError"
}
}
}
}
}
},
"definitions": {
"web.APIError": {
"type": "object",
"properties": {
"createdAt": {
"description": "Error time",
"type": "string"
},
"error": {
"description": "Error an Api error",
"type": "string"
},
"errorCtx": {
"description": "Error ` + "`" + `context` + "`" + ` tick comment",
"type": "string"
},
"errorNo": {
"description": "Error ` + "`" + `number` + "`" + ` tick comment",
"type": "integer"
}
}
}
}
}`
searchDir := "testdata/struct_comment"
p := New()
err := p.ParseAPI(searchDir, mainAPIFile, defaultParseDepth)
assert.NoError(t, err)
b, _ := json.MarshalIndent(p.swagger, "", " ")
assert.Equal(t, expected, string(b))
}
func TestParseNonExportedJSONFields(t *testing.T) {
expected := `{
"swagger": "2.0",
"info": {
"description": "This is a sample server.",
"title": "Swagger Example API",
"contact": {},
"version": "1.0"
},
"host": "localhost:4000",
"basePath": "/api",
"paths": {
"/so-something": {
"get": {
"description": "Does something, but internal (non-exported) fields inside a struct won't be marshaled into JSON",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"summary": "Call DoSomething",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/main.MyStruct"
}
}
}
}
}
},
"definitions": {
"main.MyStruct": {
"type": "object",
"properties": {
"data": {
"description": "Post data",
"type": "object",
"properties": {
"name": {
"description": "Post tag",
"type": "array",
"items": {
"type": "string"
}
}
}
},
"id": {
"type": "integer",
"format": "int64",
"example": 1
},
"name": {
"description": "Post name",
"type": "string",
"example": "poti"
}
}
}
}
}`
searchDir := "testdata/non_exported_json_fields"
p := New()
err := p.ParseAPI(searchDir, mainAPIFile, defaultParseDepth)
assert.NoError(t, err)
b, _ := json.MarshalIndent(p.swagger, "", " ")
assert.Equal(t, expected, string(b))
}
func TestParsePetApi(t *testing.T) {
expected := `{
"schemes": [
"http",
"https"
],
"swagger": "2.0",
"info": {
"description": "This is a sample server Petstore server. You can find out more about Swagger at [http://swagger.io](http://swagger.io) or on [irc.freenode.net, #swagger](http://swagger.io/irc/). For this sample, you can use the api key 'special-key' to test the authorization filters.",
"title": "Swagger Petstore",
"termsOfService": "http://swagger.io/terms/",
"contact": {
"email": "[email protected]"
},
"license": {
"name": "Apache 2.0",
"url": "http://www.apache.org/licenses/LICENSE-2.0.html"
},
"version": "1.0"
},
"host": "petstore.swagger.io",
"basePath": "/v2",
"paths": {}
}`
searchDir := "testdata/pet"
p := New()
err := p.ParseAPI(searchDir, mainAPIFile, defaultParseDepth)
assert.NoError(t, err)
b, _ := json.MarshalIndent(p.swagger, "", " ")
assert.Equal(t, expected, string(b))
}
func TestParseModelAsTypeAlias(t *testing.T) {
expected := `{
"swagger": "2.0",
"info": {
"description": "This is a sample server Petstore server.",
"title": "Swagger Example API",
"termsOfService": "http://swagger.io/terms/",
"contact": {
"name": "API Support",
"url": "http://www.swagger.io/support",
"email": "[email protected]"
},
"license": {
"name": "Apache 2.0",
"url": "http://www.apache.org/licenses/LICENSE-2.0.html"
},
"version": "1.0"
},
"host": "petstore.swagger.io",
"basePath": "/v2",
"paths": {
"/testapi/time-as-time-container": {
"get": {
"description": "test container with time and time alias",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"summary": "Get container with time and time alias",
"operationId": "time-as-time-container",
"responses": {
"200": {
"description": "ok",
"schema": {
"$ref": "#/definitions/data.TimeContainer"
}
}
}
}
}
},
"definitions": {
"data.TimeContainer": {
"type": "object",
"properties": {
"created_at": {
"type": "string"
},
"name": {
"type": "string"
},
"timestamp": {
"type": "string"
}
}
}
}
}`
searchDir := "testdata/alias_type"
p := New()
err := p.ParseAPI(searchDir, mainAPIFile, defaultParseDepth)
assert.NoError(t, err)
b, _ := json.MarshalIndent(p.swagger, "", " ")
assert.Equal(t, expected, string(b))
}
func TestParseComposition(t *testing.T) {
searchDir := "testdata/composition"
p := New()
err := p.ParseAPI(searchDir, mainAPIFile, defaultParseDepth)
assert.NoError(t, err)
expected, err := ioutil.ReadFile(filepath.Join(searchDir, "expected.json"))
assert.NoError(t, err)
b, _ := json.MarshalIndent(p.swagger, "", " ")
	// note: on Windows this comparison may fail due to \r\n vs \n line endings
assert.Equal(t, string(expected), string(b))
}
func TestParseImportAliases(t *testing.T) {
searchDir := "testdata/alias_import"
p := New()
err := p.ParseAPI(searchDir, mainAPIFile, defaultParseDepth)
assert.NoError(t, err)
expected, err := ioutil.ReadFile(filepath.Join(searchDir, "expected.json"))
assert.NoError(t, err)
b, _ := json.MarshalIndent(p.swagger, "", " ")
	// note: on Windows this comparison may fail due to \r\n vs \n line endings
assert.Equal(t, string(expected), string(b))
}
func TestParseNested(t *testing.T) {
searchDir := "testdata/nested"
p := New()
p.ParseDependency = true
err := p.ParseAPI(searchDir, mainAPIFile, defaultParseDepth)
assert.NoError(t, err)
expected, err := ioutil.ReadFile(filepath.Join(searchDir, "expected.json"))
assert.NoError(t, err)
b, _ := json.MarshalIndent(p.swagger, "", " ")
assert.Equal(t, string(expected), string(b))
}
func TestParseDuplicated(t *testing.T) {
searchDir := "testdata/duplicated"
p := New()
p.ParseDependency = true
err := p.ParseAPI(searchDir, mainAPIFile, defaultParseDepth)
assert.Errorf(t, err, "duplicated @id declarations successfully found")
}
func TestParseDuplicatedOtherMethods(t *testing.T) {
searchDir := "testdata/duplicated2"
p := New()
p.ParseDependency = true
err := p.ParseAPI(searchDir, mainAPIFile, defaultParseDepth)
assert.Errorf(t, err, "duplicated @id declarations successfully found")
}
func TestParseConflictSchemaName(t *testing.T) {
searchDir := "testdata/conflict_name"
p := New()
p.ParseDependency = true
err := p.ParseAPI(searchDir, mainAPIFile, defaultParseDepth)
assert.NoError(t, err)
b, _ := json.MarshalIndent(p.swagger, "", " ")
expected, err := ioutil.ReadFile(filepath.Join(searchDir, "expected.json"))
assert.NoError(t, err)
assert.Equal(t, string(expected), string(b))
}
func TestParser_ParseStructArrayObject(t *testing.T) {
src := `
package api
type Response struct {
Code int
Table [][]string
Data []struct{
Field1 uint
Field2 string
}
}
// @Success 200 {object} Response
// @Router /api/{id} [get]
func Test(){
}
`
expected := `{
"api.Response": {
"type": "object",
"properties": {
"code": {
"type": "integer"
},
"data": {
"type": "array",
"items": {
"type": "object",
"properties": {
"field1": {
"type": "integer"
},
"field2": {
"type": "string"
}
}
}
},
"table": {
"type": "array",
"items": {
"type": "array",
"items": {
"type": "string"
}
}
}
}
}
}`
f, err := goparser.ParseFile(token.NewFileSet(), "", src, goparser.ParseComments)
assert.NoError(t, err)
p := New()
p.packages.CollectAstFile("api", "api/api.go", f)
_, err = p.packages.ParseTypes()
assert.NoError(t, err)
err = p.ParseRouterAPIInfo("", f)
assert.NoError(t, err)
out, err := json.MarshalIndent(p.swagger.Definitions, "", " ")
assert.NoError(t, err)
assert.Equal(t, expected, string(out))
}
func TestParser_ParseEmbededStruct(t *testing.T) {
src := `
package api
type Response struct {
rest.ResponseWrapper
}
// @Success 200 {object} Response
// @Router /api/{id} [get]
func Test(){
}
`
restsrc := `
package rest
type ResponseWrapper struct {
Status string
Code int
Messages []string
Result interface{}
}
`
expected := `{
"api.Response": {
"type": "object",
"properties": {
"code": {
"type": "integer"
},
"messages": {
"type": "array",
"items": {
"type": "string"
}
},
"result": {
"type": "object"
},
"status": {
"type": "string"
}
}
}
}`
parser := New()
parser.ParseDependency = true
f, err := goparser.ParseFile(token.NewFileSet(), "", src, goparser.ParseComments)
assert.NoError(t, err)
parser.packages.CollectAstFile("api", "api/api.go", f)
f2, err := goparser.ParseFile(token.NewFileSet(), "", restsrc, goparser.ParseComments)
assert.NoError(t, err)
parser.packages.CollectAstFile("rest", "rest/rest.go", f2)
_, err = parser.packages.ParseTypes()
assert.NoError(t, err)
err = parser.ParseRouterAPIInfo("", f)
assert.NoError(t, err)
out, err := json.MarshalIndent(parser.swagger.Definitions, "", " ")
assert.NoError(t, err)
assert.Equal(t, expected, string(out))
}
func TestParser_ParseStructPointerMembers(t *testing.T) {
src := `
package api
type Child struct {
Name string
}
type Parent struct {
Test1 *string //test1
Test2 *Child //test2
}
// @Success 200 {object} Parent
// @Router /api/{id} [get]
func Test(){
}
`
expected := `{
"api.Child": {
"type": "object",
"properties": {
"name": {
"type": "string"
}
}
},
"api.Parent": {
"type": "object",
"properties": {
"test1": {
"description": "test1",
"type": "string"
},
"test2": {
"description": "test2",
"$ref": "#/definitions/api.Child"
}
}
}
}`
f, err := goparser.ParseFile(token.NewFileSet(), "", src, goparser.ParseComments)
assert.NoError(t, err)
p := New()
p.packages.CollectAstFile("api", "api/api.go", f)
_, err = p.packages.ParseTypes()
assert.NoError(t, err)
err = p.ParseRouterAPIInfo("", f)
assert.NoError(t, err)
out, err := json.MarshalIndent(p.swagger.Definitions, "", " ")
assert.NoError(t, err)
assert.Equal(t, expected, string(out))
}
func TestParser_ParseStructMapMember(t *testing.T) {
src := `
package api
type MyMapType map[string]string
type Child struct {
Name string
}
type Parent struct {
Test1 map[string]interface{} //test1
Test2 map[string]string //test2
Test3 map[string]*string //test3
Test4 map[string]Child //test4
Test5 map[string]*Child //test5
Test6 MyMapType //test6
Test7 []Child //test7
Test8 []*Child //test8
Test9 []map[string]string //test9
}
// @Success 200 {object} Parent
// @Router /api/{id} [get]
func Test(){
}
`
expected := `{
"api.Child": {
"type": "object",
"properties": {
"name": {
"type": "string"
}
}
},
"api.MyMapType": {
"type": "object",
"additionalProperties": {
"type": "string"
}
},
"api.Parent": {
"type": "object",
"properties": {
"test1": {
"description": "test1",
"type": "object",
"additionalProperties": true
},
"test2": {
"description": "test2",
"type": "object",
"additionalProperties": {
"type": "string"
}
},
"test3": {
"description": "test3",
"type": "object",
"additionalProperties": {
"type": "string"
}
},
"test4": {
"description": "test4",
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/api.Child"
}
},
"test5": {
"description": "test5",
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/api.Child"
}
},
"test6": {
"description": "test6",
"$ref": "#/definitions/api.MyMapType"
},
"test7": {
"description": "test7",
"type": "array",
"items": {
"$ref": "#/definitions/api.Child"
}
},
"test8": {
"description": "test8",
"type": "array",
"items": {
"$ref": "#/definitions/api.Child"
}
},
"test9": {
"description": "test9",
"type": "array",
"items": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
}
}
}
}`
f, err := goparser.ParseFile(token.NewFileSet(), "", src, goparser.ParseComments)
assert.NoError(t, err)
p := New()
p.packages.CollectAstFile("api", "api/api.go", f)
_, err = p.packages.ParseTypes()
assert.NoError(t, err)
err = p.ParseRouterAPIInfo("", f)
assert.NoError(t, err)
out, err := json.MarshalIndent(p.swagger.Definitions, "", " ")
assert.NoError(t, err)
assert.Equal(t, expected, string(out))
}
func TestParser_ParseRouterApiInfoErr(t *testing.T) {
src := `
package test
// @Accept unknown
func Test(){
}
`
f, err := goparser.ParseFile(token.NewFileSet(), "", src, goparser.ParseComments)
assert.NoError(t, err)
p := New()
err = p.ParseRouterAPIInfo("", f)
assert.EqualError(t, err, "ParseComment error in file :unknown accept type can't be accepted")
}
func TestParser_ParseRouterApiGet(t *testing.T) {
src := `
package test
// @Router /api/{id} [get]
func Test(){
}
`
f, err := goparser.ParseFile(token.NewFileSet(), "", src, goparser.ParseComments)
assert.NoError(t, err)
p := New()
err = p.ParseRouterAPIInfo("", f)
assert.NoError(t, err)
ps := p.swagger.Paths.Paths
val, ok := ps["/api/{id}"]
assert.True(t, ok)
assert.NotNil(t, val.Get)
}
func TestParser_ParseRouterApiPOST(t *testing.T) {
src := `
package test
// @Router /api/{id} [post]
func Test(){
}
`
f, err := goparser.ParseFile(token.NewFileSet(), "", src, goparser.ParseComments)
assert.NoError(t, err)
p := New()
err = p.ParseRouterAPIInfo("", f)
assert.NoError(t, err)
ps := p.swagger.Paths.Paths
val, ok := ps["/api/{id}"]
assert.True(t, ok)
assert.NotNil(t, val.Post)
}
func TestParser_ParseRouterApiDELETE(t *testing.T) {
src := `
package test
// @Router /api/{id} [delete]
func Test(){
}
`
f, err := goparser.ParseFile(token.NewFileSet(), "", src, goparser.ParseComments)
assert.NoError(t, err)
p := New()
err = p.ParseRouterAPIInfo("", f)
assert.NoError(t, err)
ps := p.swagger.Paths.Paths
val, ok := ps["/api/{id}"]
assert.True(t, ok)
assert.NotNil(t, val.Delete)
}
func TestParser_ParseRouterApiPUT(t *testing.T) {
src := `
package test
// @Router /api/{id} [put]
func Test(){
}
`
f, err := goparser.ParseFile(token.NewFileSet(), "", src, goparser.ParseComments)
assert.NoError(t, err)
p := New()
err = p.ParseRouterAPIInfo("", f)
assert.NoError(t, err)
ps := p.swagger.Paths.Paths
val, ok := ps["/api/{id}"]
assert.True(t, ok)
assert.NotNil(t, val.Put)
}
func TestParser_ParseRouterApiPATCH(t *testing.T) {
src := `
package test
// @Router /api/{id} [patch]
func Test(){
}
`
f, err := goparser.ParseFile(token.NewFileSet(), "", src, goparser.ParseComments)
assert.NoError(t, err)
p := New()
err = p.ParseRouterAPIInfo("", f)
assert.NoError(t, err)
ps := p.swagger.Paths.Paths
val, ok := ps["/api/{id}"]
assert.True(t, ok)
assert.NotNil(t, val.Patch)
}
func TestParser_ParseRouterApiHead(t *testing.T) {
src := `
package test
// @Router /api/{id} [head]
func Test(){
}
`
f, err := goparser.ParseFile(token.NewFileSet(), "", src, goparser.ParseComments)
assert.NoError(t, err)
p := New()
err = p.ParseRouterAPIInfo("", f)
assert.NoError(t, err)
ps := p.swagger.Paths.Paths
val, ok := ps["/api/{id}"]
assert.True(t, ok)
assert.NotNil(t, val.Head)
}
func TestParser_ParseRouterApiOptions(t *testing.T) {
src := `
package test
// @Router /api/{id} [options]
func Test(){
}
`
f, err := goparser.ParseFile(token.NewFileSet(), "", src, goparser.ParseComments)
assert.NoError(t, err)
p := New()
err = p.ParseRouterAPIInfo("", f)
assert.NoError(t, err)
ps := p.swagger.Paths.Paths
val, ok := ps["/api/{id}"]
assert.True(t, ok)
assert.NotNil(t, val.Options)
}
func TestParser_ParseRouterApiMultipleRoutesForSameFunction(t *testing.T) {
src := `
package test
// @Router /api/v1/{id} [get]
// @Router /api/v2/{id} [post]
func Test(){
}
`
f, err := goparser.ParseFile(token.NewFileSet(), "", src, goparser.ParseComments)
assert.NoError(t, err)
p := New()
err = p.ParseRouterAPIInfo("", f)
assert.NoError(t, err)
ps := p.swagger.Paths.Paths
val, ok := ps["/api/v1/{id}"]
assert.True(t, ok)
assert.NotNil(t, val.Get)
val, ok = ps["/api/v2/{id}"]
assert.True(t, ok)
assert.NotNil(t, val.Post)
}
func TestParser_ParseRouterApiMultiple(t *testing.T) {
src := `
package test
// @Router /api/{id} [get]
func Test1(){
}
// @Router /api/{id} [patch]
func Test2(){
}
// @Router /api/{id} [delete]
func Test3(){
}
`
f, err := goparser.ParseFile(token.NewFileSet(), "", src, goparser.ParseComments)
assert.NoError(t, err)
p := New()
err = p.ParseRouterAPIInfo("", f)
assert.NoError(t, err)
ps := p.swagger.Paths.Paths
val, ok := ps["/api/{id}"]
assert.True(t, ok)
assert.NotNil(t, val.Get)
assert.NotNil(t, val.Patch)
assert.NotNil(t, val.Delete)
}
// func TestParseDeterministic(t *testing.T) {
// mainAPIFile := "main.go"
// for _, searchDir := range []string{
// "testdata/simple",
// "testdata/model_not_under_root/cmd",
// } {
// t.Run(searchDir, func(t *testing.T) {
// var expected string
// // run the same code 100 times and check that the output is the same every time
// for i := 0; i < 100; i++ {
// p := New()
// p.PropNamingStrategy = PascalCase
// err := p.ParseAPI(searchDir, mainAPIFile, defaultParseDepth)
// b, _ := json.MarshalIndent(p.swagger, "", " ")
// assert.NotEqual(t, "", string(b))
// if expected == "" {
// expected = string(b)
// }
// assert.Equal(t, expected, string(b))
// }
// })
// }
// }
func TestApiParseTag(t *testing.T) {
searchDir := "testdata/tags"
p := New(SetMarkdownFileDirectory(searchDir))
p.PropNamingStrategy = PascalCase
err := p.ParseAPI(searchDir, mainAPIFile, defaultParseDepth)
assert.NoError(t, err)
if len(p.swagger.Tags) != 3 {
t.Error("Number of tags did not match")
}
dogs := p.swagger.Tags[0]
if dogs.TagProps.Name != "dogs" || dogs.TagProps.Description != "Dogs are cool" {
t.Error("Failed to parse dogs name or description")
}
cats := p.swagger.Tags[1]
if cats.TagProps.Name != "cats" || cats.TagProps.Description != "Cats are the devil" {
t.Error("Failed to parse cats name or description")
}
if cats.TagProps.ExternalDocs.URL != "https://google.de" || cats.TagProps.ExternalDocs.Description != "google is super useful to find out that cats are evil!" {
t.Error("URL: ", cats.TagProps.ExternalDocs.URL)
t.Error("Description: ", cats.TagProps.ExternalDocs.Description)
t.Error("Failed to parse cats external documentation")
}
}
func TestApiParseTag_NonExistentTag(t *testing.T) {
searchDir := "testdata/tags_nonexistend_tag"
p := New(SetMarkdownFileDirectory(searchDir))
p.PropNamingStrategy = PascalCase
err := p.ParseAPI(searchDir, mainAPIFile, defaultParseDepth)
assert.Error(t, err)
}
func TestParseTagMarkdownDescription(t *testing.T) {
searchDir := "testdata/tags"
p := New(SetMarkdownFileDirectory(searchDir))
p.PropNamingStrategy = PascalCase
err := p.ParseAPI(searchDir, mainAPIFile, defaultParseDepth)
if err != nil {
t.Error("Failed to parse api description: " + err.Error())
}
if len(p.swagger.Tags) != 3 {
t.Error("Number of tags did not match")
}
apes := p.swagger.Tags[2]
if apes.TagProps.Description == "" {
t.Error("Failed to parse tag description markdown file")
}
}
func TestParseApiMarkdownDescription(t *testing.T) {
searchDir := "testdata/tags"
p := New(SetMarkdownFileDirectory(searchDir))
p.PropNamingStrategy = PascalCase
err := p.ParseAPI(searchDir, mainAPIFile, defaultParseDepth)
if err != nil {
t.Error("Failed to parse api description: " + err.Error())
}
if p.swagger.Info.Description == "" {
t.Error("Failed to parse api description: " + err.Error())
}
}
func TestIgnoreInvalidPkg(t *testing.T) {
searchDir := "testdata/deps_having_invalid_pkg"
p := New()
if err := p.ParseAPI(searchDir, mainAPIFile, defaultParseDepth); err != nil {
t.Error("Failed to ignore valid pkg: " + err.Error())
}
}
func TestFixes432(t *testing.T) {
searchDir := "testdata/fixes-432"
mainAPIFile := "cmd/main.go"
p := New()
if err := p.ParseAPI(searchDir, mainAPIFile, defaultParseDepth); err != nil {
t.Error("Failed to ignore valid pkg: " + err.Error())
}
}
func TestParseOutsideDependencies(t *testing.T) {
searchDir := "testdata/pare_outside_dependencies"
mainAPIFile := "cmd/main.go"
p := New()
p.ParseDependency = true
if err := p.ParseAPI(searchDir, mainAPIFile, defaultParseDepth); err != nil {
t.Error("Failed to parse api: " + err.Error())
}
}
func TestParseStructParamCommentByQueryType(t *testing.T) {
src := `
package main
type Student struct {
Name string
Age int
Teachers []string
SkipField map[string]string
}
// @Param request query Student true "query params"
// @Success 200
// @Router /test [get]
func Fun() {
}
`
expected := `{
"info": {
"contact": {}
},
"paths": {
"/test": {
"get": {
"parameters": [
{
"type": "integer",
"name": "age",
"in": "query"
},
{
"type": "string",
"name": "name",
"in": "query"
},
{
"type": "array",
"items": {
"type": "string"
},
"name": "teachers",
"in": "query"
}
],
"responses": {
"200": {
"description": ""
}
}
}
}
}
}`
f, err := goparser.ParseFile(token.NewFileSet(), "", src, goparser.ParseComments)
assert.NoError(t, err)
p := New()
p.packages.CollectAstFile("api", "api/api.go", f)
_, err = p.packages.ParseTypes()
assert.NoError(t, err)
err = p.ParseRouterAPIInfo("", f)
assert.NoError(t, err)
b, _ := json.MarshalIndent(p.swagger, "", " ")
assert.Equal(t, expected, string(b))
}
func TestParseRenamedStructDefinition(t *testing.T) {
src := `
package main
type Child struct {
Name string
}//@name Student
type Parent struct {
Name string
Child Child
}//@name Teacher
// @Param request body Parent true "query params"
// @Success 200 {object} Parent
// @Router /test [get]
func Fun() {
}
`
f, err := goparser.ParseFile(token.NewFileSet(), "", src, goparser.ParseComments)
assert.NoError(t, err)
p := New()
p.packages.CollectAstFile("api", "api/api.go", f)
_, err = p.packages.ParseTypes()
assert.NoError(t, err)
err = p.ParseRouterAPIInfo("", f)
assert.NoError(t, err)
teacher, ok := p.swagger.Definitions["Teacher"]
assert.True(t, ok)
ref := teacher.Properties["child"].SchemaProps.Ref
assert.Equal(t, "#/definitions/Student", ref.String())
_, ok = p.swagger.Definitions["Student"]
assert.True(t, ok)
path, ok := p.swagger.Paths.Paths["/test"]
assert.True(t, ok)
assert.Equal(t, "#/definitions/Teacher", path.Get.Parameters[0].Schema.Ref.String())
ref = path.Get.Responses.ResponsesProps.StatusCodeResponses[200].ResponseProps.Schema.Ref
assert.Equal(t, "#/definitions/Teacher", ref.String())
}
func TestParseJSONFieldString(t *testing.T) {
expected := `{
"swagger": "2.0",
"info": {
"description": "This is a sample server.",
"title": "Swagger Example API",
"contact": {},
"version": "1.0"
},
"host": "localhost:4000",
"basePath": "/",
"paths": {
"/do-something": {
"post": {
"description": "Does something",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"summary": "Call DoSomething",
"parameters": [
{
"description": "My Struct",
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/main.MyStruct"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/main.MyStruct"
}
},
"500": {
"description": ""
}
}
}
}
},
"definitions": {
"main.MyStruct": {
"type": "object",
"properties": {
"boolvar": {
"description": "boolean as a string",
"type": "string",
"example": "false"
},
"floatvar": {
"description": "float as a string",
"type": "string",
"example": "0"
},
"id": {
"type": "integer",
"format": "int64",
"example": 1
},
"myint": {
"description": "integer as string",
"type": "string",
"example": "0"
},
"name": {
"type": "string",
"example": "poti"
},
"truebool": {
"description": "boolean as a string",
"type": "string",
"example": "true"
}
}
}
}
}`
searchDir := "testdata/json_field_string"
p := New()
err := p.ParseAPI(searchDir, mainAPIFile, defaultParseDepth)
assert.NoError(t, err)
b, _ := json.MarshalIndent(p.swagger, "", " ")
assert.Equal(t, expected, string(b))
}
func TestParseSwaggerignoreForEmbedded(t *testing.T) {
src := `
package main
type Child struct {
ChildName string
}//@name Student
type Parent struct {
Name string
Child ` + "`swaggerignore:\"true\"`" + `
}//@name Teacher
// @Param request body Parent true "query params"
// @Success 200 {object} Parent
// @Router /test [get]
func Fun() {
}
`
f, err := goparser.ParseFile(token.NewFileSet(), "", src, goparser.ParseComments)
assert.NoError(t, err)
p := New()
p.packages.CollectAstFile("api", "api/api.go", f)
p.packages.ParseTypes()
err = p.ParseRouterAPIInfo("", f)
assert.NoError(t, err)
assert.NoError(t, err)
teacher, ok := p.swagger.Definitions["Teacher"]
assert.True(t, ok)
name, ok := teacher.Properties["name"]
assert.True(t, ok)
assert.Len(t, name.Type, 1)
assert.Equal(t, "string", name.Type[0])
childName, ok := teacher.Properties["childName"]
assert.False(t, ok)
assert.Empty(t, childName)
}
func TestDefineTypeOfExample(t *testing.T) {
var example interface{}
var err error
example, err = defineTypeOfExample("string", "", "example")
assert.NoError(t, err)
assert.Equal(t, example.(string), "example")
example, err = defineTypeOfExample("number", "", "12.34")
assert.NoError(t, err)
assert.Equal(t, example.(float64), 12.34)
example, err = defineTypeOfExample("boolean", "", "true")
assert.NoError(t, err)
assert.Equal(t, example.(bool), true)
example, err = defineTypeOfExample("array", "", "one,two,three")
assert.Error(t, err)
assert.Nil(t, example)
example, err = defineTypeOfExample("array", "string", "one,two,three")
assert.NoError(t, err)
arr := []string{}
for _, v := range example.([]interface{}) {
arr = append(arr, v.(string))
}
assert.Equal(t, arr, []string{"one", "two", "three"})
example, err = defineTypeOfExample("object", "", "key_one:one,key_two:two,key_three:three")
assert.Error(t, err)
assert.Nil(t, example)
example, err = defineTypeOfExample("object", "string", "key_one,key_two,key_three")
assert.Error(t, err)
assert.Nil(t, example)
example, err = defineTypeOfExample("object", "oops", "key_one:one,key_two:two,key_three:three")
assert.Error(t, err)
assert.Nil(t, example)
example, err = defineTypeOfExample("object", "string", "key_one:one,key_two:two,key_three:three")
assert.NoError(t, err)
obj := map[string]string{}
for k, v := range example.(map[string]interface{}) {
obj[k] = v.(string)
}
assert.Equal(t, obj, map[string]string{"key_one": "one", "key_two": "two", "key_three": "three"})
example, err = defineTypeOfExample("oops", "", "")
assert.Error(t, err)
assert.Nil(t, example)
}
type mockFS struct {
os.FileInfo
FileName string
IsDirectory bool
}
func (fs *mockFS) Name() string {
return fs.FileName
}
func (fs *mockFS) IsDir() bool {
return fs.IsDirectory
}
func TestParser_Skip(t *testing.T) {
parser := New()
parser.ParseVendor = true
assert.NoError(t, parser.Skip("", &mockFS{FileName: "vendor"}))
assert.NoError(t, parser.Skip("", &mockFS{FileName: "vendor", IsDirectory: true}))
parser.ParseVendor = false
assert.NoError(t, parser.Skip("", &mockFS{FileName: "vendor"}))
assert.Error(t, parser.Skip("", &mockFS{FileName: "vendor", IsDirectory: true}))
assert.NoError(t, parser.Skip("", &mockFS{FileName: "models", IsDirectory: true}))
assert.NoError(t, parser.Skip("", &mockFS{FileName: "admin", IsDirectory: true}))
assert.NoError(t, parser.Skip("", &mockFS{FileName: "release", IsDirectory: true}))
parser = New(SetExcludedDirsAndFiles("admin/release,admin/models"))
assert.NoError(t, parser.Skip("admin", &mockFS{IsDirectory: true}))
assert.NoError(t, parser.Skip("admin/service", &mockFS{IsDirectory: true}))
assert.Error(t, parser.Skip("admin/models", &mockFS{IsDirectory: true}))
assert.Error(t, parser.Skip("admin/release", &mockFS{IsDirectory: true}))
}
| [
"\"GOPATH\"",
"\"GOPATH\"",
"\"GOPATH\"",
"\"GOPATH\"",
"\"GOPATH\"",
"\"GOPATH\""
] | [] | [
"GOPATH"
] | [] | ["GOPATH"] | go | 1 | 0 | |
deployments/cloudfoundry/buildpack-windows/src/supply/supply.go | package main
import (
"io"
"net/http"
"os"
"os/exec"
"path/filepath"
"time"
)
// Please don't change the formatting of this line - it's automatically updated by SetAllVersions.cs
const LatestVersion = "0.1.16"
func getAgentVersion() string {
version, ok := os.LookupEnv("SIGNALFX_DOTNET_TRACING_VERSION")
if !ok {
return LatestVersion
} else {
return version
}
}
func getAgentArchiveName() string {
return "signalfx-dotnet-tracing-" + getAgentVersion() + "-x64.msi"
}
func getAgentDownloadUrl() string {
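// Illustrative note: with the default LatestVersion above ("0.1.16") and no env override,
// this resolves to
// http://github.com/signalfx/signalfx-dotnet-tracing/releases/download/v0.1.16/signalfx-dotnet-tracing-0.1.16-x64.msi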
return "http://github.com/signalfx/signalfx-dotnet-tracing/releases/download/v" + getAgentVersion() + "/" + getAgentArchiveName()
}
func log(message string) {
os.Stdout.WriteString("-----> " + message + "\n")
}
func downloadAgentMsi(cacheDir string) (string, error) {
agentMsiPath := filepath.Join(cacheDir, getAgentArchiveName())
if _, err := os.Stat(agentMsiPath); os.IsNotExist(err) {
log("Agent msi file " + agentMsiPath + " does not exist, downloading ...")
httpClient := &http.Client{
Timeout: 5 * time.Minute,
}
rs, err := httpClient.Get(getAgentDownloadUrl())
if err != nil {
return "", err
}
defer rs.Body.Close()
log(rs.Status)
out, err := os.Create(agentMsiPath)
if err != nil {
return "", err
}
defer out.Close()
_, err = io.Copy(out, rs.Body)
if err != nil {
return "", err
}
}
return agentMsiPath, nil
}
func unpackAgentMsi(depsDir string, depsIdx string, agentMsiPath string) error {
libDir := filepath.Join(depsDir, depsIdx, "lib")
if _, err := os.Stat(libDir); os.IsNotExist(err) {
err := os.MkdirAll(libDir, 0755)
if err != nil {
return err
}
}
log("Unpacking the tracing library from the msi file ...")
cmd := exec.Command("msiexec", "/a", agentMsiPath, "/qn", "TARGETDIR="+libDir)
stdout, err := cmd.Output()
log("msiexec output:\n" + string(stdout))
if err != nil {
return err
}
return nil
}
func prepareEnvVars(depsIdx string) map[string]string {
signalFxLibPath := filepath.Join("c:\\Users\\vcap\\deps", depsIdx, "lib", "SignalFx", ".NET Tracing")
log("Preparing environment variables ...")
envVars := make(map[string]string)
// .NET Core profiler
envVars["CORECLR_ENABLE_PROFILING"] = "1"
envVars["CORECLR_PROFILER"] = "{B4C89B0F-9908-4F73-9F59-0D77C5A06874}"
envVars["CORECLR_PROFILER_PATH"] = filepath.Join(signalFxLibPath, "SignalFx.Tracing.ClrProfiler.Native.dll")
// .NET Framework profiler
envVars["COR_ENABLE_PROFILING"] = "1"
envVars["COR_PROFILER"] = "{B4C89B0F-9908-4F73-9F59-0D77C5A06874}"
envVars["COR_PROFILER_PATH"] = filepath.Join(signalFxLibPath, "SignalFx.Tracing.ClrProfiler.Native.dll")
// SignalFx tracing config
envVars["SIGNALFX_INTEGRATIONS"] = filepath.Join(signalFxLibPath, "integrations.json")
envVars["SIGNALFX_DOTNET_TRACER_HOME"] = signalFxLibPath
envVars["SIGNALFX_TRACE_LOG_PATH"] = "c:\\Users\\vcap\\logs\\signalfx-dotnet-profiler.log"
return envVars
}
func writeEnvVars(envVars map[string]string, out *os.File) error {
for key, value := range envVars {
if _, err := out.WriteString("set " + key + "=" + value + "\r\n"); err != nil {
return err
}
}
return nil
}
func writeProfileDScript(depsDir string, depsIdx string, envVars map[string]string) error {
profileD := filepath.Join(depsDir, depsIdx, "profile.d")
if _, err := os.Stat(profileD); os.IsNotExist(err) {
err := os.MkdirAll(profileD, 0755)
if err != nil {
return err
}
}
script := filepath.Join(profileD, "configureSfxDotnetTracing.bat")
log("Writing profile.d start script " + script + " ...")
out, err := os.Create(script)
if err != nil {
return err
}
defer out.Close()
if _, err := out.WriteString("@echo off\r\n"); err != nil {
return err
}
if err := writeEnvVars(envVars, out); err != nil {
return err
}
return nil
}
func writeHwcRunScript(buildDir string, envVars map[string]string) error {
script := filepath.Join(buildDir, "configureAndRun.bat")
log("HWC: Writing configureAndRun.bat " + script + " ...")
scriptOut, err := os.Create(script)
if err != nil {
return err
}
defer scriptOut.Close()
if _, err := scriptOut.WriteString("@echo off\r\n"); err != nil {
return err
}
if err := writeEnvVars(envVars, scriptOut); err != nil {
return err
}
if _, err := scriptOut.WriteString(".cloudfoundry\\hwc.exe\r\n"); err != nil {
return err
}
procfile := filepath.Join(buildDir, "Procfile")
log("HWC: Writing Procfile " + procfile + " ...")
procfileOut, err := os.Create(procfile)
if err != nil {
return err
}
defer procfileOut.Close()
if _, err := procfileOut.WriteString("web: configureAndRun.bat"); err != nil {
return err
}
return nil
}
// Needs to be present because of https://docs.cloudfoundry.org/buildpacks/custom.html#contract
func writeConfigYml(depsDir string, depsIdx string) error {
configYml := filepath.Join(depsDir, depsIdx, "config.yml")
log("Writing config.yml " + configYml + " ...")
out, err := os.Create(configYml)
if err != nil {
return err
}
defer out.Close()
if _, err := out.WriteString("---\nname: signalfx-dotnet-tracing\nconfig: {}"); err != nil {
return err
}
return nil
}
func main() {
if len(os.Args) < 5 {
log("ERROR: this script must be provided at least 4 args: BUILD_DIR, CACHE_DIR, DEPS_DIR, DEPS_IDX")
os.Exit(1)
}
log("SignalFx Tracing Library for .NET Buildpack")
var (
buildDir = os.Args[1]
cacheDir = os.Args[2]
depsDir = os.Args[3]
depsIdx = os.Args[4]
)
// 1. download agent msi
agentMsiPath, err := downloadAgentMsi(cacheDir)
if err != nil {
log("Cannot download agent MSI file: " + err.Error())
os.Exit(1)
}
// 2. unpack msi
if err := unpackAgentMsi(depsDir, depsIdx, agentMsiPath); err != nil {
log("Cannot unpack agent MSI file: " + err.Error())
os.Exit(1)
}
// 3. compute env vars
envVars := prepareEnvVars(depsIdx)
// 4. write hwc run script if hwc is used
if hwc := os.Getenv("SIGNALFX_USE_HWC"); hwc == "true" {
if err := writeHwcRunScript(buildDir, envVars); err != nil {
log("Cannot write HWC scripts: " + err.Error())
os.Exit(1)
}
} else {
// 5. write profile.d script otherwise
if err := writeProfileDScript(depsDir, depsIdx, envVars); err != nil {
log("Cannot write profile.d script: " + err.Error())
os.Exit(1)
}
}
// 6. write empty config.yml
if err := writeConfigYml(depsDir, depsIdx); err != nil {
log("Cannot config.yml: " + err.Error())
os.Exit(1)
}
}
| [
"\"SIGNALFX_USE_HWC\""
] | [] | [
"SIGNALFX_USE_HWC"
] | [] | ["SIGNALFX_USE_HWC"] | go | 1 | 0 | |
main.go | package main
// A CNI IPAM plugin that takes /proc/cmdline and the environment variables and
// outputs the CNI configuration required for the external IP address for the
// pod in question. IPAM plugins send and receive JSON on stdout and stdin,
// respectively, and are passed arguments and configuration information via
// environment variables and the aforementioned JSON.
import (
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"os"
"regexp"
"strconv"
"strings"
"github.com/containernetworking/cni/pkg/types"
cni "github.com/containernetworking/cni/pkg/types/040"
"github.com/m-lab/go/rtx"
)
// This value determines the output schema, and 0.3.1 is compatible with the
// schema defined in CniConfig.
const cniVersion = "0.3.1"
// Configuration objects to hold the CNI config that must be marshalled into Stdout
// IPConfig holds the IP configuration. The elements are strings to support v4 or v6.
type IPConfig struct {
Version IPaf `json:"version"`
Address string `json:"address"`
Gateway string `json:"gateway"`
}
// RouteConfig holds the subnets for which an interface should receive packets.
type RouteConfig struct {
Destination string `json:"dst"`
Gateway string `json:"gw"`
}
// DNSConfig holds a list of IP addresses for nameservers.
type DNSConfig struct {
Nameservers []string `json:"nameservers"`
}
// CniResult holds a complete CNI result, including the protocol version.
type CniResult struct {
CniVersion string `json:"cniVersion"`
IPs []*IPConfig `json:"ips,omitempty"`
Routes []*RouteConfig `json:"routes,omitempty"`
DNS *DNSConfig `json:"dns,omitempty"`
}
type JSONInput struct {
CNIVersion string `json:"cniVersion"`
Ipam struct {
Index int64 `json:"index"`
} `json:"ipam"`
}
// IPaf represents the IP address family.
type IPaf string
const (
v4 IPaf = "4"
v6 IPaf = "6"
)
var (
CNIConfig JSONInput
// ErrNoIPv6 is returned when we attempt to configure IPv6 on a system which has no v6 address.
ErrNoIPv6 = errors.New("IPv6 is not supported or configured")
)
// MakeGenericIPConfig makes IPConfig and DNSConfig objects out of the epoxy command line.
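// For example (values taken from the sample substring documented inside the function), the
// kernel argument "epoxy.ipv4=4.14.159.112/26,4.14.159.65,8.8.8.8,8.8.4.4" produces an IPConfig
// with Address "4.14.159.112/26" and Gateway "4.14.159.65", a RouteConfig with Destination
// "0.0.0.0/0", and a DNSConfig with nameservers 8.8.8.8 and 8.8.4.4.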
func MakeGenericIPConfig(procCmdline string, version IPaf) (*IPConfig, *RouteConfig, *DNSConfig, error) {
var destination string
switch version {
case v4:
destination = "0.0.0.0/0"
case v6:
destination = "::/0"
default:
return nil, nil, nil, errors.New("IP version can only be v4 or v6")
}
// Example substring: epoxy.ipv4=4.14.159.112/26,4.14.159.65,8.8.8.8,8.8.4.4
ipargsRe := regexp.MustCompile("epoxy.ipv" + string(version) + "=([^ ]+)")
matches := ipargsRe.FindStringSubmatch(procCmdline)
if len(matches) < 2 {
if version == v6 {
return nil, nil, nil, ErrNoIPv6
}
return nil, nil, nil, fmt.Errorf("Could not find epoxy.ip" + string(version) + " args")
}
// Example substring: 4.14.159.112/26,4.14.159.65,8.8.8.8,8.8.4.4
config := strings.Split(matches[1], ",")
if len(config) != 4 {
return nil, nil, nil, errors.New("Could not split up " + matches[1] + " into 4 parts")
}
Route := &RouteConfig{
Destination: destination,
Gateway: config[1],
}
IP := &IPConfig{
Version: version,
Address: config[0],
Gateway: config[1],
}
DNS := &DNSConfig{Nameservers: []string{config[2], config[3]}}
return IP, Route, DNS, nil
}
// MakeIPConfig makes the initial config from /proc/cmdline without incrementing up to the index.
func MakeIPConfig(procCmdline string) (*CniResult, error) {
config := &CniResult{CniVersion: cniVersion}
ipv4, route4, dnsv4, err := MakeGenericIPConfig(procCmdline, v4)
if err != nil {
// v4 config is required. Return an error if it is not present.
return nil, err
}
config.IPs = append(config.IPs, ipv4)
config.Routes = append(config.Routes, route4)
config.DNS = dnsv4
ipv6, route6, dnsv6, err := MakeGenericIPConfig(procCmdline, v6)
switch err {
case nil:
// v6 config is optional. Only set it up if the error is nil.
config.IPs = append(config.IPs, ipv6)
config.Routes = append(config.Routes, route6)
config.DNS.Nameservers = append(config.DNS.Nameservers, dnsv6.Nameservers...)
case ErrNoIPv6:
// Do nothing, but also don't return an error
default:
return nil, err
}
return config, nil
}
// Base10AdditionInBase16 implements a subtle addition operation that aids
// M-Lab operations staff in visually aligning IPv6 and IPv4 data.
//
// In an effort to keep with an established M-Lab deployment pattern, we
// ensure that the IPv4 last octet (printed in base 10) and the IPv6 last
// grouping (printed in base 16) match up visually.
//
// Some examples:
// IPv4 address 1.0.0.9 + index 12 -> 1.0.0.21
// IPv6 address 1f::9 + index 12 -> 1f::21
// IPv4 address 1.0.0.201 + index 12 -> 1.0.0.213
// IPv6 address 1f::201 + index 12 -> 1f::213
//
// Note that in the first example, the IPv6 address, when printed in decimal,
// is actually 33 (0x21). M-Lab already assigns IPv6 subnets in matching
// base-10 configurations, so this should work for our use case.
//
// The second example is a nice illustration of why this process has to involve
// the last two bytes of the IPv6 address, because 0x213 > 0xFF. It is for this
// reason that the function returns two bytes.
func Base10AdditionInBase16(octets []byte, index int64) ([]byte, error) {
if len(octets) != 2 {
return []byte{0, 0}, fmt.Errorf("passed-in slice %v was not of length 2", octets)
}
base16Number := 256*int64(octets[0]) + int64(octets[1])
base16String := strconv.FormatInt(base16Number, 16)
base10Number, err := strconv.ParseInt(base16String, 10, 64)
if err != nil {
return []byte{0, 0}, err
}
base10Number += index
base10String := strconv.FormatInt(base10Number, 10)
// All base 10 strings are valid base 16 numbers, so parse errors are
// impossible here in one sense, but it is also true that the number could (in
// the case of some edge-case strange inputs) be outside of the range of int16.
base16Result, err := strconv.ParseInt(base10String, 16, 16)
if err != nil {
return []byte{0, 0}, err
}
return []byte{byte(base16Result / 256), byte(base16Result % 256)}, nil
}
// AddIndexToIP updates a single IP in light of the discovered index.
func AddIndexToIP(config *IPConfig, index int64) error {
switch config.Version {
case v4:
// Add the index to the IPv4 address.
var a, b, c, d, subnet int64
_, err := fmt.Sscanf(config.Address, "%d.%d.%d.%d/%d", &a, &b, &c, &d, &subnet)
if err != nil {
return errors.New("Could not parse IPv4 address: " + config.Address)
}
if d+index > 255 || index <= 0 {
return errors.New("ihdex out of range for address")
}
config.Address = fmt.Sprintf("%d.%d.%d.%d/%d", a, b, c, d+index, subnet)
case v6:
// Add the index to the IPv6 address.
addrSubnet := strings.Split(config.Address, "/")
if len(addrSubnet) != 2 {
return fmt.Errorf("could not parse IPv6 IP/subnet %v", config.Address)
}
ipv6 := net.ParseIP(addrSubnet[0])
if ipv6 == nil {
return fmt.Errorf("cloud not parse IPv6 address %v", addrSubnet[0])
}
// Ensure that the byte array is 16 bytes. According to the "net" API docs,
// the byte array length and the IP address family are purposely decoupled. To
// ensure a 16 byte array as the underlying storage (which is what we need) we
// call To16() which has the job of ensuring 16 bytes of storage backing.
ipv6 = ipv6.To16()
lastoctets, err := Base10AdditionInBase16(ipv6[14:16], index)
if err != nil {
return err
}
ipv6[14] = lastoctets[0]
ipv6[15] = lastoctets[1]
config.Address = ipv6.String() + "/" + addrSubnet[1]
default:
return errors.New("unknown IP version")
}
return nil
}
// AddIndexToIPs updates the config in light of the discovered index.
func AddIndexToIPs(config *CniResult, index int64) error {
for _, ip := range config.IPs {
if err := AddIndexToIP(ip, index); err != nil {
return err
}
}
return nil
}
// MustReadProcCmdline reads /proc/cmdline or (if present) the environment
// variable PROC_CMDLINE_FOR_TESTING. The PROC_CMDLINE_FOR_TESTING environment
// variable should only be used for unit testing, and should not be used in
// production. No guarantee of future compatibility is made or implied if you
// use PROC_CMDLINE_FOR_TESTING for anything other than unit testing. If the
// environment variable and the file /proc/cmdline are both unreadable, call
// log.Fatal and exit.
func MustReadProcCmdline() string {
if text, isPresent := os.LookupEnv("PROC_CMDLINE_FOR_TESTING"); isPresent {
return text
}
procCmdline, err := ioutil.ReadFile("/proc/cmdline")
rtx.Must(err, "Could not read /proc/cmdline")
return string(procCmdline)
}
// ReadJSONInput unmarshals JSON input from stdin into a global variable.
func ReadJSONInput(r io.Reader) error {
dec := json.NewDecoder(r)
err := dec.Decode(&CNIConfig)
return err
}
// Cmd represents the possible CNI operations for an IPAM plugin.
type Cmd int
// The CNI operations we know about.
const (
AddCmd = iota
DelCmd
VersionCmd
CheckCmd
UnknownCmd
)
// ParseCmd returns the corresponding Cmd for a string.
func ParseCmd(cmd string) Cmd {
cmd = strings.ToLower(cmd)
switch cmd {
case "add":
return AddCmd
case "del":
return DelCmd
case "version":
return VersionCmd
case "check":
return CheckCmd
}
return UnknownCmd
}
// Add responds to the ADD command.
func Add() error {
err := ReadJSONInput(os.Stdin)
rtx.Must(err, "Could not unmarshall JSON from stdin")
procCmdline := MustReadProcCmdline()
config, err := MakeIPConfig(procCmdline)
rtx.Must(err, "Could not populate the IP configuration")
rtx.Must(AddIndexToIPs(config, CNIConfig.Ipam.Index), "Could not manipulate the IP")
data, err := json.Marshal(config)
rtx.Must(err, "failed to marshall CNI output to JSON")
result, err := cni.NewResult(data)
rtx.Must(err, "failed to create new result type from JSON")
return types.PrintResult(result, CNIConfig.CNIVersion)
}
// Version responds to the VERSION command.
func Version() {
fmt.Fprintf(os.Stdout, `{
"cniVersion": "0.3.1",
"supportedVersions": [ "0.2.0", "0.3.0", "0.3.1", "0.4.0" ]
}`)
}
// Put it all together.
func main() {
cmd := os.Getenv("CNI_COMMAND")
switch ParseCmd(cmd) {
case AddCmd:
Add()
case VersionCmd:
Version()
case DelCmd, CheckCmd:
// For DEL and CHECK we affirmatively and successfully do nothing.
default:
// To preserve old behavior: when in doubt, Add()
log.Printf("Unknown CNI_COMMAND value %q. Treating it like ADD.\n", cmd)
Add()
}
}
| [
"\"CNI_COMMAND\""
] | [] | [
"CNI_COMMAND"
] | [] | ["CNI_COMMAND"] | go | 1 | 0 | |
app/app/settings.py | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'p8*2wv9%c_@90h#(8-z91t5%)%+-j4fek31=g12=24gca2k9#u'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'user',
'recipe',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
}
}
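# Illustrative example (placeholder values, not part of this project): exporting
#   DB_HOST=db DB_NAME=app DB_USER=postgres DB_PASS=secret
# before starting Django points the default connection at the "app" database on host "db".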
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'core.User'
| [] | [] | [
"DB_PASS",
"DB_USER",
"DB_NAME",
"DB_HOST"
] | [] | ["DB_PASS", "DB_USER", "DB_NAME", "DB_HOST"] | python | 4 | 0 | |
nsd1904/devops/day01/mtping3.py | import subprocess
import threading
class Ping:
def __init__(self, host):
self.host = host
def __call__(self):
result = subprocess.run(
'ping -c2 %s &> /dev/null' % self.host,
shell=True
)
if result.returncode == 0:
print('%s:up' % self.host)
else:
print('%s:down' % self.host)
if __name__ == '__main__':
ips = ('172.40.59.%s' % i for i in range(1, 255))
for ip in ips:
t = threading.Thread(target=Ping(ip)) # create an instance
t.start() # target() # call the instance
| [] | [] | [] | [] | [] | python | null | null | null |
fiaas_deploy_daemon/pipeline/consumer.py | #!/usr/bin/env python
# -*- coding: utf-8
# Copyright 2017-2019 The FIAAS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import json
import logging
import os
import time
from kafka import KafkaConsumer
from monotonic import monotonic as time_monotonic
from prometheus_client import Counter
from requests import HTTPError
from yaml import YAMLError
from fiaas_deploy_daemon.log_extras import set_extras
from ..base_thread import DaemonThread
from ..deployer import DeployerEvent
from ..specs.factory import InvalidConfiguration
ALLOW_WITHOUT_MESSAGES_S = int(os.getenv('ALLOW_WITHOUT_MESSAGES_MIN', 30)) * 60
DEFAULT_NAMESPACE = u"default"
class Consumer(DaemonThread):
"""Listen for pipeline events and generate AppSpecs for the deployer
Requires the following environment variables:
- KAFKA_PIPELINE_SERVICE_HOST: Comma separated list of kafka brokers
- KAFKA_PIPELINE_SERVICE_PORT: Port kafka listens on
"""
def __init__(self, deploy_queue, config, reporter, spec_factory, app_config_downloader, lifecycle):
super(Consumer, self).__init__()
self._logger = logging.getLogger(__name__)
self._deploy_queue = deploy_queue
self._consumer = None
self._config = config
self._environment = config.environment
self._reporter = reporter
self._spec_factory = spec_factory
self._app_config_downloader = app_config_downloader
self._lifecycle = lifecycle
self._last_message_timestamp = int(time_monotonic())
def __call__(self):
if self._consumer is None:
self._connect_kafka()
message_counter = Counter("pipeline_message_received", "A message from pipeline received")
deploy_counter = Counter("pipeline_deploy_triggered", "A message caused a deploy to be triggered")
for message in self._consumer:
self._handle_message(deploy_counter, message, message_counter)
def _handle_message(self, deploy_counter, message, message_counter):
message_counter.inc()
self._last_message_timestamp = int(time_monotonic())
event = self._deserialize(message)
self._logger.debug("Got event: %r", event)
if event[u"environment"] == self._environment:
app_name = event[u"project_name"]
try:
lifecycle_subject = self._lifecycle.initiate(app_name=app_name,
namespace=DEFAULT_NAMESPACE,
deployment_id=self._deployment_id(event))
except (NoDockerArtifactException, NoFiaasArtifactException):
self._logger.debug("Ignoring event %r with missing artifacts", event)
return
try:
app_spec = self._create_spec(event)
set_extras(app_spec)
self._check_app_acceptable(app_spec)
self._add_deployment_label(app_spec)
self._deploy_queue.put(DeployerEvent("UPDATE", app_spec, lifecycle_subject))
self._reporter.register(app_spec, event[u"callback_url"])
deploy_counter.inc()
except YAMLError:
self._logger.exception("Failure when parsing FIAAS-config for application %s", app_name)
self._lifecycle.failed(lifecycle_subject)
except InvalidConfiguration:
self._logger.exception("Invalid configuration for application %s", app_name)
self._lifecycle.failed(lifecycle_subject)
except HTTPError:
self._logger.exception("Failure when downloading FIAAS-config for application %s", app_name)
self._lifecycle.failed(lifecycle_subject)
except (NotWhiteListedApplicationException, BlackListedApplicationException) as e:
self._logger.warn("App %s not deployed. %s", app_name, str(e))
def _check_app_acceptable(self, app_spec):
if self._config.whitelist and app_spec.name not in self._config.whitelist:
raise NotWhiteListedApplicationException(
"{} is not a in whitelist for this cluster".format(app_spec.name))
if self._config.blacklist and app_spec.name in self._config.blacklist:
raise BlackListedApplicationException(
"{} is banned from this cluster".format(app_spec.name))
def _connect_kafka(self):
self._consumer = KafkaConsumer(
"internal.pipeline.deployment",
bootstrap_servers=self._build_connect_string("kafka_pipeline")
)
def _create_spec(self, event):
artifacts = self._artifacts(event)
name = event[u"project_name"]
image = artifacts[u"docker"]
deployment_id = self._deployment_id(event)
fiaas_url = artifacts[u"fiaas"]
teams = event[u"teams"]
tags = event[u"tags"]
set_extras(app_name=name, namespace=DEFAULT_NAMESPACE, deployment_id=deployment_id)
app_config = self._app_config_downloader.get(fiaas_url)
return self._spec_factory(name, image, app_config, teams, tags, deployment_id,
DEFAULT_NAMESPACE, None, None)
def _artifacts(self, event):
artifacts = event[u"artifacts_by_type"]
if u"docker" not in artifacts:
raise NoDockerArtifactException()
if u"fiaas" not in artifacts:
raise NoFiaasArtifactException()
return artifacts
def _deployment_id(self, event):
artifacts = self._artifacts(event)
image = artifacts[u"docker"]
deployment_id = image.split(":")[-1][:63].lower()
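# Illustrative note (assumed image name): "registry.example.com/app:1.2.3" yields the
# deployment_id "1.2.3" (text after the last colon, truncated to 63 chars and lowercased).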
return deployment_id
def _build_connect_string(self, service):
host, port = self._config.resolve_service(service)
connect = ",".join("{}:{}".format(host, port) for host in host.split(","))
return connect
def is_alive(self):
return super(Consumer, self).is_alive() and self._is_receiving_messages()
def _is_receiving_messages(self):
# TODO: this is a hack to handle the fact that we sometimes seem to lose contact with kafka
self._logger.debug("No message for %r seconds", int(time_monotonic()) - self._last_message_timestamp)
return self._last_message_timestamp > int(time_monotonic()) - ALLOW_WITHOUT_MESSAGES_S
@staticmethod
def _deserialize(message):
return json.loads(message.value)
@staticmethod
def _add_deployment_label(app_spec):
app_spec.labels.deployment["fiaas/app_deployed_at"] = str(int(round(time.time())))
class NoDockerArtifactException(Exception):
pass
class NoFiaasArtifactException(Exception):
pass
class NotWhiteListedApplicationException(Exception):
pass
class BlackListedApplicationException(Exception):
pass
| [] | [] | [
"ALLOW_WITHOUT_MESSAGES_MIN"
] | [] | ["ALLOW_WITHOUT_MESSAGES_MIN"] | python | 1 | 0 | |
pkg/executor/newdeploy/newdeploymgr.go | /*
Copyright 2016 The Fission Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package newdeploy
import (
"context"
"fmt"
"os"
"strconv"
"strings"
"time"
"github.com/dchest/uniuri"
"github.com/fission/fission/pkg/throttler"
"github.com/fission/fission/pkg/utils"
multierror "github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
"go.uber.org/zap"
apiv1 "k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
k8sErrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
k8sTypes "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
k8sCache "k8s.io/client-go/tools/cache"
fv1 "github.com/fission/fission/pkg/apis/fission.io/v1"
"github.com/fission/fission/pkg/crd"
"github.com/fission/fission/pkg/executor/fscache"
fetcherConfig "github.com/fission/fission/pkg/fetcher/config"
"github.com/fission/fission/pkg/types"
)
type (
NewDeploy struct {
logger *zap.Logger
kubernetesClient *kubernetes.Clientset
fissionClient *crd.FissionClient
crdClient *rest.RESTClient
instanceID string
fetcherConfig *fetcherConfig.Config
runtimeImagePullPolicy apiv1.PullPolicy
namespace string
useIstio bool
collectorEndpoint string
fsCache *fscache.FunctionServiceCache // cache funcSvc's by function, address and pod name
throttler *throttler.Throttler
funcStore k8sCache.Store
funcController k8sCache.Controller
envStore k8sCache.Store
envController k8sCache.Controller
idlePodReapTime time.Duration
}
)
func MakeNewDeploy(
logger *zap.Logger,
fissionClient *crd.FissionClient,
kubernetesClient *kubernetes.Clientset,
crdClient *rest.RESTClient,
namespace string,
fetcherConfig *fetcherConfig.Config,
instanceID string,
) *NewDeploy {
enableIstio := false
if len(os.Getenv("ENABLE_ISTIO")) > 0 {
istio, err := strconv.ParseBool(os.Getenv("ENABLE_ISTIO"))
if err != nil {
logger.Error("failed to parse 'ENABLE_ISTIO', set to false", zap.Error(err))
}
enableIstio = istio
}
nd := &NewDeploy{
logger: logger.Named("new_deploy"),
fissionClient: fissionClient,
kubernetesClient: kubernetesClient,
crdClient: crdClient,
instanceID: instanceID,
namespace: namespace,
fsCache: fscache.MakeFunctionServiceCache(logger),
throttler: throttler.MakeThrottler(1 * time.Minute),
fetcherConfig: fetcherConfig,
runtimeImagePullPolicy: utils.GetImagePullPolicy(os.Getenv("RUNTIME_IMAGE_PULL_POLICY")),
useIstio: enableIstio,
idlePodReapTime: 2 * time.Minute,
}
if nd.crdClient != nil {
fnStore, fnController := nd.initFuncController()
nd.funcStore = fnStore
nd.funcController = fnController
envStore, envController := nd.initEnvController()
nd.envStore = envStore
nd.envController = envController
}
return nd
}
func (deploy *NewDeploy) Run(ctx context.Context) {
//go deploy.service()
go deploy.funcController.Run(ctx.Done())
go deploy.envController.Run(ctx.Done())
go deploy.idleObjectReaper()
}
func (deploy *NewDeploy) initFuncController() (k8sCache.Store, k8sCache.Controller) {
resyncPeriod := 30 * time.Second
listWatch := k8sCache.NewListWatchFromClient(deploy.crdClient, "functions", metav1.NamespaceAll, fields.Everything())
store, controller := k8sCache.NewInformer(listWatch, &fv1.Function{}, resyncPeriod, k8sCache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
fn := obj.(*fv1.Function)
_, err := deploy.createFunction(fn, true)
if err != nil {
deploy.logger.Error("error eager creating function",
zap.Error(err),
zap.Any("function", fn))
}
},
DeleteFunc: func(obj interface{}) {
fn := obj.(*fv1.Function)
err := deploy.deleteFunction(fn)
if err != nil {
deploy.logger.Error("error deleting function",
zap.Error(err),
zap.Any("function", fn))
}
},
UpdateFunc: func(oldObj interface{}, newObj interface{}) {
oldFn := oldObj.(*fv1.Function)
newFn := newObj.(*fv1.Function)
err := deploy.updateFunction(oldFn, newFn)
if err != nil {
deploy.logger.Error("error updating function",
zap.Error(err),
zap.Any("old_function", oldFn),
zap.Any("new_function", newFn))
}
},
})
return store, controller
}
func (deploy *NewDeploy) initEnvController() (k8sCache.Store, k8sCache.Controller) {
resyncPeriod := 30 * time.Second
listWatch := k8sCache.NewListWatchFromClient(deploy.crdClient, "environments", metav1.NamespaceAll, fields.Everything())
store, controller := k8sCache.NewInformer(listWatch, &fv1.Environment{}, resyncPeriod, k8sCache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {},
DeleteFunc: func(obj interface{}) {},
UpdateFunc: func(oldObj interface{}, newObj interface{}) {
newEnv := newObj.(*fv1.Environment)
oldEnv := oldObj.(*fv1.Environment)
// Currently only an image update in environment calls for function's deployment recreation. In future there might be more attributes which would want to do it
if oldEnv.Spec.Runtime.Image != newEnv.Spec.Runtime.Image {
deploy.logger.Debug("Updating all function of the environment that changed, old env:", zap.Any("environment", oldEnv))
funcs := deploy.getEnvFunctions(&newEnv.Metadata)
for _, f := range funcs {
function, err := deploy.fissionClient.Functions(f.Metadata.Namespace).Get(f.Metadata.Name)
if err != nil {
deploy.logger.Error("Error getting function", zap.Error(err), zap.Any("function", function))
}
err = deploy.updateFuncDeployment(function, newEnv)
if err != nil {
deploy.logger.Error("Error updating function", zap.Error(err), zap.Any("function", function))
}
}
}
},
})
return store, controller
}
func (deploy *NewDeploy) getEnvFunctions(m *metav1.ObjectMeta) []fv1.Function {
funcList, err := deploy.fissionClient.Functions(m.Namespace).List(metav1.ListOptions{})
if err != nil {
deploy.logger.Error("Error getting functions for env", zap.Error(err), zap.Any("environment", m))
}
relatedFunctions := make([]fv1.Function, 0)
for _, f := range funcList.Items {
if (f.Spec.Environment.Name == m.Name) && (f.Spec.Environment.Namespace == m.Namespace) {
relatedFunctions = append(relatedFunctions, f)
}
}
return relatedFunctions
}
func (deploy *NewDeploy) GetFuncSvc(ctx context.Context, metadata *metav1.ObjectMeta) (*fscache.FuncSvc, error) {
fn, err := deploy.fissionClient.Functions(metadata.Namespace).Get(metadata.Name)
if err != nil {
return nil, err
}
return deploy.createFunction(fn, false)
}
// RefreshFuncPods deleted pods related to the function so that new pods are replenished
func (deploy *NewDeploy) RefreshFuncPods(logger *zap.Logger, f fv1.Function) error {
env, err := deploy.fissionClient.Environments(f.Spec.Environment.Namespace).Get(f.Spec.Environment.Name)
if err != nil {
return err
}
funcLabels := deploy.getDeployLabels(f.Metadata, metav1.ObjectMeta{
Name: f.Spec.Environment.Name,
Namespace: f.Spec.Environment.Namespace,
UID: env.Metadata.UID,
})
dep, err := deploy.kubernetesClient.ExtensionsV1beta1().Deployments(metav1.NamespaceAll).List(metav1.ListOptions{
LabelSelector: labels.Set(funcLabels).AsSelector().String(),
})
if err != nil {
return err
}
patch := fmt.Sprintf(`{"spec" : {"template": {"spec":{"containers":[{"name": "%s", "env":[{"name": "%s", "value": "%s"}]}]}}}}`,
f.Metadata.Name,
fv1.LastUpdateTimestamp,
time.Now().String())
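// Illustrative note (function name assumed): for a function named "hello", the rendered patch
// sets an env var keyed by fv1.LastUpdateTimestamp to the current time on the container named
// "hello"; changing the pod template this way makes the deployment roll out fresh pods.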
// Ideally there should be only one deployment but for now we rely on label/selector to ensure that condition
for _, deployment := range dep.Items {
_, err := deploy.kubernetesClient.ExtensionsV1beta1().Deployments(deployment.ObjectMeta.Namespace).Patch(deployment.ObjectMeta.Name,
k8sTypes.StrategicMergePatchType,
[]byte(patch))
if err != nil {
return err
}
}
return nil
}
func (deploy *NewDeploy) createFunction(fn *fv1.Function, firstcreate bool) (*fscache.FuncSvc, error) {
if fn.Spec.InvokeStrategy.ExecutionStrategy.ExecutorType != fv1.ExecutorTypeNewdeploy {
return nil, nil
}
fsvcObj, err := deploy.throttler.RunOnce(string(fn.Metadata.UID), func(ableToCreate bool) (interface{}, error) {
if ableToCreate {
return deploy.fnCreate(fn, firstcreate)
}
return deploy.fsCache.GetByFunctionUID(fn.Metadata.UID)
})
if err != nil {
e := "error updating service address entry for function"
deploy.logger.Error(e,
zap.Error(err),
zap.String("function_name", fn.Metadata.Name),
zap.String("function_namespace", fn.Metadata.Namespace))
return nil, errors.Wrapf(err, "%s %s_%s", e, fn.Metadata.Name, fn.Metadata.Namespace)
}
fsvc, ok := fsvcObj.(*fscache.FuncSvc)
if !ok {
deploy.logger.Panic("receive unknown object while creating function - expected pointer of function service object")
}
return fsvc, err
}
func (deploy *NewDeploy) deleteFunction(fn *fv1.Function) error {
if fn.Spec.InvokeStrategy.ExecutionStrategy.ExecutorType != fv1.ExecutorTypeNewdeploy {
return nil
}
err := deploy.fnDelete(fn)
if err != nil {
err = errors.Wrapf(err, "error deleting kubernetes objects of function %v", fn.Metadata)
}
return err
}
func (deploy *NewDeploy) fnCreate(fn *fv1.Function, firstcreate bool) (*fscache.FuncSvc, error) {
env, err := deploy.fissionClient.
Environments(fn.Spec.Environment.Namespace).
Get(fn.Spec.Environment.Name)
if err != nil {
return nil, err
}
objName := deploy.getObjName(fn)
if !firstcreate {
// retrieve back the previous obj name for later use.
fsvc, err := deploy.fsCache.GetByFunctionUID(fn.Metadata.UID)
if err == nil {
objName = fsvc.Name
}
}
deployLabels := deploy.getDeployLabels(fn.Metadata, env.Metadata)
// to support backward compatibility, if the function was created in default ns, we fall back to creating the
// deployment of the function in fission-function ns
ns := deploy.namespace
if fn.Metadata.Namespace != metav1.NamespaceDefault {
ns = fn.Metadata.Namespace
}
// Envoy(istio-proxy) returns 404 directly before istio pilot
// propagates latest Envoy-specific configuration.
// Since newdeploy waits for pods of deployment to be ready,
// change the order of kubeObject creation (create service first,
// then deployment) to take advantage of waiting time.
svc, err := deploy.createOrGetSvc(deployLabels, objName, ns)
if err != nil {
deploy.logger.Error("error creating service", zap.Error(err), zap.String("service", objName))
go deploy.cleanupNewdeploy(ns, objName)
return nil, errors.Wrapf(err, "error creating service %v", objName)
}
svcAddress := fmt.Sprintf("%v.%v", svc.Name, svc.Namespace)
depl, err := deploy.createOrGetDeployment(fn, env, objName, deployLabels, ns, firstcreate)
if err != nil {
deploy.logger.Error("error creating deployment", zap.Error(err), zap.String("deployment", objName))
go deploy.cleanupNewdeploy(ns, objName)
return nil, errors.Wrapf(err, "error creating deployment %v", objName)
}
hpa, err := deploy.createOrGetHpa(objName, &fn.Spec.InvokeStrategy.ExecutionStrategy, depl)
if err != nil {
deploy.logger.Error("error creating HPA", zap.Error(err), zap.String("hpa", objName))
go deploy.cleanupNewdeploy(ns, objName)
return nil, errors.Wrapf(err, "error creating the HPA %v", objName)
}
kubeObjRefs := []apiv1.ObjectReference{
{
//obj.TypeMeta.Kind does not work hence this, needs investigation and a fix
Kind: "deployment",
Name: depl.ObjectMeta.Name,
APIVersion: depl.TypeMeta.APIVersion,
Namespace: depl.ObjectMeta.Namespace,
ResourceVersion: depl.ObjectMeta.ResourceVersion,
UID: depl.ObjectMeta.UID,
},
{
Kind: "service",
Name: svc.ObjectMeta.Name,
APIVersion: svc.TypeMeta.APIVersion,
Namespace: svc.ObjectMeta.Namespace,
ResourceVersion: svc.ObjectMeta.ResourceVersion,
UID: svc.ObjectMeta.UID,
},
{
Kind: "horizontalpodautoscaler",
Name: hpa.ObjectMeta.Name,
APIVersion: hpa.TypeMeta.APIVersion,
Namespace: hpa.ObjectMeta.Namespace,
ResourceVersion: hpa.ObjectMeta.ResourceVersion,
UID: hpa.ObjectMeta.UID,
},
}
fsvc := &fscache.FuncSvc{
Name: objName,
Function: &fn.Metadata,
Environment: env,
Address: svcAddress,
KubernetesObjects: kubeObjRefs,
Executor: fscache.NEWDEPLOY,
}
_, err = deploy.fsCache.Add(*fsvc)
if err != nil {
deploy.logger.Error("error adding function to cache", zap.Error(err), zap.Any("function", fsvc.Function))
return fsvc, err
}
return fsvc, nil
}
func (deploy *NewDeploy) updateFunction(oldFn *fv1.Function, newFn *fv1.Function) error {
if oldFn.Metadata.ResourceVersion == newFn.Metadata.ResourceVersion {
return nil
}
// Ignoring updates to functions which are not of NewDeployment type
if newFn.Spec.InvokeStrategy.ExecutionStrategy.ExecutorType != fv1.ExecutorTypeNewdeploy &&
oldFn.Spec.InvokeStrategy.ExecutionStrategy.ExecutorType != fv1.ExecutorTypeNewdeploy {
return nil
}
// Executor type is no longer New Deployment
if newFn.Spec.InvokeStrategy.ExecutionStrategy.ExecutorType != fv1.ExecutorTypeNewdeploy &&
oldFn.Spec.InvokeStrategy.ExecutionStrategy.ExecutorType == fv1.ExecutorTypeNewdeploy {
deploy.logger.Info("function does not use new deployment executor anymore, deleting resources",
zap.Any("function", newFn))
// IMP - pass the oldFn, as the new/modified function is not in cache
return deploy.deleteFunction(oldFn)
}
// Executor type changed to New Deployment from something else
if oldFn.Spec.InvokeStrategy.ExecutionStrategy.ExecutorType != fv1.ExecutorTypeNewdeploy &&
newFn.Spec.InvokeStrategy.ExecutionStrategy.ExecutorType == fv1.ExecutorTypeNewdeploy {
deploy.logger.Info("function type changed to new deployment, creating resources",
zap.Any("old_function", oldFn.Metadata),
zap.Any("new_function", newFn.Metadata))
_, err := deploy.createFunction(newFn, true)
if err != nil {
deploy.updateStatus(oldFn, err, "error changing the function's type to newdeploy")
}
return err
}
deployChanged := false
if oldFn.Spec.InvokeStrategy != newFn.Spec.InvokeStrategy {
// to support backward compatibility, if the function was created in default ns, we fall back to creating the
// deployment of the function in fission-function ns, so cleaning up resources there
ns := deploy.namespace
if newFn.Metadata.Namespace != metav1.NamespaceDefault {
ns = newFn.Metadata.Namespace
}
fsvc, err := deploy.fsCache.GetByFunctionUID(newFn.Metadata.UID)
if err != nil {
err = errors.Wrapf(err, "error updating function due to unable to find function service cache: %v", oldFn)
return err
}
hpa, err := deploy.getHpa(ns, fsvc.Name)
if err != nil {
deploy.updateStatus(oldFn, err, "error getting HPA while updating function")
return err
}
hpaChanged := false
if newFn.Spec.InvokeStrategy.ExecutionStrategy.MinScale != oldFn.Spec.InvokeStrategy.ExecutionStrategy.MinScale {
replicas := int32(newFn.Spec.InvokeStrategy.ExecutionStrategy.MinScale)
hpa.Spec.MinReplicas = &replicas
hpaChanged = true
}
if newFn.Spec.InvokeStrategy.ExecutionStrategy.MaxScale != oldFn.Spec.InvokeStrategy.ExecutionStrategy.MaxScale {
hpa.Spec.MaxReplicas = int32(newFn.Spec.InvokeStrategy.ExecutionStrategy.MaxScale)
hpaChanged = true
}
if newFn.Spec.InvokeStrategy.ExecutionStrategy.TargetCPUPercent != oldFn.Spec.InvokeStrategy.ExecutionStrategy.TargetCPUPercent {
targetCpupercent := int32(newFn.Spec.InvokeStrategy.ExecutionStrategy.TargetCPUPercent)
hpa.Spec.TargetCPUUtilizationPercentage = &targetCpupercent
hpaChanged = true
}
if hpaChanged {
err := deploy.updateHpa(hpa)
if err != nil {
deploy.updateStatus(oldFn, err, "error updating HPA while updating function")
return err
}
}
}
if oldFn.Spec.Environment != newFn.Spec.Environment ||
oldFn.Spec.Package.PackageRef != newFn.Spec.Package.PackageRef ||
oldFn.Spec.Package.FunctionName != newFn.Spec.Package.FunctionName {
deployChanged = true
}
// If length of slice has changed then no need to check individual elements
if len(oldFn.Spec.Secrets) != len(newFn.Spec.Secrets) {
deployChanged = true
} else {
for i, newSecret := range newFn.Spec.Secrets {
if newSecret != oldFn.Spec.Secrets[i] {
deployChanged = true
break
}
}
}
if len(oldFn.Spec.ConfigMaps) != len(newFn.Spec.ConfigMaps) {
deployChanged = true
} else {
for i, newConfig := range newFn.Spec.ConfigMaps {
if newConfig != oldFn.Spec.ConfigMaps[i] {
deployChanged = true
break
}
}
}
if deployChanged {
env, err := deploy.fissionClient.Environments(newFn.Spec.Environment.Namespace).
Get(newFn.Spec.Environment.Name)
if err != nil {
deploy.updateStatus(oldFn, err, "failed to get environment while updating function")
return err
}
return deploy.updateFuncDeployment(newFn, env)
}
return nil
}
func (deploy *NewDeploy) updateFuncDeployment(fn *fv1.Function, env *fv1.Environment) error {
fsvc, err := deploy.fsCache.GetByFunctionUID(fn.Metadata.UID)
if err != nil {
err = errors.Wrapf(err, "error updating function due to unable to find function service cache: %v", fn)
return err
}
fnObjName := fsvc.Name
deployLabels := deploy.getDeployLabels(fn.Metadata, env.Metadata)
deploy.logger.Info("updating deployment due to function/environment update",
zap.String("deployment", fnObjName), zap.Any("function", fn.Metadata.Name))
newDeployment, err := deploy.getDeploymentSpec(fn, env, fnObjName, deployLabels)
if err != nil {
deploy.updateStatus(fn, err, "failed to get new deployment spec while updating function")
return err
}
// to support backward compatibility, if the function was created in default ns, we fall back to creating the
// deployment of the function in fission-function ns
ns := deploy.namespace
if fn.Metadata.Namespace != metav1.NamespaceDefault {
ns = fn.Metadata.Namespace
}
err = deploy.updateDeployment(newDeployment, ns)
if err != nil {
deploy.updateStatus(fn, err, "failed to update deployment while updating function")
return err
}
return nil
}
func (deploy *NewDeploy) fnDelete(fn *fv1.Function) error {
var multierr *multierror.Error
// GetByFunction uses resource version as part of cache key, however,
// the resource version in function metadata will be changed when a function
// is deleted and cause newdeploy backend fails to delete the entry.
// Use GetByFunctionUID instead of GetByFunction here to find correct
// fsvc entry.
fsvc, err := deploy.fsCache.GetByFunctionUID(fn.Metadata.UID)
if err != nil {
err = errors.Wrap(err, fmt.Sprintf("fsvc not found in cache: %v", fn.Metadata))
return err
}
objName := fsvc.Name
_, err = deploy.fsCache.DeleteOld(fsvc, time.Second*0)
if err != nil {
multierr = multierror.Append(multierr,
errors.Wrap(err, "error deleting the function from cache"))
}
// to support backward compatibility, if the function was created in default ns, we fall back to creating the
// deployment of the function in fission-function ns, so cleaning up resources there
ns := deploy.namespace
if fn.Metadata.Namespace != metav1.NamespaceDefault {
ns = fn.Metadata.Namespace
}
err = deploy.cleanupNewdeploy(ns, objName)
multierr = multierror.Append(multierr, err)
return multierr.ErrorOrNil()
}
// getObjName returns a unique name for kubernetes objects of function
func (deploy *NewDeploy) getObjName(fn *fv1.Function) string {
return strings.ToLower(fmt.Sprintf("newdeploy-%v-%v-%v", fn.Metadata.Name, fn.Metadata.Namespace, uniuri.NewLen(8)))
}
func (deploy *NewDeploy) getDeployLabels(fnMeta metav1.ObjectMeta, envMeta metav1.ObjectMeta) map[string]string {
return map[string]string{
types.EXECUTOR_INSTANCEID_LABEL: deploy.instanceID,
types.EXECUTOR_TYPE: fv1.ExecutorTypeNewdeploy,
types.ENVIRONMENT_NAME: envMeta.Name,
types.ENVIRONMENT_NAMESPACE: envMeta.Namespace,
types.ENVIRONMENT_UID: string(envMeta.UID),
types.FUNCTION_NAME: fnMeta.Name,
types.FUNCTION_NAMESPACE: fnMeta.Namespace,
types.FUNCTION_UID: string(fnMeta.UID),
}
}
// updateKubeObjRefRV update the resource version of kubeObjectRef with
// given kind and return error if failed to find the reference.
func (deploy *NewDeploy) updateKubeObjRefRV(fsvc *fscache.FuncSvc, objKind string, rv string) error {
kubeObjs := fsvc.KubernetesObjects
for i, obj := range kubeObjs {
if obj.Kind == objKind {
kubeObjs[i].ResourceVersion = rv
return nil
}
}
fsvc.KubernetesObjects = kubeObjs
return fmt.Errorf("error finding kubernetes object reference with kind: %v", objKind)
}
// updateStatus updates the status of a function update.
// Current implementation only logs messages, in future it will update function status
func (deploy *NewDeploy) updateStatus(fn *fv1.Function, err error, message string) {
deploy.logger.Error("function status update", zap.Error(err), zap.Any("function", fn), zap.String("message", message))
}
// IsValid does a get on the service address to ensure it's a valid service, then
// scales the deployment to 1 replica if there are no available replicas for the function.
// Return true if no error occurs, return false otherwise.
func (deploy *NewDeploy) IsValid(fsvc *fscache.FuncSvc) bool {
service := strings.Split(fsvc.Address, ".")
if len(service) == 0 {
return false
}
_, err := deploy.kubernetesClient.CoreV1().Services(service[1]).Get(service[0], metav1.GetOptions{})
if err != nil {
deploy.logger.Error("error validating function service address", zap.String("function", fsvc.Function.Name), zap.Error(err))
return false
}
deployObj := getDeploymentObj(fsvc.KubernetesObjects)
if deployObj == nil {
deploy.logger.Error("deployment obj for function does not exist", zap.String("function", fsvc.Function.Name))
return false
}
currentDeploy, err := deploy.kubernetesClient.ExtensionsV1beta1().
Deployments(deployObj.Namespace).Get(deployObj.Name, metav1.GetOptions{})
if err != nil {
deploy.logger.Error("error validating function deployment", zap.Error(err), zap.String("function", fsvc.Function.Name))
return false
}
// return directly when available replicas > 0
if currentDeploy.Status.AvailableReplicas > 0 {
return true
}
return false
}
// idleObjectReaper reaps objects after certain idle time
func (deploy *NewDeploy) idleObjectReaper() {
pollSleep := time.Duration(deploy.idlePodReapTime)
for {
time.Sleep(pollSleep)
envs, err := deploy.fissionClient.Environments(metav1.NamespaceAll).List(metav1.ListOptions{})
if err != nil {
deploy.logger.Fatal("failed to get environment list", zap.Error(err))
}
envList := make(map[k8sTypes.UID]struct{})
for _, env := range envs.Items {
envList[env.Metadata.UID] = struct{}{}
}
funcSvcs, err := deploy.fsCache.ListOld(deploy.idlePodReapTime)
if err != nil {
deploy.logger.Error("error reaping idle pods", zap.Error(err))
continue
}
for _, fsvc := range funcSvcs {
if fsvc.Executor != fscache.NEWDEPLOY {
continue
}
// For function with the environment that no longer exists, executor
// scales down the deployment as usual and prints log to notify user.
if _, ok := envList[fsvc.Environment.Metadata.UID]; !ok {
deploy.logger.Error("function environment no longer exists",
zap.String("environment", fsvc.Environment.Metadata.Name),
zap.String("function", fsvc.Name))
}
fn, err := deploy.fissionClient.Functions(fsvc.Function.Namespace).Get(fsvc.Function.Name)
if err != nil {
// Newdeploy manager handles the function delete event and clean cache/kubeobjs itself,
// so we ignore the not found error for functions with newdeploy executor type here.
if k8sErrs.IsNotFound(err) && fsvc.Executor == fscache.NEWDEPLOY {
continue
}
deploy.logger.Error("error getting function", zap.Error(err), zap.String("function", fsvc.Function.Name))
continue
}
deployObj := getDeploymentObj(fsvc.KubernetesObjects)
if deployObj == nil {
deploy.logger.Error("error finding function deployment", zap.Error(err), zap.String("function", fsvc.Function.Name))
continue
}
currentDeploy, err := deploy.kubernetesClient.ExtensionsV1beta1().
Deployments(deployObj.Namespace).Get(deployObj.Name, metav1.GetOptions{})
if err != nil {
deploy.logger.Error("error validating function deployment", zap.Error(err), zap.String("function", fsvc.Function.Name))
continue
}
minScale := int32(fn.Spec.InvokeStrategy.ExecutionStrategy.MinScale)
// do nothing if the current replicas is already lower than minScale
if *currentDeploy.Spec.Replicas <= minScale {
continue
}
err = deploy.scaleDeployment(deployObj.Namespace, deployObj.Name, minScale)
if err != nil {
deploy.logger.Error("error scaling down function deployment", zap.Error(err), zap.String("function", fsvc.Function.Name))
}
}
}
}
func getDeploymentObj(kubeobjs []apiv1.ObjectReference) *apiv1.ObjectReference {
for _, kubeobj := range kubeobjs {
switch strings.ToLower(kubeobj.Kind) {
case "deployment":
return &kubeobj
}
}
return nil
}
func (deploy *NewDeploy) scaleDeployment(deplNS string, deplName string, replicas int32) error {
deploy.logger.Info("scaling deployment",
zap.String("deployment", deplName),
zap.String("namespace", deplNS),
zap.Int32("replicas", replicas))
_, err := deploy.kubernetesClient.ExtensionsV1beta1().Deployments(deplNS).UpdateScale(deplName, &v1beta1.Scale{
ObjectMeta: metav1.ObjectMeta{
Name: deplName,
Namespace: deplNS,
},
Spec: v1beta1.ScaleSpec{
Replicas: replicas,
},
})
return err
}
| [
"\"ENABLE_ISTIO\"",
"\"ENABLE_ISTIO\"",
"\"RUNTIME_IMAGE_PULL_POLICY\""
] | [] | [
"ENABLE_ISTIO",
"RUNTIME_IMAGE_PULL_POLICY"
] | [] | ["ENABLE_ISTIO", "RUNTIME_IMAGE_PULL_POLICY"] | go | 2 | 0 | |
salt/modules/dockerio.py | # -*- coding: utf-8 -*-
'''
Management of Docker Containers
.. versionadded:: 2014.1.0
.. deprecated:: 2015.8.0
Future feature development will be done only in :mod:`dockerng
<salt.modules.dockerng>`. See the documentation for this module for
information on the deprecation path.
.. note::
The DockerIO integration is still in beta; the API is subject to change
General Notes
-------------
As we use states, we don't want to be continuously spawning new containers, so we
will map each container id (or image) to a grain whenever it is relevant.
As a corollary, we will resolve a container id either directly by the id
or by finding a container id matching something stored in a grain.
Installation Prerequisites
--------------------------
- You will need the ``docker-py`` python package in your python installation
path that is running salt. Its version should support `Docker Remote API
v1.12 <http://docs.docker.io/en/latest/reference/api/docker_remote_api_v1.12>`_.
Currently, ``docker-py 0.5.0`` is known to support `Docker Remote API v1.12
<http://docs.docker.io/en/latest/reference/api/docker_remote_api_v1.12>`_
.. code-block:: bash
pip install docker-py==0.5.0
Prerequisite Pillar Configuration for Authentication
----------------------------------------------------
- To push or pull you will need to be authenticated as the ``docker-py`` bindings
require it
- For this to happen, you will need to configure a mapping in the pillar
representing your per URL authentication bits:
.. code-block:: yaml
docker-registries:
registry_url:
email: [email protected]
password: s3cr3t
username: foo
- You need at least an entry to the default docker index:
.. code-block:: yaml
docker-registries:
https://index.docker.io/v1/:
email: [email protected]
password: s3cr3t
username: foo
- You can define multiple registry blocks for them to be aggregated. The only thing to keep
in mind is that their ID must finish with ``-docker-registries``:
.. code-block:: yaml
ac-docker-registries:
https://index.bar.io/v1/:
email: [email protected]
password: s3cr3t
username: foo
ab-docker-registries:
https://index.foo.io/v1/:
email: [email protected]
password: s3cr3t
username: foo
This could be also written as:
.. code-block:: yaml
docker-registries:
https://index.bar.io/v1/:
email: [email protected]
password: s3cr3t
username: foo
https://index.foo.io/v1/:
email: [email protected]
password: s3cr3t
username: foo
Methods
_______
- Registry Dialog
- :py:func:`login<salt.modules.dockerio.login>`
- :py:func:`push<salt.modules.dockerio.push>`
- :py:func:`pull<salt.modules.dockerio.pull>`
- Docker Management
- :py:func:`version<salt.modules.dockerio.version>`
- :py:func:`info<salt.modules.dockerio.info>`
- Image Management
- :py:func:`search<salt.modules.dockerio.search>`
- :py:func:`inspect_image<salt.modules.dockerio.inspect_image>`
- :py:func:`get_images<salt.modules.dockerio.get_images>`
- :py:func:`remove_image<salt.modules.dockerio.remove_image>`
- :py:func:`import_image<salt.modules.dockerio.import_image>`
- :py:func:`build<salt.modules.dockerio.build>`
- :py:func:`tag<salt.modules.dockerio.tag>`
- :py:func:`save<salt.modules.dockerio.save>`
- :py:func:`load<salt.modules.dockerio.load>`
- Container Management
- :py:func:`start<salt.modules.dockerio.start>`
- :py:func:`stop<salt.modules.dockerio.stop>`
- :py:func:`restart<salt.modules.dockerio.restart>`
- :py:func:`kill<salt.modules.dockerio.kill>`
- :py:func:`wait<salt.modules.dockerio.wait>`
- :py:func:`get_containers<salt.modules.dockerio.get_containers>`
- :py:func:`inspect_container<salt.modules.dockerio.inspect_container>`
- :py:func:`remove_container<salt.modules.dockerio.remove_container>`
- :py:func:`is_running<salt.modules.dockerio.is_running>`
- :py:func:`top<salt.modules.dockerio.top>`
- :py:func:`port<salt.modules.dockerio.port>`
- :py:func:`logs<salt.modules.dockerio.logs>`
- :py:func:`diff<salt.modules.dockerio.diff>`
- :py:func:`commit<salt.modules.dockerio.commit>`
- :py:func:`create_container<salt.modules.dockerio.create_container>`
- :py:func:`export<salt.modules.dockerio.export>`
- :py:func:`get_container_root<salt.modules.dockerio.get_container_root>`
Runtime Execution within a specific, already existing/running container
--------------------------------------------------------------------------
Idea is to use `lxc-attach <http://linux.die.net/man/1/lxc-attach>`_ to execute
inside the container context.
We do not want to use ``docker run`` but want to execute something inside a
running container.
These are the available methods:
- :py:func:`retcode<salt.modules.dockerio.retcode>`
- :py:func:`run<salt.modules.dockerio.run>`
- :py:func:`run_all<salt.modules.dockerio.run_all>`
- :py:func:`run_stderr<salt.modules.dockerio.run_stderr>`
- :py:func:`run_stdout<salt.modules.dockerio.run_stdout>`
- :py:func:`script<salt.modules.dockerio.script>`
- :py:func:`script_retcode<salt.modules.dockerio.script_retcode>`
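For example, to run a one-off command inside a running container (a sketch
using a placeholder container id):
.. code-block:: bash
    salt '*' docker.run_all <container id> 'ls -l /etc'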
'''
# Import Python Futures
from __future__ import absolute_import
__docformat__ = 'restructuredtext en'
# Import Python libs
import datetime
import json
import logging
import os
import re
import traceback
import shutil
import types
# Import Salt libs
from salt.modules import cmdmod
from salt.exceptions import CommandExecutionError, SaltInvocationError
import salt.utils
import salt.utils.odict
# Import 3rd-party libs
import salt.ext.six as six
# pylint: disable=import-error
from salt.ext.six.moves import range # pylint: disable=no-name-in-module,redefined-builtin
try:
import docker
HAS_DOCKER = True
except ImportError:
HAS_DOCKER = False
# pylint: enable=import-error
HAS_NSENTER = bool(salt.utils.which('nsenter'))
log = logging.getLogger(__name__)
INVALID_RESPONSE = 'We did not get any expected answer from docker'
VALID_RESPONSE = ''
NOTSET = object()
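# Status structure returned by most functions in this module: 'status' holds
# the boolean outcome, 'id' the container/image id, 'comment' a human readable
# message and 'out' the raw data returned by docker-py.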
base_status = {
'status': None,
'id': None,
'comment': '',
'out': None
}
# Define the module's virtual name
__virtualname__ = 'docker'
def __virtual__():
'''
Only load if docker libs are present
'''
if HAS_DOCKER:
return __virtualname__
return False
def _sizeof_fmt(num):
'''
Return disk format size data
'''
for unit in ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB']:
if num < 1024.0:
return '{0:3.1f} {1}'.format(num, unit)
num /= 1024.0
def _set_status(m,
id_=NOTSET,
comment=INVALID_RESPONSE,
status=False,
out=None):
'''
Assign status data to a dict
'''
m['comment'] = comment
m['status'] = status
m['out'] = out
if id_ is not NOTSET:
m['id'] = id_
return m
def _invalid(m, id_=NOTSET, comment=INVALID_RESPONSE, out=None):
'''
Return invalid status
'''
return _set_status(m, status=False, id_=id_, comment=comment, out=out)
def _valid(m, id_=NOTSET, comment=VALID_RESPONSE, out=None):
'''
Return valid status
'''
return _set_status(m, status=True, id_=id_, comment=comment, out=out)
def _get_client(version=None, timeout=None):
'''
Get a connection to a docker API (socket or URL)
based on config.get mechanism (pillar -> grains)
By default it will use the base docker-py defaults which
at the time of writing are using the local socket and
the 1.4 API
Set those keys in your configuration tree somehow:
- docker.url: URL to the docker service
- docker.version: API version to use
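    For example (values are illustrative), in the minion config or pillar:
    .. code-block:: yaml
        docker.url: unix://var/run/docker.sock
        docker.version: '1.12'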
'''
kwargs = {}
get = __salt__['config.get']
for key, val in (('base_url', 'docker.url'),
('version', 'docker.version')):
param = get(val, NOTSET)
if param is not NOTSET:
kwargs[key] = param
if timeout is not None:
# make sure we override default timeout of docker-py
# only if defined by user.
kwargs['timeout'] = timeout
if 'base_url' not in kwargs and 'DOCKER_HOST' in os.environ:
# Check if the DOCKER_HOST environment variable has been set
kwargs['base_url'] = os.environ.get('DOCKER_HOST')
client = docker.Client(**kwargs)
if not version:
# set version that match docker daemon
client._version = client.version()['ApiVersion']
# try to authenticate the client using credentials
# found in pillars
registry_auth_config = __pillar__.get('docker-registries', {})
for key, data in six.iteritems(__pillar__):
if key.endswith('-docker-registries'):
registry_auth_config.update(data)
for registry, creds in six.iteritems(registry_auth_config):
client.login(creds['username'], password=creds['password'],
email=creds.get('email'), registry=registry)
return client
def _get_image_infos(image):
'''
Verify that the image exists
We will try to resolve either by:
- name
- image_id
- tag
image
Image Name / Image Id / Image Tag
Returns the image id
'''
status = base_status.copy()
client = _get_client()
try:
infos = client.inspect_image(image)
if infos:
_valid(status,
id_=infos['Id'],
out=infos,
comment='found')
except Exception:
pass
if not status['id']:
_invalid(status)
raise CommandExecutionError(
'ImageID {0!r} could not be resolved to '
'an existing Image'.format(image)
)
return status['out']
def _get_container_infos(container):
'''
Get container infos
We will try to resolve either by:
- the mapping grain->docker id or directly
- dockerid
container
Image Id / grain name
'''
status = base_status.copy()
client = _get_client()
try:
container_info = client.inspect_container(container)
if container_info:
_valid(status,
id_=container_info['Id'],
out=container_info)
except Exception:
pass
if not status['id']:
raise CommandExecutionError(
'Container_id {0} could not be resolved to '
'an existing container'.format(
container)
)
if 'id' not in status['out'] and 'Id' in status['out']:
status['out']['id'] = status['out']['Id']
return status['out']
def get_containers(all=True,
trunc=False,
since=None,
before=None,
limit=-1,
host=False,
inspect=False):
'''
Get a list of mappings representing all containers
all
return all containers, Default is ``True``
trunc
set it to True to have the short ID, Default is ``False``
host
include the Docker host's ipv4 and ipv6 address in return, Default is ``False``
inspect
Get more granular information about each container by running a docker inspect
CLI Example:
.. code-block:: bash
salt '*' docker.get_containers
salt '*' docker.get_containers host=True
salt '*' docker.get_containers host=True inspect=True
'''
client = _get_client()
status = base_status.copy()
if host:
status['host'] = {}
status['host']['interfaces'] = __salt__['network.interfaces']()
containers = client.containers(all=all,
trunc=trunc,
since=since,
before=before,
limit=limit)
# Optionally for each container get more granular information from them
# by inspecting the container
if inspect:
for container in containers:
container_id = container.get('Id')
if container_id:
inspect = _get_container_infos(container_id)
container['detail'] = inspect.copy()
_valid(status, comment='All containers in out', out=containers)
return status
def logs(container):
'''
Return logs for a specified container
container
container id
CLI Example:
.. code-block:: bash
salt '*' docker.logs <container id>
'''
status = base_status.copy()
client = _get_client()
try:
container_logs = client.logs(_get_container_infos(container)['Id'])
_valid(status, id_=container, out=container_logs)
except Exception:
_invalid(status, id_=container, out=traceback.format_exc())
return status
def commit(container,
repository=None,
tag=None,
message=None,
author=None,
conf=None):
'''
Commit a container (promotes it to an image)
container
container id
repository
repository/image to commit to
tag
tag of the image (Optional)
message
commit message (Optional)
author
author name (Optional)
conf
conf (Optional)
CLI Example:
.. code-block:: bash
salt '*' docker.commit <container id>
'''
status = base_status.copy()
client = _get_client()
try:
container = _get_container_infos(container)['Id']
commit_info = client.commit(
container,
repository=repository,
tag=tag,
message=message,
author=author,
conf=conf)
found = False
for k in ('Id', 'id', 'ID'):
if k in commit_info:
found = True
image_id = commit_info[k]
if not found:
raise Exception('Invalid commit return')
image = _get_image_infos(image_id)['Id']
comment = 'Image {0} created from {1}'.format(image, container)
_valid(status, id_=image, out=commit_info, comment=comment)
except Exception:
_invalid(status, id_=container, out=traceback.format_exc())
return status
def diff(container):
'''
Get container diffs
container
container id
CLI Example:
.. code-block:: bash
salt '*' docker.diff <container id>
'''
status = base_status.copy()
client = _get_client()
try:
container_diff = client.diff(_get_container_infos(container)['Id'])
_valid(status, id_=container, out=container_diff)
except Exception:
_invalid(status, id_=container, out=traceback.format_exc())
return status
def export(container, path):
'''
Export a container to a file
container
container id
path
path to which file is to be exported
CLI Example:
.. code-block:: bash
salt '*' docker.export <container id>
'''
    status = base_status.copy()
    client = _get_client()
    try:
        ppath = os.path.abspath(path)
        with salt.utils.fopen(ppath, 'w') as fic:
            response = client.export(_get_container_infos(container)['Id'])
byte = response.read(4096)
fic.write(byte)
while byte != '':
# Do stuff with byte.
byte = response.read(4096)
fic.write(byte)
fic.flush()
_valid(status,
id_=container, out=ppath,
comment='Exported to {0}'.format(ppath))
except Exception:
_invalid(status, id_=container, out=traceback.format_exc())
return status
def create_container(image,
command=None,
hostname=None,
user=None,
detach=True,
stdin_open=False,
tty=False,
mem_limit=0,
ports=None,
environment=None,
dns=None,
volumes=None,
volumes_from=None,
name=None,
cpu_shares=None,
cpuset=None,
binds=None):
'''
Create a new container
image
image to create the container from
command
command to execute while starting
hostname
hostname of the container
user
user to run docker as
detach
daemon mode, Default is ``True``
environment
environment variable mapping ``({'foo':'BAR'})``
ports
port redirections ``({'222': {}})``
volumes
list of volume mappings in either local volume, bound volume, or read-only
bound volume form::
(['/var/lib/mysql/', '/usr/local/etc/ssl:/etc/ssl', '/etc/passwd:/etc/passwd:ro'])
binds
complete dictionary of bound volume mappings::
{ '/usr/local/etc/ssl/certs/internal.crt': {
'bind': '/etc/ssl/certs/com.example.internal.crt',
'ro': True
},
'/var/lib/mysql': {
'bind': '/var/lib/mysql/',
'ro': False
}
}
This dictionary is suitable for feeding directly into the Docker API, and all
keys are required.
(see http://docker-py.readthedocs.org/en/latest/volumes/)
tty
attach ttys, Default is ``False``
stdin_open
let stdin open, Default is ``False``
name
name given to container
cpu_shares
CPU shares (relative weight)
cpuset
CPUs in which to allow execution ('0-3' or '0,1')
CLI Example:
.. code-block:: bash
salt '*' docker.create_container o/ubuntu volumes="['/s','/m:/f']"
'''
log.trace("modules.dockerio.create_container() called for image " + image)
status = base_status.copy()
client = _get_client()
# In order to permit specification of bind volumes in the volumes field,
# we'll look through it for bind-style specs and move them. This is purely
# for CLI convenience and backwards-compatibility, as states.dockerio
# should parse volumes before this, and the binds argument duplicates this.
# N.B. this duplicates code in states.dockerio._parse_volumes()
    if isinstance(volumes, list):
        if binds is None:
            binds = {}
        # iterate over a copy so that bind-style entries can be removed from
        # the original volumes list while looping
        for volume in list(volumes):
            if ':' in volume:
                volspec = volume.split(':')
source = volspec[0]
target = volspec[1]
ro = False
try:
if len(volspec) > 2:
ro = volspec[2] == "ro"
except IndexError:
pass
binds[source] = {'bind': target, 'ro': ro}
volumes.remove(volume)
try:
if salt.utils.version_cmp(client.version()['ApiVersion'], '1.18') == 1:
container_info = client.create_container(
image=image,
command=command,
hostname=hostname,
user=user,
detach=detach,
stdin_open=stdin_open,
tty=tty,
ports=ports,
environment=environment,
dns=dns,
volumes=volumes,
volumes_from=volumes_from,
name=name,
cpu_shares=cpu_shares,
cpuset=cpuset,
host_config=docker.utils.create_host_config(binds=binds,
mem_limit=mem_limit)
)
else:
container_info = client.create_container(
image=image,
command=command,
hostname=hostname,
user=user,
detach=detach,
stdin_open=stdin_open,
tty=tty,
mem_limit=mem_limit,
ports=ports,
environment=environment,
dns=dns,
volumes=volumes,
volumes_from=volumes_from,
name=name,
cpu_shares=cpu_shares,
cpuset=cpuset,
host_config=docker.utils.create_host_config(binds=binds)
)
log.trace("docker.client.create_container returned: " + str(container_info))
container = container_info['Id']
callback = _valid
comment = 'Container created'
out = {
'info': _get_container_infos(container),
'out': container_info
}
__salt__['mine.send']('dockerng.ps', verbose=True, all=True, host=True)
return callback(status, id_=container, comment=comment, out=out)
except Exception as e:
_invalid(status, id_=image, out=traceback.format_exc())
raise e
__salt__['mine.send']('dockerng.ps', verbose=True, all=True, host=True)
return status
def version():
'''
Get docker version
CLI Example:
.. code-block:: bash
salt '*' docker.version
'''
status = base_status.copy()
client = _get_client()
try:
docker_version = client.version()
_valid(status, out=docker_version)
except Exception:
_invalid(status, out=traceback.format_exc())
return status
def info():
'''
Get the version information about docker. This is similar to ``docker info`` command
CLI Example:
.. code-block:: bash
salt '*' docker.info
'''
status = base_status.copy()
client = _get_client()
try:
version_info = client.info()
_valid(status, out=version_info)
except Exception:
_invalid(status, out=traceback.format_exc())
return status
def port(container, private_port):
'''
    Private port mapping allocation information. This method is broken on the
    docker-py side; use the result of inspect to work out port allocation
    instead.
container
container id
private_port
private port on the container to query for
CLI Example:
.. code-block:: bash
salt '*' docker.port <container id> <private port>
'''
status = base_status.copy()
client = _get_client()
try:
port_info = client.port(
_get_container_infos(container)['Id'],
private_port)
_valid(status, id_=container, out=port_info)
except Exception:
_invalid(status, id_=container, out=traceback.format_exc())
return status
def stop(container, timeout=10):
'''
Stop a running container
container
container id
timeout
timeout for container to exit gracefully before killing it, Default is ``10`` seconds
CLI Example:
.. code-block:: bash
salt '*' docker.stop <container id> [timeout=20]
'''
client = _get_client()
status = base_status.copy()
try:
dcontainer = _get_container_infos(container)['Id']
if is_running(dcontainer):
client.stop(dcontainer, timeout=timeout)
if not is_running(dcontainer):
_valid(
status,
comment='Container {0} was stopped'.format(
container),
id_=container)
else:
_invalid(status)
else:
_valid(status,
comment='Container {0} was already stopped'.format(
container),
id_=container)
except Exception:
_invalid(status, id_=container, out=traceback.format_exc(),
comment=(
'An exception occurred while stopping '
'your container {0}').format(container))
__salt__['mine.send']('dockerng.ps', verbose=True, all=True, host=True)
return status
def kill(container, signal=None):
'''
Kill a running container
container
container id
signal
signal to send
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' docker.kill <container id>
'''
client = _get_client()
status = base_status.copy()
try:
dcontainer = _get_container_infos(container)['Id']
if is_running(dcontainer):
client.kill(dcontainer, signal=signal)
if signal:
# no need to check if container is running
# because some signals might not stop the container.
_valid(status,
comment='Kill signal {0!r} successfully'
' sent to the container {1!r}'.format(signal, container),
id_=container)
else:
if not is_running(dcontainer):
_valid(status,
comment='Container {0} was killed'.format(container),
id_=container)
else:
_invalid(status,
comment='Container {0} was not killed'.format(
container))
else:
_valid(status,
comment='Container {0} was already stopped'.format(
container),
id_=container)
except Exception:
_invalid(status,
id_=container,
out=traceback.format_exc(),
comment=(
'An exception occurred while killing '
'your container {0}').format(container))
__salt__['mine.send']('dockerng.ps', verbose=True, all=True, host=True)
return status
def restart(container, timeout=10):
'''
Restart a running container
container
container id
timeout
timeout for container to exit gracefully before killing it, Default is ``10`` seconds
CLI Example:
.. code-block:: bash
salt '*' docker.restart <container id> [timeout=20]
'''
client = _get_client()
status = base_status.copy()
try:
dcontainer = _get_container_infos(container)['Id']
client.restart(dcontainer, timeout=timeout)
if is_running(dcontainer):
_valid(status,
comment='Container {0} was restarted'.format(container),
id_=container)
else:
_invalid(status)
except Exception:
_invalid(status, id_=container, out=traceback.format_exc(),
comment=(
'An exception occurred while restarting '
'your container {0}').format(container))
__salt__['mine.send']('dockerng.ps', verbose=True, all=True, host=True)
return status
def start(container,
binds=None,
port_bindings=None,
lxc_conf=None,
publish_all_ports=None,
links=None,
privileged=False,
dns=None,
volumes_from=None,
network_mode=None,
restart_policy=None,
cap_add=None,
cap_drop=None):
'''
Start the specified container
container
container id
CLI Example:
.. code-block:: bash
salt '*' docker.start <container id>
'''
if binds:
if not isinstance(binds, dict):
raise SaltInvocationError('binds must be formatted as a dictionary')
client = _get_client()
status = base_status.copy()
try:
dcontainer = _get_container_infos(container)['Id']
if not is_running(container):
bindings = None
if port_bindings is not None:
try:
bindings = {}
for key, val in six.iteritems(port_bindings):
bindings[key] = (val.get('HostIp', ''), val['HostPort'])
except AttributeError:
raise SaltInvocationError(
'port_bindings must be formatted as a dictionary of '
'dictionaries'
)
client.start(dcontainer,
binds=binds,
port_bindings=bindings,
lxc_conf=lxc_conf,
publish_all_ports=publish_all_ports,
links=links,
privileged=privileged,
dns=dns,
volumes_from=volumes_from,
network_mode=network_mode,
restart_policy=restart_policy,
cap_add=cap_add,
cap_drop=cap_drop)
if is_running(dcontainer):
_valid(status,
comment='Container {0} was started'.format(container),
id_=container)
else:
_invalid(status)
else:
_valid(status,
comment='Container {0} was already started'.format(container),
id_=container)
except Exception:
_invalid(status,
id_=container,
out=traceback.format_exc(),
comment=(
'An exception occurred while starting '
'your container {0}').format(container))
__salt__['mine.send']('dockerng.ps', verbose=True, all=True, host=True)
return status
def wait(container):
'''
Wait for a container to exit gracefully
container
container id
CLI Example:
.. code-block:: bash
salt '*' docker.wait <container id>
'''
client = _get_client()
status = base_status.copy()
try:
dcontainer = _get_container_infos(container)['Id']
if is_running(dcontainer):
client.wait(dcontainer)
if not is_running(container):
_valid(status,
id_=container,
comment='Container waited for stop')
else:
_invalid(status)
else:
_valid(status,
comment='Container {0} was already stopped'.format(container),
id_=container)
except Exception:
_invalid(status, id_=container, out=traceback.format_exc(),
comment=(
'An exception occurred while waiting '
'your container {0}').format(container))
__salt__['mine.send']('dockerng.ps', verbose=True, all=True, host=True)
return status
def exists(container):
'''
Check if a given container exists
container
container id
Returns ``True`` if container exists otherwise returns ``False``
CLI Example:
.. code-block:: bash
salt '*' docker.exists <container id>
'''
try:
_get_container_infos(container)
return True
except Exception:
return False
def is_running(container):
'''
Check if the specified container is running
container
container id
Returns ``True`` if container is running otherwise returns ``False``
CLI Example:
.. code-block:: bash
salt '*' docker.is_running <container id>
'''
try:
infos = _get_container_infos(container)
return infos.get('State', {}).get('Running')
except Exception:
return False
def remove_container(container, force=False, v=False):
'''
Remove a container from a docker installation
container
container id
force
remove a running container, Default is ``False``
v
remove the volumes associated to the container, Default is ``False``
CLI Example:
.. code-block:: bash
salt '*' docker.remove_container <container id> [force=True|False] [v=True|False]
'''
client = _get_client()
status = base_status.copy()
status['id'] = container
dcontainer = None
try:
dcontainer = _get_container_infos(container)['Id']
if is_running(dcontainer):
if not force:
_invalid(status, id_=container, out=None,
comment=(
'Container {0} is running, '
'won\'t remove it').format(container))
__salt__['mine.send']('dockerng.ps', verbose=True, all=True, host=True)
return status
else:
kill(dcontainer)
client.remove_container(dcontainer, v=v)
try:
_get_container_infos(dcontainer)
_invalid(status,
comment='Container was not removed: {0}'.format(container))
except Exception:
status['status'] = True
status['comment'] = 'Container {0} was removed'.format(container)
except Exception:
_invalid(status, id_=container, out=traceback.format_exc())
__salt__['mine.send']('dockerng.ps', verbose=True, all=True, host=True)
return status
def top(container):
'''
Run the docker top command on a specific container
container
container id
CLI Example:
.. code-block:: bash
salt '*' docker.top <container id>
'''
client = _get_client()
status = base_status.copy()
try:
dcontainer = _get_container_infos(container)['Id']
if is_running(dcontainer):
ret = client.top(dcontainer)
if ret:
ret['mprocesses'] = []
titles = ret['Titles']
for i in ret['Processes']:
data = salt.utils.odict.OrderedDict()
for k, j in enumerate(titles):
data[j] = i[k]
ret['mprocesses'].append(data)
_valid(status,
out=ret,
id_=container,
comment='Current top for container')
if not status['id']:
_invalid(status)
else:
_invalid(status,
comment='Container {0} is not running'.format(container))
except Exception:
_invalid(status, id_=container, out=traceback.format_exc())
return status
def inspect_container(container):
'''
Get container information. This is similar to ``docker inspect`` command but only for containers
container
container id
CLI Example:
.. code-block:: bash
salt '*' docker.inspect_container <container id>
'''
status = base_status.copy()
status['id'] = container
try:
infos = _get_container_infos(container)
_valid(status, id_=container, out=infos)
except Exception:
_invalid(status, id_=container, out=traceback.format_exc(),
                 comment='Container does not exist: {0}'.format(container))
return status
def login(url=None, username=None, password=None, email=None):
'''
Wrapper to the ``docker.py`` login method (does not do much yet)
url
registry url to authenticate to
username
username to authenticate
password
password to authenticate
email
email to authenticate
CLI Example:
.. code-block:: bash
salt '*' docker.login <url> <username> <password> <email>
'''
client = _get_client()
return client.login(username, password, email, url)
def search(term):
'''
Search for an image on the registry
term
search keyword
CLI Example:
.. code-block:: bash
salt '*' docker.search <term>
'''
client = _get_client()
status = base_status.copy()
ret = client.search(term)
if ret:
_valid(status, out=ret, id_=term)
else:
_invalid(status)
return status
def _create_image_assemble_error_status(status, ret, image_logs):
'''
Given input in this form::
[{u'error': u'Get file:///r.tar.gz: unsupported protocol scheme "file"',
u'errorDetail': {
u'message':u'Get file:///r.tar.gz:unsupported protocol scheme "file"'}},
{u'status': u'Downloading from file:///r.tar.gz'}]
'''
comment = 'An error occurred while importing your image'
out = None
is_invalid = True
status['out'] = ''
try:
is_invalid = False
status['out'] += '\n' + ret
for err_log in image_logs:
if isinstance(err_log, dict):
if 'errorDetail' in err_log:
if 'code' in err_log['errorDetail']:
msg = '\n{0}\n{1}: {2}'.format(
err_log['error'],
err_log['errorDetail']['code'],
err_log['errorDetail']['message']
)
else:
msg = '\n{0}\n{1}'.format(
err_log['error'],
err_log['errorDetail']['message'],
)
comment += msg
except Exception:
is_invalid = True
trace = traceback.format_exc()
out = (
'An error occurred while '
'parsing error output:\n{0}'
).format(trace)
if is_invalid:
_invalid(status, out=out, comment=comment)
return status
def import_image(src, repo, tag=None):
'''
Import content from a local tarball or a URL to a docker image
src
content to import (URL or absolute path to a tarball)
repo
repository to import to
tag
set tag of the image (Optional)
CLI Example:
.. code-block:: bash
salt '*' docker.import_image <src> <repo> [tag]
'''
client = _get_client()
status = base_status.copy()
try:
ret = client.import_image(src, repository=repo, tag=tag)
if ret:
image_logs, _info = _parse_image_multilogs_string(ret)
_create_image_assemble_error_status(status, ret, image_logs)
if status['status'] is not False:
infos = _get_image_infos(image_logs[0]['status'])
_valid(status,
comment='Image {0} was created'.format(infos['Id']),
id_=infos['Id'],
out=ret)
else:
_invalid(status)
except Exception:
_invalid(status, out=traceback.format_exc())
return status
def tag(image, repository, tag=None, force=False):
'''
Tag an image into a repository
image
name of image
repository
name of repository
tag
tag to apply (Optional)
force
force apply tag, Default is ``False``
CLI Example:
.. code-block:: bash
salt '*' docker.tag <image> <repository> [tag] [force=True|False]
'''
client = _get_client()
status = base_status.copy()
try:
dimage = _get_image_infos(image)['Id']
ret = client.tag(dimage, repository, tag=tag, force=force)
except Exception:
_invalid(status,
out=traceback.format_exc(),
                 comment='Cannot tag image {0} {1}{2}'.format(
image, repository,
tag and (':' + tag) or '').strip())
return status
if ret:
_valid(status,
id_=image,
comment='Image was tagged: {0}{1}'.format(
repository,
tag and (':' + tag) or '').strip())
else:
_invalid(status)
return status
def get_images(name=None, quiet=False, all=True):
'''
List docker images
name
repository name
quiet
only show image id, Default is ``False``
all
show all images, Default is ``True``
CLI Example:
.. code-block:: bash
salt '*' docker.get_images <name> [quiet=True|False] [all=True|False]
'''
client = _get_client()
status = base_status.copy()
try:
infos = client.images(name=name, quiet=quiet, all=all)
for i in range(len(infos)):
inf = infos[i]
try:
inf['Human_Size'] = _sizeof_fmt(int(inf['Size']))
except ValueError:
pass
try:
ts = int(inf['Created'])
dts = datetime.datetime.fromtimestamp(ts)
inf['Human_IsoCreated'] = dts.isoformat()
inf['Human_Created'] = dts.strftime(
'%Y-%m-%d %H:%M:%S')
except Exception:
pass
try:
inf['Human_VirtualSize'] = (
_sizeof_fmt(int(inf['VirtualSize'])))
except ValueError:
pass
_valid(status, out=infos)
except Exception:
_invalid(status, out=traceback.format_exc())
return status
def build(path=None,
tag=None,
quiet=False,
fileobj=None,
nocache=False,
rm=True,
timeout=None):
'''
    Build a docker image from a dockerfile or a URL
path
url/branch/docker_dir or path on the filesystem to the dockerfile
tag
tag of the image
quiet
quiet mode, Default is ``False``
nocache
do not use docker image cache, Default is ``False``
rm
remove intermediate commits, Default is ``True``
timeout
timeout value before aborting (in seconds)
CLI Example:
.. code-block:: bash
salt '*' docker.build vieux/apache
salt '*' docker.build github.com/creack/docker-firefox
'''
client = _get_client(timeout=timeout)
status = base_status.copy()
if path or fileobj:
try:
ret = client.build(path=path,
tag=tag,
quiet=quiet,
fileobj=fileobj,
rm=rm,
nocache=nocache)
if isinstance(ret, types.GeneratorType):
message = json.loads(list(ret)[-1])
if 'stream' in message:
if 'Successfully built' in message['stream']:
_valid(status, out=message['stream'])
if 'errorDetail' in message:
_invalid(status, out=message['errorDetail']['message'])
elif isinstance(ret, tuple):
id_, out = ret[0], ret[1]
if id_:
_valid(status, id_=id_, out=out, comment='Image built')
else:
_invalid(status, id_=id_, out=out)
except Exception:
_invalid(status,
out=traceback.format_exc(),
comment='Unexpected error while building an image')
return status
return status
def remove_image(image):
'''
Remove an image from a system.
image
name of image
CLI Example:
.. code-block:: bash
salt '*' docker.remove_image <image>
'''
client = _get_client()
status = base_status.copy()
# will raise an error if no deletion
try:
infos = _get_image_infos(image)
if infos:
status['id'] = infos['Id']
try:
client.remove_image(infos['Id'])
except Exception:
_invalid(status,
id_=image,
out=traceback.format_exc(),
comment='Image could not be deleted')
try:
infos = _get_image_infos(image)
_invalid(status,
comment=(
'Image marked to be deleted but not deleted yet'))
except Exception:
_valid(status, id_=image, comment='Image deleted')
else:
_invalid(status)
except Exception:
_invalid(status,
out=traceback.format_exc(),
comment='Image does not exist: {0}'.format(image))
return status
def inspect_image(image):
'''
Inspect the status of an image and return relative data. This is similar to
``docker inspect`` command but only for images
image
name of the image
CLI Example:
.. code-block:: bash
salt '*' docker.inspect_image <image>
'''
status = base_status.copy()
try:
infos = _get_image_infos(image)
try:
for k in ['Size']:
infos[
'Human_{0}'.format(k)
] = _sizeof_fmt(int(infos[k]))
except Exception:
pass
_valid(status, id_=image, out=infos)
except Exception:
_invalid(status, id_=image, out=traceback.format_exc(),
comment='Image does not exist')
return status
def _parse_image_multilogs_string(ret):
'''
Parse image log strings into grokable data
'''
image_logs, infos = [], None
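    # docker-py returns pull/push progress as a stream of concatenated JSON
    # objects rather than a single document, so walk the string and track the
    # brace depth; every time it returns to zero one complete object has been
    # collected and can be decoded with json.loads().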
if ret and ret.strip().startswith('{') and ret.strip().endswith('}'):
pushd = 0
buf = ''
for char in ret:
buf += char
if char == '{':
pushd += 1
if char == '}':
pushd -= 1
if pushd == 0:
try:
buf = json.loads(buf)
except Exception:
pass
else:
image_logs.append(buf)
buf = ''
image_logs.reverse()
    # Valid states when pulling an image from the docker registry
valid_states = [
'Download complete',
'Already exists',
]
# search last layer grabbed
for ilog in image_logs:
if isinstance(ilog, dict):
if ilog.get('status') in valid_states and ilog.get('id'):
infos = _get_image_infos(ilog['id'])
break
return image_logs, infos
def _pull_assemble_error_status(status, ret, logs):
'''
Given input in this form::
u'{"status":"Pulling repository foo/ubuntubox"}:
"image (latest) from foo/ ...
rogress":"complete","id":"2c80228370c9"}'
    construct something like this (loading the JSON data where possible)::
[u'{"status":"Pulling repository foo/ubuntubox"',
{"status":"Download","progress":"complete","id":"2c80228370c9"}]
'''
comment = 'An error occurred pulling your image'
out = ''
try:
out = '\n' + ret
for err_log in logs:
if isinstance(err_log, dict):
if 'errorDetail' in err_log:
if 'code' in err_log['errorDetail']:
msg = '\n{0}\n{1}: {2}'.format(
err_log['error'],
err_log['errorDetail']['code'],
err_log['errorDetail']['message']
)
else:
msg = '\n{0}\n{1}'.format(
err_log['error'],
err_log['errorDetail']['message'],
)
comment += msg
except Exception:
out = traceback.format_exc()
_invalid(status, out=out, comment=comment)
return status
def pull(repo, tag=None, insecure_registry=False):
'''
Pulls an image from any registry. See documentation at top of this page to
configure authenticated access
repo
name of repository
tag
specific tag to pull (Optional)
insecure_registry
set as ``True`` to use insecure (non HTTPS) registry. Default is ``False``
(only available if using docker-py >= 0.5.0)
CLI Example:
.. code-block:: bash
salt '*' docker.pull <repository> [tag]
'''
client = _get_client()
status = base_status.copy()
try:
kwargs = {'tag': tag}
# if docker-py version is greater than 0.5.0 use the
# insecure_registry parameter
if salt.utils.compare_versions(ver1=docker.__version__,
oper='>=',
ver2='0.5.0'):
kwargs['insecure_registry'] = insecure_registry
ret = client.pull(repo, **kwargs)
if ret:
image_logs, infos = _parse_image_multilogs_string(ret)
if infos and infos.get('Id', None):
repotag = repo
if tag:
repotag = '{0}:{1}'.format(repo, tag)
_valid(status,
out=image_logs if image_logs else ret,
id_=infos['Id'],
comment='Image {0} was pulled ({1})'.format(
repotag, infos['Id']))
else:
_pull_assemble_error_status(status, ret, image_logs)
else:
_invalid(status)
except Exception:
_invalid(status, id_=repo, out=traceback.format_exc())
return status
def _push_assemble_error_status(status, ret, logs):
'''
Given input in this form::
u'{"status":"Pulling repository foo/ubuntubox"}:
"image (latest) from foo/ ...
rogress":"complete","id":"2c80228370c9"}'
    construct something like this (loading the JSON data where possible)::
[u'{"status":"Pulling repository foo/ubuntubox"',
{"status":"Download","progress":"complete","id":"2c80228370c9"}]
'''
comment = 'An error occurred pushing your image'
status['out'] = ''
try:
status['out'] += '\n' + ret
for err_log in logs:
if isinstance(err_log, dict):
if 'errorDetail' in err_log:
if 'code' in err_log['errorDetail']:
msg = '\n{0}\n{1}: {2}'.format(
err_log['error'],
err_log['errorDetail']['code'],
err_log['errorDetail']['message']
)
else:
msg = '\n{0}\n{1}'.format(
err_log['error'],
err_log['errorDetail']['message'],
)
comment += msg
except Exception:
trace = traceback.format_exc()
status['out'] = (
'An error occurred while '
'parsing error output:\n{0}'
).format(trace)
_invalid(status, comment=comment)
return status
def push(repo, tag=None, quiet=False, insecure_registry=False):
'''
Pushes an image to any registry. See documentation at top of this page to
configure authenticated access
repo
name of repository
tag
specific tag to push (Optional)
quiet
set as ``True`` to quiet output, Default is ``False``
insecure_registry
set as ``True`` to use insecure (non HTTPS) registry. Default is ``False``
(only available if using docker-py >= 0.5.0)
CLI Example:
.. code-block:: bash
salt '*' docker.push <repository> [tag] [quiet=True|False]
'''
client = _get_client()
status = base_status.copy()
registry, repo_name = docker.auth.resolve_repository_name(repo)
try:
kwargs = {'tag': tag}
# if docker-py version is greater than 0.5.0 use the
# insecure_registry parameter
if salt.utils.compare_versions(ver1=docker.__version__,
oper='>=',
ver2='0.5.0'):
kwargs['insecure_registry'] = insecure_registry
ret = client.push(repo, **kwargs)
if ret:
image_logs, infos = _parse_image_multilogs_string(ret)
if image_logs:
repotag = repo_name
if tag:
repotag = '{0}:{1}'.format(repo, tag)
if not quiet:
status['out'] = image_logs
else:
status['out'] = None
laststatus = image_logs[2].get('status', None)
if laststatus and (
('already pushed' in laststatus)
or ('Pushing tags for rev' in laststatus)
or ('Pushing tag for rev' in laststatus)
):
status['status'] = True
status['id'] = _get_image_infos(repo)['Id']
status['comment'] = 'Image {0}({1}) was pushed'.format(
repotag, status['id'])
else:
_push_assemble_error_status(status, ret, image_logs)
else:
status['out'] = ret
_push_assemble_error_status(status, ret, image_logs)
else:
_invalid(status)
except Exception:
_invalid(status, id_=repo, out=traceback.format_exc())
return status
def _run_wrapper(status, container, func, cmd, *args, **kwargs):
'''
Wrapper to a cmdmod function
Idea is to prefix the call to cmdrun with the relevant driver to
execute inside a container context
.. note::
Only lxc and native drivers are implemented.
status
status object
container
container id to execute in
func
cmd function to execute
cmd
command to execute in the container
'''
client = _get_client()
# For old version of docker. lxc was the only supported driver.
# We can safely hardcode it
driver = client.info().get('ExecutionDriver', 'lxc-')
container_info = _get_container_infos(container)
container_id = container_info['Id']
if driver.startswith('lxc-'):
full_cmd = 'lxc-attach -n {0} -- {1}'.format(container_id, cmd)
elif driver.startswith('native-'):
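        # The native (libcontainer) execution driver has no lxc-attach
        # equivalent, so enter the container's namespaces directly with
        # nsenter, targeting the PID of the container's init process.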
if HAS_NSENTER:
# http://jpetazzo.github.io/2014/03/23/lxc-attach-nsinit-nsenter-docker-0-9/
container_pid = container_info['State']['Pid']
if container_pid == 0:
_invalid(status, id_=container,
comment='Container is not running')
return status
full_cmd = (
'nsenter --target {pid} --mount --uts --ipc --net --pid'
' -- {cmd}'.format(pid=container_pid, cmd=cmd)
)
else:
raise CommandExecutionError(
'nsenter is not installed on the minion, cannot run command'
)
else:
raise NotImplementedError(
'Unknown docker ExecutionDriver {0!r}. Or didn\'t find command'
' to attach to the container'.format(driver))
# now execute the command
comment = 'Executed {0}'.format(full_cmd)
try:
ret = __salt__[func](full_cmd, *args, **kwargs)
if ((isinstance(ret, dict) and ('retcode' in ret) and (ret['retcode'] != 0))
or (func == 'cmd.retcode' and ret != 0)):
_invalid(status, id_=container, out=ret, comment=comment)
else:
_valid(status, id_=container, out=ret, comment=comment)
except Exception:
_invalid(status, id_=container, comment=comment, out=traceback.format_exc())
return status
def load(imagepath):
'''
    Load the specified file at imagepath, generated by a docker save command,
    into docker
e.g. `docker load < imagepath`
imagepath
imagepath to docker tar file
CLI Example:
.. code-block:: bash
salt '*' docker.load /path/to/image
'''
status = base_status.copy()
if os.path.isfile(imagepath):
try:
dockercmd = ['docker', 'load', '-i', imagepath]
ret = __salt__['cmd.run'](dockercmd, python_shell=False)
if isinstance(ret, dict) and ('retcode' in ret) and (ret['retcode'] != 0):
return _invalid(status, id_=None,
out=ret,
comment='Command to load image {0} failed.'.format(imagepath))
_valid(status, id_=None, out=ret, comment='Image load success')
except Exception:
_invalid(status, id_=None,
comment="Image not loaded.",
out=traceback.format_exc())
else:
_invalid(status, id_=None,
comment='Image file {0} could not be found.'.format(imagepath),
                 out=None)
return status
def save(image, filename):
'''
.. versionadded:: 2015.5.0
Save the specified image to filename from docker
e.g. `docker save image > filename`
image
name of image
filename
The filename of the saved docker image
CLI Example:
.. code-block:: bash
salt '*' docker.save arch_image /path/to/save/image
'''
status = base_status.copy()
ok = False
try:
_info = _get_image_infos(image)
ok = True
except Exception:
_invalid(status, id_=image,
comment="docker image {0} could not be found.".format(image),
out=traceback.format_exc())
if ok:
try:
dockercmd = ['docker', 'save', '-o', filename, image]
ret = __salt__['cmd.run'](dockercmd)
if isinstance(ret, dict) and ('retcode' in ret) and (ret['retcode'] != 0):
return _invalid(status,
id_=image,
out=ret,
comment='Command to save image {0} to {1} failed.'.format(image, filename))
_valid(status, id_=image, out=ret, comment='Image save success')
except Exception:
_invalid(status, id_=image, comment="Image not saved.", out=traceback.format_exc())
return status
def run(container, cmd):
'''
Wrapper for :py:func:`cmdmod.run<salt.modules.cmdmod.run>` inside a container context
container
container id (or grain)
cmd
command to execute
.. note::
The return is a bit different as we use the docker struct.
Output of the command is in 'out' and result is always ``True``.
.. warning::
Be advised that this function allows for raw shell access to the named
container! If allowing users to execute this directly it may allow more
rights than intended!
CLI Example:
.. code-block:: bash
salt '*' docker.run <container id> 'ls -l /etc'
'''
status = base_status.copy()
return _run_wrapper(
status, container, 'cmd.run', cmd)
def run_all(container, cmd):
'''
Wrapper for :py:func:`cmdmod.run_all<salt.modules.cmdmod.run_all>` inside a container context
container
container id (or grain)
cmd
command to execute
.. note::
The return is a bit different as we use the docker struct.
Output of the command is in 'out' and result is ``False`` if
command failed to execute.
.. warning::
Be advised that this function allows for raw shell access to the named
container! If allowing users to execute this directly it may allow more
rights than intended!
CLI Example:
.. code-block:: bash
salt '*' docker.run_all <container id> 'ls -l /etc'
'''
status = base_status.copy()
return _run_wrapper(
status, container, 'cmd.run_all', cmd)
def run_stderr(container, cmd):
'''
Wrapper for :py:func:`cmdmod.run_stderr<salt.modules.cmdmod.run_stderr>` inside a container context
container
container id (or grain)
cmd
command to execute
.. note::
The return is a bit different as we use the docker struct.
Output of the command is in 'out' and result is always ``True``.
.. warning::
Be advised that this function allows for raw shell access to the named
container! If allowing users to execute this directly it may allow more
rights than intended!
CLI Example:
.. code-block:: bash
salt '*' docker.run_stderr <container id> 'ls -l /etc'
'''
status = base_status.copy()
return _run_wrapper(
status, container, 'cmd.run_stderr', cmd)
def run_stdout(container, cmd):
'''
Wrapper for :py:func:`cmdmod.run_stdout<salt.modules.cmdmod.run_stdout>` inside a container context
container
container id (or grain)
cmd
command to execute
.. note::
The return is a bit different as we use the docker struct.
Output of the command is in 'out' and result is always ``True``.
.. warning::
Be advised that this function allows for raw shell access to the named
container! If allowing users to execute this directly it may allow more
rights than intended!
CLI Example:
.. code-block:: bash
salt '*' docker.run_stdout <container id> 'ls -l /etc'
'''
status = base_status.copy()
return _run_wrapper(
status, container, 'cmd.run_stdout', cmd)
def retcode(container, cmd):
'''
Wrapper for :py:func:`cmdmod.retcode<salt.modules.cmdmod.retcode>` inside a container context
container
container id (or grain)
cmd
command to execute
.. note::
The return is True or False depending on the commands success.
.. warning::
Be advised that this function allows for raw shell access to the named
container! If allowing users to execute this directly it may allow more
rights than intended!
CLI Example:
.. code-block:: bash
salt '*' docker.retcode <container id> 'ls -l /etc'
'''
status = base_status.copy()
return _run_wrapper(
status, container, 'cmd.retcode', cmd)['status']
def get_container_root(container):
'''
Get the container rootfs path
container
container id or grain
CLI Example:
.. code-block:: bash
salt '*' docker.get_container_root <container id>
'''
default_path = os.path.join(
'/var/lib/docker',
'containers',
_get_container_infos(container)['Id'],
)
default_rootfs = os.path.join(default_path, 'rootfs')
rootfs_re = re.compile(r'^lxc.rootfs\s*=\s*(.*)\s*$', re.U)
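    # With the lxc execution driver the rootfs location may be overridden in
    # the container's config.lxc; scan that file (last matching line wins) and
    # fall back to the default path otherwise.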
    rootfs = default_rootfs
    try:
lxcconfig = os.path.join(default_path, 'config.lxc')
with salt.utils.fopen(lxcconfig) as fhr:
lines = fhr.readlines()
rlines = lines[:]
rlines.reverse()
for rline in rlines:
robj = rootfs_re.search(rline)
if robj:
rootfs = robj.groups()[0]
break
except Exception:
rootfs = default_rootfs
return rootfs
def _script(status,
container,
source,
args=None,
cwd=None,
stdin=None,
runas=None,
shell=cmdmod.DEFAULT_SHELL,
env=None,
template='jinja',
umask=None,
timeout=None,
reset_system_locale=True,
run_func_=None,
no_clean=False,
saltenv='base',
output_loglevel='info',
quiet=False,
**kwargs):
try:
if not run_func_:
run_func_ = run_all
rpath = get_container_root(container)
tpath = os.path.join(rpath, 'tmp')
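        # The script is rendered/cached into the container's /tmp directory via
        # the host-visible rootfs path, then invoked inside the container using
        # the corresponding in-container path.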
if isinstance(env, six.string_types):
salt.utils.warn_until(
'Boron',
'Passing a salt environment should be done using \'saltenv\' '
'not \'env\'. This functionality will be removed in Salt '
'Boron.'
)
# Backwards compatibility
saltenv = env
path = salt.utils.mkstemp(dir=tpath)
if template:
__salt__['cp.get_template'](
source, path, template, saltenv, **kwargs)
else:
fn_ = __salt__['cp.cache_file'](source, saltenv)
if not fn_:
return {'pid': 0,
'retcode': 1,
'stdout': '',
'stderr': '',
'cache_error': True}
shutil.copyfile(fn_, path)
in_path = os.path.join('/', os.path.relpath(path, rpath))
os.chmod(path, 0o755)
command = in_path + ' ' + str(args) if args else in_path
status = run_func_(container,
command,
cwd=cwd,
stdin=stdin,
output_loglevel=output_loglevel,
quiet=quiet,
runas=runas,
shell=shell,
umask=umask,
timeout=timeout,
reset_system_locale=reset_system_locale)
if not no_clean:
os.remove(path)
except Exception:
_invalid(status, id_=container, out=traceback.format_exc())
return status
def script(container,
source,
args=None,
cwd=None,
stdin=None,
runas=None,
shell=cmdmod.DEFAULT_SHELL,
env=None,
template='jinja',
umask=None,
timeout=None,
reset_system_locale=True,
no_clean=False,
saltenv='base'):
'''
Wrapper for :py:func:`cmdmod.script<salt.modules.cmdmod.script>` inside a container context
container
container id (or grain)
additional parameters
See :py:func:`cmd.script <salt.modules.cmdmod.script>`
.. warning::
Be advised that this function allows for raw shell access to the named
container! If allowing users to execute this directly it may allow more
rights than intended!
Download a script from a remote location and execute the script in the container.
The script can be located on the salt master file server or on an HTTP/FTP server.
The script will be executed directly, so it can be written in any available programming
language.
The script can also be formatted as a template, the default is jinja. Arguments for the
script can be specified as well.
CLI Example:
.. code-block:: bash
salt '*' docker.script <container id> salt://docker_script.py
salt '*' docker.script <container id> salt://scripts/runme.sh 'arg1 arg2 "arg 3"'
salt '*' docker.script <container id> salt://scripts/windows_task.ps1 args=' -Input c:\\tmp\\infile.txt' shell='powershell'
A string of standard input can be specified for the command to be run using the stdin
parameter. This can be useful in cases where sensitive information must be read from
standard input:
CLI Example:
.. code-block:: bash
salt '*' docker.script <container id> salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
'''
status = base_status.copy()
if isinstance(env, six.string_types):
salt.utils.warn_until(
'Boron',
'Passing a salt environment should be done using \'saltenv\' '
'not \'env\'. This functionality will be removed in Salt '
'Boron.'
)
# Backwards compatibility
saltenv = env
return _script(status,
container,
source,
args=args,
cwd=cwd,
stdin=stdin,
runas=runas,
shell=shell,
template=template,
umask=umask,
timeout=timeout,
reset_system_locale=reset_system_locale,
no_clean=no_clean,
saltenv=saltenv)
def script_retcode(container,
source,
cwd=None,
stdin=None,
runas=None,
shell=cmdmod.DEFAULT_SHELL,
env=None,
template='jinja',
umask=None,
timeout=None,
reset_system_locale=True,
no_clean=False,
saltenv='base'):
'''
Wrapper for :py:func:`cmdmod.script_retcode<salt.modules.cmdmod.script_retcode>` inside a container context
container
container id (or grain)
additional parameters
See :py:func:`cmd.script_retcode <salt.modules.cmdmod.script_retcode>`
.. warning::
Be advised that this function allows for raw shell access to the named
container! If allowing users to execute this directly it may allow more
rights than intended!
CLI Example:
.. code-block:: bash
salt '*' docker.script_retcode <container id> salt://docker_script.py
'''
if isinstance(env, six.string_types):
salt.utils.warn_until(
'Boron',
'Passing a salt environment should be done using \'saltenv\' '
'not \'env\'. This functionality will be removed in Salt '
'Boron.'
)
# Backwards compatibility
saltenv = env
status = base_status.copy()
return _script(status,
container,
source=source,
cwd=cwd,
stdin=stdin,
runas=runas,
shell=shell,
template=template,
umask=umask,
timeout=timeout,
reset_system_locale=reset_system_locale,
run_func_=retcode,
no_clean=no_clean,
saltenv=saltenv)
| [] | [] | [
"DOCKER_HOST"
] | [] | ["DOCKER_HOST"] | python | 1 | 0 | |
nodehost_test.go | // Copyright 2017-2021 Lei Ni ([email protected]) and other contributors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dragonboat
import (
"bytes"
"context"
"encoding/binary"
"flag"
"fmt"
"io"
"math"
"math/rand"
"os"
"os/exec"
"reflect"
"runtime"
"strconv"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/cockroachdb/errors"
"github.com/lni/goutils/leaktest"
"github.com/lni/goutils/random"
"github.com/lni/goutils/syncutil"
"github.com/stretchr/testify/assert"
"github.com/lni/dragonboat/v3/client"
"github.com/lni/dragonboat/v3/config"
"github.com/lni/dragonboat/v3/internal/fileutil"
"github.com/lni/dragonboat/v3/internal/id"
"github.com/lni/dragonboat/v3/internal/invariants"
"github.com/lni/dragonboat/v3/internal/logdb"
"github.com/lni/dragonboat/v3/internal/rsm"
"github.com/lni/dragonboat/v3/internal/server"
"github.com/lni/dragonboat/v3/internal/settings"
"github.com/lni/dragonboat/v3/internal/tests"
"github.com/lni/dragonboat/v3/internal/transport"
"github.com/lni/dragonboat/v3/internal/vfs"
chantrans "github.com/lni/dragonboat/v3/plugin/chan"
"github.com/lni/dragonboat/v3/raftio"
pb "github.com/lni/dragonboat/v3/raftpb"
sm "github.com/lni/dragonboat/v3/statemachine"
"github.com/lni/dragonboat/v3/tools"
"github.com/lni/dragonboat/v3/tools/upgrade310"
)
const (
defaultTestPort = 26001
testNodeHostID1 = "nhid-12345"
testNodeHostID2 = "nhid-12346"
)
func getTestPort() int {
pv := os.Getenv("DRAGONBOAT_TEST_PORT")
if len(pv) > 0 {
port, err := strconv.Atoi(pv)
if err != nil {
panic(err)
}
return port
}
return defaultTestPort
}
var rttMillisecond uint64
var mu sync.Mutex
var rttValues = []uint64{10, 20, 30, 50, 100, 200, 500}
func getRTTMillisecond(fs vfs.IFS, dir string) uint64 {
mu.Lock()
defer mu.Unlock()
if rttMillisecond > 0 {
return rttMillisecond
}
rttMillisecond = calcRTTMillisecond(fs, dir)
return rttMillisecond
}
func calcRTTMillisecond(fs vfs.IFS, dir string) uint64 {
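	// Estimate a realistic RTT by timing a few small write+fsync cycles on the
	// target filesystem and rounding the average latency down to a predefined
	// bucket in rttValues, so test timeouts scale with actual disk performance.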
testFile := fs.PathJoin(dir, ".dragonboat_test_file_safe_to_delete")
defer func() {
_ = fs.RemoveAll(testFile)
}()
_ = fs.MkdirAll(dir, 0755)
f, err := fs.Create(testFile)
if err != nil {
panic(err)
}
defer func() {
f.Close()
}()
data := make([]byte, 512)
total := uint64(0)
repeat := 5
for i := 0; i < repeat; i++ {
if _, err := f.Write(data); err != nil {
panic(err)
}
start := time.Now()
if err := f.Sync(); err != nil {
panic(err)
}
total += uint64(time.Since(start).Milliseconds())
}
rtt := total / uint64(repeat)
for i := range rttValues {
if rttValues[i] > rtt {
if i == 0 {
return rttValues[0]
}
return rttValues[i-1]
}
}
return rttValues[len(rttValues)-1]
}
// typical proposal timeout
func pto(nh *NodeHost) time.Duration {
rtt := nh.NodeHostConfig().RTTMillisecond
if invariants.Race {
return 5 * time.Second
}
return time.Duration(rtt*45) * time.Millisecond
}
func lpto(nh *NodeHost) time.Duration {
rtt := nh.NodeHostConfig().RTTMillisecond
if invariants.Race {
return 30 * time.Second
}
return time.Duration(rtt*100) * time.Millisecond
}
func getTestExpertConfig(fs vfs.IFS) config.ExpertConfig {
cfg := config.GetDefaultExpertConfig()
cfg.LogDB.Shards = 4
cfg.FS = fs
return cfg
}
func reportLeakedFD(fs vfs.IFS, t *testing.T) {
vfs.ReportLeakedFD(fs, t)
}
func getTestNodeHostConfig(fs vfs.IFS) *config.NodeHostConfig {
cfg := &config.NodeHostConfig{
WALDir: singleNodeHostTestDir,
NodeHostDir: singleNodeHostTestDir,
RTTMillisecond: getRTTMillisecond(fs, singleNodeHostTestDir),
RaftAddress: singleNodeHostTestAddr,
Expert: getTestExpertConfig(fs),
SystemEventListener: &testSysEventListener{},
}
return cfg
}
func getTestConfig() *config.Config {
return &config.Config{
NodeID: 1,
ClusterID: 1,
ElectionRTT: 3,
HeartbeatRTT: 1,
CheckQuorum: true,
}
}
func waitNodeInfoEvent(t *testing.T, f func() []raftio.NodeInfo, count int) {
for i := 0; i < 1000; i++ {
if len(f()) == count {
return
}
time.Sleep(10 * time.Millisecond)
}
t.Fatalf("failed to get node info event")
}
func waitSnapshotInfoEvent(t *testing.T, f func() []raftio.SnapshotInfo, count int) {
for i := 0; i < 1000; i++ {
if len(f()) == count {
return
}
time.Sleep(10 * time.Millisecond)
}
t.Fatalf("failed to get snapshot info event")
}
type testSysEventListener struct {
mu sync.Mutex
nodeHostShuttingdown uint64
nodeUnloaded []raftio.NodeInfo
nodeReady []raftio.NodeInfo
membershipChanged []raftio.NodeInfo
snapshotCreated []raftio.SnapshotInfo
snapshotRecovered []raftio.SnapshotInfo
snapshotReceived []raftio.SnapshotInfo
sendSnapshotStarted []raftio.SnapshotInfo
sendSnapshotCompleted []raftio.SnapshotInfo
snapshotCompacted []raftio.SnapshotInfo
logCompacted []raftio.EntryInfo
logdbCompacted []raftio.EntryInfo
connectionEstablished uint64
}
func copyNodeInfo(info []raftio.NodeInfo) []raftio.NodeInfo {
return append([]raftio.NodeInfo{}, info...)
}
func (t *testSysEventListener) NodeHostShuttingDown() {
t.mu.Lock()
defer t.mu.Unlock()
t.nodeHostShuttingdown++
}
func (t *testSysEventListener) NodeReady(info raftio.NodeInfo) {
t.mu.Lock()
defer t.mu.Unlock()
t.nodeReady = append(t.nodeReady, info)
}
func (t *testSysEventListener) getNodeReady() []raftio.NodeInfo {
t.mu.Lock()
defer t.mu.Unlock()
return copyNodeInfo(t.nodeReady)
}
func (t *testSysEventListener) NodeUnloaded(info raftio.NodeInfo) {
t.mu.Lock()
defer t.mu.Unlock()
t.nodeUnloaded = append(t.nodeUnloaded, info)
}
func (t *testSysEventListener) getNodeUnloaded() []raftio.NodeInfo {
t.mu.Lock()
defer t.mu.Unlock()
return copyNodeInfo(t.nodeUnloaded)
}
func (t *testSysEventListener) MembershipChanged(info raftio.NodeInfo) {
t.mu.Lock()
defer t.mu.Unlock()
t.membershipChanged = append(t.membershipChanged, info)
}
func (t *testSysEventListener) getMembershipChanged() []raftio.NodeInfo {
t.mu.Lock()
defer t.mu.Unlock()
return copyNodeInfo(t.membershipChanged)
}
func (t *testSysEventListener) ConnectionEstablished(info raftio.ConnectionInfo) {
t.mu.Lock()
defer t.mu.Unlock()
t.connectionEstablished++
}
func (t *testSysEventListener) getConnectionEstablished() uint64 {
t.mu.Lock()
defer t.mu.Unlock()
return t.connectionEstablished
}
func (t *testSysEventListener) ConnectionFailed(info raftio.ConnectionInfo) {}
func copySnapshotInfo(info []raftio.SnapshotInfo) []raftio.SnapshotInfo {
return append([]raftio.SnapshotInfo{}, info...)
}
func (t *testSysEventListener) SendSnapshotStarted(info raftio.SnapshotInfo) {
t.mu.Lock()
defer t.mu.Unlock()
t.sendSnapshotStarted = append(t.sendSnapshotStarted, info)
}
func (t *testSysEventListener) getSendSnapshotStarted() []raftio.SnapshotInfo {
t.mu.Lock()
defer t.mu.Unlock()
return copySnapshotInfo(t.sendSnapshotStarted)
}
func (t *testSysEventListener) SendSnapshotCompleted(info raftio.SnapshotInfo) {
t.mu.Lock()
defer t.mu.Unlock()
t.sendSnapshotCompleted = append(t.sendSnapshotCompleted, info)
}
func (t *testSysEventListener) getSendSnapshotCompleted() []raftio.SnapshotInfo {
t.mu.Lock()
defer t.mu.Unlock()
return copySnapshotInfo(t.sendSnapshotCompleted)
}
func (t *testSysEventListener) SendSnapshotAborted(info raftio.SnapshotInfo) {}
func (t *testSysEventListener) SnapshotReceived(info raftio.SnapshotInfo) {
t.mu.Lock()
defer t.mu.Unlock()
t.snapshotReceived = append(t.snapshotReceived, info)
}
func (t *testSysEventListener) getSnapshotReceived() []raftio.SnapshotInfo {
t.mu.Lock()
defer t.mu.Unlock()
return copySnapshotInfo(t.snapshotReceived)
}
func (t *testSysEventListener) SnapshotRecovered(info raftio.SnapshotInfo) {
t.mu.Lock()
defer t.mu.Unlock()
t.snapshotRecovered = append(t.snapshotRecovered, info)
}
func (t *testSysEventListener) getSnapshotRecovered() []raftio.SnapshotInfo {
t.mu.Lock()
defer t.mu.Unlock()
return copySnapshotInfo(t.snapshotRecovered)
}
func (t *testSysEventListener) SnapshotCreated(info raftio.SnapshotInfo) {
t.mu.Lock()
defer t.mu.Unlock()
t.snapshotCreated = append(t.snapshotCreated, info)
}
func (t *testSysEventListener) getSnapshotCreated() []raftio.SnapshotInfo {
t.mu.Lock()
defer t.mu.Unlock()
return copySnapshotInfo(t.snapshotCreated)
}
func (t *testSysEventListener) SnapshotCompacted(info raftio.SnapshotInfo) {
t.mu.Lock()
defer t.mu.Unlock()
t.snapshotCompacted = append(t.snapshotCompacted, info)
}
func copyEntryInfo(info []raftio.EntryInfo) []raftio.EntryInfo {
return append([]raftio.EntryInfo{}, info...)
}
func (t *testSysEventListener) LogCompacted(info raftio.EntryInfo) {
t.mu.Lock()
defer t.mu.Unlock()
t.logCompacted = append(t.logCompacted, info)
}
func (t *testSysEventListener) getLogCompacted() []raftio.EntryInfo {
t.mu.Lock()
defer t.mu.Unlock()
return copyEntryInfo(t.logCompacted)
}
func (t *testSysEventListener) LogDBCompacted(info raftio.EntryInfo) {
t.mu.Lock()
defer t.mu.Unlock()
t.logdbCompacted = append(t.logdbCompacted, info)
}
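// TimeoutStateMachine is an IStateMachine that can artificially delay its
// Update, Lookup and SaveSnapshot operations, used for testing timeouts.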
type TimeoutStateMachine struct {
updateDelay uint64
lookupDelay uint64
snapshotDelay uint64
closed bool
}
func (t *TimeoutStateMachine) Update(data []byte) (sm.Result, error) {
if t.updateDelay > 0 {
time.Sleep(time.Duration(t.updateDelay) * time.Millisecond)
}
return sm.Result{}, nil
}
func (t *TimeoutStateMachine) Lookup(data interface{}) (interface{}, error) {
if t.lookupDelay > 0 {
time.Sleep(time.Duration(t.lookupDelay) * time.Millisecond)
}
return data, nil
}
func (t *TimeoutStateMachine) SaveSnapshot(w io.Writer,
fc sm.ISnapshotFileCollection, stopc <-chan struct{}) error {
if t.snapshotDelay > 0 {
time.Sleep(time.Duration(t.snapshotDelay) * time.Millisecond)
}
_, err := w.Write([]byte("done"))
return err
}
func (t *TimeoutStateMachine) RecoverFromSnapshot(r io.Reader,
fc []sm.SnapshotFile, stopc <-chan struct{}) error {
return nil
}
func (t *TimeoutStateMachine) Close() error {
t.closed = true
return nil
}
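// noopLogDB is a no-op ILogDB implementation used to verify that the LogDB
// module can be replaced via the expert config.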
type noopLogDB struct {
}
func (n *noopLogDB) BinaryFormat() uint32 { return 0 }
func (n *noopLogDB) Name() string { return "noopLogDB" }
func (n *noopLogDB) Close() error { return nil }
func (n *noopLogDB) HasNodeInfo(clusterID uint64, nodeID uint64) (bool, error) { return true, nil }
func (n *noopLogDB) CreateNodeInfo(clusterID uint64, nodeID uint64) error { return nil }
func (n *noopLogDB) ListNodeInfo() ([]raftio.NodeInfo, error) { return nil, nil }
func (n *noopLogDB) SaveBootstrapInfo(clusterID uint64, nodeID uint64, bs pb.Bootstrap) error {
return nil
}
func (n *noopLogDB) GetBootstrapInfo(clusterID uint64, nodeID uint64) (pb.Bootstrap, error) {
return pb.Bootstrap{}, nil
}
func (n *noopLogDB) SaveRaftState(updates []pb.Update, workerID uint64) error { return nil }
func (n *noopLogDB) IterateEntries(ents []pb.Entry,
size uint64, clusterID uint64, nodeID uint64, low uint64,
high uint64, maxSize uint64) ([]pb.Entry, uint64, error) {
return nil, 0, nil
}
func (n *noopLogDB) ReadRaftState(clusterID uint64, nodeID uint64,
lastIndex uint64) (raftio.RaftState, error) {
return raftio.RaftState{}, nil
}
func (n *noopLogDB) RemoveEntriesTo(clusterID uint64, nodeID uint64, index uint64) error { return nil }
func (n *noopLogDB) CompactEntriesTo(clusterID uint64,
nodeID uint64, index uint64) (<-chan struct{}, error) {
return nil, nil
}
func (n *noopLogDB) RemoveNodeData(clusterID uint64, nodeID uint64) error { return nil }
func (n *noopLogDB) SaveSnapshots([]pb.Update) error { return nil }
func (n *noopLogDB) GetSnapshot(clusterID uint64, nodeID uint64) (pb.Snapshot, error) {
return pb.Snapshot{}, nil
}
func (n *noopLogDB) ImportSnapshot(snapshot pb.Snapshot, nodeID uint64) error {
return nil
}
type updateConfig func(*config.Config) *config.Config
type updateNodeHostConfig func(*config.NodeHostConfig) *config.NodeHostConfig
type testFunc func(*NodeHost)
type beforeTest func()
type afterTest func(*NodeHost)
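// testOption describes how a NodeHost test should be configured, started
// and verified.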
type testOption struct {
updateConfig updateConfig
updateNodeHostConfig updateNodeHostConfig
tf testFunc
rf testFunc
bt beforeTest
at afterTest
defaultTestNode bool
fakeDiskNode bool
fakeDiskInitialIndex uint64
createSM sm.CreateStateMachineFunc
createConcurrentSM sm.CreateConcurrentStateMachineFunc
createOnDiskSM sm.CreateOnDiskStateMachineFunc
join bool
newNodeHostToFail bool
restartNodeHost bool
noElection bool
compressed bool
fsErrorInjection bool
}
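// createSingleTestNode starts a single Raft node on the given NodeHost
// according to the provided testOption.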
func createSingleTestNode(t *testing.T, to *testOption, nh *NodeHost) {
if to.createSM == nil &&
to.createConcurrentSM == nil &&
to.createOnDiskSM == nil && !to.defaultTestNode && !to.fakeDiskNode {
return
}
if to.defaultTestNode {
to.createSM = func(uint64, uint64) sm.IStateMachine {
return &PST{}
}
}
if to.fakeDiskNode {
to.createOnDiskSM = func(uint64, uint64) sm.IOnDiskStateMachine {
return tests.NewFakeDiskSM(to.fakeDiskInitialIndex)
}
}
cfg := getTestConfig()
if to.updateConfig != nil {
cfg = to.updateConfig(cfg)
}
if to.compressed {
cfg.SnapshotCompressionType = config.Snappy
cfg.EntryCompressionType = config.Snappy
}
peers := make(map[uint64]string)
if !to.join {
if !nh.nhConfig.AddressByNodeHostID {
peers[cfg.ClusterID] = nh.RaftAddress()
} else {
peers[cfg.ClusterID] = nh.ID()
}
}
if to.createSM != nil {
if err := nh.StartCluster(peers, to.join, to.createSM, *cfg); err != nil {
t.Fatalf("start cluster failed: %v", err)
}
} else if to.createConcurrentSM != nil {
if err := nh.StartConcurrentCluster(peers,
to.join, to.createConcurrentSM, *cfg); err != nil {
t.Fatalf("start concurrent cluster failed: %v", err)
}
} else if to.createOnDiskSM != nil {
if err := nh.StartOnDiskCluster(peers,
to.join, to.createOnDiskSM, *cfg); err != nil {
t.Fatalf("start on disk cluster fail: %v", err)
}
} else {
t.Fatalf("?!?")
}
}
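// runNodeHostTest creates a NodeHost as described by the testOption, runs
// the test functions and tears everything down afterwards.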
func runNodeHostTest(t *testing.T, to *testOption, fs vfs.IFS) {
func() {
if !to.fsErrorInjection {
defer leaktest.AfterTest(t)()
}
// FIXME:
// the following RemoveAll call will fail on windows after running error
// injection tests as some pebble log files are not closed
_ = fs.RemoveAll(singleNodeHostTestDir)
if to.bt != nil {
to.bt()
}
nhc := getTestNodeHostConfig(fs)
if to.updateNodeHostConfig != nil {
nhc = to.updateNodeHostConfig(nhc)
}
nh, err := NewNodeHost(*nhc)
if err != nil && !to.newNodeHostToFail {
t.Fatalf("failed to create nodehost: %v", err)
}
if err != nil && to.newNodeHostToFail {
return
}
if err == nil && to.newNodeHostToFail {
t.Fatalf("NewNodeHost didn't fail as expected")
}
if !to.restartNodeHost {
defer func() {
func() {
defer func() {
if r := recover(); r != nil {
if to.fsErrorInjection {
return
}
panicNow(r.(error))
}
}()
nh.Close()
}()
if to.at != nil {
to.at(nh)
}
}()
}
createSingleTestNode(t, to, nh)
if !to.noElection {
waitForLeaderToBeElected(t, nh, 1)
}
if to.tf != nil {
to.tf(nh)
}
if to.restartNodeHost {
nh.Close()
nh, err = NewNodeHost(*nhc)
if err != nil {
t.Fatalf("failed to create nodehost: %v", err)
}
defer func() {
nh.Close()
if to.at != nil {
to.at(nh)
}
}()
createSingleTestNode(t, to, nh)
if to.rf != nil {
to.rf(nh)
}
}
}()
reportLeakedFD(fs, t)
}
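// createProposalsToTriggerSnapshot repeatedly registers and closes client
// sessions on cluster 1 to generate enough proposals to trigger snapshotting.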
func createProposalsToTriggerSnapshot(t *testing.T,
nh *NodeHost, count uint64, timeoutExpected bool) {
for i := uint64(0); i < count; i++ {
pto := lpto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
cs, err := nh.SyncGetSession(ctx, 1)
if err != nil {
if err == ErrTimeout {
cancel()
return
}
t.Fatalf("unexpected error %v", err)
}
//time.Sleep(100 * time.Millisecond)
if err := nh.SyncCloseSession(ctx, cs); err != nil {
if err == ErrTimeout {
cancel()
return
}
t.Fatalf("failed to close client session %v", err)
}
cancel()
}
if timeoutExpected {
t.Fatalf("failed to trigger ")
}
}
func runNodeHostTestDC(t *testing.T, f func(), removeDir bool, fs vfs.IFS) {
defer leaktest.AfterTest(t)()
defer func() {
if err := fs.RemoveAll(singleNodeHostTestDir); err != nil {
t.Fatalf("%v", err)
}
}()
if removeDir {
if err := fs.RemoveAll(singleNodeHostTestDir); err != nil {
t.Fatalf("%v", err)
}
}
f()
reportLeakedFD(fs, t)
}
type testLogDBFactory struct {
ldb raftio.ILogDB
}
func (t *testLogDBFactory) Create(cfg config.NodeHostConfig,
cb config.LogDBCallback, dirs []string, wals []string) (raftio.ILogDB, error) {
return t.ldb, nil
}
func (t *testLogDBFactory) Name() string {
return t.ldb.Name()
}
func TestLogDBCanBeExtended(t *testing.T) {
fs := vfs.GetTestFS()
ldb := &noopLogDB{}
to := &testOption{
updateNodeHostConfig: func(nhc *config.NodeHostConfig) *config.NodeHostConfig {
nhc.Expert.LogDBFactory = &testLogDBFactory{ldb: ldb}
return nhc
},
tf: func(nh *NodeHost) {
if nh.mu.logdb.Name() != ldb.Name() {
t.Errorf("logdb type name %s, expect %s", nh.mu.logdb.Name(), ldb.Name())
}
},
noElection: true,
}
runNodeHostTest(t, to, fs)
}
func TestTCPTransportIsUsedByDefault(t *testing.T) {
if vfs.GetTestFS() != vfs.DefaultFS {
t.Skip("memfs test mode, skipped")
}
fs := vfs.GetTestFS()
to := &testOption{
tf: func(nh *NodeHost) {
tt := nh.transport.(*transport.Transport)
if tt.GetTrans().Name() != transport.TCPTransportName {
t.Errorf("transport type name %s, expect %s",
tt.GetTrans().Name(), transport.TCPTransportName)
}
},
noElection: true,
}
runNodeHostTest(t, to, fs)
}
type noopTransportFactory struct{}
func (noopTransportFactory) Create(cfg config.NodeHostConfig,
h raftio.MessageHandler, ch raftio.ChunkHandler) raftio.ITransport {
return transport.NewNOOPTransport(cfg, h, ch)
}
func (noopTransportFactory) Validate(string) bool {
return true
}
func TestTransportFactoryIsStillHonored(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
updateNodeHostConfig: func(nhc *config.NodeHostConfig) *config.NodeHostConfig {
nhc.Expert.TransportFactory = noopTransportFactory{}
return nhc
},
tf: func(nh *NodeHost) {
tt := nh.transport.(*transport.Transport)
if tt.GetTrans().Name() != transport.NOOPRaftName {
t.Errorf("transport type name %s, expect %s",
tt.GetTrans().Name(), transport.NOOPRaftName)
}
},
noElection: true,
}
runNodeHostTest(t, to, fs)
}
func TestTransportFactoryCanBeSet(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
updateNodeHostConfig: func(nhc *config.NodeHostConfig) *config.NodeHostConfig {
nhc.Expert.TransportFactory = &transport.NOOPTransportFactory{}
return nhc
},
tf: func(nh *NodeHost) {
tt := nh.transport.(*transport.Transport)
if tt.GetTrans().Name() != transport.NOOPRaftName {
t.Errorf("transport type name %s, expect %s",
tt.GetTrans().Name(), transport.NOOPRaftName)
}
},
noElection: true,
}
runNodeHostTest(t, to, fs)
}
type validatorTestModule struct {
}
func (tm *validatorTestModule) Create(nhConfig config.NodeHostConfig,
handler raftio.MessageHandler,
chunkHandler raftio.ChunkHandler) raftio.ITransport {
return transport.NewNOOPTransport(nhConfig, handler, chunkHandler)
}
func (tm *validatorTestModule) Validate(addr string) bool {
return addr == "localhost:12346" || addr == "localhost:26001"
}
func TestAddressValidatorCanBeSet(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
updateNodeHostConfig: func(nhc *config.NodeHostConfig) *config.NodeHostConfig {
nhc.Expert.TransportFactory = &validatorTestModule{}
return nhc
},
tf: func(nh *NodeHost) {
pto := lpto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
err := nh.SyncRequestAddNode(ctx, 1, 100, "localhost:12345", 0)
cancel()
if err != ErrInvalidAddress {
t.Fatalf("failed to return ErrInvalidAddress, %v", err)
}
ctx, cancel = context.WithTimeout(context.Background(), pto)
err = nh.SyncRequestAddNode(ctx, 1, 100, "localhost:12346", 0)
cancel()
if err != nil {
t.Fatalf("failed to add node, %v", err)
}
},
}
runNodeHostTest(t, to, fs)
}
type chanTransportFactory struct{}
func (*chanTransportFactory) Create(nhConfig config.NodeHostConfig,
handler raftio.MessageHandler,
chunkHandler raftio.ChunkHandler) raftio.ITransport {
return chantrans.NewChanTransport(nhConfig, handler, chunkHandler)
}
func (tm *chanTransportFactory) Validate(addr string) bool {
return addr == nodeHostTestAddr1 || addr == nodeHostTestAddr2
}
func TestGossip(t *testing.T) {
testAddressByNodeHostID(t, true, nil)
}
func TestCustomTransportCanUseNodeHostID(t *testing.T) {
factory := &chanTransportFactory{}
testAddressByNodeHostID(t, true, factory)
}
func TestCustomTransportCanGoWithoutNodeHostID(t *testing.T) {
factory := &chanTransportFactory{}
testAddressByNodeHostID(t, false, factory)
}
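// testAddressByNodeHostID starts two NodeHosts, optionally addressed by
// NodeHostID via gossip, and verifies that proposals can be made.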
func testAddressByNodeHostID(t *testing.T,
addressByNodeHostID bool, factory config.TransportFactory) {
fs := vfs.GetTestFS()
datadir1 := fs.PathJoin(singleNodeHostTestDir, "nh1")
datadir2 := fs.PathJoin(singleNodeHostTestDir, "nh2")
os.RemoveAll(singleNodeHostTestDir)
defer os.RemoveAll(singleNodeHostTestDir)
addr1 := nodeHostTestAddr1
addr2 := nodeHostTestAddr2
nhc1 := config.NodeHostConfig{
NodeHostDir: datadir1,
RTTMillisecond: getRTTMillisecond(fs, datadir1),
RaftAddress: addr1,
AddressByNodeHostID: addressByNodeHostID,
Expert: config.ExpertConfig{
FS: fs,
TestGossipProbeInterval: 50 * time.Millisecond,
},
}
if addressByNodeHostID {
nhc1.Gossip = config.GossipConfig{
BindAddress: "127.0.0.1:25001",
AdvertiseAddress: "127.0.0.1:25001",
Seed: []string{"127.0.0.1:25002"},
}
}
nhc2 := config.NodeHostConfig{
NodeHostDir: datadir2,
RTTMillisecond: getRTTMillisecond(fs, datadir2),
RaftAddress: addr2,
AddressByNodeHostID: addressByNodeHostID,
Expert: config.ExpertConfig{
FS: fs,
TestGossipProbeInterval: 50 * time.Millisecond,
},
}
if addressByNodeHostID {
nhc2.Gossip = config.GossipConfig{
BindAddress: "127.0.0.1:25002",
AdvertiseAddress: "127.0.0.1:25002",
Seed: []string{"127.0.0.1:25001"},
}
}
nhid1, err := id.ParseNodeHostID(testNodeHostID1)
if err != nil {
t.Fatalf("failed to parse nhid")
}
nhc1.Expert.TestNodeHostID = nhid1.Value()
nhid2, err := id.ParseNodeHostID(testNodeHostID2)
if err != nil {
t.Fatalf("failed to parse nhid")
}
nhc2.Expert.TestNodeHostID = nhid2.Value()
nhc1.Expert.TransportFactory = factory
nhc2.Expert.TransportFactory = factory
nh1, err := NewNodeHost(nhc1)
if err != nil {
t.Fatalf("failed to create nh, %v", err)
}
defer nh1.Close()
nh2, err := NewNodeHost(nhc2)
if err != nil {
t.Fatalf("failed to create nh2, %v", err)
}
defer nh2.Close()
peers := make(map[uint64]string)
if addressByNodeHostID {
peers[1] = testNodeHostID1
peers[2] = testNodeHostID2
} else {
peers[1] = addr1
peers[2] = addr2
}
createSM := func(uint64, uint64) sm.IStateMachine {
return &PST{}
}
rc := config.Config{
ClusterID: 1,
NodeID: 1,
ElectionRTT: 10,
HeartbeatRTT: 1,
SnapshotEntries: 0,
}
if err := nh1.StartCluster(peers, false, createSM, rc); err != nil {
t.Fatalf("failed to start node %v", err)
}
rc.NodeID = 2
if err := nh2.StartCluster(peers, false, createSM, rc); err != nil {
t.Fatalf("failed to start node %v", err)
}
waitForLeaderToBeElected(t, nh1, 1)
waitForLeaderToBeElected(t, nh2, 1)
pto := lpto(nh1)
session := nh1.GetNoOPSession(1)
for i := 0; i < 1000; i++ {
ctx, cancel := context.WithTimeout(context.Background(), pto)
if _, err := nh1.SyncPropose(ctx, session, make([]byte, 0)); err == nil {
cancel()
return
}
cancel()
time.Sleep(100 * time.Millisecond)
}
t.Fatalf("failed to make proposal")
}
func TestNodeHostRegistry(t *testing.T) {
fs := vfs.GetTestFS()
datadir1 := fs.PathJoin(singleNodeHostTestDir, "nh1")
datadir2 := fs.PathJoin(singleNodeHostTestDir, "nh2")
os.RemoveAll(singleNodeHostTestDir)
defer os.RemoveAll(singleNodeHostTestDir)
addr1 := nodeHostTestAddr1
addr2 := nodeHostTestAddr2
nhc1 := config.NodeHostConfig{
NodeHostDir: datadir1,
RTTMillisecond: getRTTMillisecond(fs, datadir1),
RaftAddress: addr1,
AddressByNodeHostID: true,
Expert: config.ExpertConfig{
FS: fs,
TestGossipProbeInterval: 50 * time.Millisecond,
},
}
nhc1.Gossip = config.GossipConfig{
BindAddress: "127.0.0.1:25001",
AdvertiseAddress: "127.0.0.1:25001",
Seed: []string{"127.0.0.1:25002"},
}
nhc2 := config.NodeHostConfig{
NodeHostDir: datadir2,
RTTMillisecond: getRTTMillisecond(fs, datadir2),
RaftAddress: addr2,
AddressByNodeHostID: true,
Expert: config.ExpertConfig{
FS: fs,
TestGossipProbeInterval: 50 * time.Millisecond,
},
}
nhc2.Gossip = config.GossipConfig{
BindAddress: "127.0.0.1:25002",
AdvertiseAddress: "127.0.0.1:25002",
Seed: []string{"127.0.0.1:25001"},
}
nhid1, err := id.ParseNodeHostID(testNodeHostID1)
if err != nil {
t.Fatalf("failed to parse nhid")
}
nhc1.Expert.TestNodeHostID = nhid1.Value()
nhc1.Gossip.Meta = []byte(testNodeHostID1)
nhid2, err := id.ParseNodeHostID(testNodeHostID2)
if err != nil {
t.Fatalf("failed to parse nhid")
}
nhc2.Expert.TestNodeHostID = nhid2.Value()
nhc2.Gossip.Meta = []byte(testNodeHostID2)
nh1, err := NewNodeHost(nhc1)
if err != nil {
t.Fatalf("failed to create nh, %v", err)
}
defer nh1.Close()
nh2, err := NewNodeHost(nhc2)
if err != nil {
t.Fatalf("failed to create nh2, %v", err)
}
defer nh2.Close()
peers := make(map[uint64]string)
peers[1] = testNodeHostID1
createSM := func(uint64, uint64) sm.IStateMachine {
return &PST{}
}
rc := config.Config{
ClusterID: 1,
NodeID: 1,
ElectionRTT: 10,
HeartbeatRTT: 1,
SnapshotEntries: 0,
}
if err := nh1.StartCluster(peers, false, createSM, rc); err != nil {
t.Fatalf("failed to start node %v", err)
}
rc.ClusterID = 2
peers[1] = testNodeHostID2
if err := nh2.StartCluster(peers, false, createSM, rc); err != nil {
t.Fatalf("failed to start node %v", err)
}
rc.ClusterID = 3
if err := nh2.StartCluster(peers, false, createSM, rc); err != nil {
t.Fatalf("failed to start node %v", err)
}
waitForLeaderToBeElected(t, nh1, 1)
waitForLeaderToBeElected(t, nh2, 2)
waitForLeaderToBeElected(t, nh2, 3)
good := false
for i := 0; i < 1000; i++ {
r1, ok := nh1.GetNodeHostRegistry()
assert.True(t, ok)
r2, ok := nh2.GetNodeHostRegistry()
assert.True(t, ok)
if r1.NumOfClusters() != 3 || r2.NumOfClusters() != 3 {
time.Sleep(10 * time.Millisecond)
} else {
good = true
break
}
}
if !good {
t.Fatalf("registry failed to report the expected num of clusters")
}
rc.ClusterID = 100
if err := nh2.StartCluster(peers, false, createSM, rc); err != nil {
t.Fatalf("failed to start node %v", err)
}
waitForLeaderToBeElected(t, nh2, 100)
for i := 0; i < 1000; i++ {
r1, ok := nh1.GetNodeHostRegistry()
assert.True(t, ok)
r2, ok := nh2.GetNodeHostRegistry()
assert.True(t, ok)
if r1.NumOfClusters() != 4 || r2.NumOfClusters() != 4 {
time.Sleep(10 * time.Millisecond)
} else {
v1, ok := r1.GetMeta(testNodeHostID1)
assert.True(t, ok)
assert.Equal(t, testNodeHostID1, string(v1))
v2, ok := r1.GetMeta(testNodeHostID2)
assert.True(t, ok)
assert.Equal(t, testNodeHostID2, string(v2))
return
}
}
t.Fatalf("failed to report the expected num of clusters")
}
func TestGossipCanHandleDynamicRaftAddress(t *testing.T) {
fs := vfs.GetTestFS()
datadir1 := fs.PathJoin(singleNodeHostTestDir, "nh1")
datadir2 := fs.PathJoin(singleNodeHostTestDir, "nh2")
os.RemoveAll(singleNodeHostTestDir)
defer os.RemoveAll(singleNodeHostTestDir)
addr1 := nodeHostTestAddr1
addr2 := nodeHostTestAddr2
nhc1 := config.NodeHostConfig{
NodeHostDir: datadir1,
RTTMillisecond: getRTTMillisecond(fs, datadir1),
RaftAddress: addr1,
AddressByNodeHostID: true,
Expert: config.ExpertConfig{
FS: fs,
TestGossipProbeInterval: 50 * time.Millisecond,
},
}
nhc2 := config.NodeHostConfig{
NodeHostDir: datadir2,
RTTMillisecond: getRTTMillisecond(fs, datadir2),
RaftAddress: addr2,
AddressByNodeHostID: true,
Expert: config.ExpertConfig{
FS: fs,
TestGossipProbeInterval: 50 * time.Millisecond,
},
}
nhid1, err := id.ParseNodeHostID(testNodeHostID1)
if err != nil {
t.Fatalf("failed to parse nhid")
}
nhc1.Expert.TestNodeHostID = nhid1.Value()
nhid2, err := id.ParseNodeHostID(testNodeHostID2)
if err != nil {
t.Fatalf("failed to parse nhid")
}
nhc2.Expert.TestNodeHostID = nhid2.Value()
nhc1.Gossip = config.GossipConfig{
BindAddress: "127.0.0.1:25001",
AdvertiseAddress: "127.0.0.1:25001",
Seed: []string{"127.0.0.1:25002"},
}
nhc2.Gossip = config.GossipConfig{
BindAddress: "127.0.0.1:25002",
AdvertiseAddress: "127.0.0.1:25002",
Seed: []string{"127.0.0.1:25001"},
}
nh1, err := NewNodeHost(nhc1)
if err != nil {
t.Fatalf("failed to create nh, %v", err)
}
defer nh1.Close()
nh2, err := NewNodeHost(nhc2)
if err != nil {
t.Fatalf("failed to create nh2, %v", err)
}
nh2NodeHostID := nh2.ID()
peers := make(map[uint64]string)
peers[1] = testNodeHostID1
peers[2] = testNodeHostID2
createSM := func(uint64, uint64) sm.IStateMachine {
return &PST{}
}
rc := config.Config{
ClusterID: 1,
NodeID: 1,
ElectionRTT: 3,
HeartbeatRTT: 1,
SnapshotEntries: 0,
}
if err := nh1.StartCluster(peers, false, createSM, rc); err != nil {
t.Fatalf("failed to start node %v", err)
}
rc.NodeID = 2
if err := nh2.StartCluster(peers, false, createSM, rc); err != nil {
t.Fatalf("failed to start node %v", err)
}
waitForLeaderToBeElected(t, nh1, 1)
waitForLeaderToBeElected(t, nh2, 1)
pto := lpto(nh1)
session := nh1.GetNoOPSession(1)
testProposal := func() {
done := false
for i := 0; i < 100; i++ {
ctx, cancel := context.WithTimeout(context.Background(), pto)
_, err := nh1.SyncPropose(ctx, session, make([]byte, 0))
cancel()
if err != nil {
time.Sleep(100 * time.Millisecond)
continue
}
done = true
break
}
if !done {
t.Fatalf("failed to make proposal")
}
}
testProposal()
nh2.Close()
nhc2.RaftAddress = nodeHostTestAddr3
nh2, err = NewNodeHost(nhc2)
if err != nil {
t.Fatalf("failed to restart nh2, %v", err)
}
defer nh2.Close()
if nh2.ID() != nh2NodeHostID {
t.Fatalf("NodeHostID changed, got %s, want %s", nh2.ID(), nh2NodeHostID)
}
if err := nh2.StartCluster(peers, false, createSM, rc); err != nil {
t.Fatalf("failed to start node %v", err)
}
waitForLeaderToBeElected(t, nh2, 1)
testProposal()
}
func TestNewNodeHostReturnErrorOnInvalidConfig(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
updateNodeHostConfig: func(nhc *config.NodeHostConfig) *config.NodeHostConfig {
nhc.RaftAddress = "12345"
if err := nhc.Validate(); err == nil {
t.Fatalf("config is not considered as invalid")
}
return nhc
},
newNodeHostToFail: true,
}
runNodeHostTest(t, to, fs)
}
func TestDeploymentIDCanBeSetUsingNodeHostConfig(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
updateNodeHostConfig: func(nhc *config.NodeHostConfig) *config.NodeHostConfig {
nhc.DeploymentID = 1000
return nhc
},
tf: func(nh *NodeHost) {
nhc := nh.NodeHostConfig()
if did := nhc.GetDeploymentID(); did != 1000 {
t.Errorf("unexpected did: %d", did)
}
},
noElection: true,
}
runNodeHostTest(t, to, fs)
}
var (
singleNodeHostTestAddr = fmt.Sprintf("localhost:%d", getTestPort())
nodeHostTestAddr1 = fmt.Sprintf("localhost:%d", getTestPort())
nodeHostTestAddr2 = fmt.Sprintf("localhost:%d", getTestPort()+1)
nodeHostTestAddr3 = fmt.Sprintf("localhost:%d", getTestPort()+2)
singleNodeHostTestDir = "single_nodehost_test_dir_safe_to_delete"
)
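// PST is a simple IStateMachine used by tests. Its SaveSnapshot and
// RecoverFromSnapshot methods can block until they are asked to stop.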
type PST struct {
mu sync.Mutex
stopped bool
saved bool
restored bool
slowSave bool
}
func (n *PST) setRestored(v bool) {
n.mu.Lock()
defer n.mu.Unlock()
n.restored = v
}
func (n *PST) getRestored() bool {
n.mu.Lock()
defer n.mu.Unlock()
return n.restored
}
func (n *PST) Close() error { return nil }
// Lookup locally looks up the data.
func (n *PST) Lookup(key interface{}) (interface{}, error) {
return make([]byte, 1), nil
}
// Update updates the object.
func (n *PST) Update(data []byte) (sm.Result, error) {
return sm.Result{Value: uint64(len(data))}, nil
}
// SaveSnapshot saves the state of the object to the provided io.Writer object.
func (n *PST) SaveSnapshot(w io.Writer,
fileCollection sm.ISnapshotFileCollection,
done <-chan struct{}) error {
n.saved = true
if !n.slowSave {
_, err := w.Write([]byte("random-data"))
if err != nil {
panic(err)
}
return nil
}
for {
time.Sleep(10 * time.Millisecond)
select {
case <-done:
n.stopped = true
return sm.ErrSnapshotStopped
default:
}
}
}
// RecoverFromSnapshot recovers the object from the snapshot specified by the
// io.Reader object.
func (n *PST) RecoverFromSnapshot(r io.Reader,
files []sm.SnapshotFile, done <-chan struct{}) error {
n.setRestored(true)
for {
time.Sleep(10 * time.Millisecond)
select {
case <-done:
n.stopped = true
return sm.ErrSnapshotStopped
default:
}
}
}
// GetHash returns a uint64 value representing the current state of the object.
func (n *PST) GetHash() (uint64, error) {
return 0, nil
}
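// createConcurrentTestNodeHost creates a NodeHost that runs both a regular
// and a concurrent state machine cluster for concurrency related tests.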
func createConcurrentTestNodeHost(addr string,
datadir string, snapshotEntry uint64,
concurrent bool, fs vfs.IFS) (*NodeHost, error) {
// config for raft
rc := config.Config{
NodeID: uint64(1),
ElectionRTT: 3,
HeartbeatRTT: 1,
CheckQuorum: true,
SnapshotEntries: snapshotEntry,
CompactionOverhead: 100,
}
peers := make(map[uint64]string)
peers[1] = addr
nhc := config.NodeHostConfig{
WALDir: datadir,
NodeHostDir: datadir,
RTTMillisecond: getRTTMillisecond(fs, datadir),
RaftAddress: peers[1],
Expert: getTestExpertConfig(fs),
}
nh, err := NewNodeHost(nhc)
if err != nil {
return nil, err
}
var newConcurrentSM func(uint64, uint64) sm.IConcurrentStateMachine
var newSM func(uint64, uint64) sm.IStateMachine
if snapshotEntry == 0 {
newConcurrentSM = func(uint64, uint64) sm.IConcurrentStateMachine {
return &tests.ConcurrentUpdate{}
}
newSM = func(uint64, uint64) sm.IStateMachine {
return &tests.TestUpdate{}
}
} else {
newConcurrentSM = func(uint64, uint64) sm.IConcurrentStateMachine {
return &tests.ConcurrentSnapshot{}
}
newSM = func(uint64, uint64) sm.IStateMachine {
return &tests.TestSnapshot{}
}
}
rc.ClusterID = 1 + nhc.Expert.Engine.ApplyShards
if err := nh.StartConcurrentCluster(peers, false, newConcurrentSM, rc); err != nil {
return nil, err
}
rc.ClusterID = 1
if err := nh.StartCluster(peers, false, newSM, rc); err != nil {
return nil, err
}
return nh, nil
}
func singleConcurrentNodeHostTest(t *testing.T,
tf func(t *testing.T, nh *NodeHost),
snapshotEntry uint64, concurrent bool, fs vfs.IFS) {
defer func() {
if err := fs.RemoveAll(singleNodeHostTestDir); err != nil {
t.Fatalf("%v", err)
}
}()
func() {
defer leaktest.AfterTest(t)()
if err := fs.RemoveAll(singleNodeHostTestDir); err != nil {
t.Fatalf("%v", err)
}
nh, err := createConcurrentTestNodeHost(singleNodeHostTestAddr,
singleNodeHostTestDir, snapshotEntry, concurrent, fs)
if err != nil {
t.Fatalf("failed to create nodehost %v", err)
}
defer func() {
nh.Close()
}()
nhc := nh.NodeHostConfig()
waitForLeaderToBeElected(t, nh, 1)
waitForLeaderToBeElected(t, nh, 1+nhc.Expert.Engine.ApplyShards)
tf(t, nh)
}()
reportLeakedFD(fs, t)
}
func twoFakeDiskNodeHostTest(t *testing.T,
tf func(t *testing.T, nh1 *NodeHost, nh2 *NodeHost), fs vfs.IFS) {
defer func() {
if err := fs.RemoveAll(singleNodeHostTestDir); err != nil {
t.Fatalf("%v", err)
}
}()
func() {
defer leaktest.AfterTest(t)()
nh1dir := fs.PathJoin(singleNodeHostTestDir, "nh1")
nh2dir := fs.PathJoin(singleNodeHostTestDir, "nh2")
if err := fs.RemoveAll(singleNodeHostTestDir); err != nil {
t.Fatalf("%v", err)
}
nh1, nh2, err := createFakeDiskTwoTestNodeHosts(nodeHostTestAddr1,
nodeHostTestAddr2, nh1dir, nh2dir, fs)
if err != nil {
t.Fatalf("failed to create nodehost %v", err)
}
defer func() {
nh1.Close()
nh2.Close()
}()
tf(t, nh1, nh2)
}()
reportLeakedFD(fs, t)
}
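// createFakeDiskTwoTestNodeHosts creates the two NodeHosts used by the
// on-disk state machine tests.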
func createFakeDiskTwoTestNodeHosts(addr1 string, addr2 string,
datadir1 string, datadir2 string, fs vfs.IFS) (*NodeHost, *NodeHost, error) {
peers := make(map[uint64]string)
peers[1] = addr1
nhc1 := config.NodeHostConfig{
WALDir: datadir1,
NodeHostDir: datadir1,
RTTMillisecond: getRTTMillisecond(fs, datadir1),
RaftAddress: addr1,
SystemEventListener: &testSysEventListener{},
Expert: getTestExpertConfig(fs),
}
nhc2 := config.NodeHostConfig{
WALDir: datadir2,
NodeHostDir: datadir2,
RTTMillisecond: getRTTMillisecond(fs, datadir2),
RaftAddress: addr2,
SystemEventListener: &testSysEventListener{},
Expert: getTestExpertConfig(fs),
}
nh1, err := NewNodeHost(nhc1)
if err != nil {
return nil, nil, err
}
nh2, err := NewNodeHost(nhc2)
if err != nil {
return nil, nil, err
}
return nh1, nh2, nil
}
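// createRateLimitedTwoTestNodeHosts creates a two node cluster with
// MaxInMemLogSize set and slows down the follower's state machine, returning
// the leader and follower NodeHosts together with both state machines.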
func createRateLimitedTwoTestNodeHosts(addr1 string, addr2 string,
datadir1 string, datadir2 string,
fs vfs.IFS) (*NodeHost, *NodeHost, *tests.NoOP, *tests.NoOP, error) {
rc := config.Config{
ClusterID: 1,
ElectionRTT: 3,
HeartbeatRTT: 1,
CheckQuorum: true,
MaxInMemLogSize: 1024 * 3,
}
peers := make(map[uint64]string)
peers[1] = addr1
peers[2] = addr2
nhc1 := config.NodeHostConfig{
WALDir: datadir1,
NodeHostDir: datadir1,
RTTMillisecond: getRTTMillisecond(fs, datadir1),
RaftAddress: peers[1],
Expert: getTestExpertConfig(fs),
}
nhc2 := config.NodeHostConfig{
WALDir: datadir2,
NodeHostDir: datadir2,
RTTMillisecond: getRTTMillisecond(fs, datadir2),
RaftAddress: peers[2],
Expert: getTestExpertConfig(fs),
}
nh1, err := NewNodeHost(nhc1)
if err != nil {
return nil, nil, nil, nil, err
}
nh2, err := NewNodeHost(nhc2)
if err != nil {
return nil, nil, nil, nil, err
}
sm1 := &tests.NoOP{}
sm2 := &tests.NoOP{}
newRSM1 := func(clusterID uint64, nodeID uint64) sm.IStateMachine {
return sm1
}
newRSM2 := func(clusterID uint64, nodeID uint64) sm.IStateMachine {
return sm2
}
rc.NodeID = 1
if err := nh1.StartCluster(peers, false, newRSM1, rc); err != nil {
return nil, nil, nil, nil, err
}
rc.NodeID = 2
if err := nh2.StartCluster(peers, false, newRSM2, rc); err != nil {
return nil, nil, nil, nil, err
}
var leaderNh *NodeHost
var followerNh *NodeHost
for i := 0; i < 200; i++ {
leaderID, ready, err := nh1.GetLeaderID(1)
if err == nil && ready {
if leaderID == 1 {
leaderNh = nh1
followerNh = nh2
sm2.SetSleepTime(nhc1.RTTMillisecond * 10)
} else {
leaderNh = nh2
followerNh = nh1
sm1.SetSleepTime(nhc1.RTTMillisecond * 10)
}
return leaderNh, followerNh, sm1, sm2, nil
}
// wait for leader to be elected
time.Sleep(100 * time.Millisecond)
}
return nil, nil, nil, nil, errors.New("failed to get usable nodehosts")
}
func rateLimitedTwoNodeHostTest(t *testing.T,
tf func(t *testing.T, leaderNh *NodeHost, followerNh *NodeHost,
n1 *tests.NoOP, n2 *tests.NoOP), fs vfs.IFS) {
defer func() {
if err := fs.RemoveAll(singleNodeHostTestDir); err != nil {
t.Fatalf("%v", err)
}
}()
func() {
nh1dir := fs.PathJoin(singleNodeHostTestDir, "nh1")
nh2dir := fs.PathJoin(singleNodeHostTestDir, "nh2")
defer leaktest.AfterTest(t)()
if err := fs.RemoveAll(singleNodeHostTestDir); err != nil {
t.Fatalf("%v", err)
}
nh1, nh2, n1, n2, err := createRateLimitedTwoTestNodeHosts(nodeHostTestAddr1,
nodeHostTestAddr2, nh1dir, nh2dir, fs)
if err != nil {
t.Fatalf("failed to create nodehost2 %v", err)
}
defer func() {
nh1.Close()
nh2.Close()
}()
tf(t, nh1, nh2, n1, n2)
}()
reportLeakedFD(fs, t)
}
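// waitForLeaderToBeElected blocks until a leader is elected for the
// specified cluster, failing the test if that doesn't happen in time.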
func waitForLeaderToBeElected(t *testing.T, nh *NodeHost, clusterID uint64) {
for i := 0; i < 200; i++ {
_, ready, err := nh.GetLeaderID(clusterID)
if err == nil && ready {
return
}
time.Sleep(100 * time.Millisecond)
}
t.Fatalf("failed to elect leader")
}
func TestJoinedClusterCanBeRestartedOrJoinedAgain(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
cfg := getTestConfig()
peers := make(map[uint64]string)
newPST := func(uint64, uint64) sm.IStateMachine { return &PST{} }
if err := nh.StopCluster(1); err != nil {
t.Fatalf("failed to stop the cluster: %v", err)
}
for i := 0; i < 1000; i++ {
err := nh.StartCluster(peers, true, newPST, *cfg)
if err == nil {
return
}
if err == ErrClusterAlreadyExist {
time.Sleep(5 * time.Millisecond)
continue
} else {
t.Fatalf("failed to join the cluster again, %v", err)
}
}
},
join: true,
noElection: true,
}
runNodeHostTest(t, to, fs)
}
func TestCompactionCanBeRequested(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
updateConfig: func(c *config.Config) *config.Config {
c.SnapshotEntries = 10
c.CompactionOverhead = 5
c.DisableAutoCompactions = true
return c
},
tf: func(nh *NodeHost) {
pto := lpto(nh)
session := nh.GetNoOPSession(1)
ctx, cancel := context.WithTimeout(context.Background(), pto)
defer cancel()
_, err := nh.SyncPropose(ctx, session, []byte("test-data"))
if err != nil {
t.Fatalf("failed to make proposal, %v", err)
}
opt := SnapshotOption{
OverrideCompactionOverhead: true,
CompactionOverhead: 0,
}
if _, err := nh.SyncRequestSnapshot(ctx, 1, opt); err != nil {
t.Fatalf("failed to request snapshot %v", err)
}
for i := 0; i < 100; i++ {
op, err := nh.RequestCompaction(1, 1)
if err == ErrRejected {
time.Sleep(100 * time.Millisecond)
continue
}
if err != nil {
t.Fatalf("failed to request compaction %v", err)
}
select {
case <-op.ResultC():
break
case <-ctx.Done():
t.Fatalf("failed to complete the compaction")
}
break
}
if _, err = nh.RequestCompaction(1, 1); err != ErrRejected {
t.Fatalf("not rejected")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestSnapshotCanBeStopped(t *testing.T) {
fs := vfs.GetTestFS()
pst := &PST{slowSave: true}
to := &testOption{
updateConfig: func(c *config.Config) *config.Config {
c.SnapshotEntries = 10
return c
},
createSM: func(clusterID uint64, nodeID uint64) sm.IStateMachine {
return pst
},
tf: func(nh *NodeHost) {
createProposalsToTriggerSnapshot(t, nh, 50, true)
},
at: func(*NodeHost) {
if !pst.saved || !pst.stopped {
t.Errorf("snapshot not stopped")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestRecoverFromSnapshotCanBeStopped(t *testing.T) {
fs := vfs.GetTestFS()
pst := &PST{slowSave: false}
to := &testOption{
updateConfig: func(c *config.Config) *config.Config {
c.SnapshotEntries = 10
return c
},
createSM: func(clusterID uint64, nodeID uint64) sm.IStateMachine {
return pst
},
tf: func(nh *NodeHost) {
createProposalsToTriggerSnapshot(t, nh, 50, false)
},
rf: func(nh *NodeHost) {
wait := 0
for !pst.getRestored() {
time.Sleep(10 * time.Millisecond)
wait++
if wait > 1000 {
break
}
}
},
at: func(*NodeHost) {
wait := 0
for !pst.stopped {
time.Sleep(10 * time.Millisecond)
wait++
if wait > 1000 {
break
}
}
if !pst.getRestored() {
t.Errorf("not restored")
}
if !pst.stopped {
t.Errorf("not stopped")
}
},
restartNodeHost: true,
}
runNodeHostTest(t, to, fs)
}
func TestNodeHostIDIsStatic(t *testing.T) {
fs := vfs.GetTestFS()
id := ""
to := &testOption{
restartNodeHost: true,
noElection: true,
tf: func(nh *NodeHost) {
id = nh.ID()
},
rf: func(nh *NodeHost) {
if nh.ID() != id {
t.Fatalf("NodeHost ID value changed")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestNodeHostIDCanBeSet(t *testing.T) {
fs := vfs.GetTestFS()
nhid := uint64(1234567890)
to := &testOption{
updateNodeHostConfig: func(c *config.NodeHostConfig) *config.NodeHostConfig {
c.Expert.TestNodeHostID = nhid
return c
},
noElection: true,
tf: func(nh *NodeHost) {
nhid, err := id.NewNodeHostID(nhid)
if err != nil {
t.Fatalf("failed to create NodeHostID")
}
if nh.ID() != nhid.String() {
t.Fatalf("failed to set nhid")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestInvalidAddressIsRejected(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
pto := pto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
defer cancel()
err := nh.SyncRequestAddNode(ctx, 1, 100, "a1", 0)
if err != ErrInvalidAddress {
t.Errorf("failed to return ErrInvalidAddress, %v", err)
}
},
}
runNodeHostTest(t, to, fs)
}
func TestInvalidContextDeadlineIsReported(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
pto := lpto(nh)
rctx, rcancel := context.WithTimeout(context.Background(), pto)
rcs, err := nh.SyncGetSession(rctx, 1)
rcancel()
if err != nil {
t.Fatalf("failed to get regular session")
}
// 8 * time.Millisecond is smaller than the smallest possible RTTMillisecond
ctx, cancel := context.WithTimeout(context.Background(), 8*time.Millisecond)
defer cancel()
cs := nh.GetNoOPSession(1)
_, err = nh.SyncPropose(ctx, cs, make([]byte, 1))
if err != ErrTimeoutTooSmall {
t.Errorf("failed to return ErrTimeoutTooSmall, %v", err)
}
_, err = nh.SyncRead(ctx, 1, nil)
if err != ErrTimeoutTooSmall {
t.Errorf("failed to return ErrTimeoutTooSmall, %v", err)
}
_, err = nh.SyncGetSession(ctx, 1)
if err != ErrTimeoutTooSmall {
t.Errorf("failed to return ErrTimeoutTooSmall, %v", err)
}
err = nh.SyncCloseSession(ctx, rcs)
if err != ErrTimeoutTooSmall {
t.Errorf("failed to return ErrTimeoutTooSmall, %v", err)
}
_, err = nh.SyncRequestSnapshot(ctx, 1, DefaultSnapshotOption)
if err != ErrTimeoutTooSmall {
t.Errorf("failed to return ErrTimeoutTooSmall, %v", err)
}
err = nh.SyncRequestDeleteNode(ctx, 1, 1, 0)
if err != ErrTimeoutTooSmall {
t.Errorf("failed to return ErrTimeoutTooSmall, %v", err)
}
err = nh.SyncRequestAddNode(ctx, 1, 100, "a1.com:12345", 0)
if err != ErrTimeoutTooSmall {
t.Errorf("failed to return ErrTimeoutTooSmall, %v", err)
}
err = nh.SyncRequestAddNonVoting(ctx, 1, 100, "a1.com:12345", 0)
if err != ErrTimeoutTooSmall {
t.Errorf("failed to return ErrTimeoutTooSmall, %v", err)
}
},
}
runNodeHostTest(t, to, fs)
}
func TestErrClusterNotFoundCanBeReturned(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
pto := lpto(nh)
_, _, err := nh.GetLeaderID(1234)
if err != ErrClusterNotFound {
t.Errorf("failed to return ErrClusterNotFound, %v", err)
}
_, err = nh.StaleRead(1234, nil)
if err != ErrClusterNotFound {
t.Errorf("failed to return ErrClusterNotFound, %v", err)
}
_, err = nh.RequestSnapshot(1234, DefaultSnapshotOption, pto)
if err != ErrClusterNotFound {
t.Errorf("failed to return ErrClusterNotFound, %v", err)
}
_, err = nh.RequestDeleteNode(1234, 10, 0, pto)
if err != ErrClusterNotFound {
t.Errorf("failed to return ErrClusterNotFound, %v", err)
}
_, err = nh.RequestAddNode(1234, 10, "a1", 0, pto)
if err != ErrClusterNotFound {
t.Errorf("failed to return ErrClusterNotFound, %v", err)
}
_, err = nh.RequestAddNonVoting(1234, 10, "a1", 0, pto)
if err != ErrClusterNotFound {
t.Errorf("failed to return ErrClusterNotFound, %v", err)
}
err = nh.RequestLeaderTransfer(1234, 10)
if err != ErrClusterNotFound {
t.Errorf("failed to return ErrClusterNotFound, %v", err)
}
_, err = nh.GetNodeUser(1234)
if err != ErrClusterNotFound {
t.Errorf("failed to return ErrClusterNotFound, %v", err)
}
cs := nh.GetNoOPSession(1234)
_, err = nh.propose(cs, make([]byte, 1), pto)
if err != ErrClusterNotFound {
t.Errorf("failed to return ErrClusterNotFound, %v", err)
}
_, _, err = nh.readIndex(1234, pto)
if err != ErrClusterNotFound {
t.Errorf("failed to return ErrClusterNotFound, %v", err)
}
err = nh.stopNode(1234, 1, true)
if err != ErrClusterNotFound {
t.Errorf("failed to return ErrClusterNotFound, %v", err)
}
},
}
runNodeHostTest(t, to, fs)
}
func TestGetClusterMembership(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
pto := pto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
defer cancel()
_, err := nh.GetClusterMembership(ctx, 1)
if err != nil {
t.Fatalf("failed to get cluster membership")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestRegisterASessionTwiceWillBeReported(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
pto := lpto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
defer cancel()
cs, err := nh.SyncGetSession(ctx, 1)
if err != nil {
t.Errorf("failed to get client session %v", err)
}
cs.PrepareForRegister()
rs, err := nh.ProposeSession(cs, pto)
if err != nil {
t.Errorf("failed to propose client session %v", err)
}
r := <-rs.ResultC()
if !r.Rejected() {
t.Errorf("failed to reject the cs registeration")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestUnregisterNotRegisterClientSessionWillBeReported(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
pto := lpto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
defer cancel()
cs, err := nh.SyncGetSession(ctx, 1)
if err != nil {
t.Errorf("failed to get client session %v", err)
}
err = nh.SyncCloseSession(ctx, cs)
if err != nil {
t.Errorf("failed to unregister the client session %v", err)
}
err = nh.SyncCloseSession(ctx, cs)
if err != ErrRejected {
t.Errorf("failed to reject the request %v", err)
}
},
}
runNodeHostTest(t, to, fs)
}
func TestSnapshotFilePayloadChecksumIsSaved(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
updateConfig: func(c *config.Config) *config.Config {
c.SnapshotEntries = 10
return c
},
tf: func(nh *NodeHost) {
cs := nh.GetNoOPSession(1)
logdb := nh.mu.logdb
snapshotted := false
var snapshot pb.Snapshot
for i := 0; i < 1000; i++ {
pto := pto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
_, err := nh.SyncPropose(ctx, cs, []byte("test-data"))
cancel()
if err != nil {
continue
}
ss, err := logdb.GetSnapshot(1, 1)
if err != nil {
t.Fatalf("failed to list snapshots")
}
if !pb.IsEmptySnapshot(ss) {
snapshotted = true
snapshot = ss
break
}
}
if !snapshotted {
t.Fatalf("snapshot not triggered")
}
crc, err := rsm.GetV2PayloadChecksum(snapshot.Filepath, fs)
if err != nil {
t.Fatalf("failed to get payload checksum")
}
if !bytes.Equal(crc, snapshot.Checksum) {
t.Errorf("checksum changed")
}
ss := pb.Snapshot{}
if err := fileutil.GetFlagFileContent(fs.PathDir(snapshot.Filepath),
"snapshot.metadata", &ss, fs); err != nil {
t.Fatalf("failed to get content %v", err)
}
if !reflect.DeepEqual(&ss, &snapshot) {
t.Errorf("snapshot record changed")
}
},
}
runNodeHostTest(t, to, fs)
}
func testZombieSnapshotDirWillBeDeletedDuringAddCluster(t *testing.T, dirName string, fs vfs.IFS) {
var z1 string
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
did := nh.nhConfig.GetDeploymentID()
if err := nh.env.CreateSnapshotDir(did, 1, 1); err != nil {
t.Fatalf("failed to get snap dir")
}
snapDir := nh.env.GetSnapshotDir(did, 1, 1)
z1 = fs.PathJoin(snapDir, dirName)
if err := fs.MkdirAll(z1, 0755); err != nil {
t.Fatalf("failed to create dir %v", err)
}
},
rf: func(nh *NodeHost) {
_, err := fs.Stat(z1)
if !vfs.IsNotExist(err) {
t.Fatalf("failed to delete zombie dir")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestZombieSnapshotDirWillBeDeletedDuringAddCluster(t *testing.T) {
fs := vfs.GetTestFS()
defer leaktest.AfterTest(t)()
testZombieSnapshotDirWillBeDeletedDuringAddCluster(t, "snapshot-AB-01.receiving", fs)
testZombieSnapshotDirWillBeDeletedDuringAddCluster(t, "snapshot-AB-10.generating", fs)
}
func TestNodeHostReadIndex(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
pto := lpto(nh)
rs, err := nh.ReadIndex(1, pto)
if err != nil {
t.Errorf("failed to read index %v", err)
}
if rs.node == nil {
t.Fatal("rs.node not set")
}
v := <-rs.ResultC()
if !v.Completed() {
t.Errorf("failed to complete read index")
}
_, err = nh.ReadLocalNode(rs, make([]byte, 128))
if err != nil {
t.Errorf("read local failed %v", err)
}
},
}
runNodeHostTest(t, to, fs)
}
func TestNALookupCanReturnErrNotImplemented(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
pto := lpto(nh)
rs, err := nh.ReadIndex(1, pto)
if err != nil {
t.Errorf("failed to read index %v", err)
}
v := <-rs.ResultC()
if !v.Completed() {
t.Errorf("failed to complete read index")
}
_, err = nh.NAReadLocalNode(rs, make([]byte, 128))
if err != sm.ErrNotImplemented {
t.Errorf("failed to return sm.ErrNotImplemented, got %v", err)
}
},
}
runNodeHostTest(t, to, fs)
}
func TestNodeHostSyncIOAPIs(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
cs := nh.GetNoOPSession(1)
pto := lpto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
defer cancel()
v, err := nh.SyncPropose(ctx, cs, make([]byte, 128))
if err != nil {
t.Errorf("make proposal failed %v", err)
}
if v.Value != 128 {
t.Errorf("unexpected result")
}
data, err := nh.SyncRead(ctx, 1, make([]byte, 128))
if err != nil {
t.Errorf("make linearizable read failed %v", err)
}
if data == nil || len(data.([]byte)) == 0 {
t.Errorf("failed to get result")
}
if err := nh.StopCluster(1); err != nil {
t.Errorf("failed to stop cluster 2 %v", err)
}
listener, ok := nh.events.sys.ul.(*testSysEventListener)
if !ok {
t.Fatalf("failed to get the system event listener")
}
waitNodeInfoEvent(t, listener.getNodeReady, 1)
ni := listener.getNodeReady()[0]
if ni.ClusterID != 1 || ni.NodeID != 1 {
t.Fatalf("incorrect node ready info")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestEntryCompression(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
updateConfig: func(c *config.Config) *config.Config {
c.EntryCompressionType = config.Snappy
return c
},
tf: func(nh *NodeHost) {
cs := nh.GetNoOPSession(1)
pto := pto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
defer cancel()
_, err := nh.SyncPropose(ctx, cs, make([]byte, 1024))
if err != nil {
t.Errorf("make proposal failed %v", err)
}
logdb := nh.mu.logdb
ents, _, err := logdb.IterateEntries(nil, 0, 1, 1, 1, 100, math.MaxUint64)
if err != nil {
t.Errorf("failed to get entries %v", err)
}
hasEncodedEntry := false
for _, e := range ents {
if e.Type == pb.EncodedEntry {
hasEncodedEntry = true
payload, err := rsm.GetPayload(e)
if err != nil {
t.Fatalf("failed to get payload %v", err)
}
if !bytes.Equal(payload, make([]byte, 1024)) {
t.Errorf("payload changed")
}
}
}
if !hasEncodedEntry {
t.Errorf("failed to locate any encoded entry")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestOrderedMembershipChange(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
updateConfig: func(c *config.Config) *config.Config {
c.OrderedConfigChange = true
return c
},
tf: func(nh *NodeHost) {
pto := pto(nh)
{
ctx, cancel := context.WithTimeout(context.Background(), 2*pto)
defer cancel()
m, err := nh.SyncGetClusterMembership(ctx, 1)
if err != nil {
t.Fatalf("get membership failed, %v", err)
}
if err := nh.SyncRequestAddNode(ctx, 1, 2, "localhost:25000", m.ConfigChangeID+1); err == nil {
t.Fatalf("unexpectedly completed")
}
}
{
ctx, cancel := context.WithTimeout(context.Background(), 2*pto)
defer cancel()
m, err := nh.SyncGetClusterMembership(ctx, 1)
if err != nil {
t.Fatalf("get membership failed, %v", err)
}
if err := nh.SyncRequestAddNode(ctx, 1, 2, "localhost:25000", m.ConfigChangeID); err != nil {
t.Fatalf("failed to add node %v", err)
}
}
},
}
runNodeHostTest(t, to, fs)
}
func TestSyncRequestDeleteNode(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
pto := lpto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
defer cancel()
err := nh.SyncRequestDeleteNode(ctx, 1, 2, 0)
if err != nil {
t.Errorf("failed to delete node %v", err)
}
listener, ok := nh.events.sys.ul.(*testSysEventListener)
if !ok {
t.Fatalf("failed to get the system event listener")
}
waitNodeInfoEvent(t, listener.getMembershipChanged, 2)
ni := listener.getMembershipChanged()[1]
if ni.ClusterID != 1 || ni.NodeID != 1 {
t.Fatalf("incorrect node ready info")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestSyncRequestAddNode(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
pto := pto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
defer cancel()
err := nh.SyncRequestAddNode(ctx, 1, 2, "localhost:25000", 0)
if err != nil {
t.Errorf("failed to add node %v", err)
}
},
}
runNodeHostTest(t, to, fs)
}
func TestSyncRequestAddNonVoting(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
pto := pto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
defer cancel()
err := nh.SyncRequestAddNonVoting(ctx, 1, 2, "localhost:25000", 0)
if err != nil {
t.Errorf("failed to add nonVoting %v", err)
}
},
}
runNodeHostTest(t, to, fs)
}
func TestNodeHostAddNode(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
pto := pto(nh)
rs, err := nh.RequestAddNode(1, 2, "localhost:25000", 0, pto)
if err != nil {
t.Errorf("failed to add node %v", err)
}
v := <-rs.ResultC()
if !v.Completed() {
t.Errorf("failed to complete add node")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestNodeHostGetNodeUser(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
n, err := nh.GetNodeUser(1)
if err != nil {
t.Errorf("failed to get NodeUser")
}
if n == nil {
t.Errorf("got a nil NodeUser")
}
n, err = nh.GetNodeUser(123)
if err != ErrClusterNotFound {
t.Errorf("didn't return expected err")
}
if n != nil {
t.Errorf("got unexpected node user")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestNodeHostNodeUserPropose(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
pto := pto(nh)
n, err := nh.GetNodeUser(1)
if err != nil {
t.Errorf("failed to get NodeUser")
}
cs := nh.GetNoOPSession(1)
rs, err := n.Propose(cs, make([]byte, 16), pto)
if err != nil {
t.Errorf("failed to make propose %v", err)
}
v := <-rs.ResultC()
if !v.Completed() {
t.Errorf("failed to complete proposal")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestNodeHostNodeUserRead(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
pto := pto(nh)
n, err := nh.GetNodeUser(1)
if err != nil {
t.Errorf("failed to get NodeUser")
}
rs, err := n.ReadIndex(pto)
if err != nil {
t.Errorf("failed to read index %v", err)
}
v := <-rs.ResultC()
if !v.Completed() {
t.Errorf("failed to complete read index")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestNodeHostAddNonVotingRemoveNode(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
pto := pto(nh)
rs, err := nh.RequestAddNonVoting(1, 2, "localhost:25000", 0, pto)
if err != nil {
t.Errorf("failed to add node %v", err)
}
v := <-rs.ResultC()
if !v.Completed() {
t.Errorf("failed to complete add node")
}
ctx, cancel := context.WithTimeout(context.Background(), pto)
defer cancel()
membership, err := nh.SyncGetClusterMembership(ctx, 1)
if err != nil {
t.Fatalf("failed to get cluster membership %v", err)
}
if len(membership.Nodes) != 1 || len(membership.Removed) != 0 {
t.Errorf("unexpected nodes/removed len")
}
if len(membership.NonVotings) != 1 {
t.Errorf("unexpected nodes len")
}
_, ok := membership.NonVotings[2]
if !ok {
t.Errorf("node 2 not added")
}
			// remove the non-voting node
rs, err = nh.RequestDeleteNode(1, 2, 0, pto)
if err != nil {
t.Errorf("failed to remove node %v", err)
}
v = <-rs.ResultC()
if !v.Completed() {
t.Errorf("failed to complete remove node")
}
ctx, cancel = context.WithTimeout(context.Background(), pto)
defer cancel()
membership, err = nh.SyncGetClusterMembership(ctx, 1)
if err != nil {
t.Fatalf("failed to get cluster membership %v", err)
}
if len(membership.Nodes) != 1 || len(membership.Removed) != 1 {
t.Errorf("unexpected nodes/removed len")
}
if len(membership.NonVotings) != 0 {
t.Errorf("unexpected nodes len")
}
_, ok = membership.Removed[2]
if !ok {
t.Errorf("node 2 not removed")
}
},
}
runNodeHostTest(t, to, fs)
}
// FIXME:
// Leadership transfer is not actually tested
func TestNodeHostLeadershipTransfer(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
if err := nh.RequestLeaderTransfer(1, 1); err != nil {
t.Errorf("leader transfer failed %v", err)
}
},
}
runNodeHostTest(t, to, fs)
}
func TestNodeHostHasNodeInfo(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
if ok := nh.HasNodeInfo(1, 1); !ok {
t.Errorf("node info missing")
}
if ok := nh.HasNodeInfo(1, 2); ok {
t.Errorf("unexpected node info")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestOnDiskStateMachineDoesNotSupportClientSession(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
fakeDiskNode: true,
tf: func(nh *NodeHost) {
defer func() {
if r := recover(); r == nil {
t.Fatalf("no panic when proposing session on disk SM")
}
}()
pto := pto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
_, err := nh.SyncGetSession(ctx, 1)
cancel()
if err == nil {
t.Fatalf("managed to get new session")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestStaleReadOnUninitializedNodeReturnError(t *testing.T) {
fs := vfs.GetTestFS()
fakeDiskSM := tests.NewFakeDiskSM(0)
atomic.StoreUint32(&fakeDiskSM.SlowOpen, 1)
to := &testOption{
createOnDiskSM: func(uint64, uint64) sm.IOnDiskStateMachine {
return fakeDiskSM
},
tf: func(nh *NodeHost) {
n, ok := nh.getCluster(1)
if !ok {
t.Fatalf("failed to get the node")
}
if n.initialized() {
t.Fatalf("node unexpectedly initialized")
}
if _, err := nh.StaleRead(1, nil); err != ErrClusterNotInitialized {
t.Fatalf("expected to return ErrClusterNotInitialized")
}
atomic.StoreUint32(&fakeDiskSM.SlowOpen, 0)
for !n.initialized() {
runtime.Gosched()
}
v, err := nh.StaleRead(1, nil)
if err != nil {
t.Fatalf("stale read failed %v", err)
}
if len(v.([]byte)) != 8 {
t.Fatalf("unexpected result %v", v)
}
},
noElection: true,
}
runNodeHostTest(t, to, fs)
}
func testOnDiskStateMachineCanTakeDummySnapshot(t *testing.T, compressed bool) {
fs := vfs.GetTestFS()
to := &testOption{
fakeDiskNode: true,
compressed: compressed,
updateConfig: func(c *config.Config) *config.Config {
c.SnapshotEntries = 30
c.CompactionOverhead = 30
return c
},
tf: func(nh *NodeHost) {
session := nh.GetNoOPSession(1)
logdb := nh.mu.logdb
snapshotted := false
var ss pb.Snapshot
for i := uint64(2); i < 1000; i++ {
pto := pto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
_, err := nh.SyncPropose(ctx, session, []byte("test-data"))
cancel()
if err != nil {
continue
}
snapshot, err := logdb.GetSnapshot(1, 1)
if err != nil {
t.Fatalf("list snapshot failed %v", err)
}
if !pb.IsEmptySnapshot(snapshot) {
snapshotted = true
ss = snapshot
if !ss.Dummy {
t.Fatalf("dummy snapshot is not recorded as dummy")
}
break
} else if i%100 == 0 {
					// This is an ugly hack to work around RocksDB's incorrect fsync
					// implementation on macOS. fcntl(fd, F_FULLFSYNC) is required for a
					// proper fsync on macOS, but RocksDB doesn't issue it. As a result
					// proposals complete very quickly since they are not actually
					// fsynced, while snapshotting is much slower because dragonboat
					// properly fsyncs its snapshot data. Without this sleep we could
					// complete all required proposals before the first ongoing
					// snapshotting operation finishes.
time.Sleep(200 * time.Millisecond)
}
}
if !snapshotted {
t.Fatalf("failed to snapshot")
}
fi, err := fs.Stat(ss.Filepath)
if err != nil {
t.Fatalf("failed to get file st %v", err)
}
if fi.Size() != 1060 {
t.Fatalf("unexpected dummy snapshot file size %d", fi.Size())
}
reader, h, err := rsm.NewSnapshotReader(ss.Filepath, fs)
if err != nil {
t.Fatalf("failed to read snapshot %v", err)
}
			// a dummy snapshot is never compressed
if h.CompressionType != config.NoCompression {
t.Errorf("dummy snapshot compressed")
}
if rsm.SSVersion(h.Version) != rsm.DefaultVersion {
t.Errorf("unexpected snapshot version, got %d, want %d",
h.Version, rsm.DefaultVersion)
}
reader.Close()
shrunk, err := rsm.IsShrunkSnapshotFile(ss.Filepath, fs)
if err != nil {
t.Fatalf("failed to check shrunk %v", err)
}
if !shrunk {
t.Errorf("not a dummy snapshot")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestOnDiskStateMachineCanTakeDummySnapshot(t *testing.T) {
testOnDiskStateMachineCanTakeDummySnapshot(t, true)
testOnDiskStateMachineCanTakeDummySnapshot(t, false)
}
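// TestOnDiskSMCanStreamSnapshot starts an on-disk cluster on nh1, adds a second
// node hosted on nh2 and verifies that a snapshot is streamed to the new node:
// the received snapshot is recorded as a shrunk file with its on-disk index set,
// and the related system events (snapshot received/recovered, log compaction,
// snapshot send started/completed, connection established) are all reported.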
func TestOnDiskSMCanStreamSnapshot(t *testing.T) {
fs := vfs.GetTestFS()
tf := func(t *testing.T, nh1 *NodeHost, nh2 *NodeHost) {
rc := config.Config{
ClusterID: 1,
NodeID: 1,
ElectionRTT: 3,
HeartbeatRTT: 1,
CheckQuorum: true,
SnapshotEntries: 5,
CompactionOverhead: 2,
SnapshotCompressionType: config.Snappy,
EntryCompressionType: config.Snappy,
}
sm1 := tests.NewFakeDiskSM(0)
sm1.SetAborted()
peers := make(map[uint64]string)
peers[1] = nodeHostTestAddr1
newSM := func(uint64, uint64) sm.IOnDiskStateMachine {
return sm1
}
if err := nh1.StartOnDiskCluster(peers, false, newSM, rc); err != nil {
t.Fatalf("failed to start cluster %v", err)
}
waitForLeaderToBeElected(t, nh1, 1)
logdb := nh1.mu.logdb
snapshotted := false
session := nh1.GetNoOPSession(1)
for i := uint64(2); i < 1000; i++ {
pto := pto(nh1)
ctx, cancel := context.WithTimeout(context.Background(), pto)
_, err := nh1.SyncPropose(ctx, session, []byte("test-data"))
cancel()
if err != nil {
time.Sleep(100 * time.Millisecond)
continue
}
snapshot, err := logdb.GetSnapshot(1, 1)
if err != nil {
t.Fatalf("list snapshot failed %v", err)
}
if !pb.IsEmptySnapshot(snapshot) {
snapshotted = true
break
} else if i%50 == 0 {
// see comments in testOnDiskStateMachineCanTakeDummySnapshot
time.Sleep(100 * time.Millisecond)
}
}
if !snapshotted {
t.Fatalf("failed to take 3 snapshots")
}
pto := pto(nh1)
rs, err := nh1.RequestAddNode(1, 2, nodeHostTestAddr2, 0, pto)
if err != nil {
t.Fatalf("failed to add node %v", err)
}
s := <-rs.ResultC()
if !s.Completed() {
t.Fatalf("failed to complete the add node request")
}
rc = config.Config{
ClusterID: 1,
NodeID: 2,
ElectionRTT: 3,
HeartbeatRTT: 1,
CheckQuorum: true,
SnapshotEntries: 5,
CompactionOverhead: 2,
}
sm2 := tests.NewFakeDiskSM(0)
sm2.SetAborted()
newSM2 := func(uint64, uint64) sm.IOnDiskStateMachine {
return sm2
}
sm1.ClearAborted()
if err := nh2.StartOnDiskCluster(nil, true, newSM2, rc); err != nil {
t.Fatalf("failed to start cluster %v", err)
}
ssIndex := uint64(0)
logdb = nh2.mu.logdb
waitForLeaderToBeElected(t, nh2, 1)
for i := uint64(2); i < 1000; i++ {
ctx, cancel := context.WithTimeout(context.Background(), pto)
_, err := nh2.SyncPropose(ctx, session, []byte("test-data"))
cancel()
plog.Infof("nh2 proposal result: %v", err)
if err != nil {
time.Sleep(100 * time.Millisecond)
continue
}
ss, err := logdb.GetSnapshot(1, 2)
if err != nil {
t.Fatalf("list snapshot failed %v", err)
}
if !pb.IsEmptySnapshot(ss) {
if !sm2.Recovered() {
t.Fatalf("not recovered")
}
if !sm1.Aborted() {
t.Fatalf("not aborted")
}
if ss.OnDiskIndex == 0 {
t.Errorf("on disk index not recorded in ss")
}
shrunk, err := rsm.IsShrunkSnapshotFile(ss.Filepath, fs)
if err != nil {
t.Errorf("failed to check whether snapshot is shrunk %v", err)
}
if !shrunk {
t.Errorf("snapshot %d is not shrunk", ss.Index)
}
if ssIndex == 0 {
ssIndex = ss.Index
} else {
if ssIndex != ss.Index {
break
}
}
} else if i%50 == 0 {
// see comments in testOnDiskStateMachineCanTakeDummySnapshot
time.Sleep(100 * time.Millisecond)
}
}
if ssIndex == 0 {
t.Fatalf("failed to take 2 snapshots")
}
listener, ok := nh2.events.sys.ul.(*testSysEventListener)
if !ok {
t.Fatalf("failed to get the system event listener")
}
if len(listener.getSnapshotReceived()) == 0 {
t.Fatalf("snapshot received not notified")
}
if len(listener.getSnapshotRecovered()) == 0 {
t.Fatalf("failed to be notified for recovered snapshot")
}
if len(listener.getLogCompacted()) == 0 {
t.Fatalf("log compaction not notified")
}
listener, ok = nh1.events.sys.ul.(*testSysEventListener)
if !ok {
t.Fatalf("failed to get the system event listener")
}
if len(listener.getSendSnapshotStarted()) == 0 {
t.Fatalf("send snapshot started not notified")
}
if len(listener.getSendSnapshotCompleted()) == 0 {
t.Fatalf("send snapshot completed not notified")
}
if listener.getConnectionEstablished() == 0 {
t.Fatalf("connection established not notified")
}
}
twoFakeDiskNodeHostTest(t, tf, fs)
}
func TestConcurrentStateMachineLookup(t *testing.T) {
fs := vfs.GetTestFS()
done := uint32(0)
tf := func(t *testing.T, nh *NodeHost) {
nhc := nh.NodeHostConfig()
clusterID := 1 + nhc.Expert.Engine.ApplyShards
count := uint32(0)
stopper := syncutil.NewStopper()
pto := pto(nh)
stopper.RunWorker(func() {
for i := 0; i < 10000; i++ {
ctx, cancel := context.WithTimeout(context.Background(), pto)
session := nh.GetNoOPSession(clusterID)
_, err := nh.SyncPropose(ctx, session, []byte("test"))
cancel()
if err == ErrTimeout {
continue
}
if err != nil {
t.Fatalf("failed to make proposal %v", err)
}
if atomic.LoadUint32(&count) > 0 {
return
}
}
})
stopper.RunWorker(func() {
for i := 0; i < 10000; i++ {
if i%5 == 0 {
time.Sleep(time.Millisecond)
}
rs, err := nh.ReadIndex(clusterID, pto)
if err != nil {
continue
}
s := <-rs.ResultC()
if !s.Completed() {
continue
}
st := random.LockGuardedRand.Uint64()%7 + 1
time.Sleep(time.Duration(st) * time.Millisecond)
result, err := nh.ReadLocalNode(rs, []byte("test"))
if err != nil {
continue
}
v := binary.LittleEndian.Uint32(result.([]byte))
if v%2 == 1 {
atomic.AddUint32(&count, 1)
atomic.StoreUint32(&done, 1)
return
}
}
})
stopper.Stop()
if atomic.LoadUint32(&done) == 0 {
t.Fatalf("failed to have any concurrent read")
}
}
singleConcurrentNodeHostTest(t, tf, 0, true, fs)
}
func TestConcurrentStateMachineSaveSnapshot(t *testing.T) {
fs := vfs.GetTestFS()
tf := func(t *testing.T, nh *NodeHost) {
nhc := nh.NodeHostConfig()
clusterID := 1 + nhc.Expert.Engine.ApplyShards
nhi := nh.GetNodeHostInfo(DefaultNodeHostInfoOption)
for _, ci := range nhi.ClusterInfoList {
if ci.ClusterID == clusterID {
if ci.StateMachineType != sm.ConcurrentStateMachine {
t.Errorf("unexpected state machine type")
}
}
if ci.IsNonVoting {
t.Errorf("unexpected IsNonVoting value")
}
}
result := make(map[uint64]struct{})
session := nh.GetNoOPSession(clusterID)
pto := pto(nh)
for i := 0; i < 10000; i++ {
ctx, cancel := context.WithTimeout(context.Background(), pto)
v, err := nh.SyncPropose(ctx, session, []byte("test"))
cancel()
if err != nil {
t.Fatalf("failed to make proposal %v", err)
}
result[v.Value] = struct{}{}
if len(result) > 1 {
return
}
time.Sleep(time.Millisecond)
}
t.Fatalf("failed to make proposal when saving snapshots")
}
singleConcurrentNodeHostTest(t, tf, 10, true, fs)
}
func TestErrorCanBeReturnedWhenLookingUpConcurrentStateMachine(t *testing.T) {
fs := vfs.GetTestFS()
tf := func(t *testing.T, nh *NodeHost) {
nhc := nh.NodeHostConfig()
clusterID := 1 + nhc.Expert.Engine.ApplyShards
for i := 0; i < 100; i++ {
pto := pto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
_, err := nh.SyncRead(ctx, clusterID, []byte("test"))
cancel()
if err != sm.ErrSnapshotStopped {
t.Fatalf("error not returned")
}
}
}
singleConcurrentNodeHostTest(t, tf, 10, true, fs)
}
func TestRegularStateMachineDoesNotAllowConcurrentUpdate(t *testing.T) {
fs := vfs.GetTestFS()
failed := uint32(0)
tf := func(t *testing.T, nh *NodeHost) {
nhi := nh.GetNodeHostInfo(DefaultNodeHostInfoOption)
for _, ci := range nhi.ClusterInfoList {
if ci.ClusterID == 1 {
if ci.StateMachineType != sm.RegularStateMachine {
t.Errorf("unexpected state machine type")
}
}
if ci.IsNonVoting {
t.Errorf("unexpected IsNonVoting value")
}
}
stopper := syncutil.NewStopper()
pto := pto(nh)
stopper.RunWorker(func() {
for i := 0; i < 100; i++ {
ctx, cancel := context.WithTimeout(context.Background(), pto)
session := nh.GetNoOPSession(1)
_, err := nh.SyncPropose(ctx, session, []byte("test"))
if err != nil {
plog.Infof("failed to make proposal %v\n", err)
}
cancel()
if atomic.LoadUint32(&failed) == 1 {
return
}
}
})
stopper.RunWorker(func() {
for i := 0; i < 100; i++ {
ctx, cancel := context.WithTimeout(context.Background(), pto)
result, err := nh.SyncRead(ctx, 1, []byte("test"))
cancel()
if err != nil {
continue
}
v := binary.LittleEndian.Uint32(result.([]byte))
if v == 1 {
atomic.StoreUint32(&failed, 1)
return
}
}
})
stopper.Stop()
if atomic.LoadUint32(&failed) == 1 {
t.Fatalf("unexpected concurrent update observed")
}
}
singleConcurrentNodeHostTest(t, tf, 0, false, fs)
}
func TestRegularStateMachineDoesNotAllowConcurrentSaveSnapshot(t *testing.T) {
fs := vfs.GetTestFS()
tf := func(t *testing.T, nh *NodeHost) {
result := make(map[uint64]struct{})
session := nh.GetNoOPSession(1)
pto := pto(nh)
for i := 0; i < 50; i++ {
ctx, cancel := context.WithTimeout(context.Background(), pto)
v, err := nh.SyncPropose(ctx, session, []byte("test"))
cancel()
if err != nil {
continue
}
result[v.Value] = struct{}{}
if len(result) > 1 {
t.Fatalf("unexpected concurrent save snapshot observed")
}
}
}
singleConcurrentNodeHostTest(t, tf, 10, false, fs)
}
func TestLogDBRateLimit(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
updateConfig: func(c *config.Config) *config.Config {
c.MaxInMemLogSize = 1024 * 3
return c
},
updateNodeHostConfig: func(c *config.NodeHostConfig) *config.NodeHostConfig {
logDBConfig := config.GetDefaultLogDBConfig()
logDBConfig.KVMaxWriteBufferNumber = 2
logDBConfig.KVWriteBufferSize = 1024 * 8
c.Expert.LogDB = logDBConfig
return c
},
tf: func(nh *NodeHost) {
if nh.mu.logdb.Name() == "Tan" {
t.Skip("skipped, using tan logdb")
}
for i := 0; i < 10240; i++ {
pto := pto(nh)
session := nh.GetNoOPSession(1)
ctx, cancel := context.WithTimeout(context.Background(), pto)
_, err := nh.SyncPropose(ctx, session, make([]byte, 512))
cancel()
if err == ErrSystemBusy {
return
}
}
t.Fatalf("failed to return ErrSystemBusy")
},
}
runNodeHostTest(t, to, fs)
}
func TestTooBigPayloadIsRejectedWhenRateLimited(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
updateConfig: func(c *config.Config) *config.Config {
c.MaxInMemLogSize = 1024 * 3
return c
},
createSM: func(uint64, uint64) sm.IStateMachine {
return &tests.NoOP{MillisecondToSleep: 20}
},
tf: func(nh *NodeHost) {
bigPayload := make([]byte, 1024*1024)
session := nh.GetNoOPSession(1)
pto := pto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
_, err := nh.SyncPropose(ctx, session, bigPayload)
cancel()
if err != ErrPayloadTooBig {
t.Errorf("failed to return ErrPayloadTooBig")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestProposalsCanBeMadeWhenRateLimited(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
updateConfig: func(c *config.Config) *config.Config {
c.MaxInMemLogSize = 1024 * 3
return c
},
createSM: func(uint64, uint64) sm.IStateMachine {
return &tests.NoOP{MillisecondToSleep: 20}
},
tf: func(nh *NodeHost) {
session := nh.GetNoOPSession(1)
for i := 0; i < 16; i++ {
pto := pto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
_, err := nh.SyncPropose(ctx, session, make([]byte, 16))
cancel()
if err == ErrTimeout {
continue
}
if err != nil {
t.Fatalf("failed to make proposal %v", err)
}
}
},
}
runNodeHostTest(t, to, fs)
}
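// makeTestProposal retries a 1KB no-op proposal on cluster 1 up to count times,
// sleeping briefly between attempts, and returns true once a proposal succeeds.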
func makeTestProposal(nh *NodeHost, count int) bool {
session := nh.GetNoOPSession(1)
for i := 0; i < count; i++ {
pto := pto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
_, err := nh.SyncPropose(ctx, session, make([]byte, 1024))
cancel()
if err == nil {
return true
}
time.Sleep(20 * time.Millisecond)
}
return false
}
func TestRateLimitCanBeTriggered(t *testing.T) {
fs := vfs.GetTestFS()
limited := uint32(0)
stopper := syncutil.NewStopper()
to := &testOption{
updateConfig: func(c *config.Config) *config.Config {
c.MaxInMemLogSize = 1024 * 3
return c
},
createSM: func(uint64, uint64) sm.IStateMachine {
return &tests.NoOP{MillisecondToSleep: 20}
},
tf: func(nh *NodeHost) {
pto := pto(nh)
session := nh.GetNoOPSession(1)
for i := 0; i < 10; i++ {
stopper.RunWorker(func() {
for j := 0; j < 16; j++ {
if atomic.LoadUint32(&limited) == 1 {
return
}
ctx, cancel := context.WithTimeout(context.Background(), pto)
_, err := nh.SyncPropose(ctx, session, make([]byte, 1024))
cancel()
if err == ErrSystemBusy {
atomic.StoreUint32(&limited, 1)
return
}
}
})
}
stopper.Stop()
if atomic.LoadUint32(&limited) != 1 {
t.Fatalf("failed to observe ErrSystemBusy")
}
if makeTestProposal(nh, 10000) {
return
}
t.Fatalf("failed to make proposal again")
},
}
runNodeHostTest(t, to, fs)
}
func TestRateLimitCanUseFollowerFeedback(t *testing.T) {
fs := vfs.GetTestFS()
tf := func(t *testing.T, nh1 *NodeHost, nh2 *NodeHost,
n1 *tests.NoOP, n2 *tests.NoOP) {
session := nh1.GetNoOPSession(1)
limited := false
for i := 0; i < 2000; i++ {
pto := pto(nh1)
ctx, cancel := context.WithTimeout(context.Background(), pto)
_, err := nh1.SyncPropose(ctx, session, make([]byte, 1024))
cancel()
if err == ErrClusterNotReady {
time.Sleep(20 * time.Millisecond)
} else if err == ErrSystemBusy {
limited = true
break
}
}
if !limited {
t.Fatalf("failed to observe rate limited")
}
n1.SetSleepTime(0)
n2.SetSleepTime(0)
if makeTestProposal(nh1, 2000) {
plog.Infof("rate limit lifted, all good")
return
}
t.Fatalf("failed to make proposal again")
}
rateLimitedTwoNodeHostTest(t, tf, fs)
}
func TestUpdateResultIsReturnedToCaller(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
updateConfig: func(c *config.Config) *config.Config {
c.MaxInMemLogSize = 1024 * 3
return c
},
createSM: func(uint64, uint64) sm.IStateMachine {
return &tests.NoOP{MillisecondToSleep: 20}
},
tf: func(nh *NodeHost) {
session := nh.GetNoOPSession(1)
pto := pto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
cmd := make([]byte, 1518)
rand.Read(cmd)
result, err := nh.SyncPropose(ctx, session, cmd)
cancel()
if err != nil {
t.Errorf("failed to make proposal %v", err)
}
if result.Value != uint64(1518) {
t.Errorf("unexpected result value")
}
if !bytes.Equal(result.Data, cmd) {
t.Errorf("unexpected result data")
}
},
}
runNodeHostTest(t, to, fs)
}
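// TestRaftLogQuery makes a batch of proposals and then queries the raft log,
// checking the returned entries and LogRange for an in-range query, a query past
// the last index and an out-of-range query. It then takes a snapshot with
// CompactionIndex set and confirms that querying compacted entries eventually
// returns RequestOutOfRange.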
func TestRaftLogQuery(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
for i := 0; i < 10; i++ {
makeTestProposal(nh, 10)
}
rs, err := nh.QueryRaftLog(1, 1, 11, math.MaxUint64)
assert.NoError(t, err)
ticker := time.NewTicker(2 * time.Second)
defer ticker.Stop()
select {
case v := <-rs.CompletedC:
assert.True(t, v.Completed())
entries, logRange := v.RaftLogs()
assert.Equal(t, 10, len(entries))
assert.Equal(t, LogRange{FirstIndex: 1, LastIndex: 13}, logRange)
case <-ticker.C:
t.Fatalf("no results")
}
rs.Release()
// this should be fine
rs, err = nh.QueryRaftLog(1, 1, 1000, math.MaxUint64)
assert.NoError(t, err)
select {
case v := <-rs.CompletedC:
assert.True(t, v.Completed())
entries, logRange := v.RaftLogs()
assert.Equal(t, 12, len(entries))
assert.Equal(t, LogRange{FirstIndex: 1, LastIndex: 13}, logRange)
case <-ticker.C:
t.Fatalf("no results")
}
rs.Release()
// OutOfRange expected
rs, err = nh.QueryRaftLog(1, 13, 1000, math.MaxUint64)
assert.NoError(t, err)
select {
case v := <-rs.CompletedC:
assert.True(t, v.RequestOutOfRange())
entries, logRange := v.RaftLogs()
assert.Equal(t, 0, len(entries))
assert.Equal(t, LogRange{FirstIndex: 1, LastIndex: 13}, logRange)
case <-ticker.C:
t.Fatalf("no results")
}
rs.Release()
// generate a snapshot, a compaction will be requested in the background
// then query compacted log will fail.
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
opts := SnapshotOption{
CompactionIndex: 10,
OverrideCompactionOverhead: true,
}
_, err = nh.SyncRequestSnapshot(ctx, 1, opts)
assert.NoError(t, err)
done := false
for i := 0; i < 1000; i++ {
func() {
rs, err := nh.QueryRaftLog(1, 1, 11, math.MaxUint64)
assert.NoError(t, err)
ticker := time.NewTicker(2 * time.Second)
defer ticker.Stop()
select {
case v := <-rs.CompletedC:
if v.Completed() {
time.Sleep(10 * time.Millisecond)
return
}
assert.True(t, v.RequestOutOfRange())
entries, logRange := v.RaftLogs()
assert.Equal(t, 0, len(entries))
assert.Equal(t, LogRange{FirstIndex: 11, LastIndex: 13}, logRange)
done = true
return
case <-ticker.C:
t.Fatalf("no results")
}
rs.Release()
}()
if done {
return
}
if i == 999 {
t.Fatalf("failed to observe RequestOutOfRange error")
}
}
},
}
runNodeHostTest(t, to, fs)
}
func TestIsNonVotingIsReturnedWhenNodeIsNonVoting(t *testing.T) {
fs := vfs.GetTestFS()
tf := func(t *testing.T, nh1 *NodeHost, nh2 *NodeHost) {
rc := config.Config{
ClusterID: 1,
NodeID: 1,
ElectionRTT: 3,
HeartbeatRTT: 1,
CheckQuorum: true,
SnapshotEntries: 5,
CompactionOverhead: 2,
SnapshotCompressionType: config.NoCompression,
}
newSM := func(uint64, uint64) sm.IOnDiskStateMachine {
return tests.NewFakeDiskSM(0)
}
peers := make(map[uint64]string)
peers[1] = nodeHostTestAddr1
if err := nh1.StartOnDiskCluster(peers, false, newSM, rc); err != nil {
t.Errorf("failed to start nonVoting %v", err)
}
waitForLeaderToBeElected(t, nh1, 1)
rc = config.Config{
ClusterID: 1,
NodeID: 2,
ElectionRTT: 3,
HeartbeatRTT: 1,
IsNonVoting: true,
CheckQuorum: true,
SnapshotEntries: 5,
CompactionOverhead: 2,
}
newSM2 := func(uint64, uint64) sm.IOnDiskStateMachine {
return tests.NewFakeDiskSM(0)
}
pto := pto(nh1)
rs, err := nh1.RequestAddNonVoting(1, 2, nodeHostTestAddr2, 0, pto)
if err != nil {
t.Fatalf("failed to add nonVoting %v", err)
}
<-rs.ResultC()
if err := nh2.StartOnDiskCluster(nil, true, newSM2, rc); err != nil {
t.Errorf("failed to start nonVoting %v", err)
}
for i := 0; i < 10000; i++ {
nhi := nh2.GetNodeHostInfo(DefaultNodeHostInfoOption)
for _, ci := range nhi.ClusterInfoList {
if ci.Pending {
continue
}
if ci.IsNonVoting && ci.NodeID == 2 {
return
}
}
time.Sleep(10 * time.Millisecond)
}
t.Errorf("failed to get is nonVoting flag")
}
twoFakeDiskNodeHostTest(t, tf, fs)
}
func TestSnapshotIndexWillPanicOnRegularRequestResult(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
cs := nh.GetNoOPSession(1)
pto := pto(nh)
rs, err := nh.Propose(cs, make([]byte, 1), pto)
if err != nil {
t.Fatalf("propose failed %v", err)
}
defer func() {
if r := recover(); r == nil {
t.Fatalf("no panic")
}
}()
v := <-rs.ResultC()
plog.Infof("%d", v.SnapshotIndex())
},
}
runNodeHostTest(t, to, fs)
}
func TestSyncRequestSnapshot(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
session := nh.GetNoOPSession(1)
pto := lpto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
cmd := make([]byte, 1518)
_, err := nh.SyncPropose(ctx, session, cmd)
cancel()
if err != nil {
t.Fatalf("failed to make proposal %v", err)
}
ctx, cancel = context.WithTimeout(context.Background(), pto)
idx, err := nh.SyncRequestSnapshot(ctx, 1, DefaultSnapshotOption)
cancel()
if err != nil {
t.Fatalf("%v", err)
}
if idx == 0 {
t.Errorf("unexpected index %d", idx)
}
listener, ok := nh.events.sys.ul.(*testSysEventListener)
if !ok {
t.Fatalf("failed to get the system event listener")
}
waitSnapshotInfoEvent(t, listener.getSnapshotCreated, 1)
si := listener.getSnapshotCreated()[0]
if si.ClusterID != 1 || si.NodeID != 1 {
t.Fatalf("incorrect created snapshot info")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestSnapshotCanBeExportedAfterSnapshotting(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
session := nh.GetNoOPSession(1)
pto := lpto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
cmd := make([]byte, 1518)
_, err := nh.SyncPropose(ctx, session, cmd)
cancel()
if err != nil {
t.Fatalf("failed to make proposal %v", err)
}
ctx, cancel = context.WithTimeout(context.Background(), pto)
idx, err := nh.SyncRequestSnapshot(ctx, 1, DefaultSnapshotOption)
cancel()
if err != nil {
t.Fatalf("%v", err)
}
if idx == 0 {
t.Errorf("unexpected index %d", idx)
}
sspath := "exported_snapshot_safe_to_delete"
if err := fs.RemoveAll(sspath); err != nil {
t.Fatalf("%v", err)
}
if err := fs.MkdirAll(sspath, 0755); err != nil {
t.Fatalf("%v", err)
}
defer func() {
if err := fs.RemoveAll(sspath); err != nil {
t.Fatalf("%v", err)
}
}()
opt := SnapshotOption{
Exported: true,
ExportPath: sspath,
}
ctx, cancel = context.WithTimeout(context.Background(), pto)
exportIdx, err := nh.SyncRequestSnapshot(ctx, 1, opt)
cancel()
if err != nil {
t.Fatalf("%v", err)
}
if exportIdx != idx {
t.Errorf("unexpected index %d, want %d", exportIdx, idx)
}
},
}
runNodeHostTest(t, to, fs)
}
func TestCanOverrideSnapshotOverhead(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
session := nh.GetNoOPSession(1)
cmd := make([]byte, 1)
pto := pto(nh)
for i := 0; i < 16; i++ {
ctx, cancel := context.WithTimeout(context.Background(), pto)
_, err := nh.SyncPropose(ctx, session, cmd)
cancel()
if err != nil {
// see comments in testOnDiskStateMachineCanTakeDummySnapshot
if err == ErrTimeout {
time.Sleep(500 * time.Millisecond)
continue
}
t.Fatalf("failed to make proposal %v", err)
}
}
opt := SnapshotOption{
OverrideCompactionOverhead: true,
CompactionOverhead: 0,
}
lpto := lpto(nh)
sr, err := nh.RequestSnapshot(1, opt, lpto)
if err != nil {
t.Fatalf("failed to request snapshot")
}
v := <-sr.ResultC()
if !v.Completed() {
t.Errorf("failed to complete the requested snapshot")
}
if v.SnapshotIndex() < 16 {
t.Fatalf("unexpected snapshot index %d", v.SnapshotIndex())
}
logdb := nh.mu.logdb
for i := 0; i < 1000; i++ {
if i == 999 {
t.Fatalf("failed to compact the entries")
}
time.Sleep(10 * time.Millisecond)
op, err := nh.RequestCompaction(1, 1)
if err == nil {
<-op.ResultC()
}
ents, _, err := logdb.IterateEntries(nil, 0, 1, 1, 12, 14, math.MaxUint64)
if err != nil {
t.Fatalf("failed to iterate entries, %v", err)
}
if len(ents) != 0 {
continue
} else {
return
}
}
},
}
runNodeHostTest(t, to, fs)
}
func TestSnapshotCanBeRequested(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
pto := lpto(nh)
session := nh.GetNoOPSession(1)
ctx, cancel := context.WithTimeout(context.Background(), pto)
cmd := make([]byte, 1518)
_, err := nh.SyncPropose(ctx, session, cmd)
cancel()
if err != nil {
t.Fatalf("failed to make proposal %v", err)
}
sr, err := nh.RequestSnapshot(1, SnapshotOption{}, pto)
if err != nil {
t.Errorf("failed to request snapshot")
}
var index uint64
v := <-sr.ResultC()
if !v.Completed() {
t.Errorf("failed to complete the requested snapshot")
}
index = v.SnapshotIndex()
sr, err = nh.RequestSnapshot(1, SnapshotOption{}, pto)
if err != nil {
t.Fatalf("failed to request snapshot")
}
v = <-sr.ResultC()
if !v.Rejected() {
t.Errorf("failed to complete the requested snapshot")
}
logdb := nh.mu.logdb
snapshot, err := logdb.GetSnapshot(1, 1)
if err != nil {
t.Fatalf("%v", err)
}
if pb.IsEmptySnapshot(snapshot) {
t.Fatalf("failed to save snapshots")
}
if snapshot.Index != index {
t.Errorf("unexpected index value")
}
reader, header, err := rsm.NewSnapshotReader(snapshot.Filepath, fs)
if err != nil {
t.Fatalf("failed to new snapshot reader %v", err)
}
defer reader.Close()
if rsm.SSVersion(header.Version) != rsm.V2 {
t.Errorf("unexpected snapshot version")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestClientCanBeNotifiedOnCommittedConfigChange(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
updateNodeHostConfig: func(c *config.NodeHostConfig) *config.NodeHostConfig {
c.NotifyCommit = true
return c
},
tf: func(nh *NodeHost) {
pto := pto(nh)
rs, err := nh.RequestAddNode(1, 2, "localhost:3456", 0, pto)
if err != nil {
t.Fatalf("failed to request add node")
}
if rs.committedC == nil {
t.Fatalf("committedC not set")
}
cn := <-rs.ResultC()
if !cn.Committed() {
t.Fatalf("failed to get committed notification")
}
cn = <-rs.ResultC()
if !cn.Completed() {
t.Fatalf("failed to get completed notification")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestClientCanBeNotifiedOnCommittedProposals(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
updateNodeHostConfig: func(c *config.NodeHostConfig) *config.NodeHostConfig {
c.NotifyCommit = true
return c
},
tf: func(nh *NodeHost) {
pto := pto(nh)
cmd := make([]byte, 128)
session := nh.GetNoOPSession(1)
rs, err := nh.Propose(session, cmd, pto)
if err != nil {
t.Fatalf("failed to make proposal %v", err)
}
if rs.committedC == nil {
t.Fatalf("committedC not set")
}
cn := <-rs.ResultC()
if !cn.Committed() {
t.Fatalf("failed to get committed notification")
}
cn = <-rs.ResultC()
if !cn.Completed() {
t.Fatalf("failed to get completed notification")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestRequestSnapshotTimeoutWillBeReported(t *testing.T) {
fs := vfs.GetTestFS()
pst := &PST{slowSave: true}
to := &testOption{
createSM: func(uint64, uint64) sm.IStateMachine {
return pst
},
tf: func(nh *NodeHost) {
pto := pto(nh)
sr, err := nh.RequestSnapshot(1, SnapshotOption{}, pto)
if err != nil {
t.Fatalf("failed to request snapshot")
}
v := <-sr.ResultC()
if !v.Timeout() {
t.Errorf("failed to report timeout")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestSyncRemoveData(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
if err := nh.StopCluster(1); err != nil {
t.Fatalf("failed to remove cluster %v", err)
}
pto := lpto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
defer cancel()
if err := nh.SyncRemoveData(ctx, 1, 1); err != nil {
t.Fatalf("sync remove data failed: %v", err)
}
listener, ok := nh.events.sys.ul.(*testSysEventListener)
if !ok {
t.Fatalf("failed to get the system event listener")
}
waitNodeInfoEvent(t, listener.getNodeUnloaded, 1)
ni := listener.getNodeUnloaded()[0]
if ni.ClusterID != 1 || ni.NodeID != 1 {
t.Fatalf("incorrect node unloaded info")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestRemoveNodeDataWillFailWhenNodeIsStillRunning(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
if err := nh.RemoveData(1, 1); err != ErrClusterNotStopped {
t.Errorf("remove data didn't fail")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestRestartingANodeWithRemovedDataWillBeRejected(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
if err := nh.StopCluster(1); err != nil {
t.Fatalf("failed to remove cluster %v", err)
}
for {
if err := nh.RemoveData(1, 1); err != nil {
if err == ErrClusterNotStopped {
time.Sleep(100 * time.Millisecond)
continue
} else {
t.Errorf("remove data failed %v", err)
}
}
break
}
rc := getTestConfig()
peers := make(map[uint64]string)
peers[1] = nh.RaftAddress()
newPST := func(clusterID uint64, nodeID uint64) sm.IStateMachine {
return &PST{}
}
if err := nh.StartCluster(peers, false, newPST, *rc); err != ErrNodeRemoved {
t.Errorf("start cluster failed %v", err)
}
},
}
runNodeHostTest(t, to, fs)
}
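// TestRemoveNodeDataRemovesAllNodeData stops the node, calls RemoveData and then
// verifies that the snapshot directory is marked as deleted and that bootstrap
// info, log entries, snapshots and raft state are all removed from the LogDB.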
func TestRemoveNodeDataRemovesAllNodeData(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
session := nh.GetNoOPSession(1)
pto := lpto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
cmd := make([]byte, 1518)
_, err := nh.SyncPropose(ctx, session, cmd)
cancel()
if err != nil {
t.Fatalf("failed to make proposal %v", err)
}
sr, err := nh.RequestSnapshot(1, SnapshotOption{}, pto)
if err != nil {
t.Errorf("failed to request snapshot")
}
v := <-sr.ResultC()
if !v.Completed() {
t.Errorf("failed to complete the requested snapshot")
}
if err := nh.StopCluster(1); err != nil {
t.Fatalf("failed to stop cluster %v", err)
}
logdb := nh.mu.logdb
snapshot, err := logdb.GetSnapshot(1, 1)
if err != nil {
t.Fatalf("%v", err)
}
if pb.IsEmptySnapshot(snapshot) {
t.Fatalf("failed to save snapshots")
}
snapshotDir := nh.env.GetSnapshotDir(nh.nhConfig.GetDeploymentID(), 1, 1)
exist, err := fileutil.Exist(snapshotDir, fs)
if err != nil {
t.Fatalf("%v", err)
}
if !exist {
t.Fatalf("snapshot dir %s does not exist", snapshotDir)
}
files, err := fs.List(snapshotDir)
if err != nil {
t.Fatalf("failed to read dir %v", err)
}
sscount := 0
for _, fn := range files {
fi, err := fs.Stat(fs.PathJoin(snapshotDir, fn))
if err != nil {
t.Fatalf("failed to get stat for %s", fn)
}
if !fi.IsDir() {
continue
}
if server.SnapshotDirNameRe.Match([]byte(fi.Name())) {
sscount++
}
}
if sscount == 0 {
t.Fatalf("no snapshot dir found")
}
removed := false
for i := 0; i < 1000; i++ {
err := nh.RemoveData(1, 1)
if err == ErrClusterNotStopped {
time.Sleep(100 * time.Millisecond)
continue
}
if err != nil {
t.Fatalf("failed to remove data %v", err)
}
if err == nil {
removed = true
break
}
}
if !removed {
t.Fatalf("failed to remove node data")
}
marked, err := fileutil.IsDirMarkedAsDeleted(snapshotDir, fs)
if err != nil {
t.Fatalf("%v", err)
}
if !marked {
t.Fatalf("snapshot dir %s still exist", snapshotDir)
}
files, err = fs.List(snapshotDir)
if err != nil {
t.Fatalf("failed to read dir %v", err)
}
for _, fn := range files {
fi, err := fs.Stat(fs.PathJoin(snapshotDir, fn))
if err != nil {
t.Fatalf("failed to get stat for %s", fn)
}
if !fi.IsDir() {
continue
}
if server.SnapshotDirNameRe.Match([]byte(fi.Name())) {
t.Fatalf("failed to delete the snapshot dir %s", fi.Name())
}
}
bs, err := logdb.GetBootstrapInfo(1, 1)
if !errors.Is(err, raftio.ErrNoBootstrapInfo) {
t.Fatalf("failed to delete bootstrap %v", err)
}
if !reflect.DeepEqual(bs, pb.Bootstrap{}) {
t.Fatalf("bs not nil")
}
ents, sz, err := logdb.IterateEntries(nil, 0, 1, 1, 0,
math.MaxUint64, math.MaxUint64)
if err != nil {
t.Fatalf("failed to get entries %v", err)
}
if len(ents) != 0 || sz != 0 {
t.Fatalf("entry returned")
}
snapshot, err = logdb.GetSnapshot(1, 1)
if err != nil {
t.Fatalf("%v", err)
}
if !pb.IsEmptySnapshot(snapshot) {
t.Fatalf("snapshot not deleted")
}
_, err = logdb.ReadRaftState(1, 1, 1)
if !errors.Is(err, raftio.ErrNoSavedLog) {
t.Fatalf("raft state not deleted %v", err)
}
sysop, err := nh.RequestCompaction(1, 1)
if err != nil {
t.Fatalf("failed to request compaction %v", err)
}
<-sysop.ResultC()
},
}
runNodeHostTest(t, to, fs)
}
func TestSnapshotOptionIsChecked(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
opts := SnapshotOption{
OverrideCompactionOverhead: true,
CompactionIndex: 100,
CompactionOverhead: 10,
}
assert.Equal(t, ErrInvalidOption, opts.Validate())
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
_, err := nh.SyncRequestSnapshot(ctx, 1, opts)
assert.Equal(t, ErrInvalidOption, err)
rs, err := nh.RequestSnapshot(1, opts, time.Second)
assert.Equal(t, ErrInvalidOption, err)
assert.Nil(t, rs)
},
}
runNodeHostTest(t, to, fs)
}
func TestSnapshotCanBeExported(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
sspath := "exported_snapshot_safe_to_delete"
if err := fs.RemoveAll(sspath); err != nil {
t.Fatalf("%v", err)
}
if err := fs.MkdirAll(sspath, 0755); err != nil {
t.Fatalf("%v", err)
}
defer func() {
if err := fs.RemoveAll(sspath); err != nil {
t.Fatalf("%v", err)
}
}()
session := nh.GetNoOPSession(1)
pto := lpto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
cmd := make([]byte, 1518)
_, err := nh.SyncPropose(ctx, session, cmd)
cancel()
if err != nil {
t.Fatalf("failed to make proposal %v", err)
}
opt := SnapshotOption{
Exported: true,
ExportPath: sspath,
}
sr, err := nh.RequestSnapshot(1, opt, pto)
if err != nil {
t.Errorf("failed to request snapshot")
}
var index uint64
v := <-sr.ResultC()
if !v.Completed() {
t.Fatalf("failed to complete the requested snapshot")
}
index = v.SnapshotIndex()
logdb := nh.mu.logdb
snapshot, err := logdb.GetSnapshot(1, 1)
if err != nil {
t.Fatalf("%v", err)
}
// exported snapshot is not managed by the system
if !pb.IsEmptySnapshot(snapshot) {
t.Fatalf("snapshot record unexpectedly inserted into the system")
}
snapshotDir := fmt.Sprintf("snapshot-%016X", index)
snapshotFile := fmt.Sprintf("snapshot-%016X.gbsnap", index)
fp := fs.PathJoin(sspath, snapshotDir, snapshotFile)
exist, err := fileutil.Exist(fp, fs)
if err != nil {
t.Fatalf("%v", err)
}
if !exist {
t.Errorf("snapshot file not saved")
}
metafp := fs.PathJoin(sspath, snapshotDir, "snapshot.metadata")
exist, err = fileutil.Exist(metafp, fs)
if err != nil {
t.Fatalf("%v", err)
}
if !exist {
t.Errorf("snapshot metadata not saved")
}
var ss pb.Snapshot
if err := fileutil.GetFlagFileContent(fs.PathJoin(sspath, snapshotDir),
"snapshot.metadata", &ss, fs); err != nil {
t.Fatalf("failed to get snapshot from its metadata file")
}
if ss.OnDiskIndex != 0 {
t.Errorf("on disk index is not 0")
}
if ss.Imported {
t.Errorf("incorrectly recorded as imported")
}
if ss.Type != pb.RegularStateMachine {
t.Errorf("incorrect type")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestOnDiskStateMachineCanExportSnapshot(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
fakeDiskNode: true,
tf: func(nh *NodeHost) {
session := nh.GetNoOPSession(1)
proposed := false
for i := 0; i < 16; i++ {
pto := pto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
_, err := nh.SyncPropose(ctx, session, []byte("test-data"))
cancel()
if err == nil {
proposed = true
break
} else {
time.Sleep(100 * time.Millisecond)
}
}
if !proposed {
t.Fatalf("failed to make proposal")
}
sspath := "exported_snapshot_safe_to_delete"
if err := fs.RemoveAll(sspath); err != nil {
t.Fatalf("%v", err)
}
if err := fs.MkdirAll(sspath, 0755); err != nil {
t.Fatalf("%v", err)
}
defer func() {
if err := fs.RemoveAll(sspath); err != nil {
t.Fatalf("%v", err)
}
}()
opt := SnapshotOption{
Exported: true,
ExportPath: sspath,
}
aborted := false
index := uint64(0)
pto := lpto(nh)
for {
sr, err := nh.RequestSnapshot(1, opt, pto)
if err == ErrRejected {
continue
}
if err != nil {
t.Fatalf("failed to request snapshot %v", err)
}
v := <-sr.ResultC()
if v.Aborted() {
aborted = true
continue
}
if v.code == requestRejected {
continue
}
if !v.Completed() {
t.Fatalf("failed to complete the requested snapshot, %s", v.code)
}
index = v.SnapshotIndex()
break
}
if !aborted {
t.Fatalf("never aborted")
}
logdb := nh.mu.logdb
snapshot, err := logdb.GetSnapshot(1, 1)
if err != nil {
t.Fatalf("%v", err)
}
if !pb.IsEmptySnapshot(snapshot) {
t.Fatalf("snapshot record unexpectedly inserted into the system")
}
snapshotDir := fmt.Sprintf("snapshot-%016X", index)
snapshotFile := fmt.Sprintf("snapshot-%016X.gbsnap", index)
fp := fs.PathJoin(sspath, snapshotDir, snapshotFile)
exist, err := fileutil.Exist(fp, fs)
if err != nil {
t.Fatalf("%v", err)
}
if !exist {
t.Errorf("snapshot file not saved")
}
metafp := fs.PathJoin(sspath, snapshotDir, "snapshot.metadata")
exist, err = fileutil.Exist(metafp, fs)
if err != nil {
t.Fatalf("%v", err)
}
if !exist {
t.Errorf("snapshot metadata not saved")
}
shrunk, err := rsm.IsShrunkSnapshotFile(fp, fs)
if err != nil {
t.Fatalf("%v", err)
}
if shrunk {
t.Errorf("exported snapshot is considered as shrunk")
}
var ss pb.Snapshot
if err := fileutil.GetFlagFileContent(fs.PathJoin(sspath, snapshotDir),
"snapshot.metadata", &ss, fs); err != nil {
t.Fatalf("failed to get snapshot from its metadata file")
}
if ss.OnDiskIndex == 0 {
t.Errorf("on disk index is not recorded")
}
if ss.Imported {
t.Errorf("incorrectly recorded as imported")
}
if ss.Type != pb.OnDiskStateMachine {
t.Errorf("incorrect type")
}
},
}
runNodeHostTest(t, to, fs)
}
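// testImportedSnapshotIsAlwaysRestored exports a snapshot, makes further
// proposals, then imports the exported snapshot with tools.ImportSnapshot
// (optionally into a fresh NodeHostDir) and restarts the node, checking that the
// restarted state machine reports exactly the exported snapshot index, i.e.
// state produced after the export is discarded.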
func testImportedSnapshotIsAlwaysRestored(t *testing.T,
newDir bool, ct config.CompressionType, fs vfs.IFS) {
tf := func() {
rc := config.Config{
ClusterID: 1,
NodeID: 1,
ElectionRTT: 3,
HeartbeatRTT: 1,
CheckQuorum: true,
SnapshotEntries: 5,
CompactionOverhead: 2,
SnapshotCompressionType: ct,
}
peers := make(map[uint64]string)
peers[1] = nodeHostTestAddr1
nhc := config.NodeHostConfig{
NodeHostDir: singleNodeHostTestDir,
RTTMillisecond: getRTTMillisecond(fs, singleNodeHostTestDir),
RaftAddress: nodeHostTestAddr1,
Expert: getTestExpertConfig(fs),
}
nh, err := NewNodeHost(nhc)
if err != nil {
t.Fatalf("failed to create node host %v", err)
}
pto := lpto(nh)
newSM := func(uint64, uint64) sm.IOnDiskStateMachine {
return tests.NewSimDiskSM(0)
}
if err := nh.StartOnDiskCluster(peers, false, newSM, rc); err != nil {
t.Fatalf("failed to start cluster %v", err)
}
waitForLeaderToBeElected(t, nh, 1)
makeProposals := func(nn *NodeHost) {
session := nn.GetNoOPSession(1)
for i := 0; i < 16; i++ {
ctx, cancel := context.WithTimeout(context.Background(), pto)
_, err := nn.SyncPropose(ctx, session, []byte("test-data"))
cancel()
if err != nil {
t.Errorf("failed to make proposal %v", err)
}
}
}
makeProposals(nh)
sspath := "exported_snapshot_safe_to_delete"
if err := fs.RemoveAll(sspath); err != nil {
t.Fatalf("%v", err)
}
if err := fs.MkdirAll(sspath, 0755); err != nil {
t.Fatalf("%v", err)
}
defer func() {
if err := fs.RemoveAll(sspath); err != nil {
t.Fatalf("%v", err)
}
}()
opt := SnapshotOption{
Exported: true,
ExportPath: sspath,
}
var index uint64
for i := 0; i < 1000; i++ {
if i == 999 {
t.Fatalf("failed to export snapshot")
}
sr, err := nh.RequestSnapshot(1, opt, pto)
if err != nil {
t.Fatalf("failed to request snapshot %v", err)
}
v := <-sr.ResultC()
if v.Rejected() {
time.Sleep(10 * time.Millisecond)
continue
}
if v.Completed() {
index = v.SnapshotIndex()
break
}
}
makeProposals(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
rv, err := nh.SyncRead(ctx, 1, nil)
cancel()
if err != nil {
t.Fatalf("failed to read applied value %v", err)
}
applied := rv.(uint64)
if applied <= index {
t.Fatalf("invalid applied value %d", applied)
}
ctx, cancel = context.WithTimeout(context.Background(), pto)
if err := nh.SyncRequestAddNode(ctx, 1, 2, "noidea:8080", 0); err != nil {
t.Fatalf("failed to add node %v", err)
}
nh.Close()
snapshotDir := fmt.Sprintf("snapshot-%016X", index)
dir := fs.PathJoin(sspath, snapshotDir)
members := make(map[uint64]string)
members[1] = nhc.RaftAddress
if newDir {
nhc.NodeHostDir = fs.PathJoin(nhc.NodeHostDir, "newdir")
}
if err := tools.ImportSnapshot(nhc, dir, members, 1); err != nil {
t.Fatalf("failed to import snapshot %v", err)
}
ok, err := upgrade310.CanUpgradeToV310(nhc)
if err != nil {
t.Errorf("failed to check whether upgrade is possible")
}
if ok {
t.Errorf("should not be considered as ok to upgrade")
}
func() {
rnh, err := NewNodeHost(nhc)
if err != nil {
t.Fatalf("failed to create node host %v", err)
}
defer rnh.Close()
rnewSM := func(uint64, uint64) sm.IOnDiskStateMachine {
return tests.NewSimDiskSM(applied)
}
if err := rnh.StartOnDiskCluster(nil, false, rnewSM, rc); err != nil {
t.Fatalf("failed to start cluster %v", err)
}
waitForLeaderToBeElected(t, rnh, 1)
ctx, cancel = context.WithTimeout(context.Background(), pto)
rv, err = rnh.SyncRead(ctx, 1, nil)
cancel()
if err != nil {
t.Fatalf("failed to read applied value %v", err)
}
if index != rv.(uint64) {
t.Fatalf("invalid returned value %d", rv.(uint64))
}
makeProposals(rnh)
}()
ok, err = upgrade310.CanUpgradeToV310(nhc)
if err != nil {
t.Errorf("failed to check whether upgrade is possible")
}
if !ok {
t.Errorf("can not upgrade")
}
}
runNodeHostTestDC(t, tf, true, fs)
}
func TestImportedSnapshotIsAlwaysRestored(t *testing.T) {
if vfs.GetTestFS() != vfs.DefaultFS {
t.Skip("not using the default fs")
} else {
fs := vfs.GetTestFS()
testImportedSnapshotIsAlwaysRestored(t, true, config.NoCompression, fs)
testImportedSnapshotIsAlwaysRestored(t, false, config.NoCompression, fs)
testImportedSnapshotIsAlwaysRestored(t, false, config.Snappy, fs)
}
}
func TestClusterWithoutQuorumCanBeRestoredByImportingSnapshot(t *testing.T) {
fs := vfs.GetTestFS()
tf := func() {
nh1dir := fs.PathJoin(singleNodeHostTestDir, "nh1")
nh2dir := fs.PathJoin(singleNodeHostTestDir, "nh2")
rc := config.Config{
ClusterID: 1,
NodeID: 1,
ElectionRTT: 10,
HeartbeatRTT: 1,
CheckQuorum: true,
SnapshotEntries: 5,
CompactionOverhead: 2,
}
peers := make(map[uint64]string)
peers[1] = nodeHostTestAddr1
nhc1 := config.NodeHostConfig{
WALDir: nh1dir,
NodeHostDir: nh1dir,
RTTMillisecond: getRTTMillisecond(fs, nh1dir),
RaftAddress: nodeHostTestAddr1,
Expert: getTestExpertConfig(fs),
}
nhc2 := config.NodeHostConfig{
WALDir: nh2dir,
NodeHostDir: nh2dir,
RTTMillisecond: getRTTMillisecond(fs, nh2dir),
RaftAddress: nodeHostTestAddr2,
Expert: getTestExpertConfig(fs),
}
var once sync.Once
nh1, err := NewNodeHost(nhc1)
if err != nil {
t.Fatalf("failed to create node host %v", err)
}
nh2, err := NewNodeHost(nhc2)
if err != nil {
t.Fatalf("failed to create node host %v", err)
}
sm1 := tests.NewFakeDiskSM(0)
sm1.SetAborted()
newSM := func(uint64, uint64) sm.IOnDiskStateMachine {
return sm1
}
newSM2 := func(uint64, uint64) sm.IOnDiskStateMachine {
return tests.NewFakeDiskSM(0)
}
if err := nh1.StartOnDiskCluster(peers, false, newSM, rc); err != nil {
t.Fatalf("failed to start cluster %v", err)
}
waitForLeaderToBeElected(t, nh1, 1)
defer func() {
if err := fs.RemoveAll(singleNodeHostTestDir); err != nil {
t.Fatalf("%v", err)
}
}()
defer once.Do(func() {
nh1.Close()
nh2.Close()
})
session := nh1.GetNoOPSession(1)
mkproposal := func(nh *NodeHost) {
done := false
pto := pto(nh)
for i := 0; i < 100; i++ {
ctx, cancel := context.WithTimeout(context.Background(), pto)
_, err := nh.SyncPropose(ctx, session, []byte("test-data"))
cancel()
if err == nil {
done = true
break
} else {
time.Sleep(200 * time.Millisecond)
}
}
if !done {
t.Fatalf("failed to make proposal on restored cluster")
}
}
mkproposal(nh1)
sspath := "exported_snapshot_safe_to_delete"
if err := fs.RemoveAll(sspath); err != nil {
t.Fatalf("%v", err)
}
if err := fs.MkdirAll(sspath, 0755); err != nil {
t.Fatalf("%v", err)
}
defer func() {
if err := fs.RemoveAll(sspath); err != nil {
t.Fatalf("%v", err)
}
}()
opt := SnapshotOption{
Exported: true,
ExportPath: sspath,
}
pto := lpto(nh1)
sr, err := nh1.RequestSnapshot(1, opt, pto)
if err != nil {
t.Fatalf("failed to request snapshot %v", err)
}
var index uint64
v := <-sr.ResultC()
if !v.Completed() {
t.Fatalf("failed to complete the requested snapshot")
}
index = v.SnapshotIndex()
snapshotDir := fmt.Sprintf("snapshot-%016X", index)
dir := fs.PathJoin(sspath, snapshotDir)
members := make(map[uint64]string)
members[1] = nhc1.RaftAddress
members[10] = nhc2.RaftAddress
once.Do(func() {
nh1.Close()
nh2.Close()
})
if err := tools.ImportSnapshot(nhc1, dir, members, 1); err != nil {
t.Fatalf("failed to import snapshot %v", err)
}
if err := tools.ImportSnapshot(nhc2, dir, members, 10); err != nil {
t.Fatalf("failed to import snapshot %v", err)
}
rnh1, err := NewNodeHost(nhc1)
if err != nil {
t.Fatalf("failed to create node host %v", err)
}
rnh2, err := NewNodeHost(nhc2)
if err != nil {
t.Fatalf("failed to create node host %v", err)
}
defer func() {
rnh1.Close()
rnh2.Close()
}()
if err := rnh1.StartOnDiskCluster(nil, false, newSM, rc); err != nil {
t.Fatalf("failed to start cluster %v", err)
}
rc.NodeID = 10
if err := rnh2.StartOnDiskCluster(nil, false, newSM2, rc); err != nil {
t.Fatalf("failed to start cluster %v", err)
}
waitForLeaderToBeElected(t, rnh1, 1)
mkproposal(rnh1)
mkproposal(rnh2)
}
runNodeHostTestDC(t, tf, true, fs)
}
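// chunks is a minimal receiver used by the chunk writer tests below; it simply
// counts received message batches and confirmed chunks and points the chunk
// layer at testSnapshotDir.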
type chunks struct {
received uint64
confirmed uint64
}
var (
testSnapshotDir = "test_snapshot_dir_safe_to_delete"
)
func (c *chunks) onReceive(pb.MessageBatch) {
c.received++
}
func (c *chunks) confirm(clusterID uint64, nodeID uint64, index uint64) {
c.confirmed++
}
func (c *chunks) getSnapshotDirFunc(clusterID uint64, nodeID uint64) string {
return testSnapshotDir
}
type testSink2 struct {
receiver chunkReceiver
}
func (s *testSink2) Receive(chunk pb.Chunk) (bool, bool) {
s.receiver.Add(chunk)
return true, false
}
func (s *testSink2) Close() error {
s.Receive(pb.Chunk{ChunkCount: pb.PoisonChunkCount})
return nil
}
func (s *testSink2) ClusterID() uint64 {
return 2000
}
func (s *testSink2) ToNodeID() uint64 {
return 300
}
type dataCorruptionSink struct {
receiver chunkReceiver
enabled bool
}
func (s *dataCorruptionSink) Receive(chunk pb.Chunk) (bool, bool) {
if s.enabled && len(chunk.Data) > 0 {
idx := rand.Uint64() % uint64(len(chunk.Data))
chunk.Data[idx] = chunk.Data[idx] + 1
}
s.receiver.Add(chunk)
return true, false
}
func (s *dataCorruptionSink) Close() error {
s.Receive(pb.Chunk{ChunkCount: pb.PoisonChunkCount})
return nil
}
func (s *dataCorruptionSink) ClusterID() uint64 {
return 2000
}
func (s *dataCorruptionSink) ToNodeID() uint64 {
return 300
}
type chunkReceiver interface {
Add(chunk pb.Chunk) bool
}
func getTestSSMeta() rsm.SSMeta {
return rsm.SSMeta{
Index: 1000,
Term: 5,
From: 150,
}
}
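// testCorruptedChunkWriterOutputCanBeHandledByChunk writes ten ChunkSize blocks
// of random data through a ChunkWriter into the transport chunk layer, with
// dataCorruptionSink optionally altering one byte of every chunk. exp is the
// number of snapshots the chunk layer is expected to accept: 1 when the data is
// intact, 0 when every chunk is corrupted.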
func testCorruptedChunkWriterOutputCanBeHandledByChunk(t *testing.T,
enabled bool, exp uint64, fs vfs.IFS) {
if err := fs.RemoveAll(testSnapshotDir); err != nil {
t.Fatalf("%v", err)
}
c := &chunks{}
if err := fs.MkdirAll(c.getSnapshotDirFunc(0, 0), 0755); err != nil {
t.Fatalf("%v", err)
}
cks := transport.NewChunk(c.onReceive,
c.confirm, c.getSnapshotDirFunc, 0, fs)
sink := &dataCorruptionSink{receiver: cks, enabled: enabled}
meta := getTestSSMeta()
cw := rsm.NewChunkWriter(sink, meta)
defer func() {
if err := fs.RemoveAll(testSnapshotDir); err != nil {
t.Fatalf("%v", err)
}
}()
for i := 0; i < 10; i++ {
data := make([]byte, rsm.ChunkSize)
rand.Read(data)
if _, err := cw.Write(data); err != nil {
t.Fatalf("failed to write the data %v", err)
}
}
if err := cw.Close(); err != nil {
t.Fatalf("failed to flush %v", err)
}
if c.received != exp {
t.Fatalf("unexpected received count: %d, want %d", c.received, exp)
}
if c.confirmed != exp {
t.Fatalf("unexpected confirmed count: %d, want %d", c.confirmed, exp)
}
}
func TestCorruptedChunkWriterOutputCanBeHandledByChunk(t *testing.T) {
fs := vfs.GetTestFS()
testCorruptedChunkWriterOutputCanBeHandledByChunk(t, false, 1, fs)
testCorruptedChunkWriterOutputCanBeHandledByChunk(t, true, 0, fs)
}
func TestChunkWriterOutputCanBeHandledByChunk(t *testing.T) {
fs := vfs.GetTestFS()
if err := fs.RemoveAll(testSnapshotDir); err != nil {
t.Fatalf("%v", err)
}
c := &chunks{}
if err := fs.MkdirAll(c.getSnapshotDirFunc(0, 0), 0755); err != nil {
t.Fatalf("%v", err)
}
cks := transport.NewChunk(c.onReceive,
c.confirm, c.getSnapshotDirFunc, 0, fs)
sink := &testSink2{receiver: cks}
meta := getTestSSMeta()
cw := rsm.NewChunkWriter(sink, meta)
if _, err := cw.Write(rsm.GetEmptyLRUSession()); err != nil {
t.Fatalf("write failed %v", err)
}
defer func() {
if err := fs.RemoveAll(testSnapshotDir); err != nil {
t.Fatalf("%v", err)
}
}()
payload := make([]byte, 0)
payload = append(payload, rsm.GetEmptyLRUSession()...)
for i := 0; i < 10; i++ {
data := make([]byte, rsm.ChunkSize)
rand.Read(data)
payload = append(payload, data...)
if _, err := cw.Write(data); err != nil {
t.Fatalf("failed to write the data %v", err)
}
}
if err := cw.Close(); err != nil {
t.Fatalf("failed to flush %v", err)
}
if c.received != 1 {
t.Fatalf("failed to receive the snapshot")
}
if c.confirmed != 1 {
t.Fatalf("failed to confirm")
}
fp := fs.PathJoin(testSnapshotDir,
"snapshot-00000000000003E8", "snapshot-00000000000003E8.gbsnap")
reader, _, err := rsm.NewSnapshotReader(fp, fs)
if err != nil {
t.Fatalf("failed to get a snapshot reader %v", err)
}
defer reader.Close()
got := make([]byte, 0)
buf := make([]byte, 1024*256)
for {
n, err := reader.Read(buf)
if n > 0 {
got = append(got, buf[:n]...)
}
if err == io.EOF {
break
}
}
if !bytes.Equal(got, payload) {
t.Errorf("snapshot content changed")
}
}
func TestNodeHostReturnsErrorWhenTransportCanNotBeCreated(t *testing.T) {
fs := vfs.GetTestFS()
if fs != vfs.DefaultFS {
t.Skip("memfs test mode, skipped")
}
to := &testOption{
updateNodeHostConfig: func(c *config.NodeHostConfig) *config.NodeHostConfig {
c.RaftAddress = "microsoft.com:12345"
return c
},
newNodeHostToFail: true,
}
runNodeHostTest(t, to, fs)
}
func TestNodeHostChecksLogDBType(t *testing.T) {
fs := vfs.GetTestFS()
ldb := &noopLogDB{}
to := &testOption{
updateNodeHostConfig: func(c *config.NodeHostConfig) *config.NodeHostConfig {
c.Expert.LogDBFactory = &testLogDBFactory{ldb: ldb}
return c
},
at: func(*NodeHost) {
nhc := getTestNodeHostConfig(fs)
_, err := NewNodeHost(*nhc)
if err != server.ErrLogDBType {
t.Fatalf("didn't report logdb type error %v", err)
}
},
noElection: true,
}
runNodeHostTest(t, to, fs)
}
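// spawnChild and spawn support TestNodeHostFileLock: the test re-executes the
// test binary with -spawn-child so that a second NodeHost is started against the
// same NodeHostDir, which is expected to fail with ErrLockDirectory while the
// parent still holds the directory lock.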
var spawnChild = flag.Bool("spawn-child", false, "spawned child")
func spawn(execName string) ([]byte, error) {
return exec.Command(execName, "-spawn-child",
"-test.v", "-test.run=TestNodeHostFileLock$").CombinedOutput()
}
func TestNodeHostFileLock(t *testing.T) {
fs := vfs.GetTestFS()
if fs != vfs.DefaultFS {
t.Skip("not using the default fs, skipped")
}
tf := func() {
child := *spawnChild
nhc := config.NodeHostConfig{
NodeHostDir: singleNodeHostTestDir,
RTTMillisecond: getRTTMillisecond(fs, singleNodeHostTestDir),
RaftAddress: nodeHostTestAddr1,
Expert: getTestExpertConfig(fs),
}
if !child {
nh, err := NewNodeHost(nhc)
if err != nil {
t.Fatalf("failed to create nodehost %v", err)
}
defer nh.Close()
out, err := spawn(os.Args[0])
if err == nil {
t.Fatalf("file lock didn't prevent the second nh to start, %s", out)
}
if !bytes.Contains(out, []byte("returned ErrLockDirectory")) {
t.Fatalf("unexpected output: %s", out)
}
} else {
nhc.RaftAddress = nodeHostTestAddr2
cnh, err := NewNodeHost(nhc)
if err == server.ErrLockDirectory {
t.Fatalf("returned ErrLockDirectory")
}
if err == nil {
defer cnh.Close()
}
}
}
runNodeHostTestDC(t, tf, !*spawnChild, fs)
}
type testLogDBFactory2 struct {
f func(config.NodeHostConfig,
config.LogDBCallback, []string, []string) (raftio.ILogDB, error)
name string
}
func (t *testLogDBFactory2) Create(cfg config.NodeHostConfig, cb config.LogDBCallback,
dirs []string, wals []string) (raftio.ILogDB, error) {
return t.f(cfg, cb, dirs, wals)
}
func (t *testLogDBFactory2) Name() string {
return t.name
}
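// TestNodeHostReturnsErrLogDBBrokenChangeWhenLogDBTypeChanges and the two tests
// that follow create a NodeHost with one LogDB entry format and then reopen the
// same data directory with a different one, checking that an incompatible change
// is reported (ErrLogDBBrokenChange / ErrIncompatibleData) or, when the on-disk
// data permits it, the new factory is accepted.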
func TestNodeHostReturnsErrLogDBBrokenChangeWhenLogDBTypeChanges(t *testing.T) {
fs := vfs.GetTestFS()
bff := func(config config.NodeHostConfig, cb config.LogDBCallback,
dirs []string, lldirs []string) (raftio.ILogDB, error) {
return logdb.NewDefaultBatchedLogDB(config, cb, dirs, lldirs)
}
nff := func(config config.NodeHostConfig, cb config.LogDBCallback,
dirs []string, lldirs []string) (raftio.ILogDB, error) {
return logdb.NewDefaultLogDB(config, cb, dirs, lldirs)
}
to := &testOption{
at: func(*NodeHost) {
nhc := getTestNodeHostConfig(fs)
nhc.Expert.LogDBFactory = &testLogDBFactory2{f: nff}
if _, err := NewNodeHost(*nhc); err != server.ErrLogDBBrokenChange {
t.Errorf("failed to return ErrLogDBBrokenChange")
}
},
updateNodeHostConfig: func(c *config.NodeHostConfig) *config.NodeHostConfig {
c.Expert.LogDBFactory = &testLogDBFactory2{f: bff}
return c
},
noElection: true,
}
runNodeHostTest(t, to, fs)
}
func TestNodeHostByDefaultUsePlainEntryLogDB(t *testing.T) {
fs := vfs.GetTestFS()
bff := func(config config.NodeHostConfig, cb config.LogDBCallback,
dirs []string, lldirs []string) (raftio.ILogDB, error) {
return logdb.NewDefaultBatchedLogDB(config, cb, dirs, lldirs)
}
nff := func(config config.NodeHostConfig, cb config.LogDBCallback,
dirs []string, lldirs []string) (raftio.ILogDB, error) {
return logdb.NewDefaultLogDB(config, cb, dirs, lldirs)
}
to := &testOption{
updateNodeHostConfig: func(c *config.NodeHostConfig) *config.NodeHostConfig {
c.Expert.LogDBFactory = &testLogDBFactory2{f: nff}
return c
},
noElection: true,
at: func(*NodeHost) {
nhc := getTestNodeHostConfig(fs)
nhc.Expert.LogDBFactory = &testLogDBFactory2{f: bff}
if _, err := NewNodeHost(*nhc); err != server.ErrIncompatibleData {
t.Errorf("failed to return server.ErrIncompatibleData")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestNodeHostByDefaultChecksWhetherToUseBatchedLogDB(t *testing.T) {
fs := vfs.GetTestFS()
bff := func(config config.NodeHostConfig, cb config.LogDBCallback,
dirs []string, lldirs []string) (raftio.ILogDB, error) {
return logdb.NewDefaultBatchedLogDB(config, cb, dirs, lldirs)
}
nff := func(config config.NodeHostConfig, cb config.LogDBCallback,
dirs []string, lldirs []string) (raftio.ILogDB, error) {
return logdb.NewDefaultLogDB(config, cb, dirs, lldirs)
}
to := &testOption{
updateNodeHostConfig: func(c *config.NodeHostConfig) *config.NodeHostConfig {
c.Expert.LogDBFactory = &testLogDBFactory2{f: bff}
return c
},
createSM: func(uint64, uint64) sm.IStateMachine {
return &PST{}
},
tf: func(nh *NodeHost) {
pto := pto(nh)
cs := nh.GetNoOPSession(1)
ctx, cancel := context.WithTimeout(context.Background(), pto)
_, err := nh.SyncPropose(ctx, cs, []byte("test-data"))
cancel()
if err != nil {
t.Errorf("failed to make proposal %v", err)
}
},
at: func(*NodeHost) {
nhc := getTestNodeHostConfig(fs)
nhc.Expert.LogDBFactory = &testLogDBFactory2{f: nff}
if nh, err := NewNodeHost(*nhc); err != nil {
t.Errorf("failed to create node host")
} else {
nh.Close()
}
},
}
runNodeHostTest(t, to, fs)
}
func TestNodeHostWithUnexpectedDeploymentIDWillBeDetected(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
noElection: true,
at: func(*NodeHost) {
nhc := getTestNodeHostConfig(fs)
nhc.DeploymentID = 200
_, err := NewNodeHost(*nhc)
if err != server.ErrDeploymentIDChanged {
t.Errorf("failed to return ErrDeploymentIDChanged, got %v", err)
}
},
}
runNodeHostTest(t, to, fs)
}
func TestGossipInfoIsReported(t *testing.T) {
fs := vfs.GetTestFS()
advertiseAddress := "202.96.1.2:12345"
to := &testOption{
noElection: true,
updateNodeHostConfig: func(c *config.NodeHostConfig) *config.NodeHostConfig {
c.AddressByNodeHostID = true
c.Gossip = config.GossipConfig{
BindAddress: "localhost:23001",
AdvertiseAddress: advertiseAddress,
Seed: []string{"localhost:23002"},
}
return c
},
tf: func(nh *NodeHost) {
nhi := nh.GetNodeHostInfo(DefaultNodeHostInfoOption)
if nhi.Gossip.AdvertiseAddress != advertiseAddress {
t.Errorf("unexpected advertise address, got %s, want %s",
nhi.Gossip.AdvertiseAddress, advertiseAddress)
}
if !nhi.Gossip.Enabled {
t.Errorf("gossip info not marked as enabled")
}
if nhi.Gossip.NumOfKnownNodeHosts != 1 {
t.Errorf("unexpected NumOfKnownNodeHosts, got %d, want 1",
nhi.Gossip.NumOfKnownNodeHosts)
}
if nhi.NodeHostID != nh.ID() {
t.Errorf("unexpected NodeHostID, got %s, want %s", nhi.NodeHostID, nh.ID())
}
},
}
runNodeHostTest(t, to, fs)
}
func TestLeaderInfoIsReported(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
nhi := nh.GetNodeHostInfo(DefaultNodeHostInfoOption)
if len(nhi.ClusterInfoList) != 1 {
t.Errorf("unexpected len: %d", len(nhi.ClusterInfoList))
}
if nhi.ClusterInfoList[0].ClusterID != 1 {
t.Fatalf("unexpected cluster id")
}
if !nhi.ClusterInfoList[0].IsLeader {
t.Errorf("not leader")
}
pto := pto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
defer cancel()
if err := nh.SyncRequestAddNode(ctx, 1, 2, "noidea:8080", 0); err != nil {
t.Fatalf("failed to add node %v", err)
}
for i := 0; i < 500; i++ {
nhi := nh.GetNodeHostInfo(DefaultNodeHostInfoOption)
if len(nhi.ClusterInfoList) != 1 {
t.Errorf("unexpected len: %d", len(nhi.ClusterInfoList))
}
if nhi.ClusterInfoList[0].IsLeader {
time.Sleep(20 * time.Millisecond)
} else {
return
}
}
t.Fatalf("no leader info change")
},
}
runNodeHostTest(t, to, fs)
}
func TestDroppedRequestsAreReported(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
pto := lpto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
defer cancel()
if err := nh.SyncRequestAddNode(ctx, 1, 2, "noidea:8080", 0); err != nil {
t.Fatalf("failed to add node %v", err)
}
for i := 0; i < 1000; i++ {
leaderID, ok, err := nh.GetLeaderID(1)
if err != nil {
t.Fatalf("failed to get leader id %v", err)
}
if err == nil && !ok && leaderID == 0 {
break
}
time.Sleep(10 * time.Millisecond)
if i == 999 {
t.Fatalf("leader failed to step down")
}
}
unlimited := 30 * time.Minute
func() {
nctx, ncancel := context.WithTimeout(context.Background(), unlimited)
defer ncancel()
cs := nh.GetNoOPSession(1)
for i := 0; i < 10; i++ {
if _, err := nh.SyncPropose(nctx, cs, make([]byte, 1)); err != ErrClusterNotReady {
t.Errorf("failed to get ErrClusterNotReady, got %v", err)
}
}
}()
func() {
nctx, ncancel := context.WithTimeout(context.Background(), unlimited)
defer ncancel()
for i := 0; i < 10; i++ {
if err := nh.SyncRequestAddNode(nctx, 1, 3, "noidea:8080", 0); err != ErrClusterNotReady {
t.Errorf("failed to get ErrClusterNotReady, got %v", err)
}
}
}()
func() {
nctx, ncancel := context.WithTimeout(context.Background(), unlimited)
defer ncancel()
for i := 0; i < 10; i++ {
if _, err := nh.SyncRead(nctx, 1, nil); err != ErrClusterNotReady {
t.Errorf("failed to get ErrClusterNotReady, got %v", err)
}
}
}()
},
}
runNodeHostTest(t, to, fs)
}
type testRaftEventListener struct {
mu sync.Mutex
received []raftio.LeaderInfo
}
func (rel *testRaftEventListener) LeaderUpdated(info raftio.LeaderInfo) {
rel.mu.Lock()
defer rel.mu.Unlock()
rel.received = append(rel.received, info)
}
func (rel *testRaftEventListener) get() []raftio.LeaderInfo {
rel.mu.Lock()
defer rel.mu.Unlock()
r := make([]raftio.LeaderInfo, 0)
return append(r, rel.received...)
}
func TestRaftEventsAreReported(t *testing.T) {
fs := vfs.GetTestFS()
rel := &testRaftEventListener{
received: make([]raftio.LeaderInfo, 0),
}
to := &testOption{
defaultTestNode: true,
updateNodeHostConfig: func(nh *config.NodeHostConfig) *config.NodeHostConfig {
nh.RaftEventListener = rel
return nh
},
tf: func(nh *NodeHost) {
pto := pto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
if err := nh.SyncRequestAddNode(ctx, 1, 2, "127.0.0.1:8080", 0); err != nil {
t.Fatalf("add node failed %v", err)
}
cancel()
var received []raftio.LeaderInfo
for i := 0; i < 1000; i++ {
received = rel.get()
if len(received) >= 4 {
break
}
time.Sleep(10 * time.Millisecond)
if i == 999 {
t.Fatalf("failed to get the second LeaderUpdated notification")
}
}
exp0 := raftio.LeaderInfo{
ClusterID: 1,
NodeID: 1,
LeaderID: raftio.NoLeader,
Term: 1,
}
exp1 := raftio.LeaderInfo{
ClusterID: 1,
NodeID: 1,
LeaderID: raftio.NoLeader,
Term: 2,
}
exp2 := raftio.LeaderInfo{
ClusterID: 1,
NodeID: 1,
LeaderID: 1,
Term: 2,
}
exp3 := raftio.LeaderInfo{
ClusterID: 1,
NodeID: 1,
LeaderID: raftio.NoLeader,
Term: 2,
}
expected := []raftio.LeaderInfo{exp0, exp1, exp2, exp3}
for idx := range expected {
if !reflect.DeepEqual(&(received[idx]), &expected[idx]) {
t.Errorf("unexpecded leader info, %d, %v, %v",
idx, received[idx], expected[idx])
}
}
},
}
runNodeHostTest(t, to, fs)
}
func TestV2DataCanBeHandled(t *testing.T) {
fs := vfs.GetTestFS()
if vfs.GetTestFS() != vfs.DefaultFS {
t.Skip("skipped as not using the default fs")
}
v2datafp := "internal/logdb/testdata/v2-rocksdb-batched.tar.bz2"
targetDir := "test-v2-data-safe-to-remove"
if err := fs.RemoveAll(targetDir); err != nil {
t.Fatalf("%v", err)
}
defer func() {
if err := fs.RemoveAll(targetDir); err != nil {
t.Fatalf("%v", err)
}
}()
topDirName := "single_nodehost_test_dir_safe_to_delete"
testHostname := "lindfield.local"
if err := fileutil.ExtractTarBz2(v2datafp, targetDir, fs); err != nil {
t.Fatalf("%v", err)
}
hostname, err := os.Hostname()
if err != nil {
t.Fatalf("failed to get hostname %v", err)
}
testPath := fs.PathJoin(targetDir, topDirName, testHostname)
expPath := fs.PathJoin(targetDir, topDirName, hostname)
if expPath != testPath {
if err := fs.Rename(testPath, expPath); err != nil {
t.Fatalf("failed to rename the dir %v", err)
}
}
v2dataDir := fs.PathJoin(targetDir, topDirName)
to := &testOption{
noElection: true,
updateNodeHostConfig: func(c *config.NodeHostConfig) *config.NodeHostConfig {
c.WALDir = v2dataDir
c.NodeHostDir = v2dataDir
return c
},
tf: func(nh *NodeHost) {
name := nh.mu.logdb.Name()
if name != "sharded-pebble" {
// v2-rocksdb-batched.tar.bz2 contains rocksdb format data
t.Skip("skipped as not using rocksdb compatible logdb")
}
logdb := nh.mu.logdb
rs, err := logdb.ReadRaftState(2, 1, 0)
if err != nil {
t.Fatalf("failed to get raft state %v", err)
}
if rs.EntryCount != 3 || rs.State.Commit != 3 {
t.Errorf("unexpected rs value")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestSnapshotCanBeCompressed(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
compressed: true,
createSM: func(uint64, uint64) sm.IStateMachine {
return &tests.VerboseSnapshotSM{}
},
tf: func(nh *NodeHost) {
pto := lpto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
_, err := nh.SyncRequestSnapshot(ctx, 1, DefaultSnapshotOption)
cancel()
if err != nil {
t.Fatalf("failed to request snapshot %v", err)
}
logdb := nh.mu.logdb
ss, err := logdb.GetSnapshot(1, 1)
if err != nil {
t.Fatalf("failed to list snapshots: %v", err)
}
if pb.IsEmptySnapshot(ss) {
t.Fatalf("failed to get snapshot rec")
}
fi, err := fs.Stat(ss.Filepath)
if err != nil {
t.Fatalf("failed to get file path %v", err)
}
if fi.Size() > 1024*364 {
t.Errorf("snapshot file not compressed, sz %d", fi.Size())
}
},
}
runNodeHostTest(t, to, fs)
}
func makeProposals(nh *NodeHost) {
session := nh.GetNoOPSession(1)
pto := pto(nh)
for i := 0; i < 16; i++ {
ctx, cancel := context.WithTimeout(context.Background(), pto)
_, err := nh.SyncPropose(ctx, session, []byte("test-data"))
cancel()
if err != nil {
time.Sleep(100 * time.Millisecond)
}
}
}
func testWitnessIO(t *testing.T,
witnessTestFunc func(*NodeHost, *NodeHost, *tests.SimDiskSM), fs vfs.IFS) {
tf := func() {
rc := config.Config{
ClusterID: 1,
NodeID: 1,
ElectionRTT: 3,
HeartbeatRTT: 1,
CheckQuorum: true,
}
peers := make(map[uint64]string)
peers[1] = nodeHostTestAddr1
dir := fs.PathJoin(singleNodeHostTestDir, "nh1")
nhc1 := config.NodeHostConfig{
NodeHostDir: dir,
RTTMillisecond: getRTTMillisecond(fs, dir),
RaftAddress: nodeHostTestAddr1,
Expert: getTestExpertConfig(fs),
}
nh1, err := NewNodeHost(nhc1)
if err != nil {
t.Fatalf("failed to create node host %v", err)
}
defer nh1.Close()
newSM := func(uint64, uint64) sm.IOnDiskStateMachine {
return tests.NewSimDiskSM(0)
}
if err := nh1.StartOnDiskCluster(peers, false, newSM, rc); err != nil {
t.Fatalf("failed to start cluster %v", err)
}
waitForLeaderToBeElected(t, nh1, 1)
for i := 0; i < 8; i++ {
makeProposals(nh1)
pto := lpto(nh1)
ctx, cancel := context.WithTimeout(context.Background(), pto)
opt := SnapshotOption{OverrideCompactionOverhead: true, CompactionOverhead: 1}
if _, err := nh1.SyncRequestSnapshot(ctx, 1, opt); err != nil {
t.Fatalf("failed to request snapshot %v", err)
}
cancel()
}
pto := pto(nh1)
ctx, cancel := context.WithTimeout(context.Background(), pto)
if err := nh1.SyncRequestAddWitness(ctx, 1, 2, nodeHostTestAddr2, 0); err != nil {
t.Fatalf("failed to add witness %v", err)
}
cancel()
rc2 := rc
rc2.NodeID = 2
rc2.IsWitness = true
nhc2 := nhc1
nhc2.RaftAddress = nodeHostTestAddr2
nhc2.NodeHostDir = fs.PathJoin(singleNodeHostTestDir, "nh2")
nh2, err := NewNodeHost(nhc2)
if err != nil {
t.Fatalf("failed to create node host %v", err)
}
defer nh2.Close()
witness := tests.NewSimDiskSM(0)
newWitness := func(uint64, uint64) sm.IOnDiskStateMachine {
return witness
}
if err := nh2.StartOnDiskCluster(nil, true, newWitness, rc2); err != nil {
t.Fatalf("failed to start cluster %v", err)
}
waitForLeaderToBeElected(t, nh2, 1)
witnessTestFunc(nh1, nh2, witness)
}
runNodeHostTestDC(t, tf, true, fs)
}
func TestWitnessSnapshotIsCorrectlyHandled(t *testing.T) {
fs := vfs.GetTestFS()
tf := func(nh1 *NodeHost, nh2 *NodeHost, witness *tests.SimDiskSM) {
for {
if witness.GetRecovered() > 0 {
t.Fatalf("unexpected recovered count %d", witness.GetRecovered())
}
snapshot, err := nh2.mu.logdb.GetSnapshot(1, 2)
if err != nil {
t.Fatalf("%v", err)
}
if pb.IsEmptySnapshot(snapshot) {
time.Sleep(100 * time.Millisecond)
} else {
if !snapshot.Witness {
t.Errorf("not a witness snapshot")
}
return
}
}
}
testWitnessIO(t, tf, fs)
}
func TestWitnessCanReplicateEntries(t *testing.T) {
fs := vfs.GetTestFS()
tf := func(nh1 *NodeHost, nh2 *NodeHost, witness *tests.SimDiskSM) {
for i := 0; i < 8; i++ {
makeProposals(nh1)
}
if witness.GetApplied() > 0 {
t.Fatalf("unexpected applied count %d", witness.GetApplied())
}
}
testWitnessIO(t, tf, fs)
}
func TestWitnessCanNotInitiateIORequest(t *testing.T) {
fs := vfs.GetTestFS()
tf := func(nh1 *NodeHost, nh2 *NodeHost, witness *tests.SimDiskSM) {
pto := lpto(nh1)
opt := SnapshotOption{OverrideCompactionOverhead: true, CompactionOverhead: 1}
if _, err := nh2.RequestSnapshot(1, opt, pto); err != ErrInvalidOperation {
t.Fatalf("requesting snapshot on witness not rejected")
}
session := nh2.GetNoOPSession(1)
if _, err := nh2.Propose(session, []byte("test-data"), pto); err != ErrInvalidOperation {
t.Fatalf("proposal not rejected on witness")
}
session = client.NewSession(1, nh2.env.GetRandomSource())
session.PrepareForRegister()
if _, err := nh2.ProposeSession(session, pto); err != ErrInvalidOperation {
t.Fatalf("propose session not rejected on witness")
}
if _, err := nh2.ReadIndex(1, pto); err != ErrInvalidOperation {
t.Fatalf("sync read not rejected on witness")
}
if _, err := nh2.RequestAddNode(1, 3, "a3.com:12345", 0, pto); err != ErrInvalidOperation {
t.Fatalf("add node not rejected on witness")
}
if _, err := nh2.RequestDeleteNode(1, 3, 0, pto); err != ErrInvalidOperation {
t.Fatalf("delete node not rejected on witness")
}
if _, err := nh2.RequestAddNonVoting(1, 3, "a3.com:12345", 0, pto); err != ErrInvalidOperation {
t.Fatalf("add nonVoting not rejected on witness")
}
if _, err := nh2.RequestAddWitness(1, 3, "a3.com:12345", 0, pto); err != ErrInvalidOperation {
t.Fatalf("add witness not rejected on witness")
}
if err := nh2.RequestLeaderTransfer(1, 3); err != ErrInvalidOperation {
t.Fatalf("leader transfer not rejected on witness")
}
if _, err := nh2.StaleRead(1, nil); err != ErrInvalidOperation {
t.Fatalf("stale read not rejected on witness")
}
}
testWitnessIO(t, tf, fs)
}
func TestStateMachineIsClosedAfterOffloaded(t *testing.T) {
fs := vfs.GetTestFS()
tsm := &TimeoutStateMachine{}
to := &testOption{
createSM: func(uint64, uint64) sm.IStateMachine {
return tsm
},
at: func(nh *NodeHost) {
if !tsm.closed {
t.Fatalf("sm not closed")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestTimeoutCanBeReturned(t *testing.T) {
fs := vfs.GetTestFS()
rtt := getRTTMillisecond(fs, singleNodeHostTestDir)
to := &testOption{
createSM: func(uint64, uint64) sm.IStateMachine {
return &TimeoutStateMachine{
updateDelay: rtt * 10,
snapshotDelay: rtt * 10,
}
},
tf: func(nh *NodeHost) {
timeout := time.Duration(rtt*5) * time.Millisecond
ctx, cancel := context.WithTimeout(context.Background(), timeout)
session := nh.GetNoOPSession(1)
_, err := nh.SyncPropose(ctx, session, []byte("test"))
cancel()
if err != ErrTimeout {
t.Errorf("failed to return ErrTimeout, %v", err)
}
ctx, cancel = context.WithTimeout(context.Background(), timeout)
_, err = nh.SyncRequestSnapshot(ctx, 1, SnapshotOption{})
cancel()
if err != ErrTimeout {
t.Errorf("failed to return ErrTimeout, %v", err)
}
},
}
runNodeHostTest(t, to, fs)
}
func testIOErrorIsHandled(t *testing.T, op vfs.Op) {
inj := vfs.OnIndex(-1, op)
fs := vfs.Wrap(vfs.GetTestFS(), inj)
to := &testOption{
fsErrorInjection: true,
defaultTestNode: true,
tf: func(nh *NodeHost) {
if nh.mu.logdb.Name() == "Tan" {
t.Skip("skipped, using tan logdb")
}
inj.SetIndex(0)
pto := pto(nh)
ctx, cancel := context.WithTimeout(context.Background(), pto)
session := nh.GetNoOPSession(1)
_, err := nh.SyncPropose(ctx, session, []byte("test"))
cancel()
if err != ErrTimeout {
t.Fatalf("proposal unexpectedly completed, %v", err)
}
select {
case e := <-nh.engine.ec:
if e != vfs.ErrInjected && e.Error() != vfs.ErrInjected.Error() {
t.Fatalf("failed to return the expected error, %v", e)
}
default:
t.Fatalf("failed to trigger error")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestIOErrorIsHandled(t *testing.T) {
testIOErrorIsHandled(t, vfs.OpWrite)
testIOErrorIsHandled(t, vfs.OpSync)
}
func TestInstallSnapshotMessageIsNeverDropped(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
nh.partitioned = 1
handler := newNodeHostMessageHandler(nh)
msg := pb.Message{Type: pb.InstallSnapshot, ClusterId: 1, To: 1}
batch := pb.MessageBatch{Requests: []pb.Message{msg}}
s, m := handler.HandleMessageBatch(batch)
if s != 1 {
t.Errorf("snapshot message dropped, %d", s)
}
if m != 0 {
t.Errorf("unexpected message count %d", m)
}
},
}
runNodeHostTest(t, to, fs)
}
func TestMessageToUnknownNodeIsIgnored(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
handler := newNodeHostMessageHandler(nh)
msg1 := pb.Message{Type: pb.Ping, ClusterId: 1, To: 1}
msg2 := pb.Message{Type: pb.Pong, ClusterId: 1, To: 1}
msg3 := pb.Message{Type: pb.Pong, ClusterId: 1, To: 2}
batch := pb.MessageBatch{Requests: []pb.Message{msg1, msg2, msg3}}
s, m := handler.HandleMessageBatch(batch)
if s != 0 {
t.Errorf("unexpected snapshot count, %d", s)
}
if m != 2 {
t.Errorf("unexpected message count %d", m)
}
},
}
runNodeHostTest(t, to, fs)
}
func TestNodeCanBeUnloadedOnceClosed(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
countNodes := func(nh *NodeHost) uint64 {
count := uint64(0)
nh.mu.clusters.Range(func(key, value interface{}) bool {
count++
return true
})
return count
}
node, ok := nh.getCluster(1)
if !ok {
t.Fatalf("failed to get node")
}
node.requestRemoval()
retry := 1000
for retry > 0 {
retry--
if countNodes(nh) == 0 {
return
}
time.Sleep(10 * time.Millisecond)
}
t.Fatalf("failed to unload the node")
},
}
runNodeHostTest(t, to, fs)
}
func TestUsingClosedNodeHostIsNotAllowed(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
at: func(nh *NodeHost) {
plog.Infof("hello")
if err := nh.StartCluster(nil, false, nil, config.Config{}); err != ErrClosed {
t.Errorf("failed to return ErrClosed")
}
if err := nh.StartConcurrentCluster(nil, false, nil, config.Config{}); err != ErrClosed {
t.Errorf("failed to return ErrClosed")
}
if err := nh.StartOnDiskCluster(nil, false, nil, config.Config{}); err != ErrClosed {
t.Errorf("failed to return ErrClosed")
}
if err := nh.StopCluster(1); err != ErrClosed {
t.Errorf("failed to return ErrClosed")
}
if err := nh.StopNode(1, 1); err != ErrClosed {
t.Errorf("failed to return ErrClosed")
}
ctx, cancel := context.WithTimeout(context.Background(), 100*time.Second)
defer cancel()
if _, err := nh.SyncPropose(ctx, nil, nil); err != ErrClosed {
t.Errorf("failed to return ErrClosed")
}
if _, err := nh.SyncRead(ctx, 1, nil); err != ErrClosed {
t.Errorf("failed to return ErrClosed")
}
if _, err := nh.SyncGetClusterMembership(ctx, 1); err != ErrClosed {
t.Errorf("failed to return ErrClosed")
}
if _, _, err := nh.GetLeaderID(1); err != ErrClosed {
t.Errorf("failed to return ErrClosed")
}
if _, err := nh.Propose(nil, nil, time.Second); err != ErrClosed {
t.Errorf("failed to return ErrClosed")
}
if _, err := nh.ReadIndex(1, time.Second); err != ErrClosed {
t.Errorf("failed to return ErrClosed")
}
if _, err := nh.ReadLocalNode(nil, nil); err != ErrClosed {
t.Errorf("failed to return ErrClosed")
}
if _, err := nh.NAReadLocalNode(nil, nil); err != ErrClosed {
t.Errorf("failed to return ErrClosed")
}
if _, err := nh.StaleRead(1, nil); err != ErrClosed {
t.Errorf("failed to return ErrClosed")
}
if _, err := nh.SyncRequestSnapshot(ctx, 1, DefaultSnapshotOption); err != ErrClosed {
t.Errorf("failed to return ErrClosed")
}
if _, err := nh.RequestSnapshot(1, DefaultSnapshotOption, time.Second); err != ErrClosed {
t.Errorf("failed to return ErrClosed")
}
if _, err := nh.RequestCompaction(1, 1); err != ErrClosed {
t.Errorf("failed to return ErrClosed")
}
if err := nh.SyncRequestDeleteNode(ctx, 1, 1, 0); err != ErrClosed {
t.Errorf("failed to return ErrClosed")
}
if err := nh.SyncRequestAddNode(ctx, 1, 1, "", 0); err != ErrClosed {
t.Errorf("failed to return ErrClosed")
}
if err := nh.SyncRequestAddNonVoting(ctx, 1, 1, "", 0); err != ErrClosed {
t.Errorf("failed to return ErrClosed")
}
if err := nh.SyncRequestAddWitness(ctx, 1, 1, "", 0); err != ErrClosed {
t.Errorf("failed to return ErrClosed")
}
if _, err := nh.RequestDeleteNode(1, 1, 0, time.Second); err != ErrClosed {
t.Errorf("failed to return ErrClosed")
}
if _, err := nh.RequestAddNode(1, 1, "", 0, time.Second); err != ErrClosed {
t.Errorf("failed to return ErrClosed")
}
if _, err := nh.RequestAddNonVoting(1, 1, "", 0, time.Second); err != ErrClosed {
t.Errorf("failed to return ErrClosed")
}
if _, err := nh.RequestAddWitness(1, 1, "", 0, time.Second); err != ErrClosed {
t.Errorf("failed to return ErrClosed")
}
if err := nh.RequestLeaderTransfer(1, 2); err != ErrClosed {
t.Errorf("failed to return ErrClosed")
}
if err := nh.SyncRemoveData(ctx, 1, 1); err != ErrClosed {
t.Errorf("failed to return ErrClosed")
}
if err := nh.RemoveData(1, 1); err != ErrClosed {
t.Errorf("failed to return ErrClosed")
}
if _, err := nh.GetNodeUser(1); err != ErrClosed {
t.Errorf("failed to return ErrClosed")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestContextDeadlineIsChecked(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
ctx := context.Background()
if _, err := nh.SyncPropose(ctx, nil, nil); err != ErrDeadlineNotSet {
t.Errorf("ctx deadline not checked")
}
if _, err := nh.SyncRead(ctx, 1, nil); err != ErrDeadlineNotSet {
t.Errorf("ctx deadline not checked")
}
if _, err := nh.SyncGetClusterMembership(ctx, 1); err != ErrDeadlineNotSet {
t.Errorf("ctx deadline not checked")
}
if _, err := nh.GetNewSession(ctx, 1); err != ErrDeadlineNotSet {
t.Errorf("ctx deadline not checked")
}
if err := nh.CloseSession(ctx, nil); err != ErrDeadlineNotSet {
t.Errorf("ctx deadline not checked")
}
if _, err := nh.SyncGetSession(ctx, 1); err != ErrDeadlineNotSet {
t.Errorf("ctx deadline not checked")
}
if err := nh.SyncCloseSession(ctx, nil); err != ErrDeadlineNotSet {
t.Errorf("ctx deadline not checked")
}
if _, err := nh.SyncRequestSnapshot(ctx, 1, DefaultSnapshotOption); err != ErrDeadlineNotSet {
t.Errorf("ctx deadline not checked")
}
if err := nh.SyncRequestDeleteNode(ctx, 1, 1, 0); err != ErrDeadlineNotSet {
t.Errorf("ctx deadline not checked")
}
if err := nh.SyncRequestAddNode(ctx, 1, 2, "", 0); err != ErrDeadlineNotSet {
t.Errorf("ctx deadline not checked")
}
if err := nh.SyncRequestAddNonVoting(ctx, 1, 2, "", 0); err != ErrDeadlineNotSet {
t.Errorf("ctx deadline not checked")
}
if err := nh.SyncRequestAddWitness(ctx, 1, 2, "", 0); err != ErrDeadlineNotSet {
t.Errorf("ctx deadline not checked")
}
if err := nh.SyncRemoveData(ctx, 1, 1); err != ErrDeadlineNotSet {
t.Errorf("ctx deadline not checked")
}
},
}
runNodeHostTest(t, to, fs)
}
func TestGetTimeoutFromContext(t *testing.T) {
defer leaktest.AfterTest(t)()
if _, err := getTimeoutFromContext(context.Background()); err != ErrDeadlineNotSet {
t.Errorf("failed to return ErrDeadlineNotSet, %v", err)
}
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
d, err := getTimeoutFromContext(ctx)
if err != nil {
t.Errorf("failed to get timeout, %v", err)
}
if d < 55*time.Second {
t.Errorf("unexpected result")
}
}
func TestHandleSnapshotStatus(t *testing.T) {
defer leaktest.AfterTest(t)()
nh := &NodeHost{stopper: syncutil.NewStopper()}
engine := newExecEngine(nh, config.GetDefaultEngineConfig(), false, false, nil, nil)
defer func() {
if err := engine.close(); err != nil {
t.Fatalf("failed to close engine %v", err)
}
}()
nh.engine = engine
nh.events.sys = newSysEventListener(nil, nh.stopper.ShouldStop())
h := messageHandler{nh: nh}
mq := server.NewMessageQueue(1024, false, lazyFreeCycle, 1024)
node := &node{clusterID: 1, nodeID: 1, mq: mq}
h.nh.mu.clusters.Store(uint64(1), node)
h.HandleSnapshotStatus(1, 2, true)
for i := uint64(0); i <= streamPushDelayTick; i++ {
node.mq.Tick()
}
msgs := node.mq.Get()
if len(msgs) != 1 {
t.Fatalf("no msg")
}
if msgs[0].Type != pb.SnapshotStatus || !msgs[0].Reject {
t.Fatalf("unexpected message")
}
}
func TestSnapshotReceivedMessageCanBeConverted(t *testing.T) {
defer leaktest.AfterTest(t)()
nh := &NodeHost{stopper: syncutil.NewStopper()}
engine := newExecEngine(nh, config.GetDefaultEngineConfig(), false, false, nil, nil)
defer func() {
if err := engine.close(); err != nil {
t.Fatalf("failed to close engine %v", err)
}
}()
nh.engine = engine
nh.events.sys = newSysEventListener(nil, nh.stopper.ShouldStop())
h := messageHandler{nh: nh}
mq := server.NewMessageQueue(1024, false, lazyFreeCycle, 1024)
node := &node{clusterID: 1, nodeID: 1, mq: mq}
h.nh.mu.clusters.Store(uint64(1), node)
mb := pb.MessageBatch{
Requests: []pb.Message{{To: 1, From: 2, ClusterId: 1, Type: pb.SnapshotReceived}},
}
sc, mc := h.HandleMessageBatch(mb)
if sc != 0 || mc != 1 {
t.Errorf("failed to handle message batch")
}
for i := uint64(0); i <= streamConfirmedDelayTick; i++ {
node.mq.Tick()
}
msgs := node.mq.Get()
if len(msgs) != 1 {
t.Fatalf("no msg")
}
if msgs[0].Type != pb.SnapshotStatus || msgs[0].Reject {
t.Fatalf("unexpected message")
}
}
func TestIncorrectlyRoutedMessagesAreIgnored(t *testing.T) {
defer leaktest.AfterTest(t)()
nh := &NodeHost{stopper: syncutil.NewStopper()}
engine := newExecEngine(nh, config.GetDefaultEngineConfig(), false, false, nil, nil)
defer func() {
if err := engine.close(); err != nil {
t.Fatalf("failed to close engine %v", err)
}
}()
nh.engine = engine
nh.events.sys = newSysEventListener(nil, nh.stopper.ShouldStop())
h := messageHandler{nh: nh}
mq := server.NewMessageQueue(1024, false, lazyFreeCycle, 1024)
node := &node{clusterID: 1, nodeID: 1, mq: mq}
h.nh.mu.clusters.Store(uint64(1), node)
mb := pb.MessageBatch{
Requests: []pb.Message{{To: 3, From: 2, ClusterId: 1, Type: pb.SnapshotReceived}},
}
sc, mc := h.HandleMessageBatch(mb)
if sc != 0 || mc != 0 {
t.Errorf("failed to ignore the message")
}
msgs := node.mq.Get()
if len(msgs) != 0 {
t.Fatalf("received unexpected message")
}
}
func TestProposeOnClosedNode(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
u, err := nh.GetNodeUser(1)
if err != nil {
t.Fatalf("failed to get node, %v", err)
}
if err := nh.StopNode(1, 1); err != nil {
t.Fatalf("failed to stop node, %v", err)
}
cs := nh.GetNoOPSession(1)
if _, err := u.Propose(cs, nil, time.Second); err == nil {
t.Errorf("propose on closed node didn't cause error")
} else {
plog.Infof("%v returned from closed node", err)
}
},
}
runNodeHostTest(t, to, fs)
}
func TestReadIndexOnClosedNode(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
u, err := nh.GetNodeUser(1)
if err != nil {
t.Fatalf("failed to get node, %v", err)
}
if err := nh.StopNode(1, 1); err != nil {
t.Fatalf("failed to stop node, %v", err)
}
if _, err := u.ReadIndex(time.Second); err == nil {
t.Errorf("ReadIndex on closed node didn't cause error")
} else {
plog.Infof("%v returned from closed node", err)
}
},
}
runNodeHostTest(t, to, fs)
}
func TestNodeCanNotStartWhenStillLoadedInEngine(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
nodes := make(map[uint64]*node)
nodes[2] = &node{clusterID: 2, nodeID: 1}
nh.engine.loaded.update(1, fromStepWorker, nodes)
if err := nh.startCluster(nil, false, nil,
config.Config{ClusterID: 2, NodeID: 1},
pb.RegularStateMachine); err != ErrClusterAlreadyExist {
t.Errorf("failed to return ErrClusterAlreadyExist, %v", err)
}
},
}
runNodeHostTest(t, to, fs)
}
func TestBootstrapInfoIsValidated(t *testing.T) {
fs := vfs.GetTestFS()
to := &testOption{
defaultTestNode: true,
tf: func(nh *NodeHost) {
if _, _, err := nh.bootstrapCluster(nil, false,
config.Config{ClusterID: 1, NodeID: 1},
pb.OnDiskStateMachine); err != ErrInvalidClusterSettings {
t.Errorf("failed to fail the boostrap, %v", err)
}
},
}
runNodeHostTest(t, to, fs)
}
// slow tests
type stressRSM struct{}
func (s *stressRSM) Update([]byte) (sm.Result, error) {
plog.Infof("updated")
return sm.Result{}, nil
}
func (s *stressRSM) Lookup(interface{}) (interface{}, error) {
return nil, nil
}
func (s *stressRSM) SaveSnapshot(w io.Writer,
f sm.ISnapshotFileCollection, c <-chan struct{}) error {
data := make([]byte, settings.SnapshotChunkSize*3)
_, err := w.Write(data)
return err
}
func (s *stressRSM) RecoverFromSnapshot(r io.Reader,
f []sm.SnapshotFile, c <-chan struct{}) error {
plog.Infof("RecoverFromSnapshot called")
data := make([]byte, settings.SnapshotChunkSize*3)
n, err := io.ReadFull(r, data)
if uint64(n) != settings.SnapshotChunkSize*3 {
return errors.New("unexpected size")
}
return err
}
func (s *stressRSM) Close() error {
return nil
}
// this test takes around 6 minutes on mbp and 30 seconds on a linux box with
// proper SSD
func TestSlowTestStressedSnapshotWorker(t *testing.T) {
if len(os.Getenv("SLOW_TEST")) == 0 {
t.Skip("skipped TestSlowTestStressedSnapshotWorker, SLOW_TEST not set")
}
fs := vfs.GetTestFS()
if err := fs.RemoveAll(singleNodeHostTestDir); err != nil {
t.Fatalf("%v", err)
}
defer func() {
if err := fs.RemoveAll(singleNodeHostTestDir); err != nil {
t.Fatalf("%v", err)
}
}()
nh1dir := fs.PathJoin(singleNodeHostTestDir, "nh1")
nh2dir := fs.PathJoin(singleNodeHostTestDir, "nh2")
rc := config.Config{
ClusterID: 1,
NodeID: 1,
ElectionRTT: 10,
HeartbeatRTT: 1,
CheckQuorum: true,
SnapshotEntries: 5,
CompactionOverhead: 1,
}
peers := make(map[uint64]string)
peers[1] = nodeHostTestAddr1
nhc1 := config.NodeHostConfig{
WALDir: nh1dir,
NodeHostDir: nh1dir,
RTTMillisecond: 5 * getRTTMillisecond(fs, nh1dir),
RaftAddress: nodeHostTestAddr1,
Expert: getTestExpertConfig(fs),
}
nh1, err := NewNodeHost(nhc1)
if err != nil {
t.Fatalf("failed to create node host %v", err)
}
defer nh1.Close()
newRSM := func(uint64, uint64) sm.IStateMachine {
return &stressRSM{}
}
for i := uint64(1); i <= uint64(96); i++ {
rc.ClusterID = i
if err := nh1.StartCluster(peers, false, newRSM, rc); err != nil {
t.Fatalf("failed to start cluster %v", err)
}
cs := nh1.GetNoOPSession(i)
total := 20
for {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
_, err := nh1.SyncPropose(ctx, cs, make([]byte, 1))
cancel()
if err == ErrTimeout || err == ErrClusterNotReady {
time.Sleep(100 * time.Millisecond)
continue
}
total--
if total == 0 {
break
}
}
}
// add another node
for i := uint64(1); i <= uint64(96); i++ {
for {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
err := nh1.SyncRequestAddNode(ctx, i, 2, nodeHostTestAddr2, 0)
cancel()
if err != nil {
if err == ErrTimeout || err == ErrClusterNotReady {
time.Sleep(100 * time.Millisecond)
continue
} else {
t.Fatalf("failed to add node %v", err)
}
} else {
break
}
}
}
plog.Infof("all nodes added")
nhc2 := config.NodeHostConfig{
WALDir: nh2dir,
NodeHostDir: nh2dir,
RTTMillisecond: 5 * getRTTMillisecond(fs, nh2dir),
RaftAddress: nodeHostTestAddr2,
Expert: getTestExpertConfig(fs),
}
nh2, err := NewNodeHost(nhc2)
if err != nil {
t.Fatalf("failed to create node host 2 %v", err)
}
// start new nodes
defer nh2.Close()
for i := uint64(1); i <= uint64(96); i++ {
rc.ClusterID = i
rc.NodeID = 2
peers := make(map[uint64]string)
if err := nh2.StartCluster(peers, true, newRSM, rc); err != nil {
t.Fatalf("failed to start cluster %v", err)
}
}
for i := uint64(1); i <= uint64(96); i++ {
cs := nh2.GetNoOPSession(i)
total := 1000
for {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
_, err := nh2.SyncPropose(ctx, cs, make([]byte, 1))
cancel()
if err != nil {
if err == ErrTimeout || err == ErrClusterNotReady {
total--
if total == 0 {
t.Fatalf("failed to make proposal on cluster %d", i)
}
time.Sleep(100 * time.Millisecond)
continue
} else {
t.Fatalf("failed to make proposal %v", err)
}
} else {
break
}
}
}
}
| [
"\"DRAGONBOAT_TEST_PORT\"",
"\"SLOW_TEST\""
] | [] | [
"SLOW_TEST",
"DRAGONBOAT_TEST_PORT"
] | [] | ["SLOW_TEST", "DRAGONBOAT_TEST_PORT"] | go | 2 | 0 | |
cancel.py | import gdax
import os
import json
API_KEY = os.environ['GDAX_API_KEY']
API_SECRET = os.environ['GDAX_API_SECRET']
API_PASS = os.environ['GDAX_API_PASS']
def main():
'''
Cancels all open LTC-USD (Litecoin) orders.
'''
client = gdax.AuthenticatedClient(API_KEY, API_SECRET, API_PASS)
r = client.cancel_all(product='LTC-USD')
print(json.dumps(r))
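# Note (illustrative, not part of the original script): running this module
# requires the GDAX_API_KEY, GDAX_API_SECRET and GDAX_API_PASS environment
# variables read above to be set, e.g.:
#   GDAX_API_KEY=... GDAX_API_SECRET=... GDAX_API_PASS=... python cancel.py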
if __name__ == '__main__':
main()
| [] | [] | [
"GDAX_API_SECRET",
"GDAX_API_KEY",
"GDAX_API_PASS"
] | [] | ["GDAX_API_SECRET", "GDAX_API_KEY", "GDAX_API_PASS"] | python | 3 | 0 | |
backend/config/wsgi.py | """
WSGI config for web project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings')
application = get_wsgi_application()
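# Illustrative usage sketch (an assumption, not part of the original file):
# this WSGI callable would typically be served by a WSGI server, e.g.
#   gunicorn config.wsgi:application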
| [] | [] | [] | [] | [] | python | 0 | 0 | |
sdk/java/src/main/java/org/arvados/sdk/Arvados.java | // Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0
package org.arvados.sdk;
import com.google.api.client.http.javanet.*;
import com.google.api.client.http.ByteArrayContent;
import com.google.api.client.http.GenericUrl;
import com.google.api.client.http.HttpBackOffIOExceptionHandler;
import com.google.api.client.http.HttpContent;
import com.google.api.client.http.HttpRequest;
import com.google.api.client.http.HttpRequestFactory;
import com.google.api.client.http.HttpTransport;
import com.google.api.client.http.UriTemplate;
import com.google.api.client.json.JsonFactory;
import com.google.api.client.json.jackson2.JacksonFactory;
import com.google.api.client.util.ExponentialBackOff;
import com.google.api.client.util.Maps;
import com.google.api.services.discovery.Discovery;
import com.google.api.services.discovery.model.JsonSchema;
import com.google.api.services.discovery.model.RestDescription;
import com.google.api.services.discovery.model.RestMethod;
import com.google.api.services.discovery.model.RestMethod.Request;
import com.google.api.services.discovery.model.RestResource;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.log4j.Logger;
import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
/**
 * This class provides a Java SDK interface to the Arvados API server.
*
* Please refer to http://doc.arvados.org/api/ to learn about the
* various resources and methods exposed by the API server.
*
* @author radhika
*/
public class Arvados {
// HttpTransport and JsonFactory are thread-safe. So, use global instances.
private HttpTransport httpTransport;
private final JsonFactory jsonFactory = JacksonFactory.getDefaultInstance();
private String arvadosApiToken;
private String arvadosApiHost;
private boolean arvadosApiHostInsecure;
private String arvadosRootUrl;
private static final Logger logger = Logger.getLogger(Arvados.class);
// Get it once and reuse on the call requests
RestDescription restDescription = null;
String apiName = null;
String apiVersion = null;
public Arvados (String apiName, String apiVersion) throws Exception {
this (apiName, apiVersion, null, null, null);
}
public Arvados (String apiName, String apiVersion, String token,
String host, String hostInsecure) throws Exception {
this.apiName = apiName;
this.apiVersion = apiVersion;
    // Read needed environment variables if they are not passed
if (token != null) {
arvadosApiToken = token;
} else {
arvadosApiToken = System.getenv().get("ARVADOS_API_TOKEN");
if (arvadosApiToken == null) {
throw new Exception("Missing environment variable: ARVADOS_API_TOKEN");
}
}
if (host != null) {
arvadosApiHost = host;
} else {
arvadosApiHost = System.getenv().get("ARVADOS_API_HOST");
if (arvadosApiHost == null) {
throw new Exception("Missing environment variable: ARVADOS_API_HOST");
}
}
arvadosRootUrl = "https://" + arvadosApiHost;
arvadosRootUrl += (arvadosApiHost.endsWith("/")) ? "" : "/";
if (hostInsecure != null) {
arvadosApiHostInsecure = Boolean.valueOf(hostInsecure);
} else {
arvadosApiHostInsecure =
"true".equals(System.getenv().get("ARVADOS_API_HOST_INSECURE")) ? true : false;
}
// Create HTTP_TRANSPORT object
NetHttpTransport.Builder builder = new NetHttpTransport.Builder();
if (arvadosApiHostInsecure) {
builder.doNotValidateCertificate();
}
httpTransport = builder.build();
// initialize rest description
restDescription = loadArvadosApi();
}
/**
   * Make a call to the API server with the provided call information.
* @param resourceName
* @param methodName
* @param paramsMap
* @return Map
* @throws Exception
*/
public Map call(String resourceName, String methodName,
Map<String, Object> paramsMap) throws Exception {
RestMethod method = getMatchingMethod(resourceName, methodName);
HashMap<String, Object> parameters = loadParameters(paramsMap, method);
GenericUrl url = new GenericUrl(UriTemplate.expand(
arvadosRootUrl + restDescription.getBasePath() + method.getPath(),
parameters, true));
try {
// construct the request
HttpRequestFactory requestFactory;
requestFactory = httpTransport.createRequestFactory();
// possibly required content
HttpContent content = null;
if (!method.getHttpMethod().equals("GET") &&
!method.getHttpMethod().equals("DELETE")) {
String objectName = resourceName.substring(0, resourceName.length()-1);
Object requestBody = paramsMap.get(objectName);
if (requestBody == null) {
error("POST method requires content object " + objectName);
}
content = new ByteArrayContent("application/json",((String)requestBody).getBytes());
}
HttpRequest request =
requestFactory.buildRequest(method.getHttpMethod(), url, content);
// Set read timeout to 120 seconds (up from default of 20 seconds)
request.setReadTimeout(120 * 1000);
// Add retry behavior
request.setIOExceptionHandler(new HttpBackOffIOExceptionHandler(new ExponentialBackOff()));
// make the request
List<String> authHeader = new ArrayList<String>();
authHeader.add("OAuth2 " + arvadosApiToken);
request.getHeaders().put("Authorization", authHeader);
String response = request.execute().parseAsString();
Map responseMap = jsonFactory.createJsonParser(response).parse(HashMap.class);
logger.debug(responseMap);
return responseMap;
} catch (Exception e) {
e.printStackTrace();
throw e;
}
}
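  // Illustrative usage sketch (an assumption, not part of the original source):
  // it only shows the expected shape of a call() invocation; the resource name
  // "users", the method "list" and the "limit" parameter are examples and may
  // differ per the API discovery document.
  //
  //   Arvados arv = new Arvados("arvados", "v1");
  //   Map<String, Object> params = new HashMap<String, Object>();
  //   params.put("limit", 10);
  //   Map response = arv.call("users", "list", params);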
/**
   * Get all resources supported by the API
* @return Set
*/
public Set<String> getAvailableResourses() {
return (restDescription.getResources().keySet());
}
/**
* Get all supported method names for the given resource
* @param resourceName
* @return Set
* @throws Exception
*/
public Set<String> getAvailableMethodsForResourse(String resourceName)
throws Exception {
Map<String, RestMethod> methodMap = getMatchingMethodMap (resourceName);
return (methodMap.keySet());
}
/**
   * Get the required and optional parameters for the given method of the given resource.
* @param resourceName
* @param methodName
   * @return Map
* @throws Exception
*/
public Map<String,List<String>> getAvailableParametersForMethod(String resourceName, String methodName)
throws Exception {
RestMethod method = getMatchingMethod(resourceName, methodName);
Map<String, List<String>> parameters = new HashMap<String, List<String>>();
List<String> requiredParameters = new ArrayList<String>();
List<String> optionalParameters = new ArrayList<String>();
parameters.put ("required", requiredParameters);
parameters.put("optional", optionalParameters);
try {
// get any request parameters
Request request = method.getRequest();
if (request != null) {
Object required = request.get("required");
Object requestProperties = request.get("properties");
if (requestProperties != null) {
if (requestProperties instanceof Map) {
Map properties = (Map)requestProperties;
Set<String> propertyKeys = properties.keySet();
for (String property : propertyKeys) {
if (Boolean.TRUE.equals(required)) {
requiredParameters.add(property);
} else {
optionalParameters.add(property);
}
}
}
}
}
// get other listed parameters
Map<String,JsonSchema> methodParameters = method.getParameters();
for (Map.Entry<String, JsonSchema> entry : methodParameters.entrySet()) {
if (Boolean.TRUE.equals(entry.getValue().getRequired())) {
requiredParameters.add(entry.getKey());
} else {
optionalParameters.add(entry.getKey());
}
}
} catch (Exception e){
logger.error(e);
}
return parameters;
}
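  // Illustrative sketch of the map returned above (the grouping keys come from
  // the method body; the parameter names themselves are hypothetical):
  //   {"required": ["uuid"], "optional": ["limit", "filters"]}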
private HashMap<String, Object> loadParameters(Map<String, Object> paramsMap,
RestMethod method) throws Exception {
HashMap<String, Object> parameters = Maps.newHashMap();
// required parameters
if (method.getParameterOrder() != null) {
for (String parameterName : method.getParameterOrder()) {
JsonSchema parameter = method.getParameters().get(parameterName);
if (Boolean.TRUE.equals(parameter.getRequired())) {
Object parameterValue = paramsMap.get(parameterName);
if (parameterValue == null) {
error("missing required parameter: " + parameter);
} else {
putParameter(null, parameters, parameterName, parameter, parameterValue);
}
}
}
}
for (Map.Entry<String, Object> entry : paramsMap.entrySet()) {
String parameterName = entry.getKey();
Object parameterValue = entry.getValue();
if (parameterName.equals("contentType")) {
if (method.getHttpMethod().equals("GET") || method.getHttpMethod().equals("DELETE")) {
error("HTTP content type cannot be specified for this method: " + parameterName);
}
} else {
JsonSchema parameter = null;
if (restDescription.getParameters() != null) {
parameter = restDescription.getParameters().get(parameterName);
}
if (parameter == null && method.getParameters() != null) {
parameter = method.getParameters().get(parameterName);
}
putParameter(parameterName, parameters, parameterName, parameter, parameterValue);
}
}
return parameters;
}
private RestMethod getMatchingMethod(String resourceName, String methodName)
throws Exception {
Map<String, RestMethod> methodMap = getMatchingMethodMap(resourceName);
if (methodName == null) {
error("missing method name");
}
RestMethod method =
methodMap == null ? null : methodMap.get(methodName);
if (method == null) {
error("method not found: ");
}
return method;
}
private Map<String, RestMethod> getMatchingMethodMap(String resourceName)
throws Exception {
if (resourceName == null) {
error("missing resource name");
}
Map<String, RestMethod> methodMap = null;
Map<String, RestResource> resources = restDescription.getResources();
RestResource resource = resources.get(resourceName);
if (resource == null) {
error("resource not found");
}
methodMap = resource.getMethods();
return methodMap;
}
/**
   * Not thread-safe. So, create for each request.
   * Uses the class level apiName and apiVersion.
* @return
* @throws Exception
*/
private RestDescription loadArvadosApi()
throws Exception {
try {
Discovery discovery;
Discovery.Builder discoveryBuilder =
new Discovery.Builder(httpTransport, jsonFactory, null);
discoveryBuilder.setRootUrl(arvadosRootUrl);
discoveryBuilder.setApplicationName(apiName);
discovery = discoveryBuilder.build();
return discovery.apis().getRest(apiName, apiVersion).execute();
} catch (Exception e) {
e.printStackTrace();
throw e;
}
}
/**
* Convert the input parameter into its equivalent json string.
* Add this json string value to the parameters map to be sent to server.
* @param argName
* @param parameters
* @param parameterName
* @param parameter
* @param parameterValue
* @throws Exception
*/
private void putParameter(String argName, Map<String, Object> parameters,
String parameterName, JsonSchema parameter, Object parameterValue)
throws Exception {
Object value = parameterValue;
if (parameter != null) {
if ("boolean".equals(parameter.getType())) {
value = Boolean.valueOf(parameterValue.toString());
} else if ("number".equals(parameter.getType())) {
value = new BigDecimal(parameterValue.toString());
} else if ("integer".equals(parameter.getType())) {
value = new BigInteger(parameterValue.toString());
} else if ("float".equals(parameter.getType())) {
value = new BigDecimal(parameterValue.toString());
} else if ("Java.util.Calendar".equals(parameter.getType())) {
value = new BigDecimal(parameterValue.toString());
} else if (("array".equals(parameter.getType())) ||
("Array".equals(parameter.getType()))) {
if (parameterValue.getClass().isArray()){
value = getJsonValueFromArrayType(parameterValue);
} else if (List.class.isAssignableFrom(parameterValue.getClass())) {
value = getJsonValueFromListType(parameterValue);
}
} else if (("Hash".equals(parameter.getType())) ||
("hash".equals(parameter.getType()))) {
value = getJsonValueFromMapType(parameterValue);
} else {
if (parameterValue.getClass().isArray()){
value = getJsonValueFromArrayType(parameterValue);
} else if (List.class.isAssignableFrom(parameterValue.getClass())) {
value = getJsonValueFromListType(parameterValue);
} else if (Map.class.isAssignableFrom(parameterValue.getClass())) {
value = getJsonValueFromMapType(parameterValue);
}
}
}
parameters.put(parameterName, value);
}
/**
   * Convert the given input array into a JSON string before sending it to the server.
* @param parameterValue
* @return
*/
private String getJsonValueFromArrayType (Object parameterValue) {
String arrayStr = Arrays.deepToString((Object[])parameterValue);
// we can expect either an array of array objects or an array of objects
if (arrayStr.startsWith("[[") && arrayStr.endsWith("]]")) {
Object[][] array = new Object[1][];
arrayStr = arrayStr.substring(2, arrayStr.length()-2);
String jsonStr = getJsonStringForArrayStr(arrayStr);
String value = "[" + jsonStr + "]";
return value;
} else {
arrayStr = arrayStr.substring(1, arrayStr.length()-1);
return (getJsonStringForArrayStr(arrayStr));
}
}
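  // Illustrative example of the conversion above (values are made up): an input
  // array such as {"foo", "bar"} is rendered by Arrays.deepToString() as
  // "[foo, bar]", the surrounding brackets are stripped, and the elements are
  // re-emitted by getJsonStringForArrayStr() as the JSON string ["foo","bar"].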
private String getJsonStringForArrayStr(String arrayStr) {
Object[] array = arrayStr.split(",");
Object[] trimmedArray = new Object[array.length];
for (int i=0; i<array.length; i++){
trimmedArray[i] = array[i].toString().trim();
}
String value = JSONArray.toJSONString(Arrays.asList(trimmedArray));
return value;
}
/**
   * Convert the given input List into a JSON string before sending it to the server.
* @param parameterValue
* @return
*/
private String getJsonValueFromListType (Object parameterValue) {
List paramList = (List)parameterValue;
Object[] array = new Object[paramList.size()];
Arrays.deepToString(paramList.toArray(array));
return (getJsonValueFromArrayType(array));
}
/**
   * Convert the given input map into a JSON string before sending it to the server.
* @param parameterValue
* @return
*/
private String getJsonValueFromMapType (Object parameterValue) {
JSONObject json = new JSONObject((Map)parameterValue);
return json.toString();
}
private static void error(String detail) throws Exception {
String errorDetail = "ERROR: " + detail;
logger.debug(errorDetail);
throw new Exception(errorDetail);
}
public static void main(String[] args){
System.out.println("Welcome to Arvados Java SDK.");
System.out.println("Please refer to http://doc.arvados.org/sdk/java/index.html to get started with the the SDK.");
}
}
| [] | [] | [] | [] | [] | java | 0 | 0 | |
tilda/__init__.py | from freenit.api import register_endpoints
def create_api(app):
register_endpoints(app, '/api/v0', [])
| [] | [] | [] | [] | [] | python | null | null | null |
run_project_tests.py | #!/usr/bin/env python3
# Copyright 2012-2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from concurrent.futures import ProcessPoolExecutor, CancelledError
from enum import Enum
from io import StringIO
from mesonbuild._pathlib import Path, PurePath
import argparse
import functools
import itertools
import json
import multiprocessing
import os
import re
import shlex
import shutil
import signal
import subprocess
import sys
import tempfile
import time
import typing as T
import xml.etree.ElementTree as ET
from mesonbuild import build
from mesonbuild import environment
from mesonbuild import compilers
from mesonbuild import mesonlib
from mesonbuild import mlog
from mesonbuild import mtest
from mesonbuild.build import ConfigurationData
from mesonbuild.mesonlib import MachineChoice, Popen_safe
from mesonbuild.coredata import backendlist, version as meson_version
from run_tests import get_fake_options, run_configure, get_meson_script
from run_tests import get_backend_commands, get_backend_args_for_dir, Backend
from run_tests import ensure_backend_detects_changes
from run_tests import guess_backend
ALL_TESTS = ['cmake', 'common', 'native', 'warning-meson', 'failing-meson', 'failing-build', 'failing-test',
'keyval', 'platform-osx', 'platform-windows', 'platform-linux',
'java', 'C#', 'vala', 'rust', 'd', 'objective c', 'objective c++',
'fortran', 'swift', 'cuda', 'python3', 'python', 'fpga', 'frameworks', 'nasm', 'wasm'
]
class BuildStep(Enum):
configure = 1
build = 2
test = 3
install = 4
clean = 5
validate = 6
class TestResult(BaseException):
def __init__(self, cicmds):
self.msg = '' # empty msg indicates test success
self.stdo = ''
self.stde = ''
self.mlog = ''
self.cicmds = cicmds
self.conftime = 0
self.buildtime = 0
self.testtime = 0
def add_step(self, step, stdo, stde, mlog='', time=0):
self.step = step
self.stdo += stdo
self.stde += stde
self.mlog += mlog
if step == BuildStep.configure:
self.conftime = time
elif step == BuildStep.build:
self.buildtime = time
elif step == BuildStep.test:
self.testtime = time
def fail(self, msg):
self.msg = msg
class InstalledFile:
def __init__(self, raw: T.Dict[str, str]):
self.path = raw['file']
self.typ = raw['type']
self.platform = raw.get('platform', None)
self.language = raw.get('language', 'c') # type: str
version = raw.get('version', '') # type: str
if version:
self.version = version.split('.') # type: T.List[str]
else:
# split on '' will return [''], we want an empty list though
self.version = []
def get_path(self, compiler: str, env: environment.Environment) -> T.Optional[Path]:
p = Path(self.path)
canonical_compiler = compiler
if ((compiler in ['clang-cl', 'intel-cl']) or
(env.machines.host.is_windows() and compiler in {'pgi', 'dmd', 'ldc'})):
canonical_compiler = 'msvc'
has_pdb = False
if self.language in {'c', 'cpp'}:
has_pdb = canonical_compiler == 'msvc'
elif self.language == 'd':
            # dmd's optlink does not generate pdb files
has_pdb = env.coredata.compilers.host['d'].linker.id in {'link', 'lld-link'}
# Abort if the platform does not match
matches = {
'msvc': canonical_compiler == 'msvc',
'gcc': canonical_compiler != 'msvc',
'cygwin': env.machines.host.is_cygwin(),
'!cygwin': not env.machines.host.is_cygwin(),
}.get(self.platform or '', True)
if not matches:
return None
# Handle the different types
if self.typ in ['file', 'dir']:
return p
elif self.typ == 'shared_lib':
if env.machines.host.is_windows() or env.machines.host.is_cygwin():
# Windows only has foo.dll and foo-X.dll
if len(self.version) > 1:
return None
if self.version:
p = p.with_name('{}-{}'.format(p.name, self.version[0]))
return p.with_suffix('.dll')
p = p.with_name('lib{}'.format(p.name))
if env.machines.host.is_darwin():
# MacOS only has libfoo.dylib and libfoo.X.dylib
if len(self.version) > 1:
return None
# pathlib.Path.with_suffix replaces, not appends
suffix = '.dylib'
if self.version:
suffix = '.{}{}'.format(self.version[0], suffix)
else:
# pathlib.Path.with_suffix replaces, not appends
suffix = '.so'
if self.version:
suffix = '{}.{}'.format(suffix, '.'.join(self.version))
return p.with_suffix(suffix)
elif self.typ == 'exe':
if env.machines.host.is_windows() or env.machines.host.is_cygwin():
return p.with_suffix('.exe')
elif self.typ == 'pdb':
if self.version:
p = p.with_name('{}-{}'.format(p.name, self.version[0]))
return p.with_suffix('.pdb') if has_pdb else None
elif self.typ == 'implib' or self.typ == 'implibempty':
if env.machines.host.is_windows() and canonical_compiler == 'msvc':
# only MSVC doesn't generate empty implibs
if self.typ == 'implibempty' and compiler == 'msvc':
return None
return p.parent / (re.sub(r'^lib', '', p.name) + '.lib')
elif env.machines.host.is_windows() or env.machines.host.is_cygwin():
return p.with_suffix('.dll.a')
else:
return None
elif self.typ == 'expr':
return Path(platform_fix_name(p.as_posix(), canonical_compiler, env))
else:
raise RuntimeError('Invalid installed file type {}'.format(self.typ))
return p
def get_paths(self, compiler: str, env: environment.Environment, installdir: Path) -> T.List[Path]:
p = self.get_path(compiler, env)
if not p:
return []
if self.typ == 'dir':
abs_p = installdir / p
if not abs_p.exists():
raise RuntimeError('{} does not exist'.format(p))
if not abs_p.is_dir():
raise RuntimeError('{} is not a directory'.format(p))
return [x.relative_to(installdir) for x in abs_p.rglob('*') if x.is_file() or x.is_symlink()]
else:
return [p]
@functools.total_ordering
class TestDef:
def __init__(self, path: Path, name: T.Optional[str], args: T.List[str], skip: bool = False):
self.path = path
self.name = name
self.args = args
self.skip = skip
self.env = os.environ.copy()
self.installed_files = [] # type: T.List[InstalledFile]
self.do_not_set_opts = [] # type: T.List[str]
self.stdout = [] # type: T.List[T.Dict[str, str]]
def __repr__(self) -> str:
return '<{}: {:<48} [{}: {}] -- {}>'.format(type(self).__name__, str(self.path), self.name, self.args, self.skip)
def display_name(self) -> str:
if self.name:
return '{} ({})'.format(self.path.as_posix(), self.name)
return self.path.as_posix()
def __lt__(self, other: object) -> bool:
if isinstance(other, TestDef):
# None is not sortable, so replace it with an empty string
s_id = int(self.path.name.split(' ')[0])
o_id = int(other.path.name.split(' ')[0])
return (s_id, self.path, self.name or '') < (o_id, other.path, other.name or '')
return NotImplemented
class AutoDeletedDir:
def __init__(self, d):
self.dir = d
def __enter__(self):
os.makedirs(self.dir, exist_ok=True)
return self.dir
def __exit__(self, _type, value, traceback):
# We don't use tempfile.TemporaryDirectory, but wrap the
# deletion in the AutoDeletedDir class because
        # it fails on Windows due to antivirus programs
# holding files open.
mesonlib.windows_proof_rmtree(self.dir)
failing_logs = []
print_debug = 'MESON_PRINT_TEST_OUTPUT' in os.environ
under_ci = 'CI' in os.environ
under_xenial_ci = under_ci and ('XENIAL' in os.environ)
skip_scientific = under_ci and ('SKIP_SCIENTIFIC' in os.environ)
do_debug = under_ci or print_debug
no_meson_log_msg = 'No meson-log.txt found.'
host_c_compiler = None
compiler_id_map = {} # type: T.Dict[str, str]
tool_vers_map = {} # type: T.Dict[str, str]
class StopException(Exception):
def __init__(self):
super().__init__('Stopped by user')
stop = False
def stop_handler(signal, frame):
global stop
stop = True
signal.signal(signal.SIGINT, stop_handler)
signal.signal(signal.SIGTERM, stop_handler)
def setup_commands(optbackend):
global do_debug, backend, backend_flags
global compile_commands, clean_commands, test_commands, install_commands, uninstall_commands
backend, backend_flags = guess_backend(optbackend, shutil.which('msbuild'))
compile_commands, clean_commands, test_commands, install_commands, \
uninstall_commands = get_backend_commands(backend, do_debug)
# TODO try to eliminate or at least reduce this function
def platform_fix_name(fname: str, canonical_compiler: str, env: environment.Environment) -> str:
if '?lib' in fname:
if env.machines.host.is_windows() and canonical_compiler == 'msvc':
fname = re.sub(r'lib/\?lib(.*)\.', r'bin/\1.', fname)
fname = re.sub(r'/\?lib/', r'/bin/', fname)
elif env.machines.host.is_windows():
fname = re.sub(r'lib/\?lib(.*)\.', r'bin/lib\1.', fname)
fname = re.sub(r'\?lib(.*)\.dll$', r'lib\1.dll', fname)
fname = re.sub(r'/\?lib/', r'/bin/', fname)
elif env.machines.host.is_cygwin():
fname = re.sub(r'lib/\?lib(.*)\.so$', r'bin/cyg\1.dll', fname)
fname = re.sub(r'lib/\?lib(.*)\.', r'bin/cyg\1.', fname)
fname = re.sub(r'\?lib(.*)\.dll$', r'cyg\1.dll', fname)
fname = re.sub(r'/\?lib/', r'/bin/', fname)
else:
fname = re.sub(r'\?lib', 'lib', fname)
if fname.endswith('?so'):
if env.machines.host.is_windows() and canonical_compiler == 'msvc':
fname = re.sub(r'lib/([^/]*)\?so$', r'bin/\1.dll', fname)
fname = re.sub(r'/(?:lib|)([^/]*?)\?so$', r'/\1.dll', fname)
return fname
elif env.machines.host.is_windows():
fname = re.sub(r'lib/([^/]*)\?so$', r'bin/\1.dll', fname)
fname = re.sub(r'/([^/]*?)\?so$', r'/\1.dll', fname)
return fname
elif env.machines.host.is_cygwin():
fname = re.sub(r'lib/([^/]*)\?so$', r'bin/\1.dll', fname)
fname = re.sub(r'/lib([^/]*?)\?so$', r'/cyg\1.dll', fname)
fname = re.sub(r'/([^/]*?)\?so$', r'/\1.dll', fname)
return fname
elif env.machines.host.is_darwin():
return fname[:-3] + '.dylib'
else:
return fname[:-3] + '.so'
return fname
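# Illustrative examples of the rewrites performed above (the file names are
# hypothetical; the substitutions follow the regexes in platform_fix_name):
#   'lib/?libfoo.dll' -> 'bin/foo.dll'    when canonical_compiler == 'msvc'
#   'lib/foo?so'      -> 'bin/foo.dll'    on Windows with MSVC
#   'lib/foo?so'      -> 'lib/foo.dylib'  on macOS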
def validate_install(test: TestDef, installdir: Path, compiler: str, env: environment.Environment) -> str:
ret_msg = ''
expected_raw = [] # type: T.List[Path]
for i in test.installed_files:
try:
expected_raw += i.get_paths(compiler, env, installdir)
except RuntimeError as err:
ret_msg += 'Expected path error: {}\n'.format(err)
expected = {x: False for x in expected_raw}
found = [x.relative_to(installdir) for x in installdir.rglob('*') if x.is_file() or x.is_symlink()]
# Mark all found files as found and detect unexpected files
for fname in found:
if fname not in expected:
ret_msg += 'Extra file {} found.\n'.format(fname)
continue
expected[fname] = True
# Check if expected files were found
for p, f in expected.items():
if not f:
ret_msg += 'Expected file {} missing.\n'.format(p)
# List dir content on error
if ret_msg != '':
ret_msg += '\nInstall dir contents:\n'
for i in found:
ret_msg += ' - {}\n'.format(i)
return ret_msg
def log_text_file(logfile, testdir, stdo, stde):
global stop, executor, futures
logfile.write('%s\nstdout\n\n---\n' % testdir.as_posix())
logfile.write(stdo)
logfile.write('\n\n---\n\nstderr\n\n---\n')
logfile.write(stde)
logfile.write('\n\n---\n\n')
if print_debug:
try:
print(stdo)
except UnicodeError:
sanitized_out = stdo.encode('ascii', errors='replace').decode()
print(sanitized_out)
try:
print(stde, file=sys.stderr)
except UnicodeError:
sanitized_err = stde.encode('ascii', errors='replace').decode()
print(sanitized_err, file=sys.stderr)
if stop:
print("Aborting..")
for f in futures:
f[2].cancel()
executor.shutdown()
raise StopException()
def bold(text):
return mlog.bold(text).get_text(mlog.colorize_console())
def green(text):
return mlog.green(text).get_text(mlog.colorize_console())
def red(text):
return mlog.red(text).get_text(mlog.colorize_console())
def yellow(text):
return mlog.yellow(text).get_text(mlog.colorize_console())
def _run_ci_include(args: T.List[str]) -> str:
if not args:
return 'At least one parameter required'
try:
data = Path(args[0]).read_text(errors='ignore', encoding='utf-8')
return 'Included file {}:\n{}\n'.format(args[0], data)
except Exception:
return 'Failed to open {}'.format(args[0])
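# Dispatch table mapping CI command names to handlers. run_ci_commands() below
# scans the meson log for lines of the form '!meson_ci!/<command> <args>', e.g.
# '!meson_ci!/ci_include some/log/file.txt' (the path here is illustrative).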
ci_commands = {
'ci_include': _run_ci_include
}
def run_ci_commands(raw_log: str) -> T.List[str]:
res = []
for l in raw_log.splitlines():
if not l.startswith('!meson_ci!/'):
continue
cmd = shlex.split(l[11:])
if not cmd or cmd[0] not in ci_commands:
continue
res += ['CI COMMAND {}:\n{}\n'.format(cmd[0], ci_commands[cmd[0]](cmd[1:]))]
return res
def _compare_output(expected: T.List[T.Dict[str, str]], output: str, desc: str) -> str:
if expected:
i = iter(expected)
def next_expected(i):
# Get the next expected line
item = next(i)
how = item.get('match', 'literal')
expected = item.get('line')
# Simple heuristic to automatically convert path separators for
# Windows:
#
# Any '/' appearing before 'WARNING' or 'ERROR' (i.e. a path in a
# filename part of a location) is replaced with '\' (in a re: '\\'
# which matches a literal '\')
#
# (There should probably be a way to turn this off for more complex
# cases which don't fit this)
if mesonlib.is_windows():
if how != "re":
sub = r'\\'
else:
sub = r'\\\\'
expected = re.sub(r'/(?=.*(WARNING|ERROR))', sub, expected)
return how, expected
try:
how, expected = next_expected(i)
for actual in output.splitlines():
if how == "re":
match = bool(re.match(expected, actual))
else:
match = (expected == actual)
if match:
how, expected = next_expected(i)
# reached the end of output without finding expected
return 'expected "{}" not found in {}'.format(expected, desc)
except StopIteration:
# matched all expected lines
pass
return ''
def validate_output(test: TestDef, stdo: str, stde: str) -> str:
return _compare_output(test.stdout, stdo, 'stdout')
# There are some class variables and such that cache
# information. Clear all of these. The better solution
# would be to change the code so that no state is persisted
# but that would be a lot of work given that Meson was originally
# coded to run as a batch process.
def clear_internal_caches():
import mesonbuild.interpreterbase
from mesonbuild.dependencies import CMakeDependency
from mesonbuild.mesonlib import PerMachine
mesonbuild.interpreterbase.FeatureNew.feature_registry = {}
CMakeDependency.class_cmakeinfo = PerMachine(None, None)
def run_test_inprocess(testdir):
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
old_stderr = sys.stderr
sys.stderr = mystderr = StringIO()
old_cwd = os.getcwd()
os.chdir(testdir)
test_log_fname = Path('meson-logs', 'testlog.txt')
try:
returncode_test = mtest.run_with_args(['--no-rebuild'])
if test_log_fname.exists():
test_log = test_log_fname.open(errors='ignore').read()
else:
test_log = ''
returncode_benchmark = mtest.run_with_args(['--no-rebuild', '--benchmark', '--logbase', 'benchmarklog'])
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
os.chdir(old_cwd)
return max(returncode_test, returncode_benchmark), mystdout.getvalue(), mystderr.getvalue(), test_log
# Build directory name must be the same so Ccache works over
# consecutive invocations.
def create_deterministic_builddir(test: TestDef, use_tmpdir: bool) -> str:
import hashlib
src_dir = test.path.as_posix()
if test.name:
src_dir += test.name
rel_dirname = 'b ' + hashlib.sha256(src_dir.encode(errors='ignore')).hexdigest()[0:10]
abs_pathname = os.path.join(tempfile.gettempdir() if use_tmpdir else os.getcwd(), rel_dirname)
os.mkdir(abs_pathname)
return abs_pathname
def format_parameter_file(file_basename: str, test: TestDef, test_build_dir: str) -> Path:
confdata = ConfigurationData()
confdata.values = {'MESON_TEST_ROOT': (str(test.path.absolute()), 'base directory of current test')}
template = test.path / (file_basename + '.in')
destination = Path(test_build_dir) / file_basename
mesonlib.do_conf_file(str(template), str(destination), confdata, 'meson')
return destination
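# detect_parameter_files returns the nativefile/crossfile paths for a test, generating them
# from the corresponding *.ini.in templates in the test directory when those exist.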
def detect_parameter_files(test: TestDef, test_build_dir: str) -> (Path, Path):
nativefile = test.path / 'nativefile.ini'
crossfile = test.path / 'crossfile.ini'
if os.path.exists(str(test.path / 'nativefile.ini.in')):
nativefile = format_parameter_file('nativefile.ini', test, test_build_dir)
if os.path.exists(str(test.path / 'crossfile.ini.in')):
crossfile = format_parameter_file('crossfile.ini', test, test_build_dir)
return nativefile, crossfile
def run_test(test: TestDef, extra_args, compiler, backend, flags, commands, should_fail, use_tmp: bool):
if test.skip:
return None
with AutoDeletedDir(create_deterministic_builddir(test, use_tmp)) as build_dir:
with AutoDeletedDir(tempfile.mkdtemp(prefix='i ', dir=None if use_tmp else os.getcwd())) as install_dir:
try:
return _run_test(test, build_dir, install_dir, extra_args, compiler, backend, flags, commands, should_fail)
except TestResult as r:
return r
finally:
mlog.shutdown() # Close the log file because otherwise Windows wets itself.
def _run_test(test: TestDef, test_build_dir: str, install_dir: str, extra_args, compiler, backend, flags, commands, should_fail):
compile_commands, clean_commands, install_commands, uninstall_commands = commands
gen_start = time.time()
# Configure in-process
gen_args = [] # type: T.List[str]
if 'prefix' not in test.do_not_set_opts:
gen_args += ['--prefix', 'x:/usr'] if mesonlib.is_windows() else ['--prefix', '/usr']
if 'libdir' not in test.do_not_set_opts:
gen_args += ['--libdir', 'lib']
gen_args += [test.path.as_posix(), test_build_dir] + flags + extra_args
nativefile, crossfile = detect_parameter_files(test, test_build_dir)
if nativefile.exists():
gen_args.extend(['--native-file', nativefile.as_posix()])
if crossfile.exists():
gen_args.extend(['--cross-file', crossfile.as_posix()])
(returncode, stdo, stde) = run_configure(gen_args, env=test.env)
try:
logfile = Path(test_build_dir, 'meson-logs', 'meson-log.txt')
mesonlog = logfile.open(errors='ignore', encoding='utf-8').read()
except Exception:
mesonlog = no_meson_log_msg
cicmds = run_ci_commands(mesonlog)
testresult = TestResult(cicmds)
testresult.add_step(BuildStep.configure, stdo, stde, mesonlog, time.time() - gen_start)
output_msg = validate_output(test, stdo, stde)
testresult.mlog += output_msg
if output_msg:
testresult.fail('Unexpected output while configuring.')
return testresult
if should_fail == 'meson':
if returncode == 1:
return testresult
elif returncode != 0:
testresult.fail('Test exited with unexpected status {}.'.format(returncode))
return testresult
else:
testresult.fail('Test that should have failed succeeded.')
return testresult
if returncode != 0:
testresult.fail('Generating the build system failed.')
return testresult
builddata = build.load(test_build_dir)
dir_args = get_backend_args_for_dir(backend, test_build_dir)
# Build with subprocess
def build_step():
build_start = time.time()
pc, o, e = Popen_safe(compile_commands + dir_args, cwd=test_build_dir)
testresult.add_step(BuildStep.build, o, e, '', time.time() - build_start)
if should_fail == 'build':
if pc.returncode != 0:
raise testresult
testresult.fail('Test that should have failed to build succeeded.')
raise testresult
if pc.returncode != 0:
testresult.fail('Compiling source code failed.')
raise testresult
# Touch the meson.build file to force a regenerate
def force_regenerate():
ensure_backend_detects_changes(backend)
os.utime(str(test.path / 'meson.build'))
# just test building
build_step()
# test that regeneration works for build step
force_regenerate()
build_step() # TBD: assert nothing gets built after the regenerate?
# test that regeneration works for test step
force_regenerate()
# Test in-process
clear_internal_caches()
test_start = time.time()
(returncode, tstdo, tstde, test_log) = run_test_inprocess(test_build_dir)
testresult.add_step(BuildStep.test, tstdo, tstde, test_log, time.time() - test_start)
if should_fail == 'test':
if returncode != 0:
return testresult
testresult.fail('Test that should have failed to run unit tests succeeded.')
return testresult
if returncode != 0:
testresult.fail('Running unit tests failed.')
return testresult
# Do installation, if the backend supports it
if install_commands:
env = os.environ.copy()
env['DESTDIR'] = install_dir
# Install with subprocess
pi, o, e = Popen_safe(install_commands, cwd=test_build_dir, env=env)
testresult.add_step(BuildStep.install, o, e)
if pi.returncode != 0:
testresult.fail('Running install failed.')
return testresult
# Clean with subprocess
env = os.environ.copy()
pi, o, e = Popen_safe(clean_commands + dir_args, cwd=test_build_dir, env=env)
testresult.add_step(BuildStep.clean, o, e)
if pi.returncode != 0:
testresult.fail('Running clean failed.')
return testresult
# Validate installed files
testresult.add_step(BuildStep.install, '', '')
if not install_commands:
return testresult
install_msg = validate_install(test, Path(install_dir), compiler, builddata.environment)
if install_msg:
testresult.fail('\n' + install_msg)
return testresult
return testresult
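# gather_tests reads each test directory's test.json (env vars, installed files, expected
# stdout, tool requirements) and expands any 'matrix' section into one TestDef per
# non-excluded option combination.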
def gather_tests(testdir: Path, stdout_mandatory: bool) -> T.List[TestDef]:
tests = [t.name for t in testdir.iterdir() if t.is_dir()]
tests = [t for t in tests if not t.startswith('.')] # Filter non-test files (dot files, etc.)
test_defs = [TestDef(testdir / t, None, []) for t in tests]
all_tests = [] # type: T.List[TestDef]
for t in test_defs:
test_def = {}
test_def_file = t.path / 'test.json'
if test_def_file.is_file():
test_def = json.loads(test_def_file.read_text())
# Handle additional environment variables
env = {} # type: T.Dict[str, str]
if 'env' in test_def:
assert isinstance(test_def['env'], dict)
env = test_def['env']
for key, val in env.items():
val = val.replace('@ROOT@', t.path.resolve().as_posix())
env[key] = val
# Handle installed files
installed = [] # type: T.List[InstalledFile]
if 'installed' in test_def:
installed = [InstalledFile(x) for x in test_def['installed']]
# Handle expected output
stdout = test_def.get('stdout', [])
if stdout_mandatory and not stdout:
raise RuntimeError("{} must contain a non-empty stdout key".format(test_def_file))
# Handle the do_not_set_opts list
do_not_set_opts = test_def.get('do_not_set_opts', []) # type: T.List[str]
# Skip tests if the tool requirements are not met
if 'tools' in test_def:
assert isinstance(test_def['tools'], dict)
for tool, vers_req in test_def['tools'].items():
if tool not in tool_vers_map:
t.skip = True
elif not mesonlib.version_compare(tool_vers_map[tool], vers_req):
t.skip = True
# Skip the matrix code and just update the existing test
if 'matrix' not in test_def:
t.env.update(env)
t.installed_files = installed
t.do_not_set_opts = do_not_set_opts
t.stdout = stdout
all_tests += [t]
continue
# 'matrix' entry is present, so build multiple tests from the matrix definition
opt_list = [] # type: T.List[T.List[T.Tuple[str, bool]]]
matrix = test_def['matrix']
assert "options" in matrix
for key, val in matrix["options"].items():
assert isinstance(val, list)
tmp_opts = [] # type: T.List[T.Tuple[str, bool]]
for i in val:
assert isinstance(i, dict)
assert "val" in i
skip = False
# Skip the matrix entry if environment variable is present
if 'skip_on_env' in i:
for skip_env_var in i['skip_on_env']:
if skip_env_var in os.environ:
skip = True
# Only run the test if all compiler ID's match
if 'compilers' in i:
for lang, id_list in i['compilers'].items():
if lang not in compiler_id_map or compiler_id_map[lang] not in id_list:
skip = True
break
# Add an empty matrix entry
if i['val'] is None:
tmp_opts += [(None, skip)]
continue
tmp_opts += [('{}={}'.format(key, i['val']), skip)]
if opt_list:
new_opt_list = [] # type: T.List[T.List[T.Tuple[str, bool]]]
for i in opt_list:
for j in tmp_opts:
new_opt_list += [[*i, j]]
opt_list = new_opt_list
else:
opt_list = [[x] for x in tmp_opts]
# Exclude specific configurations
if 'exclude' in matrix:
assert isinstance(matrix['exclude'], list)
new_opt_list = [] # type: T.List[T.List[T.Tuple[str, bool]]]
for i in opt_list:
exclude = False
opt_names = [x[0] for x in i]
for j in matrix['exclude']:
ex_list = ['{}={}'.format(k, v) for k, v in j.items()]
if all([x in opt_names for x in ex_list]):
exclude = True
break
if not exclude:
new_opt_list += [i]
opt_list = new_opt_list
for i in opt_list:
name = ' '.join([x[0] for x in i if x[0] is not None])
opts = ['-D' + x[0] for x in i if x[0] is not None]
skip = any([x[1] for x in i])
test = TestDef(t.path, name, opts, skip or t.skip)
test.env.update(env)
test.installed_files = installed
test.do_not_set_opts = do_not_set_opts
test.stdout = stdout
all_tests += [test]
return sorted(all_tests)
def have_d_compiler():
if shutil.which("ldc2"):
return True
elif shutil.which("ldc"):
return True
elif shutil.which("gdc"):
return True
elif shutil.which("dmd"):
# The Windows installer sometimes produces a DMD install
# that exists but segfaults every time the compiler is run.
# Don't know why. Don't know how to fix. Skip in this case.
cp = subprocess.run(['dmd', '--version'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if cp.stdout == b'':
return False
return True
return False
def have_objc_compiler(use_tmp: bool) -> bool:
with AutoDeletedDir(tempfile.mkdtemp(prefix='b ', dir=None if use_tmp else '.')) as build_dir:
env = environment.Environment(None, build_dir, get_fake_options('/'))
try:
objc_comp = env.detect_objc_compiler(MachineChoice.HOST)
except mesonlib.MesonException:
return False
if not objc_comp:
return False
env.coredata.process_new_compiler('objc', objc_comp, env)
try:
objc_comp.sanity_check(env.get_scratch_dir(), env)
except mesonlib.MesonException:
return False
return True
def have_objcpp_compiler(use_tmp: bool) -> bool:
with AutoDeletedDir(tempfile.mkdtemp(prefix='b ', dir=None if use_tmp else '.')) as build_dir:
env = environment.Environment(None, build_dir, get_fake_options('/'))
try:
objcpp_comp = env.detect_objcpp_compiler(MachineChoice.HOST)
except mesonlib.MesonException:
return False
if not objcpp_comp:
return False
env.coredata.process_new_compiler('objcpp', objcpp_comp, env)
try:
objcpp_comp.sanity_check(env.get_scratch_dir(), env)
except mesonlib.MesonException:
return False
return True
def have_java():
if shutil.which('javac') and shutil.which('java'):
return True
return False
def skippable(suite, test):
# Everything is optional when not running on CI, or on Ubuntu 16.04 CI
if not under_ci or under_xenial_ci:
return True
if not suite.endswith('frameworks'):
return True
# this test's assumptions aren't valid for Windows paths
if test.endswith('38 libdir must be inside prefix'):
return True
# gtk-doc test may be skipped, pending upstream fixes for spaces in
# filenames landing in the distro used for CI
if test.endswith('10 gtk-doc'):
return True
# NetCDF is not in the CI Docker image
if test.endswith('netcdf'):
return True
# MSVC doesn't link with GFortran
if test.endswith('14 fortran links c'):
return True
# Blocks are not supported on all compilers
if test.endswith('29 blocks'):
return True
# Scientific libraries are skippable on certain systems
# See the discussion here: https://github.com/mesonbuild/meson/pull/6562
if any([x in test for x in ['17 mpi', '25 hdf5', '30 scalapack']]) and skip_scientific:
return True
# These create OS specific tests, and need to be skippable
if any([x in test for x in ['16 sdl', '17 mpi']]):
return True
# No frameworks test should be skipped on linux CI, as we expect all
# prerequisites to be installed
if mesonlib.is_linux():
return False
# Boost test should only be skipped for windows CI build matrix entries
# which don't define BOOST_ROOT
if test.endswith('1 boost'):
if mesonlib.is_windows():
return 'BOOST_ROOT' not in os.environ
return False
# Qt is provided on macOS by Homebrew
if test.endswith('4 qt') and mesonlib.is_osx():
return False
# Other framework tests are allowed to be skipped on other platforms
return True
def skip_csharp(backend) -> bool:
if backend is not Backend.ninja:
return True
if not shutil.which('resgen'):
return True
if shutil.which('mcs'):
return False
if shutil.which('csc'):
# Only support VS2017 for now. Earlier versions fail
# under CI in mysterious ways.
try:
stdo = subprocess.check_output(['csc', '/version'])
except subprocess.CalledProcessError:
return True
# Having incrementing version numbers would be too easy.
# Microsoft reset the versioning back to 1.0 (from 4.x)
# when they got the Roslyn based compiler. Thus there
# is NO WAY to reliably do version number comparisons.
# Only support the version that ships with VS2017.
return not stdo.startswith(b'2.')
return True
# In Azure some setups have a broken rustc that will error out
# on all compilation attempts.
def has_broken_rustc() -> bool:
dirname = 'brokenrusttest'
if os.path.exists(dirname):
mesonlib.windows_proof_rmtree(dirname)
os.mkdir(dirname)
open(dirname + '/sanity.rs', 'w').write('''fn main() {
}
''')
pc = subprocess.run(['rustc', '-o', 'sanity.exe', 'sanity.rs'],
cwd=dirname,
stdout = subprocess.DEVNULL,
stderr = subprocess.DEVNULL)
mesonlib.windows_proof_rmtree(dirname)
return pc.returncode != 0
def should_skip_rust(backend: Backend) -> bool:
if not shutil.which('rustc'):
return True
if backend is not Backend.ninja:
return True
if mesonlib.is_windows() and has_broken_rustc():
return True
return False
def detect_tests_to_run(only: T.List[str], use_tmp: bool) -> T.List[T.Tuple[str, T.List[TestDef], bool]]:
"""
Parameters
----------
only: list of str, optional
specify names of tests to run
Returns
-------
gathered_tests: list of tuple of str, list of TestDef, bool
tests to run
"""
skip_fortran = not(shutil.which('gfortran') or
shutil.which('flang') or
shutil.which('pgfortran') or
shutil.which('ifort'))
class TestCategory:
def __init__(self, category: str, subdir: str, skip: bool = False, stdout_mandatory: bool = False):
self.category = category # category name
self.subdir = subdir # subdirectory
self.skip = skip # skip condition
self.stdout_mandatory = stdout_mandatory # expected stdout is mandatory for tests in this category
all_tests = [
TestCategory('cmake', 'cmake', not shutil.which('cmake') or (os.environ.get('compiler') == 'msvc2015' and under_ci)),
TestCategory('common', 'common'),
TestCategory('native', 'native'),
TestCategory('warning-meson', 'warning', stdout_mandatory=True),
TestCategory('failing-meson', 'failing', stdout_mandatory=True),
TestCategory('failing-build', 'failing build'),
TestCategory('failing-test', 'failing test'),
TestCategory('keyval', 'keyval'),
TestCategory('platform-osx', 'osx', not mesonlib.is_osx()),
TestCategory('platform-windows', 'windows', not mesonlib.is_windows() and not mesonlib.is_cygwin()),
TestCategory('platform-linux', 'linuxlike', mesonlib.is_osx() or mesonlib.is_windows()),
TestCategory('java', 'java', backend is not Backend.ninja or mesonlib.is_osx() or not have_java()),
TestCategory('C#', 'csharp', skip_csharp(backend)),
TestCategory('vala', 'vala', backend is not Backend.ninja or not shutil.which(os.environ.get('VALAC', 'valac'))),
TestCategory('rust', 'rust', should_skip_rust(backend)),
TestCategory('d', 'd', backend is not Backend.ninja or not have_d_compiler()),
TestCategory('objective c', 'objc', backend not in (Backend.ninja, Backend.xcode) or not have_objc_compiler(options.use_tmpdir)),
TestCategory('objective c++', 'objcpp', backend not in (Backend.ninja, Backend.xcode) or not have_objcpp_compiler(options.use_tmpdir)),
TestCategory('fortran', 'fortran', skip_fortran or backend != Backend.ninja),
TestCategory('swift', 'swift', backend not in (Backend.ninja, Backend.xcode) or not shutil.which('swiftc')),
# CUDA tests on Windows: use Ninja backend: python run_project_tests.py --only cuda --backend ninja
TestCategory('cuda', 'cuda', backend not in (Backend.ninja, Backend.xcode) or not shutil.which('nvcc')),
TestCategory('python3', 'python3', backend is not Backend.ninja),
TestCategory('python', 'python'),
TestCategory('fpga', 'fpga', shutil.which('yosys') is None),
TestCategory('frameworks', 'frameworks'),
TestCategory('nasm', 'nasm'),
TestCategory('wasm', 'wasm', shutil.which('emcc') is None or backend is not Backend.ninja),
]
categories = [t.category for t in all_tests]
assert categories == ALL_TESTS, 'argparse("--only", choices=ALL_TESTS) needs to be updated to match all_tests categories'
if only:
all_tests = [t for t in all_tests if t.category in only]
gathered_tests = [(t.category, gather_tests(Path('test cases', t.subdir), t.stdout_mandatory), t.skip) for t in all_tests]
return gathered_tests
def run_tests(all_tests: T.List[T.Tuple[str, T.List[TestDef], bool]],
log_name_base: str, failfast: bool,
extra_args: T.List[str], use_tmp: bool) -> T.Tuple[int, int, int]:
global logfile
txtname = log_name_base + '.txt'
with open(txtname, 'w', encoding='utf-8', errors='ignore') as lf:
logfile = lf
return _run_tests(all_tests, log_name_base, failfast, extra_args, use_tmp)
def _run_tests(all_tests: T.List[T.Tuple[str, T.List[TestDef], bool]],
log_name_base: str, failfast: bool,
extra_args: T.List[str], use_tmp: bool) -> T.Tuple[int, int, int]:
global stop, executor, futures, host_c_compiler
xmlname = log_name_base + '.xml'
junit_root = ET.Element('testsuites')
conf_time = 0
build_time = 0
test_time = 0
passing_tests = 0
failing_tests = 0
skipped_tests = 0
commands = (compile_commands, clean_commands, install_commands, uninstall_commands)
try:
# This fails in some CI environments for unknown reasons.
num_workers = multiprocessing.cpu_count()
except Exception as e:
print('Could not determine number of CPUs due to the following reason:' + str(e))
print('Defaulting to using only one process')
num_workers = 1
# Due to Ninja deficiency, almost 50% of build time
# is spent waiting. Do something useful instead.
#
# Remove this once the following issue has been resolved:
# https://github.com/mesonbuild/meson/pull/2082
if not mesonlib.is_windows(): # twice as fast on Windows by *not* multiplying by 2.
num_workers *= 2
executor = ProcessPoolExecutor(max_workers=num_workers)
for name, test_cases, skipped in all_tests:
current_suite = ET.SubElement(junit_root, 'testsuite', {'name': name, 'tests': str(len(test_cases))})
print()
if skipped:
print(bold('Not running %s tests.' % name))
else:
print(bold('Running %s tests.' % name))
print()
futures = []
for t in test_cases:
# Jenkins screws us over by automatically sorting test cases by name
# and getting it wrong by not doing logical number sorting.
(testnum, testbase) = t.path.name.split(' ', 1)
testname = '%.3d %s' % (int(testnum), testbase)
if t.name:
testname += ' ({})'.format(t.name)
should_fail = False
suite_args = []
if name.startswith('failing'):
should_fail = name.split('failing-')[1]
if name.startswith('warning'):
suite_args = ['--fatal-meson-warnings']
should_fail = name.split('warning-')[1]
t.skip = skipped or t.skip
result = executor.submit(run_test, t, extra_args + suite_args + t.args,
host_c_compiler, backend, backend_flags, commands, should_fail, use_tmp)
futures.append((testname, t, result))
for (testname, t, result) in futures:
sys.stdout.flush()
try:
result = result.result()
except CancelledError:
continue
if (result is None) or (('MESON_SKIP_TEST' in result.stdo) and (skippable(name, t.path.as_posix()))):
print(yellow('Skipping:'), t.display_name())
current_test = ET.SubElement(current_suite, 'testcase', {'name': testname,
'classname': name})
ET.SubElement(current_test, 'skipped', {})
skipped_tests += 1
else:
without_install = "" if len(install_commands) > 0 else " (without install)"
if result.msg != '':
print(red('Failed test{} during {}: {!r}'.format(without_install, result.step.name, t.display_name())))
print('Reason:', result.msg)
failing_tests += 1
if result.step == BuildStep.configure and result.mlog != no_meson_log_msg:
# For configure failures, instead of printing stdout,
# print the meson log if available since it's a superset
# of stdout and often has very useful information.
failing_logs.append(result.mlog)
elif under_ci:
# Always print the complete meson log when running in
# a CI. This helps debug issues that only occur in
# a hard-to-reproduce environment.
failing_logs.append(result.mlog)
failing_logs.append(result.stdo)
else:
failing_logs.append(result.stdo)
for cmd_res in result.cicmds:
failing_logs.append(cmd_res)
failing_logs.append(result.stde)
if failfast:
print("Cancelling the rest of the tests")
for (_, _, res) in futures:
res.cancel()
else:
print('Succeeded test%s: %s' % (without_install, t.display_name()))
passing_tests += 1
conf_time += result.conftime
build_time += result.buildtime
test_time += result.testtime
total_time = conf_time + build_time + test_time
log_text_file(logfile, t.path, result.stdo, result.stde)
current_test = ET.SubElement(current_suite, 'testcase', {'name': testname,
'classname': name,
'time': '%.3f' % total_time})
if result.msg != '':
ET.SubElement(current_test, 'failure', {'message': result.msg})
stdoel = ET.SubElement(current_test, 'system-out')
stdoel.text = result.stdo
stdeel = ET.SubElement(current_test, 'system-err')
stdeel.text = result.stde
if failfast and failing_tests > 0:
break
print("\nTotal configuration time: %.2fs" % conf_time)
print("Total build time: %.2fs" % build_time)
print("Total test time: %.2fs" % test_time)
ET.ElementTree(element=junit_root).write(xmlname, xml_declaration=True, encoding='UTF-8')
return passing_tests, failing_tests, skipped_tests
def check_file(file: Path):
lines = file.read_bytes().split(b'\n')
tabdetector = re.compile(br' *\t')
for i, line in enumerate(lines):
if re.match(tabdetector, line):
raise SystemExit("File {} contains a tab indent on line {:d}. Only spaces are permitted.".format(file, i + 1))
if line.endswith(b'\r'):
raise SystemExit("File {} contains DOS line ending on line {:d}. Only unix-style line endings are permitted.".format(file, i + 1))
def check_format():
check_suffixes = {'.c',
'.cpp',
'.cxx',
'.cc',
'.rs',
'.f90',
'.vala',
'.d',
'.s',
'.m',
'.mm',
'.asm',
'.java',
'.txt',
'.py',
'.swift',
'.build',
'.md',
}
skip_dirs = {
'.dub', # external deps are here
'.pytest_cache',
'meson-logs', 'meson-private',
'work area',
'.eggs', '_cache', # e.g. .mypy_cache
'venv', # virtualenvs have DOS line endings
}
for (root, _, filenames) in os.walk('.'):
if any([x in root for x in skip_dirs]):
continue
for fname in filenames:
file = Path(fname)
if file.suffix.lower() in check_suffixes:
if file.name in ('sitemap.txt', 'meson-test-run.txt'):
continue
check_file(root / file)
def check_meson_commands_work(options):
global backend, compile_commands, test_commands, install_commands
testdir = PurePath('test cases', 'common', '1 trivial').as_posix()
meson_commands = mesonlib.python_command + [get_meson_script()]
with AutoDeletedDir(tempfile.mkdtemp(prefix='b ', dir=None if options.use_tmpdir else '.')) as build_dir:
print('Checking that configuring works...')
gen_cmd = meson_commands + [testdir, build_dir] + backend_flags + options.extra_args
pc, o, e = Popen_safe(gen_cmd)
if pc.returncode != 0:
raise RuntimeError('Failed to configure {!r}:\n{}\n{}'.format(testdir, e, o))
print('Checking that introspect works...')
pc, o, e = Popen_safe(meson_commands + ['introspect', '--targets'], cwd=build_dir)
if pc.returncode != 0:
raise RuntimeError('Failed to introspect --targets {!r}:\n{}\n{}'.format(testdir, e, o))
json.loads(o)
print('Checking that building works...')
dir_args = get_backend_args_for_dir(backend, build_dir)
pc, o, e = Popen_safe(compile_commands + dir_args, cwd=build_dir)
if pc.returncode != 0:
raise RuntimeError('Failed to build {!r}:\n{}\n{}'.format(testdir, e, o))
print('Checking that testing works...')
pc, o, e = Popen_safe(test_commands, cwd=build_dir)
if pc.returncode != 0:
raise RuntimeError('Failed to test {!r}:\n{}\n{}'.format(testdir, e, o))
if install_commands:
print('Checking that installing works...')
pc, o, e = Popen_safe(install_commands, cwd=build_dir)
if pc.returncode != 0:
raise RuntimeError('Failed to install {!r}:\n{}\n{}'.format(testdir, e, o))
def detect_system_compiler(options):
global host_c_compiler, compiler_id_map
with AutoDeletedDir(tempfile.mkdtemp(prefix='b ', dir=None if options.use_tmpdir else '.')) as build_dir:
fake_opts = get_fake_options('/')
if options.cross_file:
fake_opts.cross_file = [options.cross_file]
if options.native_file:
fake_opts.native_file = [options.native_file]
env = environment.Environment(None, build_dir, fake_opts)
print_compilers(env, MachineChoice.HOST)
if options.cross_file:
print_compilers(env, MachineChoice.BUILD)
for lang in sorted(compilers.all_languages):
try:
comp = env.compiler_from_language(lang, MachineChoice.HOST)
# note compiler id for later use with test.json matrix
compiler_id_map[lang] = comp.get_id()
except mesonlib.MesonException:
comp = None
# note C compiler for later use by platform_fix_name()
if lang == 'c':
if comp:
host_c_compiler = comp.get_id()
else:
raise RuntimeError("Could not find C compiler.")
def print_compilers(env, machine):
print()
print('{} machine compilers'.format(machine.get_lower_case_name()))
print()
for lang in sorted(compilers.all_languages):
try:
comp = env.compiler_from_language(lang, machine)
details = '{:<10} {} {}'.format('[' + comp.get_id() + ']', ' '.join(comp.get_exelist()), comp.get_version_string())
except mesonlib.MesonException:
details = '[not found]'
print('%-7s: %s' % (lang, details))
def print_tool_versions():
tools = [
{
'tool': 'ninja',
'args': ['--version'],
'regex': re.compile(r'^([0-9]+(\.[0-9]+)*(-[a-z0-9]+)?)$'),
'match_group': 1,
},
{
'tool': 'cmake',
'args': ['--version'],
'regex': re.compile(r'^cmake version ([0-9]+(\.[0-9]+)*(-[a-z0-9]+)?)$'),
'match_group': 1,
},
{
'tool': 'hotdoc',
'args': ['--version'],
'regex': re.compile(r'^([0-9]+(\.[0-9]+)*(-[a-z0-9]+)?)$'),
'match_group': 1,
},
]
def get_version(t: dict) -> str:
exe = shutil.which(t['tool'])
if not exe:
return 'not found'
args = [t['tool']] + t['args']
pc, o, e = Popen_safe(args)
if pc.returncode != 0:
return '{} (invalid {} executable)'.format(exe, t['tool'])
for i in o.split('\n'):
i = i.strip('\n\r\t ')
m = t['regex'].match(i)
if m is not None:
tool_vers_map[t['tool']] = m.group(t['match_group'])
return '{} ({})'.format(exe, m.group(t['match_group']))
return '{} (unknown)'.format(exe)
print()
print('tools')
print()
max_width = max([len(x['tool']) for x in tools] + [7])
for tool in tools:
print('{0:<{2}}: {1}'.format(tool['tool'], get_version(tool), max_width))
print()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Run the test suite of Meson.")
parser.add_argument('extra_args', nargs='*',
help='arguments that are passed directly to Meson (remember to have -- before these).')
parser.add_argument('--backend', dest='backend', choices=backendlist)
parser.add_argument('--failfast', action='store_true',
help='Stop running if test case fails')
parser.add_argument('--no-unittests', action='store_true',
help='Not used, only here to simplify run_tests.py')
parser.add_argument('--only', help='name of test(s) to run', nargs='+', choices=ALL_TESTS)
parser.add_argument('--cross-file', action='store', help='File describing cross compilation environment.')
parser.add_argument('--native-file', action='store', help='File describing native compilation environment.')
parser.add_argument('--use-tmpdir', action='store_true', help='Use tmp directory for temporary files.')
options = parser.parse_args()
if options.cross_file:
options.extra_args += ['--cross-file', options.cross_file]
if options.native_file:
options.extra_args += ['--native-file', options.native_file]
print('Meson build system', meson_version, 'Project Tests')
print('Using python', sys.version.split('\n')[0])
setup_commands(options.backend)
detect_system_compiler(options)
print_tool_versions()
script_dir = os.path.split(__file__)[0]
if script_dir != '':
os.chdir(script_dir)
check_format()
check_meson_commands_work(options)
try:
all_tests = detect_tests_to_run(options.only, options.use_tmpdir)
(passing_tests, failing_tests, skipped_tests) = run_tests(all_tests, 'meson-test-run', options.failfast, options.extra_args, options.use_tmpdir)
except StopException:
pass
print('\nTotal passed tests:', green(str(passing_tests)))
print('Total failed tests:', red(str(failing_tests)))
print('Total skipped tests:', yellow(str(skipped_tests)))
if failing_tests > 0:
print('\nMesonlogs of failing tests\n')
for l in failing_logs:
try:
print(l, '\n')
except UnicodeError:
print(l.encode('ascii', errors='replace').decode(), '\n')
for name, dirs, _ in all_tests:
dir_names = list(set(x.path.name for x in dirs))
for k, g in itertools.groupby(dir_names, key=lambda x: x.split()[0]):
tests = list(g)
if len(tests) != 1:
print('WARNING: The %s suite contains duplicate "%s" tests: "%s"' % (name, k, '", "'.join(tests)))
raise SystemExit(failing_tests)
| [] | [] | [
"VALAC",
"compiler"
] | [] | ["VALAC", "compiler"] | python | 2 | 0 | |
cmd/nerdctl/build.go | /*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"encoding/json"
"errors"
"fmt"
"io"
"os"
"os/exec"
"strconv"
"strings"
"path/filepath"
dockerreference "github.com/containerd/containerd/reference/docker"
"github.com/containerd/nerdctl/pkg/buildkitutil"
"github.com/containerd/nerdctl/pkg/defaults"
"github.com/containerd/nerdctl/pkg/platformutil"
"github.com/containerd/nerdctl/pkg/strutil"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
func newBuildCommand() *cobra.Command {
var buildCommand = &cobra.Command{
Use: "build",
Short: "Build an image from a Dockerfile. Needs buildkitd to be running.",
RunE: buildAction,
SilenceUsage: true,
SilenceErrors: true,
}
AddStringFlag(buildCommand, "buildkit-host", nil, defaults.BuildKitHost(), "BUILDKIT_HOST", "BuildKit address")
buildCommand.Flags().StringArrayP("tag", "t", nil, "Name and optionally a tag in the 'name:tag' format")
buildCommand.Flags().StringP("file", "f", "", "Name of the Dockerfile")
buildCommand.Flags().String("target", "", "Set the target build stage to build")
buildCommand.Flags().StringArray("build-arg", nil, "Set build-time variables")
buildCommand.Flags().Bool("no-cache", false, "Do not use cache when building the image")
buildCommand.Flags().StringP("output", "o", "", "Output destination (format: type=local,dest=path)")
buildCommand.Flags().String("progress", "auto", "Set type of progress output (auto, plain, tty). Use plain to show container output")
buildCommand.Flags().StringArray("secret", nil, "Secret file to expose to the build: id=mysecret,src=/local/secret")
buildCommand.Flags().StringArray("ssh", nil, "SSH agent socket or keys to expose to the build (format: default|<id>[=<socket>|<key>[,<key>]])")
buildCommand.Flags().BoolP("quiet", "q", false, "Suppress the build output and print image ID on success")
buildCommand.Flags().StringArray("cache-from", nil, "External cache sources (eg. user/app:cache, type=local,src=path/to/dir)")
buildCommand.Flags().StringArray("cache-to", nil, "Cache export destinations (eg. user/app:cache, type=local,dest=path/to/dir)")
buildCommand.Flags().Bool("rm", true, "Remove intermediate containers after a successful build")
// #region platform flags
// platform is defined as StringSlice, not StringArray, to allow specifying "--platform=amd64,arm64"
buildCommand.Flags().StringSlice("platform", []string{}, "Set target platform for build (e.g., \"amd64\", \"arm64\")")
buildCommand.RegisterFlagCompletionFunc("platform", shellCompletePlatforms)
// #endregion
buildCommand.Flags().Bool("ipfs", false, "Allow pulling base images from IPFS")
buildCommand.Flags().String("iidfile", "", "Write the image ID to the file")
buildCommand.Flags().StringArray("label", nil, "Set metadata for an image")
return buildCommand
}
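// getBuildkitHost resolves the BuildKit daemon address: an explicitly set --buildkit-host flag or
// BUILDKIT_HOST environment variable wins (and is ping-checked); otherwise the address is derived
// from the current namespace.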
func getBuildkitHost(cmd *cobra.Command) (string, error) {
if cmd.Flags().Changed("buildkit-host") || os.Getenv("BUILDKIT_HOST") != "" {
// If address is explicitly specified, use it.
buildkitHost, err := cmd.Flags().GetString("buildkit-host")
if err != nil {
return "", err
}
if err := buildkitutil.PingBKDaemon(buildkitHost); err != nil {
return "", err
}
return buildkitHost, nil
}
ns, err := cmd.Flags().GetString("namespace")
if err != nil {
return "", err
}
return buildkitutil.GetBuildkitHost(ns)
}
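// isImageSharable reports whether the BuildKit worker uses the containerd executor with the same
// containerd UUID, namespace and snapshotter as this client, in which case built images are
// visible without an explicit load step.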
func isImageSharable(buildkitHost string, namespace, uuid, snapshotter string) (bool, error) {
labels, err := buildkitutil.GetWorkerLabels(buildkitHost)
if err != nil {
return false, err
}
logrus.Debugf("worker labels: %+v", labels)
executor, ok := labels["org.mobyproject.buildkit.worker.executor"]
if !ok {
return false, nil
}
containerdUUID, ok := labels["org.mobyproject.buildkit.worker.containerd.uuid"]
if !ok {
return false, nil
}
containerdNamespace, ok := labels["org.mobyproject.buildkit.worker.containerd.namespace"]
if !ok {
return false, nil
}
workerSnapshotter, ok := labels["org.mobyproject.buildkit.worker.snapshotter"]
if !ok {
return false, nil
}
return executor == "containerd" && containerdUUID == uuid && containerdNamespace == namespace && workerSnapshotter == snapshotter, nil
}
func buildAction(cmd *cobra.Command, args []string) error {
platform, err := cmd.Flags().GetStringSlice("platform")
if err != nil {
return err
}
platform = strutil.DedupeStrSlice(platform)
buildkitHost, err := getBuildkitHost(cmd)
if err != nil {
return err
}
buildctlBinary, buildctlArgs, needsLoading, metaFile, cleanup, err := generateBuildctlArgs(cmd, buildkitHost, platform, args)
if err != nil {
return err
}
if cleanup != nil {
defer cleanup()
}
runIPFSRegistry, err := cmd.Flags().GetBool("ipfs")
if err != nil {
return err
}
if runIPFSRegistry {
logrus.Infof("Ensuring IPFS registry is running")
nerdctlCmd, nerdctlArgs := globalFlags(cmd)
if out, err := exec.Command(nerdctlCmd, append(nerdctlArgs, "ipfs", "registry", "up")...).CombinedOutput(); err != nil {
return fmt.Errorf("failed to start IPFS registry: %v: %v", string(out), err)
} else {
logrus.Infof("IPFS registry is running: %v", string(out))
}
}
quiet, err := cmd.Flags().GetBool("quiet")
if err != nil {
return err
}
logrus.Debugf("running %s %v", buildctlBinary, buildctlArgs)
buildctlCmd := exec.Command(buildctlBinary, buildctlArgs...)
buildctlCmd.Env = os.Environ()
var buildctlStdout io.Reader
if needsLoading {
buildctlStdout, err = buildctlCmd.StdoutPipe()
if err != nil {
return err
}
} else {
buildctlCmd.Stdout = cmd.OutOrStdout()
}
if !quiet {
buildctlCmd.Stderr = cmd.ErrOrStderr()
}
if err := buildctlCmd.Start(); err != nil {
return err
}
if needsLoading {
platMC, err := platformutil.NewMatchComparer(false, platform)
if err != nil {
return err
}
if err = loadImage(buildctlStdout, cmd, args, platMC, quiet); err != nil {
return err
}
}
if err = buildctlCmd.Wait(); err != nil {
return err
}
iidFile, _ := cmd.Flags().GetString("iidfile")
if iidFile != "" {
id, err := getDigestFromMetaFile(metaFile)
if err != nil {
return err
}
if err := os.WriteFile(iidFile, []byte(id), 0600); err != nil {
return err
}
}
return nil
}
func generateBuildctlArgs(cmd *cobra.Command, buildkitHost string, platform, args []string) (string, []string, bool, string, func(), error) {
var needsLoading bool
if len(args) < 1 {
return "", nil, false, "", nil, errors.New("context needs to be specified")
}
buildContext := args[0]
if buildContext == "-" || strings.Contains(buildContext, "://") {
return "", nil, false, "", nil, fmt.Errorf("unsupported build context: %q", buildContext)
}
buildctlBinary, err := buildkitutil.BuildctlBinary()
if err != nil {
return "", nil, false, "", nil, err
}
output, err := cmd.Flags().GetString("output")
if err != nil {
return "", nil, false, "", nil, err
}
if output == "" {
client, ctx, cancel, err := newClient(cmd)
if err != nil {
return "", nil, false, "", nil, err
}
defer cancel()
info, err := client.Server(ctx)
if err != nil {
return "", nil, false, "", nil, err
}
ns, err := cmd.Flags().GetString("namespace")
if err != nil {
return "", nil, false, "", nil, err
}
snapshotter, err := cmd.Flags().GetString("snapshotter")
if err != nil {
return "", nil, false, "", nil, err
}
sharable, err := isImageSharable(buildkitHost, ns, info.UUID, snapshotter)
if err != nil {
return "", nil, false, "", nil, err
}
if sharable {
output = "type=image,unpack=true" // ensure the target stage is unlazied (needed for any snapshotters)
} else {
output = "type=docker"
if len(platform) > 1 {
// For avoiding `error: failed to solve: docker exporter does not currently support exporting manifest lists`
// TODO: consider using type=oci for single-platform build too
output = "type=oci"
}
needsLoading = true
}
}
tagValue, err := cmd.Flags().GetStringArray("tag")
if err != nil {
return "", nil, false, "", nil, err
}
if tagSlice := strutil.DedupeStrSlice(tagValue); len(tagSlice) > 0 {
if len(tagSlice) > 1 {
return "", nil, false, "", nil, fmt.Errorf("specifying multiple -t is not supported yet")
}
ref := tagSlice[0]
named, err := dockerreference.ParseNormalizedNamed(ref)
if err != nil {
return "", nil, false, "", nil, err
}
output += ",name=" + dockerreference.TagNameOnly(named).String()
}
buildctlArgs := buildkitutil.BuildctlBaseArgs(buildkitHost)
progressValue, err := cmd.Flags().GetString("progress")
if err != nil {
return "", nil, false, "", nil, err
}
buildctlArgs = append(buildctlArgs, []string{
"build",
"--progress=" + progressValue,
"--frontend=dockerfile.v0",
"--local=context=" + buildContext,
"--local=dockerfile=" + buildContext,
"--output=" + output,
}...)
filename, err := cmd.Flags().GetString("file")
if err != nil {
return "", nil, false, "", nil, err
}
var dir, file string
var cleanup func()
if filename != "" {
if filename == "-" {
var err error
dir, err = buildkitutil.WriteTempDockerfile(cmd.InOrStdin())
if err != nil {
return "", nil, false, "", nil, err
}
file = buildkitutil.DefaultDockerfileName
cleanup = func() {
os.RemoveAll(dir)
}
} else {
dir, file = filepath.Split(filename)
}
if dir != "" {
buildctlArgs = append(buildctlArgs, "--local=dockerfile="+dir)
}
buildctlArgs = append(buildctlArgs, "--opt=filename="+file)
}
target, err := cmd.Flags().GetString("target")
if err != nil {
return "", nil, false, "", cleanup, err
}
if target != "" {
buildctlArgs = append(buildctlArgs, "--opt=target="+target)
}
if len(platform) > 0 {
buildctlArgs = append(buildctlArgs, "--opt=platform="+strings.Join(platform, ","))
}
buildArgsValue, err := cmd.Flags().GetStringArray("build-arg")
if err != nil {
return "", nil, false, "", cleanup, err
}
for _, ba := range strutil.DedupeStrSlice(buildArgsValue) {
buildctlArgs = append(buildctlArgs, "--opt=build-arg:"+ba)
// Support `--build-arg BUILDKIT_INLINE_CACHE=1` for compatibility with `docker buildx build`
// https://github.com/docker/buildx/blob/v0.6.3/docs/reference/buildx_build.md#-export-build-cache-to-an-external-cache-destination---cache-to
if strings.HasPrefix(ba, "BUILDKIT_INLINE_CACHE=") {
bic := strings.TrimPrefix(ba, "BUILDKIT_INLINE_CACHE=")
bicParsed, err := strconv.ParseBool(bic)
if err == nil {
if bicParsed {
buildctlArgs = append(buildctlArgs, "--export-cache=type=inline")
}
} else {
logrus.WithError(err).Warnf("invalid BUILDKIT_INLINE_CACHE: %q", bic)
}
}
}
labels, err := cmd.Flags().GetStringArray("label")
if err != nil {
return "", nil, false, "", nil, err
}
labels = strutil.DedupeStrSlice(labels)
for _, l := range labels {
buildctlArgs = append(buildctlArgs, "--opt=label:"+l)
}
noCache, err := cmd.Flags().GetBool("no-cache")
if err != nil {
return "", nil, false, "", cleanup, err
}
if noCache {
buildctlArgs = append(buildctlArgs, "--no-cache")
}
secretValue, err := cmd.Flags().GetStringArray("secret")
if err != nil {
return "", nil, false, "", cleanup, err
}
for _, s := range strutil.DedupeStrSlice(secretValue) {
buildctlArgs = append(buildctlArgs, "--secret="+s)
}
sshValue, err := cmd.Flags().GetStringArray("ssh")
if err != nil {
return "", nil, false, "", cleanup, err
}
for _, s := range strutil.DedupeStrSlice(sshValue) {
buildctlArgs = append(buildctlArgs, "--ssh="+s)
}
cacheFrom, err := cmd.Flags().GetStringArray("cache-from")
if err != nil {
return "", nil, false, "", cleanup, err
}
for _, s := range strutil.DedupeStrSlice(cacheFrom) {
if !strings.Contains(s, "type=") {
s = "type=registry,ref=" + s
}
buildctlArgs = append(buildctlArgs, "--import-cache="+s)
}
cacheTo, err := cmd.Flags().GetStringArray("cache-to")
if err != nil {
return "", nil, false, "", cleanup, err
}
for _, s := range strutil.DedupeStrSlice(cacheTo) {
if !strings.Contains(s, "type=") {
s = "type=registry,ref=" + s
}
buildctlArgs = append(buildctlArgs, "--export-cache="+s)
}
rm, err := cmd.Flags().GetBool("rm")
if err != nil {
return "", nil, false, "", cleanup, err
}
if !rm {
logrus.Warn("ignoring deprecated flag: '--rm=false'")
}
iidFile, err := cmd.Flags().GetString("iidfile")
if err != nil {
return "", nil, false, "", cleanup, err
}
var metaFile string
if iidFile != "" {
file, err := os.CreateTemp("", "buildkit-meta-*")
if err != nil {
return "", nil, false, "", cleanup, err
}
defer file.Close()
metaFile = file.Name()
buildctlArgs = append(buildctlArgs, "--metadata-file="+metaFile)
}
return buildctlBinary, buildctlArgs, needsLoading, metaFile, cleanup, nil
}
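// getDigestFromMetaFile reads the BuildKit metadata file, deletes it, and returns the
// containerimage.digest value recorded in it.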
func getDigestFromMetaFile(path string) (string, error) {
data, err := os.ReadFile(path)
if err != nil {
return "", err
}
defer os.Remove(path)
metadata := map[string]json.RawMessage{}
if err := json.Unmarshal(data, &metadata); err != nil {
logrus.WithError(err).Errorf("failed to unmarshal metadata file %s", path)
return "", err
}
digestRaw, ok := metadata["containerimage.digest"]
if !ok {
return "", errors.New("failed to find containerimage.digest in metadata file")
}
var digest string
if err := json.Unmarshal(digestRaw, &digest); err != nil {
logrus.WithError(err).Errorf("failed to unmarshal digset")
return "", err
}
return digest, nil
}
| [
"\"BUILDKIT_HOST\""
] | [] | [
"BUILDKIT_HOST"
] | [] | ["BUILDKIT_HOST"] | go | 1 | 0 | |
greenlight-agent/src/greenlight/views.py | from datetime import datetime
import os
import json
import time
from importlib import import_module
from django.http import JsonResponse
from django.shortcuts import render, redirect
from django.http import Http404
from von_connector.config import Configurator
from von_connector.schema import SchemaManager
from von_connector.proof import ProofRequestManager
from django.contrib import messages
from django.contrib.messages import get_messages
from django.db.models import CharField
from django.db.models.functions import Length
import logging
logger = logging.getLogger(__name__)
import redis
# Get redis configuration
redis_service_name = os.getenv('REDIS_SERVICE_NAME')
# Get redis connection
if redis_service_name:
redis_key_name = redis_service_name.upper().replace('-', '_')
redis_host = os.getenv('{}_SERVICE_HOST'.format(redis_key_name), redis_service_name)
redis_port = os.getenv('{}_SERVICE_PORT'.format(redis_key_name), '6379')
redis_password = os.getenv('REDIS_PASSWORD')
r = redis.StrictRedis(host=redis_host, port=redis_port, db=0, password=redis_password)
r_history = redis.StrictRedis(host=redis_host, port=redis_port, db=1, password=redis_password)
schema_manager = SchemaManager()
configurator = Configurator()
CharField.register_lookup(Length, 'length')
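# The admin view lists pending claim requests (redis db 0) and already-approved requests (redis db 1).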
def admin(request):
if not redis_service_name:
raise Exception('The REDIS_SERVICE_NAME environment variable must be defined. REDIS_SERVICE_NAME=redis.')
# Get all keys in redis
pending_requests = []
rkeys = r.scan()[1]
rkeys = [byte.decode("utf-8") for byte in rkeys]
# # logger.info('------rkeys-----')
# logger.info(rkeys)
for key in rkeys:
pending_request = r.get(key).decode("utf-8")
pending_request=json.loads(pending_request)
pending_requests.append(pending_request)
# For Approved Requests
approved_reqs = []
app_keys = r_history.scan()[1]
logger.info('-----app-----')
logger.info(app_keys)
app_keys = [byte.decode("utf-8") for byte in app_keys]
for key in app_keys:
approved_req = r_history.get(key).decode("utf-8")
approved_req = json.loads(approved_req)
logger.info('------Approved------')
logger.info(approved_req)
approved_reqs.append(approved_req)
logger.info('\n\n\n\n\n\n')
logger.info('--------pending requests-------')
for req in pending_requests:
logger.info(req)
logger.info('-------------------------------')
logger.info('\n\n\n\n\n\n')
logger.info(json.dumps(configurator.config))
return render(request, 'admin.index.html', {'pending_requests': pending_requests, 'approved_reqs': approved_reqs, 'rkeys': rkeys})
def process_request(request):
if not redis_service_name:
raise Exception('The REDIS_SERVICE_NAME environment variable must be defined. REDIS_SERVICE_NAME=redis.')
body = json.loads(request.body.decode('utf-8'))
schema = schema_manager.schemas[0]
logger.info('----------------body')
logger.info(body)
# process_reqs = []
for rkey in body:
# logger.info(rkey)
# rkey_str = rkey.decode('utf-8')
# logger.info(rkey)
process_req=r.get(rkey).decode('utf-8')
process_req = json.loads(process_req)
logger.info('-----------process-------')
logger.info(process_req)
current_time = datetime.now().isoformat()
r_history.set(current_time, json.dumps(process_req))
claim = schema_manager.submit_claim(schema, process_req)
r.delete(rkey)
# # expired_keys = r.scan()[1]
# # for rkey in expired_keys:
# r.delete(rkey)
return JsonResponse({'success': True, 'result': claim})
def index(request):
# If this is the form for the foundational claim,
# we have no prerequisites, so just render.
if 'foundational' in configurator.config and \
configurator.config['foundational']:
return render(
request,
configurator.config['template_root'],
configurator.config
)
legal_entity_id = request.GET.get('org_id', None)
# If id isn't passed in, we render a form to ask for it.
if not legal_entity_id:
return render(request, 'missing_id.html')
logger.info('----\n\n\n\n\n\n{}\n\n\n\n\n'.format(legal_entity_id))
proof_request_manager = ProofRequestManager()
proof_response = proof_request_manager.request_proof({
'legal_entity_id': legal_entity_id
})
logger.info('----\n\n\n\n\n\n{}\n\n\n\n\n'.format(proof_response))
logger.info(legal_entity_id)
configurator.config['proof_response'] = proof_response
return render(
request,
configurator.config['template_root'],
configurator.config
)
def submit_claim(request):
start_time = time.time()
# Get json request body
body = json.loads(request.body.decode('utf-8'))
logger.info('-------------Int------')
logger.info(body)
logger.info('---------Body--------')
for claim in body:
logger.info(claim)
logger.info('---------------------')
# Get the schema we care about by 'schema'
# passed in request
try:
schema = next(
schema for
schema in
schema_manager.schemas if
schema['name'] == body['schema'])
except StopIteration:
raise Exception(
'Schema type "%s" in request did not match any schemas.' %
body['schema'])
# Build schema body skeleton
claim = {}
for attr in schema['attr_names']:
claim[attr] = None
# Get the schema mapper we care about
try:
schema_mapper = next(
schema_mapper for
schema_mapper in
configurator.config['schema_mappers'] if
schema_mapper['for'] == body['schema'])
except StopIteration:
raise Exception(
'Schema type "%s" in request did not match any schema mappers.' %
body['schema'])
# Build claim data from schema mapper
for attribute in schema_mapper['attributes']:
# Handle getting value from request data
if attribute['from'] == 'request':
claim[attribute['name']] = body[attribute['source']]
# Handle getting value from helpers (function defined in config)
elif attribute['from'] == 'helper':
try:
helpers = import_module('von_connector.helpers')
helper = getattr(helpers, attribute['source'])
claim[attribute['name']] = helper()
except AttributeError:
raise Exception(
'Cannot find helper "%s"' % attribute['source'])
# Handle setting value with string literal or None
elif attribute['from'] == 'literal':
try:
value = attribute['source']
except KeyError:
value = None
claim[attribute['name']] = value
# Handle getting value already set on schema skeleton
elif attribute['from'] == 'previous':
try:
claim[attribute['name']] = \
claim[attribute['source']]
except KeyError:
raise Exception(
'Cannot find previous value "%s"' % attribute['source'])
else:
raise Exception('Unknown mapper type "%s"' % attribute['from'])
if 'address_line_2' in claim and claim["address_line_2"]:
if not redis_service_name:
raise Exception('The REDIS_SERVICE_NAME environment variable must be defined. REDIS_SERVICE_NAME=redis.')
current_time = datetime.now().isoformat()
r.set(current_time, json.dumps(claim))
return JsonResponse({'success': True, 'result': None})
else:
claim = schema_manager.submit_claim(schema, claim)
logger.info('---------claim-------')
logger.info(claim)
logger.info('---------End of Claim-')
return JsonResponse({'success': True, 'result': claim})
def verify_dba(request):
# Get json request body
body = json.loads(request.body.decode('utf-8'))
if 'legal_entity_id' not in body or 'doing_business_as_name' not in body:
raise Exception('Missing required input')
(verified, message) = schema_manager.verify_dba(body)
return JsonResponse({'success': verified, 'message': message})
| [] | [] | [
"REDIS_PASSWORD",
"{}_SERVICE_PORT'.format(redis_key_nam",
"REDIS_SERVICE_NAME",
"{}_SERVICE_HOST'.format(redis_key_nam"
] | [] | ["REDIS_PASSWORD", "{}_SERVICE_PORT'.format(redis_key_nam", "REDIS_SERVICE_NAME", "{}_SERVICE_HOST'.format(redis_key_nam"] | python | 4 | 0 | |
cmd/publish.go | package cmd
import (
"os"
"github.com/rubiojr/tavern/client"
"github.com/spf13/cobra"
)
var url, charmHost *string
var charmHTTPPort, charmSSHPort *int
const defaultURL = "https://pub.rbel.co"
var publishCmd = &cobra.Command{
Use: "publish",
Short: "Publish Charm FS files to a Tavern server",
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
if *url == defaultURL && os.Getenv("TAVERN_SERVER_URL") != "" {
*url = os.Getenv("TAVERN_SERVER_URL")
}
cfg := client.DefaultConfig()
cfg.ServerURL = *url
cfg.CharmServerHost = *charmHost
cfg.CharmServerHTTPPort = *charmHTTPPort
pc, err := client.NewClientWithConfig(cfg)
if err != nil {
return err
}
return pc.Publish(args[0])
},
}
func init() {
rootCmd.AddCommand(publishCmd)
url = publishCmd.Flags().StringP("server-url", "s", defaultURL, "Tavern server URL")
charmHost = publishCmd.Flags().StringP("charm-server-host", "", "cloud.charm.sh", "Charm server URL")
charmHTTPPort = publishCmd.Flags().IntP("charm-server-http-port", "", 35354, "Charm server URL")
charmSSHPort = publishCmd.Flags().IntP("charm-server-ssh-port", "", 35353, "Charm server URL")
}
| [
"\"TAVERN_SERVER_URL\"",
"\"TAVERN_SERVER_URL\""
] | [] | [
"TAVERN_SERVER_URL"
] | [] | ["TAVERN_SERVER_URL"] | go | 1 | 0 | |
tests/testutils/utils.go | // Copyright (c) 2017 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This file contains test utils that are general purpose
// and should one day be moved to a central location for use across all
// projects.
package testutils
import (
"fmt"
"os"
"os/exec"
. "github.com/onsi/gomega"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/felix/fv/containers"
"github.com/projectcalico/libcalico-go/lib/apiconfig"
client "github.com/projectcalico/libcalico-go/lib/clientv3"
"github.com/projectcalico/libcalico-go/lib/backend"
"github.com/projectcalico/libcalico-go/lib/backend/api"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
)
const KubeconfigTemplate = `apiVersion: v1
kind: Config
clusters:
- name: test
cluster:
server: http://%s:8080
users:
- name: calico
contexts:
- name: test-context
context:
cluster: test
user: calico
current-context: test-context`
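// RunK8sApiserver starts a hyperkube API server container backed by the given etcd instance,
// signing service accounts with the mounted private key.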
func RunK8sApiserver(etcdIp string) *containers.Container {
return containers.Run("st-apiserver",
containers.RunOpts{AutoRemove: true},
"-v", os.Getenv("PRIVATE_KEY")+":/private.key",
fmt.Sprintf("%s", os.Getenv("HYPERKUBE_IMAGE")),
"/hyperkube", "apiserver",
"--service-cluster-ip-range=10.101.0.0/16",
"--authorization-mode=AlwaysAllow",
"--insecure-port=8080",
"--insecure-bind-address=0.0.0.0",
fmt.Sprintf("--etcd-servers=http://%s:2379", etcdIp),
"--service-account-key-file=/private.key",
)
}
func RunK8sControllerManager(apiserverIp string) *containers.Container {
c := containers.Run("st-controller-manager",
containers.RunOpts{AutoRemove: true},
"-v", os.Getenv("PRIVATE_KEY")+":/private.key",
fmt.Sprintf("%s", os.Getenv("HYPERKUBE_IMAGE")),
"/hyperkube", "controller-manager",
fmt.Sprintf("--master=%v:8080", apiserverIp),
"--min-resync-period=3m",
"--allocate-node-cidrs=true",
"--cluster-cidr=192.168.0.0/16",
"--v=5",
"--service-account-private-key-file=/private.key",
)
return c
}
func RunEtcd() *containers.Container {
return containers.Run("etcd-fv",
containers.RunOpts{AutoRemove: true},
fmt.Sprintf("%s", os.Getenv("ETCD_IMAGE")),
"etcd",
"--advertise-client-urls", "http://127.0.0.1:2379",
"--listen-client-urls", "http://0.0.0.0:2379")
}
func GetCalicoClient(etcdIP string) client.Interface {
cfg := apiconfig.NewCalicoAPIConfig()
cfg.Spec.DatastoreType = apiconfig.EtcdV3
cfg.Spec.EtcdEndpoints = fmt.Sprintf("http://%s:2379", etcdIP)
client, err := client.New(*cfg)
Expect(err).NotTo(HaveOccurred())
return client
}
func GetBackendClient(etcdIP string) api.Client {
cfg := apiconfig.NewCalicoAPIConfig()
cfg.Spec.DatastoreType = apiconfig.EtcdV3
cfg.Spec.EtcdEndpoints = fmt.Sprintf("http://%s:2379", etcdIP)
be, err := backend.NewClient(*cfg)
Expect(err).NotTo(HaveOccurred())
return be
}
// GetK8sClient gets a kubernetes client.
func GetK8sClient(kubeconfig string) (*kubernetes.Clientset, error) {
k8sconfig, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
return nil, fmt.Errorf("failed to build kubernetes client config: %s", err)
}
k8sClientset, err := kubernetes.NewForConfig(k8sconfig)
if err != nil {
return nil, fmt.Errorf("failed to build kubernetes client: %s", err)
}
return k8sClientset, nil
}
func Stop(c *containers.Container) {
log.WithField("container", c.Name).Info("Stopping container")
args := append([]string{"stop", c.Name})
cmd := exec.Command("docker", args...)
err := cmd.Run()
Expect(err).NotTo(HaveOccurred())
out, _ := cmd.CombinedOutput()
log.Info(out)
}
func Start(c *containers.Container) {
log.WithField("container", c.Name).Info("Starting container")
args := append([]string{"start", c.Name})
cmd := exec.Command("docker", args...)
err := cmd.Run()
Expect(err).NotTo(HaveOccurred())
out, _ := cmd.CombinedOutput()
log.Info(out)
}
| [
"\"PRIVATE_KEY\"",
"\"HYPERKUBE_IMAGE\"",
"\"PRIVATE_KEY\"",
"\"HYPERKUBE_IMAGE\"",
"\"ETCD_IMAGE\""
] | [] | [
"HYPERKUBE_IMAGE",
"PRIVATE_KEY",
"ETCD_IMAGE"
] | [] | ["HYPERKUBE_IMAGE", "PRIVATE_KEY", "ETCD_IMAGE"] | go | 3 | 0 | |
boskos/cmd/mason_client/main.go | // Copyright 2018 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"flag"
"fmt"
"io/ioutil"
"os"
"os/signal"
"sync"
"syscall"
"time"
"github.com/sirupsen/logrus"
"k8s.io/test-infra/boskos/client"
"k8s.io/test-infra/boskos/common"
"k8s.io/test-infra/boskos/mason"
"istio.io/test-infra/boskos/gcp"
)
const (
defaultBoskosRetryPeriod = 10 * time.Second
// Large enough such that the Reaper does not take the resource away from us.
defaultBoskosSyncPeriod = 15 * time.Second
defaultTimeout = "10m"
)
func defaultKubeconfig() string {
home := os.Getenv("HOME")
if home == "" {
return ""
}
return fmt.Sprintf("%s/.kube/config", home)
}
var (
owner = flag.String("owner", "", "Owner used when acquiring resources from Boskos")
rType = flag.String("type", "", "Type of resource to acquire")
timeoutStr = flag.String("timeout", defaultTimeout, "Timeout for resource acquisition")
kubecfgPath = flag.String("kubeconfig-save", defaultKubeconfig(), "Path to write kubeconfig file to")
infoSave = flag.String("info-save", "", "Path to save info")
boskosURL = flag.String("boskos-url", "http://boskos", "Boskos Server URL")
)
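// Illustrative invocation sketch (not part of the original source; the flag
// values below are placeholders/assumptions, not real Boskos resource types):
//
//  mason_client --type=gke-project --owner=my-prow-job \
//    --kubeconfig-save=/tmp/kubeconfig --info-save=/tmp/resource-info.json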
type masonClient struct {
mason *mason.Client
wg sync.WaitGroup
}
func (m *masonClient) acquire(ctx context.Context, rtype, state string) (*common.Resource, error) {
for {
select {
case <-time.After(defaultBoskosRetryPeriod):
logrus.Infof("Attempting to acquire resource")
res, err := m.mason.Acquire(rtype, common.Free, state)
if err == nil {
logrus.Infof("Resource %s acquired", res.Name)
return res, nil
}
logrus.Infof("Failed to acquire resource")
case <-ctx.Done():
return nil, ctx.Err()
}
}
}
func (m *masonClient) release(res common.Resource) {
if err := m.mason.ReleaseOne(res.Name, common.Dirty); err != nil {
logrus.WithError(err).Warningf("unable to release resource %s", res.Name)
return
}
logrus.Infof("Released resource %s", res.Name)
}
func (m *masonClient) update(ctx context.Context, state string) {
updateTick := time.NewTicker(defaultBoskosSyncPeriod).C
go func() {
for {
select {
case <-updateTick:
if err := m.mason.UpdateAll(state); err != nil {
logrus.WithError(err).Warningf("unable to update resources to state %s", state)
}
logrus.Infof("Updated resources")
case <-ctx.Done():
m.wg.Done()
return
}
}
}()
m.wg.Add(1)
}
func saveUserdataToFile(ud *common.UserData, key, path string) error {
v, ok := ud.Load(key)
if !ok {
return nil
}
return ioutil.WriteFile(path, []byte(v.(string)), 0644)
}
func wait() {
stop := make(chan os.Signal, 1)
signal.Notify(stop, syscall.SIGINT, syscall.SIGTERM)
<-stop
}
func main() {
flag.Parse()
if *rType == "" {
logrus.Errorf("flag --type must be set")
flag.Usage()
return
}
if *owner == "" {
logrus.Errorf("flag --owner must be set")
flag.Usage()
return
}
timeout, err := time.ParseDuration(*timeoutStr)
if err != nil {
logrus.Errorf("unable to parse --timeout %s", *timeoutStr)
flag.Usage()
return
}
c, err := client.NewClient(*owner, *boskosURL, "", "")
if err != nil {
logrus.WithError(err).Panicf("unable to create boskos client")
}
client := masonClient{mason: mason.NewClient(c)}
if *kubecfgPath == "" {
logrus.Panic("flag --kubeconfig-save must be set")
}
c1, acquireCancel := context.WithTimeout(context.Background(), timeout)
defer acquireCancel()
res, err := client.acquire(c1, *rType, common.Busy)
if err != nil {
logrus.WithError(err).Panicf("unable to find a resource")
}
defer client.release(*res)
c2, updateCancel := context.WithCancel(context.Background())
defer updateCancel()
client.update(c2, common.Busy)
loop:
for cType := range res.UserData.ToMap() {
switch cType {
case gcp.ResourceConfigType:
if *kubecfgPath != "" {
var info gcp.ResourceInfo
if err := res.UserData.Extract(gcp.ResourceConfigType, &info); err != nil {
logrus.WithError(err).Panicf("unable to parse %s", gcp.ResourceConfigType)
}
if err := info.Install(*kubecfgPath); err != nil {
logrus.WithError(err).Panicf("unable to install %s", gcp.ResourceConfigType)
}
}
if *infoSave != "" {
if err := saveUserdataToFile(res.UserData, gcp.ResourceConfigType, *infoSave); err != nil {
logrus.WithError(err).Panicf("unable to save info to %s", *infoSave)
}
logrus.Infof("Saved user data to %s", *infoSave)
}
break loop
}
}
logrus.Infof("READY")
logrus.Infof("Type CTRL-C to interrupt")
wait()
updateCancel()
client.wg.Wait()
}
| [
"\"HOME\""
] | [] | [
"HOME"
] | [] | ["HOME"] | go | 1 | 0 | |
configuration.go | package iris
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
"os/user"
"path/filepath"
"runtime"
"strings"
"github.com/kataras/iris/v12/context"
"github.com/kataras/iris/v12/core/netutil"
"github.com/BurntSushi/toml"
"github.com/kataras/sitemap"
"gopkg.in/yaml.v3"
)
const globalConfigurationKeyword = "~"
// homeConfigurationFilename returns the physical location of the global configuration (yaml or toml) file.
// This is useful when we run multiple iris servers that share the same
// configuration, even with custom values at its "Other" field.
// It will return a file location
// which targets to $HOME or %HOMEDRIVE%+%HOMEPATH% + "iris" + the given "ext".
func homeConfigurationFilename(ext string) string {
return filepath.Join(homeDir(), "iris"+ext)
}
func homeDir() (home string) {
u, err := user.Current()
if u != nil && err == nil {
home = u.HomeDir
}
if home == "" {
home = os.Getenv("HOME")
}
if home == "" {
if runtime.GOOS == "plan9" {
home = os.Getenv("home")
} else if runtime.GOOS == "windows" {
home = os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
if home == "" {
home = os.Getenv("USERPROFILE")
}
}
}
return
}
func parseYAML(filename string) (Configuration, error) {
c := DefaultConfiguration()
// get the abs
// which will try to find the 'filename' from the current working dir too.
yamlAbsPath, err := filepath.Abs(filename)
if err != nil {
return c, fmt.Errorf("parse yaml: %w", err)
}
// read the raw contents of the file
data, err := ioutil.ReadFile(yamlAbsPath)
if err != nil {
return c, fmt.Errorf("parse yaml: %w", err)
}
// put the file's contents as yaml to the default configuration(c)
if err := yaml.Unmarshal(data, &c); err != nil {
return c, fmt.Errorf("parse yaml: %w", err)
}
return c, nil
}
// YAML reads Configuration from a configuration.yml file.
//
// Accepts the absolute path of the cfg.yml.
// An error will be shown to the user via panic with the error message.
// Error may occur when the cfg.yml doesn't exist or is not formatted correctly.
//
// Note: if the char '~' passed as "filename" then it tries to load and return
// the configuration from the $home_directory + iris.yml,
// see `WithGlobalConfiguration` for more information.
//
// Usage:
// app.Configure(iris.WithConfiguration(iris.YAML("myconfig.yml"))) or
// app.Run([iris.Runner], iris.WithConfiguration(iris.YAML("myconfig.yml"))).
func YAML(filename string) Configuration {
// check for the global configuration file and use that, otherwise
// return the default configuration if file doesn't exist.
if filename == globalConfigurationKeyword {
filename = homeConfigurationFilename(".yml")
if _, err := os.Stat(filename); os.IsNotExist(err) {
panic("default configuration file '" + filename + "' does not exist")
}
}
c, err := parseYAML(filename)
if err != nil {
panic(err)
}
return c
}
// TOML reads Configuration from a toml-compatible document file.
// Read more about toml's implementation at:
// https://github.com/toml-lang/toml
//
//
// Accepts the absolute path of the configuration file.
// An error will be shown to the user via panic with the error message.
// Error may occur when the file doesn't exist or is not formatted correctly.
//
// Note: if the char '~' passed as "filename" then it tries to load and return
// the configuration from the $home_directory + iris.tml,
// see `WithGlobalConfiguration` for more information.
//
// Usage:
// app.Configure(iris.WithConfiguration(iris.TOML("myconfig.tml"))) or
// app.Run([iris.Runner], iris.WithConfiguration(iris.TOML("myconfig.tml"))).
func TOML(filename string) Configuration {
c := DefaultConfiguration()
// check for the global configuration file and use that, otherwise
// return the default configuration if file doesn't exist.
if filename == globalConfigurationKeyword {
filename = homeConfigurationFilename(".tml")
if _, err := os.Stat(filename); os.IsNotExist(err) {
panic("default configuration file '" + filename + "' does not exist")
}
}
// get the abs
// which will try to find the 'filename' from the current working dir too.
tomlAbsPath, err := filepath.Abs(filename)
if err != nil {
panic(fmt.Errorf("toml: %w", err))
}
// read the raw contents of the file
data, err := ioutil.ReadFile(tomlAbsPath)
if err != nil {
panic(fmt.Errorf("toml: %w", err))
}
// put the file's contents as toml to the default configuration(c)
if _, err := toml.Decode(string(data), &c); err != nil {
panic(fmt.Errorf("toml: %w", err))
}
// Author's notes:
// The toml's 'usual thing' for key naming is: the_config_key instead of TheConfigKey
// but I always prefer to use the specific programming language's syntax
// and the original configuration name fields for external configuration files
// so we do 'toml: "TheConfigKeySameAsTheConfigField" instead.
return c
}
// Configurator is just an interface which accepts the framework instance.
//
// It can be used to register a custom configuration with `Configure` in order
// to modify the framework instance.
//
// Currently Configurator is being used to describe the configuration's fields values.
type Configurator func(*Application)
// WithGlobalConfiguration will load the global yaml configuration file
// from the home directory and it will set/override the whole app's configuration
// to that file's contents. The global configuration file can be modified by user
// and be used by multiple iris instances.
//
// This is useful when we run multiple iris servers that share the same
// configuration, even with custom values at its "Other" field.
//
// Usage: `app.Configure(iris.WithGlobalConfiguration)` or `app.Run([iris.Runner], iris.WithGlobalConfiguration)`.
var WithGlobalConfiguration = func(app *Application) {
app.Configure(WithConfiguration(YAML(globalConfigurationKeyword)))
}
// Variables are used for configurators that don't need any arguments; functions are used
// for those that do (this helps code editors recognise them as variables, without parenthesis completion).
// WithoutServerError will cause to ignore the matched "errors"
// from the main application's `Run` function.
//
// Usage:
// err := app.Listen(":8080", iris.WithoutServerError(iris.ErrServerClosed))
// will return `nil` if the server's error was `http/iris#ErrServerClosed`.
//
// See `Configuration#IgnoreServerErrors []string` too.
//
// Example: https://github.com/kataras/iris/tree/master/_examples/http-listening/listen-addr/omit-server-errors
func WithoutServerError(errors ...error) Configurator {
return func(app *Application) {
if len(errors) == 0 {
return
}
errorsAsString := make([]string, len(errors))
for i, e := range errors {
errorsAsString[i] = e.Error()
}
app.config.IgnoreServerErrors = append(app.config.IgnoreServerErrors, errorsAsString...)
}
}
// WithoutStartupLog turns off the information send, once, to the terminal when the main server is open.
var WithoutStartupLog = func(app *Application) {
app.config.DisableStartupLog = true
}
// WithoutBanner is an alias for the `WithoutStartupLog` option.
//
// Turns off the information send, once, to the terminal when the main server is open.
var WithoutBanner = WithoutStartupLog
// WithoutInterruptHandler disables the automatic graceful server shutdown
// when control/cmd+C pressed.
var WithoutInterruptHandler = func(app *Application) {
app.config.DisableInterruptHandler = true
}
// WithoutPathCorrection disables the PathCorrection setting.
//
// See `Configuration`.
var WithoutPathCorrection = func(app *Application) {
app.config.DisablePathCorrection = true
}
// WithoutPathCorrectionRedirection disables the PathCorrectionRedirection setting.
//
// See `Configuration`.
var WithoutPathCorrectionRedirection = func(app *Application) {
app.config.DisablePathCorrection = false
app.config.DisablePathCorrectionRedirection = true
}
// WithoutBodyConsumptionOnUnmarshal disables BodyConsumptionOnUnmarshal setting.
//
// See `Configuration`.
var WithoutBodyConsumptionOnUnmarshal = func(app *Application) {
app.config.DisableBodyConsumptionOnUnmarshal = true
}
// WithoutAutoFireStatusCode disables the AutoFireStatusCode setting.
//
// See `Configuration`.
var WithoutAutoFireStatusCode = func(app *Application) {
app.config.DisableAutoFireStatusCode = true
}
// WithPathEscape enables the PathEscape setting.
//
// See `Configuration`.
var WithPathEscape = func(app *Application) {
app.config.EnablePathEscape = true
}
// WithOptimizations can force the application to optimize for the best performance where is possible.
//
// See `Configuration`.
var WithOptimizations = func(app *Application) {
app.config.EnableOptimizations = true
}
// WithFireMethodNotAllowed enables the FireMethodNotAllowed setting.
//
// See `Configuration`.
var WithFireMethodNotAllowed = func(app *Application) {
app.config.FireMethodNotAllowed = true
}
// WithTimeFormat sets the TimeFormat setting.
//
// See `Configuration`.
func WithTimeFormat(timeformat string) Configurator {
return func(app *Application) {
app.config.TimeFormat = timeformat
}
}
// WithCharset sets the Charset setting.
//
// See `Configuration`.
func WithCharset(charset string) Configurator {
return func(app *Application) {
app.config.Charset = charset
}
}
// WithPostMaxMemory sets the maximum post data size
// that a client can send to the server, this differs
// from the overall request body size which can be modified
// by the `context#SetMaxRequestBodySize` or `iris#LimitRequestBodySize`.
//
// Defaults to 32MB or 32 << 20 if you prefer.
func WithPostMaxMemory(limit int64) Configurator {
return func(app *Application) {
app.config.PostMaxMemory = limit
}
}
// WithRemoteAddrHeader enables or adds a new or existing request header name
// that can be used to validate the client's real IP.
//
// By default no "X-" header is considered safe to be used for retrieving the
// client's IP address, because those headers can be manually changed by
// the client. But sometimes they are useful, e.g. when behind a proxy
// you want to enable the "X-Forwarded-For" or, when on cloudflare,
// you want to enable the "CF-Connecting-IP"; if needed you
// can allow the `ctx.RemoteAddr()` to use any header
// that the client may send.
//
// Defaults to an empty map but an example usage is:
// WithRemoteAddrHeader("X-Forwarded-For")
//
// Look `context.RemoteAddr()` for more.
func WithRemoteAddrHeader(headerName string) Configurator {
return func(app *Application) {
if app.config.RemoteAddrHeaders == nil {
app.config.RemoteAddrHeaders = make(map[string]bool)
}
app.config.RemoteAddrHeaders[headerName] = true
}
}
// WithoutRemoteAddrHeader disables an existing request header name
// that can be used to validate and parse the client's real IP.
//
//
// Keep note that RemoteAddrHeaders is already defaults to an empty map
// so you don't have to call this Configurator if you didn't
// add allowed headers via configuration or via `WithRemoteAddrHeader` before.
//
// Look `context.RemoteAddr()` for more.
func WithoutRemoteAddrHeader(headerName string) Configurator {
return func(app *Application) {
if app.config.RemoteAddrHeaders == nil {
app.config.RemoteAddrHeaders = make(map[string]bool)
}
app.config.RemoteAddrHeaders[headerName] = false
}
}
// WithOtherValue adds a value based on a key to the Other setting.
//
// See `Configuration.Other`.
func WithOtherValue(key string, val interface{}) Configurator {
return func(app *Application) {
if app.config.Other == nil {
app.config.Other = make(map[string]interface{})
}
app.config.Other[key] = val
}
}
// WithSitemap enables the sitemap generator.
// Use the Route's `SetLastMod`, `SetChangeFreq` and `SetPriority` to modify
// the sitemap's URL child element properties.
//
// It accepts a "startURL" input argument which
// is the prefix for the registered routes that will be included in the sitemap.
//
// If more than 50,000 static routes are registered then sitemaps will be split and a sitemap index will be served in
// /sitemap.xml.
//
// If `Application.I18n.Load/LoadAssets` is called then the sitemap will contain translated links for each static route.
//
// If the result does not complete your needs you can take control
// and use the github.com/kataras/sitemap package to generate a customized one instead.
//
// Example: https://github.com/kataras/iris/tree/master/_examples/sitemap.
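//
// Illustrative usage sketch (added for clarity; the start URL and listen
// address below are placeholders, not values taken from this source):
//  app := iris.New()
//  app.Run(iris.Addr(":8080"), iris.WithSitemap("https://example.com"))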
func WithSitemap(startURL string) Configurator {
sitemaps := sitemap.New(startURL)
return func(app *Application) {
var defaultLang string
if tags := app.I18n.Tags(); len(tags) > 0 {
defaultLang = tags[0].String()
sitemaps.DefaultLang(defaultLang)
}
for _, r := range app.GetRoutes() {
if !r.IsStatic() || r.Subdomain != "" {
continue
}
loc := r.StaticPath()
var translatedLinks []sitemap.Link
for _, tag := range app.I18n.Tags() {
lang := tag.String()
langPath := lang
href := ""
if lang == defaultLang {
// http://domain.com/en-US/path to just http://domain.com/path if en-US is the default language.
langPath = ""
}
if app.I18n.PathRedirect {
// then use the path prefix.
// e.g. http://domain.com/el-GR/path
if langPath == "" { // fix double slashes http://domain.com// when self-included default language.
href = loc
} else {
href = "/" + langPath + loc
}
} else if app.I18n.Subdomain {
// then use the subdomain.
// e.g. http://el.domain.com/path
scheme := netutil.ResolveSchemeFromVHost(startURL)
host := strings.TrimLeft(startURL, scheme)
if langPath != "" {
href = scheme + strings.Split(langPath, "-")[0] + "." + host + loc
} else {
href = loc
}
} else if p := app.I18n.URLParameter; p != "" {
// then use the URL parameter.
// e.g. http://domain.com/path?lang=el-GR
href = loc + "?" + p + "=" + lang
} else {
// then skip it, we can't generate the link at this state.
continue
}
translatedLinks = append(translatedLinks, sitemap.Link{
Rel: "alternate",
Hreflang: lang,
Href: href,
})
}
sitemaps.URL(sitemap.URL{
Loc: loc,
LastMod: r.LastMod,
ChangeFreq: r.ChangeFreq,
Priority: r.Priority,
Links: translatedLinks,
})
}
for _, s := range sitemaps.Build() {
contentCopy := make([]byte, len(s.Content))
copy(contentCopy, s.Content)
handler := func(ctx Context) {
ctx.ContentType(context.ContentXMLHeaderValue)
ctx.Write(contentCopy)
}
if app.builded {
routes := app.CreateRoutes([]string{MethodGet, MethodHead, MethodOptions}, s.Path, handler)
for _, r := range routes {
if err := app.Router.AddRouteUnsafe(r); err != nil {
app.Logger().Errorf("sitemap route: %v", err)
}
}
} else {
app.HandleMany("GET HEAD OPTIONS", s.Path, handler)
}
}
}
}
// WithTunneling is the `iris.Configurator` for the `iris.Configuration.Tunneling` field.
// It's used to enable http tunneling for an Iris Application, per registered host
//
// Alternatively use the `iris.WithConfiguration(iris.Configuration{Tunneling: iris.TunnelingConfiguration{...}})`.
var WithTunneling = func(app *Application) {
conf := TunnelingConfiguration{
Tunnels: []Tunnel{{}}, // create empty tunnel, its addr and name are set right before host serve.
}
app.config.Tunneling = conf
}
// Tunnel is the Tunnels field of the TunnelingConfiguration structure.
type Tunnel struct {
// Name is the only one required field,
// it is used to create and close tunnels, e.g. "MyApp".
// If this field is not empty then ngrok tunnels will be created
// when the iris app is up and running.
Name string `json:"name" yaml:"Name" toml:"Name"`
// Addr is basically optional as it will be set through
// Iris built-in Runners, however, if `iris.Raw` is used
// then this field should be set in the form of 'hostname:port'
// because the framework cannot be aware
// of the address you used to run the server on this custom runner.
Addr string `json:"addr,omitempty" yaml:"Addr" toml:"Addr"`
}
// TunnelingConfiguration contains configuration
// for the optional tunneling through ngrok feature.
// Note that the ngrok should be already installed at the host machine.
type TunnelingConfiguration struct {
// AuthToken field is optional and can be used
// to authenticate the ngrok access.
// ngrok authtoken <YOUR_AUTHTOKEN>
AuthToken string `json:"authToken,omitempty" yaml:"AuthToken" toml:"AuthToken"`
// No...
// Config is optional and can be used
// to load ngrok configuration from file system path.
//
// If you don't specify a location for a configuration file,
// ngrok tries to read one from the default location $HOME/.ngrok2/ngrok.yml.
// The configuration file is optional; no error is emitted if that path does not exist.
// Config string `json:"config,omitempty" yaml:"Config" toml:"Config"`
// Bin is the system binary path of the ngrok executable file.
// If it's empty then the framework will try to find it through system env variables.
Bin string `json:"bin,omitempty" yaml:"Bin" toml:"Bin"`
// WebInterface is the web interface address of an already-running ngrok instance.
// Iris will try to fetch the default web interface address (http://127.0.0.1:4040)
// to determine if an ngrok instance is running before trying to start it manually.
// However if a custom web interface address is used,
// this field must be set e.g. http://127.0.0.1:5050.
WebInterface string `json:"webInterface,omitempty" yaml:"WebInterface" toml:"WebInterface"`
// Region is optional, can be used to set the region which defaults to "us".
// Available values are:
// "us" for United States
// "eu" for Europe
// "ap" for Asia/Pacific
// "au" for Australia
// "sa" for South America
// "jp" for Japan
// "in" for India
Region string `json:"region,omitempty" yaml:"Region" toml:"Region"`
// Tunnels the collection of the tunnels.
// One tunnel per Iris Host per Application, usually you only need one.
Tunnels []Tunnel `json:"tunnels" yaml:"Tunnels" toml:"Tunnels"`
}
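// exampleTunnelingConfiguration is an illustrative sketch added for clarity; it is
// not part of the original source and every value below is a placeholder/assumption
// (ngrok must already be installed on the host for tunneling to work).
func exampleTunnelingConfiguration() TunnelingConfiguration {
return TunnelingConfiguration{
AuthToken:    "<YOUR_AUTHTOKEN>",
Bin:          "/usr/local/bin/ngrok",
WebInterface: "http://127.0.0.1:4040",
Region:       "eu",
Tunnels:      []Tunnel{{Name: "MyApp", Addr: ":8080"}},
}
}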
func (tc *TunnelingConfiguration) isEnabled() bool {
return tc != nil && len(tc.Tunnels) > 0
}
func (tc *TunnelingConfiguration) isNgrokRunning() bool {
_, err := http.Get(tc.WebInterface)
return err == nil
}
// https://ngrok.com/docs
type ngrokTunnel struct {
Name string `json:"name"`
Addr string `json:"addr"`
Proto string `json:"proto"`
Auth string `json:"auth"`
BindTLS bool `json:"bind_tls"`
}
func (tc TunnelingConfiguration) startTunnel(t Tunnel, publicAddr *string) error {
tunnelAPIRequest := ngrokTunnel{
Name: t.Name,
Addr: t.Addr,
Proto: "http",
BindTLS: true,
}
if !tc.isNgrokRunning() {
ngrokBin := "ngrok" // environment binary.
if tc.Bin == "" {
_, err := exec.LookPath(ngrokBin)
if err != nil {
ngrokEnvVar, found := os.LookupEnv("NGROK")
if !found {
return fmt.Errorf(`"ngrok" executable not found, please install it from: https://ngrok.com/download`)
}
ngrokBin = ngrokEnvVar
}
} else {
ngrokBin = tc.Bin
}
if tc.AuthToken != "" {
cmd := exec.Command(ngrokBin, "authtoken", tc.AuthToken)
err := cmd.Run()
if err != nil {
return err
}
}
// start -none, start without tunnels.
// -log stdout is required so that ngrok logs to stdout, otherwise the pipe below would never be able to read from it.
cmd := exec.Command(ngrokBin, "start", "-none", "-log", "stdout")
// if tc.Config != "" {
// cmd.Args = append(cmd.Args, []string{"-config", tc.Config}...)
// }
if tc.Region != "" {
cmd.Args = append(cmd.Args, []string{"-region", tc.Region}...)
}
stdout, err := cmd.StdoutPipe()
if err != nil {
return err
}
if err = cmd.Start(); err != nil {
return err
}
p := make([]byte, 256)
okText := []byte("client session established")
for {
n, err := stdout.Read(p)
if err != nil {
return err
}
// we need this one:
// msg="client session established"
// note that this will block if something terrible happens
// but ngrok's errors are descriptive, so the error is easy to resolve without any logs.
if bytes.Contains(p[:n], okText) {
break
}
}
}
return tc.createTunnel(tunnelAPIRequest, publicAddr)
}
func (tc TunnelingConfiguration) stopTunnel(t Tunnel) error {
url := fmt.Sprintf("%s/api/tunnels/%s", tc.WebInterface, t.Name)
req, err := http.NewRequest(http.MethodDelete, url, nil)
if err != nil {
return err
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
if resp.StatusCode != StatusNoContent {
return fmt.Errorf("stop returned an unexpected status code: %d", resp.StatusCode)
}
return nil
}
func (tc TunnelingConfiguration) createTunnel(tunnelAPIRequest ngrokTunnel, publicAddr *string) error {
url := fmt.Sprintf("%s/api/tunnels", tc.WebInterface)
requestData, err := json.Marshal(tunnelAPIRequest)
if err != nil {
return err
}
resp, err := http.Post(url, context.ContentJSONHeaderValue, bytes.NewBuffer(requestData))
if err != nil {
return err
}
defer resp.Body.Close()
type publicAddrOrErrResp struct {
PublicAddr string `json:"public_url"`
Details struct {
ErrorText string `json:"err"` // when can't bind more addresses, status code was successful.
} `json:"details"`
ErrMsg string `json:"msg"` // when ngrok is not yet ready, status code was unsuccessful.
}
var apiResponse publicAddrOrErrResp
err = json.NewDecoder(resp.Body).Decode(&apiResponse)
if err != nil {
return err
}
if errText := apiResponse.ErrMsg; errText != "" {
return errors.New(errText)
}
if errText := apiResponse.Details.ErrorText; errText != "" {
return errors.New(errText)
}
*publicAddr = apiResponse.PublicAddr
return nil
}
// Configuration the whole configuration for an iris instance
// these can be passed via options also, look at the top of this file(configuration.go).
// Configuration is a valid OptionSetter.
type Configuration struct {
// vhost is private and set only with .Run method, it cannot be changed after the first set.
// It can be retrieved by the context if needed (i.e router for subdomains)
vhost string
// Tunneling can be optionally set to enable ngrok http(s) tunneling for this Iris app instance.
// See the `WithTunneling` Configurator too.
Tunneling TunnelingConfiguration `json:"tunneling,omitempty" yaml:"Tunneling" toml:"Tunneling"`
// IgnoreServerErrors will cause to ignore the matched "errors"
// from the main application's `Run` function.
// This is a slice of string, not a slice of error
// users can register these errors using yaml or toml configuration file
// like the rest of the configuration fields.
//
// See `WithoutServerError(...)` function too.
//
// Example: https://github.com/kataras/iris/tree/master/_examples/http-listening/listen-addr/omit-server-errors
//
// Defaults to an empty slice.
IgnoreServerErrors []string `json:"ignoreServerErrors,omitempty" yaml:"IgnoreServerErrors" toml:"IgnoreServerErrors"`
// DisableStartupLog if set to true then it turns off the write banner on server startup.
//
// Defaults to false.
DisableStartupLog bool `json:"disableStartupLog,omitempty" yaml:"DisableStartupLog" toml:"DisableStartupLog"`
// DisableInterruptHandler if set to true then it disables the automatic graceful server shutdown
// when control/cmd+C pressed.
// Turn this to true if you're planning to handle this by your own via a custom host.Task.
//
// Defaults to false.
DisableInterruptHandler bool `json:"disableInterruptHandler,omitempty" yaml:"DisableInterruptHandler" toml:"DisableInterruptHandler"`
// DisablePathCorrection corrects and redirects or executes directly the handler of
// the requested path to the registered path
// for example, if /home/ path is requested but no handler for this Route found,
// then the Router checks if /home handler exists, if yes,
// (permanent)redirects the client to the correct path /home.
//
// See `DisablePathCorrectionRedirection` to enable direct handler execution instead of redirection.
//
// Defaults to false.
DisablePathCorrection bool `json:"disablePathCorrection,omitempty" yaml:"DisablePathCorrection" toml:"DisablePathCorrection"`
// DisablePathCorrectionRedirection works whenever configuration.DisablePathCorrection is set to false
// and if DisablePathCorrectionRedirection is set to true then it will fire the handler of the matching route without
// the trailing slash ("/") instead of sending a redirection status.
//
// Defaults to false.
DisablePathCorrectionRedirection bool `json:"disablePathCorrectionRedirection,omitempty" yaml:"DisablePathCorrectionRedirection" toml:"DisablePathCorrectionRedirection"`
// EnablePathEscape when it is true then it escapes the path and the named parameters (if any).
// Change it to false if you want something like this https://github.com/kataras/iris/issues/135 to work
//
// When do you need to Disable(false) it:
// accepts parameters with slash '/'
// Request: http://localhost:8080/details/Project%2FDelta
// ctx.Param("project") returns the raw named parameter: Project%2FDelta
// which you can escape it manually with net/url:
// projectName, _ := url.QueryUnescape(c.Param("project")).
//
// Defaults to false.
EnablePathEscape bool `json:"enablePathEscape,omitempty" yaml:"EnablePathEscape" toml:"EnablePathEscape"`
// EnableOptimizations when this field is true
// then the application tries to optimize for the best performance where possible.
//
// Defaults to false.
EnableOptimizations bool `json:"enableOptimizations,omitempty" yaml:"EnableOptimizations" toml:"EnableOptimizations"`
// FireMethodNotAllowed if it's true the router checks for StatusMethodNotAllowed(405) and
// fires the 405 error instead of 404.
// Defaults to false.
FireMethodNotAllowed bool `json:"fireMethodNotAllowed,omitempty" yaml:"FireMethodNotAllowed" toml:"FireMethodNotAllowed"`
// DisableBodyConsumptionOnUnmarshal manages the reading behavior of the context's body readers/binders.
// If set to true then it
// disables the body consumption by the `context.UnmarshalBody/ReadJSON/ReadXML`.
//
// By default `io.ReadAll` is used to read the body from the `context.Request.Body` which is an `io.ReadCloser`;
// if this field is set to true then a new buffer will be created to read from instead.
// The body will not be changed and existing data before the
// context.UnmarshalBody/ReadJSON/ReadXML will not be consumed.
DisableBodyConsumptionOnUnmarshal bool `json:"disableBodyConsumptionOnUnmarshal,omitempty" yaml:"DisableBodyConsumptionOnUnmarshal" toml:"DisableBodyConsumptionOnUnmarshal"`
// DisableAutoFireStatusCode if true then it turns off the http error status code handler automatic execution
// from (`context.StatusCodeNotSuccessful`, defaults to < 200 || >= 400).
// If that is false then for a direct error firing, then call the "context#FireStatusCode(statusCode)" manually.
//
// By default a custom http error handler will be fired when "context.StatusCode(code)" is called;
// code should be equal to the result of the `context.StatusCodeNotSuccessful` in order to be received as an "http error handler".
//
// Developer may want this option to set as true in order to manually call the
// error handlers when needed via "context#FireStatusCode(< 200 || >= 400)".
// HTTP Custom error handlers are being registered via app.OnErrorCode(code, handler)".
//
// Defaults to false.
DisableAutoFireStatusCode bool `json:"disableAutoFireStatusCode,omitempty" yaml:"DisableAutoFireStatusCode" toml:"DisableAutoFireStatusCode"`
// TimeFormat time format for any kind of datetime parsing
// Defaults to "Mon, 02 Jan 2006 15:04:05 GMT".
TimeFormat string `json:"timeFormat,omitempty" yaml:"TimeFormat" toml:"TimeFormat"`
// Charset character encoding for various rendering
// used for templates and the rest of the responses
// Defaults to "UTF-8".
Charset string `json:"charset,omitempty" yaml:"Charset" toml:"Charset"`
// PostMaxMemory sets the maximum post data size
// that a client can send to the server, this differs
// from the overall request body size which can be modified
// by the `context#SetMaxRequestBodySize` or `iris#LimitRequestBodySize`.
//
// Defaults to 32MB or 32 << 20 if you prefer.
PostMaxMemory int64 `json:"postMaxMemory" yaml:"PostMaxMemory" toml:"PostMaxMemory"`
// +----------------------------------------------------+
// | Context's keys for values used on various features |
// +----------------------------------------------------+
// Context values' keys for various features.
//
// LocaleContextKey is used by i18n to get the current request's locale, which contains a translate function too.
//
// Defaults to "iris.locale".
LocaleContextKey string `json:"localeContextKey,omitempty" yaml:"LocaleContextKey" toml:"LocaleContextKey"`
// ViewLayoutContextKey is the key of the context's user values' key
// which is being used to set the template
// layout from a middleware or the main handler.
// Overrides the parent's or the configuration's.
//
// Defaults to "iris.ViewLayout"
ViewLayoutContextKey string `json:"viewLayoutContextKey,omitempty" yaml:"ViewLayoutContextKey" toml:"ViewLayoutContextKey"`
// ViewDataContextKey is the key of the context's user values' key
// which is being used to set the template
// binding data from a middleware or the main handler.
//
// Defaults to "iris.viewData"
ViewDataContextKey string `json:"viewDataContextKey,omitempty" yaml:"ViewDataContextKey" toml:"ViewDataContextKey"`
// RemoteAddrHeaders are the allowed request headers names
// that can be valid to parse the client's IP based on.
// By default no "X-" header is considered safe to be used for retrieving the
// client's IP address, because those headers can be manually changed by
// the client. But sometimes they are useful, e.g. when behind a proxy
// you want to enable the "X-Forwarded-For" or, when on cloudflare,
// you want to enable the "CF-Connecting-IP"; if needed you
// can allow the `ctx.RemoteAddr()` to use any header
// that the client may send.
//
// Defaults to an empty map but an example usage is:
// RemoteAddrHeaders {
// "X-Real-Ip": true,
// "X-Forwarded-For": true,
// "CF-Connecting-IP": true,
// }
//
// Look `context.RemoteAddr()` for more.
RemoteAddrHeaders map[string]bool `json:"remoteAddrHeaders,omitempty" yaml:"RemoteAddrHeaders" toml:"RemoteAddrHeaders"`
// Other are the custom, dynamic options, can be empty.
// This field used only by you to set any app's options you want.
//
// Defaults to a non-nil empty map.
Other map[string]interface{} `json:"other,omitempty" yaml:"Other" toml:"Other"`
}
var _ context.ConfigurationReadOnly = &Configuration{}
// GetVHost returns the non-exported vhost config field.
//
// If original addr ended with :443 or :80, it will return the host without the port.
// If original addr was :https or :http, it will return localhost.
// If original addr was 0.0.0.0, it will return localhost.
func (c Configuration) GetVHost() string {
return c.vhost
}
// GetDisablePathCorrection returns the Configuration#DisablePathCorrection,
// DisablePathCorrection corrects and redirects the requested path to the registered path
// for example, if /home/ path is requested but no handler for this Route found,
// then the Router checks if /home handler exists, if yes,
// (permanent)redirects the client to the correct path /home.
func (c Configuration) GetDisablePathCorrection() bool {
return c.DisablePathCorrection
}
// GetDisablePathCorrectionRedirection returns the Configuration#DisablePathCorrectionRedirection field.
// If DisablePathCorrectionRedirection is set to true then it will fire the handler of the matching route without
// the last slash ("/") instead of sending a redirection status.
func (c Configuration) GetDisablePathCorrectionRedirection() bool {
return c.DisablePathCorrectionRedirection
}
// GetEnablePathEscape is the Configuration#EnablePathEscape,
// returns true when it escapes the path and the named parameters (if any).
func (c Configuration) GetEnablePathEscape() bool {
return c.EnablePathEscape
}
// GetEnableOptimizations returns whether
// the application has performance optimizations enabled.
func (c Configuration) GetEnableOptimizations() bool {
return c.EnableOptimizations
}
// GetFireMethodNotAllowed returns the Configuration#FireMethodNotAllowed.
func (c Configuration) GetFireMethodNotAllowed() bool {
return c.FireMethodNotAllowed
}
// GetDisableBodyConsumptionOnUnmarshal returns the Configuration#DisableBodyConsumptionOnUnmarshal,
// which manages the reading behavior of the context's body readers/binders.
// If it returns true then the body consumption by the `context.UnmarshalBody/ReadJSON/ReadXML`
// is disabled.
//
// By default `io.ReadAll` is used to read the body from the `context.Request.Body` which is an `io.ReadCloser`;
// if this field is set to true then a new buffer will be created to read from instead.
// The body will not be changed and existing data before the
// context.UnmarshalBody/ReadJSON/ReadXML will not be consumed.
func (c Configuration) GetDisableBodyConsumptionOnUnmarshal() bool {
return c.DisableBodyConsumptionOnUnmarshal
}
// GetDisableAutoFireStatusCode returns the Configuration#DisableAutoFireStatusCode.
// Returns true when the http error status code handler automatic execution turned off.
func (c Configuration) GetDisableAutoFireStatusCode() bool {
return c.DisableAutoFireStatusCode
}
// GetTimeFormat returns the Configuration#TimeFormat,
// format for any kind of datetime parsing.
func (c Configuration) GetTimeFormat() string {
return c.TimeFormat
}
// GetCharset returns the Configuration#Charset,
// the character encoding for various rendering
// used for templates and the rest of the responses.
func (c Configuration) GetCharset() string {
return c.Charset
}
// GetPostMaxMemory returns the maximum configured post data size
// that a client can send to the server, this differs
// from the overall request body size which can be modified
// by the `context#SetMaxRequestBodySize` or `iris#LimitRequestBodySize`.
//
// Defaults to 32MB or 32 << 20 if you prefer.
func (c Configuration) GetPostMaxMemory() int64 {
return c.PostMaxMemory
}
// GetLocaleContextKey returns the configuration's LocaleContextKey value,
// used for i18n.
func (c Configuration) GetLocaleContextKey() string {
return c.LocaleContextKey
}
// GetViewLayoutContextKey returns the key of the context's user values' key
// which is being used to set the template
// layout from a middleware or the main handler.
// Overrides the parent's or the configuration's.
func (c Configuration) GetViewLayoutContextKey() string {
return c.ViewLayoutContextKey
}
// GetViewDataContextKey returns the key of the context's user values' key
// which is being used to set the template
// binding data from a middleware or the main handler.
func (c Configuration) GetViewDataContextKey() string {
return c.ViewDataContextKey
}
// GetRemoteAddrHeaders returns the allowed request headers names
// that can be valid to parse the client's IP based on.
// By default no "X-" header is considered safe to be used for retrieving the
// client's IP address, because those headers can be manually changed by
// the client. But sometimes they are useful, e.g. when behind a proxy
// you want to enable the "X-Forwarded-For" or, when on cloudflare,
// you want to enable the "CF-Connecting-IP"; if needed you
// can allow the `ctx.RemoteAddr()` to use any header
// that the client may send.
//
// Defaults to an empty map but an example usage is:
// RemoteAddrHeaders {
// "X-Real-Ip": true,
// "X-Forwarded-For": true,
// "CF-Connecting-IP": true,
// }
//
// Look `context.RemoteAddr()` for more.
func (c Configuration) GetRemoteAddrHeaders() map[string]bool {
return c.RemoteAddrHeaders
}
// GetOther returns the Configuration#Other map.
func (c Configuration) GetOther() map[string]interface{} {
return c.Other
}
// WithConfiguration sets the "c" values to the framework's configurations.
//
// Usage:
// app.Listen(":8080", iris.WithConfiguration(iris.Configuration{/* fields here */ }))
// or
// iris.WithConfiguration(iris.YAML("./cfg/iris.yml"))
// or
// iris.WithConfiguration(iris.TOML("./cfg/iris.tml"))
func WithConfiguration(c Configuration) Configurator {
return func(app *Application) {
main := app.config
if c.Tunneling.isEnabled() {
main.Tunneling = c.Tunneling
}
if v := c.IgnoreServerErrors; len(v) > 0 {
main.IgnoreServerErrors = append(main.IgnoreServerErrors, v...)
}
if v := c.DisableStartupLog; v {
main.DisableStartupLog = v
}
if v := c.DisableInterruptHandler; v {
main.DisableInterruptHandler = v
}
if v := c.DisablePathCorrection; v {
main.DisablePathCorrection = v
}
if v := c.DisablePathCorrectionRedirection; v {
main.DisablePathCorrectionRedirection = v
}
if v := c.EnablePathEscape; v {
main.EnablePathEscape = v
}
if v := c.EnableOptimizations; v {
main.EnableOptimizations = v
}
if v := c.FireMethodNotAllowed; v {
main.FireMethodNotAllowed = v
}
if v := c.DisableBodyConsumptionOnUnmarshal; v {
main.DisableBodyConsumptionOnUnmarshal = v
}
if v := c.DisableAutoFireStatusCode; v {
main.DisableAutoFireStatusCode = v
}
if v := c.TimeFormat; v != "" {
main.TimeFormat = v
}
if v := c.Charset; v != "" {
main.Charset = v
}
if v := c.PostMaxMemory; v > 0 {
main.PostMaxMemory = v
}
if v := c.LocaleContextKey; v != "" {
main.LocaleContextKey = v
}
if v := c.ViewLayoutContextKey; v != "" {
main.ViewLayoutContextKey = v
}
if v := c.ViewDataContextKey; v != "" {
main.ViewDataContextKey = v
}
if v := c.RemoteAddrHeaders; len(v) > 0 {
if main.RemoteAddrHeaders == nil {
main.RemoteAddrHeaders = make(map[string]bool, len(v))
}
for key, value := range v {
main.RemoteAddrHeaders[key] = value
}
}
if v := c.Other; len(v) > 0 {
if main.Other == nil {
main.Other = make(map[string]interface{}, len(v))
}
for key, value := range v {
main.Other[key] = value
}
}
}
}
// DefaultConfiguration returns the default configuration for an iris station, fills the main Configuration
func DefaultConfiguration() Configuration {
return Configuration{
DisableStartupLog: false,
DisableInterruptHandler: false,
DisablePathCorrection: false,
EnablePathEscape: false,
FireMethodNotAllowed: false,
DisableBodyConsumptionOnUnmarshal: false,
DisableAutoFireStatusCode: false,
TimeFormat: "Mon, 02 Jan 2006 15:04:05 GMT",
Charset: "UTF-8",
// PostMaxMemory is for post body max memory.
//
// The request body the size limit
// can be set by the middleware `LimitRequestBodySize`
// or `context#SetMaxRequestBodySize`.
PostMaxMemory: 32 << 20, // 32MB
LocaleContextKey: "iris.locale",
ViewLayoutContextKey: "iris.viewLayout",
ViewDataContextKey: "iris.viewData",
RemoteAddrHeaders: make(map[string]bool),
EnableOptimizations: false,
Other: make(map[string]interface{}),
}
}
| [
"\"HOME\"",
"\"home\"",
"\"HOMEDRIVE\"",
"\"HOMEPATH\"",
"\"USERPROFILE\""
] | [] | [
"home",
"HOMEPATH",
"HOMEDRIVE",
"USERPROFILE",
"HOME"
] | [] | ["home", "HOMEPATH", "HOMEDRIVE", "USERPROFILE", "HOME"] | go | 5 | 0 | |
aicupboard/asgi.py | """
ASGI config for aicupboard project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'aicupboard.settings')
application = get_asgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
datalabeling/beta/cloud-client/src/test/java/com/example/datalabeling/LabelImageIT.java | /*
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.example.datalabeling;
import com.google.cloud.datalabeling.v1beta1.AnnotationSpecSet;
import com.google.cloud.datalabeling.v1beta1.DataLabelingServiceClient;
import com.google.cloud.datalabeling.v1beta1.DataLabelingServiceClient.ListAnnotationSpecSetsPagedResponse;
import com.google.cloud.datalabeling.v1beta1.DataLabelingServiceClient.ListDatasetsPagedResponse;
import com.google.cloud.datalabeling.v1beta1.DataLabelingServiceClient.ListInstructionsPagedResponse;
import com.google.cloud.datalabeling.v1beta1.Dataset;
import com.google.cloud.datalabeling.v1beta1.Instruction;
import com.google.cloud.datalabeling.v1beta1.ListAnnotationSpecSetsRequest;
import com.google.cloud.datalabeling.v1beta1.ListDatasetsRequest;
import com.google.cloud.datalabeling.v1beta1.ListInstructionsRequest;
import com.google.cloud.datalabeling.v1beta1.ProjectName;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
/**
* Integration (system) tests for {@link LabelImage}.
*/
@RunWith(JUnit4.class)
@Ignore("b/146064330")
@SuppressWarnings("checkstyle:abbreviationaswordinname")
public class LabelImageIT {
private static String PROJECT_ID = System.getenv().get("GOOGLE_CLOUD_PROJECT");
private static String DATASET_GCS_SOURCE_URI =
"gs://cloud-samples-data/datalabeling/image/image_dataset.csv";
private static String INSTRUCTION_GCS_SOURCE_URI =
"gs://cloud-samples-data/datalabeling/instruction/test.pdf";
private static String datasetName = "LABEL_IMAGE_DATASET_NAME";
private Dataset dataset;
private Instruction instruction;
private AnnotationSpecSet annotationSpecSet;
@Before
public void setUp() {
System.setOut(new PrintStream(new ByteArrayOutputStream()));
try (DataLabelingServiceClient dataLabelingServiceClient = DataLabelingServiceClient.create()) {
// Create the dataset
CreateDataset.createDataset(PROJECT_ID, datasetName);
ProjectName projectName = ProjectName.of(PROJECT_ID);
// Get the Dataset
ListDatasetsRequest datasetsRequest = ListDatasetsRequest.newBuilder()
.setParent(projectName.toString())
.build();
ListDatasetsPagedResponse datasetsResponse = dataLabelingServiceClient
.listDatasets(datasetsRequest);
for (Dataset returnedDataset : datasetsResponse.getPage().iterateAll()) {
if (returnedDataset.getDisplayName().equals("LABEL_IMAGE_DATASET_NAME")) {
dataset = returnedDataset;
}
}
// Import the images
ImportData.importData(dataset.getName(), DATASET_GCS_SOURCE_URI);
// Create the instruction
CreateInstruction.createInstruction(PROJECT_ID, INSTRUCTION_GCS_SOURCE_URI);
// Create the annotation spec set
CreateAnnotationSpecSet.createAnnotationSpecSet(PROJECT_ID);
// Get the instruction
ListInstructionsRequest instructionsRequest = ListInstructionsRequest.newBuilder()
.setParent(projectName.toString())
.build();
ListInstructionsPagedResponse instructionsResponse = dataLabelingServiceClient
.listInstructions(instructionsRequest);
for (Instruction returnedInstruction : instructionsResponse.getPage().iterateAll()) {
if (returnedInstruction.getDisplayName().equals("YOUR_INSTRUCTION_DISPLAY_NAME")) {
instruction = returnedInstruction;
}
}
// Get the annotation spec set
ListAnnotationSpecSetsRequest annotationRequest = ListAnnotationSpecSetsRequest.newBuilder()
.setParent(projectName.toString())
.build();
ListAnnotationSpecSetsPagedResponse annotationsResponse = dataLabelingServiceClient
.listAnnotationSpecSets(annotationRequest);
for (AnnotationSpecSet returnedAnnotation : annotationsResponse.getPage().iterateAll()) {
if (returnedAnnotation.getDisplayName().equals("YOUR_ANNOTATION_SPEC_SET_DISPLAY_NAME")) {
annotationSpecSet = returnedAnnotation;
}
}
} catch (Exception e) {
e.printStackTrace();
}
}
@After
public void tearDown() {
System.setOut(null);
// Delete the created dataset.
try (DataLabelingServiceClient dataLabelingServiceClient = DataLabelingServiceClient.create()) {
dataLabelingServiceClient.deleteDataset(dataset.getName());
dataLabelingServiceClient.deleteInstruction(instruction.getName());
dataLabelingServiceClient.deleteAnnotationSpecSet(annotationSpecSet.getName());
} catch (IOException e) {
e.printStackTrace();
}
}
@Test
public void testLabelImage() throws IOException {
// Start the labeling task
LabelImage.labelImage(instruction.getName(), annotationSpecSet.getName(), dataset.getName());
}
}
| [] | [] | [] | [] | [] | java | 0 | 0 | |
pkg/driver/plugin/plugin_test.go | package plugin_test
import (
"context"
"encoding/json"
"errors"
"os"
"os/exec"
"testing"
"github.com/graphql-editor/stucco/pkg/driver"
"github.com/graphql-editor/stucco/pkg/driver/drivertest"
"github.com/graphql-editor/stucco/pkg/driver/plugin"
goplugin "github.com/hashicorp/go-plugin"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
)
type execCommandMock struct {
mock.Mock
}
func (m *execCommandMock) Command(command string, args ...string) *exec.Cmd {
i := []interface{}{command}
for _, arg := range args {
i = append(i, arg)
}
return m.Called(i...).Get(0).(func(string, ...string) *exec.Cmd)(command, args...)
}
type newPluginClientMock struct {
mock.Mock
}
func (m *newPluginClientMock) NewPlugin(cfg *goplugin.ClientConfig) plugin.Client {
return m.Called(cfg).Get(0).(plugin.Client)
}
type pluginClientMock struct {
mock.Mock
}
func (m *pluginClientMock) Client() (goplugin.ClientProtocol, error) {
called := m.Called()
return called.Get(0).(goplugin.ClientProtocol), called.Error(1)
}
func (m *pluginClientMock) Kill() {
m.Called()
}
type pluginClientProtocolMock struct {
mock.Mock
}
func (m *pluginClientProtocolMock) Close() error {
return m.Called().Error(0)
}
func (m *pluginClientProtocolMock) Dispense(arg string) (interface{}, error) {
called := m.Called(arg)
return called.Get(0), called.Error(1)
}
func (m *pluginClientProtocolMock) Ping() error {
return m.Called().Error(0)
}
type grpcClientMock struct {
drivertest.MockDriver
}
func (m *grpcClientMock) Stdout(ctx context.Context, name string) error {
return m.Called(ctx, name).Error(0)
}
func (m *grpcClientMock) Stderr(ctx context.Context, name string) error {
return m.Called(ctx, name).Error(0)
}
func setupPluginDriverTests(t *testing.T) (*grpcClientMock, func(*testing.T)) {
execCommandMock := new(execCommandMock)
plugin.ExecCommand = execCommandMock.Command
newPluginClientMock := new(newPluginClientMock)
pluginClientProtocolMock := new(pluginClientProtocolMock)
pluginClientMock := new(pluginClientMock)
grpcClientMock := new(grpcClientMock)
grpcClientMock.On("Stdout", mock.Anything, mock.AnythingOfType("string")).Return(nil)
grpcClientMock.On("Stderr", mock.Anything, mock.AnythingOfType("string")).Return(nil)
plugin.NewPluginClient = newPluginClientMock.NewPlugin
execCommandMock.On("Command", "fake-plugin-command").Return(
func(string, ...string) *exec.Cmd {
return new(exec.Cmd)
},
)
newPluginClientMock.On("NewPlugin", mock.Anything).Return(pluginClientMock)
pluginClientMock.On("Client").Return(pluginClientProtocolMock, nil)
pluginClientMock.On("Kill").Return()
pluginClientProtocolMock.On("Dispense", "driver_grpc").Return(grpcClientMock, nil)
pluginClientProtocolMock.On("Ping").Return(nil)
return grpcClientMock, func(t *testing.T) {
plugin.ExecCommand = exec.Command
plugin.NewPluginClient = plugin.DefaultPluginClient
}
}
func TestPluginFieldResolve(t *testing.T) {
grpcClientMock, teardown := setupPluginDriverTests(t)
defer teardown(t)
plug := plugin.NewPlugin(plugin.Config{
Cmd: "fake-plugin-command",
})
defer plug.Close()
data := []struct {
in driver.FieldResolveInput
out driver.FieldResolveOutput
outErr error
}{
{
in: driver.FieldResolveInput{},
out: driver.FieldResolveOutput{},
},
{
in: driver.FieldResolveInput{},
out: driver.FieldResolveOutput{
Error: &driver.Error{
Message: "",
},
},
outErr: errors.New(""),
},
}
for _, tt := range data {
grpcClientMock.On("FieldResolve", tt.in).Return(tt.out, tt.outErr).Once()
out := plug.FieldResolve(tt.in)
assert.Equal(t, tt.out, out)
}
grpcClientMock.AssertNumberOfCalls(t, "FieldResolve", len(data))
}
func TestPluginInterfaceResolveType(t *testing.T) {
grpcClientMock, teardown := setupPluginDriverTests(t)
defer teardown(t)
plug := plugin.NewPlugin(plugin.Config{
Cmd: "fake-plugin-command",
})
defer plug.Close()
data := []struct {
in driver.InterfaceResolveTypeInput
out driver.InterfaceResolveTypeOutput
outErr error
}{
{
in: driver.InterfaceResolveTypeInput{},
out: driver.InterfaceResolveTypeOutput{},
},
{
in: driver.InterfaceResolveTypeInput{},
out: driver.InterfaceResolveTypeOutput{
Error: &driver.Error{
Message: "",
},
},
outErr: errors.New(""),
},
}
for _, tt := range data {
grpcClientMock.On("InterfaceResolveType", tt.in).Return(tt.out, tt.outErr).Once()
out := plug.InterfaceResolveType(tt.in)
assert.Equal(t, tt.out, out)
}
grpcClientMock.AssertNumberOfCalls(t, "InterfaceResolveType", len(data))
}
func TestPluginScalarParse(t *testing.T) {
grpcClientMock, teardown := setupPluginDriverTests(t)
defer teardown(t)
plug := plugin.NewPlugin(plugin.Config{
Cmd: "fake-plugin-command",
})
defer plug.Close()
data := []struct {
in driver.ScalarParseInput
out driver.ScalarParseOutput
outErr error
}{
{
in: driver.ScalarParseInput{},
out: driver.ScalarParseOutput{},
},
{
in: driver.ScalarParseInput{},
out: driver.ScalarParseOutput{
Error: &driver.Error{
Message: "",
},
},
outErr: errors.New(""),
},
}
for _, tt := range data {
grpcClientMock.On("ScalarParse", tt.in).Return(tt.out, tt.outErr).Once()
out := plug.ScalarParse(tt.in)
assert.Equal(t, tt.out, out)
}
grpcClientMock.AssertNumberOfCalls(t, "ScalarParse", len(data))
}
func TestPluginScalarSerialize(t *testing.T) {
grpcClientMock, teardown := setupPluginDriverTests(t)
defer teardown(t)
plug := plugin.NewPlugin(plugin.Config{
Cmd: "fake-plugin-command",
})
defer plug.Close()
data := []struct {
in driver.ScalarSerializeInput
out driver.ScalarSerializeOutput
outErr error
}{
{
in: driver.ScalarSerializeInput{},
out: driver.ScalarSerializeOutput{},
},
{
in: driver.ScalarSerializeInput{},
out: driver.ScalarSerializeOutput{
Error: &driver.Error{
Message: "",
},
},
outErr: errors.New(""),
},
}
for _, tt := range data {
grpcClientMock.On("ScalarSerialize", tt.in).Return(tt.out, tt.outErr).Once()
out := plug.ScalarSerialize(tt.in)
assert.Equal(t, tt.out, out)
}
grpcClientMock.AssertNumberOfCalls(t, "ScalarSerialize", len(data))
}
func TestPluginUnionResolveType(t *testing.T) {
grpcClientMock, teardown := setupPluginDriverTests(t)
defer teardown(t)
plug := plugin.NewPlugin(plugin.Config{
Cmd: "fake-plugin-command",
})
defer plug.Close()
data := []struct {
in driver.UnionResolveTypeInput
out driver.UnionResolveTypeOutput
outErr error
}{
{
in: driver.UnionResolveTypeInput{},
out: driver.UnionResolveTypeOutput{},
},
{
in: driver.UnionResolveTypeInput{},
out: driver.UnionResolveTypeOutput{
Error: &driver.Error{
Message: "",
},
},
outErr: errors.New(""),
},
}
for _, tt := range data {
grpcClientMock.On("UnionResolveType", tt.in).Return(tt.out, tt.outErr).Once()
out := plug.UnionResolveType(tt.in)
assert.Equal(t, tt.out, out)
}
grpcClientMock.AssertNumberOfCalls(t, "UnionResolveType", len(data))
}
func TestPluginSecrets(t *testing.T) {
grpcClientMock, teardown := setupPluginDriverTests(t)
defer teardown(t)
grpcClientMock.On("FieldResolve", driver.FieldResolveInput{}).Return(
driver.FieldResolveOutput{},
nil,
)
plug := plugin.NewPlugin(plugin.Config{
Cmd: "fake-plugin-command",
})
defer plug.Close()
out := plug.SetSecrets(driver.SetSecretsInput{
Secrets: driver.Secrets{
"SECRET_VAR": "value",
},
})
assert.Nil(t, out.Error)
plug.FieldResolve(driver.FieldResolveInput{})
out = plug.SetSecrets(driver.SetSecretsInput{
Secrets: driver.Secrets{
"SECRET_VAR": "value",
},
})
assert.NotNil(t, out.Error)
}
func TestPluginStream(t *testing.T) {
grpcClientMock, teardown := setupPluginDriverTests(t)
defer teardown(t)
plug := plugin.NewPlugin(plugin.Config{
Cmd: "fake-plugin-command",
})
defer plug.Close()
data := []struct {
in driver.StreamInput
out driver.StreamOutput
outErr error
}{
{
in: driver.StreamInput{},
out: driver.StreamOutput{},
},
{
in: driver.StreamInput{},
out: driver.StreamOutput{
Error: &driver.Error{
Message: "",
},
},
outErr: errors.New(""),
},
}
for _, tt := range data {
grpcClientMock.On("Stream", tt.in).Return(tt.out, tt.outErr).Once()
out := plug.Stream(tt.in)
assert.Equal(t, tt.out, out)
}
grpcClientMock.AssertNumberOfCalls(t, "Stream", len(data))
}
type execCommandContextMock struct {
mock.Mock
}
func (m *execCommandContextMock) CommandContext(ctx context.Context, command string, args ...string) *exec.Cmd {
i := []interface{}{ctx, command}
for _, arg := range args {
i = append(i, arg)
}
called := m.Called(i...)
f := called.Get(0).(func(ctx context.Context, command string, args ...string) *exec.Cmd)
return f(ctx, command, args...)
}
func fakeBadExecCommandContext(ctx context.Context, command string, args ...string) *exec.Cmd {
cs := []string{"-test.run=TestLoadDriverPluginsCallsConfigHelperBad", "--", command}
cs = append(cs, args...)
cmd := exec.CommandContext(ctx, os.Args[0], cs...)
cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
return cmd
}
func fakeExecCommandContext(ctx context.Context, command string, args ...string) *exec.Cmd {
cs := []string{"-test.run=TestLoadDriverPluginsCallsConfigHelper", "--", command}
cs = append(cs, args...)
cmd := exec.CommandContext(ctx, os.Args[0], cs...)
cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
return cmd
}
func TestLoadDriverPluginsCallsConfigHelper(t *testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
if err := json.NewEncoder(os.Stdout).Encode([]driver.Config{
{
Provider: "fake",
Runtime: "fake",
},
}); err != nil {
os.Exit(1)
}
os.Exit(0)
}
func TestLoadDriverPluginsCallsConfigHelperBad(t *testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
os.Exit(1)
}
| [
"\"GO_WANT_HELPER_PROCESS\"",
"\"GO_WANT_HELPER_PROCESS\""
] | [] | [
"GO_WANT_HELPER_PROCESS"
] | [] | ["GO_WANT_HELPER_PROCESS"] | go | 1 | 0 | |
src/synapse_downloader/synapse_downloader_old.py | import os
import getpass
import synapseclient as syn
import logging
from datetime import datetime
class SynapseDownloaderOld:
def __init__(self, starting_entity_id, download_path, username=None, password=None):
self._synapse_client = None
self._starting_entity_id = starting_entity_id
self._username = username
self._password = password
self.start_time = None
self.end_time = None
var_path = os.path.expandvars(download_path)
expanded_path = os.path.expanduser(var_path)
self._download_path = expanded_path
self.ensure_dirs(self._download_path)
def synapse_login(self):
print('Logging into Synapse...')
self._username = self._username or os.getenv('SYNAPSE_USERNAME')
self._password = self._password or os.getenv('SYNAPSE_PASSWORD')
if not self._username:
self._username = input('Synapse username: ')
if not self._password:
self._password = getpass.getpass(prompt='Synapse password: ')
try:
self._synapse_client = syn.Synapse(skip_checks=True)
self._synapse_client.login(self._username, self._password, silent=True)
except Exception as ex:
self._synapse_client = None
print('Synapse login failed: {0}'.format(str(ex)))
return self._synapse_client is not None
def ensure_dirs(self, local_path):
if not os.path.isdir(local_path):
os.makedirs(local_path)
def execute(self):
self.synapse_login()
parent = self._synapse_client.get(self._starting_entity_id, downloadFile=False)
if type(parent) not in [syn.Project, syn.Folder]:
raise Exception('Starting entity must be a Project or Folder.')
print('Starting entity: {0} ({1})'.format(parent.name, parent.id))
print('Downloading to: {0}'.format(self._download_path))
print('')
self.start_time = datetime.now()
self.download_children(parent, self._download_path)
self.end_time = datetime.now()
print('')
print('Run time: {0}'.format(self.end_time - self.start_time))
def download_children(self, parent, local_path):
try:
children = self._synapse_client.getChildren(parent, includeTypes=["folder", "file"])
for child in children:
child_id = child.get('id')
child_name = child.get('name')
if child.get('type') == 'org.sagebionetworks.repo.model.Folder':
self.download_folder(child_id, child_name, local_path)
else:
self.download_file(child_id, child_name, local_path)
except Exception as ex:
logging.exception(ex)
def download_folder(self, syn_id, name, local_path):
try:
full_path = os.path.join(local_path, name)
print('Folder: {0} -> {1}'.format(syn_id, full_path))
self.ensure_dirs(full_path)
self.download_children(syn_id, full_path)
except Exception as ex:
logging.exception(ex)
def download_file(self, syn_id, name, local_path):
try:
full_path = os.path.join(local_path, name)
print('File : {0} -> {1}'.format(syn_id, full_path))
self._synapse_client.get(syn_id,
downloadFile=True,
downloadLocation=local_path,
ifcollision='overwrite.local')
except Exception as ex:
logging.exception(ex)
| [] | [] | [
"SYNAPSE_PASSWORD",
"SYNAPSE_USERNAME"
] | [] | ["SYNAPSE_PASSWORD", "SYNAPSE_USERNAME"] | python | 2 | 0 | |
mtslogger/main.py | import logging
import os
import sys
from colorama import Back, Fore, Style
from datetime import datetime
class Logger(logging.Logger):
"""Encapsulate the logging methods.
This is the standard logger for use with this package. It is in its basic
stage, but it is functional. The basic usage is
```
import mts_logger
logger = mts_logger.Logger('info')
logger.warning('What is going on here?')
```
"""
# these are the variables that are better off left alone
message_colors = {
'error': Fore.RED,
'warning': Fore.LIGHTYELLOW_EX,
'info': Fore.LIGHTCYAN_EX,
'debug': Back.LIGHTYELLOW_EX + Fore.BLACK
}
modes = ['error', 'warning', 'info', 'debug']
outputs = {
'error': sys.stderr,
'file': 'file',
'out': sys.stdout
}
def __init__(self, name: str,
mode: str = 'warning', output: str = 'out', log_file: str = '', use_error: bool = True):
"""Initialize the object with the desired mode.
Mode can be any of the values as seen in the `modes` variable.
Parameters
----------
name : str
mode : str
The default value is warning so that error and warning are displayed.
log_file : str
The file path and name to use for writing.
output : str
The output method (as indexed in outputs).
use_error : bool
Whether the messages should go to stderr or stdout when out is the output value.
"""
super().__init__(name)
if mode in self.modes:
self.mode = mode
else:
self.mode = 'warning'
if log_file != '':
self.log_file = log_file
self.output = output
self.use_error = use_error
def log(self, message: str, *args, **kwargs):
"""Log a message with the current log level.
This method can be used to log an entry no matter the mode because it
logs using the current mode.
Parameters
----------
message : str
The message to log.
"""
switcher = {
'debug': self.debug,
'error': self.error,
'info': self.info,
'warning': self.warning
}
function_to_call = switcher.get(self.mode, self.info)
# noinspection PyArgumentList
function_to_call(message)
def debug(self, message: str, *args, **kwargs):
"""Log the message at the debug log level.
Parameters
----------
message : str
The message to log.
"""
self.write('debug', message)
def error(self, message: str, *args, **kwargs):
"""Log the message at the error log level.
Parameters
----------
message : str
The message to log.
"""
self.write('error', message)
def info(self, message: str, *args, **kwargs):
"""Log the message at the info log level.
Parameters
----------
message : str
The message to log.
"""
self.write('info', message)
def warning(self, message: str, *args, **kwargs):
"""Log the message at the warning log level.
Parameters
----------
message : str
The message to log.
"""
self.write('warning', message)
def write(self, level: str, message: str):
"""Write the actual message using the given log level.
This checks the position of the log level and the current mode and
writes the log if and only if the log level is less than or equal to the
current mode level. From there, it determines if the error should be
written to stdout or stderr. After that, it checks for a file and writes
the message to the desired output.
Parameters
----------
level : str
The log level for writing. This determines the color and might
determine the output.
message : str
The message to log.
"""
mode_position = self.modes.index(self.mode)
log_position = self.modes.index(level)
if log_position <= mode_position:
if level == 'error' and self.output == 'out' and self.use_error:
# switch to stderr when output is out and use error is true
output = self.outputs['error']
else:
if self.output not in self.outputs:
# in case output is incorrect, set to the default
self.output = 'out'
output = self.outputs[self.output]
date = datetime.now().isoformat(' ', 'seconds')
if output == 'file':
with open(self.log_file, 'a') as fileObject:
prefix = f'{date} {level}: '
print(prefix + message, file=fileObject)
else:
print(date + ' ' + self.message_colors[level] + message + Style.RESET_ALL, file=output)
def get_logger(
name,
mode: str = None, output: str = None, log_file: str = None, use_error: bool = None) -> Logger:
"""Gets the logger with the specified arguments.
Parameters
----------
name : str
mode : str
output : str
log_file : str
use_error : bool
Returns
-------
Logger
"""
# use the environment variables or default values if the arguments are None
if mode is None:
mode = os.environ.get('log_level') if os.environ.get('log_level') is not None else 'warning'
if output is None:
output = os.environ.get('log_output') if os.environ.get('log_output') is not None else 'out'
if log_file is None:
if os.environ.get('log_file') is not None:
log_file = os.environ.get('log_file')
if use_error is None:
use_error = os.environ.get('log_user_error') if os.environ.get('log_user_error') is not None else True
return Logger(name, mode, output, log_file, use_error)
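# Illustrative usage sketch (assumes none of the log_* environment variables are set, so the
# explicit arguments below take effect):
#   logger = get_logger('app', mode='debug', output='out')
#   logger.info('application started')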
| [] | [] | [
"log_user_error",
"log_file",
"log_output",
"log_level"
] | [] | ["log_user_error", "log_file", "log_output", "log_level"] | python | 4 | 0 | |
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/KubernetesResourceManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.kubernetes;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.GlobalConfiguration;
import org.apache.flink.kubernetes.configuration.KubernetesConfigOptions;
import org.apache.flink.kubernetes.kubeclient.FlinkKubeClient;
import org.apache.flink.kubernetes.kubeclient.KubeClientFactory;
import org.apache.flink.kubernetes.kubeclient.TaskManagerPodParameter;
import org.apache.flink.kubernetes.kubeclient.resources.KubernetesPod;
import org.apache.flink.kubernetes.taskmanager.KubernetesTaskExecutorRunner;
import org.apache.flink.kubernetes.utils.Constants;
import org.apache.flink.kubernetes.utils.KubernetesUtils;
import org.apache.flink.runtime.clusterframework.ApplicationStatus;
import org.apache.flink.runtime.clusterframework.BootstrapTools;
import org.apache.flink.runtime.clusterframework.ContaineredTaskManagerParameters;
import org.apache.flink.runtime.clusterframework.types.ResourceID;
import org.apache.flink.runtime.clusterframework.types.ResourceProfile;
import org.apache.flink.runtime.entrypoint.ClusterInformation;
import org.apache.flink.runtime.entrypoint.parser.CommandLineOptions;
import org.apache.flink.runtime.heartbeat.HeartbeatServices;
import org.apache.flink.runtime.highavailability.HighAvailabilityServices;
import org.apache.flink.runtime.metrics.groups.ResourceManagerMetricGroup;
import org.apache.flink.runtime.resourcemanager.ActiveResourceManager;
import org.apache.flink.runtime.resourcemanager.JobLeaderIdService;
import org.apache.flink.runtime.resourcemanager.ResourceManager;
import org.apache.flink.runtime.resourcemanager.exceptions.ResourceManagerException;
import org.apache.flink.runtime.resourcemanager.slotmanager.SlotManager;
import org.apache.flink.runtime.rpc.FatalErrorHandler;
import org.apache.flink.runtime.rpc.RpcService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nullable;
import java.io.File;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
/**
* Kubernetes specific implementation of the {@link ResourceManager}.
*/
public class KubernetesResourceManager extends ActiveResourceManager<KubernetesWorkerNode>
implements FlinkKubeClient.PodCallbackHandler {
private static final Logger LOG = LoggerFactory.getLogger(KubernetesResourceManager.class);
/** The taskmanager pod name pattern is {clusterId}-{taskmanager}-{attemptId}-{podIndex}. */
private static final String TASK_MANAGER_POD_FORMAT = "%s-taskmanager-%d-%d";
private final Map<ResourceID, KubernetesWorkerNode> workerNodes = new HashMap<>();
private final double defaultCpus;
/** When ResourceManager failover, the max attempt should recover. */
private long currentMaxAttemptId = 0;
/** Current max pod index. When creating a new pod, it should increase one. */
private long currentMaxPodId = 0;
private final String clusterId;
private final FlinkKubeClient kubeClient;
private final ContaineredTaskManagerParameters taskManagerParameters;
private final List<String> taskManagerStartCommand;
/** The number of pods requested, but not yet granted. */
private int numPendingPodRequests = 0;
public KubernetesResourceManager(
RpcService rpcService,
String resourceManagerEndpointId,
ResourceID resourceId,
Configuration flinkConfig,
HighAvailabilityServices highAvailabilityServices,
HeartbeatServices heartbeatServices,
SlotManager slotManager,
JobLeaderIdService jobLeaderIdService,
ClusterInformation clusterInformation,
FatalErrorHandler fatalErrorHandler,
ResourceManagerMetricGroup resourceManagerMetricGroup) {
super(
flinkConfig,
System.getenv(),
rpcService,
resourceManagerEndpointId,
resourceId,
highAvailabilityServices,
heartbeatServices,
slotManager,
jobLeaderIdService,
clusterInformation,
fatalErrorHandler,
resourceManagerMetricGroup);
this.clusterId = flinkConfig.getString(KubernetesConfigOptions.CLUSTER_ID);
this.defaultCpus = taskExecutorResourceSpec.getCpuCores().getValue().doubleValue();
this.kubeClient = createFlinkKubeClient();
this.taskManagerParameters =
ContaineredTaskManagerParameters.create(flinkConfig, taskExecutorResourceSpec, numSlotsPerTaskManager);
this.taskManagerStartCommand = getTaskManagerStartCommand();
}
@Override
protected Configuration loadClientConfiguration() {
return GlobalConfiguration.loadConfiguration();
}
@Override
protected void initialize() throws ResourceManagerException {
recoverWorkerNodesFromPreviousAttempts();
kubeClient.watchPodsAndDoCallback(getTaskManagerLabels(), this);
}
@Override
public CompletableFuture<Void> onStop() {
// shut down all components
Throwable exception = null;
try {
kubeClient.close();
} catch (Throwable t) {
exception = t;
}
return getStopTerminationFutureOrCompletedExceptionally(exception);
}
@Override
protected void internalDeregisterApplication(ApplicationStatus finalStatus, @Nullable String diagnostics) {
LOG.info(
"Stopping kubernetes cluster, clusterId: {}, diagnostics: {}",
clusterId,
diagnostics == null ? "" : diagnostics);
kubeClient.stopAndCleanupCluster(clusterId);
}
@Override
public Collection<ResourceProfile> startNewWorker(ResourceProfile resourceProfile) {
LOG.info("Starting new worker with resource profile, {}", resourceProfile);
if (!resourceProfilesPerWorker.iterator().next().isMatching(resourceProfile)) {
return Collections.emptyList();
}
requestKubernetesPod();
return resourceProfilesPerWorker;
}
@Override
protected KubernetesWorkerNode workerStarted(ResourceID resourceID) {
return workerNodes.get(resourceID);
}
@Override
public boolean stopWorker(final KubernetesWorkerNode worker) {
LOG.info("Stopping Worker {}.", worker.getResourceID());
workerNodes.remove(worker.getResourceID());
try {
kubeClient.stopPod(worker.getResourceID().toString());
} catch (Exception e) {
kubeClient.handleException(e);
return false;
}
return true;
}
@Override
public void onAdded(List<KubernetesPod> pods) {
runAsync(() -> {
for (KubernetesPod pod : pods) {
if (numPendingPodRequests > 0) {
numPendingPodRequests--;
final KubernetesWorkerNode worker = new KubernetesWorkerNode(new ResourceID(pod.getName()));
workerNodes.putIfAbsent(worker.getResourceID(), worker);
}
log.info("Received new TaskManager pod: {} - Remaining pending pod requests: {}",
pod.getName(), numPendingPodRequests);
}
});
}
@Override
public void onModified(List<KubernetesPod> pods) {
runAsync(() -> pods.forEach(this::removePodIfTerminated));
}
@Override
public void onDeleted(List<KubernetesPod> pods) {
runAsync(() -> pods.forEach(this::removePodIfTerminated));
}
@Override
public void onError(List<KubernetesPod> pods) {
runAsync(() -> pods.forEach(this::removePodIfTerminated));
}
@VisibleForTesting
Map<ResourceID, KubernetesWorkerNode> getWorkerNodes() {
return workerNodes;
}
private void recoverWorkerNodesFromPreviousAttempts() throws ResourceManagerException {
final List<KubernetesPod> podList = kubeClient.getPodsWithLabels(getTaskManagerLabels());
for (KubernetesPod pod : podList) {
final KubernetesWorkerNode worker = new KubernetesWorkerNode(new ResourceID(pod.getName()));
workerNodes.put(worker.getResourceID(), worker);
final long attempt = worker.getAttempt();
if (attempt > currentMaxAttemptId) {
currentMaxAttemptId = attempt;
}
}
log.info("Recovered {} pods from previous attempts, current attempt id is {}.",
workerNodes.size(),
++currentMaxAttemptId);
}
private void requestKubernetesPod() {
numPendingPodRequests++;
log.info("Requesting new TaskManager pod with <{},{}>. Number pending requests {}.",
defaultMemoryMB,
defaultCpus,
numPendingPodRequests);
final String podName = String.format(
TASK_MANAGER_POD_FORMAT,
clusterId,
currentMaxAttemptId,
++currentMaxPodId);
final HashMap<String, String> env = new HashMap<>();
env.put(Constants.ENV_FLINK_POD_NAME, podName);
env.putAll(taskManagerParameters.taskManagerEnv());
final TaskManagerPodParameter parameter = new TaskManagerPodParameter(
podName,
taskManagerStartCommand,
defaultMemoryMB,
defaultCpus,
env);
log.info("TaskManager {} will be started with {}.", podName, taskExecutorResourceSpec);
kubeClient.createTaskManagerPod(parameter);
}
/**
* Request new pod if pending pods cannot satisfy pending slot requests.
*/
private void requestKubernetesPodIfRequired() {
final int requiredTaskManagerSlots = getNumberRequiredTaskManagerSlots();
final int pendingTaskManagerSlots = numPendingPodRequests * numSlotsPerTaskManager;
if (requiredTaskManagerSlots > pendingTaskManagerSlots) {
requestKubernetesPod();
}
}
private void removePodIfTerminated(KubernetesPod pod) {
if (pod.isTerminated()) {
kubeClient.stopPod(pod.getName());
final KubernetesWorkerNode kubernetesWorkerNode = workerNodes.remove(new ResourceID(pod.getName()));
if (kubernetesWorkerNode != null) {
requestKubernetesPodIfRequired();
}
}
}
private List<String> getTaskManagerStartCommand() {
final String confDir = flinkConfig.getString(KubernetesConfigOptions.FLINK_CONF_DIR);
final boolean hasLogback = new File(confDir, Constants.CONFIG_FILE_LOGBACK_NAME).exists();
final boolean hasLog4j = new File(confDir, Constants.CONFIG_FILE_LOG4J_NAME).exists();
final String logDir = flinkConfig.getString(KubernetesConfigOptions.FLINK_LOG_DIR);
final String mainClassArgs = "--" + CommandLineOptions.CONFIG_DIR_OPTION.getLongOpt() + " " +
flinkConfig.getString(KubernetesConfigOptions.FLINK_CONF_DIR) + " " +
BootstrapTools.getDynamicProperties(flinkClientConfig, flinkConfig);
final String command = KubernetesUtils.getTaskManagerStartCommand(
flinkConfig,
taskManagerParameters,
confDir,
logDir,
hasLogback,
hasLog4j,
KubernetesTaskExecutorRunner.class.getCanonicalName(),
mainClassArgs);
return Arrays.asList("/bin/bash", "-c", command);
}
/**
* Get task manager labels for the current Flink cluster. They could be used to watch the pods status.
*
* @return Task manager labels.
*/
private Map<String, String> getTaskManagerLabels() {
final Map<String, String> labels = new HashMap<>();
labels.put(Constants.LABEL_TYPE_KEY, Constants.LABEL_TYPE_NATIVE_TYPE);
labels.put(Constants.LABEL_APP_KEY, clusterId);
labels.put(Constants.LABEL_COMPONENT_KEY, Constants.LABEL_COMPONENT_TASK_MANAGER);
return labels;
}
protected FlinkKubeClient createFlinkKubeClient() {
return KubeClientFactory.fromConfiguration(flinkConfig);
}
@Override
protected double getCpuCores(Configuration configuration) {
return flinkConfig.getDouble(KubernetesConfigOptions.TASK_MANAGER_CPU, numSlotsPerTaskManager);
}
}
| [] | [] | [] | [] | [] | java | 0 | 0 | |
CartPole-basic.py | # OpenGym CartPole-v0
# -------------------
#
# This code demonstrates use of a basic Q-network (without target network)
# to solve OpenGym CartPole-v0 problem.
#
# Made as part of blog series Let's make a DQN, available at:
# https://jaromiru.com/2016/10/03/lets-make-a-dqn-implementation/
#
# author: Jaromir Janisch, 2016
#--- enable this to run on GPU
# import os
# os.environ['THEANO_FLAGS'] = "device=gpu,floatX=float32"
import random, numpy, math, gym
#-------------------- BRAIN ---------------------------
from keras.models import Sequential
from keras.layers import *
from keras.optimizers import *
class Brain:
def __init__(self, stateCnt, actionCnt):
self.stateCnt = stateCnt # number of states.
self.actionCnt = actionCnt # number of actions.
self.model = self._createModel()
# self.model.load_weights("cartpole-basic.h5")
def _createModel(self):
model = Sequential()
        model.add(Dense(output_dim=64, activation='relu', input_dim=self.stateCnt))
        model.add(Dense(output_dim=self.actionCnt, activation='linear'))
opt = RMSprop(lr=0.00025)
model.compile(loss='mse', optimizer=opt)
return model
def train(self, x, y, epoch=1, verbose=0):
self.model.fit(x, y, batch_size=64, nb_epoch=epoch, verbose=verbose)
def predict(self, s):
return self.model.predict(s) #s is the state vector of the RL model.
def predictOne(self, s):
return self.predict(s.reshape(1, self.stateCnt)).flatten() # predict using a one-dimensional state vector
#-------------------- MEMORY --------------------------
class Memory: # stored as ( s, a, r, s_ )
samples = [] # create samples for replay memory.
def __init__(self, capacity):
self.capacity = capacity
def add(self, sample):
self.samples.append(sample)
if len(self.samples) > self.capacity:
self.samples.pop(0) # release the first data observation when running out of memory.
def sample(self, n):
n = min(n, len(self.samples))
return random.sample(self.samples, n) # randomly sample n observations from samples.
#-------------------- AGENT ---------------------------
MEMORY_CAPACITY = 100000
BATCH_SIZE = 64
GAMMA = 0.99
MAX_EPSILON = 1
MIN_EPSILON = 0.01
LAMBDA = 0.001 # speed of decay
class Agent:
steps = 0
epsilon = MAX_EPSILON
def __init__(self, stateCnt, actionCnt):
self.stateCnt = stateCnt
self.actionCnt = actionCnt
self.brain = Brain(stateCnt, actionCnt)
self.memory = Memory(MEMORY_CAPACITY)
def act(self, s):
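        # epsilon-greedy: explore with a random action with probability epsilon,
        # otherwise act greedily on the Q-values predicted by the network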
if random.random() < self.epsilon:
return random.randint(0, self.actionCnt-1)
else:
return numpy.argmax(self.brain.predictOne(s))
def observe(self, sample): # in (s, a, r, s_) format
self.memory.add(sample)
        # slowly decrease Epsilon based on our experience
self.steps += 1
self.epsilon = MIN_EPSILON + (MAX_EPSILON - MIN_EPSILON) * math.exp(-LAMBDA * self.steps)
def replay(self):
batch = self.memory.sample(BATCH_SIZE)
batchLen = len(batch)
no_state = numpy.zeros(self.stateCnt)
states = numpy.array([ o[0] for o in batch ])
states_ = numpy.array([ (no_state if o[3] is None else o[3]) for o in batch ])
p = self.brain.predict(states)
p_ = self.brain.predict(states_)
x = numpy.zeros((batchLen, self.stateCnt))
y = numpy.zeros((batchLen, self.actionCnt))
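        # Build the training targets: for each sampled transition keep the network's current
        # predictions, and overwrite the taken action's value with r (terminal state) or
        # r + GAMMA * max_a' Q(s', a') (Bellman backup)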
for i in range(batchLen):
o = batch[i]
s = o[0]; a = o[1]; r = o[2]; s_ = o[3]
t = p[i]
if s_ is None:
t[a] = r
else:
t[a] = r + GAMMA * numpy.amax(p_[i])
x[i] = s
y[i] = t
self.brain.train(x, y)
#-------------------- ENVIRONMENT ---------------------
class Environment:
def __init__(self, problem):
self.problem = problem
self.env = gym.make(problem)
def run(self, agent):
s = self.env.reset()
R = 0
while True:
self.env.render()
a = agent.act(s)
s_, r, done, info = self.env.step(a)
if done: # terminal state
s_ = None
agent.observe( (s, a, r, s_) )
agent.replay()
s = s_
R += r
if done:
break
print("Total reward:", R)
#-------------------- MAIN ----------------------------
PROBLEM = 'CartPole-v0'
env = Environment(PROBLEM)
stateCnt = env.env.observation_space.shape[0]
actionCnt = env.env.action_space.n
agent = Agent(stateCnt, actionCnt)
try:
while True:
env.run(agent)
finally:
agent.brain.model.save("cartpole-basic.h5")
| [] | [] | [
"THEANO_FLAGS"
] | [] | ["THEANO_FLAGS"] | python | 1 | 0 | |
citrixadc/resource_citrixadc_interface.go | package citrixadc
import (
"github.com/chiradeep/go-nitro/config/network"
"github.com/chiradeep/go-nitro/netscaler"
"github.com/hashicorp/terraform/helper/schema"
"fmt"
"log"
)
func resourceCitrixAdcInterface() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
Create: createInterfaceFunc,
Read: readInterfaceFunc,
Update: updateInterfaceFunc,
Delete: deleteInterfaceFunc,
Schema: map[string]*schema.Schema{
"autoneg": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"bandwidthhigh": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"bandwidthnormal": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"duplex": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"flowctl": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"haheartbeat": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"hamonitor": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"interface_id": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"ifalias": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"lacpkey": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"lacpmode": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"lacppriority": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"lacptimeout": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"lagtype": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"linkredundancy": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"lldpmode": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"lrsetpriority": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"mtu": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"ringsize": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"ringtype": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"speed": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"tagall": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"throughput": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"trunk": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"trunkmode": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
},
}
}
func createInterfaceFunc(d *schema.ResourceData, meta interface{}) error {
log.Printf("[DEBUG] citrixadc-provider: In createInterfaceFunc")
client := meta.(*NetScalerNitroClient).client
interfaceId := d.Get("interface_id").(string)
Interface := network.Interface{
Autoneg: d.Get("autoneg").(string),
Bandwidthhigh: d.Get("bandwidthhigh").(int),
Bandwidthnormal: d.Get("bandwidthnormal").(int),
Duplex: d.Get("duplex").(string),
Flowctl: d.Get("flowctl").(string),
Haheartbeat: d.Get("haheartbeat").(string),
Hamonitor: d.Get("hamonitor").(string),
Id: d.Get("interface_id").(string),
Ifalias: d.Get("ifalias").(string),
Lacpkey: d.Get("lacpkey").(int),
Lacpmode: d.Get("lacpmode").(string),
Lacppriority: d.Get("lacppriority").(int),
Lacptimeout: d.Get("lacptimeout").(string),
Lagtype: d.Get("lagtype").(string),
Linkredundancy: d.Get("linkredundancy").(string),
Lldpmode: d.Get("lldpmode").(string),
Lrsetpriority: d.Get("lrsetpriority").(int),
Mtu: d.Get("mtu").(int),
Ringsize: d.Get("ringsize").(int),
Ringtype: d.Get("ringtype").(string),
Speed: d.Get("speed").(string),
Tagall: d.Get("tagall").(string),
Throughput: d.Get("throughput").(int),
Trunk: d.Get("trunk").(string),
Trunkmode: d.Get("trunkmode").(string),
}
_, err := client.UpdateResource(netscaler.Interface.Type(), "", &Interface)
if err != nil {
return fmt.Errorf("Error creating Interface %s. %s", interfaceId, err.Error())
}
d.SetId(interfaceId)
err = readInterfaceFunc(d, meta)
if err != nil {
log.Printf("[ERROR] netscaler-provider: ?? we just created this Interface but we can't read it ?? %s", interfaceId)
return nil
}
return nil
}
func readInterfaceFunc(d *schema.ResourceData, meta interface{}) error {
log.Printf("[DEBUG] citrixadc-provider: In readInterfaceFunc")
client := meta.(*NetScalerNitroClient).client
interfaceId := d.Id()
log.Printf("[DEBUG] citrixadc-provider: Reading Interface state %s", interfaceId)
array, _ := client.FindAllResources(netscaler.Interface.Type())
// Iterate over the retrieved addresses to find the particular interface id
foundInterface := false
foundIndex := -1
for i, item := range array {
if item["id"] == interfaceId {
foundInterface = true
foundIndex = i
break
}
}
if !foundInterface {
log.Printf("[WARN] citrixadc-provider: Clearing interface state %s", interfaceId)
d.SetId("")
return fmt.Errorf("Could not read interface %v", interfaceId)
}
// Fallthrough
data := array[foundIndex]
d.Set("autoneg", data["autoneg"])
d.Set("bandwidthhigh", data["bandwidthhigh"])
d.Set("bandwidthnormal", data["bandwidthnormal"])
d.Set("duplex", data["actduplex"])
d.Set("flowctl", data["actflowctl"])
d.Set("haheartbeat", data["haheartbeat"])
d.Set("hamonitor", data["hamonitor"])
d.Set("interface_id", interfaceId)
d.Set("ifalias", data["ifalias"])
d.Set("lacpkey", data["lacpkey"])
d.Set("lacpmode", data["lacpmode"])
d.Set("lacppriority", data["lacppriority"])
d.Set("lacptimeout", data["lacptimeout"])
d.Set("lagtype", data["lagtype"])
d.Set("linkredundancy", data["linkredundancy"])
d.Set("lldpmode", data["lldpmode"])
d.Set("lrsetpriority", data["lrsetpriority"])
d.Set("mtu", data["mtu"])
d.Set("ringsize", data["ringsize"])
d.Set("ringtype", data["ringtype"])
d.Set("speed", data["actspeed"])
d.Set("tagall", data["tagall"])
d.Set("throughput", data["actthroughput"])
d.Set("trunk", data["trunk"])
d.Set("trunkmode", data["trunkmode"])
return nil
}
func updateInterfaceFunc(d *schema.ResourceData, meta interface{}) error {
log.Printf("[DEBUG] citrixadc-provider: In updateInterfaceFunc")
client := meta.(*NetScalerNitroClient).client
interfaceId := d.Get("interface_id").(string)
Interface := network.Interface{
Id: d.Get("interface_id").(string),
}
hasChange := false
if d.HasChange("autoneg") {
log.Printf("[DEBUG] citrixadc-provider: Autoneg has changed for Interface %s, starting update", interfaceId)
Interface.Autoneg = d.Get("autoneg").(string)
hasChange = true
}
if d.HasChange("bandwidthhigh") {
log.Printf("[DEBUG] citrixadc-provider: Bandwidthhigh has changed for Interface %s, starting update", interfaceId)
Interface.Bandwidthhigh = d.Get("bandwidthhigh").(int)
hasChange = true
}
if d.HasChange("bandwidthnormal") {
log.Printf("[DEBUG] citrixadc-provider: Bandwidthnormal has changed for Interface %s, starting update", interfaceId)
Interface.Bandwidthnormal = d.Get("bandwidthnormal").(int)
hasChange = true
}
if d.HasChange("duplex") {
log.Printf("[DEBUG] citrixadc-provider: Duplex has changed for Interface %s, starting update", interfaceId)
Interface.Duplex = d.Get("duplex").(string)
hasChange = true
}
if d.HasChange("flowctl") {
log.Printf("[DEBUG] citrixadc-provider: Flowctl has changed for Interface %s, starting update", interfaceId)
Interface.Flowctl = d.Get("flowctl").(string)
hasChange = true
}
if d.HasChange("haheartbeat") {
log.Printf("[DEBUG] citrixadc-provider: Haheartbeat has changed for Interface %s, starting update", interfaceId)
Interface.Haheartbeat = d.Get("haheartbeat").(string)
hasChange = true
}
if d.HasChange("hamonitor") {
log.Printf("[DEBUG] citrixadc-provider: Hamonitor has changed for Interface %s, starting update", interfaceId)
Interface.Hamonitor = d.Get("hamonitor").(string)
hasChange = true
}
if d.HasChange("interface_id") {
log.Printf("[DEBUG] citrixadc-provider: Id has changed for Interface %s, starting update", interfaceId)
Interface.Id = d.Get("interface_id").(string)
hasChange = true
}
if d.HasChange("ifalias") {
log.Printf("[DEBUG] citrixadc-provider: Ifalias has changed for Interface %s, starting update", interfaceId)
Interface.Ifalias = d.Get("ifalias").(string)
hasChange = true
}
if d.HasChange("lacpkey") {
log.Printf("[DEBUG] citrixadc-provider: Lacpkey has changed for Interface %s, starting update", interfaceId)
Interface.Lacpkey = d.Get("lacpkey").(int)
hasChange = true
}
if d.HasChange("lacpmode") {
log.Printf("[DEBUG] citrixadc-provider: Lacpmode has changed for Interface %s, starting update", interfaceId)
Interface.Lacpmode = d.Get("lacpmode").(string)
hasChange = true
}
if d.HasChange("lacppriority") {
log.Printf("[DEBUG] citrixadc-provider: Lacppriority has changed for Interface %s, starting update", interfaceId)
Interface.Lacppriority = d.Get("lacppriority").(int)
hasChange = true
}
if d.HasChange("lacptimeout") {
log.Printf("[DEBUG] citrixadc-provider: Lacptimeout has changed for Interface %s, starting update", interfaceId)
Interface.Lacptimeout = d.Get("lacptimeout").(string)
hasChange = true
}
if d.HasChange("lagtype") {
log.Printf("[DEBUG] citrixadc-provider: Lagtype has changed for Interface %s, starting update", interfaceId)
Interface.Lagtype = d.Get("lagtype").(string)
hasChange = true
}
if d.HasChange("linkredundancy") {
log.Printf("[DEBUG] citrixadc-provider: Linkredundancy has changed for Interface %s, starting update", interfaceId)
Interface.Linkredundancy = d.Get("linkredundancy").(string)
hasChange = true
}
if d.HasChange("lldpmode") {
log.Printf("[DEBUG] citrixadc-provider: Lldpmode has changed for Interface %s, starting update", interfaceId)
Interface.Lldpmode = d.Get("lldpmode").(string)
hasChange = true
}
if d.HasChange("lrsetpriority") {
log.Printf("[DEBUG] citrixadc-provider: Lrsetpriority has changed for Interface %s, starting update", interfaceId)
Interface.Lrsetpriority = d.Get("lrsetpriority").(int)
hasChange = true
}
if d.HasChange("mtu") {
log.Printf("[DEBUG] citrixadc-provider: Mtu has changed for Interface %s, starting update", interfaceId)
Interface.Mtu = d.Get("mtu").(int)
hasChange = true
}
if d.HasChange("ringsize") {
log.Printf("[DEBUG] citrixadc-provider: Ringsize has changed for Interface %s, starting update", interfaceId)
Interface.Ringsize = d.Get("ringsize").(int)
hasChange = true
}
if d.HasChange("ringtype") {
log.Printf("[DEBUG] citrixadc-provider: Ringtype has changed for Interface %s, starting update", interfaceId)
Interface.Ringtype = d.Get("ringtype").(string)
hasChange = true
}
if d.HasChange("speed") {
log.Printf("[DEBUG] citrixadc-provider: Speed has changed for Interface %s, starting update", interfaceId)
Interface.Speed = d.Get("speed").(string)
hasChange = true
}
if d.HasChange("tagall") {
log.Printf("[DEBUG] citrixadc-provider: Tagall has changed for Interface %s, starting update", interfaceId)
Interface.Tagall = d.Get("tagall").(string)
hasChange = true
}
if d.HasChange("throughput") {
log.Printf("[DEBUG] citrixadc-provider: Throughput has changed for Interface %s, starting update", interfaceId)
Interface.Throughput = d.Get("throughput").(int)
hasChange = true
}
if d.HasChange("trunk") {
log.Printf("[DEBUG] citrixadc-provider: Trunk has changed for Interface %s, starting update", interfaceId)
Interface.Trunk = d.Get("trunk").(string)
hasChange = true
}
if d.HasChange("trunkmode") {
log.Printf("[DEBUG] citrixadc-provider: Trunkmode has changed for Interface %s, starting update", interfaceId)
Interface.Trunkmode = d.Get("trunkmode").(string)
hasChange = true
}
if hasChange {
_, err := client.UpdateResource(netscaler.Interface.Type(), "", &Interface)
if err != nil {
return fmt.Errorf("Error updating Interface %s. %s", interfaceId, err.Error())
}
}
return readInterfaceFunc(d, meta)
}
func deleteInterfaceFunc(d *schema.ResourceData, meta interface{}) error {
log.Printf("[DEBUG] citrixadc-provider: In deleteInterfaceFunc")
// We cannot really delete the interface.
// Hardware changes can only delete interfaces
// We just delete it from the local state
d.SetId("")
return nil
}
| [] | [] | [] | [] | [] | go | null | null | null |
code/register/register_model.py | """
Copyright (C) Microsoft Corporation. All rights reserved.
Microsoft Corporation (“Microsoft”) grants you a nonexclusive, perpetual,
royalty-free right to use, copy, and modify the software code provided by us
("Software Code"). You may not sublicense the Software Code or any use of it
(except to your affiliates and to vendors to perform work on your behalf)
through distribution, network access, service agreement, lease, rental, or
otherwise. This license does not purport to express any claim of ownership over
data you may have shared with Microsoft in the creation of the Software Code.
Unless applicable law gives you more rights, Microsoft reserves all other
rights not expressly granted herein, whether by implication, estoppel or
otherwise.
THE SOFTWARE CODE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
MICROSOFT OR ITS LICENSORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THE SOFTWARE CODE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import os
import sys
import argparse
import traceback
from azureml.core import Run, Experiment, Workspace
from azureml.core.model import Model as AMLModel
def main():
run = Run.get_context()
if (run.id.startswith('OfflineRun')):
from dotenv import load_dotenv
# For local development, set values in this section
load_dotenv()
workspace_name = os.environ.get("WORKSPACE_NAME")
experiment_name = os.environ.get("EXPERIMENT_NAME")
resource_group = os.environ.get("RESOURCE_GROUP")
subscription_id = os.environ.get("SUBSCRIPTION_ID")
build_id = os.environ.get('BUILD_BUILDID')
# run_id useful to query previous runs
run_id = "bd184a18-2ac8-4951-8e78-e290bef3b012"
aml_workspace = Workspace.get(
name=workspace_name,
subscription_id=subscription_id,
resource_group=resource_group
)
ws = aml_workspace
exp = Experiment(ws, experiment_name)
else:
ws = run.experiment.workspace
exp = run.experiment
run_id = 'amlcompute'
parser = argparse.ArgumentParser("register")
parser.add_argument(
"--build_id",
type=str,
help="The Build ID of the build triggering this pipeline run",
)
parser.add_argument(
"--run_id",
type=str,
help="Training run ID",
)
parser.add_argument(
"--model_name",
type=str,
help="Name of the Model",
default="sklearn_regression_model.pkl",
)
args = parser.parse_args()
if (args.build_id is not None):
build_id = args.build_id
if (args.run_id is not None):
run_id = args.run_id
if (run_id == 'amlcompute'):
run_id = run.parent.id
model_name = args.model_name
if (build_id is None):
register_aml_model(model_name, exp, run_id)
else:
run.tag("BuildId", value=build_id)
register_aml_model(model_name, exp, run_id, build_id)
def model_already_registered(model_name, exp, run_id):
model_list = AMLModel.list(exp.workspace, name=model_name, run_id=run_id)
if len(model_list) >= 1:
e = ("Model name:", model_name, "in workspace",
exp.workspace, "with run_id ", run_id, "is already registered.")
print(e)
raise Exception(e)
else:
print("Model is not registered for this run.")
def register_aml_model(model_name, exp, run_id, build_id: str = 'none'):
try:
if (build_id != 'none'):
model_already_registered(model_name, exp, run_id)
run = Run(experiment=exp, run_id=run_id)
tagsValue = {"area": "diabetes", "type": "regression",
"BuildId": build_id, "run_id": run_id,
"experiment_name": exp.name}
else:
run = Run(experiment=exp, run_id=run_id)
if (run is not None):
tagsValue = {"area": "diabetes", "type": "regression",
"run_id": run_id, "experiment_name": exp.name}
else:
print("A model run for experiment", exp.name,
"matching properties run_id =", run_id,
"was not found. Skipping model registration.")
sys.exit(0)
model = run.register_model(model_name=model_name,
model_path="./outputs/" + model_name,
tags=tagsValue)
os.chdir("..")
print(
"Model registered: {} \nModel Description: {} "
"\nModel Version: {}".format(
model.name, model.description, model.version
)
)
except Exception:
traceback.print_exc(limit=None, file=None, chain=True)
print("Model registration failed")
raise
if __name__ == '__main__':
main()
| [] | [] | [
"RESOURCE_GROUP",
"WORKSPACE_NAME",
"EXPERIMENT_NAME",
"SUBSCRIPTION_ID",
"BUILD_BUILDID"
] | [] | ["RESOURCE_GROUP", "WORKSPACE_NAME", "EXPERIMENT_NAME", "SUBSCRIPTION_ID", "BUILD_BUILDID"] | python | 5 | 0 | |
docs/sphinx/make.py | #! python3
import os
import sys
import subprocess
import shutil
from os import path
class SphinxBuild(object):
'''Sphinx Build Script'''
def __init__(self):
'''Class Init'''
self.SOURCEDIR = path.abspath('source')
self.BUILDDIR = path.abspath('build')
self.SPHINXBUILD = 'sphinx-build'
self.DOXYGENDIR = path.abspath('../doxygen')
if 'SOURCEDIR' in os.environ:
self.SOURCEDIR = os.environ.get('SOURCEDIR')
if 'BUILDDIR' in os.environ:
self.BUILDDIR = os.environ.get('BUILDDIR')
if 'SPHINXBUILD' in os.environ:
self.SPHINXBUILD = os.environ.get('SPHINXBUILD')
def check_exe(self):
'''Check to make sure sphinx-build exists on the path'''
if not shutil.which(self.SPHINXBUILD):
print("")
print("The 'sphinx-build' command was not found. Make sure you have Sphinx")
print("installed, then set the SPHINXBUILD environment variable to point")
print("to the full path of the 'sphinx-build' executable. Alternatively you")
print("may add the Sphinx directory to PATH.")
print("")
print("If you don't have Sphinx installed, grab it from http://sphinx-doc.org/")
return False
return True
def usage(self):
'''Print the Usage'''
print('sphinx python build wrapper script (make.py)')
cmdopts = [self.SPHINXBUILD, '-M', 'help', self.SOURCEDIR, self.BUILDDIR]
self.run_cmd(cmdopts, '.')
print("\nAdditional commands include - make.py <target> where <target> is one of")
abs_build_path = path.abspath(self.BUILDDIR)
print(" clean to clean the output directory: " + abs_build_path)
print(" serve Serve the site out on a port for a demo / development")
print(" doxygen to build Doxygen related files")
print(" build does a clean followed by a html build")
print(" publish publish the site to the gh-pages branch")
def run_cmd(self, cmdarray, workingdir, captureout=False):
'''Run a command'''
stdout = stderr = None
if captureout:
stdout = stderr = subprocess.PIPE
proc = subprocess.Popen(cmdarray, cwd=workingdir, stdout=stdout, stderr=stderr, universal_newlines=True)
proc_out, proc_err = proc.communicate()
if proc.returncode != 0:
raise RuntimeError('Failure to run command')
        return proc_out, proc_err
def emptydir(self, top):
'''Empty a directory'''
if(top == '/' or top == "\\"): return
else:
for root, dirs, files in os.walk(top, topdown=False):
for name in files:
os.remove(path.join(root, name))
for name in dirs:
os.rmdir(path.join(root, name))
def clean(self):
'''Clean the build directory'''
abs_build_path = path.abspath(self.BUILDDIR)
self.emptydir(abs_build_path)
print ("Clean finished")
def serve(self):
'''Serve the site out locally for a demo / development'''
self.clean()
htmldir = path.join(self.BUILDDIR, 'html')
cmdopts = ['sphinx-autobuild', '-b', 'html', self.SOURCEDIR, htmldir]
self.run_cmd(cmdopts, '.')
print("Server Closed.")
def run_doxygen(self):
'''Regenerate the xml files exported from doxygen, these are used by sphinx'''
self.run_cmd(['doxygen'], self.DOXYGENDIR)
# Filter the xml for the types of parameters
self.run_cmd(['python', 'filter_types.py'], self.DOXYGENDIR)
def run_sphinxbuild(self, args):
'''Assuming one of the commands here isn't needed, instead pass the arguments to sphinx-build'''
cmdopts = [self.SPHINXBUILD, '-M'] + args + [self.SOURCEDIR, self.BUILDDIR]
self.run_cmd(cmdopts, '.')
def publish(self):
'''Publish / upload files to github pages'''
htmldir = path.join(self.BUILDDIR, 'html')
cmdopts = ['ghp-import', '-p', htmldir]
self.run_cmd(cmdopts, '.')
def main(self):
if not self.check_exe():
return
if len(sys.argv) < 2:
self.usage()
return
if sys.argv[1] == 'clean':
self.clean()
return
if sys.argv[1] == 'serve':
self.serve()
return
if sys.argv[1] == 'doxygen':
self.run_doxygen()
return
if sys.argv[1] == 'build':
self.clean()
self.run_sphinxbuild(['html'])
return
if sys.argv[1] == 'publish':
self.publish()
return
# If no command here is specified then just pass the args to sphinx-build
self.run_sphinxbuild(sys.argv[1:])
if __name__ == "__main__":
SphinxBuild().main()
| [] | [] | [
"SOURCEDIR",
"BUILDDIR",
"SPHINXBUILD"
] | [] | ["SOURCEDIR", "BUILDDIR", "SPHINXBUILD"] | python | 3 | 0 | |
database/database.go | package database
import (
"context"
"fmt"
"os"
"github.com/sthorer/api/ent"
)
type Database struct {
*ent.Client
}
const (
defaultDatabaseDriver = "sqlite3"
defaultDatabaseURL = "db.sqlite?_fk=1"
)
func Initialize() (*Database, error) {
databaseDriver := os.Getenv("STHORER_DB_DRIVER")
if databaseDriver == "" {
databaseDriver = defaultDatabaseDriver
}
databaseURL := os.Getenv("STHORER_DB_URL")
if databaseURL == "" {
databaseURL = defaultDatabaseURL
}
client, err := ent.Open(databaseDriver, databaseURL)
if err != nil {
return nil, fmt.Errorf("failed opening connection to %s: %v", databaseDriver, err)
}
if err := client.Schema.Create(context.Background()); err != nil {
return nil, fmt.Errorf("failed creating schema resources: %v", err)
}
return &Database{Client: client}, err
}
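// Illustrative usage sketch (assumes the caller imports the matching SQL driver, e.g. sqlite3):
//   db, err := Initialize()
//   if err != nil {
//       log.Fatal(err)
//   }
//   defer db.Close()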
| [
"\"STHORER_DB_DRIVER\"",
"\"STHORER_DB_URL\""
] | [] | [
"STHORER_DB_URL",
"STHORER_DB_DRIVER"
] | [] | ["STHORER_DB_URL", "STHORER_DB_DRIVER"] | go | 2 | 0 | |
examples/edgefunctions/function/hello_world.py | import os
def handler(req, context):
name = os.getenv('NAME') or 'Unknown'
return {
'body': f'Hello, {name}!',
'statusCode': 200
}
| [] | [] | [
"NAME"
] | [] | ["NAME"] | python | 1 | 0 | |
sage/manage.py | ##
# @package manage
# This code starts the server and the application.
# The additional 'initial' argument starts processing of interrupted
# files.
# @author amarjeet kapoor
#
#
#!/usr/bin/env python
import os
import sys,threading
import initial_file
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sage.settings")
from django.core.management import execute_from_command_line
##
    # code to fire a thread to process interrupted files
    if len(sys.argv) > 1 and sys.argv[1] == 'initial':
#remove our argument so that argv can be send to django
sys.argv.remove('initial')
##
# starting thread
# @args initial_file.pdfemail()
thread = threading.Thread(target=initial_file.pdfemail,args=())
thread.daemon = True
thread.start()
execute_from_command_line(sys.argv)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
cmd/release/release.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Command release builds a Go release.
package main
import (
"archive/tar"
"archive/zip"
"bufio"
"bytes"
"compress/gzip"
"context"
"flag"
"fmt"
gobuild "go/build"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
"golang.org/x/build"
"golang.org/x/build/buildenv"
"golang.org/x/build/buildlet"
"golang.org/x/build/dashboard"
)
var (
target = flag.String("target", "", "If specified, build specific target platform ('linux-amd64')")
watch = flag.Bool("watch", false, "Watch the build. Only compatible with -target")
rev = flag.String("rev", "", "Go revision to build")
toolsRev = flag.String("tools", "", "Tools revision to build")
tourRev = flag.String("tour", "master", "Tour revision to include")
blogRev = flag.String("blog", "master", "Blog revision to include")
netRev = flag.String("net", "master", "Net revision to include")
version = flag.String("version", "", "Version string (go1.5.2)")
user = flag.String("user", username(), "coordinator username, appended to 'user-'")
skipTests = flag.Bool("skip_tests", false, "skip tests; run make.bash instead of all.bash (only use if you ran trybots first)")
uploadMode = flag.Bool("upload", false, "Upload files (exclusive to all other flags)")
uploadKick = flag.String("edge_kick_command", "", "Command to run to kick off an edge cache update")
)
var (
coordClient *buildlet.CoordinatorClient
buildEnv *buildenv.Environment
)
func main() {
flag.Parse()
rand.Seed(time.Now().UnixNano())
if *uploadMode {
if err := upload(flag.Args()); err != nil {
log.Fatal(err)
}
return
}
if err := findReleaselet(); err != nil {
log.Fatalf("couldn't find releaselet source: %v", err)
}
if *rev == "" {
log.Fatal("must specify -rev flag")
}
if *toolsRev == "" {
log.Fatal("must specify -tools flag")
}
if *version == "" {
log.Fatal("must specify -version flag")
}
coordClient = coordinatorClient()
buildEnv = buildenv.Production
var wg sync.WaitGroup
matches := 0
for _, b := range builds {
b := b
if *target != "" && b.String() != *target {
continue
}
matches++
b.logf("Start.")
wg.Add(1)
go func() {
defer wg.Done()
if err := b.make(); err != nil {
b.logf("Error: %v", err)
} else {
b.logf("Done.")
}
}()
}
if *target != "" && matches == 0 {
log.Fatalf("no targets matched %q", *target)
}
wg.Wait()
}
var releaselet = "releaselet.go"
func findReleaselet() error {
// First try the working directory.
if _, err := os.Stat(releaselet); err == nil {
return nil
}
// Then, try to locate the release command in the workspace.
const importPath = "golang.org/x/build/cmd/release"
pkg, err := gobuild.Import(importPath, "", gobuild.FindOnly)
if err != nil {
return fmt.Errorf("finding %q: %v", importPath, err)
}
r := filepath.Join(pkg.Dir, releaselet)
if _, err := os.Stat(r); err != nil {
return err
}
releaselet = r
return nil
}
type Build struct {
OS, Arch string
Source bool
Race bool // Build race detector.
Builder string // Key for dashboard.Builders.
Goarm int // GOARM value if set.
MakeOnly bool // don't run tests; needed by cross-compile builders (s390x)
}
func (b *Build) String() string {
if b.Source {
return "src"
}
if b.Goarm != 0 {
return fmt.Sprintf("%v-%vv%vl", b.OS, b.Arch, b.Goarm)
}
return fmt.Sprintf("%v-%v", b.OS, b.Arch)
}
func (b *Build) toolDir() string { return "go/pkg/tool/" + b.OS + "_" + b.Arch }
func (b *Build) logf(format string, args ...interface{}) {
format = fmt.Sprintf("%v: %s", b, format)
log.Printf(format, args...)
}
var builds = []*Build{
{
Source: true,
Builder: "linux-amd64",
},
{
OS: "linux",
Arch: "386",
Builder: "linux-386",
},
{
OS: "linux",
Arch: "arm",
Builder: "linux-arm",
Goarm: 6, // for compatibility with all Raspberry Pi models.
// The tests take too long for the release packaging.
// Much of the time the whole buildlet times out.
MakeOnly: true,
},
{
OS: "linux",
Arch: "amd64",
Race: true,
Builder: "linux-amd64",
},
{
OS: "linux",
Arch: "arm64",
Builder: "linux-arm64-packet",
},
{
OS: "freebsd",
Arch: "386",
Builder: "freebsd-386-11_1",
},
{
OS: "freebsd",
Arch: "amd64",
Race: true,
Builder: "freebsd-amd64-11_1",
},
{
OS: "windows",
Arch: "386",
Builder: "windows-386-2008",
},
{
OS: "windows",
Arch: "amd64",
Race: true,
Builder: "windows-amd64-2008",
},
{
OS: "darwin",
Arch: "amd64",
Race: true,
Builder: "darwin-amd64-10_11",
},
{
OS: "linux",
Arch: "s390x",
MakeOnly: true,
Builder: "linux-s390x-crosscompile",
},
// TODO(bradfitz): switch this ppc64 builder to a Kubernetes
// container cross-compiling ppc64 like the s390x one? For
// now, the ppc64le builders (5) are back, so let's see if we
// can just depend on them not going away.
{
OS: "linux",
Arch: "ppc64le",
MakeOnly: true,
Builder: "linux-ppc64le-buildlet",
},
}
const (
toolsRepo = "golang.org/x/tools"
blogRepo = "golang.org/x/blog"
tourRepo = "golang.org/x/tour"
)
var toolPaths = []string{
"golang.org/x/tools/cmd/godoc",
"golang.org/x/tour/gotour",
}
var preBuildCleanFiles = []string{
".gitattributes",
".github",
".gitignore",
".hgignore",
".hgtags",
"misc/dashboard",
"misc/makerelease",
}
var postBuildCleanFiles = []string{
"VERSION.cache",
"pkg/bootstrap",
}
func (b *Build) buildlet() (*buildlet.Client, error) {
b.logf("Creating buildlet.")
bc, err := coordClient.CreateBuildlet(b.Builder)
if err != nil {
return nil, err
}
bc.SetReleaseMode(true) // disable pargzip; golang.org/issue/19052
return bc, nil
}
func (b *Build) make() error {
bc, ok := dashboard.Builders[b.Builder]
if !ok {
return fmt.Errorf("unknown builder: %v", bc)
}
var hostArch string // non-empty if we're cross-compiling (s390x)
if b.MakeOnly && bc.IsContainer() && (bc.GOARCH() != "amd64" && bc.GOARCH() != "386") {
hostArch = "amd64"
}
client, err := b.buildlet()
if err != nil {
return err
}
defer client.Close()
work, err := client.WorkDir()
if err != nil {
return err
}
// Push source to buildlet
b.logf("Pushing source to buildlet.")
const (
goDir = "go"
goPath = "gopath"
go14 = "go1.4"
)
for _, r := range []struct {
repo, rev string
}{
{"go", *rev},
{"tools", *toolsRev},
{"blog", *blogRev},
{"tour", *tourRev},
{"net", *netRev},
} {
if b.Source && r.repo != "go" {
continue
}
dir := goDir
if r.repo != "go" {
dir = goPath + "/src/golang.org/x/" + r.repo
}
tar := "https://go.googlesource.com/" + r.repo + "/+archive/" + r.rev + ".tar.gz"
if err := client.PutTarFromURL(tar, dir); err != nil {
b.logf("failed to put tarball %q into dir %q: %v", tar, dir, err)
return err
}
}
if u := bc.GoBootstrapURL(buildEnv); u != "" && !b.Source {
b.logf("Installing go1.4.")
if err := client.PutTarFromURL(u, go14); err != nil {
return err
}
}
// Write out version file.
b.logf("Writing VERSION file.")
if err := client.Put(strings.NewReader(*version), "go/VERSION", 0644); err != nil {
return err
}
b.logf("Cleaning goroot (pre-build).")
if err := client.RemoveAll(addPrefix(goDir, preBuildCleanFiles)...); err != nil {
return err
}
if b.Source {
b.logf("Skipping build.")
return b.fetchTarball(client)
}
// Set up build environment.
sep := "/"
if b.OS == "windows" {
sep = "\\"
}
env := append(bc.Env(),
"GOROOT_FINAL="+bc.GorootFinal(),
"GOROOT="+work+sep+goDir,
"GOPATH="+work+sep+goPath,
"GOBIN=",
)
if b.Goarm > 0 {
env = append(env, fmt.Sprintf("GOARM=%d", b.Goarm))
env = append(env, fmt.Sprintf("CGO_CFLAGS=-march=armv%d", b.Goarm))
env = append(env, fmt.Sprintf("CGO_LDFLAGS=-march=armv%d", b.Goarm))
}
// Execute build
b.logf("Building.")
out := new(bytes.Buffer)
script := bc.AllScript()
scriptArgs := bc.AllScriptArgs()
if *skipTests || b.MakeOnly {
script = bc.MakeScript()
scriptArgs = bc.MakeScriptArgs()
}
all := filepath.Join(goDir, script)
var execOut io.Writer = out
if *watch && *target != "" {
execOut = io.MultiWriter(out, os.Stdout)
}
remoteErr, err := client.Exec(all, buildlet.ExecOpts{
Output: execOut,
ExtraEnv: env,
Args: scriptArgs,
})
if err != nil {
return err
}
if remoteErr != nil {
return fmt.Errorf("Build failed: %v\nOutput:\n%v", remoteErr, out)
}
goCmd := path.Join(goDir, "bin/go")
if b.OS == "windows" {
goCmd += ".exe"
}
runGo := func(args ...string) error {
out := new(bytes.Buffer)
var execOut io.Writer = out
if *watch && *target != "" {
execOut = io.MultiWriter(out, os.Stdout)
}
cmdEnv := append([]string(nil), env...)
if len(args) > 0 && args[0] == "run" && hostArch != "" {
cmdEnv = setGOARCH(cmdEnv, hostArch)
}
remoteErr, err := client.Exec(goCmd, buildlet.ExecOpts{
Output: execOut,
Dir: ".", // root of buildlet work directory
Args: args,
ExtraEnv: cmdEnv,
})
if err != nil {
return err
}
if remoteErr != nil {
return fmt.Errorf("go %v: %v\n%s", strings.Join(args, " "), remoteErr, out)
}
return nil
}
if b.Race {
b.logf("Building race detector.")
if err := runGo("install", "-race", "std"); err != nil {
return err
}
}
b.logf("Building %v.", strings.Join(toolPaths, ", "))
if err := runGo(append([]string{"install"}, toolPaths...)...); err != nil {
return err
}
b.logf("Cleaning goroot (post-build).")
if err := client.RemoveAll(addPrefix(goDir, postBuildCleanFiles)...); err != nil {
return err
}
if err := client.RemoveAll(b.toolDir() + "/api"); err != nil {
return err
}
b.logf("Pushing and running releaselet.")
f, err := os.Open(releaselet)
if err != nil {
return err
}
err = client.Put(f, "releaselet.go", 0666)
f.Close()
if err != nil {
return err
}
if err := runGo("run", "releaselet.go"); err != nil {
log.Printf("releaselet failed: %v", err)
client.ListDir(".", buildlet.ListDirOpts{Recursive: true}, func(ent buildlet.DirEntry) {
log.Printf("remote: %v", ent)
})
return err
}
cleanFiles := []string{"releaselet.go", goPath, go14}
switch b.OS {
case "darwin":
filename := *version + "." + b.String() + ".pkg"
if err := b.fetchFile(client, filename, "pkg"); err != nil {
return err
}
cleanFiles = append(cleanFiles, "pkg")
case "windows":
filename := *version + "." + b.String() + ".msi"
if err := b.fetchFile(client, filename, "msi"); err != nil {
return err
}
cleanFiles = append(cleanFiles, "msi")
}
// Need to delete everything except the final "go" directory,
// as we make the tarball relative to workdir.
b.logf("Cleaning workdir.")
if err := client.RemoveAll(cleanFiles...); err != nil {
return err
}
if b.OS == "windows" {
return b.fetchZip(client)
}
return b.fetchTarball(client)
}
func (b *Build) fetchTarball(client *buildlet.Client) error {
b.logf("Downloading tarball.")
tgz, err := client.GetTar(context.Background(), ".")
if err != nil {
return err
}
filename := *version + "." + b.String() + ".tar.gz"
return b.writeFile(filename, tgz)
}
func (b *Build) fetchZip(client *buildlet.Client) error {
b.logf("Downloading tarball and re-compressing as zip.")
tgz, err := client.GetTar(context.Background(), ".")
if err != nil {
return err
}
defer tgz.Close()
filename := *version + "." + b.String() + ".zip"
f, err := os.Create(filename)
if err != nil {
return err
}
if err := tgzToZip(f, tgz); err != nil {
f.Close()
return err
}
if err := f.Close(); err != nil {
return err
}
b.logf("Wrote %q.", filename)
return nil
}
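// tgzToZip converts a gzip-compressed tar stream into a zip archive,
// storing already-compressed image files instead of deflating them again.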
func tgzToZip(w io.Writer, r io.Reader) error {
zr, err := gzip.NewReader(r)
if err != nil {
return err
}
tr := tar.NewReader(zr)
zw := zip.NewWriter(w)
for {
th, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
fi := th.FileInfo()
zh, err := zip.FileInfoHeader(fi)
if err != nil {
return err
}
zh.Name = th.Name // for the full path
switch strings.ToLower(path.Ext(zh.Name)) {
case ".jpg", ".jpeg", ".png", ".gif":
// Don't re-compress already compressed files.
zh.Method = zip.Store
default:
zh.Method = zip.Deflate
}
if fi.IsDir() {
zh.Method = zip.Store
}
w, err := zw.CreateHeader(zh)
if err != nil {
return err
}
if fi.IsDir() {
continue
}
if _, err := io.Copy(w, tr); err != nil {
return err
}
}
return zw.Close()
}
// fetchFile fetches the specified directory from the given buildlet, and
// writes the first file it finds in that directory to dest.
func (b *Build) fetchFile(client *buildlet.Client, dest, dir string) error {
b.logf("Downloading file from %q.", dir)
tgz, err := client.GetTar(context.Background(), dir)
if err != nil {
return err
}
defer tgz.Close()
zr, err := gzip.NewReader(tgz)
if err != nil {
return err
}
tr := tar.NewReader(zr)
for {
h, err := tr.Next()
if err == io.EOF {
return io.ErrUnexpectedEOF
}
if err != nil {
return err
}
if !h.FileInfo().IsDir() {
break
}
}
return b.writeFile(dest, tr)
}
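// writeFile copies r into a local file named name; gzip output is also
// verified to be a single-stream archive (see golang.org/issue/19052).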
func (b *Build) writeFile(name string, r io.Reader) error {
f, err := os.Create(name)
if err != nil {
return err
}
if _, err := io.Copy(f, r); err != nil {
f.Close()
return err
}
if err := f.Close(); err != nil {
return err
}
if strings.HasSuffix(name, ".gz") {
if err := verifyGzipSingleStream(name); err != nil {
return fmt.Errorf("error verifying that %s is a single-stream gzip: %v", name, err)
}
}
b.logf("Wrote %q.", name)
return nil
}
// verifyGzipSingleStream verifies that the named gzip file is not
// a multi-stream file. See golang.org/issue/19052
func verifyGzipSingleStream(name string) error {
f, err := os.Open(name)
if err != nil {
return err
}
defer f.Close()
br := bufio.NewReader(f)
zr, err := gzip.NewReader(br)
if err != nil {
return err
}
zr.Multistream(false)
if _, err := io.Copy(ioutil.Discard, zr); err != nil {
return fmt.Errorf("reading first stream: %v", err)
}
peek, err := br.Peek(1)
if len(peek) > 0 || err != io.EOF {
return fmt.Errorf("unexpected peek of %d, %v after first gzip stream", len(peek), err)
}
return nil
}
func addPrefix(prefix string, in []string) []string {
var out []string
for _, s := range in {
out = append(out, path.Join(prefix, s))
}
return out
}
func coordinatorClient() *buildlet.CoordinatorClient {
return &buildlet.CoordinatorClient{
Auth: buildlet.UserPass{
Username: "user-" + *user,
Password: userToken(),
},
Instance: build.ProdCoordinator,
}
}
func homeDir() string {
if runtime.GOOS == "windows" {
return os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
}
return os.Getenv("HOME")
}
func configDir() string {
if runtime.GOOS == "windows" {
return filepath.Join(os.Getenv("APPDATA"), "Gomote")
}
if xdg := os.Getenv("XDG_CONFIG_HOME"); xdg != "" {
return filepath.Join(xdg, "gomote")
}
return filepath.Join(homeDir(), ".config", "gomote")
}
func username() string {
if runtime.GOOS == "windows" {
return os.Getenv("USERNAME")
}
return os.Getenv("USER")
}
func userToken() string {
if *user == "" {
panic("userToken called with user flag empty")
}
keyDir := configDir()
baseFile := "user-" + *user + ".token"
tokenFile := filepath.Join(keyDir, baseFile)
slurp, err := ioutil.ReadFile(tokenFile)
if os.IsNotExist(err) {
log.Printf("Missing file %s for user %q. Change --user or obtain a token and place it there.",
tokenFile, *user)
}
if err != nil {
log.Fatal(err)
}
return strings.TrimSpace(string(slurp))
}
func setGOARCH(env []string, goarch string) []string {
wantKV := "GOARCH=" + goarch
existing := false
for i, kv := range env {
if strings.HasPrefix(kv, "GOARCH=") && kv != wantKV {
env[i] = wantKV
existing = true
}
}
if existing {
return env
}
return append(env, wantKV)
}
| [
"\"HOMEDRIVE\"",
"\"HOMEPATH\"",
"\"HOME\"",
"\"APPDATA\"",
"\"XDG_CONFIG_HOME\"",
"\"USERNAME\"",
"\"USER\""
] | [] | [
"USERNAME",
"APPDATA",
"HOMEPATH",
"HOMEDRIVE",
"USER",
"HOME",
"XDG_CONFIG_HOME"
] | [] | ["USERNAME", "APPDATA", "HOMEPATH", "HOMEDRIVE", "USER", "HOME", "XDG_CONFIG_HOME"] | go | 7 | 0 | |
third_party/android_deps/libs/org_codehaus_mojo_animal_sniffer_annotations/3pp/fetch.py | #!/usr/bin/env python
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is generated, do not edit. Update BuildConfigGenerator.groovy and
# 3ppFetch.template instead.
from __future__ import print_function
import argparse
import json
import os
_FILE_URL = 'https://repo.maven.apache.org/maven2/org/codehaus/mojo/animal-sniffer-annotations/1.17/animal-sniffer-annotations-1.17.jar'
_FILE_NAME = 'animal-sniffer-annotations-1.17.jar'
_FILE_VERSION = '1.17'
def do_latest():
print(_FILE_VERSION)
def get_download_url(version):
if _FILE_URL.endswith('.jar'):
ext = '.jar'
elif _FILE_URL.endswith('.aar'):
ext = '.aar'
else:
raise Exception('Unsupported extension for %s' % _FILE_URL)
partial_manifest = {
'url': [_FILE_URL],
'name': [_FILE_NAME],
'ext': ext,
}
print(json.dumps(partial_manifest))
def main():
ap = argparse.ArgumentParser()
sub = ap.add_subparsers()
latest = sub.add_parser("latest")
latest.set_defaults(func=lambda _opts: do_latest())
download = sub.add_parser("get_url")
download.set_defaults(
func=lambda _opts: get_download_url(os.environ['_3PP_VERSION']))
opts = ap.parse_args()
opts.func(opts)
if __name__ == '__main__':
main()
| [] | [] | [
"_3PP_VERSION"
] | [] | ["_3PP_VERSION"] | python | 1 | 0 | |
app/auth.go | package app
import (
"context"
"fmt"
"github.com/dgrijalva/jwt-go"
"github.com/amber-iot/go-jwt-auth/models"
u "github.com/amber-iot/go-jwt-auth/utils"
"log"
"net/http"
"os"
)
var JwtAuthentication = func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Endpoints that need no authentication
/*needAuthPaths := []string{"/api/account/me"}
requestPath := r.URL.Path
var needAuth = false
for _, value := range needAuthPaths {
if value == requestPath {
needAuth = true
break
}
}
if !needAuth {
next.ServeHTTP(w, r)
return
}*/
tokenHeader := r.Header.Get("Authorization")
		log.Println("Token: " + tokenHeader)
// Token is missing
if tokenHeader == "" {
sendInvalidTokenResponse(w, "Missing auth token")
return
}
tk := &models.Token{}
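		// Parse the JWT with the token_password secret from the environment
		// and decode its claims into models.Token.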
token, err := jwt.ParseWithClaims(tokenHeader, tk, func(token *jwt.Token) (interface{}, error) {
return []byte(os.Getenv("token_password")), nil
})
		if err != nil {
			fmt.Println("Parse token error:", err)
			sendInvalidTokenResponse(w, "Invalid auth token: "+err.Error())
			return
		}
// Token is invalid
if !token.Valid {
sendInvalidTokenResponse(w, "Token is not valid")
return
}
// Auth ok
fmt.Println("User:", tk.UserId)
ctx := context.WithValue(r.Context(), "user", tk.UserId)
r = r.WithContext(ctx)
next.ServeHTTP(w, r)
})
}
func sendInvalidTokenResponse(w http.ResponseWriter, message string) {
response := u.Message(false, message)
	// Headers must be set before WriteHeader, otherwise they are ignored.
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusForbidden)
u.Respond(w, response)
}
| [
"\"token_password\""
] | [] | [
"token_password"
] | [] | ["token_password"] | go | 1 | 0 | |
mlchain/base/client.py | import os
import warnings
from mlchain.base.serializer import JsonSerializer, MsgpackSerializer, MsgpackBloscSerializer, JpgMsgpackSerializer, PngMsgpackSerializer
import mlchain
class ClientBase:
"""
Mlchain Client base class
"""
def __init__(self, api_key = None, api_address = None, serializer = 'msgpack', image_encoder=None):
"""
Client to communicate with Mlchain server
:api_key: Your API KEY
:api_address: API or URL of server to communicate with
:serializer: The way to serialize data ['json', 'msgpack', 'msgpack_blosc']
"""
assert serializer in ['json', 'msgpack', 'msgpack_blosc']
assert image_encoder in ['jpg', 'png', None]
if api_key is None and os.getenv('MLCHAIN_API_KEY') is not None:
api_key = os.getenv('MLCHAIN_API_KEY')
self.api_key = api_key
if api_address is None:
if os.getenv('MLCHAIN_API_ADDRESS') is not None:
api_address = os.getenv('MLCHAIN_API_ADDRESS')
else:
api_address = mlchain.api_address
self.api_address = api_address
if isinstance(self.api_address, str):
self.api_address = self.api_address.strip()
if len(self.api_address) > 0 and self.api_address[-1] == '/':
self.api_address = self.api_address[:-1]
if len(self.api_address) > 0 and self.api_address[0] != 'h':
self.api_address = 'http://{0}'.format(api_address)
self.json_serializer = JsonSerializer()
        # The image encoder must be known before the serializer is chosen;
        # reading it after this block raised AttributeError and silently fell
        # back to the json serializer.
        self.image_encoder = image_encoder
        # Serializer initialization
        self.serializer_type = serializer
        try:
            if serializer == 'msgpack':
                if self.image_encoder is None:
                    self.serializer = MsgpackSerializer()
                elif self.image_encoder == 'jpg':
                    self.serializer = JpgMsgpackSerializer()
                elif self.image_encoder == 'png':
                    self.serializer = PngMsgpackSerializer()
            elif serializer == 'msgpack_blosc':
                self.serializer = MsgpackBloscSerializer()
            else:
                self.serializer_type = 'json'
                self.serializer = self.json_serializer
        except Exception:
            self.serializer_type = 'json'
            self.serializer = self.json_serializer
        self.content_type = 'application/{0}'.format(self.serializer_type)
def get(self):
raise NotImplementedError
def post(self):
raise NotImplementedError
def put(self):
raise NotImplementedError
def patch(self):
raise NotImplementedError
def delete(self):
raise NotImplementedError
def get_async(self):
raise NotImplementedError
def post_async(self):
raise NotImplementedError
def put_async(self):
raise NotImplementedError
def patch_async(self):
raise NotImplementedError
def delete_async(self):
raise NotImplementedError | [] | [] | [
"MLCHAIN_API_KEY",
"MLCHAIN_API_ADDRESS"
] | [] | ["MLCHAIN_API_KEY", "MLCHAIN_API_ADDRESS"] | python | 2 | 0 | |
src/cmd/go/internal/get/get.go | // Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package get implements the ``go get'' command.
package get
import (
"fmt"
"go/build"
"os"
"path/filepath"
"runtime"
"strings"
"cmd/go/internal/base"
"cmd/go/internal/cfg"
"cmd/go/internal/load"
"cmd/go/internal/str"
"cmd/go/internal/web"
"cmd/go/internal/work"
)
var CmdGet = &base.Command{
UsageLine: "get [-d] [-f] [-fix] [-insecure] [-t] [-u] [build flags] [packages]",
Short: "download and install packages and dependencies",
Long: `
Get downloads the packages named by the import paths, along with their
dependencies. It then installs the named packages, like 'go install'.
The -d flag instructs get to stop after downloading the packages; that is,
it instructs get not to install the packages.
The -f flag, valid only when -u is set, forces get -u not to verify that
each package has been checked out from the source control repository
implied by its import path. This can be useful if the source is a local fork
of the original.
The -fix flag instructs get to run the fix tool on the downloaded packages
before resolving dependencies or building the code.
The -insecure flag permits fetching from repositories and resolving
custom domains using insecure schemes such as HTTP. Use with caution.
The -t flag instructs get to also download the packages required to build
the tests for the specified packages.
The -u flag instructs get to use the network to update the named packages
and their dependencies. By default, get uses the network to check out
missing packages but does not use it to look for updates to existing packages.
The -v flag enables verbose progress and debug output.
Get also accepts build flags to control the installation. See 'go help build'.
When checking out a new package, get creates the target directory
GOPATH/src/<import-path>. If the GOPATH contains multiple entries,
get uses the first one. For more details see: 'go help gopath'.
When checking out or updating a package, get looks for a branch or tag
that matches the locally installed version of Go. The most important
rule is that if the local installation is running version "go1", get
searches for a branch or tag named "go1". If no such version exists it
retrieves the most recent version of the package.
When go get checks out or updates a Git repository,
it also updates any git submodules referenced by the repository.
Get never checks out or updates code stored in vendor directories.
For more about specifying packages, see 'go help packages'.
For more about how 'go get' finds source code to
download, see 'go help importpath'.
See also: go build, go install, go clean.
`,
}
var getD = CmdGet.Flag.Bool("d", false, "")
var getF = CmdGet.Flag.Bool("f", false, "")
var getT = CmdGet.Flag.Bool("t", false, "")
var getU = CmdGet.Flag.Bool("u", false, "")
var getFix = CmdGet.Flag.Bool("fix", false, "")
var getInsecure = CmdGet.Flag.Bool("insecure", false, "")
func init() {
work.AddBuildFlags(CmdGet)
CmdGet.Run = runGet // break init loop
}
func runGet(cmd *base.Command, args []string) {
if *getF && !*getU {
base.Fatalf("go get: cannot use -f flag without -u")
}
// Disable any prompting for passwords by Git.
// Only has an effect for 2.3.0 or later, but avoiding
// the prompt in earlier versions is just too hard.
// If user has explicitly set GIT_TERMINAL_PROMPT=1, keep
// prompting.
// See golang.org/issue/9341 and golang.org/issue/12706.
if os.Getenv("GIT_TERMINAL_PROMPT") == "" {
os.Setenv("GIT_TERMINAL_PROMPT", "0")
}
// Disable any ssh connection pooling by Git.
// If a Git subprocess forks a child into the background to cache a new connection,
// that child keeps stdout/stderr open. After the Git subprocess exits,
	// os/exec expects to be able to read from the stdout/stderr pipe
// until EOF to get all the data that the Git subprocess wrote before exiting.
// The EOF doesn't come until the child exits too, because the child
// is holding the write end of the pipe.
// This is unfortunate, but it has come up at least twice
// (see golang.org/issue/13453 and golang.org/issue/16104)
// and confuses users when it does.
// If the user has explicitly set GIT_SSH or GIT_SSH_COMMAND,
// assume they know what they are doing and don't step on it.
// But default to turning off ControlMaster.
if os.Getenv("GIT_SSH") == "" && os.Getenv("GIT_SSH_COMMAND") == "" {
os.Setenv("GIT_SSH_COMMAND", "ssh -o ControlMaster=no")
}
// Phase 1. Download/update.
var stk load.ImportStack
mode := 0
if *getT {
mode |= load.GetTestDeps
}
args = downloadPaths(args)
for _, arg := range args {
download(arg, nil, &stk, mode)
}
base.ExitIfErrors()
// Phase 2. Rescan packages and re-evaluate args list.
// Code we downloaded and all code that depends on it
// needs to be evicted from the package cache so that
// the information will be recomputed. Instead of keeping
// track of the reverse dependency information, evict
// everything.
load.ClearPackageCache()
// In order to rebuild packages information completely,
// we need to clear commands cache. Command packages are
// referring to evicted packages from the package cache.
// This leads to duplicated loads of the standard packages.
load.ClearCmdCache()
args = load.ImportPaths(args)
load.PackagesForBuild(args)
// Phase 3. Install.
if *getD {
// Download only.
// Check delayed until now so that importPaths
// and packagesForBuild have a chance to print errors.
return
}
work.InstallPackages(args, true)
}
// downloadPaths prepares the list of paths to pass to download.
// It expands ... patterns that can be expanded. If there is no match
// for a particular pattern, downloadPaths leaves it in the result list,
// in the hope that we can figure out the repository from the
// initial ...-free prefix.
func downloadPaths(args []string) []string {
args = load.ImportPathsNoDotExpansion(args)
var out []string
for _, a := range args {
if strings.Contains(a, "...") {
var expand []string
// Use matchPackagesInFS to avoid printing
// warnings. They will be printed by the
// eventual call to importPaths instead.
if build.IsLocalImport(a) {
expand = load.MatchPackagesInFS(a)
} else {
expand = load.MatchPackages(a)
}
if len(expand) > 0 {
out = append(out, expand...)
continue
}
}
out = append(out, a)
}
return out
}
// downloadCache records the import paths we have already
// considered during the download, to avoid duplicate work when
// there is more than one dependency sequence leading to
// a particular package.
var downloadCache = map[string]bool{}
// downloadRootCache records the version control repository
// root directories we have already considered during the download.
// For example, all the packages in the github.com/google/codesearch repo
// share the same root (the directory for that path), and we only need
// to run the hg commands to consider each repository once.
var downloadRootCache = map[string]bool{}
// download runs the download half of the get command
// for the package named by the argument.
func download(arg string, parent *load.Package, stk *load.ImportStack, mode int) {
if mode&load.UseVendor != 0 {
// Caller is responsible for expanding vendor paths.
panic("internal error: download mode has useVendor set")
}
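	// load1 loads a single package: top-level arguments go through
	// LoadPackage, while packages reached from a parent import are resolved
	// with LoadImport relative to the parent's directory.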
load1 := func(path string, mode int) *load.Package {
if parent == nil {
return load.LoadPackage(path, stk)
}
return load.LoadImport(path, parent.Dir, parent, stk, nil, mode)
}
p := load1(arg, mode)
if p.Error != nil && p.Error.Hard {
base.Errorf("%s", p.Error)
return
}
// loadPackage inferred the canonical ImportPath from arg.
// Use that in the following to prevent hysteresis effects
// in e.g. downloadCache and packageCache.
// This allows invocations such as:
// mkdir -p $GOPATH/src/github.com/user
// cd $GOPATH/src/github.com/user
// go get ./foo
// see: golang.org/issue/9767
arg = p.ImportPath
// There's nothing to do if this is a package in the standard library.
if p.Standard {
return
}
// Only process each package once.
// (Unless we're fetching test dependencies for this package,
// in which case we want to process it again.)
if downloadCache[arg] && mode&load.GetTestDeps == 0 {
return
}
downloadCache[arg] = true
pkgs := []*load.Package{p}
wildcardOkay := len(*stk) == 0
isWildcard := false
// Download if the package is missing, or update if we're using -u.
if p.Dir == "" || *getU {
// The actual download.
stk.Push(arg)
err := downloadPackage(p)
if err != nil {
base.Errorf("%s", &load.PackageError{ImportStack: stk.Copy(), Err: err.Error()})
stk.Pop()
return
}
stk.Pop()
args := []string{arg}
// If the argument has a wildcard in it, re-evaluate the wildcard.
// We delay this until after reloadPackage so that the old entry
// for p has been replaced in the package cache.
if wildcardOkay && strings.Contains(arg, "...") {
if build.IsLocalImport(arg) {
args = load.MatchPackagesInFS(arg)
} else {
args = load.MatchPackages(arg)
}
isWildcard = true
}
// Clear all relevant package cache entries before
// doing any new loads.
load.ClearPackageCachePartial(args)
pkgs = pkgs[:0]
for _, arg := range args {
// Note: load calls loadPackage or loadImport,
// which push arg onto stk already.
// Do not push here too, or else stk will say arg imports arg.
p := load1(arg, mode)
if p.Error != nil {
base.Errorf("%s", p.Error)
continue
}
pkgs = append(pkgs, p)
}
}
// Process package, which might now be multiple packages
// due to wildcard expansion.
for _, p := range pkgs {
if *getFix {
base.Run(cfg.BuildToolexec, str.StringList(base.Tool("fix"), base.RelPaths(p.Internal.AllGoFiles)))
// The imports might have changed, so reload again.
p = load.ReloadPackage(arg, stk)
if p.Error != nil {
base.Errorf("%s", p.Error)
return
}
}
if isWildcard {
// Report both the real package and the
// wildcard in any error message.
stk.Push(p.ImportPath)
}
// Process dependencies, now that we know what they are.
imports := p.Imports
if mode&load.GetTestDeps != 0 {
// Process test dependencies when -t is specified.
// (But don't get test dependencies for test dependencies:
// we always pass mode 0 to the recursive calls below.)
imports = str.StringList(imports, p.TestImports, p.XTestImports)
}
for i, path := range imports {
if path == "C" {
continue
}
// Fail fast on import naming full vendor path.
// Otherwise expand path as needed for test imports.
// Note that p.Imports can have additional entries beyond p.Internal.Build.Imports.
orig := path
if i < len(p.Internal.Build.Imports) {
orig = p.Internal.Build.Imports[i]
}
if j, ok := load.FindVendor(orig); ok {
stk.Push(path)
err := &load.PackageError{
ImportStack: stk.Copy(),
Err: "must be imported as " + path[j+len("vendor/"):],
}
stk.Pop()
base.Errorf("%s", err)
continue
}
// If this is a test import, apply vendor lookup now.
// We cannot pass useVendor to download, because
// download does caching based on the value of path,
// so it must be the fully qualified path already.
if i >= len(p.Imports) {
path = load.VendoredImportPath(p, path)
}
download(path, p, stk, 0)
}
if isWildcard {
stk.Pop()
}
}
}
// downloadPackage runs the create or download command
// to make the first copy of or update a copy of the given package.
func downloadPackage(p *load.Package) error {
var (
vcs *vcsCmd
repo, rootPath string
err error
)
security := web.Secure
if *getInsecure {
security = web.Insecure
}
if p.Internal.Build.SrcRoot != "" {
// Directory exists. Look for checkout along path to src.
vcs, rootPath, err = vcsFromDir(p.Dir, p.Internal.Build.SrcRoot)
if err != nil {
return err
}
repo = "<local>" // should be unused; make distinctive
// Double-check where it came from.
if *getU && vcs.remoteRepo != nil {
dir := filepath.Join(p.Internal.Build.SrcRoot, filepath.FromSlash(rootPath))
remote, err := vcs.remoteRepo(vcs, dir)
if err != nil {
return err
}
repo = remote
if !*getF {
if rr, err := repoRootForImportPath(p.ImportPath, security); err == nil {
repo := rr.repo
if rr.vcs.resolveRepo != nil {
resolved, err := rr.vcs.resolveRepo(rr.vcs, dir, repo)
if err == nil {
repo = resolved
}
}
if remote != repo && rr.isCustom {
return fmt.Errorf("%s is a custom import path for %s, but %s is checked out from %s", rr.root, repo, dir, remote)
}
}
}
}
} else {
// Analyze the import path to determine the version control system,
// repository, and the import path for the root of the repository.
rr, err := repoRootForImportPath(p.ImportPath, security)
if err != nil {
return err
}
vcs, repo, rootPath = rr.vcs, rr.repo, rr.root
}
if !vcs.isSecure(repo) && !*getInsecure {
return fmt.Errorf("cannot download, %v uses insecure protocol", repo)
}
if p.Internal.Build.SrcRoot == "" {
// Package not found. Put in first directory of $GOPATH.
list := filepath.SplitList(cfg.BuildContext.GOPATH)
if len(list) == 0 {
return fmt.Errorf("cannot download, $GOPATH not set. For more details see: 'go help gopath'")
}
// Guard against people setting GOPATH=$GOROOT.
if filepath.Clean(list[0]) == filepath.Clean(cfg.GOROOT) {
return fmt.Errorf("cannot download, $GOPATH must not be set to $GOROOT. For more details see: 'go help gopath'")
}
if _, err := os.Stat(filepath.Join(list[0], "src/cmd/go/alldocs.go")); err == nil {
return fmt.Errorf("cannot download, %s is a GOROOT, not a GOPATH. For more details see: 'go help gopath'", list[0])
}
p.Internal.Build.Root = list[0]
p.Internal.Build.SrcRoot = filepath.Join(list[0], "src")
p.Internal.Build.PkgRoot = filepath.Join(list[0], "pkg")
}
root := filepath.Join(p.Internal.Build.SrcRoot, filepath.FromSlash(rootPath))
// If we've considered this repository already, don't do it again.
if downloadRootCache[root] {
return nil
}
downloadRootCache[root] = true
if cfg.BuildV {
fmt.Fprintf(os.Stderr, "%s (download)\n", rootPath)
}
// Check that this is an appropriate place for the repo to be checked out.
// The target directory must either not exist or have a repo checked out already.
meta := filepath.Join(root, "."+vcs.cmd)
st, err := os.Stat(meta)
if err == nil && !st.IsDir() {
return fmt.Errorf("%s exists but is not a directory", meta)
}
if err != nil {
// Metadata directory does not exist. Prepare to checkout new copy.
// Some version control tools require the target directory not to exist.
// We require that too, just to avoid stepping on existing work.
if _, err := os.Stat(root); err == nil {
return fmt.Errorf("%s exists but %s does not - stale checkout?", root, meta)
}
_, err := os.Stat(p.Internal.Build.Root)
gopathExisted := err == nil
// Some version control tools require the parent of the target to exist.
parent, _ := filepath.Split(root)
if err = os.MkdirAll(parent, 0777); err != nil {
return err
}
if cfg.BuildV && !gopathExisted && p.Internal.Build.Root == cfg.BuildContext.GOPATH {
fmt.Fprintf(os.Stderr, "created GOPATH=%s; see 'go help gopath'\n", p.Internal.Build.Root)
}
if err = vcs.create(root, repo); err != nil {
return err
}
} else {
// Metadata directory does exist; download incremental updates.
if err = vcs.download(root); err != nil {
return err
}
}
if cfg.BuildN {
// Do not show tag sync in -n; it's noise more than anything,
// and since we're not running commands, no tag will be found.
// But avoid printing nothing.
fmt.Fprintf(os.Stderr, "# cd %s; %s sync/update\n", root, vcs.cmd)
return nil
}
// Select and sync to appropriate version of the repository.
tags, err := vcs.tags(root)
if err != nil {
return err
}
vers := runtime.Version()
if i := strings.Index(vers, " "); i >= 0 {
vers = vers[:i]
}
if err := vcs.tagSync(root, selectTag(vers, tags)); err != nil {
return err
}
return nil
}
// selectTag returns the closest matching tag for a given version.
// Closest means the latest one that is not after the current release.
// Version "goX" (or "goX.Y" or "goX.Y.Z") matches tags of the same form.
// Version "release.rN" matches tags of the form "go.rN" (N being a floating-point number).
// Version "weekly.YYYY-MM-DD" matches tags like "go.weekly.YYYY-MM-DD".
//
// NOTE(rsc): Eventually we will need to decide on some logic here.
// For now, there is only "go1". This matches the docs in go help get.
func selectTag(goVersion string, tags []string) (match string) {
for _, t := range tags {
if t == "go1" {
return "go1"
}
}
return ""
}
| [
"\"GIT_TERMINAL_PROMPT\"",
"\"GIT_SSH\"",
"\"GIT_SSH_COMMAND\""
] | [] | [
"GIT_SSH",
"GIT_SSH_COMMAND",
"GIT_TERMINAL_PROMPT"
] | [] | ["GIT_SSH", "GIT_SSH_COMMAND", "GIT_TERMINAL_PROMPT"] | go | 3 | 0 | |
wandb/sdk/launch/_project_spec.py | """
Internal utility for converting arguments from a launch spec or call to wandb launch
into a runnable wandb launch script
"""
import enum
import json
import logging
import os
from shlex import quote
import tempfile
from typing import Any, Dict, Optional, Tuple
import wandb
from wandb.apis.internal import Api
import wandb.docker as docker
from wandb.errors import Error as ExecutionError, LaunchError
from wandb.sdk.lib.runid import generate_id
from . import utils
_logger = logging.getLogger(__name__)
DEFAULT_LAUNCH_METADATA_PATH = "launch_metadata.json"
# need to make user root for sagemaker, so users have access to /opt/ml directories
# that let users create artifacts and access input data
RESOURCE_UID_MAP = {"local": 1000, "sagemaker": 0}
class LaunchSource(enum.IntEnum):
WANDB: int = 1
GIT: int = 2
LOCAL: int = 3
DOCKER: int = 4
class LaunchProject:
"""A launch project specification."""
def __init__(
self,
uri: Optional[str],
api: Api,
launch_spec: Dict[str, Any],
target_entity: str,
target_project: str,
name: Optional[str],
docker_config: Dict[str, Any],
git_info: Dict[str, str],
overrides: Dict[str, Any],
resource: str,
resource_args: Dict[str, Any],
cuda: Optional[bool],
):
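        """Initialize the launch project from its spec, recording docker, git,
        and override settings and resolving whether the source is a wandb run,
        a git repo, a local path, or a docker image."""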
if uri is not None and utils.is_bare_wandb_uri(uri):
uri = api.settings("base_url") + uri
_logger.info(f"Updating uri with base uri: {uri}")
self.uri = uri
self.api = api
self.launch_spec = launch_spec
self.target_entity = target_entity
self.target_project = target_project
self.name = name
self.build_image: bool = docker_config.get("build_image", False)
self.python_version: Optional[str] = docker_config.get("python_version")
self.cuda_version: Optional[str] = docker_config.get("cuda_version")
self._base_image: Optional[str] = docker_config.get("base_image")
self.docker_image: Optional[str] = docker_config.get("docker_image")
uid = RESOURCE_UID_MAP.get(resource, 1000)
if self._base_image:
uid = docker.get_image_uid(self._base_image)
_logger.info(f"Retrieved base image uid {uid}")
self.docker_user_id: int = docker_config.get("user_id", uid)
self.git_version: Optional[str] = git_info.get("version")
self.git_repo: Optional[str] = git_info.get("repo")
self.override_args: Dict[str, Any] = overrides.get("args", {})
self.override_config: Dict[str, Any] = overrides.get("run_config", {})
self.override_artifacts: Dict[str, Any] = overrides.get("artifacts", {})
self.resource = resource
self.resource_args = resource_args
self.deps_type: Optional[str] = None
self.cuda = cuda
self._runtime: Optional[str] = None
self.run_id = generate_id()
self._entry_points: Dict[
str, EntryPoint
] = {} # todo: keep multiple entrypoint support?
if (
"entry_point" in overrides
and overrides["entry_point"] is not None
and overrides["entry_point"] != ""
):
_logger.info("Adding override entry point")
self.add_entry_point(overrides["entry_point"])
if self.uri is None:
if self.docker_image is None:
raise LaunchError("Run requires a URI or a docker image")
self.source = LaunchSource.DOCKER
self.project_dir = None
elif utils._is_wandb_uri(self.uri):
_logger.info(f"URI {self.uri} indicates a wandb uri")
self.source = LaunchSource.WANDB
self.project_dir = tempfile.mkdtemp()
elif utils._is_git_uri(self.uri):
_logger.info(f"URI {self.uri} indicates a git uri")
self.source = LaunchSource.GIT
self.project_dir = tempfile.mkdtemp()
else:
_logger.info(f"URI {self.uri} indicates a local uri")
# assume local
if not os.path.exists(self.uri):
raise LaunchError(
"Assumed URI supplied is a local path but path is not valid"
)
self.source = LaunchSource.LOCAL
self.project_dir = self.uri
if launch_spec.get("resource_args"):
self.resource_args = launch_spec["resource_args"]
self.aux_dir = tempfile.mkdtemp()
self.clear_parameter_run_config_collisions()
@property
def base_image(self) -> str:
"""Returns {PROJECT}_base:{PYTHON_VERSION}"""
# TODO: this should likely be source_project when we have it...
# don't make up a separate base image name if user provides a docker image
if self.docker_image is not None:
return self.docker_image
python_version = (self.python_version or "3").replace("+", "dev")
generated_name = "{}_base:{}".format(
self.target_project.replace(" ", "-"), python_version
)
return self._base_image or generated_name
@property
    def image_name(self) -> str:
        """Returns {PROJECT}_launch; the final image is tagged with the sha of
        the git repo."""
# TODO: this should likely be source_project when we have it...
return f"{self.target_project}_launch"
def clear_parameter_run_config_collisions(self) -> None:
"""Clear values from the override run config values if a matching key exists in the override arguments."""
if not self.override_config:
return
keys = [key for key in self.override_config.keys()]
for key in keys:
if self.override_args.get(key):
del self.override_config[key]
def get_single_entry_point(self) -> Optional["EntryPoint"]:
"""Returns the first entrypoint for the project, or None if no entry point was provided because a docker image was provided."""
# assuming project only has 1 entry point, pull that out
# tmp fn until we figure out if we want to support multiple entry points or not
if not self._entry_points:
if not self.docker_image:
raise LaunchError(
"Project must have at least one entry point unless docker image is specified."
)
return None
return list(self._entry_points.values())[0]
def add_entry_point(self, entry_point: str) -> "EntryPoint":
"""Adds an entry point to the project."""
_, file_extension = os.path.splitext(entry_point)
ext_to_cmd = {".py": "python", ".sh": os.environ.get("SHELL", "bash")}
if file_extension in ext_to_cmd:
command = f"{ext_to_cmd[file_extension]} {quote(entry_point)}"
new_entrypoint = EntryPoint(name=entry_point, command=command)
self._entry_points[entry_point] = new_entrypoint
return new_entrypoint
raise ExecutionError(
"Could not find {0} among entry points {1} or interpret {0} as a "
"runnable script. Supported script file extensions: "
"{2}".format(
entry_point, list(self._entry_points.keys()), list(ext_to_cmd.keys())
)
)
def get_entry_point(self, entry_point: str) -> "EntryPoint":
"""Gets the entrypoint if its set, or adds it and returns the entrypoint."""
if entry_point in self._entry_points:
return self._entry_points[entry_point]
return self.add_entry_point(entry_point)
def _fetch_project_local(self, internal_api: Api) -> None:
"""Fetch a project (either wandb run or git repo) into a local directory, returning the path to the local project directory."""
# these asserts are all guaranteed to pass, but are required by mypy
assert self.source != LaunchSource.LOCAL
assert isinstance(self.uri, str)
assert self.project_dir is not None
_logger.info("Fetching project locally...")
if utils._is_wandb_uri(self.uri):
source_entity, source_project, source_run_name = utils.parse_wandb_uri(
self.uri
)
run_info = utils.fetch_wandb_project_run_info(
source_entity, source_project, source_run_name, internal_api
)
entry_point = run_info.get("codePath") or run_info["program"]
if run_info.get("cudaVersion"):
original_cuda_version = ".".join(run_info["cudaVersion"].split(".")[:2])
if self.cuda is None:
# only set cuda on by default if cuda is None (unspecified), not False (user specifically requested cpu image)
wandb.termlog(
"Original wandb run {} was run with cuda version {}. Enabling cuda builds by default; to build on a CPU-only image, run again with --cuda=False".format(
source_run_name, original_cuda_version
)
)
self.cuda_version = original_cuda_version
self.cuda = True
if (
self.cuda
and self.cuda_version
and self.cuda_version != original_cuda_version
):
wandb.termlog(
"Specified cuda version {} differs from original cuda version {}. Running with specified version {}".format(
self.cuda_version, original_cuda_version, self.cuda_version
)
)
downloaded_code_artifact = utils.check_and_download_code_artifacts(
source_entity,
source_project,
source_run_name,
internal_api,
self.project_dir,
)
if downloaded_code_artifact:
self.build_image = True
elif not downloaded_code_artifact:
if not run_info["git"]:
raise ExecutionError(
"Reproducing a run requires either an associated git repo or a code artifact logged with `run.log_code()`"
)
utils._fetch_git_repo(
self.project_dir,
run_info["git"]["remote"],
run_info["git"]["commit"],
)
patch = utils.fetch_project_diff(
source_entity, source_project, source_run_name, internal_api
)
if patch:
utils.apply_patch(patch, self.project_dir)
# For cases where the entry point wasn't checked into git
if not os.path.exists(os.path.join(self.project_dir, entry_point)):
downloaded_entrypoint = utils.download_entry_point(
source_entity,
source_project,
source_run_name,
internal_api,
entry_point,
self.project_dir,
)
if not downloaded_entrypoint:
raise LaunchError(
f"Entrypoint: {entry_point} does not exist, "
"and could not be downloaded. Please specify the entrypoint for this run."
)
# if the entrypoint is downloaded and inserted into the project dir
# need to rebuild image with new code
self.build_image = True
if (
"_session_history.ipynb" in os.listdir(self.project_dir)
or ".ipynb" in entry_point
):
entry_point = utils.convert_jupyter_notebook_to_script(
entry_point, self.project_dir
)
# Download any frozen requirements
utils.download_wandb_python_deps(
source_entity,
source_project,
source_run_name,
internal_api,
self.project_dir,
)
# Specify the python runtime for jupyter2docker
self.python_version = run_info.get("python", "3")
if not self._entry_points:
self.add_entry_point(entry_point)
self.override_args = utils.merge_parameters(
self.override_args, run_info["args"]
)
else:
assert utils._GIT_URI_REGEX.match(self.uri), (
"Non-wandb URI %s should be a Git URI" % self.uri
)
if not self._entry_points:
wandb.termlog(
"Entry point for repo not specified, defaulting to main.py"
)
self.add_entry_point("main.py")
utils._fetch_git_repo(self.project_dir, self.uri, self.git_version)
class EntryPoint:
"""An entry point into a wandb launch specification."""
def __init__(self, name: str, command: str):
self.name = name
self.command = command
self.parameters: Dict[str, Any] = {}
def _validate_parameters(self, user_parameters: Dict[str, Any]) -> None:
missing_params = []
for name in self.parameters:
if name not in user_parameters and self.parameters[name].default is None:
missing_params.append(name)
if missing_params:
raise ExecutionError(
"No value given for missing parameters: %s"
% ", ".join(["'%s'" % name for name in missing_params])
)
def compute_parameters(
self, user_parameters: Optional[Dict[str, Any]]
) -> Tuple[Dict[str, Optional[str]], Dict[str, Optional[str]]]:
"""Validates and sanitizes parameters dict into expected dict format.
Given a dict mapping user-specified param names to values, computes parameters to
substitute into the command for this entry point. Returns a tuple (params, extra_params)
where `params` contains key-value pairs for parameters specified in the entry point
definition, and `extra_params` contains key-value pairs for additional parameters passed
by the user.
"""
if user_parameters is None:
user_parameters = {}
# Validate params before attempting to resolve parameter values
self._validate_parameters(user_parameters)
final_params = {}
extra_params = {}
parameter_keys = list(self.parameters.keys())
for key in parameter_keys:
param_obj = self.parameters[key]
key_position = parameter_keys.index(key)
value = (
user_parameters[key]
if key in user_parameters
else self.parameters[key].default
)
final_params[key] = param_obj.compute_value(value, key_position)
for key in user_parameters:
if key not in final_params:
extra_params[key] = user_parameters[key]
return (
self._sanitize_param_dict(final_params),
self._sanitize_param_dict(extra_params),
)
def compute_command(self, user_parameters: Optional[Dict[str, Any]]) -> str:
"""Converts user parameter dictionary to a string."""
params, extra_params = self.compute_parameters(user_parameters)
command_with_params = self.command.format(**params)
command_arr = [command_with_params]
command_arr.extend(
[
f"--{key} {value}" if value is not None else f"--{key}"
for key, value in extra_params.items()
]
)
return " ".join(command_arr)
@staticmethod
def _sanitize_param_dict(param_dict: Dict[str, Any]) -> Dict[str, Optional[str]]:
"""Sanitizes a dictionary of parameters, quoting values, except for keys with None values."""
return {
(str(key)): (quote(str(value)) if value is not None else None)
for key, value in param_dict.items()
}
def get_entry_point_command(
entry_point: Optional["EntryPoint"], parameters: Dict[str, Any]
) -> str:
"""Returns the shell command to execute in order to run the specified entry point.
Arguments:
entry_point: Entry point to run
parameters: Parameters (dictionary) for the entry point command
Returns:
List of strings representing the shell command to be executed
"""
if entry_point is None:
return ""
return entry_point.compute_command(parameters)
def create_project_from_spec(launch_spec: Dict[str, Any], api: Api) -> LaunchProject:
"""Constructs a LaunchProject instance using a launch spec.
Arguments:
launch_spec: Dictionary representation of launch spec
api: Instance of wandb.apis.internal Api
Returns:
An initialized `LaunchProject` object
"""
name: Optional[str] = None
if launch_spec.get("name"):
name = launch_spec["name"]
return LaunchProject(
launch_spec.get("uri"),
api,
launch_spec,
launch_spec["entity"],
launch_spec["project"],
name,
launch_spec.get("docker", {}),
launch_spec.get("git", {}),
launch_spec.get("overrides", {}),
launch_spec.get("resource", "local"),
launch_spec.get("resource_args", {}),
launch_spec.get("cuda", None),
)
def fetch_and_validate_project(
launch_project: LaunchProject, api: Api
) -> LaunchProject:
"""Fetches a project into a local directory, adds the config values to the directory, and validates the first entrypoint for the project.
Arguments:
launch_project: LaunchProject to fetch and validate.
api: Instance of wandb.apis.internal Api
Returns:
A validated `LaunchProject` object.
"""
if launch_project.source == LaunchSource.DOCKER:
return launch_project
if launch_project.source == LaunchSource.LOCAL:
if not launch_project._entry_points:
wandb.termlog("Entry point for repo not specified, defaulting to main.py")
launch_project.add_entry_point("main.py")
else:
launch_project._fetch_project_local(internal_api=api)
assert launch_project.project_dir is not None
# this prioritizes pip, and we don't support any cases where both are present
# conda projects when uploaded to wandb become pip projects via requirements.frozen.txt, wandb doesn't preserve conda envs
if os.path.exists(
os.path.join(launch_project.project_dir, "requirements.txt")
) or os.path.exists(
os.path.join(launch_project.project_dir, "requirements.frozen.txt")
):
launch_project.deps_type = "pip"
elif os.path.exists(os.path.join(launch_project.project_dir, "environment.yml")):
launch_project.deps_type = "conda"
first_entry_point = list(launch_project._entry_points.keys())[0]
_logger.info("validating entrypoint parameters")
launch_project.get_entry_point(first_entry_point)._validate_parameters(
launch_project.override_args
)
return launch_project
def create_metadata_file(
launch_project: LaunchProject,
image_uri: str,
sanitized_entrypoint_str: str,
docker_args: Dict[str, Any],
sanitized_dockerfile_contents: str,
) -> None:
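    """Write the launch metadata (spec, image uri, entrypoint command, docker
    args, and dockerfile contents) to DEFAULT_LAUNCH_METADATA_PATH inside the
    project directory."""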
assert launch_project.project_dir is not None
with open(
os.path.join(launch_project.project_dir, DEFAULT_LAUNCH_METADATA_PATH),
"w",
) as f:
json.dump(
{
**launch_project.launch_spec,
"image_uri": image_uri,
"command": sanitized_entrypoint_str,
"docker_args": docker_args,
"dockerfile_contents": sanitized_dockerfile_contents,
},
f,
)
| [] | [] | [
"SHELL"
] | [] | ["SHELL"] | python | 1 | 0 | |
src/github.com/smartystreets/goconvey/web/goconvey-server/watcher.go | package main
import (
"fmt"
"os"
"path/filepath"
"strings"
)
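// updateWatch adds watches for any new Go package directories under root and
// removes watches for directories that no longer exist.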
func updateWatch(root string) {
addNewWatches(root)
removeExpiredWatches()
}
func addNewWatches(root string) {
if rootWatch != root {
setGoPath(root)
adjustRoot(root)
}
watchNestedPaths(root)
}
func setGoPath(root string) {
goPath := root
fmt.Println("GOPATH (before):", os.Getenv("GOPATH"))
index := strings.Index(root, separator+"src")
if index > -1 {
goPath = root[:index]
}
err := os.Setenv("GOPATH", goPath)
if err != nil {
fmt.Println("Error setting GOPATH:", err)
}
fmt.Println("GOPATH (after):", os.Getenv("GOPATH"))
}
func adjustRoot(root string) {
clearAllWatches()
addWatch(root)
rootWatch = root
fmt.Println("Watching new root:", root)
}
func clearAllWatches() {
for path, _ := range watched {
removeWatch(path)
}
}
func watchNestedPaths(root string) {
filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
if matches, _ := filepath.Glob(filepath.Join(path, "*.go")); len(matches) > 0 {
addWatch(path)
}
return nil
})
}
func addWatch(path string) {
if watching(path) {
return
}
if !looksLikeGoPackage(path) {
return
}
watched[path] = true
watcher.Watch(path)
fmt.Println("Watching:", path)
}
func looksLikeGoPackage(path string) bool {
_, resolved := resolvePackageName(path)
return resolved
}
func watching(path string) bool {
for w, _ := range watched {
if w == path {
return true
}
}
return false
}
func removeExpiredWatches() {
for path, _ := range watched {
if !isDirectory(path) {
removeWatch(path)
}
}
}
func removeWatch(path string) {
delete(watched, path)
watcher.RemoveWatch(path)
fmt.Println("No longer watching:", path)
}
func isDirectory(directory string) bool {
info, err := os.Stat(directory)
return err == nil && info.IsDir()
}
| [
"\"GOPATH\"",
"\"GOPATH\""
] | [] | [
"GOPATH"
] | [] | ["GOPATH"] | go | 1 | 0 | |
scripts/devSetup/factories.py | __author__ = u'schmatz'
import errors
import configuration
import mongo
import node
import repositoryInstaller
import ruby
import shutil
import os
import glob
import subprocess
def print_computer_information(os_name,address_width):
print(os_name + " detected, architecture: " + str(address_width) + " bit")
def constructSetup():
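    """Detect the host operating system and return the matching
    platform-specific setup object."""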
config = configuration.Configuration()
address_width = config.system.get_virtual_memory_address_width()
if config.system.operating_system == u"mac":
print_computer_information("Mac",address_width)
return MacSetup(config)
elif config.system.operating_system == u"win":
print_computer_information("Windows",address_width)
raise NotImplementedError("Windows is not supported at this time.")
elif config.system.operating_system == u"linux":
print_computer_information("Linux",address_width)
return LinuxSetup(config)
class SetupFactory(object):
def __init__(self,config):
self.config = config
self.mongo = mongo.MongoDB(self.config)
self.node = node.Node(self.config)
self.repoCloner = repositoryInstaller.RepositoryInstaller(self.config)
self.ruby = ruby.Ruby(self.config)
def setup(self):
mongo_version_string = ""
try:
mongo_version_string = subprocess.check_output("mongod --version",shell=True)
mongo_version_string = mongo_version_string.decode(encoding='UTF-8')
except Exception as e:
print("Mongod not found: %s"%e)
if "v2.6." not in mongo_version_string:
if mongo_version_string:
print("Had MongoDB version: %s"%mongo_version_string)
print("MongoDB not found, so installing a local copy...")
self.mongo.download_dependencies()
self.mongo.install_dependencies()
self.node.download_dependencies()
self.node.install_dependencies()
#self.repoCloner.cloneRepository()
self.repoCloner.install_node_packages()
self.ruby.install_gems()
print ("Doing initial bower install...")
bower_path = self.config.directory.root_dir + os.sep + "coco" + os.sep + "node_modules" + os.sep + ".bin" + os.sep + "bower"
subprocess.call(bower_path + " --allow-root install",shell=True,cwd=self.config.directory.root_dir + os.sep + "coco")
print("Removing temporary directories")
self.config.directory.remove_tmp_directory()
print("Changing permissions of files...")
#TODO: Make this more robust and portable(doesn't pose security risk though)
subprocess.call("chmod -R 755 " + self.config.directory.root_dir + os.sep + "coco" + os.sep + "bin",shell=True)
chown_command = "chown -R " + os.getenv("SUDO_USER") + " bower_components"
chown_directory = self.config.directory.root_dir + os.sep + "coco"
subprocess.call(chown_command,shell=True,cwd=chown_directory)
print("")
print("Done! If you want to start the server, head into coco/bin and run ")
print("1. ./coco-mongodb")
print("2. ./coco-brunch ")
print("3. ./coco-dev-server")
print("NOTE: brunch may need to be run as sudo if it doesn't work (ulimit needs to be set higher than default)")
print("")
        print("Before you can play any levels, you must update the database. See the Setup section here:")
print("https://github.com/codecombat/codecombat/wiki/Dev-Setup:-Linux#installing-the-database")
print("")
print("Go to http://localhost:3000 to see your local CodeCombat in action!")
def cleanup(self):
self.config.directory.remove_tmp_directory()
class MacSetup(SetupFactory):
def setup(self):
super(self.__class__, self).setup()
class WinSetup(SetupFactory):
def setup(self):
super(self.__class__, self).setup()
class LinuxSetup(SetupFactory):
def setup(self):
self.distroSetup()
super(self.__class__, self).setup()
def detectDistro(self):
distro_checks = {
"arch": "/etc/arch-release",
"ubuntu": "/etc/lsb-release"
}
for distro, path in distro_checks.items():
if os.path.exists(path):
return(distro)
def distroSetup(self):
distro = self.detectDistro()
if distro == "ubuntu":
print("Ubuntu installation detected. Would you like to install \n"
"NodeJS and MongoDB via apt-get? [y/N]")
if raw_input().lower() in ["y", "yes"]:
print("Adding repositories for MongoDB and NodeJS...")
try:
subprocess.check_call(["apt-key", "adv",
"--keyserver",
"hkp://keyserver.ubuntu.com:80",
"--recv", "7F0CEB10"])
subprocess.check_call(["add-apt-repository",
"deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen"])
subprocess.check_call("curl -sL "
"https://deb.nodesource.com/setup"
" | bash", shell=True)
subprocess.check_call(["apt-get", "update"])
except subprocess.CalledProcessError as err:
                    print("Adding repositories failed. Retry, Install without "
                          "adding \nrepositories, Skip apt-get installation, "
                          "or Abort? [r/i/s/A]")
answer = raw_input().lower()
if answer in ["r", "retry"]:
return(self.distroSetup())
elif answer in ["i", "install"]:
pass
elif answer in ["s", "skip"]:
return()
else:
exit(1)
else:
try:
print("Repositories added successfully. Installing NodeJS and MongoDB.")
subprocess.check_call(["apt-get", "install",
"nodejs", "mongodb-org",
"build-essential", "-y"])
except subprocess.CalledProcessError as err:
print("Installation via apt-get failed. \nContinue "
"with manual installation, or Abort? [c/A]")
if raw_input().lower() in ["c", "continue"]:
return()
else:
exit(1)
else:
print("NodeJS and MongoDB installed successfully. "
"Starting MongoDB.")
#try:
#subprocess.check_call(["service", "mongod", "start"])
#except subprocess.CalledProcessError as err:
#print("Mongo failed to start. Aborting.")
#exit(1)
if distro == "arch":
print("Arch Linux detected. Would you like to install \n"
"NodeJS and MongoDB via pacman? [y/N]")
if raw_input().lower() in ["y", "yes"]:
try:
subprocess.check_call(["pacman", "-S",
"nodejs", "mongodb",
"--noconfirm"])
except subprocess.CalledProcessError as err:
print("Installation failed. Retry, Continue, or "
"Abort? [r/c/A]")
answer = raw_input().lower()
if answer in ["r", "retry"]:
return(self.distroSetup())
elif answer in ["c", "continue"]:
return()
else:
exit(1)
| [] | [] | [
"SUDO_USER"
] | [] | ["SUDO_USER"] | python | 1 | 0 | |
CellProfiler-Analyst.py | # =============================================================================
#
# Main file for CellProfiler-Analyst
#
# =============================================================================
import sys
import os
import os.path
import logging
from cpa.dimensionreduction import DimensionReduction
from cpa.guiutils import create_status_bar
from cpa.util.version import display_version
from cpa.properties import Properties
from cpa.dbconnect import DBConnect
from cpa.classifier import Classifier
from cpa.tableviewer import TableViewer
from cpa.plateviewer import PlateViewer
from cpa.imageviewer import ImageViewer
from cpa.imagegallery import ImageGallery
from cpa.boxplot import BoxPlot
from cpa.scatter import Scatter
from cpa.histogram import Histogram
from cpa.density import Density
from cpa.querymaker import QueryMaker
from cpa.normalizationtool import NormalizationUI
class FuncLog(logging.Handler):
'''A logging handler that sends logs to an update function.
'''
def __init__(self, update):
logging.Handler.__init__(self)
self.update = update
def emit(self, record):
self.update(self.format(record))
logging.basicConfig(level=logging.DEBUG)
# Handles args to MacOS "Apps"
if len(sys.argv) > 1 and sys.argv[1].startswith('-psn'):
del sys.argv[1]
if len(sys.argv) > 1:
# Load a properties file if passed in args
p = Properties()
if sys.argv[1] == '--incell':
# GE Incell xml wrapper
# LOOP
p.LoadIncellFiles(sys.argv[2], sys.argv[3], sys.argv[4:])
else:
p.LoadFile(sys.argv[1])
from cpa.cpatool import CPATool
import inspect
import cpa.multiclasssql
# ---
import wx
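# Toolbar and menu command IDs for the CPA tools launched from the main window.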
ID_CLASSIFIER = wx.NewIdRef()
ID_IMAGE_GALLERY = wx.NewIdRef()
ID_PLATE_VIEWER = wx.NewIdRef()
ID_TABLE_VIEWER = wx.NewIdRef()
ID_IMAGE_VIEWER = wx.NewIdRef()
ID_SCATTER = wx.NewIdRef()
ID_HISTOGRAM = wx.NewIdRef()
ID_DENSITY = wx.NewIdRef()
ID_BOXPLOT = wx.NewIdRef()
ID_DIMENSREDUX = wx.NewIdRef()
ID_NORMALIZE = wx.NewIdRef()
def get_cpatool_subclasses():
'''returns a list of CPATool subclasses.
'''
class_objs = inspect.getmembers(sys.modules[__name__], inspect.isclass)
return [klass for name, klass in class_objs
if issubclass(klass, CPATool) and klass!=CPATool]
class MainGUI(wx.Frame):
'''Main GUI frame for CellProfiler Analyst
'''
def __init__(self, properties, parent, id=-1, **kwargs):
from cpa.icons import get_icon, get_cpa_icon
wx.Frame.__init__(self, parent, id=id, title='CellProfiler Analyst %s'%(display_version), **kwargs)
self.properties = properties
self.SetIcon(get_cpa_icon())
self.tbicon = None
self.SetName('CPA')
self.Center(wx.HORIZONTAL)
self.status_bar = create_status_bar(self)
self.log_io = True
#
# Setup toolbar
#
tb = self.CreateToolBar(wx.TB_HORZ_TEXT|wx.TB_FLAT)
tb.SetToolBitmapSize((32,32))
tb.SetSize((-1,132))
tb.AddTool(ID_IMAGE_GALLERY.GetId(), 'Image Gallery', get_icon("image_gallery").ConvertToBitmap(), shortHelp='Image Gallery')
tb.AddTool(ID_CLASSIFIER.GetId(), 'Classifier', get_icon("classifier").ConvertToBitmap(), shortHelp='Classifier')
# tb.AddLabelTool(ID_CLASSIFIER, 'PixelClassifier', get_icon("pixelclassifier").ConvertToBitmap(), shortHelp='Pixel-based Classifier', longHelp='Launch pixel-based Classifier')
tb.AddTool(ID_PLATE_VIEWER.GetId(), 'Plate Viewer', get_icon("platemapbrowser").ConvertToBitmap(), shortHelp='Plate Viewer')
# tb.AddLabelTool(ID_IMAGE_VIEWER, 'ImageViewer', get_icon("image_viewer").ConvertToBitmap(), shortHelp='Image Viewer', longHelp='Launch ImageViewer')
tb.AddTool(ID_SCATTER.GetId(), 'Scatter Plot', get_icon("scatter").ConvertToBitmap(), shortHelp='Scatter Plot')
tb.AddTool(ID_HISTOGRAM.GetId(), 'Histogram', get_icon("histogram").ConvertToBitmap(), shortHelp='Histogram')
tb.AddTool(ID_DENSITY.GetId(), 'Density Plot', get_icon("density").ConvertToBitmap(), shortHelp='Density Plot')
tb.AddTool(ID_BOXPLOT.GetId(), 'Box Plot', get_icon("boxplot").ConvertToBitmap(), shortHelp='Box Plot')
tb.AddTool(ID_DIMENSREDUX.GetId(), 'Reduction', get_icon("dimensionality").ConvertToBitmap(), shortHelp='Dimensionality Reduction')
tb.AddTool(ID_TABLE_VIEWER.GetId(), 'Table Viewer', get_icon("data_grid").ConvertToBitmap(), shortHelp='Table Viewer')
# tb.AddLabelTool(ID_NORMALIZE, 'Normalize', get_icon("normalize").ConvertToBitmap(), shortHelp='Normalization Tool', longHelp='Launch Feature Normalization Tool')
tb.Realize()
#
# Setup menu items
#
self.SetMenuBar(wx.MenuBar())
fileMenu = wx.Menu()
loadPropertiesMenuItem = fileMenu.Append(-1, 'Load properties file', helpString='Load a properties file.')
savePropertiesMenuItem = fileMenu.Append(-1, 'Save properties\tCtrl+S', helpString='Save the properties.')
## loadWorkspaceMenuItem = fileMenu.Append(-1, 'Load properties\tCtrl+O', helpString='Open another properties file.')
fileMenu.AppendSeparator()
saveWorkspaceMenuItem = fileMenu.Append(-1, 'Save workspace\tCtrl+Shift+S', helpString='Save the currently open plots and settings.')
loadWorkspaceMenuItem = fileMenu.Append(-1, 'Load workspace\tCtrl+Shift+O', helpString='Open plots saved in a previous workspace.')
fileMenu.AppendSeparator()
saveLogMenuItem = fileMenu.Append(-1, 'Save log', helpString='Save the contents of the log window.')
fileMenu.AppendSeparator()
self.exitMenuItem = fileMenu.Append(wx.ID_EXIT, 'Exit\tCtrl+Q', helpString='Exit classifier')
self.GetMenuBar().Append(fileMenu, 'File')
toolsMenu = wx.Menu()
imageGalleryMenuItem = toolsMenu.Append(ID_IMAGE_GALLERY, 'Image Gallery Viewer\tCtrl+Shift+I', helpString='Launches the Image Gallery')
classifierMenuItem = toolsMenu.Append(ID_CLASSIFIER, 'Classifier\tCtrl+Shift+C', helpString='Launches Classifier.')
plateMapMenuItem = toolsMenu.Append(ID_PLATE_VIEWER, 'Plate Viewer\tCtrl+Shift+P', helpString='Launches the Plate Viewer tool.')
#imageViewerMenuItem = toolsMenu.Append(ID_IMAGE_VIEWER, 'Image Viewer\tCtrl+Shift+I', helpString='Launches the ImageViewer tool.')
scatterMenuItem = toolsMenu.Append(ID_SCATTER, 'Scatter Plot\tCtrl+Shift+A', helpString='Launches the Scatter Plot tool.')
histogramMenuItem = toolsMenu.Append(ID_HISTOGRAM, 'Histogram Plot\tCtrl+Shift+H', helpString='Launches the Histogram Plot tool.')
densityMenuItem = toolsMenu.Append(ID_DENSITY, 'Density Plot\tCtrl+Shift+D', helpString='Launches the Density Plot tool.')
boxplotMenuItem = toolsMenu.Append(ID_BOXPLOT, 'Box Plot\tCtrl+Shift+B', helpString='Launches the Box Plot tool.')
dimensionMenuItem = toolsMenu.Append(ID_DIMENSREDUX, 'Dimensionality Reduction', helpString='Launches the Dimensionality Reduction tool.')
dataTableMenuItem = toolsMenu.Append(ID_TABLE_VIEWER, 'Table Viewer\tCtrl+Shift+T', helpString='Launches the Table Viewer tool.')
normalizeMenuItem = toolsMenu.Append(ID_NORMALIZE, 'Normalization Tool\tCtrl+Shift+T', helpString='Launches a tool for generating normalized values for measurement columns in your tables.')
self.GetMenuBar().Append(toolsMenu, 'Tools')
logMenu = wx.Menu()
debugMenuItem = logMenu.AppendRadioItem(-1, 'Debug\tCtrl+1', help='Logging window will display debug-level messages.')
infoMenuItem = logMenu.AppendRadioItem(-1, 'Info\tCtrl+2', help='Logging window will display info-level messages.')
warnMenuItem = logMenu.AppendRadioItem(-1, 'Warnings\tCtrl+3', help='Logging window will display warning-level messages.')
errorMenuItem = logMenu.AppendRadioItem(-1, 'Errors\tCtrl+4', help='Logging window will display error-level messages.')
criticalMenuItem = logMenu.AppendRadioItem(-1, 'Critical\tCtrl+5', help='Logging window will only display critical messages.')
logioItem = logMenu.AppendCheckItem(-1, item='Log image loading', help='Log image loader events')
infoMenuItem.Check()
logioItem.Check()
self.GetMenuBar().Append(logMenu, 'Logging')
advancedMenu = wx.Menu()
#normalizeMenuItem = advancedMenu.Append(-1, 'Launch feature normalization tool', helpString='Launches a tool for generating normalized values for measurement columns in your tables.')
queryMenuItem = advancedMenu.Append(-1, 'Launch SQL query tool', helpString='Opens a tool for making SQL queries to the CPA database. Advanced users only.')
clearTableLinksMenuItem = advancedMenu.Append(-1, 'Clear table linking information', helpString='Removes the tables from your database that tell CPA how to link your tables.')
self.GetMenuBar().Append(advancedMenu, 'Advanced')
import cpa.helpmenu
self.GetMenuBar().Append(cpa.helpmenu.make_help_menu(self, main=True), 'Help')
# console and logging
self.console = wx.TextCtrl(self, -1, '', style=wx.TE_MULTILINE|wx.TE_READONLY|wx.TE_RICH2)
self.console.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
# Black background and white font
self.console.SetDefaultStyle(wx.TextAttr(wx.WHITE,wx.BLACK))
self.console.SetBackgroundColour('#000000')
log_level = logging.INFO # INFO is the default log level
self.logr = logging.getLogger()
self.set_log_level(log_level)
self.log_text = ""
def update(x):
self.log_text += x+'\n'
hdlr = FuncLog(update)
# hdlr.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
# hdlr.setFormatter(logging.Formatter('%(levelname)s | %(name)s | %(message)s [@ %(asctime)s in %(filename)s:%(lineno)d]'))
self.logr.addHandler(hdlr)
# log_levels are 10,20,30,40,50
logMenu.GetMenuItems()[(log_level//10)-1].Check()
self.Bind(wx.EVT_MENU, lambda _:self.set_log_level(logging.DEBUG), debugMenuItem)
self.Bind(wx.EVT_MENU, lambda _:self.set_log_level(logging.INFO), infoMenuItem)
self.Bind(wx.EVT_MENU, lambda _:self.set_log_level(logging.WARN), warnMenuItem)
self.Bind(wx.EVT_MENU, lambda _:self.set_log_level(logging.ERROR), errorMenuItem)
self.Bind(wx.EVT_MENU, lambda _:self.set_log_level(logging.CRITICAL), criticalMenuItem)
self.Bind(wx.EVT_MENU, self.on_toggle_iologging, logioItem)
self.Bind(wx.EVT_MENU, self.on_load_properties, loadPropertiesMenuItem)
self.Bind(wx.EVT_MENU, self.on_save_properties, savePropertiesMenuItem)
self.Bind(wx.EVT_MENU, self.on_save_workspace, saveWorkspaceMenuItem)
self.Bind(wx.EVT_MENU, self.on_load_workspace, loadWorkspaceMenuItem)
self.Bind(wx.EVT_MENU, self.save_log, saveLogMenuItem)
self.Bind(wx.EVT_MENU, self.launch_normalization_tool, normalizeMenuItem)
self.Bind(wx.EVT_MENU, self.clear_link_tables, clearTableLinksMenuItem)
self.Bind(wx.EVT_MENU, self.launch_query_maker, queryMenuItem)
self.Bind(wx.EVT_TOOL, self.launch_classifier, id=ID_CLASSIFIER)
self.Bind(wx.EVT_TOOL, self.launch_plate_map_browser, id=ID_PLATE_VIEWER)
self.Bind(wx.EVT_TOOL, self.launch_table_viewer, id=ID_TABLE_VIEWER)
self.Bind(wx.EVT_TOOL, self.launch_image_viewer, id=ID_IMAGE_VIEWER)
self.Bind(wx.EVT_TOOL, self.launch_image_gallery, id=ID_IMAGE_GALLERY)
self.Bind(wx.EVT_TOOL, self.launch_scatter_plot, id=ID_SCATTER)
self.Bind(wx.EVT_TOOL, self.launch_histogram_plot, id=ID_HISTOGRAM)
self.Bind(wx.EVT_TOOL, self.launch_density_plot, id=ID_DENSITY)
self.Bind(wx.EVT_TOOL, self.launch_box_plot, id=ID_BOXPLOT)
self.Bind(wx.EVT_TOOL, self.launch_dimens_redux, id=ID_DIMENSREDUX)
self.Bind(wx.EVT_TOOL, self.launch_normalization_tool, id=ID_NORMALIZE)
self.Bind(wx.EVT_MENU, self.on_close, self.exitMenuItem)
self.Bind(wx.EVT_CLOSE, self.on_close)
self.Bind(wx.EVT_IDLE, self.on_idle)
def confirm_properties(self):
if self.properties.is_initialized():
return True
else:
dlg = wx.MessageDialog(self, 'A valid properties file must be loaded to use this functionality',
'Properties file needed', wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
return False
def launch_classifier(self, evt=None):
if self.confirm_properties():
classifier = Classifier(parent=self, properties=self.properties)
classifier.Show(True)
def launch_plate_map_browser(self, evt=None):
if self.confirm_properties():
self.pv = PlateViewer(parent=self)
self.pv.Show(True)
def launch_table_viewer(self, evt=None):
if self.confirm_properties():
table = TableViewer(parent=self)
table.new_blank_table(100,10)
table.Show(True)
def launch_scatter_plot(self, evt=None):
if self.confirm_properties():
scatter = Scatter(parent=self)
scatter.Show(True)
def launch_histogram_plot(self, evt=None):
if self.confirm_properties():
hist = Histogram(parent=self)
hist.Show(True)
def launch_density_plot(self, evt=None):
if self.confirm_properties():
density = Density(parent=self)
density.Show(True)
def launch_image_viewer(self, evt=None):
if self.confirm_properties():
imviewer = ImageViewer(parent=self)
imviewer.Show(True)
def launch_image_gallery(self, evt=None):
if self.confirm_properties():
colViewer = ImageGallery(parent=self, properties=self.properties)
colViewer.Show(True)
def launch_box_plot(self, evt=None):
if self.confirm_properties():
boxplot = BoxPlot(parent=self)
boxplot.Show(True)
def launch_dimens_redux(self, evt=None):
if self.confirm_properties():
dimens = DimensionReduction(parent=self)
dimens.Show(True)
def launch_query_maker(self, evt=None):
if self.confirm_properties():
querymaker = QueryMaker(parent=self)
querymaker.Show(True)
def launch_normalization_tool(self, evt=None):
if self.confirm_properties():
normtool = NormalizationUI(parent=self)
normtool.Show(True)
def on_toggle_iologging(self, evt):
if evt.IsChecked():
self.log_io = True
else:
self.log_io = False
def on_load_properties(self, evt):
# Show confirmation dialog.
if self.properties.is_initialized():
dlg = wx.MessageDialog(None, "Loading a new file will close all windows and clear unsaved data. Proceed?", 'Load properties file', wx.OK|wx.CANCEL)
response = dlg.ShowModal()
if response != wx.ID_OK:
return
# Close all subwindows
for window in self.GetChildren():
if isinstance(window, wx.Frame):
window.Destroy()
# Shut down existing connections and wipe properties.
db = DBConnect()
db.Disconnect()
p = Properties()
p.clear()
from cpa.datamodel import DataModel
DataModel.forget()
from cpa.trainingset import CellCache
CellCache.forget()
self.console.Clear()
if not p.is_initialized():
from cpa.guiutils import show_load_dialog
if not show_load_dialog():
example_link_address = 'cellprofileranalyst.org'
dlg = wx.MessageDialog(None, 'CellProfiler Analyst requires a properties file. Download an example at %s' % (
example_link_address), 'Properties file required', wx.OK)
response = dlg.ShowModal()
logging.error('CellProfiler Analyst requires a properties file.')
else:
db.connect()
db.register_gui_parent(self)
def on_save_properties(self, evt):
if not self.confirm_properties():
return
p = Properties()
dirname, filename = os.path.split(p._filename)
ext = os.path.splitext(p._filename)[-1]
dlg = wx.FileDialog(self, message="Save properties as...", defaultDir=dirname,
defaultFile=filename, wildcard=ext,
style=wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT|wx.FD_CHANGE_DIR)
if dlg.ShowModal() == wx.ID_OK:
p.save_file(dlg.GetPath())
def on_save_workspace(self, evt):
if not self.confirm_properties():
return
p = Properties()
dlg = wx.FileDialog(self, message="Save workspace as...", defaultDir=os.getcwd(),
defaultFile='%s_%s.workspace'%(os.path.splitext(os.path.split(p._filename)[1])[0], p.image_table),
style=wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT|wx.FD_CHANGE_DIR)
if dlg.ShowModal() == wx.ID_OK:
wx.GetApp().save_workspace(dlg.GetPath())
def on_load_workspace(self, evt):
if not self.confirm_properties():
return
dlg = wx.FileDialog(self, "Select the file containing your CPAnalyst workspace...", wildcard="Workspace file (*.workspace)|*.workspace",
defaultDir=os.getcwd(), style=wx.FD_OPEN|wx.FD_CHANGE_DIR)
if dlg.ShowModal() == wx.ID_OK:
wx.GetApp().load_workspace(dlg.GetPath())
def save_log(self, evt=None):
dlg = wx.FileDialog(self, message="Save log as...", defaultDir=os.getcwd(),
defaultFile='CPA_log.txt', wildcard='txt',
style=wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT|wx.FD_CHANGE_DIR)
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
f = open(filename, 'w')
f.write(self.console.Value)
logging.info('Log saved to "%s"'%filename)
def set_log_level(self, level):
self.logr.setLevel(level)
# cheat the logger so these always get displayed
self.console.AppendText('Logging level: %s\n'%(logging.getLevelName(level)))
def clear_link_tables(self, evt=None):
if not self.confirm_properties():
return
p = Properties()
dlg = wx.MessageDialog(self, 'This will delete the tables '
'"%s" and "%s" from your database. '
'CPA will automatically recreate these tables as it '
'discovers how your database is linked. Are you sure you '
'want to proceed?'
%(p.link_tables_table, p.link_columns_table),
'Clear table linking information?',
wx.YES_NO|wx.NO_DEFAULT|wx.ICON_QUESTION)
response = dlg.ShowModal()
if response != wx.ID_YES:
return
db = DBConnect()
db.execute('DROP TABLE IF EXISTS %s'%(p.link_tables_table))
db.execute('DROP TABLE IF EXISTS %s'%(p.link_columns_table))
db.Commit()
def on_close(self, evt=None):
        # Classifier needs to be told to close so it can clean up its threads
classifier = wx.FindWindowById(ID_CLASSIFIER) or wx.FindWindowByName('Classifier')
if classifier and classifier.Close() == False:
return
if any(wx.GetApp().get_plots()):
dlg = wx.MessageDialog(self, 'Some tools are open, are you sure you want to quit CPA?', 'Quit CellProfiler Analyst?', wx.YES_NO|wx.NO_DEFAULT|wx.ICON_QUESTION)
response = dlg.ShowModal()
if response != wx.ID_YES:
return
try:
logging.debug("Shutting of Java VM")
import javabridge
if javabridge.get_env() is not None:
javabridge.kill_vm()
except:
logging.debug("Failed to kill the Java VM")
# Blow up EVVVVERYTHIIINGGG!!! Muahahahahhahahah!
for win in wx.GetTopLevelWindows():
logging.debug('Destroying: %s'%(win))
win.Destroy()
if self.tbicon is not None:
self.tbicon.Destroy()
self.Destroy()
def on_idle(self, evt=None):
if self.log_text != '':
self.console.AppendText(self.log_text)
self.log_text = ''
class CPAnalyst(wx.App):
'''The CPAnalyst application.
This launches the main UI, and keeps track of the session.
'''
def Start(self):
'''Initialize CPA
'''
if hasattr(sys, "frozen") and sys.platform == "darwin":
            # Some versions of macOS like to put CPA in a sandbox. If we're frozen, Java should be packed in,
            # so let's just figure out the directory at run time.
os.environ["CP_JAVA_HOME"] = os.path.abspath(os.path.join(sys.prefix, "..", "Resources/Home"))
elif sys.platform == "win32":
# Windows gets very upset if system locale and wxpython locale don't match exactly.
import locale
# Need to startup wx in English, otherwise C++ can't load images.
self.locale = wx.Locale(wx.LANGUAGE_ENGLISH)
# Ensure Python uses the same locale as wx
try:
locale.setlocale(locale.LC_ALL, self.locale.GetName())
except:
logging.info(f"Python rejected the system locale detected by WX ('{self.locale.GetName()}').\n"
"This shouldn't cause problems, but please let us know if you encounter errors.")
p = Properties()
self.frame = MainGUI(p, None, size=(1000,-1))
self.frame.Show() # Show frame
if not p.is_initialized():
from cpa.guiutils import show_load_dialog
try:
if not show_load_dialog():
example_link_address = 'cellprofileranalyst.org'
dlg = wx.MessageDialog(None, 'CellProfiler Analyst requires a properties file. Download an example at %s' % (
example_link_address), 'Properties file required', wx.OK)
response = dlg.ShowModal()
logging.error('CellProfiler Analyst requires a properties file.')
return False
else:
db = DBConnect()
db.connect()
db.register_gui_parent(self.frame)
except Exception as e:
p._initialized = False
# Fully raising the exception during this startup sequence will crash CPA.
# So we'll display it manually.
show_exception_as_dialog(type(e), e, e.__traceback__, raisefurther=False)
logging.critical(e)
try:
from cpa.updatechecker import check_update
check_update(self.frame, event=False)
except:
logging.warn("CPA was unable to check for updates.")
return True
def get_plots(self):
'''return a list of all plots'''
return [win for win in self.frame.Children if issubclass(type(win), CPATool)]
def get_plot(self, name):
'''return the plot with the given name'''
return wx.FindWindowByName(name)
def load_workspace(self, filepath, close_open_plots=False):
'''Loads a CPA workspace file and uses it to restore all plots, gates,
and filters that were saved.
'''
logging.info('loading workspace...')
f = open(filepath, 'U')
lines = f.read()
lines = lines.split('\n')
lines = lines [4:] # first 4 lines are header information
settings = {}
pos = 20
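        # starting offset for cascading restored plot windows down and to the right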
for i in range(len(lines)):
for cpatool in get_cpatool_subclasses():
if lines[i] == cpatool.__name__:
logging.info('opening plot "%s"'%(lines[i]))
while lines[i+1].startswith('\t'):
i += 1
setting, value = list(map(str.strip, lines[i].split(':', 1)))
settings[setting] = value
pos += 15
plot = cpatool(parent=self.frame, pos=(pos,pos))
plot.Show(True)
plot.load_settings(settings)
logging.info('...done loading workspace')
def save_workspace(self, filepath):
'''Saves the current CPA workspace. This includes the current settings
of all open tools along with any gates and filters that have been created.
'''
f = open(filepath, 'w')
f.write('CellProfiler Analyst workflow\n')
f.write('version: 1\n')
f.write('CPA version: %s\n'%(display_version))
f.write('\n')
for plot in self.get_plots():
f.write('%s\n'%(plot.tool_name))
for setting, value in list(plot.save_settings().items()):
assert ':' not in setting
f.write('\t%s : %s\n'%(setting, str(value)))
f.write('\n')
f.close()
if __name__ == "__main__":
# Initialize the app early because the fancy exception handler
    # depends on it in order to show a dialog.
app = CPAnalyst(redirect=False)
# Install our own pretty exception handler unless one has already
# been installed (e.g., a debugger)
if sys.excepthook == sys.__excepthook__:
from cpa.errors import show_exception_as_dialog
sys.excepthook = show_exception_as_dialog
app.Start()
app.MainLoop()
os._exit(0) # Enforces Exit, see issue #102
| [] | [] | [
"CP_JAVA_HOME"
] | [] | ["CP_JAVA_HOME"] | python | 1 | 0 | |
xcnet-common/src/main/java/org/itas/xcnet/common/monitor/MonitorServiceImpl.java | /*
* Copyright 2014-2015 itas group
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.itas.xcnet.common.monitor;
import java.io.InputStreamReader;
import java.io.LineNumberReader;
import java.lang.management.ManagementFactory;
import com.sun.management.OperatingSystemMXBean;
/**
 * Business logic implementation for retrieving system information.
*
* @author liuzhen<[email protected]>
 * @createTime May 22, 2015 4:39:25 PM
*/
public class MonitorServiceImpl implements IMonitorService {
    // Set this fairly long so the sample does not include the CPU usage of running this check itself, which would skew the reading
private static final int CPUTIME = 5000;
private static final int PERCENT = 100;
private static final int FAULTLENGTH = 10;
    /**
     * Gets the current monitoring snapshot.
     * @return the populated monitoring info bean
     * @throws Exception
     * @author amg * Creation date: 2008-4-25 - 10:45:08 AM
     */
public MonitorInfoBean getMonitorInfoBean() throws Exception {
int kb = 1024;
        // memory currently available to the JVM
long totalMemory = Runtime.getRuntime().totalMemory() / kb;
        // free JVM memory
long freeMemory = Runtime.getRuntime().freeMemory() / kb;
        // maximum memory the JVM may use
long maxMemory = Runtime.getRuntime().maxMemory() / kb;
OperatingSystemMXBean osmxb = (OperatingSystemMXBean) ManagementFactory
.getOperatingSystemMXBean();
        // operating system name
String osName = System.getProperty("os.name");
        // total physical memory
long totalMemorySize = osmxb.getTotalPhysicalMemorySize() / kb;
        // free physical memory
long freePhysicalMemorySize = osmxb.getFreePhysicalMemorySize() / kb;
        // physical memory already in use
long usedMemory = (osmxb.getTotalPhysicalMemorySize() - osmxb
.getFreePhysicalMemorySize())
/ kb;
        // get the total number of threads
ThreadGroup parentThread;
for (parentThread = Thread.currentThread().getThreadGroup(); parentThread
.getParent() != null; parentThread = parentThread.getParent())
;
int totalThread = parentThread.activeCount();
double cpuRatio = 0;
if (osName.toLowerCase().startsWith("windows")) {
cpuRatio = this.getCpuRatioForWindows();
}
        // build the return object
MonitorInfoBean infoBean = new MonitorInfoBean();
infoBean.setFreeMemory(freeMemory);
infoBean.setFreePhysicalMemorySize(freePhysicalMemorySize);
infoBean.setMaxMemory(maxMemory);
infoBean.setOsName(osName);
infoBean.setTotalMemory(totalMemory);
infoBean.setTotalMemorySize(totalMemorySize);
infoBean.setTotalThread(totalThread);
infoBean.setUsedMemory(usedMemory);
infoBean.setCpuRatio(cpuRatio);
return infoBean;
}
    /**
     * Gets the CPU usage ratio.
     * @return the CPU usage ratio
     * @author amg * Creation date: 2008-4-25 - 06:05:11 PM
     */
private double getCpuRatioForWindows() {
try {
String procCmd = System.getenv("windir")
+ "//system32//wbem//wmic.exe process get Caption,CommandLine,"
+ "KernelModeTime,ReadOperationCount,ThreadCount,UserModeTime,WriteOperationCount";
            // fetch process information
long[] c0 = readCpu(Runtime.getRuntime().exec(procCmd));
Thread.sleep(CPUTIME);
long[] c1 = readCpu(Runtime.getRuntime().exec(procCmd));
if (c0 != null && c1 != null) {
long idletime = c1[0] - c0[0];
long busytime = c1[1] - c0[1];
return Double.valueOf(
PERCENT * (busytime) / (busytime + idletime))
.doubleValue();
} else {
return 0.0;
}
} catch (Exception ex) {
ex.printStackTrace();
return 0.0;
}
}
    /**
     * Reads CPU information.
     * @param proc
     * @return
     * @author amg * Creation date: 2008-4-25 - 06:10:14 PM
     */
private long[] readCpu(final Process proc) {
long[] retn = new long[2];
try {
proc.getOutputStream().close();
InputStreamReader ir = new InputStreamReader(proc.getInputStream());
LineNumberReader input = new LineNumberReader(ir);
String line = input.readLine();
if (line == null || line.length() < FAULTLENGTH) {
return null;
}
int capidx = line.indexOf("Caption");
int cmdidx = line.indexOf("CommandLine");
int rocidx = line.indexOf("ReadOperationCount");
int umtidx = line.indexOf("UserModeTime");
int kmtidx = line.indexOf("KernelModeTime");
int wocidx = line.indexOf("WriteOperationCount");
long idletime = 0;
long kneltime = 0;
long usertime = 0;
while ((line = input.readLine()) != null) {
if (line.length() < wocidx) {
continue;
}
                // Field order: Caption, CommandLine, KernelModeTime, ReadOperationCount,
                // ThreadCount, UserModeTime, WriteOperation
String caption = substring(line, capidx, cmdidx - 1)
.trim();
String cmd = substring(line, cmdidx, kmtidx - 1).trim();
if (cmd.indexOf("wmic.exe") >= 0) {
continue;
}
// log.info("line="+line);
if (caption.equals("System Idle Process")
|| caption.equals("System")) {
idletime += Long.valueOf(
substring(line, kmtidx, rocidx - 1).trim())
.longValue();
idletime += Long.valueOf(
substring(line, umtidx, wocidx - 1).trim())
.longValue();
continue;
}
kneltime += Long.valueOf(
substring(line, kmtidx, rocidx - 1).trim())
.longValue();
usertime += Long.valueOf(
substring(line, umtidx, wocidx - 1).trim())
.longValue();
}
retn[0] = idletime;
retn[1] = kneltime + usertime;
return retn;
} catch (Exception ex) {
ex.printStackTrace();
} finally {
try {
proc.getInputStream().close();
} catch (Exception e) {
e.printStackTrace();
}
}
return null;
}
    /**
     * String.subString mishandles strings containing Chinese characters
     * (it treats one Chinese character as a single byte), so this helper is used instead:
     * @param src the string to cut from
     * @param start_idx start index (inclusive)
     * @param end_idx end index (inclusive)
     * @return
     */
public static String substring(String src, int start_idx, int end_idx){
byte[] b = src.getBytes();
String tgt = "";
for(int i=start_idx; i<=end_idx; i++){
tgt +=(char)b[i];
}
return tgt;
}
    /**
     * Test method.
     * @param args
     * @throws Exception
     * @author amg * Creation date: 2008-4-30 - 04:47:29 PM
     */
public static void main(String[] args) throws Exception {
IMonitorService service = new MonitorServiceImpl();
MonitorInfoBean monitorInfo = service.getMonitorInfoBean();
System.out.println("cpu占有率=" + monitorInfo.getCpuRatio());
System.out.println("可使用内存=" + monitorInfo.getTotalMemory());
System.out.println("剩余内存=" + monitorInfo.getFreeMemory());
System.out.println("最大可使用内存=" + monitorInfo.getMaxMemory());
System.out.println("操作系统=" + monitorInfo.getOsName());
System.out.println("总的物理内存=" + monitorInfo.getTotalMemorySize() + "kb");
System.out.println("剩余的物理内存=" + monitorInfo.getFreeMemory() + "kb");
System.out.println("已使用的物理内存=" + monitorInfo.getUsedMemory() + "kb");
System.out.println("线程总数=" + monitorInfo.getTotalThread() + "kb");
}
}
| [
"\"windir\""
] | [] | [
"windir"
] | [] | ["windir"] | java | 1 | 0 | |
scons/llvm.py | """llvm
Tool-specific initialization for LLVM
"""
#
# Copyright (c) 2009 VMware, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import os
import os.path
import re
import sys
import distutils.version
import SCons.Errors
import SCons.Util
required_llvm_version = '3.3'
def generate(env):
env['llvm'] = False
try:
llvm_dir = os.environ['LLVM']
except KeyError:
# Do nothing -- use the system headers/libs
llvm_dir = None
else:
if not os.path.isdir(llvm_dir):
raise SCons.Errors.InternalError("Specified LLVM directory not found")
if env['debug']:
llvm_subdir = 'Debug'
else:
llvm_subdir = 'Release'
llvm_bin_dir = os.path.join(llvm_dir, llvm_subdir, 'bin')
if not os.path.isdir(llvm_bin_dir):
llvm_bin_dir = os.path.join(llvm_dir, 'bin')
if not os.path.isdir(llvm_bin_dir):
raise SCons.Errors.InternalError("LLVM binary directory not found")
env.PrependENVPath('PATH', llvm_bin_dir)
if env['platform'] == 'windows':
# XXX: There is no llvm-config on Windows, so assume a standard layout
if llvm_dir is None:
print('scons: LLVM environment variable must be specified when building for windows')
return
# Try to determine the LLVM version from llvm/Config/config.h
llvm_config = os.path.join(llvm_dir, 'include/llvm/Config/llvm-config.h')
if not os.path.exists(llvm_config):
print('scons: could not find %s' % llvm_config)
return
llvm_version_major_re = re.compile(r'^#define LLVM_VERSION_MAJOR ([0-9]+)')
llvm_version_minor_re = re.compile(r'^#define LLVM_VERSION_MINOR ([0-9]+)')
llvm_version = None
llvm_version_major = None
llvm_version_minor = None
for line in open(llvm_config, 'rt'):
mo = llvm_version_major_re.match(line)
if mo:
llvm_version_major = mo.group(1)
mo = llvm_version_minor_re.match(line)
if mo:
llvm_version_minor = mo.group(1)
if llvm_version_major is not None and llvm_version_minor is not None:
llvm_version = distutils.version.LooseVersion('%s.%s' % (llvm_version_major, llvm_version_minor))
if llvm_version is None:
print('scons: could not determine the LLVM version from %s' % llvm_config)
return
if llvm_version < distutils.version.LooseVersion(required_llvm_version):
print('scons: LLVM version %s found, but %s is required' % (llvm_version, required_llvm_version))
return
env.Prepend(CPPPATH = [os.path.join(llvm_dir, 'include')])
env.AppendUnique(CPPDEFINES = [
'HAVE_STDINT_H',
])
env.Prepend(LIBPATH = [os.path.join(llvm_dir, 'lib')])
# LIBS should match the output of `llvm-config --libs engine mcjit bitwriter x86asmprinter irreader`
if llvm_version >= distutils.version.LooseVersion('5.0'):
env.Prepend(LIBS = [
'LLVMX86Disassembler', 'LLVMX86AsmParser',
'LLVMX86CodeGen', 'LLVMSelectionDAG', 'LLVMAsmPrinter',
'LLVMDebugInfoCodeView', 'LLVMCodeGen',
'LLVMScalarOpts', 'LLVMInstCombine',
'LLVMTransformUtils',
'LLVMBitWriter', 'LLVMX86Desc',
'LLVMMCDisassembler', 'LLVMX86Info',
'LLVMX86AsmPrinter', 'LLVMX86Utils',
'LLVMMCJIT', 'LLVMExecutionEngine', 'LLVMTarget',
'LLVMAnalysis', 'LLVMProfileData',
'LLVMRuntimeDyld', 'LLVMObject', 'LLVMMCParser',
'LLVMBitReader', 'LLVMMC', 'LLVMCore',
'LLVMSupport',
'LLVMIRReader', 'LLVMAsmParser',
'LLVMDemangle', 'LLVMGlobalISel', 'LLVMDebugInfoMSF',
'LLVMBinaryFormat',
])
elif llvm_version >= distutils.version.LooseVersion('4.0'):
env.Prepend(LIBS = [
'LLVMX86Disassembler', 'LLVMX86AsmParser',
'LLVMX86CodeGen', 'LLVMSelectionDAG', 'LLVMAsmPrinter',
'LLVMDebugInfoCodeView', 'LLVMCodeGen',
'LLVMScalarOpts', 'LLVMInstCombine',
'LLVMTransformUtils',
'LLVMBitWriter', 'LLVMX86Desc',
'LLVMMCDisassembler', 'LLVMX86Info',
'LLVMX86AsmPrinter', 'LLVMX86Utils',
'LLVMMCJIT', 'LLVMExecutionEngine', 'LLVMTarget',
'LLVMAnalysis', 'LLVMProfileData',
'LLVMRuntimeDyld', 'LLVMObject', 'LLVMMCParser',
'LLVMBitReader', 'LLVMMC', 'LLVMCore',
'LLVMSupport',
'LLVMIRReader', 'LLVMAsmParser',
'LLVMDemangle', 'LLVMGlobalISel', 'LLVMDebugInfoMSF',
])
elif llvm_version >= distutils.version.LooseVersion('3.9'):
env.Prepend(LIBS = [
'LLVMX86Disassembler', 'LLVMX86AsmParser',
'LLVMX86CodeGen', 'LLVMSelectionDAG', 'LLVMAsmPrinter',
'LLVMDebugInfoCodeView', 'LLVMCodeGen',
'LLVMScalarOpts', 'LLVMInstCombine',
'LLVMInstrumentation', 'LLVMTransformUtils',
'LLVMBitWriter', 'LLVMX86Desc',
'LLVMMCDisassembler', 'LLVMX86Info',
'LLVMX86AsmPrinter', 'LLVMX86Utils',
'LLVMMCJIT', 'LLVMExecutionEngine', 'LLVMTarget',
'LLVMAnalysis', 'LLVMProfileData',
'LLVMRuntimeDyld', 'LLVMObject', 'LLVMMCParser',
'LLVMBitReader', 'LLVMMC', 'LLVMCore',
'LLVMSupport',
'LLVMIRReader', 'LLVMASMParser'
])
elif llvm_version >= distutils.version.LooseVersion('3.7'):
env.Prepend(LIBS = [
'LLVMBitWriter', 'LLVMX86Disassembler', 'LLVMX86AsmParser',
'LLVMX86CodeGen', 'LLVMSelectionDAG', 'LLVMAsmPrinter',
'LLVMCodeGen', 'LLVMScalarOpts', 'LLVMProfileData',
'LLVMInstCombine', 'LLVMInstrumentation', 'LLVMTransformUtils', 'LLVMipa',
'LLVMAnalysis', 'LLVMX86Desc', 'LLVMMCDisassembler',
'LLVMX86Info', 'LLVMX86AsmPrinter', 'LLVMX86Utils',
'LLVMMCJIT', 'LLVMTarget', 'LLVMExecutionEngine',
'LLVMRuntimeDyld', 'LLVMObject', 'LLVMMCParser',
'LLVMBitReader', 'LLVMMC', 'LLVMCore', 'LLVMSupport'
])
elif llvm_version >= distutils.version.LooseVersion('3.6'):
env.Prepend(LIBS = [
'LLVMBitWriter', 'LLVMX86Disassembler', 'LLVMX86AsmParser',
'LLVMX86CodeGen', 'LLVMSelectionDAG', 'LLVMAsmPrinter',
'LLVMCodeGen', 'LLVMScalarOpts', 'LLVMProfileData',
'LLVMInstCombine', 'LLVMTransformUtils', 'LLVMipa',
'LLVMAnalysis', 'LLVMX86Desc', 'LLVMMCDisassembler',
'LLVMX86Info', 'LLVMX86AsmPrinter', 'LLVMX86Utils',
'LLVMMCJIT', 'LLVMTarget', 'LLVMExecutionEngine',
'LLVMRuntimeDyld', 'LLVMObject', 'LLVMMCParser',
'LLVMBitReader', 'LLVMMC', 'LLVMCore', 'LLVMSupport'
])
elif llvm_version >= distutils.version.LooseVersion('3.5'):
env.Prepend(LIBS = [
'LLVMMCDisassembler',
'LLVMBitWriter', 'LLVMMCJIT', 'LLVMRuntimeDyld',
'LLVMX86Disassembler', 'LLVMX86AsmParser', 'LLVMX86CodeGen',
'LLVMSelectionDAG', 'LLVMAsmPrinter', 'LLVMX86Desc',
'LLVMObject', 'LLVMMCParser', 'LLVMBitReader', 'LLVMX86Info',
'LLVMX86AsmPrinter', 'LLVMX86Utils', 'LLVMJIT',
'LLVMExecutionEngine', 'LLVMCodeGen', 'LLVMScalarOpts',
'LLVMInstCombine', 'LLVMTransformUtils', 'LLVMipa',
'LLVMAnalysis', 'LLVMTarget', 'LLVMMC', 'LLVMCore',
'LLVMSupport'
])
else:
env.Prepend(LIBS = [
'LLVMMCDisassembler',
'LLVMBitWriter', 'LLVMX86Disassembler', 'LLVMX86AsmParser',
'LLVMX86CodeGen', 'LLVMX86Desc', 'LLVMSelectionDAG',
'LLVMAsmPrinter', 'LLVMMCParser', 'LLVMX86AsmPrinter',
'LLVMX86Utils', 'LLVMX86Info', 'LLVMMCJIT', 'LLVMJIT',
'LLVMExecutionEngine', 'LLVMCodeGen', 'LLVMScalarOpts',
'LLVMInstCombine', 'LLVMTransformUtils', 'LLVMipa',
'LLVMAnalysis', 'LLVMTarget', 'LLVMMC', 'LLVMCore',
'LLVMSupport', 'LLVMRuntimeDyld', 'LLVMObject'
])
env.Append(LIBS = [
'imagehlp',
'psapi',
'shell32',
'advapi32'
])
if env['msvc']:
# Some of the LLVM C headers use the inline keyword without
# defining it.
env.Append(CPPDEFINES = [('inline', '__inline')])
# Match some of the warning options from llvm/cmake/modules/HandleLLVMOptions.cmake
env.AppendUnique(CXXFLAGS = [
'/wd4355', # 'this' : used in base member initializer list
'/wd4624', # 'derived class' : destructor could not be generated because a base class destructor is inaccessible
])
if env['build'] in ('debug', 'checked'):
# LLVM libraries are static, build with /MT, and they
                # automatically link against LIBCMT. When we're doing a
# debug build we'll be linking against LIBCMTD, so disable
# that.
env.Append(LINKFLAGS = ['/nodefaultlib:LIBCMT'])
else:
llvm_config = os.environ.get('LLVM_CONFIG', 'llvm-config')
if not env.Detect(llvm_config):
print('scons: %s script not found' % llvm_config)
return
llvm_version = env.backtick('%s --version' % llvm_config).rstrip()
llvm_version = distutils.version.LooseVersion(llvm_version)
if llvm_version < distutils.version.LooseVersion(required_llvm_version):
print('scons: LLVM version %s found, but %s is required' % (llvm_version, required_llvm_version))
return
try:
# Treat --cppflags specially to prevent NDEBUG from disabling
# assertion failures in debug builds.
cppflags = env.ParseFlags('!%s --cppflags' % llvm_config)
try:
cppflags['CPPDEFINES'].remove('NDEBUG')
except ValueError:
pass
env.MergeFlags(cppflags)
# Match llvm --fno-rtti flag
cxxflags = env.backtick('%s --cxxflags' % llvm_config).split()
if '-fno-rtti' in cxxflags:
env.Append(CXXFLAGS = ['-fno-rtti'])
components = ['engine', 'mcjit', 'bitwriter', 'x86asmprinter', 'mcdisassembler', 'irreader']
env.ParseConfig('%s --libs ' % llvm_config + ' '.join(components))
env.ParseConfig('%s --ldflags' % llvm_config)
if llvm_version >= distutils.version.LooseVersion('3.5'):
env.ParseConfig('%s --system-libs' % llvm_config)
env.Append(CXXFLAGS = ['-std=c++11'])
except OSError:
print('scons: llvm-config version %s failed' % llvm_version)
return
assert llvm_version is not None
env['llvm'] = True
print('scons: Found LLVM version %s' % llvm_version)
env['LLVM_VERSION'] = llvm_version
# Define HAVE_LLVM macro with the major/minor version number (e.g., 0x0206 for 2.6)
llvm_version_major = int(llvm_version.version[0])
llvm_version_minor = int(llvm_version.version[1])
llvm_version_hex = '0x%02x%02x' % (llvm_version_major, llvm_version_minor)
env.Prepend(CPPDEFINES = [('HAVE_LLVM', llvm_version_hex)])
def exists(env):
return True
# vim:set ts=4 sw=4 et:
| [] | [] | [
"LLVM_CONFIG",
"LLVM"
] | [] | ["LLVM_CONFIG", "LLVM"] | python | 2 | 0 | |
Find-Words in Clipboard.py | #Pyhton 3.x
# -*- coding: UTF-8 -*-
import time
import traceback
import re
import os,sys
import win32gui
import win32con
import win32api
import win32clipboard
import win32ui
import ctypes
from tkinter import *
from tkinter import messagebox, tix as Tix
import tkinter.font as tf
import threading
from pynput import keyboard
from pynput.mouse import Listener as mouseListener
from PIL import Image,ImageTk
WindX = {}
WindX['self_folder'] = re.sub(r'\\','/',os.path.abspath(os.path.dirname(__file__)))
print("\nroot:",WindX['self_folder'])
sys.path.append(WindX['self_folder'])
os.chdir(WindX['self_folder'])
WindX['pcName'] = os.environ['COMPUTERNAME']
print("getcwd:",os.getcwd() + "\nDevice Name:",WindX['pcName'])
WindX['main'] = None
WindX['Label'] = None
WindX['Ctrl_Pressed'] = 0
WindX['Ctrl_C'] = 0
WindX['check'] = 0
WindX['stop_cheking'] = 0
WindX['TopLevel'] = None
WindX['Label_ImageTk'] = None
def CheckClipboard():
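    # Read the clipboard text and highlight any configured Find Words in the result pane.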
if WindX['stop_cheking']:
return
WindX['check'] +=1
WindX['Label'].configure(text='---------------- Checking: ' + str(WindX['check']) + " ----------------")
WindX['main'].update()
gs = re.split(r'x|\+', WindX['main'].geometry()) #506x152+-1418+224
WindX['main'].geometry('+'+ str(gs[2]) +'+' + str(gs[3]))
time.sleep(0.5)
text = ''
try:
win32clipboard.OpenClipboard()
text = win32clipboard.GetClipboardData(win32con.CF_UNICODETEXT)
win32clipboard.CloseClipboard()
except:
pass
matchQTY = 0
matchedTypes = {}
matchedLines = []
if len(text):
#print("\tClipboard=", text)
FindWords = []
FindWords = re.split(r'\s*\,\s*', re.sub(r'^\s+|\s+$', '', WindX['FindStringVar'].get()))
print("\tFind Words=", FindWords)
if len(FindWords):
row = 0
            ft = tf.Font(family='微软雅黑',size=10) ### many other parameters are available
a = '1.0'
WindX['Text'].tag_add('tag_match', a)
WindX['Text'].tag_config('tag_match',foreground ='blue',background='pink',font = ft)
b = '1.0'
WindX['Text'].tag_add('tag_no_match', b)
WindX['Text'].tag_config('tag_no_match',foreground ='black',font = ft)
c = '1.0'
WindX['Text'].tag_add('tag_match_word', c)
WindX['Text'].tag_config('tag_match_word',foreground ='red',background='yellow',font = ft)
d = '1.0'
WindX['Text'].tag_add('tag_x', c)
WindX['Text'].tag_config('tag_x',foreground ='black',background='white',font = ft)
for t in re.split(r'\n', text):
row +=1
matches = []
match_points = {}
#print(t)
tcopy = t
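                # search a scratch copy; each matched occurrence is blanked out so it is not found again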
for s in FindWords:
#['Category','Group','Business\s+Sector','Audit\s+Finding\s+Type', 'Audit\s+Type', 'Severity', 'Division', 'CAR\s+Type', 'Concern\s+Type', 'Type', 'Checklist']:
matchObj = re.match(r'.*({})'.format(s), tcopy, re.I)
if matchObj:
#print(matchObj.groups())
matches.append(matchObj.groups()[0])
if matchedTypes.__contains__(matchObj.groups()[0]):
matchedTypes[matchObj.groups()[0]] += 1
else:
matchedTypes[matchObj.groups()[0]] = 1
st = 0
while st > -1:
index = tcopy.find(matchObj.groups()[0], st)
if index > -1:
match_points[index + 1] = [matchObj.groups()[0], index, index + len(matchObj.groups()[0]) - 1]
tcopy = tcopy.replace(matchObj.groups()[0],' '*len(matchObj.groups()[0]),1)
#print(tcopy)
st = index + len(matchObj.groups()[0])
else:
st = -1
if len(matches):
matchQTY +=1 #len(matches)
#print(match_points)
st = 0
for s in sorted(match_points.keys()):
print('\t\t',s, match_points[s])
dt_b = ''
dt_a = ''
if match_points[s][1] > st:
dt_b = t[st : match_points[s][1]]
dt_a = t[match_points[s][1]: match_points[s][2]+1]
else:
dt_a = t[match_points[s][1]: match_points[s][2]+1]
#print(dt_b, '--', dt_a)
if dt_b:
b = str(row) + '.' + str(st)
#WindX['Text'].insert(b, dt_b, 'tag_no_match')
matchedLines.append([b, dt_b, 'tag_no_match'])
if dt_a:
a = str(row) + '.' + str(match_points[s][1])
#WindX['Text'].insert(a, dt_a, 'tag_match')
matchedLines.append([a, dt_a, 'tag_match'])
st = match_points[s][2] + 1
if st < len(t) -1:
dt_b = t[st : len(t)]
b = str(row) + '.' + str(st)
#WindX['Text'].insert(b, dt_b, 'tag_no_match')
matchedLines.append([b, dt_b, 'tag_no_match'])
#a = str(row) + '.0'
#WindX['Text'].insert(a, t, 'tag_match')
c = str(row) + '.' + str(len(t) + 5)
ms= " ::["+str(matchQTY)+"]:: Found ["+", ".join(matches)+"] "
#WindX['Text'].insert(c, ms, 'tag_match_word')
matchedLines.append([c, ms, 'tag_match_word'])
d = str(row) + '.' + str(len(t + ms) + 5)
#WindX['Text'].insert(d, "\n", 'tag_x')
matchedLines.append([d, "\n", 'tag_x'])
else:
b = str(row) + '.0'
#WindX['Text'].insert(b, t + "\n", 'tag_no_match')
matchedLines.append([b, t + "\n", 'tag_no_match'])
if matchQTY:
matches = []
for key in sorted(matchedTypes.keys()):
matches.append(key + " ("+ str(matchedTypes[key]) +")")
WindX['Status'].configure(text='Find Words: Found in ' + str(matchQTY) + " lines -- checked done! Found: [" + ", ".join(matches) + "]" ,bg='yellow', fg='red')
Message('Find Words: Found in ' + str(matchQTY) + " lines -- checked done! \nFound: [" + ", ".join(matches) + "]", 'yellow', 'red')
else:
WindX['Status'].configure(text='Find Words: No found -- checked done!',bg='green', fg='white')
Message('Find Words: No found -- checked done!', 'green', 'white')
if len(matchedLines):
for t in matchedLines:
WindX['Text'].insert(t[0], t[1], t[2])
WindX['main'].update()
def Message(msg, bgColor, fgColor):
if WindX['TopLevel']:
WindX['TopLevel'].destroy()
WindX['TopLevel'] = Toplevel()
WindX['TopLevel'].wm_attributes('-topmost',1)
pos = win32api.GetCursorPos()
WindX['TopLevel'].geometry('+'+ str(pos[0]) +'+' + str(pos[1] + 20))
WindX['TopLevel'].overrideredirect(1)
font_type = None
try:
font_type = tf.Font(family="Lucida Grande") #, size=12
except:
pass
label = Label(WindX['TopLevel'], text=msg, justify=LEFT, relief=FLAT,pady=3,padx=3, anchor='w', bg=bgColor, fg=fgColor, font=font_type)
label.pack(side=TOP, fill=X)
def WindExit():
if WindX['TopLevel']:
WindX['TopLevel'].destroy()
WindX['main'].destroy()
os._exit(0)
#sys.exit(0) # This will cause the window error: Python has stopped working ...
def main():
WindX['main'] = Tix.Tk()
WindX['main'].title("Find Words in Clipboard")
WindX['main'].geometry('+0+30')
#WindX['main'].wm_attributes('-topmost',1)
WindX['main'].protocol("WM_DELETE_WINDOW", WindExit)
WindX['Label'] = Label(WindX['main'], text='---------------- Check: 0 ----------------', justify=CENTER, relief=FLAT,pady=3,padx=3)
WindX['Label'].pack(side=TOP, fill=X)
frame1 = Frame(WindX['main'])
frame1.pack(side=TOP, fill=X, pady=3,padx=3)
Label(frame1, text='Find: ', justify=CENTER, relief=FLAT, pady=3, padx=3).pack(side=LEFT)
WindX['FindStringVar'] = StringVar()
e=Entry(frame1, justify=LEFT, relief=FLAT, textvariable= WindX['FindStringVar'], fg='blue')
e.pack(side=LEFT, fill=BOTH, pady=3,padx=0, expand=True)
e.insert(0,'Category, Group, Business\s+Sector, Audit\s+Finding\s+Type, Audit\s+Type, Severity, Division, CAR\s+Type, Concern\s+Type, Type, Checklist, Process\s+CAR, Product\s+CAR')
#WindX['Find_Entry'] = e
WindX['Label_from'] = Label(WindX['main'], text='', justify=CENTER, relief=FLAT,pady=3,padx=3)
WindX['Label_from'].pack(side=TOP, fill=X)
framex = Frame(WindX['main'])
#s1 = Scrollbar(framex, orient= HORIZONTAL)
s2 = Scrollbar(framex, orient= VERTICAL)
WindX['Text'] = Text(framex, padx=5, pady=5, yscrollcommand=s2.set, relief=FLAT, wrap=WORD) #wrap=WORD NONE CHAR xscrollcommand=s1.set,
s2.config(command=WindX['Text'].yview)
#s1.config(command=WindX['Text'].xview)
#s1.pack(side=BOTTOM,fill=X)
s2.pack(side=RIGHT,fill=Y)
WindX['Text'].pack(side=LEFT, fill=BOTH, expand=True)
framex.pack(side=TOP,fill=BOTH, expand=True)
WindX['Text'].insert('1.0', "\n\t\tPress [Esc] key to pause, press again to continue.")
WindX['Status'] = Label(WindX['main'], text='', justify=LEFT, relief=FLAT,pady=3,padx=3, anchor='w', fg='green')
WindX['Status'].pack(side=TOP, fill=X)
mainloop()
def on_press(key):
try:
if isinstance(key, keyboard.Key):
#print('Key:', key.name, ",", key.value.vk)
if key.name == 'ctrl_l':
WindX['Ctrl_Pressed'] = 1
#print('special key {0} pressed'.format(key))
elif key.name == 'esc':
if WindX['stop_cheking']:
WindX['stop_cheking'] = 0
WindX['Label'].configure(text='---------------- Ready to check ----------------',fg='black',bg='#EFEFEF')
WindX['main'].update()
else:
WindX['stop_cheking'] = 1
WindX['Label'].configure(text='---------------- locked now, hit [ESC] key to unlock it! ----------------',fg='red',bg='yellow')
WindX['main'].update()
elif isinstance(key, keyboard.KeyCode):
#print('KeyCode:', key.char,",", key.vk)
#print(key.vk == 67, WindX['Ctrl_Pressed'])
if key.vk == 67:
if WindX['Text'] and (not WindX['stop_cheking']):
WindX['Text'].delete('1.0',END)
WindX['Status'].configure(text='',bg='#EFEFEF')
if WindX['Ctrl_Pressed']:
WindX['Ctrl_C'] = 1
if not WindX['stop_cheking']:
WindX['Label_from'].configure(image='')
WindX['Label_ImageTk'] = None
except:
print(traceback.format_exc())
def on_release(key):
try:
if isinstance(key, keyboard.Key):
#print('Key:', key.name, ",", key.value.vk)
if key.name == 'ctrl_l':
WindX['Ctrl_Pressed'] = 0
#print('special key {0} pressed'.format(key))
if WindX['Ctrl_C']:
try:
fgwHandle = win32gui.GetForegroundWindow()
if fgwHandle and (not WindX['stop_cheking']):
print('\tForeground Window:',fgwHandle,',', win32gui.GetWindowText(fgwHandle), ',', win32gui.GetWindowRect(fgwHandle), get_window_rect(fgwHandle))
#left, top, right, bottom = win32gui.GetWindowRect(fgwHandle) #this function causes an offset -8!!! see https://blog.csdn.net/See_Star/article/details/103940462
#offsetx = 8
left, top, right, bottom = get_window_rect(fgwHandle)
offsetx = 0
im_PIL,err = ScreenShotXY(width=right - left - offsetx*2,height=100,xSrc=left + offsetx,ySrc=top + offsetx)
if im_PIL:
gs = re.split(r'x|\+', WindX['main'].geometry()) #506x152+-1418+224
img_size = im_PIL.size
if img_size[0] > int(gs[0]):
xheight = int(img_size[1] * int(gs[0])/img_size[0])
im_PIL = im_PIL.resize((int(gs[0]),xheight),Image.ANTIALIAS)
WindX['Label_ImageTk'] = ImageTk.PhotoImage(im_PIL)
WindX['Label_from'].configure(image=WindX['Label_ImageTk'])
except:
print(traceback.format_exc())
CheckClipboard()
WindX['Ctrl_C'] = 0
except:
print(traceback.format_exc())
def keyboardListener():
with keyboard.Listener(
on_press=on_press,
on_release=on_release) as listener:
listener.join()
def MouseOnClick(x, y, button, pressed):
#print(button,'{0} at {1}'.format('Pressed' if pressed else 'Released', (x, y)))
#if re.match(r'.*left',str(button),re.I) and (not pressed):
if WindX['TopLevel']:
WindX['TopLevel'].destroy()
WindX['TopLevel'] = None
def MouseOnMove(x, y):
if WindX['TopLevel']:
WindX['TopLevel'].geometry('+'+ str(x) +'+' + str(y + 20))
def MouseListener():
with mouseListener(on_click=MouseOnClick, on_move=MouseOnMove) as listener: #(on_move=on_move, on_click=on_click, on_scroll=on_scroll)
listener.join()
def get_window_rect(hwnd):
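    # DwmGetWindowAttribute(DWMWA_EXTENDED_FRAME_BOUNDS) returns the visible window bounds,
    # avoiding the invisible resize-border offset that GetWindowRect includes on Windows 10.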
try:
f = ctypes.windll.dwmapi.DwmGetWindowAttribute
except WindowsError:
f = None
if f:
rect = ctypes.wintypes.RECT()
DWMWA_EXTENDED_FRAME_BOUNDS = 9
f(ctypes.wintypes.HWND(hwnd),
ctypes.wintypes.DWORD(DWMWA_EXTENDED_FRAME_BOUNDS),
ctypes.byref(rect),
ctypes.sizeof(rect)
)
return rect.left, rect.top, rect.right, rect.bottom
def ScreenShotXY(width=0,height=0,xSrc=None,ySrc=None):
im_PIL = None
err = None
try:
if width == 0:
width = WindX['Displays']['FullScreenSize'][0]
if height== 0:
height= WindX['Displays']['FullScreenSize'][1]
if xSrc == None:
xSrc = WindX['Displays']['FullScreenSize'][2]
if ySrc == None:
ySrc = WindX['Displays']['FullScreenSize'][3]
hWnd = 0
hWndDC = win32gui.GetWindowDC(hWnd) #0 - desktop
        # create a device context from the window handle
mfcDC = win32ui.CreateDCFromHandle(hWndDC)
        # create a compatible memory device context
saveDC = mfcDC.CreateCompatibleDC()
        # create a bitmap object to hold the captured image
saveBitMap = win32ui.CreateBitmap()
        # allocate storage for the bitmap
#print(width,height,xSrc,ySrc,';',hWndDC,';',mfcDC) #-1920 1080 1920 1080 ; 889263399 ; object 'PyCDC' - assoc is 000001F1B3EC5998, vi=<None>
saveBitMap.CreateCompatibleBitmap(mfcDC,width,height)
        # store the screenshot into saveBitMap
saveDC.SelectObject(saveBitMap)
        # copy the screen contents into the memory device context
saveDC.BitBlt((0,0), (width,height), mfcDC, (xSrc, ySrc), win32con.SRCCOPY)
#BOOLBitBlt((int x,int y),(int nWidth,int nHeight),CDC*pSrcDC,(int xSrc,int ySrc),DWORDdwRop);
bmpinfo = saveBitMap.GetInfo()
bmpstr = saveBitMap.GetBitmapBits(True)
        ### build the PIL image
im_PIL = Image.frombuffer('RGB',(bmpinfo['bmWidth'],bmpinfo['bmHeight']),bmpstr,'raw','BGRX',0,1)
#print(im_PIL)
win32gui.DeleteObject(saveBitMap.GetHandle())
saveDC.DeleteDC()
mfcDC.DeleteDC()
win32gui.ReleaseDC(hWnd,hWndDC)
except:
err = traceback.format_exc()
print(traceback.format_exc())
'''
if isinstance(im_PIL, Image.Image):
print ("Image size: %s, mode: %s" % (im_PIL.size, im_PIL.mode))
else:
print("Failed to get screenshot!")
'''
return im_PIL,err
if __name__ == "__main__":
t1 = threading.Timer(1,keyboardListener)
t2 = threading.Timer(1,main)
t3 = threading.Timer(1,MouseListener)
t1.start()
t2.start()
t3.start()
'''
CAR Category .[CAR Category Workflow]
CAR Group .[CAR Group Workflow]
CAR Type .[CAR Type Workflow]
Business Sector .[Business Sector Workflow]
Audit Finding Type .[Audit Finding Type Workflow]
Audit Type .[Audit Type Workflow]
Segment .[Segment Workflow]
Severity .[Severity Workflow]
''' | [] | [] | [
"COMPUTERNAME"
] | [] | ["COMPUTERNAME"] | python | 1 | 0 | |
tests/main.go | package main
import (
"fmt"
"net/http"
"os"
"github.com/qor/admin/tests/dummy"
"github.com/qor/qor/utils"
)
func main() {
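	// read the listening port from the PORT environment variable, defaulting to 3000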
port := os.Getenv("PORT")
if port == "" {
port = "3000"
}
fmt.Printf("Listening on: %s\n", port)
mux := http.NewServeMux()
mux.Handle("/system/", utils.FileServer(http.Dir("public")))
dummy.NewDummyAdmin(true).MountTo("/admin", mux)
http.ListenAndServe(fmt.Sprintf(":%s", port), mux)
}
| [
"\"PORT\""
] | [] | [
"PORT"
] | [] | ["PORT"] | go | 1 | 0 | |
module01/src/myapptodo/src/main/java/com/microsoft/azuresample/acscicdtodo/Utils/SqlHelper.java | package com.microsoft.azuresample.acscicdtodo.Utils;
import org.springframework.stereotype.Component;
import java.sql.DriverManager;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Component
public class SqlHelper {
static final Logger LOG = LoggerFactory.getLogger(SqlHelper.class);
public static String sqlurl;
public static Connection GetConnection() throws SQLException {
if(sqlurl==null){
new SqlHelper().Init();
}
Connection connection = (Connection) DriverManager.getConnection(sqlurl);
return connection;
}
public void Init(){
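        // read the JDBC connection URL from the ORASQL_URL environment variable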
Map<String, String> env = System.getenv();
sqlurl = env.get("ORASQL_URL");
LOG.info("### INIT of SqlHelper called.");
}
}
| [] | [] | [] | [] | [] | java | 0 | 0 | |
vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go | package grpcclient
import (
"context"
"encoding/json"
"fmt"
"io"
"net"
"os"
"strings"
"sync"
"time"
"github.com/moby/buildkit/util/bklog"
"github.com/gogo/googleapis/google/rpc"
gogotypes "github.com/gogo/protobuf/types"
"github.com/golang/protobuf/ptypes/any"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/frontend/gateway/client"
pb "github.com/moby/buildkit/frontend/gateway/pb"
"github.com/moby/buildkit/identity"
opspb "github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/apicaps"
"github.com/moby/buildkit/util/grpcerrors"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
fstypes "github.com/tonistiigi/fsutil/types"
"golang.org/x/sync/errgroup"
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
const frontendPrefix = "BUILDKIT_FRONTEND_OPT_"
type GrpcClient interface {
client.Client
Run(context.Context, client.BuildFunc) error
}
func New(ctx context.Context, opts map[string]string, session, product string, c pb.LLBBridgeClient, w []client.WorkerInfo) (GrpcClient, error) {
pingCtx, pingCancel := context.WithTimeout(ctx, 15*time.Second)
defer pingCancel()
resp, err := c.Ping(pingCtx, &pb.PingRequest{})
if err != nil {
return nil, err
}
if resp.FrontendAPICaps == nil {
resp.FrontendAPICaps = defaultCaps()
}
if resp.LLBCaps == nil {
resp.LLBCaps = defaultLLBCaps()
}
return &grpcClient{
client: c,
opts: opts,
sessionID: session,
workers: w,
product: product,
caps: pb.Caps.CapSet(resp.FrontendAPICaps),
llbCaps: opspb.Caps.CapSet(resp.LLBCaps),
requests: map[string]*pb.SolveRequest{},
execMsgs: newMessageForwarder(ctx, c),
}, nil
}
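// current constructs a GrpcClient for this frontend process, using the gRPC connection,
// frontend options, session ID and worker list supplied by the environment.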
func current() (GrpcClient, error) {
if ep := product(); ep != "" {
apicaps.ExportedProduct = ep
}
ctx, conn, err := grpcClientConn(context.Background())
if err != nil {
return nil, err
}
return New(ctx, opts(), sessionID(), product(), pb.NewLLBBridgeClient(conn), workers())
}
func convertRef(ref client.Reference) (*pb.Ref, error) {
if ref == nil {
return &pb.Ref{}, nil
}
r, ok := ref.(*reference)
if !ok {
return nil, errors.Errorf("invalid return reference type %T", ref)
}
return &pb.Ref{Id: r.id, Def: r.def}, nil
}
func RunFromEnvironment(ctx context.Context, f client.BuildFunc) error {
client, err := current()
if err != nil {
return errors.Wrapf(err, "failed to initialize client from environment")
}
return client.Run(ctx, f)
}
func (c *grpcClient) Run(ctx context.Context, f client.BuildFunc) (retError error) {
export := c.caps.Supports(pb.CapReturnResult) == nil
var (
res *client.Result
err error
)
if export {
defer func() {
req := &pb.ReturnRequest{}
if retError == nil {
if res == nil {
res = &client.Result{}
}
pbRes := &pb.Result{
Metadata: res.Metadata,
}
if res.Refs != nil {
if c.caps.Supports(pb.CapProtoRefArray) == nil {
m := map[string]*pb.Ref{}
for k, r := range res.Refs {
pbRef, err := convertRef(r)
if err != nil {
retError = err
continue
}
m[k] = pbRef
}
pbRes.Result = &pb.Result_Refs{Refs: &pb.RefMap{Refs: m}}
} else {
// Server doesn't support the new wire format for refs, so we construct
// a deprecated result ref map.
m := map[string]string{}
for k, r := range res.Refs {
pbRef, err := convertRef(r)
if err != nil {
retError = err
continue
}
m[k] = pbRef.Id
}
pbRes.Result = &pb.Result_RefsDeprecated{RefsDeprecated: &pb.RefMapDeprecated{Refs: m}}
}
} else {
pbRef, err := convertRef(res.Ref)
if err != nil {
retError = err
} else {
if c.caps.Supports(pb.CapProtoRefArray) == nil {
pbRes.Result = &pb.Result_Ref{Ref: pbRef}
} else {
// Server doesn't support the new wire format for refs, so we construct
// a deprecated result ref.
pbRes.Result = &pb.Result_RefDeprecated{RefDeprecated: pbRef.Id}
}
}
}
if retError == nil {
req.Result = pbRes
}
}
if retError != nil {
st, _ := status.FromError(grpcerrors.ToGRPC(retError))
stp := st.Proto()
req.Error = &rpc.Status{
Code: stp.Code,
Message: stp.Message,
Details: convertToGogoAny(stp.Details),
}
}
if _, err := c.client.Return(ctx, req); err != nil && retError == nil {
retError = err
}
}()
}
defer func() {
err = c.execMsgs.Release()
if err != nil && retError != nil {
retError = err
}
}()
if res, err = f(ctx, c); err != nil {
return err
}
if res == nil {
return nil
}
if err := c.caps.Supports(pb.CapReturnMap); len(res.Refs) > 1 && err != nil {
return err
}
if !export {
exportedAttrBytes, err := json.Marshal(res.Metadata)
if err != nil {
return errors.Wrapf(err, "failed to marshal return metadata")
}
req, err := c.requestForRef(res.Ref)
if err != nil {
return errors.Wrapf(err, "failed to find return ref")
}
req.Final = true
req.ExporterAttr = exportedAttrBytes
if _, err := c.client.Solve(ctx, req); err != nil {
return errors.Wrapf(err, "failed to solve")
}
}
return nil
}
// defaultCaps returns the capabilities that were implemented when capabilities
// support was added. This list is frozen and should never be changed.
func defaultCaps() []apicaps.PBCap {
return []apicaps.PBCap{
{ID: string(pb.CapSolveBase), Enabled: true},
{ID: string(pb.CapSolveInlineReturn), Enabled: true},
{ID: string(pb.CapResolveImage), Enabled: true},
{ID: string(pb.CapReadFile), Enabled: true},
}
}
// defaultLLBCaps returns the LLB capabilities that were implemented when capabilities
// support was added. This list is frozen and should never be changed.
func defaultLLBCaps() []apicaps.PBCap {
return []apicaps.PBCap{
{ID: string(opspb.CapSourceImage), Enabled: true},
{ID: string(opspb.CapSourceLocal), Enabled: true},
{ID: string(opspb.CapSourceLocalUnique), Enabled: true},
{ID: string(opspb.CapSourceLocalSessionID), Enabled: true},
{ID: string(opspb.CapSourceLocalIncludePatterns), Enabled: true},
{ID: string(opspb.CapSourceLocalFollowPaths), Enabled: true},
{ID: string(opspb.CapSourceLocalExcludePatterns), Enabled: true},
{ID: string(opspb.CapSourceLocalSharedKeyHint), Enabled: true},
{ID: string(opspb.CapSourceGit), Enabled: true},
{ID: string(opspb.CapSourceGitKeepDir), Enabled: true},
{ID: string(opspb.CapSourceGitFullURL), Enabled: true},
{ID: string(opspb.CapSourceHTTP), Enabled: true},
{ID: string(opspb.CapSourceHTTPChecksum), Enabled: true},
{ID: string(opspb.CapSourceHTTPPerm), Enabled: true},
{ID: string(opspb.CapSourceHTTPUIDGID), Enabled: true},
{ID: string(opspb.CapBuildOpLLBFileName), Enabled: true},
{ID: string(opspb.CapExecMetaBase), Enabled: true},
{ID: string(opspb.CapExecMetaProxy), Enabled: true},
{ID: string(opspb.CapExecMountBind), Enabled: true},
{ID: string(opspb.CapExecMountCache), Enabled: true},
{ID: string(opspb.CapExecMountCacheSharing), Enabled: true},
{ID: string(opspb.CapExecMountSelector), Enabled: true},
{ID: string(opspb.CapExecMountTmpfs), Enabled: true},
{ID: string(opspb.CapExecMountSecret), Enabled: true},
{ID: string(opspb.CapConstraints), Enabled: true},
{ID: string(opspb.CapPlatform), Enabled: true},
{ID: string(opspb.CapMetaIgnoreCache), Enabled: true},
{ID: string(opspb.CapMetaDescription), Enabled: true},
{ID: string(opspb.CapMetaExportCache), Enabled: true},
}
}
type grpcClient struct {
client pb.LLBBridgeClient
opts map[string]string
sessionID string
product string
workers []client.WorkerInfo
caps apicaps.CapSet
llbCaps apicaps.CapSet
requests map[string]*pb.SolveRequest
execMsgs *messageForwarder
}
func (c *grpcClient) requestForRef(ref client.Reference) (*pb.SolveRequest, error) {
emptyReq := &pb.SolveRequest{
Definition: &opspb.Definition{},
}
if ref == nil {
return emptyReq, nil
}
r, ok := ref.(*reference)
if !ok {
return nil, errors.Errorf("return reference has invalid type %T", ref)
}
if r.id == "" {
return emptyReq, nil
}
req, ok := c.requests[r.id]
if !ok {
return nil, errors.Errorf("did not find request for return reference %s", r.id)
}
return req, nil
}
func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res *client.Result, err error) {
if creq.Definition != nil {
for _, md := range creq.Definition.Metadata {
for cap := range md.Caps {
if err := c.llbCaps.Supports(cap); err != nil {
return nil, err
}
}
}
}
var (
// old API
legacyRegistryCacheImports []string
// new API (CapImportCaches)
cacheImports []*pb.CacheOptionsEntry
)
supportCapImportCaches := c.caps.Supports(pb.CapImportCaches) == nil
for _, im := range creq.CacheImports {
if !supportCapImportCaches && im.Type == "registry" {
legacyRegistryCacheImports = append(legacyRegistryCacheImports, im.Attrs["ref"])
} else {
cacheImports = append(cacheImports, &pb.CacheOptionsEntry{
Type: im.Type,
Attrs: im.Attrs,
})
}
}
req := &pb.SolveRequest{
Definition: creq.Definition,
Frontend: creq.Frontend,
FrontendOpt: creq.FrontendOpt,
FrontendInputs: creq.FrontendInputs,
AllowResultReturn: true,
AllowResultArrayRef: true,
// old API
ImportCacheRefsDeprecated: legacyRegistryCacheImports,
// new API
CacheImports: cacheImports,
}
// backwards compatibility with inline return
if c.caps.Supports(pb.CapReturnResult) != nil {
req.ExporterAttr = []byte("{}")
}
if creq.Evaluate {
if c.caps.Supports(pb.CapGatewayEvaluateSolve) == nil {
req.Evaluate = creq.Evaluate
} else {
// If evaluate is not supported, fallback to running Stat(".") in order to
// trigger an evaluation of the result.
defer func() {
if res == nil {
return
}
var (
id string
ref client.Reference
)
ref, err = res.SingleRef()
if err != nil {
for refID := range res.Refs {
id = refID
break
}
} else {
id = ref.(*reference).id
}
_, err = c.client.StatFile(ctx, &pb.StatFileRequest{
Ref: id,
Path: ".",
})
}()
}
}
resp, err := c.client.Solve(ctx, req)
if err != nil {
return nil, err
}
res = &client.Result{}
if resp.Result == nil {
if id := resp.Ref; id != "" {
c.requests[id] = req
}
res.SetRef(&reference{id: resp.Ref, c: c})
} else {
res.Metadata = resp.Result.Metadata
switch pbRes := resp.Result.Result.(type) {
case *pb.Result_RefDeprecated:
if id := pbRes.RefDeprecated; id != "" {
res.SetRef(&reference{id: id, c: c})
}
case *pb.Result_RefsDeprecated:
for k, v := range pbRes.RefsDeprecated.Refs {
ref := &reference{id: v, c: c}
if v == "" {
ref = nil
}
res.AddRef(k, ref)
}
case *pb.Result_Ref:
if pbRes.Ref.Id != "" {
ref, err := newReference(c, pbRes.Ref)
if err != nil {
return nil, err
}
res.SetRef(ref)
}
case *pb.Result_Refs:
for k, v := range pbRes.Refs.Refs {
var ref *reference
if v.Id != "" {
ref, err = newReference(c, v)
if err != nil {
return nil, err
}
}
res.AddRef(k, ref)
}
}
}
return res, nil
}
func (c *grpcClient) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error) {
var p *opspb.Platform
if platform := opt.Platform; platform != nil {
p = &opspb.Platform{
OS: platform.OS,
Architecture: platform.Architecture,
Variant: platform.Variant,
OSVersion: platform.OSVersion,
OSFeatures: platform.OSFeatures,
}
}
resp, err := c.client.ResolveImageConfig(ctx, &pb.ResolveImageConfigRequest{Ref: ref, Platform: p, ResolveMode: opt.ResolveMode, LogName: opt.LogName})
if err != nil {
return "", nil, err
}
return resp.Digest, resp.Config, nil
}
func (c *grpcClient) BuildOpts() client.BuildOpts {
return client.BuildOpts{
Opts: c.opts,
SessionID: c.sessionID,
Workers: c.workers,
Product: c.product,
LLBCaps: c.llbCaps,
Caps: c.caps,
}
}
func (c *grpcClient) Inputs(ctx context.Context) (map[string]llb.State, error) {
err := c.caps.Supports(pb.CapFrontendInputs)
if err != nil {
return nil, err
}
resp, err := c.client.Inputs(ctx, &pb.InputsRequest{})
if err != nil {
return nil, err
}
inputs := make(map[string]llb.State)
for key, def := range resp.Definitions {
op, err := llb.NewDefinitionOp(def)
if err != nil {
return nil, err
}
inputs[key] = llb.NewState(op)
}
return inputs, nil
}
// procMessageForwarder is created per container process to act as the
// communication channel between the process and the ExecProcess message
// stream.
type procMessageForwarder struct {
done chan struct{}
closeOnce sync.Once
msgs chan *pb.ExecMessage
}
func newProcMessageForwarder() *procMessageForwarder {
return &procMessageForwarder{
done: make(chan struct{}),
msgs: make(chan *pb.ExecMessage),
}
}
func (b *procMessageForwarder) Send(ctx context.Context, m *pb.ExecMessage) {
select {
case <-ctx.Done():
case <-b.done:
b.closeOnce.Do(func() {
close(b.msgs)
})
case b.msgs <- m:
}
}
func (b *procMessageForwarder) Recv(ctx context.Context) (m *pb.ExecMessage, ok bool) {
select {
case <-ctx.Done():
return nil, true
case <-b.done:
return nil, false
case m = <-b.msgs:
return m, true
}
}
func (b *procMessageForwarder) Close() {
close(b.done)
b.Recv(context.Background()) // flush any messages in queue
b.Send(context.Background(), nil) // ensure channel is closed
}
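// Usage sketch (added commentary, not part of the original source): the stream
// reader started in messageForwarder.Start calls Send for every incoming
// ExecMessage whose ProcessID matches a registered forwarder, while the
// per-process goroutines created in container.Start drain messages with Recv.
// Close (invoked from Deregister) unblocks any pending Recv with ok=false so
// those goroutines can shut down cleanly.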
// messageForwarder manages a single grpc stream for ExecProcess to facilitate
// a pub/sub message channel for each new process started from the client
// connection.
type messageForwarder struct {
client pb.LLBBridgeClient
ctx context.Context
cancel func()
eg *errgroup.Group
mu sync.Mutex
pids map[string]*procMessageForwarder
stream pb.LLBBridge_ExecProcessClient
// startOnce used to only start the exec message forwarder once,
// so we only have one exec stream per client
startOnce sync.Once
// startErr tracks the error when initializing the stream, it will
// be returned on subsequent calls to Start
startErr error
}
func newMessageForwarder(ctx context.Context, client pb.LLBBridgeClient) *messageForwarder {
ctx, cancel := context.WithCancel(ctx)
eg, ctx := errgroup.WithContext(ctx)
return &messageForwarder{
client: client,
pids: map[string]*procMessageForwarder{},
ctx: ctx,
cancel: cancel,
eg: eg,
}
}
func (m *messageForwarder) Start() (err error) {
defer func() {
if err != nil {
m.startErr = err
}
}()
if m.startErr != nil {
return m.startErr
}
m.startOnce.Do(func() {
m.stream, err = m.client.ExecProcess(m.ctx)
if err != nil {
return
}
m.eg.Go(func() error {
for {
msg, err := m.stream.Recv()
if errors.Is(err, io.EOF) || grpcerrors.Code(err) == codes.Canceled {
return nil
}
bklog.G(m.ctx).Debugf("|<--- %s", debugMessage(msg))
if err != nil {
return err
}
m.mu.Lock()
msgs, ok := m.pids[msg.ProcessID]
m.mu.Unlock()
if !ok {
bklog.G(m.ctx).Debugf("Received exec message for unregistered process: %s", msg.String())
continue
}
msgs.Send(m.ctx, msg)
}
})
})
return err
}
func debugMessage(msg *pb.ExecMessage) string {
switch m := msg.GetInput().(type) {
case *pb.ExecMessage_Init:
return fmt.Sprintf("Init Message %s", msg.ProcessID)
case *pb.ExecMessage_File:
if m.File.EOF {
return fmt.Sprintf("File Message %s, fd=%d, EOF", msg.ProcessID, m.File.Fd)
}
return fmt.Sprintf("File Message %s, fd=%d, %d bytes", msg.ProcessID, m.File.Fd, len(m.File.Data))
case *pb.ExecMessage_Resize:
return fmt.Sprintf("Resize Message %s", msg.ProcessID)
case *pb.ExecMessage_Started:
return fmt.Sprintf("Started Message %s", msg.ProcessID)
case *pb.ExecMessage_Exit:
return fmt.Sprintf("Exit Message %s, code=%d, err=%s", msg.ProcessID, m.Exit.Code, m.Exit.Error)
case *pb.ExecMessage_Done:
return fmt.Sprintf("Done Message %s", msg.ProcessID)
}
return fmt.Sprintf("Unknown Message %s", msg.String())
}
func (m *messageForwarder) Send(msg *pb.ExecMessage) error {
m.mu.Lock()
_, ok := m.pids[msg.ProcessID]
defer m.mu.Unlock()
if !ok {
return errors.Errorf("process %s has ended, not sending message %#v", msg.ProcessID, msg.Input)
}
bklog.G(m.ctx).Debugf("|---> %s", debugMessage(msg))
return m.stream.Send(msg)
}
func (m *messageForwarder) Release() error {
m.cancel()
return m.eg.Wait()
}
func (m *messageForwarder) Register(pid string) *procMessageForwarder {
m.mu.Lock()
defer m.mu.Unlock()
sender := newProcMessageForwarder()
m.pids[pid] = sender
return sender
}
func (m *messageForwarder) Deregister(pid string) {
m.mu.Lock()
defer m.mu.Unlock()
sender, ok := m.pids[pid]
if !ok {
return
}
delete(m.pids, pid)
sender.Close()
}
type msgWriter struct {
mux *messageForwarder
fd uint32
processID string
}
func (w *msgWriter) Write(msg []byte) (int, error) {
err := w.mux.Send(&pb.ExecMessage{
ProcessID: w.processID,
Input: &pb.ExecMessage_File{
File: &pb.FdMessage{
Fd: w.fd,
Data: msg,
},
},
})
if err != nil {
return 0, err
}
return len(msg), nil
}
func (c *grpcClient) NewContainer(ctx context.Context, req client.NewContainerRequest) (client.Container, error) {
err := c.caps.Supports(pb.CapGatewayExec)
if err != nil {
return nil, err
}
id := identity.NewID()
var mounts []*opspb.Mount
for _, m := range req.Mounts {
resultID := m.ResultID
if m.Ref != nil {
ref, ok := m.Ref.(*reference)
if !ok {
return nil, errors.Errorf("unexpected type for reference, got %T", m.Ref)
}
resultID = ref.id
}
mounts = append(mounts, &opspb.Mount{
Dest: m.Dest,
Selector: m.Selector,
Readonly: m.Readonly,
MountType: m.MountType,
ResultID: resultID,
CacheOpt: m.CacheOpt,
SecretOpt: m.SecretOpt,
SSHOpt: m.SSHOpt,
})
}
bklog.G(ctx).Debugf("|---> NewContainer %s", id)
_, err = c.client.NewContainer(ctx, &pb.NewContainerRequest{
ContainerID: id,
Mounts: mounts,
Platform: req.Platform,
Constraints: req.Constraints,
Network: req.NetMode,
ExtraHosts: req.ExtraHosts,
})
if err != nil {
return nil, err
}
// ensure message forwarder is started, only sets up stream first time called
err = c.execMsgs.Start()
if err != nil {
return nil, err
}
return &container{
client: c.client,
id: id,
execMsgs: c.execMsgs,
}, nil
}
type container struct {
client pb.LLBBridgeClient
id string
execMsgs *messageForwarder
}
func (ctr *container) Start(ctx context.Context, req client.StartRequest) (client.ContainerProcess, error) {
pid := fmt.Sprintf("%s:%s", ctr.id, identity.NewID())
msgs := ctr.execMsgs.Register(pid)
init := &pb.InitMessage{
ContainerID: ctr.id,
Meta: &opspb.Meta{
Args: req.Args,
Env: req.Env,
Cwd: req.Cwd,
User: req.User,
},
Tty: req.Tty,
Security: req.SecurityMode,
}
if req.Stdin != nil {
init.Fds = append(init.Fds, 0)
}
if req.Stdout != nil {
init.Fds = append(init.Fds, 1)
}
if req.Stderr != nil {
init.Fds = append(init.Fds, 2)
}
err := ctr.execMsgs.Send(&pb.ExecMessage{
ProcessID: pid,
Input: &pb.ExecMessage_Init{
Init: init,
},
})
if err != nil {
return nil, err
}
msg, _ := msgs.Recv(ctx)
if msg == nil {
return nil, errors.Errorf("failed to receive started message")
}
started := msg.GetStarted()
if started == nil {
return nil, errors.Errorf("expecting started message, got %T", msg.GetInput())
}
eg, ctx := errgroup.WithContext(ctx)
done := make(chan struct{})
ctrProc := &containerProcess{
execMsgs: ctr.execMsgs,
id: pid,
eg: eg,
}
var stdinReader *io.PipeReader
ctrProc.eg.Go(func() error {
<-done
if stdinReader != nil {
return stdinReader.Close()
}
return nil
})
if req.Stdin != nil {
var stdinWriter io.WriteCloser
stdinReader, stdinWriter = io.Pipe()
		// This goroutine is intentionally not part of the errgroup because
// if os.Stdin is used for req.Stdin then this will block until
// the user closes the input, which will likely be after we are done
// with the container, so we can't Wait on it.
go func() {
io.Copy(stdinWriter, req.Stdin)
stdinWriter.Close()
}()
ctrProc.eg.Go(func() error {
m := &msgWriter{
mux: ctr.execMsgs,
processID: pid,
fd: 0,
}
_, err := io.Copy(m, stdinReader)
// ignore ErrClosedPipe, it is EOF for our usage.
if err != nil && !errors.Is(err, io.ErrClosedPipe) {
return err
}
// not an error so must be eof
return ctr.execMsgs.Send(&pb.ExecMessage{
ProcessID: pid,
Input: &pb.ExecMessage_File{
File: &pb.FdMessage{
Fd: 0,
EOF: true,
},
},
})
})
}
ctrProc.eg.Go(func() error {
var closeDoneOnce sync.Once
var exitError error
for {
msg, ok := msgs.Recv(ctx)
if !ok {
// no more messages, return
return exitError
}
if msg == nil {
// empty message from ctx cancel, so just start shutting down
// input, but continue processing more exit/done messages
closeDoneOnce.Do(func() {
close(done)
})
continue
}
if file := msg.GetFile(); file != nil {
var out io.WriteCloser
switch file.Fd {
case 1:
out = req.Stdout
case 2:
out = req.Stderr
}
if out == nil {
// if things are plumbed correctly this should never happen
return errors.Errorf("missing writer for output fd %d", file.Fd)
}
if len(file.Data) > 0 {
_, err := out.Write(file.Data)
if err != nil {
return err
}
}
} else if exit := msg.GetExit(); exit != nil {
// capture exit message to exitError so we can return it after
// the server sends the Done message
closeDoneOnce.Do(func() {
close(done)
})
if exit.Code == 0 {
continue
}
exitError = grpcerrors.FromGRPC(status.ErrorProto(&spb.Status{
Code: exit.Error.Code,
Message: exit.Error.Message,
Details: convertGogoAny(exit.Error.Details),
}))
if exit.Code != pb.UnknownExitStatus {
exitError = &pb.ExitError{ExitCode: exit.Code, Err: exitError}
}
} else if serverDone := msg.GetDone(); serverDone != nil {
return exitError
} else {
return errors.Errorf("unexpected Exec Message for pid %s: %T", pid, msg.GetInput())
}
}
})
return ctrProc, nil
}
func (ctr *container) Release(ctx context.Context) error {
bklog.G(ctx).Debugf("|---> ReleaseContainer %s", ctr.id)
_, err := ctr.client.ReleaseContainer(ctx, &pb.ReleaseContainerRequest{
ContainerID: ctr.id,
})
return err
}
type containerProcess struct {
execMsgs *messageForwarder
id string
eg *errgroup.Group
}
func (ctrProc *containerProcess) Wait() error {
defer ctrProc.execMsgs.Deregister(ctrProc.id)
return ctrProc.eg.Wait()
}
func (ctrProc *containerProcess) Resize(_ context.Context, size client.WinSize) error {
return ctrProc.execMsgs.Send(&pb.ExecMessage{
ProcessID: ctrProc.id,
Input: &pb.ExecMessage_Resize{
Resize: &pb.ResizeMessage{
Cols: size.Cols,
Rows: size.Rows,
},
},
})
}
type reference struct {
c *grpcClient
id string
def *opspb.Definition
}
func newReference(c *grpcClient, ref *pb.Ref) (*reference, error) {
return &reference{c: c, id: ref.Id, def: ref.Def}, nil
}
func (r *reference) ToState() (st llb.State, err error) {
err = r.c.caps.Supports(pb.CapReferenceOutput)
if err != nil {
return st, err
}
if r.def == nil {
return st, errors.Errorf("gateway did not return reference with definition")
}
defop, err := llb.NewDefinitionOp(r.def)
if err != nil {
return st, err
}
return llb.NewState(defop), nil
}
func (r *reference) ReadFile(ctx context.Context, req client.ReadRequest) ([]byte, error) {
rfr := &pb.ReadFileRequest{FilePath: req.Filename, Ref: r.id}
if r := req.Range; r != nil {
rfr.Range = &pb.FileRange{
Offset: int64(r.Offset),
Length: int64(r.Length),
}
}
resp, err := r.c.client.ReadFile(ctx, rfr)
if err != nil {
return nil, err
}
return resp.Data, nil
}
func (r *reference) ReadDir(ctx context.Context, req client.ReadDirRequest) ([]*fstypes.Stat, error) {
if err := r.c.caps.Supports(pb.CapReadDir); err != nil {
return nil, err
}
rdr := &pb.ReadDirRequest{
DirPath: req.Path,
IncludePattern: req.IncludePattern,
Ref: r.id,
}
resp, err := r.c.client.ReadDir(ctx, rdr)
if err != nil {
return nil, err
}
return resp.Entries, nil
}
func (r *reference) StatFile(ctx context.Context, req client.StatRequest) (*fstypes.Stat, error) {
if err := r.c.caps.Supports(pb.CapStatFile); err != nil {
return nil, err
}
rdr := &pb.StatFileRequest{
Path: req.Path,
Ref: r.id,
}
resp, err := r.c.client.StatFile(ctx, rdr)
if err != nil {
return nil, err
}
return resp.Stat, nil
}
func grpcClientConn(ctx context.Context) (context.Context, *grpc.ClientConn, error) {
dialOpt := grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
return stdioConn(), nil
})
cc, err := grpc.DialContext(ctx, "localhost", dialOpt, grpc.WithInsecure(), grpc.WithUnaryInterceptor(grpcerrors.UnaryClientInterceptor), grpc.WithStreamInterceptor(grpcerrors.StreamClientInterceptor))
if err != nil {
return nil, nil, errors.Wrap(err, "failed to create grpc client")
}
ctx, cancel := context.WithCancel(ctx)
_ = cancel
// go monitorHealth(ctx, cc, cancel)
return ctx, cc, nil
}
func stdioConn() net.Conn {
return &conn{os.Stdin, os.Stdout, os.Stdout}
}
type conn struct {
io.Reader
io.Writer
io.Closer
}
func (s *conn) LocalAddr() net.Addr {
return dummyAddr{}
}
func (s *conn) RemoteAddr() net.Addr {
return dummyAddr{}
}
func (s *conn) SetDeadline(t time.Time) error {
return nil
}
func (s *conn) SetReadDeadline(t time.Time) error {
return nil
}
func (s *conn) SetWriteDeadline(t time.Time) error {
return nil
}
type dummyAddr struct {
}
func (d dummyAddr) Network() string {
return "pipe"
}
func (d dummyAddr) String() string {
return "localhost"
}
func opts() map[string]string {
opts := map[string]string{}
for _, env := range os.Environ() {
parts := strings.SplitN(env, "=", 2)
k := parts[0]
v := ""
if len(parts) == 2 {
v = parts[1]
}
if !strings.HasPrefix(k, frontendPrefix) {
continue
}
parts = strings.SplitN(v, "=", 2)
v = ""
if len(parts) == 2 {
v = parts[1]
}
opts[parts[0]] = v
}
return opts
}
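// Illustrative example (added commentary; the exact prefix constant is defined
// elsewhere in this package and is assumed here to be "BUILDKIT_FRONTEND_OPT_"):
// an environment containing
//
//	BUILDKIT_FRONTEND_OPT_0=filename=Dockerfile
//	BUILDKIT_FRONTEND_OPT_1=build-arg:FOO=bar
//
// would make opts() return map[string]string{"filename": "Dockerfile", "build-arg:FOO": "bar"}.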
func sessionID() string {
return os.Getenv("BUILDKIT_SESSION_ID")
}
func workers() []client.WorkerInfo {
var c []client.WorkerInfo
if err := json.Unmarshal([]byte(os.Getenv("BUILDKIT_WORKERS")), &c); err != nil {
return nil
}
return c
}
func product() string {
return os.Getenv("BUILDKIT_EXPORTEDPRODUCT")
}
func convertGogoAny(in []*gogotypes.Any) []*any.Any {
out := make([]*any.Any, len(in))
for i := range in {
out[i] = &any.Any{TypeUrl: in[i].TypeUrl, Value: in[i].Value}
}
return out
}
func convertToGogoAny(in []*any.Any) []*gogotypes.Any {
out := make([]*gogotypes.Any, len(in))
for i := range in {
out[i] = &gogotypes.Any{TypeUrl: in[i].TypeUrl, Value: in[i].Value}
}
return out
}
| [
"\"BUILDKIT_SESSION_ID\"",
"\"BUILDKIT_WORKERS\"",
"\"BUILDKIT_EXPORTEDPRODUCT\""
] | [] | [
"BUILDKIT_WORKERS",
"BUILDKIT_SESSION_ID",
"BUILDKIT_EXPORTEDPRODUCT"
] | [] | ["BUILDKIT_WORKERS", "BUILDKIT_SESSION_ID", "BUILDKIT_EXPORTEDPRODUCT"] | go | 3 | 0 | |
pythonlearn/Third-Module/django/django_scrpt.py | # -*- coding: utf-8 -*-
#
" django模块导入作为脚本,结合gevent "
from gevent import monkey
monkey.patch_all()
import gevent
import gevent.pool
import os
import re
import sys
import signal
import json
import datetime
import traceback
import django
ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__), '../../django_demo'))
SCRIPT = os.path.join(ROOT, 'script')
# Add the Django project root to sys.path
sys.path.append(ROOT)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_demo.settings")
django.setup()
from django.db import InterfaceError, DatabaseError, connections
from django.db.models import F, Sum, Q
from django.conf import settings
from django_redis import get_redis_connection
from django.core.exceptions import ObjectDoesNotExist
import logging
_handler = logging.StreamHandler()
_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(module)s:%(lineno)s %(message)s'))
log = logging.getLogger('Test')
log.addHandler(_handler)
log.setLevel(logging.DEBUG)
redis = get_redis_connection()
signal_stop = False
# ###########################################################
def do_worker1(task_id):
log.info('-------do_worker1-------')
return
def worker1():
while True:
if signal_stop: break
_, task_id = redis.brpop(settings.EDMWEB_NULL_TEMPLATE_QUEQUE)
try:
do_worker1(task_id)
gevent.sleep(0.5)
except ObjectDoesNotExist:
log.error(traceback.format_exc())
gevent.sleep(0.1)
        except (DatabaseError, InterfaceError) as e:
redis.rpush(settings.EDMWEB_NULL_TEMPLATE_QUEQUE, task_id)
log.error(traceback.format_exc())
reconnect('dbname')
gevent.sleep(0.1)
except BaseException as e:
log.error(traceback.format_exc())
gevent.sleep(0.1)
# ###########################################################
def do_worker2():
log.info('-------do_worker2-------')
return
def worker2():
pool = gevent.pool.Pool(50)
while True:
if signal_stop: break
pool.spawn(do_worker2)
gevent.sleep(0.1)
pool.join()
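# Note (added commentary, not in the original script): worker1 blocks on a Redis
# list via brpop and handles one task id at a time, pushing the id back onto the
# queue when the database connection drops; worker2 fans work out through a
# gevent pool of 50 greenlets. main() runs both: worker1 in a spawned greenlet
# and worker2 in the main greenlet, and both loops exit once signal_stop is set.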
# ###########################################################
def reconnect(dbname):
try:
log.info('reconnect {} start'.format(dbname))
cursor = connections[dbname].cursor()
db = cursor.db
if db.connection is None or not db.is_usable():
db.close_if_unusable_or_obsolete()
with db.wrap_database_errors:
db.connect()
log.info(u'reconnect {} end'.format(dbname))
gevent.sleep(10)
except Exception as e:
log.warning(u'DB Connection error', exc_info=1)
def get_connect_cr(dbname):
cr = connections[dbname].cursor()
db = cr.db
if db.connection is None or not db.is_usable():
db.close_if_unusable_or_obsolete()
with db.wrap_database_errors:
db.connect()
cr = connections[dbname].cursor()
log.info(u'reconnect {} get end'.format(dbname))
return cr
############################################################
# Signal handling
def signal_handle(mode):
log.info("catch signal: %s" % mode)
global signal_stop
signal_stop = True
# Register the signal handlers that request program shutdown
def init_gevent_signal(handler):
    gevent.signal(signal.SIGINT, handler, 'sigint')   # handle Ctrl-C
    gevent.signal(signal.SIGTERM, handler, 'sigterm') # handle kill
    gevent.signal(signal.SIGALRM, handler, 'sigalrm') # handle signal.alarm()
# ###########################################################
def init():
log.info('init...')
def finish():
log.info('finish...')
def main():
gevent.spawn(worker1)
worker2()
if __name__ == "__main__":
    # Run the program
    EXIT_CODE = 0
    log.info(u'program start...')
    init_gevent_signal(signal_handle)
    init()
try:
main()
except KeyboardInterrupt:
signal_handle('sigint')
except:
log.error(traceback.format_exc())
EXIT_CODE = 1
finally:
finish()
log.info(u"program quit...")
sys.exit(EXIT_CODE)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
docs/source/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import os
on_rtd = os.environ.get('READTHEDOCS') == 'True'
# -- Project information -----------------------------------------------------
project = u'Asp.Net MVC 5 Starter Kit'
copyright = u'2019, Sarto Research, Thiago A. Schneider'
author = u'Sarto Research, Thiago A. Schneider'
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u'1.0.0'
dev=True
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.ifconfig',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
if on_rtd:
html_theme = 'default'
else:
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'AspNetMVC5StarterKitdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'AspNetMVC5StarterKit.tex', u'Asp.Net MVC 5 Starter Kit Documentation',
u'Sarto Research, Thiago A. Schneider', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'aspnetmvc5starterkit', u'Asp.Net MVC 5 Starter Kit Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'AspNetMVC5StarterKit', u'Asp.Net MVC 5 Starter Kit Documentation',
author, 'AspNetMVC5StarterKit', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
| [] | [] | [
"READTHEDOCS"
] | [] | ["READTHEDOCS"] | python | 1 | 0 | |
greaterwms/wsgi.py | """
WSGI config for greaterwms project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'greaterwms.settings')
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
services/bssopenapi/allocate_cost_unit_resource.go | package bssopenapi
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
)
// AllocateCostUnitResource invokes the bssopenapi.AllocateCostUnitResource API synchronously
// api document: https://help.aliyun.com/api/bssopenapi/allocatecostunitresource.html
func (client *Client) AllocateCostUnitResource(request *AllocateCostUnitResourceRequest) (response *AllocateCostUnitResourceResponse, err error) {
response = CreateAllocateCostUnitResourceResponse()
err = client.DoAction(request, response)
return
}
// AllocateCostUnitResourceWithChan invokes the bssopenapi.AllocateCostUnitResource API asynchronously
// api document: https://help.aliyun.com/api/bssopenapi/allocatecostunitresource.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) AllocateCostUnitResourceWithChan(request *AllocateCostUnitResourceRequest) (<-chan *AllocateCostUnitResourceResponse, <-chan error) {
responseChan := make(chan *AllocateCostUnitResourceResponse, 1)
errChan := make(chan error, 1)
err := client.AddAsyncTask(func() {
defer close(responseChan)
defer close(errChan)
response, err := client.AllocateCostUnitResource(request)
if err != nil {
errChan <- err
} else {
responseChan <- response
}
})
if err != nil {
errChan <- err
close(responseChan)
close(errChan)
}
return responseChan, errChan
}
// AllocateCostUnitResourceWithCallback invokes the bssopenapi.AllocateCostUnitResource API asynchronously
// api document: https://help.aliyun.com/api/bssopenapi/allocatecostunitresource.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) AllocateCostUnitResourceWithCallback(request *AllocateCostUnitResourceRequest, callback func(response *AllocateCostUnitResourceResponse, err error)) <-chan int {
result := make(chan int, 1)
err := client.AddAsyncTask(func() {
var response *AllocateCostUnitResourceResponse
var err error
defer close(result)
response, err = client.AllocateCostUnitResource(request)
callback(response, err)
result <- 1
})
if err != nil {
defer close(result)
callback(nil, err)
result <- 0
}
return result
}
// AllocateCostUnitResourceRequest is the request struct for api AllocateCostUnitResource
type AllocateCostUnitResourceRequest struct {
*requests.RpcRequest
ResourceInstanceList *[]AllocateCostUnitResourceResourceInstanceList `position:"Query" name:"ResourceInstanceList" type:"Repeated"`
FromUnitId requests.Integer `position:"Query" name:"FromUnitId"`
ToUnitId requests.Integer `position:"Query" name:"ToUnitId"`
FromUnitUserId requests.Integer `position:"Query" name:"FromUnitUserId"`
ToUnitUserId requests.Integer `position:"Query" name:"ToUnitUserId"`
}
// AllocateCostUnitResourceResourceInstanceList is a repeated param struct in AllocateCostUnitResourceRequest
type AllocateCostUnitResourceResourceInstanceList struct {
ResourceId string `name:"ResourceId"`
CommodityCode string `name:"CommodityCode"`
ResourceUserId string `name:"ResourceUserId"`
}
// AllocateCostUnitResourceResponse is the response struct for api AllocateCostUnitResource
type AllocateCostUnitResourceResponse struct {
*responses.BaseResponse
RequestId string `json:"RequestId" xml:"RequestId"`
Success bool `json:"Success" xml:"Success"`
Code string `json:"Code" xml:"Code"`
Message string `json:"Message" xml:"Message"`
Data Data `json:"Data" xml:"Data"`
}
// CreateAllocateCostUnitResourceRequest creates a request to invoke AllocateCostUnitResource API
func CreateAllocateCostUnitResourceRequest() (request *AllocateCostUnitResourceRequest) {
request = &AllocateCostUnitResourceRequest{
RpcRequest: &requests.RpcRequest{},
}
request.InitWithApiInfo("BssOpenApi", "2017-12-14", "AllocateCostUnitResource", "", "")
request.Method = requests.POST
return
}
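// Usage sketch (added commentary, not part of the generated file; field values
// are illustrative):
//
//	request := CreateAllocateCostUnitResourceRequest()
//	request.FromUnitId = requests.NewInteger(0)
//	request.ToUnitId = requests.NewInteger(12345)
//	response, err := client.AllocateCostUnitResource(request)
//	if err == nil && response.Success {
//		// the listed resource instances now belong to the target cost unit
//	}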
// CreateAllocateCostUnitResourceResponse creates a response to parse from AllocateCostUnitResource response
func CreateAllocateCostUnitResourceResponse() (response *AllocateCostUnitResourceResponse) {
response = &AllocateCostUnitResourceResponse{
BaseResponse: &responses.BaseResponse{},
}
return
}
| [] | [] | [] | [] | [] | go | null | null | null |
internal/driver/keys.go | // Copyright 2013 The Ebiten Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by genkeys.go using 'go generate'. DO NOT EDIT.
package driver
import (
"fmt"
)
type Key int
const (
Key0 Key = iota
Key1
Key2
Key3
Key4
Key5
Key6
Key7
Key8
Key9
KeyA
KeyB
KeyC
KeyD
KeyE
KeyF
KeyG
KeyH
KeyI
KeyJ
KeyK
KeyL
KeyM
KeyN
KeyO
KeyP
KeyQ
KeyR
KeyS
KeyT
KeyU
KeyV
KeyW
KeyX
KeyY
KeyZ
KeyApostrophe
KeyBackslash
KeyBackspace
KeyCapsLock
KeyComma
KeyDelete
KeyDown
KeyEnd
KeyEnter
KeyEqual
KeyEscape
KeyF1
KeyF2
KeyF3
KeyF4
KeyF5
KeyF6
KeyF7
KeyF8
KeyF9
KeyF10
KeyF11
KeyF12
KeyGraveAccent
KeyHome
KeyInsert
KeyKP0
KeyKP1
KeyKP2
KeyKP3
KeyKP4
KeyKP5
KeyKP6
KeyKP7
KeyKP8
KeyKP9
KeyKPAdd
KeyKPDecimal
KeyKPDivide
KeyKPEnter
KeyKPEqual
KeyKPMultiply
KeyKPSubtract
KeyLeft
KeyLeftAlt
KeyLeftBracket
KeyLeftControl
KeyLeftShift
KeyMenu
KeyMinus
KeyNumLock
KeyPageDown
KeyPageUp
KeyPause
KeyPeriod
KeyPrintScreen
KeyRight
KeyRightAlt
KeyRightBracket
KeyRightControl
KeyRightShift
KeyScrollLock
KeySemicolon
KeySlash
KeySpace
KeyTab
KeyUp
KeyReserved0
KeyReserved1
KeyReserved2
)
func (k Key) String() string {
switch k {
case Key0:
return "Key0"
case Key1:
return "Key1"
case Key2:
return "Key2"
case Key3:
return "Key3"
case Key4:
return "Key4"
case Key5:
return "Key5"
case Key6:
return "Key6"
case Key7:
return "Key7"
case Key8:
return "Key8"
case Key9:
return "Key9"
case KeyA:
return "KeyA"
case KeyB:
return "KeyB"
case KeyC:
return "KeyC"
case KeyD:
return "KeyD"
case KeyE:
return "KeyE"
case KeyF:
return "KeyF"
case KeyG:
return "KeyG"
case KeyH:
return "KeyH"
case KeyI:
return "KeyI"
case KeyJ:
return "KeyJ"
case KeyK:
return "KeyK"
case KeyL:
return "KeyL"
case KeyM:
return "KeyM"
case KeyN:
return "KeyN"
case KeyO:
return "KeyO"
case KeyP:
return "KeyP"
case KeyQ:
return "KeyQ"
case KeyR:
return "KeyR"
case KeyS:
return "KeyS"
case KeyT:
return "KeyT"
case KeyU:
return "KeyU"
case KeyV:
return "KeyV"
case KeyW:
return "KeyW"
case KeyX:
return "KeyX"
case KeyY:
return "KeyY"
case KeyZ:
return "KeyZ"
case KeyApostrophe:
return "KeyApostrophe"
case KeyBackslash:
return "KeyBackslash"
case KeyBackspace:
return "KeyBackspace"
case KeyCapsLock:
return "KeyCapsLock"
case KeyComma:
return "KeyComma"
case KeyDelete:
return "KeyDelete"
case KeyDown:
return "KeyDown"
case KeyEnd:
return "KeyEnd"
case KeyEnter:
return "KeyEnter"
case KeyEqual:
return "KeyEqual"
case KeyEscape:
return "KeyEscape"
case KeyF1:
return "KeyF1"
case KeyF2:
return "KeyF2"
case KeyF3:
return "KeyF3"
case KeyF4:
return "KeyF4"
case KeyF5:
return "KeyF5"
case KeyF6:
return "KeyF6"
case KeyF7:
return "KeyF7"
case KeyF8:
return "KeyF8"
case KeyF9:
return "KeyF9"
case KeyF10:
return "KeyF10"
case KeyF11:
return "KeyF11"
case KeyF12:
return "KeyF12"
case KeyGraveAccent:
return "KeyGraveAccent"
case KeyHome:
return "KeyHome"
case KeyInsert:
return "KeyInsert"
case KeyKP0:
return "KeyKP0"
case KeyKP1:
return "KeyKP1"
case KeyKP2:
return "KeyKP2"
case KeyKP3:
return "KeyKP3"
case KeyKP4:
return "KeyKP4"
case KeyKP5:
return "KeyKP5"
case KeyKP6:
return "KeyKP6"
case KeyKP7:
return "KeyKP7"
case KeyKP8:
return "KeyKP8"
case KeyKP9:
return "KeyKP9"
case KeyKPAdd:
return "KeyKPAdd"
case KeyKPDecimal:
return "KeyKPDecimal"
case KeyKPDivide:
return "KeyKPDivide"
case KeyKPEnter:
return "KeyKPEnter"
case KeyKPEqual:
return "KeyKPEqual"
case KeyKPMultiply:
return "KeyKPMultiply"
case KeyKPSubtract:
return "KeyKPSubtract"
case KeyLeft:
return "KeyLeft"
case KeyLeftAlt:
return "KeyLeftAlt"
case KeyLeftBracket:
return "KeyLeftBracket"
case KeyLeftControl:
return "KeyLeftControl"
case KeyLeftShift:
return "KeyLeftShift"
case KeyMenu:
return "KeyMenu"
case KeyMinus:
return "KeyMinus"
case KeyNumLock:
return "KeyNumLock"
case KeyPageDown:
return "KeyPageDown"
case KeyPageUp:
return "KeyPageUp"
case KeyPause:
return "KeyPause"
case KeyPeriod:
return "KeyPeriod"
case KeyPrintScreen:
return "KeyPrintScreen"
case KeyRight:
return "KeyRight"
case KeyRightAlt:
return "KeyRightAlt"
case KeyRightBracket:
return "KeyRightBracket"
case KeyRightControl:
return "KeyRightControl"
case KeyRightShift:
return "KeyRightShift"
case KeyScrollLock:
return "KeyScrollLock"
case KeySemicolon:
return "KeySemicolon"
case KeySlash:
return "KeySlash"
case KeySpace:
return "KeySpace"
case KeyTab:
return "KeyTab"
case KeyUp:
return "KeyUp"
}
panic(fmt.Sprintf("driver: invalid key: %d", k))
}
| [] | [] | [] | [] | [] | go | null | null | null |
src/subscriber.py | #!/usr/bin/env python
#
# =============================================================================
#
# Consume Avro messages to Kafka getting the schema from the Confluent registry
# Using Confluent Python Client
#
# =============================================================================
from confluent_kafka import avro
from confluent_kafka.avro import AvroConsumer
from confluent_kafka.avro.serializer import SerializerError
import os
from requests import get
from messages import Message
class Subscriber:
def __init__(
self,
topic,
schema_registry_url,
bootstrap_servers,
group_id
):
self.topic = topic
self.bootstrap_servers = bootstrap_servers
self.schema_registry_url = schema_registry_url
self.group_id = group_id
self.schema = self.get_last_schema_version()
# also the message key is serialised in Avro
self.schema_key = avro.loads(
"{\"namespace\": \"io.confluent.schema.keys\", \"name\": \"Key\", \"type\": \"string\"}"
)
self.consumer = AvroConsumer(
{
"bootstrap.servers": self.bootstrap_servers,
"schema.registry.url": self.schema_registry_url,
"group.id": self.group_id,
"auto.offset.reset": "latest"
},
reader_key_schema=self.schema_key,
reader_value_schema=self.schema
)
self.consumer.subscribe([self.topic])
def get_last_schema_version(self):
"""
get the schema from the schema registry
"""
versions = get(
url=f"{schema_registry_url}/subjects/{self.topic}-value/versions/"
).json()
schema = get(
url=(
f"{schema_registry_url}/subjects/{self.topic}-value/"
f"versions/{max(versions)}"
)
).json()["schema"]
return avro.loads(schema)
def to_message(self, dict_obj):
"""
messages are serialised in Avro using a dict representation of the
messages (which need to be defined as data classes). Messages are
then deserialised passing from dictionaries to the actual Message
object.
Args:
dict_obj (dict): the message as a Python dictionary
Returns:
(Message): the user-defined message object
"""
if not dict_obj:
return None
return Message(
string_key=dict_obj["stringKey"],
int_key=dict_obj["intKey"]
)
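    # Illustrative example (added commentary, not in the original module): given a
    # decoded Avro payload {"stringKey": "abc", "intKey": 7}, to_message() returns
    # Message(string_key="abc", int_key=7); a None payload (e.g. a tombstone
    # record) is passed through as None.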
def poll(self, timeout=1.0, max_num_records_per_poll=5):
"""
wrapper generator of the `poll` method
NOTE: Confluent client doesn't have the batching
system yet implemented, we need to implement
it here
Args:
timeout (float): timeout in the poll operation
max_num_records_per_poll (int): number of polled records
required to commit
"""
num_records = 1
while True:
try:
# SIGINT can't be handled when polling, set a proper timeout
msg = self.consumer.poll(timeout)
if msg is None:
continue
yield msg.key(), self.to_message(msg.value())
num_records += 1
if num_records == max_num_records_per_poll:
self.consumer.commit()
num_records = 1
except SerializerError as e:
# try to reload the consumer with the latest schema uploaded
# in the schema registry
self.consumer = AvroConsumer(
{
"bootstrap.servers": self.bootstrap_servers,
"schema.registry.url": self.schema_registry_url,
"group.id": self.group_id,
"auto.offset.reset": "latest"
},
reader_key_schema=self.schema_key,
reader_value_schema=self.schema
)
self.consumer.subscribe([self.topic])
continue
except KeyboardInterrupt:
self.consumer.commit()
break
if __name__ == "__main__":
bootstrap_servers = os.environ.get("BOOTSTRAP_SERVERS")
topic = os.environ.get("TOPIC")
schema_registry_url = os.environ.get("SCHEMA_REGISTRY_URL")
group_id = os.environ.get("KAFKA_GROUP_ID")
timeout = float(os.environ.get("KAFKA_POLL_TIMEOUT"))
max_num_records_per_poll = int(os.environ.get("KAFKA_POLL_MAX_RECS_PER_POLL"))
subscriber = Subscriber(
topic,
schema_registry_url,
bootstrap_servers,
group_id
)
for message in subscriber.poll(timeout, max_num_records_per_poll):
print(
f"Consumed record {message[0]}:\tstring: {message[1].string_key}"
f", number: {message[1].int_key}"
)
| [] | [] | [
"SCHEMA_REGISTRY_URL",
"KAFKA_GROUP_ID",
"KAFKA_POLL_MAX_RECS_PER_POLL",
"BOOTSTRAP_SERVERS",
"KAFKA_POLL_TIMEOUT",
"TOPIC"
] | [] | ["SCHEMA_REGISTRY_URL", "KAFKA_GROUP_ID", "KAFKA_POLL_MAX_RECS_PER_POLL", "BOOTSTRAP_SERVERS", "KAFKA_POLL_TIMEOUT", "TOPIC"] | python | 6 | 0 | |
mesonbuild/modules/python.py | # Copyright 2018 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import shutil
import typing as T
from pathlib import Path
from .. import mesonlib
from ..mesonlib import MachineChoice, MesonException
from . import ExtensionModule
from mesonbuild.modules import ModuleReturnValue
from ..interpreterbase import (
noPosargs, noKwargs, permittedKwargs,
InvalidArguments,
FeatureNew, FeatureNewKwargs, disablerIfNotFound
)
from ..interpreter import ExternalProgramHolder, extract_required_kwarg, permitted_kwargs
from ..build import known_shmod_kwargs
from .. import mlog
from ..environment import detect_cpu_family
from ..dependencies.base import (
DependencyMethods, ExternalDependency,
ExternalProgram, PkgConfigDependency,
NonExistingExternalProgram, NotFoundDependency
)
mod_kwargs = set(['subdir'])
mod_kwargs.update(known_shmod_kwargs)
mod_kwargs -= set(['name_prefix', 'name_suffix'])
class PythonDependency(ExternalDependency):
def __init__(self, python_holder, environment, kwargs):
super().__init__('python', environment, kwargs)
self.name = 'python'
self.static = kwargs.get('static', False)
self.embed = kwargs.get('embed', False)
self.version = python_holder.version
self.platform = python_holder.platform
self.pkgdep = None
self.variables = python_holder.variables
self.paths = python_holder.paths
self.link_libpython = python_holder.link_libpython
if mesonlib.version_compare(self.version, '>= 3.0'):
self.major_version = 3
else:
self.major_version = 2
# We first try to find the necessary python variables using pkgconfig
if DependencyMethods.PKGCONFIG in self.methods and not python_holder.is_pypy:
pkg_version = self.variables.get('LDVERSION') or self.version
pkg_libdir = self.variables.get('LIBPC')
pkg_embed = '-embed' if self.embed and mesonlib.version_compare(self.version, '>=3.8') else ''
pkg_name = 'python-{}{}'.format(pkg_version, pkg_embed)
# If python-X.Y.pc exists in LIBPC, we will try to use it
if pkg_libdir is not None and Path(os.path.join(pkg_libdir, '{}.pc'.format(pkg_name))).is_file():
old_pkg_libdir = os.environ.get('PKG_CONFIG_LIBDIR')
old_pkg_path = os.environ.get('PKG_CONFIG_PATH')
os.environ.pop('PKG_CONFIG_PATH', None)
if pkg_libdir:
os.environ['PKG_CONFIG_LIBDIR'] = pkg_libdir
try:
self.pkgdep = PkgConfigDependency(pkg_name, environment, kwargs)
mlog.debug('Found "{}" via pkgconfig lookup in LIBPC ({})'.format(pkg_name, pkg_libdir))
py_lookup_method = 'pkgconfig'
except MesonException as e:
mlog.debug('"{}" could not be found in LIBPC ({})'.format(pkg_name, pkg_libdir))
mlog.debug(e)
if old_pkg_path is not None:
os.environ['PKG_CONFIG_PATH'] = old_pkg_path
if old_pkg_libdir is not None:
os.environ['PKG_CONFIG_LIBDIR'] = old_pkg_libdir
else:
os.environ.pop('PKG_CONFIG_LIBDIR', None)
else:
mlog.debug('"{}" could not be found in LIBPC ({}), this is likely due to a relocated python installation'.format(pkg_name, pkg_libdir))
# If lookup via LIBPC failed, try to use fallback PKG_CONFIG_LIBDIR/PKG_CONFIG_PATH mechanisms
if self.pkgdep is None or not self.pkgdep.found():
try:
self.pkgdep = PkgConfigDependency(pkg_name, environment, kwargs)
mlog.debug('Found "{}" via fallback pkgconfig lookup in PKG_CONFIG_LIBDIR/PKG_CONFIG_PATH'.format(pkg_name))
py_lookup_method = 'pkgconfig-fallback'
except MesonException as e:
mlog.debug('"{}" could not be found via fallback pkgconfig lookup in PKG_CONFIG_LIBDIR/PKG_CONFIG_PATH'.format(pkg_name))
mlog.debug(e)
if self.pkgdep and self.pkgdep.found():
self.compile_args = self.pkgdep.get_compile_args()
self.link_args = self.pkgdep.get_link_args()
self.is_found = True
self.pcdep = self.pkgdep
else:
self.pkgdep = None
# Finally, try to find python via SYSCONFIG as a final measure
if DependencyMethods.SYSCONFIG in self.methods:
if mesonlib.is_windows():
self._find_libpy_windows(environment)
else:
self._find_libpy(python_holder, environment)
if self.is_found:
mlog.debug('Found "python-{}" via SYSCONFIG module'.format(self.version))
py_lookup_method = 'sysconfig'
if self.is_found:
mlog.log('Dependency', mlog.bold(self.name), 'found:', mlog.green('YES ({})'.format(py_lookup_method)))
else:
mlog.log('Dependency', mlog.bold(self.name), 'found:', mlog.red('NO'))
def _find_libpy(self, python_holder, environment):
if python_holder.is_pypy:
if self.major_version == 3:
libname = 'pypy3-c'
else:
libname = 'pypy-c'
libdir = os.path.join(self.variables.get('base'), 'bin')
libdirs = [libdir]
else:
libname = 'python{}'.format(self.version)
if 'DEBUG_EXT' in self.variables:
libname += self.variables['DEBUG_EXT']
if 'ABIFLAGS' in self.variables:
libname += self.variables['ABIFLAGS']
libdirs = []
largs = self.clib_compiler.find_library(libname, environment, libdirs)
if largs is not None:
self.link_args = largs
self.is_found = largs is not None or self.link_libpython
inc_paths = mesonlib.OrderedSet([
self.variables.get('INCLUDEPY'),
self.paths.get('include'),
self.paths.get('platinclude')])
self.compile_args += ['-I' + path for path in inc_paths if path]
def get_windows_python_arch(self):
if self.platform == 'mingw':
pycc = self.variables.get('CC')
if pycc.startswith('x86_64'):
return '64'
elif pycc.startswith(('i686', 'i386')):
return '32'
else:
                mlog.log('MinGW Python built with unknown CC {!r}, please file '
                         'a bug'.format(pycc))
return None
elif self.platform == 'win32':
return '32'
elif self.platform in ('win64', 'win-amd64'):
return '64'
mlog.log('Unknown Windows Python platform {!r}'.format(self.platform))
return None
def get_windows_link_args(self):
if self.platform.startswith('win'):
vernum = self.variables.get('py_version_nodot')
if self.static:
libpath = Path('libs') / 'libpython{}.a'.format(vernum)
else:
comp = self.get_compiler()
if comp.id == "gcc":
libpath = 'python{}.dll'.format(vernum)
else:
libpath = Path('libs') / 'python{}.lib'.format(vernum)
lib = Path(self.variables.get('base')) / libpath
elif self.platform == 'mingw':
if self.static:
libname = self.variables.get('LIBRARY')
else:
libname = self.variables.get('LDLIBRARY')
lib = Path(self.variables.get('LIBDIR')) / libname
if not lib.exists():
mlog.log('Could not find Python3 library {!r}'.format(str(lib)))
return None
return [str(lib)]
def _find_libpy_windows(self, env):
'''
Find python3 libraries on Windows and also verify that the arch matches
what we are building for.
'''
pyarch = self.get_windows_python_arch()
if pyarch is None:
self.is_found = False
return
arch = detect_cpu_family(env.coredata.compilers.host)
if arch == 'x86':
arch = '32'
elif arch == 'x86_64':
arch = '64'
else:
# We can't cross-compile Python 3 dependencies on Windows yet
mlog.log('Unknown architecture {!r} for'.format(arch),
mlog.bold(self.name))
self.is_found = False
return
# Pyarch ends in '32' or '64'
if arch != pyarch:
mlog.log('Need', mlog.bold(self.name), 'for {}-bit, but '
'found {}-bit'.format(arch, pyarch))
self.is_found = False
return
# This can fail if the library is not found
largs = self.get_windows_link_args()
if largs is None:
self.is_found = False
return
self.link_args = largs
# Compile args
inc_paths = mesonlib.OrderedSet([
self.variables.get('INCLUDEPY'),
self.paths.get('include'),
self.paths.get('platinclude')])
self.compile_args += ['-I' + path for path in inc_paths if path]
# https://sourceforge.net/p/mingw-w64/mailman/message/30504611/
if pyarch == '64' and self.major_version == 2:
self.compile_args += ['-DMS_WIN64']
self.is_found = True
@staticmethod
def get_methods():
if mesonlib.is_windows():
return [DependencyMethods.PKGCONFIG, DependencyMethods.SYSCONFIG]
elif mesonlib.is_osx():
return [DependencyMethods.PKGCONFIG, DependencyMethods.EXTRAFRAMEWORK]
else:
return [DependencyMethods.PKGCONFIG, DependencyMethods.SYSCONFIG]
def get_pkgconfig_variable(self, variable_name, kwargs):
if self.pkgdep:
return self.pkgdep.get_pkgconfig_variable(variable_name, kwargs)
else:
return super().get_pkgconfig_variable(variable_name, kwargs)
INTROSPECT_COMMAND = '''import sysconfig
import json
import sys
install_paths = sysconfig.get_paths(scheme='posix_prefix', vars={'base': '', 'platbase': '', 'installed_base': ''})
def links_against_libpython():
from distutils.core import Distribution, Extension
cmd = Distribution().get_command_obj('build_ext')
cmd.ensure_finalized()
return bool(cmd.get_libraries(Extension('dummy', [])))
print (json.dumps ({
'variables': sysconfig.get_config_vars(),
'paths': sysconfig.get_paths(),
'install_paths': install_paths,
'version': sysconfig.get_python_version(),
'platform': sysconfig.get_platform(),
'is_pypy': '__pypy__' in sys.builtin_module_names,
'link_libpython': links_against_libpython(),
}))
'''
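# Shape of the JSON printed by the introspection script above (illustrative,
# added commentary; actual values depend on the interpreter being probed):
#   {
#     "variables": {...},      # sysconfig.get_config_vars()
#     "paths": {...},          # sysconfig.get_paths()
#     "install_paths": {...},  # install scheme paths relative to an empty prefix
#     "version": "3.8",
#     "platform": "linux-x86_64",
#     "is_pypy": false,
#     "link_libpython": true
#   }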
class PythonInstallation(ExternalProgramHolder):
def __init__(self, interpreter, python, info):
ExternalProgramHolder.__init__(self, python, interpreter.subproject)
self.interpreter = interpreter
self.subproject = self.interpreter.subproject
prefix = self.interpreter.environment.coredata.get_builtin_option('prefix')
self.variables = info['variables']
self.paths = info['paths']
install_paths = info['install_paths']
self.platlib_install_path = os.path.join(prefix, install_paths['platlib'][1:])
self.purelib_install_path = os.path.join(prefix, install_paths['purelib'][1:])
self.version = info['version']
self.platform = info['platform']
self.is_pypy = info['is_pypy']
self.link_libpython = info['link_libpython']
self.methods.update({
'extension_module': self.extension_module_method,
'dependency': self.dependency_method,
'install_sources': self.install_sources_method,
'get_install_dir': self.get_install_dir_method,
'language_version': self.language_version_method,
'found': self.found_method,
'has_path': self.has_path_method,
'get_path': self.get_path_method,
'has_variable': self.has_variable_method,
'get_variable': self.get_variable_method,
'path': self.path_method,
})
@permittedKwargs(mod_kwargs)
def extension_module_method(self, args, kwargs):
if 'subdir' in kwargs and 'install_dir' in kwargs:
raise InvalidArguments('"subdir" and "install_dir" are mutually exclusive')
if 'subdir' in kwargs:
subdir = kwargs.pop('subdir', '')
if not isinstance(subdir, str):
raise InvalidArguments('"subdir" argument must be a string.')
kwargs['install_dir'] = os.path.join(self.platlib_install_path, subdir)
# On macOS and some Linux distros (Debian) distutils doesn't link
# extensions against libpython. We call into distutils and mirror its
# behavior. See https://github.com/mesonbuild/meson/issues/4117
if not self.link_libpython:
new_deps = []
for holder in mesonlib.extract_as_list(kwargs, 'dependencies'):
dep = holder.held_object
if isinstance(dep, PythonDependency):
holder = self.interpreter.holderify(dep.get_partial_dependency(compile_args=True))
new_deps.append(holder)
kwargs['dependencies'] = new_deps
suffix = self.variables.get('EXT_SUFFIX') or self.variables.get('SO') or self.variables.get('.so')
# msys2's python3 has "-cpython-36m.dll", we have to be clever
split = suffix.rsplit('.', 1)
suffix = split.pop(-1)
args[0] += ''.join(s for s in split)
kwargs['name_prefix'] = ''
kwargs['name_suffix'] = suffix
return self.interpreter.func_shared_module(None, args, kwargs)
@permittedKwargs(permitted_kwargs['dependency'])
@FeatureNewKwargs('python_installation.dependency', '0.53.0', ['embed'])
def dependency_method(self, args, kwargs):
if args:
mlog.warning('python_installation.dependency() does not take any '
'positional arguments. It always returns a Python '
'dependency. This will become an error in the future.',
location=self.interpreter.current_node)
disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
if disabled:
mlog.log('Dependency', mlog.bold('python'), 'skipped: feature', mlog.bold(feature), 'disabled')
dep = NotFoundDependency(self.interpreter.environment)
else:
dep = PythonDependency(self, self.interpreter.environment, kwargs)
if required and not dep.found():
raise mesonlib.MesonException('Python dependency not found')
return self.interpreter.holderify(dep)
@permittedKwargs(['pure', 'subdir'])
def install_sources_method(self, args, kwargs):
pure = kwargs.pop('pure', True)
if not isinstance(pure, bool):
raise InvalidArguments('"pure" argument must be a boolean.')
subdir = kwargs.pop('subdir', '')
if not isinstance(subdir, str):
raise InvalidArguments('"subdir" argument must be a string.')
if pure:
kwargs['install_dir'] = os.path.join(self.purelib_install_path, subdir)
else:
kwargs['install_dir'] = os.path.join(self.platlib_install_path, subdir)
return self.interpreter.holderify(self.interpreter.func_install_data(None, args, kwargs))
@noPosargs
@permittedKwargs(['pure', 'subdir'])
def get_install_dir_method(self, args, kwargs):
pure = kwargs.pop('pure', True)
if not isinstance(pure, bool):
raise InvalidArguments('"pure" argument must be a boolean.')
subdir = kwargs.pop('subdir', '')
if not isinstance(subdir, str):
raise InvalidArguments('"subdir" argument must be a string.')
if pure:
res = os.path.join(self.purelib_install_path, subdir)
else:
res = os.path.join(self.platlib_install_path, subdir)
return self.interpreter.module_method_callback(ModuleReturnValue(res, []))
@noPosargs
@noKwargs
def language_version_method(self, args, kwargs):
return self.interpreter.module_method_callback(ModuleReturnValue(self.version, []))
@noKwargs
def has_path_method(self, args, kwargs):
if len(args) != 1:
raise InvalidArguments('has_path takes exactly one positional argument.')
path_name = args[0]
if not isinstance(path_name, str):
raise InvalidArguments('has_path argument must be a string.')
return self.interpreter.module_method_callback(ModuleReturnValue(path_name in self.paths, []))
@noKwargs
def get_path_method(self, args, kwargs):
if len(args) not in (1, 2):
raise InvalidArguments('get_path must have one or two arguments.')
path_name = args[0]
if not isinstance(path_name, str):
raise InvalidArguments('get_path argument must be a string.')
try:
path = self.paths[path_name]
except KeyError:
if len(args) == 2:
path = args[1]
else:
raise InvalidArguments('{} is not a valid path name'.format(path_name))
return self.interpreter.module_method_callback(ModuleReturnValue(path, []))
@noKwargs
def has_variable_method(self, args, kwargs):
if len(args) != 1:
raise InvalidArguments('has_variable takes exactly one positional argument.')
var_name = args[0]
if not isinstance(var_name, str):
raise InvalidArguments('has_variable argument must be a string.')
return self.interpreter.module_method_callback(ModuleReturnValue(var_name in self.variables, []))
@noKwargs
def get_variable_method(self, args, kwargs):
if len(args) not in (1, 2):
raise InvalidArguments('get_variable must have one or two arguments.')
var_name = args[0]
if not isinstance(var_name, str):
raise InvalidArguments('get_variable argument must be a string.')
try:
var = self.variables[var_name]
except KeyError:
if len(args) == 2:
var = args[1]
else:
raise InvalidArguments('{} is not a valid variable name'.format(var_name))
return self.interpreter.module_method_callback(ModuleReturnValue(var, []))
@noPosargs
@noKwargs
@FeatureNew('Python module path method', '0.50.0')
def path_method(self, args, kwargs):
return super().path_method(args, kwargs)
class PythonModule(ExtensionModule):
@FeatureNew('Python Module', '0.46.0')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.snippets.add('find_installation')
# https://www.python.org/dev/peps/pep-0397/
def _get_win_pythonpath(self, name_or_path):
if name_or_path not in ['python2', 'python3']:
return None
if not shutil.which('py'):
# program not installed, return without an exception
return None
ver = {'python2': '-2', 'python3': '-3'}[name_or_path]
cmd = ['py', ver, '-c', "import sysconfig; print(sysconfig.get_config_var('BINDIR'))"]
_, stdout, _ = mesonlib.Popen_safe(cmd)
directory = stdout.strip()
if os.path.exists(directory):
return os.path.join(directory, 'python')
else:
return None
def _check_version(self, name_or_path, version):
if name_or_path == 'python2':
return mesonlib.version_compare(version, '< 3.0')
elif name_or_path == 'python3':
return mesonlib.version_compare(version, '>= 3.0')
return True
@FeatureNewKwargs('python.find_installation', '0.49.0', ['disabler'])
@FeatureNewKwargs('python.find_installation', '0.51.0', ['modules'])
@disablerIfNotFound
@permittedKwargs({'required', 'modules'})
def find_installation(self, interpreter, state, args, kwargs):
feature_check = FeatureNew('Passing "feature" option to find_installation', '0.48.0')
disabled, required, feature = extract_required_kwarg(kwargs, state.subproject, feature_check)
want_modules = mesonlib.extract_as_list(kwargs, 'modules') # type: T.List[str]
found_modules = [] # type: T.List[str]
missing_modules = [] # type: T.List[str]
if len(args) > 1:
raise InvalidArguments('find_installation takes zero or one positional argument.')
name_or_path = state.environment.lookup_binary_entry(MachineChoice.HOST, 'python')
if name_or_path is None and args:
name_or_path = args[0]
if not isinstance(name_or_path, str):
raise InvalidArguments('find_installation argument must be a string.')
if disabled:
mlog.log('Program', name_or_path or 'python', 'found:', mlog.red('NO'), '(disabled by:', mlog.bold(feature), ')')
return ExternalProgramHolder(NonExistingExternalProgram(), state.subproject)
if not name_or_path:
python = ExternalProgram('python3', mesonlib.python_command, silent=True)
else:
python = ExternalProgram.from_entry('python3', name_or_path)
if not python.found() and mesonlib.is_windows():
pythonpath = self._get_win_pythonpath(name_or_path)
if pythonpath is not None:
name_or_path = pythonpath
python = ExternalProgram(name_or_path, silent=True)
# Last ditch effort, python2 or python3 can be named python
# on various platforms, let's not give up just yet, if an executable
# named python is available and has a compatible version, let's use
# it
if not python.found() and name_or_path in ['python2', 'python3']:
python = ExternalProgram('python', silent=True)
if python.found() and want_modules:
for mod in want_modules:
p, out, err = mesonlib.Popen_safe(
python.command +
['-c', 'import {0}'.format(mod)])
if p.returncode != 0:
missing_modules.append(mod)
else:
found_modules.append(mod)
msg = ['Program', python.name]
if want_modules:
msg.append('({})'.format(', '.join(want_modules)))
msg.append('found:')
if python.found() and not missing_modules:
msg.extend([mlog.green('YES'), '({})'.format(' '.join(python.command))])
else:
msg.append(mlog.red('NO'))
if found_modules:
msg.append('modules:')
msg.append(', '.join(found_modules))
mlog.log(*msg)
if not python.found():
if required:
raise mesonlib.MesonException('{} not found'.format(name_or_path or 'python'))
res = ExternalProgramHolder(NonExistingExternalProgram(), state.subproject)
elif missing_modules:
if required:
raise mesonlib.MesonException('{} is missing modules: {}'.format(name_or_path or 'python', ', '.join(missing_modules)))
res = ExternalProgramHolder(NonExistingExternalProgram(), state.subproject)
else:
# Sanity check, we expect to have something that at least quacks in tune
try:
cmd = python.get_command() + ['-c', INTROSPECT_COMMAND]
p, stdout, stderr = mesonlib.Popen_safe(cmd)
info = json.loads(stdout)
except json.JSONDecodeError:
info = None
mlog.debug('Could not introspect Python (%s): exit code %d' % (str(p.args), p.returncode))
mlog.debug('Program stdout:\n')
mlog.debug(stdout)
mlog.debug('Program stderr:\n')
mlog.debug(stderr)
if isinstance(info, dict) and 'version' in info and self._check_version(name_or_path, info['version']):
res = PythonInstallation(interpreter, python, info)
else:
res = ExternalProgramHolder(NonExistingExternalProgram(), state.subproject)
if required:
raise mesonlib.MesonException('{} is not a valid python or it is missing setuptools'.format(python))
return res
def initialize(*args, **kwargs):
return PythonModule(*args, **kwargs)
| [] | [] | [
"PKG_CONFIG_LIBDIR",
"PKG_CONFIG_PATH"
] | [] | ["PKG_CONFIG_LIBDIR", "PKG_CONFIG_PATH"] | python | 2 | 0 | |
test/common/astron.py | import os, time, socket, struct, tempfile, subprocess, ssl
__all__ = ['Daemon', 'Datagram', 'DatagramIterator',
'MDConnection', 'ChannelConnection', 'ClientConnection']
class Daemon(object):
DAEMON_PATH = './astrond'
def __init__(self, config):
self.config = config
self.daemon = None
self.config_file = None
def start(self):
if 'MANUAL_LAUNCH_CONFIG' in os.environ:
            # User wants to manually launch their Astron daemon, so we'll write
            # out the config for them and prompt them to launch it manually.
with open(os.environ['MANUAL_LAUNCH_CONFIG'], 'wb') as config:
config.write(self.config)
raw_input('Waiting for manual launch; press enter when ready...')
return # Because the start happened manually.
configHandle, self.config_file = tempfile.mkstemp(prefix = 'astron', suffix = 'cfg.yaml')
os.write(configHandle, self.config)
os.close(configHandle)
args = [self.DAEMON_PATH]
if 'USE_LOGLEVEL' in os.environ:
args += ["--loglevel", os.environ['USE_LOGLEVEL']]
args += [self.config_file]
self.daemon = subprocess.Popen(args)
time.sleep(1.0) # Allow some time for daemon to initialize...
def stop(self):
time.sleep(1.0) # Allow some time for daemon to finish up...
if self.daemon is not None:
self.daemon.kill()
# In Windows, kill() is an alias for terminate(), which isn't guaranteed to be instantaneous like SIGKILL signals are on UNIX systems.
self.daemon.wait()
if self.config_file is not None:
os.remove(self.config_file)
DATATYPES = {
'int8': '<b',
'uint8': '<B',
'int16': '<h',
'uint16': '<H',
'int32': '<i',
'uint32': '<I',
'int64': '<q',
'uint64': '<Q',
'char': '<s',
'float64': '<d',
}
CONSTANTS = {
# Size types
# Reserved Values
'INVALID_DO_ID': 0,
'INVALID_ZONE': 0,
'RESERVED_MSG_TYPE': 0,
# Success booleans
'SUCCESS': 1,
'FAILURE': 0,
'BOOL_YES': 1,
'BOOL_NO': 0,
# Reserved Channels
'INVALID_CHANNEL': 0,
'CONTROL_CHANNEL': 1,
'PARENT_PREFIX': 1 << 32,
'DATABASE_PREFIX': 2 << 32,
# Control message-type constants
'CONTROL_ADD_CHANNEL': 9000,
'CONTROL_REMOVE_CHANNEL': 9001,
'CONTROL_ADD_RANGE': 9002,
'CONTROL_REMOVE_RANGE': 9003,
'CONTROL_ADD_POST_REMOVE': 9010,
'CONTROL_CLEAR_POST_REMOVE': 9011,
'CONTROL_SET_CON_NAME': 9012,
'CONTROL_SET_CON_URL': 9013,
'CONTROL_LOG_MESSAGE': 9014,
# State Server control message-type constants
'STATESERVER_CREATE_OBJECT_WITH_REQUIRED': 2000,
'STATESERVER_CREATE_OBJECT_WITH_REQUIRED_OTHER': 2001,
'STATESERVER_DELETE_AI_OBJECTS': 2009,
# State Server object message-type constants
'STATESERVER_OBJECT_GET_FIELD': 2010,
'STATESERVER_OBJECT_GET_FIELD_RESP': 2011,
'STATESERVER_OBJECT_GET_FIELDS': 2012,
'STATESERVER_OBJECT_GET_FIELDS_RESP': 2013,
'STATESERVER_OBJECT_GET_ALL': 2014,
'STATESERVER_OBJECT_GET_ALL_RESP': 2015,
'STATESERVER_OBJECT_SET_FIELD': 2020,
'STATESERVER_OBJECT_SET_FIELDS': 2021,
'STATESERVER_OBJECT_DELETE_FIELD_RAM': 2030,
'STATESERVER_OBJECT_DELETE_FIELDS_RAM': 2031,
'STATESERVER_OBJECT_DELETE_RAM': 2032,
# State Server visibility message-type constants
'STATESERVER_OBJECT_SET_LOCATION': 2040,
'STATESERVER_OBJECT_CHANGING_LOCATION': 2041,
'STATESERVER_OBJECT_ENTER_LOCATION_WITH_REQUIRED': 2042,
'STATESERVER_OBJECT_ENTER_LOCATION_WITH_REQUIRED_OTHER': 2043,
'STATESERVER_OBJECT_GET_LOCATION': 2044,
'STATESERVER_OBJECT_GET_LOCATION_RESP': 2045,
'STATESERVER_OBJECT_LOCATION_ACK': 2046,
'STATESERVER_OBJECT_SET_AI': 2050,
'STATESERVER_OBJECT_CHANGING_AI': 2051,
'STATESERVER_OBJECT_ENTER_AI_WITH_REQUIRED': 2052,
'STATESERVER_OBJECT_ENTER_AI_WITH_REQUIRED_OTHER': 2053,
'STATESERVER_OBJECT_GET_AI': 2054,
'STATESERVER_OBJECT_GET_AI_RESP': 2055,
'STATESERVER_OBJECT_SET_OWNER': 2060,
'STATESERVER_OBJECT_CHANGING_OWNER': 2061,
'STATESERVER_OBJECT_ENTER_OWNER_WITH_REQUIRED': 2062,
'STATESERVER_OBJECT_ENTER_OWNER_WITH_REQUIRED_OTHER': 2063,
'STATESERVER_OBJECT_GET_OWNER': 2064,
'STATESERVER_OBJECT_GET_OWNER_RESP': 2065,
'STATESERVER_OBJECT_ENTER_INTEREST_WITH_REQUIRED': 2066,
'STATESERVER_OBJECT_ENTER_INTEREST_WITH_REQUIRED_OTHER': 2067,
# State Server parent methods message-type constants
'STATESERVER_OBJECT_GET_ZONE_OBJECTS': 2100,
'STATESERVER_OBJECT_GET_ZONES_OBJECTS': 2102,
'STATESERVER_OBJECT_GET_CHILDREN': 2104,
'STATESERVER_OBJECT_GET_ZONE_COUNT': 2110,
'STATESERVER_OBJECT_GET_ZONE_COUNT_RESP': 2111,
'STATESERVER_OBJECT_GET_ZONES_COUNT': 2112,
'STATESERVER_OBJECT_GET_ZONES_COUNT_RESP': 2113,
'STATESERVER_OBJECT_GET_CHILD_COUNT': 2114,
'STATESERVER_OBJECT_GET_CHILD_COUNT_RESP': 2115,
'STATESERVER_OBJECT_DELETE_ZONE': 2120,
'STATESERVER_OBJECT_DELETE_ZONES': 2122,
'STATESERVER_OBJECT_DELETE_CHILDREN': 2124,
'STATESERVER_GET_ACTIVE_ZONES': 2125,
'STATESERVER_GET_ACTIVE_ZONES_RESP': 2126,
# DBSS object message-type constants
'DBSS_OBJECT_ACTIVATE_WITH_DEFAULTS': 2200,
'DBSS_OBJECT_ACTIVATE_WITH_DEFAULTS_OTHER': 2201,
'DBSS_OBJECT_GET_ACTIVATED': 2207,
'DBSS_OBJECT_GET_ACTIVATED_RESP': 2208,
'DBSS_OBJECT_DELETE_FIELD_DISK': 2230,
'DBSS_OBJECT_DELETE_FIELDS_DISK': 2231,
'DBSS_OBJECT_DELETE_DISK': 2232,
# Stateserver internal contexts
'STATESERVER_CONTEXT_WAKE_CHILDREN': 1001,
# Database Server
'DBSERVER_CREATE_OBJECT': 3000,
'DBSERVER_CREATE_OBJECT_RESP': 3001,
'DBSERVER_OBJECT_GET_FIELD': 3010,
'DBSERVER_OBJECT_GET_FIELD_RESP': 3011,
'DBSERVER_OBJECT_GET_FIELDS': 3012,
'DBSERVER_OBJECT_GET_FIELDS_RESP': 3013,
'DBSERVER_OBJECT_GET_ALL': 3014,
'DBSERVER_OBJECT_GET_ALL_RESP': 3015,
'DBSERVER_OBJECT_SET_FIELD': 3020,
'DBSERVER_OBJECT_SET_FIELDS': 3021,
'DBSERVER_OBJECT_SET_FIELD_IF_EQUALS': 3022,
'DBSERVER_OBJECT_SET_FIELD_IF_EQUALS_RESP': 3023,
'DBSERVER_OBJECT_SET_FIELDS_IF_EQUALS': 3024,
'DBSERVER_OBJECT_SET_FIELDS_IF_EQUALS_RESP': 3025,
'DBSERVER_OBJECT_SET_FIELD_IF_EMPTY': 3026,
'DBSERVER_OBJECT_SET_FIELD_IF_EMPTY_RESP': 3027,
'DBSERVER_OBJECT_DELETE_FIELD': 3030,
'DBSERVER_OBJECT_DELETE_FIELDS': 3031,
'DBSERVER_OBJECT_DELETE': 3032,
# Client Agent
'CLIENTAGENT_SET_STATE': 1000,
'CLIENTAGENT_SET_CLIENT_ID': 1001,
'CLIENTAGENT_SEND_DATAGRAM': 1002,
'CLIENTAGENT_EJECT': 1004,
'CLIENTAGENT_DROP': 1005,
'CLIENTAGENT_GET_NETWORK_ADDRESS': 1006,
'CLIENTAGENT_GET_NETWORK_ADDRESS_RESP': 1007,
'CLIENTAGENT_DECLARE_OBJECT': 1010,
'CLIENTAGENT_UNDECLARE_OBJECT': 1011,
'CLIENTAGENT_ADD_SESSION_OBJECT': 1012,
'CLIENTAGENT_REMOVE_SESSION_OBJECT': 1013,
'CLIENTAGENT_SET_FIELDS_SENDABLE': 1014,
'CLIENTAGENT_GET_TLVS': 1015,
'CLIENTAGENT_GET_TLVS_RESP': 1016,
'CLIENTAGENT_OPEN_CHANNEL': 1100,
'CLIENTAGENT_CLOSE_CHANNEL': 1101,
'CLIENTAGENT_ADD_POST_REMOVE': 1110,
'CLIENTAGENT_CLEAR_POST_REMOVES': 1111,
'CLIENTAGENT_ADD_INTEREST': 1200,
'CLIENTAGENT_ADD_INTEREST_MULTIPLE': 1201,
'CLIENTAGENT_REMOVE_INTEREST': 1203,
'CLIENTAGENT_DONE_INTEREST_RESP': 1204,
# Client
'CLIENT_HELLO': 1,
'CLIENT_HELLO_RESP': 2,
'CLIENT_DISCONNECT': 3,
'CLIENT_EJECT': 4,
'CLIENT_HEARTBEAT': 5,
'CLIENT_OBJECT_SET_FIELD': 120,
'CLIENT_OBJECT_SET_FIELDS': 121,
'CLIENT_OBJECT_LEAVING': 132,
'CLIENT_OBJECT_LEAVING_OWNER': 161,
'CLIENT_ENTER_OBJECT_REQUIRED': 142,
'CLIENT_ENTER_OBJECT_REQUIRED_OTHER': 143,
'CLIENT_ENTER_OBJECT_REQUIRED_OWNER': 172,
'CLIENT_ENTER_OBJECT_REQUIRED_OTHER_OWNER': 173,
'CLIENT_DONE_INTEREST_RESP': 204,
'CLIENT_ADD_INTEREST': 200,
'CLIENT_ADD_INTEREST_MULTIPLE': 201,
'CLIENT_REMOVE_INTEREST': 203,
'CLIENT_OBJECT_LOCATION': 140,
# Client DC reasons
'CLIENT_DISCONNECT_OVERSIZED_DATAGRAM': 106,
'CLIENT_DISCONNECT_NO_HELLO': 107,
'CLIENT_DISCONNECT_INVALID_MSGTYPE': 108,
'CLIENT_DISCONNECT_TRUNCATED_DATAGRAM': 109,
'CLIENT_DISCONNECT_ANONYMOUS_VIOLATION': 113,
'CLIENT_DISCONNECT_FORBIDDEN_INTEREST': 115,
'CLIENT_DISCONNECT_MISSING_OBJECT': 117,
'CLIENT_DISCONNECT_FORBIDDEN_FIELD': 118,
'CLIENT_DISCONNECT_FORBIDDEN_RELOCATE': 119,
'CLIENT_DISCONNECT_BAD_VERSION': 124,
'CLIENT_DISCONNECT_BAD_DCHASH': 125,
'CLIENT_DISCONNECT_FIELD_CONSTRAINT': 127,
'CLIENT_DISCONNECT_SESSION_OBJECT_DELETED': 153,
'CLIENT_DISCONNECT_NO_HEARTBEAT': 345,
'CLIENT_DISCONNECT_NETWORK_WRITE_ERROR': 347,
'CLIENT_DISCONNECT_NETWORK_READ_ERROR': 348,
# Client state codes
'CLIENT_STATE_NEW': 0,
'CLIENT_STATE_ANONYMOUS': 1,
'CLIENT_STATE_ESTABLISHED': 2,
}
if 'USE_32BIT_DATAGRAMS' in os.environ:
DATATYPES['size'] = '<I'
CONSTANTS['DGSIZE_MAX'] = (1 << 32) - 1
CONSTANTS['DGSIZE_SIZE_BYTES'] = 4
else:
DATATYPES['size'] = '<H'
CONSTANTS['DGSIZE_MAX'] = (1 << 16) - 1
CONSTANTS['DGSIZE_SIZE_BYTES'] = 2
if 'USE_128BIT_CHANNELS' in os.environ:
CONSTANTS['PARENT_PREFIX'] = 1 << 64;
CONSTANTS['CHANNEL_MAX'] = (1 << 128) - 1
CONSTANTS['CHANNEL_SIZE_BYTES'] = 16
DATATYPES['doid'] = '<Q'
CONSTANTS['DOID_MAX'] = (1 << 64) - 1
CONSTANTS['DOID_SIZE_BYTES'] = 8
DATATYPES['zone'] = '<Q'
CONSTANTS['ZONE_MAX'] = (1 << 64) - 1
CONSTANTS['ZONE_SIZE_BYTES'] = 8
CONSTANTS['ZONE_SIZE_BITS'] = 64
CONSTANTS['PARENT_PREFIX'] = 1 << 64
CONSTANTS['DATABASE_PREFIX'] = 2 << 64
else:
CONSTANTS['CHANNEL_MAX'] = (1 << 64) - 1
CONSTANTS['CHANNEL_SIZE_BYTES'] = 8
DATATYPES['doid'] = '<I'
CONSTANTS['DOID_MAX'] = (1 << 32) - 1
CONSTANTS['DOID_SIZE_BYTES'] = 4
DATATYPES['zone'] = '<I'
CONSTANTS['ZONE_MAX'] = (1 << 32) - 1
CONSTANTS['ZONE_SIZE_BYTES'] = 4
CONSTANTS['ZONE_SIZE_BITS'] = 32
CONSTANTS['USE_THREADING'] = 'DISABLE_THREADING' not in os.environ
locals().update(CONSTANTS)
__all__.extend(CONSTANTS.keys())
class Datagram(object):
def __init__(self, data=b''):
self._data = data
def make_adder(v):
def adder(data):
self.add_raw(struct.pack(v, data))
return adder
for k,v in DATATYPES.items():
adder = make_adder(v)
setattr(self, 'add_' + k, adder)
def add_raw(self, data):
self._data += data
def add_string(self, string):
self.add_size(len(string))
self.add_raw(string)
def add_blob(self, blob):
self.add_size(len(blob))
self.add_raw(blob)
def add_channel(self, channel):
if 'USE_128BIT_CHANNELS' in os.environ:
max_int64 = 0xFFFFFFFFFFFFFFFF
self.add_raw(struct.pack('<QQ', channel & max_int64, (channel >> 64) & max_int64))
else:
self.add_uint64(channel)
def get_data(self):
return self._data
def get_payload(self):
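        # Skip the recipient-count byte and the recipient channel list; what
        # remains is the payload (sender channel, msgtype, and message data).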
return self._data[CHANNEL_SIZE_BYTES*ord(self._data[0])+1:]
def get_msgtype(self):
return struct.unpack('<H', self.get_payload()[CHANNEL_SIZE_BYTES:CHANNEL_SIZE_BYTES+2])[0]
def get_channels(self):
channels = []
iterator = DatagramIterator(self, 1)
for x in xrange(ord(self._data[0])):
channels.append(iterator.read_channel())
return set(channels)
def get_size(self):
return len(self._data)
def matches(self, other):
"""Returns true if the set of channels and payload match.
This may be used instead of Datagram.equals() to compare two packets
while ignoring the ordering of the recipient channels.
This should not be called for a client datagram, because clients do not
have a list of recipients in their datagram headers.
"""
return self.get_payload() == other.get_payload() and \
self.get_channels() <= other.get_channels()
def equals(self, other):
"""Returns true only if all bytes are equal and in the same order."""
return self.get_data() == other.get_data()
# Common datagram type helpers:
@classmethod
def create(cls, recipients, sender, msgtype):
dg = cls()
dg.add_uint8(len(recipients))
for recipient in recipients: dg.add_channel(recipient)
dg.add_channel(sender)
dg.add_uint16(msgtype)
return dg
@classmethod
def create_control(cls):
dg = cls()
dg.add_uint8(1)
dg.add_channel(CONTROL_CHANNEL)
return dg
@classmethod
def create_add_channel(cls, channel):
dg = cls.create_control()
dg.add_uint16(CONTROL_ADD_CHANNEL)
dg.add_channel(channel)
return dg
@classmethod
def create_remove_channel(cls, channel):
dg = cls.create_control()
dg.add_uint16(CONTROL_REMOVE_CHANNEL)
dg.add_channel(channel)
return dg
@classmethod
def create_add_range(cls, upper, lower):
dg = cls.create_control()
dg.add_uint16(CONTROL_ADD_RANGE)
dg.add_channel(upper)
dg.add_channel(lower)
return dg
@classmethod
def create_remove_range(cls, upper, lower):
dg = cls.create_control()
dg.add_uint16(CONTROL_REMOVE_RANGE)
dg.add_channel(upper)
dg.add_channel(lower)
return dg
@classmethod
def create_add_post_remove(cls, sender, datagram):
dg = cls.create_control()
dg.add_uint16(CONTROL_ADD_POST_REMOVE)
dg.add_channel(sender)
dg.add_blob(datagram.get_data())
return dg
@classmethod
def create_clear_post_removes(cls, sender):
dg = cls.create_control()
dg.add_uint16(CONTROL_CLEAR_POST_REMOVE)
dg.add_channel(sender)
return dg
@classmethod
def create_set_con_name(cls, name):
dg = cls.create_control()
dg.add_uint16(CONTROL_SET_CON_NAME)
dg.add_string(name)
return dg
@classmethod
def create_set_con_url(cls, name):
dg = cls.create_control()
dg.add_uint16(CONTROL_SET_CON_URL)
dg.add_string(name)
return dg
class DatagramIterator(object):
def __init__(self, datagram, offset = 0):
self._datagram = datagram
self._data = datagram.get_data()
self._offset = offset
def make_reader(v):
def reader():
return self.read_format(v)
return reader
for k,v in DATATYPES.items():
reader = make_reader(v)
setattr(self, 'read_' + k, reader)
def read_format(self, f):
offset = struct.calcsize(f)
self._offset += offset
if self._offset > len(self._data):
raise EOFError('End of Datagram')
unpacked = struct.unpack(f, self._data[self._offset-offset:self._offset])
        if len(unpacked) == 1:
return unpacked[0]
else:
return unpacked
def read_channel(self):
if 'USE_128BIT_CHANNELS' in os.environ:
a, b = self.read_format('<QQ')
return (b << 64) | a
else:
return self.read_uint64()
def matches_header(self, recipients, sender, msgtype, remaining=-1):
self.seek(0)
channels = [i for i, j in zip(self._datagram.get_channels(), recipients) if i == j]
if len(channels) != len(recipients):
return (False, "Recipients length doesn't match")
self.seek(CHANNEL_SIZE_BYTES*ord(self._data[0])+1)
readSender = self.read_channel()
if sender != readSender:
return (False, "Sender doesn't match, %d != %d (expected, actual)"
% (sender, readSender))
readMsgtype = self.read_uint16()
if msgtype != readMsgtype:
return (False, "Message type doesn't match, %d != %d (expected, actual)"
% (msgtype, readMsgtype))
data_left = len(self._data) - self._offset
if remaining != -1 and remaining != data_left:
return (False, "Datagram size is %d; expecting %d" % (data_left, remaining))
return (True, "")
def read_string(self):
length = self.read_size()
self._offset += length
if self._offset > len(self._data):
raise EOFError('End of Datagram')
return struct.unpack("<%ds" % length, self._data[self._offset-length:self._offset])[0]
def read_remainder(self):
remainder = self._data[self._offset:]
self._offset = len(self._data)
return remainder
def seek(self, offset):
self._offset = offset
def tell(self):
return self._offset
class MDConnection(object):
def __init__(self, sock):
self.s = sock
self.s.settimeout(0.1)
def send(self, datagram):
data = datagram.get_data()
msg = struct.pack(DATATYPES['size'], len(data)) + data
self.s.send(msg)
def recv(self):
dg = self._read()
if dg is None:
raise EOFError('No message received')
return Datagram(dg)
def recv_maybe(self):
dg = self._read()
if dg is None:
return None
return Datagram(dg)
def close(self):
self.s.close()
def flush(self):
while self._read(): pass
def _read(self):
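        # Read one length-prefixed datagram: first the size header, then that
        # many bytes of body. Returns None if nothing arrives before the
        # socket timeout.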
try:
length = DGSIZE_SIZE_BYTES
result = ''
while len(result) < length:
data = self.s.recv(length - len(result))
if data == '':
raise EOFError('Remote socket closed connection')
result += data
length = struct.unpack(DATATYPES['size'], result)[0]
except socket.error:
return None
result = ''
while len(result) < length:
data = self.s.recv(length - len(result))
if data == '':
raise EOFError('Remote socket closed connection')
result += data
return result
class ChannelConnection(MDConnection):
def __init__(self, connAddr, connPort, MDChannel=None):
c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
c.connect((connAddr, connPort))
MDConnection.__init__(self, c)
if MDChannel is not None:
self.channels = [MDChannel]
self.send(Datagram.create_add_channel(MDChannel))
else:
self.channels = []
def add_channel(self, channel):
if channel not in self.channels:
self.channels.append(channel)
self.send(Datagram.create_add_channel(channel))
def remove_channel(self, channel):
if channel in self.channels:
self.send(Datagram.create_remove_channel(channel))
self.channels.remove(channel)
def clear_channels(self):
for channel in self.channels:
self.send(Datagram.create_remove_channel(channel))
self.channels.remove(channel)
self.channels = []
def close(self):
for chan in self.channels:
self.send(Datagram.create_remove_channel(chan))
MDConnection.close(self)
class ClientConnection(MDConnection):
def expect_multi(self, datagrams, only=False):
datagrams = list(datagrams) # We're going to be doing datagrams.remove()
numIn = 0
numE = len(datagrams)
while datagrams:
dg = self._read()
if dg is None:
                if numIn == 0:
return (False, "No datagram received.")
else:
return (False, "Only received " + str(numIn) + " datagrams, but expected " + str(numE))
numIn += 1
dg = Datagram(dg)
for datagram in datagrams:
if datagram.equals(dg):
datagrams.remove(datagram)
break
else:
if only:
f = open("test.received", "wb")
f.write(dg.get_data())
f.close()
f = open("test.expected", "wb")
f.write(datagram.get_data())
f.close()
return (False, "Received wrong datagram; written to test.{expected,received}.")
return (True, "")
| [] | [] | [
"MANUAL_LAUNCH_CONFIG",
"USE_LOGLEVEL"
] | [] | ["MANUAL_LAUNCH_CONFIG", "USE_LOGLEVEL"] | python | 2 | 0 | |
api/server/server.go | package server
import (
"crypto/tls"
"net"
"net/http"
"os"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/server/router"
"github.com/docker/docker/api/server/router/container"
"github.com/docker/docker/api/server/router/local"
"github.com/docker/docker/api/server/router/network"
"github.com/docker/docker/api/server/router/system"
"github.com/docker/docker/api/server/router/volume"
"github.com/docker/docker/daemon"
"github.com/docker/docker/pkg/authorization"
"github.com/docker/docker/pkg/sockets"
"github.com/docker/docker/utils"
"github.com/gorilla/mux"
"golang.org/x/net/context"
)
// versionMatcher defines a variable matcher to be parsed by the router
// when a request is about to be served.
const versionMatcher = "/v{version:[0-9.]+}"
// Config provides the configuration for the API server
type Config struct {
Logging bool
EnableCors bool
CorsHeaders string
AuthZPluginNames []string
Version string
SocketGroup string
TLSConfig *tls.Config
Addrs []Addr
}
// Server contains instance details for the server
type Server struct {
cfg *Config
servers []*HTTPServer
routers []router.Router
authZPlugins []authorization.Plugin
}
// Addr contains string representation of address and its protocol (tcp, unix...).
type Addr struct {
Proto string
Addr string
}
// New returns a new instance of the server based on the specified configuration.
// It allocates resources which will be needed for ServeAPI(ports, unix-sockets).
func New(cfg *Config) (*Server, error) {
s := &Server{
cfg: cfg,
}
for _, addr := range cfg.Addrs {
srv, err := s.newServer(addr.Proto, addr.Addr)
if err != nil {
return nil, err
}
logrus.Debugf("Server created for HTTP on %s (%s)", addr.Proto, addr.Addr)
s.servers = append(s.servers, srv...)
}
return s, nil
}
// Close closes servers and thus stop receiving requests
func (s *Server) Close() {
for _, srv := range s.servers {
if err := srv.Close(); err != nil {
logrus.Error(err)
}
}
}
// ServeAPI loops through all initialized servers and spawns goroutine
// with Server method for each. It sets CreateMux() as Handler also.
func (s *Server) ServeAPI() error {
var chErrors = make(chan error, len(s.servers))
for _, srv := range s.servers {
srv.srv.Handler = s.CreateMux()
go func(srv *HTTPServer) {
var err error
logrus.Infof("API listen on %s", srv.l.Addr())
if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") {
err = nil
}
chErrors <- err
}(srv)
}
for i := 0; i < len(s.servers); i++ {
err := <-chErrors
if err != nil {
return err
}
}
return nil
}
// HTTPServer contains an instance of http server and the listener.
// srv *http.Server, contains configuration to create a http server and a mux router with all api end points.
// l net.Listener, is a TCP or Socket listener that dispatches incoming request to the router.
type HTTPServer struct {
srv *http.Server
l net.Listener
}
// Serve starts listening for inbound requests.
func (s *HTTPServer) Serve() error {
return s.srv.Serve(s.l)
}
// Close closes the HTTPServer from listening for the inbound requests.
func (s *HTTPServer) Close() error {
return s.l.Close()
}
func writeCorsHeaders(w http.ResponseWriter, r *http.Request, corsHeaders string) {
logrus.Debugf("CORS header is enabled and set to: %s", corsHeaders)
w.Header().Add("Access-Control-Allow-Origin", corsHeaders)
w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth")
w.Header().Add("Access-Control-Allow-Methods", "HEAD, GET, POST, DELETE, PUT, OPTIONS")
}
func (s *Server) initTCPSocket(addr string) (l net.Listener, err error) {
if s.cfg.TLSConfig == nil || s.cfg.TLSConfig.ClientAuth != tls.RequireAndVerifyClientCert {
logrus.Warn("/!\\ DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
}
if l, err = sockets.NewTCPSocket(addr, s.cfg.TLSConfig); err != nil {
return nil, err
}
if err := allocateDaemonPort(addr); err != nil {
return nil, err
}
return
}
func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
// log the handler call
logrus.Debugf("Calling %s %s", r.Method, r.URL.Path)
// Define the context that we'll pass around to share info
// like the docker-request-id.
//
// The 'context' will be used for global data that should
// apply to all requests. Data that is specific to the
// immediate function being called should still be passed
// as 'args' on the function call.
ctx := context.Background()
handlerFunc := s.handleWithGlobalMiddlewares(handler)
vars := mux.Vars(r)
if vars == nil {
vars = make(map[string]string)
}
if err := handlerFunc(ctx, w, r, vars); err != nil {
logrus.Errorf("Handler for %s %s returned error: %s", r.Method, r.URL.Path, utils.GetErrorMessage(err))
httputils.WriteError(w, err)
}
}
}
// InitRouters initializes a list of routers for the server.
func (s *Server) InitRouters(d *daemon.Daemon) {
s.addRouter(container.NewRouter(d))
s.addRouter(local.NewRouter(d))
s.addRouter(network.NewRouter(d))
s.addRouter(system.NewRouter(d))
s.addRouter(volume.NewRouter(d))
}
// addRouter adds a new router to the server.
func (s *Server) addRouter(r router.Router) {
s.routers = append(s.routers, r)
}
// CreateMux initializes the main router the server uses.
// we keep enableCors just for legacy usage, need to be removed in the future
func (s *Server) CreateMux() *mux.Router {
m := mux.NewRouter()
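	// Only register the debug/profiling routes when the DEBUG environment variable is set.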
if os.Getenv("DEBUG") != "" {
profilerSetup(m, "/debug/")
}
logrus.Debugf("Registering routers")
for _, apiRouter := range s.routers {
for _, r := range apiRouter.Routes() {
f := s.makeHTTPHandler(r.Handler())
logrus.Debugf("Registering %s, %s", r.Method(), r.Path())
m.Path(versionMatcher + r.Path()).Methods(r.Method()).Handler(f)
m.Path(r.Path()).Methods(r.Method()).Handler(f)
}
}
return m
}
| [
"\"DEBUG\""
] | [] | [
"DEBUG"
] | [] | ["DEBUG"] | go | 1 | 0 | |
create_flask_app/app/config.py | import os
import random
class Config:
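    # Settings come from environment variables, with development-friendly fallbacks.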
{% if extras_includes('sqlite') -%}
SQLALCHEMY_DATABASE_URI = os.getenv("DATABASE_URL") or "sqlite:///db.sqlite"
SQLALCHEMY_TRACK_MODIFICATIONS = False
{% elif extras_includes('mongodb') %}
MONGO_URI = os.getenv("MONGO_URI") or "mongodb://localhost:27017/myDatabase"
{% endif -%}
SECRET_KEY = os.getenv("SECRET_KEY") or "".join(
[chr(random.randint(65, 92)) for _ in range(50)]
)
| [] | [] | [
"MONGO_URI",
"SECRET_KEY",
"DATABASE_URL"
] | [] | ["MONGO_URI", "SECRET_KEY", "DATABASE_URL"] | python | 3 | 0 | |
aws/lambda/rotate-ebs-snapshots.py |
import boto3
import datetime
import time
import os
# Manually configure EC2 region, account IDs, timezone
ec = boto3.client('ec2', region_name='us-west-1')
aws_account_ids = ['728118514760']
default_days_of_retention = 2
os.environ['TZ'] = 'Brazil/East'
# Nothing to configure below this line
def create_new_backups():
# Current hour
current_hour = int(datetime.datetime.now().strftime('%H'))
print '%d current_hour' % current_hour
# Find volumes tagged with key "Backup"
volumes = ec.describe_volumes(
Filters=[
{'Name': 'tag-key', 'Values': ['Backup']},
]
).get(
'Volumes', []
)
print 'Number of volumes with backup tag: %d' % len(volumes)
for volume in volumes:
vol_id = volume['VolumeId']
vol_retention = default_days_of_retention
snap_date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
snap_desc = vol_id
# Loop over all tags and grab out relevant keys
for name in volume['Tags']:
tag_key = name['Key']
tag_val = name['Value']
# print '...Found EBS volume key %s : %s ' % ( tag_key, tag_val );
            # The code below builds the snapshot name from the instance's Name tag
if tag_key == 'Name':
snap_desc = vol_id + ' (' + tag_val + ')'
if tag_key == 'Retention' and tag_val.isdigit():
vol_retention = int(tag_val)
if tag_key == 'Backup':
backup_mod = False
if tag_val == '' or tag_val == 'false':
backup_mod = False
elif tag_val == 'true':
backup_mod = 24
elif tag_val == 'daily':
backup_mod = 24
elif tag_val == '1/day':
backup_mod = 24
elif tag_val == '2/day':
backup_mod = 12
elif tag_val == '4/day':
backup_mod = 6
elif tag_val == '6/day':
backup_mod = 4
elif tag_val == '8/day':
backup_mod = 3
elif tag_val == '12/day':
backup_mod = 2
elif tag_val == '24/day':
backup_mod = 1
elif tag_val == 'hourly':
backup_mod = 1
else:
print '%s unknown backup schedule %s' % (vol_id, tag_val)
continue
snap_name = 'Backup of ' + snap_desc
snap_desc = 'Lambda backup ' + snap_date + ' of ' + snap_desc
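        # Retention is expressed in days; compute the epoch timestamp after
        # which this snapshot becomes eligible for deletion.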
delete_ts = '%.0f' % ((vol_retention * 86400) + time.time())
# Only backup if scheduled to do so
if backup_mod is False or current_hour % backup_mod != 0:
print '%s is not scheduled this hour' % vol_id
continue
else:
print '%s is scheduled this hour' % vol_id
# Create snapshot
snap = ec.create_snapshot(
VolumeId=vol_id,
Description=snap_desc,
)
print 'snap %s' % snap
ec.create_tags(
Resources=[snap['SnapshotId']],
Tags=[
{'Key': 'Name', 'Value': snap_name},
{'Key': 'DeleteAfter', 'Value': str(delete_ts)}
]
)
def destroy_old_backups():
filters = [
{'Name': 'tag-key', 'Values': ['DeleteAfter']}
]
snapshot_response = ec.describe_snapshots(
OwnerIds=aws_account_ids,
Filters=filters
)
# print snapshot_response
for snap in snapshot_response['Snapshots']:
# Loop over all tags and grab out relevant keys
for name in snap['Tags']:
tag_key = name['Key']
tag_val = name['Value']
# print '...Found EBS volume key %s : %s ' % ( tag_key, tag_val );
if tag_key == 'DeleteAfter':
if int(tag_val) < time.time():
print '%s is being deleted' % snap['SnapshotId']
ec.delete_snapshot(SnapshotId=snap['SnapshotId'])
else:
print '%s is safe.' % snap['SnapshotId']
def lambda_handler(event, context):
create_new_backups()
destroy_old_backups()
return 'successful' | [] | [] | [
"TZ"
] | [] | ["TZ"] | python | 1 | 0 | |
release/nightly_tests/placement_group_tests/placement_group_performance_test.py | import argparse
import json
import os
import ray
from ray._private.ray_microbenchmark_helpers import timeit
RESOURCES_VALUE = 0.01
def test_placement_group_perf(num_pgs, num_bundles, num_pending_pgs):
# Run the placement group performance benchmark given arguments.
assert ray.cluster_resources()["custom"] >= (
RESOURCES_VALUE * num_pgs * num_bundles)
def placement_group_create(num_pgs):
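        # Create the placement groups, block until they are scheduled (30s
        # timeout each), then remove them so resources are freed for the next run.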
pgs = [
ray.util.placement_group(
bundles=[{
"custom": 0.001
} for _ in range(num_bundles)],
strategy="SPREAD") for _ in range(num_pgs)
]
[pg.wait(timeout_seconds=30) for pg in pgs]
for pg in pgs:
ray.util.remove_placement_group(pg)
print(f"Num pending pgs: {num_pending_pgs}, "
f"Num pgs: {num_pgs}, "
f"Num bundles {num_bundles}")
# Get the throughput.
throughput = timeit("placement group create per second",
lambda: placement_group_create(num_pgs), num_pgs)
# Get fine-grained scheduling stats.
latencies = []
e2e_latencies = []
scheduling_attempts = []
for entry in ray.util.placement_group_table().values():
latency = entry["stats"]["scheduling_latency_ms"]
e2e_latency = entry["stats"]["end_to_end_creation_latency_ms"]
scheduling_attempt = entry["stats"]["scheduling_attempt"]
latencies.append(latency)
e2e_latencies.append(e2e_latency)
scheduling_attempts.append(scheduling_attempt)
latencies = sorted(latencies)
e2e_latencies = sorted(e2e_latencies)
scheduling_attempts = sorted(scheduling_attempts)
# Pure scheduling latency without queuing time.
print("P50 scheduling latency ms: "
f"{latencies[int(len(latencies) * 0.5)]}")
print("P95 scheduling latency ms: "
f"{latencies[int(len(latencies) * 0.95)]}")
print("P99 scheduling latency ms: "
f"{latencies[int(len(latencies) * 0.99)]}")
# Scheduling latency including queueing time.
print("P50 e2e scheduling latency ms: "
f"{e2e_latencies[int(len(e2e_latencies) * 0.5)]}")
print("P95 e2e scheduling latency ms: "
f"{e2e_latencies[int(len(e2e_latencies) * 0.95)]}")
print("P99 e2e scheduling latency ms: "
f"{e2e_latencies[int(len(e2e_latencies) * 0.99)]}")
# Number of time scheduling was retried before succeeds.
print("P50 scheduling attempts: "
f"{scheduling_attempts[int(len(scheduling_attempts) * 0.5)]}")
print("P95 scheduling attempts: "
f"{scheduling_attempts[int(len(scheduling_attempts) * 0.95)]}")
print("P99 scheduling attempts: "
f"{scheduling_attempts[int(len(scheduling_attempts) * 0.99)]}")
return {
"pg_creation_per_second": throughput[0][1],
"p50_scheduling_latency_ms": latencies[int(len(latencies) * 0.5)],
"p50_e2e_pg_creation_latency_ms": e2e_latencies[int(
len(e2e_latencies) * 0.5)]
}
def run_full_benchmark(num_pending_pgs):
# Run the benchmark with different num_bundles & num_pgs params.
num_bundles = 1
num_pgs_test = [10, 100, 200, 400, 800, 1600]
results = []
for num_pgs in num_pgs_test:
results.append(
test_placement_group_perf(num_pgs, num_bundles, num_pending_pgs))
def print_result(num_pending_pgs, num_pgs, num_bundles, result):
print(f"Num pending pgs: {num_pending_pgs}, "
f"Num pgs: {num_pgs}, "
f"Num bundles {num_bundles}")
print("Throughput: ")
for k, v in result.items():
print(f"\t{k}: {v}")
for i in range(len(results)):
num_pgs = num_pgs_test[i]
result = results[i]
print_result(num_pending_pgs, num_pgs, num_bundles, result)
# Test with different length of bundles.
num_bundles_list = [1, 10, 20, 40]
num_pgs = 100
results = []
for num_bundles in num_bundles_list:
results.append(
test_placement_group_perf(num_pgs, num_bundles, num_pending_pgs))
for i in range(len(results)):
num_bundles = num_bundles_list[i]
result = results[i]
print_result(num_pending_pgs, num_pgs, num_bundles, result)
def parse_script_args():
parser = argparse.ArgumentParser()
parser.add_argument("--num-pgs", type=int, default=-1)
parser.add_argument("--num-bundles", type=int, default=-1)
parser.add_argument("--num-pending_pgs", type=int, default=0)
parser.add_argument("--local", action="store_true")
return parser.parse_known_args()
if __name__ == "__main__":
args, _ = parse_script_args()
if args.local:
ray.init(resources={"custom": 100, "pending": 1})
else:
ray.init(address="auto")
# Create pending placement groups.
# It is used to see the impact of pending placement group.
# Currently, pending placement groups could increase the load of
# GCS server.
assert ray.cluster_resources()["pending"] >= 1
pending_pgs = [
ray.util.placement_group(bundles=[{
"pending": 1
}]) for _ in range(args.num_pending_pgs + 1)
]
# If arguments are given, run a single test.
# It is used to analyze the scheduling performance trace with
# Grafana.
if args.num_pgs != -1 and args.num_bundles != -1:
test_placement_group_perf(args.num_pgs, args.num_bundles,
args.num_pending_pgs)
else:
run_full_benchmark(args.num_pending_pgs)
if "TEST_OUTPUT_JSON" in os.environ:
out_file = open(os.environ["TEST_OUTPUT_JSON"], "w")
results = {"success": 1}
json.dump(results, out_file)
| [] | [] | [
"TEST_OUTPUT_JSON"
] | [] | ["TEST_OUTPUT_JSON"] | python | 1 | 0 | |
DeepLearningFramework/train.py | import os
import torch
from torch import optim
from torch.utils.data import DataLoader
from utils.solvers import PolyLR
from utils.loss import HDRLoss
from utils.HDRutils import tonemap
from utils.dataprocessor import dump_sample
from dataset.HDR import KalantariDataset, KalantariTestDataset
from utils.configs import Configs
import random
import numpy as np
def setup_seed(seed=0):
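    # Seed Python, NumPy and PyTorch (CPU and all GPUs) RNGs and force
    # deterministic cuDNN so training runs are reproducible.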
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
# Get configurations
configs = Configs()
# Load Data & build dataset
train_dataset = KalantariDataset(configs=configs)
train_dataloader = DataLoader(train_dataset, batch_size=configs.batch_size, shuffle=True)
test_dataset = KalantariTestDataset(configs=configs)
test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=True)
# Build DeepHDR model from configs
##########################################################
# Fill in Your model here! #
##########################################################
model = YOUR_MODEL(configs)
if configs.multigpu is False:
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model.to(device)
else:
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if device == torch.device('cpu'):
raise EnvironmentError('No GPUs, cannot initialize multigpu training.')
model.to(device)
# Define optimizer
optimizer = optim.Adam(model.parameters(), betas=(configs.beta1, configs.beta2), lr=configs.learning_rate)
# Define Criterion
criterion = HDRLoss()
# Define Scheduler
lr_scheduler = PolyLR(optimizer, max_iter=configs.epoch, power=0.9)
# Read checkpoints
start_epoch = 0
checkpoint_file = configs.checkpoint_dir + '/checkpoint.tar'
if os.path.isfile(checkpoint_file):
checkpoint = torch.load(checkpoint_file)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
start_epoch = checkpoint['epoch']
lr_scheduler.load_state_dict(checkpoint['scheduler'])
    print("Load checkpoint %s (epoch %d)" % (checkpoint_file, start_epoch))
if configs.multigpu is True:
model = torch.nn.DataParallel(model)
def train_one_epoch():
model.train()
for idx, data in enumerate(train_dataloader):
in_LDRs, ref_LDRs, in_HDRs, ref_HDRs, in_exps, ref_exps = data
in_LDRs = in_LDRs.to(device)
in_HDRs = in_HDRs.to(device)
ref_HDRs = ref_HDRs.to(device)
# Forward
result = model(in_LDRs, in_HDRs)
# Backward
loss = criterion(tonemap(result), tonemap(ref_HDRs))
loss.backward()
optimizer.step()
optimizer.zero_grad()
print('--------------- Train Batch %d ---------------' % (idx + 1))
print('loss: %.12f' % loss.item())
def eval_one_epoch():
model.eval()
mean_loss = 0
count = 0
for idx, data in enumerate(test_dataloader):
sample_path, in_LDRs, in_HDRs, in_exps, ref_HDRs = data
sample_path = sample_path[0]
in_LDRs = in_LDRs.to(device)
in_HDRs = in_HDRs.to(device)
ref_HDRs = ref_HDRs.to(device)
# Forward
with torch.no_grad():
res = model(in_LDRs, in_HDRs)
# Compute loss
with torch.no_grad():
loss = criterion(tonemap(res), tonemap(ref_HDRs))
dump_sample(sample_path, res.cpu().detach().numpy())
print('--------------- Eval Batch %d ---------------' % (idx + 1))
print('loss: %.12f' % loss.item())
mean_loss += loss.item()
count += 1
mean_loss = mean_loss / count
return mean_loss
def train(start_epoch):
global cur_epoch
for epoch in range(start_epoch, configs.epoch):
cur_epoch = epoch
print('**************** Epoch %d ****************' % (epoch + 1))
print('learning rate: %f' % (lr_scheduler.get_last_lr()[0]))
train_one_epoch()
loss = eval_one_epoch()
lr_scheduler.step()
if configs.multigpu is False:
save_dict = {'epoch': epoch + 1, 'loss': loss,
'optimizer_state_dict': optimizer.state_dict(),
'model_state_dict': model.state_dict(),
'scheduler': lr_scheduler.state_dict()
}
else:
save_dict = {'epoch': epoch + 1, 'loss': loss,
'optimizer_state_dict': optimizer.state_dict(),
'model_state_dict': model.module.state_dict(),
'scheduler': lr_scheduler.state_dict()
}
torch.save(save_dict, os.path.join(configs.checkpoint_dir, 'checkpoint.tar'))
torch.save(save_dict, os.path.join(configs.checkpoint_dir, 'checkpoint' + str(epoch) + '.tar'))
print('mean eval loss: %.12f' % loss)
if __name__ == '__main__':
train(start_epoch)
| [] | [] | [
"PYTHONHASHSEED"
] | [] | ["PYTHONHASHSEED"] | python | 1 | 0 | |
tools/fuzz/process_test.go | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package fuzz
import (
"flag"
"fmt"
"io"
"math/rand"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"testing"
"time"
)
// Note: In order to avoid needing to mock out exec.Cmd or to have external
// program(s) act as mock subprocesses, we implement the subprocess mocks here
// in the main test binary (see TestDoProcessMock) and spawn a copy of ourself
// as a subprocess whenever mockCommand is called. A special environment
// variable is set to trigger the mocking behavior, and the first non-flag
// command-line argument is used as the command name so we know which specific
// behavior to emulate. Despite being misleadingly named process_test.go (which
// is necessary so that we can invoke TestDoProcessMock in the subprocess),
// this file is actually mostly process mocking.
// Drop-in replacement for exec.Command for use during testing.
func mockCommand(command string, args ...string) *exec.Cmd {
// For ps, pass through directly
if command == "ps" {
return exec.Command(command, args...)
}
// Call ourself as the subprocess so we can mock behavior as appropriate
argv := []string{"-logtostderr", "-test.run=TestDoProcessMock", "--", command}
argv = append(argv, args...)
cmd := exec.Command(os.Args[0], argv...)
cmd.Env = os.Environ()
cmd.Env = append(cmd.Env, "MOCK_PROCESS=yes")
// Ensure the subprocess CPRNG is seeded uniquely (but deterministically)
cmd.Env = append(cmd.Env, fmt.Sprintf("RAND_SEED=%d", rand.Int63()))
return cmd
}
// This is the actual implementation of mocked subprocesses.
// The name is a little confusing because it has to have the Test prefix
func TestDoProcessMock(t *testing.T) {
if os.Getenv("MOCK_PROCESS") != "yes" {
t.Skip("Not in a subprocess")
}
seed, err := strconv.ParseInt(os.Getenv("RAND_SEED"), 10, 64)
if err != nil {
t.Fatalf("Invalid CPRNG seed: %s", err)
}
rand.Seed(seed)
cmd, args := flag.Arg(0), flag.Args()[1:]
// Check all args for a special invalid path being passed along
for _, arg := range args {
if strings.Contains(arg, invalidPath) {
fmt.Println("file not found")
os.Exit(1)
}
}
var out string
var exitCode int
var stayAlive bool
switch filepath.Base(cmd) {
case "echo":
out = strings.Join(args, " ") + "\n"
exitCode = 0
case FakeQemuFailing:
out = "qemu error message"
exitCode = 1
case FakeQemuSlow:
stayAlive = true
out = "welcome to qemu for turtles 🐢"
exitCode = 0
// The following utilities already had their args checked for invalid paths
// above, so at this point we just need to touch an expected output file.
// The contents are randomized to allow for simple change detection.
case "cp":
touchRandomFile(t, args[len(args)-1])
exitCode = 0
case "fvm":
touchRandomFile(t, args[0])
exitCode = 0
case "zbi":
found := false
for j, arg := range args[:len(args)-1] {
if arg == "-o" {
touchRandomFile(t, args[j+1])
found = true
}
}
if !found {
t.Fatalf("No output specified in zbi command args: %s", args)
}
exitCode = 0
case "qemu-system-x86_64", "qemu-system-aarch64":
stayAlive = true
var logFile string
// Check for a logfile specified in a serial parameter
for j, arg := range args[:len(args)-1] {
if arg != "-serial" {
continue
}
parts := strings.SplitN(args[j+1], ":", 2)
if parts[0] == "file" {
logFile = parts[1]
}
}
logWriter := os.Stdout
if logFile != "" {
outFile, err := os.Create(logFile)
if err != nil {
t.Fatalf("error creating qemu log file: %s", err)
}
defer outFile.Close()
logWriter = outFile
}
io.WriteString(logWriter, "early boot\n")
io.WriteString(logWriter, successfulBootMarker+"\n")
// Output >100KB of additional data to ensure we handle large logs correctly
filler := strings.Repeat("data", 1024/4)
for j := 0; j < 100; j++ {
logLine := fmt.Sprintf("%d: %s", j, filler)
io.WriteString(logWriter, logLine)
}
// Write a final success marker
io.WriteString(logWriter, lateBootMessage+"\n")
exitCode = 0
case "symbolizer":
if err := fakeSymbolize(os.Stdin, os.Stdout); err != nil {
t.Fatalf("failed during scan: %s", err)
}
exitCode = 0
default:
exitCode = 127
}
os.Stdout.WriteString(out)
if stayAlive {
// Simulate a long-running process
time.Sleep(10 * time.Second)
}
os.Exit(exitCode)
}
// This tests the subprocess mocking itself
func TestProcessMocking(t *testing.T) {
// Enable subprocess mocking
ExecCommand = mockCommand
defer func() { ExecCommand = exec.Command }()
cmd := NewCommand("echo", "it", "works")
out, err := cmd.Output()
if err != nil {
t.Fatalf("Error running mock command: %s", err)
}
if string(out) != "it works\n" {
t.Fatalf("Mock command returned unexpected output: %q", out)
}
cmd = NewCommand("ps", "-p", strconv.Itoa(os.Getpid()))
out, err = cmd.Output()
if err != nil {
t.Fatalf("Error running mock command: %s (%q)", err, out)
}
}
| [
"\"MOCK_PROCESS\"",
"\"RAND_SEED\""
] | [] | [
"RAND_SEED",
"MOCK_PROCESS"
] | [] | ["RAND_SEED", "MOCK_PROCESS"] | go | 2 | 0 | |
pkg/client/listers/machine/internalversion/machineset.go | /*
Copyright (c) SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package internalversion
import (
machine "github.com/gardener/machine-controller-manager/pkg/apis/machine"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// MachineSetLister helps list MachineSets.
// All objects returned here must be treated as read-only.
type MachineSetLister interface {
// List lists all MachineSets in the indexer.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*machine.MachineSet, err error)
// MachineSets returns an object that can list and get MachineSets.
MachineSets(namespace string) MachineSetNamespaceLister
MachineSetListerExpansion
}
// machineSetLister implements the MachineSetLister interface.
type machineSetLister struct {
indexer cache.Indexer
}
// NewMachineSetLister returns a new MachineSetLister.
func NewMachineSetLister(indexer cache.Indexer) MachineSetLister {
return &machineSetLister{indexer: indexer}
}
// List lists all MachineSets in the indexer.
func (s *machineSetLister) List(selector labels.Selector) (ret []*machine.MachineSet, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*machine.MachineSet))
})
return ret, err
}
// MachineSets returns an object that can list and get MachineSets.
func (s *machineSetLister) MachineSets(namespace string) MachineSetNamespaceLister {
return machineSetNamespaceLister{indexer: s.indexer, namespace: namespace}
}
// MachineSetNamespaceLister helps list and get MachineSets.
// All objects returned here must be treated as read-only.
type MachineSetNamespaceLister interface {
// List lists all MachineSets in the indexer for a given namespace.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*machine.MachineSet, err error)
// Get retrieves the MachineSet from the indexer for a given namespace and name.
// Objects returned here must be treated as read-only.
Get(name string) (*machine.MachineSet, error)
MachineSetNamespaceListerExpansion
}
// machineSetNamespaceLister implements the MachineSetNamespaceLister
// interface.
type machineSetNamespaceLister struct {
indexer cache.Indexer
namespace string
}
// List lists all MachineSets in the indexer for a given namespace.
func (s machineSetNamespaceLister) List(selector labels.Selector) (ret []*machine.MachineSet, err error) {
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
ret = append(ret, m.(*machine.MachineSet))
})
return ret, err
}
// Get retrieves the MachineSet from the indexer for a given namespace and name.
func (s machineSetNamespaceLister) Get(name string) (*machine.MachineSet, error) {
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(machine.Resource("machineset"), name)
}
return obj.(*machine.MachineSet), nil
}
| [] | [] | [] | [] | [] | go | null | null | null |
main.go | package main
import (
"bufio"
"flag"
"fmt"
"io/ioutil"
"os"
"os/user"
"regexp"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/aws/aws-sdk-go/service/sts"
)
const version = "1.0.3"
func main() {
var yesFlag bool
var profileFlag string
var versionFlag bool
var deleteFlag bool
flag.BoolVar(&yesFlag, "y", false, `Automatic "yes" to prompts.`)
flag.BoolVar(&deleteFlag, "d", false, "Delete old key without deactivation.")
flag.StringVar(&profileFlag, "profile", "default", "The profile to use.")
flag.BoolVar(&versionFlag, "version", false, "Print version number ("+version+")")
flag.Parse()
if versionFlag {
fmt.Println(version)
os.Exit(0)
}
// Get credentials
usr, _ := user.Current()
credentialsPath := os.Getenv("AWS_SHARED_CREDENTIALS_FILE")
if len(credentialsPath) == 0 {
credentialsPath = fmt.Sprintf("%s/.aws/credentials", usr.HomeDir)
}
credentialsProvider := credentials.NewSharedCredentials(credentialsPath, profileFlag)
creds, err := credentialsProvider.Get()
check(err)
fmt.Printf("Using access key %s from profile \"%s\".\n", creds.AccessKeyID, profileFlag)
// Create session
sess, err := session.NewSession(&aws.Config{Credentials: credentialsProvider})
check(err)
// sts get-caller-identity
stsClient := sts.New(sess)
respGetCallerIdentity, err := stsClient.GetCallerIdentity(&sts.GetCallerIdentityInput{})
if err != nil {
fmt.Println("Error getting caller identity. Is the key disabled?")
fmt.Println()
check(err)
}
fmt.Printf("Your user arn is: %s\n\n", *respGetCallerIdentity.Arn)
// iam list-access-keys
// If the UserName field is not specified, the UserName is determined implicitly based on the AWS access key ID used to sign the request.
iamClient := iam.New(sess)
respListAccessKeys, err := iamClient.ListAccessKeys(&iam.ListAccessKeysInput{})
check(err)
// Print key information
fmt.Printf("You have %d access key%v associated with your user:\n", len(respListAccessKeys.AccessKeyMetadata), pluralize(len(respListAccessKeys.AccessKeyMetadata)))
for _, key := range respListAccessKeys.AccessKeyMetadata {
respAccessKeyLastUsed, err2 := iamClient.GetAccessKeyLastUsed(&iam.GetAccessKeyLastUsedInput{
AccessKeyId: key.AccessKeyId,
})
check(err2)
if respAccessKeyLastUsed.AccessKeyLastUsed.LastUsedDate == nil {
fmt.Printf("- %s (%s, created %s, never used)\n", *key.AccessKeyId, *key.Status, key.CreateDate)
} else {
fmt.Printf("- %s (%s, created %s, last used %s for service %s in %s)\n", *key.AccessKeyId, *key.Status, key.CreateDate, respAccessKeyLastUsed.AccessKeyLastUsed.LastUsedDate, *respAccessKeyLastUsed.AccessKeyLastUsed.ServiceName, *respAccessKeyLastUsed.AccessKeyLastUsed.Region)
}
}
fmt.Println()
if len(respListAccessKeys.AccessKeyMetadata) == 2 {
keyIndex := 0
if *respListAccessKeys.AccessKeyMetadata[0].AccessKeyId == creds.AccessKeyID {
keyIndex = 1
}
if yesFlag == false {
fmt.Println("You have two access keys, which is the max number of access keys.")
fmt.Printf("Do you want to delete %s and create a new key? [yN] ", *respListAccessKeys.AccessKeyMetadata[keyIndex].AccessKeyId)
if *respListAccessKeys.AccessKeyMetadata[keyIndex].Status == "Active" {
fmt.Printf("\nWARNING: This key is currently Active! ")
}
reader := bufio.NewReader(os.Stdin)
yn, err2 := reader.ReadString('\n')
check(err2)
if yn[0] != 'y' && yn[0] != 'Y' {
return
}
}
_, err2 := iamClient.DeleteAccessKey(&iam.DeleteAccessKeyInput{
AccessKeyId: respListAccessKeys.AccessKeyMetadata[keyIndex].AccessKeyId,
})
check(err2)
fmt.Printf("Deleted access key %s.\n", *respListAccessKeys.AccessKeyMetadata[keyIndex].AccessKeyId)
} else if yesFlag == false {
fmt.Printf("Do you want to create a new key and deactivate %s? [yN] ", *respListAccessKeys.AccessKeyMetadata[0].AccessKeyId)
reader := bufio.NewReader(os.Stdin)
yn, err2 := reader.ReadString('\n')
check(err2)
if yn[0] != 'y' && yn[0] != 'Y' {
return
}
}
// Create the new access key
// If you do not specify a user name, IAM determines the user name implicitly based on the AWS access key ID signing the request.
respCreateAccessKey, err := iamClient.CreateAccessKey(&iam.CreateAccessKeyInput{})
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
return
}
fmt.Printf("Created access key %s.\n", *respCreateAccessKey.AccessKey.AccessKeyId)
// Read credentials file
bytes, err := ioutil.ReadFile(credentialsPath)
check(err)
text := string(bytes)
// Replace key pair in credentials file
// This search & replace does not limit itself to the specified profile, which may be useful if the user is using the same key in multiple profiles
re := regexp.MustCompile(fmt.Sprintf(`(?m)^aws_access_key_id ?= ?%s`, regexp.QuoteMeta(creds.AccessKeyID)))
text = re.ReplaceAllString(text, `aws_access_key_id=`+*respCreateAccessKey.AccessKey.AccessKeyId)
re = regexp.MustCompile(fmt.Sprintf(`(?m)^aws_secret_access_key ?= ?%s`, regexp.QuoteMeta(creds.SecretAccessKey)))
text = re.ReplaceAllString(text, `aws_secret_access_key=`+*respCreateAccessKey.AccessKey.SecretAccessKey)
// Verify that the regexp actually replaced something
if !strings.Contains(text, *respCreateAccessKey.AccessKey.AccessKeyId) || !strings.Contains(text, *respCreateAccessKey.AccessKey.SecretAccessKey) {
fmt.Println("Failed to replace old access key. Aborting.")
fmt.Printf("Please verify that the file %s is formatted correctly.\n", credentialsPath)
// Delete the key we created
_, err2 := iamClient.DeleteAccessKey(&iam.DeleteAccessKeyInput{
AccessKeyId: respCreateAccessKey.AccessKey.AccessKeyId,
})
check(err2)
fmt.Printf("Deleted access key %s.\n", *respCreateAccessKey.AccessKey.AccessKeyId)
os.Exit(1)
}
// Write new file
err = ioutil.WriteFile(credentialsPath, []byte(text), 0600)
check(err)
fmt.Printf("Wrote new key pair to %s\n", credentialsPath)
// Deleting the key if flag is set, otherwise only deactivating
if deleteFlag {
_, err := iamClient.DeleteAccessKey(&iam.DeleteAccessKeyInput{
AccessKeyId: &creds.AccessKeyID,
})
check(err)
fmt.Printf("Deleted old access key %s.\n", creds.AccessKeyID)
} else {
_, err = iamClient.UpdateAccessKey(&iam.UpdateAccessKeyInput{
AccessKeyId: &creds.AccessKeyID,
Status: aws.String("Inactive"),
})
check(err)
fmt.Printf("Deactivated old access key %s.\n", creds.AccessKeyID)
fmt.Println("Please make sure this key is not used elsewhere.")
}
}
func pluralize(n int) string {
if n == 1 {
return ""
}
return "s"
}
func check(err error) {
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
}
| [
"\"AWS_SHARED_CREDENTIALS_FILE\""
] | [] | [
"AWS_SHARED_CREDENTIALS_FILE"
] | [] | ["AWS_SHARED_CREDENTIALS_FILE"] | go | 1 | 0 | |
train_ssd.py | from tensorflow.python.lib.io import file_io
from keras.optimizers import Adam, SGD
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, TerminateOnNaN, CSVLogger
from keras import backend as K
import tensorflow as tf
from keras.models import load_model
from keras.utils import plot_model
from math import ceil
import numpy as np
#from matplotlib import pyplot as plt
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__)))
os.environ["TF_CPP_MIN_LOG_LEVEL"]="3"
from models.keras_ssd300 import ssd_300
from keras_loss_function.keras_ssd_loss import SSDLoss
from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes
from keras_layers.keras_layer_DecodeDetections import DecodeDetections
from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast
from keras_layers.keras_layer_L2Normalization import L2Normalization
from ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder
from ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast
from data_generator.object_detection_2d_data_generator import DataGenerator
from data_generator.object_detection_2d_geometric_ops import Resize
from data_generator.object_detection_2d_photometric_ops import ConvertTo3Channels
from data_generator.data_augmentation_chain_original_ssd import SSDDataAugmentation
from data_generator.object_detection_2d_misc_utils import apply_inverse_transforms
from tensorflow.python.lib.io import file_io
import argparse
from tensorflow.python.client import device_lib
print("CHECK GPU USAGE!")
print(device_lib.list_local_devices())
K.tensorflow_backend._get_available_gpus()
img_height = 300 # Height of the model input images
img_width = 300 # Width of the model input images
img_channels = 3 # Number of color channels of the model input images
mean_color = [123, 117, 104] # The per-channel mean of the images in the dataset. Do not change this value if you're using any of the pre-trained weights.
swap_channels = [2, 1, 0] # The color channel order in the original SSD is BGR, so we'll have the model reverse the color channel order of the input images.
n_classes = 20 # Number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO
scales_pascal = [0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05] # The anchor box scaling factors used in the original SSD300 for the Pascal VOC datasets
scales_coco = [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05] # The anchor box scaling factors used in the original SSD300 for the MS COCO datasets
scales = scales_pascal
aspect_ratios = [[1.0, 2.0, 0.5, 3.0, 1.0/3.0, 4.0, 0.25],
[1.0, 2.0, 0.5, 3.0, 1.0/3.0, 4.0, 0.25],
[1.0, 2.0, 0.5, 3.0, 1.0/3.0],
[1.0, 2.0, 0.5, 3.0, 1.0/3.0],
[1.0, 2.0, 0.5, 3.0, 1.0/3.0],
[1.0, 2.0, 0.5, 3.0, 1.0/3.0]] # The anchor box aspect ratios used in the original SSD300; the order matters
two_boxes_for_ar1 = True
steps = [8, 16, 32, 64, 100, 300] # The space between two adjacent anchor box center points for each predictor layer.
offsets = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5] # The offsets of the first anchor box center points from the top and left borders of the image as a fraction of the step size for each predictor layer.
clip_boxes = False # Whether or not to clip the anchor boxes to lie entirely within the image boundaries
variances = [0.1, 0.1, 0.2, 0.2] # The variances by which the encoded target coordinates are divided as in the original implementation
normalize_coords = True
def main(job_dir, **args):
##Setting up the path for saving logs
logs_dir = job_dir + 'logs/'
data_dir = "gs://deeplearningteam11/data"
print("Current Directory: " + os.path.dirname(__file__))
print("Lets copy the data to: " + os.path.dirname(__file__))
os.system("gsutil -m cp -r " + data_dir + " " + os.path.dirname(__file__) + " > /dev/null 2>&1 " )
#exit(0)
with tf.device('/device:GPU:0'):
# 1: Build the Keras model.
K.clear_session() # Clear previous models from memory.
model = ssd_300(image_size=(img_height, img_width, img_channels),
n_classes=n_classes,
mode='training',
l2_regularization=0.0005,
scales=scales,
aspect_ratios_per_layer=aspect_ratios,
two_boxes_for_ar1=two_boxes_for_ar1,
steps=steps,
offsets=offsets,
clip_boxes=clip_boxes,
variances=variances,
normalize_coords=normalize_coords,
subtract_mean=mean_color,
swap_channels=swap_channels)
adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
model.compile(optimizer=adam, loss=ssd_loss.compute_loss)
model.summary()
# 1: Instantiate two `DataGenerator` objects: One for training, one for validation.
train_dataset = DataGenerator(load_images_into_memory=True, hdf5_dataset_path=None)
val_dataset = DataGenerator(load_images_into_memory=True, hdf5_dataset_path=None)
# 2: Parse the image and label lists for the training and validation datasets. This can take a while.
# VOC 2007
# The directories that contain the images.
VOC_2007_train_images_dir = 'data/data/VOC2007/train/JPEGImages/'
VOC_2007_test_images_dir = 'data/data/VOC2007/test/JPEGImages/'
VOC_2007_train_anns_dir = 'data/data/VOC2007/train/Annotations/'
VOC_2007_test_anns_dir = 'data/data/VOC2007/test/Annotations/'
# The paths to the image sets.
VOC_2007_trainval_image_set_dir = 'data/data/VOC2007/train/ImageSets/Main/'
VOC_2007_test_image_set_dir = 'data/data/VOC2007/test/ImageSets/Main/'
VOC_2007_train_images_dir = os.path.dirname(__file__) + "/" + VOC_2007_train_images_dir
VOC_2007_test_images_dir = os.path.dirname(__file__) + "/" + VOC_2007_test_images_dir
VOC_2007_train_anns_dir = os.path.dirname(__file__) + "/" + VOC_2007_train_anns_dir
VOC_2007_test_anns_dir = os.path.dirname(__file__) + "/" + VOC_2007_test_anns_dir
VOC_2007_trainval_image_set_dir = os.path.dirname(__file__) + "/" + VOC_2007_trainval_image_set_dir
VOC_2007_test_image_set_dir = os.path.dirname(__file__) + "/" + VOC_2007_test_image_set_dir
VOC_2007_trainval_image_set_filename = VOC_2007_trainval_image_set_dir + '/trainval.txt'
VOC_2007_test_image_set_filename = VOC_2007_test_image_set_dir + '/test.txt'
# The XML parser needs to know what object class names to look for and in which order to map them to integers.
classes = ['background',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat',
'chair', 'cow', 'diningtable', 'dog',
'horse', 'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor']
print("Parsing Training Data ...")
train_dataset.parse_xml(images_dirs=[VOC_2007_train_images_dir],
image_set_filenames=[VOC_2007_trainval_image_set_filename],
annotations_dirs=[VOC_2007_train_anns_dir],
classes=classes,
include_classes='all',
exclude_truncated=False,
exclude_difficult=False,
ret=False,
verbose=False)
print("Done")
print("================================================================")
print("Parsing Test Data ...")
val_dataset.parse_xml(images_dirs=[VOC_2007_test_images_dir],
image_set_filenames=[VOC_2007_test_image_set_filename],
annotations_dirs=[VOC_2007_test_anns_dir],
classes=classes,
include_classes='all',
exclude_truncated=False,
exclude_difficult=True,
ret=False,
verbose=False)
print("Done")
print("================================================================")
# 3: Set the batch size.
batch_size = 32 # Change the batch size if you like, or if you run into GPU memory issues.
# 4: Set the image transformations for pre-processing and data augmentation options.
# For the training generator:
ssd_data_augmentation = SSDDataAugmentation(img_height=img_height,
img_width=img_width,
background=mean_color)
# For the validation generator:
convert_to_3_channels = ConvertTo3Channels()
resize = Resize(height=img_height, width=img_width)
# 5: Instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function.
# The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes.
predictor_sizes = [model.get_layer('conv4_4_norm_mbox_conf').output_shape[1:3],
model.get_layer('fc7_mbox_conf').output_shape[1:3],
model.get_layer('conv8_2_mbox_conf').output_shape[1:3],
model.get_layer('conv9_2_mbox_conf').output_shape[1:3],
model.get_layer('conv10_2_mbox_conf').output_shape[1:3],
model.get_layer('conv11_2_mbox_conf').output_shape[1:3]]
ssd_input_encoder = SSDInputEncoder(img_height=img_height,
img_width=img_width,
n_classes=n_classes,
predictor_sizes=predictor_sizes,
scales=scales,
aspect_ratios_per_layer=aspect_ratios,
two_boxes_for_ar1=two_boxes_for_ar1,
steps=steps,
offsets=offsets,
clip_boxes=clip_boxes,
variances=variances,
matching_type='multi',
pos_iou_threshold=0.5,
neg_iou_limit=0.5,
normalize_coords=normalize_coords)
# 6: Create the generator handles that will be passed to Keras' `fit_generator()` function.
train_generator = train_dataset.generate(batch_size=batch_size,
shuffle=True,
transformations=[ssd_data_augmentation],
label_encoder=ssd_input_encoder,
returns={'processed_images',
'encoded_labels'},
keep_images_without_gt=False)
val_generator = val_dataset.generate(batch_size=batch_size,
shuffle=False,
transformations=[convert_to_3_channels,
resize],
label_encoder=ssd_input_encoder,
returns={'processed_images',
'encoded_labels'},
keep_images_without_gt=False)
# Get the number of samples in the training and validations datasets.
train_dataset_size = train_dataset.get_dataset_size()
val_dataset_size = val_dataset.get_dataset_size()
print("Number of images in the training dataset:\t{:>6}".format(train_dataset_size))
print("Number of images in the validation dataset:\t{:>6}".format(val_dataset_size))
# Define a learning rate schedule.
def lr_schedule(epoch):
if epoch < 80:
return 0.001
elif epoch < 100:
return 0.0001
else:
return 0.00001
learning_rate_scheduler = LearningRateScheduler(schedule=lr_schedule,
verbose=1)
terminate_on_nan = TerminateOnNaN()
callbacks = [learning_rate_scheduler,
terminate_on_nan]
# If you're resuming a previous training, set `initial_epoch` and `final_epoch` accordingly.
initial_epoch = 0
final_epoch = 120
steps_per_epoch = 500
history = model.fit_generator(generator=train_generator,
steps_per_epoch=steps_per_epoch,
epochs=final_epoch,
callbacks=callbacks,
validation_data=val_generator,
validation_steps=ceil(val_dataset_size/batch_size),
initial_epoch=initial_epoch)
model_name = "vgg19BNReLUmodel.h5"
model.save(model_name)
with file_io.FileIO(model_name, mode='rb') as input_f:
with file_io.FileIO("gs://deeplearningteam11/" + model_name, mode='w+') as output_f:
output_f.write(input_f.read())
##Running the app
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Input Arguments
parser.add_argument(
'--job-dir',
help='GCS location to write checkpoints and export models',
required=True
)
args = parser.parse_args()
arguments = args.__dict__
main(**arguments)
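# Illustrative local invocation (the bucket path below is a placeholder, not a
# real location); --job-dir is the only required flag and also determines where
# the logs/ prefix is written:
#
#   python train_ssd.py --job-dir gs://my-bucket/ssd300-run-01/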
| [] | [] | [
"TF_CPP_MIN_LOG_LEVEL"
] | [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
docusign_test.go | // Copyright 2015 James Cote and Liberty Fund, Inc.
// All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// docusign implements a service to use the version 2 Docusign
// rest api. Api documentation may be found at:
// https://www.docusign.com/p/RESTAPIGuide/RESTAPIGuide.htm
// You must define the following environment variables for the test to run properly.
// The necessary variables are:
// DOCUSIGN_HOST=XXXXXX (set to 'demo.docusign.net' for non-prod testing)
// DOCUSIGN_USERNAME=XXXXXXXXXX
// DOCUSIGN_PASSWORD=XXXXXXXXXXx
// DOCUSIGN_ACCTID=XXXXXX
// DOCUSIGN_APIKEY=XXXX-XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX (the integrator key)
// DOCUSIGN_TESTENVID=XXXXXXXXX
// DOCUSIGN_TEMPLATEID=XxxXXXXXX
//
//
// If you wish to skip generating an oauth2 token, you may define an environment
// variable named DOCUSIGN_TOKEN which contains an existing token.
//
// A draft envelope will be created in the Docusign demo environment with the subject "Created by Go Test".
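//
// For example, the variables can be exported in the shell before running
// `go test` (placeholder values only, not real credentials):
//
//	export DOCUSIGN_HOST=demo.docusign.net
//	export [email protected]
//	export DOCUSIGN_PASSWORD=xxxxxxxx
//	export DOCUSIGN_APIKEY=XXXX-XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
//	export DOCUSIGN_ACCTID=123456
//	export DOCUSIGN_TESTENVID=<existing-envelope-id>
//	export DOCUSIGN_TEMPLATEID=<existing-template-id>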
package docusign
import (
"bytes"
"encoding/json"
"encoding/xml"
"fmt"
"io"
"mime/multipart"
"net/http"
"os"
"testing"
"time"
"golang.org/x/net/context"
)
var testEnvId string
var testTemplateId string
var testCtx context.Context
func TestCalls(t *testing.T) {
ctx := context.WithValue(context.Background(), HTTPClient, http.DefaultClient)
testEnvId = os.Getenv("DOCUSIGN_TESTENVID")
testTemplateId = os.Getenv("DOCUSIGN_TEMPLATEID")
testToken := os.Getenv("DOCUSIGN_TOKEN")
testTemplateId = "321d2832-1244-48f7-a6db-949c2cd319c0"
cfg := &Config{
UserName: os.Getenv("DOCUSIGN_USERNAME"),
Password: os.Getenv("DOCUSIGN_PASSWORD"),
IntegratorKey: os.Getenv("DOCUSIGN_APIKEY"),
AccountId: os.Getenv("DOCUSIGN_ACCTID"),
Host: os.Getenv("DOCUSIGN_HOST"),
}
cfg.UserName = "0ba0d798-49ca-43c3-88dc-840d6bcb37af"
cfg.Password = "1DLfrdLa/68U4uzty+pAhM3TUTg="
if cfg.UserName == "" || cfg.Password == "" || cfg.IntegratorKey == "" || cfg.AccountId == "" {
t.Errorf("Invalid Config")
return
}
var err error
var c *OauthCredential
if testToken > "" {
c = &OauthCredential{AccessToken: testToken, AccountId: cfg.AccountId, Host: cfg.Host}
} else {
c, err = cfg.OauthCredential(ctx)
if err != nil {
t.Errorf("Ouauth2 token fail: %v", err)
return
}
t.Logf("Token: %#v\n", c)
defer func() {
if err := c.Revoke(ctx); err != nil {
t.Errorf("Revoke token failed: %v", err)
}
}()
}
sv := New(c, "")
_, err = sv.GetTemplate(ctx, testTemplateId)
if err != nil {
t.Errorf("GetTemplate: %v", err)
return
}
r, err := sv.TemplateSearch(ctx)
if err != nil {
t.Errorf("TemplateSearch: %v", err)
return
}
for _, et := range r.EnvelopeTemplates {
t.Logf("%s %s", et.TemplateId, et.Name)
}
// Get Draft Folder
var draftFolder string
fl, err := sv.FolderList(ctx, FolderTemplatesInclude)
if err != nil {
t.Errorf("GetFolderList: %v", err)
return
}
for _, fd := range fl.Folders {
if fd.Name == "Draft" {
draftFolder = fd.FolderId
}
}
if draftFolder == "" {
t.Errorf("Unable to find Draft folder")
return
}
_, err = sv.AccountCustomFields(ctx)
if err != nil {
t.Errorf("AccountCustomFields error: %v", err)
return
}
_, err = sv.EnvelopeStatusChanges(ctx, StatusChangeToDate(time.Now()), StatusChangeFromDate(time.Now().AddDate(0, 0, -1)),
StatusChangeStatusCode("created"), StatusChangeFromToStatus("created"), StatusChangeCustomField("PID", "123456"))
//(time.Now().Add(time.Hour*24*-30)), StatusChangeToDate(time.Now()))
if err != nil {
t.Errorf("EnvelopeStatusChanges error: %v", err)
return
}
_, err = sv.EnvelopeSearch(ctx, SearchFolderDrafts, EnvelopeSearchCount(3), EnvelopeSearchFromDate(time.Now().AddDate(0, -1, 0)),
EnvelopeSearchToDate(time.Now()), EnvelopeSearchIncludeRecipients)
if err != nil {
t.Errorf("EnvelopeSearch error: %v", err)
return
}
testEnv := testEnvelopePayload(cfg.UserName)
file, err := os.Open("testdata/TestDocument.pdf")
if err != nil {
t.Errorf("Unable to open TestDocument.pdf: %v", err)
}
defer file.Close()
u := &UploadFile{
ContentType: "application/pdf",
FileName: "TestData.pdf",
Id: "1",
Data: file,
}
ex, err := sv.EnvelopeCreate(ctx, testEnv, u)
if err != nil {
t.Errorf("CreateEnvelope: %v", err)
return
}
testEnvId = ex.EnvelopeId
t.Logf("Envelope: %s", testEnvId)
return
aTab := &Tabs{
SignerAttachmentTabs: []SignerAttachmentTab{
SignerAttachmentTab{
BaseTab: BaseTab{
DocumentID: "1",
TabLabel: "attTab",
},
BasePosTab: BasePosTab{
AnchorString: "SignatureA:",
AnchorXOffset: "240",
AnchorYOffset: "10",
AnchorUnits: "pixels",
PageNumber: "1",
TabId: "9985fd9a-a660-4ff3-983d-eb43706d496d",
},
BaseTemplateTab: BaseTemplateTab{
RecipientID: "1",
},
Optional: DSTrue(),
},
},
TextTabs: []TextTab{
TextTab{
BaseTab: BaseTab{
DocumentID: "1",
TabLabel: "deleteThisTab",
},
BasePosTab: BasePosTab{
PageNumber: "1",
XPosition: "300",
YPosition: "350",
},
BaseTemplateTab: BaseTemplateTab{
RecipientID: "1",
},
},
},
}
aTab, err = sv.RecipientTabsAdd(ctx, testEnvId, "1", aTab)
if err != nil {
t.Errorf("Add Tabs error: %v", err)
return
}
var deleteTabId string
if len(aTab.TextTabs) == 1 {
deleteTabId = aTab.TextTabs[0].TabId
}
recList, err := sv.Recipients(ctx, testEnvId, RecipientsIncludeTabs)
if err != nil {
t.Errorf("GetRecipients error: %v\n", err)
return
}
if recList == nil || len(recList.Signers) != 2 {
t.Errorf("Invalid recipients returned.")
return
}
mTabs := &Tabs{
RadioGroupTabs: recList.Signers[1].Tabs.RadioGroupTabs,
ListTabs: recList.Signers[1].Tabs.ListTabs,
TextTabs: []TextTab{
TextTab{Value: "ASFDAFD", BasePosTab: BasePosTab{TabId: "e611bf5f-339c-4ed0-8c71-87ec7f77fdc5"}},
},
}
for i, rd := range mTabs.RadioGroupTabs[0].Radios {
if rd.Value == "val2" {
mTabs.RadioGroupTabs[0].Radios[i].Selected = DSTrue()
} else {
mTabs.RadioGroupTabs[0].Radios[i].Selected = DSFalse()
}
}
for i, li := range mTabs.ListTabs[0].ListItems {
xval := DSFalse()
if li.Value == "Y" {
xval = DSTrue()
}
mTabs.ListTabs[0].ListItems[i].Selected = xval
}
mTabs.ListTabs[0].Value = "Y Val"
mTabs, err = sv.RecipientTabsModify(ctx, testEnvId, "2", mTabs)
if err != nil {
t.Errorf("Modify Tabs Error: %v", err)
return
}
if len(mTabs.TextTabs) != 1 || mTabs.TextTabs[0].ErrorDetails == nil {
t.Errorf("Wanted INVALID_TAB_OPERATION on TextTab[0]; got nil")
return
}
rTabs := &Tabs{
TextTabs: []TextTab{
TextTab{
BasePosTab: BasePosTab{
TabId: deleteTabId,
},
},
},
}
rTabs, err = sv.RecipientTabsRemove(ctx, testEnvId, "1", rTabs)
if err != nil {
t.Errorf("Error Deleting Tab: %v", err)
return
}
newRecipients := &RecipientList{
Signers: []Signer{
Signer{
EmailRecipient: EmailRecipient{
Email: "[email protected]",
Recipient: Recipient{
Name: "Extra Name",
Note: "This is the ,Note for Extra Name",
EmailNotification: &EmailNotification{EmailBody: "This is the recipient 3 email blurb", EmailSubject: "This is the Subject for recipient 3"},
RecipientId: "3",
RoleName: "Role3",
RoutingOrder: "6",
},
},
},
},
CarbonCopies: []CarbonCopy{
CarbonCopy{
EmailRecipient: EmailRecipient{
Email: "[email protected]",
Recipient: Recipient{
Name: "CC Name",
Note: "This is the ,Note for CCName",
EmailNotification: &EmailNotification{EmailBody: "This is the recipient 4 email blurb", EmailSubject: "This is the Subject for recipient 4"},
RecipientId: "4",
RoleName: "Role4",
RoutingOrder: "5",
},
},
},
},
}
newRecipients, err = sv.RecipientsAdd(ctx, testEnvId, newRecipients)
if err != nil {
t.Errorf("Recipients Add Error: %v", err)
return
}
for i := range newRecipients.Signers {
if newRecipients.Signers[i].RecipientId == "3" {
newRecipients.Signers[i].Name = "Modified Name"
}
}
modRec, err := sv.RecipientsModify(ctx, testEnvId, newRecipients)
if err != nil {
t.Errorf("Recipients Modify Error: %v", err)
return
}
for _, rur := range modRec.recipientUpdateResults {
if rur.ErrorDetails != nil && rur.ErrorDetails.Err == "SUCCESS" {
continue
}
t.Errorf("RecipientsModify error: %v", rur.ErrorDetails)
return
}
return
}
func testEnvelopePayload(userName string) *Envelope {
return &Envelope{
Status: "created",
CustomFields: &CustomFieldList{
TextCustomFields: []CustomField{
CustomField{Name: "PID", Value: "123456"},
CustomField{Name: "Project", Value: "P1"},
},
},
Documents: []Document{
Document{
DocumentFields: []NmVal{
NmVal{Name: "Pid", Value: "122312"},
NmVal{Name: "DocType", Value: "TestDoc"},
},
DocumentId: "1",
Name: "TestDoc.pdf",
Order: "1",
},
},
EmailSubject: "Created by Go Test",
EmailBlurb: "Dear Person: Please read <strong>this</strong>.",
Recipients: &RecipientList{
Signers: []Signer{
Signer{
EmailRecipient: EmailRecipient{
Email: userName,
Recipient: Recipient{
Name: "My Name",
Note: "This is the ,Note for My Name",
EmailNotification: &EmailNotification{EmailBody: "This is the recipient 1 email blurb", EmailSubject: "This is the Subject for recipient 1"},
RecipientId: "1",
RoleName: "Role1",
RoutingOrder: "1",
},
},
BaseSigner: BaseSigner{
Tabs: &Tabs{
TextTabs: []TextTab{
TextTab{
BaseTab: BaseTab{
DocumentID: "1",
TabLabel: "txtTextFieldA",
},
BasePosTab: BasePosTab{
AnchorString: "TextFieldA:",
AnchorXOffset: "40",
AnchorYOffset: "-7",
AnchorUnits: "pixels",
PageNumber: "1",
},
BaseTemplateTab: BaseTemplateTab{
RecipientID: "1",
},
Value: "Value 1",
},
},
SignHereTabs: []SignHereTab{
SignHereTab{
BaseTab: BaseTab{
DocumentID: "1",
TabLabel: "signHereA",
},
BasePosTab: BasePosTab{
AnchorString: "SignatureA:",
AnchorXOffset: "40",
AnchorYOffset: "-7",
AnchorUnits: "pixels",
PageNumber: "1",
},
BaseTemplateTab: BaseTemplateTab{
RecipientID: "1",
},
},
},
DateSignedTabs: []DateSignedTab{
DateSignedTab{
BaseTab: BaseTab{
DocumentID: "1",
TabLabel: "dtSignedA",
},
BasePosTab: BasePosTab{
AnchorString: "DateSignedA:",
PageNumber: "1",
},
BaseTemplateTab: BaseTemplateTab{
RecipientID: "1",
},
},
},
},
},
},
Signer{
EmailRecipient: EmailRecipient{
Email: "[email protected]",
Recipient: Recipient{
Name: "XXX YYYY",
Note: "Note for Recipient 2",
EmailNotification: &EmailNotification{EmailBody: "This is the recipient 2 email blurb", EmailSubject: "This is the Subject for recipient 2"},
RecipientId: "2",
RoleName: "Role2",
RoutingOrder: "2",
},
},
BaseSigner: BaseSigner{
Tabs: &Tabs{
TextTabs: []TextTab{
TextTab{
BaseTab: BaseTab{
DocumentID: "1",
TabLabel: "txtTextFieldB",
},
BasePosTab: BasePosTab{
AnchorString: "TextFieldB:",
AnchorXOffset: "40",
AnchorYOffset: "-7",
AnchorUnits: "pixels",
PageNumber: "1",
},
BaseTemplateTab: BaseTemplateTab{
RecipientID: "2",
},
Value: "Value 2",
},
},
SignHereTabs: []SignHereTab{
SignHereTab{
BaseTab: BaseTab{
DocumentID: "1",
TabLabel: "signHereA",
},
BasePosTab: BasePosTab{
AnchorString: "SignatureB:",
AnchorXOffset: "40",
AnchorYOffset: "-7",
AnchorUnits: "pixels",
PageNumber: "1",
},
BaseTemplateTab: BaseTemplateTab{
RecipientID: "2",
},
},
},
DateSignedTabs: []DateSignedTab{
DateSignedTab{
BaseTab: BaseTab{
DocumentID: "1",
TabLabel: "dtSignedB",
},
BasePosTab: BasePosTab{
AnchorString: "DateSignedB:",
AnchorXOffset: "40",
AnchorYOffset: "-7",
AnchorUnits: "pixels",
PageNumber: "1",
},
BaseTemplateTab: BaseTemplateTab{
RecipientID: "2",
},
},
},
CheckboxTabs: []CheckboxTab{
CheckboxTab{
BaseTab: BaseTab{
DocumentID: "1",
TabLabel: "cbTest",
},
BasePosTab: BasePosTab{
AnchorString: "Checkbox:",
AnchorXOffset: "40",
AnchorYOffset: "-7",
AnchorUnits: "pixels",
PageNumber: "1",
},
BaseTemplateTab: BaseTemplateTab{
RecipientID: "2",
},
Selected: DSTrue(),
},
},
RadioGroupTabs: []RadioGroupTab{
RadioGroupTab{
GroupName: "rbGrp",
RecipientID: "2",
DocumentID: "1",
Radios: []Radio{
Radio{
BasePosTab: BasePosTab{
AnchorString: "rbA",
AnchorXOffset: "28",
AnchorYOffset: "-7",
AnchorUnits: "pixels",
PageNumber: "1",
},
Selected: DSFalse(),
Value: "val1",
},
Radio{
BasePosTab: BasePosTab{
AnchorString: "rbB",
AnchorXOffset: "28",
AnchorYOffset: "-7",
AnchorUnits: "pixels",
PageNumber: "1",
},
Selected: DSTrue(),
Value: "val2",
},
Radio{
BasePosTab: BasePosTab{
AnchorString: "rbC",
AnchorXOffset: "28",
AnchorYOffset: "-7",
AnchorUnits: "pixels",
PageNumber: "1",
},
Selected: DSFalse(),
Value: "val3",
},
},
},
},
ListTabs: []ListTab{
ListTab{
BaseTab: BaseTab{
DocumentID: "1",
TabLabel: "dlDrop",
},
BasePosTab: BasePosTab{
AnchorString: "DropdownList:",
AnchorXOffset: "40",
AnchorYOffset: "-7",
AnchorUnits: "pixels",
PageNumber: "1",
},
BaseTemplateTab: BaseTemplateTab{
RecipientID: "2",
},
//Value: "X",
ListItems: []ListItem{
ListItem{
Selected: DSTrue(),
Text: "X Val",
Value: "X",
},
ListItem{
Selected: DSFalse(),
Text: "Y Val",
Value: "Y",
},
ListItem{
Selected: DSFalse(),
Text: "Z Val",
Value: "Z",
},
},
},
},
},
},
},
},
},
}
}
func TestXML(t *testing.T) {
_ = bytes.NewBufferString("")
f, err := os.Open("testdata/connect.xml")
if err != nil {
t.Fatalf("Open Connect.xml: %v", err)
return
}
var v *ConnectData = &ConnectData{}
decoder := xml.NewDecoder(f)
err = decoder.Decode(v)
if err != nil {
t.Fatalf("XML Decode: %v", err)
return
}
if v.EnvelopeStatus.DocumentStatuses[0].Name != "Docusign1.pdf" {
t.Errorf("Invalid Document Name in Connect XML: %s", v.EnvelopeStatus.DocumentStatuses[0].Name)
}
return
}
func TestMultiBody(t *testing.T) {
var payload struct {
A string `json:"a,omitempty"`
B int `json:"b,omitempty"`
}
payload.A = "A"
payload.B = 999
files := []*UploadFile{
&UploadFile{Data: newReadCloser("XXXX"), ContentType: "text/plain", FileName: "fn1", Id: "1"},
&UploadFile{Data: newReadCloser("XXXX"), ContentType: "text/plain", FileName: "fn2", Id: "2"},
&UploadFile{Data: newReadCloser("XXXX"), ContentType: "text/plain", FileName: "fn3", Id: "3"},
}
r, ct := multiBody(payload, files)
defer r.(io.ReadCloser).Close()
mpr := multipart.NewReader(r, ct[30:])
pt, err := mpr.NextPart()
if err != nil {
t.Errorf("Unable to parse part from multireader: %v", err)
return
}
payload.A = ""
payload.B = 0
if err := json.NewDecoder(pt).Decode(&payload); err != nil {
t.Errorf("JSON Unmarshal: %v", err)
return
} else {
if payload.A != "A" || payload.B != 999 {
t.Errorf("Expect A=A and B=999; got %s %d", payload.A, payload.B)
return
}
}
for cnt := 0; cnt < len(files); cnt++ {
if pt, err = mpr.NextPart(); err != nil {
t.Errorf("Unable to parse multipart reader: %v", err)
return
}
if pt.Header.Get("content-disposition") != fmt.Sprintf("file; filename=\"%s\";documentid=%s", files[cnt].FileName, files[cnt].Id) {
t.Errorf("Invalid content-dispostion: %s", pt.Header.Get("content-dispostion"))
}
bx := make([]byte, 4)
if _, err = pt.Read(bx); err != nil {
t.Errorf("Expected EOF: got %v", err)
} else if string(bx) != "XXXX" {
t.Errorf("expectd XXXX; got %s", string(bx))
}
}
}
func newReadCloser(s string) io.ReadCloser {
return byteReadCloser{Buffer: bytes.NewBufferString(s)}
}
type byteReadCloser struct {
*bytes.Buffer
}
func (b byteReadCloser) Close() error {
return nil
}
| [
"\"DOCUSIGN_TESTENVID\"",
"\"DOCUSIGN_TEMPLATEID\"",
"\"DOCUSIGN_TOKEN\"",
"\"DOCUSIGN_USERNAME\"",
"\"DOCUSIGN_PASSWORD\"",
"\"DOCUSIGN_APIKEY\"",
"\"DOCUSIGN_ACCTID\"",
"\"DOCUSIGN_HOST\""
] | [] | [
"DOCUSIGN_HOST",
"DOCUSIGN_USERNAME",
"DOCUSIGN_TOKEN",
"DOCUSIGN_TEMPLATEID",
"DOCUSIGN_ACCTID",
"DOCUSIGN_PASSWORD",
"DOCUSIGN_APIKEY",
"DOCUSIGN_TESTENVID"
] | [] | ["DOCUSIGN_HOST", "DOCUSIGN_USERNAME", "DOCUSIGN_TOKEN", "DOCUSIGN_TEMPLATEID", "DOCUSIGN_ACCTID", "DOCUSIGN_PASSWORD", "DOCUSIGN_APIKEY", "DOCUSIGN_TESTENVID"] | go | 8 | 0 | |
_example/basic/server.go | package main
import (
"log"
"net/http"
"os"
"time"
"github.com/appleboy/gin-jwt/v2"
"github.com/gin-gonic/gin"
)
type login struct {
Username string `form:"username" json:"username" binding:"required"`
Password string `form:"password" json:"password" binding:"required"`
}
var identityKey = "id"
func helloHandler(c *gin.Context) {
claims := jwt.ExtractClaims(c)
user, _ := c.Get(identityKey)
c.JSON(200, gin.H{
"userID": claims[identityKey],
"userName": user.(*User).UserName,
"text": "Hello World.",
})
}
// User demo
type User struct {
UserName string
FirstName string
LastName string
}
func main() {
port := os.Getenv("PORT")
r := gin.New()
r.Use(gin.Logger())
r.Use(gin.Recovery())
if port == "" {
port = "8000"
}
// the jwt middleware
authMiddleware, err := jwt.New(&jwt.GinJWTMiddleware{
Realm: "test zone",
Key: []byte("secret key"),
Timeout: time.Hour,
MaxRefresh: time.Hour,
IdentityKey: identityKey,
PayloadFunc: func(data interface{}) jwt.MapClaims {
if v, ok := data.(*User); ok {
return jwt.MapClaims{
identityKey: v.UserName,
}
}
return jwt.MapClaims{}
},
IdentityHandler: func(c *gin.Context) interface{} {
claims := jwt.ExtractClaims(c)
return &User{
UserName: claims[identityKey].(string),
}
},
Authenticator: func(c *gin.Context) (interface{}, error) {
var loginVals login
if err := c.ShouldBind(&loginVals); err != nil {
return "", jwt.ErrMissingLoginValues
}
userID := loginVals.Username
password := loginVals.Password
if (userID == "admin" && password == "admin") || (userID == "test" && password == "test") {
return &User{
UserName: userID,
LastName: "Bo-Yi",
FirstName: "Wu",
}, nil
}
return nil, jwt.ErrFailedAuthentication
},
Authorizator: func(data interface{}, c *gin.Context) bool {
if v, ok := data.(*User); ok && v.UserName == "admin" {
return true
}
return false
},
Unauthorized: func(c *gin.Context, code int, message string) {
c.JSON(code, gin.H{
"code": code,
"message": message,
})
},
// TokenLookup is a string in the form of "<source>:<name>" that is used
// to extract token from the request.
// Optional. Default value "header:Authorization".
// Possible values:
// - "header:<name>"
// - "query:<name>"
// - "cookie:<name>"
// - "param:<name>"
TokenLookup: "header: Authorization, query: token, cookie: jwt",
// TokenLookup: "query:token",
// TokenLookup: "cookie:token",
// TokenHeadName is a string in the header. Default value is "Bearer"
TokenHeadName: "Bearer",
// TimeFunc provides the current time. You can override it to use another time value. This is useful for testing or if your server uses a different time zone than your tokens.
TimeFunc: time.Now,
})
if err != nil {
log.Fatal("JWT Error:" + err.Error())
}
r.POST("/login", authMiddleware.LoginHandler)
r.NoRoute(authMiddleware.MiddlewareFunc(), func(c *gin.Context) {
claims := jwt.ExtractClaims(c)
log.Printf("NoRoute claims: %#v\n", claims)
c.JSON(404, gin.H{"code": "PAGE_NOT_FOUND", "message": "Page not found"})
})
auth := r.Group("/auth")
// Refresh time can be longer than token timeout
auth.GET("/refresh_token", authMiddleware.RefreshHandler)
auth.Use(authMiddleware.MiddlewareFunc())
{
auth.GET("/hello", helloHandler)
}
if err := http.ListenAndServe(":"+port, r); err != nil {
log.Fatal(err)
}
}
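// Illustrative usage once the server is running (assumes the default port 8000
// and the built-in admin/admin test user defined in Authenticator above):
//
//	curl -X POST -d "username=admin&password=admin" http://localhost:8000/login
//	curl -H "Authorization: Bearer <token-from-login>" http://localhost:8000/auth/hello
//	curl -H "Authorization: Bearer <token-from-login>" http://localhost:8000/auth/refresh_token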
| [
"\"PORT\""
] | [] | [
"PORT"
] | [] | ["PORT"] | go | 1 | 0 | |
nova/api/openstack/wsgi_app.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""WSGI application initialization for Nova APIs."""
import os
import sys
from oslo_config import cfg
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
from oslo_reports import opts as gmr_opts
from oslo_service import _options as service_opts
from paste import deploy
from nova import config
from nova import context
from nova import exception
from nova import objects
from nova import service
from nova import utils
from nova import version
CONF = cfg.CONF
CONFIG_FILES = ['api-paste.ini', 'nova.conf']
LOG = logging.getLogger(__name__)
objects.register_all()
def _get_config_files(env=None):
if env is None:
env = os.environ
dirname = env.get('OS_NOVA_CONFIG_DIR', '/etc/nova').strip()
return [os.path.join(dirname, config_file)
for config_file in CONFIG_FILES]
def _setup_service(host, name):
utils.raise_if_old_compute()
binary = name if name.startswith('nova-') else "nova-%s" % name
ctxt = context.get_admin_context()
service_ref = objects.Service.get_by_host_and_binary(
ctxt, host, binary)
if service_ref:
service._update_service_ref(service_ref)
else:
try:
service_obj = objects.Service(ctxt)
service_obj.host = host
service_obj.binary = binary
service_obj.topic = None
service_obj.report_count = 0
service_obj.create()
except (exception.ServiceTopicExists,
exception.ServiceBinaryExists):
# If we race to create a record with a sibling, don't
# fail here.
pass
def error_application(exc, name):
# TODO(cdent): make this something other than a stub
def application(environ, start_response):
start_response('500 Internal Server Error', [
('Content-Type', 'text/plain; charset=UTF-8')])
return ['Out of date %s service %s\n' % (name, exc)]
return application
@utils.run_once('Global data already initialized, not re-initializing.',
LOG.info)
def init_global_data(conf_files, service_name):
# NOTE(melwitt): parse_args initializes logging and calls global rpc.init()
# and db_api.configure(). The db_api.configure() call does not initiate any
# connection to the database.
# NOTE(gibi): sys.argv is set by the wsgi runner e.g. uwsgi sets it based
# on the --pyargv parameter of the uwsgi binary
config.parse_args(sys.argv, default_config_files=conf_files)
logging.setup(CONF, "nova")
gmr_opts.set_defaults(CONF)
gmr.TextGuruMeditation.setup_autorun(
version, conf=CONF, service_name=service_name)
# dump conf at debug (log_options option comes from oslo.service)
# FIXME(mriedem): This is gross but we don't have a public hook into
# oslo.service to register these options, so we are doing it manually for
# now; remove this when we have a hook method into oslo.service.
CONF.register_opts(service_opts.service_opts)
if CONF.log_options:
CONF.log_opt_values(
logging.getLogger(__name__),
logging.DEBUG)
def init_application(name):
conf_files = _get_config_files()
# NOTE(melwitt): The init_application method can be called multiple times
# within a single python interpreter instance if any exception is raised
# during it (example: DBConnectionError while setting up the service) and
# apache/mod_wsgi reloads the init_application script. So, we initialize
# global data separately and decorate the method to run only once in a
# python interpreter instance.
init_global_data(conf_files, name)
try:
_setup_service(CONF.host, name)
except exception.ServiceTooOld as exc:
return error_application(exc, name)
# This global init is safe because if we got here, we already successfully
# set up the service and setting up the profile cannot fail.
service.setup_profiler(name, CONF.host)
conf = conf_files[0]
return deploy.loadapp('config:%s' % conf, name=name)
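# A minimal WSGI entry-point module for uwsgi or mod_wsgi could look like the
# sketch below (illustrative only; deployments normally use the generated
# nova-api-wsgi script, and the name must match a paste app/composite section
# in api-paste.ini, e.g. 'osapi_compute'):
#
#   from nova.api.openstack import wsgi_app
#
#   application = wsgi_app.init_application('osapi_compute')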
| [] | [] | [] | [] | [] | python | 0 | 0 | |
pkg/metrics/api_rate_limiting.go | package metrics
import (
"context"
"os"
"time"
log "github.com/sirupsen/logrus"
"github.com/sylr/prometheus-azure-exporter/pkg/azure"
)
func init() {
if GetUpdateMetricsFunctionInterval("api_rate_limiting") == nil {
RegisterUpdateMetricsFunctionWithInterval("api_rate_limiting", UpdateAPIRateLimitingMetrics, 30*time.Second)
}
}
// UpdateAPIRateLimitingMetrics updates api metrics.
func UpdateAPIRateLimitingMetrics(ctx context.Context) error {
var err error
contextLogger := log.WithFields(log.Fields{
"_id": ctx.Value("id").(string),
"_func": "UpdateApiRateLimitingMetrics",
})
// In order to retrieve the remaining number of write API calls that Azure
// allows for the subscription or the tenant id, we need to make a call to
// an API which performs a write operation.
// The ListKeys API of storage accounts, although really a read operation, is
// considered a write API by Azure, so we are going to use that.
// We do not need to register metrics in this module. All the work is done by
// callbacks given to the Azure SDK. See:
// - pkg/azure/clients.go -> respondInspect()
// - pkg/azure/azure.go -> SetReadRateLimitRemaining()
// SetWriteRateLimitRemaining()
azureClients := azure.NewAzureClients()
sub, err := azure.GetSubscription(ctx, azureClients, os.Getenv("AZURE_SUBSCRIPTION_ID"))
if err != nil {
contextLogger.Errorf("Unable to get subscription: %s", err)
return err
}
storageAccounts, err := azure.ListSubscriptionStorageAccounts(ctx, azureClients, sub)
if err != nil {
contextLogger.Errorf("Unable to list account azure storage accounts: %s", err)
return err
}
// Loop over storage accounts.
for accountKey := range *storageAccounts {
_, err := azure.ListStorageAccountKeys(ctx, azureClients, sub, &(*storageAccounts)[accountKey])
if err != nil {
contextLogger.Error(err)
} else {
break
}
}
return err
}
| [
"\"AZURE_SUBSCRIPTION_ID\""
] | [] | [
"AZURE_SUBSCRIPTION_ID"
] | [] | ["AZURE_SUBSCRIPTION_ID"] | go | 1 | 0 | |
google/appengine/api/appinfo.py | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""AppInfo tools.
This library allows you to work with AppInfo records in memory, as well as store
and load from configuration files.
"""
import logging
import os
import re
import string
import sys
import wsgiref.util
if os.environ.get('APPENGINE_RUNTIME') == 'python27':
from google.appengine.api import validation
from google.appengine.api import yaml_builder
from google.appengine.api import yaml_listener
from google.appengine.api import yaml_object
else:
from google.appengine.api import validation
from google.appengine.api import yaml_builder
from google.appengine.api import yaml_listener
from google.appengine.api import yaml_object
from google.appengine.api import appinfo_errors
from google.appengine.api import backendinfo
_URL_REGEX = r'(?!\^)/.*|\..*|(\(.).*(?!\$).'
_FILES_REGEX = r'.+'
_URL_ROOT_REGEX = r'/.*'
_DELTA_REGEX = r'([0-9]+)([DdHhMm]|[sS]?)'
_EXPIRATION_REGEX = r'\s*(%s)(\s+%s)*\s*' % (_DELTA_REGEX, _DELTA_REGEX)
_START_PATH = '/_ah/start'
_NON_WHITE_SPACE_REGEX = r'^\S+$'
_ALLOWED_SERVICES = ['mail', 'mail_bounce', 'xmpp_message', 'xmpp_subscribe',
'xmpp_presence', 'xmpp_error', 'channel_presence', 'rest',
'warmup']
_SERVICE_RE_STRING = '(' + '|'.join(_ALLOWED_SERVICES) + ')'
_PAGE_NAME_REGEX = r'^.+$'
_EXPIRATION_CONVERSIONS = {
'd': 60 * 60 * 24,
'h': 60 * 60,
'm': 60,
's': 1,
}
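# For illustration: an expiration string such as "4d 5h" matches
# _EXPIRATION_REGEX and, with the conversions above, amounts to
# 4*86400 + 5*3600 = 363600 seconds.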
APP_ID_MAX_LEN = 100
MODULE_ID_MAX_LEN = 63
MODULE_VERSION_ID_MAX_LEN = 63
MAX_URL_MAPS = 100
PARTITION_SEPARATOR = '~'
DOMAIN_SEPARATOR = ':'
VERSION_SEPARATOR = '.'
MODULE_SEPARATOR = ':'
DEFAULT_MODULE = 'default'
PARTITION_RE_STRING_WITHOUT_SEPARATOR = (r'[a-z\d\-]{1,%d}' % APP_ID_MAX_LEN)
PARTITION_RE_STRING = (r'%s\%s' %
(PARTITION_RE_STRING_WITHOUT_SEPARATOR,
PARTITION_SEPARATOR))
DOMAIN_RE_STRING_WITHOUT_SEPARATOR = (r'(?!\-)[a-z\d\-\.]{1,%d}' %
APP_ID_MAX_LEN)
DOMAIN_RE_STRING = (r'%s%s' %
(DOMAIN_RE_STRING_WITHOUT_SEPARATOR, DOMAIN_SEPARATOR))
DISPLAY_APP_ID_RE_STRING = r'(?!-)[a-z\d\-]{0,%d}[a-z\d]' % (APP_ID_MAX_LEN - 1)
APPLICATION_RE_STRING = (r'(?:%s)?(?:%s)?%s' %
(PARTITION_RE_STRING,
DOMAIN_RE_STRING,
DISPLAY_APP_ID_RE_STRING))
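# For illustration, APPLICATION_RE_STRING accepts plain ids such as "my-app",
# domain-prefixed ids such as "example.com:my-app", and partitioned ids such as
# "dev~example.com:my-app".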
MODULE_ID_RE_STRING = r'^(?!-)[a-z\d\-]{0,%d}[a-z\d]$' % (MODULE_ID_MAX_LEN - 1)
MODULE_VERSION_ID_RE_STRING = (r'^(?!-)[a-z\d\-]{0,%d}[a-z\d]$' %
(MODULE_VERSION_ID_MAX_LEN - 1))
_IDLE_INSTANCES_REGEX = r'^([\d]+|automatic)$'
_INSTANCES_REGEX = r'^[1-9][\d]*$'
_CONCURRENT_REQUESTS_REGEX = r'^([1-9]\d*)$'
_PENDING_LATENCY_REGEX = r'^(\d+((\.\d{1,3})?s|ms)|automatic)$'
_IDLE_TIMEOUT_REGEX = r'^[\d]+(s|m)$'
GCE_RESOURCE_NAME_REGEX = r'^[a-z]([a-z\d-]{0,61}[a-z\d])?$'
ALTERNATE_HOSTNAME_SEPARATOR = '-dot-'
BUILTIN_NAME_PREFIX = 'ah-builtin'
RUNTIME_RE_STRING = r'((gs://[a-z0-9\-\._/]+)|([a-z][a-z0-9\-\.]{0,29}))'
API_VERSION_RE_STRING = r'[\w.]{1,32}'
ENV_RE_STRING = r'(1|2|standard|flex|flexible)'
SOURCE_LANGUAGE_RE_STRING = r'[\w.\-]{1,32}'
HANDLER_STATIC_FILES = 'static_files'
HANDLER_STATIC_DIR = 'static_dir'
HANDLER_SCRIPT = 'script'
HANDLER_API_ENDPOINT = 'api_endpoint'
LOGIN_OPTIONAL = 'optional'
LOGIN_REQUIRED = 'required'
LOGIN_ADMIN = 'admin'
AUTH_FAIL_ACTION_REDIRECT = 'redirect'
AUTH_FAIL_ACTION_UNAUTHORIZED = 'unauthorized'
DATASTORE_ID_POLICY_LEGACY = 'legacy'
DATASTORE_ID_POLICY_DEFAULT = 'default'
SECURE_HTTP = 'never'
SECURE_HTTPS = 'always'
SECURE_HTTP_OR_HTTPS = 'optional'
SECURE_DEFAULT = 'default'
REQUIRE_MATCHING_FILE = 'require_matching_file'
DEFAULT_SKIP_FILES = (r'^(.*/)?('
r'(#.*#)|'
r'(.*~)|'
r'(.*\.py[co])|'
r'(.*/RCS/.*)|'
r'(\..*)|'
r')$')
SKIP_NO_FILES = r'(?!)'
DEFAULT_NOBUILD_FILES = (r'^$')
LOGIN = 'login'
AUTH_FAIL_ACTION = 'auth_fail_action'
SECURE = 'secure'
URL = 'url'
POSITION = 'position'
POSITION_HEAD = 'head'
POSITION_TAIL = 'tail'
STATIC_FILES = 'static_files'
UPLOAD = 'upload'
STATIC_DIR = 'static_dir'
MIME_TYPE = 'mime_type'
SCRIPT = 'script'
EXPIRATION = 'expiration'
API_ENDPOINT = 'api_endpoint'
HTTP_HEADERS = 'http_headers'
APPLICATION_READABLE = 'application_readable'
REDIRECT_HTTP_RESPONSE_CODE = 'redirect_http_response_code'
APPLICATION = 'application'
PROJECT = 'project'
MODULE = 'module'
SERVICE = 'service'
AUTOMATIC_SCALING = 'automatic_scaling'
MANUAL_SCALING = 'manual_scaling'
BASIC_SCALING = 'basic_scaling'
VM = 'vm'
VM_SETTINGS = 'vm_settings'
ZONES = 'zones'
BETA_SETTINGS = 'beta_settings'
VM_HEALTH_CHECK = 'vm_health_check'
HEALTH_CHECK = 'health_check'
RESOURCES = 'resources'
LIVENESS_CHECK = 'liveness_check'
READINESS_CHECK = 'readiness_check'
NETWORK = 'network'
VERSION = 'version'
MAJOR_VERSION = 'major_version'
MINOR_VERSION = 'minor_version'
RUNTIME = 'runtime'
RUNTIME_CHANNEL = 'runtime_channel'
API_VERSION = 'api_version'
ENDPOINTS_API_SERVICE = 'endpoints_api_service'
ENV = 'env'
ENTRYPOINT = 'entrypoint'
RUNTIME_CONFIG = 'runtime_config'
SOURCE_LANGUAGE = 'source_language'
BUILTINS = 'builtins'
INCLUDES = 'includes'
HANDLERS = 'handlers'
LIBRARIES = 'libraries'
DEFAULT_EXPIRATION = 'default_expiration'
SKIP_FILES = 'skip_files'
NOBUILD_FILES = 'nobuild_files'
SERVICES = 'inbound_services'
DERIVED_FILE_TYPE = 'derived_file_type'
JAVA_PRECOMPILED = 'java_precompiled'
PYTHON_PRECOMPILED = 'python_precompiled'
ADMIN_CONSOLE = 'admin_console'
ERROR_HANDLERS = 'error_handlers'
BACKENDS = 'backends'
THREADSAFE = 'threadsafe'
DATASTORE_AUTO_ID_POLICY = 'auto_id_policy'
API_CONFIG = 'api_config'
CODE_LOCK = 'code_lock'
ENV_VARIABLES = 'env_variables'
STANDARD_WEBSOCKET = 'standard_websocket'
SOURCE_REPO_RE_STRING = r'^[a-z][a-z0-9\-\+\.]*:[^#]*$'
SOURCE_REVISION_RE_STRING = r'^[0-9a-fA-F]+$'
SOURCE_REFERENCES_MAX_SIZE = 2048
INSTANCE_CLASS = 'instance_class'
MINIMUM_PENDING_LATENCY = 'min_pending_latency'
MAXIMUM_PENDING_LATENCY = 'max_pending_latency'
MINIMUM_IDLE_INSTANCES = 'min_idle_instances'
MAXIMUM_IDLE_INSTANCES = 'max_idle_instances'
MAXIMUM_CONCURRENT_REQUEST = 'max_concurrent_requests'
MIN_NUM_INSTANCES = 'min_num_instances'
MAX_NUM_INSTANCES = 'max_num_instances'
COOL_DOWN_PERIOD_SEC = 'cool_down_period_sec'
CPU_UTILIZATION = 'cpu_utilization'
CPU_UTILIZATION_UTILIZATION = 'target_utilization'
CPU_UTILIZATION_AGGREGATION_WINDOW_LENGTH_SEC = 'aggregation_window_length_sec'
TARGET_NETWORK_SENT_BYTES_PER_SEC = 'target_network_sent_bytes_per_sec'
TARGET_NETWORK_SENT_PACKETS_PER_SEC = 'target_network_sent_packets_per_sec'
TARGET_NETWORK_RECEIVED_BYTES_PER_SEC = 'target_network_received_bytes_per_sec'
TARGET_NETWORK_RECEIVED_PACKETS_PER_SEC = (
'target_network_received_packets_per_sec')
TARGET_DISK_WRITE_BYTES_PER_SEC = 'target_disk_write_bytes_per_sec'
TARGET_DISK_WRITE_OPS_PER_SEC = 'target_disk_write_ops_per_sec'
TARGET_DISK_READ_BYTES_PER_SEC = 'target_disk_read_bytes_per_sec'
TARGET_DISK_READ_OPS_PER_SEC = 'target_disk_read_ops_per_sec'
TARGET_REQUEST_COUNT_PER_SEC = 'target_request_count_per_sec'
TARGET_CONCURRENT_REQUESTS = 'target_concurrent_requests'
CUSTOM_METRICS = 'custom_metrics'
METRIC_NAME = 'metric_name'
TARGET_TYPE = 'target_type'
TARGET_TYPE_REGEX = r'^(GAUGE|DELTA_PER_SECOND|DELTA_PER_MINUTE)$'
CUSTOM_METRIC_UTILIZATION = 'target_utilization'
SINGLE_INSTANCE_ASSIGNMENT = 'single_instance_assignment'
FILTER = 'filter'
INSTANCES = 'instances'
MAX_INSTANCES = 'max_instances'
IDLE_TIMEOUT = 'idle_timeout'
PAGES = 'pages'
NAME = 'name'
ENDPOINTS_NAME = 'name'
CONFIG_ID = 'config_id'
ROLLOUT_STRATEGY = 'rollout_strategy'
ROLLOUT_STRATEGY_FIXED = 'fixed'
ROLLOUT_STRATEGY_MANAGED = 'managed'
TRACE_SAMPLING = 'trace_sampling'
ERROR_CODE = 'error_code'
FILE = 'file'
_ERROR_CODE_REGEX = r'(default|over_quota|dos_api_denial|timeout)'
ON = 'on'
ON_ALIASES = ['yes', 'y', 'True', 't', '1', 'true']
OFF = 'off'
OFF_ALIASES = ['no', 'n', 'False', 'f', '0', 'false']
ENABLE_HEALTH_CHECK = 'enable_health_check'
CHECK_INTERVAL_SEC = 'check_interval_sec'
TIMEOUT_SEC = 'timeout_sec'
APP_START_TIMEOUT_SEC = 'app_start_timeout_sec'
UNHEALTHY_THRESHOLD = 'unhealthy_threshold'
HEALTHY_THRESHOLD = 'healthy_threshold'
FAILURE_THRESHOLD = 'failure_threshold'
SUCCESS_THRESHOLD = 'success_threshold'
RESTART_THRESHOLD = 'restart_threshold'
INITIAL_DELAY_SEC = 'initial_delay_sec'
HOST = 'host'
PATH = 'path'
CPU = 'cpu'
MEMORY_GB = 'memory_gb'
DISK_SIZE_GB = 'disk_size_gb'
VOLUMES = 'volumes'
VOLUME_NAME = 'name'
VOLUME_TYPE = 'volume_type'
SIZE_GB = 'size_gb'
FORWARDED_PORTS = 'forwarded_ports'
INSTANCE_TAG = 'instance_tag'
NETWORK_NAME = 'name'
SUBNETWORK_NAME = 'subnetwork_name'
SESSION_AFFINITY = 'session_affinity'
STANDARD_MIN_INSTANCES = 'min_instances'
STANDARD_MAX_INSTANCES = 'max_instances'
STANDARD_TARGET_CPU_UTILIZATION = 'target_cpu_utilization'
STANDARD_TARGET_THROUGHPUT_UTILIZATION = 'target_throughput_utilization'
class _VersionedLibrary(object):
"""A versioned library supported by App Engine."""
def __init__(self,
name,
url,
description,
supported_versions,
latest_version,
default_version=None,
deprecated_versions=None,
experimental_versions=None,
hidden_versions=None):
"""Initializer for `_VersionedLibrary`.
Args:
name: The name of the library; for example, `django`.
url: The URL for the library's project page; for example,
`http://www.djangoproject.com/`.
description: A short description of the library; for example,
`A framework...`.
supported_versions: A list of supported version names, ordered by release
date; for example, `["v1", "v2", "v3"]`.
latest_version: The version of the library that will be used when you
specify `latest.` The rule of thumb is that this value should be the
newest version that is neither deprecated nor experimental; however
this value might be an experimental version if all of the supported
versions are either deprecated or experimental.
default_version: The version of the library that is enabled by default
in the Python 2.7 runtime, or `None` if the library is not available
by default; for example, `v1`.
deprecated_versions: A list of the versions of the library that have been
deprecated; for example, `["v1", "v2"]`. Order by release version.
experimental_versions: A list of the versions of the library that are
currently experimental; for example, `["v1"]`. Order by release
version.
hidden_versions: A list of versions that will not show up in public
documentation for release purposes. If, as a result, the library
has no publicly documented versions, the entire library won't show
up in the docs. Order by release version.
"""
self.name = name
self.url = url
self.description = description
self.supported_versions = supported_versions
self.latest_version = latest_version
self.default_version = default_version
self.deprecated_versions = deprecated_versions or []
self.experimental_versions = experimental_versions or []
self.hidden_versions = hidden_versions or []
@property
def hidden(self):
"""Determines if the entire library should be hidden from public docs.
Returns:
True if every supported version is hidden.
"""
return sorted(self.supported_versions) == sorted(self.hidden_versions)
@property
def non_deprecated_versions(self):
"""Retrieves the versions of the library that are not deprecated.
Returns:
A list of the versions of the library that are not deprecated.
"""
return [version for version in self.supported_versions
if version not in self.deprecated_versions]
_SUPPORTED_LIBRARIES = [
_VersionedLibrary(
'clearsilver',
'http://www.clearsilver.net/',
'A fast, powerful, and language-neutral HTML template system.',
['0.10.5'],
latest_version='0.10.5',
hidden_versions=['0.10.5'],
),
_VersionedLibrary(
'click',
'http://click.pocoo.org/',
'A command line library for Python.',
['6.6'],
latest_version='6.6',
hidden_versions=['6.6'],
),
_VersionedLibrary(
'django',
'http://www.djangoproject.com/',
'A full-featured web application framework for Python.',
['1.2', '1.3', '1.4', '1.5', '1.9', '1.11'],
latest_version='1.4',
deprecated_versions=['1.2', '1.3', '1.5', '1.9'],
),
_VersionedLibrary(
'enum',
'https://pypi.python.org/pypi/enum34',
'A backport of the enum module introduced in python 3.4',
['0.9.23'],
latest_version='0.9.23',
),
_VersionedLibrary(
'endpoints',
'https://cloud.google.com/appengine/docs/standard/python/endpoints/',
'Libraries for building APIs in an App Engine application.',
['1.0'],
latest_version='1.0',
),
_VersionedLibrary(
'flask',
'http://flask.pocoo.org/',
'Flask is a microframework for Python based on Werkzeug, Jinja 2 '
'and good intentions.',
['0.12'],
latest_version='0.12',
),
_VersionedLibrary(
'futures',
'https://docs.python.org/3/library/concurrent.futures.html',
'Backport of Python 3.2 Futures.',
['3.0.5'],
latest_version='3.0.5',
),
_VersionedLibrary(
'grpcio',
'http://www.grpc.io/',
'A high performance general RPC framework',
['1.0.0'],
latest_version='1.0.0',
experimental_versions=['1.0.0'],
),
_VersionedLibrary(
'itsdangerous',
'http://pythonhosted.org/itsdangerous/',
'HMAC and SHA1 signing for Python.',
['0.24'],
latest_version='0.24',
hidden_versions=['0.24'],
),
_VersionedLibrary(
'jinja2',
'http://jinja.pocoo.org/docs/',
'A modern and designer friendly templating language for Python.',
['2.6'],
latest_version='2.6',
),
_VersionedLibrary(
'lxml',
'http://lxml.de/',
'A Pythonic binding for the C libraries libxml2 and libxslt.',
['2.3', '2.3.5', '3.7.3'],
latest_version='3.7.3',
deprecated_versions=['2.3', '2.3.5'],
),
_VersionedLibrary(
'markupsafe',
'http://pypi.python.org/pypi/MarkupSafe',
'A XML/HTML/XHTML markup safe string for Python.',
['0.15', '0.23'],
latest_version='0.15',
),
_VersionedLibrary(
'matplotlib',
'http://matplotlib.org/',
'A 2D plotting library which produces publication-quality figures.',
['1.2.0'],
latest_version='1.2.0',
),
_VersionedLibrary(
'MySQLdb',
'http://mysql-python.sourceforge.net/',
'A Python DB API v2.0 compatible interface to MySQL.',
['1.2.4b4', '1.2.4', '1.2.5'],
latest_version='1.2.5',
deprecated_versions=['1.2.4b4', '1.2.4'],
),
_VersionedLibrary(
'numpy',
'http://numpy.scipy.org/',
'A general-purpose library for array-processing.',
['1.6.1'],
latest_version='1.6.1',
),
_VersionedLibrary(
'PIL',
'http://www.pythonware.com/library/pil/handbook/',
'A library for creating and transforming images.',
['1.1.7'],
latest_version='1.1.7',
),
_VersionedLibrary(
'protorpc',
'https://code.google.com/p/google-protorpc/',
'A framework for implementing HTTP-based remote procedure call (RPC) '
'services.',
['1.0'],
latest_version='1.0',
default_version='1.0',
),
_VersionedLibrary(
'pytz',
'https://pypi.python.org/pypi/pytz?',
'A library for cross-platform timezone calculations',
['2016.4', '2017.2', '2017.3'],
latest_version='2017.3',
default_version='2017.3',
deprecated_versions=['2016.4', '2017.2'],
),
_VersionedLibrary(
'crcmod',
'http://crcmod.sourceforge.net/',
'A library for generating Cyclic Redundancy Checks (CRC).',
['1.7'],
latest_version='1.7',
),
_VersionedLibrary(
'protobuf',
'https://developers.google.com/protocol-buffers/',
'A library for serializing structured data',
['3.0.0'],
latest_version='3.0.0',
experimental_versions=['3.0.0'],
),
_VersionedLibrary(
'PyAMF',
'https://pypi.python.org/pypi/PyAMF',
'A library that provides (AMF) Action Message Format functionality.',
['0.6.1', '0.7.2'],
latest_version='0.6.1',
experimental_versions=['0.7.2'],
),
_VersionedLibrary(
'pycrypto',
'https://www.dlitz.net/software/pycrypto/',
'A library of cryptography functions such as random number generation.',
['2.3', '2.6', '2.6.1'],
latest_version='2.6',
deprecated_versions=['2.3'],
),
_VersionedLibrary(
'setuptools',
'http://pypi.python.org/pypi/setuptools',
'A library that provides package and module discovery capabilities.',
['0.6c11', '36.6.0'],
latest_version='36.6.0',
deprecated_versions=['0.6c11'],
),
_VersionedLibrary(
'six',
'https://pypi.python.org/pypi/six',
'Abstract differences between py2.x and py3',
['1.9.0'],
latest_version='1.9.0',
),
_VersionedLibrary(
'ssl',
'http://docs.python.org/dev/library/ssl.html',
'The SSL socket wrapper built-in module.',
['2.7', '2.7.11'],
latest_version='2.7.11',
deprecated_versions=['2.7']
),
_VersionedLibrary(
'ujson',
'https://pypi.python.org/pypi/ujson',
'UltraJSON is an ultra fast JSON encoder and decoder written in pure C',
['1.35'],
latest_version='1.35',
),
_VersionedLibrary(
'webapp2',
'http://webapp-improved.appspot.com/',
'A lightweight Python web framework.',
['2.3', '2.5.1', '2.5.2'],
latest_version='2.5.2',
default_version='2.3',
deprecated_versions=['2.5.1']
),
_VersionedLibrary(
'webob',
'http://www.webob.org/',
'A library that provides wrappers around the WSGI request environment.',
['1.1.1', '1.2.3'],
latest_version='1.2.3',
default_version='1.1.1',
),
_VersionedLibrary(
'werkzeug',
'http://www.werkzeug.pocoo.org/',
'A WSGI utility library.',
['0.11.10'],
latest_version='0.11.10',
default_version='0.11.10',
),
_VersionedLibrary(
'yaml',
'http://www.yaml.org/',
'A library for YAML serialization and deserialization.',
['3.10'],
latest_version='3.10',
default_version='3.10'
),
]
_NAME_TO_SUPPORTED_LIBRARY = dict((library.name, library)
for library in _SUPPORTED_LIBRARIES)
REQUIRED_LIBRARIES = {
('django', '1.11'): [('pytz', '2017.2')],
('flask', '0.12'): [('click', '6.6'), ('itsdangerous', '0.24'),
('jinja2', '2.6'), ('werkzeug', '0.11.10')],
('jinja2', '2.6'): [('markupsafe', '0.15'), ('setuptools', '0.6c11')],
('jinja2', 'latest'): [('markupsafe', 'latest'), ('setuptools', 'latest')],
('matplotlib', '1.2.0'): [('numpy', '1.6.1')],
('matplotlib', 'latest'): [('numpy', 'latest')],
('protobuf', '3.0.0'): [('six', '1.9.0')],
('protobuf', 'latest'): [('six', 'latest')],
('grpcio', '1.0.0'): [('protobuf', '3.0.0'), ('enum', '0.9.23'),
('futures', '3.0.5'), ('six', '1.9.0'),
('setuptools', '36.6.0')],
('grpcio', 'latest'): [('protobuf', 'latest'), ('enum', 'latest'),
('futures', 'latest'), ('six', 'latest'),
('setuptools', 'latest')]
}
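# Read as: enabling the library on the left also requires the libraries on the
# right; for example ('flask', '0.12') additionally requires click 6.6,
# itsdangerous 0.24, jinja2 2.6 and werkzeug 0.11.10.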
_USE_VERSION_FORMAT = ('use one of: "%s"')
_HTTP_SEPARATOR_CHARS = frozenset('()<>@,;:\\"/[]?={} \t')
_HTTP_TOKEN_CHARS = frozenset(string.printable[:-5]) - _HTTP_SEPARATOR_CHARS
_HTTP_TOKEN_RE = re.compile('[%s]+$' % re.escape(''.join(_HTTP_TOKEN_CHARS)))
_HTTP_REQUEST_HEADERS = frozenset([
'accept',
'accept-charset',
'accept-encoding',
'accept-language',
'authorization',
'expect',
'from',
'host',
'if-match',
'if-modified-since',
'if-none-match',
'if-range',
'if-unmodified-since',
'max-forwards',
'proxy-authorization',
'range',
'referer',
'te',
'user-agent',
])
_MAX_COOKIE_LENGTH = 4096
_MAX_URL_LENGTH = 2047
_MAX_HEADER_SIZE_FOR_EXEMPTED_HEADERS = 10240
_CANNED_RUNTIMES = ('contrib-dart', 'dart', 'go', 'php', 'php55', 'php72',
'python', 'python27', 'python-compat', 'java', 'java7',
'java8', 'vm', 'custom', 'nodejs', 'ruby')
_all_runtimes = _CANNED_RUNTIMES
def GetAllRuntimes():
"""Returns the list of all valid runtimes.
This list can include third-party runtimes as well as canned runtimes.
Returns:
Tuple of strings.
"""
return _all_runtimes
class HandlerBase(validation.Validated):
"""Base class for URLMap and ApiConfigHandler."""
ATTRIBUTES = {
URL: validation.Optional(_URL_REGEX),
LOGIN: validation.Options(LOGIN_OPTIONAL,
LOGIN_REQUIRED,
LOGIN_ADMIN,
default=LOGIN_OPTIONAL),
AUTH_FAIL_ACTION: validation.Options(AUTH_FAIL_ACTION_REDIRECT,
AUTH_FAIL_ACTION_UNAUTHORIZED,
default=AUTH_FAIL_ACTION_REDIRECT),
SECURE: validation.Options(SECURE_HTTP,
SECURE_HTTPS,
SECURE_HTTP_OR_HTTPS,
SECURE_DEFAULT,
default=SECURE_DEFAULT),
HANDLER_SCRIPT: validation.Optional(_FILES_REGEX)
}
class HttpHeadersDict(validation.ValidatedDict):
"""A dict that limits keys and values to what `http_headers` allows.
`http_headers` is an static handler key; it applies to handlers with
`static_dir` or `static_files` keys. The following code is an example of how
`http_headers` is used::
handlers:
- url: /static
static_dir: static
http_headers:
X-Foo-Header: foo value
X-Bar-Header: bar value
"""
DISALLOWED_HEADERS = frozenset([
'content-encoding',
'content-length',
'date',
'server'
])
MAX_HEADER_LENGTH = 500
MAX_HEADER_VALUE_LENGTHS = {
'content-security-policy': _MAX_HEADER_SIZE_FOR_EXEMPTED_HEADERS,
'x-content-security-policy': _MAX_HEADER_SIZE_FOR_EXEMPTED_HEADERS,
'x-webkit-csp': _MAX_HEADER_SIZE_FOR_EXEMPTED_HEADERS,
'content-security-policy-report-only':
_MAX_HEADER_SIZE_FOR_EXEMPTED_HEADERS,
'set-cookie': _MAX_COOKIE_LENGTH,
'set-cookie2': _MAX_COOKIE_LENGTH,
'location': _MAX_URL_LENGTH}
MAX_LEN = 500
class KeyValidator(validation.Validator):
"""Ensures that keys in `HttpHeadersDict` are valid.
`HttpHeadersDict` contains a list of headers. An instance is used as
`HttpHeadersDict`'s `KEY_VALIDATOR`.
"""
def Validate(self, name, unused_key=None):
"""Returns an argument, or raises an exception if the argument is invalid.
HTTP header names are defined by `RFC 2616, section 4.2`_.
Args:
name: HTTP header field value.
unused_key: Unused.
Returns:
name argument, unchanged.
Raises:
appinfo_errors.InvalidHttpHeaderName: An argument cannot be used as an
HTTP header name.
.. _RFC 2616, section 4.2:
https://www.ietf.org/rfc/rfc2616.txt
"""
original_name = name
if isinstance(name, unicode):
try:
name = name.encode('ascii')
except UnicodeEncodeError:
raise appinfo_errors.InvalidHttpHeaderName(
'HTTP header values must not contain non-ASCII data')
name = name.lower()
if not _HTTP_TOKEN_RE.match(name):
raise appinfo_errors.InvalidHttpHeaderName(
'An HTTP header must be a non-empty RFC 2616 token.')
if name in _HTTP_REQUEST_HEADERS:
raise appinfo_errors.InvalidHttpHeaderName(
'%r can only be used in HTTP requests, not responses.'
% original_name)
if name.startswith('x-appengine'):
raise appinfo_errors.InvalidHttpHeaderName(
'HTTP header names that begin with X-Appengine are reserved.')
if wsgiref.util.is_hop_by_hop(name):
raise appinfo_errors.InvalidHttpHeaderName(
'Only end-to-end headers may be used. See RFC 2616 section'
' 13.5.1.')
if name in HttpHeadersDict.DISALLOWED_HEADERS:
raise appinfo_errors.InvalidHttpHeaderName(
'%s is a disallowed header.' % name)
return original_name
class ValueValidator(validation.Validator):
"""Ensures that values in `HttpHeadersDict` are valid.
An instance is used as `HttpHeadersDict`'s `VALUE_VALIDATOR`.
"""
def Validate(self, value, key=None):
"""Returns a value, or raises an exception if the value is invalid.
According to `RFC 2616 section 4.2`_ header field values must consist "of
either *TEXT or combinations of token, separators, and quoted-string"::
TEXT = <any OCTET except CTLs, but including LWS>
Args:
value: HTTP header field value.
key: HTTP header field name.
Returns:
A value argument.
Raises:
appinfo_errors.InvalidHttpHeaderValue: An argument cannot be used as an
HTTP header value.
.. _RFC 2616, section 4.2:
https://www.ietf.org/rfc/rfc2616.txt
"""
if isinstance(value, unicode):
try:
value = value.encode('ascii')
except UnicodeEncodeError:
raise appinfo_errors.InvalidHttpHeaderValue(
'HTTP header values must not contain non-ASCII data')
key = key.lower()
printable = set(string.printable[:-5])
if not all(char in printable for char in str(value)):
raise appinfo_errors.InvalidHttpHeaderValue(
'HTTP header field values must consist of printable characters.')
HttpHeadersDict.ValueValidator.AssertHeaderNotTooLong(key, value)
return value
@staticmethod
def AssertHeaderNotTooLong(name, value):
header_length = len('%s: %s\r\n' % (name, value))
if header_length >= HttpHeadersDict.MAX_HEADER_LENGTH:
try:
max_len = HttpHeadersDict.MAX_HEADER_VALUE_LENGTHS[name]
except KeyError:
raise appinfo_errors.InvalidHttpHeaderValue(
'HTTP header (name + value) is too long.')
if len(value) > max_len:
insert = name, len(value), max_len
raise appinfo_errors.InvalidHttpHeaderValue(
              '%r header value has length %d, which exceeds the maximum allowed,'
' %d.' % insert)
KEY_VALIDATOR = KeyValidator()
VALUE_VALIDATOR = ValueValidator()
def Get(self, header_name):
"""Gets a header value.
Args:
header_name: HTTP header name to look for.
Returns:
A header value that corresponds to `header_name`. If more than one such
value is in `self`, one of the values is selected arbitrarily and
returned. The selection is not deterministic.
"""
for name in self:
if name.lower() == header_name.lower():
return self[name]
def __setitem__(self, key, value):
is_addition = self.Get(key) is None
if is_addition and len(self) >= self.MAX_LEN:
raise appinfo_errors.TooManyHttpHeaders(
'Tried to add another header when the current set of HTTP headers'
' already has the maximum allowed number of headers, %d.'
% HttpHeadersDict.MAX_LEN)
super(HttpHeadersDict, self).__setitem__(key, value)
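# Illustrative usage sketch (not part of the original SDK source): an
# HttpHeadersDict is normally populated from a handler's `http_headers`
# stanza; keys must be end-to-end RFC 2616 tokens and lookups via Get() are
# case-insensitive, e.g.
#   headers = HttpHeadersDict()
#   headers['X-Foo'] = 'bar'   # checked by KEY_VALIDATOR / VALUE_VALIDATOR
#   headers.Get('x-foo')       # -> 'bar'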
class URLMap(HandlerBase):
r"""Maps from URLs to handlers.
This class acts similar to a union type. Its purpose is to describe a mapping
between a set of URLs and their handlers. The handler type of a given instance
is determined by which `handler-id` attribute is used.
Every mapping can have one and only one handler type. Attempting to use more
than one `handler-id` attribute will cause an `UnknownHandlerType` to be
raised during validation. Failure to provide any `handler-id` attributes will
cause `MissingHandlerType` to be raised during validation.
The regular expression used by the `url` field will be used to match against
the entire URL path and query string of the request; therefore, partial maps
will not be matched. Specifying a `url`, such as `/admin`, is the same as
matching against the regular expression `^/admin$`. Don't start your matching
  `url` with `^` or end it with `$`. These regular expressions won't be
accepted and will raise `ValueError`.
Attributes:
login: Specifies whether a user should be logged in to access a URL.
The default value of this argument is `optional`.
secure: Sets the restriction on the protocol that can be used to serve this
URL or handler. This value can be set to `HTTP`, `HTTPS` or `either`.
url: Specifies a regular expression that is used to fully match against the
request URLs path. See the "Special cases" section of this document to
learn more.
static_files: Specifies the handler ID attribute that maps `url` to the
appropriate file. You can specify regular expression backreferences to
the string matched to `url`.
upload: Specifies the regular expression that is used by the application
configuration program to determine which files are uploaded as blobs.
Because it is difficult to determine this information using just the
`url` and `static_files` arguments, this attribute must be included.
This attribute is required when you define a `static_files` mapping. A
matching file name must fully match against the `upload` regular
expression, similar to how `url` is matched against the request path. Do
not begin the `upload` argument with the `^` character or end it with
the `$` character.
static_dir: Specifies the handler ID that maps the provided `url` to a
sub-directory within the application directory. See "Special cases."
mime_type: When used with `static_files` and `static_dir`, this argument
specifies that the MIME type of the files that are served from those
directories must be overridden with this value.
script: Specifies the handler ID that maps URLs to a script handler within
the application directory that will run using CGI.
position: Used in `AppInclude` objects to specify whether a handler should
be inserted at the beginning of the primary handler list or at the end.
If `tail` is specified, the handler is inserted at the end; otherwise,
the handler is inserted at the beginning. This behavior implies that
`head` is the effective default.
expiration: When used with static files and directories, this argument
specifies the time delta to use for cache expiration. This argument
should use the following format: `4d 5h 30m 15s`, where each letter
signifies days, hours, minutes, and seconds, respectively. The `s` for
"seconds" can be omitted. Only one amount must be specified, though
combining multiple amounts is optional. The following list contains
examples of values that are acceptable: `10`, `1d 6h`, `1h 30m`,
`7d 7d 7d`, `5m 30`.
api_endpoint: Specifies the handler ID that identifies an endpoint as an API
endpoint. Calls that terminate here will be handled by the API serving
framework.
Special cases:
When defining a `static_dir` handler, do not use a regular expression in the
`url` attribute. Both the `url` and `static_dir` attributes are
automatically mapped to these equivalents::
<url>/(.*)
<static_dir>/\1
For example, this declaration...::
url: /images
static_dir: images_folder
...is equivalent to this `static_files` declaration::
url: /images/(.*)
static_files: images_folder/\1
upload: images_folder/(.*)
"""
ATTRIBUTES = {
HANDLER_STATIC_FILES: validation.Optional(_FILES_REGEX),
UPLOAD: validation.Optional(_FILES_REGEX),
APPLICATION_READABLE: validation.Optional(bool),
HANDLER_STATIC_DIR: validation.Optional(_FILES_REGEX),
MIME_TYPE: validation.Optional(str),
EXPIRATION: validation.Optional(_EXPIRATION_REGEX),
REQUIRE_MATCHING_FILE: validation.Optional(bool),
HTTP_HEADERS: validation.Optional(HttpHeadersDict),
POSITION: validation.Optional(validation.Options(POSITION_HEAD,
POSITION_TAIL)),
HANDLER_API_ENDPOINT: validation.Optional(validation.Options(
(ON, ON_ALIASES),
(OFF, OFF_ALIASES))),
REDIRECT_HTTP_RESPONSE_CODE: validation.Optional(validation.Options(
'301', '302', '303', '307')),
}
ATTRIBUTES.update(HandlerBase.ATTRIBUTES)
COMMON_FIELDS = set([
URL, LOGIN, AUTH_FAIL_ACTION, SECURE, REDIRECT_HTTP_RESPONSE_CODE])
ALLOWED_FIELDS = {
HANDLER_STATIC_FILES: (MIME_TYPE, UPLOAD, EXPIRATION,
REQUIRE_MATCHING_FILE, HTTP_HEADERS,
APPLICATION_READABLE),
HANDLER_STATIC_DIR: (MIME_TYPE, EXPIRATION, REQUIRE_MATCHING_FILE,
HTTP_HEADERS, APPLICATION_READABLE),
HANDLER_SCRIPT: (POSITION),
HANDLER_API_ENDPOINT: (POSITION, SCRIPT),
}
def GetHandler(self):
"""Gets the handler for a mapping.
Returns:
The value of the handler, as determined by the handler ID attribute.
"""
return getattr(self, self.GetHandlerType())
def GetHandlerType(self):
"""Gets the handler type of a mapping.
Returns:
The handler type as determined by which handler ID attribute is set.
Raises:
UnknownHandlerType: If none of the handler ID attributes are set.
UnexpectedHandlerAttribute: If an unexpected attribute is set for the
discovered handler type.
HandlerTypeMissingAttribute: If the handler is missing a required
attribute for its handler type.
MissingHandlerAttribute: If a URL handler is missing an attribute.
"""
if getattr(self, HANDLER_API_ENDPOINT) is not None:
mapping_type = HANDLER_API_ENDPOINT
else:
for id_field in URLMap.ALLOWED_FIELDS.iterkeys():
if getattr(self, id_field) is not None:
mapping_type = id_field
break
else:
raise appinfo_errors.UnknownHandlerType(
'Unknown url handler type.\n%s' % str(self))
allowed_fields = URLMap.ALLOWED_FIELDS[mapping_type]
for attribute in self.ATTRIBUTES.iterkeys():
if (getattr(self, attribute) is not None and
not (attribute in allowed_fields or
attribute in URLMap.COMMON_FIELDS or
attribute == mapping_type)):
raise appinfo_errors.UnexpectedHandlerAttribute(
'Unexpected attribute "%s" for mapping type %s.' %
(attribute, mapping_type))
if mapping_type == HANDLER_STATIC_FILES and not self.upload:
raise appinfo_errors.MissingHandlerAttribute(
'Missing "%s" attribute for URL "%s".' % (UPLOAD, self.url))
return mapping_type
def CheckInitialized(self):
"""Adds additional checking to make sure a handler has correct fields.
In addition to normal `ValidatedCheck`, this method calls `GetHandlerType`,
which validates whether all of the handler fields are configured properly.
Raises:
UnknownHandlerType: If none of the handler ID attributes are set.
UnexpectedHandlerAttribute: If an unexpected attribute is set for the
discovered handler type.
HandlerTypeMissingAttribute: If the handler is missing a required
attribute for its handler type.
ContentTypeSpecifiedMultipleTimes: If `mime_type` is inconsistent with
`http_headers`.
"""
super(URLMap, self).CheckInitialized()
if self.GetHandlerType() in (STATIC_DIR, STATIC_FILES):
self.AssertUniqueContentType()
def AssertUniqueContentType(self):
"""Makes sure that `self.http_headers` is consistent with `self.mime_type`.
This method assumes that `self` is a static handler, either
`self.static_dir` or `self.static_files`. You cannot specify `None`.
Raises:
appinfo_errors.ContentTypeSpecifiedMultipleTimes: If `self.http_headers`
contains a `Content-Type` header, and `self.mime_type` is set. For
example, the following configuration would be rejected::
handlers:
- url: /static
static_dir: static
mime_type: text/html
http_headers:
content-type: text/html
As this example shows, a configuration will be rejected when
`http_headers` and `mime_type` specify a content type, even when they
specify the same content type.
"""
used_both_fields = self.mime_type and self.http_headers
if not used_both_fields:
return
content_type = self.http_headers.Get('Content-Type')
if content_type is not None:
raise appinfo_errors.ContentTypeSpecifiedMultipleTimes(
'http_header specified a Content-Type header of %r in a handler that'
' also specified a mime_type of %r.' % (content_type, self.mime_type))
def FixSecureDefaults(self):
"""Forces omitted `secure` handler fields to be set to 'secure: optional'.
The effect is that `handler.secure` is never equal to the nominal default.
"""
if self.secure == SECURE_DEFAULT:
self.secure = SECURE_HTTP_OR_HTTPS
def WarnReservedURLs(self):
"""Generates a warning for reserved URLs.
See the `version element documentation`_ to learn which URLs are reserved.
.. _`version element documentation`:
https://cloud.google.com/appengine/docs/python/config/appref#syntax
"""
if self.url == '/form':
logging.warning(
'The URL path "/form" is reserved and will not be matched.')
def ErrorOnPositionForAppInfo(self):
"""Raises an error if position is specified outside of AppInclude objects.
Raises:
PositionUsedInAppYamlHandler: If the `position` attribute is specified for
an `app.yaml` file instead of an `include.yaml` file.
"""
if self.position:
raise appinfo_errors.PositionUsedInAppYamlHandler(
'The position attribute was specified for this handler, but this is '
'an app.yaml file. Position attribute is only valid for '
'include.yaml files.')
class AdminConsolePage(validation.Validated):
"""Class representing the admin console page in an `AdminConsole` object."""
ATTRIBUTES = {
URL: _URL_REGEX,
NAME: _PAGE_NAME_REGEX,
}
class AdminConsole(validation.Validated):
"""Class representing an admin console directives in application info."""
ATTRIBUTES = {
PAGES: validation.Optional(validation.Repeated(AdminConsolePage)),
}
@classmethod
def Merge(cls, adminconsole_one, adminconsole_two):
"""Returns the result of merging two `AdminConsole` objects."""
if not adminconsole_one or not adminconsole_two:
return adminconsole_one or adminconsole_two
if adminconsole_one.pages:
if adminconsole_two.pages:
adminconsole_one.pages.extend(adminconsole_two.pages)
else:
adminconsole_one.pages = adminconsole_two.pages
return adminconsole_one
class ErrorHandlers(validation.Validated):
"""Class representing error handler directives in application info."""
ATTRIBUTES = {
ERROR_CODE: validation.Optional(_ERROR_CODE_REGEX),
FILE: _FILES_REGEX,
MIME_TYPE: validation.Optional(str),
}
class BuiltinHandler(validation.Validated):
"""Class representing built-in handler directives in application info.
This class permits arbitrary keys, but their values must be described by the
`validation.Options` object that is returned by `ATTRIBUTES`.
"""
class DynamicAttributes(dict):
"""Provides a dictionary object that will always claim to have a key.
This dictionary returns a fixed value for any `get` operation. The fixed
value that you pass in as a constructor parameter should be a
`validation.Validated` object.
"""
def __init__(self, return_value, **parameters):
self.__return_value = return_value
dict.__init__(self, parameters)
def __contains__(self, _):
return True
def __getitem__(self, _):
return self.__return_value
ATTRIBUTES = DynamicAttributes(
validation.Optional(validation.Options((ON, ON_ALIASES),
(OFF, OFF_ALIASES))))
def __init__(self, **attributes):
"""Ensures all BuiltinHandler objects at least use the `default` attribute.
Args:
**attributes: The attributes that you want to use.
"""
self.builtin_name = ''
super(BuiltinHandler, self).__init__(**attributes)
def __setattr__(self, key, value):
"""Allows `ATTRIBUTES.iteritems()` to return set of items that have values.
Whenever `validate` calls `iteritems()`, it is always called on
`ATTRIBUTES`, not on `__dict__`, so this override is important to ensure
that functions such as `ToYAML()` return the correct set of keys.
Args:
key: The key for the `iteritem` that you want to set.
value: The value for the `iteritem` that you want to set.
Raises:
MultipleBuiltinsSpecified: If more than one built-in is defined in a list
element.
"""
if key == 'builtin_name':
object.__setattr__(self, key, value)
elif not self.builtin_name:
self.ATTRIBUTES[key] = ''
self.builtin_name = key
super(BuiltinHandler, self).__setattr__(key, value)
else:
raise appinfo_errors.MultipleBuiltinsSpecified(
'More than one builtin defined in list element. Each new builtin '
'should be prefixed by "-".')
def __getattr__(self, key):
if key.startswith('_'):
raise AttributeError
return None
def GetUnnormalized(self, key):
try:
return super(BuiltinHandler, self).GetUnnormalized(key)
except AttributeError:
return getattr(self, key)
def ToDict(self):
"""Converts a `BuiltinHander` object to a dictionary.
Returns:
A dictionary in `{builtin_handler_name: on/off}` form
"""
return {self.builtin_name: getattr(self, self.builtin_name)}
@classmethod
def IsDefined(cls, builtins_list, builtin_name):
"""Finds if a builtin is defined in a given list of builtin handler objects.
Args:
builtins_list: A list of `BuiltinHandler` objects, typically
`yaml.builtins`.
builtin_name: The name of the built-in that you want to determine whether
it is defined.
Returns:
`True` if `builtin_name` is defined by a member of `builtins_list`; all
other results return `False`.
"""
for b in builtins_list:
if b.builtin_name == builtin_name:
return True
return False
@classmethod
def ListToTuples(cls, builtins_list):
"""Converts a list of `BuiltinHandler` objects.
Args:
builtins_list: A list of `BuildinHandler` objects to convert to tuples.
Returns:
A list of `(name, status)` that is derived from the `BuiltinHandler`
objects.
"""
return [(b.builtin_name, getattr(b, b.builtin_name)) for b in builtins_list]
@classmethod
def Validate(cls, builtins_list, runtime=None):
"""Verifies that all `BuiltinHandler` objects are valid and not repeated.
Args:
builtins_list: A list of `BuiltinHandler` objects to validate.
runtime: If you specify this argument, warnings are generated for
built-ins that have been deprecated in the given runtime.
Raises:
InvalidBuiltinFormat: If the name of a `BuiltinHandler` object cannot be
determined.
DuplicateBuiltinsSpecified: If a `BuiltinHandler` name is used more than
once in the list.
"""
seen = set()
for b in builtins_list:
if not b.builtin_name:
raise appinfo_errors.InvalidBuiltinFormat(
'Name of builtin for list object %s could not be determined.'
% b)
if b.builtin_name in seen:
raise appinfo_errors.DuplicateBuiltinsSpecified(
'Builtin %s was specified more than once in one yaml file.'
% b.builtin_name)
if b.builtin_name == 'datastore_admin' and runtime == 'python':
logging.warning(
'The datastore_admin builtin is deprecated. You can find '
'information on how to enable it through the Administrative '
'Console here: '
'http://developers.google.com/appengine/docs/adminconsole/'
'datastoreadmin.html')
elif b.builtin_name == 'mapreduce' and runtime == 'python':
logging.warning(
'The mapreduce builtin is deprecated. You can find more '
'information on how to configure and use it here: '
'http://developers.google.com/appengine/docs/python/dataprocessing/'
'overview.html')
seen.add(b.builtin_name)
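# Illustrative note (not in the original file): in app.yaml each list element
# under `builtins` names exactly one built-in, which is why a BuiltinHandler
# carries a single dynamic attribute, e.g.
#   builtins:
#   - remote_api: on
#   - appstats: off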
class ApiConfigHandler(HandlerBase):
"""Class representing `api_config` handler directives in application info."""
ATTRIBUTES = HandlerBase.ATTRIBUTES
ATTRIBUTES.update({
URL: validation.Regex(_URL_REGEX),
HANDLER_SCRIPT: validation.Regex(_FILES_REGEX)
})
class Library(validation.Validated):
"""Class representing the configuration of a single library."""
ATTRIBUTES = {'name': validation.Type(str),
'version': validation.Type(str)}
def CheckInitialized(self):
"""Determines if the library configuration is not valid.
Raises:
appinfo_errors.InvalidLibraryName: If the specified library is not
supported.
appinfo_errors.InvalidLibraryVersion: If the specified library version is
not supported.
"""
super(Library, self).CheckInitialized()
if self.name not in _NAME_TO_SUPPORTED_LIBRARY:
raise appinfo_errors.InvalidLibraryName(
'the library "%s" is not supported' % self.name)
supported_library = _NAME_TO_SUPPORTED_LIBRARY[self.name]
if self.version == 'latest':
self.version = supported_library.latest_version
elif self.version not in supported_library.supported_versions:
raise appinfo_errors.InvalidLibraryVersion(
('%s version "%s" is not supported, ' + _USE_VERSION_FORMAT) % (
self.name,
self.version,
'", "'.join(supported_library.non_deprecated_versions)))
elif self.version in supported_library.deprecated_versions:
use_vers = '", "'.join(supported_library.non_deprecated_versions)
logging.warning(
'%s version "%s" is deprecated, ' + _USE_VERSION_FORMAT,
self.name,
self.version,
use_vers)
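# Illustrative note (not in the original file): a stanza such as
#   libraries:
#   - name: webapp2
#     version: latest
# is validated here; 'latest' is rewritten to the newest supported version,
# and unknown names or versions raise the errors documented above.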
class CpuUtilization(validation.Validated):
"""Class representing the configuration of VM CPU utilization."""
ATTRIBUTES = {
CPU_UTILIZATION_UTILIZATION: validation.Optional(
validation.Range(1e-6, 1.0, float)),
CPU_UTILIZATION_AGGREGATION_WINDOW_LENGTH_SEC: validation.Optional(
validation.Range(1, sys.maxint)),
}
class CustomMetric(validation.Validated):
"""Class representing CustomMetrics in AppInfoExternal."""
ATTRIBUTES = {
METRIC_NAME: validation.Regex(_NON_WHITE_SPACE_REGEX),
TARGET_TYPE: validation.Regex(TARGET_TYPE_REGEX),
CUSTOM_METRIC_UTILIZATION: validation.Optional(validation.TYPE_FLOAT),
SINGLE_INSTANCE_ASSIGNMENT: validation.Optional(validation.TYPE_FLOAT),
FILTER: validation.Optional(validation.TYPE_STR),
}
def CheckInitialized(self):
"""Determines if the CustomMetric is not valid.
Raises:
appinfo_errors.TooManyAutoscalingUtilizationTargetsError: If too many
scaling targets are set.
appinfo_errors.NotEnoughAutoscalingUtilizationTargetsError: If no scaling
targets are set.
"""
super(CustomMetric, self).CheckInitialized()
if bool(self.target_utilization) and bool(self.single_instance_assignment):
raise appinfo_errors.TooManyAutoscalingUtilizationTargetsError(
("There may be only one of '%s' or '%s'." % CUSTOM_METRIC_UTILIZATION,
SINGLE_INSTANCE_ASSIGNMENT))
elif not (bool(self.target_utilization) or
bool(self.single_instance_assignment)):
raise appinfo_errors.NotEnoughAutoscalingUtilizationTargetsError(
("There must be one of '%s' or '%s'." % CUSTOM_METRIC_UTILIZATION,
SINGLE_INSTANCE_ASSIGNMENT))
class EndpointsApiService(validation.Validated):
"""Class representing EndpointsApiService in AppInfoExternal."""
ATTRIBUTES = {
ENDPOINTS_NAME:
validation.Regex(_NON_WHITE_SPACE_REGEX),
ROLLOUT_STRATEGY:
validation.Optional(
validation.Options(ROLLOUT_STRATEGY_FIXED,
ROLLOUT_STRATEGY_MANAGED)),
CONFIG_ID:
validation.Optional(_NON_WHITE_SPACE_REGEX),
TRACE_SAMPLING:
validation.Optional(validation.TYPE_BOOL),
}
def CheckInitialized(self):
"""Determines if the Endpoints API Service is not valid.
Raises:
appinfo_errors.MissingEndpointsConfigId: If the config id is missing when
the rollout strategy is unspecified or set to "fixed".
appinfo_errors.UnexpectedEndpointsConfigId: If the config id is set when
the rollout strategy is "managed".
"""
super(EndpointsApiService, self).CheckInitialized()
if (self.rollout_strategy != ROLLOUT_STRATEGY_MANAGED and
self.config_id is None):
raise appinfo_errors.MissingEndpointsConfigId(
'config_id must be specified when rollout_strategy is unspecified or'
' set to "fixed"')
elif (self.rollout_strategy == ROLLOUT_STRATEGY_MANAGED and
self.config_id is not None):
raise appinfo_errors.UnexpectedEndpointsConfigId(
'config_id is forbidden when rollout_strategy is set to "managed"')
class AutomaticScaling(validation.Validated):
"""Class representing automatic scaling settings in AppInfoExternal."""
ATTRIBUTES = {
MINIMUM_IDLE_INSTANCES:
validation.Optional(_IDLE_INSTANCES_REGEX),
MAXIMUM_IDLE_INSTANCES:
validation.Optional(_IDLE_INSTANCES_REGEX),
MINIMUM_PENDING_LATENCY:
validation.Optional(_PENDING_LATENCY_REGEX),
MAXIMUM_PENDING_LATENCY:
validation.Optional(_PENDING_LATENCY_REGEX),
MAXIMUM_CONCURRENT_REQUEST:
validation.Optional(_CONCURRENT_REQUESTS_REGEX),
MIN_NUM_INSTANCES:
validation.Optional(validation.Range(1, sys.maxint)),
MAX_NUM_INSTANCES:
validation.Optional(validation.Range(1, sys.maxint)),
COOL_DOWN_PERIOD_SEC:
validation.Optional(validation.Range(60, sys.maxint, int)),
CPU_UTILIZATION:
validation.Optional(CpuUtilization),
STANDARD_MAX_INSTANCES:
validation.Optional(validation.TYPE_INT),
STANDARD_MIN_INSTANCES:
validation.Optional(validation.TYPE_INT),
STANDARD_TARGET_CPU_UTILIZATION:
validation.Optional(validation.TYPE_FLOAT),
STANDARD_TARGET_THROUGHPUT_UTILIZATION:
validation.Optional(validation.TYPE_FLOAT),
TARGET_NETWORK_SENT_BYTES_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_NETWORK_SENT_PACKETS_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_NETWORK_RECEIVED_BYTES_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_NETWORK_RECEIVED_PACKETS_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_DISK_WRITE_BYTES_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_DISK_WRITE_OPS_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_DISK_READ_BYTES_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_DISK_READ_OPS_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_REQUEST_COUNT_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_CONCURRENT_REQUESTS:
validation.Optional(validation.Range(1, sys.maxint)),
CUSTOM_METRICS: validation.Optional(validation.Repeated(CustomMetric)),
}
class ManualScaling(validation.Validated):
"""Class representing manual scaling settings in AppInfoExternal."""
ATTRIBUTES = {
INSTANCES: validation.Regex(_INSTANCES_REGEX),
}
class BasicScaling(validation.Validated):
"""Class representing basic scaling settings in AppInfoExternal."""
ATTRIBUTES = {
MAX_INSTANCES: validation.Regex(_INSTANCES_REGEX),
IDLE_TIMEOUT: validation.Optional(_IDLE_TIMEOUT_REGEX),
}
class RuntimeConfig(validation.ValidatedDict):
"""Class for "vanilla" runtime configuration.
Fields used vary by runtime, so validation is delegated to the per-runtime
build processes.
These are intended to be used during Dockerfile generation, not after VM boot.
"""
KEY_VALIDATOR = validation.Regex('[a-zA-Z_][a-zA-Z0-9_]*')
VALUE_VALIDATOR = str
class VmSettings(validation.ValidatedDict):
"""Class for VM settings.
The settings are not further validated here. The settings are validated on
the server side.
"""
KEY_VALIDATOR = validation.Regex('[a-zA-Z_][a-zA-Z0-9_]*')
VALUE_VALIDATOR = str
@classmethod
def Merge(cls, vm_settings_one, vm_settings_two):
"""Merges two `VmSettings` instances.
If a variable is specified by both instances, the value from
`vm_settings_one` is used.
Args:
vm_settings_one: The first `VmSettings` instance, or `None`.
vm_settings_two: The second `VmSettings` instance, or `None`.
Returns:
The merged `VmSettings` instance, or `None` if both input instances are
`None` or empty.
"""
result_vm_settings = (vm_settings_two or {}).copy()
result_vm_settings.update(vm_settings_one or {})
return VmSettings(**result_vm_settings) if result_vm_settings else None
class BetaSettings(VmSettings):
"""Class for Beta (internal or unreleased) settings.
This class is meant to replace `VmSettings` eventually.
Note:
All new beta settings must be registered in `shared_constants.py`.
These settings are not validated further here. The settings are validated on
the server side.
"""
@classmethod
def Merge(cls, beta_settings_one, beta_settings_two):
"""Merges two `BetaSettings` instances.
Args:
beta_settings_one: The first `BetaSettings` instance, or `None`.
beta_settings_two: The second `BetaSettings` instance, or `None`.
Returns:
The merged `BetaSettings` instance, or `None` if both input instances are
`None` or empty.
"""
merged = VmSettings.Merge(beta_settings_one, beta_settings_two)
return BetaSettings(**merged.ToDict()) if merged else None
class EnvironmentVariables(validation.ValidatedDict):
"""Class representing a mapping of environment variable key/value pairs."""
KEY_VALIDATOR = validation.Regex('[a-zA-Z_][a-zA-Z0-9_]*')
VALUE_VALIDATOR = str
@classmethod
def Merge(cls, env_variables_one, env_variables_two):
"""Merges two `EnvironmentVariables` instances.
If a variable is specified by both instances, the value from
`env_variables_two` is used.
Args:
env_variables_one: The first `EnvironmentVariables` instance or `None`.
env_variables_two: The second `EnvironmentVariables` instance or `None`.
Returns:
The merged `EnvironmentVariables` instance, or `None` if both input
instances are `None` or empty.
"""
result_env_variables = (env_variables_one or {}).copy()
result_env_variables.update(env_variables_two or {})
return (EnvironmentVariables(**result_env_variables)
if result_env_variables else None)
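# Illustrative example (not in the original file): merging
# EnvironmentVariables(A='1', B='2') with EnvironmentVariables(B='3') yields
# {'A': '1', 'B': '3'} -- the second instance wins, which is the opposite
# precedence of VmSettings.Merge above.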
def ValidateSourceReference(ref):
"""Determines if a source reference is valid.
Args:
ref: A source reference in the following format:
`[repository_uri#]revision`.
Raises:
ValidationError: If the reference is malformed.
"""
repo_revision = ref.split('#', 1)
revision_id = repo_revision[-1]
if not re.match(SOURCE_REVISION_RE_STRING, revision_id):
raise validation.ValidationError('Bad revision identifier: %s' %
revision_id)
if len(repo_revision) == 2:
uri = repo_revision[0]
if not re.match(SOURCE_REPO_RE_STRING, uri):
raise validation.ValidationError('Bad repository URI: %s' % uri)
def ValidateCombinedSourceReferencesString(source_refs):
"""Determines if `source_refs` contains a valid list of source references.
Args:
source_refs: A multi-line string containing one source reference per line.
Raises:
ValidationError: If the reference is malformed.
"""
if len(source_refs) > SOURCE_REFERENCES_MAX_SIZE:
raise validation.ValidationError(
'Total source reference(s) size exceeds the limit: %d > %d' % (
len(source_refs), SOURCE_REFERENCES_MAX_SIZE))
for ref in source_refs.splitlines():
ValidateSourceReference(ref.strip())
class HealthCheck(validation.Validated):
"""Class representing the health check configuration."""
ATTRIBUTES = {
ENABLE_HEALTH_CHECK: validation.Optional(validation.TYPE_BOOL),
CHECK_INTERVAL_SEC: validation.Optional(validation.Range(0, sys.maxint)),
TIMEOUT_SEC: validation.Optional(validation.Range(0, sys.maxint)),
UNHEALTHY_THRESHOLD: validation.Optional(validation.Range(0, sys.maxint)),
HEALTHY_THRESHOLD: validation.Optional(validation.Range(0, sys.maxint)),
RESTART_THRESHOLD: validation.Optional(validation.Range(0, sys.maxint)),
HOST: validation.Optional(validation.TYPE_STR)}
class LivenessCheck(validation.Validated):
"""Class representing the liveness check configuration."""
ATTRIBUTES = {
CHECK_INTERVAL_SEC: validation.Optional(validation.Range(0, sys.maxint)),
TIMEOUT_SEC: validation.Optional(validation.Range(0, sys.maxint)),
FAILURE_THRESHOLD: validation.Optional(validation.Range(0, sys.maxint)),
SUCCESS_THRESHOLD: validation.Optional(validation.Range(0, sys.maxint)),
INITIAL_DELAY_SEC: validation.Optional(validation.Range(0, sys.maxint)),
PATH: validation.Optional(validation.TYPE_STR),
HOST: validation.Optional(validation.TYPE_STR)}
class ReadinessCheck(validation.Validated):
"""Class representing the readiness check configuration."""
ATTRIBUTES = {
CHECK_INTERVAL_SEC: validation.Optional(validation.Range(0, sys.maxint)),
TIMEOUT_SEC: validation.Optional(validation.Range(0, sys.maxint)),
APP_START_TIMEOUT_SEC: validation.Optional(
validation.Range(0, sys.maxint)),
FAILURE_THRESHOLD: validation.Optional(validation.Range(0, sys.maxint)),
SUCCESS_THRESHOLD: validation.Optional(validation.Range(0, sys.maxint)),
PATH: validation.Optional(validation.TYPE_STR),
HOST: validation.Optional(validation.TYPE_STR)}
class VmHealthCheck(HealthCheck):
"""Class representing the configuration of the VM health check.
Note:
This class is deprecated and will be removed in a future release. Use
`HealthCheck` instead.
"""
pass
class Volume(validation.Validated):
"""Class representing the configuration of a volume."""
ATTRIBUTES = {
VOLUME_NAME: validation.TYPE_STR,
SIZE_GB: validation.TYPE_FLOAT,
VOLUME_TYPE: validation.TYPE_STR,
}
class Resources(validation.Validated):
"""Class representing the configuration of VM resources."""
ATTRIBUTES = {
CPU: validation.Optional(validation.TYPE_FLOAT),
MEMORY_GB: validation.Optional(validation.TYPE_FLOAT),
DISK_SIZE_GB: validation.Optional(validation.TYPE_INT),
VOLUMES: validation.Optional(validation.Repeated(Volume))
}
class Network(validation.Validated):
"""Class representing the VM network configuration."""
ATTRIBUTES = {
FORWARDED_PORTS: validation.Optional(validation.Repeated(validation.Regex(
'[0-9]+(:[0-9]+)?(/(udp|tcp))?'))),
INSTANCE_TAG: validation.Optional(validation.Regex(
GCE_RESOURCE_NAME_REGEX)),
NETWORK_NAME: validation.Optional(validation.Regex(
GCE_RESOURCE_NAME_REGEX)),
SUBNETWORK_NAME: validation.Optional(validation.Regex(
GCE_RESOURCE_NAME_REGEX)),
SESSION_AFFINITY:
validation.Optional(bool)
}
class AppInclude(validation.Validated):
"""Class representing the contents of an included `app.yaml` file.
This class is used for both `builtins` and `includes` directives.
"""
ATTRIBUTES = {
BUILTINS: validation.Optional(validation.Repeated(BuiltinHandler)),
INCLUDES: validation.Optional(validation.Type(list)),
HANDLERS: validation.Optional(validation.Repeated(URLMap), default=[]),
ADMIN_CONSOLE: validation.Optional(AdminConsole),
MANUAL_SCALING: validation.Optional(ManualScaling),
VM: validation.Optional(bool),
VM_SETTINGS: validation.Optional(VmSettings),
BETA_SETTINGS: validation.Optional(BetaSettings),
ENV_VARIABLES: validation.Optional(EnvironmentVariables),
SKIP_FILES: validation.RegexStr(default=SKIP_NO_FILES),
}
@classmethod
def MergeManualScaling(cls, appinclude_one, appinclude_two):
"""Takes the greater of `<manual_scaling.instances>` from the arguments.
`appinclude_one` is mutated to be the merged result in this process.
Also, this function must be updated if `ManualScaling` gets additional
fields.
Args:
appinclude_one: The first object to merge. The object must have a
`manual_scaling` field that contains a `ManualScaling()`.
appinclude_two: The second object to merge. The object must have a
`manual_scaling` field that contains a `ManualScaling()`.
Returns:
An object that is the result of merging
`appinclude_one.manual_scaling.instances` and
`appinclude_two.manual_scaling.instances`; this is returned as a revised
`appinclude_one` object after the mutations are complete.
"""
def _Instances(appinclude):
"""Determines the number of `manual_scaling.instances` sets.
Args:
appinclude: The include for which you want to determine the number of
`manual_scaling.instances` sets.
Returns:
The number of instances as an integer, or `None`.
"""
if appinclude.manual_scaling:
if appinclude.manual_scaling.instances:
return int(appinclude.manual_scaling.instances)
return None
instances = max(_Instances(appinclude_one), _Instances(appinclude_two))
if instances is not None:
appinclude_one.manual_scaling = ManualScaling(instances=str(instances))
return appinclude_one
@classmethod
def _CommonMergeOps(cls, one, two):
"""This function performs common merge operations.
Args:
one: The first object that you want to merge.
two: The second object that you want to merge.
Returns:
An updated `one` object containing all merged data.
"""
AppInclude.MergeManualScaling(one, two)
one.admin_console = AdminConsole.Merge(one.admin_console,
two.admin_console)
one.vm = two.vm or one.vm
one.vm_settings = VmSettings.Merge(one.vm_settings,
two.vm_settings)
if hasattr(one, 'beta_settings'):
one.beta_settings = BetaSettings.Merge(one.beta_settings,
two.beta_settings)
one.env_variables = EnvironmentVariables.Merge(one.env_variables,
two.env_variables)
one.skip_files = cls.MergeSkipFiles(one.skip_files, two.skip_files)
return one
@classmethod
def MergeAppYamlAppInclude(cls, appyaml, appinclude):
"""Merges an `app.yaml` file with referenced builtins/includes.
Args:
appyaml: The `app.yaml` file that you want to update with `appinclude`.
appinclude: The includes that you want to merge into `appyaml`.
Returns:
An updated `app.yaml` file that includes the directives you specified in
`appinclude`.
"""
if not appinclude:
return appyaml
if appinclude.handlers:
tail = appyaml.handlers or []
appyaml.handlers = []
for h in appinclude.handlers:
if not h.position or h.position == 'head':
appyaml.handlers.append(h)
else:
tail.append(h)
h.position = None
appyaml.handlers.extend(tail)
appyaml = cls._CommonMergeOps(appyaml, appinclude)
appyaml.NormalizeVmSettings()
return appyaml
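  # Illustrative note (not in the original file): included handlers keep their
  # relative order; those marked `position: tail` end up after the app.yaml
  # handlers, while all others are placed before them.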
@classmethod
def MergeAppIncludes(cls, appinclude_one, appinclude_two):
"""Merges the non-referential state of the provided `AppInclude`.
That is, `builtins` and `includes` directives are not preserved, but any
static objects are copied into an aggregate `AppInclude` object that
preserves the directives of both provided `AppInclude` objects.
`appinclude_one` is updated to be the merged result in this process.
Args:
appinclude_one: First `AppInclude` to merge.
appinclude_two: Second `AppInclude` to merge.
Returns:
`AppInclude` object that is the result of merging the static directives of
`appinclude_one` and `appinclude_two`. An updated version of
`appinclude_one` is returned.
"""
if not appinclude_one or not appinclude_two:
return appinclude_one or appinclude_two
if appinclude_one.handlers:
if appinclude_two.handlers:
appinclude_one.handlers.extend(appinclude_two.handlers)
else:
appinclude_one.handlers = appinclude_two.handlers
return cls._CommonMergeOps(appinclude_one, appinclude_two)
@staticmethod
def MergeSkipFiles(skip_files_one, skip_files_two):
"""Merges two `skip_files` directives.
Args:
skip_files_one: The first `skip_files` element that you want to merge.
skip_files_two: The second `skip_files` element that you want to merge.
Returns:
A list of regular expressions that are merged.
"""
if skip_files_one == SKIP_NO_FILES:
return skip_files_two
if skip_files_two == SKIP_NO_FILES:
return skip_files_one
return validation.RegexStr().Validate(
[skip_files_one, skip_files_two], SKIP_FILES)
class AppInfoExternal(validation.Validated):
"""Class representing users application info.
This class is passed to a `yaml_object` builder to provide the validation
for the application information file format parser.
Attributes:
application: Unique identifier for application.
version: Application's major version.
runtime: Runtime used by application.
api_version: Which version of APIs to use.
source_language: Optional specification of the source language. For example,
you could specify `php-quercus` if this is a Java app that was generated
from PHP source using Quercus.
handlers: List of URL handlers.
default_expiration: Default time delta to use for cache expiration for
all static files, unless they have their own specific `expiration` set.
See the documentation for the `URLMap.expiration` field for more
information.
skip_files: A regular expression object. Files that match this regular
expression will not be uploaded by `appcfg.py`. For example::
skip_files: |
.svn.*|
#.*#
nobuild_files: A regular expression object. Files that match this regular
expression will not be built into the app. This directive is valid for
Go only.
api_config: URL root and script or servlet path for enhanced API serving.
"""
ATTRIBUTES = {
APPLICATION: validation.Optional(APPLICATION_RE_STRING),
PROJECT: validation.Optional(APPLICATION_RE_STRING),
SERVICE: validation.Preferred(MODULE,
validation.Optional(MODULE_ID_RE_STRING)),
MODULE: validation.Deprecated(SERVICE,
validation.Optional(MODULE_ID_RE_STRING)),
VERSION: validation.Optional(MODULE_VERSION_ID_RE_STRING),
RUNTIME: validation.Optional(RUNTIME_RE_STRING),
RUNTIME_CHANNEL: validation.Optional(validation.Type(str)),
API_VERSION: validation.Optional(API_VERSION_RE_STRING),
ENV: validation.Optional(ENV_RE_STRING),
ENDPOINTS_API_SERVICE: validation.Optional(EndpointsApiService),
ENTRYPOINT: validation.Optional(
validation.Exec() if hasattr(
validation, 'Exec') else validation.Type(str)),
RUNTIME_CONFIG: validation.Optional(RuntimeConfig),
INSTANCE_CLASS: validation.Optional(validation.Type(str)),
SOURCE_LANGUAGE: validation.Optional(
validation.Regex(SOURCE_LANGUAGE_RE_STRING)),
AUTOMATIC_SCALING: validation.Optional(AutomaticScaling),
MANUAL_SCALING: validation.Optional(ManualScaling),
BASIC_SCALING: validation.Optional(BasicScaling),
VM: validation.Optional(bool),
VM_SETTINGS: validation.Optional(VmSettings),
BETA_SETTINGS: validation.Optional(BetaSettings),
VM_HEALTH_CHECK: validation.Optional(VmHealthCheck),
HEALTH_CHECK: validation.Optional(HealthCheck),
RESOURCES: validation.Optional(Resources),
LIVENESS_CHECK: validation.Optional(LivenessCheck),
READINESS_CHECK: validation.Optional(ReadinessCheck),
NETWORK: validation.Optional(Network),
ZONES: validation.Optional(validation.Repeated(validation.TYPE_STR)),
BUILTINS: validation.Optional(validation.Repeated(BuiltinHandler)),
INCLUDES: validation.Optional(validation.Type(list)),
HANDLERS: validation.Optional(validation.Repeated(URLMap), default=[]),
LIBRARIES: validation.Optional(validation.Repeated(Library)),
SERVICES: validation.Optional(validation.Repeated(
validation.Regex(_SERVICE_RE_STRING))),
DEFAULT_EXPIRATION: validation.Optional(_EXPIRATION_REGEX),
SKIP_FILES: validation.RegexStr(default=DEFAULT_SKIP_FILES),
NOBUILD_FILES: validation.RegexStr(default=DEFAULT_NOBUILD_FILES),
DERIVED_FILE_TYPE: validation.Optional(validation.Repeated(
validation.Options(JAVA_PRECOMPILED, PYTHON_PRECOMPILED))),
ADMIN_CONSOLE: validation.Optional(AdminConsole),
ERROR_HANDLERS: validation.Optional(validation.Repeated(ErrorHandlers)),
BACKENDS: validation.Optional(validation.Repeated(
backendinfo.BackendEntry)),
THREADSAFE: validation.Optional(bool),
DATASTORE_AUTO_ID_POLICY: validation.Optional(
validation.Options(DATASTORE_ID_POLICY_LEGACY,
DATASTORE_ID_POLICY_DEFAULT)),
API_CONFIG: validation.Optional(ApiConfigHandler),
CODE_LOCK: validation.Optional(bool),
ENV_VARIABLES: validation.Optional(EnvironmentVariables),
STANDARD_WEBSOCKET: validation.Optional(bool),
}
def CheckInitialized(self):
"""Performs non-regular expression-based validation.
The following are verified:
- At least one URL mapping is provided in the URL mappers.
- The number of URL mappers doesn't exceed `MAX_URL_MAPS`.
- The major version does not contain the string `-dot-`.
- If `api_endpoints` are defined, an `api_config` stanza must be
defined.
- If the `runtime` is `python27` and `threadsafe` is set, then no CGI
handlers can be used.
- The version name doesn't start with `BUILTIN_NAME_PREFIX`.
- If `redirect_http_response_code` exists, it is in the list of valid
300s.
- Module and service aren't both set. Services were formerly known as
modules.
Raises:
DuplicateLibrary: If `library_name` is specified more than once.
MissingURLMapping: If no `URLMap` object is present in the object.
TooManyURLMappings: If there are too many `URLMap` entries.
MissingApiConfig: If `api_endpoints` exists without an `api_config`.
MissingThreadsafe: If `threadsafe` is not set but the runtime requires it.
ThreadsafeWithCgiHandler: If the `runtime` is `python27`, `threadsafe` is
set and CGI handlers are specified.
TooManyScalingSettingsError: If more than one scaling settings block is
present.
RuntimeDoesNotSupportLibraries: If the libraries clause is used for a
runtime that does not support it, such as `python25`.
"""
super(AppInfoExternal, self).CheckInitialized()
if self.runtime is None and not self.IsVm():
raise appinfo_errors.MissingRuntimeError(
'You must specify a "runtime" field for non-vm applications.')
elif self.runtime is None:
self.runtime = 'custom'
if self.handlers and len(self.handlers) > MAX_URL_MAPS:
raise appinfo_errors.TooManyURLMappings(
'Found more than %d URLMap entries in application configuration' %
MAX_URL_MAPS)
vm_runtime_python27 = (
self.runtime == 'vm' and
(hasattr(self, 'vm_settings') and
self.vm_settings and
self.vm_settings.get('vm_runtime') == 'python27') or
(hasattr(self, 'beta_settings') and
self.beta_settings and
self.beta_settings.get('vm_runtime') == 'python27'))
if (self.threadsafe is None and
(self.runtime == 'python27' or vm_runtime_python27)):
raise appinfo_errors.MissingThreadsafe(
'threadsafe must be present and set to a true or false YAML value')
if self.auto_id_policy == DATASTORE_ID_POLICY_LEGACY:
datastore_auto_ids_url = ('http://developers.google.com/'
'appengine/docs/python/datastore/'
'entities#Kinds_and_Identifiers')
appcfg_auto_ids_url = ('http://developers.google.com/appengine/docs/'
'python/config/appconfig#auto_id_policy')
logging.warning(
"You have set the datastore auto_id_policy to 'legacy'. It is "
"recommended that you select 'default' instead.\n"
"Legacy auto ids are deprecated. You can continue to allocate\n"
"legacy ids manually using the allocate_ids() API functions.\n"
"For more information see:\n"
+ datastore_auto_ids_url + '\n' + appcfg_auto_ids_url + '\n')
if (hasattr(self, 'beta_settings') and self.beta_settings
and self.beta_settings.get('source_reference')):
ValidateCombinedSourceReferencesString(
self.beta_settings.get('source_reference'))
if self.libraries:
if not (vm_runtime_python27 or self.runtime == 'python27'):
raise appinfo_errors.RuntimeDoesNotSupportLibraries(
'libraries entries are only supported by the "python27" runtime')
library_names = [library.name for library in self.libraries]
for library_name in library_names:
if library_names.count(library_name) > 1:
raise appinfo_errors.DuplicateLibrary(
'Duplicate library entry for %s' % library_name)
if self.version and self.version.find(ALTERNATE_HOSTNAME_SEPARATOR) != -1:
raise validation.ValidationError(
'Version "%s" cannot contain the string "%s"' % (
self.version, ALTERNATE_HOSTNAME_SEPARATOR))
if self.version and self.version.startswith(BUILTIN_NAME_PREFIX):
raise validation.ValidationError(
('Version "%s" cannot start with "%s" because it is a '
'reserved version name prefix.') % (self.version,
BUILTIN_NAME_PREFIX))
if self.handlers:
api_endpoints = [handler.url for handler in self.handlers
if handler.GetHandlerType() == HANDLER_API_ENDPOINT]
if api_endpoints and not self.api_config:
raise appinfo_errors.MissingApiConfig(
'An api_endpoint handler was specified, but the required '
'api_config stanza was not configured.')
if self.threadsafe and self.runtime == 'python27':
for handler in self.handlers:
if (handler.script and (handler.script.endswith('.py') or
'/' in handler.script)):
raise appinfo_errors.ThreadsafeWithCgiHandler(
'threadsafe cannot be enabled with CGI handler: %s' %
handler.script)
if sum([bool(self.automatic_scaling),
bool(self.manual_scaling),
bool(self.basic_scaling)]) > 1:
raise appinfo_errors.TooManyScalingSettingsError(
"There may be only one of 'automatic_scaling', 'manual_scaling', "
"or 'basic_scaling'.")
def GetAllLibraries(self):
"""Returns a list of all `Library` instances active for this configuration.
Returns:
The list of active `Library` instances for this configuration. This
includes directly-specified libraries as well as any required
dependencies.
"""
if not self.libraries:
return []
library_names = set(library.name for library in self.libraries)
required_libraries = []
for library in self.libraries:
for required_name, required_version in REQUIRED_LIBRARIES.get(
(library.name, library.version), []):
if required_name not in library_names:
required_libraries.append(Library(name=required_name,
version=required_version))
return [Library(**library.ToDict())
for library in self.libraries + required_libraries]
def GetNormalizedLibraries(self):
"""Returns a list of normalized `Library` instances for this configuration.
Returns:
The list of active `Library` instances for this configuration. This
includes directly-specified libraries, their required dependencies, and
any libraries enabled by default. Any libraries with `latest` as their
version will be replaced with the latest available version.
"""
libraries = self.GetAllLibraries()
enabled_libraries = set(library.name for library in libraries)
for library in _SUPPORTED_LIBRARIES:
if library.default_version and library.name not in enabled_libraries:
libraries.append(Library(name=library.name,
version=library.default_version))
return libraries
def ApplyBackendSettings(self, backend_name):
"""Applies settings from the indicated backend to the `AppInfoExternal`.
Backend entries can contain directives that modify other parts of the
`app.yaml` file, such as the `start` directive, which adds a handler for the
start request. This method performs those modifications.
Args:
backend_name: The name of a backend that is defined in the `backends`
directive.
Raises:
BackendNotFound: If the indicated backend was not listed in the
`backends` directive.
DuplicateBackend: If the backend is found more than once in the `backends`
directive.
"""
if backend_name is None:
return
if self.backends is None:
raise appinfo_errors.BackendNotFound
self.version = backend_name
match = None
for backend in self.backends:
if backend.name != backend_name:
continue
if match:
raise appinfo_errors.DuplicateBackend
else:
match = backend
if match is None:
raise appinfo_errors.BackendNotFound
if match.start is None:
return
start_handler = URLMap(url=_START_PATH, script=match.start)
self.handlers.insert(0, start_handler)
def GetEffectiveRuntime(self):
"""Returns the app's runtime, resolving VMs to the underlying `vm_runtime`.
Returns:
The effective runtime: The value of `beta/vm_settings.vm_runtime` if
`runtime` is `vm`, or `runtime` otherwise.
"""
if (self.runtime == 'vm' and hasattr(self, 'vm_settings')
and self.vm_settings is not None):
return self.vm_settings.get('vm_runtime')
if (self.runtime == 'vm' and hasattr(self, 'beta_settings')
and self.beta_settings is not None):
return self.beta_settings.get('vm_runtime')
return self.runtime
def SetEffectiveRuntime(self, runtime):
"""Sets the runtime while respecting vm runtimes rules for runtime settings.
Args:
runtime: The runtime to use.
"""
if self.IsVm():
if not self.vm_settings:
self.vm_settings = VmSettings()
self.vm_settings['vm_runtime'] = runtime
self.runtime = 'vm'
else:
self.runtime = runtime
def NormalizeVmSettings(self):
"""Normalizes VM settings."""
if self.IsVm():
if not self.vm_settings:
self.vm_settings = VmSettings()
if 'vm_runtime' not in self.vm_settings:
self.SetEffectiveRuntime(self.runtime)
if hasattr(self, 'beta_settings') and self.beta_settings:
for field in ['vm_runtime',
'has_docker_image',
'image',
'module_yaml_path']:
if field not in self.beta_settings and field in self.vm_settings:
self.beta_settings[field] = self.vm_settings[field]
def IsVm(self):
return (self.vm or
self.env in ['2', 'flex', 'flexible'])
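# Note (not in the original file): IsVm() treats `vm: true` and
# `env: 2|flex|flexible` interchangeably, so the flexible-environment checks
# above apply to either spelling.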
def ValidateHandlers(handlers, is_include_file=False):
"""Validates a list of handler (`URLMap`) objects.
Args:
handlers: A list of a handler (`URLMap`) objects.
is_include_file: If this argument is set to `True`, the handlers that are
added as part of the `includes` directive are validated.
"""
if not handlers:
return
for handler in handlers:
handler.FixSecureDefaults()
handler.WarnReservedURLs()
if not is_include_file:
handler.ErrorOnPositionForAppInfo()
def LoadSingleAppInfo(app_info):
"""Loads a single `AppInfo` object where one and only one is expected.
This method validates that the values in the `AppInfo` match the
validators that are defined in this file, in particular,
`AppInfoExternal.ATTRIBUTES`.
Args:
app_info: A file-like object or string. If the argument is a string, the
argument is parsed as a configuration file. If the argument is a
file-like object, the data is read, then parsed.
Returns:
An instance of `AppInfoExternal` as loaded from a YAML file.
Raises:
ValueError: If a specified service is not valid.
EmptyConfigurationFile: If there are no documents in YAML file.
MultipleConfigurationFile: If more than one document exists in the YAML
file.
DuplicateBackend: If a backend is found more than once in the `backends`
directive.
yaml_errors.EventError: If the `app.yaml` file fails validation.
appinfo_errors.MultipleProjectNames: If the `app.yaml` file has both an
`application` directive and a `project` directive.
"""
builder = yaml_object.ObjectBuilder(AppInfoExternal)
handler = yaml_builder.BuilderHandler(builder)
listener = yaml_listener.EventListener(handler)
listener.Parse(app_info)
app_infos = handler.GetResults()
if len(app_infos) < 1:
raise appinfo_errors.EmptyConfigurationFile()
if len(app_infos) > 1:
raise appinfo_errors.MultipleConfigurationFile()
appyaml = app_infos[0]
ValidateHandlers(appyaml.handlers)
if appyaml.builtins:
BuiltinHandler.Validate(appyaml.builtins, appyaml.runtime)
if appyaml.application and appyaml.project:
raise appinfo_errors.MultipleProjectNames(
'Specify one of "application: name" or "project: name"')
elif appyaml.project:
appyaml.application = appyaml.project
appyaml.project = None
appyaml.NormalizeVmSettings()
return appyaml
class AppInfoSummary(validation.Validated):
"""This class contains only basic summary information about an app.
This class is used to pass back information about the newly created app to
users after a new version has been created.
"""
ATTRIBUTES = {
APPLICATION: APPLICATION_RE_STRING,
MAJOR_VERSION: MODULE_VERSION_ID_RE_STRING,
MINOR_VERSION: validation.TYPE_LONG
}
def LoadAppInclude(app_include):
"""Loads a single `AppInclude` object where one and only one is expected.
Args:
    app_include: A file-like object or string. If the argument is a string, it
        is parsed as a configuration file. If the argument is a file-like
        object, the data is read, then parsed.
Returns:
An instance of `AppInclude` as loaded from a YAML file.
Raises:
EmptyConfigurationFile: If there are no documents in the YAML file.
MultipleConfigurationFile: If there is more than one document in the YAML
file.
"""
builder = yaml_object.ObjectBuilder(AppInclude)
handler = yaml_builder.BuilderHandler(builder)
listener = yaml_listener.EventListener(handler)
listener.Parse(app_include)
includes = handler.GetResults()
if len(includes) < 1:
raise appinfo_errors.EmptyConfigurationFile()
if len(includes) > 1:
raise appinfo_errors.MultipleConfigurationFile()
includeyaml = includes[0]
if includeyaml.handlers:
for handler in includeyaml.handlers:
handler.FixSecureDefaults()
handler.WarnReservedURLs()
if includeyaml.builtins:
BuiltinHandler.Validate(includeyaml.builtins)
return includeyaml
def ParseExpiration(expiration):
"""Parses an expiration delta string.
Args:
expiration: String that matches `_DELTA_REGEX`.
Returns:
Time delta in seconds.
"""
delta = 0
for match in re.finditer(_DELTA_REGEX, expiration):
amount = int(match.group(1))
units = _EXPIRATION_CONVERSIONS.get(match.group(2).lower(), 1)
delta += amount * units
return delta
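# Examples (illustrative, not in the original file; they assume the usual
# day/hour/minute/second factors in _EXPIRATION_CONVERSIONS):
#   ParseExpiration('1d 6h')  # -> 86400 + 21600 == 108000 seconds
#   ParseExpiration('30')     # -> 30 (a bare number is read as seconds)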
_file_path_negative_1_re = re.compile(r'\.\.|^\./|\.$|/\./|^-|^_ah/|^/')
_file_path_negative_2_re = re.compile(r'//|/$')
_file_path_negative_3_re = re.compile(r'^ | $|/ | /|\r|\n')
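# The three patterns above reject, in order: path tricks and reserved prefixes
# ('..', a leading './', a trailing '.', '/./', a leading '-', '_ah/' or '/');
# empty segments or a trailing slash ('//', '/$'); and leading, trailing or
# slash-adjacent spaces as well as CR/LF characters.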
def ValidFilename(filename):
"""Determines if a file name is valid.
Args:
filename: The file name to validate. The file name must be a valid file
name:
- It must only contain letters, numbers, and the following special
          characters: `@`, `_`, `+`, `/`, `$`, `.`, `-`, or `~`.
        - It must not exceed 1024 characters.
- It must not contain `/./`, `/../`, or `//`.
- It must not end in `/`.
- All spaces must be in the middle of a directory or file name.
Returns:
An error string if the file name is invalid. `''` is returned if the file
name is valid.
"""
if not filename:
return 'Filename cannot be empty'
if len(filename) > 1024:
return 'Filename cannot exceed 1024 characters: %s' % filename
if _file_path_negative_1_re.search(filename) is not None:
return ('Filename cannot contain "." or ".." '
'or start with "-" or "_ah/": %s' %
filename)
if _file_path_negative_2_re.search(filename) is not None:
return 'Filename cannot have trailing / or contain //: %s' % filename
if _file_path_negative_3_re.search(filename) is not None:
return 'Any spaces must be in the middle of a filename: %s' % filename
return ''
| [] | [] | [
"APPENGINE_RUNTIME"
] | [] | ["APPENGINE_RUNTIME"] | python | 1 | 0 | |
repairTools/objectScanner/objectScanner.go | package main
import (
"ObjectStorage/apiServer/object"
"ObjectStorage/src/lib/es"
"ObjectStorage/src/lib/utils"
"log"
"os"
"path/filepath"
"strings"
)
func main() {
files, _ := filepath.Glob(os.Getenv("STORAGE_ROOT") + "/objects/*")
for i := range files {
hash := strings.Split(filepath.Base(files[i]), ".")[0]
verify(hash)
}
}
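// verify recomputes the hash of the stored object stream and logs a mismatch
// when it differs from the hash taken from the file name.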
func verify(hash string) {
log.Println("verify", hash)
size, e := es.SearchHashSize(hash)
if e != nil {
log.Println(e)
return
}
stream, e := object.GetStream(hash, size)
if e != nil {
log.Println(e)
return
}
d := utils.CalculateHash(stream)
if d != hash {
log.Printf("object hash mismatch, calculated=%s, requested=%s", d, hash)
}
stream.Close()
}
| [
"\"STORAGE_ROOT\""
] | [] | [
"STORAGE_ROOT"
] | [] | ["STORAGE_ROOT"] | go | 1 | 0 | |
python/pub-sub-bot/bot.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Hangouts Chat bot that listens for messages via Cloud Pub/Sub.
"""
# [START pub-sub-bot]
import json
import logging
import os
import sys
import time
from google.cloud import pubsub_v1
from googleapiclient.discovery import build
import google.auth
def receive_messages():
"""Receives messages from a pull subscription."""
scopes = ['https://www.googleapis.com/auth/chat.bot']
credentials, project_id = google.auth.default()
credentials = credentials.with_scopes(scopes=scopes)
chat = build('chat', 'v1', credentials=credentials)
subscription_id = os.environ.get('SUBSCRIPTION_ID')
subscriber = pubsub_v1.SubscriberClient()
subscription_path = subscriber.subscription_path(
project_id, subscription_id)
def callback(message):
logging.info('Received message: %s', message.data)
event = json.loads(message.data)
space_name = event['space']['name']
# If the bot was removed, we don't need to return a response.
if event['type'] == 'REMOVED_FROM_SPACE':
            logging.info('Bot removed from space %s', space_name)
return
response = format_response(event)
# Send the asynchronous response back to Hangouts Chat
chat.spaces().messages().create(
parent=space_name,
body=response).execute()
message.ack()
subscriber.subscribe(subscription_path, callback=callback)
logging.info('Listening for messages on %s', subscription_path)
# Keep main thread from exiting while waiting for messages
while True:
time.sleep(60)
def format_response(event):
"""Determine what response to provide based upon event data.
Args:
event: A dictionary with the event data.
"""
event_type = event['type']
text = ""
sender_name = event['user']['displayName']
# Case 1: The bot was added to a room
if event_type == 'ADDED_TO_SPACE' and event['space']['type'] == 'ROOM':
text = 'Thanks for adding me to {}!'.format(event['space']['displayName'])
# Case 2: The bot was added to a DM
elif event_type == 'ADDED_TO_SPACE' and event['space']['type'] == 'DM':
text = 'Thanks for adding me to a DM, {}!'.format(sender_name)
elif event_type == 'MESSAGE':
text = 'Your message, {}: "{}"'.format(sender_name, event['message']['text'])
response = {'text': text}
# The following three lines of code update the thread that raised the event.
# Delete them if you want to send the message in a new thread.
if event_type == 'MESSAGE' and event['message']['thread'] is not None:
thread_id = event['message']['thread']
response['thread'] = thread_id
return response
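# Entry point: the script refuses to start without SUBSCRIPTION_ID, then configures logging and blocks in the pull loop.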
if __name__ == '__main__':
if 'SUBSCRIPTION_ID' not in os.environ:
logging.error('Missing SUBSCRIPTION_ID env var.')
sys.exit(1)
logging.basicConfig(
level=logging.INFO,
style='{',
format='{levelname:.1}{asctime} {filename}:{lineno}] {message}')
receive_messages()
# [END pub-sub-bot]
| [] | [] | ["SUBSCRIPTION_ID"] | [] | ["SUBSCRIPTION_ID"] | python | 1 | 0 |
djangorestautomatepm/djangorestautomatepm/wsgi.py | """
WSGI config for djangorestautomatepm project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangorestautomatepm.settings')
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
vendor/github.com/vmware/vsphere-automation-sdk-go/runtime/protocol/server/Server.go | /* Copyright © 2019 VMware, Inc. All Rights Reserved.
SPDX-License-Identifier: BSD-2-Clause */
package server
import (
"fmt"
"net"
"net/http"
"github.com/vmware/vsphere-automation-sdk-go/runtime/l10n"
"github.com/vmware/vsphere-automation-sdk-go/runtime/log"
"golang.org/x/net/context"
)
// Server wraps an http server and provides basic functionality
// such as server start and stop
type Server struct {
srv *http.Server
}
// NewServer returns Server object
func NewServer(portnumber string, handler http.Handler) *Server {
return &Server{srv: &http.Server{Addr: portnumber, Handler: handler}}
}
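// A minimal usage sketch (illustrative only; the handler, port and timeout are assumptions):
//   srv := NewServer(":9090", myHandler)
//   errCh := srv.Start()
//   if ok, err := srv.WaitForRunningPort(errCh, 10); !ok {
//       // handle startup error (err)
//   }
//   defer srv.Stop()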
// Start Starts http server
func (s *Server) Start() chan error {
errorChannel := make(chan error)
go func() {
// catch when server panics
defer func() {
if r := recover(); r != nil {
err, ok := r.(error)
var errorMsg string
if ok {
errorMsg = err.Error()
} else {
errorMsg = fmt.Sprintf("Server error occurred: %#v", r)
}
errorChannel <- l10n.NewRuntimeError(
"vapi.protocol.server.error",
map[string]string{"err": errorMsg})
}
}()
if err := s.srv.ListenAndServe(); err != nil {
errorChannel <- l10n.NewRuntimeError(
"vapi.protocol.server.error",
map[string]string{"err": err.Error()})
}
}()
log.Infof("HTTP Server starting on %s", s.srv.Addr)
return errorChannel
}
// WaitForRunningPort waits until the server port is bound by the OS.
// It should be called after the Start method; the error channel returned by
// Start is expected as the input parameter.
// The server is closed in case of error.
func (s *Server) WaitForRunningPort(errChannel chan error, seconds int) (bool, error) {
check, err := WaitForFunc(seconds, func() (bool, error) {
select {
case err := <-errChannel:
return false, err
default:
_, err := net.Dial("tcp", s.Address())
if err == nil {
return true, nil
}
return false, nil
}
})
if err != nil || !check {
s.Stop()
return false, err
}
log.Info("Server started successfully on ", s.Address())
return true, nil
}
// Stop Stops http server gracefully
func (s *Server) Stop() {
log.Info("Stopping Http Server")
if err := s.srv.Shutdown(context.TODO()); err != nil {
panic(err) // failure/timeout shutting down the server gracefully
}
log.Infof("Server at %s stopped.", s.srv.Addr)
}
// Address Returns server address
func (s *Server) Address() string {
return s.srv.Addr
}
| [] | [] | [] | [] | [] | go | null | null | null |
repo/repo_test.go | /* Copyright 2017 The Bazel Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package repo_test
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"testing"
"github.com/bazelbuild/bazel-gazelle/repo"
"github.com/bazelbuild/bazel-gazelle/rule"
"github.com/bazelbuild/bazel-gazelle/testtools"
)
func TestFindExternalRepo(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("symlinks not supported on windows")
}
dir, err := ioutil.TempDir(os.Getenv("TEST_TEMPDIR"), "TestFindExternalRepo")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
dir, err = filepath.EvalSymlinks(dir)
if err != nil {
t.Fatal(err)
}
name := "foo"
externalPath := filepath.Join(dir, "bazel", "output-base", "external", name)
if err := os.MkdirAll(externalPath, 0777); err != nil {
t.Fatal(err)
}
bazelOutPath := filepath.Join(dir, "bazel", "output-base", "execroot", "test", "bazel-out")
if err := os.MkdirAll(bazelOutPath, 0777); err != nil {
t.Fatal(err)
}
workspacePath := filepath.Join(dir, "workspace")
if err := os.MkdirAll(workspacePath, 0777); err != nil {
t.Fatal(err)
}
if err := os.Symlink(bazelOutPath, filepath.Join(workspacePath, "bazel-out")); err != nil {
t.Fatal(err)
}
if got, err := repo.FindExternalRepo(workspacePath, name); err != nil {
t.Fatal(err)
} else if got != externalPath {
t.Errorf("got %q ; want %q", got, externalPath)
}
}
func TestListRepositories(t *testing.T) {
for _, tc := range []struct {
desc, workspace, want string
}{
{
desc: "empty",
want: "",
}, {
desc: "go_repository",
workspace: `
go_repository(
name = "custom_repo",
commit = "123456",
remote = "https://example.com/repo",
importpath = "example.com/repo",
)
`,
want: "custom_repo example.com/repo",
},
} {
t.Run(tc.desc, func(t *testing.T) {
workspace, err := rule.LoadData("WORKSPACE", "", []byte(tc.workspace))
if err != nil {
t.Fatal(err)
}
repos, _, err := repo.ListRepositories(workspace)
if err != nil {
t.Fatal(err)
}
got := reposToString(repos)
if got != tc.want {
t.Errorf("got\n%s\n\nwant:\n%s", got, tc.want)
}
})
}
}
func TestListRepositoriesWithRepositoryDirective(t *testing.T) {
for _, tc := range []struct {
desc, workspace, want string
}{
{
desc: "empty",
want: "",
}, {
desc: "git_repository",
workspace: `
git_repository(
name = "custom_repo",
commit = "123456",
remote = "https://example.com/repo",
importpath = "example.com/repo",
)
# gazelle:repository go_repository name=custom_repo importpath=example.com/repo1
# gazelle:repository go_repository name=custom_repo_2 importpath=example.com/repo2
`,
want: `custom_repo example.com/repo1
custom_repo_2 example.com/repo2`,
},
} {
t.Run(tc.desc, func(t *testing.T) {
workspace, err := rule.LoadData("WORKSPACE", "", []byte(tc.workspace))
if err != nil {
t.Fatal(err)
}
repos, _, err := repo.ListRepositories(workspace)
if err != nil {
t.Fatal(err)
}
got := reposToString(repos)
if got != tc.want {
t.Errorf("got\n%s\n\nwant:\n%s", got, tc.want)
}
})
}
}
func TestListRepositoriesWithRepositoryMacroDirective(t *testing.T) {
files := []testtools.FileSpec{{
Path: "repos1.bzl",
Content: `
def go_repositories():
go_repository(
name = "go_repo",
commit = "123456",
remote = "https://example.com/go",
importpath = "example.com/go",
)
def foo_repositories():
go_repository(
name = "foo_repo",
commit = "123456",
remote = "https://example.com/foo",
importpath = "example.com/foo",
)
`}, {
Path: "repos2.bzl",
Content: `
def bar_repositories():
# gazelle:repository go_repository name=extra_repo importpath=example.com/extra
go_repository(
name = "bar_repo",
commit = "123456",
remote = "https://example.com/bar",
importpath = "example.com/bar",
)
def baz_repositories():
# gazelle:repository go_repository name=ignored_repo importpath=example.com/ignored
go_repository(
name = "ignored_repo",
commit = "123456",
remote = "https://example.com/ignored",
importpath = "example.com/ignored",
)
`}}
dir, cleanup := testtools.CreateFiles(t, files)
defer cleanup()
workspaceString := `
# gazelle:repository_macro repos1.bzl%go_repositories
# gazelle:repository_macro repos1.bzl%foo_repositories
# gazelle:repository_macro repos2.bzl%bar_repositories
`
workspace, err := rule.LoadData(dir+"/WORKSPACE", "", []byte(workspaceString))
if err != nil {
t.Fatal(err)
}
repos, _, err := repo.ListRepositories(workspace)
if err != nil {
t.Fatal(err)
}
got := reposToString(repos)
want := `go_repo example.com/go
foo_repo example.com/foo
bar_repo example.com/bar
extra_repo example.com/extra`
if got != want {
t.Errorf("got\n%s\n\nwant:\n%s", got, want)
}
}
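// reposToString renders each repository rule as "<name> <importpath>", one rule per line.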
func reposToString(repos []*rule.Rule) string {
buf := &strings.Builder{}
sep := ""
for _, r := range repos {
fmt.Fprintf(buf, "%s%s %s", sep, r.Name(), r.AttrString("importpath"))
sep = "\n"
}
return buf.String()
}
| ["\"TEST_TEMPDIR\""] | [] | ["TEST_TEMPDIR"] | [] | ["TEST_TEMPDIR"] | go | 1 | 0 |
integration-tests/smoke_test.go | // +build integration
package integrationTests
import (
"bytes"
"flag"
"os"
"strings"
"testing"
)
var apiVersion = flag.String("api-version", "2015.01.01", "Expected version of Api Server")
func TestApiServerIsUp(t *testing.T) {
# override flags if environment variable exists
if e := os.Getenv("API_VERSION"); e != "" {
apiVersion = &e
}
client, err := NewTestClient()
if err != nil {
t.Fatal(err)
}
statusCode, body := client.Get("/healthcheck", ApiServer)
expected := "200 OK"
if statusCode != expected {
t.Errorf("Expected to get %#v code for the ApiServer. Got %#v", expected, statusCode)
return
}
bodystring := bytes.NewBuffer(*body).String()
if !strings.Contains(bodystring, *apiVersion) {
t.Errorf("ApiServer is running version %#v, expected %#v", bodystring, *apiVersion)
}
}
| ["\"API_VERSION\""] | [] | ["API_VERSION"] | [] | ["API_VERSION"] | go | 1 | 0 |
test/resources/ray_cartpole/train_ray_torch.py | import os
import ray
import ray.rllib.agents.ppo as ppo
from ray.tune.logger import pretty_print
# Based on https://github.com/ray-project/ray/blob/master/doc/source/rllib-training.rst#python-api
ray.init(log_to_driver=False, webui_host='127.0.0.1')
config = ppo.DEFAULT_CONFIG.copy()
config["num_gpus"] = int(os.environ.get("SM_NUM_GPUS", 0))
checkpoint_dir = os.environ.get("SM_MODEL_DIR", '/Users/nadzeya/gym')
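# SM_NUM_GPUS and SM_MODEL_DIR are injected by SageMaker into training jobs; the fallbacks
# above (0 GPUs and a local checkpoint directory) are only assumptions for running outside SageMaker.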
config["num_workers"] = 1
config["use_pytorch"] = True
trainer = ppo.PPOTrainer(config=config, env="CartPole-v0")
# Can optionally call agent.restore(path) to load a checkpoint.
for i in range(5):
# Perform one iteration of training the policy with PPO
result = trainer.train()
print(pretty_print(result))
checkpoint = trainer.save(checkpoint_dir=checkpoint_dir)
print("checkpoint saved at", checkpoint)
| [] | [] | ["SM_MODEL_DIR", "SM_NUM_GPUS"] | [] | ["SM_MODEL_DIR", "SM_NUM_GPUS"] | python | 2 | 0 |
py2app/bootstrap/site_packages.py | def _site_packages():
import os
import site
import sys
paths = []
prefixes = [sys.prefix]
if sys.exec_prefix != sys.prefix:
prefixes.append(sys.exec_prefix)
for prefix in prefixes:
paths.append(
os.path.join(
prefix, "lib", "python%d.%d" % (sys.version_info[:2]), "site-packages"
)
)
if os.path.join(".framework", "") in os.path.join(sys.prefix, ""):
home = os.environ.get("HOME")
if home:
# Sierra and later
paths.append(
os.path.join(
home,
"Library",
"Python",
"%d.%d" % (sys.version_info[:2]),
"lib",
"python",
"site-packages",
)
)
# Before Sierra
paths.append(
os.path.join(
home,
"Library",
"Python",
"%d.%d" % (sys.version_info[:2]),
"site-packages",
)
)
# Work around a misfeature in setuptools: easy_install.pth places
# site-packages way too early on sys.path and that breaks py2app bundles.
# NOTE: this hacks into an undocumented feature of setuptools and
# might stop working without warning.
sys.__egginsert = len(sys.path)
for path in paths:
site.addsitedir(path)
_site_packages()
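# Calling _site_packages() at import time ensures the extra site-packages directories are on
# sys.path before the bundled application's own code runs.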
| [] | [] | ["HOME"] | [] | ["HOME"] | python | 1 | 0 |
go1.5/test/const.go | // run
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Test simple boolean and numeric constants.
package main
import "os"
const (
c0 = 0
cm1 = -1
chuge = 1 << 100
chuge_1 = chuge - 1
c1 = chuge >> 100
c3div2 = 3 / 2
c1e3 = 1e3
ctrue = true
cfalse = !ctrue
)
const (
f0 = 0.0
fm1 = -1.
fhuge float64 = 1 << 100
fhuge_1 float64 = chuge - 1
f1 float64 = chuge >> 100
f3div2 = 3. / 2.
f1e3 float64 = 1e3
)
func assert(t bool, s string) {
if !t {
panic(s)
}
}
func ints() {
assert(c0 == 0, "c0")
assert(c1 == 1, "c1")
assert(chuge > chuge_1, "chuge")
assert(chuge_1+1 == chuge, "chuge 1")
assert(chuge+cm1+1 == chuge, "cm1")
assert(c3div2 == 1, "3/2")
assert(c1e3 == 1000, "c1e3 int")
assert(c1e3 == 1e3, "c1e3 float")
// verify that all (in range) are assignable as ints
var i int
i = c0
assert(i == c0, "i == c0")
i = cm1
assert(i == cm1, "i == cm1")
i = c1
assert(i == c1, "i == c1")
i = c3div2
assert(i == c3div2, "i == c3div2")
i = c1e3
assert(i == c1e3, "i == c1e3")
// verify that all are assignable as floats
var f float64
f = c0
assert(f == c0, "f == c0")
f = cm1
assert(f == cm1, "f == cm1")
f = chuge
assert(f == chuge, "f == chuge")
f = chuge_1
assert(f == chuge_1, "f == chuge_1")
f = c1
assert(f == c1, "f == c1")
f = c3div2
assert(f == c3div2, "f == c3div2")
f = c1e3
assert(f == c1e3, "f == c1e3")
}
func floats() {
assert(f0 == c0, "f0")
assert(f1 == c1, "f1")
// TODO(gri): exp/ssa/interp constant folding is incorrect.
if os.Getenv("GOSSAINTERP") == "" {
assert(fhuge == fhuge_1, "fhuge") // float64 can't distinguish fhuge, fhuge_1.
}
assert(fhuge_1+1 == fhuge, "fhuge 1")
assert(fhuge+fm1+1 == fhuge, "fm1")
assert(f3div2 == 1.5, "3./2.")
assert(f1e3 == 1000, "f1e3 int")
assert(f1e3 == 1.e3, "f1e3 float")
// verify that all (in range) are assignable as ints
var i int
i = f0
assert(i == f0, "i == f0")
i = fm1
assert(i == fm1, "i == fm1")
// verify that all are assignable as floats
var f float64
f = f0
assert(f == f0, "f == f0")
f = fm1
assert(f == fm1, "f == fm1")
f = fhuge
assert(f == fhuge, "f == fhuge")
f = fhuge_1
assert(f == fhuge_1, "f == fhuge_1")
f = f1
assert(f == f1, "f == f1")
f = f3div2
assert(f == f3div2, "f == f3div2")
f = f1e3
assert(f == f1e3, "f == f1e3")
}
func main() {
ints()
floats()
assert(ctrue == true, "ctrue == true")
assert(cfalse == false, "cfalse == false")
}
| ["\"GOSSAINTERP\""] | [] | ["GOSSAINTERP"] | [] | ["GOSSAINTERP"] | go | 1 | 0 |
qa/rpc-tests/rpcbind_test.py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test for -rpcbind, as well as -rpcallowip and -rpcconnect
# TODO extend this test from the test framework (like all other tests)
import tempfile
import traceback
from test_framework.util import *
from test_framework.netutil import *
def run_bind_test(tmpdir, allow_ips, connect_to, addresses, expected):
'''
Start a node with requested rpcallowip and rpcbind parameters,
then try to connect, and check if the set of bound addresses
matches the expected set.
'''
expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
base_args = ['-disablewallet', '-nolisten']
if allow_ips:
base_args += ['-rpcallowip=' + x for x in allow_ips]
binds = ['-rpcbind='+addr for addr in addresses]
nodes = start_nodes(1, tmpdir, [base_args + binds], connect_to)
try:
pid = bsmcoind_processes[0].pid
assert_equal(set(get_bind_addrs(pid)), set(expected))
finally:
stop_nodes(nodes)
wait_bsmcoinds()
def run_allowip_test(tmpdir, allow_ips, rpchost, rpcport):
'''
Start a node with rpcallowip, and request getinfo
at a non-localhost IP.
'''
base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
nodes = start_nodes(1, tmpdir, [base_args])
try:
# connect to node through non-loopback interface
url = "http://rt:rt@%s:%d" % (rpchost, rpcport,)
node = get_rpc_proxy(url, 1)
node.getinfo()
finally:
node = None # make sure connection will be garbage collected and closed
stop_nodes(nodes)
wait_bsmcoinds()
def run_test(tmpdir):
assert(sys.platform.startswith('linux')) # due to OS-specific network stats queries, this test works only on Linux
# find the first non-loopback interface for testing
non_loopback_ip = None
for name,ip in all_interfaces():
if ip != '127.0.0.1':
non_loopback_ip = ip
break
if non_loopback_ip is None:
assert(not 'This test requires at least one non-loopback IPv4 interface')
print("Using interface %s for testing" % non_loopback_ip)
defaultport = rpc_port(0)
# check default without rpcallowip (IPv4 and IPv6 localhost)
run_bind_test(tmpdir, None, '127.0.0.1', [],
[('127.0.0.1', defaultport), ('::1', defaultport)])
# check default with rpcallowip (IPv6 any)
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', [],
[('::0', defaultport)])
# check only IPv4 localhost (explicit)
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
[('127.0.0.1', defaultport)])
# check only IPv4 localhost (explicit) with alternative port
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
[('127.0.0.1', 32171)])
# check only IPv4 localhost (explicit) with multiple alternative ports on same host
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
[('127.0.0.1', 32171), ('127.0.0.1', 32172)])
# check only IPv6 localhost (explicit)
run_bind_test(tmpdir, ['[::1]'], '[::1]', ['[::1]'],
[('::1', defaultport)])
# check both IPv4 and IPv6 localhost (explicit)
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
[('127.0.0.1', defaultport), ('::1', defaultport)])
# check only non-loopback interface
run_bind_test(tmpdir, [non_loopback_ip], non_loopback_ip, [non_loopback_ip],
[(non_loopback_ip, defaultport)])
# Check that with invalid rpcallowip, we are denied
run_allowip_test(tmpdir, [non_loopback_ip], non_loopback_ip, defaultport)
try:
run_allowip_test(tmpdir, ['1.1.1.1'], non_loopback_ip, defaultport)
assert(not 'Connection not denied by rpcallowip as expected')
except ValueError:
pass
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave bsmcoinds and test.* datadir on exit or error")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing bsmcoind/bsmcoin-cli (default: %default%)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
(options, args) = parser.parse_args()
os.environ['PATH'] = options.srcdir+":"+os.environ['PATH']
check_json_precision()
success = False
nodes = []
try:
print("Initializing test directory "+options.tmpdir)
if not os.path.isdir(options.tmpdir):
os.makedirs(options.tmpdir)
initialize_chain(options.tmpdir)
run_test(options.tmpdir)
success = True
except AssertionError as e:
print("Assertion failed: " + str(e))
except Exception as e:
print("Unexpected exception caught during testing: "+str(e))
traceback.print_tb(sys.exc_info()[2])
if not options.nocleanup:
print("Cleaning up")
wait_bsmcoinds()
shutil.rmtree(options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
if __name__ == '__main__':
main()
| [] | [] | ["PATH"] | [] | ["PATH"] | python | 1 | 0 |
helper/sniffer.py | import os
from helpers import *
import subprocess
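# init_sniffer builds the CometOS PureSniffer firmware and flashes it onto the requested sniffer
# nodes; it is a no-op when sniffer_nodes is empty and requires COMETOS_PATH to point at a CometOS checkout.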
def init_sniffer(run, site, node_type, sniffer_nodes, exp_id):
if len(sniffer_nodes) == 0:
return
try:
SNIFFER_PATH = os.path.join(os.environ['COMETOS_PATH'], 'examples/hardware/sniffer/PureSniffer')
SNIFFER_CMD = "cob platform=M3OpenNode mac_default_channel=13"
SNIFFER_IMAGE = os.path.join(SNIFFER_PATH,"bin/M3OpenNode/Device.elf")
except KeyError:
print("Sniffer is only available when using self-compiled binaries. Please set sniffer_nodes = []")
exit(1)
# Remove image first to verify later it is this file that is built
try:
os.remove(SNIFFER_IMAGE)
except OSError:
pass # not existing so no problem
result = subprocess.call(SNIFFER_CMD, stdout=None, stderr=None, shell=True, cwd=SNIFFER_PATH)
assert(result == 0)
sniffer_resources = resources(site, node_type, sniffer_nodes)
flash(run, site, node_type, SNIFFER_IMAGE, exp_id, sniffer_resources['nodes'])
| [] | [] | ["COMETOS_PATH"] | [] | ["COMETOS_PATH"] | python | 1 | 0 |
dao_universal_sqlite_test.go | package henge
import (
"fmt"
"os"
"strings"
"testing"
"github.com/btnguyen2k/prom"
_ "github.com/mattn/go-sqlite3"
)
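// _testSqliteInitSqlConnect builds a prom.SqlConnect for a temporary SQLite database from the
// SQLITE_DRIVER/SQLITE_URL/TIMEZONE environment variables, skipping the test when driver or URL is unset.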
func _testSqliteInitSqlConnect(t *testing.T, testName string) *prom.SqlConnect {
driver := strings.ReplaceAll(os.Getenv("SQLITE_DRIVER"), `"`, "")
url := strings.ReplaceAll(os.Getenv("SQLITE_URL"), `"`, "")
if driver == "" || url == "" {
t.Skipf("%s skipped", testName)
return nil
}
timezone := strings.ReplaceAll(os.Getenv("TIMEZONE"), `"`, "")
if timezone == "" {
timezone = "Asia/Ho_Chi_Minh"
}
urlTimezone := strings.ReplaceAll(timezone, "/", "%2f")
url = strings.ReplaceAll(url, "${loc}", urlTimezone)
url = strings.ReplaceAll(url, "${tz}", urlTimezone)
url = strings.ReplaceAll(url, "${timezone}", urlTimezone)
dbName := "tempdb"
sqlc, err := NewSqliteConnection(url, dbName, timezone, driver, 10000, nil)
if err != nil {
t.Fatalf("%s/%s failed: %s", testName, "NewSqliteConnection", err)
}
return sqlc
}
func TestInitSqliteTable(t *testing.T) {
testName := "TestInitSqliteTable"
teardownTest := setupTest(t, testName, setupTestSqlite, teardownTestSqlite)
defer teardownTest(t)
testSqlc.GetDB().Exec(fmt.Sprintf("DROP TABLE %s", testTable))
colDef := map[string]string{"col_email": "VARCHAR(64)", "col_age": "INT"}
for i := 0; i < 2; i++ {
if err := InitSqliteTable(testSqlc, testTable, colDef); err != nil {
t.Fatalf("%s failed: %s", testName, err)
}
}
}
func TestCreateIndexSqlite(t *testing.T) {
testName := "TestCreateIndexSqlite"
teardownTest := setupTest(t, testName, setupTestSqlite, teardownTestSqlite)
defer teardownTest(t)
testSqlc.GetDB().Exec(fmt.Sprintf("DROP TABLE %s", testTable))
colDef := map[string]string{"col_email": "VARCHAR(64)", "col_age": "INT"}
if err := InitSqliteTable(testSqlc, testTable, colDef); err != nil {
t.Fatalf("%s failed: %s", testName, err)
}
if err := CreateIndexSql(testSqlc, testTable, true, []string{"col_email"}); err != nil {
t.Fatalf("%s failed: %s", testName, err)
}
if err := CreateIndexSql(testSqlc, testTable, false, []string{"col_age"}); err != nil {
t.Fatalf("%s failed: %s", testName, err)
}
}
var setupTestSqlite = func(t *testing.T, testName string) {
testSqlc = _testSqliteInitSqlConnect(t, testName)
testSqlc.GetDB().Exec(fmt.Sprintf("DROP TABLE %s", testTable))
colDef := map[string]string{"col_email": "VARCHAR(64)", "col_age": "INT"}
if err := InitSqliteTable(testSqlc, testTable, colDef); err != nil {
t.Fatalf("%s failed: %s", testName, err)
}
if err := CreateIndexSql(testSqlc, testTable, true, []string{"col_email"}); err != nil {
t.Fatalf("%s failed: %s", testName, err)
}
if err := CreateIndexSql(testSqlc, testTable, false, []string{"col_age"}); err != nil {
t.Fatalf("%s failed: %s", testName, err)
}
extraColNameToFieldMappings := map[string]string{"col_email": "email", "col_age": "age"}
testDao = NewUniversalDaoSql(testSqlc, testTable, true, extraColNameToFieldMappings)
}
var teardownTestSqlite = func(t *testing.T, testName string) {
if testSqlc != nil {
defer func() { testSqlc, testDao = nil, nil }()
testSqlc.Close()
}
}
| ["\"SQLITE_DRIVER\"", "\"SQLITE_URL\"", "\"TIMEZONE\""] | [] | ["TIMEZONE", "SQLITE_URL", "SQLITE_DRIVER"] | [] | ["TIMEZONE", "SQLITE_URL", "SQLITE_DRIVER"] | go | 3 | 0 |
cmd/executor/cmd/root.go | /*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/GoogleContainerTools/kaniko/pkg/buildcontext"
"github.com/GoogleContainerTools/kaniko/pkg/config"
"github.com/GoogleContainerTools/kaniko/pkg/constants"
"github.com/GoogleContainerTools/kaniko/pkg/executor"
"github.com/GoogleContainerTools/kaniko/pkg/logging"
"github.com/GoogleContainerTools/kaniko/pkg/timing"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/genuinetools/bpfd/proc"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
var (
opts = &config.KanikoOptions{}
ctxSubPath string
force bool
logLevel string
logFormat string
logTimestamp bool
)
func init() {
RootCmd.PersistentFlags().StringVarP(&logLevel, "verbosity", "v", logging.DefaultLevel, "Log level (trace, debug, info, warn, error, fatal, panic)")
RootCmd.PersistentFlags().StringVar(&logFormat, "log-format", logging.FormatColor, "Log format (text, color, json)")
RootCmd.PersistentFlags().BoolVar(&logTimestamp, "log-timestamp", logging.DefaultLogTimestamp, "Timestamp in log output")
RootCmd.PersistentFlags().BoolVarP(&force, "force", "", false, "Force building outside of a container")
addKanikoOptionsFlags()
addHiddenFlags(RootCmd)
RootCmd.PersistentFlags().BoolVarP(&opts.IgnoreVarRun, "whitelist-var-run", "", true, "Ignore /var/run directory when taking image snapshot. Set it to false to preserve /var/run/ in destination image. (Default true).")
RootCmd.PersistentFlags().MarkDeprecated("whitelist-var-run", "please use ignore-var-run instead.")
RootCmd.PersistentFlags().SetNormalizeFunc(normalizeWhitelistVarRun)
}
func normalizeWhitelistVarRun(f *pflag.FlagSet, name string) pflag.NormalizedName {
switch name {
case "whitelist-var-run":
name = "ignore-var-run"
break
}
return pflag.NormalizedName(name)
}
// RootCmd is the kaniko command that is run
var RootCmd = &cobra.Command{
Use: "executor",
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
if cmd.Use == "executor" {
resolveEnvironmentBuildArgs(opts.BuildArgs, os.Getenv)
if err := logging.Configure(logLevel, logFormat, logTimestamp); err != nil {
return err
}
if !opts.NoPush && len(opts.Destinations) == 0 {
return errors.New("You must provide --destination, or use --no-push")
}
if err := cacheFlagsValid(); err != nil {
return errors.Wrap(err, "cache flags invalid")
}
if err := resolveSourceContext(); err != nil {
return errors.Wrap(err, "error resolving source context")
}
if err := resolveDockerfilePath(); err != nil {
return errors.Wrap(err, "error resolving dockerfile path")
}
if len(opts.Destinations) == 0 && opts.ImageNameDigestFile != "" {
return errors.New("You must provide --destination if setting ImageNameDigestFile")
}
if len(opts.Destinations) == 0 && opts.ImageNameTagDigestFile != "" {
return errors.New("You must provide --destination if setting ImageNameTagDigestFile")
}
// Update ignored paths
if opts.IgnoreVarRun {
// /var/run is a special case. It's common to mount in /var/run/docker.sock
// or something similar, which leads to a special mount on the /var/run/docker.sock
// file itself, but leaves the directory present in the image with no way to tell
// if it came from the base image or not.
logrus.Trace("Adding /var/run to default ignore list")
util.AddToDefaultIgnoreList(util.IgnoreListEntry{
Path: "/var/run",
PrefixMatchOnly: false,
})
}
for _, p := range opts.IgnorePaths {
util.AddToDefaultIgnoreList(util.IgnoreListEntry{
Path: p,
PrefixMatchOnly: false,
})
}
}
return nil
},
Run: func(cmd *cobra.Command, args []string) {
if !checkContained() {
if !force {
exit(errors.New("kaniko should only be run inside of a container, run with the --force flag if you are sure you want to continue"))
}
logrus.Warn("kaniko is being run outside of a container. This can have dangerous effects on your system")
}
if !opts.NoPush || opts.CacheRepo != "" {
if err := executor.CheckPushPermissions(opts); err != nil {
exit(errors.Wrap(err, "error checking push permissions -- make sure you entered the correct tag name, and that you are authenticated correctly, and try again"))
}
}
if err := resolveRelativePaths(); err != nil {
exit(errors.Wrap(err, "error resolving relative paths to absolute paths"))
}
if err := os.Chdir("/"); err != nil {
exit(errors.Wrap(err, "error changing to root dir"))
}
image, err := executor.DoBuild(opts)
if err != nil {
exit(errors.Wrap(err, "error building image"))
}
if err := executor.DoPush(image, opts); err != nil {
exit(errors.Wrap(err, "error pushing image"))
}
benchmarkFile := os.Getenv("BENCHMARK_FILE")
// false is a keyword for integration tests to turn off benchmarking
if benchmarkFile != "" && benchmarkFile != "false" {
s, err := timing.JSON()
if err != nil {
logrus.Warnf("Unable to write benchmark file: %s", err)
return
}
if strings.HasPrefix(benchmarkFile, "gs://") {
logrus.Info("uploading to gcs")
if err := buildcontext.UploadToBucket(strings.NewReader(s), benchmarkFile); err != nil {
logrus.Infof("Unable to upload %s due to %v", benchmarkFile, err)
}
logrus.Infof("benchmark file written at %s", benchmarkFile)
} else {
f, err := os.Create(benchmarkFile)
if err != nil {
logrus.Warnf("Unable to create benchmarking file %s: %s", benchmarkFile, err)
return
}
defer f.Close()
f.WriteString(s)
logrus.Infof("benchmark file written at %s", benchmarkFile)
}
}
},
}
// addKanikoOptionsFlags configures opts
func addKanikoOptionsFlags() {
RootCmd.PersistentFlags().StringVarP(&opts.DockerfilePath, "dockerfile", "f", "Dockerfile", "Path to the dockerfile to be built.")
RootCmd.PersistentFlags().StringVarP(&opts.SrcContext, "context", "c", "/workspace/", "Path to the dockerfile build context.")
RootCmd.PersistentFlags().StringVarP(&ctxSubPath, "context-sub-path", "", "", "Sub path within the given context.")
RootCmd.PersistentFlags().StringVarP(&opts.Bucket, "bucket", "b", "", "Name of the GCS bucket from which to access build context as tarball.")
RootCmd.PersistentFlags().VarP(&opts.Destinations, "destination", "d", "Registry the final image should be pushed to. Set it repeatedly for multiple destinations.")
RootCmd.PersistentFlags().StringVarP(&opts.SnapshotMode, "snapshotMode", "", "full", "Change the file attributes inspected during snapshotting")
RootCmd.PersistentFlags().StringVarP(&opts.CustomPlatform, "customPlatform", "", "", "Specify the build platform if different from the current host")
RootCmd.PersistentFlags().VarP(&opts.BuildArgs, "build-arg", "", "This flag allows you to pass in ARG values at build time. Set it repeatedly for multiple values.")
RootCmd.PersistentFlags().BoolVarP(&opts.Insecure, "insecure", "", false, "Push to insecure registry using plain HTTP")
RootCmd.PersistentFlags().BoolVarP(&opts.SkipTLSVerify, "skip-tls-verify", "", false, "Push to insecure registry ignoring TLS verify")
RootCmd.PersistentFlags().BoolVarP(&opts.InsecurePull, "insecure-pull", "", false, "Pull from insecure registry using plain HTTP")
RootCmd.PersistentFlags().BoolVarP(&opts.SkipTLSVerifyPull, "skip-tls-verify-pull", "", false, "Pull from insecure registry ignoring TLS verify")
RootCmd.PersistentFlags().IntVar(&opts.PushRetry, "push-retry", 0, "Number of retries for the push operation")
RootCmd.PersistentFlags().StringVarP(&opts.TarPath, "tarPath", "", "", "Path to save the image in as a tarball instead of pushing")
RootCmd.PersistentFlags().BoolVarP(&opts.SingleSnapshot, "single-snapshot", "", false, "Take a single snapshot at the end of the build.")
RootCmd.PersistentFlags().BoolVarP(&opts.Reproducible, "reproducible", "", false, "Strip timestamps out of the image to make it reproducible")
RootCmd.PersistentFlags().StringVarP(&opts.Target, "target", "", "", "Set the target build stage to build")
RootCmd.PersistentFlags().BoolVarP(&opts.NoPush, "no-push", "", false, "Do not push the image to the registry")
RootCmd.PersistentFlags().StringVarP(&opts.CacheRepo, "cache-repo", "", "", "Specify a repository to use as a cache, otherwise one will be inferred from the destination provided")
RootCmd.PersistentFlags().StringVarP(&opts.CacheDir, "cache-dir", "", "/cache", "Specify a local directory to use as a cache.")
RootCmd.PersistentFlags().StringVarP(&opts.DigestFile, "digest-file", "", "", "Specify a file to save the digest of the built image to.")
RootCmd.PersistentFlags().StringVarP(&opts.ImageNameDigestFile, "image-name-with-digest-file", "", "", "Specify a file to save the image name w/ digest of the built image to.")
RootCmd.PersistentFlags().StringVarP(&opts.ImageNameTagDigestFile, "image-name-tag-with-digest-file", "", "", "Specify a file to save the image name w/ image tag w/ digest of the built image to.")
RootCmd.PersistentFlags().StringVarP(&opts.OCILayoutPath, "oci-layout-path", "", "", "Path to save the OCI image layout of the built image.")
RootCmd.PersistentFlags().BoolVarP(&opts.Cache, "cache", "", false, "Use cache when building image")
RootCmd.PersistentFlags().BoolVarP(&opts.Cleanup, "cleanup", "", false, "Clean the filesystem at the end")
RootCmd.PersistentFlags().DurationVarP(&opts.CacheTTL, "cache-ttl", "", time.Hour*336, "Cache timeout in hours. Defaults to two weeks.")
RootCmd.PersistentFlags().VarP(&opts.InsecureRegistries, "insecure-registry", "", "Insecure registry using plain HTTP to push and pull. Set it repeatedly for multiple registries.")
RootCmd.PersistentFlags().VarP(&opts.SkipTLSVerifyRegistries, "skip-tls-verify-registry", "", "Insecure registry ignoring TLS verify to push and pull. Set it repeatedly for multiple registries.")
opts.RegistriesCertificates = make(map[string]string)
RootCmd.PersistentFlags().VarP(&opts.RegistriesCertificates, "registry-certificate", "", "Use the provided certificate for TLS communication with the given registry. Expected format is 'my.registry.url=/path/to/the/server/certificate'.")
RootCmd.PersistentFlags().VarP(&opts.RegistryMirrors, "registry-mirror", "", "Registry mirror to use as pull-through cache instead of docker.io. Set it repeatedly for multiple mirrors.")
RootCmd.PersistentFlags().BoolVarP(&opts.IgnoreVarRun, "ignore-var-run", "", true, "Ignore /var/run directory when taking image snapshot. Set it to false to preserve /var/run/ in destination image. (Default true).")
RootCmd.PersistentFlags().VarP(&opts.Labels, "label", "", "Set metadata for an image. Set it repeatedly for multiple labels.")
RootCmd.PersistentFlags().BoolVarP(&opts.SkipUnusedStages, "skip-unused-stages", "", false, "Build only used stages if defined to true. Otherwise it builds by default all stages, even the unnecessary ones until it reaches the target stage / end of Dockerfile")
RootCmd.PersistentFlags().BoolVarP(&opts.RunV2, "use-new-run", "", false, "Use the experimental run implementation for detecting changes without requiring file system snapshots.")
RootCmd.PersistentFlags().Var(&opts.Git, "git", "Branch to clone if build context is a git repository")
RootCmd.PersistentFlags().BoolVarP(&opts.CacheCopyLayers, "cache-copy-layers", "", false, "Caches copy layers")
RootCmd.PersistentFlags().VarP(&opts.IgnorePaths, "ignore-path", "", "Ignore these paths when taking a snapshot. Set it repeatedly for multiple paths.")
}
// addHiddenFlags marks certain flags as hidden from the executor help text
func addHiddenFlags(cmd *cobra.Command) {
// This flag is added in a vendored directory, hide so that it doesn't come up via --help
pflag.CommandLine.MarkHidden("azure-container-registry-config")
// Hide this flag as we want to encourage people to use the --context flag instead
cmd.PersistentFlags().MarkHidden("bucket")
}
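// checkContained reports whether the executor appears to be running inside a container runtime.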
func checkContained() bool {
return proc.GetContainerRuntime(0, 0) != proc.RuntimeNotFound
}
// cacheFlagsValid makes sure the flags passed in related to caching are valid
func cacheFlagsValid() error {
if !opts.Cache {
return nil
}
// If --cache=true and --no-push=true, then cache repo must be provided
// since cache can't be inferred from destination
if opts.CacheRepo == "" && opts.NoPush {
return errors.New("if using cache with --no-push, specify cache repo with --cache-repo")
}
return nil
}
// resolveDockerfilePath resolves the Dockerfile path to an absolute path
func resolveDockerfilePath() error {
if isURL(opts.DockerfilePath) {
return nil
}
if util.FilepathExists(opts.DockerfilePath) {
abs, err := filepath.Abs(opts.DockerfilePath)
if err != nil {
return errors.Wrap(err, "getting absolute path for dockerfile")
}
opts.DockerfilePath = abs
return copyDockerfile()
}
// Otherwise, check if the path relative to the build context exists
if util.FilepathExists(filepath.Join(opts.SrcContext, opts.DockerfilePath)) {
abs, err := filepath.Abs(filepath.Join(opts.SrcContext, opts.DockerfilePath))
if err != nil {
return errors.Wrap(err, "getting absolute path for src context/dockerfile path")
}
opts.DockerfilePath = abs
return copyDockerfile()
}
return errors.New("please provide a valid path to a Dockerfile within the build context with --dockerfile")
}
// resolveEnvironmentBuildArgs replace build args without value by the same named environment variable
func resolveEnvironmentBuildArgs(arguments []string, resolver func(string) string) {
for index, argument := range arguments {
i := strings.Index(argument, "=")
if i < 0 {
value := resolver(argument)
arguments[index] = fmt.Sprintf("%s=%s", argument, value)
}
}
}
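// For example (illustrative values): with HTTP_PROXY=http://proxy:3128 in the environment,
// resolveEnvironmentBuildArgs([]string{"HTTP_PROXY", "FOO=bar"}, os.Getenv) rewrites the slice
// in place to []string{"HTTP_PROXY=http://proxy:3128", "FOO=bar"}.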
// copy Dockerfile to /kaniko/Dockerfile so that if it's specified in the .dockerignore
// it won't be copied into the image
func copyDockerfile() error {
if _, err := util.CopyFile(opts.DockerfilePath, constants.DockerfilePath, util.FileContext{}, util.DoNotChangeUID, util.DoNotChangeGID); err != nil {
return errors.Wrap(err, "copying dockerfile")
}
dockerignorePath := opts.DockerfilePath + ".dockerignore"
if util.FilepathExists(dockerignorePath) {
if _, err := util.CopyFile(dockerignorePath, constants.DockerfilePath+".dockerignore", util.FileContext{}, util.DoNotChangeUID, util.DoNotChangeGID); err != nil {
return errors.Wrap(err, "copying Dockerfile.dockerignore")
}
}
opts.DockerfilePath = constants.DockerfilePath
return nil
}
// resolveSourceContext unpacks the source context if it is a tar in a bucket or in kaniko container
// it resets srcContext to be the path to the unpacked build context within the image
func resolveSourceContext() error {
if opts.SrcContext == "" && opts.Bucket == "" {
return errors.New("please specify a path to the build context with the --context flag or a bucket with the --bucket flag")
}
if opts.SrcContext != "" && !strings.Contains(opts.SrcContext, "://") {
return nil
}
if opts.Bucket != "" {
if !strings.Contains(opts.Bucket, "://") {
// if no prefix use Google Cloud Storage as default for backwards compatibility
opts.SrcContext = constants.GCSBuildContextPrefix + opts.Bucket
} else {
opts.SrcContext = opts.Bucket
}
}
contextExecutor, err := buildcontext.GetBuildContext(opts.SrcContext, buildcontext.BuildOptions{
GitBranch: opts.Git.Branch,
GitSingleBranch: opts.Git.SingleBranch,
GitRecurseSubmodules: opts.Git.RecurseSubmodules,
})
if err != nil {
return err
}
logrus.Debugf("Getting source context from %s", opts.SrcContext)
opts.SrcContext, err = contextExecutor.UnpackTarFromBuildContext()
if err != nil {
return err
}
if ctxSubPath != "" {
opts.SrcContext = filepath.Join(opts.SrcContext, ctxSubPath)
if _, err := os.Stat(opts.SrcContext); os.IsNotExist(err) {
return err
}
}
logrus.Debugf("Build context located at %s", opts.SrcContext)
return nil
}
func resolveRelativePaths() error {
optsPaths := []*string{
&opts.DockerfilePath,
&opts.SrcContext,
&opts.CacheDir,
&opts.TarPath,
&opts.DigestFile,
&opts.ImageNameDigestFile,
&opts.ImageNameTagDigestFile,
}
for _, p := range optsPaths {
if path := *p; shdSkip(path) {
logrus.Debugf("Skip resolving path %s", path)
continue
}
// Resolve relative path to absolute path
var err error
relp := *p // save original relative path
if *p, err = filepath.Abs(*p); err != nil {
return errors.Wrapf(err, "Couldn't resolve relative path %s to an absolute path", *p)
}
logrus.Debugf("Resolved relative path %s to %s", relp, *p)
}
return nil
}
func exit(err error) {
var execErr *exec.ExitError
if errors.As(err, &execErr) {
// if there is an exit code propagate it
exitWithCode(err, execErr.ExitCode())
}
// otherwise exit with catch all 1
exitWithCode(err, 1)
}
// exitWithCode exits with the given error and exit code.
func exitWithCode(err error, exitCode int) {
fmt.Println(err)
os.Exit(exitCode)
}
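// isURL reports whether the given path starts with an http:// or https:// scheme.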
func isURL(path string) bool {
if match, _ := regexp.MatchString("^https?://", path); match {
return true
}
return false
}
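// shdSkip reports whether path resolution should be skipped: empty paths, URLs and
// already-absolute paths are left untouched.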
func shdSkip(path string) bool {
return path == "" || isURL(path) || filepath.IsAbs(path)
}
| ["\"BENCHMARK_FILE\""] | [] | ["BENCHMARK_FILE"] | [] | ["BENCHMARK_FILE"] | go | 1 | 0 |
main.go | /*
* [2013] - [2018] Avi Networks Incorporated
* All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"flag"
"os"
"github.com/avinetworks/servicemesh/pkg/k8s"
"github.com/avinetworks/servicemesh/pkg/mcp"
"github.com/avinetworks/servicemesh/pkg/utils"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
var (
masterURL string
kubeconfig string
)
func main() {
flag.Parse()
flag.Lookup("logtostderr").Value.Set("true")
// set up signals so we handle the first shutdown signal gracefully
stopCh := utils.SetupSignalHandler()
kubeCluster := false
// Check if we are running inside kubernetes. Hence try authenticating with service token
cfg, err := rest.InClusterConfig()
if err != nil {
utils.AviLog.Warning.Printf("We are not running inside a kubernetes cluster. %s", err.Error())
} else {
// TODO (sudswas): Remove the hard coding later.
istioEnabled := "False"
istioEnabled = os.Getenv("ISTIO_ENABLED")
if istioEnabled == "True" {
stop := make(chan struct{})
mcpServers := []string{"mcp://istio-galley.istio-system.svc:9901"}
mcpClient := mcp.MCPClient{MCPServerAddrs: mcpServers}
_ = mcpClient.InitMCPClient()
// TODO (sudswas): Need to handle the stop signal
mcpClient.Start(stop)
}
utils.AviLog.Info.Println("We are running inside a kubernetes cluster. Won't use kubeconfig files.")
kubeCluster = true
}
if kubeCluster == false {
cfg, err = clientcmd.BuildConfigFromFlags(masterURL, kubeconfig)
if err != nil {
utils.AviLog.Error.Fatalf("Error building kubeconfig: %s", err.Error())
}
}
kubeClient, err := kubernetes.NewForConfig(cfg)
if err != nil {
utils.AviLog.Error.Fatalf("Error building kubernetes clientset: %s", err.Error())
}
informers := NewInformers(kubeClient)
avi_obj_cache := utils.NewAviObjCache()
// TODO get API endpoint/username/password from configmap and track configmap
// for changes and update rest client
ctrlUsername := os.Getenv("CTRL_USERNAME")
ctrlPassword := os.Getenv("CTRL_PASSWORD")
ctrlIpAddress := os.Getenv("CTRL_IPADDRESS")
if ctrlUsername == "" || ctrlPassword == "" || ctrlIpAddress == "" {
utils.AviLog.Error.Panic("AVI controller information missing. Update them in kubernetes secret or via environment variables.")
}
avi_rest_client_pool, err := utils.NewAviRestClientPool(utils.NumWorkers,
ctrlIpAddress, ctrlUsername, ctrlPassword)
k8s_ep := k8s.NewK8sEp(avi_obj_cache, avi_rest_client_pool, informers)
//k8s_ingr = kube.NewK8sIng(avi_obj_cache, avi_rest_client_pool, informers, k8s_ep)
k8s_svc := k8s.NewK8sSvc(avi_obj_cache, avi_rest_client_pool, informers, k8s_ep)
c := NewAviController(utils.NumWorkers, informers, kubeClient, k8s_ep, k8s_svc)
c.Start(stopCh)
c.Run(stopCh)
}
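// init registers the kubeconfig and master flags; the default kubeconfig path is derived from the HOME environment variable.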
func init() {
def_kube_config := os.Getenv("HOME") + "/.kube/config"
flag.StringVar(&kubeconfig, "kubeconfig", def_kube_config, "Path to a kubeconfig. Only required if out-of-cluster.")
flag.StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
}
| ["\"ISTIO_ENABLED\"", "\"CTRL_USERNAME\"", "\"CTRL_PASSWORD\"", "\"CTRL_IPADDRESS\"", "\"HOME\""] | [] | ["CTRL_USERNAME", "CTRL_IPADDRESS", "CTRL_PASSWORD", "ISTIO_ENABLED", "HOME"] | [] | ["CTRL_USERNAME", "CTRL_IPADDRESS", "CTRL_PASSWORD", "ISTIO_ENABLED", "HOME"] | go | 5 | 0 | |
config.go | // Copyright (c) 2013-2017 The btcsuite developers
// Copyright (c) 2015-2019 The Decred developers
// Copyright (C) 2015-2020 The Lightning Network Developers
package dcrlnd
import (
"errors"
"fmt"
"io/ioutil"
"net"
"os"
"os/user"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"time"
"github.com/decred/dcrd/dcrutil/v3"
"github.com/decred/dcrlnd/autopilot"
"github.com/decred/dcrlnd/build"
"github.com/decred/dcrlnd/chanbackup"
"github.com/decred/dcrlnd/channeldb"
"github.com/decred/dcrlnd/discovery"
"github.com/decred/dcrlnd/htlcswitch"
"github.com/decred/dcrlnd/htlcswitch/hodl"
"github.com/decred/dcrlnd/input"
"github.com/decred/dcrlnd/lncfg"
"github.com/decred/dcrlnd/lnrpc/routerrpc"
"github.com/decred/dcrlnd/lnrpc/signrpc"
"github.com/decred/dcrlnd/lnwire"
"github.com/decred/dcrlnd/routing"
"github.com/decred/dcrlnd/tor"
flags "github.com/jessevdk/go-flags"
)
const (
defaultDataDirname = "data"
defaultChainSubDirname = "chain"
defaultGraphSubDirname = "graph"
defaultTowerSubDirname = "watchtower"
defaultTLSCertFilename = "tls.cert"
defaultTLSKeyFilename = "tls.key"
defaultAdminMacFilename = "admin.macaroon"
defaultReadMacFilename = "readonly.macaroon"
defaultInvoiceMacFilename = "invoice.macaroon"
defaultLogLevel = "info"
defaultLogDirname = "logs"
defaultLogFilename = "lnd.log"
defaultRPCPort = 10009
defaultRESTPort = 8080
defaultPeerPort = 9735
defaultRPCHost = "localhost"
defaultNoSeedBackup = false
defaultPaymentsExpirationGracePeriod = time.Duration(0)
defaultTrickleDelay = 90 * 1000
defaultChanStatusSampleInterval = time.Minute
defaultChanEnableTimeout = 19 * time.Minute
defaultChanDisableTimeout = 20 * time.Minute
defaultHeightHintCacheQueryDisable = false
defaultMaxLogFiles = 3
defaultMaxLogFileSize = 10
defaultMinBackoff = time.Second
defaultMaxBackoff = time.Hour
defaultTorSOCKSPort = 9050
defaultTorDNSHost = "soa.nodes.lightning.directory"
defaultTorDNSPort = 53
defaultTorControlPort = 9051
defaultTorV2PrivateKeyFilename = "v2_onion_private_key"
defaultTorV3PrivateKeyFilename = "v3_onion_private_key"
// minTimeLockDelta is the minimum timelock we require for incoming
// HTLCs on our channels.
minTimeLockDelta = routing.MinCLTVDelta
// defaultAcceptorTimeout is the time after which an RPCAcceptor will time
// out and return false if it hasn't yet received a response.
defaultAcceptorTimeout = 15 * time.Second
defaultAlias = ""
defaultColor = "#3399FF"
// defaultHostSampleInterval is the default amount of time that the
// HostAnnouncer will wait between DNS resolutions to check if the
// backing IP of a host has changed.
defaultHostSampleInterval = time.Minute * 5
// defaultRemoteMaxHtlcs specifies the default limit for maximum
// concurrent HTLCs the remote party may add to commitment transactions.
// This value can be overridden with --default-remote-max-htlcs.
defaultRemoteMaxHtlcs = input.MaxHTLCNumber / 2
defaultChainInterval = time.Minute
defaultChainTimeout = time.Second * 10
defaultChainBackoff = time.Second * 30
defaultChainAttempts = 0
// Set defaults for a health check which ensures that we have space
// available on disk. Although this check is off by default so that we
// avoid breaking any existing setups (particularly on mobile), we still
// set the other default values so that the health check can be easily
// enabled with sane defaults.
defaultRequiredDisk = 0.1
defaultDiskInterval = time.Hour * 12
defaultDiskTimeout = time.Second * 5
defaultDiskBackoff = time.Minute
defaultDiskAttempts = 0
)
var (
// DefaultLndDir is the default directory where lnd tries to find its
// configuration file and store its data. This is a directory in the
// user's application data, for example:
// C:\Users\<username>\AppData\Local\Lnd on Windows ~/.lnd on Linux
// ~/Library/Application Support/Lnd on MacOS
DefaultLndDir = dcrutil.AppDataDir("dcrlnd", false)
// DefaultConfigFile is the default full path of lnd's configuration
// file.
DefaultConfigFile = filepath.Join(DefaultLndDir, lncfg.DefaultConfigFilename)
defaultDataDir = filepath.Join(DefaultLndDir, defaultDataDirname)
defaultLogDir = filepath.Join(DefaultLndDir, defaultLogDirname)
defaultTowerDir = filepath.Join(defaultDataDir, defaultTowerSubDirname)
defaultTLSCertPath = filepath.Join(DefaultLndDir, defaultTLSCertFilename)
defaultTLSKeyPath = filepath.Join(DefaultLndDir, defaultTLSKeyFilename)
defaultDcrdDir = dcrutil.AppDataDir("dcrd", false)
defaultDcrdRPCCertFile = filepath.Join(defaultDcrdDir, "rpc.cert")
defaultTorSOCKS = net.JoinHostPort("localhost", strconv.Itoa(defaultTorSOCKSPort))
defaultTorDNS = net.JoinHostPort(defaultTorDNSHost, strconv.Itoa(defaultTorDNSPort))
defaultTorControl = net.JoinHostPort("localhost", strconv.Itoa(defaultTorControlPort))
defaultSphinxDbName = "sphinxreplay.db"
)
// Config defines the configuration options for lnd.
//
// See LoadConfig for further details regarding the configuration
// loading+parsing process.
type Config struct {
ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"`
LndDir string `long:"lnddir" description:"The base directory that contains lnd's data, logs, configuration file, etc."`
ConfigFile string `short:"C" long:"configfile" description:"Path to configuration file"`
DataDir string `short:"b" long:"datadir" description:"The directory to store lnd's data within"`
SyncFreelist bool `long:"sync-freelist" description:"Whether the databases used within lnd should sync their freelist to disk. This is disabled by default resulting in improved memory performance during operation, but with an increase in startup time."`
TLSCertPath string `long:"tlscertpath" description:"Path to write the TLS certificate for lnd's RPC and REST services"`
TLSKeyPath string `long:"tlskeypath" description:"Path to write the TLS private key for lnd's RPC and REST services"`
TLSExtraIPs []string `long:"tlsextraip" description:"Adds an extra ip to the generated certificate"`
TLSExtraDomains []string `long:"tlsextradomain" description:"Adds an extra domain to the generated certificate"`
TLSAutoRefresh bool `long:"tlsautorefresh" description:"Re-generate TLS certificate and key if the IPs or domains are changed"`
TLSDisableAutofill bool `long:"tlsdisableautofill" description:"Do not include the interface IPs or the system hostname in TLS certificate, use first --tlsextradomain as Common Name instead, if set"`
NoMacaroons bool `long:"no-macaroons" description:"Disable macaroon authentication"`
AdminMacPath string `long:"adminmacaroonpath" description:"Path to write the admin macaroon for lnd's RPC and REST services if it doesn't exist"`
ReadMacPath string `long:"readonlymacaroonpath" description:"Path to write the read-only macaroon for lnd's RPC and REST services if it doesn't exist"`
InvoiceMacPath string `long:"invoicemacaroonpath" description:"Path to the invoice-only macaroon for lnd's RPC and REST services if it doesn't exist"`
LogDir string `long:"logdir" description:"Directory to log output."`
MaxLogFiles int `long:"maxlogfiles" description:"Maximum logfiles to keep (0 for no rotation)"`
MaxLogFileSize int `long:"maxlogfilesize" description:"Maximum logfile size in MB"`
AcceptorTimeout time.Duration `long:"acceptortimeout" description:"Time after which an RPCAcceptor will time out and return false if it hasn't yet received a response"`
// We'll parse these 'raw' string arguments into real net.Addrs in the
// loadConfig function. We need to expose the 'raw' strings so the
// command line library can access them.
// Only the parsed net.Addrs should be used!
RawRPCListeners []string `long:"rpclisten" description:"Add an interface/port/socket to listen for RPC connections"`
RawRESTListeners []string `long:"restlisten" description:"Add an interface/port/socket to listen for REST connections"`
RawListeners []string `long:"listen" description:"Add an interface/port to listen for peer connections"`
RawExternalIPs []string `long:"externalip" description:"Add an ip:port to the list of local addresses we claim to listen on to peers. If a port is not specified, the default (9735) will be used regardless of other parameters"`
ExternalHosts []string `long:"externalhosts" description:"A set of hosts that should be periodically resolved to announce IPs for"`
RPCListeners []net.Addr
RESTListeners []net.Addr
RestCORS []string `long:"restcors" description:"Add an ip:port/hostname to allow cross origin access from. To allow all origins, set as \"*\"."`
Listeners []net.Addr
ExternalIPs []net.Addr
DisableListen bool `long:"nolisten" description:"Disable listening for incoming peer connections"`
DisableRest bool `long:"norest" description:"Disable REST API"`
NAT bool `long:"nat" description:"Toggle NAT traversal support (using either UPnP or NAT-PMP) to automatically advertise your external IP address to the network -- NOTE this does not support devices behind multiple NATs"`
MinBackoff time.Duration `long:"minbackoff" description:"Shortest backoff when reconnecting to persistent peers. Valid time units are {s, m, h}."`
MaxBackoff time.Duration `long:"maxbackoff" description:"Longest backoff when reconnecting to persistent peers. Valid time units are {s, m, h}."`
DebugLevel string `short:"d" long:"debuglevel" description:"Logging level for all subsystems {trace, debug, info, warn, error, critical} -- You may also specify <subsystem>=<level>,<subsystem2>=<level>,... to set the log level for individual subsystems -- Use show to list available subsystems"`
CPUProfile string `long:"cpuprofile" description:"Write CPU profile to the specified file"`
Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65535"`
UnsafeDisconnect bool `long:"unsafe-disconnect" description:"DEPRECATED: Allows the rpcserver to intentionally disconnect from peers with open channels. THIS FLAG WILL BE REMOVED IN THE FUTURE"`
UnsafeReplay bool `long:"unsafe-replay" description:"Causes a link to replay the adds on its commitment txn after starting up, this enables testing of the sphinx replay logic."`
MaxPendingChannels int `long:"maxpendingchannels" description:"The maximum number of incoming pending channels permitted per peer."`
BackupFilePath string `long:"backupfilepath" description:"The target location of the channel backup file"`
ChainDir string `long:"chaindir" description:"The directory to store the chain's data within."`
Node string `long:"node" description:"The blockchain interface to use." choice:"dcrd" choice:"dcrw"`
TestNet3 bool `long:"testnet" description:"Use the test network"`
SimNet bool `long:"simnet" description:"Use the simulation test network"`
RegTest bool `long:"regtest" description:"Use the regression test network"`
DefaultNumChanConfs int `long:"defaultchanconfs" description:"The default number of confirmations a channel must have before it's considered open. If this is not set, we will scale the value according to the channel size."`
DefaultRemoteDelay int `long:"defaultremotedelay" description:"The default number of blocks we will require our channel counterparty to wait before accessing its funds in case of unilateral close. If this is not set, we will scale the value according to the channel size."`
MinHTLCIn lnwire.MilliAtom `long:"minhtlc" description:"The smallest HTLC we are willing to accept on our channels, in MilliAtoms"`
MinHTLCOut lnwire.MilliAtom `long:"minhtlcout" description:"The smallest HTLC we are willing to send out on our channels, in MilliAtoms"`
BaseFee lnwire.MilliAtom `long:"basefee" description:"The base fee in MilliAtom we will charge for forwarding payments on our channels"`
FeeRate lnwire.MilliAtom `long:"feerate" description:"The fee rate used when forwarding payments on our channels. The total fee charged is basefee + (amount * feerate / 1000000), where amount is the forwarded amount."`
TimeLockDelta uint32 `long:"timelockdelta" description:"The CLTV delta we will subtract from a forwarded HTLC's timelock value"`
DcrdMode *lncfg.DcrdConfig `group:"dcrd" namespace:"dcrd"`
Dcrwallet *lncfg.DcrwalletConfig `group:"dcrwallet" namespace:"dcrwallet"`
Autopilot *lncfg.AutoPilot `group:"Autopilot" namespace:"autopilot"`
Tor *lncfg.Tor `group:"Tor" namespace:"tor"`
SubRPCServers *subRPCServerConfigs `group:"subrpc"`
Hodl *hodl.Config `group:"hodl" namespace:"hodl"`
NoNetBootstrap bool `long:"nobootstrap" description:"If true, then automatic network bootstrapping will not be attempted."`
NoSeedBackup bool `long:"noseedbackup" description:"If true, NO SEED WILL BE EXPOSED -- EVER, AND THE WALLET WILL BE ENCRYPTED USING THE DEFAULT PASSPHRASE. THIS FLAG IS ONLY FOR TESTING AND SHOULD NEVER BE USED ON MAINNET."`
PaymentsExpirationGracePeriod time.Duration `long:"payments-expiration-grace-period" description:"A period to wait before force closing channels with outgoing htlcs that have timed-out and are a result of this node initiated payments."`
TrickleDelay int `long:"trickledelay" description:"Time in milliseconds between each release of announcements to the network"`
ChanEnableTimeout time.Duration `long:"chan-enable-timeout" description:"The duration that a peer connection must be stable before attempting to send a channel update to reenable or cancel a pending disables of the peer's channels on the network."`
ChanDisableTimeout time.Duration `long:"chan-disable-timeout" description:"The duration that must elapse after first detecting that an already active channel is actually inactive and sending channel update disabling it to the network. The pending disable can be canceled if the peer reconnects and becomes stable for chan-enable-timeout before the disable update is sent."`
ChanStatusSampleInterval time.Duration `long:"chan-status-sample-interval" description:"The polling interval between attempts to detect if an active channel has become inactive due to its peer going offline."`
HeightHintCacheQueryDisable bool `long:"height-hint-cache-query-disable" description:"Disable queries from the height-hint cache to try to recover channels stuck in the pending close state. Disabling height hint queries may cause longer chain rescans, resulting in a performance hit. Unset this after channels are unstuck so you can get better performance again."`
Alias string `long:"alias" description:"The node alias. Used as a moniker by peers and intelligence services"`
Color string `long:"color" description:"The color of the node in hex format (i.e. '#3399FF'). Used to customize node appearance in intelligence services"`
MinChanSize int64 `long:"minchansize" description:"The smallest channel size (in atoms) that we should accept. Incoming channels smaller than this will be rejected"`
MaxChanSize int64 `long:"maxchansize" description:"The largest channel size (in atoms) that we should accept. Incoming channels larger than this will be rejected"`
DefaultRemoteMaxHtlcs uint16 `long:"default-remote-max-htlcs" description:"The default max_htlc applied when opening or accepting channels. This value limits the number of concurrent HTLCs that the remote party can add to the commitment. The maximum possible value is 300."`
NumGraphSyncPeers int `long:"numgraphsyncpeers" description:"The number of peers that we should receive new graph updates from. This option can be tuned to save bandwidth for light clients or routing nodes."`
HistoricalSyncInterval time.Duration `long:"historicalsyncinterval" description:"The polling interval between historical graph sync attempts. Each historical graph sync attempt ensures we reconcile with the remote peer's graph from the genesis block."`
IgnoreHistoricalGossipFilters bool `long:"ignore-historical-gossip-filters" description:"If true, will not reply with historical data that matches the range specified by a remote peer's gossip_timestamp_filter. Doing so will result in lower memory and bandwidth requirements."`
RejectPush bool `long:"rejectpush" description:"If true, lnd will not accept channel opening requests with non-zero push amounts. This should prevent accidental pushes to merchant nodes."`
RejectHTLC bool `long:"rejecthtlc" description:"If true, lnd will not forward any HTLCs that are meant as onward payments. This option will still allow lnd to send HTLCs and receive HTLCs but lnd won't be used as a hop."`
StaggerInitialReconnect bool `long:"stagger-initial-reconnect" description:"If true, will apply a randomized staggering between 0s and 30s when reconnecting to persistent peers on startup. The first 10 reconnections will be attempted instantly, regardless of the flag's value"`
MaxOutgoingCltvExpiry uint32 `long:"max-cltv-expiry" description:"The maximum number of blocks funds could be locked up for when forwarding payments."`
MaxChannelFeeAllocation float64 `long:"max-channel-fee-allocation" description:"The maximum percentage of total funds that can be allocated to a channel's commitment fee. This only applies for the initiator of the channel. Valid values are within [0.1, 1]."`
DryRunMigration bool `long:"dry-run-migration" description:"If true, lnd will abort committing a migration if it would otherwise have been successful. This leaves the database unmodified, and still compatible with the previously active version of lnd."`
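// net is the tor.Net implementation used for outbound connections and
// address resolution. It defaults to the clear net and is replaced with
// a Tor proxy when Tor is active.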
net tor.Net
EnableUpfrontShutdown bool `long:"enable-upfront-shutdown" description:"If true, option upfront shutdown script will be enabled. If peers that we open channels with support this feature, we will automatically set the script to which cooperative closes should be paid out to on channel open. This offers the partial protection of a channel peer disconnecting from us if cooperative close is attempted with a different script."`
AcceptKeySend bool `long:"accept-keysend" description:"If true, spontaneous payments through keysend will be accepted. [experimental]"`
KeysendHoldTime time.Duration `long:"keysend-hold-time" description:"If non-zero, keysend payments are accepted but not immediately settled. If the payment isn't settled manually after the specified time, it is canceled automatically. [experimental]"`
Routing *routing.Conf `group:"routing" namespace:"routing"`
Workers *lncfg.Workers `group:"workers" namespace:"workers"`
Caches *lncfg.Caches `group:"caches" namespace:"caches"`
Prometheus lncfg.Prometheus `group:"prometheus" namespace:"prometheus"`
WtClient *lncfg.WtClient `group:"wtclient" namespace:"wtclient"`
Watchtower *lncfg.Watchtower `group:"watchtower" namespace:"watchtower"`
ProtocolOptions *lncfg.ProtocolOptions `group:"protocol" namespace:"protocol"`
AllowCircularRoute bool `long:"allow-circular-route" description:"If true, our node will allow htlc forwards that arrive and depart on the same channel."`
HealthChecks *lncfg.HealthCheckConfig `group:"healthcheck" namespace:"healthcheck"`
DB *lncfg.DB `group:"db" namespace:"db"`
// LogWriter is the root logger that all of the daemon's subloggers are
// hooked up to.
LogWriter *build.RotatingLogWriter
// registeredChains keeps track of all chains that have been registered
// with the daemon.
registeredChains *chainRegistry
// networkDir is the path to the directory of the currently active
// network. This path will hold the files related to each different
// network.
networkDir string
}
// DefaultConfig returns all default values for the Config struct.
func DefaultConfig() Config {
return Config{
LndDir: DefaultLndDir,
ConfigFile: DefaultConfigFile,
DataDir: defaultDataDir,
DebugLevel: defaultLogLevel,
TLSCertPath: defaultTLSCertPath,
TLSKeyPath: defaultTLSKeyPath,
LogDir: defaultLogDir,
MaxLogFiles: defaultMaxLogFiles,
MaxLogFileSize: defaultMaxLogFileSize,
AcceptorTimeout: defaultAcceptorTimeout,
MinHTLCIn: defaultDecredMinHTLCInMAtoms,
MinHTLCOut: defaultDecredMinHTLCOutMAtoms,
BaseFee: DefaultDecredBaseFeeMAtoms,
FeeRate: DefaultDecredFeeRate,
TimeLockDelta: DefaultDecredTimeLockDelta,
Node: "dcrd",
DcrdMode: &lncfg.DcrdConfig{
Dir: defaultDcrdDir,
RPCHost: defaultRPCHost,
RPCCert: defaultDcrdRPCCertFile,
},
Dcrwallet: &lncfg.DcrwalletConfig{},
UnsafeDisconnect: true,
MaxPendingChannels: lncfg.DefaultMaxPendingChannels,
NoSeedBackup: defaultNoSeedBackup,
MinBackoff: defaultMinBackoff,
MaxBackoff: defaultMaxBackoff,
SubRPCServers: &subRPCServerConfigs{
SignRPC: &signrpc.Config{},
RouterRPC: routerrpc.DefaultConfig(),
},
Autopilot: &lncfg.AutoPilot{
MaxChannels: 5,
Allocation: 0.6,
MinChannelSize: int64(minChanFundingSize),
MaxChannelSize: int64(MaxFundingAmount),
MinConfs: 1,
ConfTarget: autopilot.DefaultConfTarget,
Heuristic: map[string]float64{
"preferential": 1.0,
},
},
PaymentsExpirationGracePeriod: defaultPaymentsExpirationGracePeriod,
TrickleDelay: defaultTrickleDelay,
ChanStatusSampleInterval: defaultChanStatusSampleInterval,
ChanEnableTimeout: defaultChanEnableTimeout,
ChanDisableTimeout: defaultChanDisableTimeout,
HeightHintCacheQueryDisable: defaultHeightHintCacheQueryDisable,
Alias: defaultAlias,
Color: defaultColor,
MinChanSize: int64(minChanFundingSize),
MaxChanSize: int64(0),
DefaultRemoteMaxHtlcs: defaultRemoteMaxHtlcs,
NumGraphSyncPeers: defaultMinPeers,
HistoricalSyncInterval: discovery.DefaultHistoricalSyncInterval,
Tor: &lncfg.Tor{
SOCKS: defaultTorSOCKS,
DNS: defaultTorDNS,
Control: defaultTorControl,
},
net: &tor.ClearNet{},
Workers: &lncfg.Workers{
Read: lncfg.DefaultReadWorkers,
Write: lncfg.DefaultWriteWorkers,
Sig: lncfg.DefaultSigWorkers,
},
Caches: &lncfg.Caches{
RejectCacheSize: channeldb.DefaultRejectCacheSize,
ChannelCacheSize: channeldb.DefaultChannelCacheSize,
},
Prometheus: lncfg.DefaultPrometheus(),
Watchtower: &lncfg.Watchtower{
TowerDir: defaultTowerDir,
},
HealthChecks: &lncfg.HealthCheckConfig{
ChainCheck: &lncfg.CheckConfig{
Interval: defaultChainInterval,
Timeout: defaultChainTimeout,
Attempts: defaultChainAttempts,
Backoff: defaultChainBackoff,
},
DiskCheck: &lncfg.DiskCheckConfig{
RequiredRemaining: defaultRequiredDisk,
CheckConfig: &lncfg.CheckConfig{
Interval: defaultDiskInterval,
Attempts: defaultDiskAttempts,
Timeout: defaultDiskTimeout,
Backoff: defaultDiskBackoff,
},
},
},
MaxOutgoingCltvExpiry: htlcswitch.DefaultMaxOutgoingCltvExpiry,
MaxChannelFeeAllocation: htlcswitch.DefaultMaxLinkFeeAllocation,
LogWriter: build.NewRotatingLogWriter(),
DB: lncfg.DefaultDB(),
registeredChains: newChainRegistry(),
}
}
// LoadConfig initializes and parses the config using a config file and command
// line options.
//
// The configuration proceeds as follows:
// 1) Start with a default config with sane settings
// 2) Pre-parse the command line to check for an alternative config file
// 3) Load configuration file overwriting defaults with any specified options
// 4) Parse CLI options and overwrite/add any specified options
func LoadConfig() (*Config, error) {
// Pre-parse the command line options to pick up an alternative config
// file.
preCfg := DefaultConfig()
if _, err := flags.Parse(&preCfg); err != nil {
return nil, err
}
// Show the version and exit if the version flag was specified.
appName := filepath.Base(os.Args[0])
appName = strings.TrimSuffix(appName, filepath.Ext(appName))
usageMessage := fmt.Sprintf("Use %s -h to show usage", appName)
if preCfg.ShowVersion {
commit := build.SourceCommit()
if commit != "" {
commit = fmt.Sprintf("Commit %s; ", commit)
}
fmt.Printf("%s version %s (%sGo version %s %s/%s)\n",
appName, build.Version(), commit,
runtime.Version(), runtime.GOOS, runtime.GOARCH)
os.Exit(0)
}
// If the config file path has not been modified by the user, then we'll
// use the default config file path. However, if the user has modified
// their lnddir, then we should assume they intend to use the config
// file within it.
configFileDir := CleanAndExpandPath(preCfg.LndDir)
configFilePath := CleanAndExpandPath(preCfg.ConfigFile)
if configFileDir != DefaultLndDir {
if configFilePath == DefaultConfigFile {
configFilePath = filepath.Join(
configFileDir, lncfg.DefaultConfigFilename,
)
}
}
// Next, load any additional configuration options from the file.
var configFileError error
cfg := preCfg
if err := flags.IniParse(configFilePath, &cfg); err != nil {
// If it's a parsing related error, then we'll return
// immediately, otherwise we can proceed, as the config
// file may simply not exist, which is OK.
if _, ok := err.(*flags.IniError); ok {
return nil, err
}
configFileError = err
}
// Finally, parse the remaining command line options again to ensure
// they take precedence.
if _, err := flags.Parse(&cfg); err != nil {
return nil, err
}
// Make sure everything we just loaded makes sense.
cleanCfg, err := ValidateConfig(cfg, usageMessage)
if err != nil {
return nil, err
}
// Warn about missing config file only after all other configuration is
// done. This prevents the warning on help messages and invalid
// options. Note this should go directly before the return.
if configFileError != nil {
ltndLog.Warnf("%v", configFileError)
}
return cleanCfg, nil
}
// ValidateConfig checks the given configuration to make sure it is sane. This
// makes sure no illegal values or combinations of values are set. All file
// system paths are normalized. The cleaned up config is returned on success.
func ValidateConfig(cfg Config, usageMessage string) (*Config, error) {
// If the provided lnd directory is not the default, we'll modify the
// path to all of the files and directories that will live within it.
lndDir := CleanAndExpandPath(cfg.LndDir)
if lndDir != DefaultLndDir {
cfg.DataDir = filepath.Join(lndDir, defaultDataDirname)
cfg.TLSCertPath = filepath.Join(lndDir, defaultTLSCertFilename)
cfg.TLSKeyPath = filepath.Join(lndDir, defaultTLSKeyFilename)
cfg.LogDir = filepath.Join(lndDir, defaultLogDirname)
// If the watchtower's directory is set to the default, i.e. the
// user has not requested a different location, we'll move the
// location to be relative to the specified lnd directory.
if cfg.Watchtower.TowerDir == defaultTowerDir {
cfg.Watchtower.TowerDir =
filepath.Join(cfg.DataDir, defaultTowerSubDirname)
}
}
// Create the lnd directory if it doesn't already exist.
funcName := "loadConfig"
if err := os.MkdirAll(lndDir, 0700); err != nil {
// Show a nicer error message if it's because a symlink is
// linked to a directory that does not exist (probably because
// it's not mounted).
if e, ok := err.(*os.PathError); ok && os.IsExist(err) {
if link, lerr := os.Readlink(e.Path); lerr == nil {
str := "is symlink %s -> %s mounted?"
err = fmt.Errorf(str, e.Path, link)
}
}
str := "%s: Failed to create lnd directory: %v"
err := fmt.Errorf(str, funcName, err)
_, _ = fmt.Fprintln(os.Stderr, err)
return nil, err
}
// As soon as we're done parsing configuration options, ensure all paths
// to directories and files are cleaned and expanded before attempting
// to use them later on.
cfg.DataDir = CleanAndExpandPath(cfg.DataDir)
cfg.TLSCertPath = CleanAndExpandPath(cfg.TLSCertPath)
cfg.TLSKeyPath = CleanAndExpandPath(cfg.TLSKeyPath)
cfg.AdminMacPath = CleanAndExpandPath(cfg.AdminMacPath)
cfg.ReadMacPath = CleanAndExpandPath(cfg.ReadMacPath)
cfg.InvoiceMacPath = CleanAndExpandPath(cfg.InvoiceMacPath)
cfg.LogDir = CleanAndExpandPath(cfg.LogDir)
cfg.DcrdMode.Dir = CleanAndExpandPath(cfg.DcrdMode.Dir)
cfg.Tor.PrivateKeyPath = CleanAndExpandPath(cfg.Tor.PrivateKeyPath)
cfg.Tor.WatchtowerKeyPath = CleanAndExpandPath(cfg.Tor.WatchtowerKeyPath)
cfg.Watchtower.TowerDir = CleanAndExpandPath(cfg.Watchtower.TowerDir)
cfg.Dcrwallet.CertPath = CleanAndExpandPath(cfg.Dcrwallet.CertPath)
cfg.Dcrwallet.ClientKeyPath = CleanAndExpandPath(cfg.Dcrwallet.ClientKeyPath)
cfg.Dcrwallet.ClientCertPath = CleanAndExpandPath(cfg.Dcrwallet.ClientCertPath)
// Ensure that the user didn't attempt to specify negative values for
// any of the autopilot params.
if cfg.Autopilot.MaxChannels < 0 {
str := "%s: autopilot.maxchannels must be non-negative"
err := fmt.Errorf(str, funcName)
_, _ = fmt.Fprintln(os.Stderr, err)
return nil, err
}
if cfg.Autopilot.Allocation < 0 {
str := "%s: autopilot.allocation must be non-negative"
err := fmt.Errorf(str, funcName)
_, _ = fmt.Fprintln(os.Stderr, err)
return nil, err
}
if cfg.Autopilot.MinChannelSize < 0 {
str := "%s: autopilot.minchansize must be non-negative"
err := fmt.Errorf(str, funcName)
_, _ = fmt.Fprintln(os.Stderr, err)
return nil, err
}
if cfg.Autopilot.MaxChannelSize < 0 {
str := "%s: autopilot.maxchansize must be non-negative"
err := fmt.Errorf(str, funcName)
_, _ = fmt.Fprintln(os.Stderr, err)
return nil, err
}
if cfg.Autopilot.MinConfs < 0 {
str := "%s: autopilot.minconfs must be non-negative"
err := fmt.Errorf(str, funcName)
_, _ = fmt.Fprintln(os.Stderr, err)
return nil, err
}
if cfg.Autopilot.ConfTarget < 1 {
str := "%s: autopilot.conftarget must be positive"
err := fmt.Errorf(str, funcName)
_, _ = fmt.Fprintln(os.Stderr, err)
return nil, err
}
// Ensure that the specified values for the min and max channel size
// are within the bounds of the normal chan size constraints.
if cfg.Autopilot.MinChannelSize < int64(minChanFundingSize) {
cfg.Autopilot.MinChannelSize = int64(minChanFundingSize)
}
if cfg.Autopilot.MaxChannelSize > int64(MaxFundingAmount) {
cfg.Autopilot.MaxChannelSize = int64(MaxFundingAmount)
}
if _, err := validateAtplCfg(cfg.Autopilot); err != nil {
return nil, err
}
// Ensure that --maxchansize is properly handled when set by user. For
// non-Wumbo channels this limit remains 1073741823 atoms by default as
// specified in BOLT-02. For wumbo channels this limit is 500e8 atoms
// (500 DCR). Always enforce --maxchansize explicitly set by user. If
// unset (marked by 0 value), then enforce proper default.
if cfg.MaxChanSize == 0 {
if cfg.ProtocolOptions.Wumbo() {
cfg.MaxChanSize = int64(MaxDecredFundingAmountWumbo)
} else {
cfg.MaxChanSize = int64(MaxDecredFundingAmount)
}
}
// Ensure that the user specified values for the min and max channel
// size make sense.
if cfg.MaxChanSize < cfg.MinChanSize {
return nil, fmt.Errorf("invalid channel size parameters: "+
"max channel size %v, must be no less than min chan size %v",
cfg.MaxChanSize, cfg.MinChanSize,
)
}
// Don't allow a superfluous --maxchansize greater than the
// BOLT 02 soft-limit for non-wumbo channels.
if !cfg.ProtocolOptions.Wumbo() && cfg.MaxChanSize > int64(MaxFundingAmount) {
return nil, fmt.Errorf("invalid channel size parameters: "+
"maximum channel size %v is greater than maximum non-wumbo"+
" channel size %v",
cfg.MaxChanSize, MaxFundingAmount,
)
}
// Ensure a valid max channel fee allocation was set.
if cfg.MaxChannelFeeAllocation <= 0 || cfg.MaxChannelFeeAllocation > 1 {
return nil, fmt.Errorf("invalid max channel fee allocation: "+
"%v, must be within (0, 1]",
cfg.MaxChannelFeeAllocation)
}
// Validate the Tor config parameters.
socks, err := lncfg.ParseAddressString(
cfg.Tor.SOCKS, strconv.Itoa(defaultTorSOCKSPort),
cfg.net.ResolveTCPAddr,
)
if err != nil {
return nil, err
}
cfg.Tor.SOCKS = socks.String()
// We'll only attempt to normalize and resolve the DNS host if it has
// been changed from the default, as this doesn't need to be done for
// the default value.
if cfg.Tor.DNS != defaultTorDNS {
dns, err := lncfg.ParseAddressString(
cfg.Tor.DNS, strconv.Itoa(defaultTorDNSPort),
cfg.net.ResolveTCPAddr,
)
if err != nil {
return nil, err
}
cfg.Tor.DNS = dns.String()
}
control, err := lncfg.ParseAddressString(
cfg.Tor.Control, strconv.Itoa(defaultTorControlPort),
cfg.net.ResolveTCPAddr,
)
if err != nil {
return nil, err
}
cfg.Tor.Control = control.String()
// Ensure that tor socks host:port is not equal to tor control
// host:port. This would lead to lnd not starting up properly.
if cfg.Tor.SOCKS == cfg.Tor.Control {
str := "%s: tor.socks and tor.control can not use " +
"the same host:port"
return nil, fmt.Errorf(str, funcName)
}
switch {
case cfg.Tor.V2 && cfg.Tor.V3:
return nil, errors.New("either tor.v2 or tor.v3 can be set, " +
"but not both")
case cfg.DisableListen && (cfg.Tor.V2 || cfg.Tor.V3):
return nil, errors.New("listening must be enabled when " +
"enabling inbound connections over Tor")
}
if cfg.Tor.PrivateKeyPath == "" {
switch {
case cfg.Tor.V2:
cfg.Tor.PrivateKeyPath = filepath.Join(
lndDir, defaultTorV2PrivateKeyFilename,
)
case cfg.Tor.V3:
cfg.Tor.PrivateKeyPath = filepath.Join(
lndDir, defaultTorV3PrivateKeyFilename,
)
}
}
if cfg.Tor.WatchtowerKeyPath == "" {
switch {
case cfg.Tor.V2:
cfg.Tor.WatchtowerKeyPath = filepath.Join(
cfg.Watchtower.TowerDir, defaultTorV2PrivateKeyFilename,
)
case cfg.Tor.V3:
cfg.Tor.WatchtowerKeyPath = filepath.Join(
cfg.Watchtower.TowerDir, defaultTorV3PrivateKeyFilename,
)
}
}
// Set up the network-related functions that will be used throughout
// the daemon. We use the standard Go "net" package functions by
// default. If we should be proxying all traffic through Tor, then
// we'll use the Tor proxy specific functions in order to avoid leaking
// our real information.
if cfg.Tor.Active {
cfg.net = &tor.ProxyNet{
SOCKS: cfg.Tor.SOCKS,
DNS: cfg.Tor.DNS,
StreamIsolation: cfg.Tor.StreamIsolation,
}
}
if cfg.DisableListen && cfg.NAT {
return nil, errors.New("NAT traversal cannot be used when " +
"listening is disabled")
}
if cfg.NAT && len(cfg.ExternalHosts) != 0 {
return nil, errors.New("NAT support and externalhosts are " +
"mutually exclusive, only one should be selected")
}
// Multiple networks can't be selected simultaneously. Count number of
// network flags passed; assign active network params while we're at
// it.
numNets := 0
if cfg.TestNet3 {
numNets++
activeNetParams = decredTestNetParams
}
if cfg.RegTest {
numNets++
activeNetParams = regTestNetParams
}
if cfg.SimNet {
numNets++
activeNetParams = decredSimNetParams
}
if numNets > 1 {
str := "%s: The testnet, regtest, and simnet params" +
"can't be used together -- choose one of the three"
err := fmt.Errorf(str, funcName)
return nil, err
}
// We default to mainnet if none are specified.
if numNets == 0 {
activeNetParams = decredMainNetParams
}
if cfg.TimeLockDelta < minTimeLockDelta {
return nil, fmt.Errorf("timelockdelta must be at least %v",
minTimeLockDelta)
}
switch cfg.Node {
case "dcrd":
err := parseRPCParams(
cfg.DcrdMode, decredChain, cfg.SimNet,
cfg.Node, funcName,
)
if err != nil {
err := fmt.Errorf("unable to load RPC "+
"credentials for dcrd: %v", err)
return nil, err
}
case "dcrw":
// In dcrw mode we use the underlying wallet for chain
// operations.
default:
str := "%s: only dcrd and dcrw modes supported for Decred at " +
"this time"
return nil, fmt.Errorf(str, funcName)
}
cfg.ChainDir = filepath.Join(cfg.DataDir,
defaultChainSubDirname,
decredChain.String())
// Ensure sane config when using a remote wallet.
if cfg.Dcrwallet.GRPCHost != "" {
// Update Dcrwallet.GRPCHost with correct port from
// activeNetParam selected.
_, _, err := net.SplitHostPort(cfg.Dcrwallet.GRPCHost)
if err != nil {
cfg.Dcrwallet.GRPCHost = net.JoinHostPort(
cfg.Dcrwallet.GRPCHost,
activeNetParams.dcrwPort,
)
}
// dcrwallet v1.6.0+ requires a client cert and key for gRPC
// auth, so if any of {ClientKeyPath, ClientCertPath} are
// specified, *both* need to be.
//
// Otherwise, the client key and cert will come on a
// WalletUnlocker.Unlock() call.
clientKeyEmpty := cfg.Dcrwallet.ClientKeyPath == ""
clientCertEmpty := cfg.Dcrwallet.ClientCertPath == ""
if clientKeyEmpty != clientCertEmpty {
str := "%s: dcrwallet.clientkeypath and clientcertpath must both be specified"
err := fmt.Errorf(str, funcName)
return nil, err
}
}
// Finally we'll register the decred chain as our current
// primary chain.
cfg.registeredChains.RegisterPrimaryChain(decredChain)
// Ensure that the user didn't attempt to specify negative values for
// any of the autopilot params.
if cfg.Autopilot.MaxChannels < 0 {
str := "%s: autopilot.maxchannels must be non-negative"
err := fmt.Errorf(str, funcName)
_, _ = fmt.Fprintln(os.Stderr, err)
return nil, err
}
if cfg.Autopilot.Allocation < 0 {
str := "%s: autopilot.allocation must be non-negative"
err := fmt.Errorf(str, funcName)
_, _ = fmt.Fprintln(os.Stderr, err)
return nil, err
}
if cfg.Autopilot.MinChannelSize < 0 {
str := "%s: autopilot.minchansize must be non-negative"
err := fmt.Errorf(str, funcName)
_, _ = fmt.Fprintln(os.Stderr, err)
return nil, err
}
if cfg.Autopilot.MaxChannelSize < 0 {
str := "%s: autopilot.maxchansize must be non-negative"
err := fmt.Errorf(str, funcName)
_, _ = fmt.Fprintln(os.Stderr, err)
return nil, err
}
// Ensure that the specified values for the min and max channel size
// are within the bounds of the normal chan size constraints.
if cfg.Autopilot.MinChannelSize < int64(minChanFundingSize) {
cfg.Autopilot.MinChannelSize = int64(minChanFundingSize)
}
if cfg.Autopilot.MaxChannelSize > int64(MaxFundingAmount) {
cfg.Autopilot.MaxChannelSize = int64(MaxFundingAmount)
}
// Validate profile port number.
if cfg.Profile != "" {
profilePort, err := strconv.Atoi(cfg.Profile)
if err != nil || profilePort < 1024 || profilePort > 65535 {
str := "%s: The profile port must be between 1024 and 65535"
err := fmt.Errorf(str, funcName)
_, _ = fmt.Fprintln(os.Stderr, err)
_, _ = fmt.Fprintln(os.Stderr, usageMessage)
return nil, err
}
}
// We'll now construct the network directory which will be where we
// store all the data specific to this chain/network.
cfg.networkDir = filepath.Join(
cfg.DataDir, defaultChainSubDirname,
cfg.registeredChains.PrimaryChain().String(),
normalizeNetwork(activeNetParams.Name),
)
// If a custom macaroon directory wasn't specified and the data
// directory has changed from the default path, then we'll also update
// the path for the macaroons to be generated.
if cfg.AdminMacPath == "" {
cfg.AdminMacPath = filepath.Join(
cfg.networkDir, defaultAdminMacFilename,
)
}
if cfg.ReadMacPath == "" {
cfg.ReadMacPath = filepath.Join(
cfg.networkDir, defaultReadMacFilename,
)
}
if cfg.InvoiceMacPath == "" {
cfg.InvoiceMacPath = filepath.Join(
cfg.networkDir, defaultInvoiceMacFilename,
)
}
// Similarly, if a custom backup file path wasn't specified, then
// we'll update the file location to match our set network directory.
if cfg.BackupFilePath == "" {
cfg.BackupFilePath = filepath.Join(
cfg.networkDir, chanbackup.DefaultBackupFileName,
)
}
// Append the network type to the log directory so it is "namespaced"
// per network in the same fashion as the data directory.
cfg.LogDir = filepath.Join(cfg.LogDir,
cfg.registeredChains.PrimaryChain().String(),
normalizeNetwork(activeNetParams.Name))
// A log writer must be passed in, otherwise we can't function and would
// run into a panic later on.
if cfg.LogWriter == nil {
return nil, fmt.Errorf("log writer missing in config")
}
// Special show command to list supported subsystems and exit.
if cfg.DebugLevel == "show" {
fmt.Println("Supported subsystems",
cfg.LogWriter.SupportedSubsystems())
os.Exit(0)
}
// Initialize logging at the default logging level.
SetupLoggers(cfg.LogWriter)
err = cfg.LogWriter.InitLogRotator(
filepath.Join(cfg.LogDir, defaultLogFilename),
cfg.MaxLogFileSize, cfg.MaxLogFiles,
)
if err != nil {
str := "%s: log rotation setup failed: %v"
err = fmt.Errorf(str, funcName, err.Error())
_, _ = fmt.Fprintln(os.Stderr, err)
return nil, err
}
// Parse, validate, and set debug log level(s).
err = build.ParseAndSetDebugLevels(cfg.DebugLevel, cfg.LogWriter)
if err != nil {
err = fmt.Errorf("%s: %v", funcName, err.Error())
_, _ = fmt.Fprintln(os.Stderr, err)
_, _ = fmt.Fprintln(os.Stderr, usageMessage)
return nil, err
}
// At least one RPC listener is required, so listen on localhost by
// default if none were specified.
if len(cfg.RawRPCListeners) == 0 {
addr := fmt.Sprintf("localhost:%d", defaultRPCPort)
cfg.RawRPCListeners = append(cfg.RawRPCListeners, addr)
}
// Listen on localhost if no REST listeners were specified.
if len(cfg.RawRESTListeners) == 0 {
addr := fmt.Sprintf("localhost:%d", defaultRESTPort)
cfg.RawRESTListeners = append(cfg.RawRESTListeners, addr)
}
// Listen on the default interface/port if no listeners were specified.
// An empty address string means default interface/address, which on
// most unix systems is the same as 0.0.0.0. If Tor is active, we
// default to only listening on localhost for hidden service
// connections.
if len(cfg.RawListeners) == 0 {
addr := fmt.Sprintf(":%d", defaultPeerPort)
if cfg.Tor.Active {
addr = fmt.Sprintf("localhost:%d", defaultPeerPort)
}
cfg.RawListeners = append(cfg.RawListeners, addr)
}
// Add default port to all RPC listener addresses if needed and remove
// duplicate addresses.
cfg.RPCListeners, err = lncfg.NormalizeAddresses(
cfg.RawRPCListeners, strconv.Itoa(defaultRPCPort),
cfg.net.ResolveTCPAddr,
)
if err != nil {
return nil, err
}
// Add default port to all REST listener addresses if needed and remove
// duplicate addresses.
cfg.RESTListeners, err = lncfg.NormalizeAddresses(
cfg.RawRESTListeners, strconv.Itoa(defaultRESTPort),
cfg.net.ResolveTCPAddr,
)
if err != nil {
return nil, err
}
// For each of the RPC listeners (REST+gRPC), we'll ensure that users
// have specified a safe combo for authentication. If not, we'll bail
// out with an error.
err = lncfg.EnforceSafeAuthentication(
cfg.RPCListeners, !cfg.NoMacaroons,
)
if err != nil {
return nil, err
}
if cfg.DisableRest {
ltndLog.Infof("REST API is disabled!")
cfg.RESTListeners = nil
} else {
err = lncfg.EnforceSafeAuthentication(
cfg.RESTListeners, !cfg.NoMacaroons,
)
if err != nil {
return nil, err
}
}
// Remove the listening addresses specified if listening is disabled.
if cfg.DisableListen {
ltndLog.Infof("Listening on the p2p interface is disabled!")
cfg.Listeners = nil
cfg.ExternalIPs = nil
} else {
// Add default port to all listener addresses if needed and remove
// duplicate addresses.
cfg.Listeners, err = lncfg.NormalizeAddresses(
cfg.RawListeners, strconv.Itoa(defaultPeerPort),
cfg.net.ResolveTCPAddr,
)
if err != nil {
return nil, err
}
// Add default port to all external IP addresses if needed and remove
// duplicate addresses.
cfg.ExternalIPs, err = lncfg.NormalizeAddresses(
cfg.RawExternalIPs, strconv.Itoa(defaultPeerPort),
cfg.net.ResolveTCPAddr,
)
if err != nil {
return nil, err
}
// For the p2p port it makes no sense to listen on a Unix socket.
// Also, we would need to refactor the brontide listener to support
// that.
for _, p2pListener := range cfg.Listeners {
if lncfg.IsUnix(p2pListener) {
err := fmt.Errorf("unix socket addresses cannot be "+
"used for the p2p connection listener: %s",
p2pListener)
return nil, err
}
}
}
// Ensure that the specified minimum backoff is below or equal to the
// maximum backoff.
if cfg.MinBackoff > cfg.MaxBackoff {
return nil, fmt.Errorf("maxbackoff must be greater than minbackoff")
}
// Newer versions of lnd added a new sub-config for bolt-specific
// parameters. However we want to also allow existing users to use the
// value on the top-level config. If the outer config value is set,
// then we'll use that directly.
if cfg.SyncFreelist {
cfg.DB.Bolt.SyncFreelist = cfg.SyncFreelist
}
// Ensure that the user hasn't chosen a remote-max-htlc value greater
// than the protocol maximum.
maxRemoteHtlcs := uint16(input.MaxHTLCNumber / 2)
if cfg.DefaultRemoteMaxHtlcs > maxRemoteHtlcs {
return nil, fmt.Errorf("default-remote-max-htlcs (%v) must be "+
"less than %v", cfg.DefaultRemoteMaxHtlcs,
maxRemoteHtlcs)
}
// Validate the subconfigs for workers, caches, and the tower client.
err = lncfg.Validate(
cfg.Workers,
cfg.Caches,
cfg.WtClient,
cfg.DB,
cfg.HealthChecks,
)
if err != nil {
return nil, err
}
// Finally, ensure that the user's color is correctly formatted,
// otherwise the server will not be able to start after unlocking
// the wallet.
_, err = parseHexColor(cfg.Color)
if err != nil {
return nil, fmt.Errorf("unable to parse node color: %v", err)
}
// All good, return the sanitized result.
return &cfg, err
}
// localDatabaseDir returns the default directory where the
// local bolt db files are stored.
func (c *Config) localDatabaseDir() string {
return filepath.Join(c.DataDir,
defaultGraphSubDirname,
normalizeNetwork(activeNetParams.Name))
}
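// networkName returns the name of the currently active network, with
// versioned testnet names normalized to "testnet".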
func (c *Config) networkName() string {
return normalizeNetwork(activeNetParams.Name)
}
// CleanAndExpandPath expands environment variables and leading ~ in the
// passed path, cleans the result, and returns it.
// This function is taken from https://github.com/decred/dcrd
func CleanAndExpandPath(path string) string {
if path == "" {
return ""
}
// Expand initial ~ to OS specific home directory.
if strings.HasPrefix(path, "~") {
var homeDir string
u, err := user.Current()
if err == nil {
homeDir = u.HomeDir
} else {
homeDir = os.Getenv("HOME")
}
path = strings.Replace(path, "~", homeDir, 1)
}
// NOTE: The os.ExpandEnv doesn't work with Windows-style %VARIABLE%,
// but the variables can still be expanded via POSIX-style $VARIABLE.
return filepath.Clean(os.ExpandEnv(path))
}
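// parseRPCParams checks whether the RPC credentials for the configured
// backend node are set and, if they aren't, attempts to extract them from
// the node's own configuration file.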
func parseRPCParams(nodeConfig interface{}, net chainCode,
simnet bool, flagNode string, funcName string) error {
// First, we'll check our node config to make sure the RPC parameters
// were set correctly. We'll also determine the path to the conf file
// depending on the backend node.
var daemonName, confDir, confFile string
switch conf := nodeConfig.(type) {
case *lncfg.DcrdConfig:
// If both RPCUser and RPCPass are set, we assume those
// credentials are good to use.
if conf.RPCUser != "" && conf.RPCPass != "" {
return nil
}
// Get the daemon name for displaying proper errors.
switch net {
case decredChain:
daemonName = "dcrd"
confDir = conf.Dir
confFile = "dcrd"
}
// If only ONE of RPCUser or RPCPass is set, we assume the
// user did that unintentionally.
if conf.RPCUser != "" || conf.RPCPass != "" {
return fmt.Errorf("please set both or neither of "+
"%[1]v.rpcuser, %[1]v.rpcpass", daemonName)
}
}
// If we're in simnet mode, then the running dcrd instance won't read
// the RPC credentials from the configuration. So if the parameters
// weren't specified to lnd, then we won't be able to start.
if simnet {
str := "%v: rpcuser and rpcpass must be set to your dcrd " +
"node's RPC parameters for simnet mode"
return fmt.Errorf(str, funcName)
}
fmt.Println("Attempting automatic RPC configuration to " + daemonName)
confFile = filepath.Join(confDir, fmt.Sprintf("%v.conf", confFile))
switch flagNode {
case "dcrd":
nConf := nodeConfig.(*lncfg.DcrdConfig)
rpcUser, rpcPass, err := extractDcrdRPCParams(confFile)
if err != nil {
return fmt.Errorf("unable to extract RPC credentials:"+
" %v, cannot start w/o RPC connection",
err)
}
nConf.RPCUser, nConf.RPCPass = rpcUser, rpcPass
}
fmt.Printf("Automatically obtained %v's RPC credentials\n", daemonName)
return nil
}
// extractDcrdRPCParams attempts to extract the RPC credentials for an existing
// dcrd instance. The passed path is expected to be the location of dcrd's
// application data directory on the target system.
func extractDcrdRPCParams(dcrdConfigPath string) (string, string, error) {
// First, we'll open up the dcrd configuration file found at the target
// destination.
dcrdConfigFile, err := os.Open(dcrdConfigPath)
if err != nil {
return "", "", err
}
defer dcrdConfigFile.Close()
// With the file open extract the contents of the configuration file so
// we can attempt to locate the RPC credentials.
configContents, err := ioutil.ReadAll(dcrdConfigFile)
if err != nil {
return "", "", err
}
// Attempt to locate the RPC user using a regular expression. If we
// don't have a match for our regular expression then we'll exit with
// an error.
rpcUserRegexp, err := regexp.Compile(`(?m)^\s*rpcuser\s*=\s*([^\s]+)`)
if err != nil {
return "", "", err
}
userSubmatches := rpcUserRegexp.FindSubmatch(configContents)
if userSubmatches == nil {
return "", "", fmt.Errorf("unable to find rpcuser in config")
}
// Similarly, we'll use another regular expression to find the set
// rpcpass (if any). If we can't find the pass, then we'll exit with an
// error.
rpcPassRegexp, err := regexp.Compile(`(?m)^\s*rpcpass\s*=\s*([^\s]+)`)
if err != nil {
return "", "", err
}
passSubmatches := rpcPassRegexp.FindSubmatch(configContents)
if passSubmatches == nil {
return "", "", fmt.Errorf("unable to find rpcuser in config")
}
return string(userSubmatches[1]), string(passSubmatches[1]), nil
}
// normalizeNetwork returns the common name of a network type used to create
// file paths. This allows differently versioned networks to use the same path.
func normalizeNetwork(network string) string {
if strings.HasPrefix(network, "testnet") {
return "testnet"
}
return network
}
| [
"\"HOME\""
] | [] | [
"HOME"
] | [] | ["HOME"] | go | 1 | 0 |