Dataset schema (column, dtype, observed value range):

    filename            stringlengths    4 - 198
    content             stringlengths    25 - 939k
    environment         sequence
    variablearg         sequence
    constarg            sequence
    variableargjson     stringclasses    1 value
    constargjson        stringlengths    2 - 3.9k
    lang                stringclasses    3 values
    constargcount       float64          0 - 129
    variableargcount    float64          0 - 0
    sentence            stringclasses    1 value
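Each record below appears to list these fields in order: filename, content (the full source file flattened into a single string), environment, variablearg, constarg, variableargjson, constargjson, lang, constargcount, and variableargcount; the sentence column is not visible in this excerpt. As a minimal sketch of how one such record could be consumed, assuming the rows are available as plain Python dicts (for example from a JSON/JSONL export; the actual loading mechanism is not specified here), the JSON-encoded fields can be decoded like this:

```python
import json

# A single record, shaped like the first row shown below (content abbreviated).
row = {
    "filename": "gds_metrics/__init__.py",
    "content": "...",  # full source file as one string
    "environment": [],
    "variablearg": [],
    "constarg": ["VCAP_APPLICATION", "METRICS_BASIC_AUTH", "PROMETHEUS_METRICS_PATH"],
    "variableargjson": "[]",
    "constargjson": '["VCAP_APPLICATION", "METRICS_BASIC_AUTH", "PROMETHEUS_METRICS_PATH"]',
    "lang": "python",
    "constargcount": 3.0,
    "variableargcount": 0.0,
}

# The *json columns hold JSON-encoded lists; decode them before use.
const_args = json.loads(row["constargjson"])
variable_args = json.loads(row["variableargjson"])

print(row["filename"], row["lang"], const_args)
# gds_metrics/__init__.py python ['VCAP_APPLICATION', 'METRICS_BASIC_AUTH', 'PROMETHEUS_METRICS_PATH']
```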
gds_metrics/__init__.py
import gzip import hmac import json import os from time import monotonic from flask import abort, g, request, Response from flask.signals import got_request_exception, request_finished # set multiprocess temp directory before we import prometheus_client os.environ.setdefault('prometheus_multiproc_dir', '/tmp') # noqa import prometheus_client from prometheus_client import multiprocess, CollectorRegistry from .metrics import ( # noqa proxy metric types imports Counter, Gauge, Summary, Histogram, HTTP_SERVER_EXCEPTIONS_TOTAL, HTTP_SERVER_REQUEST_DURATION_SECONDS, HTTP_SERVER_REQUESTS_TOTAL, ) class GDSMetrics(object): def __init__(self): self.metrics_path = os.environ.get('PROMETHEUS_METRICS_PATH', '/metrics') if os.environ.get("METRICS_BASIC_AUTH", "true") == "true": self.auth_token = json.loads(os.environ.get("VCAP_APPLICATION", "{}")).get("application_id") else: self.auth_token = False self.registry = CollectorRegistry() multiprocess.MultiProcessCollector(self.registry) def init_app(self, app): app.add_url_rule(self.metrics_path, 'metrics', self.metrics_endpoint) app.before_request(self.before_request) request_finished.connect(self.teardown_request, sender=app) got_request_exception.connect(self.handle_exception, sender=app) def metrics_endpoint(self): if self.auth_token: auth_header = request.headers.get('Authorization', '') if not auth_header: abort(401) elif not hmac.compare_digest(auth_header, 'Bearer {}'.format(self.auth_token)): abort(403) response = Response( prometheus_client.generate_latest(self.registry), mimetype='text/plain; version=0.0.4; charset=utf-8', headers={ 'Cache-Control': 'no-cache, no-store, max-age=0, must-revalidate', } ) accept_encoding = request.headers.get('Accept-Encoding', '') if 'gzip' not in accept_encoding.lower(): return response response.data = gzip.compress(response.data) response.headers['Content-Encoding'] = 'gzip' response.headers['Content-Length'] = len(response.data) response.headers['Vary'] = 'Accept-Encoding' return response def before_request(self, *args, **kwargs): g._gds_metrics_start_time = monotonic() def teardown_request(self, sender, response, *args, **kwargs): if hasattr(g, "_gds_metrics_start_time"): # There is not a guarantee that `g._gds_metrics_start_time` will exist # as our `before_request` function will not run if the flask app has # another `before_request` that runs before ours and throws an exception resp_time = monotonic() - g._gds_metrics_start_time HTTP_SERVER_REQUEST_DURATION_SECONDS.labels( request.method, request.host, request.url_rule.rule if request.url_rule else 'No endpoint', response.status_code ).observe(resp_time) HTTP_SERVER_REQUESTS_TOTAL.labels( request.method, request.host, request.url_rule.rule if request.url_rule else 'No endpoint', response.status_code ).inc() return response def handle_exception(self, sender, exception, *args, **kwargs): HTTP_SERVER_EXCEPTIONS_TOTAL.labels(type(exception)).inc()
[]
[]
[ "VCAP_APPLICATION", "METRICS_BASIC_AUTH", "PROMETHEUS_METRICS_PATH" ]
[]
["VCAP_APPLICATION", "METRICS_BASIC_AUTH", "PROMETHEUS_METRICS_PATH"]
python
3
0
scripts/generate_postgres_toml.py
#!/usr/bin/python import os REPO = [ 'aws_app_rust', 'calendar_app_rust', 'diary_app_rust', 'garmin_rust', 'movie_collection_rust', 'podcatch_rust', 'security_log_analysis_rust', 'sync_app_rust', ] HOME = os.environ['HOME'] def generate_postgres_toml(): for repo in REPO: config = f'{HOME}/.config/{repo}/config.env' backup_db = f'{HOME}/setup_files/build/{repo}/scripts/backup_db.sh' restore_db = f'{HOME}/setup_files/build/{repo}/scripts/restore_db.sh' db_url = None bucket = None tables = None sequences = {} if os.path.exists(config) and os.path.exists(backup_db): config = open(config, 'r') for line in config: if 'DATABASE_URL' in line or 'PGURL' in line: db_url = line.split('=')[1].strip() for line in open(backup_db, 'r'): if line.startswith('BUCKET'): bucket=line.split('=')[1].strip().replace('"', '') if line.startswith('TABLES'): tables = [] elif isinstance(tables, list): if line.strip() == '"': break else: tables.append(line.strip().replace('"', '').replace("'", '')) if os.path.exists(restore_db): for line in open(restore_db): if 'setval' in line: seq = None max = None table = None for word in line.split(' '): if 'setval' in word: seq = word.replace('setval(', '').replace("'", '').replace(',', '').replace('"', '') elif 'max' in word: max = word.strip().replace('max(', '').replace(')', '') elif seq is not None and max is not None and table is None and word.lower() != 'from': table = word.strip().replace(')', '').replace(',', '').replace('"', '') sequences[seq] = (table, max) print(f'[{repo}]') print(f"database_url = '{db_url}'") # print(f"destination = 's3://{bucket}'") print(f"destination = 'file://{HOME}/setup_files/build/{repo}/backup'") print(f"tables = {tables}") if sequences: sequences = '{' + ', '.join(f"{k}=['{t}', '{v}']" for (k, (t, v)) in sequences.items()) + '}' print(f"sequences = {sequences}") print("") def prettify_toml(): import toml generate_postgres_toml()
[]
[]
[ "HOME" ]
[]
["HOME"]
python
1
0
test/extended/util/azure/config_file.go
package azure import ( "context" "encoding/json" "errors" "io/ioutil" "os" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" e2e "k8s.io/kubernetes/test/e2e/framework" "k8s.io/legacy-cloud-providers/azure" "sigs.k8s.io/yaml" ) // LoadConfigFile uses the cluster to fetch the cloud provider config from `openshift-config/cloud-provider-config` config map's config key. // It then uses the `AZURE_AUTH_LOCATION` to load the credentials for Azure API and update the cloud provider config with the client secret. In-cluster cloud provider config // uses Azure Managed Identity attached to virtual machines to provide Azure API access, while the e2e tests are usually run from outside the cluster and therefore need explicit auth creds. func LoadConfigFile() ([]byte, error) { // LoadClientset but don't set the UserAgent to include the current test name because // we don't run any test yet and this call panics client, err := e2e.LoadClientset(true) if err != nil { return nil, err } config, err := cloudProviderConfigFromCluster(client.CoreV1()) if err != nil { return nil, err } settings, err := getAuthFile() if err != nil { return nil, err } config.AADClientID = settings.ClientID config.AADClientSecret = settings.ClientSecret config.UseManagedIdentityExtension = false config.UseInstanceMetadata = false data, err := yaml.Marshal(config) if err != nil { return nil, err } return data, nil } func cloudProviderConfigFromCluster(client clientcorev1.ConfigMapsGetter) (*azure.Config, error) { cm, err := client.ConfigMaps("openshift-config").Get(context.Background(), "cloud-provider-config", metav1.GetOptions{}) if err != nil { return nil, err } data, ok := cm.Data["config"] if !ok { return nil, errors.New("No cloud provider config was set in openshift-config/cloud-provider-config") } config := &azure.Config{} if err := yaml.Unmarshal([]byte(data), config); err != nil { return nil, err } return config, nil } // loads the auth file using the file provided by AZURE_AUTH_LOCATION // this mimics the function https://godoc.org/github.com/Azure/go-autorest/autorest/azure/auth#GetSettingsFromFile which is not currently available with vendor Azure SDK. func getAuthFile() (*file, error) { fileLocation := os.Getenv("AZURE_AUTH_LOCATION") if fileLocation == "" { return nil, errors.New("environment variable AZURE_AUTH_LOCATION is not set") } contents, err := ioutil.ReadFile(fileLocation) if err != nil { return nil, err } authFile := file{} err = json.Unmarshal(contents, &authFile) if err != nil { return nil, err } return &authFile, nil } // File represents the authentication file type file struct { ClientID string `json:"clientId,omitempty"` ClientSecret string `json:"clientSecret,omitempty"` SubscriptionID string `json:"subscriptionId,omitempty"` TenantID string `json:"tenantId,omitempty"` ActiveDirectoryEndpoint string `json:"activeDirectoryEndpointUrl,omitempty"` ResourceManagerEndpoint string `json:"resourceManagerEndpointUrl,omitempty"` GraphResourceID string `json:"activeDirectoryGraphResourceId,omitempty"` SQLManagementEndpoint string `json:"sqlManagementEndpointUrl,omitempty"` GalleryEndpoint string `json:"galleryEndpointUrl,omitempty"` ManagementEndpoint string `json:"managementEndpointUrl,omitempty"` }
[ "\"AZURE_AUTH_LOCATION\"" ]
[]
[ "AZURE_AUTH_LOCATION" ]
[]
["AZURE_AUTH_LOCATION"]
go
1
0
cherrytree/github_utils.py
import os import re from collections import OrderedDict from typing import Generator, List, Optional, Reversible import delegator from git import Commit from git.exc import InvalidGitRepositoryError from git.repo import Repo from github import Github from github.Label import Label from github.Issue import Issue from github.GithubException import UnknownObjectException from github.Repository import Repository from cherrytree.classes import CherryTreeExecutionException # PRs are either of form "Merge pull request #nnn from..." or "...(#nnn)" PR_REGEX = re.compile(r"(^Merge pull request #(\d+) from|\(#(\d+)\)$)") def get_github_instance() -> Github: token = os.environ.get("GITHUB_TOKEN") if not token: raise Exception("Env var 'GITHUB_TOKEN' is missing") return Github(token) def get_repo(repo: str) -> Repository: g = get_github_instance() return g.get_repo(repo) def get_issues_from_labels(repo: str, label: str, prs_only: bool = False) -> List[Issue]: label_objects: List[Label] = [] gh_repo = get_repo(repo) try: label_objects.append(gh_repo.get_label(label)) except UnknownObjectException: # unknown label return [] issues = gh_repo.get_issues(labels=label_objects, state="all") if prs_only: return [o for o in issues if o.pull_request] return [o for o in issues] def get_issue(repo: str, id_: int) -> Optional[Issue]: gh_repo = get_repo(repo) try: return gh_repo.get_issue(id_) except UnknownObjectException: # unknown id return None def get_commits(repo: str, branch: str, since=None): """Get commit objects from a branch, over a limited period""" gh_repo = get_repo(repo) branch_object = gh_repo.get_branch(branch) sha = branch_object.commit.sha if since: commits = gh_repo.get_commits(sha=sha, since=since) else: commits = gh_repo.get_commits(sha=sha) return commits def commit_pr_number(commit: Commit) -> Optional[int]: """Given a commit object, returns the PR number""" res = PR_REGEX.search(commit.summary) if res: groups = res.groups() return int(groups[1] or groups[2]) return None def get_commit_pr_map(commits: Reversible[Commit]): """Given a list of commits and prs, returns a map of pr_number to commit""" d = OrderedDict() for commit in reversed(commits): pr_number = commit_pr_number(commit) if pr_number: d[pr_number] = commit return d def truncate_str(value: str, width: int = 90) -> str: cont_str = "..." trunc_value = value[: width - len(cont_str)].strip() if len(trunc_value) < len(value.strip()): trunc_value = f"{trunc_value}{cont_str}" return f"{trunc_value:<{width}}" def git_get_current_head() -> str: output = os_system("git status | head -1") match = re.match("(?:HEAD detached at|On branch) (.*)", output) if not match: return "" return match.group(1) def os_system(cmd, raise_on_error=True) -> str: p = delegator.run(cmd) if raise_on_error and p.return_code != 0: raise CherryTreeExecutionException(p.err) return p.out def check_if_branch_exists(branch: str) -> bool: current_head = git_get_current_head() try: os_system(f"git checkout {branch}") except CherryTreeExecutionException: return False os_system(f"git checkout {current_head}") return True def deduplicate_prs(prs: List[Issue]) -> List[Issue]: pr_set = set() ret: List[Issue] = [] for pr in prs: if pr.number not in pr_set: ret.append(pr) pr_set.add(pr.number) return ret def get_git_repo() -> Repo: """ Find the path containing the git repo. Start by checking the current working directory, and proceed up the directory tree if a git repo can't be found. 
returns: Paath to closest git repo raises FileNotFoundError: if no git repo is found in the current path """ def _traverse_dirs(path: str) -> Generator[str, None, None]: # first yield the current directory yield path # then start yielding parents until we reach the root while True: parent = os.path.dirname(path) if path != parent: yield parent path = parent else: break cwd = os.getcwd() for dir_ in _traverse_dirs(cwd): try: repo = Repo(dir_) return repo except InvalidGitRepositoryError: pass raise FileNotFoundError("No git repo found in path: {}". format(cwd))
[]
[]
[ "GITHUB_TOKEN" ]
[]
["GITHUB_TOKEN"]
python
1
0
vendor/github.com/containers/common/pkg/auth/auth.go
package auth import ( "bufio" "context" "fmt" "os" "path/filepath" "strings" "github.com/containers/image/v5/docker" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/pkg/docker/config" "github.com/containers/image/v5/pkg/sysregistriesv2" "github.com/containers/image/v5/types" "github.com/pkg/errors" "github.com/sirupsen/logrus" terminal "golang.org/x/term" ) // GetDefaultAuthFile returns env value REGISTRY_AUTH_FILE as default // --authfile path used in multiple --authfile flag definitions // Will fail over to DOCKER_CONFIG if REGISTRY_AUTH_FILE environment is not set func GetDefaultAuthFile() string { if authfile := os.Getenv("REGISTRY_AUTH_FILE"); authfile != "" { return authfile } if auth_env := os.Getenv("DOCKER_CONFIG"); auth_env != "" { return filepath.Join(auth_env, "config.json") } return "" } // CheckAuthFile validates filepath given by --authfile // used by command has --authfile flag func CheckAuthFile(authfile string) error { if authfile == "" { return nil } if _, err := os.Stat(authfile); err != nil { return errors.Wrap(err, "checking authfile") } return nil } // systemContextWithOptions returns a version of sys // updated with authFile and certDir values (if they are not ""). // NOTE: this is a shallow copy that can be used and updated, but may share // data with the original parameter. func systemContextWithOptions(sys *types.SystemContext, authFile, certDir string) *types.SystemContext { if sys != nil { sysCopy := *sys sys = &sysCopy } else { sys = &types.SystemContext{} } if authFile != "" { sys.AuthFilePath = authFile } if certDir != "" { sys.DockerCertPath = certDir } return sys } // Login implements a “log in” command with the provided opts and args // reading the password from opts.Stdin or the options in opts. func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginOptions, args []string) error { systemContext = systemContextWithOptions(systemContext, opts.AuthFile, opts.CertDir) var ( key, registry string err error ) switch len(args) { case 0: if !opts.AcceptUnspecifiedRegistry { return errors.New("please provide a registry to login to") } if key, err = defaultRegistryWhenUnspecified(systemContext); err != nil { return err } registry = key logrus.Debugf("registry not specified, default to the first registry %q from registries.conf", key) case 1: key, registry, err = parseCredentialsKey(args[0], opts.AcceptRepositories) if err != nil { return err } default: return errors.New("login accepts only one registry to login to") } authConfig, err := config.GetCredentials(systemContext, key) if err != nil { return errors.Wrap(err, "get credentials") } if opts.GetLoginSet { if authConfig.Username == "" { return errors.Errorf("not logged into %s", key) } fmt.Fprintf(opts.Stdout, "%s\n", authConfig.Username) return nil } if authConfig.IdentityToken != "" { return errors.New("currently logged in, auth file contains an Identity token") } password := opts.Password if opts.StdinPassword { var stdinPasswordStrBuilder strings.Builder if opts.Password != "" { return errors.New("Can't specify both --password-stdin and --password") } if opts.Username == "" { return errors.New("Must provide --username with --password-stdin") } scanner := bufio.NewScanner(opts.Stdin) for scanner.Scan() { fmt.Fprint(&stdinPasswordStrBuilder, scanner.Text()) } password = stdinPasswordStrBuilder.String() } // If no username and no password is specified, try to use existing ones. 
if opts.Username == "" && password == "" && authConfig.Username != "" && authConfig.Password != "" { fmt.Fprintf(opts.Stdout, "Authenticating with existing credentials for %s\n", key) if err := docker.CheckAuth(ctx, systemContext, authConfig.Username, authConfig.Password, registry); err == nil { fmt.Fprintf(opts.Stdout, "Existing credentials are valid. Already logged in to %s\n", registry) return nil } fmt.Fprintln(opts.Stdout, "Existing credentials are invalid, please enter valid username and password") } username, password, err := getUserAndPass(opts, password, authConfig.Username) if err != nil { return errors.Wrap(err, "getting username and password") } if err = docker.CheckAuth(ctx, systemContext, username, password, registry); err == nil { // Write the new credentials to the authfile desc, err := config.SetCredentials(systemContext, key, username, password) if err != nil { return err } if opts.Verbose { fmt.Fprintln(opts.Stdout, "Used: ", desc) } } if err == nil { fmt.Fprintln(opts.Stdout, "Login Succeeded!") return nil } if unauthorized, ok := err.(docker.ErrUnauthorizedForCredentials); ok { logrus.Debugf("error logging into %q: %v", key, unauthorized) return errors.Errorf("error logging into %q: invalid username/password", key) } return errors.Wrapf(err, "authenticating creds for %q", key) } // parseCredentialsKey turns the provided argument into a valid credential key // and computes the registry part. func parseCredentialsKey(arg string, acceptRepositories bool) (key, registry string, err error) { if !acceptRepositories { registry = getRegistryName(arg) key = registry return key, registry, nil } key = trimScheme(arg) if key != arg { return "", "", errors.New("credentials key has https[s]:// prefix") } registry = getRegistryName(key) if registry == key { // The key is not namespaced return key, registry, nil } // Sanity-check that the key looks reasonable (e.g. doesn't use invalid characters), // and does not contain a tag or digest. // WARNING: ref.Named() MUST NOT be used to compute key, because // reference.ParseNormalizedNamed() turns docker.io/vendor to docker.io/library/vendor // Ideally c/image should provide dedicated validation functionality. ref, err := reference.ParseNormalizedNamed(key) if err != nil { return "", "", errors.Wrapf(err, "parse reference from %q", key) } if !reference.IsNameOnly(ref) { return "", "", errors.Errorf("reference %q contains tag or digest", ref.String()) } refRegistry := reference.Domain(ref) if refRegistry != registry { // This should never happen, check just to make sure return "", "", fmt.Errorf("internal error: key %q registry mismatch, %q vs. %q", key, ref, refRegistry) } return key, registry, nil } // getRegistryName scrubs and parses the input to get the server name func getRegistryName(server string) string { // removes 'http://' or 'https://' from the front of the // server/registry string if either is there. This will be mostly used // for user input from 'Buildah login' and 'Buildah logout'. server = trimScheme(server) // gets the registry from the input. If the input is of the form // quay.io/myuser/myimage, it will parse it and just return quay.io split := strings.Split(server, "/") return split[0] } // trimScheme removes the HTTP(s) scheme from the provided repository. func trimScheme(repository string) string { // removes 'http://' or 'https://' from the front of the // server/registry string if either is there. This will be mostly used // for user input from 'Buildah login' and 'Buildah logout'. 
return strings.TrimPrefix(strings.TrimPrefix(repository, "https://"), "http://") } // getUserAndPass gets the username and password from STDIN if not given // using the -u and -p flags. If the username prompt is left empty, the // displayed userFromAuthFile will be used instead. func getUserAndPass(opts *LoginOptions, password, userFromAuthFile string) (user, pass string, err error) { reader := bufio.NewReader(opts.Stdin) username := opts.Username if username == "" { if userFromAuthFile != "" { fmt.Fprintf(opts.Stdout, "Username (%s): ", userFromAuthFile) } else { fmt.Fprint(opts.Stdout, "Username: ") } username, err = reader.ReadString('\n') if err != nil { return "", "", errors.Wrap(err, "reading username") } // If the user just hit enter, use the displayed user from the // the authentication file. This allows to do a lazy // `$ buildah login -p $NEW_PASSWORD` without specifying the // user. if strings.TrimSpace(username) == "" { username = userFromAuthFile } } if password == "" { fmt.Fprint(opts.Stdout, "Password: ") pass, err := terminal.ReadPassword(0) if err != nil { return "", "", errors.Wrap(err, "reading password") } password = string(pass) fmt.Fprintln(opts.Stdout) } return strings.TrimSpace(username), password, err } // Logout implements a “log out” command with the provided opts and args func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []string) error { if err := CheckAuthFile(opts.AuthFile); err != nil { return err } systemContext = systemContextWithOptions(systemContext, opts.AuthFile, "") if opts.All { if len(args) != 0 { return errors.New("--all takes no arguments") } if err := config.RemoveAllAuthentication(systemContext); err != nil { return err } fmt.Fprintln(opts.Stdout, "Removed login credentials for all registries") return nil } var ( key, registry string err error ) switch len(args) { case 0: if !opts.AcceptUnspecifiedRegistry { return errors.New("please provide a registry to logout from") } if key, err = defaultRegistryWhenUnspecified(systemContext); err != nil { return err } registry = key logrus.Debugf("registry not specified, default to the first registry %q from registries.conf", key) case 1: key, registry, err = parseCredentialsKey(args[0], opts.AcceptRepositories) if err != nil { return err } default: return errors.New("logout accepts only one registry to logout from") } err = config.RemoveAuthentication(systemContext, key) switch errors.Cause(err) { case nil: fmt.Fprintf(opts.Stdout, "Removed login credentials for %s\n", key) return nil case config.ErrNotLoggedIn: authConfig, err := config.GetCredentials(systemContext, key) if err != nil { return errors.Wrap(err, "get credentials") } authInvalid := docker.CheckAuth(context.Background(), systemContext, authConfig.Username, authConfig.Password, registry) if authConfig.Username != "" && authConfig.Password != "" && authInvalid == nil { fmt.Printf("Not logged into %s with current tool. Existing credentials were established via docker login. 
Please use docker logout instead.\n", key) return nil } return errors.Errorf("Not logged into %s\n", key) default: return errors.Wrapf(err, "logging out of %q", key) } } // defaultRegistryWhenUnspecified returns first registry from search list of registry.conf // used by login/logout when registry argument is not specified func defaultRegistryWhenUnspecified(systemContext *types.SystemContext) (string, error) { registriesFromFile, err := sysregistriesv2.UnqualifiedSearchRegistries(systemContext) if err != nil { return "", errors.Wrap(err, "getting registry from registry.conf, please specify a registry") } if len(registriesFromFile) == 0 { return "", errors.New("no registries found in registries.conf, a registry must be provided") } return registriesFromFile[0], nil }
[ "\"REGISTRY_AUTH_FILE\"", "\"DOCKER_CONFIG\"" ]
[]
[ "DOCKER_CONFIG", "REGISTRY_AUTH_FILE" ]
[]
["DOCKER_CONFIG", "REGISTRY_AUTH_FILE"]
go
2
0
apps/phaseofmoon/phaseofmoon.go
// Package phaseofmoon provides details for the Phase Of Moon applet. package phaseofmoon import ( _ "embed" "tidbyt.dev/community/apps/manifest" ) //go:embed phase_of_moon.star var source []byte // New creates a new instance of the Phase Of Moon applet. func New() manifest.Manifest { return manifest.Manifest{ ID: "phase-of-moon", Name: "Phase Of Moon", Author: "Alan Fleming", Summary: "Shows the phase of the moon", Desc: "Shows the current phase of the moon.", FileName: "phase_of_moon.star", PackageName: "phaseofmoon", Source: source, } }
[]
[]
[]
[]
[]
go
null
null
null
internal/projector/datastore/fetch_test.go
package datastore_test import ( "context" "encoding/json" "errors" "fmt" "testing" "github.com/OpenSlides/openslides-autoupdate-service/internal/projector/datastore" "github.com/OpenSlides/openslides-autoupdate-service/pkg/dsmock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestFetchObject(t *testing.T) { recorder := datastore.NewRecorder(dsmock.Stub(map[string][]byte{ "testmodel/1/id": []byte("1"), "testmodel/1/number": []byte("456"), "testmodel/1/text": []byte(`"my text"`), "testmodel/1/friend_ids": []byte("[1,2,3]"), })) fetch := datastore.NewFetcher(recorder) object := fetch.Object(context.Background(), "testmodel/1", "number", "text", "friend_ids") require.NoError(t, fetch.Err(), "Get returned unexpected error") assert.Equal(t, json.RawMessage([]byte("456")), object["number"]) assert.Equal(t, json.RawMessage([]byte(`"my text"`)), object["text"]) assert.Equal(t, json.RawMessage([]byte("[1,2,3]")), object["friend_ids"]) assert.ElementsMatch(t, []string{"testmodel/1/id", "testmodel/1/number", "testmodel/1/text", "testmodel/1/friend_ids"}, toSlice(recorder.Keys())) } func toSlice(in map[string]bool) []string { out := make([]string, 0, len(in)) for k := range in { out = append(out, k) } return out } func TestFetchObjectOnError(t *testing.T) { shutdownCtx, cancel := context.WithCancel(context.Background()) defer cancel() ds := dsmock.NewMockDatastore(shutdownCtx.Done(), map[string][]byte{ "testmodel/1/id": []byte("1"), "testmodel/1/number": []byte("456"), "testmodel/1/text": []byte(`"my text"`), "testmodel/1/friend_ids": []byte("[1,2,3]"), }) ds.InjectError(errors.New("some error")) recorder := datastore.NewRecorder(ds) fetch := datastore.NewFetcher(recorder) fetch.Object(context.Background(), "testmodel/1", "number", "text", "friend_ids") if err := fetch.Err(); err == nil { t.Fatalf("Object did not return an error") } keysEqual(t, recorder.Keys(), "testmodel/1/id", "testmodel/1/number", "testmodel/1/text", "testmodel/1/friend_ids") } func TestFetchObjectDoesNotExist(t *testing.T) { shutdownCtx, cancel := context.WithCancel(context.Background()) defer cancel() fetch := datastore.NewFetcher(dsmock.NewMockDatastore(shutdownCtx.Done(), map[string][]byte{})) fetch.Object(context.Background(), "testmodel/1", "text") var errDoesNotExist datastore.DoesNotExistError if err := fetch.Err(); !errors.As(err, &errDoesNotExist) { t.Errorf("fetch.Object returned error %v, expected a does not exist error", err) } } func TestFetchValue(t *testing.T) { recorder := datastore.NewRecorder(dsmock.Stub(map[string][]byte{ "testmodel/1/text": []byte(`"my text"`), })) fetch := datastore.NewFetcher(recorder) var value string fetch.Fetch(context.Background(), &value, "testmodel/1/text") if err := fetch.Err(); err != nil { t.Fatalf("Fetch() returned error: %v", err) } expect := "my text" if value != expect { t.Errorf("Fetch() fetched value %q, expected %q", value, expect) } keysEqual(t, recorder.Keys(), "testmodel/1/text") } func TestFetchValueDoesNotExist(t *testing.T) { recorder := datastore.NewRecorder(dsmock.Stub{}) fetch := datastore.NewFetcher(recorder) var value string fetch.Fetch(context.Background(), &value, "testmodel/1/text") if err := fetch.Err(); err != nil { t.Errorf("Fetch returned unexpected error %v", err) } keysEqual(t, recorder.Keys(), "testmodel/1/text") if value != "" { t.Errorf("Fetch of unexpected key returned %q, expected am empty string", value) } } func TestFetchValueAfterError(t *testing.T) { shutdownCtx, cancel := context.WithCancel(context.Background()) 
defer cancel() ds := dsmock.NewMockDatastore(shutdownCtx.Done(), map[string][]byte{ "testmodel/1/text": []byte(`"my text"`), }) recorder := datastore.NewRecorder(ds) fetch := datastore.NewFetcher(recorder) myerr := errors.New("some error") ds.InjectError(myerr) var errorValue string fetch.Fetch(context.Background(), &errorValue, "testmodel/1/error_value") ds.InjectError(nil) var value string fetch.Fetch(context.Background(), &value, "testmodel/1/text") if err := fetch.Err(); !errors.Is(err, myerr) { t.Errorf("Fetch returned error %q, expected %q", fetch.Err(), myerr) } if value != "" { t.Errorf("Fetch set value after an error to %q", value) } keysEqual(t, recorder.Keys(), "testmodel/1/error_value") } func TestFetchIfExist(t *testing.T) { recorder := datastore.NewRecorder(dsmock.Stub(map[string][]byte{ "testmodel/1/id": []byte("1"), })) fetch := datastore.NewFetcher(recorder) var value string fetch.FetchIfExist(context.Background(), &value, "testmodel/1/text") if err := fetch.Err(); err != nil { t.Errorf("Fetch returned error: %v", err) } keysEqual(t, recorder.Keys(), "testmodel/1/id", "testmodel/1/text") } func TestFetchIfExistObjectDoesNotExist(t *testing.T) { recorder := datastore.NewRecorder(dsmock.Stub(map[string][]byte{ "testmodel/1/text": []byte(`"some test"`), })) fetch := datastore.NewFetcher(recorder) var value string fetch.FetchIfExist(context.Background(), &value, "testmodel/1/text") var errDoesNotExist datastore.DoesNotExistError if err := fetch.Err(); !errors.As(err, &errDoesNotExist) { t.Errorf("FetchIfExist returned error: %q, expected DoesNotExistError", err) } keysEqual(t, recorder.Keys(), "testmodel/1/id", "testmodel/1/text") } func TestFetchIfExistAfterError(t *testing.T) { shutdownCtx, cancel := context.WithCancel(context.Background()) defer cancel() ds := dsmock.NewMockDatastore(shutdownCtx.Done(), map[string][]byte{ "testmodel/1/id": []byte("1"), "testmodel/1/text": []byte(`"some test"`), }) recorder := datastore.NewRecorder(ds) fetch := datastore.NewFetcher(recorder) myerr := errors.New("some error") ds.InjectError(myerr) var value string fetch.Fetch(context.Background(), &value, "testmodel/1/text") ds.InjectError(nil) fetch.FetchIfExist(context.Background(), &value, "testmodel/1/text") if err := fetch.Err(); !errors.Is(err, myerr) { t.Errorf("Fetch returned error %q, expected %q", err, myerr) } if value != "" { t.Errorf("Fetch set value after an error to %q", value) } keysEqual(t, recorder.Keys(), "testmodel/1/text") } func ExampleInt() { shutdownCtx, cancel := context.WithCancel(context.Background()) defer cancel() fetch := datastore.NewFetcher(dsmock.NewMockDatastore(shutdownCtx.Done(), map[string][]byte{ "testmodel/1/id": []byte("1"), })) i := datastore.Int(context.Background(), fetch.Fetch, "testmodel/%d/id", 1) fmt.Println(i) fmt.Println(fetch.Err()) // Output: // 1 // <nil> } func ExampleInt_doesNotExist() { shutdownCtx, cancel := context.WithCancel(context.Background()) defer cancel() fetch := datastore.NewFetcher(dsmock.NewMockDatastore(shutdownCtx.Done(), nil)) i := datastore.Int(context.Background(), fetch.Fetch, "testmodel/%d/number", 1) fmt.Println(i) fmt.Println(fetch.Err()) // Output: // 0 // <nil> } func ExampleInt_wrongType() { shutdownCtx, cancel := context.WithCancel(context.Background()) defer cancel() fetch := datastore.NewFetcher(dsmock.NewMockDatastore(shutdownCtx.Done(), map[string][]byte{ "testmodel/1/id": []byte(`"a string"`), })) i := datastore.Int(context.Background(), fetch.Fetch, "testmodel/%d/id", 1) fmt.Println(i) 
fmt.Println(fetch.Err() == nil) // Output: // 0 // false } func ExampleInt_ifExist() { shutdownCtx, cancel := context.WithCancel(context.Background()) defer cancel() fetch := datastore.NewFetcher(dsmock.NewMockDatastore(shutdownCtx.Done(), nil)) i := datastore.Int(context.Background(), fetch.FetchIfExist, "testmodel/%d/number", 1) fmt.Println(i) fmt.Println(fetch.Err()) // Output: // 0 // testmodel/1 does not exist. } func ExampleInts() { shutdownCtx, cancel := context.WithCancel(context.Background()) defer cancel() fetch := datastore.NewFetcher(dsmock.NewMockDatastore(shutdownCtx.Done(), map[string][]byte{ "testmodel/1/ids": []byte("[1,2,3]"), })) ints := datastore.Ints(context.Background(), fetch.Fetch, "testmodel/%d/ids", 1) fmt.Println(ints) fmt.Println(fetch.Err()) // Output: // [1 2 3] // <nil> } func ExampleString() { shutdownCtx, cancel := context.WithCancel(context.Background()) defer cancel() fetch := datastore.NewFetcher(dsmock.NewMockDatastore(shutdownCtx.Done(), map[string][]byte{ "testmodel/1/name": []byte(`"hugo"`), })) str := datastore.String(context.Background(), fetch.Fetch, "testmodel/%d/name", 1) fmt.Println(str) fmt.Println(fetch.Err()) // Output: // hugo // <nil> } func keysEqual(t *testing.T, got map[string]bool, expect ...string) { t.Helper() gotkeys := make([]string, 0, len(got)) for k := range got { gotkeys = append(gotkeys, k) } if len(got) != len(expect) { t.Errorf("Got %d keys, expected %d\nGot:\n%v\nExpected:\n%v", len(got), len(expect), gotkeys, expect) return } for _, k := range expect { if !got[k] { t.Errorf("did not get expected key %q", k) } } }
[]
[]
[]
[]
[]
go
null
null
null
api/server/server.go
package server import ( "log" "net/http" "os" "time" "github.com/cristianblar/restaurant-manager/api/lib" "github.com/cristianblar/restaurant-manager/api/utils" "github.com/go-chi/chi/v5" "github.com/go-chi/chi/v5/middleware" "github.com/rs/cors" ) type Server struct { Port string Router *chi.Mux } func CreateServer() *Server { lib.StartDb(schemaObject) return &Server{ Port: os.Getenv("API_PORT"), Router: CreateRouter(), } } func (s *Server) Listen() { // Primera carga de hoy por default: todayDate := time.Now().Unix() dateData := lib.FetchDayData(todayDate, queryProducts, queryOrigins) lib.AddDayData(dateData) log.Println("Default data uploaded -> Server ready to start") mainRouter := chi.NewRouter() mainRouter.Use(middleware.Logger) mainRouter.Use(middleware.Recoverer) mainRouter.Use(cors.Default().Handler) mainRouter.Mount("/api", s.Router) mainRouter.Get("/", HandleRoot) serverError := http.ListenAndServe(s.Port, mainRouter) utils.PanicErrorHandler(serverError) }
[ "\"API_PORT\"" ]
[]
[ "API_PORT" ]
[]
["API_PORT"]
go
1
0
start.py
#!/usr/bin/env python import getpass import sys import re from optparse import OptionParser from minecraft import authentication from minecraft.exceptions import YggdrasilError from minecraft.networking.connection import Connection from minecraft.networking.packets import Packet, clientbound, serverbound def get_options(): parser = OptionParser() parser.add_option("-u", "--username", dest="username", default=None, help="username to log in with") parser.add_option("-p", "--password", dest="password", default=None, help="password to log in with") parser.add_option("-s", "--server", dest="server", default=None, help="server host or host:port " "(enclose IPv6 addresses in square brackets)") parser.add_option("-o", "--offline", dest="offline", action="store_true", help="connect to a server in offline mode " "(no password required)") parser.add_option("-d", "--dump-packets", dest="dump_packets", action="store_true", help="print sent and received packets to standard error") parser.add_option("-v", "--dump-unknown-packets", dest="dump_unknown", action="store_true", help="include unknown packets in --dump-packets output") (options, args) = parser.parse_args() if not options.username: options.username = input("Enter your username: ") if not options.password and not options.offline: options.password = getpass.getpass("Enter your password (leave " "blank for offline mode): ") options.offline = options.offline or (options.password == "") if not options.server: options.server = input("Enter server host or host:port " "(enclose IPv6 addresses in square brackets): ") # Try to split out port and address match = re.match(r"((?P<host>[^\[\]:]+)|\[(?P<addr>[^\[\]]+)\])" r"(:(?P<port>\d+))?$", options.server) if match is None: raise ValueError("Invalid server address: '%s'." % options.server) options.address = match.group("host") or match.group("addr") options.port = int(match.group("port") or 25565) return options def main(): options = get_options() if options.offline: print("Connecting in offline mode...") connection = Connection( options.address, options.port, username=options.username) else: auth_token = authentication.AuthenticationToken() try: auth_token.authenticate(options.username, options.password) except YggdrasilError as e: print(e) sys.exit() print("Logged in as %s..." % auth_token.username) connection = Connection( options.address, options.port, auth_token=auth_token) if options.dump_packets: def print_incoming(packet): if type(packet) is Packet: # This is a direct instance of the base Packet type, meaning # that it is a packet of unknown type, so we do not print it # unless explicitly requested by the user. 
if options.dump_unknown: print('--> [unknown packet] %s' % packet, file=sys.stderr) else: print('--> %s' % packet, file=sys.stderr) def print_outgoing(packet): print('<-- %s' % packet, file=sys.stderr) connection.register_packet_listener( print_incoming, Packet, early=True) connection.register_packet_listener( print_outgoing, Packet, outgoing=True) def handle_join_game(join_game_packet): print('Connected.') connection.register_packet_listener( handle_join_game, clientbound.play.JoinGamePacket) def print_chat(chat_packet): print("Message (%s): %s" % ( chat_packet.field_string('position'), chat_packet.json_data)) connection.register_packet_listener( print_chat, clientbound.play.ChatMessagePacket) connection.connect() while True: try: text = input() if text == "/respawn": print("respawning...") packet = serverbound.play.ClientStatusPacket() packet.action_id = serverbound.play.ClientStatusPacket.RESPAWN connection.write_packet(packet) if text.startswith("/say "): text_body = text[5:] print("saying...", text_body) packet = serverbound.play.ChatPacket() packet.message = text_body connection.write_packet(packet) else: packet = serverbound.play.ChatPacket() packet.message = text connection.write_packet(packet) except KeyboardInterrupt: print("Bye!") sys.exit() if __name__ == "__main__": main()
[]
[]
[]
[]
[]
python
null
null
null
python/helpers/pydev/build_tools/build.py
''' Helper to build pydevd. It should: * recreate our generated files * compile cython deps (properly setting up the environment first). Note that it's used in the CI to build the cython deps based on the PYDEVD_USE_CYTHON environment variable. ''' from __future__ import print_function import os import subprocess import sys from generate_code import remove_if_exists, root_dir, is_python_64bit, generate_dont_trace_files, generate_cython_module def validate_pair(ob): try: if not (len(ob) == 2): print("Unexpected result:", ob, file=sys.stderr) raise ValueError except: return False return True def consume(it): try: while True: next(it) except StopIteration: pass def get_environment_from_batch_command(env_cmd, initial=None): """ Take a command (either a single command or list of arguments) and return the environment created after running that command. Note that if the command must be a batch file or .cmd file, or the changes to the environment will not be captured. If initial is supplied, it is used as the initial environment passed to the child process. """ if not isinstance(env_cmd, (list, tuple)): env_cmd = [env_cmd] if not os.path.exists(env_cmd[0]): raise RuntimeError('Error: %s does not exist' % (env_cmd[0],)) # construct the command that will alter the environment env_cmd = subprocess.list2cmdline(env_cmd) # create a tag so we can tell in the output when the proc is done tag = 'Done running command' # construct a cmd.exe command to do accomplish this cmd = 'cmd.exe /s /c "{env_cmd} && echo "{tag}" && set"'.format(**vars()) # launch the process proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=initial) # parse the output sent to stdout lines = proc.stdout # consume whatever output occurs until the tag is reached for line in lines: line = line.decode('utf-8') if 'The specified configuration type is missing.' in line: raise AssertionError('Error executing %s. View http://blog.ionelmc.ro/2014/12/21/compiling-python-extensions-on-windows/ for details.' 
% (env_cmd)) if tag in line: break def handle_line(l): try: if sys.version_info[0] > 2: # define a way to handle each KEY=VALUE line return l.decode('utf-8').rstrip().split('=', 1) else: # define a way to handle each KEY=VALUE line return l.rstrip().split('=', 1) except UnicodeDecodeError: print("Bad env variable: ", l) # parse key/values into pairs pairs = map(handle_line, lines) # make sure the pairs are valid valid_pairs = filter(validate_pair, pairs) # construct a dictionary of the pairs result = dict(valid_pairs) # let the process finish proc.communicate() return result def remove_binaries(suffixes): for f in os.listdir(os.path.join(root_dir, '_pydevd_bundle')): for suffix in suffixes: if f.endswith(suffix): remove_if_exists(os.path.join(root_dir, '_pydevd_bundle', f)) def build(): if '--no-remove-binaries' not in sys.argv: remove_binaries(['.pyd', '.so']) os.chdir(root_dir) env=None if sys.platform == 'win32': # "C:\Program Files (x86)\Microsoft Visual Studio 9.0\VC\bin\vcvars64.bat" # set MSSdk=1 # set DISTUTILS_USE_SDK=1 # set VS100COMNTOOLS=C:\Program Files (x86)\Microsoft Visual Studio 9.0\Common7\Tools env = os.environ.copy() if sys.version_info[:2] in ((2, 7), (3, 5), (3, 6), (3, 7)): import setuptools # We have to import it first for the compiler to be found from distutils import msvc9compiler if sys.version_info[:2] == (2, 7): vcvarsall = msvc9compiler.find_vcvarsall(9.0) elif sys.version_info[:2] in ((3, 5), (3, 6), (3, 7)): vcvarsall = msvc9compiler.find_vcvarsall(14.0) if vcvarsall is None or not os.path.exists(vcvarsall): raise RuntimeError('Error finding vcvarsall.') if is_python_64bit(): env.update(get_environment_from_batch_command( [vcvarsall, 'amd64'], initial=os.environ.copy())) else: env.update(get_environment_from_batch_command( [vcvarsall, 'x86'], initial=os.environ.copy())) elif sys.version_info[:2] == (3, 4): if is_python_64bit(): env.update(get_environment_from_batch_command( [r"C:\Program Files\Microsoft SDKs\Windows\v7.1\Bin\SetEnv.cmd", '/x64'], initial=os.environ.copy())) else: env.update(get_environment_from_batch_command( [r"C:\Program Files\Microsoft SDKs\Windows\v7.1\Bin\SetEnv.cmd", '/x86'], initial=os.environ.copy())) else: raise AssertionError('Unable to setup environment for Python: %s' % (sys.version,)) env['MSSdk'] = '1' env['DISTUTILS_USE_SDK'] = '1' additional_args = [] for arg in sys.argv: if arg.startswith('--target-pyd-name='): additional_args.append(arg) if arg.startswith('--target-pyd-frame-eval='): additional_args.append(arg) break else: additional_args.append('--force-cython') # Build always forces cython! args = [ sys.executable, os.path.join(os.path.dirname(__file__), '..', 'setup_cython.py'), 'build_ext', '--inplace', ]+additional_args print('Calling args: %s' % (args,)) subprocess.check_call(args, env=env,) if __name__ == '__main__': use_cython = os.getenv('PYDEVD_USE_CYTHON', None) if use_cython == 'YES': build() elif use_cython == 'NO': remove_binaries(['.pyd', '.so']) elif use_cython is None: # Regular process if '--no-regenerate-files' not in sys.argv: generate_dont_trace_files() generate_cython_module() build() else: raise RuntimeError('Unexpected value for PYDEVD_USE_CYTHON: %s (accepted: YES, NO)' % (use_cython,))
[]
[]
[ "PYDEVD_USE_CYTHON" ]
[]
["PYDEVD_USE_CYTHON"]
python
1
0
tst/accept/steps/predict_customers_test.py
import os import boto3 import time from src.modules.get_export import get_export from src.modules.queue_message_writer import write_message from src.modules.type_enum import Types from src.predictors.predict_customers import make_prediction from src.dependencies.boto3_sqs_provider import get_sqs_provider from src.dependencies.pynamodb_online_statuses_provider import get_online_statuses_provider from src.dependencies.pynamodb_customers_provider import get_customers_provider from src.dependencies.pynamodb_checkin_provider import get_checkin_provider PREDICT_TOPIC_ARN = get_export('predict-topic-arn') ENV_TYPE = get_export('pipeline-enviroment-type') @given(u'a lambda function') def step_impl(context): # Pass as the import to the lambda didn't fail context.sqs_client = get_sqs_provider(test=True) context.online_statuses_model = get_online_statuses_provider(test=True) context.customer_model = get_customers_provider(test=True) context.checkin_model = get_checkin_provider(test=True) pass @when(u'a message is consumed from a predict topic') def step_impl(context): context.event = { 'Records': [ { 'Sns': { 'MessageAttributes': { 'consultant': {'Type': 'String', 'Value': 'BehaveTest1'}, 'type': {'Type': 'String', 'Value': 'predict'}, 'checkin-id': {'Type': 'String', 'Value': '1234'} } } } ] } #queue_url = 'predict-topic' #os.environ['CHECKIN_ACTION_URL'] = queue_url #context.prediction = make_prediction(context.event, None, context.sqs_client, context.customer_model, context.checkin_model) pass @then(u'Then make a hardcoded prediction') def step_impl(context): #assert len(context.sqs_client.send_message_batch.call_args.kwargs['Entries']) == 3 pass @then(u'publish it to action queue') def step_impl(context): #assert context.sqs_client.send_message_batch.call_count == 1 pass
[]
[]
[ "CHECKIN_ACTION_URL" ]
[]
["CHECKIN_ACTION_URL"]
python
1
0
chatbotconfig.py
import os basedir = os.path.abspath(os.path.dirname(__file__)) class Config(object): SECRET_KEY=os.environ.get('SECRET_KEY') or 'you-will-never-guess'
[]
[]
[ "SECRET_KEY" ]
[]
["SECRET_KEY"]
python
1
0
test/python/docker/__init__.py
import configparser import json import os import pathlib import shutil import subprocess import tempfile from docker import DockerClient from .compat import constant class Podman(object): """ Instances hold the configuration and setup for running podman commands """ def __init__(self): """Initialize a Podman instance with global options""" binary = os.getenv("PODMAN", "bin/podman") self.cmd = [binary, "--storage-driver=vfs"] cgroupfs = os.getenv("CGROUP_MANAGER", "systemd") self.cmd.append(f"--cgroup-manager={cgroupfs}") # No support for tmpfs (/tmp) or extfs (/var/tmp) # self.cmd.append("--storage-driver=overlay") if os.getenv("PODMAN_PYTHON_TEST_DEBUG"): self.cmd.append("--log-level=debug") self.cmd.append("--syslog=true") self.anchor_directory = tempfile.mkdtemp(prefix="podman_docker_") self.image_cache = os.path.join(self.anchor_directory, "cache") os.makedirs(self.image_cache, exist_ok=True) self.cmd.append("--root=" + os.path.join(self.anchor_directory, "crio")) self.cmd.append("--runroot=" + os.path.join(self.anchor_directory, "crio-run")) os.environ["CONTAINERS_REGISTRIES_CONF"] = os.path.join( self.anchor_directory, "registry.conf" ) p = configparser.ConfigParser() p.read_dict( { "registries.search": {"registries": "['quay.io', 'docker.io']"}, "registries.insecure": {"registries": "[]"}, "registries.block": {"registries": "[]"}, } ) with open(os.environ["CONTAINERS_REGISTRIES_CONF"], "w") as w: p.write(w) os.environ["CNI_CONFIG_PATH"] = os.path.join( self.anchor_directory, "cni", "net.d" ) os.makedirs(os.environ["CNI_CONFIG_PATH"], exist_ok=True) self.cmd.append("--cni-config-dir=" + os.environ["CNI_CONFIG_PATH"]) cni_cfg = os.path.join( os.environ["CNI_CONFIG_PATH"], "87-podman-bridge.conflist" ) # json decoded and encoded to ensure legal json buf = json.loads( """ { "cniVersion": "0.3.0", "name": "default", "plugins": [{ "type": "bridge", "bridge": "cni0", "isGateway": true, "ipMasq": true, "ipam": { "type": "host-local", "subnet": "10.88.0.0/16", "routes": [{ "dst": "0.0.0.0/0" }] } }, { "type": "portmap", "capabilities": { "portMappings": true } } ] } """ ) with open(cni_cfg, "w") as w: json.dump(buf, w) def open(self, command, *args, **kwargs): """Podman initialized instance to run a given command :param self: Podman instance :param command: podman sub-command to run :param args: arguments and options for command :param kwargs: See subprocess.Popen() for shell keyword :return: subprocess.Popen() instance configured to run podman instance """ cmd = self.cmd.copy() cmd.append(command) cmd.extend(args) shell = kwargs.get("shell", False) return subprocess.Popen( cmd, shell=shell, stdin=subprocess.DEVNULL, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, ) def run(self, command, *args, **kwargs): """Podman initialized instance to run a given command :param self: Podman instance :param command: podman sub-command to run :param args: arguments and options for command :param kwargs: See subprocess.Popen() for shell and check keywords :return: subprocess.Popen() instance configured to run podman instance """ cmd = self.cmd.copy() cmd.append(command) cmd.extend(args) check = kwargs.get("check", False) shell = kwargs.get("shell", False) return subprocess.run( cmd, shell=shell, check=check, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) def tear_down(self): shutil.rmtree(self.anchor_directory, ignore_errors=True) def restore_image_from_cache(self, client: DockerClient): path = os.path.join(self.image_cache, constant.ALPINE_TARBALL) if not os.path.exists(path): img = 
client.images.pull(constant.ALPINE) with open(path, mode="wb") as tarball: for frame in img.save(named=True): tarball.write(frame) else: self.run("load", "-i", path, check=True) def flush_image_cache(self): for f in pathlib.Path(self.image_cache).glob("*.tar"): f.unlink(f)
[]
[]
[ "CNI_CONFIG_PATH", "CGROUP_MANAGER", "CONTAINERS_REGISTRIES_CONF", "PODMAN_PYTHON_TEST_DEBUG", "PODMAN" ]
[]
["CNI_CONFIG_PATH", "CGROUP_MANAGER", "CONTAINERS_REGISTRIES_CONF", "PODMAN_PYTHON_TEST_DEBUG", "PODMAN"]
python
5
0
manage.py
#!/usr/bin/env python """Django's command-line utility for administrative tasks.""" import os import sys def main(): """Run administrative tasks.""" os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'control.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv) if __name__ == '__main__': main()
[]
[]
[]
[]
[]
python
0
0
database.go
package main import ( "database/sql" "log" "os" "strconv" ) // dbConn ... func dbInit() (*sql.DB, *sql.DB) { dbDriver := "mysql" dbName := os.Getenv("MYSQL_DB") dbUser := os.Getenv("MYSQL_USER") dbPass := os.Getenv("MYSQL_PASS") readServer := os.Getenv("MYSQL_READ") writeServer := os.Getenv("MYSQL_WRITE") readConns, _ := strconv.Atoi(os.Getenv("MYSQL_READ_OPEN")) writeConns, _ := strconv.Atoi(os.Getenv("MYSQL_WRITE_OPEN")) dbRead, err := sql.Open(dbDriver, dbUser+":"+dbPass+"@"+readServer+"/"+dbName+"?parseTime=true") if err != nil { log.Fatal(err) } dbWrite, err := sql.Open(dbDriver, dbUser+":"+dbPass+"@"+writeServer+"/"+dbName+"?parseTime=true") if err != nil { log.Fatal(err) } dbRead.SetMaxOpenConns(readConns) dbWrite.SetMaxOpenConns(writeConns) /* db.SetMaxIdleConns(16) db.SetConnMaxLifetime(30 * time.Second)*/ return dbRead, dbWrite } // RowExists ... func RowExists(sqlStmt string, args ...interface{}) bool { var empty int err := dbRead.QueryRow(sqlStmt, args...).Scan(&empty) if err != nil { if err != sql.ErrNoRows { // Real error log.Print(err) } return false } return true }
[ "\"MYSQL_DB\"", "\"MYSQL_USER\"", "\"MYSQL_PASS\"", "\"MYSQL_READ\"", "\"MYSQL_WRITE\"", "\"MYSQL_READ_OPEN\"", "\"MYSQL_WRITE_OPEN\"" ]
[]
[ "MYSQL_DB", "MYSQL_WRITE", "MYSQL_USER", "MYSQL_PASS", "MYSQL_READ_OPEN", "MYSQL_WRITE_OPEN", "MYSQL_READ" ]
[]
["MYSQL_DB", "MYSQL_WRITE", "MYSQL_USER", "MYSQL_PASS", "MYSQL_READ_OPEN", "MYSQL_WRITE_OPEN", "MYSQL_READ"]
go
7
0
api/conf/conf.go
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package conf import ( "flag" "fmt" "io/ioutil" "log" "os" "path/filepath" "github.com/tidwall/gjson" "gopkg.in/yaml.v2" "github.com/apisix/manager-api/internal/utils" ) const ( EnvPROD = "prod" EnvBETA = "beta" EnvDEV = "dev" EnvLOCAL = "local" WebDir = "./html" ) var ( ENV string Schema gjson.Result WorkDir = "." ServerHost = "127.0.0.1" ServerPort = 80 ETCDEndpoints = []string{"127.0.0.1:2379"} ErrorLogLevel = "warn" ErrorLogPath = "logs/error.log" UserList = make(map[string]User, 2) AuthConf Authentication SSLDefaultStatus = 1 //enable ssl by default ) type Etcd struct { Endpoints []string } type Listen struct { Host string Port int } type ErrorLog struct { Level string FilePath string `yaml:"file_path"` } type Log struct { ErrorLog ErrorLog `yaml:"error_log"` } type Conf struct { Etcd Etcd Listen Listen Log Log } type User struct { Username string Password string } type Authentication struct { Secret string ExpireTime int `yaml:"expire_time"` Users []User } type Config struct { Conf Conf Authentication Authentication } func init() { //go test if workDir := os.Getenv("APISIX_API_WORKDIR"); workDir != "" { WorkDir = workDir } else { flag.StringVar(&WorkDir, "p", ".", "current work dir") flag.Parse() } setConf() setEnvironment() initSchema() } func setConf() { filePath := WorkDir + "/conf/conf.yaml" if configurationContent, err := ioutil.ReadFile(filePath); err != nil { panic(fmt.Sprintf("fail to read configuration: %s", filePath)) } else { //configuration := gjson.ParseBytes(configurationContent) config := Config{} err := yaml.Unmarshal(configurationContent, &config) if err != nil { log.Printf("conf: %s, error: %v", configurationContent, err) } //listen if config.Conf.Listen.Port != 0 { ServerPort = config.Conf.Listen.Port } if config.Conf.Listen.Host != "" { ServerHost = config.Conf.Listen.Host } //etcd if len(config.Conf.Etcd.Endpoints) > 0 { ETCDEndpoints = config.Conf.Etcd.Endpoints } //error log if config.Conf.Log.ErrorLog.Level != "" { ErrorLogLevel = config.Conf.Log.ErrorLog.Level } if config.Conf.Log.ErrorLog.FilePath != "" { ErrorLogPath = config.Conf.Log.ErrorLog.FilePath } if !filepath.IsAbs(ErrorLogPath) { ErrorLogPath, err = filepath.Abs(WorkDir + "/" + ErrorLogPath) if err != nil { panic(err) } } //auth initAuthentication(config.Authentication) } } func setEnvironment() { ENV = EnvPROD if env := os.Getenv("ENV"); env != "" { ENV = env } } func initAuthentication(conf Authentication) { AuthConf = conf if AuthConf.Secret == "secret" { AuthConf.Secret = utils.GetFlakeUidStr() } userList := conf.Users // create user list for _, item := range userList { UserList[item.Username] = item } } func initSchema() { filePath := WorkDir + "/conf/schema.json" if schemaContent, err := ioutil.ReadFile(filePath); err != nil { 
panic(fmt.Sprintf("fail to read configuration: %s", filePath)) } else { Schema = gjson.ParseBytes(schemaContent) } }
[ "\"APISIX_API_WORKDIR\"", "\"ENV\"" ]
[]
[ "APISIX_API_WORKDIR", "ENV" ]
[]
["APISIX_API_WORKDIR", "ENV"]
go
2
0
lib/python2.7/site-packages/django/core/management/commands/shell.py
import os from django.core.management.base import NoArgsCommand from optparse import make_option class Command(NoArgsCommand): option_list = NoArgsCommand.option_list + ( make_option('--plain', action='store_true', dest='plain', help='Tells Django to use plain Python, not IPython.'), ) help = "Runs a Python interactive interpreter. Tries to use IPython, if it's available." shells = ['ipython', 'bpython'] requires_model_validation = False def ipython(self): try: from IPython import embed embed() except ImportError: # IPython < 0.11 # Explicitly pass an empty list as arguments, because otherwise # IPython would use sys.argv from this script. try: from IPython.Shell import IPShell shell = IPShell(argv=[]) shell.mainloop() except ImportError: # IPython not found at all, raise ImportError raise def bpython(self): import bpython bpython.embed() def run_shell(self): for shell in self.shells: try: return getattr(self, shell)() except ImportError: pass raise ImportError def handle_noargs(self, **options): # XXX: (Temporary) workaround for ticket #1796: force early loading of all # models from installed apps. from django.db.models.loading import get_models loaded_models = get_models() use_plain = options.get('plain', False) try: if use_plain: # Don't bother loading IPython, because the user wants plain Python. raise ImportError self.run_shell() except ImportError: import code # Set up a dictionary to serve as the environment for the shell, so # that tab completion works on objects that are imported at runtime. # See ticket 5082. imported_objects = {} try: # Try activating rlcompleter, because it's handy. import readline except ImportError: pass else: # We don't have to wrap the following import in a 'try', because # we already know 'readline' was imported successfully. import rlcompleter readline.set_completer(rlcompleter.Completer(imported_objects).complete) readline.parse_and_bind("tab:complete") # We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system # conventions and get $PYTHONSTARTUP first then import user. if not use_plain: pythonrc = os.environ.get("PYTHONSTARTUP") if pythonrc and os.path.isfile(pythonrc): try: execfile(pythonrc) except NameError: pass # This will import .pythonrc.py as a side-effect import user code.interact(local=imported_objects)
[]
[]
[ "PYTHONSTARTUP" ]
[]
["PYTHONSTARTUP"]
python
1
0
docker_images/spacy/app/pipelines/sentence_similarity.py
import os
import subprocess
import sys
from typing import Dict, List, Union

from app.pipelines import Pipeline


class SentenceSimilarityPipeline(Pipeline):
    def __init__(
        self,
        model_id: str,
    ):
        # At the time, only public models from spaCy are allowed in the inference API.
        full_model_path = model_id.split("/")
        if len(full_model_path) != 2:
            raise ValueError(
                f"Invalid model_id: {model_id}. It should have a namespace (:namespace:/:model_name:)"
            )
        namespace, model_name = full_model_path
        hf_endpoint = os.getenv("HF_ENDPOINT", "https://huggingface.co.")
        package = f"{hf_endpoint}/{namespace}/{model_name}/resolve/main/{model_name}-any-py3-none-any.whl"
        cache_dir = os.environ["PIP_CACHE"]
        subprocess.check_call(
            [sys.executable, "-m", "pip", "install", "--cache-dir", cache_dir, package]
        )

        import spacy

        self.model = spacy.load(model_name)

    def __call__(self, inputs: Dict[str, Union[str, List[str]]]) -> List[float]:
        """
        Args:
            inputs (:obj:`dict`):
                a dictionary containing two keys, 'source_sentence' mapping to the sentence that
                will be compared against all the others, and 'sentences', mapping to a list of
                strings to which the source will be compared.
        Return:
            A :obj:`list` of floats: Some similarity measure between `source_sentence` and each
            sentence from `sentences`.
        """
        source_sentence = inputs["source_sentence"]
        source_doc = self.model(source_sentence)

        similarities = []
        for sentence in inputs["sentences"]:
            search_doc = self.model(sentence)
            similarities.append(source_doc.similarity(search_doc))

        return similarities
[]
[]
[ "HF_ENDPOINT", "PIP_CACHE" ]
[]
["HF_ENDPOINT", "PIP_CACHE"]
python
2
0
main.go
package main import ( "fmt" "log" "net/http" "os" "github.com/g-harel/gothrough/internal/typeindex" "github.com/g-harel/gothrough/pages" ) func headerResponse(w http.ResponseWriter, statusCode int) { w.WriteHeader(statusCode) fmt.Fprintf(w, "%d %s", statusCode, http.StatusText(statusCode)) } func main() { port := os.Getenv("PORT") if port == "" { port = "8080" } indexFilename := os.Getenv("INDEX") if indexFilename == "" { indexFilename = ".index" } fi, err := os.Stat(indexFilename) if err != nil { panic(fmt.Sprintf("stat index file: %v", err)) } log.Printf("Index size: %v bytes", fi.Size()) f, err := os.Open(indexFilename) if err != nil { panic(fmt.Sprintf("open index file: %v", err)) } idx, err := typeindex.NewIndexFromBytes(f) if err != nil { panic(err) } http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { pages.Home(idx.Packages())(w, r) }) http.HandleFunc("/search", func(w http.ResponseWriter, r *http.Request) { err := r.ParseForm() if err != nil { headerResponse(w, http.StatusBadRequest) return } query := r.Form.Get("q") if query == "" { headerResponse(w, http.StatusBadRequest) return } results, err := idx.Search(query) if err != nil { panic(err) } pages.Results(query, results)(w, r) }) log.Printf("accepting connections at :%v", port) log.Fatal(http.ListenAndServe(fmt.Sprintf(":%v", port), nil)) }
[ "\"PORT\"", "\"INDEX\"" ]
[]
[ "PORT", "INDEX" ]
[]
["PORT", "INDEX"]
go
2
0
staging/src/k8s.io/client-go/1.4/pkg/security/apparmor/validate.go
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package apparmor import ( "bufio" "errors" "fmt" "io/ioutil" "os" "path" "strings" "k8s.io/client-go/1.4/pkg/api" "k8s.io/client-go/1.4/pkg/util" utilconfig "k8s.io/client-go/1.4/pkg/util/config" ) // Whether AppArmor should be disabled by default. // Set to true if the wrong build tags are set (see validate_disabled.go). var isDisabledBuild bool // Interface for validating that a pod with with an AppArmor profile can be run by a Node. type Validator interface { Validate(pod *api.Pod) error } func NewValidator(runtime string) Validator { if err := validateHost(runtime); err != nil { return &validator{validateHostErr: err} } appArmorFS, err := getAppArmorFS() if err != nil { return &validator{ validateHostErr: fmt.Errorf("error finding AppArmor FS: %v", err), } } return &validator{ appArmorFS: appArmorFS, } } type validator struct { validateHostErr error appArmorFS string } func (v *validator) Validate(pod *api.Pod) error { if !isRequired(pod) { return nil } if v.validateHostErr != nil { return v.validateHostErr } loadedProfiles, err := v.getLoadedProfiles() if err != nil { return fmt.Errorf("could not read loaded profiles: %v", err) } for _, container := range pod.Spec.InitContainers { if err := validateProfile(GetProfileName(pod, container.Name), loadedProfiles); err != nil { return err } } for _, container := range pod.Spec.Containers { if err := validateProfile(GetProfileName(pod, container.Name), loadedProfiles); err != nil { return err } } return nil } // Verify that the host and runtime is capable of enforcing AppArmor profiles. func validateHost(runtime string) error { // Check feature-gates if !utilconfig.DefaultFeatureGate.AppArmor() { return errors.New("AppArmor disabled by feature-gate") } // Check build support. if isDisabledBuild { return errors.New("Binary not compiled for linux") } // Check kernel support. if !IsAppArmorEnabled() { return errors.New("AppArmor is not enabled on the host") } // Check runtime support. Currently only Docker is supported. if runtime != "docker" { return fmt.Errorf("AppArmor is only enabled for 'docker' runtime. Found: %q.", runtime) } return nil } // Verify that the profile is valid and loaded. 
func validateProfile(profile string, loadedProfiles map[string]bool) error { if err := ValidateProfileFormat(profile); err != nil { return err } if strings.HasPrefix(profile, ProfileNamePrefix) { profileName := strings.TrimPrefix(profile, ProfileNamePrefix) if !loadedProfiles[profileName] { return fmt.Errorf("profile %q is not loaded", profileName) } } return nil } func ValidateProfileFormat(profile string) error { if profile == "" || profile == ProfileRuntimeDefault { return nil } if !strings.HasPrefix(profile, ProfileNamePrefix) { return fmt.Errorf("invalid AppArmor profile name: %q", profile) } return nil } func (v *validator) getLoadedProfiles() (map[string]bool, error) { profilesPath := path.Join(v.appArmorFS, "profiles") profilesFile, err := os.Open(profilesPath) if err != nil { return nil, fmt.Errorf("failed to open %s: %v", profilesPath, err) } defer profilesFile.Close() profiles := map[string]bool{} scanner := bufio.NewScanner(profilesFile) for scanner.Scan() { profileName := parseProfileName(scanner.Text()) if profileName == "" { // Unknown line format; skip it. continue } profiles[profileName] = true } return profiles, nil } // The profiles file is formatted with one profile per line, matching a form: // namespace://profile-name (mode) // profile-name (mode) // Where mode is {enforce, complain, kill}. The "namespace://" is only included for namespaced // profiles. For the purposes of Kubernetes, we consider the namespace part of the profile name. func parseProfileName(profileLine string) string { modeIndex := strings.IndexRune(profileLine, '(') if modeIndex < 0 { return "" } return strings.TrimSpace(profileLine[:modeIndex]) } func getAppArmorFS() (string, error) { mountsFile, err := os.Open("/proc/mounts") if err != nil { return "", fmt.Errorf("could not open /proc/mounts: %v", err) } defer mountsFile.Close() scanner := bufio.NewScanner(mountsFile) for scanner.Scan() { fields := strings.Fields(scanner.Text()) if len(fields) < 3 { // Unknown line format; skip it. continue } if fields[2] == "securityfs" { appArmorFS := path.Join(fields[1], "apparmor") if ok, err := util.FileExists(appArmorFS); !ok { msg := fmt.Sprintf("path %s does not exist", appArmorFS) if err != nil { return "", fmt.Errorf("%s: %v", msg, err) } else { return "", errors.New(msg) } } else { return appArmorFS, nil } } } if err := scanner.Err(); err != nil { return "", fmt.Errorf("error scanning mounts: %v", err) } return "", errors.New("securityfs not found") } // IsAppArmorEnabled returns true if apparmor is enabled for the host. // This function is forked from // https://github.com/opencontainers/runc/blob/1a81e9ab1f138c091fe5c86d0883f87716088527/libcontainer/apparmor/apparmor.go // to avoid the libapparmor dependency. func IsAppArmorEnabled() bool { if _, err := os.Stat("/sys/kernel/security/apparmor"); err == nil && os.Getenv("container") == "" { if _, err = os.Stat("/sbin/apparmor_parser"); err == nil { buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled") return err == nil && len(buf) > 1 && buf[0] == 'Y' } } return false }
[ "\"container\"" ]
[]
[ "container" ]
[]
["container"]
go
1
0
api/v1alpha2/conversion_test.go
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha2

import (
	"testing"

	fuzz "github.com/google/gofuzz"
	. "github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/api/apitesting/fuzzer"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
	runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
	v1beta1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	utilconversion "sigs.k8s.io/cluster-api/util/conversion"
)

func TestFuzzyConversion(t *testing.T) {
	g := NewWithT(t)
	scheme := runtime.NewScheme()
	g.Expect(AddToScheme(scheme)).To(Succeed())
	g.Expect(v1beta1.AddToScheme(scheme)).To(Succeed())

	t.Run("for AWSMachine", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{
		Scheme:      scheme,
		Hub:         &v1beta1.AWSMachine{},
		Spoke:       &AWSMachine{},
		FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs, CustomObjectMetaFuzzFunc},
	}))

	t.Run("for AWSMachineTemplate", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{
		Scheme:      scheme,
		Hub:         &v1beta1.AWSMachineTemplate{},
		Spoke:       &AWSMachineTemplate{},
		FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs, CustomObjectMetaFuzzFunc},
	}))
}

func fuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} {
	return []interface{}{
		AWSMachineFuzzer,
		AWSMachineTemplateFuzzer,
		AMIReferenceFuzzer,
		Beta1AWSMachineTemplateFuzzer,
	}
}

func AWSMachineFuzzer(obj *AWSMachine, c fuzz.Continue) {
	c.FuzzNoCustom(obj)

	// AWSMachine.Spec.AMI.ARN and AWSMachine.Spec.AMI.Filters have been removed in v1beta1, so setting it to nil in order to avoid v1alpha3 --> <hub> --> v1alpha3 round trip errors.
	obj.Spec.AMI.ARN = nil
	obj.Spec.AMI.Filters = nil
}

func AWSMachineTemplateFuzzer(obj *AWSMachineTemplate, c fuzz.Continue) {
	c.FuzzNoCustom(obj)

	// AWSMachineTemplate.Spec.Template.Spec.AMI.ARN and AWSMachineTemplate.Spec.Template.Spec.AMI.Filters have been removed in v1beta1, so setting it to nil in order to avoid v1alpha3 --> v1beta1 --> v1alpha3 round trip errors.
	obj.Spec.Template.Spec.AMI.ARN = nil
	obj.Spec.Template.Spec.AMI.Filters = nil
}

func Beta1AWSMachineTemplateFuzzer(obj *v1beta1.AWSMachineTemplate, c fuzz.Continue) {
	c.FuzzNoCustom(obj)
	obj.Spec.Template.Spec.NonRootVolumes = nil
}

func AMIReferenceFuzzer(obj *v1beta1.AMIReference, c fuzz.Continue) {
	c.FuzzNoCustom(obj)
	obj.EKSOptimizedLookupType = nil
}

func CustomObjectMetaFuzzFunc(_ runtimeserializer.CodecFactory) []interface{} {
	return []interface{}{
		ObjectMetaFuzzer,
		ClusterObjectMetaFuzzer,
	}
}

func ObjectMetaFuzzer(in *metav1.ObjectMeta, c fuzz.Continue) {
	c.FuzzNoCustom(in)

	in.Annotations = make(map[string]string)
	in.Labels = make(map[string]string)
}

func ClusterObjectMetaFuzzer(in *clusterv1.ObjectMeta, c fuzz.Continue) {
	c.FuzzNoCustom(in)

	in.Annotations = make(map[string]string)
	in.Labels = make(map[string]string)
}
[]
[]
[]
[]
[]
go
null
null
null
fuse/ipns/ipns_unix.go
// +build !nofuse // package fuse/ipns implements a fuse filesystem that interfaces // with ipns, the naming system for ipfs. package ipns import ( "context" "errors" "fmt" "io" "os" core "github.com/ipfs/go-ipfs/core" namesys "github.com/ipfs/go-ipfs/namesys" path "gx/ipfs/QmR3bNAtBoTN6xZ2HQNqpRQARcDoazH9jU6zKUNjFyQKWS/go-path" dag "gx/ipfs/QmScf5hnTEK8fDpRJAbcdMnKXpKUp1ytdymzXUbXDCFssp/go-merkledag" ft "gx/ipfs/QmVytt7Vtb9jbGN2uNfEm15t78pBUjw1SS72nkJFcWYZwe/go-unixfs" fuse "gx/ipfs/QmSJBsmLP1XMjv8hxYg2rUMdPDB7YUpyBo9idjrJ6Cmq6F/fuse" fs "gx/ipfs/QmSJBsmLP1XMjv8hxYg2rUMdPDB7YUpyBo9idjrJ6Cmq6F/fuse/fs" ci "gx/ipfs/QmTW4SdgBWq9GjsBsHeUx8WuGxzhgzAf88UMH2w62PC8yK/go-libp2p-crypto" cid "gx/ipfs/QmTbxNB1NwDesLmKTscr4udL2tVP7MaxvXnD1D9yX7g3PN/go-cid" peer "gx/ipfs/QmTu65MVbemtUxJEWgsTtzv9Zv9P8rvmqNA4eG9TrTRGYc/go-libp2p-peer" mfs "gx/ipfs/QmaAvPYDfQ9CqJA2UUXqfFjnYuyGb342xrDLsHCrey1BEq/go-mfs" logging "gx/ipfs/QmbkT7eMTyXfpeyB3ZMxxcxg7XH8t6uXp49jqzz4HB7BGF/go-log" ) func init() { if os.Getenv("IPFS_FUSE_DEBUG") != "" { fuse.Debug = func(msg interface{}) { fmt.Println(msg) } } } var log = logging.Logger("fuse/ipns") // FileSystem is the readwrite IPNS Fuse Filesystem. type FileSystem struct { Ipfs *core.IpfsNode RootNode *Root } // NewFileSystem constructs new fs using given core.IpfsNode instance. func NewFileSystem(ipfs *core.IpfsNode, sk ci.PrivKey, ipfspath, ipnspath string) (*FileSystem, error) { kmap := map[string]ci.PrivKey{ "local": sk, } root, err := CreateRoot(ipfs, kmap, ipfspath, ipnspath) if err != nil { return nil, err } return &FileSystem{Ipfs: ipfs, RootNode: root}, nil } // Root constructs the Root of the filesystem, a Root object. func (f *FileSystem) Root() (fs.Node, error) { log.Debug("filesystem, get root") return f.RootNode, nil } func (f *FileSystem) Destroy() { err := f.RootNode.Close() if err != nil { log.Errorf("Error Shutting Down Filesystem: %s\n", err) } } // Root is the root object of the filesystem tree. 
type Root struct { Ipfs *core.IpfsNode Keys map[string]ci.PrivKey // Used for symlinking into ipfs IpfsRoot string IpnsRoot string LocalDirs map[string]fs.Node Roots map[string]*keyRoot LocalLinks map[string]*Link } func ipnsPubFunc(ipfs *core.IpfsNode, k ci.PrivKey) mfs.PubFunc { return func(ctx context.Context, c cid.Cid) error { return ipfs.Namesys.Publish(ctx, k, path.FromCid(c)) } } func loadRoot(ctx context.Context, rt *keyRoot, ipfs *core.IpfsNode, name string) (fs.Node, error) { p, err := path.ParsePath("/ipns/" + name) if err != nil { log.Errorf("mkpath %s: %s", name, err) return nil, err } node, err := core.Resolve(ctx, ipfs.Namesys, ipfs.Resolver, p) switch err { case nil: case namesys.ErrResolveFailed: node = ft.EmptyDirNode() default: log.Errorf("looking up %s: %s", p, err) return nil, err } pbnode, ok := node.(*dag.ProtoNode) if !ok { return nil, dag.ErrNotProtobuf } root, err := mfs.NewRoot(ctx, ipfs.DAG, pbnode, ipnsPubFunc(ipfs, rt.k)) if err != nil { return nil, err } rt.root = root return &Directory{dir: root.GetDirectory()}, nil } type keyRoot struct { k ci.PrivKey alias string root *mfs.Root } func CreateRoot(ipfs *core.IpfsNode, keys map[string]ci.PrivKey, ipfspath, ipnspath string) (*Root, error) { ldirs := make(map[string]fs.Node) roots := make(map[string]*keyRoot) links := make(map[string]*Link) for alias, k := range keys { pid, err := peer.IDFromPrivateKey(k) if err != nil { return nil, err } name := pid.Pretty() kr := &keyRoot{k: k, alias: alias} fsn, err := loadRoot(ipfs.Context(), kr, ipfs, name) if err != nil { return nil, err } roots[name] = kr ldirs[name] = fsn // set up alias symlink links[alias] = &Link{ Target: name, } } return &Root{ Ipfs: ipfs, IpfsRoot: ipfspath, IpnsRoot: ipnspath, Keys: keys, LocalDirs: ldirs, LocalLinks: links, Roots: roots, }, nil } // Attr returns file attributes. func (*Root) Attr(ctx context.Context, a *fuse.Attr) error { log.Debug("Root Attr") a.Mode = os.ModeDir | 0111 // -rw+x return nil } // Lookup performs a lookup under this node. func (s *Root) Lookup(ctx context.Context, name string) (fs.Node, error) { switch name { case "mach_kernel", ".hidden", "._.": // Just quiet some log noise on OS X. return nil, fuse.ENOENT } if lnk, ok := s.LocalLinks[name]; ok { return lnk, nil } nd, ok := s.LocalDirs[name] if ok { switch nd := nd.(type) { case *Directory: return nd, nil case *FileNode: return nd, nil default: return nil, fuse.EIO } } // other links go through ipns resolution and are symlinked into the ipfs mountpoint ipnsName := "/ipns/" + name resolved, err := s.Ipfs.Namesys.Resolve(s.Ipfs.Context(), ipnsName) if err != nil { log.Warningf("ipns: namesys resolve error: %s", err) return nil, fuse.ENOENT } segments := resolved.Segments() if segments[0] == "ipfs" { p := path.Join(resolved.Segments()[1:]) return &Link{s.IpfsRoot + "/" + p}, nil } log.Error("Invalid path.Path: ", resolved) return nil, errors.New("invalid path from ipns record") } func (r *Root) Close() error { for _, mr := range r.Roots { err := mr.root.Close() if err != nil { return err } } return nil } // Forget is called when the filesystem is unmounted. probably. // see comments here: http://godoc.org/bazil.org/fuse/fs#FSDestroyer func (r *Root) Forget() { err := r.Close() if err != nil { log.Error(err) } } // ReadDirAll reads a particular directory. 
Will show locally available keys // as well as a symlink to the peerID key func (r *Root) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { log.Debug("Root ReadDirAll") var listing []fuse.Dirent for alias, k := range r.Keys { pid, err := peer.IDFromPrivateKey(k) if err != nil { continue } ent := fuse.Dirent{ Name: pid.Pretty(), Type: fuse.DT_Dir, } link := fuse.Dirent{ Name: alias, Type: fuse.DT_Link, } listing = append(listing, ent, link) } return listing, nil } // Directory is wrapper over an mfs directory to satisfy the fuse fs interface type Directory struct { dir *mfs.Directory } type FileNode struct { fi *mfs.File } // File is wrapper over an mfs file to satisfy the fuse fs interface type File struct { fi mfs.FileDescriptor } // Attr returns the attributes of a given node. func (d *Directory) Attr(ctx context.Context, a *fuse.Attr) error { log.Debug("Directory Attr") a.Mode = os.ModeDir | 0555 a.Uid = uint32(os.Getuid()) a.Gid = uint32(os.Getgid()) return nil } // Attr returns the attributes of a given node. func (fi *FileNode) Attr(ctx context.Context, a *fuse.Attr) error { log.Debug("File Attr") size, err := fi.fi.Size() if err != nil { // In this case, the dag node in question may not be unixfs return fmt.Errorf("fuse/ipns: failed to get file.Size(): %s", err) } a.Mode = os.FileMode(0666) a.Size = uint64(size) a.Uid = uint32(os.Getuid()) a.Gid = uint32(os.Getgid()) return nil } // Lookup performs a lookup under this node. func (s *Directory) Lookup(ctx context.Context, name string) (fs.Node, error) { child, err := s.dir.Child(name) if err != nil { // todo: make this error more versatile. return nil, fuse.ENOENT } switch child := child.(type) { case *mfs.Directory: return &Directory{dir: child}, nil case *mfs.File: return &FileNode{fi: child}, nil default: // NB: if this happens, we do not want to continue, unpredictable behaviour // may occur. panic("invalid type found under directory. 
programmer error.") } } // ReadDirAll reads the link structure as directory entries func (dir *Directory) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { var entries []fuse.Dirent listing, err := dir.dir.List(ctx) if err != nil { return nil, err } for _, entry := range listing { dirent := fuse.Dirent{Name: entry.Name} switch mfs.NodeType(entry.Type) { case mfs.TDir: dirent.Type = fuse.DT_Dir case mfs.TFile: dirent.Type = fuse.DT_File } entries = append(entries, dirent) } if len(entries) > 0 { return entries, nil } return nil, fuse.ENOENT } func (fi *File) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error { _, err := fi.fi.Seek(req.Offset, io.SeekStart) if err != nil { return err } fisize, err := fi.fi.Size() if err != nil { return err } select { case <-ctx.Done(): return ctx.Err() default: } readsize := min(req.Size, int(fisize-req.Offset)) n, err := fi.fi.CtxReadFull(ctx, resp.Data[:readsize]) resp.Data = resp.Data[:n] return err } func (fi *File) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error { // TODO: at some point, ensure that WriteAt here respects the context wrote, err := fi.fi.WriteAt(req.Data, req.Offset) if err != nil { return err } resp.Size = wrote return nil } func (fi *File) Flush(ctx context.Context, req *fuse.FlushRequest) error { errs := make(chan error, 1) go func() { errs <- fi.fi.Flush() }() select { case err := <-errs: return err case <-ctx.Done(): return ctx.Err() } } func (fi *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error { if req.Valid.Size() { cursize, err := fi.fi.Size() if err != nil { return err } if cursize != int64(req.Size) { err := fi.fi.Truncate(int64(req.Size)) if err != nil { return err } } } return nil } // Fsync flushes the content in the file to disk. func (fi *FileNode) Fsync(ctx context.Context, req *fuse.FsyncRequest) error { // This needs to perform a *full* flush because, in MFS, a write isn't // persisted until the root is updated. errs := make(chan error, 1) go func() { errs <- fi.fi.Flush() }() select { case err := <-errs: return err case <-ctx.Done(): return ctx.Err() } } func (fi *File) Forget() { // TODO(steb): this seems like a place where we should be *uncaching*, not flushing. 
err := fi.fi.Flush() if err != nil { log.Debug("forget file error: ", err) } } func (dir *Directory) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) { child, err := dir.dir.Mkdir(req.Name) if err != nil { return nil, err } return &Directory{dir: child}, nil } func (fi *FileNode) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) { fd, err := fi.fi.Open(mfs.Flags{ Read: req.Flags.IsReadOnly() || req.Flags.IsReadWrite(), Write: req.Flags.IsWriteOnly() || req.Flags.IsReadWrite(), Sync: true, }) if err != nil { return nil, err } if req.Flags&fuse.OpenTruncate != 0 { if req.Flags.IsReadOnly() { log.Error("tried to open a readonly file with truncate") return nil, fuse.ENOTSUP } log.Info("Need to truncate file!") err := fd.Truncate(0) if err != nil { return nil, err } } else if req.Flags&fuse.OpenAppend != 0 { log.Info("Need to append to file!") if req.Flags.IsReadOnly() { log.Error("tried to open a readonly file with append") return nil, fuse.ENOTSUP } _, err := fd.Seek(0, io.SeekEnd) if err != nil { log.Error("seek reset failed: ", err) return nil, err } } return &File{fi: fd}, nil } func (fi *File) Release(ctx context.Context, req *fuse.ReleaseRequest) error { return fi.fi.Close() } func (dir *Directory) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) { // New 'empty' file nd := dag.NodeWithData(ft.FilePBData(nil, 0)) err := dir.dir.AddChild(req.Name, nd) if err != nil { return nil, nil, err } child, err := dir.dir.Child(req.Name) if err != nil { return nil, nil, err } fi, ok := child.(*mfs.File) if !ok { return nil, nil, errors.New("child creation failed") } nodechild := &FileNode{fi: fi} fd, err := fi.Open(mfs.Flags{ Read: req.Flags.IsReadOnly() || req.Flags.IsReadWrite(), Write: req.Flags.IsWriteOnly() || req.Flags.IsReadWrite(), Sync: true, }) if err != nil { return nil, nil, err } return nodechild, &File{fi: fd}, nil } func (dir *Directory) Remove(ctx context.Context, req *fuse.RemoveRequest) error { err := dir.dir.Unlink(req.Name) if err != nil { return fuse.ENOENT } return nil } // Rename implements NodeRenamer func (dir *Directory) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fs.Node) error { cur, err := dir.dir.Child(req.OldName) if err != nil { return err } err = dir.dir.Unlink(req.OldName) if err != nil { return err } switch newDir := newDir.(type) { case *Directory: nd, err := cur.GetNode() if err != nil { return err } err = newDir.dir.AddChild(req.NewName, nd) if err != nil { return err } case *FileNode: log.Error("Cannot move node into a file!") return fuse.EPERM default: log.Error("Unknown node type for rename target dir!") return errors.New("unknown fs node type") } return nil } func min(a, b int) int { if a < b { return a } return b } // to check that out Node implements all the interfaces we want type ipnsRoot interface { fs.Node fs.HandleReadDirAller fs.NodeStringLookuper } var _ ipnsRoot = (*Root)(nil) type ipnsDirectory interface { fs.HandleReadDirAller fs.Node fs.NodeCreater fs.NodeMkdirer fs.NodeRemover fs.NodeRenamer fs.NodeStringLookuper } var _ ipnsDirectory = (*Directory)(nil) type ipnsFile interface { fs.HandleFlusher fs.HandleReader fs.HandleWriter fs.HandleReleaser } type ipnsFileNode interface { fs.Node fs.NodeFsyncer fs.NodeOpener } var _ ipnsFileNode = (*FileNode)(nil) var _ ipnsFile = (*File)(nil)
[ "\"IPFS_FUSE_DEBUG\"" ]
[]
[ "IPFS_FUSE_DEBUG" ]
[]
["IPFS_FUSE_DEBUG"]
go
1
0
accountsynchr/tests/dao/test_inactive_accounts.py
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0

from django.test import TestCase
from django.conf import settings
from accountsynchr.dao.inactive_accounts import (
    get_accounts_to_purge1, get_accounts_to_purge, get_file_path)


class TestInactiveAccounts(TestCase):

    def test_get_file_path(self):
        with self.settings(CSV_FILE_PATH='data'):
            self.assertEqual(get_file_path(), "data/accounts.csv")

    def test_get_accounts_to_purge(self):
        with self.settings(CSV_FILE_PATH=None,
                           EMAIL_ADDRESS_DOMAIN='@test.edu'):
            accounts_to_purge, user_set = get_accounts_to_purge(set())
            self.assertEqual(len(accounts_to_purge), 2)
            self.assertTrue('sdummys' in user_set)
            self.assertTrue('sdummyp' in user_set)
            self.assertEqual(accounts_to_purge[0].uwnetid, 'sdummys')
            self.assertEqual(accounts_to_purge[1].uwnetid, 'sdummyp')

    def test_get_accounts_to_purge1(self):
        with self.settings(CSV_FILE_PATH=None,
                           EMAIL_ADDRESS_DOMAIN='@test.edu'):
            accounts_to_purge, user_set = get_accounts_to_purge1(set())
            self.assertEqual(len(accounts_to_purge), 2)
            self.assertTrue('sdummys' in user_set)
            self.assertTrue('sdummyp' in user_set)
            self.assertEqual(accounts_to_purge[0].uwnetid, 'sdummys')
            self.assertEqual(accounts_to_purge[1].uwnetid, 'sdummyp')
[]
[]
[]
[]
[]
python
null
null
null
tests/models.py
from typing import List, Optional

from sqlmodel import Field, Relationship, SQLModel


class Sport(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
    name: str
    athletes: List["Athlete"] = Relationship(back_populates="sport")


class Athlete(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
    name: str
    sport_id: Optional[int] = Field(default=None, foreign_key="sport.id")
    sport: Optional[Sport] = Relationship(back_populates="athletes")
[]
[]
[]
[]
[]
python
null
null
null
soracom/generated/cmd/groups_delete_config_namespace.go
// Code generated by soracom-cli generate-cmd. DO NOT EDIT.

package cmd

import (
	"fmt"
	"net/url"
	"os"

	"github.com/spf13/cobra"
)

// GroupsDeleteConfigNamespaceCmdGroupId holds value of 'group_id' option
var GroupsDeleteConfigNamespaceCmdGroupId string

// GroupsDeleteConfigNamespaceCmdNamespace holds value of 'namespace' option
var GroupsDeleteConfigNamespaceCmdNamespace string

func init() {
	GroupsDeleteConfigNamespaceCmd.Flags().StringVar(&GroupsDeleteConfigNamespaceCmdGroupId, "group-id", "", TRAPI("Target group."))

	GroupsDeleteConfigNamespaceCmd.Flags().StringVar(&GroupsDeleteConfigNamespaceCmdNamespace, "namespace", "", TRAPI("Namespace to be deleted."))

	GroupsCmd.AddCommand(GroupsDeleteConfigNamespaceCmd)
}

// GroupsDeleteConfigNamespaceCmd defines 'delete-config-namespace' subcommand
var GroupsDeleteConfigNamespaceCmd = &cobra.Command{
	Use:   "delete-config-namespace",
	Short: TRAPI("/groups/{group_id}/configuration/{namespace}:delete:summary"),
	Long:  TRAPI(`/groups/{group_id}/configuration/{namespace}:delete:description`),
	RunE: func(cmd *cobra.Command, args []string) error {
		if len(args) > 0 {
			return fmt.Errorf("unexpected arguments passed => %v", args)
		}

		opt := &apiClientOptions{
			BasePath: "/v1",
			Language: getSelectedLanguage(),
		}

		ac := newAPIClient(opt)

		if v := os.Getenv("SORACOM_VERBOSE"); v != "" {
			ac.SetVerbose(true)
		}

		err := authHelper(ac, cmd, args)
		if err != nil {
			cmd.SilenceUsage = true
			return err
		}

		param, err := collectGroupsDeleteConfigNamespaceCmdParams(ac)
		if err != nil {
			return err
		}

		body, err := ac.callAPI(param)
		if err != nil {
			cmd.SilenceUsage = true
			return err
		}

		if body == "" {
			return nil
		}

		if rawOutput {
			_, err = os.Stdout.Write([]byte(body))
		} else {
			return prettyPrintStringAsJSON(body)
		}

		return err
	},
}

func collectGroupsDeleteConfigNamespaceCmdParams(ac *apiClient) (*apiParams, error) {
	var parsedBody interface{}
	var err error
	err = checkIfRequiredStringParameterIsSupplied("group_id", "group-id", "path", parsedBody, GroupsDeleteConfigNamespaceCmdGroupId)
	if err != nil {
		return nil, err
	}

	err = checkIfRequiredStringParameterIsSupplied("namespace", "namespace", "path", parsedBody, GroupsDeleteConfigNamespaceCmdNamespace)
	if err != nil {
		return nil, err
	}

	return &apiParams{
		method: "DELETE",
		path:   buildPathForGroupsDeleteConfigNamespaceCmd("/groups/{group_id}/configuration/{namespace}"),
		query:  buildQueryForGroupsDeleteConfigNamespaceCmd(),

		noRetryOnError: noRetryOnError,
	}, nil
}

func buildPathForGroupsDeleteConfigNamespaceCmd(path string) string {
	escapedGroupId := url.PathEscape(GroupsDeleteConfigNamespaceCmdGroupId)

	path = strReplace(path, "{"+"group_id"+"}", escapedGroupId, -1)

	escapedNamespace := url.PathEscape(GroupsDeleteConfigNamespaceCmdNamespace)

	path = strReplace(path, "{"+"namespace"+"}", escapedNamespace, -1)

	return path
}

func buildQueryForGroupsDeleteConfigNamespaceCmd() url.Values {
	result := url.Values{}

	return result
}
[ "\"SORACOM_VERBOSE\"" ]
[]
[ "SORACOM_VERBOSE" ]
[]
["SORACOM_VERBOSE"]
go
1
0
pkg/helm/tiller.go
package helm

import (
	"os"
	"path/filepath"

	"github.com/jenkins-x/jx-logging/pkg/log"
	"github.com/jenkins-x/jx/v2/pkg/util"
	"github.com/pkg/errors"
)

// GetTillerAddress returns the address that tiller is listening on
func GetTillerAddress() string {
	tillerAddress := os.Getenv("TILLER_ADDR")
	if tillerAddress == "" {
		tillerAddress = ":44134"
	}
	return tillerAddress
}

// StartLocalTiller starts local tiller server
func StartLocalTiller(lazy bool) error {
	tillerAddress := GetTillerAddress()
	tillerArgs := os.Getenv("TILLER_ARGS")
	args := []string{"-listen", tillerAddress, "-alsologtostderr"}
	if tillerArgs != "" {
		args = append(args, tillerArgs)
	}
	logsDir, err := util.LogsDir()
	if err != nil {
		return err
	}
	logFile := filepath.Join(logsDir, "tiller.log")
	f, err := os.Create(logFile)
	if err != nil {
		return errors.Wrapf(err, "Failed to create tiller log file %s: %s", logFile, err)
	}
	err = util.RunCommandBackground("tiller", f, !lazy, args...)
	if err == nil {
		log.Logger().Infof("running tiller locally and logging to file: %s", util.ColorInfo(logFile))
	} else if lazy {
		// lets assume its because the process is already running so lets ignore
		return nil
	}
	return err
}

// RestartLocalTiller restarts local tiller
func RestartLocalTiller() error {
	log.Logger().Info("checking if we need to kill a local tiller process")
	err := util.KillProcesses("tiller")
	if err != nil {
		return errors.Wrap(err, "unable to kill local tiller process")
	}
	return StartLocalTiller(false)
}

// StartLocalTillerIfNotRunning starts local tiller if not running
func StartLocalTillerIfNotRunning() error {
	return StartLocalTiller(true)
}
[ "\"TILLER_ADDR\"", "\"TILLER_ARGS\"" ]
[]
[ "TILLER_ADDR", "TILLER_ARGS" ]
[]
["TILLER_ADDR", "TILLER_ARGS"]
go
2
0
kernel/groovy/src/main/java/com/twosigma/beakerx/groovy/evaluator/GroovyEvaluator.java
/* * Copyright 2014 TWO SIGMA OPEN SOURCE, LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.twosigma.beakerx.groovy.evaluator; import com.twosigma.beakerx.BeakerXClient; import com.twosigma.beakerx.TryResult; import com.twosigma.beakerx.autocomplete.AutocompleteResult; import com.twosigma.beakerx.autocomplete.MagicCommandAutocompletePatterns; import com.twosigma.beakerx.evaluator.BaseEvaluator; import com.twosigma.beakerx.evaluator.ClasspathScanner; import com.twosigma.beakerx.evaluator.JobDescriptor; import com.twosigma.beakerx.evaluator.TempFolderFactory; import com.twosigma.beakerx.evaluator.TempFolderFactoryImpl; import com.twosigma.beakerx.groovy.autocomplete.GroovyAutocomplete; import com.twosigma.beakerx.groovy.autocomplete.GroovyClasspathScanner; import com.twosigma.beakerx.jvm.classloader.BeakerXUrlClassLoader; import com.twosigma.beakerx.jvm.object.SimpleEvaluationObject; import com.twosigma.beakerx.jvm.threads.BeakerCellExecutor; import com.twosigma.beakerx.jvm.threads.CellExecutor; import com.twosigma.beakerx.kernel.Classpath; import com.twosigma.beakerx.kernel.EvaluatorParameters; import com.twosigma.beakerx.kernel.ExecutionOptions; import com.twosigma.beakerx.kernel.ImportPath; import com.twosigma.beakerx.kernel.Imports; import com.twosigma.beakerx.kernel.PathToJar; import groovy.lang.Binding; import groovy.lang.GroovyClassLoader; import org.codehaus.groovy.control.customizers.ImportCustomizer; import java.io.File; import java.util.concurrent.Executors; import static com.twosigma.beakerx.groovy.evaluator.EnvVariablesFilter.envVariablesFilter; import static com.twosigma.beakerx.groovy.evaluator.GroovyClassLoaderFactory.addImportPathToImportCustomizer; import static com.twosigma.beakerx.groovy.evaluator.GroovyClassLoaderFactory.newEvaluator; import static com.twosigma.beakerx.groovy.evaluator.GroovyClassLoaderFactory.newParentClassLoader; public class GroovyEvaluator extends BaseEvaluator { private GroovyClassLoader groovyClassLoader; private Binding scriptBinding = null; private ImportCustomizer icz; private BeakerXUrlClassLoader beakerxUrlClassLoader; private GroovyAutocomplete gac; public GroovyEvaluator(String id, String sId, EvaluatorParameters evaluatorParameters, BeakerXClient beakerxClient, MagicCommandAutocompletePatterns autocompletePatterns, ClasspathScanner classpathScanner) { this(id, sId, new BeakerCellExecutor("groovy"), new TempFolderFactoryImpl(), evaluatorParameters, beakerxClient, autocompletePatterns, classpathScanner); } public GroovyEvaluator(String id, String sId, CellExecutor cellExecutor, TempFolderFactory tempFolderFactory, EvaluatorParameters evaluatorParameters, BeakerXClient beakerxClient, MagicCommandAutocompletePatterns autocompletePatterns, ClasspathScanner classpathScanner) { super(id, sId, cellExecutor, tempFolderFactory, evaluatorParameters, beakerxClient, autocompletePatterns, classpathScanner); reloadClassloader(); gac = createGroovyAutocomplete(new GroovyClasspathScanner(), groovyClassLoader, imports, autocompletePatterns); outDir = 
envVariablesFilter(outDir, System.getenv()); } @Override public TryResult evaluate(SimpleEvaluationObject seo, String code, ExecutionOptions executionOptions) { return evaluate(seo, new GroovyWorkerThread(this, new JobDescriptor(code, seo, executionOptions))); } @Override public AutocompleteResult autocomplete(String code, int caretPosition) { return gac.find(code, caretPosition); } @Override protected void doResetEnvironment() { String cpp = createClasspath(classPath); reloadClassloader(); gac = createGroovyAutocomplete(new GroovyClasspathScanner(cpp), groovyClassLoader, imports, autocompletePatterns); executorService.shutdown(); executorService = Executors.newSingleThreadExecutor(); } @Override public void exit() { super.exit(); killAllThreads(); executorService.shutdown(); executorService = Executors.newSingleThreadExecutor(); } @Override protected void addJarToClassLoader(PathToJar pathToJar) { this.beakerxUrlClassLoader.addJar(pathToJar); } @Override protected void addImportToClassLoader(ImportPath anImport) { addImportPathToImportCustomizer(icz, anImport); } private GroovyAutocomplete createGroovyAutocomplete(GroovyClasspathScanner c, GroovyClassLoader groovyClassLoader, Imports imports, MagicCommandAutocompletePatterns autocompletePatterns) { return new GroovyAutocomplete(c, groovyClassLoader, imports, autocompletePatterns, scriptBinding); } private String createClasspath(Classpath classPath) { StringBuilder cppBuilder = new StringBuilder(); for (String pt : classPath.getPathsAsStrings()) { cppBuilder.append(pt); cppBuilder.append(File.pathSeparator); } String cpp = cppBuilder.toString(); cpp += File.pathSeparator; cpp += System.getProperty("java.class.path"); return cpp; } @Override public ClassLoader getClassLoader() { return groovyClassLoader; } private void reloadClassloader() { this.beakerxUrlClassLoader = newParentClassLoader(getClasspath()); this.icz = new ImportCustomizer(); this.groovyClassLoader = newEvaluator(getImports(), getClasspath(), getOutDir(), icz, beakerxUrlClassLoader); this.scriptBinding = new Binding(); } public GroovyClassLoader getGroovyClassLoader() { return groovyClassLoader; } public Binding getScriptBinding() { return scriptBinding; } }
[]
[]
[]
[]
[]
java
0
0
vendor/github.com/rancher/remotedialer/session.go
package remotedialer import ( "context" "errors" "fmt" "io" "net" "os" "strconv" "strings" "sync" "sync/atomic" "time" "github.com/gorilla/websocket" "github.com/sirupsen/logrus" ) type Session struct { sync.Mutex nextConnID int64 clientKey string sessionKey int64 conn *wsConn conns map[int64]*connection remoteClientKeys map[string]map[int]bool auth ConnectAuthorizer pingCancel context.CancelFunc pingWait sync.WaitGroup dialer Dialer client bool } // PrintTunnelData No tunnel logging by default var PrintTunnelData bool = true func init() { if os.Getenv("CATTLE_TUNNEL_DATA_DEBUG") == "false" { PrintTunnelData = false } } func NewClientSession(auth ConnectAuthorizer, conn *websocket.Conn) *Session { return &Session{ clientKey: "client", conn: newWSConn(conn), conns: map[int64]*connection{}, auth: auth, client: true, } } func newSession(sessionKey int64, clientKey string, conn *websocket.Conn) *Session { return &Session{ nextConnID: 1, clientKey: clientKey, sessionKey: sessionKey, conn: newWSConn(conn), conns: map[int64]*connection{}, remoteClientKeys: map[string]map[int]bool{}, } } func (s *Session) startPings(rootCtx context.Context) { ctx, cancel := context.WithCancel(rootCtx) s.pingCancel = cancel s.pingWait.Add(1) go func() { defer s.pingWait.Done() t := time.NewTicker(PingWriteInterval) defer t.Stop() for { select { case <-ctx.Done(): return case <-t.C: s.conn.Lock() if err := s.conn.conn.WriteControl(websocket.PingMessage, []byte(""), time.Now().Add(time.Second)); err != nil { logrus.WithError(err).Error("Error writing ping") } logrus.Debug("Wrote ping") s.conn.Unlock() } } }() } func (s *Session) stopPings() { if s.pingCancel == nil { return } s.pingCancel() s.pingWait.Wait() } func (s *Session) Serve(ctx context.Context) (int, error) { if s.client { s.startPings(ctx) } for { msType, reader, err := s.conn.NextReader() if err != nil { return 400, err } if msType != websocket.BinaryMessage { return 400, errWrongMessageType } if err := s.serveMessage(reader); err != nil { return 500, err } } } func (s *Session) serveMessage(reader io.Reader) error { message, err := newServerMessage(reader) if err != nil { return err } if PrintTunnelData { logrus.Debug("REQUEST ", message) } if message.messageType == Connect { if s.auth == nil || !s.auth(message.proto, message.address) { return errors.New("connect not allowed") } s.clientConnect(message) return nil } s.Lock() if message.messageType == AddClient && s.remoteClientKeys != nil { err := s.addRemoteClient(message.address) s.Unlock() return err } else if message.messageType == RemoveClient { err := s.removeRemoteClient(message.address) s.Unlock() return err } conn := s.conns[message.connID] s.Unlock() if conn == nil { if message.messageType == Data { err := fmt.Errorf("connection not found %s/%d/%d", s.clientKey, s.sessionKey, message.connID) newErrorMessage(message.connID, err).WriteTo(s.conn) } return nil } switch message.messageType { case Data: if _, err := io.Copy(conn.tunnelWriter(), message); err != nil { s.closeConnection(message.connID, err) } case Error: s.closeConnection(message.connID, message.Err()) } return nil } func parseAddress(address string) (string, int, error) { parts := strings.SplitN(address, "/", 2) if len(parts) != 2 { return "", 0, errors.New("not / separated") } v, err := strconv.Atoi(parts[1]) return parts[0], v, err } func (s *Session) addRemoteClient(address string) error { clientKey, sessionKey, err := parseAddress(address) if err != nil { return fmt.Errorf("invalid remote Session %s: %v", address, err) } keys := 
s.remoteClientKeys[clientKey] if keys == nil { keys = map[int]bool{} s.remoteClientKeys[clientKey] = keys } keys[int(sessionKey)] = true if PrintTunnelData { logrus.Debugf("ADD REMOTE CLIENT %s, SESSION %d", address, s.sessionKey) } return nil } func (s *Session) removeRemoteClient(address string) error { clientKey, sessionKey, err := parseAddress(address) if err != nil { return fmt.Errorf("invalid remote Session %s: %v", address, err) } keys := s.remoteClientKeys[clientKey] delete(keys, int(sessionKey)) if len(keys) == 0 { delete(s.remoteClientKeys, clientKey) } if PrintTunnelData { logrus.Debugf("REMOVE REMOTE CLIENT %s, SESSION %d", address, s.sessionKey) } return nil } func (s *Session) closeConnection(connID int64, err error) { s.Lock() conn := s.conns[connID] delete(s.conns, connID) if PrintTunnelData { logrus.Debugf("CONNECTIONS %d %d", s.sessionKey, len(s.conns)) } s.Unlock() if conn != nil { conn.tunnelClose(err) } } func (s *Session) clientConnect(message *message) { conn := newConnection(message.connID, s, message.proto, message.address) s.Lock() s.conns[message.connID] = conn if PrintTunnelData { logrus.Debugf("CONNECTIONS %d %d", s.sessionKey, len(s.conns)) } s.Unlock() go clientDial(s.dialer, conn, message) } func (s *Session) serverConnect(deadline time.Duration, proto, address string) (net.Conn, error) { connID := atomic.AddInt64(&s.nextConnID, 1) conn := newConnection(connID, s, proto, address) s.Lock() s.conns[connID] = conn if PrintTunnelData { logrus.Debugf("CONNECTIONS %d %d", s.sessionKey, len(s.conns)) } s.Unlock() _, err := s.writeMessage(newConnect(connID, deadline, proto, address)) if err != nil { s.closeConnection(connID, err) return nil, err } return conn, err } func (s *Session) writeMessage(message *message) (int, error) { if PrintTunnelData { logrus.Debug("WRITE ", message) } return message.WriteTo(s.conn) } func (s *Session) Close() { s.Lock() defer s.Unlock() s.stopPings() for _, connection := range s.conns { connection.tunnelClose(errors.New("tunnel disconnect")) } s.conns = map[int64]*connection{} } func (s *Session) sessionAdded(clientKey string, sessionKey int64) { client := fmt.Sprintf("%s/%d", clientKey, sessionKey) _, err := s.writeMessage(newAddClient(client)) if err != nil { s.conn.conn.Close() } } func (s *Session) sessionRemoved(clientKey string, sessionKey int64) { client := fmt.Sprintf("%s/%d", clientKey, sessionKey) _, err := s.writeMessage(newRemoveClient(client)) if err != nil { s.conn.conn.Close() } }
[ "\"CATTLE_TUNNEL_DATA_DEBUG\"" ]
[]
[ "CATTLE_TUNNEL_DATA_DEBUG" ]
[]
["CATTLE_TUNNEL_DATA_DEBUG"]
go
1
0
services/bitcou/bitcou2.go
package bitcou import ( b64 "encoding/base64" "encoding/json" "errors" "github.com/grupokindynos/common/ladon" "io/ioutil" "log" "net/http" "os" "strconv" "time" ) type ServiceV2 struct { BitcouURL string BitcouToken string ImageMap map[int]ProviderImage } func InitServiceV2() *ServiceV2 { service := &ServiceV2{ BitcouURL: os.Getenv("BITCOU_URL_DEV_V2"), BitcouToken: os.Getenv("BITCOU_TOKEN_V2"), ImageMap: make(map[int]ProviderImage), } return service } func (bs *ServiceV2) GetListV2(dev bool) ([]VoucherV2, error) { var url string if dev { url = os.Getenv("BITCOU_URL_DEV_V2") + "voucher/availableVouchers/" } else { url = os.Getenv("BITCOU_URL_PROD_V2") + "voucher/availableVouchers/" } log.Println("Getting products using url: ", url) token := "Bearer " + os.Getenv("BITCOU_TOKEN_V2") req, err := http.NewRequest("GET", url, nil) if err != nil { return nil, err } req.Header.Add("Authorization", token) client := &http.Client{Timeout: 500 * time.Second} res, err := client.Do(req) if err != nil { return nil, err } defer func() { _ = res.Body.Close() }() contents, _ := ioutil.ReadAll(res.Body) var response BaseResponse err = json.Unmarshal(contents, &response) if err != nil { return nil, err } var vouchersList []VoucherV2 dataBytes, err := json.Marshal(response.Data) if err != nil { return nil, err } err = json.Unmarshal(dataBytes, &vouchersList) if err != nil { return nil, err } return vouchersList, nil } func (bs *ServiceV2) GetProvidersV2(dev bool) ([]Provider, error) { var url string if dev { url = os.Getenv("BITCOU_URL_DEV_V2") + "voucher/providers" } else { url = os.Getenv("BITCOU_URL_PROD_V2") + "voucher/providers" } log.Println("Getting providers using url: ", url) token := "Bearer " + os.Getenv("BITCOU_TOKEN_V2") req, err := http.NewRequest("GET", url, nil) if err != nil { return nil, err } req.Header.Add("Authorization", token) client := &http.Client{Timeout: 20 * time.Second} res, err := client.Do(req) if err != nil { return nil, err } defer func() { _ = res.Body.Close() }() contents, _ := ioutil.ReadAll(res.Body) var response BaseResponse err = json.Unmarshal(contents, &response) if err != nil { return nil, err } var providerList []Provider dataBytes, err := json.Marshal(response.Data) if err != nil { return nil, err } err = json.Unmarshal(dataBytes, &providerList) if err != nil { return nil, err } return providerList, nil } func (bs *ServiceV2) GetProviderImage(providerId int, dev bool) (imageInfo ProviderImage, err error) { if val, ok := bs.ImageMap[providerId]; ok { //log.Println("using cached image for ", providerId) return val, nil } var url string if dev { url = os.Getenv("BITCOU_URL_DEV_V2") + "voucher/providerImage" } else { url = os.Getenv("BITCOU_URL_PROD_V2") + "voucher/providerImage" } token := "Bearer " + os.Getenv("BITCOU_TOKEN_V2") req, err := http.NewRequest("GET", url + "?provider_id=" + strconv.Itoa(providerId), nil) if err != nil { return imageInfo, err } req.Header.Add("Authorization", token) client := &http.Client{Timeout: 20 * time.Second} res, err := client.Do(req) if err != nil { return imageInfo, err } defer func() { _ = res.Body.Close() }() contents, _ := ioutil.ReadAll(res.Body) var response BaseResponse err = json.Unmarshal(contents, &response) if err != nil { return imageInfo, err } if len(response.Data) > 0 { dataBytes, err := json.Marshal(response.Data[0]) if err != nil { return imageInfo, err } err = json.Unmarshal(dataBytes, &imageInfo) if err != nil { return imageInfo, err } bs.ImageMap[providerId] = imageInfo return imageInfo, nil } else { return 
imageInfo, errors.New("image unavailable") } } func (bs *ServiceV2) GetProviderImageBase64(imageUrl string, providerId int) (imageInfo ladon.ProviderImageApp, err error) { token := "Bearer " + os.Getenv("BITCOU_TOKEN_V2") req, err := http.NewRequest("GET", imageUrl, nil) if err != nil { return imageInfo, err } req.Header.Add("Authorization", token) client := &http.Client{Timeout: 20 * time.Second} res, err := client.Do(req) if err != nil { return imageInfo, err } contents, _ := ioutil.ReadAll(res.Body) _ = res.Body.Close() uEnc := b64.StdEncoding.EncodeToString(contents) imageInfo.Image = uEnc imageInfo.ProviderId = providerId imageInfo.Url = imageUrl return }
[ "\"BITCOU_URL_DEV_V2\"", "\"BITCOU_TOKEN_V2\"", "\"BITCOU_URL_DEV_V2\"", "\"BITCOU_URL_PROD_V2\"", "\"BITCOU_TOKEN_V2\"", "\"BITCOU_URL_DEV_V2\"", "\"BITCOU_URL_PROD_V2\"", "\"BITCOU_TOKEN_V2\"", "\"BITCOU_URL_DEV_V2\"", "\"BITCOU_URL_PROD_V2\"", "\"BITCOU_TOKEN_V2\"", "\"BITCOU_TOKEN_V2\"" ]
[]
[ "BITCOU_TOKEN_V2", "BITCOU_URL_DEV_V2", "BITCOU_URL_PROD_V2" ]
[]
["BITCOU_TOKEN_V2", "BITCOU_URL_DEV_V2", "BITCOU_URL_PROD_V2"]
go
3
0
gdalplugin/gcs_adapter.go
package main import "C" import ( "context" "log" "os" "strconv" "strings" "unicode" "github.com/airbusgeo/godal/gcs" ) var ctx context.Context func blockSize() int { s := os.Getenv("GODAL_BLOCKSIZE") const ( BYTE = 1 << (10 * iota) KILOBYTE MEGABYTE GIGABYTE TERABYTE PETABYTE EXABYTE ) s = strings.TrimSpace(s) if len(s) == 0 { return 0 } s = strings.ToUpper(s) i := strings.IndexFunc(s, unicode.IsLetter) if i == -1 { ii, err := strconv.Atoi(s) if err != nil || ii <= 0 { log.Printf("failed to parse GODAL_BLOCKSIZE %s", s) return 0 } return ii } bytesString, multiple := s[:i], s[i:] bytes, err := strconv.ParseFloat(bytesString, 64) if err != nil || bytes < 0 { log.Printf("failed to parse GODAL_BLOCKSIZE %s", s) return 0 } switch multiple { case "E", "EB", "EIB": return int(bytes * EXABYTE) case "P", "PB", "PIB": return int(bytes * PETABYTE) case "T", "TB", "TIB": return int(bytes * TERABYTE) case "G", "GB", "GIB": return int(bytes * GIGABYTE) case "M", "MB", "MIB": return int(bytes * MEGABYTE) case "K", "KB", "KIB": return int(bytes * KILOBYTE) case "B": return int(bytes) default: log.Printf("failed to parse GODAL_BLOCKSIZE %s", s) return 0 } } func numBlocks() int { s := os.Getenv("GODAL_NUMBLOCKS") s = strings.TrimSpace(s) if len(s) == 0 { return 64 } ii, err := strconv.Atoi(s) if err != nil || ii <= 0 { log.Printf("failed to parse GODAL_NUMBLOCKS %s", s) return 0 } return ii } func splitRanges() bool { s := strings.ToLower(strings.TrimSpace(os.Getenv("GODAL_SPLIT_CONSECUTIVE_RANGES"))) switch s { case "", "0", "no", "false": return false default: return true } } //export GDALRegister_gcs func GDALRegister_gcs() { ctx = context.Background() opts := []gcs.Option{} if bs := blockSize(); bs > 0 { opts = append(opts, gcs.BlockSize(bs)) } if nb := numBlocks(); nb > 0 { opts = append(opts, gcs.MaxCachedBlocks(nb)) } if splitRanges() { opts = append(opts, gcs.SplitConsecutiveRanges(true)) } err := gcs.RegisterHandler(ctx, opts...) if err != nil { log.Printf("Failed to register gcs handler: %v", err) return } go func() { <-ctx.Done() }() } func main() {}
[ "\"GODAL_BLOCKSIZE\"", "\"GODAL_NUMBLOCKS\"", "\"GODAL_SPLIT_CONSECUTIVE_RANGES\"" ]
[]
[ "GODAL_NUMBLOCKS", "GODAL_BLOCKSIZE", "GODAL_SPLIT_CONSECUTIVE_RANGES" ]
[]
["GODAL_NUMBLOCKS", "GODAL_BLOCKSIZE", "GODAL_SPLIT_CONSECUTIVE_RANGES"]
go
3
0
examples/v1/dashboards/GetDashboard_4262333854.java
// Get a dashboard returns 'author_name'

import com.datadog.api.v1.client.ApiClient;
import com.datadog.api.v1.client.ApiException;
import com.datadog.api.v1.client.Configuration;
import com.datadog.api.v1.client.api.DashboardsApi;
import com.datadog.api.v1.client.model.Dashboard;
import java.time.*;
import java.util.*;

public class Example {
    public static void main(String[] args) {
        ApiClient defaultClient = Configuration.getDefaultApiClient();
        DashboardsApi apiInstance = new DashboardsApi(defaultClient);

        // there is a valid "dashboard" in the system
        String DASHBOARD_ID = System.getenv("DASHBOARD_ID");

        try {
            Dashboard result = apiInstance.getDashboard(DASHBOARD_ID);
            System.out.println(result);
        } catch (ApiException e) {
            System.err.println("Exception when calling DashboardsApi#getDashboard");
            System.err.println("Status code: " + e.getCode());
            System.err.println("Reason: " + e.getResponseBody());
            System.err.println("Response headers: " + e.getResponseHeaders());
            e.printStackTrace();
        }
    }
}
[ "\"DASHBOARD_ID\"" ]
[]
[ "DASHBOARD_ID" ]
[]
["DASHBOARD_ID"]
java
1
0
test/soak/serve_hostnames/serve_hostnames.go
/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* This soak tests places a specified number of pods on each node and then repeatedly sends queries to a service running on these pods via a serivce */ package main import ( "flag" "fmt" "os" "path/filepath" "time" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/kubernetes/pkg/api/legacyscheme" e2e "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/service" "k8s.io/klog" ) var ( queriesAverage = flag.Int("queries", 100, "Number of hostname queries to make in each iteration per pod on average") podsPerNode = flag.Int("pods_per_node", 1, "Number of serve_hostname pods per node") upTo = flag.Int("up_to", 1, "Number of iterations or -1 for no limit") maxPar = flag.Int("max_par", 500, "Maximum number of queries in flight") gke = flag.String("gke_context", "", "Target GKE cluster with context gke_{project}_{zone}_{cluster-name}") ) const ( deleteTimeout = 2 * time.Minute endpointTimeout = 5 * time.Minute nodeListTimeout = 2 * time.Minute podCreateTimeout = 2 * time.Minute podStartTimeout = 30 * time.Minute serviceCreateTimeout = 2 * time.Minute namespaceDeleteTimeout = 5 * time.Minute ) func main() { flag.Parse() klog.Infof("Starting serve_hostnames soak test with queries=%d and podsPerNode=%d upTo=%d", *queriesAverage, *podsPerNode, *upTo) var spec string if *gke != "" { spec = filepath.Join(os.Getenv("HOME"), ".config", "gcloud", "kubernetes", "kubeconfig") } else { spec = filepath.Join(os.Getenv("HOME"), ".kube", "config") } settings, err := clientcmd.LoadFromFile(spec) if err != nil { klog.Fatalf("Error loading configuration: %v", err.Error()) } if *gke != "" { settings.CurrentContext = *gke } config, err := clientcmd.NewDefaultClientConfig(*settings, &clientcmd.ConfigOverrides{}).ClientConfig() if err != nil { klog.Fatalf("Failed to construct config: %v", err) } client, err := clientset.NewForConfig(config) if err != nil { klog.Fatalf("Failed to make client: %v", err) } var nodes *v1.NodeList for start := time.Now(); time.Since(start) < nodeListTimeout; time.Sleep(2 * time.Second) { nodes, err = client.CoreV1().Nodes().List(metav1.ListOptions{}) if err == nil { break } klog.Warningf("Failed to list nodes: %v", err) } if err != nil { klog.Fatalf("Giving up trying to list nodes: %v", err) } if len(nodes.Items) == 0 { klog.Fatalf("Failed to find any nodes.") } klog.Infof("Found %d nodes on this cluster:", len(nodes.Items)) for i, node := range nodes.Items { klog.Infof("%d: %s", i, node.Name) } queries := *queriesAverage * len(nodes.Items) * *podsPerNode // Create the namespace got, err := client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "serve-hostnames-"}}) if err != nil { klog.Fatalf("Failed to create namespace: 
%v", err) } ns := got.Name defer func(ns string) { if err := client.CoreV1().Namespaces().Delete(ns, nil); err != nil { klog.Warningf("Failed to delete namespace %s: %v", ns, err) } else { // wait until the namespace disappears for i := 0; i < int(namespaceDeleteTimeout/time.Second); i++ { if _, err := client.CoreV1().Namespaces().Get(ns, metav1.GetOptions{}); err != nil { if apierrors.IsNotFound(err) { return } } time.Sleep(time.Second) } } }(ns) klog.Infof("Created namespace %s", ns) // Create a service for these pods. klog.Infof("Creating service %s/serve-hostnames", ns) // Make several attempts to create a service. var svc *v1.Service for start := time.Now(); time.Since(start) < serviceCreateTimeout; time.Sleep(2 * time.Second) { t := time.Now() svc, err = client.CoreV1().Services(ns).Create(&v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "serve-hostnames", Labels: map[string]string{ "name": "serve-hostname", }, }, Spec: v1.ServiceSpec{ Ports: []v1.ServicePort{{ Protocol: "TCP", Port: 9376, TargetPort: intstr.FromInt(9376), }}, Selector: map[string]string{ "name": "serve-hostname", }, }, }) klog.V(4).Infof("Service create %s/server-hostnames took %v", ns, time.Since(t)) if err == nil { break } klog.Warningf("After %v failed to create service %s/serve-hostnames: %v", time.Since(start), ns, err) } if err != nil { klog.Warningf("Unable to create service %s/%s: %v", ns, svc.Name, err) return } // Clean up service defer func() { klog.Infof("Cleaning up service %s/serve-hostnames", ns) // Make several attempts to delete the service. for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) { if err := client.CoreV1().Services(ns).Delete(svc.Name, nil); err == nil { return } klog.Warningf("After %v unable to delete service %s/%s: %v", time.Since(start), ns, svc.Name, err) } }() // Put serve-hostname pods on each node. podNames := []string{} for i, node := range nodes.Items { for j := 0; j < *podsPerNode; j++ { podName := fmt.Sprintf("serve-hostname-%d-%d", i, j) podNames = append(podNames, podName) // Make several attempts for start := time.Now(); time.Since(start) < podCreateTimeout; time.Sleep(2 * time.Second) { klog.Infof("Creating pod %s/%s on node %s", ns, podName, node.Name) t := time.Now() _, err = client.CoreV1().Pods(ns).Create(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, Labels: map[string]string{ "name": "serve-hostname", }, }, Spec: v1.PodSpec{ Containers: []v1.Container{ { Name: "serve-hostname", Image: e2e.ServeHostnameImage, Ports: []v1.ContainerPort{{ContainerPort: 9376}}, }, }, NodeName: node.Name, }, }) klog.V(4).Infof("Pod create %s/%s request took %v", ns, podName, time.Since(t)) if err == nil { break } klog.Warningf("After %s failed to create pod %s/%s: %v", time.Since(start), ns, podName, err) } if err != nil { klog.Warningf("Failed to create pod %s/%s: %v", ns, podName, err) return } } } // Clean up the pods defer func() { klog.Info("Cleaning up pods") // Make several attempts to delete the pods. 
for _, podName := range podNames { for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) { if err = client.CoreV1().Pods(ns).Delete(podName, nil); err == nil { break } klog.Warningf("After %v failed to delete pod %s/%s: %v", time.Since(start), ns, podName, err) } } }() klog.Info("Waiting for the serve-hostname pods to be ready") for _, podName := range podNames { var pod *v1.Pod for start := time.Now(); time.Since(start) < podStartTimeout; time.Sleep(5 * time.Second) { pod, err = client.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) if err != nil { klog.Warningf("Get pod %s/%s failed, ignoring for %v: %v", ns, podName, err, podStartTimeout) continue } if pod.Status.Phase == v1.PodRunning { break } } if pod.Status.Phase != v1.PodRunning { klog.Warningf("Gave up waiting on pod %s/%s to be running (saw %v)", ns, podName, pod.Status.Phase) } else { klog.Infof("%s/%s is running", ns, podName) } } rclient, err := restclient.RESTClientFor(config) if err != nil { klog.Warningf("Failed to build restclient: %v", err) return } proxyRequest, errProxy := service.GetServicesProxyRequest(client, rclient.Get()) if errProxy != nil { klog.Warningf("Get services proxy request failed: %v", errProxy) return } // Wait for the endpoints to propagate. for start := time.Now(); time.Since(start) < endpointTimeout; time.Sleep(10 * time.Second) { hostname, err := proxyRequest. Namespace(ns). Name("serve-hostnames"). DoRaw() if err != nil { klog.Infof("After %v while making a proxy call got error %v", time.Since(start), err) continue } var r metav1.Status if err := runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), hostname, &r); err != nil { break } if r.Status == metav1.StatusFailure { klog.Infof("After %v got status %v", time.Since(start), string(hostname)) continue } break } // Repeatedly make requests. for iteration := 0; iteration != *upTo; iteration++ { responseChan := make(chan string, queries) // Use a channel of size *maxPar to throttle the number // of in-flight requests to avoid overloading the service. inFlight := make(chan struct{}, *maxPar) start := time.Now() for q := 0; q < queries; q++ { go func(i int, query int) { inFlight <- struct{}{} t := time.Now() hostname, err := proxyRequest. Namespace(ns). Name("serve-hostnames"). DoRaw() klog.V(4).Infof("Proxy call in namespace %s took %v", ns, time.Since(t)) if err != nil { klog.Warningf("Call failed during iteration %d query %d : %v", i, query, err) // If the query failed return a string which starts with a character // that can't be part of a hostname. responseChan <- fmt.Sprintf("!failed in iteration %d to issue query %d: %v", i, query, err) } else { responseChan <- string(hostname) } <-inFlight }(iteration, q) } responses := make(map[string]int, *podsPerNode*len(nodes.Items)) missing := 0 for q := 0; q < queries; q++ { r := <-responseChan klog.V(4).Infof("Got response from %s", r) responses[r]++ // If the returned hostname starts with '!' then it indicates // an error response. if len(r) > 0 && r[0] == '!' { klog.V(3).Infof("Got response %s", r) missing++ } } if missing > 0 { klog.Warningf("Missing %d responses out of %d", missing, queries) } // Report any nodes that did not respond. 
for n, node := range nodes.Items { for i := 0; i < *podsPerNode; i++ { name := fmt.Sprintf("serve-hostname-%d-%d", n, i) if _, ok := responses[name]; !ok { klog.Warningf("No response from pod %s on node %s at iteration %d", name, node.Name, iteration) } } } klog.Infof("Iteration %d took %v for %d queries (%.2f QPS) with %d missing", iteration, time.Since(start), queries-missing, float64(queries-missing)/time.Since(start).Seconds(), missing) } }
[ "\"HOME\"", "\"HOME\"" ]
[]
[ "HOME" ]
[]
["HOME"]
go
1
0
examples/run_session_peer.py
#!/usr/bin/env python

__author__ = 'RADICAL-Cybertools Team'
__email__ = '[email protected]'
__copyright__ = 'Copyright 2021, The RADICAL-Cybertools Team'
__license__ = 'MIT'

import os

from radical.dreamer import Session, Resource, Workload
from radical.dreamer.configs import cfg_default


if __name__ == '__main__':

    os.environ['RADICAL_DREAMER_LOG_LVL'] = 'DEBUG'

    # Create a resource with a specific number of cores, with performance of
    # each core drawn from a distribution (provided resource is dynamic due to
    # `var_temporal` input data).
    #
    # NOTE: Core objects are not generated during the resource initialization
    #       (by default), but will be generated in the `processing` method of
    #       the ResourceManager class. If cores should be defined before the
    #       session run, please use the `set_cores` input flag or the method
    #       of the same name (e.g., `resource.set_cores()`).
    resource = Resource(num_cores=32,
                        perf_dist={'name': 'normal',
                                   'mean': 16.,
                                   'var_spatial': 4.,
                                   'var_temporal': 2.})

    # Create a workload with a specific number of tasks, with number of
    # operations per task drawn from a distribution.
    #
    # NOTE: Task objects are not generated during the workload initialization
    #       (by default), which is similar to the Resource with Cores
    #       behaviour. As with Resource, to generate task objects explicitly,
    #       the corresponding input flag and method are `set_tasks`.
    workload = Workload(num_tasks=256, ops_dist={'mean': 512.})

    # Create Session (peer mode) and set descriptions of Resource & Workload(s)
    session = Session(cfg=cfg_default, is_peer=True)
    session.set(resource=resource, workload=workload)
    # for multi-workloads: session.set(resource=resource, workloads=[workload])

    # Start the processing and collect output profiles
    session.run()

# ### Example of config data:
#
#    from radical.dreamer import Config, Session
#
#    cfg_data = {
#        'session': {
#            'profile_base_name': 'rd.profile'
#        },
#        'schedule': {
#            'strategy': 'largest_to_fastest',
#            'early_binding': False
#        }
#    }
#
#    session = Session(cfg=cfg_data or Config(cfg_data), is_peer=True)
#
[]
[]
[ "RADICAL_DREAMER_LOG_LVL" ]
[]
["RADICAL_DREAMER_LOG_LVL"]
python
1
0
internal/readpassword/extpass_test.go
package readpassword

import (
	"os"
	"os/exec"
	"testing"

	"github.com/rfjakob/gocryptfs/internal/tlog"
)

var testPw = []byte("test")

func TestMain(m *testing.M) {
	// Shut up info output
	tlog.Info.Enabled = false
	os.Exit(m.Run())
}

func TestExtpass(t *testing.T) {
	p1 := "ads2q4tw41reg52"
	p2 := string(readPasswordExtpass("echo " + p1))
	if p1 != p2 {
		t.Errorf("p1=%q != p2=%q", p1, p2)
	}
}

func TestOnceExtpass(t *testing.T) {
	p1 := "lkadsf0923rdfi48rqwhdsf"
	p2 := string(Once("echo "+p1, "", ""))
	if p1 != p2 {
		t.Errorf("p1=%q != p2=%q", p1, p2)
	}
}

func TestTwiceExtpass(t *testing.T) {
	p1 := "w5w44t3wfe45srz434"
	p2 := string(Once("echo "+p1, "", ""))
	if p1 != p2 {
		t.Errorf("p1=%q != p2=%q", p1, p2)
	}
}

// When extpass returns an empty string, we should crash.
//
// The TEST_SLAVE magic is explained at
// https://talks.golang.org/2014/testing.slide#23 .
func TestExtpassEmpty(t *testing.T) {
	if os.Getenv("TEST_SLAVE") == "1" {
		readPasswordExtpass("echo")
		return
	}
	cmd := exec.Command(os.Args[0], "-test.run=TestExtpassEmpty$")
	cmd.Env = append(os.Environ(), "TEST_SLAVE=1")
	err := cmd.Run()
	if err != nil {
		return
	}
	t.Fatal("empty password should have failed")
}
[ "\"TEST_SLAVE\"" ]
[]
[ "TEST_SLAVE" ]
[]
["TEST_SLAVE"]
go
1
0
docs/conf.py
# -*- coding: utf-8 -*-

import os
import sys

sys.path.insert(0, os.path.abspath(".."))

# -- General configuration ------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.intersphinx",
    "sphinx.ext.napoleon",
    "sphinx.ext.todo",
]

# TODO: Please Read!
# Uncomment the below if you use native CircuitPython modules such as
# digitalio, micropython and busio. List the modules you use. Without it, the
# autodoc module docs will fail to generate with a warning.
autodoc_mock_imports = ["digitalio", "busio", "adafruit_bus_device", "micropython"]

intersphinx_mapping = {
    "python": ("https://docs.python.org/3.4", None),
    "BusDevice": (
        "https://circuitpython.readthedocs.io/projects/busdevice/en/latest/",
        None,
    ),
    "CircuitPython": ("https://circuitpython.readthedocs.io/en/latest/", None),
}

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

source_suffix = ".rst"

# The master toctree document.
master_doc = "index"

# General information about the project.
project = "Adafruit PN532 Library"
copyright = "2018 ladyada"
author = "ladyada"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0"

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ".env", "CODE_OF_CONDUCT.md"]

# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"

# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# If this is True, todo emits a warning for each TODO entry. The default is False.
todo_emit_warnings = True

napoleon_numpy_docstring = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get("READTHEDOCS", None) == "True"

if not on_rtd:  # only import and set the theme if we're building docs locally
    try:
        import sphinx_rtd_theme

        html_theme = "sphinx_rtd_theme"
        html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), "."]
    except:
        html_theme = "default"
        html_theme_path = ["."]
else:
    html_theme_path = ["."]

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# The name of an image file (relative to this directory) to use as a favicon of
# the docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = "_static/favicon.ico"

# Output file base name for HTML help builder.
htmlhelp_basename = "AdafruitPn532Librarydoc"

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (
        master_doc,
        "AdafruitPN532Library.tex",
        "AdafruitPN532 Library Documentation",
        author,
        "manual",
    ),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (
        master_doc,
        "AdafruitPN532library",
        "Adafruit PN532 Library Documentation",
        [author],
        1,
    )
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "AdafruitPN532Library",
        "Adafruit PN532 Library Documentation",
        author,
        "AdafruitPN532Library",
        "One line description of project.",
        "Miscellaneous",
    ),
]
[]
[]
[ "READTHEDOCS" ]
[]
["READTHEDOCS"]
python
1
0
tools/main.py
# coding=utf-8 # # Copyright (c) 2006-2020, RT-Thread Development Team # # SPDX-License-Identifier: Apache-2.0 # # Change Logs: # Date Author Notes # 2020-05-08 SummerGift first version # 2020-05-11 SummerGift optimize schema checking # import os import json import logging import requests import urllib.error import urllib.parse import urllib.request from jsonschema import RefResolver, Draft7Validator, FormatChecker from package_sync import PackagesSync def init_logger(): log_format = "[%(filename)s %(lineno)d %(levelname)s] %(message)s " date_format = '%Y-%m-%d %H:%M:%S %a ' logging.basicConfig(level=logging.INFO, format=log_format, datefmt=date_format, ) class StudioSdkManagerIndex: def __init__(self, index): self.index_entry_file = index self.index_all = "" @staticmethod def get_json_obj_from_file(file): with open(file, 'r') as f: content = f.read() return json.loads(content) @staticmethod def write_json_to_file(json_content, file_name): with open(file_name, 'w', encoding='UTF-8') as f: f.write(json.dumps(json_content, indent=4)) @staticmethod def walk_all_folder(self, index_entry_file): logging.debug("index_entry_file {0}".format(index_entry_file)) index_entry = self.get_json_obj_from_file(index_entry_file) if "index" in index_entry: index_entry["children"] = list() logging.debug("index has index") for item in index_entry["index"]: abs_path = os.path.abspath(index_entry_file) next_entry_file = os.path.join(os.path.dirname(abs_path), item, "index.json") sub_index = self.walk_all_folder(self, next_entry_file) index_entry["children"].append(sub_index) index_entry.pop('index') return index_entry def generate_all_index(self, file_name): index_entry = self.walk_all_folder(self, self.index_entry_file) self.index_all = index_entry self.write_json_to_file(index_entry, file_name) return index_entry def index_schema_check(self, index_content): def get_schema_json_obj(path): return self.get_json_obj_from_file(os.path.join("index_schema_check", path)) index_all_schema = get_schema_json_obj("index_all_schema.json") rtt_source_schema = get_schema_json_obj("rtt_source_schema.json") rtt_source_releases_schema = get_schema_json_obj("rtt_source_releases_schema.json") csp_schema = get_schema_json_obj("csp_schema.json") csp_dvendor_schema = get_schema_json_obj("csp_dvendor_schema.json") csp_dvendor_package_schema = get_schema_json_obj("csp_dvendor_package_schema.json") csp_dvendor_package_releases_schema = get_schema_json_obj("csp_dvendor_package_releases_schema.json") schema_store = { index_all_schema['$id']: index_all_schema, rtt_source_releases_schema['$id']: rtt_source_releases_schema, rtt_source_schema['$id']: rtt_source_schema, csp_schema['$id']: csp_schema, csp_dvendor_schema['$id']: csp_dvendor_schema, csp_dvendor_package_schema['$id']: csp_dvendor_package_schema, csp_dvendor_package_releases_schema['$id']: csp_dvendor_package_releases_schema } resolver = RefResolver.from_schema(rtt_source_releases_schema, store=schema_store) validator = Draft7Validator(index_all_schema, resolver=resolver, format_checker=FormatChecker()) validator.validate(index_content) logging.info("SDK index checking successful.") @staticmethod def get_differ_from_index(last_csp_list, new_csp_list): last_csp_list_str = json.dumps(last_csp_list, indent=4) new_csp_list_str = json.dumps(new_csp_list, indent=4) last_csp_list = list() for line in last_csp_list_str.splitlines(): if line.find(".zip") != -1: url = line.strip()[line.strip().find("https"): line.strip().find(".zip") + 4] last_csp_list.append(url) 
logging.debug(last_csp_list) new_csp_list = list() for line in new_csp_list_str.splitlines(): if line.find(".zip") != -1: url = line.strip()[line.strip().find("https"): line.strip().find(".zip") + 4] new_csp_list.append(url) logging.debug(new_csp_list) result = list(set(new_csp_list).difference(set(last_csp_list))) return result @staticmethod def csp_to_test(csp_result): if len(csp_result) == 0: logging.info("No need to test chip support package.") return # limit commit times if len(csp_result) != 1: logging.error("You commit {0} csp packages at one time.".format(len(csp_result))) logging.error("But you can commit only one csp package once, so you should modify the index you commit.") logging.error("Please check the list following:") logging.error(csp_result) exit(1) logging.info("csp update : {0}".format(csp_result)) with open("csp_update_url.json", "w") as f: f.write(str(json.dumps(csp_result, indent=4))) @staticmethod def bsp_to_test(bsp_result): if len(bsp_result) == 0: logging.info("No need to test board support package.") return # limit commit times if len(bsp_result) != 1: logging.error("You commit {0} csp packages at one time.".format(len(bsp_result))) logging.error("But you can commit only one csp package once, so you should modify the index you commit.") logging.error("Please check the list following:") logging.error(bsp_result) exit(1) logging.info("bsp update : {0}".format(bsp_result)) with open("bsp_update_url.json", "w") as f: f.write(str(json.dumps(bsp_result, indent=4))) def get_update_list(self): response = requests.get("https://www.rt-thread.org/studio/sdkmanager/get/index") # check chip update information csp_last_csp_list = json.loads(response.text)["children"][1] csp_new_csp_list = self.index_all["children"][1] csp_result = self.get_differ_from_index(csp_last_csp_list, csp_new_csp_list) self.csp_to_test(csp_result) # check board update information bsp_last_bsp_list = json.loads(response.text)["children"][2] bsp_new_bsp_list = self.index_all["children"][2] bsp_result = self.get_differ_from_index(bsp_last_bsp_list, bsp_new_bsp_list) self.bsp_to_test(bsp_result) # sdk package need to be update and sync last_sdk_list = json.loads(response.text)["children"] new_sdk_list = self.index_all["children"] result = self.get_differ_from_index(last_sdk_list, new_sdk_list) if len(result) == 0: logging.info("no packages need test and update: {0}".format(result)) else: logging.info("packages need test and update: {0}".format(result)) return result class SdkSyncPackages: def __init__(self, update_list, new_index): self.update_list = update_list self.new_index = new_index @staticmethod def is_master_repo(): if 'IS_MASTER_REPO' in os.environ: return True else: return False def do_sync_csp_packages(self): logging.info("update list: {0}".format(self.update_list)) if len(self.update_list) == 0: logging.info("Update list is empty, no need to sync.") return url = self.update_list[0] logging.info(url) tmp = url.split('/') logging.info(tmp[4]) # get packages repository work_path = r'sync_local_repo/github_mirror' mirror_file = r'sync_local_repo/github_mirror_file' mirror_url = 'https://gitee.com/RT-Thread-Studio-Mirror' mirror_org_name = "RT-Thread-Studio-Mirror" if 'username' in os.environ: logging.info("Find sync token") username = os.environ['username'] password = os.environ['password'] client_id = os.environ['client_id'] client_secret = os.environ['client_secret'] payload = 'grant_type=password&username={0}&password={1}&client_id={2}' \ '&client_secret={3}&scope=' \ 'user_info projects 
pull_requests issues notes keys hook groups gists'.format(username, password, client_id, client_secret) token = PackagesSync.get_access_token(payload) packages_update = PackagesSync( work_path, mirror_file, mirror_url, token, mirror_org_name) # create new repo in mirror packages_update.create_repo_in_gitee(tmp[4]) # clone repo and push packages_update.fetch_packages_from_git(url) else: logging.info("No sync token") def sync_csp_packages(self): logging.info("Ready to sync csp or bsp packages") self.do_sync_csp_packages() @staticmethod def do_update_sdk_ide_index(index): headers = { "Content-Type": "application/json" } try: r = requests.post(os.environ["UPDATE_SDK_INDEX_ADDRESS"], data=json.dumps(index), headers=headers ) if r.status_code == requests.codes.ok: logging.info("Update sdk index successful.") else: logging.error("Error code {0}".format(r.status_code)) r = requests.post(os.environ["UPDATE_SDK_ABROAD_INDEX_ADDRESS"], data=json.dumps(index), headers=headers ) if r.status_code == requests.codes.ok: logging.info("Update abroad sdk index successful.") else: logging.error("Error code {0}".format(r.status_code)) except Exception as e: logging.error('Error message:%s' % e) @staticmethod def packages_info_mirror_register(packages_json_file): with open(packages_json_file, 'rb') as f: json_content = f.read() try: package_json_register = json.loads(json_content.decode('utf-8')) except Exception as e: logging.error("Error message: {0}.".format(e)) package_json_register["name"] = "RT-Thread_Studio_" + package_json_register["name"] for item in package_json_register['releases']: url = item['url'] if url.startswith('https://github.com/'): if url.endswith('.git'): tmp = url.split('/') repo = tmp[4] replace_url = "https://gitee.com/RT-Thread-Studio-Mirror" + '/' + repo item['url'] = replace_url if url == "https://github.com/RT-Thread/rt-thread.git": item['url'] = "https://gitee.com/rtthread/rt-thread.git" else: new_zip_url = url.replace('https://github.com', 'https://gitee.com') tmp = new_zip_url.split('/') tmp[3] = "RT-Thread-Studio-Mirror" tmp[5] = 'repository/archive' file_replace_url = '/'.join(tmp) item['url'] = file_replace_url logging.debug(package_json_register) payload_register = { "packages": [{} ] } payload_register["packages"][0] = package_json_register try: data = json.dumps(payload_register).encode("utf-8") except Exception as e: logging.error("Error message: {0}.".format(e)) try: request = urllib.request.Request(os.environ["MIRROR_REG_URL"], data, { 'content-type': 'application/json'}) response = urllib.request.urlopen(request) resp = response.read() except Exception as e: logging.error("Error message: {0}.".format(e)) print('======>Software package registration failed.') else: logging.info("{0} register successful.".format(package_json_register["name"])) def do_update_sdk_mirror_server_index(self): folder_walk_result = os.walk("..") for path, d, filelist in folder_walk_result: for filename in filelist: if filename == 'index.json': index_content = StudioSdkManagerIndex.get_json_obj_from_file(os.path.join(path, filename)) if "releases" in index_content: self.packages_info_mirror_register(os.path.join(path, filename)) def update_sdk_index(self): if 'UPDATE_SDK_INDEX_ADDRESS' in os.environ: logging.info("Begin to update sdk IDE index") self.do_update_sdk_ide_index(self.new_index) logging.info("Begin to update sdk mirror server index") self.do_update_sdk_mirror_server_index() else: logging.info("No need to update sdk index") def main(): init_logger() generate_all_index = 
StudioSdkManagerIndex("../index.json") index_content = generate_all_index.generate_all_index("index_all.json") # 1. sdk index schema checking generate_all_index.index_schema_check(index_content) # 2. get packages need to test and sync update_list = generate_all_index.get_update_list() # 3. sync updated sdk package and sdk index sync = SdkSyncPackages(update_list, index_content) if sync.is_master_repo(): sync.sync_csp_packages() sync.update_sdk_index() else: logging.info("No need to sync csp or bsp packages") if __name__ == "__main__": main()
[]
[]
[ "client_id", "username", "UPDATE_SDK_INDEX_ADDRESS", "client_secret", "UPDATE_SDK_ABROAD_INDEX_ADDRESS", "password", "MIRROR_REG_URL" ]
[]
["client_id", "username", "UPDATE_SDK_INDEX_ADDRESS", "client_secret", "UPDATE_SDK_ABROAD_INDEX_ADDRESS", "password", "MIRROR_REG_URL"]
python
7
0
src/python/hooks/hooks_debug.go
package hooks

import (
	"fmt"
	"os"

	"github.com/cloudfoundry/libbuildpack"
)

type hooks1 struct {
	libbuildpack.DefaultHook
}

type hooks2 struct {
	libbuildpack.DefaultHook
}

func init() {
	if os.Getenv("BP_DEBUG") != "" {
		libbuildpack.AddHook(hooks1{})
		libbuildpack.AddHook(hooks2{})
	}
}

func (h hooks1) BeforeCompile(compiler *libbuildpack.Stager) error {
	fmt.Println("HOOKS 1: BeforeCompile")
	return nil
}

func (h hooks2) AfterCompile(compiler *libbuildpack.Stager) error {
	fmt.Println("HOOKS 2: AfterCompile")
	return nil
}
[ "\"BP_DEBUG\"" ]
[]
[ "BP_DEBUG" ]
[]
["BP_DEBUG"]
go
1
0
testframework/config.go
package testframework

import (
	"bufio"
	"fmt"
	"os"
	"strings"
	"time"
)

var TIMEOUT = setTimeout()

func setTimeout() time.Duration {
	if os.Getenv("SLOW_MACHINE") == "1" {
		return 180 * time.Second
	}
	return 60 * time.Second
}

func WriteConfig(filename string, config map[string]string, regtestConfig map[string]string, sectionName string) {
	b := []byte{}
	for k, v := range config {
		b = append(b, []byte(fmt.Sprintf("%s=%s\n", k, v))...)
	}
	if regtestConfig != nil {
		b = append(b, []byte(fmt.Sprintf("[%s]\n", sectionName))...)
		for k, v := range regtestConfig {
			b = append(b, []byte(fmt.Sprintf("%s=%s\n", k, v))...)
		}
	}
	os.WriteFile(filename, b, os.ModePerm)
}

func ReadConfig(filename string) (map[string]string, error) {
	file, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	defer file.Close()

	conf := map[string]string{}
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		if strings.Contains(scanner.Text(), "#") || !strings.Contains(scanner.Text(), "=") {
			continue
		}
		parts := strings.Split(scanner.Text(), "=")
		conf[parts[0]] = parts[1]
	}
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	return conf, nil
}
[ "\"SLOW_MACHINE\"" ]
[]
[ "SLOW_MACHINE" ]
[]
["SLOW_MACHINE"]
go
1
0
functests/utils/client/clients.go
package client

import (
	"os"

	"github.com/golang/glog"
	clientconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
	clientmachineconfigv1 "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/typed/machineconfiguration.openshift.io/v1"
	discovery "k8s.io/client-go/discovery"
	appsv1client "k8s.io/client-go/kubernetes/typed/apps/v1"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// Client defines the client set that will be used for testing
var Client *ClientSet

func init() {
	Client = New("")
}

// ClientSet provides the struct to talk with the relevant APIs
type ClientSet struct {
	corev1client.CoreV1Interface
	clientconfigv1.ConfigV1Interface
	clientmachineconfigv1.MachineconfigurationV1Interface
	appsv1client.AppsV1Interface
	discovery.DiscoveryInterface
}

// New returns a *ClientSet built from the given kubeconfig (or, when empty,
// from the KUBECONFIG environment variable or the in-cluster config).
func New(kubeconfig string) *ClientSet {
	var config *rest.Config
	var err error

	if kubeconfig == "" {
		kubeconfig = os.Getenv("KUBECONFIG")
	}

	if kubeconfig != "" {
		glog.V(4).Infof("Loading kube client config from path %q", kubeconfig)
		config, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
	} else {
		glog.V(4).Infof("Using in-cluster kube client config")
		config, err = rest.InClusterConfig()
	}
	if err != nil {
		panic(err)
	}

	clientSet := &ClientSet{}
	clientSet.CoreV1Interface = corev1client.NewForConfigOrDie(config)
	clientSet.ConfigV1Interface = clientconfigv1.NewForConfigOrDie(config)
	clientSet.MachineconfigurationV1Interface = clientmachineconfigv1.NewForConfigOrDie(config)
	clientSet.AppsV1Interface = appsv1client.NewForConfigOrDie(config)
	clientSet.DiscoveryInterface = discovery.NewDiscoveryClientForConfigOrDie(config)

	return clientSet
}
[ "\"KUBECONFIG\"" ]
[]
[ "KUBECONFIG" ]
[]
["KUBECONFIG"]
go
1
0
util/log/clog.go
// Go support for leveled logs, analogous to https://code.google.com/p/google-clog/ // // Copyright 2013 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Original version (c) Google. // Author (fork from https://github.com/golang/glog): Tobias Schottdorf package log import ( "bufio" "bytes" "encoding/json" "errors" "fmt" "io" stdLog "log" "os" "path/filepath" "runtime" "strconv" "strings" "sync" "sync/atomic" "time" "github.com/cockroachdb/cockroach/util/caller" "github.com/cockroachdb/cockroach/util/encoding" gogoproto "github.com/gogo/protobuf/proto" ) // Severity identifies the sort of log: info, warning etc. It also implements // the flag.Value interface. The -stderrthreshold flag is of type Severity and // should be modified only through the flag.Value interface. The values match // the corresponding constants in C++. type Severity int32 // sync/atomic int32 // These constants identify the log levels in order of increasing Severity. // A message written to a high-Severity log file is also written to each // lower-Severity log file. const ( InfoLog Severity = iota WarningLog ErrorLog FatalLog NumSeverity = 4 ) const severityChar = "IWEF" // severityName provides a mapping from Severity level to a string. var severityName = []string{ InfoLog: "INFO", WarningLog: "WARNING", ErrorLog: "ERROR", FatalLog: "FATAL", } // get returns the value of the Severity. func (s *Severity) get() Severity { return Severity(atomic.LoadInt32((*int32)(s))) } // set sets the value of the Severity. func (s *Severity) set(val Severity) { atomic.StoreInt32((*int32)(s), int32(val)) } // String is part of the flag.Value interface. func (s *Severity) String() string { return strconv.FormatInt(int64(*s), 10) } // Set is part of the flag.Value interface. func (s *Severity) Set(value string) error { var threshold Severity // Is it a known name? if v, ok := SeverityByName(value); ok { threshold = v } else { v, err := strconv.Atoi(value) if err != nil { return err } threshold = Severity(v) } logging.stderrThreshold.set(threshold) return nil } // Name returns the string representation of the severity (i.e. ERROR, INFO). func (s *Severity) Name() string { return severityName[s.get()] } // SeverityByName attempts to parse the passed in string into a severity. (i.e. // ERROR, INFO). If it succeeds, the returned bool is set to true. func SeverityByName(s string) (Severity, bool) { s = strings.ToUpper(s) for i, name := range severityName { if name == s { return Severity(i), true } } return 0, false } // colorProfile defines escape sequences which provide color in // terminals. Some terminals support 8 colors, some 256, others // none at all. type colorProfile struct { infoPrefix []byte warnPrefix []byte errorPrefix []byte timePrefix []byte } // For terms with 8-color support. 
var colorProfile8 = &colorProfile{ infoPrefix: []byte("\033[0;36;49m"), warnPrefix: []byte("\033[0;33;49m"), errorPrefix: []byte("\033[0;31;49m"), timePrefix: []byte("\033[2;37;49m"), } // For terms with 256-color support. var colorProfile256 = &colorProfile{ infoPrefix: []byte("\033[38;5;33m"), warnPrefix: []byte("\033[38;5;214m"), errorPrefix: []byte("\033[38;5;160m"), timePrefix: []byte("\033[38;5;246m"), } // OutputStats tracks the number of output lines and bytes written. type outputStats struct { lines int64 bytes int64 } // Lines returns the number of lines written. func (s *outputStats) Lines() int64 { return atomic.LoadInt64(&s.lines) } // Bytes returns the number of bytes written. func (s *outputStats) Bytes() int64 { return atomic.LoadInt64(&s.bytes) } // Stats tracks the number of lines of output and number of bytes // per severity level. Values must be read with atomic.LoadInt64. var Stats struct { Info, Warning, Error outputStats } var severityStats = [NumSeverity]*outputStats{ InfoLog: &Stats.Info, WarningLog: &Stats.Warning, ErrorLog: &Stats.Error, } // Level is exported because it appears in the arguments to V and is // the type of the v flag, which can be set programmatically. // It's a distinct type because we want to discriminate it from logType. // Variables of type level are only changed under logging.mu. // The --verbosity flag is read only with atomic ops, so the state of the logging // module is consistent. // Level is treated as a sync/atomic int32. // Level specifies a level of verbosity for V logs. *Level implements // flag.Value; the --verbosity flag is of type Level and should be modified // only through the flag.Value interface. type level int32 // get returns the value of the Level. func (l *level) get() level { return level(atomic.LoadInt32((*int32)(l))) } // set sets the value of the Level. func (l *level) set(val level) { atomic.StoreInt32((*int32)(l), int32(val)) } // String is part of the flag.Value interface. func (l *level) String() string { return strconv.FormatInt(int64(*l), 10) } // Set is part of the flag.Value interface. func (l *level) Set(value string) error { v, err := strconv.Atoi(value) if err != nil { return err } logging.mu.Lock() defer logging.mu.Unlock() logging.setVState(level(v), logging.vmodule.filter, false) return nil } // moduleSpec represents the setting of the --vmodule flag. type moduleSpec struct { filter []modulePat } // modulePat contains a filter for the --vmodule flag. // It holds a verbosity level and a file pattern to match. type modulePat struct { pattern string literal bool // The pattern is a literal string level level } // match reports whether the file matches the pattern. It uses a string // comparison if the pattern contains no metacharacters. func (m *modulePat) match(file string) bool { if m.literal { return file == m.pattern } match, _ := filepath.Match(m.pattern, file) return match } func (m *moduleSpec) String() string { // Lock because the type is not atomic. TODO: clean this up. logging.mu.Lock() defer logging.mu.Unlock() var b bytes.Buffer for i, f := range m.filter { if i > 0 { b.WriteRune(',') } fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) } return b.String() } var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") // Syntax: --vmodule=recordio=2,file=1,gfs*=3 func (m *moduleSpec) Set(value string) error { var filter []modulePat for _, pat := range strings.Split(value, ",") { if len(pat) == 0 { // Empty strings such as from a trailing comma can be ignored. 
continue } patLev := strings.Split(pat, "=") if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { return errVmoduleSyntax } pattern := patLev[0] v, err := strconv.Atoi(patLev[1]) if err != nil { return errors.New("syntax error: expect comma-separated list of filename=N") } if v < 0 { return errors.New("negative value for vmodule level") } if v == 0 { continue // Ignore. It's harmless but no point in paying the overhead. } // TODO: check syntax of filter? filter = append(filter, modulePat{pattern, isLiteral(pattern), level(v)}) } logging.mu.Lock() defer logging.mu.Unlock() logging.setVState(logging.verbosity, filter, true) return nil } // isLiteral reports whether the pattern is a literal string, that is, has no metacharacters // that require filepath.Match to be called to match the pattern. func isLiteral(pattern string) bool { return !strings.ContainsAny(pattern, `\*?[]`) } // traceLocation represents the setting of the -log_backtrace_at flag. type traceLocation struct { file string line int } // isSet reports whether the trace location has been specified. // logging.mu is held. func (t *traceLocation) isSet() bool { return t.line > 0 } // match reports whether the specified file and line matches the trace location. // The argument file name is the full path, not the basename specified in the flag. // logging.mu is held. func (t *traceLocation) match(file string, line int) bool { if t.line != line { return false } if i := strings.LastIndex(file, "/"); i >= 0 { file = file[i+1:] } return t.file == file } func (t *traceLocation) String() string { // Lock because the type is not atomic. TODO: clean this up. logging.mu.Lock() defer logging.mu.Unlock() return fmt.Sprintf("%s:%d", t.file, t.line) } var errTraceSyntax = errors.New("syntax error: expect file.go:234") // Syntax: -log_backtrace_at=gopherflakes.go:234 // Note that unlike vmodule the file extension is included here. func (t *traceLocation) Set(value string) error { if value == "" { // Unset. logging.mu.Lock() defer logging.mu.Unlock() t.line = 0 t.file = "" return nil } fields := strings.Split(value, ":") if len(fields) != 2 { return errTraceSyntax } file, line := fields[0], fields[1] if !strings.Contains(file, ".") { return errTraceSyntax } v, err := strconv.Atoi(line) if err != nil { return errTraceSyntax } if v <= 0 { return errors.New("negative or zero value for level") } logging.mu.Lock() defer logging.mu.Unlock() t.line = v t.file = file return nil } // EntryDecoder reads successive encoded log entries from the input // buffer. Each entry is preceded by a single big-ending uint32 // describing the next entry's length. type EntryDecoder struct { in io.Reader } // NewEntryDecoder creates a new instance of EntryDecoder. func NewEntryDecoder(in io.Reader) *EntryDecoder { return &EntryDecoder{in: in} } // Decode decodes the next log entry into the provided protobuf message. func (lr *EntryDecoder) Decode(entry *LogEntry) error { // Read the next log entry. szBuf := make([]byte, 4) n, err := lr.in.Read(szBuf) if err != nil { return err } _, sz := encoding.DecodeUint32(szBuf) buf := make([]byte, sz) n, err = lr.in.Read(buf) if err != nil { return err } if err := gogoproto.Unmarshal(buf[:n], entry); err != nil { return err } return nil } type baseEntryReader struct { buf []byte ld *EntryDecoder format func(entry *LogEntry) []byte } // Read implements the io.Reader interface. 
func (hr *baseEntryReader) Read(p []byte) (int, error) { var n int for { if len(hr.buf) != 0 { copied := copy(p, hr.buf) hr.buf = hr.buf[copied:] n += copied p = p[copied:] if len(p) == 0 { return n, nil } } entry := &LogEntry{} if err := hr.ld.Decode(entry); err != nil { return n, err } hr.buf = hr.format(entry) } } // NewTermEntryReader returns a reader for log files containing // encoded entries for use from a terminal. If the --color flag is // set, and the terminal supports colors, then log output will be // colorized. func NewTermEntryReader(reader io.Reader) io.Reader { tr := &baseEntryReader{ld: NewEntryDecoder(reader)} colors := logging.shouldColorize() tr.format = func(entry *LogEntry) []byte { return formatLogEntry(entry, colors) } return tr } // NewJSONEntryReader returns a reader for log files containing // encoded entries in JSON format. func NewJSONEntryReader(reader io.Reader) io.Reader { jr := &baseEntryReader{ld: NewEntryDecoder(reader)} jr.format = func(entry *LogEntry) []byte { data, err := json.MarshalIndent(entry, "", " ") if err != nil { return []byte(fmt.Sprintf("{\"error\": %q}", err)) } return data } return jr } // flushSyncWriter is the interface satisfied by logging destinations. type flushSyncWriter interface { Flush() error Sync() error io.Writer } // formatHeader formats a log header using the provided file name and // line number. Log lines are colorized depending on severity. // // Log lines have this form: // Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... // where the fields are defined as follows: // L A single character, representing the log level (eg 'I' for INFO) // mm The month (zero padded; ie May is '05') // dd The day (zero padded) // hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds // threadid The space-padded thread ID as returned by GetTID() // file The file name // line The line number // msg The user-supplied message func formatHeader(s Severity, now time.Time, threadID int32, file string, line int32, colors *colorProfile) *buffer { buf := logging.getBuffer() if line < 0 { line = 0 // not a real line number, but acceptable to someDigits } if s > FatalLog { s = InfoLog // for safety. } tmp := buf.tmp[:len(buf.tmp)] var n int if colors != nil { var prefix []byte switch s { case InfoLog: prefix = colors.infoPrefix case WarningLog: prefix = colors.warnPrefix case ErrorLog, FatalLog: prefix = colors.errorPrefix } n += copy(tmp, prefix) } // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. // It's worth about 3X. Fprintf is hard. _, month, day := now.Date() hour, minute, second := now.Clock() // Lmmdd hh:mm:ss.uuuuuu threadid file:line] tmp[n] = severityChar[s] n++ n += buf.twoDigits(n, int(month)) n += buf.twoDigits(n, day) if colors != nil { n += copy(tmp[n:], colors.timePrefix) // gray for time, file & line } tmp[n] = ' ' n++ n += buf.twoDigits(n, hour) tmp[n] = ':' n++ n += buf.twoDigits(n, minute) tmp[n] = ':' n++ n += buf.twoDigits(n, second) tmp[n] = '.' n++ n += buf.nDigits(6, n, now.Nanosecond()/1000, '0') tmp[n] = ' ' n++ n += buf.someDigits(n, int(threadID)) tmp[n] = ' ' n++ buf.Write(tmp[:n]) buf.WriteString(file) tmp[0] = ':' n = buf.someDigits(1, int(line)) n++ // Extra space between the header and the actual message for scannability. tmp[n] = ' ' n++ if colors != nil { n += copy(tmp[n:], []byte("\033[0m")) // reset } tmp[n] = ' ' n++ buf.Write(tmp[:n]) return buf } // Some custom tiny helper functions to print the log header efficiently. 
const digits = "0123456789" // twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. // Returns two. func (buf *buffer) twoDigits(i, d int) int { buf.tmp[i+1] = digits[d%10] d /= 10 buf.tmp[i] = digits[d%10] return 2 } // nDigits formats an n-digit integer at buf.tmp[i], // padding with pad on the left. // It assumes d >= 0. Returns n. func (buf *buffer) nDigits(n, i, d int, pad byte) int { j := n - 1 for ; j >= 0 && d > 0; j-- { buf.tmp[i+j] = digits[d%10] d /= 10 } for ; j >= 0; j-- { buf.tmp[i+j] = pad } return n } // someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. func (buf *buffer) someDigits(i, d int) int { // Print into the top, then copy down. We know there's space for at least // a 10-digit number. j := len(buf.tmp) for { j-- buf.tmp[j] = digits[d%10] d /= 10 if d == 0 { break } } return copy(buf.tmp[i:], buf.tmp[j:]) } func formatLogEntry(entry *LogEntry, colors *colorProfile) []byte { buf := formatHeader(Severity(entry.Severity), time.Unix(entry.Time/1E9, entry.Time%1E9), entry.ThreadID, entry.File, entry.Line, colors) var args []interface{} for _, arg := range entry.Args { args = append(args, arg.Str) } if len(entry.Format) == 0 { buf.WriteString(fmt.Sprint(args...)) } else { buf.WriteString(fmt.Sprintf(entry.Format, args...)) } buf.WriteByte('\n') if len(entry.Stacks) > 0 { buf.Write(entry.Stacks) } defer logging.putBuffer(buf) return buf.Bytes() } func init() { // Default stderrThreshold is so high that nothing gets through. logging.stderrThreshold = NumSeverity { tmpStr := "" logDir = &tmpStr } logging.setVState(0, nil, false) osExitFunc = os.Exit go logging.flushDaemon() } // Flush flushes all pending log I/O. func Flush() { logging.lockAndFlushAll() } // loggingT collects all the global state of the logging setup. type loggingT struct { // Boolean flags. Not handled atomically because the flag.Value interface // does not let us avoid the =true, and that shorthand is necessary for // compatibility. TODO: does this matter enough to fix? Seems unlikely. toStderr bool // The -logtostderr flag. alsoToStderr bool // The -alsologtostderr flag. color string // The -color flag. hasColorProfile *bool // Non-nil if the color profile has been determined colorProfile *colorProfile // Set via call to getTermColorProfile // Level flag. Handled atomically. stderrThreshold Severity // The -stderrthreshold flag. // freeList is a list of byte buffers, maintained under freeListMu. freeList *buffer // freeListMu maintains the free list. It is separate from the main mutex // so buffers can be grabbed and printed to without holding the main lock, // for better parallelization. freeListMu sync.Mutex // mu protects the remaining elements of this structure and is // used to synchronize logging. mu sync.Mutex // file holds writer for each of the log types. file [NumSeverity]flushSyncWriter // pcs is used in V to avoid an allocation when computing the caller's PC. pcs [1]uintptr // vmap is a cache of the V Level for each V() call site, identified by PC. // It is wiped whenever the vmodule flag changes state. vmap map[uintptr]level // filterLength stores the length of the vmodule filter chain. If greater // than zero, it means vmodule is enabled. It may be read safely // using sync.LoadInt32, but is only modified under mu. filterLength int32 // traceLocation is the state of the -log_backtrace_at flag. traceLocation traceLocation // These flags are modified only under lock, although verbosity may be fetched // safely using atomic.LoadInt32. 
vmodule moduleSpec // The state of the --vmodule flag. verbosity level // V logging level, the value of the --verbosity flag/ } // buffer holds a byte Buffer for reuse. The zero value is ready for use. type buffer struct { bytes.Buffer tmp [64]byte // temporary byte array for creating headers. next *buffer } var logging loggingT // setVState sets a consistent state for V logging. // l.mu is held. func (l *loggingT) setVState(verbosity level, filter []modulePat, setFilter bool) { // Turn verbosity off so V will not fire while we are in transition. logging.verbosity.set(0) // Ditto for filter length. atomic.StoreInt32(&logging.filterLength, 0) // Set the new filters and wipe the pc->Level map if the filter has changed. if setFilter { logging.vmodule.filter = filter logging.vmap = make(map[uintptr]level) } // Things are consistent now, so enable filtering and verbosity. // They are enabled in order opposite to that in V. atomic.StoreInt32(&logging.filterLength, int32(len(filter))) logging.verbosity.set(verbosity) } // getBuffer returns a new, ready-to-use buffer. func (l *loggingT) getBuffer() *buffer { l.freeListMu.Lock() b := l.freeList if b != nil { l.freeList = b.next } l.freeListMu.Unlock() if b == nil { b = new(buffer) } else { b.next = nil b.Reset() } return b } // putBuffer returns a buffer to the free list. func (l *loggingT) putBuffer(b *buffer) { if b.Len() >= 256 { // Let big buffers die a natural death. return } l.freeListMu.Lock() b.next = l.freeList l.freeList = b l.freeListMu.Unlock() } func (l *loggingT) print(s Severity, args ...interface{}) { file, line, _ := caller.Lookup(1) entry := LogEntry{} setLogEntry(nil, "", args, &entry) l.outputLogEntry(s, file, line, false, &entry) } // outputLogEntry marshals a log entry proto into bytes, and writes // the data to the log files. If a trace location is set, stack traces // are added to the entry before marshaling. func (l *loggingT) outputLogEntry(s Severity, file string, line int, alsoToStderr bool, entry *LogEntry) { l.mu.Lock() // Set additional details in log entry. now := time.Now() entry.Severity = int32(s) entry.Time = now.UnixNano() entry.ThreadID = int32(pid) // TODO: should be TID entry.File = file entry.Line = int32(line) // On fatal log, set all stacks. if s == FatalLog { entry.Stacks = stacks(true) logExitFunc = func(error) {} // If we get a write error, we'll still exit. } else if l.traceLocation.isSet() { if l.traceLocation.match(file, line) { entry.Stacks = stacks(false) } } if l.toStderr { _, _ = os.Stderr.Write(l.processForStderr(entry)) } else { if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { _, _ = os.Stderr.Write(l.processForStderr(entry)) } if l.file[s] == nil { if err := l.createFiles(s); err != nil { _, _ = os.Stderr.Write(l.processForStderr(entry)) // Make sure the message appears somewhere. l.exit(err) } } data := encodeLogEntry(entry) switch s { case FatalLog: l.file[FatalLog].Write(data) fallthrough case ErrorLog: l.file[ErrorLog].Write(data) fallthrough case WarningLog: l.file[WarningLog].Write(data) fallthrough case InfoLog: l.file[InfoLog].Write(data) } if stats := severityStats[s]; stats != nil { atomic.AddInt64(&stats.lines, 1) atomic.AddInt64(&stats.bytes, int64(len(data))) } } l.mu.Unlock() // Flush and exit on fatal logging. if s == FatalLog { // If we got here via Exit rather than Fatal, print no stacks. 
timeoutFlush(10 * time.Second) if atomic.LoadUint32(&fatalNoStacks) > 0 { osExitFunc(1) } else { osExitFunc(255) // C++ uses -1, which is silly because it's anded with 255 anyway. } } } func encodeLogEntry(entry *LogEntry) []byte { // Marshal log entry. entryData, err := gogoproto.Marshal(entry) if err != nil { panic(fmt.Sprintf("unable to marshal log entry: %s", err)) } // Encode the length of the data first, followed by the encoded data. data := encoding.EncodeUint32([]byte(nil), uint32(len(entryData))) return append(data, entryData...) } // processForStderr formats a log entry for output to standard error. func (l *loggingT) processForStderr(entry *LogEntry) []byte { return formatLogEntry(entry, l.shouldColorize()) } // shouldColorize returns whether output should be colorized. func (l *loggingT) shouldColorize() *colorProfile { if l.color == "auto" { return l.getTermColorProfile() } return nil } // checkForColorTerm attempts to verify that stderr is a character // device and if so, that the terminal supports color output. func (l *loggingT) getTermColorProfile() *colorProfile { if l.hasColorProfile == nil { var color bool fi, _ := os.Stderr.Stat() // get the FileInfo struct describing the standard input. if (fi.Mode() & os.ModeCharDevice) != 0 { term := os.Getenv("TERM") switch term { case "ansi", "xterm-color": l.colorProfile = colorProfile8 color = true case "xterm-256color": l.colorProfile = colorProfile256 color = true } } l.hasColorProfile = &color } return l.colorProfile } // timeoutFlush calls Flush and returns when it completes or after timeout // elapses, whichever happens first. This is needed because the hooks invoked // by Flush may deadlock when clog.Fatal is called from a hook that holds // a lock. func timeoutFlush(timeout time.Duration) { done := make(chan bool, 1) go func() { Flush() // calls logging.lockAndFlushAll() done <- true }() select { case <-done: case <-time.After(timeout): fmt.Fprintln(os.Stderr, "clog: Flush took longer than", timeout) } } // stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. func stacks(all bool) []byte { // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. n := 10000 if all { n = 100000 } var trace []byte for i := 0; i < 5; i++ { trace = make([]byte, n) nbytes := runtime.Stack(trace, all) if nbytes < len(trace) { return trace[:nbytes] } n *= 2 } return trace } // logExitFunc provides a simple mechanism to override the default behavior // of exiting on error. Used in testing and to guarantee we reach a required exit // for fatal logs. Instead, exit could be a function rather than a method but that // would make its use clumsier. var logExitFunc func(error) var osExitFunc func(int) // exit is called if there is trouble creating or writing log files. // It flushes the logs and exits the program; there's no point in hanging around. // l.mu is held. func (l *loggingT) exit(err error) { fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) // If logExitFunc is set, we do that instead of exiting. if logExitFunc != nil { logExitFunc(err) return } l.flushAll() osExitFunc(2) } // syncBuffer joins a bufio.Writer to its underlying file, providing access to the // file's Sync method and providing a wrapper for the Write method that provides log // file rotation. There are conflicting methods, so the file cannot be embedded. // l.mu is held for all its methods. 
type syncBuffer struct { logger *loggingT *bufio.Writer file *os.File sev Severity nbytes uint64 // The number of bytes written to this file } func (sb *syncBuffer) Sync() error { return sb.file.Sync() } func (sb *syncBuffer) Write(p []byte) (n int, err error) { if sb.nbytes+uint64(len(p)) >= MaxSize { if err := sb.rotateFile(time.Now()); err != nil { sb.logger.exit(err) } } n, err = sb.Writer.Write(p) sb.nbytes += uint64(n) if err != nil { sb.logger.exit(err) } return } // rotateFile closes the syncBuffer's file and starts a new one. func (sb *syncBuffer) rotateFile(now time.Time) error { if sb.file != nil { if err := sb.Flush(); err != nil { return err } if err := sb.file.Close(); err != nil { return err } } var err error sb.file, _, err = create(sb.sev, now) sb.nbytes = 0 if err != nil { return err } sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) // Write header. file, line, _ := caller.Lookup(0) for _, format := range []string{ fmt.Sprintf("Running on machine: %s", host), fmt.Sprintf("Binary: Built with %s %s for %s/%s", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH), } { entry := LogEntry{ Time: time.Now().UnixNano(), File: file, Line: int32(line), Format: format, } n, err := sb.file.Write(encodeLogEntry(&entry)) if err != nil { panic(err) } sb.nbytes += uint64(n) } return err } // bufferSize sizes the buffer associated with each log file. It's large // so that log records can accumulate without the logging thread blocking // on disk I/O. The flushDaemon will block instead. const bufferSize = 256 * 1024 // removeFiles clears all the log files. func (l *loggingT) removeFiles() error { l.mu.Lock() defer l.mu.Unlock() return l.removeFilesLocked() } func (l *loggingT) removeFilesLocked() error { for s := FatalLog; s >= InfoLog; s-- { if sb, ok := l.file[s].(*syncBuffer); ok { if err := sb.file.Close(); err != nil { return err } if err := os.Remove(sb.file.Name()); err != nil { return err } } l.file[s] = nil } return nil } // createFiles creates all the log files for severity from sev down to InfoLog. // l.mu is held. func (l *loggingT) createFiles(sev Severity) error { now := time.Now() // Files are created in decreasing severity order, so as soon as we find one // has already been created, we can stop. for s := sev; s >= InfoLog && l.file[s] == nil; s-- { sb := &syncBuffer{ logger: l, sev: s, } if err := sb.rotateFile(now); err != nil { return err } l.file[s] = sb } return nil } const flushInterval = 30 * time.Second // flushDaemon periodically flushes the log file buffers. func (l *loggingT) flushDaemon() { // doesn't need to be Stop()'d as the loop never escapes for range time.Tick(flushInterval) { l.lockAndFlushAll() } } // lockAndFlushAll is like flushAll but locks l.mu first. func (l *loggingT) lockAndFlushAll() { l.mu.Lock() l.flushAll() l.mu.Unlock() } // flushAll flushes all the logs and attempts to "sync" their data to disk. // l.mu is held. func (l *loggingT) flushAll() { // Flush from fatal down, in case there's trouble flushing. for s := FatalLog; s >= InfoLog; s-- { file := l.file[s] if file != nil { _ = file.Flush() // ignore error _ = file.Sync() // ignore error } } } // CopyStandardLogTo arranges for messages written to the Go "log" package's // default logs to also appear in the Google logs for the named and lower // severities. Subsequent changes to the standard log's default output location // or format may break this behavior. // // Valid names are "INFO", "WARNING", "ERROR", and "FATAL". 
If the name is not // recognized, CopyStandardLogTo panics. func CopyStandardLogTo(name string) { sev, ok := SeverityByName(name) if !ok { panic(fmt.Sprintf("CopyStandardLogTo(%q): unrecognized Severity name", name)) } // Set a log format that captures the user's file and line: // d.go:23: message stdLog.SetFlags(stdLog.Lshortfile) stdLog.SetOutput(logBridge(sev)) } // logBridge provides the Write method that enables CopyStandardLogTo to connect // Go's standard logs to the logs provided by this package. type logBridge Severity // Write parses the standard logging line and passes its components to the // logger for Severity(lb). func (lb logBridge) Write(b []byte) (n int, err error) { var ( file = "???" line = 1 text string ) // Split "d.go:23: message" into "d.go", "23", and "message". if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 { text = fmt.Sprintf("bad log format: %s", b) } else { file = string(parts[0]) text = string(parts[2][1 : len(parts[2])-1]) // skip leading space and trailing newline line, err = strconv.Atoi(string(parts[1])) if err != nil { text = fmt.Sprintf("bad line number: %s", b) line = 1 } } // printWithFileLine with alsoToStderr=true, so standard log messages // always appear on standard error. entry := &LogEntry{ Format: text, } logging.outputLogEntry(Severity(lb), file, line, true, entry) return len(b), nil } // setV computes and remembers the V level for a given PC // when vmodule is enabled. // File pattern matching takes the basename of the file, stripped // of its .go suffix, and uses filepath.Match, which is a little more // general than the *? matching used in C++. // l.mu is held. func (l *loggingT) setV(pc uintptr) level { fn := runtime.FuncForPC(pc) file, _ := fn.FileLine(pc) // The file is something like /a/b/c/d.go. We want just the d. if strings.HasSuffix(file, ".go") { file = file[:len(file)-3] } if slash := strings.LastIndex(file, "/"); slash >= 0 { file = file[slash+1:] } for _, filter := range l.vmodule.filter { if filter.match(file) { l.vmap[pc] = filter.level return filter.level } } l.vmap[pc] = 0 return 0 } func v(level level) bool { return VDepth(level, 1) } // VDepth reports whether verbosity at the call site is at least the requested // level. func VDepth(level level, depth int) bool { // This function tries hard to be cheap unless there's work to do. // The fast path is two atomic loads and compares. // Here is a cheap but safe test to see if V logging is enabled globally. if logging.verbosity.get() >= level { return true } // It's off globally but it vmodule may still be set. // Here is another cheap but safe test to see if vmodule is enabled. if atomic.LoadInt32(&logging.filterLength) > 0 { // Now we need a proper lock to use the logging structure. The pcs field // is shared so we must lock before accessing it. This is fairly expensive, // but if V logging is enabled we're slow anyway. logging.mu.Lock() defer logging.mu.Unlock() if runtime.Callers(2+depth, logging.pcs[:]) == 0 { return false } v, ok := logging.vmap[logging.pcs[0]] if !ok { v = logging.setV(logging.pcs[0]) } return v >= level } return false } // fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. // It allows Exit and relatives to use the Fatal logs. var fatalNoStacks uint32
[ "\"TERM\"" ]
[]
[ "TERM" ]
[]
["TERM"]
go
1
0
ResNet-50/SVHN/train.py
import torchvision import torchvision.transforms as transforms import torch import torch.utils.data import resnet from torch.autograd import Variable from torch import nn import early_stop from tqdm import tqdm import os,sys import numpy as np os.environ["CUDA_VISIBLE_DEVICES"] = "2" train_globa_step=0 val_globa_step=0 wd=1e-50 learning_rate=1e-4 epochs=100 batch_size=300 torch.backends.cudnn.benchmark = True transform=transforms.Compose([ torchvision.transforms.Resize((64,64)), torchvision.transforms.ToTensor(), ]) trainset = torchvision.datasets.SVHN(root='./data',split='train', download=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=16) transform_test=transforms.Compose([torchvision.transforms.Resize((64,64)), transforms.ToTensor(), ]) testset = torchvision.datasets.SVHN(root='./data', split='test', download=True, transform=transform_test) testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=16) n = resnet.resnet101().cuda() weight_p, bias_p = [],[] for name, p in n.named_parameters(): if 'bias' in name: bias_p += [p] else: weight_p += [p] trans_params = list(map(id, n.trans_conv.parameters())) class_params = list(map(id, n.group2.parameters())) base_params = filter(lambda p: id(p) not in trans_params, n.parameters()) base_params = filter(lambda p: id(p) not in class_params, base_params) loss1 =nn.MSELoss() loss1.cuda() loss2=nn.CrossEntropyLoss() loss2.cuda() optimizer = torch.optim.Adam([{'params': base_params}, {'params':n.trans_conv.parameters(),'lr':learning_rate}, {'params':n.group2.parameters(),'lr':learning_rate}], lr=learning_rate,weight_decay=wd) opt = torch.optim.Adam([{'params': base_params}, {'params':n.trans_conv.parameters(),'lr':learning_rate}], lr=learning_rate,weight_decay=wd) if os.path.exists('bestmodel_params.pkl'): checkpoint = torch.load('bestmodel_params.pkl') n.load_state_dict(checkpoint['state_dict']) optimizer.load_state_dict(checkpoint['opt_state_dict']) opt.load_state_dict(checkpoint['opt_state_dict2']) sch=torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,factor=0.1,patience=10) es=early_stop.EarlyStopping('max',patience=20) for epoch in range(epochs): loadertrain = tqdm(trainloader, desc='{} E{:03d}'.format('train', epoch), ncols=0) loadertest = tqdm(testloader, desc='{} E{:03d}'.format('test', epoch), ncols=0) epoch_loss = 0.0 correct=0.0 total=0.0 total2=0.0 correct2=0.0 for x_train, y_train in loadertrain: n.train() x_train, y_train = Variable(x_train.cuda()),Variable(y_train.cuda()) x_noise=torch.FloatTensor(x_train.size(0),3,64,64).uniform_(-0.01,0.01) x_noise=torch.clamp(x_noise,-0.01,0.01) x_train_noise=x_train+Variable(x_noise.cuda()) y_pre,c_pre = n(x_train_noise) y_pre=y_pre.cuda() n.zero_grad() optimizer.zero_grad() loss = loss1(torch.mul(y_pre,1.0), torch.mul( x_train,1.0)) if loss.item()>3: loss.backward(retain_graph=True) torch.nn.utils.clip_grad_norm_(n.parameters(), 5.0) opt.step() epoch_loss += loss.data.item() _, predicted = torch.max(c_pre.data, 1) total += y_train.size(0) correct += predicted.eq(y_train.data).cuda().sum() torch.cuda.empty_cache() else: loss_cl=loss2(c_pre,y_train) loss_sum=torch.mul(loss,1/1)+loss_cl loss_sum.backward(retain_graph=True) torch.nn.utils.clip_grad_norm_(n.parameters(), 5.0) optimizer.step() epoch_loss += loss_sum.data.item() _, predicted = torch.max(c_pre.data, 1) total += y_train.size(0) correct += predicted.eq(y_train.data).cuda().sum() train_globa_step+=1 
torch.cuda.empty_cache() if loss.item()<3: y_pre2, c_pre2 = n(y_pre) y_pre2 = y_pre2.cuda() n.zero_grad() optimizer.zero_grad() lossreg2 = loss1(torch.mul(y_pre2, 1.0), torch.mul( x_train, 1.0)) loss_cl2 = loss2(c_pre2, y_train) _, predicted2 = torch.max(c_pre2.data, 1) total2 += y_train.size(0) correct2 += predicted2.eq(y_train.data).cuda().sum() loss_sum2 = torch.mul(lossreg2, 1 / 1) + loss_cl2 loss_sum2.backward() torch.nn.utils.clip_grad_norm_(n.parameters(), 5.0) optimizer.step() torch.cuda.empty_cache() if train_globa_step% 20==0: n.eval() checkpoint = { 'state_dict': n.state_dict(), 'opt_state_dict': optimizer.state_dict(), 'opt_state_dict2':opt.state_dict(), 'epoch': epoch } torch.save(checkpoint, 'model_params.pkl') fmt = '{:.4f}'.format loadertrain.set_postfix(loss=fmt(loss.data.item()), acc=fmt(correct.item() / total * 100)) if (epoch) % 1 ==0: test_loss = 0.0 correct = 0.0 total = 0.0 n.eval() with torch.no_grad(): for x_test, y_test in loadertest: x_test, y_test = Variable(x_test.cuda()), Variable(y_test.cuda()) y_pre, c_pre = n(x_test) y_pre = y_pre.cuda() loss_cl = loss2(c_pre, y_test) loss = loss1(torch.mul(y_pre,1.0), torch.mul( x_test,1.0)) loss_sum = torch.mul(loss,1/1) + loss_cl test_loss += loss_sum.data.item() _, predicted = torch.max(c_pre.data, 1) total += y_test.size(0) correct += predicted.eq(y_test.data).cuda().sum() val_globa_step+=1 fmt = '{:.4f}'.format loadertest.set_postfix(loss=fmt(loss_sum.data.item()), acc=fmt(correct.item() / total * 100)) sch.step(test_loss) fl=es.step(correct.item()/total*100, n,optimizer,opt,epoch) if fl: torch.cuda.empty_cache() sys.exit(0) torch.cuda.empty_cache()
[]
[]
[ "CUDA_VISIBLE_DEVICES" ]
[]
["CUDA_VISIBLE_DEVICES"]
python
1
0
cli/make_prebuilts.py
import os
import sys
import argparse
import blobconverter
import itertools

try:
    from artifactory import ArtifactoryPath
except ImportError:
    print("Package \"artifactory\" not found! Please run {} -m pip install artifactory".format(sys.executable), file=sys.stderr)
    sys.exit(1)

parser = argparse.ArgumentParser()
parser.add_argument('username', help='Artifactory username, can be also set using env variable ARTIFACTORY_USERNAME', default=os.getenv('ARTIFACTORY_USERNAME'))
parser.add_argument('password', help='Artifactory password, can be also set using env variable ARTIFACTORY_PASSWORD', default=os.getenv('ARTIFACTORY_PASSWORD'))
parser.add_argument('-url', '--blobconverter-url', type=str, help="URL to custom BlobConverter URL to be used for conversion", required=False)
args = parser.parse_args()

if None in (args.username, args.password):
    parser.print_help()
    sys.exit(1)

if args.blobconverter_url is not None:
    blobconverter.set_defaults(url=args.blobconverter_url)

path = ArtifactoryPath("https://artifacts.luxonis.com/artifactory/blobconverter-backup/blobs", auth=(args.username, args.password))
if not path.exists():
    path.mkdir()

priority_models = ["mobilenet-ssd", "efficientnet-b0", "vehicle-license-plate-detection-barrier-0106",
                   "vehicle-detection-adas-0002", "license-plate-recognition-barrier-0007",
                   "vehicle-attributes-recognition-barrier-0039", "face-detection-retail-0004",
                   "landmarks-regression-retail-0009"]
backup_shaves = range(1, 17)

for model_name, shaves in itertools.product(priority_models, backup_shaves):
    print("Deploying {} with {} shaves...".format(model_name, shaves))
    try:
        path.deploy_file(blobconverter.from_zoo(model_name, shaves=shaves))
    except Exception as ex:
        print("Deployment failed due to {}".format(str(ex)))

for model_name in blobconverter.zoo_list():
    if model_name in priority_models:
        continue
    print("Deploying {}...".format(model_name))
    try:
        path.deploy_file(blobconverter.from_zoo(model_name))
    except Exception as ex:
        print("Deployment failed due to {}".format(str(ex)))
[]
[]
[ "ARTIFACTORY_PASSWORD", "ARTIFACTORY_USERNAME" ]
[]
["ARTIFACTORY_PASSWORD", "ARTIFACTORY_USERNAME"]
python
2
0
tests/test_functional_misc.py
#!/usr/bin/python3 -OO # Copyright 2007-2020 The SABnzbd-Team <[email protected]> # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """ tests.test_functional_misc - Functional tests of various functions """ import shutil import subprocess import sys import sabnzbd.encoding from tests.testhelper import * class SABnzbdShowLoggingTest(SABnzbdBaseTest): def test_showlog(self): """ Test the output of the filtered-log button """ # Basic URL-fetching, easier than Selenium file download log_result = get_url_result("status/showlog") # Make sure it has basic log stuff assert "The log" in log_result assert "Full executable path" in log_result # Make sure sabnzbd.ini was appended assert "__encoding__ = utf-8" in log_result assert "[misc]" in log_result class TestSamplePostProc: def test_sample_post_proc(self): """ Make sure we don't break things """ # Set parameters script_params = [ "somedir222", "nzbname", "frènch_german_demö", "Index12", "Cat88", "MyGroup", "PP0", "https://example.com/", ] script_call = [sys.executable, "scripts/Sample-PostProc.py", "server"] script_call.extend(script_params) # Set parameters via env env = os.environ.copy() env["SAB_VERSION"] = "frènch_german_demö_version" # Run script and check output script_call = subprocess.Popen(script_call, stdout=subprocess.PIPE, env=env) script_output, errs = script_call.communicate(timeout=15) # This is a bit bad, since we use our own function # But in a way it is also a test if the function does its job! script_output = sabnzbd.encoding.platform_btou(script_output) # Check if all parameters are there for param in script_params: assert param in script_output assert env["SAB_VERSION"] in script_output class TestExtractPot: def test_extract_pot(self): """ Simple test if translation extraction still works """ script_call = [sys.executable, "tools/extract_pot.py"] # Run script and check output script_call = subprocess.Popen(script_call, stdout=subprocess.PIPE) script_output, errs = script_call.communicate(timeout=15) script_output = sabnzbd.encoding.platform_btou(script_output) # Success message? 
assert "Creating POT file" in script_output assert "Finished creating POT file" in script_output assert "Post-process POT file" in script_output assert "Finished post-process POT file" in script_output assert "Creating email POT file" in script_output assert "Finished creating email POT file" in script_output # Check if the file was modified less than 30 seconds ago cur_time = time.time() assert (cur_time - os.path.getmtime("po/main/SABnzbd.pot")) < 30 assert (cur_time - os.path.getmtime("po/email/SABemail.pot")) < 30 @pytest.mark.skipif(sys.platform.startswith("win"), reason="Skipping on Windows") @pytest.mark.skipif(sys.platform.startswith("darwin"), reason="Fails for now due to PyObjC problem") class TestDaemonizing(SABnzbdBaseTest): def test_daemonizing(self): """ Simple test to see if daemon-mode still works. Also test removal of large "sabnzbd.error.log" We inherit from SABnzbdBaseTest so we can use it's clean-up logic! """ daemon_host = "localhost" daemon_port = 23456 ini_location = os.path.join(SAB_CACHE_DIR, "daemon_test") # Create large output-file error_log_path = os.path.join(ini_location, sabnzbd.cfg.log_dir(), sabnzbd.constants.DEF_LOG_ERRFILE) os.makedirs(os.path.dirname(error_log_path), exist_ok=True) with open(error_log_path, "wb") as large_log: large_log.seek(6 * 1024 * 1024) large_log.write(b"\1") # We need the basic-config to set the API-key # Otherwise we can't shut it down at the end shutil.copyfile(os.path.join(SAB_BASE_DIR, "sabnzbd.basic.ini"), os.path.join(ini_location, "sabnzbd.ini")) # Combine it all into the script call script_call = [ sys.executable, "SABnzbd.py", "-d", "-s", "%s:%s" % (daemon_host, daemon_port), "-f", ini_location, "--pid", ini_location, ] # Run script and check output script_call = subprocess.Popen(script_call, stdout=subprocess.PIPE) script_output, errs = script_call.communicate(timeout=15) # No error or output should be thrown by main process assert not script_output assert not errs # It should be online after 3 seconds time.sleep(3.0) assert "version" in get_api_result("version", daemon_host, daemon_port) # Did it create the PID file pid_file = os.path.join(ini_location, "sabnzbd-%d.pid" % daemon_port) assert os.path.exists(pid_file) # Did it remove the bad log file? assert os.path.exists(error_log_path) assert os.path.getsize(error_log_path) < 1024 # Let's shut it down and give it some time to do so get_url_result("shutdown", daemon_host, daemon_port) time.sleep(3.0)
[]
[]
[]
[]
[]
python
0
0
cmd/generate-lint/generate-lint.go
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"path"
	"strings"

	"github.com/evergreen-ci/shrub"
	"github.com/google/shlex"
	"github.com/mongodb/grip"
	"github.com/pkg/errors"
)

const (
	lintPrefix        = "lint"
	lintVariant       = "lint"
	lintGroup         = "lint-group"
	commitMaxHosts    = 4
	patchMaxHosts     = 1
	evergreenLintTask = "evergreen"
	jsonFilename      = "bin/generate-lint.json"
	scriptsDir        = "scripts"
	packagePrefix     = "github.com/evergreen-ci/evergreen"
)

// whatChanged returns a list of files that have changed in the working
// directory. First, it tries diffing changed files against the merge base. If
// there are no changes, this is not a patch build, so it diffs HEAD against HEAD~.
func whatChanged() ([]string, error) {
	mergeBaseCmd := exec.Command("git", "merge-base", "main@{upstream}", "HEAD")
	base, err := mergeBaseCmd.Output()
	if err != nil {
		return nil, errors.Wrap(err, "problem getting merge-base")
	}
	diffCmd := exec.Command("git", "diff", strings.TrimSpace(string(base)), "--name-only")
	files, err := diffCmd.Output()
	if err != nil {
		return nil, errors.Wrap(err, "problem getting diff")
	}
	var split []string
	// if there is no diff, this is not a patch build
	if len(files) == 0 {
		return []string{}, nil
	}
	split = strings.Split(strings.TrimSpace(string(files)), "\n")
	return split, nil
}

// targetsFromChangedFiles returns a list of make targets.
func targetsFromChangedFiles(files []string) ([]string, error) {
	targets := map[string]struct{}{}
	for _, f := range files {
		filePath := strings.TrimSpace(f)
		if strings.HasSuffix(filePath, ".go") {
			dir := path.Dir(filePath)
			if dir == scriptsDir {
				continue
			}
			// We can't run make targets on packages in the cmd directory
			// because the packages contain dashes.
			if strings.HasPrefix(dir, "vendor") || strings.HasPrefix(dir, "cmd") {
				continue
			}
			if dir == "." || dir == "main" {
				targets["evergreen"] = struct{}{}
			} else {
				targets[strings.Replace(dir, "/", "-", -1)] = struct{}{}
			}
		}
	}
	targetSlice := []string{}
	for t := range targets {
		targetSlice = append(targetSlice, t)
	}
	return targetSlice, nil
}

// makeTarget returns the name of the lint task for the given make target.
func makeTarget(target string) string {
	return fmt.Sprintf("%s-%s", lintPrefix, target)
}

func getAllTargets() ([]string, error) {
	var targets []string
	gobin := os.Getenv("GO_BIN_PATH")
	if gobin == "" {
		gobin = "go"
	}
	args, _ := shlex.Split(fmt.Sprintf("%s list -f '{{ join .Deps \"\\n\"}}' cmd/evergreen/evergreen.go", gobin))
	cmd := exec.Command(args[0], args[1:]...)
	allPackages, err := cmd.Output()
	if err != nil {
		return nil, errors.Wrap(err, "problem getting diff")
	}
	split := strings.Split(strings.TrimSpace(string(allPackages)), "\n")
	for _, p := range split {
		if strings.HasPrefix(p, fmt.Sprintf("%s/vendor", packagePrefix)) {
			continue
		}
		if !strings.HasPrefix(p, packagePrefix) {
			continue
		}
		if p == packagePrefix {
			targets = append(targets, "evergreen")
			continue
		}
		p = strings.TrimPrefix(p, packagePrefix)
		p = strings.TrimPrefix(p, "/")
		p = strings.Replace(p, "/", "-", -1)
		targets = append(targets, p)
	}
	return targets, nil
}

// generateTasks returns a map of tasks to generate.
func generateTasks() (*shrub.Configuration, error) {
	changes, err := whatChanged()
	if err != nil {
		return nil, err
	}
	var targets []string
	var maxHosts int
	if len(changes) == 0 {
		maxHosts = commitMaxHosts
		targets, err = getAllTargets()
		if err != nil {
			return nil, err
		}
	} else {
		maxHosts = patchMaxHosts
		targets, err = targetsFromChangedFiles(changes)
		if err != nil {
			return nil, err
		}
	}

	conf := &shrub.Configuration{}
	if len(targets) == 0 {
		return conf, nil
	}
	lintTargets := []string{}
	for _, t := range targets {
		name := makeTarget(t)
		conf.Task(name).MustHaveTestResults(true).FunctionWithVars("run-make", map[string]string{"target": name})
		lintTargets = append(lintTargets, name)
	}

	group := conf.TaskGroup(lintGroup).SetMaxHosts(maxHosts)
	group.SetupGroup.Command().Type("system").Command("git.get_project").Param("directory", "gopath/src/github.com/evergreen-ci/evergreen")
	group.SetupGroup.Command().Function("setup-credentials")
	cmd := group.SetupGroup.Command().Function("run-make")
	cmd.Vars = map[string]string{"target": "get-go-imports"}
	group.TeardownTask.Command().Function("attach-test-results")
	group.TeardownTask.Command().Function("remove-test-results")
	group.Task(lintTargets...)

	conf.Variant(lintVariant).AddTasks(lintGroup)

	return conf, nil
}

func main() {
	generate, err := generateTasks()
	if err != nil {
		grip.EmergencyFatal(err)
	}
	jsonBytes, _ := json.MarshalIndent(generate, "", " ")
	grip.Error(ioutil.WriteFile(jsonFilename, jsonBytes, 0644))
}
[ "\"GO_BIN_PATH\"" ]
[]
[ "GO_BIN_PATH" ]
[]
["GO_BIN_PATH"]
go
1
0
starshipcreator/starshipcreator/wsgi.py
""" WSGI config for starshipcreator project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "starshipcreator.settings") application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
kubernetes-model/vendor/github.com/openshift/origin/pkg/apps/generated/internalclientset/scheme/register.go
/**
 * Copyright (C) 2015 Red Hat, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package scheme

import (
	apps "github.com/openshift/origin/pkg/apps/apis/apps/install"
	announced "k8s.io/apimachinery/pkg/apimachinery/announced"
	registered "k8s.io/apimachinery/pkg/apimachinery/registered"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
	schema "k8s.io/apimachinery/pkg/runtime/schema"
	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
	os "os"
)

var Scheme = runtime.NewScheme()
var Codecs = serializer.NewCodecFactory(Scheme)
var ParameterCodec = runtime.NewParameterCodec(Scheme)
var Registry = registered.NewOrDie(os.Getenv("KUBE_API_VERSIONS"))
var GroupFactoryRegistry = make(announced.APIGroupFactoryRegistry)

func init() {
	v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
	Install(GroupFactoryRegistry, Registry, Scheme)
}

// Install registers the API group and adds types to a scheme
func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) {
	apps.Install(groupFactoryRegistry, registry, scheme)
}
[ "\"KUBE_API_VERSIONS\"" ]
[]
[ "KUBE_API_VERSIONS" ]
[]
["KUBE_API_VERSIONS"]
go
1
0
HACKERRANK/Problem Solving/Warmup/A Very Big Sum/AVeryBigSum.go
package main

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"strconv"
	"strings"
)

func aVeryBigSum(ar []int64) int64 {
	sum := int64(0)
	for i := 0; i < len(ar); i++ {
		sum += ar[i]
	}
	return sum
}

func main() {
	reader := bufio.NewReaderSize(os.Stdin, 1024*1024)

	stdout, err := os.Create(os.Getenv("OUTPUT_PATH"))
	checkError(err)
	defer stdout.Close()

	writer := bufio.NewWriterSize(stdout, 1024*1024)

	arCount, err := strconv.ParseInt(readLine(reader), 10, 64)
	checkError(err)

	arTemp := strings.Split(readLine(reader), " ")

	var ar []int64

	for i := 0; i < int(arCount); i++ {
		arItem, err := strconv.ParseInt(arTemp[i], 10, 64)
		checkError(err)
		ar = append(ar, arItem)
	}

	result := aVeryBigSum(ar)

	fmt.Fprintf(writer, "%d\n", result)

	writer.Flush()
}

func readLine(reader *bufio.Reader) string {
	str, _, err := reader.ReadLine()
	if err == io.EOF {
		return ""
	}
	return strings.TrimRight(string(str), "\r\n")
}

func checkError(err error) {
	if err != nil {
		panic(err)
	}
}
[ "\"OUTPUT_PATH\"" ]
[]
[ "OUTPUT_PATH" ]
[]
["OUTPUT_PATH"]
go
1
0
src/bot.py
import os

import telebot

# Initialize bot
bot = telebot.TeleBot(os.environ["TELEGRAM_BOT_TOKEN"], parse_mode="MARKDOWN")
[]
[]
[ "TELEGRAM_BOT_TOKEN" ]
[]
["TELEGRAM_BOT_TOKEN"]
python
1
0
methods.py
import os def add_source_files(self, sources, filetype, lib_env=None, shared=False): import glob import string # if not lib_objects: if not lib_env: lib_env = self if type(filetype) == type(""): dir = self.Dir('.').abspath list = glob.glob(dir + "/" + filetype) for f in list: sources.append(self.Object(f)) else: for f in filetype: sources.append(self.Object(f)) def build_shader_header(target, source, env): for x in source: print x name = str(x) name = name[name.rfind("/") + 1:] name = name[name.rfind("\\") + 1:] name = name.replace(".", "_") fs = open(str(x), "r") fd = open(str(x) + ".h", "w") fd.write("/* this file has been generated by SCons, do not edit! */\n") fd.write("static const char *" + name + "=\n") line = fs.readline() while(line): line = line.replace("\r", "") line = line.replace("\n", "") line = line.replace("\\", "\\\\") line = line.replace("\"", "\\\"") fd.write("\"" + line + "\\n\"\n") line = fs.readline() fd.write(";\n") return 0 def build_glsl_header(filename): fs = open(filename, "r") line = fs.readline() vertex_lines = [] fragment_lines = [] uniforms = [] attributes = [] fbos = [] conditionals = [] texunits = [] texunit_names = [] ubos = [] ubo_names = [] reading = "" line_offset = 0 vertex_offset = 0 fragment_offset = 0 while(line): if (line.find("[vertex]") != -1): reading = "vertex" line = fs.readline() line_offset += 1 vertex_offset = line_offset continue if (line.find("[fragment]") != -1): reading = "fragment" line = fs.readline() line_offset += 1 fragment_offset = line_offset continue if (line.find("#ifdef ") != -1): ifdefline = line.replace("#ifdef ", "").strip() if (not ifdefline in conditionals): conditionals += [ifdefline] if (line.find("#elif defined(") != -1): ifdefline = line.replace("#elif defined(", "").strip() ifdefline = ifdefline.replace(")", "").strip() if (not ifdefline in conditionals): conditionals += [ifdefline] import re if re.search(r"^\s*uniform", line): if (line.lower().find("texunit:") != -1): # texture unit texunit = str(int(line[line.find(":") + 1:].strip())) uline = line[:line.lower().find("//")] uline = uline.replace("uniform", "") uline = uline.replace(";", "") lines = uline.split(",") for x in lines: x = x.strip() x = x[x.rfind(" ") + 1:] if (x.find("[") != -1): # unfiorm array x = x[:x.find("[")] if (not x in texunit_names): texunits += [(x, texunit)] texunit_names += [x] elif (line.lower().find("ubo:") != -1): # ubo uboidx = str(int(line[line.find(":") + 1:].strip())) uline = line[:line.lower().find("//")] uline = uline[uline.find("uniform") + len("uniform"):] uline = uline.replace(";", "") uline = uline.replace("{", "").strip() lines = uline.split(",") for x in lines: x = x.strip() x = x[x.rfind(" ") + 1:] if (x.find("[") != -1): # unfiorm array x = x[:x.find("[")] if (not x in ubo_names): ubos += [(x, uboidx)] ubo_names += [x] else: uline = line.replace("uniform", "") uline = uline.replace(";", "") lines = uline.split(",") for x in lines: x = x.strip() x = x[x.rfind(" ") + 1:] if (x.find("[") != -1): # unfiorm array x = x[:x.find("[")] if (not x in uniforms): uniforms += [x] if ((line.strip().find("in ") == 0 or line.strip().find("attribute ") == 0) and line.find("attrib:") != -1): uline = line.replace("in ", "") uline = uline.replace("attribute ", "") uline = uline.replace(";", "") uline = uline[uline.find(" "):].strip() if (uline.find("//") != -1): name, bind = uline.split("//") if (bind.find("attrib:") != -1): name = name.strip() bind = bind.replace("attrib:", "").strip() attributes += [(name, bind)] if (line.strip().find("out 
") == 0): uline = line.replace("out", "").strip() uline = uline.replace(";", "") uline = uline[uline.find(" "):].strip() if (uline.find("//") != -1): name, bind = uline.split("//") if (bind.find("drawbuffer:") != -1): name = name.strip() bind = bind.replace("drawbuffer:", "").strip() fbos += [(name, bind)] line = line.replace("\r", "") line = line.replace("\n", "") line = line.replace("\\", "\\\\") line = line.replace("\"", "\\\"") # line=line+"\\n\\" no need to anymore if (reading == "vertex"): vertex_lines += [line] if (reading == "fragment"): fragment_lines += [line] line = fs.readline() line_offset += 1 fs.close() out_file = filename + ".h" fd = open(out_file, "w") fd.write("/* WARNING, THIS FILE WAS GENERATED, DO NOT EDIT */\n") out_file_base = out_file out_file_base = out_file_base[out_file_base.rfind("/") + 1:] out_file_base = out_file_base[out_file_base.rfind("\\") + 1:] # print("out file "+out_file+" base " +out_file_base) out_file_ifdef = out_file_base.replace(".", "_").upper() fd.write("#ifndef " + out_file_ifdef + "\n") fd.write("#define " + out_file_ifdef + "\n") out_file_class = out_file_base.replace(".glsl.h", "").title().replace("_", "").replace(".", "") + "ShaderGL" fd.write("\n\n") fd.write("#include \"drivers/opengl/shader_gl.h\"\n\n\n") fd.write("class " + out_file_class + " : public ShaderGL {\n\n") fd.write("\t virtual String get_shader_name() const { return \"" + out_file_class + "\"; }\n") fd.write("public:\n\n") if (len(conditionals)): fd.write("\tenum Conditionals {\n") for x in conditionals: fd.write("\t\t" + x + ",\n") fd.write("\t};\n\n") if (len(uniforms)): fd.write("\tenum Uniforms {\n") for x in uniforms: fd.write("\t\t" + x.upper() + ",\n") fd.write("\t};\n\n") fd.write("\t_FORCE_INLINE_ int get_uniform(Uniforms p_uniform) const { return _get_uniform(p_uniform); }\n\n") if (len(conditionals)): fd.write("\t_FORCE_INLINE_ void set_conditional(Conditionals p_conditional,bool p_enable) { _set_conditional(p_conditional,p_enable); }\n\n") fd.write("\t#define _FU if (get_uniform(p_uniform)<0) return; ERR_FAIL_COND( get_active()!=this );\n\n ") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, bool p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value?1:0); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_value) { _FU glUniform1f(get_uniform(p_uniform),p_value); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, double p_value) { _FU glUniform1f(get_uniform(p_uniform),p_value); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint8_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int8_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint16_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int16_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint32_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int32_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n") #fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint64_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n"); #fd.write("\t_FORCE_INLINE_ void 
set_uniform(Uniforms p_uniform, int64_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n"); fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, unsigned long p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, long p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Color& p_color) { _FU GLfloat col[4]={p_color.r,p_color.g,p_color.b,p_color.a}; glUniform4fv(get_uniform(p_uniform),1,col); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Vector2& p_vec2) { _FU GLfloat vec2[2]={p_vec2.x,p_vec2.y}; glUniform2fv(get_uniform(p_uniform),1,vec2); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Vector3& p_vec3) { _FU GLfloat vec3[3]={p_vec3.x,p_vec3.y,p_vec3.z}; glUniform3fv(get_uniform(p_uniform),1,vec3); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b) { _FU glUniform2f(get_uniform(p_uniform),p_a,p_b); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b, float p_c) { _FU glUniform3f(get_uniform(p_uniform),p_a,p_b,p_c); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b, float p_c, float p_d) { _FU glUniform4f(get_uniform(p_uniform),p_a,p_b,p_c,p_d); }\n\n") fd.write("""\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Transform& p_transform) { _FU const Transform &tr = p_transform; GLfloat matrix[16]={ /* build a 16x16 matrix */ tr.basis.elements[0][0], tr.basis.elements[1][0], tr.basis.elements[2][0], 0, tr.basis.elements[0][1], tr.basis.elements[1][1], tr.basis.elements[2][1], 0, tr.basis.elements[0][2], tr.basis.elements[1][2], tr.basis.elements[2][2], 0, tr.origin.x, tr.origin.y, tr.origin.z, 1 }; glUniformMatrix4fv(get_uniform(p_uniform),1,false,matrix); } """) fd.write("""\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Transform2D& p_transform) { _FU const Transform2D &tr = p_transform; GLfloat matrix[16]={ /* build a 16x16 matrix */ tr.elements[0][0], tr.elements[0][1], 0, 0, tr.elements[1][0], tr.elements[1][1], 0, 0, 0, 0, 1, 0, tr.elements[2][0], tr.elements[2][1], 0, 1 }; glUniformMatrix4fv(get_uniform(p_uniform),1,false,matrix); } """) fd.write("""\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const CameraMatrix& p_matrix) { _FU GLfloat matrix[16]; for (int i=0;i<4;i++) { for (int j=0;j<4;j++) { matrix[i*4+j]=p_matrix.matrix[i][j]; } } glUniformMatrix4fv(get_uniform(p_uniform),1,false,matrix); }; """) fd.write("\n\n#undef _FU\n\n\n") fd.write("\tvirtual void init() {\n\n") if (len(conditionals)): fd.write("\t\tstatic const char* _conditional_strings[]={\n") if (len(conditionals)): for x in conditionals: fd.write("\t\t\t\"#define " + x + "\\n\",\n") fd.write("\t\t};\n\n") else: fd.write("\t\tstatic const char **_conditional_strings=NULL;\n") if (len(uniforms)): fd.write("\t\tstatic const char* _uniform_strings[]={\n") if (len(uniforms)): for x in uniforms: fd.write("\t\t\t\"" + x + "\",\n") fd.write("\t\t};\n\n") else: fd.write("\t\tstatic const char **_uniform_strings=NULL;\n") if (len(attributes)): fd.write("\t\tstatic AttributePair _attribute_pairs[]={\n") for x in attributes: fd.write("\t\t\t{\"" + x[0] + "\"," + x[1] + "},\n") fd.write("\t\t};\n\n") else: fd.write("\t\tstatic AttributePair *_attribute_pairs=NULL;\n") if (len(fbos)): fd.write("\t\tstatic FBOPair 
_fbo_pairs[]={\n") for x in fbos: fd.write("\t\t\t{\"" + x[0] + "\"," + x[1] + "},\n") fd.write("\t\t};\n\n") else: fd.write("\t\tstatic FBOPair *_fbo_pairs=NULL;\n") if (len(ubos)): fd.write("\t\tstatic UBOPair _ubo_pairs[]={\n") for x in ubos: fd.write("\t\t\t{\"" + x[0] + "\"," + x[1] + "},\n") fd.write("\t\t};\n\n") else: fd.write("\t\tstatic UBOPair *_ubo_pairs=NULL;\n") if (len(texunits)): fd.write("\t\tstatic TexUnitPair _texunit_pairs[]={\n") for x in texunits: fd.write("\t\t\t{\"" + x[0] + "\"," + x[1] + "},\n") fd.write("\t\t};\n\n") else: fd.write("\t\tstatic TexUnitPair *_texunit_pairs=NULL;\n") fd.write("\t\tstatic const char* _vertex_code=\"\\\n") for x in vertex_lines: fd.write("\t\t\t" + x + "\n") fd.write("\t\t\";\n\n") fd.write("\t\tstatic const int _vertex_code_start=" + str(vertex_offset) + ";\n") fd.write("\t\tstatic const char* _fragment_code=\"\\\n") for x in fragment_lines: fd.write("\t\t\t" + x + "\n") fd.write("\t\t\";\n\n") fd.write("\t\tstatic const int _fragment_code_start=" + str(fragment_offset) + ";\n") fd.write("\t\tsetup(_conditional_strings," + str(len(conditionals)) + ",_uniform_strings," + str(len(uniforms)) + ",_attribute_pairs," + str(len(attributes)) + ",_fbo_pairs," + str(len(fbos)) + ",_ubo_pairs," + str(len(ubos)) + ",_texunit_pairs," + str(len(texunits)) + ",_vertex_code,_fragment_code,_vertex_code_start,_fragment_code_start);\n") fd.write("\t};\n\n") fd.write("};\n\n") fd.write("#endif\n\n") fd.close() def build_glsl_headers(target, source, env): for x in source: build_glsl_header(str(x)) return 0 def build_hlsl_dx9_header(filename): fs = open(filename, "r") line = fs.readline() vertex_lines = [] fragment_lines = [] uniforms = [] fragment_uniforms = [] attributes = [] fbos = [] conditionals = [] reading = "" line_offset = 0 vertex_offset = 0 fragment_offset = 0 while(line): if (line.find("[vertex]") != -1): reading = "vertex" line = fs.readline() line_offset += 1 vertex_offset = line_offset continue if (line.find("[fragment]") != -1): reading = "fragment" line = fs.readline() line_offset += 1 fragment_offset = line_offset continue if (line.find("#ifdef ") != -1): ifdefline = line.replace("#ifdef ", "").strip() if (not ifdefline in conditionals): conditionals += [ifdefline] if (line.find("#elif defined(") != -1): ifdefline = line.replace("#elif defined(", "").strip() ifdefline = ifdefline.replace(")", "").strip() if (not ifdefline in conditionals): conditionals += [ifdefline] if (line.find("uniform") != -1): uline = line.replace("uniform", "") uline = uline.replace(";", "") lines = uline.split(",") for x in lines: x = x.strip() x = x[x.rfind(" ") + 1:] if (x.find("[") != -1): # unfiorm array x = x[:x.find("[")] if (not x in uniforms): uniforms += [x] fragment_uniforms += [reading == "fragment"] line = line.replace("\r", "") line = line.replace("\n", "") line = line.replace("\\", "\\\\") line = line.replace("\"", "\\\"") line = line + "\\n\\" if (reading == "vertex"): vertex_lines += [line] if (reading == "fragment"): fragment_lines += [line] line = fs.readline() line_offset += 1 fs.close() out_file = filename + ".h" fd = open(out_file, "w") fd.write("/* WARNING, THIS FILE WAS GENERATED, DO NOT EDIT */\n") out_file_base = out_file out_file_base = out_file_base[out_file_base.rfind("/") + 1:] out_file_base = out_file_base[out_file_base.rfind("\\") + 1:] # print("out file "+out_file+" base " +out_file_base) out_file_ifdef = out_file_base.replace(".", "_").upper() fd.write("#ifndef " + out_file_ifdef + "\n") fd.write("#define " + out_file_ifdef + 
"\n") out_file_class = out_file_base.replace(".hlsl.h", "").title().replace("_", "").replace(".", "") + "ShaderDX9" fd.write("\n\n") fd.write("#include \"drivers/directx9/shader_dx9.h\"\n\n\n") fd.write("class " + out_file_class + " : public ShaderDX9 {\n\n") fd.write("\t virtual String get_shader_name() const { return \"" + out_file_class + "\"; }\n") fd.write("public:\n\n") if (len(conditionals)): fd.write("\tenum Conditionals {\n") for x in conditionals: fd.write("\t\t" + x + ",\n") fd.write("\t};\n\n") if (len(uniforms)): fd.write("\tenum Uniforms {\n") for x in uniforms: fd.write("\t\t" + x.upper() + ",\n") fd.write("\t};\n\n") if (len(conditionals)): fd.write("\t_FORCE_INLINE_ void set_conditional(Conditionals p_conditional,bool p_enable) { _set_conditional(p_conditional,p_enable); }\n\n") fd.write("\t#define _FU if (!_uniform_valid(p_uniform)) return; ERR_FAIL_COND( get_active()!=this );\n\n ") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, bool p_value) { _FU set_uniformb(p_uniform,p_value); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_value) { _FU set_uniformf(p_uniform,p_value); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, double p_value) { _FU set_uniformf(p_uniform,p_value); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint8_t p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int8_t p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint16_t p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int16_t p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint32_t p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int32_t p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n") #fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint64_t p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n"); #fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int64_t p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n"); fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, unsigned long p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, long p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Color& p_color) { _FU float col[4]={p_color.r,p_color.g,p_color.b,p_color.a}; set_uniformfv(p_uniform,col); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Vector2& p_vec2) { _FU float vec2[4]={p_vec2.x,p_vec2.y,0,0}; set_uniformfv(p_uniform,vec2); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Vector3& p_vec3) { _FU float vec3[4]={p_vec3.x,p_vec3.y,p_vec3.z,0}; set_uniformfv(p_uniform,vec3); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b) { _FU float vec2[4]={p_a,p_b,0,0}; set_uniformfv(p_uniform,vec2); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b, float p_c) { _FU float vec3[4]={p_a,p_b,p_c,0}; set_uniformfv(p_uniform,vec3); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b, float p_c, float 
p_d) { _FU float vec4[4]={p_a,p_b,p_c,p_d}; set_uniformfv(p_uniform,vec4); }\n\n") fd.write("""\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Transform& p_transform) { _FU const Transform &tr = p_transform; float matrix[16]={ /* build a 16x16 matrix */ tr.basis.elements[0][0], tr.basis.elements[0][1], tr.basis.elements[0][2], tr.origin.x, tr.basis.elements[1][0], tr.basis.elements[1][1], tr.basis.elements[1][2], tr.origin.y, tr.basis.elements[2][0], tr.basis.elements[2][1], tr.basis.elements[2][2], tr.origin.z, 0, 0, 0, 1 }; set_uniformfv(p_uniform,&matrix[0],4); } """) fd.write("""\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const CameraMatrix& p_matrix) { _FU float matrix[16]; for (int i=0;i<4;i++) { for (int j=0;j<4;j++) { matrix[i*4+j]=p_matrix.matrix[j][i]; } } set_uniformfv(p_uniform,&matrix[0],4); }; """) fd.write("\n\n#undef _FU\n\n\n") fd.write("\tvirtual void init(IDirect3DDevice9 *p_device,ShaderSupport p_version) {\n\n") if (len(conditionals)): fd.write("\t\tstatic const char* _conditional_strings[]={\n") if (len(conditionals)): for x in conditionals: fd.write("\t\t\t\"" + x + "\",\n") fd.write("\t\t};\n\n") else: fd.write("\t\tstatic const char **_conditional_strings=NULL;\n") if (len(uniforms)): fd.write("\t\tstatic const char* _uniform_strings[]={\n") if (len(uniforms)): for x in uniforms: fd.write("\t\t\t\"" + x + "\",\n") fd.write("\t\t};\n\n") fd.write("\t\tstatic const bool _fragment_uniforms[]={\n") if (len(uniforms)): for x in fragment_uniforms: if (x): fd.write("\t\t\ttrue,\n") else: fd.write("\t\t\tfalse,\n") fd.write("\t\t};\n\n") else: fd.write("\t\tstatic const char **_uniform_strings=NULL;\n") fd.write("\t\tstatic const bool *_fragment_uniforms=NULL;\n") fd.write("\t\tstatic const char* _vertex_code=\"\\\n") for x in vertex_lines: fd.write("\t\t\t" + x + "\n") fd.write("\t\t\";\n\n") fd.write("\t\tstatic const int _vertex_code_start=" + str(vertex_offset) + ";\n") fd.write("\t\tstatic const char* _fragment_code=\"\\\n") for x in fragment_lines: fd.write("\t\t\t" + x + "\n") fd.write("\t\t\";\n\n") fd.write("\t\tstatic const int _fragment_code_start=" + str(fragment_offset) + ";\n") fd.write("\t\tsetup(p_device,p_version,_conditional_strings," + str(len(conditionals)) + ",_uniform_strings," + str(len(uniforms)) + ",_fragment_uniforms,_vertex_code,_fragment_code,_vertex_code_start,_fragment_code_start);\n") fd.write("\t};\n\n") fd.write("};\n\n") fd.write("#endif\n\n") fd.close() def build_hlsl_dx9_headers(target, source, env): for x in source: build_hlsl_dx9_header(str(x)) return 0 class LegacyGLHeaderStruct: def __init__(self): self.vertex_lines = [] self.fragment_lines = [] self.uniforms = [] self.attributes = [] self.feedbacks = [] self.fbos = [] self.conditionals = [] self.enums = {} self.texunits = [] self.texunit_names = [] self.ubos = [] self.ubo_names = [] self.vertex_included_files = [] self.fragment_included_files = [] self.reading = "" self.line_offset = 0 self.vertex_offset = 0 self.fragment_offset = 0 def include_file_in_legacygl_header(filename, header_data, depth): fs = open(filename, "r") line = fs.readline() while(line): if (line.find("[vertex]") != -1): header_data.reading = "vertex" line = fs.readline() header_data.line_offset += 1 header_data.vertex_offset = header_data.line_offset continue if (line.find("[fragment]") != -1): header_data.reading = "fragment" line = fs.readline() header_data.line_offset += 1 header_data.fragment_offset = header_data.line_offset continue while(line.find("#include ") != -1): includeline = 
line.replace("#include ", "").strip()[1:-1] import os.path included_file = os.path.relpath(os.path.dirname(filename) + "/" + includeline) if (not included_file in header_data.vertex_included_files and header_data.reading == "vertex"): header_data.vertex_included_files += [included_file] if(include_file_in_legacygl_header(included_file, header_data, depth + 1) == None): print "Error in file '" + filename + "': #include " + includeline + "could not be found!" elif (not included_file in header_data.fragment_included_files and header_data.reading == "fragment"): header_data.fragment_included_files += [included_file] if(include_file_in_legacygl_header(included_file, header_data, depth + 1) == None): print "Error in file '" + filename + "': #include " + includeline + "could not be found!" line = fs.readline() if (line.find("#ifdef ") != -1 or line.find("#elif defined(") != -1): if (line.find("#ifdef ") != -1): ifdefline = line.replace("#ifdef ", "").strip() else: ifdefline = line.replace("#elif defined(", "").strip() ifdefline = ifdefline.replace(")", "").strip() if (line.find("_EN_") != -1): enumbase = ifdefline[:ifdefline.find("_EN_")] ifdefline = ifdefline.replace("_EN_", "_") line = line.replace("_EN_", "_") # print(enumbase+":"+ifdefline); if (enumbase not in header_data.enums): header_data.enums[enumbase] = [] if (ifdefline not in header_data.enums[enumbase]): header_data.enums[enumbase].append(ifdefline) elif (not ifdefline in header_data.conditionals): header_data.conditionals += [ifdefline] if (line.find("uniform") != -1 and line.lower().find("texunit:") != -1): # texture unit texunitstr = line[line.find(":") + 1:].strip() if (texunitstr == "auto"): texunit = "-1" else: texunit = str(int(texunitstr)) uline = line[:line.lower().find("//")] uline = uline.replace("uniform", "") uline = uline.replace("highp", "") uline = uline.replace(";", "") lines = uline.split(",") for x in lines: x = x.strip() x = x[x.rfind(" ") + 1:] if (x.find("[") != -1): # unfiorm array x = x[:x.find("[")] if (not x in header_data.texunit_names): header_data.texunits += [(x, texunit)] header_data.texunit_names += [x] elif (line.find("uniform") != -1 and line.lower().find("ubo:") != -1): # uniform buffer object ubostr = line[line.find(":") + 1:].strip() ubo = str(int(ubostr)) uline = line[:line.lower().find("//")] uline = uline[uline.find("uniform") + len("uniform"):] uline = uline.replace("highp", "") uline = uline.replace(";", "") uline = uline.replace("{", "").strip() lines = uline.split(",") for x in lines: x = x.strip() x = x[x.rfind(" ") + 1:] if (x.find("[") != -1): # unfiorm array x = x[:x.find("[")] if (not x in header_data.ubo_names): header_data.ubos += [(x, ubo)] header_data.ubo_names += [x] elif (line.find("uniform") != -1 and line.find("{") == -1 and line.find(";") != -1): uline = line.replace("uniform", "") uline = uline.replace(";", "") lines = uline.split(",") for x in lines: x = x.strip() x = x[x.rfind(" ") + 1:] if (x.find("[") != -1): # unfiorm array x = x[:x.find("[")] if (not x in header_data.uniforms): header_data.uniforms += [x] if (line.strip().find("attribute ") == 0 and line.find("attrib:") != -1): uline = line.replace("in ", "") uline = uline.replace("attribute ", "") uline = uline.replace("highp ", "") uline = uline.replace(";", "") uline = uline[uline.find(" "):].strip() if (uline.find("//") != -1): name, bind = uline.split("//") if (bind.find("attrib:") != -1): name = name.strip() bind = bind.replace("attrib:", "").strip() header_data.attributes += [(name, bind)] if 
(line.strip().find("out ") == 0 and line.find("tfb:") != -1): uline = line.replace("out ", "") uline = uline.replace("highp ", "") uline = uline.replace(";", "") uline = uline[uline.find(" "):].strip() if (uline.find("//") != -1): name, bind = uline.split("//") if (bind.find("tfb:") != -1): name = name.strip() bind = bind.replace("tfb:", "").strip() header_data.feedbacks += [(name, bind)] line = line.replace("\r", "") line = line.replace("\n", "") # line=line.replace("\\","\\\\") # line=line.replace("\"","\\\"") # line=line+"\\n\\" if (header_data.reading == "vertex"): header_data.vertex_lines += [line] if (header_data.reading == "fragment"): header_data.fragment_lines += [line] line = fs.readline() header_data.line_offset += 1 fs.close() return header_data def build_legacygl_header(filename, include, class_suffix, output_attribs): header_data = LegacyGLHeaderStruct() include_file_in_legacygl_header(filename, header_data, 0) out_file = filename + ".h" fd = open(out_file, "w") enum_constants = [] fd.write("/* WARNING, THIS FILE WAS GENERATED, DO NOT EDIT */\n") out_file_base = out_file out_file_base = out_file_base[out_file_base.rfind("/") + 1:] out_file_base = out_file_base[out_file_base.rfind("\\") + 1:] # print("out file "+out_file+" base " +out_file_base) out_file_ifdef = out_file_base.replace(".", "_").upper() fd.write("#ifndef " + out_file_ifdef + class_suffix + "_120\n") fd.write("#define " + out_file_ifdef + class_suffix + "_120\n") out_file_class = out_file_base.replace(".glsl.h", "").title().replace("_", "").replace(".", "") + "Shader" + class_suffix fd.write("\n\n") fd.write("#include \"" + include + "\"\n\n\n") fd.write("class " + out_file_class + " : public Shader" + class_suffix + " {\n\n") fd.write("\t virtual String get_shader_name() const { return \"" + out_file_class + "\"; }\n") fd.write("public:\n\n") if (len(header_data.conditionals)): fd.write("\tenum Conditionals {\n") for x in header_data.conditionals: fd.write("\t\t" + x.upper() + ",\n") fd.write("\t};\n\n") if (len(header_data.uniforms)): fd.write("\tenum Uniforms {\n") for x in header_data.uniforms: fd.write("\t\t" + x.upper() + ",\n") fd.write("\t};\n\n") fd.write("\t_FORCE_INLINE_ int get_uniform(Uniforms p_uniform) const { return _get_uniform(p_uniform); }\n\n") if (len(header_data.conditionals)): fd.write("\t_FORCE_INLINE_ void set_conditional(Conditionals p_conditional,bool p_enable) { _set_conditional(p_conditional,p_enable); }\n\n") fd.write("\t#define _FU if (get_uniform(p_uniform)<0) return; ERR_FAIL_COND( get_active()!=this );\n\n ") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_value) { _FU glUniform1f(get_uniform(p_uniform),p_value); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, double p_value) { _FU glUniform1f(get_uniform(p_uniform),p_value); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint8_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int8_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint16_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int16_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint32_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n") 
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int32_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n") #fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint64_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n"); #fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int64_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n"); #fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, unsigned long p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n"); #fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, long p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n"); fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Color& p_color) { _FU GLfloat col[4]={p_color.r,p_color.g,p_color.b,p_color.a}; glUniform4fv(get_uniform(p_uniform),1,col); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Vector2& p_vec2) { _FU GLfloat vec2[2]={p_vec2.x,p_vec2.y}; glUniform2fv(get_uniform(p_uniform),1,vec2); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Vector3& p_vec3) { _FU GLfloat vec3[3]={p_vec3.x,p_vec3.y,p_vec3.z}; glUniform3fv(get_uniform(p_uniform),1,vec3); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b) { _FU glUniform2f(get_uniform(p_uniform),p_a,p_b); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b, float p_c) { _FU glUniform3f(get_uniform(p_uniform),p_a,p_b,p_c); }\n\n") fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b, float p_c, float p_d) { _FU glUniform4f(get_uniform(p_uniform),p_a,p_b,p_c,p_d); }\n\n") fd.write("""\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Transform& p_transform) { _FU const Transform &tr = p_transform; GLfloat matrix[16]={ /* build a 16x16 matrix */ tr.basis.elements[0][0], tr.basis.elements[1][0], tr.basis.elements[2][0], 0, tr.basis.elements[0][1], tr.basis.elements[1][1], tr.basis.elements[2][1], 0, tr.basis.elements[0][2], tr.basis.elements[1][2], tr.basis.elements[2][2], 0, tr.origin.x, tr.origin.y, tr.origin.z, 1 }; glUniformMatrix4fv(get_uniform(p_uniform),1,false,matrix); } """) fd.write("""\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Transform2D& p_transform) { _FU const Transform2D &tr = p_transform; GLfloat matrix[16]={ /* build a 16x16 matrix */ tr.elements[0][0], tr.elements[0][1], 0, 0, tr.elements[1][0], tr.elements[1][1], 0, 0, 0, 0, 1, 0, tr.elements[2][0], tr.elements[2][1], 0, 1 }; glUniformMatrix4fv(get_uniform(p_uniform),1,false,matrix); } """) fd.write("""\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const CameraMatrix& p_matrix) { _FU GLfloat matrix[16]; for (int i=0;i<4;i++) { for (int j=0;j<4;j++) { matrix[i*4+j]=p_matrix.matrix[i][j]; } } glUniformMatrix4fv(get_uniform(p_uniform),1,false,matrix); }; """) fd.write("\n\n#undef _FU\n\n\n") fd.write("\tvirtual void init() {\n\n") enum_value_count = 0 if (len(header_data.enums)): fd.write("\t\t//Written using math, given nonstandarity of 64 bits integer constants..\n") fd.write("\t\tstatic const Enum _enums[]={\n") bitofs = len(header_data.conditionals) enum_vals = [] for xv in header_data.enums: x = header_data.enums[xv] bits = 1 amt = len(x) # print(x) while(2**bits < amt): bits += 1 # print("amount: "+str(amt)+" bits "+str(bits)); strs = "{" for i in range(amt): strs += "\"#define " + x[i] + "\\n\"," v = {} v["set_mask"] = 
"uint64_t(" + str(i) + ")<<" + str(bitofs) v["clear_mask"] = "((uint64_t(1)<<40)-1) ^ (((uint64_t(1)<<" + str(bits) + ") - 1)<<" + str(bitofs) + ")" enum_vals.append(v) enum_constants.append(x[i]) strs += "NULL}" fd.write("\t\t\t{(uint64_t(1<<" + str(bits) + ")-1)<<" + str(bitofs) + "," + str(bitofs) + "," + strs + "},\n") bitofs += bits fd.write("\t\t};\n\n") fd.write("\t\tstatic const EnumValue _enum_values[]={\n") enum_value_count = len(enum_vals) for x in enum_vals: fd.write("\t\t\t{" + x["set_mask"] + "," + x["clear_mask"] + "},\n") fd.write("\t\t};\n\n") else: fd.write("\t\tstatic const Enum *_enums=NULL;\n") fd.write("\t\tstatic const EnumValue *_enum_values=NULL;\n") conditionals_found = [] if (len(header_data.conditionals)): fd.write("\t\tstatic const char* _conditional_strings[]={\n") if (len(header_data.conditionals)): for x in header_data.conditionals: fd.write("\t\t\t\"#define " + x + "\\n\",\n") conditionals_found.append(x) fd.write("\t\t};\n\n") else: fd.write("\t\tstatic const char **_conditional_strings=NULL;\n") if (len(header_data.uniforms)): fd.write("\t\tstatic const char* _uniform_strings[]={\n") if (len(header_data.uniforms)): for x in header_data.uniforms: fd.write("\t\t\t\"" + x + "\",\n") fd.write("\t\t};\n\n") else: fd.write("\t\tstatic const char **_uniform_strings=NULL;\n") if output_attribs: if (len(header_data.attributes)): fd.write("\t\tstatic AttributePair _attribute_pairs[]={\n") for x in header_data.attributes: fd.write("\t\t\t{\"" + x[0] + "\"," + x[1] + "},\n") fd.write("\t\t};\n\n") else: fd.write("\t\tstatic AttributePair *_attribute_pairs=NULL;\n") feedback_count = 0 if (len(header_data.feedbacks)): fd.write("\t\tstatic const Feedback _feedbacks[]={\n") for x in header_data.feedbacks: name = x[0] cond = x[1] if (cond in conditionals_found): fd.write("\t\t\t{\"" + name + "\"," + str(conditionals_found.index(cond)) + "},\n") else: fd.write("\t\t\t{\"" + name + "\",-1},\n") feedback_count += 1 fd.write("\t\t};\n\n") else: fd.write("\t\tstatic const Feedback* _feedbacks=NULL;\n") if (len(header_data.texunits)): fd.write("\t\tstatic TexUnitPair _texunit_pairs[]={\n") for x in header_data.texunits: fd.write("\t\t\t{\"" + x[0] + "\"," + x[1] + "},\n") fd.write("\t\t};\n\n") else: fd.write("\t\tstatic TexUnitPair *_texunit_pairs=NULL;\n") if (len(header_data.ubos)): fd.write("\t\tstatic UBOPair _ubo_pairs[]={\n") for x in header_data.ubos: fd.write("\t\t\t{\"" + x[0] + "\"," + x[1] + "},\n") fd.write("\t\t};\n\n") else: fd.write("\t\tstatic UBOPair *_ubo_pairs=NULL;\n") fd.write("\t\tstatic const char _vertex_code[]={\n") for x in header_data.vertex_lines: for i in range(len(x)): fd.write(str(ord(x[i])) + ",") fd.write(str(ord('\n')) + ",") fd.write("\t\t0};\n\n") fd.write("\t\tstatic const int _vertex_code_start=" + str(header_data.vertex_offset) + ";\n") fd.write("\t\tstatic const char _fragment_code[]={\n") for x in header_data.fragment_lines: for i in range(len(x)): fd.write(str(ord(x[i])) + ",") fd.write(str(ord('\n')) + ",") fd.write("\t\t0};\n\n") fd.write("\t\tstatic const int _fragment_code_start=" + str(header_data.fragment_offset) + ";\n") if output_attribs: fd.write("\t\tsetup(_conditional_strings," + str(len(header_data.conditionals)) + ",_uniform_strings," + str(len(header_data.uniforms)) + ",_attribute_pairs," + str(len(header_data.attributes)) + ", _texunit_pairs," + str(len(header_data.texunits)) + ",_ubo_pairs," + str(len(header_data.ubos)) + ",_feedbacks," + str(feedback_count) + 
",_vertex_code,_fragment_code,_vertex_code_start,_fragment_code_start);\n") else: fd.write("\t\tsetup(_conditional_strings," + str(len(header_data.conditionals)) + ",_uniform_strings," + str(len(header_data.uniforms)) + ",_texunit_pairs," + str(len(header_data.texunits)) + ",_enums," + str(len(header_data.enums)) + ",_enum_values," + str(enum_value_count) + ",_ubo_pairs," + str(len(header_data.ubos)) + ",_feedbacks," + str(feedback_count) + ",_vertex_code,_fragment_code,_vertex_code_start,_fragment_code_start);\n") fd.write("\t};\n\n") if (len(enum_constants)): fd.write("\tenum EnumConditionals {\n") for x in enum_constants: fd.write("\t\t" + x.upper() + ",\n") fd.write("\t};\n\n") fd.write("\tvoid set_enum_conditional(EnumConditionals p_cond) { _set_enum_conditional(p_cond); }\n") fd.write("};\n\n") fd.write("#endif\n\n") fd.close() def build_legacygl_headers(target, source, env): for x in source: build_legacygl_header(str(x), include="drivers/legacygl/shader_lgl.h", class_suffix="LGL", output_attribs=False) return 0 def build_gles2_headers(target, source, env): for x in source: build_legacygl_header(str(x), include="drivers/gles2/shader_gles2.h", class_suffix="GLES2", output_attribs=True) def build_gles3_headers(target, source, env): for x in source: build_legacygl_header(str(x), include="drivers/gles3/shader_gles3.h", class_suffix="GLES3", output_attribs=True) def update_version(): rev = "custom_build" if (os.getenv("BUILD_REVISION") != None): rev = os.getenv("BUILD_REVISION") print("Using custom revision: " + rev) import version f = open("core/version.h", "wb") f.write("#define VERSION_SHORT_NAME " + str(version.short_name) + "\n") f.write("#define VERSION_NAME " + str(version.name) + "\n") f.write("#define VERSION_MAJOR " + str(version.major) + "\n") f.write("#define VERSION_MINOR " + str(version.minor) + "\n") if (hasattr(version, 'patch')): f.write("#define VERSION_PATCH " + str(version.patch) + "\n") f.write("#define VERSION_REVISION " + str(rev) + "\n") f.write("#define VERSION_STATUS " + str(version.status) + "\n") import datetime f.write("#define VERSION_YEAR " + str(datetime.datetime.now().year) + "\n") def parse_cg_file(fname, uniforms, sizes, conditionals): import re fs = open(fname, "r") line = fs.readline() while line: if re.match(r"^\s*uniform", line): res = re.match(r"uniform ([\d\w]*) ([\d\w]*)") type = res.groups(1) name = res.groups(2) uniforms.append(name) if (type.find("texobj") != -1): sizes.append(1) else: t = re.match(r"float(\d)x(\d)", type) if t: sizes.append(int(t.groups(1)) * int(t.groups(2))) else: t = re.match(r"float(\d)", type) sizes.append(int(t.groups(1))) if line.find("[branch]") != -1: conditionals.append(name) line = fs.readline() def build_cg_shader(sname): vp_uniforms = [] vp_uniform_sizes = [] vp_conditionals = [] parse_cg_file("vp_" + sname + ".cg", vp_uniforms, vp_uniform_sizes, vp_conditionals) fp_uniforms = [] fp_uniform_sizes = [] fp_conditionals = [] parse_cg_file("fp_" + sname + ".cg", fp_uniforms, fp_uniform_sizes, fp_conditionals) fd = open("shader_" + sname + ".cg.h", "w") fd.write('\n#include "shader_cell.h"\n') fd.write("\nclass Shader_" + sname + " : public ShaderCell {\n") fd.write("\n\tstatic struct VertexUniforms[] = {\n") offset = 0 for i in range(0, len(vp_uniforms)): fd.write('\t\t{ "%s", %d, %d },\n' % (vp_uniforms[i], offset, vp_uniform_sizes[i])) offset = offset + vp_uniform_sizes[i] fd.write("\t};\n\n") fd.write("public:\n\n") fd.write("\tenum {\n") for i in range(0, len(vp_uniforms)): fd.write('\t\tVP_%s,\n' % 
vp_uniforms[i].upper()) fd.write("\t};\n") import glob def detect_modules(): module_list = [] includes_cpp = "" register_cpp = "" unregister_cpp = "" files = glob.glob("modules/*") files.sort() # so register_module_types does not change that often, and also plugins are registered in alphabetic order for x in files: if (not os.path.isdir(x)): continue x = x.replace("modules/", "") # rest of world x = x.replace("modules\\", "") # win32 module_list.append(x) try: with open("modules/" + x + "/register_types.h"): includes_cpp += '#include "modules/' + x + '/register_types.h"\n' register_cpp += '#ifdef MODULE_' + x.upper() + '_ENABLED\n' register_cpp += '\tregister_' + x + '_types();\n' register_cpp += '#endif\n' unregister_cpp += '#ifdef MODULE_' + x.upper() + '_ENABLED\n' unregister_cpp += '\tunregister_' + x + '_types();\n' unregister_cpp += '#endif\n' except IOError: pass modules_cpp = """ // modules.cpp - THIS FILE IS GENERATED, DO NOT EDIT!!!!!!! #include "register_module_types.h" """ + includes_cpp + """ void register_module_types() { """ + register_cpp + """ } void unregister_module_types() { """ + unregister_cpp + """ } """ f = open("modules/register_module_types.cpp", "wb") f.write(modules_cpp) return module_list def win32_spawn(sh, escape, cmd, args, env): import subprocess newargs = ' '.join(args[1:]) cmdline = cmd + " " + newargs startupinfo = subprocess.STARTUPINFO() #startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW for e in env: if type(env[e]) != type(""): env[e] = str(env[e]) proc = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, startupinfo=startupinfo, shell=False, env=env) data, err = proc.communicate() rv = proc.wait() if rv: print "=====" print err print "=====" return rv """ def win32_spawn(sh, escape, cmd, args, spawnenv): import win32file import win32event import win32process import win32security for var in spawnenv: spawnenv[var] = spawnenv[var].encode('ascii', 'replace') sAttrs = win32security.SECURITY_ATTRIBUTES() StartupInfo = win32process.STARTUPINFO() newargs = ' '.join(map(escape, args[1:])) cmdline = cmd + " " + newargs # check for any special operating system commands if cmd == 'del': for arg in args[1:]: win32file.DeleteFile(arg) exit_code = 0 else: # otherwise execute the command. 
hProcess, hThread, dwPid, dwTid = win32process.CreateProcess(None, cmdline, None, None, 1, 0, spawnenv, None, StartupInfo) win32event.WaitForSingleObject(hProcess, win32event.INFINITE) exit_code = win32process.GetExitCodeProcess(hProcess) win32file.CloseHandle(hProcess); win32file.CloseHandle(hThread); return exit_code """ def android_add_maven_repository(self, url): self.android_maven_repos.append(url) def android_add_dependency(self, depline): self.android_dependencies.append(depline) def android_add_java_dir(self, subpath): base_path = self.Dir(".").abspath + "/modules/" + self.current_module + "/" + subpath self.android_java_dirs.append(base_path) def android_add_res_dir(self, subpath): base_path = self.Dir(".").abspath + "/modules/" + self.current_module + "/" + subpath self.android_res_dirs.append(base_path) def android_add_aidl_dir(self, subpath): base_path = self.Dir(".").abspath + "/modules/" + self.current_module + "/" + subpath self.android_aidl_dirs.append(base_path) def android_add_jni_dir(self, subpath): base_path = self.Dir(".").abspath + "/modules/" + self.current_module + "/" + subpath self.android_jni_dirs.append(base_path) def android_add_gradle_plugin(self, plugin): self.android_gradle_plugins.append(plugin) def android_add_gradle_classpath(self, classpath): self.android_gradle_classpath.append(classpath) def android_add_default_config(self, config): self.android_default_config.append(config) def android_add_to_manifest(self, file): base_path = self.Dir(".").abspath + "/modules/" + self.current_module + "/" + file f = open(base_path, "rb") self.android_manifest_chunk += f.read() def android_add_to_permissions(self, file): base_path = self.Dir(".").abspath + "/modules/" + self.current_module + "/" + file f = open(base_path, "rb") self.android_permission_chunk += f.read() def android_add_to_attributes(self, file): base_path = self.Dir(".").abspath + "/modules/" + self.current_module + "/" + file f = open(base_path, "rb") self.android_appattributes_chunk += f.read() def disable_module(self): self.disabled_modules.append(self.current_module) def use_windows_spawn_fix(self, platform=None): if (os.name != "nt"): return # not needed, only for windows # On Windows, due to the limited command line length, when creating a static library # from a very high number of objects SCons will invoke "ar" once per object file; # that makes object files with same names to be overwritten so the last wins and # the library looses symbols defined by overwritten objects. # By enabling quick append instead of the default mode (replacing), libraries will # got built correctly regardless the invokation strategy. # Furthermore, since SCons will rebuild the library from scratch when an object file # changes, no multiple versions of the same object file will be present. 
self.Replace(ARFLAGS='q') import subprocess def mySubProcess(cmdline, env): prefix = "" if(platform == 'javascript'): prefix = "python.exe " startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW proc = subprocess.Popen(prefix + cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, startupinfo=startupinfo, shell=False, env=env) data, err = proc.communicate() rv = proc.wait() if rv: print "=====" print err print "=====" return rv def mySpawn(sh, escape, cmd, args, env): newargs = ' '.join(args[1:]) cmdline = cmd + " " + newargs rv = 0 env = {str(key): str(value) for key, value in env.iteritems()} if len(cmdline) > 32000 and cmd.endswith("ar"): cmdline = cmd + " " + args[1] + " " + args[2] + " " for i in range(3, len(args)): rv = mySubProcess(cmdline + args[i], env) if rv: break else: rv = mySubProcess(cmdline, env) return rv self['SPAWN'] = mySpawn def split_lib(self, libname): import string env = self num = 0 cur_base = "" max_src = 64 list = [] lib_list = [] for f in getattr(env, libname + "_sources"): fname = "" if type(f) == type(""): fname = env.File(f).path else: fname = env.File(f)[0].path fname = fname.replace("\\", "/") base = string.join(fname.split("/")[:2], "/") if base != cur_base and len(list) > max_src: if num > 0: lib = env.Library(libname + str(num), list) lib_list.append(lib) list = [] num = num + 1 cur_base = base list.append(f) lib = env.Library(libname + str(num), list) lib_list.append(lib) if len(lib_list) > 0: import os, sys if os.name == 'posix' and sys.platform == 'msys': env.Replace(ARFLAGS=['rcsT']) lib = env.Library(libname + "_collated", lib_list) lib_list = [lib] lib_base = [] env.add_source_files(lib_base, "*.cpp") lib_list.insert(0, env.Library(libname, lib_base)) env.Prepend(LIBS=lib_list) def save_active_platforms(apnames, ap): for x in ap: pth = x + "/logo.png" # print("open path: "+pth) pngf = open(pth, "rb") b = pngf.read(1) str = " /* AUTOGENERATED FILE, DO NOT EDIT */ \n" str += " static const unsigned char _" + x[9:] + "_logo[]={" while(len(b) == 1): str += hex(ord(b)) b = pngf.read(1) if (len(b) == 1): str += "," str += "};\n" wf = x + "/logo.h" logow = open(wf, "wb") logow.write(str) def no_verbose(sys, env): # If the output is not a terminal, do nothing if not sys.stdout.isatty(): return colors = {} colors['cyan'] = '\033[96m' colors['purple'] = '\033[95m' colors['blue'] = '\033[94m' colors['green'] = '\033[92m' colors['yellow'] = '\033[93m' colors['red'] = '\033[91m' colors['end'] = '\033[0m' compile_source_message = '%sCompiling %s==> %s$SOURCE%s' % (colors['blue'], colors['purple'], colors['yellow'], colors['end']) java_compile_source_message = '%sCompiling %s==> %s$SOURCE%s' % (colors['blue'], colors['purple'], colors['yellow'], colors['end']) compile_shared_source_message = '%sCompiling shared %s==> %s$SOURCE%s' % (colors['blue'], colors['purple'], colors['yellow'], colors['end']) link_program_message = '%sLinking Program %s==> %s$TARGET%s' % (colors['red'], colors['purple'], colors['yellow'], colors['end']) link_library_message = '%sLinking Static Library %s==> %s$TARGET%s' % (colors['red'], colors['purple'], colors['yellow'], colors['end']) ranlib_library_message = '%sRanlib Library %s==> %s$TARGET%s' % (colors['red'], colors['purple'], colors['yellow'], colors['end']) link_shared_library_message = '%sLinking Shared Library %s==> %s$TARGET%s' % (colors['red'], colors['purple'], colors['yellow'], colors['end']) java_library_message = '%sCreating Java Archive %s==> %s$TARGET%s' % 
(colors['red'], colors['purple'], colors['yellow'], colors['end']) env.Append(CXXCOMSTR=[compile_source_message]) env.Append(CCCOMSTR=[compile_source_message]) env.Append(SHCCCOMSTR=[compile_shared_source_message]) env.Append(SHCXXCOMSTR=[compile_shared_source_message]) env.Append(ARCOMSTR=[link_library_message]) env.Append(RANLIBCOMSTR=[ranlib_library_message]) env.Append(SHLINKCOMSTR=[link_shared_library_message]) env.Append(LINKCOMSTR=[link_program_message]) env.Append(JARCOMSTR=[java_library_message]) env.Append(JAVACCOMSTR=[java_compile_source_message]) def detect_visual_c_compiler_version(tools_env): # tools_env is the variable scons uses to call tools that execute tasks, SCons's env['ENV'] that executes tasks... # (see the SCons documentation for more information on what it does)... # in order for this function to be well encapsulated i choose to force it to receive SCons's TOOLS env (env['ENV'] # and not scons setup environment (env)... so make sure you call the right environment on it or it will fail to detect # the proper vc version that will be called # These is no flag to give to visual c compilers to set the architecture, ie scons bits argument (32,64,ARM etc) # There are many different cl.exe files that are run, and each one compiles & links to a different architecture # As far as I know, the only way to figure out what compiler will be run when Scons calls cl.exe via Program() # is to check the PATH varaible and figure out which one will be called first. Code bellow does that and returns: # the following string values: # "" Compiler not detected # "amd64" Native 64 bit compiler # "amd64_x86" 64 bit Cross Compiler for 32 bit # "x86" Native 32 bit compiler # "x86_amd64" 32 bit Cross Compiler for 64 bit # There are other architectures, but Godot does not support them currently, so this function does not detect arm/amd64_arm # and similar architectures/compilers # Set chosen compiler to "not detected" vc_chosen_compiler_index = -1 vc_chosen_compiler_str = "" # find() works with -1 so big ifs bellow are needed... 
the simplest solution, in fact # First test if amd64 and amd64_x86 compilers are present in the path vc_amd64_compiler_detection_index = tools_env["PATH"].find(tools_env["VCINSTALLDIR"] + "BIN\\amd64;") if(vc_amd64_compiler_detection_index > -1): vc_chosen_compiler_index = vc_amd64_compiler_detection_index vc_chosen_compiler_str = "amd64" vc_amd64_x86_compiler_detection_index = tools_env["PATH"].find(tools_env["VCINSTALLDIR"] + "BIN\\amd64_x86;") if(vc_amd64_x86_compiler_detection_index > -1 and (vc_chosen_compiler_index == -1 or vc_chosen_compiler_index > vc_amd64_x86_compiler_detection_index)): vc_chosen_compiler_index = vc_amd64_x86_compiler_detection_index vc_chosen_compiler_str = "amd64_x86" # Now check the 32 bit compilers vc_x86_compiler_detection_index = tools_env["PATH"].find(tools_env["VCINSTALLDIR"] + "BIN;") if(vc_x86_compiler_detection_index > -1 and (vc_chosen_compiler_index == -1 or vc_chosen_compiler_index > vc_x86_compiler_detection_index)): vc_chosen_compiler_index = vc_x86_compiler_detection_index vc_chosen_compiler_str = "x86" vc_x86_amd64_compiler_detection_index = tools_env["PATH"].find(tools_env['VCINSTALLDIR'] + "BIN\\x86_amd64;") if(vc_x86_amd64_compiler_detection_index > -1 and (vc_chosen_compiler_index == -1 or vc_chosen_compiler_index > vc_x86_amd64_compiler_detection_index)): vc_chosen_compiler_index = vc_x86_amd64_compiler_detection_index vc_chosen_compiler_str = "x86_amd64" # debug help # print vc_amd64_compiler_detection_index # print vc_amd64_x86_compiler_detection_index # print vc_x86_compiler_detection_index # print vc_x86_amd64_compiler_detection_index # print "chosen "+str(vc_chosen_compiler_index)+ " | "+str(vc_chosen_compiler_str) return vc_chosen_compiler_str def precious_program(env, program, sources, **args): program = env.ProgramOriginal(program, sources, **args) env.Precious(program) return program
[]
[]
[ "BUILD_REVISION" ]
[]
["BUILD_REVISION"]
python
1
0
snxconnect.py
#!/usr/bin/python from __future__ import print_function, unicode_literals import os import os.path import sys import socket try : import httplib import urllib2 import ssl from urllib2 import build_opener, HTTPCookieProcessor, Request from urllib import urlencode from httplib import IncompleteRead rsatype = long except ImportError : from urllib.request import build_opener, HTTPCookieProcessor, Request from urllib.parse import urlencode from http.client import IncompleteRead rsatype = int try : from cookielib import LWPCookieJar except ImportError : from http.cookiejar import LWPCookieJar from bs4 import BeautifulSoup from getpass import getpass from argparse import ArgumentParser from netrc import netrc, NetrcParseError from Crypto.PublicKey import RSA from struct import pack, unpack from subprocess import Popen, PIPE from snxvpnversion import VERSION """ Todo: - timeout can be retrieved at /sslvpn/Portal/LoggedIn Function to do this is RetrieveTimeoutVal (url_above) in portal This seems to be in seconds. But my portal always displays "nNextTimeout = 6;" content-type is text/javascript. - We may want to get the RSA parameters from the javascript in the received html, RSA pubkey will probably be different for different deployments. - Log debug logs to syslog """ if sys.version_info >= (3,) : def b_ord (x) : return x else : def b_ord (x) : return ord (x) def iterbytes (x) : """ Compatibility with python3: Iterating over bytes returns int. Adding insult to injury calling bytes (23) will return a bytes object with length 23 filled with b'\0'. So we do this. Note that we will have to flatten iterators like the one resulting from a call to reversed. >>> a = [] >>> for k in iterbytes (b'abcdef') : ... a.append (k) >>> for k in iterbytes (reversed (b'abcdef')) : ... a.append (k) >>> print (repr (b''.join (a)).lstrip ('b')) 'abcdeffedcba' """ if sys.version_info >= (3,) : x = bytes (x) else : x = b''.join (x) for i in range (len (x)) : yield (x [i:i+1]) # end def iterbytes class VerifiedHTTPSConnection(httplib.HTTPSConnection): def connect(self): # overrides the version in httplib so that we can fiddle with cert options sock = socket.create_connection((self.host, self.port), self.timeout) if self._tunnel_host: self.sock = sock self._tunnel() self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, cert_reqs=ssl.CERT_NONE ) #cert_reqs=ssl.CERT_REQUIRED, #ca_certs="PATH_TO_VALID_CA_CRT") # wraps https connections with ssl certificate verification class VerifiedHTTPSHandler(urllib2.HTTPSHandler): def __init__(self, connection_class = VerifiedHTTPSConnection): self.specialized_conn_class = connection_class urllib2.HTTPSHandler.__init__(self) def https_open(self, req): return self.do_open(self.specialized_conn_class, req) class HTML_Requester (object) : def __init__ (self, args) : self.modulus = None self.exponent = None self.args = args self.jar = j = LWPCookieJar () self.has_cookies = False if self.args.cookiefile : self.has_cookies = True try : j.load (self.args.cookiefile, ignore_discard = True) except IOError : self.has_cookies = False https_handler = VerifiedHTTPSHandler() self.opener = build_opener (https_handler, HTTPCookieProcessor (j)) self.nextfile = args.file # end def __init__ def call_snx (self) : """ The snx binary usually lives in the default snxpath and is setuid root. We call it with the undocumented '-Z' option. When everything is well it forks a subprocess and exists (daemonize). If an error occurs before forking we get the result back on one of the file descriptors. 
If everything goes well, the forked snx process opens a port on localhost:7776 (I've found no way this can be configured) and waits for us to pass the binary-encoded parameters via this socket. It later sends back an answer. It seems to keep the socket open, so we do another read to wait for snx to terminate. """ sp = self.args.snxpath snx = Popen ([sp, '-Z'], stdin = PIPE, stdout = PIPE, stderr = PIPE) stdout, stderr = snx.communicate ('') rc = snx.returncode if rc != 0 : print ("SNX terminated with error: %d %s%s" % (rc, stdout, stderr)) sock = socket.socket (socket.AF_INET, socket.SOCK_STREAM) sock.connect (("127.0.0.1", 7776)) sock.sendall (self.snx_info) answer = sock.recv (4096) if self.args.debug : f = open ('snxanswer', 'wb') f.write (answer) f.close () print ("SNX connected, to leave VPN open, leave this running!") answer = sock.recv (4096) # should block until snx dies # end def call_snx def debug (self, s) : if self.args.debug : print (s) # end def debug def generate_snx_info (self) : """ Communication with SNX (originally by the java framework) is done via an undocumented binary format. We try to reproduce this here. We asume native byte-order but we don't know if snx binaries exist for other architectures with a different byte-order. """ magic = b'\x13\x11\x00\x00' length = 0x3d0 gw_ip = socket.gethostbyname(self.extender_vars ['host_name']) gw_int = unpack("!I", socket.inet_aton(gw_ip))[0] fmt = b'=4sLL64sL6s256s256s128s256sH' info = pack \ ( fmt , magic , length , gw_int , self.extender_vars ['host_name'] , int (self.extender_vars ['port']) , b'' , self.extender_vars ['server_cn'] , self.extender_vars ['user_name'] , self.extender_vars ['password'] , self.extender_vars ['server_fingerprint'] , 1 # ??? ) assert len (info) == length + 8 # magic + length self.snx_info = info # end def generate_snx_info def login (self) : if self.has_cookies : self.debug ("has cookie") self.nextfile = 'Portal/Main' self.open () self.debug (self.purl) if self.purl.endswith ('Portal/Main') : self.open (self.args.extender) self.parse_extender () self.generate_snx_info () return True else : # Forget Cookies, otherwise we get a 400 bad request later self.jar.clear () self.next_file (self.purl) self.debug (self.nextfile) self.open () self.debug (self.purl) # Get the RSA parameters from the javascript in the received html for script in self.soup.find_all ('script') : if 'RSA' in script.attrs.get ('src', '') : self.next_file (script ['src']) self.debug (self.nextfile) break else : print ('No RSA javascript file found, cannot login') return self.open (do_soup = False) self.parse_rsa_params () if not self.modulus : # Error message already given in parse_rsa_params return for form in self.soup.find_all ('form') : if 'id' in form.attrs and form ['id'] == 'loginForm' : self.next_file (form ['action']) assert form ['method'] == 'post' break self.debug (self.nextfile) enc = PW_Encode (modulus = self.modulus, exponent = self.exponent) d = dict \ ( password = enc.encrypt (self.args.password) , userName = self.args.username , selectedRealm = self.args.realm , loginType = self.args.login_type , vpid_prefix = self.args.vpid_prefix , HeightData = self.args.height_data ) self.open (data = urlencode (d)) self.debug (self.purl) self.debug (self.info) if self.args.multi_challenge : while 'MultiChallenge' in self.purl : d = self.parse_pw_response () otp = getpass ('One-time Password: ') d ['password'] = enc.encrypt (otp) self.debug ("nextfile: %s" % self.nextfile) self.debug ("purl: %s" % self.purl) self.open (data = 
urlencode (d)) self.debug ("info: %s" % self.info) if self.purl.endswith ('Login/ActivateLogin') : if self.args.save_cookies : self.jar.save (self.args.cookiefile, ignore_discard = True) self.debug ("purl: %s" % self.purl) self.open('sslvpn/Login/ActivateLogin?ActivateLogin=activate&LangSelect=en_US&submit=Continue&HeightData=') if self.purl.endswith ('Portal/Main') : if self.args.save_cookies : self.jar.save (self.args.cookiefile, ignore_discard = True) self.debug ("purl: %s" % self.purl) self.open (self.args.extender) self.debug (self.purl) self.debug (self.info) self.parse_extender () self.generate_snx_info () return True else : print ("Unexpected response, try again.") self.debug ("purl: %s" % self.purl) return # end def login def next_file (self, fname) : if fname.startswith ('/') : self.nextfile = fname.lstrip ('/') elif fname.startswith ('http') : self.nextfile = fname.split ('/', 3)[-1] else : dir = self.nextfile.split ('/') dir = dir [:-1] fn = fname.split ('/') self.nextfile = '/'.join (dir + fn) # We might try to remove '..' elements in the future # end def next_file def open (self, filepart = None, data = None, do_soup = True) : filepart = filepart or self.nextfile url = '/'.join (('%s:/' % self.args.protocol, self.args.host, filepart)) if data : data = data.encode ('ascii') rq = Request (url, data) self.f = f = self.opener.open (rq, timeout = 10) if do_soup : # Sometimes we get incomplete read. So we read everything # the server sent us and hope this is ok. Note: This means # we cannot pass the file to BeautifulSoup but need to read # everything here. try: page = f.read () except IncompleteRead as e: page = e.partial self.soup = BeautifulSoup (page, "lxml") self.purl = f.geturl () self.info = f.info () # end def open def parse_extender (self) : """ The SNX extender page contains the necessary credentials for connecting the VPN. This information then passed to the snx program via a socket. """ for script in self.soup.find_all ('script') : if '/* Extender.user_name' in script.text : break else : print ("Error retrieving extender variables") return for line in script.text.split ('\n') : if '/* Extender.user_name' in line : break stmts = line.split (';') vars = {} for stmt in stmts : try : lhs, rhs = stmt.split ('=') except ValueError : break try : lhs = lhs.split ('.', 1)[1].strip () except IndexError : continue rhs = rhs.strip ().strip ('"') vars [lhs] = rhs.encode ('utf-8') self.extender_vars = vars # end def parse_extender def parse_pw_response (self) : """ The password response contains another form where the one-time password (in our case received via a message to the phone) must be entered. 
""" for form in self.soup.find_all ('form') : if 'name' in form.attrs and form ['name'] == 'MCForm' : self.next_file (form ['action']) assert form ['method'] == 'post' break d = {} for input in form.find_all ('input') : if input.attrs.get ('type') == 'password' : continue if 'name' not in input.attrs : continue if input ['name'] in ('password', 'btnCancel') : continue d [input ['name']] = input.attrs.get ('value', '') return d # end def parse_pw_response def parse_rsa_params (self) : keys = ('modulus', 'exponent') vars = {} for line in self.f : line = line.decode ('utf-8') for k in keys : if 'var %s' % k in line : val = line.strip ().rstrip (';') val = val.split ('=', 1) [-1] val = val.strip ().strip ("'") vars [k] = val break if len (vars) == 2 : break else : print ('No RSA parameters found, cannot login') return self.debug (repr (vars)) self.modulus = rsatype (vars ['modulus'], 16) self.exponent = rsatype (vars ['exponent'], 16) # end def parse_rsa_params # end class HTML_Requester class PW_Encode (object) : """ RSA encryption module with special padding and reversing to be compatible with checkpoints implementation. Test with non-random padding to get known value: >>> p = PW_Encode (testing = True) >>> print (p.encrypt ('xyzzy')) 451c2d5b491ee22d6f7cdc5a20f320914668f8e01337625dfb7e0917b16750cfbafe38bfcb68824b30d5cc558fa1c6d542ff12ac8e1085b7a9040f624ab39f625cabd77d1d024c111e42fede782e089400d2c9b1d6987c0005698178222e8500243f12762bebba841eae331d17b290f80bca6c3f8a49522fb926646c24db3627 >>> print (p.encrypt ('XYZZYxyzzyXYZZYxyzzy')) a529e86cf80dd131e3bdae1f6dbab76f67f674e42041dde801ebdb790ab0637d56cc82f52587f2d4d34d26c490eee3a1ebfd80df18ec41c4440370b1ecb2dec3f811e09d2248635dd8aab60a97293ec0315a70bf024b33e8a8a02582fbabc98dd72d913530151e78b47119924f45b711b9a1189d5eec5a20e6f9bc1d44bfd554 """ def __init__ (self, modulus = None, exponent = None, testing = False) : m = rsatype \ ( b'c87e9e96ffde3ec47c3f116ea5ac0e15' b'34490b3da6dbbedae1af50dc32bf1012' b'bdb7e1ff67237e0302b48c8731f343ff' b'644662de2bb21d2b033127660e525d58' b'889f8f6f05744906dddc8f4b85e0916b' b'5d9cf5b87093ed260238674f143801b7' b'e58a18795adc9acefaf0f378326fea19' b'9ac6e5a88be83a52d4a77b3bba5f1aed' , 16 ) e = rsatype (b'010001', 16) m = modulus or m e = exponent or e self.pubkey = RSA.construct ((m, e)) self.testing = testing # end def __init__ def pad (self, txt) : l = (self.pubkey.size () + 7) >> 3 r = [] r.append (b'\0') # Note that first reversing and then encoding to utf-8 would # *not* be correct! 
for x in iterbytes (reversed (txt.encode ('utf-8'))) : r.append (x) r.append (b'\0') n = l - len (r) - 2 if self.testing : r.append (b'\1' * n) else : r.append (os.urandom (n)) r.append (b'\x02') r.append (b'\x00') return b''.join (reversed (r)) # end def pad def encrypt (self, password) : x = self.pad (password) e = self.pubkey.encrypt (x, '')[0] e = ''.join ('%02x' % b_ord (c) for c in reversed (e)) return e # end def encrypt # end class PW_Encode def main () : # First try to parse config-file ~/.snxvpnrc: home = os.environ.get ('HOME') cfgf = None if home : try : cfgf = open (os.path.join (home, '.snxvpnrc'), 'rb') except (OSError, IOError) : pass cfg = {} boolopts = ['debug', 'save_cookies', 'multi_challenge'] if cfgf : for line in cfgf : line = line.strip ().decode ('utf-8') if line.startswith ('#') : continue k, v = line.split (None, 1) if k == 'server' : k = 'host' k = k.replace ('-', '_') if k in boolopts : v = (v.lower () in ('true', 'yes')) cfg [k] = v host = cfg.get ('host', '') cookiefile = cfg.get ('cookiefile', '%s/.snxcookies' % home) cmd = ArgumentParser () cmd.add_argument \ ( '-c', '--cookiefile' , help = 'Specify cookiefile to save and attempt reconnect' ' default="%(default)s"' , default = cookiefile ) cmd.add_argument \ ( '-D', '--debug' , help = 'Debug handshake' , action = 'store_true' , default = cfg.get ('debug', None) ) cmd.add_argument \ ( '-F', '--file' , help = 'File part of URL default="%(default)s"' , default = cfg.get ('file', 'sslvpn/Login/Login') ) cmd.add_argument \ ( '-E', '--extender' , help = 'File part of URL default="%(default)s"' , default = cfg.get ('extender', 'sslvpn/SNX/extender') ) cmd.add_argument \ ( '-H', '--host' , help = 'Host part of URL default="%(default)s"' , default = host , required = not host ) cmd.add_argument \ ( '--height-data' , help = 'Height data in form, default "%(default)s"' , default = cfg.get ('height_data', '') ) cmd.add_argument \ ( '-L', '--login-type' , help = 'Login type, default="%(default)s"' , default = cfg.get ('login_type', 'Standard') ) cmd.add_argument \ ( '-MC', '--multi-challenge' , help = 'MultiChallenge, default="%(default)s"' , default = cfg.get ('multi_challenge', False) ) cmd.add_argument \ ( '-P', '--password' , help = 'Login password, not a good idea to specify on commandline' , default = cfg.get ('password', None) ) cmd.add_argument \ ( '-p', '--protocol' , help = 'http or https, should *always* be https except for tests' , default = cfg.get ('protocol', 'https') ) cmd.add_argument \ ( '-R', '--realm' , help = 'Selected realm, default="%(default)s"' , default = cfg.get ('realm', 'ssl_vpn') ) cmd.add_argument \ ( '-s', '--save-cookies' , help = 'Save cookies to %(cookiefile)s, might be a security risk,' ' default is off' % locals () , action = 'store_true' , default = cfg.get ('save_cookies', False) ) cmd.add_argument \ ( '-S', '--snxpath' , help = 'snx binary to call, default="%(default)s", you might' ' want a full path here' , default = cfg.get ('snxpath', 'snx') ) cmd.add_argument \ ( '-U', '--username' , help = 'Login username, default="%(default)s"' , default = cfg.get ('username', '') ) cmd.add_argument \ ( '-V', '--vpid-prefix' , help = 'VPID prefix, default "%(default)s"' , default = cfg.get ('vpid_prefix', '') ) cmd.add_argument \ ( '--version' , help = 'Display version and exit' , action = 'store_true' ) args = cmd.parse_args () if args.version : print ("snxconnect version %s by Ralf Schlatterbeck" % VERSION) sys.exit (0) if not args.username or not args.password : n = a = None try : n = 
netrc () except (IOError, NetrcParseError) : pass if n : a = n.authenticators (args.host) if a : un, dummy, pw = a if not args.username : args.username = un if not args.password : args.password = pw if not args.password : args.password = getpass ('Password: ') rq = HTML_Requester (args) result = rq.login () if result : rq.call_snx () # end def main () if __name__ == '__main__' : main ()
[]
[]
[]
[]
[]
python
0
0
hospital/manage.py
#!/usr/bin/env python import os import sys if __name__ == '__main__': os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hospital.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv)
[]
[]
[]
[]
[]
python
0
0
fuse/ipns/ipns_unix.go
// +build !nofuse // package fuse/ipns implements a fuse filesystem that interfaces // with ipns, the naming system for ipfs. package ipns import ( "errors" "fmt" "os" fuse "github.com/ipfs/go-ipfs/Godeps/_workspace/src/bazil.org/fuse" fs "github.com/ipfs/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/fs" "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" key "github.com/ipfs/go-ipfs/blocks/key" core "github.com/ipfs/go-ipfs/core" dag "github.com/ipfs/go-ipfs/merkledag" mfs "github.com/ipfs/go-ipfs/mfs" path "github.com/ipfs/go-ipfs/path" ft "github.com/ipfs/go-ipfs/unixfs" ci "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/crypto" ) func init() { if os.Getenv("IPFS_FUSE_DEBUG") != "" { fuse.Debug = func(msg interface{}) { fmt.Println(msg) } } } var log = logging.Logger("fuse/ipns") // FileSystem is the readwrite IPNS Fuse Filesystem. type FileSystem struct { Ipfs *core.IpfsNode RootNode *Root } // NewFileSystem constructs new fs using given core.IpfsNode instance. func NewFileSystem(ipfs *core.IpfsNode, sk ci.PrivKey, ipfspath, ipnspath string) (*FileSystem, error) { kmap := map[string]ci.PrivKey{ "local": sk, } root, err := CreateRoot(ipfs, kmap, ipfspath, ipnspath) if err != nil { return nil, err } return &FileSystem{Ipfs: ipfs, RootNode: root}, nil } // Root constructs the Root of the filesystem, a Root object. func (f *FileSystem) Root() (fs.Node, error) { log.Debug("Filesystem, get root") return f.RootNode, nil } func (f *FileSystem) Destroy() { err := f.RootNode.Close() if err != nil { log.Errorf("Error Shutting Down Filesystem: %s\n", err) } } // Root is the root object of the filesystem tree. type Root struct { Ipfs *core.IpfsNode Keys map[string]ci.PrivKey // Used for symlinking into ipfs IpfsRoot string IpnsRoot string LocalDirs map[string]fs.Node Roots map[string]*keyRoot LocalLinks map[string]*Link } func ipnsPubFunc(ipfs *core.IpfsNode, k ci.PrivKey) mfs.PubFunc { return func(ctx context.Context, key key.Key) error { return ipfs.Namesys.Publish(ctx, k, path.FromKey(key)) } } func loadRoot(ctx context.Context, rt *keyRoot, ipfs *core.IpfsNode, name string) (fs.Node, error) { p, err := path.ParsePath("/ipns/" + name) if err != nil { log.Errorf("mkpath %s: %s", name, err) return nil, err } node, err := core.Resolve(ctx, ipfs, p) if err != nil { log.Errorf("looking up %s: %s", p, err) return nil, err } root, err := mfs.NewRoot(ctx, ipfs.DAG, node, ipnsPubFunc(ipfs, rt.k)) if err != nil { return nil, err } rt.root = root switch val := root.GetValue().(type) { case *mfs.Directory: return &Directory{dir: val}, nil case *mfs.File: return &FileNode{fi: val}, nil default: return nil, errors.New("unrecognized type") } panic("not reached") } type keyRoot struct { k ci.PrivKey alias string root *mfs.Root } func CreateRoot(ipfs *core.IpfsNode, keys map[string]ci.PrivKey, ipfspath, ipnspath string) (*Root, error) { ldirs := make(map[string]fs.Node) roots := make(map[string]*keyRoot) links := make(map[string]*Link) for alias, k := range keys { pkh, err := k.GetPublic().Hash() if err != nil { return nil, err } name := key.Key(pkh).B58String() kr := &keyRoot{k: k, alias: alias} fsn, err := loadRoot(ipfs.Context(), kr, ipfs, name) if err != nil { return nil, err } roots[name] = kr ldirs[name] = fsn // set up alias symlink links[alias] = &Link{ Target: name, } } return &Root{ Ipfs: ipfs, IpfsRoot: ipfspath, IpnsRoot: ipnspath, Keys: keys, LocalDirs: ldirs, LocalLinks: links, Roots: 
roots, }, nil } // Attr returns file attributes. func (*Root) Attr(ctx context.Context, a *fuse.Attr) error { log.Debug("Root Attr") a.Mode = os.ModeDir | 0111 // -rw+x return nil } // Lookup performs a lookup under this node. func (s *Root) Lookup(ctx context.Context, name string) (fs.Node, error) { switch name { case "mach_kernel", ".hidden", "._.": // Just quiet some log noise on OS X. return nil, fuse.ENOENT } if lnk, ok := s.LocalLinks[name]; ok { return lnk, nil } nd, ok := s.LocalDirs[name] if ok { switch nd := nd.(type) { case *Directory: return nd, nil case *FileNode: return nd, nil default: return nil, fuse.EIO } } // other links go through ipns resolution and are symlinked into the ipfs mountpoint resolved, err := s.Ipfs.Namesys.Resolve(s.Ipfs.Context(), name) if err != nil { log.Warningf("ipns: namesys resolve error: %s", err) return nil, fuse.ENOENT } segments := resolved.Segments() if segments[0] == "ipfs" { p := path.Join(resolved.Segments()[1:]) return &Link{s.IpfsRoot + "/" + p}, nil } log.Error("Invalid path.Path: ", resolved) return nil, errors.New("invalid path from ipns record") } func (r *Root) Close() error { for _, mr := range r.Roots { err := mr.root.Close() if err != nil { return err } } return nil } // Forget is called when the filesystem is unmounted. probably. // see comments here: http://godoc.org/bazil.org/fuse/fs#FSDestroyer func (r *Root) Forget() { err := r.Close() if err != nil { log.Error(err) } } // ReadDirAll reads a particular directory. Will show locally available keys // as well as a symlink to the peerID key func (r *Root) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { log.Debug("Root ReadDirAll") var listing []fuse.Dirent for alias, k := range r.Keys { pub := k.GetPublic() hash, err := pub.Hash() if err != nil { continue } ent := fuse.Dirent{ Name: key.Key(hash).B58String(), Type: fuse.DT_Dir, } link := fuse.Dirent{ Name: alias, Type: fuse.DT_Link, } listing = append(listing, ent, link) } return listing, nil } // Directory is wrapper over an mfs directory to satisfy the fuse fs interface type Directory struct { dir *mfs.Directory } type FileNode struct { fi *mfs.File } // File is wrapper over an mfs file to satisfy the fuse fs interface type File struct { fi mfs.FileDescriptor } // Attr returns the attributes of a given node. func (d *Directory) Attr(ctx context.Context, a *fuse.Attr) error { log.Debug("Directory Attr") a.Mode = os.ModeDir | 0555 a.Uid = uint32(os.Getuid()) a.Gid = uint32(os.Getgid()) return nil } // Attr returns the attributes of a given node. func (fi *FileNode) Attr(ctx context.Context, a *fuse.Attr) error { log.Debug("File Attr") size, err := fi.fi.Size() if err != nil { // In this case, the dag node in question may not be unixfs return fmt.Errorf("fuse/ipns: failed to get file.Size(): %s", err) } a.Mode = os.FileMode(0666) a.Size = uint64(size) a.Uid = uint32(os.Getuid()) a.Gid = uint32(os.Getgid()) return nil } // Lookup performs a lookup under this node. func (s *Directory) Lookup(ctx context.Context, name string) (fs.Node, error) { child, err := s.dir.Child(name) if err != nil { // todo: make this error more versatile. return nil, fuse.ENOENT } switch child := child.(type) { case *mfs.Directory: return &Directory{dir: child}, nil case *mfs.File: return &FileNode{fi: child}, nil default: // NB: if this happens, we do not want to continue, unpredictable behaviour // may occur. panic("invalid type found under directory. 
programmer error.") } } // ReadDirAll reads the link structure as directory entries func (dir *Directory) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { var entries []fuse.Dirent listing, err := dir.dir.List() if err != nil { return nil, err } for _, entry := range listing { dirent := fuse.Dirent{Name: entry.Name} switch mfs.NodeType(entry.Type) { case mfs.TDir: dirent.Type = fuse.DT_Dir case mfs.TFile: dirent.Type = fuse.DT_File } entries = append(entries, dirent) } if len(entries) > 0 { return entries, nil } return nil, fuse.ENOENT } func (fi *File) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error { _, err := fi.fi.Seek(req.Offset, os.SEEK_SET) if err != nil { return err } fisize, err := fi.fi.Size() if err != nil { return err } select { case <-ctx.Done(): return ctx.Err() default: } readsize := min(req.Size, int(fisize-req.Offset)) n, err := fi.fi.CtxReadFull(ctx, resp.Data[:readsize]) resp.Data = resp.Data[:n] return err } func (fi *File) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error { // TODO: at some point, ensure that WriteAt here respects the context wrote, err := fi.fi.WriteAt(req.Data, req.Offset) if err != nil { return err } resp.Size = wrote return nil } func (fi *File) Flush(ctx context.Context, req *fuse.FlushRequest) error { errs := make(chan error, 1) go func() { errs <- fi.fi.Flush() }() select { case err := <-errs: return err case <-ctx.Done(): return ctx.Err() } } func (fi *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error { if req.Valid.Size() { cursize, err := fi.fi.Size() if err != nil { return err } if cursize != int64(req.Size) { err := fi.fi.Truncate(int64(req.Size)) if err != nil { return err } } } return nil } // Fsync flushes the content in the file to disk, but does not // update the dag tree internally func (fi *FileNode) Fsync(ctx context.Context, req *fuse.FsyncRequest) error { errs := make(chan error, 1) go func() { errs <- fi.fi.Sync() }() select { case err := <-errs: return err case <-ctx.Done(): return ctx.Err() } } func (fi *File) Forget() { err := fi.fi.Sync() if err != nil { log.Debug("Forget file error: ", err) } } func (dir *Directory) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) { child, err := dir.dir.Mkdir(req.Name) if err != nil { return nil, err } return &Directory{dir: child}, nil } func (fi *FileNode) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) { var mfsflag int switch { case req.Flags.IsReadOnly(): mfsflag = mfs.OpenReadOnly case req.Flags.IsWriteOnly(): mfsflag = mfs.OpenWriteOnly case req.Flags.IsReadWrite(): mfsflag = mfs.OpenReadWrite default: return nil, errors.New("unsupported flag type") } fd, err := fi.fi.Open(mfsflag, true) if err != nil { return nil, err } if req.Flags&fuse.OpenTruncate != 0 { if req.Flags.IsReadOnly() { log.Error("tried to open a readonly file with truncate") return nil, fuse.ENOTSUP } log.Info("Need to truncate file!") err := fd.Truncate(0) if err != nil { return nil, err } } else if req.Flags&fuse.OpenAppend != 0 { log.Info("Need to append to file!") if req.Flags.IsReadOnly() { log.Error("tried to open a readonly file with append") return nil, fuse.ENOTSUP } _, err := fd.Seek(0, os.SEEK_END) if err != nil { log.Error("seek reset failed: ", err) return nil, err } } return &File{fi: fd}, nil } func (fi *File) Release(ctx context.Context, req *fuse.ReleaseRequest) error { return fi.fi.Close() } func (dir *Directory) 
Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) { // New 'empty' file nd := &dag.Node{Data: ft.FilePBData(nil, 0)} err := dir.dir.AddChild(req.Name, nd) if err != nil { return nil, nil, err } child, err := dir.dir.Child(req.Name) if err != nil { return nil, nil, err } fi, ok := child.(*mfs.File) if !ok { return nil, nil, errors.New("child creation failed") } nodechild := &FileNode{fi: fi} var openflag int switch { case req.Flags.IsReadOnly(): openflag = mfs.OpenReadOnly case req.Flags.IsWriteOnly(): openflag = mfs.OpenWriteOnly case req.Flags.IsReadWrite(): openflag = mfs.OpenReadWrite default: return nil, nil, errors.New("unsupported open mode") } fd, err := fi.Open(openflag, true) if err != nil { return nil, nil, err } return nodechild, &File{fi: fd}, nil } func (dir *Directory) Remove(ctx context.Context, req *fuse.RemoveRequest) error { err := dir.dir.Unlink(req.Name) if err != nil { return fuse.ENOENT } return nil } // Rename implements NodeRenamer func (dir *Directory) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fs.Node) error { cur, err := dir.dir.Child(req.OldName) if err != nil { return err } err = dir.dir.Unlink(req.OldName) if err != nil { return err } switch newDir := newDir.(type) { case *Directory: nd, err := cur.GetNode() if err != nil { return err } err = newDir.dir.AddChild(req.NewName, nd) if err != nil { return err } case *FileNode: log.Error("Cannot move node into a file!") return fuse.EPERM default: log.Error("Unknown node type for rename target dir!") return errors.New("Unknown fs node type!") } return nil } func min(a, b int) int { if a < b { return a } return b } // to check that out Node implements all the interfaces we want type ipnsRoot interface { fs.Node fs.HandleReadDirAller fs.NodeStringLookuper } var _ ipnsRoot = (*Root)(nil) type ipnsDirectory interface { fs.HandleReadDirAller fs.Node fs.NodeCreater fs.NodeMkdirer fs.NodeRemover fs.NodeRenamer fs.NodeStringLookuper } var _ ipnsDirectory = (*Directory)(nil) type ipnsFile interface { fs.HandleFlusher fs.HandleReader fs.HandleWriter fs.HandleReleaser } type ipnsFileNode interface { fs.Node fs.NodeFsyncer fs.NodeOpener } var _ ipnsFileNode = (*FileNode)(nil) var _ ipnsFile = (*File)(nil)
[ "\"IPFS_FUSE_DEBUG\"" ]
[]
[ "IPFS_FUSE_DEBUG" ]
[]
["IPFS_FUSE_DEBUG"]
go
1
0
search_test.go
package themovieapi import ( "os" "reflect" "testing" "github.com/joho/godotenv" ) func TestGetGengre(t *testing.T) { type args struct { apiKey string langu string } type tyTests struct { name string args args want TypGengres wantErr bool } var tests []tyTests { var myArgs args var myTests tyTests myArgs.apiKey = "" myArgs.langu = "de-DE" myTests.name = "Liefert die Gengre" myTests.args = myArgs myTests.want.Genres = []EleGenres{ {28, "Action"}, {12, "Abenteuer"}, {16, "Animation"}, {35, "Komödie"}, {80, "Krimi"}, {99, "Dokumentarfilm"}, {18, "Drama"}, {10751, "Familie"}, {14, "Fantasy"}, {36, "Historie"}, {27, "Horror"}, {10402, "Musik"}, {9648, "Mystery"}, {10749, "Liebesfilm"}, {878, "Science Fiction"}, {10770, "TV-Film"}, {53, "Thriller"}, {10752, "Kriegsfilm"}, {37, "Western"}} tests = append(tests, myTests) } //apikey for the movie DB godotenv.Load() if os.Getenv("apikey") != "" { tests[0].args.apiKey = os.Getenv("apikey") } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := GetGengre(tt.args.apiKey, tt.args.langu) if (err != nil) != tt.wantErr { t.Errorf("GetGengre() error = %v, wantErr %v", err, tt.wantErr) return } if !reflect.DeepEqual(got, tt.want) { t.Errorf("GetGengre() = %v, want %v", got, tt.want) } }) } } func Test_ChangeUmlauteAll(t *testing.T) { type args struct { iStr string } tests := []struct { name string args args want string }{ {"Testet german umlaute", args{"Die Schöne und das Biest"}, "Die Schoene und das Biest"}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := ChangeUmlauteAll(tt.args.iStr); got != tt.want { t.Errorf("changeUmlauteAll() = %v, want %v", got, tt.want) } }) } } func TestGetMovieDetail(t *testing.T) { type args struct { apiKey string langu string movieID int } tests := []struct { name string args args want MovieDetailResponse wantErr bool }{ {name: "Test FilmDetail", args: args{"", "de-DE", 335797}, want: MovieDetailResponse{ OriginalTitle: "Sing", }, wantErr: false, }, } //apikey for the movie DB godotenv.Load() if os.Getenv("apikey") != "" { tests[0].args.apiKey = os.Getenv("apikey") } //tests. 
for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := GetMovieDetail(tt.args.apiKey, tt.args.langu, tt.args.movieID) if (err != nil) != tt.wantErr { t.Errorf("GetMovieDetail() error = %v, wantErr %v", err, tt.wantErr) return } if got.OriginalTitle != tt.want.OriginalTitle { t.Errorf("GetMovieDetail() = %v, want %v", got, tt.want) } }) } } func TestGetSearchMovie(t *testing.T) { type args struct { apiKey string langu string query string page int } type tyResults []struct { Popularity float32 `json:"popularity,omitempty"` ID int `json:"id,omitempty"` Video bool `json:"video,omitempty"` VoteCount int `json:"vote_count,omitempty"` VoteAverage float32 `json:"vote_average,omitempty"` Title string `json:"title,omitempty"` ReleaseDate string `json:"release_date,omitempty"` OriginalLanguage string `json:"original_language,omitempty"` OriginalTitle string `json:"original_title,omitempty"` GenreIds []int `json:"genre_ids,omitempty"` BackdropPath string `json:"backdrop_path,omitempty"` Adult bool `json:"adult,omitempty"` Overview string `json:"overview,omitempty"` PosterPath string `json:"poster_path,omitempty"` } tests := []struct { name string args args want SearchMovieResponse wantErr bool }{ {name: "Suche Film", args: args{ apiKey: "", langu: "de-DE", query: "Sing", page: 1, }, want: SearchMovieResponse{ Results: tyResults{{ OriginalTitle: "Sing", }}, }, wantErr: false, }, } //apikey for the movie DB godotenv.Load() if os.Getenv("apikey") != "" { tests[0].args.apiKey = os.Getenv("apikey") } tests[0].want.Results = append(tests[0].want.Results) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := GetSearchMovie(tt.args.apiKey, tt.args.langu, tt.args.query, "", tt.args.page) if (err != nil) != tt.wantErr { t.Errorf("GetSearchMovie() error = %v, wantErr %v", err, tt.wantErr) return } if got.Results[0].OriginalTitle != tt.want.Results[0].OriginalTitle { t.Errorf("GetSearchMovie() = %v, want %v", got, tt.want) } }) } }
[ "\"apikey\"", "\"apikey\"", "\"apikey\"", "\"apikey\"", "\"apikey\"", "\"apikey\"" ]
[]
[ "apikey" ]
[]
["apikey"]
go
1
0
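As a quick illustration of the API exercised by the test file above, a minimal call site might look like the sketch below; the import path is a placeholder, and only the function signature of GetSearchMovie and the "apikey" environment variable are taken from the record.

package main

import (
	"fmt"
	"log"
	"os"

	"example.com/themovieapi" // placeholder import path for the package under test
)

func main() {
	// The tests read the API key from the "apikey" environment variable; do the same here.
	apiKey := os.Getenv("apikey")
	if apiKey == "" {
		log.Fatal("apikey is not set")
	}

	// Search German-language results for "Sing" and print the first original title.
	resp, err := themovieapi.GetSearchMovie(apiKey, "de-DE", "Sing", "", 1)
	if err != nil {
		log.Fatal(err)
	}
	if len(resp.Results) > 0 {
		fmt.Println(resp.Results[0].OriginalTitle)
	}
}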
jsonde.gui/src/main/java/com/jsonde/gui/dialog/about/AboutDialog.java
package com.jsonde.gui.dialog.about; import java.awt.BorderLayout; import java.awt.Container; import java.awt.FlowLayout; import java.awt.event.ActionEvent; import java.util.Map; import javax.swing.AbstractAction; import javax.swing.Action; import javax.swing.BorderFactory; import javax.swing.JButton; import javax.swing.JLabel; import javax.swing.JPanel; import javax.swing.JScrollPane; import javax.swing.JTextArea; import com.jsonde.gui.dialog.JSondeDialog; import com.jsonde.util.file.FileUtils; /** * * @author admin * */ public class AboutDialog extends JSondeDialog { public AboutDialog() { setSize(620, 460); setTitle("jSonde Errors"); setResizable(false); setModal(true); setBounds(getFrameBounds()); Container contentPane = getContentPane(); contentPane.setLayout(new BorderLayout()); JLabel label = new JLabel("<html>jSonde version 1.1.0 https://github.com/bedrin/jsonde Copyright (c) 2015"); label.setBorder(BorderFactory.createEmptyBorder(5,5,5,5)); contentPane.add(label, BorderLayout.NORTH); StringBuilder stringBuffer = new StringBuilder(); stringBuffer. append("System Properties:"). append(FileUtils.LINE_SEPARATOR). append(FileUtils.LINE_SEPARATOR); for (Map.Entry entry : System.getProperties().entrySet()) { stringBuffer. append(entry.getKey()). append(" = "). append(entry.getValue()). append(FileUtils.LINE_SEPARATOR); } stringBuffer. append(FileUtils.LINE_SEPARATOR). append(FileUtils.LINE_SEPARATOR). append("Environment Variables:"). append(FileUtils.LINE_SEPARATOR). append(FileUtils.LINE_SEPARATOR); for (Map.Entry<String,String> entry : System.getenv().entrySet()) { stringBuffer. append(entry.getKey()). append(" = "). append(entry.getValue()). append(FileUtils.LINE_SEPARATOR); } // todo fill string buffer JTextArea errorsTextArea = new JTextArea(stringBuffer.toString()); errorsTextArea.setEditable(false); errorsTextArea.setBorder(BorderFactory.createEmptyBorder(0,5,0,5)); JScrollPane scrollPane = new JScrollPane(); scrollPane.setVerticalScrollBarPolicy(JScrollPane.VERTICAL_SCROLLBAR_AS_NEEDED); scrollPane.setHorizontalScrollBarPolicy(JScrollPane.HORIZONTAL_SCROLLBAR_AS_NEEDED); scrollPane.setViewportView(errorsTextArea); contentPane.add(scrollPane, BorderLayout.CENTER); JButton closeButton = new JButton(new AbstractAction() { { putValue(Action.NAME, "Close"); } public void actionPerformed(ActionEvent e) { setVisible(false); } }); JPanel closeButtonPanel = new JPanel(); closeButtonPanel.setLayout(new FlowLayout(FlowLayout.RIGHT)); closeButtonPanel.add(closeButton); contentPane.add(closeButtonPanel, BorderLayout.SOUTH); } }
[]
[]
[]
[]
[]
java
0
0
uploader/helper/helper.go
package helper import ( "context" "fmt" "inspirationalquotes/uploader/util" "os" "github.com/joho/godotenv" "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" ) func ConnectDB() *mongo.Collection { config, err := GetConfiguration() if err != nil { util.Exit(err) } // Set client options clientOptions := options.Client().ApplyURI(config.ConnectionString) // Connect to MongoDB client, err := mongo.Connect(context.TODO(), clientOptions) if err != nil { util.Exit(err) } fmt.Println("Connected to MongoDB!") collection := client.Database(config.DBName).Collection(config.CollectionName) return collection } // Configuration model type Configuration struct { ConnectionString string DBName string CollectionName string } // GetConfiguration populates configuration information from .env and returns a Configuration model func GetConfiguration() (Configuration, error) { err := godotenv.Load("./.env") if err != nil { return Configuration{}, err } configuration := Configuration{ os.Getenv("CONNECTION_STRING"), os.Getenv("DB_NAME"), os.Getenv("COLLECTION_NAME"), } return configuration, nil }
[ "\"CONNECTION_STRING\"", "\"DB_NAME\"", "\"COLLECTION_NAME\"" ]
[]
[ "COLLECTION_NAME", "DB_NAME", "CONNECTION_STRING" ]
[]
["COLLECTION_NAME", "DB_NAME", "CONNECTION_STRING"]
go
3
0
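A hypothetical call site for the uploader helper in the record above; the module path inspirationalquotes/uploader/helper and the environment variable names come from the record, while the sample document contents are purely illustrative.

package main

import (
	"context"
	"fmt"
	"log"

	"go.mongodb.org/mongo-driver/bson"

	"inspirationalquotes/uploader/helper"
)

func main() {
	// ConnectDB loads CONNECTION_STRING, DB_NAME and COLLECTION_NAME from ./.env
	// via godotenv and exits through util.Exit if anything fails.
	collection := helper.ConnectDB()

	// Insert one sample quote into the configured collection.
	res, err := collection.InsertOne(context.TODO(), bson.M{
		"quote":  "stay curious",
		"author": "unknown",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("inserted id:", res.InsertedID)
}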
bokeh/settings.py
from __future__ import absolute_import import logging import os from os.path import join, dirname, abspath, exists class Settings(object): _prefix = "BOKEH_" debugjs = False @property def _environ(self): return os.environ def _get(self, key, default=None): return self._environ.get(self._prefix + key, default) @property def _is_dev(self): return self._get_bool("DEV", False) def _dev_or_default(self, default, dev): return dev if dev is not None and self._is_dev else default def _get_str(self, key, default, dev=None): return self._get(key, self._dev_or_default(default, dev)) def _get_bool(self, key, default, dev=None): value = self._get(key) if value is None: value = self._dev_or_default(default, dev) elif value.lower() in ["true", "yes", "on", "1"]: value = True elif value.lower() in ["false", "no", "off", "0"]: value = False else: raise ValueError("invalid value %r for boolean property %s%s" % (value, self._prefix, key)) return value def browser(self, default=None): return self._get_str("BROWSER", default, "none") def resources(self, default=None): return self._get_str("RESOURCES", default, "absolute-dev") def rootdir(self, default=None): return self._get_str("ROOTDIR", default) def version(self, default=None): return self._get_str("VERSION", default) def local_docs_cdn(self, default=None): return self._get_str("LOCAL_DOCS_CDN", default) def released_docs(self, default=None): return self._get_bool("RELEASED_DOCS", default, False) def minified(self, default=None): return self._get_bool("MINIFIED", default, False) def log_level(self, default=None): return self._get_str("LOG_LEVEL", default, "debug") def py_log_level(self, default='none'): level = self._get_str("PY_LOG_LEVEL", default, "debug") LEVELS = {'debug': logging.DEBUG, 'info' : logging.INFO, 'warn' : logging.WARNING, 'error': logging.ERROR, 'fatal': logging.CRITICAL, 'none' : None} return LEVELS[level] def pretty(self, default=None): return self._get_bool("PRETTY", default, True) def simple_ids(self, default=None): return self._get_bool("SIMPLE_IDS", default, True) def notebook_resources(self, default=None): return self._get_str("NOTEBOOK_RESOURCES", default) def notebook_verbose(self, default=None): return self._get_bool("NOTEBOOK_VERBOSE", default) def notebook_hide_banner(self, default=None): return self._get_bool("NOTEBOOK_HIDE_BANNER", default) def notebook_skip_load(self, default=None): return self._get_bool("NOTEBOOK_SKIP_LOAD", default) """ Server settings go here: """ def serverdir(self): return join(dirname(abspath(__file__)), 'server') def bokehjssrcdir(self): if self.debugjs: basedir = dirname(dirname(self.serverdir())) bokehjsdir = join(basedir, 'bokehjs', 'src') if exists(bokehjsdir): return bokehjsdir return None def bokehjsdir(self): if self.debugjs: basedir = dirname(dirname(self.serverdir())) bokehjsdir = join(basedir, 'bokehjs', 'build') if exists(bokehjsdir): return bokehjsdir return join(self.serverdir(), 'static') def js_files(self): bokehjsdir = self.bokehjsdir() js_files = [] for root, dirnames, files in os.walk(bokehjsdir): for fname in files: if fname.endswith(".js") and 'vendor' not in root: js_files.append(join(root, fname)) return js_files def css_files(self): bokehjsdir = self.bokehjsdir() js_files = [] for root, dirnames, files in os.walk(bokehjsdir): for fname in files: if fname.endswith(".css") and 'vendor' not in root: js_files.append(join(root, fname)) return js_files settings = Settings() del Settings
[]
[]
[]
[]
[]
python
0
0
tests/integration/sync_test.go
//go:build integration package integration import ( "context" "os" "testing" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/kong/deck/cmd" "github.com/kong/deck/dump" "github.com/kong/deck/utils" "github.com/kong/go-kong/kong" ) var deckCmd = cmd.RootCmdOnlyForDocsAndTest func getKongAddress() string { address := os.Getenv("DECK_KONG_ADDR") if address != "" { return address } return "http://localhost:8001" } // cleanUpEnv removes all existing entities from Kong. func cleanUpEnv(t *testing.T) error { deckCmd.SetArgs([]string{"reset", "--force"}) if err := deckCmd.Execute(); err != nil { t.Errorf(err.Error()) } return nil } func getTestClient() (*kong.Client, error) { return utils.GetKongClient(utils.KongClientConfig{ Address: getKongAddress(), }) } func testKongState( t *testing.T, client *kong.Client, expectedServices []*kong.Service, ) { ctx := context.Background() // Get entities from Kong services, err := dump.GetAllServices(ctx, client, []string{}) if err != nil { t.Errorf(err.Error()) } opt := []cmp.Option{ cmpopts.IgnoreFields(kong.Service{}, "ID", "CreatedAt", "UpdatedAt"), } if diff := cmp.Diff(services, expectedServices, opt...); diff != "" { t.Errorf(diff) } } func Test_Sync(t *testing.T) { // setup stage client, err := getTestClient() if err != nil { t.Errorf(err.Error()) } tests := []struct { name string kongFile string expectedServices []*kong.Service }{ { name: "creates a service", kongFile: "testdata/sync/create_service/kong.yaml", expectedServices: []*kong.Service{ { Name: kong.String("svc1"), ConnectTimeout: kong.Int(60000), Host: kong.String("mockbin.org"), Port: kong.Int(80), Protocol: kong.String("http"), ReadTimeout: kong.Int(60000), Retries: kong.Int(5), WriteTimeout: kong.Int(60000), Tags: nil, }, }, }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { if err := cleanUpEnv(t); err != nil { t.Errorf(err.Error()) } deckCmd.SetArgs([]string{"sync", "-s", tc.kongFile}) if err := deckCmd.Execute(); err != nil { t.Errorf(err.Error()) } testKongState(t, client, tc.expectedServices) }) } }
[ "\"DECK_KONG_ADDR\"" ]
[]
[ "DECK_KONG_ADDR" ]
[]
["DECK_KONG_ADDR"]
go
1
0
glearn/crf/ixx_glodon/model/config/__init__.py
# coding=utf-8 u""" User: xulin Date: 13-6-6 Time: 11:19 AM """ import os from model.config import development as config, production ENV = os.environ.get('MODEL_ENV', 'development') print('MODEL: %s' % ENV) if ENV == 'development': import development as config if ENV == 'production': import production as config
[]
[]
[ "MODEL_ENV" ]
[]
["MODEL_ENV"]
python
1
0
enforcer/store/filesystem/filesystem_store_test.go
/* Copyright 2018 Ignasi Barrera Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package filesystem import ( "encoding/json" "fmt" "io/ioutil" "os" "testing" "github.com/padmeio/padme/policy" ) var ( testFile = fmt.Sprintf("%v/src/github.com/padmeio/padme/policy/test_policy.json", os.Getenv("GOPATH")) store = LocalPolicyRepository{FilePath: "/tmp/padme-enforcer.json"} ) func loadTestPolicy(path string) *policy.PolicyBundle { bytes, err := ioutil.ReadFile(path) if err != nil { panic(fmt.Sprintf("Unable to read test policy file: %v", err)) } bundle := &policy.PolicyBundle{} if err = json.Unmarshal(bytes, bundle); err != nil { panic(fmt.Sprintf("Unable to deserialize PolicyBundle: %v", err)) } return bundle } func TestFileSystemPolicyStore(t *testing.T) { bundle := loadTestPolicy(testFile) if err := store.Save(bundle); err != nil { t.Errorf("Could not persist policy bundle: %v", err) } reloaded, err := store.Get() if err != nil { t.Errorf("Could not read the policy from the policy store: %v", err) } if bundle.Description != reloaded.Description || len(bundle.Policies) != len(reloaded.Policies) { t.Errorf("The persisted policy and the loaded one differ: %v %v", bundle, reloaded) } }
[ "\"GOPATH\"" ]
[]
[ "GOPATH" ]
[]
["GOPATH"]
go
1
0
ignite/pkg/gitpod/gitpod.go
package gitpod import ( "bytes" "context" "os" "strings" "github.com/ignite-hq/cli/ignite/pkg/cmdrunner/exec" "github.com/ignite-hq/cli/ignite/pkg/cmdrunner/step" ) // IsOnGitpod reports whether running on Gitpod or not. func IsOnGitpod() bool { return os.Getenv("GITPOD_WORKSPACE_ID") != "" } func URLForPort(ctx context.Context, port string) (string, error) { buf := bytes.Buffer{} if err := exec.Exec(ctx, []string{"gp", "url", port}, exec.StepOption(step.Stdout(&buf))); err != nil { return "", err } return strings.TrimSpace(buf.String()), nil }
[ "\"GITPOD_WORKSPACE_ID\"" ]
[]
[ "GITPOD_WORKSPACE_ID" ]
[]
["GITPOD_WORKSPACE_ID"]
go
1
0
hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureLogins.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.registry.secure; import java.io.File; import java.lang.reflect.Constructor; import java.lang.reflect.Method; import java.nio.charset.Charset; import java.security.Principal; import java.security.PrivilegedExceptionAction; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; import javax.security.auth.Subject; import javax.security.auth.callback.CallbackHandler; import javax.security.auth.kerberos.KerberosPrincipal; import javax.security.auth.login.LoginContext; import javax.security.auth.login.LoginException; import org.apache.zookeeper.Environment; import org.apache.zookeeper.data.ACL; import org.apache.commons.io.FileUtils; import org.apache.hadoop.security.HadoopKerberosName; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.util.KerberosName; import org.apache.hadoop.security.authentication.util.KerberosUtil; import org.apache.hadoop.registry.client.impl.zk.RegistrySecurity; import org.apache.hadoop.registry.client.impl.zk.ZookeeperConfigOptions; import static org.apache.hadoop.security.authentication.util.KerberosName.DEFAULT_MECHANISM; import static org.apache.hadoop.security.authentication.util.KerberosName.MECHANISM_HADOOP; import static org.apache.hadoop.security.authentication.util.KerberosName.MECHANISM_MIT; import static org.apache.hadoop.util.PlatformName.IBM_JAVA; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Verify that logins work */ public class TestSecureLogins extends AbstractSecureRegistryTest { private static final Logger LOG = LoggerFactory.getLogger(TestSecureLogins.class); @Test public void testHasRealm() throws Throwable { assertNotNull(getRealm()); LOG.info("ZK principal = {}", getPrincipalAndRealm(ZOOKEEPER_LOCALHOST)); } @Test public void testJaasFileSetup() throws Throwable { // the JVM has seemed inconsistent on setting up here assertNotNull("jaasFile", jaasFile); String confFilename = System.getProperty(Environment.JAAS_CONF_KEY); assertEquals(jaasFile.getAbsolutePath(), confFilename); } @Test public void testJaasFileBinding() throws Throwable { // the JVM has seemed inconsistent on setting up here assertNotNull("jaasFile", jaasFile); RegistrySecurity.bindJVMtoJAASFile(jaasFile); String confFilename = System.getProperty(Environment.JAAS_CONF_KEY); assertEquals(jaasFile.getAbsolutePath(), confFilename); } @Test public void testClientLogin() throws Throwable { LoginContext client = login(ALICE_LOCALHOST, ALICE_CLIENT_CONTEXT, keytab_alice); try { logLoginDetails(ALICE_LOCALHOST, client); String confFilename = System.getProperty(Environment.JAAS_CONF_KEY); assertNotNull("Unset: "+ Environment.JAAS_CONF_KEY, confFilename); String 
config = FileUtils.readFileToString(new File(confFilename), Charset.defaultCharset()); LOG.info("{}=\n{}", confFilename, config); RegistrySecurity.setZKSaslClientProperties(ALICE, ALICE_CLIENT_CONTEXT); } finally { client.logout(); } } @Test public void testZKServerContextLogin() throws Throwable { LoginContext client = login(ZOOKEEPER_LOCALHOST, ZOOKEEPER_SERVER_CONTEXT, keytab_zk); logLoginDetails(ZOOKEEPER_LOCALHOST, client); client.logout(); } @Test public void testServerLogin() throws Throwable { LoginContext loginContext = createLoginContextZookeeperLocalhost(); loginContext.login(); loginContext.logout(); } public LoginContext createLoginContextZookeeperLocalhost() throws LoginException { String principalAndRealm = getPrincipalAndRealm(ZOOKEEPER_LOCALHOST); Set<Principal> principals = new HashSet<Principal>(); principals.add(new KerberosPrincipal(ZOOKEEPER_LOCALHOST)); Subject subject = new Subject(false, principals, new HashSet<Object>(), new HashSet<Object>()); return new LoginContext("", subject, null, KerberosConfiguration.createServerConfig(ZOOKEEPER_LOCALHOST, keytab_zk)); } @Test public void testKerberosAuth() throws Throwable { File krb5conf = getKdc().getKrb5conf(); String krbConfig = FileUtils.readFileToString(krb5conf, Charset.defaultCharset()); LOG.info("krb5.conf at {}:\n{}", krb5conf, krbConfig); Subject subject = new Subject(); Class<?> kerb5LoginClass = Class.forName(KerberosUtil.getKrb5LoginModuleName()); Constructor<?> kerb5LoginConstr = kerb5LoginClass.getConstructor(); Object kerb5LoginObject = kerb5LoginConstr.newInstance(); final Map<String, String> options = new HashMap<String, String>(); options.put("debug", "true"); if (IBM_JAVA) { options.put("useKeytab", keytab_alice.getAbsolutePath().startsWith("file://") ? keytab_alice.getAbsolutePath() : "file://" + keytab_alice.getAbsolutePath()); options.put("principal", ALICE_LOCALHOST); options.put("refreshKrb5Config", "true"); options.put("credsType", "both"); String ticketCache = System.getenv("KRB5CCNAME"); if (ticketCache != null) { // IBM JAVA only respect system property and not env variable // The first value searched when "useDefaultCcache" is used. 
System.setProperty("KRB5CCNAME", ticketCache); options.put("useDefaultCcache", "true"); options.put("renewTGT", "true"); } } else { options.put("keyTab", keytab_alice.getAbsolutePath()); options.put("principal", ALICE_LOCALHOST); options.put("doNotPrompt", "true"); options.put("isInitiator", "true"); options.put("refreshKrb5Config", "true"); options.put("renewTGT", "true"); options.put("storeKey", "true"); options.put("useKeyTab", "true"); options.put("useTicketCache", "true"); } Method methodInitialize = kerb5LoginObject.getClass().getMethod("initialize", Subject.class, CallbackHandler.class, Map.class, Map.class); methodInitialize.invoke(kerb5LoginObject, subject, null, new HashMap<String, String>(), options); Method methodLogin = kerb5LoginObject.getClass().getMethod("login"); boolean loginOk = (Boolean) methodLogin.invoke(kerb5LoginObject); assertTrue("Failed to login", loginOk); Method methodCommit = kerb5LoginObject.getClass().getMethod("commit"); boolean commitOk = (Boolean) methodCommit.invoke(kerb5LoginObject); assertTrue("Failed to Commit", commitOk); } @Test public void testDefaultRealmValid() throws Throwable { String defaultRealm = KerberosUtil.getDefaultRealm(); assertNotEmpty("No default Kerberos Realm", defaultRealm); LOG.info("Default Realm '{}'", defaultRealm); } @Test public void testKerberosRulesValid() throws Throwable { assertTrue("!KerberosName.hasRulesBeenSet()", KerberosName.hasRulesBeenSet()); String rules = KerberosName.getRules(); assertEquals(kerberosRule, rules); LOG.info(rules); } @Test public void testValidKerberosName() throws Throwable { KerberosName.setRuleMechanism(MECHANISM_HADOOP); new HadoopKerberosName(ZOOKEEPER).getShortName(); // MECHANISM_MIT allows '/' and '@' in username KerberosName.setRuleMechanism(MECHANISM_MIT); new HadoopKerberosName(ZOOKEEPER).getShortName(); new HadoopKerberosName(ZOOKEEPER_LOCALHOST).getShortName(); new HadoopKerberosName(ZOOKEEPER_REALM).getShortName(); new HadoopKerberosName(ZOOKEEPER_LOCALHOST_REALM).getShortName(); KerberosName.setRuleMechanism(DEFAULT_MECHANISM); } @Test public void testUGILogin() throws Throwable { UserGroupInformation ugi = loginUGI(ZOOKEEPER, keytab_zk); RegistrySecurity.UgiInfo ugiInfo = new RegistrySecurity.UgiInfo(ugi); LOG.info("logged in as: {}", ugiInfo); assertTrue("security is not enabled: " + ugiInfo, UserGroupInformation.isSecurityEnabled()); assertTrue("login is keytab based: " + ugiInfo, ugi.isFromKeytab()); // now we are here, build a SASL ACL ACL acl = ugi.doAs(new PrivilegedExceptionAction<ACL>() { @Override public ACL run() throws Exception { return registrySecurity.createSaslACLFromCurrentUser(0); } }); assertEquals(ZOOKEEPER_REALM, acl.getId().getId()); assertEquals(ZookeeperConfigOptions.SCHEME_SASL, acl.getId().getScheme()); registrySecurity.addSystemACL(acl); } }
[ "\"KRB5CCNAME\"" ]
[]
[ "KRB5CCNAME" ]
[]
["KRB5CCNAME"]
java
1
0
server/services/metrics/metrics.go
package metrics import ( "os" "github.com/prometheus/client_golang/prometheus" ) const ( MetricsNamespace = "focalboard" MetricsSubsystemBlocks = "blocks" MetricsSubsystemWorkspaces = "workspaces" MetricsSubsystemSystem = "system" MetricsCloudInstallationLabel = "installationId" ) type InstanceInfo struct { Version string BuildNum string Edition string InstallationID string } // Metrics used to instrumentate metrics in prometheus. type Metrics struct { registry *prometheus.Registry instance *prometheus.GaugeVec startTime prometheus.Gauge loginCount prometheus.Counter loginFailCount prometheus.Counter blocksInsertedCount prometheus.Counter blocksPatchedCount prometheus.Counter blocksDeletedCount prometheus.Counter blockCount *prometheus.GaugeVec workspaceCount prometheus.Gauge blockLastActivity prometheus.Gauge } // NewMetrics Factory method to create a new metrics collector. func NewMetrics(info InstanceInfo) *Metrics { m := &Metrics{} m.registry = prometheus.NewRegistry() options := prometheus.ProcessCollectorOpts{ Namespace: MetricsNamespace, } m.registry.MustRegister(prometheus.NewProcessCollector(options)) m.registry.MustRegister(prometheus.NewGoCollector()) additionalLabels := map[string]string{} if info.InstallationID != "" { additionalLabels[MetricsCloudInstallationLabel] = os.Getenv("MM_CLOUD_INSTALLATION_ID") } m.loginCount = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: MetricsNamespace, Subsystem: MetricsSubsystemSystem, Name: "login_total", Help: "Total number of logins.", ConstLabels: additionalLabels, }) m.registry.MustRegister(m.loginCount) m.loginFailCount = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: MetricsNamespace, Subsystem: MetricsSubsystemSystem, Name: "login_fail_total", Help: "Total number of failed logins.", ConstLabels: additionalLabels, }) m.registry.MustRegister(m.loginFailCount) m.instance = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: MetricsNamespace, Subsystem: MetricsSubsystemSystem, Name: "focalboard_instance_info", Help: "Instance information for Focalboard.", ConstLabels: additionalLabels, }, []string{"Version", "BuildNum", "Edition"}) m.registry.MustRegister(m.instance) m.instance.WithLabelValues(info.Version, info.BuildNum, info.Edition).Set(1) m.startTime = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: MetricsNamespace, Subsystem: MetricsSubsystemSystem, Name: "server_start_time", Help: "The time the server started.", ConstLabels: additionalLabels, }) m.startTime.SetToCurrentTime() m.registry.MustRegister(m.startTime) m.blocksInsertedCount = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: MetricsNamespace, Subsystem: MetricsSubsystemBlocks, Name: "blocks_inserted_total", Help: "Total number of blocks inserted.", ConstLabels: additionalLabels, }) m.registry.MustRegister(m.blocksInsertedCount) m.blocksPatchedCount = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: MetricsNamespace, Subsystem: MetricsSubsystemBlocks, Name: "blocks_patched_total", Help: "Total number of blocks patched.", ConstLabels: additionalLabels, }) m.registry.MustRegister(m.blocksPatchedCount) m.blocksDeletedCount = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: MetricsNamespace, Subsystem: MetricsSubsystemBlocks, Name: "blocks_deleted_total", Help: "Total number of blocks deleted.", ConstLabels: additionalLabels, }) m.registry.MustRegister(m.blocksDeletedCount) m.blockCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: MetricsNamespace, Subsystem: MetricsSubsystemBlocks, Name: "blocks_total", Help: 
"Total number of blocks.", ConstLabels: additionalLabels, }, []string{"BlockType"}) m.registry.MustRegister(m.blockCount) m.workspaceCount = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: MetricsNamespace, Subsystem: MetricsSubsystemWorkspaces, Name: "workspaces_total", Help: "Total number of workspaces.", ConstLabels: additionalLabels, }) m.registry.MustRegister(m.workspaceCount) m.blockLastActivity = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: MetricsNamespace, Subsystem: MetricsSubsystemBlocks, Name: "blocks_last_activity", Help: "Time of last block insert, update, delete.", ConstLabels: additionalLabels, }) m.registry.MustRegister(m.blockLastActivity) return m } func (m *Metrics) IncrementLoginCount(num int) { if m != nil { m.loginCount.Add(float64(num)) } } func (m *Metrics) IncrementLoginFailCount(num int) { if m != nil { m.loginFailCount.Add(float64(num)) } } func (m *Metrics) IncrementBlocksInserted(num int) { if m != nil { m.blocksInsertedCount.Add(float64(num)) m.blockLastActivity.SetToCurrentTime() } } func (m *Metrics) IncrementBlocksPatched(num int) { if m != nil { m.blocksPatchedCount.Add(float64(num)) m.blockLastActivity.SetToCurrentTime() } } func (m *Metrics) IncrementBlocksDeleted(num int) { if m != nil { m.blocksDeletedCount.Add(float64(num)) m.blockLastActivity.SetToCurrentTime() } } func (m *Metrics) ObserveBlockCount(blockType string, count int64) { if m != nil { m.blockCount.WithLabelValues(blockType).Set(float64(count)) } } func (m *Metrics) ObserveWorkspaceCount(count int64) { if m != nil { m.workspaceCount.Set(float64(count)) } }
[ "\"MM_CLOUD_INSTALLATION_ID\"" ]
[]
[ "MM_CLOUD_INSTALLATION_ID" ]
[]
["MM_CLOUD_INSTALLATION_ID"]
go
1
0
tests/oauth2/rfc6749/test_utils.py
from __future__ import absolute_import, unicode_literals import datetime import os from ...unittest import TestCase from oauthlib.oauth2.rfc6749.utils import escape, host_from_uri from oauthlib.oauth2.rfc6749.utils import generate_age from oauthlib.oauth2.rfc6749.utils import is_secure_transport from oauthlib.oauth2.rfc6749.utils import params_from_uri class UtilsTests(TestCase): def test_escape(self): """Assert that we are only escaping unicode""" self.assertRaises(ValueError, escape, b"I am a string type. Not a unicode type.") self.assertEqual(escape("I am a unicode type."), "I%20am%20a%20unicode%20type.") def test_host_from_uri(self): """Test if hosts and ports are properly extracted from URIs. This should be done according to the MAC Authentication spec. Defaults ports should be provided when none is present in the URI. """ self.assertEqual(host_from_uri('http://a.b-c.com:8080'), ('a.b-c.com', '8080')) self.assertEqual(host_from_uri('https://a.b.com:8080'), ('a.b.com', '8080')) self.assertEqual(host_from_uri('http://www.example.com'), ('www.example.com', '80')) self.assertEqual(host_from_uri('https://www.example.com'), ('www.example.com', '443')) def test_is_secure_transport(self): """Test check secure uri.""" if 'DEBUG' in os.environ: del os.environ['DEBUG'] self.assertTrue(is_secure_transport('https://example.com')) self.assertFalse(is_secure_transport('http://example.com')) os.environ['DEBUG'] = '1' self.assertTrue(is_secure_transport('http://example.com')) del os.environ['DEBUG'] def test_params_from_uri(self): self.assertEqual(params_from_uri('http://i.b/?foo=bar&g&scope=a+d'), {'foo': 'bar', 'g': '', 'scope': ['a', 'd']}) def test_generate_age(self): issue_time = datetime.datetime.now() - datetime.timedelta( days=3, minutes=1, seconds=4) self.assertGreater(float(generate_age(issue_time)), 259263.0)
[]
[]
[ "DEBUG" ]
[]
["DEBUG"]
python
1
0
scripts/fetch_all_metrics.py
""" Generate METRICS-%Y-%m-%d.json for all the repositories tracked by repos-to-include.txt and save them in their respective _data directory. """ import datetime import json import os import requests import graphql_queries print("LOG: Assuming the current path to be the root of the metrics repository.") PATH_TO_METRICS_DATA = "_data" PATH_TO_METADATA = "_metadata" GITHUB_USERNAME = os.environ["GH_USERNAME"] GITHUB_OAUTH_TOKEN = os.environ["OAUTH_TOKEN"] GITHUB_API_ENDPOINT = "https://api.github.com/graphql" DATESTAMP = datetime.datetime.now().date().isoformat() # Read repos-to-include.txt all_orgs = [] # Track orgs and all its repos e.g. twitter, twitterdev all_repos = [] # Track specific repositories e.g. ['pantsbuild/pants', 'pantsbuild/pex'] with open("repos-to-include.txt") as f: for line in f: owner, repo = line.split("/") repo = repo.rstrip("\n") if repo == "*": all_orgs.append(owner) else: all_repos.append(owner + "/" + repo) print("Orgs to track", all_orgs) print("Repos to track", all_repos) def fetch_one_page(query_string, variables): """ Request the GitHub GraphQL API One time, the request failed but it passed after retrying. This was the response - ``` Error in GitHub API query. Status Code : 502, Response: {'data': 'null', 'errors': [{'message': 'Something went wrong while executing your query. This may be the result of a timeout, or it could be a GitHub bug. Please include `80A9:464B:6AEAA9:78BA60:5B748709` when reporting this issue.'}]} ``` This function makes 5 trials before giving up """ headers = { "Content-Type": "application/json", } attempt = 0 while(attempt<=5): r = requests.post(GITHUB_API_ENDPOINT, json={"query": query_string, "variables": variables}, auth=(GITHUB_USERNAME, GITHUB_OAUTH_TOKEN)) if r.status_code == 200: return r.json() break else: attempt += 1 print("\n\n\n Error in GitHub API query. Status Code : {}, Response: {}".format(r.status_code, r.json())) print("\n Trying again... ({}/5)".format(attempt)) raise Exception("Error in GitHub API query. Status Code : {}, Response: {}".format(r.status_code, r.json())) all_org_edges = [] # All the repos in the org with their stats for org in all_orgs: # Combine the paginated responses from the API has_next_page = False end_cursor = None num_of_pages = 0 while True: # print("Num of pages", num_of_pages) variables = json.dumps({"owner": org, "endCursor": end_cursor}) print("Sending request") response = fetch_one_page(graphql_queries.org_all_repos, variables) print("Received request") repository_edges = response["data"]["organization"]["repositories"]["edges"] all_org_edges.extend(repository_edges) pageInfo = response["data"]["organization"]["repositories"]["pageInfo"] has_next_page = pageInfo["hasNextPage"] # print("has_next_page", has_next_page) end_cursor = pageInfo["endCursor"] # print("end_cursor", end_cursor) num_of_pages += 1 if not has_next_page: break # print("LOG: Fetched all the org repositories. Count:", len(all_org_edges)) # Fetch individual repositories' data all_repo_edges = [] # All individual repos for item in all_repos: owner, repo = item.split("/") variables = json.dumps({"owner": owner, "repo": repo, "endCursor": None}) response = fetch_one_page(graphql_queries.repo_wise, variables) # print("response for", repo, response) all_repo_edges.append(response["data"]) # print("TYPE", type(response["data"])) # print("LOG: Fetched all the individual repos as well. 
Count:", len(all_repo_edges)) # Repos to exclude from the project repos_to_exclude = set() with open("repos-to-exclude.txt", "r") as f: for line in f: repo = line.rstrip("\n") repos_to_exclude.add(repo) # Convert all_org_edges to DATA_JSON with the following format - # key: value :: repo_full_name: repo_data DATA_JSON = {} for edge in all_org_edges: if edge["node"]["isPrivate"]: continue if edge["node"]["nameWithOwner"] in repos_to_exclude: print("Skipping", edge["node"]["nameWithOwner"], "(from repos-to-exclude.txt)") continue repo_full_name = edge["node"]["nameWithOwner"] repo_data = edge["node"] DATA_JSON[repo_full_name] = repo_data for edge in all_repo_edges: if edge["repository"]["isPrivate"]: continue repo_full_name = edge["repository"]["nameWithOwner"] repo_data = edge["repository"] DATA_JSON[repo_full_name] = repo_data # Simplify the format of json for repo in DATA_JSON: old_json = DATA_JSON[repo] new_json = { 'datestamp': DATESTAMP, 'nameWithOwner': old_json['nameWithOwner'], 'name': old_json['name'], 'commits': old_json['defaultBranchRef']['target']['history']['totalCount'], 'forkCount': old_json['forkCount'], 'issues': old_json['issues']['totalCount'], 'openIssues': old_json['openIssues']['totalCount'], 'closedIssues': old_json['closedIssues']['totalCount'], 'pullRequests': old_json['pullRequests']['totalCount'], 'openPullRequests': old_json['openPullRequests']['totalCount'], 'mergedPullRequests': old_json['mergedPullRequests']['totalCount'], 'closedPullRequests': old_json['closedPullRequests']['totalCount'], 'stargazers': old_json['stargazers']['totalCount'], 'watchers': old_json['watchers']['totalCount'], } DATA_JSON[repo] = new_json # Update _metadata/projects_tracked.json PROJECTS_TRACKED = {} orgs_tracked = set() repos_tracked = set() for reponame in DATA_JSON: orgname = reponame.split("/")[0] orgs_tracked.add(orgname) repos_tracked.add(reponame) PROJECTS_TRACKED['orgs'] = list(orgs_tracked) PROJECTS_TRACKED['projects'] = {} for reponame in repos_tracked: org, repo = reponame.split("/") try: PROJECTS_TRACKED['projects'][org].append(repo) except KeyError: PROJECTS_TRACKED['projects'][org] = [repo] with open(os.path.join(PATH_TO_METADATA, "projects_tracked.json"), "w+") as f: json.dump(PROJECTS_TRACKED, f) # Add CHAOSS specific metrics # The API endpoint is http://twitter.augurlabs.io/api/unstable/ # And the API documentation is at https://osshealth.github.io/augur/api/index.html # Save the respective JSON files for repo in DATA_JSON: owner, project = repo.split("/") # Create directories inside twitter/metrics/_data if they don't exist owner_dir_path = "{}/{}".format(PATH_TO_METRICS_DATA, owner) repo_dir_path = "{}/{}".format(owner_dir_path, project) os.makedirs(repo_dir_path, exist_ok=True) # if not os.path.isdir(owner_dir_path): # os.mkdir(owner_dir_path) # if not os.path.isdir(repo_dir_path): # os.mkdir(repo_dir_path) # Save the json file with a timestamp file_name = "METRICS-" + DATESTAMP + ".json" with open(repo_dir_path + "/" + file_name, "w+") as f: json.dump(DATA_JSON[repo], f) print("LOG: Saving", file_name, "for", repo)
[]
[]
[ "GH_USERNAME", "OAUTH_TOKEN" ]
[]
["GH_USERNAME", "OAUTH_TOKEN"]
python
2
0
nitjcompiler/wsgi.py
""" WSGI config for nitjcompiler project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nitjcompiler.settings") application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
var/spack/repos/builtin/packages/esmf/package.py
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) import os from spack import * class Esmf(MakefilePackage): """The Earth System Modeling Framework (ESMF) is high-performance, flexible software infrastructure for building and coupling weather, climate, and related Earth science applications. The ESMF defines an architecture for composing complex, coupled modeling systems and includes data structures and utilities for developing individual models.""" homepage = "https://www.earthsystemcog.org/projects/esmf/" url = 'https://github.com/esmf-org/esmf/archive/ESMF_8_0_1.tar.gz' maintainers = ['climbfuji'] version('8.2.0', sha256='3693987aba2c8ae8af67a0e222bea4099a48afe09b8d3d334106f9d7fc311485') version('8.1.1', sha256='58c2e739356f21a1b32673aa17a713d3c4af9d45d572f4ba9168c357d586dc75') version('8.0.1', sha256='9172fb73f3fe95c8188d889ee72fdadb4f978b1d969e1d8e401e8d106def1d84') version('8.0.0', sha256='051dca45f9803d7e415c0ea146df15ce487fb55f0fce18ca61d96d4dba0c8774') version('7.1.0r', sha256='ae9a5edb8d40ae97a35cbd4bd00b77061f995c77c43d36334dbb95c18b00a889') variant('mpi', default=True, description='Build with MPI support') variant('external-lapack', default=False, description='Build with external LAPACK support') variant('netcdf', default=True, description='Build with NetCDF support') variant('pnetcdf', default=True, description='Build with pNetCDF support') variant('xerces', default=True, description='Build with Xerces support') variant('pio', default=True, description='Enable ParallelIO support') variant('debug', default=False, description='Make a debuggable version of the library') # Required dependencies depends_on('zlib') depends_on('libxml2') # Optional dependencies depends_on('mpi', when='+mpi') depends_on('lapack@3:', when='+external-lapack') depends_on('[email protected]:', when='+netcdf') depends_on('[email protected]:', when='+netcdf') depends_on('[email protected]:', when='+pnetcdf') depends_on('[email protected]:', when='+xerces') # Testing dependencies depends_on('perl', type='test') # Make esmf build with newer intel versions patch('intel.patch', when='@:7.0 %intel@17:') # Make esmf build with newer gcc versions # https://sourceforge.net/p/esmf/esmf/ci/3706bf758012daebadef83d6575c477aeff9c89b/ patch('gcc.patch', when='@:7.0 %gcc@6:') # Fix undefined reference errors with mvapich2 # https://sourceforge.net/p/esmf/esmf/ci/34de0ccf556ba75d35c9687dae5d9f666a1b2a18/ patch('mvapich2.patch', when='@:7.0') # Allow different directories for creation and # installation of dynamic libraries on OSX: patch('darwin_dylib_install_name.patch', when='platform=darwin @:7.0') # Missing include file for newer gcc compilers # https://trac.macports.org/ticket/57493 patch('cstddef.patch', when='@7.1.0r %gcc@8:') # Make script from mvapich2.patch executable @when('@:7.0') @run_before('build') def chmod_scripts(self): chmod = which('chmod') chmod('+x', 'scripts/libs.mvapich2f90') def url_for_version(self, version): if version < Version('8.0.0'): return "http://www.earthsystemmodeling.org/esmf_releases/public/ESMF_{0}/esmf_{0}_src.tar.gz".format(version.underscored) else: return "https://github.com/esmf-org/esmf/archive/ESMF_{0}.tar.gz".format(version.underscored) def edit(self, spec, prefix): # Installation instructions can be found at: # http://www.earthsystemmodeling.org/esmf_releases/last_built/ESMF_usrdoc/node9.html # Unset any environment variables that 
may influence the installation. for var in os.environ: if var.startswith('ESMF_'): os.environ.pop(var) ###################################### # Build and Installation Directories # ###################################### # The environment variable ESMF_DIR must be set to the full pathname # of the top level ESMF directory before building the framework. os.environ['ESMF_DIR'] = os.getcwd() # This variable specifies the prefix of the installation path used # with the install target. os.environ['ESMF_INSTALL_PREFIX'] = prefix # Installation subdirectories default to: # bin/binO/Linux.gfortran.64.default.default os.environ['ESMF_INSTALL_BINDIR'] = 'bin' os.environ['ESMF_INSTALL_LIBDIR'] = 'lib' os.environ['ESMF_INSTALL_MODDIR'] = 'include' # Allow compiler flags to carry through from compiler spec os.environ['ESMF_CXXCOMPILEOPTS'] = \ ' '.join(spec.compiler_flags['cxxflags']) os.environ['ESMF_F90COMPILEOPTS'] = \ ' '.join(spec.compiler_flags['fflags']) # ESMF will simply not build with Intel using backing GCC 8, in that # case you need to point to something older, below is commented but is # an example # os.environ['ESMF_CXXCOMPILEOPTS'] = \ # '-O2 -std=c++11 -gcc-name=/usr/bin/gcc' # os.environ['ESMF_F90COMPILEOPTS'] = \ # '-O2 -gcc-name=/usr/bin/gcc' ############ # Compiler # ############ # ESMF_COMPILER must be set to select which Fortran and # C++ compilers are being used to build the ESMF library. if self.compiler.name == 'gcc': os.environ['ESMF_COMPILER'] = 'gfortran' gfortran_major_version = int(spack.compiler.get_compiler_version_output( self.compiler.fc, '-dumpversion').split('.')[0]) elif self.compiler.name == 'intel': os.environ['ESMF_COMPILER'] = 'intel' elif self.compiler.name in ['clang', 'apple-clang']: os.environ['ESMF_COMPILER'] = 'gfortranclang' gfortran_major_version = int(spack.compiler.get_compiler_version_output( self.compiler.fc, '-dumpversion').split('.')[0]) elif self.compiler.name == 'nag': os.environ['ESMF_COMPILER'] = 'nag' elif self.compiler.name == 'pgi': os.environ['ESMF_COMPILER'] = 'pgi' else: msg = "The compiler you are building with, " msg += "'{0}', is not supported by ESMF." raise InstallError(msg.format(self.compiler.name)) if '+mpi' in spec: os.environ['ESMF_CXX'] = spec['mpi'].mpicxx os.environ['ESMF_F90'] = spec['mpi'].mpifc else: os.environ['ESMF_CXX'] = os.environ['CXX'] os.environ['ESMF_F90'] = os.environ['FC'] # This environment variable controls the build option. if '+debug' in spec: # Build a debuggable version of the library. os.environ['ESMF_BOPT'] = 'g' else: # Build an optimized version of the library. os.environ['ESMF_BOPT'] = 'O' if self.compiler.name in ['gcc', 'clang', 'apple-clang'] and \ gfortran_major_version >= 10: os.environ['ESMF_F90COMPILEOPTS'] = '-fallow-argument-mismatch' ####### # OS # ####### # ESMF_OS must be set for Cray systems if 'platform=cray' in self.spec: os.environ['ESMF_OS'] = 'Unicos' ####### # MPI # ####### # ESMF_COMM must be set to indicate which MPI implementation # is used to build the ESMF library. 
if '+mpi' in spec: if 'platform=cray' in self.spec: os.environ['ESMF_COMM'] = 'mpi' elif '^mvapich2' in spec: os.environ['ESMF_COMM'] = 'mvapich2' elif '^mpich' in spec: # [email protected] does not include configs for mpich3, # so we start with the configs for mpich2: os.environ['ESMF_COMM'] = 'mpich2' # The mpich 3 series split apart the Fortran and C bindings, # so we link the Fortran libraries when building C programs: os.environ['ESMF_CXXLINKLIBS'] = '-lmpifort' elif '^openmpi' in spec: os.environ['ESMF_COMM'] = 'openmpi' elif '^intel-parallel-studio+mpi' in spec or \ '^intel-mpi' in spec or \ '^intel-oneapi-mpi' in spec: os.environ['ESMF_COMM'] = 'intelmpi' else: # Force use of the single-processor MPI-bypass library. os.environ['ESMF_COMM'] = 'mpiuni' ########## # LAPACK # ########## if '+external-lapack' in spec: # A system-dependent external LAPACK/BLAS installation is used # to satisfy the external dependencies of the LAPACK-dependent # ESMF code. os.environ['ESMF_LAPACK'] = 'system' # FIXME: determine whether or not we need to set this # Specifies the path where the LAPACK library is located. # os.environ['ESMF_LAPACK_LIBPATH'] = spec['lapack'].prefix.lib # Specifies the linker directive needed to link the LAPACK library # to the application. os.environ['ESMF_LAPACK_LIBS'] = spec['lapack'].libs.link_flags # noqa else: os.environ['ESMF_LAPACK'] = 'internal' ########## # NetCDF # ########## if '+netcdf' in spec: # ESMF provides the ability to read Grid and Mesh data in # NetCDF format. if spec.satisfies('^[email protected]:'): # ESMF_NETCDF_LIBS will be set to "-lnetcdff -lnetcdf". # This option is useful for systems which have the Fortran # and C bindings archived in seperate library files. os.environ['ESMF_NETCDF'] = 'split' else: # ESMF_NETCDF_LIBS will be set to "-lnetcdf". # This option is useful when the Fortran and C bindings # are archived together in the same library file. os.environ['ESMF_NETCDF'] = 'standard' # FIXME: determine whether or not we need to set these. # ESMF_NETCDF_INCLUDE # ESMF_NETCDF_LIBPATH ################### # Parallel-NetCDF # ################### if '+pnetcdf' in spec: # ESMF provides the ability to write Mesh weights # using Parallel-NetCDF. # When defined, enables the use of Parallel-NetCDF. # ESMF_PNETCDF_LIBS will be set to "-lpnetcdf". os.environ['ESMF_PNETCDF'] = 'standard' # FIXME: determine whether or not we need to set these. # ESMF_PNETCDF_INCLUDE # ESMF_PNETCDF_LIBPATH ############## # ParallelIO # ############## if '+pio' in spec and '+mpi' in spec: # ESMF provides the ability to read and write data in both binary # and NetCDF formats through ParallelIO (PIO), a third-party IO # software library that is integrated in the ESMF library. # PIO-dependent features will be enabled and will use the # PIO library that is included and built with ESMF. os.environ['ESMF_PIO'] = 'internal' else: # Disables PIO-dependent code. os.environ['ESMF_PIO'] = 'OFF' ########## # XERCES # ########## if '+xerces' in spec: # ESMF provides the ability to read Attribute data in # XML file format via the XERCES C++ library. # ESMF_XERCES_LIBS will be set to "-lxerces-c". 
os.environ['ESMF_XERCES'] = 'standard' # FIXME: determine if the following are needed # ESMF_XERCES_INCLUDE # ESMF_XERCES_LIBPATH def check(self): make('check', parallel=False) def setup_dependent_build_environment(self, env, dependent_spec): env.set('ESMFMKFILE', os.path.join(self.prefix.lib, 'esmf.mk')) def setup_run_environment(self, env): env.set('ESMFMKFILE', os.path.join(self.prefix.lib, 'esmf.mk'))
[]
[]
[ "ESMF_OS", "ESMF_INSTALL_LIBDIR", "ESMF_F90", "ESMF_LAPACK_LIBS", "ESMF_COMPILER", "ESMF_DIR", "FC", "ESMF_NETCDF", "ESMF_CXXCOMPILEOPTS", "ESMF_XERCES", "ESMF_INSTALL_PREFIX", "ESMF_BOPT", "ESMF_PNETCDF", "ESMF_INSTALL_BINDIR", "CXX", "ESMF_CXXLINKLIBS", "ESMF_INSTALL_MODDIR", "ESMF_F90COMPILEOPTS", "ESMF_LAPACK_LIBPATH", "ESMF_COMM", "ESMF_PIO", "ESMF_LAPACK", "ESMF_CXX" ]
[]
["ESMF_OS", "ESMF_INSTALL_LIBDIR", "ESMF_F90", "ESMF_LAPACK_LIBS", "ESMF_COMPILER", "ESMF_DIR", "FC", "ESMF_NETCDF", "ESMF_CXXCOMPILEOPTS", "ESMF_XERCES", "ESMF_INSTALL_PREFIX", "ESMF_BOPT", "ESMF_PNETCDF", "ESMF_INSTALL_BINDIR", "CXX", "ESMF_CXXLINKLIBS", "ESMF_INSTALL_MODDIR", "ESMF_F90COMPILEOPTS", "ESMF_LAPACK_LIBPATH", "ESMF_COMM", "ESMF_PIO", "ESMF_LAPACK", "ESMF_CXX"]
python
23
0
setup.py
from setuptools import setup, Extension, Distribution import setuptools.command.build_ext import sys import sysconfig import os import os.path import distutils.sysconfig import itertools from glob import iglob def _get_turbodbc_libname(): builder = setuptools.command.build_ext.build_ext(Distribution()) full_name = builder.get_ext_filename('libturbodbc') without_lib = full_name.split('lib', 1)[-1] without_so = without_lib.rsplit('.so', 1)[0] return without_so def _get_distutils_build_directory(): """ Returns the directory distutils uses to build its files. We need this directory since we build extensions which have to link other ones. """ pattern = "lib.{platform}-{major}.{minor}" return os.path.join('build', pattern.format(platform=sysconfig.get_platform(), major=sys.version_info[0], minor=sys.version_info[1])) def _get_source_files(directory): path = os.path.join('src', directory) iterable_sources = (iglob(os.path.join(root,'*.cpp')) for root, dirs, files in os.walk(path)) source_files = itertools.chain.from_iterable(iterable_sources) return list(source_files) def _remove_strict_prototype_option_from_distutils_config(): strict_prototypes = '-Wstrict-prototypes' config = distutils.sysconfig.get_config_vars() for key, value in config.items(): if strict_prototypes in str(value): config[key] = config[key].replace(strict_prototypes, '') _remove_strict_prototype_option_from_distutils_config() def _has_arrow_headers(): try: import pyarrow return True except: return False def _has_numpy_headers(): try: import numpy return True except: return False class _deferred_pybind11_include(object): def __str__(self): import pybind11 return pybind11.get_include() extra_compile_args = [] hidden_visibility_args = [] include_dirs = ['include/', _deferred_pybind11_include()] library_dirs = [_get_distutils_build_directory()] python_module_link_args = [] base_library_link_args = [] if sys.platform == 'darwin': extra_compile_args.append('--std=c++11') extra_compile_args.append('--stdlib=libc++') extra_compile_args.append('-mmacosx-version-min=10.9') hidden_visibility_args.append('-fvisibility=hidden') include_dirs.append(os.getenv('UNIXODBC_INCLUDE_DIR', '/usr/local/include/')) library_dirs.append(os.getenv('UNIXODBC_LIBRARY_DIR', '/usr/local/lib/')) from distutils import sysconfig vars = sysconfig.get_config_vars() vars['LDSHARED'] = vars['LDSHARED'].replace('-bundle', '') python_module_link_args.append('-bundle') builder = setuptools.command.build_ext.build_ext(Distribution()) full_name = builder.get_ext_filename('libturbodbc') base_library_link_args.append('-Wl,-dylib_install_name,@loader_path/{}'.format(full_name)) base_library_link_args.append('-dynamiclib') odbclib = 'odbc' elif sys.platform == 'win32': extra_compile_args.append('-DNOMINMAX') if 'BOOST_ROOT' in os.environ: include_dirs.append(os.getenv('BOOST_ROOT')) library_dirs.append(os.path.join(os.getenv('BOOST_ROOT'), "stage", "lib")) library_dirs.append(os.path.join(os.getenv('BOOST_ROOT'), "lib64-msvc-14.0")) else: print("warning: BOOST_ROOT enviroment variable not set") odbclib = 'odbc32' else: extra_compile_args.append('--std=c++11') hidden_visibility_args.append('-fvisibility=hidden') python_module_link_args.append("-Wl,-rpath,$ORIGIN") if 'UNIXODBC_INCLUDE_DIR' in os.environ: include_dirs.append(os.getenv('UNIXODBC_INCLUDE_DIR')) if 'UNIXODBC_LIBRARY_DIR' in os.environ: library_dirs.append(os.getenv('UNIXODBC_LIBRARY_DIR')) odbclib = 'odbc' def get_extension_modules(): extension_modules = [] """ Extension module which is actually a plain C++ 
library without Python bindings """ turbodbc_sources = _get_source_files('cpp_odbc') + _get_source_files('turbodbc') turbodbc_library = Extension('libturbodbc', sources=turbodbc_sources, include_dirs=include_dirs, extra_compile_args=extra_compile_args, extra_link_args=base_library_link_args, libraries=[odbclib], library_dirs=library_dirs) if sys.platform == "win32": turbodbc_libs = [] else: turbodbc_libs = [_get_turbodbc_libname()] extension_modules.append(turbodbc_library) """ An extension module which contains the main Python bindings for turbodbc """ turbodbc_python_sources = _get_source_files('turbodbc_python') if sys.platform == "win32": turbodbc_python_sources = turbodbc_sources + turbodbc_python_sources turbodbc_python = Extension('turbodbc_intern', sources=turbodbc_python_sources, include_dirs=include_dirs, extra_compile_args=extra_compile_args + hidden_visibility_args, libraries=[odbclib] + turbodbc_libs, extra_link_args=python_module_link_args, library_dirs=library_dirs) extension_modules.append(turbodbc_python) """ An extension module which contains Python bindings which require numpy support to work. Not included in the standard Python bindings so this can stay optional. """ if _has_numpy_headers(): import numpy turbodbc_numpy_sources = _get_source_files('turbodbc_numpy') if sys.platform == "win32": turbodbc_numpy_sources = turbodbc_sources + turbodbc_numpy_sources turbodbc_numpy = Extension('turbodbc_numpy_support', sources=turbodbc_numpy_sources, include_dirs=include_dirs + [numpy.get_include()], extra_compile_args=extra_compile_args + hidden_visibility_args, libraries=[odbclib] + turbodbc_libs, extra_link_args=python_module_link_args, library_dirs=library_dirs) extension_modules.append(turbodbc_numpy) """ An extension module which contains Python bindings which require Apache Arrow support to work. Not included in the standard Python bindings so this can stay optional. """ if _has_arrow_headers(): import pyarrow pyarrow_location = os.path.dirname(pyarrow.__file__) # For now, assume that we build against bundled pyarrow releases. 
pyarrow_include_dir = os.path.join(pyarrow_location, 'include') turbodbc_arrow_sources = _get_source_files('turbodbc_arrow') pyarrow_module_link_args = list(python_module_link_args) if sys.platform == "win32": turbodbc_arrow_sources = turbodbc_sources + turbodbc_arrow_sources elif sys.platform == "darwin": pyarrow_module_link_args.append('-Wl,-rpath,@loader_path/pyarrow') else: pyarrow_module_link_args.append("-Wl,-rpath,$ORIGIN/pyarrow") turbodbc_arrow = Extension('turbodbc_arrow_support', sources=turbodbc_arrow_sources, include_dirs=include_dirs + [pyarrow_include_dir], extra_compile_args=extra_compile_args + hidden_visibility_args, libraries=[odbclib, 'arrow', 'arrow_python'] + turbodbc_libs, extra_link_args=pyarrow_module_link_args, library_dirs=library_dirs + [pyarrow_location]) extension_modules.append(turbodbc_arrow) return extension_modules here = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(here, 'README.md')) as f: long_description = f.read() setup(name = 'turbodbc', version = '4.1.2', description = 'turbodbc is a Python DB API 2.0 compatible ODBC driver', long_description=long_description, long_description_content_type='text/markdown', include_package_data = True, url = 'https://github.com/blue-yonder/turbodbc', author='Michael Koenig', author_email = '[email protected]', packages = ['turbodbc'], extras_require={ 'arrow': ['pyarrow>=0.15,<2.0'], 'numpy': 'numpy>=1.16.0' }, classifiers = ['Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: POSIX :: Linux', 'Operating System :: MacOS :: MacOS X', 'Operating System :: Microsoft :: Windows', 'Programming Language :: C++', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: Implementation :: CPython', 'Topic :: Database'], ext_modules = get_extension_modules(), install_requires=['pybind11>=2.2.0'] )
[]
[]
[ "BOOST_ROOT", "UNIXODBC_INCLUDE_DIR", "UNIXODBC_LIBRARY_DIR" ]
[]
["BOOST_ROOT", "UNIXODBC_INCLUDE_DIR", "UNIXODBC_LIBRARY_DIR"]
python
3
0
test/functional/test_framework/test_framework.py
#!/usr/bin/env python3 # Copyright (c) 2014-2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Base class for RPC testing.""" import configparser from enum import Enum import argparse import logging import os import pdb import random import re import shutil import subprocess import sys import tempfile import time from typing import List from .authproxy import JSONRPCException from . import coverage from .p2p import NetworkThread from .test_node import TestNode from .util import ( MAX_NODES, PortSeed, assert_equal, check_json_precision, get_datadir_path, initialize_datadir, p2p_port, wait_until_helper, ) class TestStatus(Enum): PASSED = 1 FAILED = 2 SKIPPED = 3 TEST_EXIT_PASSED = 0 TEST_EXIT_FAILED = 1 TEST_EXIT_SKIPPED = 77 TMPDIR_PREFIX = "bitcoin_func_test_" class SkipTest(Exception): """This exception is raised to skip a test""" def __init__(self, message): self.message = message class BitcoinTestMetaClass(type): """Metaclass for BitcoinTestFramework. Ensures that any attempt to register a subclass of `BitcoinTestFramework` adheres to a standard whereby the subclass overrides `set_test_params` and `run_test` but DOES NOT override either `__init__` or `main`. If any of those standards are violated, a ``TypeError`` is raised.""" def __new__(cls, clsname, bases, dct): if not clsname == 'BitcoinTestFramework': if not ('run_test' in dct and 'set_test_params' in dct): raise TypeError("BitcoinTestFramework subclasses must override " "'run_test' and 'set_test_params'") if '__init__' in dct or 'main' in dct: raise TypeError("BitcoinTestFramework subclasses may not override " "'__init__' or 'main'") return super().__new__(cls, clsname, bases, dct) class BitcoinTestFramework(metaclass=BitcoinTestMetaClass): """Base class for a bitcoin test script. Individual bitcoin test scripts should subclass this class and override the set_test_params() and run_test() methods. Individual tests can also override the following methods to customize the test setup: - add_options() - setup_chain() - setup_network() - setup_nodes() The __init__() and main() methods should not be overridden. This class also contains various public and private helper methods.""" def __init__(self): """Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method""" self.chain: str = 'regtest' self.setup_clean_chain: bool = False self.nodes: List[TestNode] = [] self.network_thread = None self.rpc_timeout = 60 # Wait for up to 60 seconds for the RPC server to respond self.supports_cli = True self.bind_to_localhost_only = True self.parse_args() self.default_wallet_name = "default_wallet" if self.options.descriptors else "" self.wallet_data_filename = "wallet.dat" # Optional list of wallet names that can be set in set_test_params to # create and import keys to. If unset, default is len(nodes) * # [default_wallet_name]. If wallet names are None, wallet creation is # skipped. If list is truncated, wallet creation is skipped and keys # are not imported. self.wallet_names = None # By default the wallet is not required. Set to true by skip_if_no_wallet(). # When False, we ignore wallet_names regardless of what it is. 
self.requires_wallet = False self.set_test_params() assert self.wallet_names is None or len(self.wallet_names) <= self.num_nodes if self.options.timeout_factor == 0 : self.options.timeout_factor = 99999 self.rpc_timeout = int(self.rpc_timeout * self.options.timeout_factor) # optionally, increase timeout by a factor def main(self): """Main function. This should not be overridden by the subclass test scripts.""" assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()" try: self.setup() self.run_test() except JSONRPCException: self.log.exception("JSONRPC error") self.success = TestStatus.FAILED except SkipTest as e: self.log.warning("Test Skipped: %s" % e.message) self.success = TestStatus.SKIPPED except AssertionError: self.log.exception("Assertion failed") self.success = TestStatus.FAILED except KeyError: self.log.exception("Key error") self.success = TestStatus.FAILED except subprocess.CalledProcessError as e: self.log.exception("Called Process failed with '{}'".format(e.output)) self.success = TestStatus.FAILED except Exception: self.log.exception("Unexpected exception caught during testing") self.success = TestStatus.FAILED except KeyboardInterrupt: self.log.warning("Exiting after keyboard interrupt") self.success = TestStatus.FAILED finally: exit_code = self.shutdown() sys.exit(exit_code) def parse_args(self): previous_releases_path = os.getenv("PREVIOUS_RELEASES_DIR") or os.getcwd() + "/releases" parser = argparse.ArgumentParser(usage="%(prog)s [options]") parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true", help="Leave bitcoinds and test.* datadir on exit or error") parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true", help="Don't stop bitcoinds after the test execution") parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"), help="Directory for caching pregenerated datadirs (default: %(default)s)") parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs") parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO", help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. 
Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.") parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true", help="Print out all RPC calls as they are made") parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int, help="The seed to use for assigning port numbers (default: current process id)") parser.add_argument("--previous-releases", dest="prev_releases", action="store_true", default=os.path.isdir(previous_releases_path) and bool(os.listdir(previous_releases_path)), help="Force test of previous releases (default: %(default)s)") parser.add_argument("--coveragedir", dest="coveragedir", help="Write tested RPC commands into this directory") parser.add_argument("--configfile", dest="configfile", default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../config.ini"), help="Location of the test framework config file (default: %(default)s)") parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true", help="Attach a python debugger if test fails") parser.add_argument("--usecli", dest="usecli", default=False, action="store_true", help="use bitcoin-cli instead of RPC for all commands") parser.add_argument("--perf", dest="perf", default=False, action="store_true", help="profile running nodes with perf for the duration of the test") parser.add_argument("--valgrind", dest="valgrind", default=False, action="store_true", help="run nodes under the valgrind memory error detector: expect at least a ~10x slowdown, valgrind 3.14 or later required") parser.add_argument("--randomseed", type=int, help="set a random seed for deterministically reproducing a previous test run") parser.add_argument('--timeout-factor', dest="timeout_factor", type=float, default=1.0, help='adjust test timeouts by a factor. Setting it to 0 disables all timeouts') group = parser.add_mutually_exclusive_group() group.add_argument("--descriptors", action='store_const', const=True, help="Run test using a descriptor wallet", dest='descriptors') group.add_argument("--legacy-wallet", action='store_const', const=False, help="Run test using legacy wallets", dest='descriptors') self.add_options(parser) self.options = parser.parse_args() self.options.previous_releases_path = previous_releases_path config = configparser.ConfigParser() config.read_file(open(self.options.configfile)) self.config = config if self.options.descriptors is None: # Prefer BDB unless it isn't available if self.is_bdb_compiled(): self.options.descriptors = False elif self.is_sqlite_compiled(): self.options.descriptors = True else: # If neither are compiled, tests requiring a wallet will be skipped and the value of self.options.descriptors won't matter # It still needs to exist and be None in order for tests to work however. 
self.options.descriptors = None def setup(self): """Call this method to start up the test framework object with options set.""" PortSeed.n = self.options.port_seed check_json_precision() self.options.cachedir = os.path.abspath(self.options.cachedir) config = self.config fname_bitcoind = os.path.join( config["environment"]["BUILDDIR"], "src", "bitcoind" + config["environment"]["EXEEXT"], ) fname_bitcoincli = os.path.join( config["environment"]["BUILDDIR"], "src", "bitcoin-cli" + config["environment"]["EXEEXT"], ) self.options.bitcoind = os.getenv("BITCOIND", default=fname_bitcoind) self.options.bitcoincli = os.getenv("BITCOINCLI", default=fname_bitcoincli) os.environ['PATH'] = os.pathsep.join([ os.path.join(config['environment']['BUILDDIR'], 'src'), os.path.join(config['environment']['BUILDDIR'], 'src', 'qt'), os.environ['PATH'] ]) # Set up temp directory and start logging if self.options.tmpdir: self.options.tmpdir = os.path.abspath(self.options.tmpdir) os.makedirs(self.options.tmpdir, exist_ok=False) else: self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX) self._start_logging() # Seed the PRNG. Note that test runs are reproducible if and only if # a single thread accesses the PRNG. For more information, see # https://docs.python.org/3/library/random.html#notes-on-reproducibility. # The network thread shouldn't access random. If we need to change the # network thread to access randomness, it should instantiate its own # random.Random object. seed = self.options.randomseed if seed is None: seed = random.randrange(sys.maxsize) else: self.log.debug("User supplied random seed {}".format(seed)) random.seed(seed) self.log.debug("PRNG seed is: {}".format(seed)) self.log.debug('Setting up network thread') self.network_thread = NetworkThread() self.network_thread.start() if self.options.usecli: if not self.supports_cli: raise SkipTest("--usecli specified but test does not support using CLI") self.skip_if_no_cli() self.skip_test_if_missing_module() self.setup_chain() self.setup_network() self.success = TestStatus.PASSED def shutdown(self): """Call this method to shut down the test framework object.""" if self.success == TestStatus.FAILED and self.options.pdbonfailure: print("Testcase failed. Attaching python debugger. Enter ? for help") pdb.set_trace() self.log.debug('Closing down network thread') self.network_thread.close() if not self.options.noshutdown: self.log.info("Stopping nodes") if self.nodes: self.stop_nodes() else: for node in self.nodes: node.cleanup_on_exit = False self.log.info("Note: bitcoinds were not stopped and may still be running") should_clean_up = ( not self.options.nocleanup and not self.options.noshutdown and self.success != TestStatus.FAILED and not self.options.perf ) if should_clean_up: self.log.info("Cleaning up {} on exit".format(self.options.tmpdir)) cleanup_tree_on_exit = True elif self.options.perf: self.log.warning("Not cleaning up dir {} due to perf data".format(self.options.tmpdir)) cleanup_tree_on_exit = False else: self.log.warning("Not cleaning up dir {}".format(self.options.tmpdir)) cleanup_tree_on_exit = False if self.success == TestStatus.PASSED: self.log.info("Tests successful") exit_code = TEST_EXIT_PASSED elif self.success == TestStatus.SKIPPED: self.log.info("Test skipped") exit_code = TEST_EXIT_SKIPPED else: self.log.error("Test failed. 
Test logging available at %s/test_framework.log", self.options.tmpdir) self.log.error("") self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir)) self.log.error("") self.log.error("If this failure happened unexpectedly or intermittently, please file a bug and provide a link or upload of the combined log.") self.log.error(self.config['environment']['PACKAGE_BUGREPORT']) self.log.error("") exit_code = TEST_EXIT_FAILED # Logging.shutdown will not remove stream- and filehandlers, so we must # do it explicitly. Handlers are removed so the next test run can apply # different log handler settings. # See: https://docs.python.org/3/library/logging.html#logging.shutdown for h in list(self.log.handlers): h.flush() h.close() self.log.removeHandler(h) rpc_logger = logging.getLogger("BitcoinRPC") for h in list(rpc_logger.handlers): h.flush() rpc_logger.removeHandler(h) if cleanup_tree_on_exit: shutil.rmtree(self.options.tmpdir) self.nodes.clear() return exit_code # Methods to override in subclass test scripts. def set_test_params(self): """Tests must override this method to change default values for number of nodes, topology, etc""" raise NotImplementedError def add_options(self, parser): """Override this method to add command-line options to the test""" pass def skip_test_if_missing_module(self): """Override this method to skip a test if a module is not compiled""" pass def setup_chain(self): """Override this method to customize blockchain setup""" self.log.info("Initializing test directory " + self.options.tmpdir) if self.setup_clean_chain: self._initialize_chain_clean() else: self._initialize_chain() def setup_network(self): """Override this method to customize test network topology""" self.setup_nodes() # Connect the nodes as a "chain". This allows us # to split the network between nodes 1 and 2 to get # two halves that can work on competing chains. # # Topology looks like this: # node0 <-- node1 <-- node2 <-- node3 # # If all nodes are in IBD (clean chain from genesis), node0 is assumed to be the source of blocks (miner). To # ensure block propagation, all nodes will establish outgoing connections toward node0. # See fPreferredDownload in net_processing. # # If further outbound connections are needed, they can be added at the beginning of the test with e.g. # self.connect_nodes(1, 2) for i in range(self.num_nodes - 1): self.connect_nodes(i + 1, i) self.sync_all() def setup_nodes(self): """Override this method to customize test node setup""" extra_args = [[]] * self.num_nodes if hasattr(self, "extra_args"): extra_args = self.extra_args self.add_nodes(self.num_nodes, extra_args) self.start_nodes() if self.requires_wallet: self.import_deterministic_coinbase_privkeys() if not self.setup_clean_chain: for n in self.nodes: assert_equal(n.getblockchaininfo()["blocks"], 199) # To ensure that all nodes are out of IBD, the most recent block # must have a timestamp not too old (see IsInitialBlockDownload()). 
self.log.debug('Generate a block with current time') block_hash = self.nodes[0].generate(1)[0] block = self.nodes[0].getblock(blockhash=block_hash, verbosity=0) for n in self.nodes: n.submitblock(block) chain_info = n.getblockchaininfo() assert_equal(chain_info["blocks"], 200) assert_equal(chain_info["initialblockdownload"], False) def import_deterministic_coinbase_privkeys(self): for i in range(self.num_nodes): self.init_wallet(i) def init_wallet(self, i): wallet_name = self.default_wallet_name if self.wallet_names is None else self.wallet_names[i] if i < len(self.wallet_names) else False if wallet_name is not False: n = self.nodes[i] if wallet_name is not None: n.createwallet(wallet_name=wallet_name, descriptors=self.options.descriptors, load_on_startup=True) n.importprivkey(privkey=n.get_deterministic_priv_key().key, label='coinbase') def run_test(self): """Tests must override this method to define test logic""" raise NotImplementedError # Public helper methods. These can be accessed by the subclass test scripts. def add_nodes(self, num_nodes: int, extra_args=None, *, rpchost=None, binary=None, binary_cli=None, versions=None): """Instantiate TestNode objects. Should only be called once after the nodes have been specified in set_test_params().""" def get_bin_from_version(version, bin_name, bin_default): if not version: return bin_default return os.path.join( self.options.previous_releases_path, re.sub( r'\.0$', '', # remove trailing .0 for point releases 'v{}.{}.{}.{}'.format( (version % 100000000) // 1000000, (version % 1000000) // 10000, (version % 10000) // 100, (version % 100) // 1, ), ), 'bin', bin_name, ) if self.bind_to_localhost_only: extra_confs = [["bind=127.0.0.1"]] * num_nodes else: extra_confs = [[]] * num_nodes if extra_args is None: extra_args = [[]] * num_nodes if versions is None: versions = [None] * num_nodes if binary is None: binary = [get_bin_from_version(v, 'bitcoind', self.options.bitcoind) for v in versions] if binary_cli is None: binary_cli = [get_bin_from_version(v, 'bitcoin-cli', self.options.bitcoincli) for v in versions] assert_equal(len(extra_confs), num_nodes) assert_equal(len(extra_args), num_nodes) assert_equal(len(versions), num_nodes) assert_equal(len(binary), num_nodes) assert_equal(len(binary_cli), num_nodes) for i in range(num_nodes): test_node_i = TestNode( i, get_datadir_path(self.options.tmpdir, i), chain=self.chain, rpchost=rpchost, timewait=self.rpc_timeout, timeout_factor=self.options.timeout_factor, bitcoind=binary[i], bitcoin_cli=binary_cli[i], version=versions[i], coverage_dir=self.options.coveragedir, cwd=self.options.tmpdir, extra_conf=extra_confs[i], extra_args=extra_args[i], use_cli=self.options.usecli, start_perf=self.options.perf, use_valgrind=self.options.valgrind, descriptors=self.options.descriptors, ) self.nodes.append(test_node_i) if not test_node_i.version_is_at_least(170000): # adjust conf for pre 17 conf_file = test_node_i.bitcoinconf with open(conf_file, 'r', encoding='utf8') as conf: conf_data = conf.read() with open(conf_file, 'w', encoding='utf8') as conf: conf.write(conf_data.replace('[regtest]', '')) def start_node(self, i, *args, **kwargs): """Start a bitcoind""" node = self.nodes[i] node.start(*args, **kwargs) node.wait_for_rpc_connection() if self.options.coveragedir is not None: coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc) def start_nodes(self, extra_args=None, *args, **kwargs): """Start multiple bitcoinds""" if extra_args is None: extra_args = [None] * self.num_nodes 
assert_equal(len(extra_args), self.num_nodes) try: for i, node in enumerate(self.nodes): node.start(extra_args[i], *args, **kwargs) for node in self.nodes: node.wait_for_rpc_connection() except: # If one node failed to start, stop the others self.stop_nodes() raise if self.options.coveragedir is not None: for node in self.nodes: coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc) def stop_node(self, i, expected_stderr='', wait=0): """Stop a bitcoind test node""" self.nodes[i].stop_node(expected_stderr, wait=wait) def stop_nodes(self, wait=0): """Stop multiple bitcoind test nodes""" for node in self.nodes: # Issue RPC to stop nodes node.stop_node(wait=wait, wait_until_stopped=False) for node in self.nodes: # Wait for nodes to stop node.wait_until_stopped() def restart_node(self, i, extra_args=None): """Stop and start a test node""" self.stop_node(i) self.start_node(i, extra_args) def wait_for_node_exit(self, i, timeout): self.nodes[i].process.wait(timeout) def connect_nodes(self, a, b): def connect_nodes_helper(from_connection, node_num): ip_port = "127.0.0.1:" + str(p2p_port(node_num)) from_connection.addnode(ip_port, "onetry") # poll until version handshake complete to avoid race conditions # with transaction relaying # See comments in net_processing: # * Must have a version message before anything else # * Must have a verack message before anything else wait_until_helper(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo())) wait_until_helper(lambda: all(peer['bytesrecv_per_msg'].pop('verack', 0) == 24 for peer in from_connection.getpeerinfo())) connect_nodes_helper(self.nodes[a], b) def disconnect_nodes(self, a, b): def disconnect_nodes_helper(from_connection, node_num): def get_peer_ids(): result = [] for peer in from_connection.getpeerinfo(): if "testnode{}".format(node_num) in peer['subver']: result.append(peer['id']) return result peer_ids = get_peer_ids() if not peer_ids: self.log.warning("disconnect_nodes: {} and {} were not connected".format( from_connection.index, node_num, )) return for peer_id in peer_ids: try: from_connection.disconnectnode(nodeid=peer_id) except JSONRPCException as e: # If this node is disconnected between calculating the peer id # and issuing the disconnect, don't worry about it. # This avoids a race condition if we're mass-disconnecting peers. if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED raise # wait to disconnect wait_until_helper(lambda: not get_peer_ids(), timeout=5) disconnect_nodes_helper(self.nodes[a], b) def split_network(self): """ Split the network of four nodes into nodes 0/1 and 2/3. """ self.disconnect_nodes(1, 2) self.sync_all(self.nodes[:2]) self.sync_all(self.nodes[2:]) def join_network(self): """ Join the (previously split) network halves together. """ self.connect_nodes(1, 2) self.sync_all() def sync_blocks(self, nodes=None, wait=1, timeout=60): """ Wait until everybody has the same tip. sync_blocks needs to be called with an rpc_connections set that has least one node already synced to the latest, stable tip, otherwise there's a chance it might return before all nodes are stably synced. 
""" rpc_connections = nodes or self.nodes timeout = int(timeout * self.options.timeout_factor) stop_time = time.time() + timeout while time.time() <= stop_time: best_hash = [x.getbestblockhash() for x in rpc_connections] if best_hash.count(best_hash[0]) == len(rpc_connections): return # Check that each peer has at least one connection assert (all([len(x.getpeerinfo()) for x in rpc_connections])) time.sleep(wait) raise AssertionError("Block sync timed out after {}s:{}".format( timeout, "".join("\n {!r}".format(b) for b in best_hash), )) def sync_mempools(self, nodes=None, wait=1, timeout=60, flush_scheduler=True): """ Wait until everybody has the same transactions in their memory pools """ rpc_connections = nodes or self.nodes timeout = int(timeout * self.options.timeout_factor) stop_time = time.time() + timeout while time.time() <= stop_time: pool = [set(r.getrawmempool()) for r in rpc_connections] if pool.count(pool[0]) == len(rpc_connections): if flush_scheduler: for r in rpc_connections: r.syncwithvalidationinterfacequeue() return # Check that each peer has at least one connection assert (all([len(x.getpeerinfo()) for x in rpc_connections])) time.sleep(wait) raise AssertionError("Mempool sync timed out after {}s:{}".format( timeout, "".join("\n {!r}".format(m) for m in pool), )) def sync_all(self, nodes=None): self.sync_blocks(nodes) self.sync_mempools(nodes) def wait_until(self, test_function, timeout=60): return wait_until_helper(test_function, timeout=timeout, timeout_factor=self.options.timeout_factor) # Private helper methods. These should not be accessed by the subclass test scripts. def _start_logging(self): # Add logger and logging handlers self.log = logging.getLogger('TestFramework') self.log.setLevel(logging.DEBUG) # Create file handler to log all messages fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log', encoding='utf-8') fh.setLevel(logging.DEBUG) # Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel. ch = logging.StreamHandler(sys.stdout) # User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper() ch.setLevel(ll) # Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted) formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S') formatter.converter = time.gmtime fh.setFormatter(formatter) ch.setFormatter(formatter) # add the handlers to the logger self.log.addHandler(fh) self.log.addHandler(ch) if self.options.trace_rpc: rpc_logger = logging.getLogger("BitcoinRPC") rpc_logger.setLevel(logging.DEBUG) rpc_handler = logging.StreamHandler(sys.stdout) rpc_handler.setLevel(logging.DEBUG) rpc_logger.addHandler(rpc_handler) def _initialize_chain(self): """Initialize a pre-mined blockchain for use by the test. 
Create a cache of a 199-block-long chain Afterward, create num_nodes copies from the cache.""" CACHE_NODE_ID = 0 # Use node 0 to create the cache for all other nodes cache_node_dir = get_datadir_path(self.options.cachedir, CACHE_NODE_ID) assert self.num_nodes <= MAX_NODES if not os.path.isdir(cache_node_dir): self.log.debug("Creating cache directory {}".format(cache_node_dir)) initialize_datadir(self.options.cachedir, CACHE_NODE_ID, self.chain) self.nodes.append( TestNode( CACHE_NODE_ID, cache_node_dir, chain=self.chain, extra_conf=["bind=127.0.0.1"], extra_args=['-disablewallet'], rpchost=None, timewait=self.rpc_timeout, timeout_factor=self.options.timeout_factor, bitcoind=self.options.bitcoind, bitcoin_cli=self.options.bitcoincli, coverage_dir=None, cwd=self.options.tmpdir, descriptors=self.options.descriptors, )) self.start_node(CACHE_NODE_ID) cache_node = self.nodes[CACHE_NODE_ID] # Wait for RPC connections to be ready cache_node.wait_for_rpc_connection() # Set a time in the past, so that blocks don't end up in the future cache_node.setmocktime(cache_node.getblockheader(cache_node.getbestblockhash())['time']) # Create a 199-block-long chain; each of the 4 first nodes # gets 25 mature blocks and 25 immature. # The 4th node gets only 24 immature blocks so that the very last # block in the cache does not age too much (have an old tip age). # This is needed so that we are out of IBD when the test starts, # see the tip age check in IsInitialBlockDownload(). for i in range(8): cache_node.generatetoaddress( nblocks=25 if i != 7 else 24, address=TestNode.PRIV_KEYS[i % 4].address, ) assert_equal(cache_node.getblockchaininfo()["blocks"], 199) # Shut it down, and clean up cache directories: self.stop_nodes() self.nodes = [] def cache_path(*paths): return os.path.join(cache_node_dir, self.chain, *paths) os.rmdir(cache_path('wallets')) # Remove empty wallets dir for entry in os.listdir(cache_path()): if entry not in ['chainstate', 'blocks']: # Only keep chainstate and blocks folder os.remove(cache_path(entry)) for i in range(self.num_nodes): self.log.debug("Copy cache directory {} to node {}".format(cache_node_dir, i)) to_dir = get_datadir_path(self.options.tmpdir, i) shutil.copytree(cache_node_dir, to_dir) initialize_datadir(self.options.tmpdir, i, self.chain) # Overwrite port/rpcport in bitcoin.conf def _initialize_chain_clean(self): """Initialize empty blockchain for use by the test. Create an empty blockchain and num_nodes wallets. 
Useful if a test case wants complete control over initialization.""" for i in range(self.num_nodes): initialize_datadir(self.options.tmpdir, i, self.chain) def skip_if_no_py3_zmq(self): """Attempt to import the zmq package and skip the test if the import fails.""" try: import zmq # noqa except ImportError: raise SkipTest("python3-zmq module not available.") def skip_if_no_bitcoind_zmq(self): """Skip the running test if bitcoind has not been compiled with zmq support.""" if not self.is_zmq_compiled(): raise SkipTest("bitcoind has not been built with zmq enabled.") def skip_if_no_wallet(self): """Skip the running test if wallet has not been compiled.""" self.requires_wallet = True if not self.is_wallet_compiled(): raise SkipTest("wallet has not been compiled.") if self.options.descriptors: self.skip_if_no_sqlite() else: self.skip_if_no_bdb() def skip_if_no_sqlite(self): """Skip the running test if sqlite has not been compiled.""" if not self.is_sqlite_compiled(): raise SkipTest("sqlite has not been compiled.") def skip_if_no_bdb(self): """Skip the running test if BDB has not been compiled.""" if not self.is_bdb_compiled(): raise SkipTest("BDB has not been compiled.") def skip_if_no_wallet_tool(self): """Skip the running test if bitcoin-wallet has not been compiled.""" if not self.is_wallet_tool_compiled(): raise SkipTest("bitcoin-wallet has not been compiled") def skip_if_no_cli(self): """Skip the running test if bitcoin-cli has not been compiled.""" if not self.is_cli_compiled(): raise SkipTest("bitcoin-cli has not been compiled.") def skip_if_no_previous_releases(self): """Skip the running test if previous releases are not available.""" if not self.has_previous_releases(): raise SkipTest("previous releases not available or disabled") def has_previous_releases(self): """Checks whether previous releases are present and enabled.""" if not os.path.isdir(self.options.previous_releases_path): if self.options.prev_releases: raise AssertionError("Force test of previous releases but releases missing: {}".format( self.options.previous_releases_path)) return self.options.prev_releases def skip_if_no_external_signer(self): """Skip the running test if external signer support has not been compiled.""" if not self.is_external_signer_compiled(): raise SkipTest("external signer support has not been compiled.") def is_cli_compiled(self): """Checks whether bitcoin-cli was compiled.""" return self.config["components"].getboolean("ENABLE_CLI") def is_external_signer_compiled(self): """Checks whether external signer support was compiled.""" return self.config["components"].getboolean("ENABLE_EXTERNAL_SIGNER") def is_wallet_compiled(self): """Checks whether the wallet module was compiled.""" return self.config["components"].getboolean("ENABLE_WALLET") def is_wallet_tool_compiled(self): """Checks whether bitcoin-wallet was compiled.""" return self.config["components"].getboolean("ENABLE_WALLET_TOOL") def is_zmq_compiled(self): """Checks whether the zmq module was compiled.""" return self.config["components"].getboolean("ENABLE_ZMQ") def is_sqlite_compiled(self): """Checks whether the wallet module was compiled with Sqlite support.""" return self.config["components"].getboolean("USE_SQLITE") def is_bdb_compiled(self): """Checks whether the wallet module was compiled with BDB support.""" return self.config["components"].getboolean("USE_BDB")
[]
[]
[ "BITCOINCLI", "PREVIOUS_RELEASES_DIR", "PATH", "BITCOIND" ]
[]
["BITCOINCLI", "PREVIOUS_RELEASES_DIR", "PATH", "BITCOIND"]
python
4
0
services/billing-aws/src/main/java/com/epam/dlab/configuration/FreeMarkerConfig.java
/***************************************************************************
Copyright (c) 2016, EPAM SYSTEMS INC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
****************************************************************************/

package com.epam.dlab.configuration;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

import com.google.common.base.Throwables;

import freemarker.template.Configuration;
import freemarker.template.Template;
import freemarker.template.TemplateException;

/** Provides Apache FreeMarker the template engine for the billing configuration. */
public class FreeMarkerConfig {

    /** Create and return the input stream for the configuration file.
     * @param filename the name of configuration file.
     * @throws IOException
     */
    public InputStream getInputStream(final String filename) throws IOException {
        try {
            Configuration conf = new Configuration(Configuration.VERSION_2_3_22);
            Template template = new Template("billing-config", new FileReader(new File(filename)), conf);
            Map<String, Object> dataModel = getDataModel();

            ByteArrayOutputStream streamBuffer = new ByteArrayOutputStream();
            template.process(dataModel, new OutputStreamWriter(streamBuffer, StandardCharsets.UTF_8));
            byte[] buffer = streamBuffer.toByteArray();

            return new ByteArrayInputStream(buffer);
        } catch (TemplateException e) {
            throw Throwables.propagate(e);
        }
    }

    /** Create and return JVM and OS properties. */
    private Map<String, Object> getDataModel() {
        Map<String, Object> dataModel = new HashMap<>();
        Iterator<Object> sysProps = System.getProperties().keySet().iterator();
        while (sysProps.hasNext()) {
            String key = (String) sysProps.next();
            dataModel.put(key, System.getProperties().getProperty(key));
        }
        dataModel.putAll(System.getenv());
        dataModel.put("env", System.getenv());
        dataModel.put("sys", System.getProperties());
        return dataModel;
    }
}
[]
[]
[]
[]
[]
java
0
0
cmd/githubCreateIssue_generated.go
// Code generated by piper's step-generator. DO NOT EDIT. package cmd import ( "fmt" "os" "time" "github.com/SAP/jenkins-library/pkg/config" "github.com/SAP/jenkins-library/pkg/log" "github.com/SAP/jenkins-library/pkg/splunk" "github.com/SAP/jenkins-library/pkg/telemetry" "github.com/spf13/cobra" ) type githubCreateIssueOptions struct { APIURL string `json:"apiUrl,omitempty"` Body string `json:"body,omitempty"` BodyFilePath string `json:"bodyFilePath,omitempty"` Owner string `json:"owner,omitempty"` Repository string `json:"repository,omitempty"` Title string `json:"title,omitempty"` Token string `json:"token,omitempty"` } // GithubCreateIssueCommand Create a new GitHub issue. func GithubCreateIssueCommand() *cobra.Command { const STEP_NAME = "githubCreateIssue" metadata := githubCreateIssueMetadata() var stepConfig githubCreateIssueOptions var startTime time.Time var logCollector *log.CollectorHook var createGithubCreateIssueCmd = &cobra.Command{ Use: STEP_NAME, Short: "Create a new GitHub issue.", Long: `This step allows you to create a new GitHub issue. You will be able to use this step for example for regular jobs to report into your repository in case of new security findings.`, PreRunE: func(cmd *cobra.Command, _ []string) error { startTime = time.Now() log.SetStepName(STEP_NAME) log.SetVerbose(GeneralConfig.Verbose) path, _ := os.Getwd() fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path} log.RegisterHook(fatalHook) err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile) if err != nil { log.SetErrorCategory(log.ErrorConfiguration) return err } log.RegisterSecret(stepConfig.Token) if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 { sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID) log.RegisterHook(&sentryHook) } if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 { logCollector = &log.CollectorHook{CorrelationID: GeneralConfig.CorrelationID} log.RegisterHook(logCollector) } return nil }, Run: func(_ *cobra.Command, _ []string) { telemetryData := telemetry.CustomData{} telemetryData.ErrorCode = "1" handler := func() { config.RemoveVaultSecretFiles() telemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds()) telemetryData.ErrorCategory = log.GetErrorCategory().String() telemetry.Send(&telemetryData) if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 { splunk.Send(&telemetryData, logCollector) } } log.DeferExitHandler(handler) defer handler() telemetry.Initialize(GeneralConfig.NoTelemetry, STEP_NAME) if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 { splunk.Initialize(GeneralConfig.CorrelationID, GeneralConfig.HookConfig.SplunkConfig.Dsn, GeneralConfig.HookConfig.SplunkConfig.Token, GeneralConfig.HookConfig.SplunkConfig.Index, GeneralConfig.HookConfig.SplunkConfig.SendLogs) } githubCreateIssue(stepConfig, &telemetryData) telemetryData.ErrorCode = "0" log.Entry().Info("SUCCESS") }, } addGithubCreateIssueFlags(createGithubCreateIssueCmd, &stepConfig) return createGithubCreateIssueCmd } func addGithubCreateIssueFlags(cmd *cobra.Command, stepConfig *githubCreateIssueOptions) { cmd.Flags().StringVar(&stepConfig.APIURL, "apiUrl", `https://api.github.com`, "Set the GitHub API url.") cmd.Flags().StringVar(&stepConfig.Body, "body", os.Getenv("PIPER_body"), "Defines the content of the issue, e.g. 
using markdown syntax.") cmd.Flags().StringVar(&stepConfig.BodyFilePath, "bodyFilePath", os.Getenv("PIPER_bodyFilePath"), "Defines the path to a file containing the markdown content for the issue. This can be used instead of [`body`](#body)") cmd.Flags().StringVar(&stepConfig.Owner, "owner", os.Getenv("PIPER_owner"), "Name of the GitHub organization.") cmd.Flags().StringVar(&stepConfig.Repository, "repository", os.Getenv("PIPER_repository"), "Name of the GitHub repository.") cmd.Flags().StringVar(&stepConfig.Title, "title", os.Getenv("PIPER_title"), "Defines the title for the Issue.") cmd.Flags().StringVar(&stepConfig.Token, "token", os.Getenv("PIPER_token"), "GitHub personal access token as per https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line.") cmd.MarkFlagRequired("apiUrl") cmd.MarkFlagRequired("owner") cmd.MarkFlagRequired("repository") cmd.MarkFlagRequired("title") cmd.MarkFlagRequired("token") } // retrieve step metadata func githubCreateIssueMetadata() config.StepData { var theMetaData = config.StepData{ Metadata: config.StepMetadata{ Name: "githubCreateIssue", Aliases: []config.Alias{}, Description: "Create a new GitHub issue.", }, Spec: config.StepSpec{ Inputs: config.StepInputs{ Parameters: []config.StepParameters{ { Name: "apiUrl", ResourceRef: []config.ResourceReference{}, Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"}, Type: "string", Mandatory: true, Aliases: []config.Alias{{Name: "githubApiUrl"}}, }, { Name: "body", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS"}, Type: "string", Mandatory: false, Aliases: []config.Alias{}, }, { Name: "bodyFilePath", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS"}, Type: "string", Mandatory: false, Aliases: []config.Alias{}, }, { Name: "owner", ResourceRef: []config.ResourceReference{ { Name: "commonPipelineEnvironment", Param: "github/owner", }, }, Scope: []string{"PARAMETERS", "STAGES", "STEPS"}, Type: "string", Mandatory: true, Aliases: []config.Alias{{Name: "githubOrg"}}, }, { Name: "repository", ResourceRef: []config.ResourceReference{ { Name: "commonPipelineEnvironment", Param: "github/repository", }, }, Scope: []string{"PARAMETERS", "STAGES", "STEPS"}, Type: "string", Mandatory: true, Aliases: []config.Alias{{Name: "githubRepo"}}, }, { Name: "title", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS"}, Type: "string", Mandatory: true, Aliases: []config.Alias{}, }, { Name: "token", ResourceRef: []config.ResourceReference{ { Name: "githubTokenCredentialsId", Type: "secret", }, { Name: "", Paths: []string{"$(vaultPath)/github", "$(vaultBasePath)/$(vaultPipelineName)/github", "$(vaultBasePath)/GROUP-SECRETS/github"}, Type: "vaultSecret", }, }, Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"}, Type: "string", Mandatory: true, Aliases: []config.Alias{{Name: "githubToken"}, {Name: "access_token"}}, }, }, }, }, } return theMetaData }
[ "\"PIPER_body\"", "\"PIPER_bodyFilePath\"", "\"PIPER_owner\"", "\"PIPER_repository\"", "\"PIPER_title\"", "\"PIPER_token\"" ]
[]
[ "PIPER_title", "PIPER_body", "PIPER_token", "PIPER_bodyFilePath", "PIPER_repository", "PIPER_owner" ]
[]
["PIPER_title", "PIPER_body", "PIPER_token", "PIPER_bodyFilePath", "PIPER_repository", "PIPER_owner"]
go
6
0
basic-apps/env-vars/render-template.py
import sys, os, json, re
from flask import Flask, render_template

app = Flask(__name__)


@app.route("/")
def main():
    return render_template('environ.html', vars=sorted(os.environ.items()), json=json, re=re)


if __name__ == "__main__":
    app.run(host='0.0.0.0', port=int(os.environ.get('PORT', '8080')))
[]
[]
[ "PORT" ]
[]
["PORT"]
python
1
0
generic_store/manage.py
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys


def main():
    """Run administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'generic_store.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
[]
[]
[]
[]
[]
python
0
0
tests/integration/local/helpers.go
package tests

import (
    "context"
    "math/rand"
    "os"
    "path/filepath"
    "strings"
    "testing"
    "time"

    "get.porter.sh/plugin/kubernetes/pkg/kubernetes"
    portercontext "get.porter.sh/porter/pkg/context"
    "github.com/stretchr/testify/require"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    k8s "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/clientcmd"
)

type TestPlugin struct {
    *kubernetes.Plugin
    TestContext *portercontext.TestContext
}

// NewTestPlugin initializes a plugin test client, with the output buffered, and an in-memory file system.
func NewTestPlugin(t *testing.T) *TestPlugin {
    c := portercontext.NewTestContext(t)
    m := &TestPlugin{
        Plugin: &kubernetes.Plugin{
            Context: c.Context,
        },
        TestContext: c,
    }
    return m
}

// GenerateNamespaceName generates a random namespace name
func GenerateNamespaceName() string {
    rand.Seed(time.Now().UTC().UnixNano())
    bytes := make([]byte, 10)
    for i := 0; i < 10; i++ {
        bytes[i] = byte(97 + rand.Intn(26))
    }
    return string(bytes)
}

func GetKubernetesConfig(t *testing.T) *restclient.Config {
    var err error
    var config *restclient.Config
    var kubeconfigfile string
    if kubeconfigfile = os.Getenv("KUBECONFIG"); kubeconfigfile == "" {
        kubeconfigfile = filepath.Join(os.Getenv("HOME"), ".kube", "config")
    }
    if _, err = os.Stat(kubeconfigfile); err != nil {
        if os.IsNotExist(err) {
            // If the kubeconfig file does not exist then try in cluster config
            config, err = clientcmd.BuildConfigFromFlags("", "")
        }
    } else {
        config, err = clientcmd.BuildConfigFromFlags("", kubeconfigfile)
    }
    require.NoError(t, err)
    require.NotNil(t, config)
    return config
}

func CreateNamespace(t *testing.T) string {
    nsName := GenerateNamespaceName()
    clientSet, err := k8s.NewForConfig(GetKubernetesConfig(t))
    require.NoError(t, err)
    ns := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: nsName}, TypeMeta: metav1.TypeMeta{Kind: "Namespace"}}
    _, err = clientSet.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
    require.NoError(t, err)
    return nsName
}

func DeleteNamespace(t *testing.T, nsName string) string {
    clientSet, err := k8s.NewForConfig(GetKubernetesConfig(t))
    require.NoError(t, err)
    err = clientSet.CoreV1().Namespaces().Delete(context.Background(), nsName, metav1.DeleteOptions{})
    require.NoError(t, err)
    return nsName
}

func CreateSecret(t *testing.T, nsName string, key string, name string, value string) {
    clientSet, err := k8s.NewForConfig(GetKubernetesConfig(t))
    name = strings.ToLower(name)
    require.NoError(t, err)
    data := make(map[string][]byte, 1)
    data[key] = []byte(value)
    secret := &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: name}, Data: data}
    _, err = clientSet.CoreV1().Secrets(nsName).Create(context.Background(), secret, metav1.CreateOptions{})
    require.NoError(t, err)
}

func RunningInKubernetes() bool {
    return len(os.Getenv("KUBERNETES_SERVICE_HOST")) > 0
}
[ "\"KUBECONFIG\"", "\"HOME\"", "\"KUBERNETES_SERVICE_HOST\"" ]
[]
[ "KUBERNETES_SERVICE_HOST", "HOME", "KUBECONFIG" ]
[]
["KUBERNETES_SERVICE_HOST", "HOME", "KUBECONFIG"]
go
3
0
paddlex/interpret/core/_session_preparation.py
#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. # #Licensed under the Apache License, Version 2.0 (the "License"); #you may not use this file except in compliance with the License. #You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #Unless required by applicable law or agreed to in writing, software #distributed under the License is distributed on an "AS IS" BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #See the License for the specific language governing permissions and #limitations under the License. import os import os.path as osp import paddle.fluid as fluid import paddlex as pdx import numpy as np from paddle.fluid.param_attr import ParamAttr from paddlex.interpret.as_data_reader.readers import preprocess_image def gen_user_home(): if "HOME" in os.environ: home_path = os.environ["HOME"] if os.path.exists(home_path) and os.path.isdir(home_path): return home_path return os.path.expanduser('~') def paddle_get_fc_weights(var_name="fc_0.w_0"): fc_weights = fluid.global_scope().find_var(var_name).get_tensor() return np.array(fc_weights) def paddle_resize(extracted_features, outsize): resized_features = fluid.layers.resize_bilinear(extracted_features, outsize) return resized_features def compute_features_for_kmeans(data_content): root_path = gen_user_home() root_path = osp.join(root_path, '.paddlex') h_pre_models = osp.join(root_path, "pre_models") if not osp.exists(h_pre_models): if not osp.exists(root_path): os.makedirs(root_path) url = "https://bj.bcebos.com/paddlex/interpret/pre_models.tar.gz" pdx.utils.download_and_decompress(url, path=root_path) def conv_bn_layer(input, num_filters, filter_size, stride=1, groups=1, act=None, name=None, is_test=True, global_name=''): conv = fluid.layers.conv2d( input=input, num_filters=num_filters, filter_size=filter_size, stride=stride, padding=(filter_size - 1) // 2, groups=groups, act=None, param_attr=ParamAttr(name=global_name + name + "_weights"), bias_attr=False, name=global_name + name + '.conv2d.output.1') if name == "conv1": bn_name = "bn_" + name else: bn_name = "bn" + name[3:] return fluid.layers.batch_norm( input=conv, act=act, name=global_name + bn_name + '.output.1', param_attr=ParamAttr(global_name + bn_name + '_scale'), bias_attr=ParamAttr(global_name + bn_name + '_offset'), moving_mean_name=global_name + bn_name + '_mean', moving_variance_name=global_name + bn_name + '_variance', use_global_stats=is_test ) startup_prog = fluid.default_startup_program().clone(for_test=True) prog = fluid.Program() with fluid.program_guard(prog, startup_prog): with fluid.unique_name.guard(): image_op = fluid.data(name='image', shape=[None, 3, 224, 224], dtype='float32') conv = conv_bn_layer( input=image_op, num_filters=32, filter_size=3, stride=2, act='relu', name='conv1_1') conv = conv_bn_layer( input=conv, num_filters=32, filter_size=3, stride=1, act='relu', name='conv1_2') conv = conv_bn_layer( input=conv, num_filters=64, filter_size=3, stride=1, act='relu', name='conv1_3') extracted_features = conv resized_features = fluid.layers.resize_bilinear(extracted_features, image_op.shape[2:]) gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0)) place = fluid.CUDAPlace(gpu_id) # place = fluid.CPUPlace() exe = fluid.Executor(place) exe.run(startup_prog) fluid.io.load_persistables(exe, h_pre_models, prog) images = preprocess_image(data_content) # transpose to [N, 3, H, W], scaled to [0.0, 1.0] result = exe.run(prog, fetch_list=[resized_features], feed={'image': images}) 
return result[0][0]
[]
[]
[ "FLAGS_selected_gpus", "HOME" ]
[]
["FLAGS_selected_gpus", "HOME"]
python
2
0
students/K33401/Kunal_Shubham/lab2/homework_board-main/manage.py
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys


def main():
    """Run administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'homework_board.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
[]
[]
[]
[]
[]
python
0
0
config.py
import os


class Config:
    UPLOADED_PHOTOS_DEST = 'app/static/photos'
    SECRET_KEY = 'SECRET'
    SQLALCHEMY_TRACK_MODIFICATIONS = False

    # email configurations
    MAIL_SERVER = 'smtp.googlemail.com'
    MAIL_PORT = 587
    MAIL_USE_TLS = True
    MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
    MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")


class ProdConfig(Config):
    SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")


class DevConfig(Config):
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://manu:39906875@localhost/pitch'
    DEBUG = True


class TestConfig(Config):
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://manu:39906875@localhost/pitch_test'


config_options = {
    'development': DevConfig,
    'production': ProdConfig,
    'test': TestConfig
}
[]
[]
[ "MAIL_PASSWORD", "DATABASE_URL", "MAIL_USERNAME" ]
[]
["MAIL_PASSWORD", "DATABASE_URL", "MAIL_USERNAME"]
python
3
0
relay/relay_test.go
// Copyright 2019 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package relay import ( "bytes" "context" "fmt" "io/ioutil" "os" "path/filepath" "strconv" "testing" "time" "github.com/DATA-DOG/go-sqlmock" gmysql "github.com/go-mysql-org/go-mysql/mysql" "github.com/go-mysql-org/go-mysql/replication" . "github.com/pingcap/check" "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/parser" "github.com/pingcap/dm/dm/config" "github.com/pingcap/dm/pkg/binlog/event" "github.com/pingcap/dm/pkg/conn" "github.com/pingcap/dm/pkg/gtid" "github.com/pingcap/dm/pkg/log" "github.com/pingcap/dm/pkg/utils" "github.com/pingcap/dm/relay/reader" "github.com/pingcap/dm/relay/retry" "github.com/pingcap/dm/relay/transformer" "github.com/pingcap/dm/relay/writer" ) var _ = Suite(&testRelaySuite{}) func TestSuite(t *testing.T) { TestingT(t) } type testRelaySuite struct{} func (t *testRelaySuite) SetUpSuite(c *C) { c.Assert(log.InitLogger(&log.Config{}), IsNil) } func newRelayCfg(c *C, flavor string) *Config { dbCfg := getDBConfigForTest() return &Config{ EnableGTID: false, // position mode, so auto-positioning can work Flavor: flavor, RelayDir: c.MkDir(), ServerID: 12321, From: config.DBConfig{ Host: dbCfg.Host, Port: dbCfg.Port, User: dbCfg.User, Password: dbCfg.Password, }, ReaderRetry: retry.ReaderRetryConfig{ BackoffRollback: 200 * time.Millisecond, BackoffMax: 1 * time.Second, BackoffMin: 1 * time.Millisecond, BackoffJitter: true, BackoffFactor: 2, }, } } func getDBConfigForTest() config.DBConfig { host := os.Getenv("MYSQL_HOST") if host == "" { host = "127.0.0.1" } port, _ := strconv.Atoi(os.Getenv("MYSQL_PORT")) if port == 0 { port = 3306 } user := os.Getenv("MYSQL_USER") if user == "" { user = "root" } password := os.Getenv("MYSQL_PSWD") return config.DBConfig{ Host: host, Port: port, User: user, Password: password, } } // mockReader is used only for relay testing. type mockReader struct { result reader.Result err error } func (r *mockReader) Start() error { return nil } func (r *mockReader) Close() error { return nil } func (r *mockReader) GetEvent(ctx context.Context) (reader.Result, error) { select { case <-ctx.Done(): return reader.Result{}, ctx.Err() default: } return r.result, r.err } // mockWriter is used only for relay testing. 
type mockWriter struct { result writer.Result err error latestEvent *replication.BinlogEvent } func (w *mockWriter) Start() error { return nil } func (w *mockWriter) Close() error { return nil } func (w *mockWriter) Recover(ctx context.Context) (writer.RecoverResult, error) { return writer.RecoverResult{}, nil } func (w *mockWriter) WriteEvent(ev *replication.BinlogEvent) (writer.Result, error) { w.latestEvent = ev // hold it return w.result, w.err } func (w *mockWriter) Flush() error { return nil } func (t *testRelaySuite) TestTryRecoverLatestFile(c *C) { var ( uuid = "24ecd093-8cec-11e9-aa0d-0242ac170002" uuidWithSuffix = fmt.Sprintf("%s.000001", uuid) previousGTIDSetStr = "3ccc475b-2343-11e7-be21-6c0b84d59f30:1-14,53bfca22-690d-11e7-8a62-18ded7a37b78:1-495,406a3f61-690d-11e7-87c5-6c92bf46f384:123-456" latestGTIDStr1 = "3ccc475b-2343-11e7-be21-6c0b84d59f30:14" latestGTIDStr2 = "53bfca22-690d-11e7-8a62-18ded7a37b78:495" recoverGTIDSetStr = "3ccc475b-2343-11e7-be21-6c0b84d59f30:1-17,53bfca22-690d-11e7-8a62-18ded7a37b78:1-505,406a3f61-690d-11e7-87c5-6c92bf46f384:1-456" // 406a3f61-690d-11e7-87c5-6c92bf46f384:123-456 --> 406a3f61-690d-11e7-87c5-6c92bf46f384:1-456 greaterGITDSetStr = "3ccc475b-2343-11e7-be21-6c0b84d59f30:1-20,53bfca22-690d-11e7-8a62-18ded7a37b78:1-510,406a3f61-690d-11e7-87c5-6c92bf46f384:123-456" filename = "mysql-bin.000001" startPos = gmysql.Position{Name: filename, Pos: 123} parser2 = parser.New() relayCfg = newRelayCfg(c, gmysql.MySQLFlavor) r = NewRelay(relayCfg).(*Relay) ) c.Assert(failpoint.Enable("github.com/pingcap/dm/pkg/utils/GetGTIDPurged", `return("406a3f61-690d-11e7-87c5-6c92bf46f384:1-122")`), IsNil) //nolint:errcheck defer failpoint.Disable("github.com/pingcap/dm/pkg/utils/GetGTIDPurged") cfg := getDBConfigForTest() conn.InitMockDB(c) db, err := conn.DefaultDBProvider.Apply(cfg) c.Assert(err, IsNil) r.db = db c.Assert(r.Init(context.Background()), IsNil) // purge old relay dir f, err := os.Create(filepath.Join(r.cfg.RelayDir, "old_relay_log")) c.Assert(err, IsNil) f.Close() c.Assert(r.PurgeRelayDir(), IsNil) files, err := ioutil.ReadDir(r.cfg.RelayDir) c.Assert(err, IsNil) c.Assert(files, HasLen, 0) c.Assert(r.meta.Load(), IsNil) // no file specified, no need to recover c.Assert(r.tryRecoverLatestFile(context.Background(), parser2), IsNil) // save position into meta c.Assert(r.meta.AddDir(uuid, &startPos, nil, 0), IsNil) // relay log file does not exists, no need to recover c.Assert(r.tryRecoverLatestFile(context.Background(), parser2), IsNil) // use a generator to generate some binlog events previousGTIDSet, err := gtid.ParserGTID(relayCfg.Flavor, previousGTIDSetStr) c.Assert(err, IsNil) latestGTID1, err := gtid.ParserGTID(relayCfg.Flavor, latestGTIDStr1) c.Assert(err, IsNil) latestGTID2, err := gtid.ParserGTID(relayCfg.Flavor, latestGTIDStr2) c.Assert(err, IsNil) g, events, data := genBinlogEventsWithGTIDs(c, relayCfg.Flavor, previousGTIDSet, latestGTID1, latestGTID2) // write events into relay log file err = ioutil.WriteFile(filepath.Join(r.meta.Dir(), filename), data, 0o600) c.Assert(err, IsNil) // all events/transactions are complete, no need to recover c.Assert(r.tryRecoverLatestFile(context.Background(), parser2), IsNil) // now, we will update position/GTID set in meta to latest location in relay logs lastEvent := events[len(events)-1] pos := startPos pos.Pos = lastEvent.Header.LogPos t.verifyMetadata(c, r, uuidWithSuffix, pos, recoverGTIDSetStr, []string{uuidWithSuffix}) // write some invalid data into the relay log file f, err = 
os.OpenFile(filepath.Join(r.meta.Dir(), filename), os.O_WRONLY|os.O_APPEND, 0o600) c.Assert(err, IsNil) _, err = f.Write([]byte("invalid event data")) c.Assert(err, IsNil) f.Close() // write a greater GTID sets in meta greaterGITDSet, err := gtid.ParserGTID(relayCfg.Flavor, greaterGITDSetStr) c.Assert(err, IsNil) c.Assert(r.SaveMeta(startPos, greaterGITDSet), IsNil) // invalid data truncated, meta updated c.Assert(r.tryRecoverLatestFile(context.Background(), parser2), IsNil) _, latestPos := r.meta.Pos() c.Assert(latestPos, DeepEquals, gmysql.Position{Name: filename, Pos: g.LatestPos}) _, latestGTIDs := r.meta.GTID() recoverGTIDSet, err := gtid.ParserGTID(relayCfg.Flavor, recoverGTIDSetStr) c.Assert(err, IsNil) c.Assert(latestGTIDs.Equal(recoverGTIDSet), IsTrue) // verifyMetadata is not enough // no relay log file need to recover c.Assert(r.SaveMeta(minCheckpoint, latestGTIDs), IsNil) c.Assert(r.tryRecoverLatestFile(context.Background(), parser2), IsNil) _, latestPos = r.meta.Pos() c.Assert(latestPos, DeepEquals, minCheckpoint) _, latestGTIDs = r.meta.GTID() c.Assert(latestGTIDs.Contain(g.LatestGTID), IsTrue) } func (t *testRelaySuite) TestTryRecoverMeta(c *C) { var ( uuid = "24ecd093-8cec-11e9-aa0d-0242ac170002" previousGTIDSetStr = "3ccc475b-2343-11e7-be21-6c0b84d59f30:1-14,53bfca22-690d-11e7-8a62-18ded7a37b78:1-495,406a3f61-690d-11e7-87c5-6c92bf46f384:123-456" latestGTIDStr1 = "3ccc475b-2343-11e7-be21-6c0b84d59f30:14" latestGTIDStr2 = "53bfca22-690d-11e7-8a62-18ded7a37b78:495" // if no @@gtid_purged, 406a3f61-690d-11e7-87c5-6c92bf46f384:123-456 should be not changed recoverGTIDSetStr = "3ccc475b-2343-11e7-be21-6c0b84d59f30:1-17,53bfca22-690d-11e7-8a62-18ded7a37b78:1-505,406a3f61-690d-11e7-87c5-6c92bf46f384:123-456" filename = "mysql-bin.000001" startPos = gmysql.Position{Name: filename, Pos: 123} parser2 = parser.New() relayCfg = newRelayCfg(c, gmysql.MySQLFlavor) r = NewRelay(relayCfg).(*Relay) ) cfg := getDBConfigForTest() conn.InitMockDB(c) db, err := conn.DefaultDBProvider.Apply(cfg) c.Assert(err, IsNil) r.db = db c.Assert(r.Init(context.Background()), IsNil) recoverGTIDSet, err := gtid.ParserGTID(relayCfg.Flavor, recoverGTIDSetStr) c.Assert(err, IsNil) c.Assert(r.meta.AddDir(uuid, &startPos, nil, 0), IsNil) c.Assert(r.meta.Load(), IsNil) // use a generator to generate some binlog events previousGTIDSet, err := gtid.ParserGTID(relayCfg.Flavor, previousGTIDSetStr) c.Assert(err, IsNil) latestGTID1, err := gtid.ParserGTID(relayCfg.Flavor, latestGTIDStr1) c.Assert(err, IsNil) latestGTID2, err := gtid.ParserGTID(relayCfg.Flavor, latestGTIDStr2) c.Assert(err, IsNil) g, _, data := genBinlogEventsWithGTIDs(c, relayCfg.Flavor, previousGTIDSet, latestGTID1, latestGTID2) // write events into relay log file err = ioutil.WriteFile(filepath.Join(r.meta.Dir(), filename), data, 0o600) c.Assert(err, IsNil) // write some invalid data into the relay log file to trigger a recover. f, err := os.OpenFile(filepath.Join(r.meta.Dir(), filename), os.O_WRONLY|os.O_APPEND, 0o600) c.Assert(err, IsNil) _, err = f.Write([]byte("invalid event data")) c.Assert(err, IsNil) f.Close() // recover with empty GTIDs. 
c.Assert(failpoint.Enable("github.com/pingcap/dm/pkg/utils/GetGTIDPurged", `return("")`), IsNil) //nolint:errcheck defer failpoint.Disable("github.com/pingcap/dm/pkg/utils/GetGTIDPurged") c.Assert(r.tryRecoverLatestFile(context.Background(), parser2), IsNil) _, latestPos := r.meta.Pos() c.Assert(latestPos, DeepEquals, gmysql.Position{Name: filename, Pos: g.LatestPos}) _, latestGTIDs := r.meta.GTID() c.Assert(latestGTIDs.Equal(recoverGTIDSet), IsTrue) // write some invalid data into the relay log file again. f, err = os.OpenFile(filepath.Join(r.meta.Dir(), filename), os.O_WRONLY|os.O_APPEND, 0o600) c.Assert(err, IsNil) _, err = f.Write([]byte("invalid event data")) c.Assert(err, IsNil) f.Close() // recover with the subset of GTIDs (previous GTID set). c.Assert(r.SaveMeta(startPos, previousGTIDSet), IsNil) c.Assert(r.tryRecoverLatestFile(context.Background(), parser2), IsNil) _, latestPos = r.meta.Pos() c.Assert(latestPos, DeepEquals, gmysql.Position{Name: filename, Pos: g.LatestPos}) _, latestGTIDs = r.meta.GTID() c.Assert(latestGTIDs.Equal(recoverGTIDSet), IsTrue) } // genBinlogEventsWithGTIDs generates some binlog events used by testFileUtilSuite and testFileWriterSuite. // now, its generated events including 3 DDL and 10 DML. func genBinlogEventsWithGTIDs(c *C, flavor string, previousGTIDSet, latestGTID1, latestGTID2 gtid.Set) (*event.Generator, []*replication.BinlogEvent, []byte) { var ( serverID uint32 = 11 latestPos uint32 latestXID uint64 = 10 allEvents = make([]*replication.BinlogEvent, 0, 50) allData bytes.Buffer ) // use a binlog event generator to generate some binlog events. g, err := event.NewGenerator(flavor, serverID, latestPos, latestGTID1, previousGTIDSet, latestXID) c.Assert(err, IsNil) // file header with FormatDescriptionEvent and PreviousGTIDsEvent events, data, err := g.GenFileHeader() c.Assert(err, IsNil) allEvents = append(allEvents, events...) allData.Write(data) // CREATE DATABASE/TABLE, 3 DDL queries := []string{ "CREATE DATABASE `db`", "CREATE TABLE `db`.`tbl1` (c1 INT)", "CREATE TABLE `db`.`tbl2` (c1 INT)", } for _, query := range queries { events, data, err = g.GenDDLEvents("db", query) c.Assert(err, IsNil) allEvents = append(allEvents, events...) allData.Write(data) } // DMLs, 10 DML g.LatestGTID = latestGTID2 // use another latest GTID with different SID/DomainID var ( tableID uint64 = 8 columnType = []byte{gmysql.MYSQL_TYPE_LONG} eventType = replication.WRITE_ROWS_EVENTv2 schema = "db" table = "tbl1" ) for i := 0; i < 10; i++ { insertRows := make([][]interface{}, 0, 1) insertRows = append(insertRows, []interface{}{int32(i)}) dmlData := []*event.DMLData{ { TableID: tableID, Schema: schema, Table: table, ColumnType: columnType, Rows: insertRows, }, } events, data, err = g.GenDMLEvents(eventType, dmlData) c.Assert(err, IsNil) allEvents = append(allEvents, events...) allData.Write(data) } return g, allEvents, allData.Bytes() } func (t *testRelaySuite) TestHandleEvent(c *C) { // NOTE: we can test metrics later. 
var ( reader2 = &mockReader{} transformer2 = transformer.NewTransformer(parser.New()) writer2 = &mockWriter{} relayCfg = newRelayCfg(c, gmysql.MariaDBFlavor) r = NewRelay(relayCfg).(*Relay) eventHeader = &replication.EventHeader{ Timestamp: uint32(time.Now().Unix()), ServerID: 11, } binlogPos = gmysql.Position{Name: "mysql-bin.666888", Pos: 4} rotateEv, _ = event.GenRotateEvent(eventHeader, 123, []byte(binlogPos.Name), uint64(binlogPos.Pos)) queryEv, _ = event.GenQueryEvent(eventHeader, 123, 0, 0, 0, nil, nil, []byte("CREATE DATABASE db_relay_test")) ) cfg := getDBConfigForTest() conn.InitMockDB(c) db, err := conn.DefaultDBProvider.Apply(cfg) c.Assert(err, IsNil) r.db = db c.Assert(r.Init(context.Background()), IsNil) // NOTE: we can mock meta later. c.Assert(r.meta.Load(), IsNil) c.Assert(r.meta.AddDir("24ecd093-8cec-11e9-aa0d-0242ac170002", nil, nil, 0), IsNil) // attach GTID sets to QueryEv queryEv2 := queryEv.Event.(*replication.QueryEvent) queryEv2.GSet, _ = gmysql.ParseGTIDSet(relayCfg.Flavor, "1-2-3") ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) defer cancel() // reader return with an error for _, reader2.err = range []error{ errors.New("reader error for testing"), replication.ErrChecksumMismatch, replication.ErrSyncClosed, replication.ErrNeedSyncAgain, } { _, handleErr := r.handleEvents(ctx, reader2, transformer2, writer2) c.Assert(errors.Cause(handleErr), Equals, reader2.err) } // reader return valid event reader2.err = nil reader2.result.Event = rotateEv // writer return error writer2.err = errors.New("writer error for testing") // return with the annotated writer error _, err = r.handleEvents(ctx, reader2, transformer2, writer2) c.Assert(errors.Cause(err), Equals, writer2.err) // after handle rotate event, we save and flush the meta immediately c.Assert(r.meta.Dirty(), Equals, false) // writer without error writer2.err = nil _, err = r.handleEvents(ctx, reader2, transformer2, writer2) // returned when ctx timeout c.Assert(errors.Cause(err), Equals, ctx.Err()) // check written event c.Assert(writer2.latestEvent, Equals, reader2.result.Event) // check meta _, pos := r.meta.Pos() _, gs := r.meta.GTID() c.Assert(pos, DeepEquals, binlogPos) c.Assert(gs.String(), Equals, "") // no GTID sets in event yet ctx2, cancel2 := context.WithTimeout(context.Background(), 10*time.Millisecond) defer cancel2() // write a QueryEvent with GTID sets reader2.result.Event = queryEv _, err = r.handleEvents(ctx2, reader2, transformer2, writer2) c.Assert(errors.Cause(err), Equals, ctx.Err()) // check written event c.Assert(writer2.latestEvent, Equals, reader2.result.Event) // check meta _, pos = r.meta.Pos() _, gs = r.meta.GTID() c.Assert(pos.Name, Equals, binlogPos.Name) c.Assert(pos.Pos, Equals, queryEv.Header.LogPos) c.Assert(gs.Origin(), DeepEquals, queryEv2.GSet) // got GTID sets // transformer return ignorable for the event reader2.err = nil reader2.result.Event = &replication.BinlogEvent{ Header: &replication.EventHeader{EventType: replication.HEARTBEAT_EVENT}, Event: &replication.GenericEvent{}, } ctx4, cancel4 := context.WithTimeout(context.Background(), 10*time.Millisecond) defer cancel4() _, err = r.handleEvents(ctx4, reader2, transformer2, writer2) c.Assert(errors.Cause(err), Equals, ctx.Err()) select { case <-ctx4.Done(): default: c.Fatalf("ignorable event for transformer not ignored") } // writer return ignorable for the event reader2.result.Event = queryEv writer2.result.Ignore = true ctx5, cancel5 := context.WithTimeout(context.Background(), 
10*time.Millisecond) defer cancel5() _, err = r.handleEvents(ctx5, reader2, transformer2, writer2) c.Assert(errors.Cause(err), Equals, ctx.Err()) select { case <-ctx5.Done(): default: c.Fatalf("ignorable event for writer not ignored") } } func (t *testRelaySuite) TestReSetupMeta(c *C) { ctx, cancel := context.WithTimeout(context.Background(), utils.DefaultDBTimeout) defer cancel() var ( relayCfg = newRelayCfg(c, gmysql.MySQLFlavor) r = NewRelay(relayCfg).(*Relay) ) cfg := getDBConfigForTest() mockDB := conn.InitMockDB(c) db, err := conn.DefaultDBProvider.Apply(cfg) c.Assert(err, IsNil) r.db = db c.Assert(r.Init(context.Background()), IsNil) // empty metadata c.Assert(r.meta.Load(), IsNil) t.verifyMetadata(c, r, "", minCheckpoint, "", nil) // open connected DB and get its UUID defer func() { r.db.Close() r.db = nil }() mockGetServerUUID(mockDB) uuid, err := utils.GetServerUUID(ctx, r.db.DB, r.cfg.Flavor) c.Assert(err, IsNil) // re-setup meta with start pos adjusted r.cfg.EnableGTID = true r.cfg.BinlogGTID = "24ecd093-8cec-11e9-aa0d-0242ac170002:1-23" r.cfg.BinLogName = "mysql-bin.000005" c.Assert(r.setSyncConfig(), IsNil) // all adjusted gset should be empty since we didn't flush logs emptyGTID, err := gtid.ParserGTID(r.cfg.Flavor, "") c.Assert(err, IsNil) mockGetServerUUID(mockDB) mockGetRandomServerID(mockDB) // mock AddGSetWithPurged mockDB.ExpectQuery("select @@GLOBAL.gtid_purged").WillReturnRows(sqlmock.NewRows([]string{"@@GLOBAL.gtid_purged"}).AddRow("")) c.Assert(failpoint.Enable("github.com/pingcap/dm/pkg/binlog/reader/MockGetEmptyPreviousGTIDFromGTIDSet", "return()"), IsNil) //nolint:errcheck defer failpoint.Disable("github.com/pingcap/dm/pkg/binlog/reader/MockGetEmptyPreviousGTIDFromGTIDSet") c.Assert(r.reSetupMeta(ctx), IsNil) uuid001 := fmt.Sprintf("%s.000001", uuid) t.verifyMetadata(c, r, uuid001, gmysql.Position{Name: r.cfg.BinLogName, Pos: 4}, emptyGTID.String(), []string{uuid001}) // re-setup meta again, often happen when connecting a server behind a VIP. mockGetServerUUID(mockDB) mockGetRandomServerID(mockDB) mockDB.ExpectQuery("select @@GLOBAL.gtid_purged").WillReturnRows(sqlmock.NewRows([]string{"@@GLOBAL.gtid_purged"}).AddRow("")) c.Assert(r.reSetupMeta(ctx), IsNil) uuid002 := fmt.Sprintf("%s.000002", uuid) t.verifyMetadata(c, r, uuid002, minCheckpoint, emptyGTID.String(), []string{uuid001, uuid002}) r.cfg.BinLogName = "mysql-bin.000002" r.cfg.BinlogGTID = "24ecd093-8cec-11e9-aa0d-0242ac170002:1-50,24ecd093-8cec-11e9-aa0d-0242ac170003:1-50" r.cfg.UUIDSuffix = 2 mockGetServerUUID(mockDB) mockGetRandomServerID(mockDB) mockDB.ExpectQuery("select @@GLOBAL.gtid_purged").WillReturnRows(sqlmock.NewRows([]string{"@@GLOBAL.gtid_purged"}).AddRow("")) c.Assert(r.reSetupMeta(ctx), IsNil) t.verifyMetadata(c, r, uuid002, gmysql.Position{Name: r.cfg.BinLogName, Pos: 4}, emptyGTID.String(), []string{uuid002}) // re-setup meta again, often happen when connecting a server behind a VIP. 
mockGetServerUUID(mockDB) mockGetRandomServerID(mockDB) mockDB.ExpectQuery("select @@GLOBAL.gtid_purged").WillReturnRows(sqlmock.NewRows([]string{"@@GLOBAL.gtid_purged"}).AddRow("")) c.Assert(r.reSetupMeta(ctx), IsNil) uuid003 := fmt.Sprintf("%s.000003", uuid) t.verifyMetadata(c, r, uuid003, minCheckpoint, emptyGTID.String(), []string{uuid002, uuid003}) c.Assert(mockDB.ExpectationsWereMet(), IsNil) } func (t *testRelaySuite) verifyMetadata(c *C, r *Relay, uuidExpected string, posExpected gmysql.Position, gsStrExpected string, uuidsExpected []string) { uuid, pos := r.meta.Pos() _, gs := r.meta.GTID() gsExpected, err := gtid.ParserGTID(gmysql.MySQLFlavor, gsStrExpected) c.Assert(err, IsNil) c.Assert(uuid, Equals, uuidExpected) c.Assert(pos, DeepEquals, posExpected) c.Assert(gs.Equal(gsExpected), IsTrue) indexFile := filepath.Join(r.cfg.RelayDir, utils.UUIDIndexFilename) UUIDs, err := utils.ParseUUIDIndex(indexFile) c.Assert(err, IsNil) c.Assert(UUIDs, DeepEquals, uuidsExpected) }
[ "\"MYSQL_HOST\"", "\"MYSQL_PORT\"", "\"MYSQL_USER\"", "\"MYSQL_PSWD\"" ]
[]
[ "MYSQL_PORT", "MYSQL_USER", "MYSQL_PSWD", "MYSQL_HOST" ]
[]
["MYSQL_PORT", "MYSQL_USER", "MYSQL_PSWD", "MYSQL_HOST"]
go
4
0
examples/IntradayBarExample.py
# IntradayBarExample.py
from __future__ import print_function
from __future__ import absolute_import

import copy
import datetime
from optparse import OptionParser, Option, OptionValueError

import os
import platform as plat
import sys
if sys.version_info >= (3, 8) and plat.system().lower() == "windows":
    # pylint: disable=no-member
    with os.add_dll_directory(os.getenv('BLPAPI_LIBDIR')):
        import blpapi
else:
    import blpapi

BAR_DATA = blpapi.Name("barData")
BAR_TICK_DATA = blpapi.Name("barTickData")
OPEN = blpapi.Name("open")
HIGH = blpapi.Name("high")
LOW = blpapi.Name("low")
CLOSE = blpapi.Name("close")
VOLUME = blpapi.Name("volume")
NUM_EVENTS = blpapi.Name("numEvents")
TIME = blpapi.Name("time")
RESPONSE_ERROR = blpapi.Name("responseError")
SESSION_TERMINATED = blpapi.Name("SessionTerminated")
CATEGORY = blpapi.Name("category")
MESSAGE = blpapi.Name("message")


def checkDateTime(_option, opt, value):
    try:
        return datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
    except ValueError as ex:
        raise OptionValueError(
            "option {0}: invalid datetime value: {1} ({2})".format(
                opt, value, ex))


class ExampleOption(Option):
    TYPES = Option.TYPES + ("datetime",)
    TYPE_CHECKER = copy.copy(Option.TYPE_CHECKER)
    TYPE_CHECKER["datetime"] = checkDateTime


def parseCmdLine():
    parser = OptionParser(description="Retrieve intraday bars.",
                          epilog="Notes: " +
                          "1) All times are in GMT. " +
                          "2) Only one security can be specified. " +
                          "3) Only one event can be specified.",
                          option_class=ExampleOption)
    parser.add_option("-a", "--ip",
                      dest="host",
                      help="server name or IP (default: %default)",
                      metavar="ipAddress",
                      default="localhost")
    parser.add_option("-p",
                      dest="port",
                      type="int",
                      help="server port (default: %default)",
                      metavar="tcpPort",
                      default=8194)
    parser.add_option("-s",
                      dest="security",
                      help="security (default: %default)",
                      metavar="security",
                      default="IBM US Equity")
    parser.add_option("-e",
                      dest="event",
                      help="event (default: %default)",
                      metavar="event",
                      default="TRADE")
    parser.add_option("-b",
                      dest="barInterval",
                      type="int",
                      help="bar interval (default: %default)",
                      metavar="barInterval",
                      default=60)
    parser.add_option("--sd",
                      dest="startDateTime",
                      type="datetime",
                      help="start date/time (default: %default)",
                      metavar="startDateTime",
                      default=datetime.datetime(2008, 8, 11, 15, 30, 0))
    parser.add_option("--ed",
                      dest="endDateTime",
                      type="datetime",
                      help="end date/time (default: %default)",
                      metavar="endDateTime",
                      default=datetime.datetime(2008, 8, 11, 15, 35, 0))
    parser.add_option("-g",
                      dest="gapFillInitialBar",
                      help="gapFillInitialBar",
                      action="store_true",
                      default=False)

    options,_ = parser.parse_args()

    return options


def printErrorInfo(leadingStr, errorInfo):
    print("%s%s (%s)" % (leadingStr,
                         errorInfo.getElementAsString(CATEGORY),
                         errorInfo.getElementAsString(MESSAGE)))


def processMessage(msg):
    data = msg.getElement(BAR_DATA).getElement(BAR_TICK_DATA)
    print("Datetime\t\tOpen\t\tHigh\t\tLow\t\tClose\t\tNumEvents\tVolume")

    for bar in data.values():
        time = bar.getElementAsDatetime(TIME)
        open_ = bar.getElementAsFloat(OPEN)
        high = bar.getElementAsFloat(HIGH)
        low = bar.getElementAsFloat(LOW)
        close = bar.getElementAsFloat(CLOSE)
        numEvents = bar.getElementAsInteger(NUM_EVENTS)
        volume = bar.getElementAsInteger(VOLUME)

        print("%s\t\t%.3f\t\t%.3f\t\t%.3f\t\t%.3f\t\t%d\t\t%d" % \
              (time.strftime("%m/%d/%y %H:%M"), open_, high, low, close,
               numEvents, volume))


def processResponseEvent(event):
    for msg in event:
        print(msg)
        if msg.hasElement(RESPONSE_ERROR):
            printErrorInfo("REQUEST FAILED: ", msg.getElement(RESPONSE_ERROR))
            continue
        processMessage(msg)


def sendIntradayBarRequest(session, options):
    refDataService = session.getService("//blp/refdata")
    request = refDataService.createRequest("IntradayBarRequest")

    # only one security/eventType per request
    request.set("security", options.security)
    request.set("eventType", options.event)
    request.set("interval", options.barInterval)

    # All times are in GMT
    if not options.startDateTime or not options.endDateTime:
        tradedOn = getPreviousTradingDate()
        if tradedOn:
            startTime = datetime.datetime.combine(tradedOn,
                                                  datetime.time(15, 30))
            request.set("startDateTime", startTime)
            endTime = datetime.datetime.combine(tradedOn,
                                                datetime.time(15, 35))
            request.set("endDateTime", endTime)
    else:
        if options.startDateTime and options.endDateTime:
            request.set("startDateTime", options.startDateTime)
            request.set("endDateTime", options.endDateTime)

    if options.gapFillInitialBar:
        request.set("gapFillInitialBar", True)

    print("Sending Request:", request)
    session.sendRequest(request)


def eventLoop(session):
    done = False
    while not done:
        # nextEvent() method below is called with a timeout to let
        # the program catch Ctrl-C between arrivals of new events
        event = session.nextEvent(500)
        if event.eventType() == blpapi.Event.PARTIAL_RESPONSE:
            print("Processing Partial Response")
            processResponseEvent(event)
        elif event.eventType() == blpapi.Event.RESPONSE:
            print("Processing Response")
            processResponseEvent(event)
            done = True
        else:
            for msg in event:
                if event.eventType() == blpapi.Event.SESSION_STATUS:
                    if msg.messageType() == SESSION_TERMINATED:
                        done = True


def getPreviousTradingDate():
    tradedOn = datetime.date.today()

    while True:
        try:
            tradedOn -= datetime.timedelta(days=1)
        except OverflowError:
            return None

        if tradedOn.weekday() not in [5, 6]:
            return tradedOn


def main():
    options = parseCmdLine()

    # Fill SessionOptions
    sessionOptions = blpapi.SessionOptions()
    sessionOptions.setServerHost(options.host)
    sessionOptions.setServerPort(options.port)

    print("Connecting to %s:%s" % (options.host, options.port))

    # Create a Session
    session = blpapi.Session(sessionOptions)

    # Start a Session
    if not session.start():
        print("Failed to start session.")
        return

    try:
        # Open service to get historical data from
        if not session.openService("//blp/refdata"):
            print("Failed to open //blp/refdata")
            return

        sendIntradayBarRequest(session, options)

        # wait for events from session.
        eventLoop(session)
    finally:
        # Stop the session
        session.stop()

if __name__ == "__main__":
    print("IntradayBarExample")
    try:
        main()
    except KeyboardInterrupt:
        print("Ctrl+C pressed. Stopping...")

__copyright__ = """
Copyright 2012. Bloomberg Finance L.P.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: The above copyright
notice and this permission notice shall be included in all copies or
substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
[]
[]
[ "BLPAPI_LIBDIR" ]
[]
["BLPAPI_LIBDIR"]
python
1
0
pkg/integrations/mysqld_exporter/mysqld-exporter.go
// Package mysqld_exporter embeds https://github.com/prometheus/mysqld_exporter package mysqld_exporter //nolint:golint import ( "context" "fmt" "os" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/grafana/agent/pkg/integrations" "github.com/grafana/agent/pkg/integrations/config" "github.com/prometheus/mysqld_exporter/collector" ) var DefaultConfig = Config{ LockWaitTimeout: 2, InfoSchemaProcessListProcessesByUser: true, InfoSchemaProcessListProcessesByHost: true, InfoSchemaTablesDatabases: "*", PerfSchemaEventsStatementsLimit: 250, PerfSchemaEventsStatementsTimeLimit: 86400, PerfSchemaEventsStatementsTextLimit: 120, PerfSchemaFileInstancesFilter: ".*", PerfSchemaFileInstancesRemovePrefix: "/var/lib/mysql", HeartbeatDatabase: "heartbeat", HeartbeatTable: "heartbeat", } // Config controls the mysqld_exporter integration. type Config struct { Common config.Common `yaml:",inline"` // DataSourceName to use to connect to MySQL. DataSourceName string `yaml:"data_source_name"` // Collectors to mark as enabled in addition to the default. EnableCollectors []string `yaml:"enable_collectors"` // Collectors to explicitly mark as disabled. DisableCollectors []string `yaml:"disable_collectors"` // Overrides the default set of enabled collectors with the given list. SetCollectors []string `yaml:"set_collectors"` // Collector-wide options LockWaitTimeout int `yaml:"lock_wait_timeout"` LogSlowFilter bool `yaml:"log_slow_filter"` // Collector-specific config options InfoSchemaProcessListMinTime int `yaml:"info_schema_processlist_min_time"` InfoSchemaProcessListProcessesByUser bool `yaml:"info_schema_processlist_processes_by_user"` InfoSchemaProcessListProcessesByHost bool `yaml:"info_schema_processlist_processes_by_host"` InfoSchemaTablesDatabases string `yaml:"info_schema_tables_databases"` PerfSchemaEventsStatementsLimit int `yaml:"perf_schema_eventsstatements_limit"` PerfSchemaEventsStatementsTimeLimit int `yaml:"perf_schema_eventsstatements_time_limit"` PerfSchemaEventsStatementsTextLimit int `yaml:"perf_schema_eventsstatements_digtext_text_limit"` PerfSchemaFileInstancesFilter string `yaml:"perf_schema_file_instances_filter"` PerfSchemaFileInstancesRemovePrefix string `yaml:"perf_schema_file_instances_remove_prefix"` HeartbeatDatabase string `yaml:"heartbeat_database"` HeartbeatTable string `yaml:"heartbeat_table"` HeartbeatUTC bool `yaml:"heartbeat_utc"` MySQLUserPrivileges bool `yaml:"mysql_user_privileges"` } // UnmarshalYAML implements yaml.Unmarshaler for Config. func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { *c = DefaultConfig type plain Config return unmarshal((*plain)(c)) } func (c *Config) Name() string { return "mysqld_exporter" } func (c *Config) CommonConfig() config.Common { return c.Common } func (c *Config) NewIntegration(l log.Logger) (integrations.Integration, error) { return New(l, c) } func init() { integrations.RegisterIntegration(&Config{}) } // New creates a new mysqld_exporter integration. The integration scrapes // metrics from a mysqld process. 
func New(log log.Logger, c *Config) (integrations.Integration, error) { dsn := c.DataSourceName if len(dsn) == 0 { dsn = os.Getenv("MYSQLD_EXPORTER_DATA_SOURCE_NAME") } if len(dsn) == 0 { return nil, fmt.Errorf("cannot create mysqld_exporter; neither mysqld_exporter.data_source_name or $MYSQLD_EXPORTER_DATA_SOURCE_NAME is set") } scrapers := GetScrapers(c) exporter := collector.New(context.Background(), dsn, collector.NewMetrics(), scrapers, log, collector.Config{ LockTimeout: c.LockWaitTimeout, SlowLogFilter: c.LogSlowFilter, }) level.Debug(log).Log("msg", "enabled mysqld_exporter scrapers") for _, scraper := range scrapers { level.Debug(log).Log("scraper", scraper.Name()) } return integrations.NewCollectorIntegration( c.Name(), integrations.WithCollectors(exporter), ), nil } // GetScrapers returns the set of *enabled* scrapers from the config. // Configurable scrapers will have their configuration filled out matching the // Config's settings. func GetScrapers(c *Config) []collector.Scraper { scrapers := map[collector.Scraper]bool{ &collector.ScrapeAutoIncrementColumns{}: false, &collector.ScrapeBinlogSize{}: false, &collector.ScrapeClientStat{}: false, &collector.ScrapeEngineInnodbStatus{}: false, &collector.ScrapeEngineTokudbStatus{}: false, &collector.ScrapeGlobalStatus{}: true, &collector.ScrapeGlobalVariables{}: true, &collector.ScrapeInfoSchemaInnodbTablespaces{}: false, &collector.ScrapeInnodbCmpMem{}: true, &collector.ScrapeInnodbCmp{}: true, &collector.ScrapeInnodbMetrics{}: false, &collector.ScrapePerfEventsStatementsSum{}: false, &collector.ScrapePerfEventsWaits{}: false, &collector.ScrapePerfFileEvents{}: false, &collector.ScrapePerfIndexIOWaits{}: false, &collector.ScrapePerfReplicationApplierStatsByWorker{}: false, &collector.ScrapePerfReplicationGroupMemberStats{}: false, &collector.ScrapePerfReplicationGroupMembers{}: false, &collector.ScrapePerfTableIOWaits{}: false, &collector.ScrapePerfTableLockWaits{}: false, &collector.ScrapeQueryResponseTime{}: true, &collector.ScrapeReplicaHost{}: false, &collector.ScrapeSchemaStat{}: false, &collector.ScrapeSlaveHosts{}: false, &collector.ScrapeSlaveStatus{}: true, &collector.ScrapeTableStat{}: false, &collector.ScrapeUserStat{}: false, // Collectors that have configuration &collector.ScrapeHeartbeat{ Database: c.HeartbeatDatabase, Table: c.HeartbeatTable, UTC: c.HeartbeatUTC, }: false, &collector.ScrapePerfEventsStatements{ Limit: c.PerfSchemaEventsStatementsLimit, TimeLimit: c.PerfSchemaEventsStatementsTimeLimit, DigestTextLimit: c.PerfSchemaEventsStatementsTextLimit, }: false, &collector.ScrapePerfFileInstances{ Filter: c.PerfSchemaFileInstancesFilter, RemovePrefix: c.PerfSchemaFileInstancesRemovePrefix, }: false, &collector.ScrapeProcesslist{ ProcessListMinTime: c.InfoSchemaProcessListMinTime, ProcessesByHostFlag: c.InfoSchemaProcessListProcessesByHost, ProcessesByUserFlag: c.InfoSchemaProcessListProcessesByUser, }: false, &collector.ScrapeTableSchema{ Databases: c.InfoSchemaTablesDatabases, }: false, &collector.ScrapeUser{ Privileges: c.MySQLUserPrivileges, }: false, } // Override the defaults with the provided set of collectors if // set_collectors has at least one element in it. if len(c.SetCollectors) != 0 { customDefaults := map[string]struct{}{} for _, c := range c.SetCollectors { customDefaults[c] = struct{}{} } for scraper := range scrapers { _, enable := customDefaults[scraper.Name()] scrapers[scraper] = enable } } // Explicitly disable/enable specific collectors. 
for _, c := range c.DisableCollectors { for scraper := range scrapers { if scraper.Name() == c { scrapers[scraper] = false break } } } for _, c := range c.EnableCollectors { for scraper := range scrapers { if scraper.Name() == c { scrapers[scraper] = true break } } } enabledScrapers := []collector.Scraper{} for scraper, enabled := range scrapers { if enabled { enabledScrapers = append(enabledScrapers, scraper) } } return enabledScrapers }
[ "\"MYSQLD_EXPORTER_DATA_SOURCE_NAME\"" ]
[]
[ "MYSQLD_EXPORTER_DATA_SOURCE_NAME" ]
[]
["MYSQLD_EXPORTER_DATA_SOURCE_NAME"]
go
1
0
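The integration reads its DSN from the config first and only then falls back to the MYSQLD_EXPORTER_DATA_SOURCE_NAME environment variable. A minimal standalone sketch of that lookup order (the resolveDSN helper and the main function below are illustrative, not part of the package):

package main

import (
	"fmt"
	"os"
)

// resolveDSN mirrors the lookup order used by the integration: an explicit
// config value wins, otherwise MYSQLD_EXPORTER_DATA_SOURCE_NAME is consulted.
func resolveDSN(configured string) (string, error) {
	if configured != "" {
		return configured, nil
	}
	if dsn := os.Getenv("MYSQLD_EXPORTER_DATA_SOURCE_NAME"); dsn != "" {
		return dsn, nil
	}
	return "", fmt.Errorf("neither mysqld_exporter.data_source_name nor $MYSQLD_EXPORTER_DATA_SOURCE_NAME is set")
}

func main() {
	dsn, err := resolveDSN("")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("using DSN:", dsn)
}

Note the collector tuning order in GetScrapers above: set_collectors replaces the default set first, then disable_collectors and finally enable_collectors are applied, so an explicit enable wins for a scraper named in both lists.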
cmd/argo/commands/common.go
package commands

import (
	"fmt"
	"os"
	"strconv"
	"strings"

	"github.com/spf13/cobra"

	wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
)

// Global variables
var (
	jobStatusIconMap map[wfv1.NodePhase]string
	nodeTypeIconMap  map[wfv1.NodeType]string
	noColor          bool
)

func init() {
	cobra.OnInitialize(initializeSession)
}

// ANSI escape codes
const (
	escape    = "\x1b"
	noFormat  = 0
	Bold      = 1
	FgBlack   = 30
	FgRed     = 31
	FgGreen   = 32
	FgYellow  = 33
	FgBlue    = 34
	FgMagenta = 35
	FgCyan    = 36
	FgWhite   = 37
	FgDefault = 39
)

// Default status for printWorkflow
const DefaultStatus = ""

func initializeSession() {
	jobStatusIconMap = map[wfv1.NodePhase]string{
		wfv1.NodePending:   ansiFormat("◷", FgYellow),
		wfv1.NodeRunning:   ansiFormat("●", FgCyan),
		wfv1.NodeSucceeded: ansiFormat("✔", FgGreen),
		wfv1.NodeSkipped:   ansiFormat("○", FgDefault),
		wfv1.NodeFailed:    ansiFormat("✖", FgRed),
		wfv1.NodeError:     ansiFormat("⚠", FgRed),
	}
	nodeTypeIconMap = map[wfv1.NodeType]string{
		wfv1.NodeTypeSuspend: ansiFormat("ǁ", FgCyan),
	}
}

func ansiColorCode(s string) int {
	i := 0
	for _, c := range s {
		i += int(c)
	}
	colors := []int{FgRed, FgGreen, FgYellow, FgBlue, FgMagenta, FgCyan, FgWhite}
	return colors[i%len(colors)]
}

// ansiFormat wraps ANSI escape codes to a string to format the string to a desired color.
// NOTE: we still apply formatting even if there is no color formatting desired.
// The purpose of doing this is because when we apply ANSI color escape sequences to our
// output, this confuses the tabwriter library which miscalculates widths of columns and
// misaligns columns. By always applying a ANSI escape sequence (even when we don't want
// color, it provides more consistent string lengths so that tabwriter can calculate
// widths correctly.
func ansiFormat(s string, codes ...int) string {
	if noColor || os.Getenv("TERM") == "dumb" || len(codes) == 0 {
		return s
	}
	codeStrs := make([]string, len(codes))
	for i, code := range codes {
		codeStrs[i] = strconv.Itoa(code)
	}
	sequence := strings.Join(codeStrs, ";")
	return fmt.Sprintf("%s[%sm%s%s[%dm", escape, sequence, s, escape, noFormat)
}
[ "\"TERM\"" ]
[]
[ "TERM" ]
[]
["TERM"]
go
1
0
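ansiFormat falls back to plain text whenever color is disabled, the terminal reports TERM=dumb, or no codes are given. A condensed, self-contained sketch of that behavior (the noColor parameter here stands in for the package-level flag, and the main function is only illustrative):

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

const escape = "\x1b"

// ansiFormat follows the same pattern as the CLI helper above: wrap the
// string in an ANSI escape sequence unless color output is disabled.
func ansiFormat(s string, noColor bool, codes ...int) string {
	if noColor || os.Getenv("TERM") == "dumb" || len(codes) == 0 {
		return s
	}
	codeStrs := make([]string, len(codes))
	for i, code := range codes {
		codeStrs[i] = strconv.Itoa(code)
	}
	return fmt.Sprintf("%s[%sm%s%s[0m", escape, strings.Join(codeStrs, ";"), s, escape)
}

func main() {
	fmt.Println(ansiFormat("Succeeded", false, 32)) // green on a color terminal
	fmt.Println(ansiFormat("Succeeded", true, 32))  // plain text when color is off
}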
vendor/github.com/containers/storage/drivers/overlay/overlay.go
// +build linux package overlay import ( "bytes" "fmt" "io" "io/ioutil" "os" "os/exec" "path" "path/filepath" "strconv" "strings" "sync" "syscall" graphdriver "github.com/containers/storage/drivers" "github.com/containers/storage/drivers/overlayutils" "github.com/containers/storage/drivers/quota" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/directory" "github.com/containers/storage/pkg/fsutils" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/locker" "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/ostree" "github.com/containers/storage/pkg/parsers" "github.com/containers/storage/pkg/system" units "github.com/docker/go-units" rsystem "github.com/opencontainers/runc/libcontainer/system" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) var ( // untar defines the untar method untar = chrootarchive.UntarUncompressed ) // This backend uses the overlay union filesystem for containers // with diff directories for each layer. // This version of the overlay driver requires at least kernel // 4.0.0 in order to support mounting multiple diff directories. // Each container/image has at least a "diff" directory and "link" file. // If there is also a "lower" file when there are diff layers // below as well as "merged" and "work" directories. The "diff" directory // has the upper layer of the overlay and is used to capture any // changes to the layer. The "lower" file contains all the lower layer // mounts separated by ":" and ordered from uppermost to lowermost // layers. The overlay itself is mounted in the "merged" directory, // and the "work" dir is needed for overlay to work. // The "link" file for each layer contains a unique string for the layer. // Under the "l" directory at the root there will be a symbolic link // with that unique string pointing the "diff" directory for the layer. // The symbolic links are used to reference lower layers in the "lower" // file and on mount. The links are used to shorten the total length // of a layer reference without requiring changes to the layer identifier // or root directory. Mounts are always done relative to root and // referencing the symbolic links in order to ensure the number of // lower directories can fit in a single page for making the mount // syscall. A hard upper limit of 128 lower layers is enforced to ensure // that mounts do not fail due to length. const ( linkDir = "l" lowerFile = "lower" maxDepth = 128 // idLength represents the number of random characters // which can be used to create the unique link identifer // for every layer. If this value is too long then the // page size limit for the mount command may be exceeded. // The idLength should be selected such that following equation // is true (512 is a buffer for label metadata). // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512) idLength = 26 ) type overlayOptions struct { imageStores []string quota quota.Quota mountProgram string ostreeRepo string skipMountHome bool mountOptions string ignoreChownErrors bool } // Driver contains information about the home directory and the list of active mounts that are created using this driver. 
type Driver struct { name string home string runhome string uidMaps []idtools.IDMap gidMaps []idtools.IDMap ctr *graphdriver.RefCounter quotaCtl *quota.Control options overlayOptions naiveDiff graphdriver.DiffDriver supportsDType bool usingMetacopy bool locker *locker.Locker convert map[string]bool } var ( backingFs = "<unknown>" projectQuotaSupported = false useNaiveDiffLock sync.Once useNaiveDiffOnly bool ) func init() { graphdriver.Register("overlay", Init) graphdriver.Register("overlay2", Init) } // Init returns the a native diff driver for overlay filesystem. // If overlay filesystem is not supported on the host, a wrapped graphdriver.ErrNotSupported is returned as error. // If an overlay filesystem is not supported over an existing filesystem then a wrapped graphdriver.ErrIncompatibleFS is returned. func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) { opts, err := parseOptions(options.DriverOptions) if err != nil { return nil, err } fsMagic, err := graphdriver.GetFSMagic(home) if err != nil { return nil, err } if fsName, ok := graphdriver.FsNames[fsMagic]; ok { backingFs = fsName } // check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs if opts.mountProgram == "" { switch fsMagic { case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs: logrus.Errorf("'overlay' is not supported over %s", backingFs) return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s", backingFs) } } rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) if err != nil { return nil, err } // Create the driver home dir if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { return nil, err } runhome := filepath.Join(options.RunRoot, filepath.Base(home)) if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { return nil, err } var usingMetacopy bool var supportsDType bool if opts.mountProgram != "" { supportsDType = true } else { feature := "overlay" overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature) if err == nil { if overlayCacheResult { logrus.Debugf("cached value indicated that overlay is supported") } else { logrus.Debugf("cached value indicated that overlay is not supported") } supportsDType = overlayCacheResult if !supportsDType { return nil, errors.New(overlayCacheText) } } else { supportsDType, err = supportsOverlay(home, fsMagic, rootUID, rootGID) if err != nil { os.Remove(filepath.Join(home, linkDir)) os.Remove(home) patherr, ok := err.(*os.PathError) if ok && patherr.Err == syscall.ENOSPC { return nil, err } err = errors.Wrap(err, "kernel does not support overlay fs") if err2 := cachedFeatureRecord(runhome, feature, false, err.Error()); err2 != nil { return nil, errors.Wrapf(err2, "error recording overlay not being supported (%v)", err) } return nil, err } if err = cachedFeatureRecord(runhome, feature, supportsDType, ""); err != nil { return nil, errors.Wrap(err, "error recording overlay support status") } } feature = fmt.Sprintf("metacopy(%s)", opts.mountOptions) metacopyCacheResult, _, err := cachedFeatureCheck(runhome, feature) if err == nil { if metacopyCacheResult { logrus.Debugf("cached value indicated that metacopy is being used") } else { logrus.Debugf("cached value indicated that metacopy is not being used") } usingMetacopy = metacopyCacheResult } else { usingMetacopy, err = doesMetacopy(home, 
opts.mountOptions) if err == nil { if usingMetacopy { logrus.Debugf("overlay test mount indicated that metacopy is being used") } else { logrus.Debugf("overlay test mount indicated that metacopy is not being used") } if err = cachedFeatureRecord(runhome, feature, usingMetacopy, ""); err != nil { return nil, errors.Wrap(err, "error recording metacopy-being-used status") } } else { logrus.Warnf("overlay test mount did not indicate whether or not metacopy is being used: %v", err) return nil, err } } } if !opts.skipMountHome { if err := mount.MakePrivate(home); err != nil { return nil, err } } if opts.ostreeRepo != "" { if err := ostree.CreateOSTreeRepository(opts.ostreeRepo, rootUID, rootGID); err != nil { return nil, err } } d := &Driver{ name: "overlay", home: home, runhome: runhome, uidMaps: options.UIDMaps, gidMaps: options.GIDMaps, ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), supportsDType: supportsDType, usingMetacopy: usingMetacopy, locker: locker.New(), options: *opts, convert: make(map[string]bool), } d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)) if backingFs == "xfs" { // Try to enable project quota support over xfs. if d.quotaCtl, err = quota.NewControl(home); err == nil { projectQuotaSupported = true } else if opts.quota.Size > 0 { return nil, fmt.Errorf("Storage option overlay.size not supported. Filesystem does not support Project Quota: %v", err) } } else if opts.quota.Size > 0 { // if xfs is not the backing fs then error out if the storage-opt overlay.size is used. return nil, fmt.Errorf("Storage option overlay.size only supported for backingFS XFS. Found %v", backingFs) } logrus.Debugf("backingFs=%s, projectQuotaSupported=%v, useNativeDiff=%v, usingMetacopy=%v", backingFs, projectQuotaSupported, !d.useNaiveDiff(), d.usingMetacopy) return d, nil } func parseOptions(options []string) (*overlayOptions, error) { o := &overlayOptions{} for _, option := range options { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return nil, err } key = strings.ToLower(key) switch key { case ".override_kernel_check", "overlay.override_kernel_check", "overlay2.override_kernel_check": logrus.Warnf("overlay: override_kernel_check option was specified, but is no longer necessary") case ".mountopt", "overlay.mountopt", "overlay2.mountopt": o.mountOptions = val case ".size", "overlay.size", "overlay2.size": logrus.Debugf("overlay: size=%s", val) size, err := units.RAMInBytes(val) if err != nil { return nil, err } o.quota.Size = uint64(size) case ".imagestore", "overlay.imagestore", "overlay2.imagestore": logrus.Debugf("overlay: imagestore=%s", val) // Additional read only image stores to use for lower paths for _, store := range strings.Split(val, ",") { store = filepath.Clean(store) if !filepath.IsAbs(store) { return nil, fmt.Errorf("overlay: image path %q is not absolute. 
Can not be relative", store) } st, err := os.Stat(store) if err != nil { return nil, fmt.Errorf("overlay: can't stat imageStore dir %s: %v", store, err) } if !st.IsDir() { return nil, fmt.Errorf("overlay: image path %q must be a directory", store) } o.imageStores = append(o.imageStores, store) } case ".mount_program", "overlay.mount_program", "overlay2.mount_program": logrus.Debugf("overlay: mount_program=%s", val) _, err := os.Stat(val) if err != nil { return nil, fmt.Errorf("overlay: can't stat program %s: %v", val, err) } o.mountProgram = val case "overlay2.ostree_repo", "overlay.ostree_repo", ".ostree_repo": logrus.Debugf("overlay: ostree_repo=%s", val) if !ostree.OstreeSupport() { return nil, fmt.Errorf("overlay: ostree_repo specified but support for ostree is missing") } o.ostreeRepo = val case ".ignore_chown_errors", "overlay2.ignore_chown_errors", "overlay.ignore_chown_errors": logrus.Debugf("overlay: ignore_chown_errors=%s", val) o.ignoreChownErrors, err = strconv.ParseBool(val) if err != nil { return nil, err } case "overlay2.skip_mount_home", "overlay.skip_mount_home", ".skip_mount_home": logrus.Debugf("overlay: skip_mount_home=%s", val) o.skipMountHome, err = strconv.ParseBool(val) if err != nil { return nil, err } default: return nil, fmt.Errorf("overlay: Unknown option %s", key) } } return o, nil } func cachedFeatureSet(feature string, set bool) string { if set { return fmt.Sprintf("%s-true", feature) } return fmt.Sprintf("%s-false", feature) } func cachedFeatureCheck(runhome, feature string) (supported bool, text string, err error) { content, err := ioutil.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, true))) if err == nil { return true, string(content), nil } content, err = ioutil.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, false))) if err == nil { return false, string(content), nil } return false, "", err } func cachedFeatureRecord(runhome, feature string, supported bool, text string) (err error) { f, err := os.Create(filepath.Join(runhome, cachedFeatureSet(feature, supported))) if f != nil { if text != "" { fmt.Fprintf(f, "%s", text) } f.Close() } return err } func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGID int) (supportsDType bool, err error) { // We can try to modprobe overlay first exec.Command("modprobe", "overlay").Run() layerDir, err := ioutil.TempDir(home, "compat") if err != nil { patherr, ok := err.(*os.PathError) if ok && patherr.Err == syscall.ENOSPC { return false, err } } if err == nil { // Check if reading the directory's contents populates the d_type field, which is required // for proper operation of the overlay filesystem. supportsDType, err = fsutils.SupportsDType(layerDir) if err != nil { return false, err } if !supportsDType { return false, overlayutils.ErrDTypeNotSupported("overlay", backingFs) } // Try a test mount in the specific location we're looking at using. 
mergedDir := filepath.Join(layerDir, "merged") lower1Dir := filepath.Join(layerDir, "lower1") lower2Dir := filepath.Join(layerDir, "lower2") upperDir := filepath.Join(layerDir, "upper") workDir := filepath.Join(layerDir, "work") defer func() { // Permitted to fail, since the various subdirectories // can be empty or not even there, and the home might // legitimately be not empty _ = unix.Unmount(mergedDir, unix.MNT_DETACH) _ = os.RemoveAll(layerDir) _ = os.Remove(home) }() _ = idtools.MkdirAs(mergedDir, 0700, rootUID, rootGID) _ = idtools.MkdirAs(lower1Dir, 0700, rootUID, rootGID) _ = idtools.MkdirAs(lower2Dir, 0700, rootUID, rootGID) _ = idtools.MkdirAs(upperDir, 0700, rootUID, rootGID) _ = idtools.MkdirAs(workDir, 0700, rootUID, rootGID) flags := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", lower1Dir, lower2Dir, upperDir, workDir) if len(flags) < unix.Getpagesize() { err := mountFrom(filepath.Dir(home), "overlay", mergedDir, "overlay", 0, flags) if err == nil { logrus.Debugf("overlay test mount with multiple lowers succeeded") return supportsDType, nil } else { logrus.Debugf("overlay test mount with multiple lowers failed %v", err) } } flags = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower1Dir, upperDir, workDir) if len(flags) < unix.Getpagesize() { err := mountFrom(filepath.Dir(home), "overlay", mergedDir, "overlay", 0, flags) if err == nil { logrus.Errorf("overlay test mount with multiple lowers failed, but succeeded with a single lower") return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay") } else { logrus.Debugf("overlay test mount with a single lower failed %v", err) } } logrus.Errorf("'overlay' is not supported over %s at %q", backingFs, home) return supportsDType, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s at %q", backingFs, home) } logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") } func (d *Driver) useNaiveDiff() bool { useNaiveDiffLock.Do(func() { if d.options.mountProgram != "" { useNaiveDiffOnly = true return } feature := fmt.Sprintf("native-diff(%s)", d.options.mountOptions) nativeDiffCacheResult, nativeDiffCacheText, err := cachedFeatureCheck(d.runhome, feature) if err == nil { if nativeDiffCacheResult { logrus.Debugf("cached value indicated that native-diff is usable") } else { logrus.Debugf("cached value indicated that native-diff is not being used") logrus.Warn(nativeDiffCacheText) } useNaiveDiffOnly = !nativeDiffCacheResult return } if err := doesSupportNativeDiff(d.home, d.options.mountOptions); err != nil { nativeDiffCacheText = fmt.Sprintf("Not using native diff for overlay, this may cause degraded performance for building images: %v", err) logrus.Warn(nativeDiffCacheText) useNaiveDiffOnly = true } cachedFeatureRecord(d.runhome, feature, !useNaiveDiffOnly, nativeDiffCacheText) }) return useNaiveDiffOnly } func (d *Driver) String() string { return d.name } // Status returns current driver information in a two dimensional string array. // Output contains "Backing Filesystem" used in this implementation. 
func (d *Driver) Status() [][2]string { return [][2]string{ {"Backing Filesystem", backingFs}, {"Supports d_type", strconv.FormatBool(d.supportsDType)}, {"Native Overlay Diff", strconv.FormatBool(!d.useNaiveDiff())}, {"Using metacopy", strconv.FormatBool(d.usingMetacopy)}, } } // Metadata returns meta data about the overlay driver such as // LowerDir, UpperDir, WorkDir and MergeDir used to store data. func (d *Driver) Metadata(id string) (map[string]string, error) { dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } metadata := map[string]string{ "WorkDir": path.Join(dir, "work"), "MergedDir": path.Join(dir, "merged"), "UpperDir": path.Join(dir, "diff"), } lowerDirs, err := d.getLowerDirs(id) if err != nil { return nil, err } if len(lowerDirs) > 0 { metadata["LowerDir"] = strings.Join(lowerDirs, ":") } return metadata, nil } // Cleanup any state created by overlay which should be cleaned when daemon // is being shutdown. For now, we just have to unmount the bind mounted // we had created. func (d *Driver) Cleanup() error { return mount.Unmount(d.home) } // CreateFromTemplate creates a layer with the same contents and parent as another layer. func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error { if readWrite { return d.CreateReadWrite(id, template, opts) } return d.Create(id, template, opts) } // CreateReadWrite creates a layer that is writable for use as a container // file system. func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { if opts != nil && len(opts.StorageOpt) != 0 && !projectQuotaSupported { return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") } if opts == nil { opts = &graphdriver.CreateOpts{ StorageOpt: map[string]string{}, } } if _, ok := opts.StorageOpt["size"]; !ok { if opts.StorageOpt == nil { opts.StorageOpt = map[string]string{} } opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) } return d.create(id, parent, opts) } // Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. // The parent filesystem is used to configure these directories for the overlay. 
func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { if opts != nil && len(opts.StorageOpt) != 0 { if _, ok := opts.StorageOpt["size"]; ok { return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") } } if d.options.ostreeRepo != "" { d.convert[id] = true } return d.create(id, parent, opts) } func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { dir := d.dir(id) uidMaps := d.uidMaps gidMaps := d.gidMaps if opts != nil && opts.IDMappings != nil { uidMaps = opts.IDMappings.UIDs() gidMaps = opts.IDMappings.GIDs() } rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) if err != nil { return err } // Make the link directory if it does not exist if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { return err } if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { return err } if parent != "" { st, err := system.Stat(d.dir(parent)) if err != nil { return err } rootUID = int(st.UID()) rootGID = int(st.GID()) } if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { return err } defer func() { // Clean up on failure if retErr != nil { os.RemoveAll(dir) } }() if opts != nil && len(opts.StorageOpt) > 0 { driver := &Driver{} if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { return err } if driver.options.quota.Size > 0 { // Set container disk quota limit if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil { return err } } } if err := idtools.MkdirAs(path.Join(dir, "diff"), 0755, rootUID, rootGID); err != nil { return err } lid := generateID(idLength) if err := os.Symlink(path.Join("..", id, "diff"), path.Join(d.home, linkDir, lid)); err != nil { return err } // Write link id to link file if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { return err } if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { return err } if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { return err } // if no parent directory, create a dummy lower directory and skip writing a "lowers" file if parent == "" { return idtools.MkdirAs(path.Join(dir, "empty"), 0700, rootUID, rootGID) } lower, err := d.getLower(parent) if err != nil { return err } if lower != "" { if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { return err } } return nil } // Parse overlay storage options func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { // Read size to set the disk project quota per container for key, val := range storageOpt { key := strings.ToLower(key) switch key { case "size": size, err := units.RAMInBytes(val) if err != nil { return err } driver.options.quota.Size = uint64(size) default: return fmt.Errorf("Unknown option %s", key) } } return nil } func (d *Driver) getLower(parent string) (string, error) { parentDir := d.dir(parent) // Ensure parent exists if _, err := os.Lstat(parentDir); err != nil { return "", err } // Read Parent link fileA parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link")) if err != nil { return "", err } lowers := []string{path.Join(linkDir, string(parentLink))} parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile)) if err == nil { parentLowers := strings.Split(string(parentLower), ":") lowers = append(lowers, parentLowers...) 
} if len(lowers) > maxDepth { return "", errors.New("max depth exceeded") } return strings.Join(lowers, ":"), nil } func (d *Driver) dir(id string) string { newpath := path.Join(d.home, id) if _, err := os.Stat(newpath); err != nil { for _, p := range d.AdditionalImageStores() { l := path.Join(p, d.name, id) _, err = os.Stat(l) if err == nil { return l } } } return newpath } func (d *Driver) getLowerDirs(id string) ([]string, error) { var lowersArray []string lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)) if err == nil { for _, s := range strings.Split(string(lowers), ":") { lower := d.dir(s) lp, err := os.Readlink(lower) if err != nil { return nil, err } lowersArray = append(lowersArray, path.Clean(d.dir(path.Join("link", lp)))) } } else if !os.IsNotExist(err) { return nil, err } return lowersArray, nil } func (d *Driver) optsAppendMappings(opts string, uidMaps, gidMaps []idtools.IDMap) string { if uidMaps == nil { uidMaps = d.uidMaps } if gidMaps == nil { gidMaps = d.gidMaps } if uidMaps != nil { var uids, gids bytes.Buffer for _, i := range uidMaps { if uids.Len() > 0 { uids.WriteString(":") } uids.WriteString(fmt.Sprintf("%d:%d:%d", i.ContainerID, i.HostID, i.Size)) } for _, i := range gidMaps { if gids.Len() > 0 { gids.WriteString(":") } gids.WriteString(fmt.Sprintf("%d:%d:%d", i.ContainerID, i.HostID, i.Size)) } return fmt.Sprintf("%s,uidmapping=%s,gidmapping=%s", opts, uids.String(), gids.String()) } return opts } // Remove cleans the directories that are created for this id. func (d *Driver) Remove(id string) error { d.locker.Lock(id) defer d.locker.Unlock(id) // Ignore errors, we don't want to fail if the ostree branch doesn't exist, if d.options.ostreeRepo != "" { ostree.DeleteOSTree(d.options.ostreeRepo, id) } dir := d.dir(id) lid, err := ioutil.ReadFile(path.Join(dir, "link")) if err == nil { if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { logrus.Debugf("Failed to remove link: %v", err) } } if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) { return err } return nil } // recreateSymlinks goes through the driver's home directory and checks if the diff directory // under each layer has a symlink created for it under the linkDir. 
If the symlink does not // exist, it creates them func (d *Driver) recreateSymlinks() error { // List all the directories under the home directory dirs, err := ioutil.ReadDir(d.home) if err != nil { return fmt.Errorf("error reading driver home directory %q: %v", d.home, err) } // This makes the link directory if it doesn't exist rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return err } if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { return err } for _, dir := range dirs { // Skip over the linkDir and anything that is not a directory if dir.Name() == linkDir || !dir.Mode().IsDir() { continue } // Read the "link" file under each layer to get the name of the symlink data, err := ioutil.ReadFile(path.Join(d.dir(dir.Name()), "link")) if err != nil { return fmt.Errorf("error reading name of symlink for %q: %v", dir, err) } linkPath := path.Join(d.home, linkDir, strings.Trim(string(data), "\n")) // Check if the symlink exists, and if it doesn't create it again with the name we // got from the "link" file _, err = os.Stat(linkPath) if err != nil && os.IsNotExist(err) { if err := os.Symlink(path.Join("..", dir.Name(), "diff"), linkPath); err != nil { return err } } else if err != nil { return fmt.Errorf("error trying to stat %q: %v", linkPath, err) } } return nil } // Get creates and mounts the required file system for the given id and returns the mount path. func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) { return d.get(id, false, options) } func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return "", err } readWrite := true // fuse-overlayfs doesn't support working without an upperdir. if d.options.mountProgram == "" { for _, o := range options.Options { if o == "ro" { readWrite = false break } } } lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil && !os.IsNotExist(err) { return "", err } // absLowers is the list of lowers as absolute paths, which works well with additional stores. absLowers := []string{} // relLowers is the list of lowers as paths relative to the driver's home directory. relLowers := []string{} // Check if $link/../diff{1-*} exist. If they do, add them, in order, as the front of the lowers // lists that we're building. "diff" itself is the upper, so it won't be in the lists. link, err := ioutil.ReadFile(path.Join(dir, "link")) if err != nil { return "", err } diffN := 1 _, err = os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN))) for err == nil { absLowers = append(absLowers, filepath.Join(dir, nameWithSuffix("diff", diffN))) relLowers = append(relLowers, dumbJoin(string(link), "..", nameWithSuffix("diff", diffN))) diffN++ _, err = os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN))) } // For each lower, resolve its path, and append it and any additional diffN // directories to the lowers list. 
for _, l := range strings.Split(string(lowers), ":") { if l == "" { continue } lower := "" newpath := path.Join(d.home, l) if _, err := os.Stat(newpath); err != nil { for _, p := range d.AdditionalImageStores() { lower = path.Join(p, d.name, l) if _, err2 := os.Stat(lower); err2 == nil { break } lower = "" } // if it is a "not found" error, that means the symlinks were lost in a sudden reboot // so call the recreateSymlinks function to go through all the layer dirs and recreate // the symlinks with the name from their respective "link" files if lower == "" && os.IsNotExist(err) { logrus.Warnf("Can't stat lower layer %q because it does not exist. Going through storage to recreate the missing symlinks.", newpath) if err := d.recreateSymlinks(); err != nil { return "", fmt.Errorf("error recreating the missing symlinks: %v", err) } lower = newpath } else if lower == "" { return "", fmt.Errorf("Can't stat lower layer %q: %v", newpath, err) } } else { lower = newpath } absLowers = append(absLowers, lower) relLowers = append(relLowers, l) diffN = 1 _, err = os.Stat(dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) for err == nil { absLowers = append(absLowers, dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) relLowers = append(relLowers, dumbJoin(l, "..", nameWithSuffix("diff", diffN))) diffN++ _, err = os.Stat(dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) } } // If the lowers list is still empty, use an empty lower so that we can still force an // SELinux context for the mount. // if we are doing a readOnly mount, and there is only one lower // We should just return the lower directory, no reason to mount. if !readWrite { if len(absLowers) == 0 { return path.Join(dir, "empty"), nil } if len(absLowers) == 1 { return absLowers[0], nil } } if len(absLowers) == 0 { absLowers = append(absLowers, path.Join(dir, "empty")) relLowers = append(relLowers, path.Join(id, "empty")) } // user namespace requires this to move a directory from lower to upper. rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return "", err } diffDir := path.Join(dir, "diff") if readWrite { if err := idtools.MkdirAllAs(diffDir, 0755, rootUID, rootGID); err != nil && !os.IsExist(err) { return "", err } } mergedDir := path.Join(dir, "merged") // Create the driver merged dir if err := idtools.MkdirAs(mergedDir, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { return "", err } if count := d.ctr.Increment(mergedDir); count > 1 { return mergedDir, nil } defer func() { if retErr != nil { if c := d.ctr.Decrement(mergedDir); c <= 0 { if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil { logrus.Errorf("error unmounting %v: %v", mergedDir, mntErr) } } } }() var opts string if readWrite { opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, path.Join(dir, "work")) } else { opts = fmt.Sprintf("lowerdir=%s", strings.Join(absLowers, ":")) } if len(options.Options) > 0 { opts = fmt.Sprintf("%s,%s", strings.Join(options.Options, ","), opts) } else if d.options.mountOptions != "" { opts = fmt.Sprintf("%s,%s", d.options.mountOptions, opts) } mountData := label.FormatMountLabel(opts, options.MountLabel) mountFunc := unix.Mount mountTarget := mergedDir pageSize := unix.Getpagesize() // Use relative paths and mountFrom when the mount data has exceeded // the page size. The mount syscall fails if the mount data cannot // fit within a page and relative links make the mount data much // smaller at the expense of requiring a fork exec to chroot. 
if d.options.mountProgram != "" { mountFunc = func(source string, target string, mType string, flags uintptr, label string) error { if !disableShifting { label = d.optsAppendMappings(label, options.UidMaps, options.GidMaps) } mountProgram := exec.Command(d.options.mountProgram, "-o", label, target) mountProgram.Dir = d.home var b bytes.Buffer mountProgram.Stderr = &b err := mountProgram.Run() if err != nil { output := b.String() if output == "" { output = "<stderr empty>" } return errors.Wrapf(err, "using mount program %s: %s", d.options.mountProgram, output) } return nil } } else if len(mountData) > pageSize { //FIXME: We need to figure out to get this to work with additional stores if readWrite { diffDir := path.Join(id, "diff") opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(relLowers, ":"), diffDir, path.Join(id, "work")) } else { opts = fmt.Sprintf("lowerdir=%s", strings.Join(absLowers, ":")) } mountData = label.FormatMountLabel(opts, options.MountLabel) if len(mountData) > pageSize { return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData)) } mountFunc = func(source string, target string, mType string, flags uintptr, label string) error { return mountFrom(d.home, source, target, mType, flags, label) } mountTarget = path.Join(id, "merged") } flags, data := mount.ParseOptions(mountData) logrus.Debugf("overlay: mount_data=%s", mountData) if err := mountFunc("overlay", mountTarget, "overlay", uintptr(flags), data); err != nil { return "", fmt.Errorf("error creating overlay mount to %s: %v", mountTarget, err) } return mergedDir, nil } // Put unmounts the mount path created for the give id. func (d *Driver) Put(id string) error { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return err } mountpoint := path.Join(dir, "merged") if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } if _, err := ioutil.ReadFile(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) { return err } if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil && !os.IsNotExist(err) { logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) } if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { logrus.Debugf("Failed to remove mountpoint %s overlay: %s - %v", id, mountpoint, err) } return nil } // Exists checks to see if the id is already mounted. func (d *Driver) Exists(id string) bool { _, err := os.Stat(d.dir(id)) return err == nil } // isParent returns if the passed in parent is the direct parent of the passed in layer func (d *Driver) isParent(id, parent string) bool { lowers, err := d.getLowerDirs(id) if err != nil { return false } if parent == "" && len(lowers) > 0 { return false } parentDir := d.dir(parent) var ld string if len(lowers) > 0 { ld = filepath.Dir(lowers[0]) } if ld == "" && parent == "" { return true } return ld == parentDir } func (d *Driver) getWhiteoutFormat() archive.WhiteoutFormat { whiteoutFormat := archive.OverlayWhiteoutFormat if d.options.mountProgram != "" { // If we are using a mount program, we are most likely running // as an unprivileged user that cannot use mknod, so fallback to the // AUFS whiteout format. 
whiteoutFormat = archive.AUFSWhiteoutFormat } return whiteoutFormat } // ApplyDiff applies the new layer into a root func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) (size int64, err error) { if !d.isParent(id, parent) { if d.options.ignoreChownErrors { options.IgnoreChownErrors = d.options.ignoreChownErrors } return d.naiveDiff.ApplyDiff(id, parent, options) } idMappings := options.Mappings if idMappings == nil { idMappings = &idtools.IDMappings{} } applyDir := d.getDiffPath(id) logrus.Debugf("Applying tar in %s", applyDir) // Overlay doesn't need the parent id to apply the diff if err := untar(options.Diff, applyDir, &archive.TarOptions{ UIDMaps: idMappings.UIDs(), GIDMaps: idMappings.GIDs(), IgnoreChownErrors: d.options.ignoreChownErrors, WhiteoutFormat: d.getWhiteoutFormat(), InUserNS: rsystem.RunningInUserNS(), }); err != nil { return 0, err } _, convert := d.convert[id] if convert { if err := ostree.ConvertToOSTree(d.options.ostreeRepo, applyDir, id); err != nil { return 0, err } } return directory.Size(applyDir) } func (d *Driver) getDiffPath(id string) string { dir := d.dir(id) return path.Join(dir, "diff") } // DiffSize calculates the changes between the specified id // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. func (d *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) { if d.useNaiveDiff() || !d.isParent(id, parent) { return d.naiveDiff.DiffSize(id, idMappings, parent, parentMappings, mountLabel) } return directory.Size(d.getDiffPath(id)) } // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". func (d *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error) { if d.useNaiveDiff() || !d.isParent(id, parent) { return d.naiveDiff.Diff(id, idMappings, parent, parentMappings, mountLabel) } if idMappings == nil { idMappings = &idtools.IDMappings{} } lowerDirs, err := d.getLowerDirs(id) if err != nil { return nil, err } diffPath := d.getDiffPath(id) logrus.Debugf("Tar with options on %s", diffPath) return archive.TarWithOptions(diffPath, &archive.TarOptions{ Compression: archive.Uncompressed, UIDMaps: idMappings.UIDs(), GIDMaps: idMappings.GIDs(), WhiteoutFormat: d.getWhiteoutFormat(), WhiteoutData: lowerDirs, }) } // Changes produces a list of changes between the specified layer // and its parent layer. If parent is "", then all changes will be ADD changes. func (d *Driver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) { if d.useNaiveDiff() || !d.isParent(id, parent) { return d.naiveDiff.Changes(id, idMappings, parent, parentMappings, mountLabel) } // Overlay doesn't have snapshots, so we need to get changes from all parent // layers. diffPath := d.getDiffPath(id) layers, err := d.getLowerDirs(id) if err != nil { return nil, err } return archive.OverlayChanges(layers, diffPath) } // AdditionalImageStores returns additional image stores supported by the driver func (d *Driver) AdditionalImageStores() []string { return d.options.imageStores } // UpdateLayerIDMap updates ID mappings in a from matching the ones specified // by toContainer to those specified by toHost. 
func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error { var err error dir := d.dir(id) diffDir := filepath.Join(dir, "diff") rootUID, rootGID := 0, 0 if toHost != nil { rootUID, rootGID, err = idtools.GetRootUIDGID(toHost.UIDs(), toHost.GIDs()) if err != nil { return err } } // Mount the new layer and handle ownership changes and possible copy_ups in it. options := graphdriver.MountOpts{ MountLabel: mountLabel, Options: strings.Split(d.options.mountOptions, ","), } layerFs, err := d.get(id, true, options) if err != nil { return err } err = graphdriver.ChownPathByMaps(layerFs, toContainer, toHost) if err != nil { if err2 := d.Put(id); err2 != nil { logrus.Errorf("%v; error unmounting %v: %v", err, id, err2) } return err } if err = d.Put(id); err != nil { return err } // Rotate the diff directories. i := 0 _, err = os.Stat(nameWithSuffix(diffDir, i)) for err == nil { i++ _, err = os.Stat(nameWithSuffix(diffDir, i)) } for i > 0 { err = os.Rename(nameWithSuffix(diffDir, i-1), nameWithSuffix(diffDir, i)) if err != nil { return err } i-- } // Re-create the directory that we're going to use as the upper layer. if err := idtools.MkdirAs(diffDir, 0755, rootUID, rootGID); err != nil { return err } return nil } // SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS func (d *Driver) SupportsShifting() bool { if os.Getenv("_TEST_FORCE_SUPPORT_SHIFTING") == "yes-please" { return true } return d.options.mountProgram != "" } // dumbJoin is more or less a dumber version of filepath.Join, but one which // won't Clean() the path, allowing us to append ".." as a component and trust // pathname resolution to do some non-obvious work. func dumbJoin(names ...string) string { if len(names) == 0 { return string(os.PathSeparator) } return strings.Join(names, string(os.PathSeparator)) } func nameWithSuffix(name string, number int) string { if number == 0 { return name } return fmt.Sprintf("%s%d", name, number) }
[ "\"_TEST_FORCE_SUPPORT_SHIFTING\"" ]
[]
[ "_TEST_FORCE_SUPPORT_SHIFTING" ]
[]
["_TEST_FORCE_SUPPORT_SHIFTING"]
go
1
0
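The idLength constant above is sized so that a full stack of maxDepth lower-layer links still fits in one page of mount data, per the inequality in the driver's comment: ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512). A standalone check of that bound using the driver's constants (not part of the driver itself; the page size is assumed to be the platform default, typically 4096 on Linux):

package main

import (
	"fmt"
	"os"
)

func main() {
	// Constants taken from the overlay driver above.
	const (
		linkDir  = "l"
		maxDepth = 128
		idLength = 26
	)
	// 512 bytes are reserved for label metadata, as the driver's comment explains.
	pageSize := os.Getpagesize()
	needed := (idLength + len(linkDir) + 1) * maxDepth
	fmt.Printf("lowerdir bytes needed for %d layers: %d (budget %d)\n",
		maxDepth, needed, pageSize-512)
	fmt.Println("fits in one page:", needed <= pageSize-512)
}

With a 4096-byte page this works out to 3584 bytes needed against a 3584-byte budget, which is why the comment calls idLength the largest value that keeps a 128-layer mount within a single page.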
appengine/app/main.go
package main

import (
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"strings"

	"github.com/aktsk/kalvados/server"
)

func init() {
	// Get a private key
	var key *rsa.PrivateKey

	keyFile, err := os.Open("key.pem")
	defer keyFile.Close()

	if err == nil {
		keyPEM, err := ioutil.ReadAll(keyFile)
		if err != nil {
			log.Fatal(err)
		}

		keyDER, _ := pem.Decode(keyPEM)

		key, err = x509.ParsePKCS1PrivateKey(keyDER.Bytes)
		if err != nil {
			log.Fatal(err)
		}
	} else {
		keyPEM := os.Getenv("PRIVATE_KEY")
		if keyPEM != "" {
			keyPEM = revertPEM(keyPEM)
			keyDER, _ := pem.Decode([]byte(keyPEM))
			key, err = x509.ParsePKCS1PrivateKey(keyDER.Bytes)
			if err != nil {
				log.Fatal(err)
			}
		}
	}

	// Get a certificate
	var cert *x509.Certificate

	certFile, err := os.Open("cert.pem")
	defer certFile.Close()

	if err == nil {
		certPEM, err := ioutil.ReadAll(certFile)
		if err != nil {
			log.Fatal(err)
		}

		certDER, _ := pem.Decode(certPEM)

		cert, err = x509.ParseCertificate(certDER.Bytes)
		if err != nil {
			log.Fatal(err)
		}
	} else {
		certPEM := os.Getenv("CERTIFICATE")
		if certPEM != "" {
			certPEM = revertPEM(certPEM)
			certDER, _ := pem.Decode([]byte(certPEM))
			cert, err = x509.ParseCertificate(certDER.Bytes)
			if err != nil {
				log.Fatal(err)
			}
		}
	}

	http.HandleFunc("/", server.Encode(key, cert))
}

// It seems GAE/Go can not handle environment variables that has
// return code. So in YAML file, I use ">-" to replace return
// code with white space. This function is for reverting back PEM data
// to original.
func revertPEM(c string) string {
	c = strings.Replace(c, " ", "\n", -1)
	c = strings.Replace(c, "\nCERTIFICATE", " CERTIFICATE", -1)
	c = strings.Replace(c, "\nPRIVATE\nKEY", " PRIVATE KEY", -1)
	return c
}
[ "\"PRIVATE_KEY\"", "\"CERTIFICATE\"" ]
[]
[ "CERTIFICATE", "PRIVATE_KEY" ]
[]
["CERTIFICATE", "PRIVATE_KEY"]
go
2
0
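revertPEM undoes the newline-to-space folding that the YAML ">-" block style applies to the PRIVATE_KEY and CERTIFICATE environment variables. A standalone sketch of the round trip, using a dummy certificate body rather than real key material (the folded input string is fabricated for illustration):

package main

import (
	"fmt"
	"strings"
)

// revertPEM restores the newlines that were replaced by spaces when the PEM
// was stored in a single-line environment variable, while keeping the space
// inside the "BEGIN CERTIFICATE"/"END CERTIFICATE" markers intact.
func revertPEM(c string) string {
	c = strings.Replace(c, " ", "\n", -1)
	c = strings.Replace(c, "\nCERTIFICATE", " CERTIFICATE", -1)
	c = strings.Replace(c, "\nPRIVATE\nKEY", " PRIVATE KEY", -1)
	return c
}

func main() {
	// Folded form, as it would arrive via os.Getenv("CERTIFICATE").
	folded := "-----BEGIN CERTIFICATE----- MIIBdummyline1 MIIBdummyline2 -----END CERTIFICATE-----"
	fmt.Println(revertPEM(folded))
}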
utils/utils.go
/* * [2013] - [2018] Avi Networks Incorporated * All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package utils import ( "encoding/json" "hash/fnv" "math/rand" "net" "net/url" "os" "reflect" "strings" "sync" "time" corev1 "k8s.io/api/core/v1" extensions "k8s.io/api/extensions/v1beta1" kubeinformers "k8s.io/client-go/informers" oshiftinformers "github.com/openshift/client-go/route/informers/externalversions" oshiftclientset "github.com/openshift/client-go/route/clientset/versioned" "k8s.io/client-go/tools/cache" ) var CtrlVersion string func init() { //Setting the package-wide version CtrlVersion = os.Getenv("CTRL_VERSION") if CtrlVersion == "" { CtrlVersion = "18.2.2" } } func IsV4(addr string) bool { ip := net.ParseIP(addr) v4 := ip.To4() if v4 == nil { return false } else { return true } } /* * Port name is either "http" or "http-suffix" * Following Istio named port convention * https://istio.io/docs/setup/kubernetes/spec-requirements/ * TODO: Define matching ports in configmap and make it configurable */ func IsSvcHttp(svc_name string, port int32) bool { if svc_name == "http" { return true } else if strings.HasPrefix(svc_name, "http-") { return true } else if (port == 80) || (port == 443) || (port == 8080) || (port == 8443) { return true } else { return false } } func AviUrlToObjType(aviurl string) (string, error) { url, err := url.Parse(aviurl) if err != nil { AviLog.Warning.Printf("aviurl %v parse error", aviurl) return "", err } path := url.EscapedPath() elems := strings.Split(path, "/") return elems[2], nil } /* * Hash key to pick workqueue & GoRoutine. Hash needs to ensure that K8S * objects that map to the same Avi objects hash to the same wq. E.g. * Routes that share the same "host" should hash to the same wq, so "host" * is the hash key for Routes. 
For objects like Service, it can be ns:name */ func CrudHashKey(obj_type string, obj interface{}) string { var ns, name string switch obj_type { case "Endpoints": ep := obj.(*corev1.Endpoints) ns = ep.Namespace name = ep.Name case "Service": svc := obj.(*corev1.Service) ns = svc.Namespace name = svc.Name case "Ingress": ing := obj.(*extensions.Ingress) ns = ing.Namespace name = ing.Name default: AviLog.Error.Printf("Unknown obj_type %s obj %v", obj_type, obj) return ":" } return ns + ":" + name } func Bkt(key string, num_workers uint32) uint32 { bkt := Hash(key) & (num_workers - 1) return bkt } // DeepCopy deepcopies a to b using json marshaling func DeepCopy(a, b interface{}) { byt, _ := json.Marshal(a) json.Unmarshal(byt, b) } func Hash(s string) uint32 { h := fnv.New32a() h.Write([]byte(s)) return h.Sum32() } var letters = []rune("abcdefghijklmnopqrstuvwxyz1234567890") func RandomSeq(n int) string { b := make([]rune, n) for i := range b { b[i] = letters[rand.Intn(len(letters))] } return string(b) } var informer sync.Once var informerInstance *Informers func instantiateInformers(kubeClient KubeClientIntf, registeredInformers []string, ocs oshiftclientset.Interface) *Informers { cs := kubeClient.ClientSet kubeInformerFactory := kubeinformers.NewSharedInformerFactory(cs, time.Second*30) informers := &Informers{} for _, informer := range registeredInformers { switch informer { case ServiceInformer: informers.ServiceInformer = kubeInformerFactory.Core().V1().Services() case PodInformer: informers.PodInformer = kubeInformerFactory.Core().V1().Pods() case EndpointInformer: informers.EpInformer = kubeInformerFactory.Core().V1().Endpoints() case SecretInformer: informers.SecretInformer = kubeInformerFactory.Core().V1().Secrets() case IngressInformer: informers.IngressInformer = kubeInformerFactory.Extensions().V1beta1().Ingresses() case RouteInformer: if ocs != nil { oshiftInformerFactory := oshiftinformers.NewSharedInformerFactory(ocs, time.Second*30) informers.RouteInformer = oshiftInformerFactory.Route().V1().Routes() } } } return informers } /* * Returns a set of informers. By default the informer set would be instantiated once and reused for subsequent calls. * Extra arguments can be passed in form of key value pairs. * "instanciateOnce" <bool> : If false, then a new set of informers would be returned for each call. 
* "oshiftclient" <oshiftclientset.Interface> : Informer for openshift route has to be registered using openshiftclient */ func NewInformers(kubeClient KubeClientIntf, registeredInformers []string, args ...map[string]interface{}) *Informers { var oshiftclient oshiftclientset.Interface var instantiateOnce, ok bool = true, true if len(args) > 0 { for k, v := range args[0] { switch k { case INFORMERS_INSTANTIATE_ONCE: instantiateOnce, ok = v.(bool) if !ok { AviLog.Warning.Printf("arg instantiateOnce is not of type bool") } case INFORMERS_OPENSHIFT_CLIENT: oshiftclient, ok = v.(oshiftclientset.Interface) if !ok { AviLog.Warning.Printf("arg oshiftclient is not of type oshiftclientset.Interface") } default: AviLog.Warning.Printf("Unknown Key %s in args", k) } } } if !instantiateOnce { return instantiateInformers(kubeClient, registeredInformers, oshiftclient) } informer.Do(func() { informerInstance = instantiateInformers(kubeClient, registeredInformers, oshiftclient) }) return informerInstance } func GetInformers() *Informers { if informerInstance == nil { AviLog.Error.Fatal("Cannot retrieve the informers since it's not initialized yet.") return nil } return informerInstance } func Stringify(serialize interface{}) string { json_marshalled, _ := json.Marshal(serialize) return string(json_marshalled) } func ExtractGatewayNamespace(key string) (string, string) { segments := strings.Split(key, "/") if len(segments) == 2 { return segments[0], segments[1] } return "", "" } func HasElem(s interface{}, elem interface{}) bool { arrV := reflect.ValueOf(s) if arrV.Kind() == reflect.Slice { for i := 0; i < arrV.Len(); i++ { // XXX - panics if slice element points to an unexported struct field // see https://golang.org/pkg/reflect/#Value.Interface if arrV.Index(i).Interface() == elem { return true } } } return false } func ObjKey(obj interface{}) string { key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { AviLog.Warning.Print(err) } return key }
[ "\"CTRL_VERSION\"" ]
[]
[ "CTRL_VERSION" ]
[]
["CTRL_VERSION"]
go
1
0
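One detail worth noting in the record above: Bkt computes Hash(key) & (num_workers - 1), which only spreads keys across all buckets when the worker count is a power of two. A hedged, standalone sketch of the same FNV-based bucketing that also handles arbitrary worker counts:

package main

import (
	"fmt"
	"hash/fnv"
)

// bucket picks a work-queue index for a key. Unlike the bitmask in utils.go,
// the modulo form is correct for any positive numWorkers, at the cost of a
// slightly more expensive operation.
func bucket(key string, numWorkers uint32) uint32 {
	h := fnv.New32a()
	h.Write([]byte(key))
	return h.Sum32() % numWorkers
}

func main() {
	fmt.Println(bucket("default:my-service", 3)) // ns:name style key, as CrudHashKey produces
}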
staged.go
package main import ( "flag" "fmt" "io/ioutil" "log" "os" "os/exec" "path" "path/filepath" "strings" ) func Usage() { fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) fmt.Fprintf(os.Stderr, " %s COMMAND..\n", os.Args[0]) flag.PrintDefaults() } func get_git_dir() (string, error) { cmd := exec.Command("git", "rev-parse", "--git-dir") cmd.Stderr = os.Stderr out, err := cmd.Output() if err != nil { return "", err } if len(out) == 0 || out[len(out)-1] != '\n' { return "", fmt.Errorf("git directory looks wrong: %q", out) } return string(out[:len(out)-1]), nil } func get_toplevel() (string, error) { cmd := exec.Command("git", "rev-parse", "--show-toplevel") cmd.Stderr = os.Stderr out, err := cmd.Output() if err != nil { return "", err } if len(out) == 0 || out[len(out)-1] != '\n' { return "", fmt.Errorf("toplevel looks wrong: %q", out) } return string(out[:len(out)-1]), nil } // The subdirectory of the git worktree we are currently in, or empty // string for root of worktree. func get_git_prefix() (string, error) { cmd := exec.Command("git", "rev-parse", "--show-prefix") cmd.Stderr = os.Stderr out, err := cmd.Output() if err != nil { return "", err } if len(out) == 0 { return "", nil } if out[len(out)-1] != '\n' { return "", fmt.Errorf("prefix looks wrong: %q", out) } return string(out[:len(out)-1]), nil } // Check whether the path is under a $GOPATH/src directory. If so, // return the subtree path from there on. If not, return empty string. func is_inside_gopath(p string) (string, error) { // i am butchering the difference between filepath and path. // i don't really care, right now. abs, err := filepath.Abs(p) if err != nil { return "", fmt.Errorf("cannot make path absolute: %v", err) } abs = path.Clean(abs) for _, gopath := range filepath.SplitList(os.Getenv("GOPATH")) { gopath, err := filepath.Abs(gopath) if err != nil { return "", fmt.Errorf("cannot make path absolute: %v", err) } gopath = path.Clean(gopath) src := path.Join(gopath, "src") + "/" if strings.HasPrefix(abs, src) { rest := abs[len(src):] return rest, nil } } return "", nil } // Remove a variable from the environment. Returns the new // environment. 
func unsetenv(env []string, name string) []string { prefix := name + "=" i := 0 for { if i >= len(env) { break } for strings.HasPrefix(env[i], prefix) { if i+1 < len(env) { copy(env[i:], env[i+1:]) } env = env[:len(env)-1] // don't increment i, look at this position again continue } i++ } return env } func run(command string, args ...string) (err error) { gitdir, err := get_git_dir() if err != nil { return fmt.Errorf("cannot find git directory: %v", err) } gitdir, err = filepath.Abs(gitdir) if err != nil { return fmt.Errorf("cannot make git dir absolute: %v", err) } toplevel, err := get_toplevel() if err != nil { return fmt.Errorf("cannot find toplevel directory: %v", err) } prefix, err := get_git_prefix() if err != nil { return fmt.Errorf("cannot find worktree prefix: %v", err) } tmp, err := ioutil.TempDir(gitdir, "staged-") if err != nil { return fmt.Errorf("cannot create tempdir: %v", err) } defer func() { err2 := os.RemoveAll(tmp) if err2 != nil { err2 = fmt.Errorf("tempdir cleanup failed: %v", err2) if err == nil { // override the success with this cleanup error err = err2 } else { // do not lose this message, even if we have something more important log.Print(err2) } } }() env := os.Environ() checkout_dir := tmp inside_gopath, err := is_inside_gopath(toplevel) if err != nil { return fmt.Errorf("cannot determine gopath: %v", err) } if inside_gopath != "" { gopath_list := []string{checkout_dir} gopath_list = append(gopath_list, filepath.SplitList(os.Getenv("GOPATH"))...) gopath := strings.Join(gopath_list, string(filepath.ListSeparator)) // strip out existing GOPATH or we'll have duplicates // and undefined behavior env = unsetenv(env, "GOPATH") env = append(env, "GOPATH="+gopath) checkout_dir = path.Join(checkout_dir, "src", inside_gopath) } { cmd := exec.Command("git", "checkout-index", "--all", "--prefix="+checkout_dir+"/") cmd.Stderr = os.Stderr err = cmd.Run() if err != nil { return fmt.Errorf("cannot checkout index: %v", err) } } { cmd := exec.Command(command, args...) cmd.Stdin = os.Stdin cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr // run in matching subdirectory; prefix may be empty // string, but Join handles that fine cmd.Dir = path.Join(checkout_dir, prefix) // sometimes we need to override GOPATH cmd.Env = env err = cmd.Run() if err != nil { return fmt.Errorf("command failed: %v", err) } } return nil } func main() { prog := path.Base(os.Args[0]) log.SetFlags(0) log.SetPrefix(prog + ": ") flag.Usage = Usage flag.Parse() if flag.NArg() < 1 { Usage() os.Exit(1) } args := []string{} if flag.NArg() > 1 { args = flag.Args()[1:] } err := run(flag.Arg(0), args...) if err != nil { log.Fatalf("%v", err) } }
[ "\"GOPATH\"", "\"GOPATH\"" ]
[]
[ "GOPATH" ]
[]
["GOPATH"]
go
1
0
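staged.go above rewrites the child process environment by stripping GOPATH from a copy of os.Environ() and appending its own value before exec. A compact sketch of that strip-and-replace pattern, with a hypothetical replacement path:

package main

import (
	"fmt"
	"os"
	"strings"
)

// dropVar returns a copy of env with every entry for the named variable removed.
func dropVar(env []string, name string) []string {
	out := make([]string, 0, len(env))
	prefix := name + "="
	for _, kv := range env {
		if !strings.HasPrefix(kv, prefix) {
			out = append(out, kv)
		}
	}
	return out
}

func main() {
	env := dropVar(os.Environ(), "GOPATH")
	env = append(env, "GOPATH=/tmp/staged-checkout") // hypothetical checkout dir
	// env would then be assigned to exec.Cmd.Env before running the command.
	fmt.Println(len(env))
}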
python/paddle/fluid/parallel_executor.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function from . import core from . import framework from . import executor from . import compiler from .data_feeder import check_type import sys __all__ = ['ParallelExecutor'] ExecutionStrategy = core.ParallelExecutor.ExecutionStrategy BuildStrategy = core.ParallelExecutor.BuildStrategy class ParallelExecutor(object): """ :api_attr: Static Graph The ParallelExecutor is an upgraded version of :code:`fluid.Executor` that supports multi-node model training and testing based on the data-parallel mode. In data-parallel mode, ParallelExecutor will broadcast the parameters from Node0 to other nodes during construction and copy the input Program to other nodes from Node0 to make sure that the initial state on each node is the same. Each node runs the model independently and the parameters' gradient is aggregated between those nodes during backward computation, and then each node independently updates its parameters. If you use the GPU to run the model, i.e. use_cuda=True, the node refers to the GPU, ParallelExecutor will automatically get the GPU resources available on the current machine, users can also set the available GPU resources in the environment variable, for example: want to use GPU0, GPU1, export CUDA_VISIBLEDEVICES=0,1; If the operation is performed on the CPU, i.e. use_cuda=False, the node refers to the CPU. **Note: At this time, the user needs to manually add CPU_NUM to the environment variable and set the number of CPU devices. For example, export CPU_NUM=4, if the environment variable is not set, the executor will add the variable to the environment variable and set it to 1.** Args: use_cuda (bool): Whether to use CUDA or not. loss_name (str): This parameter is the name of the loss variable of the model. **Note: If it is data-parallel model training, you must set loss_name, otherwise, the results may be wrong**. The default is None. main_program (Program): This parameter represents the Program to be executed. If this parameter is not provided, that parameter is None, the program will be set to :code:`fluid.default_main_program()`. The default is None. share_vars_from(ParallelExecutor): If share_vars_from is set, the current ParallelExecutor will share the parameters with the ParallelExecutor specified by share_vars_from. This parameter needs to be set when model testing is required during model training, and the data parallel mode is used for training and testing. Since ParallelExecutor will only distribute parameter variables to other devices when it is first executed, the ParallelExecutor specified by share_vars_from must be run before the current ParallelExecutor. The default is None. exec_strategy(ExecutionStrategy): exec_strategy specifies the options that can be changed when running the current model, such as the thread pool size. For more information about exec_strategy, please refer to :code:`fluid.ExecutionStrategy`. The default is None. 
build_strategy(BuildStrategy): By configuring build_strategy, we can optimize the computational graph, such as operators' fusion in the computational graph and memory optimization during the execution of the computational graph. For more information about build_strategy, please refer to :code:`fluid.BuildStrategy`. The default is None. num_trainers(int): This parameter needs to be set in GPU distributed training. If the parameter value is greater than 1, NCCL will be initialized by multi-level nodes. Each node should have the same number of GPUs. The default is 1. trainer_id(int): This parameter needs to be set when performing GPU distributed training. This parameter must be used with the num_trainers parameter. Trainer_id indicates the "rank" of the current node. The trainer_id starts counting from 0. The default is 0. scope(Scope): Specifies the scope in which the program is executed. The default is fluid.global_scope(). Returns: ParallelExecutor: The initialized ParallelExecutor object. Raises: TypeError: If share_vars_from is provided, but not ParallelExecutor object. NOTES: 1. If you only use ParallelExecutor to do multi-card test, you don't need to set loss_name and share_vars_from. 2. If you need to train and test the model with ParallelExecutor, the share_vars_from must be set when building the ParallelExecutor corresponding to the model test. Otherwise, the parameters used in the model test and the model training are inconsistent. Examples: .. code-block:: python import paddle.fluid as fluid import numpy import os use_cuda = True place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() # NOTE: If you use CPU to run the program, you need # to specify the CPU_NUM, otherwise, fluid will use # all the number of the logic core as the CPU_NUM, # in that case, the batch size of the input should be # greater than CPU_NUM, if not, the process will be # failed by an exception. if not use_cuda: os.environ['CPU_NUM'] = str(2) exe = fluid.Executor(place) train_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(train_program, startup_program): data = fluid.data(name='X', shape=[None, 1], dtype='float32') hidden = fluid.layers.fc(input=data, size=10) loss = fluid.layers.mean(hidden) test_program = fluid.default_main_program().clone(for_test=True) fluid.optimizer.SGD(learning_rate=0.01).minimize(loss) startup_program.random_seed=1 exe.run(startup_program) train_exe = fluid.ParallelExecutor(use_cuda=use_cuda, main_program=train_program, loss_name=loss.name) test_exe = fluid.ParallelExecutor(use_cuda=use_cuda, main_program=test_program, share_vars_from=train_exe) x = numpy.random.random(size=(10, 1)).astype('float32') loss_data, = train_exe.run(feed={"X": x}, fetch_list=[loss.name]) loss_data, = test_exe.run(feed={"X": x}, fetch_list=[loss.name]) """ def __init__(self, use_cuda, loss_name=None, main_program=None, share_vars_from=None, exec_strategy=None, build_strategy=None, num_trainers=1, trainer_id=0, scope=None): if build_strategy is None: build_strategy = BuildStrategy() # TODO(paddle-dev): trainer_id and num_trainers should be removed from parameter list. 
if num_trainers != 1 and build_strategy.num_trainers != num_trainers: sys.stderr.write( 'The value of build_strategy.num_trainers[%d] is overwritten ' 'by the passed num_trainers[%d].\n' % (build_strategy.num_trainers, num_trainers)) build_strategy.num_trainers = num_trainers if trainer_id != 0 and build_strategy.trainer_id != trainer_id: sys.stderr.write( 'The value of build_strategy.trainer_id[%d] is overwritten ' 'by the passed trainer_id[%d].\n' % (build_strategy.trainer_id, trainer_id)) build_strategy.trainer_id = trainer_id self._places = framework.cuda_places( ) if use_cuda else framework.cpu_places() self._scope = scope if scope is not None else executor.global_scope() main_program = main_program if main_program is not None \ else framework.default_main_program() self._compiled_program = compiler.CompiledProgram(main_program) if share_vars_from: assert isinstance( share_vars_from, ParallelExecutor ), "The share_vars_from should be ParallelExecutor." self._compiled_program.with_data_parallel( loss_name=loss_name, build_strategy=build_strategy, exec_strategy=exec_strategy, share_vars_from=share_vars_from._compiled_program if share_vars_from else None) self._place = core.CUDAPlace(0) if use_cuda else core.CPUPlace() self._exe = executor.Executor(self._place) def run(self, fetch_list, feed=None, feed_dict=None, return_numpy=True): """ This interface is used to run the current model. It should be noted that the executor will execute all the operators in the Program, and will not prune some operators in the Program according to the fetch_list. Args: fetch_list(list): This parameter represents the variables that need to be returned after the model runs. The default is None. feed(list|dict): This parameter represents the input variables of the model. If it is single card training, the feed is dict type, and if it is multi-card training, the parameter feed can be dict or list type variable. If the parameter type is dict, the data in the feed will be split and sent to multiple devices (CPU/GPU), that is to say, the input data will be evenly sent to different devices, so you should make sure the number of samples of the current mini-batch must be greater than the number of places; if the parameter type is list, those data are copied directly to each device, so the length of this list should be equal to the number of places. The default is None. feed_dict: Alias for feed parameter, for backward compatibility. This parameter has been deprecated. Default None. return_numpy(bool): This parameter indicates whether convert the fetched variables (the variable specified in the fetch list) to numpy.ndarray. if it is False, the type of the return value is a list of :code:`LoDTensor`. The default is True. Returns: List: The fetched result list. Raises: ValueError: If the feed is a list, but its length is not equal the length of active places, or its element's is not dict. NOTES: 1. If the feed parameter is dict type, the input data will be evenly distributed to different cards. For example, using two GPUs to run the model, the input sample number is 3, that is, [0, 1, 2], the sample number on GPU0 is 1, that is, [0], and the sample number on GPU1 is 2, that is, [1, 2]. If the number of samples is less than the number of devices, the program will throw an exception, so when running the model, you should make sure that the number of samples of the last batch of the data set should be greater than the number of CPU cores or GPU cards, if it is less than, it is recommended that the batch be discarded. 2. 
If the number of CPU cores or GPU cards available is greater than 1, the fetch results are spliced together in dimension 0 for the same variable values (variables in fetch_list) on different devices. Examples: .. code-block:: python import paddle.fluid as fluid import numpy import os use_cuda = True place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() # NOTE: If you use CPU to run the program, you need # to specify the CPU_NUM, otherwise, fluid will use # all the number of the logic core as the CPU_NUM, # in that case, the batch size of the input should be # greater than CPU_NUM, if not, the process will be # failed by an exception. if not use_cuda: os.environ['CPU_NUM'] = str(2) exe = fluid.Executor(place) train_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(train_program, startup_program): data = fluid.data(name='X', shape=[None, 1], dtype='float32') hidden = fluid.layers.fc(input=data, size=10) loss = fluid.layers.mean(hidden) fluid.optimizer.SGD(learning_rate=0.01).minimize(loss) exe.run(startup_program) train_exe = fluid.ParallelExecutor(use_cuda=use_cuda, main_program=train_program, loss_name=loss.name) # If the feed is a dict: # the image will be split into devices. If there is two devices # each device will process an image with shape (5, 1) x = numpy.random.random(size=(10, 1)).astype('float32') loss_data, = train_exe.run(feed={"X": x}, fetch_list=[loss.name]) # If the feed is a list: # each device will process each element in the list. # the 1st device will process an image with shape (10, 1) # the 2nd device will process an image with shape (9, 1) # # you can use exe.device_count to get the device number. x2 = numpy.random.random(size=(9, 1)).astype('float32') loss_data, = train_exe.run(feed=[{"X": x}, {"X": x2}], fetch_list=[loss.name]) """ return self._exe.run(program=self._compiled_program, scope=self._scope, feed=feed, fetch_list=fetch_list, return_numpy=return_numpy) @property def device_count(self): return len(self._places) def drop_local_exe_scopes(self): """ Drop the local execution scopes immediately. In order to avoid frequently application and release of temporary variables, the strategy adopted by ParallelExecutor is to drop the local execution scopes after several iterations. ParallelExecutor provides the num_iteration_per_drop_scope option in :code:`fluid.ExecutionStrategy`, which indicates how many iterations are intervened to drop the local execution scopes. If the num_iteration_per_drop_scope value is 100, but you want to drop the local execution scopes after 50 iterations, you can call the interface manually. Returns: None Examples: .. code-block:: python import paddle.fluid as fluid import numpy import os use_cuda = True # NOTE: If you use CPU to run the program, you need # to specify the CPU_NUM, otherwise, fluid will use # all the number of the logic core as the CPU_NUM, # in that case, the batch size of the input should be # greater than CPU_NUM, if not, the process will be # failed by an exception. 
if not use_cuda: os.environ['CPU_NUM'] = str(2) train_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(train_program, startup_program): data = fluid.data(name='X', shape=[None, 1], dtype='float32') hidden = fluid.layers.fc(input=data, size=10) loss = fluid.layers.mean(hidden) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) exe.run(startup_program) parallel_exe = fluid.ParallelExecutor(use_cuda=use_cuda, main_program=train_program, loss_name=loss.name) x = numpy.random.random(size=(10, 1)).astype('float32') loss_data, = parallel_exe.run(feed={"X": x}, fetch_list=[loss.name]) parallel_exe.drop_local_exe_scopes() """ check_type(self._compiled_program._executor, "the Executor of compiled program", core.ParallelExecutor, "ParallelExecutor.drop_local_exe_scopes") self._compiled_program._executor.drop_local_exe_scopes() # This API is used to check whether DropLocalExeScopes can work. def _need_create_local_exe_scopes(self): check_type(self._compiled_program._executor, "the Executor of compiled program", core.ParallelExecutor, "ParallelExecutor._need_create_local_exe_scopes") return self._compiled_program._executor._need_create_local_exe_scopes()
[]
[]
[ "CPU_NUM" ]
[]
["CPU_NUM"]
python
1
0
vendor/google.golang.org/genproto/googleapis/ads/googleads/v1/errors/customer_manager_link_error.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT. // source: google/ads/googleads/v1/errors/customer_manager_link_error.proto package errors // import "google.golang.org/genproto/googleapis/ads/googleads/v1/errors" import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import _ "google.golang.org/genproto/googleapis/api/annotations" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // Enum describing possible CustomerManagerLink errors. type CustomerManagerLinkErrorEnum_CustomerManagerLinkError int32 const ( // Enum unspecified. CustomerManagerLinkErrorEnum_UNSPECIFIED CustomerManagerLinkErrorEnum_CustomerManagerLinkError = 0 // The received error code is not known in this version. CustomerManagerLinkErrorEnum_UNKNOWN CustomerManagerLinkErrorEnum_CustomerManagerLinkError = 1 // No pending invitation. CustomerManagerLinkErrorEnum_NO_PENDING_INVITE CustomerManagerLinkErrorEnum_CustomerManagerLinkError = 2 // Attempt to operate on the same client more than once in the same call. CustomerManagerLinkErrorEnum_SAME_CLIENT_MORE_THAN_ONCE_PER_CALL CustomerManagerLinkErrorEnum_CustomerManagerLinkError = 3 // Manager account has the maximum number of linked accounts. CustomerManagerLinkErrorEnum_MANAGER_HAS_MAX_NUMBER_OF_LINKED_ACCOUNTS CustomerManagerLinkErrorEnum_CustomerManagerLinkError = 4 // If no active user on account it cannot be unlinked from its manager. CustomerManagerLinkErrorEnum_CANNOT_UNLINK_ACCOUNT_WITHOUT_ACTIVE_USER CustomerManagerLinkErrorEnum_CustomerManagerLinkError = 5 // Account should have at least one active owner on it before being // unlinked. CustomerManagerLinkErrorEnum_CANNOT_REMOVE_LAST_CLIENT_ACCOUNT_OWNER CustomerManagerLinkErrorEnum_CustomerManagerLinkError = 6 // Only account owners may change their permission role. CustomerManagerLinkErrorEnum_CANNOT_CHANGE_ROLE_BY_NON_ACCOUNT_OWNER CustomerManagerLinkErrorEnum_CustomerManagerLinkError = 7 // When a client's link to its manager is not active, the link role cannot // be changed. CustomerManagerLinkErrorEnum_CANNOT_CHANGE_ROLE_FOR_NON_ACTIVE_LINK_ACCOUNT CustomerManagerLinkErrorEnum_CustomerManagerLinkError = 8 // Attempt to link a child to a parent that contains or will contain // duplicate children. 
CustomerManagerLinkErrorEnum_DUPLICATE_CHILD_FOUND CustomerManagerLinkErrorEnum_CustomerManagerLinkError = 9 ) var CustomerManagerLinkErrorEnum_CustomerManagerLinkError_name = map[int32]string{ 0: "UNSPECIFIED", 1: "UNKNOWN", 2: "NO_PENDING_INVITE", 3: "SAME_CLIENT_MORE_THAN_ONCE_PER_CALL", 4: "MANAGER_HAS_MAX_NUMBER_OF_LINKED_ACCOUNTS", 5: "CANNOT_UNLINK_ACCOUNT_WITHOUT_ACTIVE_USER", 6: "CANNOT_REMOVE_LAST_CLIENT_ACCOUNT_OWNER", 7: "CANNOT_CHANGE_ROLE_BY_NON_ACCOUNT_OWNER", 8: "CANNOT_CHANGE_ROLE_FOR_NON_ACTIVE_LINK_ACCOUNT", 9: "DUPLICATE_CHILD_FOUND", } var CustomerManagerLinkErrorEnum_CustomerManagerLinkError_value = map[string]int32{ "UNSPECIFIED": 0, "UNKNOWN": 1, "NO_PENDING_INVITE": 2, "SAME_CLIENT_MORE_THAN_ONCE_PER_CALL": 3, "MANAGER_HAS_MAX_NUMBER_OF_LINKED_ACCOUNTS": 4, "CANNOT_UNLINK_ACCOUNT_WITHOUT_ACTIVE_USER": 5, "CANNOT_REMOVE_LAST_CLIENT_ACCOUNT_OWNER": 6, "CANNOT_CHANGE_ROLE_BY_NON_ACCOUNT_OWNER": 7, "CANNOT_CHANGE_ROLE_FOR_NON_ACTIVE_LINK_ACCOUNT": 8, "DUPLICATE_CHILD_FOUND": 9, } func (x CustomerManagerLinkErrorEnum_CustomerManagerLinkError) String() string { return proto.EnumName(CustomerManagerLinkErrorEnum_CustomerManagerLinkError_name, int32(x)) } func (CustomerManagerLinkErrorEnum_CustomerManagerLinkError) EnumDescriptor() ([]byte, []int) { return fileDescriptor_customer_manager_link_error_c62ba1cb86ea300c, []int{0, 0} } // Container for enum describing possible CustomerManagerLink errors. type CustomerManagerLinkErrorEnum struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *CustomerManagerLinkErrorEnum) Reset() { *m = CustomerManagerLinkErrorEnum{} } func (m *CustomerManagerLinkErrorEnum) String() string { return proto.CompactTextString(m) } func (*CustomerManagerLinkErrorEnum) ProtoMessage() {} func (*CustomerManagerLinkErrorEnum) Descriptor() ([]byte, []int) { return fileDescriptor_customer_manager_link_error_c62ba1cb86ea300c, []int{0} } func (m *CustomerManagerLinkErrorEnum) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CustomerManagerLinkErrorEnum.Unmarshal(m, b) } func (m *CustomerManagerLinkErrorEnum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_CustomerManagerLinkErrorEnum.Marshal(b, m, deterministic) } func (dst *CustomerManagerLinkErrorEnum) XXX_Merge(src proto.Message) { xxx_messageInfo_CustomerManagerLinkErrorEnum.Merge(dst, src) } func (m *CustomerManagerLinkErrorEnum) XXX_Size() int { return xxx_messageInfo_CustomerManagerLinkErrorEnum.Size(m) } func (m *CustomerManagerLinkErrorEnum) XXX_DiscardUnknown() { xxx_messageInfo_CustomerManagerLinkErrorEnum.DiscardUnknown(m) } var xxx_messageInfo_CustomerManagerLinkErrorEnum proto.InternalMessageInfo func init() { proto.RegisterType((*CustomerManagerLinkErrorEnum)(nil), "google.ads.googleads.v1.errors.CustomerManagerLinkErrorEnum") proto.RegisterEnum("google.ads.googleads.v1.errors.CustomerManagerLinkErrorEnum_CustomerManagerLinkError", CustomerManagerLinkErrorEnum_CustomerManagerLinkError_name, CustomerManagerLinkErrorEnum_CustomerManagerLinkError_value) } func init() { proto.RegisterFile("google/ads/googleads/v1/errors/customer_manager_link_error.proto", fileDescriptor_customer_manager_link_error_c62ba1cb86ea300c) } var fileDescriptor_customer_manager_link_error_c62ba1cb86ea300c = []byte{ // 496 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xcf, 0x6e, 0xd3, 0x40, 0x10, 0xc6, 0x69, 0x0a, 0x2d, 0x6c, 0x0f, 0x18, 0x4b, 0x95, 
0x00, 0x95, 0x1e, 0xc2, 0xa1, 0x42, 0x08, 0x5b, 0x81, 0x9b, 0xb9, 0xb0, 0x59, 0x4f, 0x12, 0xab, 0xf6, 0xac, 0xe5, 0x7f, 0x01, 0x14, 0x69, 0x64, 0x9a, 0xc8, 0x8a, 0x9a, 0x78, 0x23, 0x3b, 0xed, 0xf3, 0x20, 0x8e, 0x3c, 0x0a, 0x8f, 0xc2, 0x13, 0x70, 0x03, 0x39, 0x1b, 0x47, 0x15, 0x22, 0x9c, 0x76, 0xb4, 0xf3, 0x9b, 0xef, 0xfb, 0xa4, 0x19, 0xf6, 0xa1, 0x50, 0xaa, 0x58, 0xcc, 0xec, 0x7c, 0x5a, 0xdb, 0xba, 0x6c, 0xaa, 0xdb, 0x9e, 0x3d, 0xab, 0x2a, 0x55, 0xd5, 0xf6, 0xd5, 0x4d, 0xbd, 0x56, 0xcb, 0x59, 0x45, 0xcb, 0xbc, 0xcc, 0x8b, 0x59, 0x45, 0x8b, 0x79, 0x79, 0x4d, 0x9b, 0xa6, 0xb5, 0xaa, 0xd4, 0x5a, 0x99, 0xe7, 0x7a, 0xcc, 0xca, 0xa7, 0xb5, 0xb5, 0x53, 0xb0, 0x6e, 0x7b, 0x96, 0x56, 0x78, 0x7e, 0xd6, 0x3a, 0xac, 0xe6, 0x76, 0x5e, 0x96, 0x6a, 0x9d, 0xaf, 0xe7, 0xaa, 0xac, 0xf5, 0x74, 0xf7, 0xeb, 0x21, 0x3b, 0x13, 0x5b, 0x8f, 0x40, 0x5b, 0xf8, 0xf3, 0xf2, 0x1a, 0x9a, 0x59, 0x28, 0x6f, 0x96, 0xdd, 0xdf, 0x1d, 0xf6, 0x74, 0x1f, 0x60, 0x3e, 0x66, 0x27, 0x29, 0xc6, 0x21, 0x08, 0x6f, 0xe0, 0x81, 0x6b, 0xdc, 0x33, 0x4f, 0xd8, 0x71, 0x8a, 0x97, 0x28, 0xc7, 0x68, 0x1c, 0x98, 0xa7, 0xec, 0x09, 0x4a, 0x0a, 0x01, 0x5d, 0x0f, 0x87, 0xe4, 0x61, 0xe6, 0x25, 0x60, 0x74, 0xcc, 0x0b, 0xf6, 0x32, 0xe6, 0x01, 0x90, 0xf0, 0x3d, 0xc0, 0x84, 0x02, 0x19, 0x01, 0x25, 0x23, 0x8e, 0x24, 0x51, 0x00, 0x85, 0x10, 0x91, 0xe0, 0xbe, 0x6f, 0x1c, 0x9a, 0x6f, 0xd8, 0xab, 0x80, 0x23, 0x1f, 0x42, 0x44, 0x23, 0x1e, 0x53, 0xc0, 0x3f, 0x12, 0xa6, 0x41, 0x1f, 0x22, 0x92, 0x03, 0xf2, 0x3d, 0xbc, 0x04, 0x97, 0xb8, 0x10, 0x32, 0xc5, 0x24, 0x36, 0xee, 0x37, 0xb8, 0xe0, 0x88, 0x32, 0xa1, 0x14, 0x9b, 0x6e, 0xdb, 0xa3, 0xb1, 0x97, 0x8c, 0x64, 0x9a, 0x10, 0x17, 0x89, 0x97, 0x01, 0xa5, 0x31, 0x44, 0xc6, 0x03, 0xf3, 0x35, 0xbb, 0xd8, 0xe2, 0x11, 0x04, 0x32, 0x03, 0xf2, 0x79, 0x9c, 0xb4, 0xa1, 0xda, 0x51, 0x39, 0x46, 0x88, 0x8c, 0xa3, 0x3b, 0xb0, 0x18, 0x71, 0x1c, 0x02, 0x45, 0xd2, 0x07, 0xea, 0x7f, 0x22, 0x94, 0xf8, 0x17, 0x7c, 0x6c, 0xbe, 0x65, 0xd6, 0x3f, 0xe0, 0x81, 0x8c, 0xb6, 0xf4, 0x26, 0xc5, 0xdd, 0x84, 0xc6, 0x43, 0xf3, 0x19, 0x3b, 0x75, 0xd3, 0xd0, 0xf7, 0x04, 0x4f, 0x80, 0xc4, 0xc8, 0xf3, 0x5d, 0x1a, 0xc8, 0x14, 0x5d, 0xe3, 0x51, 0xff, 0xd7, 0x01, 0xeb, 0x5e, 0xa9, 0xa5, 0xf5, 0xff, 0x3d, 0xf7, 0x5f, 0xec, 0xdb, 0x52, 0xd8, 0x2c, 0x3a, 0x3c, 0xf8, 0xec, 0x6e, 0x05, 0x0a, 0xb5, 0xc8, 0xcb, 0xc2, 0x52, 0x55, 0x61, 0x17, 0xb3, 0x72, 0x73, 0x06, 0xed, 0xe9, 0xad, 0xe6, 0xf5, 0xbe, 0x4b, 0x7c, 0xaf, 0x9f, 0x6f, 0x9d, 0xc3, 0x21, 0xe7, 0xdf, 0x3b, 0xe7, 0x43, 0x2d, 0xc6, 0xa7, 0xb5, 0xa5, 0xcb, 0xa6, 0xca, 0x7a, 0xd6, 0xc6, 0xb2, 0xfe, 0xd1, 0x02, 0x13, 0x3e, 0xad, 0x27, 0x3b, 0x60, 0x92, 0xf5, 0x26, 0x1a, 0xf8, 0xd9, 0xe9, 0xea, 0x5f, 0xc7, 0xe1, 0xd3, 0xda, 0x71, 0x76, 0x88, 0xe3, 0x64, 0x3d, 0xc7, 0xd1, 0xd0, 0x97, 0xa3, 0x4d, 0xba, 0x77, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xcf, 0xcb, 0x93, 0x3c, 0x26, 0x03, 0x00, 0x00, }
[]
[]
[]
[]
[]
go
null
null
null
python/bbgo/utils/grpc_utils.py
import os import grpc def read_binary(f): with open(f, 'rb') as fp: return fp.read() def get_grpc_cert_file_from_env(): cert_file = os.environ.get('BBGO_GRPC_CERT_FILE') return cert_file def get_grpc_key_file_from_env(): key_file = os.environ.get('BBGO_GRPC_KEY_FILE') return key_file def get_credentials_from_env(): key_file = get_grpc_key_file_from_env() private_key = read_binary(key_file) cert_file = get_grpc_cert_file_from_env() certificate_chain = read_binary(cert_file) private_key_certificate_chain_pairs = [(private_key, certificate_chain)] server_credentials = grpc.ssl_server_credentials(private_key_certificate_chain_pairs) return server_credentials def get_insecure_channel(host: str, port: int) -> grpc.Channel: address = f'{host}:{port}' return grpc.insecure_channel(address) def get_insecure_channel_from_env() -> grpc.Channel: host = os.environ.get('BBGO_GRPC_HOST') or '127.0.0.1' port = os.environ.get('BBGO_GRPC_PORT') or 50051 return get_insecure_channel(host, port)
[]
[]
[ "BBGO_GRPC_CERT_FILE", "BBGO_GRPC_PORT", "BBGO_GRPC_KEY_FILE", "BBGO_GRPC_HOST" ]
[]
["BBGO_GRPC_CERT_FILE", "BBGO_GRPC_PORT", "BBGO_GRPC_KEY_FILE", "BBGO_GRPC_HOST"]
python
4
0
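The Python helper above reads certificate and key file paths from BBGO_GRPC_CERT_FILE / BBGO_GRPC_KEY_FILE and builds server credentials from them. A rough equivalent sketched in Go (kept in Go so all added examples share one language); it uses the standard grpc-go credentials helper and simplifies error handling:

package main

import (
	"log"
	"os"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	certFile := os.Getenv("BBGO_GRPC_CERT_FILE")
	keyFile := os.Getenv("BBGO_GRPC_KEY_FILE")
	creds, err := credentials.NewServerTLSFromFile(certFile, keyFile)
	if err != nil {
		log.Fatalf("loading TLS material: %v", err)
	}
	srv := grpc.NewServer(grpc.Creds(creds))
	_ = srv // register services and call srv.Serve(listener) in a real program
}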
cmd/githubCreatePullRequest_generated.go
// Code generated by piper's step-generator. DO NOT EDIT. package cmd import ( "fmt" "os" "time" "github.com/SAP/jenkins-library/pkg/config" "github.com/SAP/jenkins-library/pkg/log" "github.com/SAP/jenkins-library/pkg/telemetry" "github.com/spf13/cobra" ) type githubCreatePullRequestOptions struct { Assignees []string `json:"assignees,omitempty"` Base string `json:"base,omitempty"` Body string `json:"body,omitempty"` APIURL string `json:"apiUrl,omitempty"` Head string `json:"head,omitempty"` Owner string `json:"owner,omitempty"` Repository string `json:"repository,omitempty"` ServerURL string `json:"serverUrl,omitempty"` Title string `json:"title,omitempty"` Token string `json:"token,omitempty"` Labels []string `json:"labels,omitempty"` } // GithubCreatePullRequestCommand Create a pull request on GitHub func GithubCreatePullRequestCommand() *cobra.Command { const STEP_NAME = "githubCreatePullRequest" metadata := githubCreatePullRequestMetadata() var stepConfig githubCreatePullRequestOptions var startTime time.Time var createGithubCreatePullRequestCmd = &cobra.Command{ Use: STEP_NAME, Short: "Create a pull request on GitHub", Long: `This step allows you to create a pull request on Github. It can for example be used for GitOps scenarios or for scenarios where you want to have a manual confirmation step which is delegated to a GitHub pull request.`, PreRunE: func(cmd *cobra.Command, args []string) error { startTime = time.Now() log.SetStepName(STEP_NAME) log.SetVerbose(GeneralConfig.Verbose) path, _ := os.Getwd() fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path} log.RegisterHook(fatalHook) err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile) if err != nil { return err } log.RegisterSecret(stepConfig.Token) if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 { sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID) log.RegisterHook(&sentryHook) } return nil }, Run: func(cmd *cobra.Command, args []string) { telemetryData := telemetry.CustomData{} telemetryData.ErrorCode = "1" handler := func() { telemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds()) telemetry.Send(&telemetryData) } log.DeferExitHandler(handler) defer handler() telemetry.Initialize(GeneralConfig.NoTelemetry, STEP_NAME) githubCreatePullRequest(stepConfig, &telemetryData) telemetryData.ErrorCode = "0" log.Entry().Info("SUCCESS") }, } addGithubCreatePullRequestFlags(createGithubCreatePullRequestCmd, &stepConfig) return createGithubCreatePullRequestCmd } func addGithubCreatePullRequestFlags(cmd *cobra.Command, stepConfig *githubCreatePullRequestOptions) { cmd.Flags().StringSliceVar(&stepConfig.Assignees, "assignees", []string{}, "Login names of users to which the PR should be assigned to.") cmd.Flags().StringVar(&stepConfig.Base, "base", os.Getenv("PIPER_base"), "The name of the branch you want the changes pulled into.") cmd.Flags().StringVar(&stepConfig.Body, "body", os.Getenv("PIPER_body"), "The description text of the pull request in markdown format.") cmd.Flags().StringVar(&stepConfig.APIURL, "apiUrl", `https://api.github.com`, "Set the GitHub API url.") cmd.Flags().StringVar(&stepConfig.Head, "head", os.Getenv("PIPER_head"), "The name of the branch where your changes are implemented.") cmd.Flags().StringVar(&stepConfig.Owner, "owner", os.Getenv("PIPER_owner"), "Set the GitHub organization.") cmd.Flags().StringVar(&stepConfig.Repository, "repository", os.Getenv("PIPER_repository"), "Set the 
GitHub repository.") cmd.Flags().StringVar(&stepConfig.ServerURL, "serverUrl", `https://github.com`, "GitHub server url for end-user access.") cmd.Flags().StringVar(&stepConfig.Title, "title", os.Getenv("PIPER_title"), "Title of the pull request.") cmd.Flags().StringVar(&stepConfig.Token, "token", os.Getenv("PIPER_token"), "GitHub personal access token as per https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line") cmd.Flags().StringSliceVar(&stepConfig.Labels, "labels", []string{}, "Labels to be added to the pull request.") cmd.MarkFlagRequired("base") cmd.MarkFlagRequired("body") cmd.MarkFlagRequired("apiUrl") cmd.MarkFlagRequired("head") cmd.MarkFlagRequired("owner") cmd.MarkFlagRequired("repository") cmd.MarkFlagRequired("serverUrl") cmd.MarkFlagRequired("title") cmd.MarkFlagRequired("token") } // retrieve step metadata func githubCreatePullRequestMetadata() config.StepData { var theMetaData = config.StepData{ Metadata: config.StepMetadata{ Name: "githubCreatePullRequest", Aliases: []config.Alias{}, }, Spec: config.StepSpec{ Inputs: config.StepInputs{ Parameters: []config.StepParameters{ { Name: "assignees", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS"}, Type: "[]string", Mandatory: false, Aliases: []config.Alias{}, }, { Name: "base", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS"}, Type: "string", Mandatory: true, Aliases: []config.Alias{}, }, { Name: "body", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS"}, Type: "string", Mandatory: true, Aliases: []config.Alias{}, }, { Name: "apiUrl", ResourceRef: []config.ResourceReference{}, Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"}, Type: "string", Mandatory: true, Aliases: []config.Alias{{Name: "githubApiUrl"}}, }, { Name: "head", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS"}, Type: "string", Mandatory: true, Aliases: []config.Alias{}, }, { Name: "owner", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS"}, Type: "string", Mandatory: true, Aliases: []config.Alias{{Name: "githubOrg"}}, }, { Name: "repository", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS"}, Type: "string", Mandatory: true, Aliases: []config.Alias{{Name: "githubRepo"}}, }, { Name: "serverUrl", ResourceRef: []config.ResourceReference{}, Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"}, Type: "string", Mandatory: true, Aliases: []config.Alias{{Name: "githubServerUrl"}}, }, { Name: "title", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS"}, Type: "string", Mandatory: true, Aliases: []config.Alias{}, }, { Name: "token", ResourceRef: []config.ResourceReference{}, Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"}, Type: "string", Mandatory: true, Aliases: []config.Alias{{Name: "githubToken"}}, }, { Name: "labels", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS"}, Type: "[]string", Mandatory: false, Aliases: []config.Alias{}, }, }, }, }, } return theMetaData }
[ "\"PIPER_base\"", "\"PIPER_body\"", "\"PIPER_head\"", "\"PIPER_owner\"", "\"PIPER_repository\"", "\"PIPER_title\"", "\"PIPER_token\"" ]
[]
[ "PIPER_title", "PIPER_body", "PIPER_token", "PIPER_head", "PIPER_base", "PIPER_repository", "PIPER_owner" ]
[]
["PIPER_title", "PIPER_body", "PIPER_token", "PIPER_head", "PIPER_base", "PIPER_repository", "PIPER_owner"]
go
7
0
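The generated Piper step above defaults every string flag to an environment variable (os.Getenv("PIPER_owner") and so on), so values can come from the CLI or the environment. A minimal sketch of that cobra pattern; the flag and variable names here are invented for illustration:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	var owner string
	cmd := &cobra.Command{
		Use: "demo",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("owner:", owner)
		},
	}
	// DEMO_OWNER is a made-up variable; an explicit --owner flag wins when both are set.
	cmd.Flags().StringVar(&owner, "owner", os.Getenv("DEMO_OWNER"), "repository owner")
	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}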
src/main.go
package main import ( "fmt" "os" "time" "github.com/Mic-U/gofeed_line_notifier/src/notifier" "github.com/Mic-U/gofeed_line_notifier/src/selector" "github.com/mmcdole/gofeed" ) var ( //RSSURL is the URL of the RSS feed RSSURL = os.Getenv("RSS_URL") ) func main() { fp := gofeed.NewParser() for { exec(fp) time.Sleep(time.Minute * 5) } } func exec(fp *gofeed.Parser) { feed, err := fp.ParseURL(RSSURL) if err != nil { fmt.Printf("Error: %s\n", err) return } items := feed.Items selected := selector.SelectNewNotifications(items) if len(selected) == 0 { fmt.Println("No new articles") return } // TODO decide how to handle the case where too many items are selected if len(selected) > 5 { selected = selected[0:5] } messages := convertItemsToMessages(selected) postMessages := notifier.PostMessages{ Messages: messages, } notifier.PostBroadcast(postMessages) } func convertItemsToMessages(items []*gofeed.Item) []notifier.Message { messages := make([]notifier.Message, 0) for _, i := range items { message := notifier.Message{ Type: "text", Text: i.Title + "\n" + i.Link, } messages = append(messages, message) } return messages }
[ "\"RSS_URL\"" ]
[]
[ "RSS_URL" ]
[]
["RSS_URL"]
go
1
0
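The notifier above reads RSS_URL once at package init and only discovers an empty value when ParseURL fails. A small sketch of failing fast at startup instead; the helper name is ours, not part of the original program:

package main

import (
	"log"
	"os"
)

// mustGetenv aborts startup when a required variable is missing.
func mustGetenv(name string) string {
	v := os.Getenv(name)
	if v == "" {
		log.Fatalf("required environment variable %s is not set", name)
	}
	return v
}

func main() {
	rssURL := mustGetenv("RSS_URL")
	log.Printf("polling %s every 5 minutes", rssURL)
}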
eventbus/gcp/eventbus_test.go
// Copyright (c) 2014 - The Event Horizon authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package gcp import ( "crypto/rand" "encoding/hex" "fmt" "os" "testing" "time" eh "github.com/looplab/eventhorizon" "github.com/looplab/eventhorizon/eventbus" ) func TestAddHandlerIntegration(t *testing.T) { if testing.Short() { t.Skip("skipping integration test") } bus1, _, err := newTestEventBus("") if err != nil { t.Fatal("there should be no error:", err) } eventbus.TestAddHandler(t, bus1) } func TestEventBusIntegration(t *testing.T) { if testing.Short() { t.Skip("skipping integration test") } bus1, appID, err := newTestEventBus("") if err != nil { t.Fatal("there should be no error:", err) } bus2, _, err := newTestEventBus(appID) if err != nil { t.Fatal("there should be no error:", err) } t.Logf("using topic: %s_events", appID) eventbus.AcceptanceTest(t, bus1, bus2, time.Second) } func TestEventBusLoadtest(t *testing.T) { if testing.Short() { t.Skip("skipping integration test") } bus, appID, err := newTestEventBus("") if err != nil { t.Fatal("there should be no error:", err) } t.Logf("using topic: %s_events", appID) eventbus.LoadTest(t, bus) } func BenchmarkEventBus(b *testing.B) { bus, appID, err := newTestEventBus("") if err != nil { b.Fatal("there should be no error:", err) } b.Logf("using topic: %s_events", appID) eventbus.Benchmark(b, bus) } func newTestEventBus(appID string) (eh.EventBus, string, error) { // Connect to localhost if not running inside docker if os.Getenv("PUBSUB_EMULATOR_HOST") == "" { os.Setenv("PUBSUB_EMULATOR_HOST", "localhost:8793") } // Get a random app ID. if appID == "" { bts := make([]byte, 8) if _, err := rand.Read(bts); err != nil { return nil, "", fmt.Errorf("could not randomize app ID: %w", err) } appID = "app-" + hex.EncodeToString(bts) } bus, err := NewEventBus("project_id", appID) if err != nil { return nil, "", fmt.Errorf("could not create event bus: %w", err) } return bus, appID, nil }
[ "\"PUBSUB_EMULATOR_HOST\"" ]
[]
[ "PUBSUB_EMULATOR_HOST" ]
[]
["PUBSUB_EMULATOR_HOST"]
go
1
0
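The test helper above only sets PUBSUB_EMULATOR_HOST when it is not already defined, so CI can point at a different emulator while local runs fall back to localhost. The same guard, isolated as a sketch:

package main

import (
	"fmt"
	"os"
)

// ensureDefault sets an environment variable only if the caller has not.
func ensureDefault(name, fallback string) {
	if os.Getenv(name) == "" {
		os.Setenv(name, fallback)
	}
}

func main() {
	ensureDefault("PUBSUB_EMULATOR_HOST", "localhost:8793")
	fmt.Println("emulator at", os.Getenv("PUBSUB_EMULATOR_HOST"))
}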
objectModel/Java/objectmodel/src/test/java/com/microsoft/commondatamodel/objectmodel/cdm/storage/AdlsAdapterTest.java
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. package com.microsoft.commondatamodel.objectmodel.cdm.storage; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ObjectNode; import com.microsoft.commondatamodel.objectmodel.AdlsTestHelper; import com.microsoft.commondatamodel.objectmodel.TestHelper; import com.microsoft.commondatamodel.objectmodel.cdm.CdmCorpusDefinition; import com.microsoft.commondatamodel.objectmodel.cdm.CdmDocumentDefinition; import com.microsoft.commondatamodel.objectmodel.cdm.CdmManifestDefinition; import com.microsoft.commondatamodel.objectmodel.enums.CdmStatusLevel; import com.microsoft.commondatamodel.objectmodel.storage.AdlsAdapter; import com.microsoft.commondatamodel.objectmodel.storage.StorageAdapterBase.CacheContext; import com.microsoft.commondatamodel.objectmodel.utilities.JMapper; import com.microsoft.commondatamodel.objectmodel.utilities.network.TokenProvider; import org.testng.Assert; import org.testng.annotations.Test; import java.time.OffsetDateTime; import java.util.List; import java.util.concurrent.atomic.AtomicInteger; import static org.testng.Assert.*; public class AdlsAdapterTest { private final String testSubpath = "Storage"; private static class FakeTokenProvider implements TokenProvider { public String getToken() { return "TOKEN"; } } private static void runWriteReadTest(AdlsAdapter adapter) { String filename = "WriteReadTest/" + System.getenv("USERNAME") + "_" + System.getenv("COMPUTERNAME") + "_Java.txt"; String writeContents = OffsetDateTime.now() + "\n" + filename; adapter.writeAsync(filename, writeContents).join(); String readContents = adapter.readAsync(filename).join(); assertEquals(writeContents, readContents); } private static void runCheckFileTimeTest(AdlsAdapter adapter) { OffsetDateTime offset1 = adapter.computeLastModifiedTimeAsync("/FileTimeTest/CheckFileTime.txt").join(); OffsetDateTime offset2 = adapter.computeLastModifiedTimeAsync("FileTimeTest/CheckFileTime.txt").join(); assertNotNull(offset1); assertNotNull(offset2); assertTrue(offset1.isEqual(offset2)); assertTrue(offset1.isBefore(OffsetDateTime.now())); } private static void runFileEnumTest(AdlsAdapter adapter) { CacheContext context = adapter.createFileQueryCacheContext(); try { List<String> files1 = adapter.fetchAllFilesAsync("/FileEnumTest/").join(); List<String> files2 = adapter.fetchAllFilesAsync("/FileEnumTest").join(); List<String> files3 = adapter.fetchAllFilesAsync("FileEnumTest/").join(); List<String> files4 = adapter.fetchAllFilesAsync("FileEnumTest").join(); // expect 100 files to be enumerated assertTrue(files1.size() == 100 && files2.size() == 100 && files3.size() == 100 && files4.size() == 100); // these calls should be fast due to cache final long startTime = System.currentTimeMillis(); for (int i = 0; i < files1.size(); i++) { assertTrue(files1.get(i).equals(files2.get(i)) && files1.get(i).equals(files3.get(i)) && files1.get(i).equals(files4.get(i))); adapter.computeLastModifiedTimeAsync(files1.get(i)).join(); } final long stopTime = System.currentTimeMillis(); assertTrue(stopTime - startTime < 100, "Cached file modified times"); } finally { context.dispose(); } } private static void runSpecialCharactersTest(AdlsAdapter adapter) { CdmCorpusDefinition corpus = new CdmCorpusDefinition(); corpus.getStorage().mount("adls", adapter); 
corpus.getStorage().setDefaultNamespace("adls"); try { CdmManifestDefinition manifest = corpus.<CdmManifestDefinition>fetchObjectAsync("default.manifest.cdm.json").join(); manifest.fileStatusCheckAsync().join(); assertEquals(manifest.getEntities().size(), 1); assertEquals(manifest.getEntities().get(0).getDataPartitions().size(), 2); assertEquals( "TestEntity-With=Special Characters/year=2020/TestEntity-partition-With=Special Characters-0.csv", manifest.getEntities().get(0).getDataPartitions().get(0).getLocation()); assertEquals( "TestEntity-With=Special Characters/year=2020/TestEntity-partition-With=Special Characters-1.csv", manifest.getEntities().get(0).getDataPartitions().get(1).getLocation()); } catch (Exception e) { Assert.fail(e.getMessage()); } } @Test public void adlsWriteReadSharedKey() { AdlsTestHelper.checkADLSEnvironment(); runWriteReadTest(AdlsTestHelper.createAdapterWithSharedKey()); } @Test public void adlsWriteReadClientId() { AdlsTestHelper.checkADLSEnvironment(); runWriteReadTest(AdlsTestHelper.createAdapterWithClientId()); } @Test public void adlsCheckFileTimeSharedKey() { AdlsTestHelper.checkADLSEnvironment(); runCheckFileTimeTest(AdlsTestHelper.createAdapterWithSharedKey()); } @Test public void adlsCheckFileTimeClientId() { AdlsTestHelper.checkADLSEnvironment(); runCheckFileTimeTest(AdlsTestHelper.createAdapterWithClientId()); } @Test public void adlsFileEnumSharedKey() { AdlsTestHelper.checkADLSEnvironment(); runFileEnumTest(AdlsTestHelper.createAdapterWithSharedKey()); } @Test public void adlsFileEnumClientId() { AdlsTestHelper.checkADLSEnvironment(); runFileEnumTest(AdlsTestHelper.createAdapterWithClientId()); } @Test public void adlsSpecialCharactersTest() { AdlsTestHelper.checkADLSEnvironment(); runSpecialCharactersTest(AdlsTestHelper.createAdapterWithClientId("PathWithSpecialCharactersAndUnescapedStringTest/Root-With=Special Characters:")); } /** * Tests if the adapter won't retry if a HttpStatusCode response with a code in AvoidRetryCodes is received. 
*/ @Test public void testAvoidRetryCodes() { AdlsTestHelper.checkADLSEnvironment(); AdlsAdapter adlsAdapter = AdlsTestHelper.createAdapterWithSharedKey(); adlsAdapter.setNumberOfRetries(3); CdmCorpusDefinition corpus = new CdmCorpusDefinition(); corpus.getStorage().mount("adls", adlsAdapter); AtomicInteger count = new AtomicInteger(); corpus.setEventCallback((CdmStatusLevel level, String message) -> { if (message.indexOf("Response for request ") != -1) { count.getAndIncrement(); } }, CdmStatusLevel.Progress);; corpus.<CdmDocumentDefinition>fetchObjectAsync("adls:/inexistentFile.cdm.json").join(); Assert.assertEquals(count.get(), 1); } @Test public void createCorpusAndAdapterPath() { final String host1 = "storageaccount.dfs.core.windows.net"; final String root = "/fs"; AdlsAdapter adlsAdapter = new AdlsAdapter(host1, root, ""); final String adapterPath1 = "https://storageaccount.dfs.core.windows.net/fs/a/1.csv"; final String adapterPath2 = "https://storageaccount.dfs.core.windows.net:443/fs/a/2.csv"; final String adapterPath3 = "https://storageaccount.blob.core.windows.net/fs/a/3.csv"; final String adapterPath4 = "https://storageaccount.blob.core.windows.net:443/fs/a/4.csv"; final String corpusPath1 = adlsAdapter.createCorpusPath(adapterPath1); final String corpusPath2 = adlsAdapter.createCorpusPath(adapterPath2); final String corpusPath3 = adlsAdapter.createCorpusPath(adapterPath3); final String corpusPath4 = adlsAdapter.createCorpusPath(adapterPath4); assertEquals(corpusPath1, "/a/1.csv"); assertEquals(corpusPath2, "/a/2.csv"); assertEquals(corpusPath3, "/a/3.csv"); assertEquals(corpusPath4, "/a/4.csv"); assertEquals(adlsAdapter.createAdapterPath(corpusPath1), adapterPath1); assertEquals(adlsAdapter.createAdapterPath(corpusPath2), adapterPath2); assertEquals(adlsAdapter.createAdapterPath(corpusPath3), adapterPath3); assertEquals(adlsAdapter.createAdapterPath(corpusPath4), adapterPath4); // Check that an adapter path is correctly created from a corpus path with any namespace final String corpusPathWithNamespace1 = "adls:/test.json"; final String corpusPathWithNamespace2 = "mylake:/test.json"; final String expectedAdapterPath = "https://storageaccount.dfs.core.windows.net/fs/test.json"; assertEquals(adlsAdapter.createAdapterPath(corpusPathWithNamespace1), expectedAdapterPath); assertEquals(adlsAdapter.createAdapterPath(corpusPathWithNamespace2), expectedAdapterPath); // Check that an adapter path is correctly created from a corpus path with colons final String corpusPathWithColons = "namespace:/a/path:with:colons/some-file.json"; Assert.assertEquals(adlsAdapter.createAdapterPath(corpusPathWithColons), "https://storageaccount.dfs.core.windows.net/fs/a/path%3Awith%3Acolons/some-file.json"); Assert.assertEquals(adlsAdapter.createCorpusPath("https://storageaccount.dfs.core.windows.net/fs/a/path%3Awith%3Acolons/some-file.json"), "/a/path:with:colons/some-file.json"); Assert.assertEquals(adlsAdapter.createCorpusPath("https://storageaccount.dfs.core.windows.net/fs/a/path%3awith%3acolons/some-file.json"), "/a/path:with:colons/some-file.json"); // Check other special characters Assert.assertEquals(adlsAdapter.createAdapterPath("namespace:/a/path with=special=characters/some-file.json"), "https://storageaccount.dfs.core.windows.net/fs/a/path%20with%3Dspecial%3Dcharacters/some-file.json"); Assert.assertEquals(adlsAdapter.createCorpusPath("https://storageaccount.dfs.core.windows.net/fs/a/path%20with%3dspecial%3dcharacters/some-file.json"), "/a/path with=special=characters/some-file.json"); 
Assert.assertEquals(adlsAdapter.createCorpusPath("https://storageaccount.dfs.core.windows.net/fs/a/path%20with%3dspecial%3Dcharacters/some-file.json"), "/a/path with=special=characters/some-file.json"); // Check that an adapter path is null if the corpus path provided is null Assert.assertNull(adlsAdapter.createAdapterPath(null)); final String host2 = "storageaccount.blob.core.windows.net:8888"; adlsAdapter = new AdlsAdapter(host2, root, "", "", ""); final String adapterPath5 = "https://storageaccount.blob.core.windows.net:8888/fs/a/5.csv"; final String adapterPath6 = "https://storageaccount.dfs.core.windows.net:8888/fs/a/6.csv"; final String adapterPath7 = "https://storageaccount.blob.core.windows.net/fs/a/7.csv"; assertEquals(adlsAdapter.createCorpusPath(adapterPath5), "/a/5.csv"); assertEquals(adlsAdapter.createCorpusPath(adapterPath6), "/a/6.csv"); assertNull(adlsAdapter.createCorpusPath(adapterPath7)); } @Test public void fetchConfigAndUpdateConfig() { final AdlsAdapter adlsAdapter = new AdlsAdapter( "fake.dfs.core.windows.net", "fakeRoot", new FakeTokenProvider() ); try { String resultConfig = adlsAdapter.fetchConfig(); JsonNode adapterConfigJson = JMapper.MAP.readTree(resultConfig); adlsAdapter.updateConfig(adapterConfigJson.get("config").toString()); } catch (final Exception ex) { fail("AdlsAdapter initialized with token provider shouldn't throw exception when updating config."); } } /** * The secret property is not saved to the config.json file for security reasons. When * constructing and ADLS adapter from config, the user should be able to set the secret after the * adapter is constructed. */ @Test public void fetchConfigAndUpdateConfigWithoutSecret() { ObjectMapper mapper = new ObjectMapper(); ObjectNode config = mapper.createObjectNode(); config.put("root", "root"); config.put("hostname", "hostname"); config.put("tenant", "tenant"); config.put("clientId", "clientId"); try { final AdlsAdapter adlsAdapter1 = new AdlsAdapter(); adlsAdapter1.updateConfig(config.toString()); adlsAdapter1.setClientId("clientId"); adlsAdapter1.setSecret("secret"); adlsAdapter1.setSharedKey("sharedKey"); adlsAdapter1.setTokenProvider(new FakeTokenProvider()); } catch (final Exception ex) { fail("AdlsAdapter initialized without secret shouldn't throw exception when updating config."); } try { final AdlsAdapter adlsAdapter2 = new AdlsAdapter(); adlsAdapter2.setClientId("clientId"); adlsAdapter2.setSecret("secret"); adlsAdapter2.setSharedKey("sharedKey"); adlsAdapter2.setTokenProvider(new FakeTokenProvider()); adlsAdapter2.updateConfig(config.toString()); } catch (final Exception ex) { fail("AdlsAdapter initialized without secret shouldn't throw exception when updating config."); } } /** * Initialize Hostname and Root. 
*/ @Test public void testInitializeHostnameAndRoot() { String host1 = "storageaccount.dfs.core.windows.net"; AdlsAdapter adlsAdapter1 = new AdlsAdapter(host1, "root-without-slash", ""); assertEquals(adlsAdapter1.getHostname(), "storageaccount.dfs.core.windows.net"); assertEquals(adlsAdapter1.getRoot(), "/root-without-slash"); String adapterPath1 = "https://storageaccount.dfs.core.windows.net/root-without-slash/a/1.csv"; String corpusPath1 = adlsAdapter1.createCorpusPath(adapterPath1); assertEquals(corpusPath1, "/a/1.csv"); assertEquals(adlsAdapter1.createAdapterPath(corpusPath1), adapterPath1); AdlsAdapter adlsAdapter1WithFolders = new AdlsAdapter(host1, "root-without-slash/folder1/folder2", ""); assertEquals(adlsAdapter1WithFolders.getRoot(), "/root-without-slash/folder1/folder2"); String adapterPath2 = "https://storageaccount.dfs.core.windows.net/root-without-slash/folder1/folder2/a/1.csv"; String corpusPath2 = adlsAdapter1WithFolders.createCorpusPath(adapterPath2); assertEquals(corpusPath2, "/a/1.csv"); assertEquals(adlsAdapter1WithFolders.createAdapterPath(corpusPath2), adapterPath2); AdlsAdapter adlsAdapter2 = new AdlsAdapter(host1, "/root-starts-with-slash", ""); assertEquals(adlsAdapter2.getRoot(), "/root-starts-with-slash"); AdlsAdapter adlsAdapter2WithFolders = new AdlsAdapter(host1, "/root-starts-with-slash/folder1/folder2", ""); assertEquals(adlsAdapter2WithFolders.getRoot(), "/root-starts-with-slash/folder1/folder2"); AdlsAdapter adlsAdapter3 = new AdlsAdapter(host1, "root-ends-with-slash/", ""); assertEquals(adlsAdapter3.getRoot(), "/root-ends-with-slash"); AdlsAdapter adlsAdapter3WithFolders = new AdlsAdapter(host1, "root-ends-with-slash/folder1/folder2/", ""); assertEquals(adlsAdapter3WithFolders.getRoot(), "/root-ends-with-slash/folder1/folder2"); AdlsAdapter adlsAdapter4 = new AdlsAdapter(host1, "/root-with-slashes/", ""); assertEquals(adlsAdapter4.getRoot(), "/root-with-slashes"); AdlsAdapter adlsAdapter4WithFolders = new AdlsAdapter(host1, "/root-with-slashes/folder1/folder2/", ""); assertEquals(adlsAdapter4WithFolders.getRoot(), "/root-with-slashes/folder1/folder2"); try { // Mount from config String config = TestHelper.getInputFileContent(testSubpath, "TestInitializeHostnameAndRoot", "config.json"); CdmCorpusDefinition corpus = new CdmCorpusDefinition(); corpus.getStorage().mountFromConfig(config); assertEquals(((AdlsAdapter)corpus.getStorage().fetchAdapter("adlsadapter1")).getRoot(), "/root-without-slash"); assertEquals(((AdlsAdapter)corpus.getStorage().fetchAdapter("adlsadapter2")).getRoot(), "/root-without-slash/folder1/folder2"); assertEquals(((AdlsAdapter)corpus.getStorage().fetchAdapter("adlsadapter3")).getRoot(), "/root-starts-with-slash/folder1/folder2"); assertEquals(((AdlsAdapter)corpus.getStorage().fetchAdapter("adlsadapter4")).getRoot(), "/root-ends-with-slash/folder1/folder2"); assertEquals(((AdlsAdapter)corpus.getStorage().fetchAdapter("adlsadapter5")).getRoot(), "/root-with-slashes/folder1/folder2"); } catch (Exception e) { Assert.fail(e.getMessage()); } } }
[ "\"USERNAME\"", "\"COMPUTERNAME\"" ]
[]
[ "COMPUTERNAME", "USERNAME" ]
[]
["COMPUTERNAME", "USERNAME"]
java
2
0
cmd/db/db.go
package main import( "bufio" "fmt" "os" "github.com/abworrall/dicebot/pkg/bot" "github.com/abworrall/dicebot/pkg/rules" "github.com/abworrall/dicebot/pkg/verbs" "github.com/abworrall/dicebot/pkg/state" ) // run this from the main dicebot dir, to pick up ./rules func main() { rules.Init("./data/") homedir,_ := os.UserHomeDir() statedir := homedir + "/var/dicebot" b := bot.New("dicebot", "db") vc := verbs.VerbContext{ ExternalUserId: "ABCDEF123456", StateManager: state.FileStateManager{statedir}, } /* Sadly, this all dies :( panic: runtime error: invalid memory address or nil pointer dereference [signal SIGSEGV: segmentation violation code=0x1 addr=0x30 pc=0x8fb180] goroutine 1 [running]: go.opencensus.io/trace.FromContext(...) /home/abw/go/pkg/mod/[email protected]/trace/trace.go:113 go.opencensus.io/trace.StartSpan(0x0, 0x0, 0xb1f1ed, 0x21, 0x0, 0x0, 0x0, 0x106c458, 0x203000, 0x203000) /home/abw/go/pkg/mod/[email protected]/trace/trace.go:172 +0x70 if os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") != "" && os.Getenv("GOOGLE_CLOUD_PROJECT") != "" { gcpProjectId := os.Getenv("GOOGLE_CLOUD_PROJECT") ctx := trace.NewContext(context.Background(), nil) //ctx := context.Background() vc.StateManager = state.NewGcpStateManager(ctx, gcpProjectId) fmt.Printf("(connecting to GCP !)\n") } */ reader := bufio.NewReader(os.Stdin) for { fmt.Print("> ") in, _ := reader.ReadString('\n') if out := b.ProcessLine(vc,in); out != "" { fmt.Printf("%s\n", out) } } }
[ "\"GOOGLE_APPLICATION_CREDENTIALS\"", "\"GOOGLE_CLOUD_PROJECT\"", "\"GOOGLE_CLOUD_PROJECT\"" ]
[]
[ "GOOGLE_CLOUD_PROJECT", "GOOGLE_APPLICATION_CREDENTIALS" ]
[]
["GOOGLE_CLOUD_PROJECT", "GOOGLE_APPLICATION_CREDENTIALS"]
go
2
0
eend/chainer_backend/train.py
#!/usr/bin/env python3 # Copyright 2019 Hitachi, Ltd. (author: Yusuke Fujita) # Licensed under the MIT license. # import os import numpy as np import chainer from chainer import optimizers from chainer import serializers from chainer import iterators from chainer import training from chainer.training import extensions from eend.chainer_backend.models import BLSTMDiarization from eend.chainer_backend.models import TransformerDiarization, TransformerEDADiarization, TransformerClusteringDiarization from eend.chainer_backend.transformer import NoamScheduler from eend.chainer_backend.updater import GradientAccumulationUpdater from eend.chainer_backend.diarization_dataset import KaldiDiarizationDataset from eend.chainer_backend.utils import use_single_gpu @chainer.dataset.converter() def _convert(batch, device): def to_device_batch(batch): if device is None: return batch batch_dst = [device.send(x) for x in batch] return batch_dst return {'xs': to_device_batch([x for x, _ in batch]), 'ts': to_device_batch([t for _, t in batch])} # pht2119 def get_n_train_speakers(train_set): """ Compute the number of speakers in a training set. Args: train_set: KaldiDiarizationDataset, the training set Returns: int, the number of speakers in the dataset """ return len(train_set.data.spk2utt) def train(args): """ Training model with chainer backend. This function is called from eend/bin/train.py with parsed command-line arguments. """ np.random.seed(args.seed) os.environ['CHAINER_SEED'] = str(args.seed) chainer.global_config.cudnn_deterministic = True train_set = KaldiDiarizationDataset( args.train_data_dir, chunk_size=args.num_frames, context_size=args.context_size, input_transform=args.input_transform, frame_size=args.frame_size, frame_shift=args.frame_shift, subsampling=args.subsampling, rate=args.sampling_rate, use_last_samples=True, label_delay=args.label_delay, n_speakers=args.num_speakers, shuffle=args.shuffle, ) dev_set = KaldiDiarizationDataset( args.valid_data_dir, chunk_size=args.num_frames, context_size=args.context_size, input_transform=args.input_transform, frame_size=args.frame_size, frame_shift=args.frame_shift, subsampling=args.subsampling, rate=args.sampling_rate, use_last_samples=True, label_delay=args.label_delay, n_speakers=args.num_speakers, shuffle=args.shuffle, ) # Prepare model Y, T = train_set.get_example(0) if args.model_type == 'BLSTM': assert args.num_speakers is not None model = BLSTMDiarization( in_size=Y.shape[1], n_speakers=args.num_speakers, hidden_size=args.hidden_size, n_layers=args.num_lstm_layers, embedding_layers=args.embedding_layers, embedding_size=args.embedding_size, dc_loss_ratio=args.dc_loss_ratio, ) elif args.model_type == 'Transformer': if args.use_attractor: model = TransformerEDADiarization( Y.shape[1], n_units=args.hidden_size, n_heads=args.transformer_encoder_n_heads, n_layers=args.transformer_encoder_n_layers, dropout=args.transformer_encoder_dropout, attractor_loss_ratio=args.attractor_loss_ratio, attractor_encoder_dropout=args.attractor_encoder_dropout, attractor_decoder_dropout=args.attractor_decoder_dropout, ) else: assert args.num_speakers is not None model = TransformerDiarization( args.num_speakers, Y.shape[1], n_units=args.hidden_size, n_heads=args.transformer_encoder_n_heads, n_layers=args.transformer_encoder_n_layers, dropout=args.transformer_encoder_dropout ) # pht2119 # model creation elif args.model_type == "TransformerClustering": print("Using TransformerClustering") model = TransformerClusteringDiarization( n_speakers=args.num_speakers, 
n_training_speakers=get_n_train_speakers(train_set), emb_size=args.embeddings_size, in_size=Y.shape[1], lambda_loss=args.lambda_loss, n_units=args.hidden_size, n_heads=args.transformer_encoder_n_heads, n_layers=args.transformer_encoder_n_layers, dropout=args.transformer_encoder_dropout ) else: raise ValueError('Possible model_type are "Transformer" and "BLSTM"') if args.gpu >= 0: gpuid = use_single_gpu() print('GPU device {} is used'.format(gpuid)) model.to_gpu() else: gpuid = -1 print('Prepared model') # Setup optimizer if args.optimizer == 'adam': optimizer = optimizers.Adam(alpha=args.lr) elif args.optimizer == 'sgd': optimizer = optimizers.SGD(lr=args.lr) elif args.optimizer == 'noam': optimizer = optimizers.Adam(alpha=0, beta1=0.9, beta2=0.98, eps=1e-9) else: raise ValueError(args.optimizer) optimizer.setup(model) if args.gradclip > 0: optimizer.add_hook( chainer.optimizer_hooks.GradientClipping(args.gradclip)) # Init/Resume if args.initmodel: print('Load model from', args.initmodel) serializers.load_npz(args.initmodel, model) train_iter = iterators.MultiprocessIterator( train_set, batch_size=args.batchsize, repeat=True, shuffle=True, # shared_mem=64000000, shared_mem=None, n_processes=4, n_prefetch=2) dev_iter = iterators.MultiprocessIterator( dev_set, batch_size=args.batchsize, repeat=False, shuffle=False, # shared_mem=64000000, shared_mem=None, n_processes=4, n_prefetch=2) if args.gradient_accumulation_steps > 1: updater = GradientAccumulationUpdater( train_iter, optimizer, converter=_convert, device=gpuid) else: updater = training.StandardUpdater( train_iter, optimizer, converter=_convert, device=gpuid) trainer = training.Trainer( updater, (args.max_epochs, 'epoch'), out=os.path.join(args.model_save_dir)) evaluator = extensions.Evaluator( dev_iter, model, converter=_convert, device=gpuid) trainer.extend(evaluator) if args.optimizer == 'noam': trainer.extend( NoamScheduler(args.hidden_size, warmup_steps=args.noam_warmup_steps, scale=args.noam_scale), trigger=(1, 'iteration')) if args.resume: chainer.serializers.load_npz(args.resume, trainer) # MICRO AVERAGE metrics = [ ('diarization_error', 'speaker_scored', 'DER'), ('speech_miss', 'speech_scored', 'SAD_MR'), ('speech_falarm', 'speech_scored', 'SAD_FR'), ('speaker_miss', 'speaker_scored', 'MI'), ('speaker_falarm', 'speaker_scored', 'FA'), ('speaker_error', 'speaker_scored', 'CF'), ('correct', 'frames', 'accuracy')] for num, den, name in metrics: trainer.extend(extensions.MicroAverage( 'main/{}'.format(num), 'main/{}'.format(den), 'main/{}'.format(name))) trainer.extend(extensions.MicroAverage( 'validation/main/{}'.format(num), 'validation/main/{}'.format(den), 'validation/main/{}'.format(name))) trainer.extend(extensions.LogReport(log_name='log_iter', trigger=(1000, 'iteration'))) trainer.extend(extensions.LogReport()) trainer.extend(extensions.PrintReport( ['epoch', 'main/loss', 'validation/main/loss', 'main/diarization_error_rate', 'validation/main/diarization_error_rate', 'elapsed_time'])) trainer.extend(extensions.PlotReport( ['main/loss', 'validation/main/loss'], x_key='epoch', file_name='loss.png')) trainer.extend(extensions.PlotReport( ['main/diarization_error_rate', 'validation/main/diarization_error_rate'], x_key='epoch', file_name='DER.png')) trainer.extend(extensions.ProgressBar(update_interval=100)) trainer.extend(extensions.snapshot( filename='snapshot_epoch-{.updater.epoch}')) trainer.extend(extensions.dump_graph('main/loss', out_name="cg.dot")) trainer.run() print('Finished!')
[]
[]
[ "CHAINER_SEED" ]
[]
["CHAINER_SEED"]
python
1
0
src/easy_share_portal/settings.py
""" Django settings for easy_share_portal project. Generated by 'django-admin startproject' using Django 1.10.1. For more information on this file, see https://docs.djangoproject.com/en/1.10/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.10/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '-p#*z)pxk_n%69*5%fx9b4w4!#5sqlu2o(-rn+urre1t=3g52#' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'home', 'User', 'chat_start', 'post', 'channels', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'easy_share_portal.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR,'templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'easy_share_portal.wsgi.application' # Database # https://docs.djangoproject.com/en/1.10/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.10/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.10/howto/static-files/ STATIC_URL = '/static/' MEDIA_URL = '/media/' STATICFILES_DIRS= [ os.path.join(BASE_DIR,"static"), ] STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR),"static_cdn") MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR),"media_cdn") # Channel seetings CHANNEL_LAYERS = { "default": { "BACKEND": "asgi_redis.RedisChannelLayer", "CONFIG": { "hosts": [os.environ.get('REDIS_URL', 'redis://localhost:6379')], }, "ROUTING": "easy_share_portal.routing.channel_routing", }, } # Sending mail EMAIL_USE_TLS = True EMAIL_HOST = 'smtp.gmail.com' EMAIL_PORT = 587 EMAIL_HOST_USER = '[email protected]' EMAIL_HOST_PASSWORD = 'hello12345'
[]
[]
[ "REDIS_URL" ]
[]
["REDIS_URL"]
python
1
0
gorackhd_test.go
package gorackhd import ( "encoding/json" "fmt" "log" "os" "testing" rc "github.com/go-openapi/runtime/client" "github.com/go-openapi/strfmt" apiclient "github.com/codedellemc/gorackhd/client" "github.com/codedellemc/gorackhd/client/lookups" "github.com/codedellemc/gorackhd/client/nodes" ) func TestNodeGetOperation(t *testing.T) { // create the transport transport := rc.New("localhost:9090", "/api/1.1", []string{"http"}) // configure the host. include port with environment variable. For instance the vagrant image would be localhost:9090 if os.Getenv("GORACKHD_ENDPOINT") != "" { transport.Host = os.Getenv("GORACKHD_ENDPOINT") } // create the API client, with the transport client := apiclient.New(transport, strfmt.Default) //use any function to do REST operations resp, err := client.Nodes.GetNodes(nil, nil) if err != nil { log.Fatal(err) } fmt.Printf("%#v\n", resp.Payload) } func TestNodeLookupOperation(t *testing.T) { // create the transport transport := rc.New("localhost:9090", "/api/1.1", []string{"http"}) // configure the host. include port with environment variable. For instance the vagrant image would be localhost:9090 if os.Getenv("GORACKHD_ENDPOINT") != "" { transport.Host = os.Getenv("GORACKHD_ENDPOINT") } // create the API client, with the transport client := apiclient.New(transport, strfmt.Default) nodeId := "56dde3441722c192796e3a38" params := lookups.NewGetLookupsParams() params = params.WithQ(&nodeId) //use any function to do REST operations resp, err := client.Lookups.GetLookups(params, nil) if err != nil { log.Fatal(err) } fmt.Printf("%#v\n", resp.Payload) } func TestNodePostOperation(t *testing.T) { // create the transport transport := rc.New("localhost:9090", "/api/1.1", []string{"http"}) // configure the host if os.Getenv("GORACKHD_ENDPOINT") != "" { transport.Host = os.Getenv("GORACKHD_ENDPOINT") } // create the API client, with the transport client := apiclient.New(transport, strfmt.Default) c := &Node{ ID: "1234abcd1234abcd1234abcd", Name: "somename", Type: "compute", ObmSettings: []*ObmSettings{&ObmSettings{ Service: "ipmi-obm-service", Config: &ObmConfig{ Host: "1.2.3.4", User: "myuser", Password: "mypass", }, }}, } b, err := json.Marshal(c) if err != nil { log.Fatal(err) } fmt.Println(string(b)) params := nodes.NewPostNodesParams() params = params.WithIdentifiers(c) resp, err := client.Nodes.PostNodes(params, nil) if err != nil { log.Fatal(err) } fmt.Printf("%#v\n", resp.Payload) } func TestNodeDeleteOperation(t *testing.T) { // create the transport transport := rc.New("localhost:9090", "/api/1.1", []string{"http"}) // configure the host. include port with environment variable. For instance the vagrant image would be localhost:9090 if os.Getenv("GORACKHD_ENDPOINT") != "" { transport.Host = os.Getenv("GORACKHD_ENDPOINT") } // create the API client, with the transport client := apiclient.New(transport, strfmt.Default) params := nodes.NewDeleteNodesIdentifierParams() params = params.WithIdentifier("1234abcd1234abcd1234abcd") resp, err := client.Nodes.DeleteNodesIdentifier(params, nil) if err != nil { log.Fatal(err) } fmt.Printf("%#v\n", resp) } type ObmConfig struct { Host string `json:"host"` User string `json:"user"` Password string `json:"password"` } type ObmSettings struct { Service string `json:"service"` Config *ObmConfig `json:"config"` } type Node struct { ID string `json:"id"` Name string `json:"name"` Type string `json:"type"` ObmSettings []*ObmSettings `json:"obmSettings"` }
[ "\"GORACKHD_ENDPOINT\"", "\"GORACKHD_ENDPOINT\"", "\"GORACKHD_ENDPOINT\"", "\"GORACKHD_ENDPOINT\"", "\"GORACKHD_ENDPOINT\"", "\"GORACKHD_ENDPOINT\"", "\"GORACKHD_ENDPOINT\"", "\"GORACKHD_ENDPOINT\"" ]
[]
[ "GORACKHD_ENDPOINT" ]
[]
["GORACKHD_ENDPOINT"]
go
1
0
submitit/local/test_debug.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # import os from pathlib import Path from typing import Any, Tuple import pytest from ..core import utils from ..core.core import Job from ..core.job_environment import JobEnvironment from . import debug def test_debug_job(tmp_path: Path) -> None: def func(p: int) -> int: return 2 * p executor = debug.DebugExecutor(tmp_path) job = executor.submit(func, 4) assert job.result() == 8 with executor.batch(): job2 = executor.submit(func, 5) assert job2.result() == 10 # Check that job results are cached. job2.submission().function = None # type: ignore assert job2.result() == 10 def test_debug_job_array(tmp_path: Path) -> None: n = 5 data1, data2 = range(n), range(10, 10 + n) def g(x: int, y: int) -> int: assert x in data1 assert y in data2 return x + y executor = debug.DebugExecutor(tmp_path) jobs = executor.map_array(g, data1, data2) print(type(jobs[0])) print(jobs) assert list(map(g, data1, data2)) == [j.result() for j in jobs] def test_debug_error(tmp_path: Path) -> None: def failing_job() -> None: raise Exception("Failed on purpose") executor = debug.DebugExecutor(tmp_path) job = executor.submit(failing_job) exception = job.exception() assert isinstance(exception, Exception) message = exception.args[0] assert "Failed on purpose" in message def f_42() -> int: return 42 def test_debug_triggered(tmp_path: Path) -> None: def get_result(job: Job) -> Tuple[bool, Any]: assert isinstance(job, debug.DebugJob) return (job._submission._done, job._submission._result) executor = debug.DebugExecutor(tmp_path) for trigger in ("wait", "done", "exception", "results"): job = executor.submit(f_42) assert job.state == "QUEUED" assert get_result(job) == (False, None) getattr(job, trigger)() assert job.state == "DONE" assert get_result(job) == (True, 42) def test_cancel(tmp_path: Path) -> None: executor = debug.DebugExecutor(tmp_path) job = executor.submit(f_42) assert job.state == "QUEUED" job.cancel() assert job.state == "CANCELLED" with pytest.raises(utils.UncompletedJobError, match="was cancelled"): job.result() def test_job_environment(tmp_path: Path) -> None: executor = debug.DebugExecutor(tmp_path) def use_env(): env = JobEnvironment() assert env.num_nodes == 1 assert env.num_tasks == 1 assert env.node == 0 assert env.global_rank == 0 assert env.local_rank == 0 assert "DEBUG" in env.job_id job = executor.submit(use_env) job.result() # Check that we clean up the env after us. assert "SUBMITIT_DEBUG_JOB_ID" not in os.environ
[]
[]
[]
[]
[]
python
0
0
samples/wrapping-existing-ml-scripts-tutorial/ml_service/util/env_variables.py
import os from dataclasses import dataclass from typing import Optional from dotenv import load_dotenv # See /.env file @dataclass(frozen=True) class Env: load_dotenv() subscription_id: Optional[str] = os.getenv("SUBSCRIPTION_ID") resource_group: Optional[str] = os.getenv("RESOURCE_GROUP") aml_workspace_name: Optional[str] = os.getenv("AML_WORKSPACE_NAME") aml_workspace_location: Optional[str] = os.getenv("AML_WORKSPACE_LOCATION") aml_experiment_name: Optional[str] = os.getenv("AML_EXPERIMENT_NAME") aml_environment_name: Optional[str] = os.getenv("AML_ENVIRONMENT_NAME") aml_compute_name: Optional[str] = os.getenv("AML_COMPUTE_NAME") aml_model_name: Optional[str] = os.getenv("AML_MODEL_NAME") aml_pipeline_name: Optional[str] = os.getenv("AML_PIPELINE_NAME") aml_pipeline_endpoint_name: Optional[str] = os.getenv("AML_PIPELINE_ENDPOINT_NAME")
[]
[]
[ "AML_ENVIRONMENT_NAME", "AML_COMPUTE_NAME", "AML_PIPELINE_NAME", "AML_PIPELINE_ENDPOINT_NAME", "AML_WORKSPACE_LOCATION", "RESOURCE_GROUP", "AML_EXPERIMENT_NAME", "AML_MODEL_NAME", "SUBSCRIPTION_ID", "AML_WORKSPACE_NAME" ]
[]
["AML_ENVIRONMENT_NAME", "AML_COMPUTE_NAME", "AML_PIPELINE_NAME", "AML_PIPELINE_ENDPOINT_NAME", "AML_WORKSPACE_LOCATION", "RESOURCE_GROUP", "AML_EXPERIMENT_NAME", "AML_MODEL_NAME", "SUBSCRIPTION_ID", "AML_WORKSPACE_NAME"]
python
10
0
tools/checkspecs.py
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Attempt to check each interface in nipype """ # Stdlib imports import os import re import sys import warnings import black # Functions and classes class InterfaceChecker(object): """Class for checking all interface specifications """ def __init__( self, package_name, package_skip_patterns=None, module_skip_patterns=None, class_skip_patterns=None, ): r""" Initialize package for parsing Parameters ---------- package_name : string Name of the top-level package. *package_name* must be the name of an importable package package_skip_patterns : None or sequence of {strings, regexps} Sequence of strings giving URIs of packages to be excluded Operates on the package path, starting at (including) the first dot in the package path, after *package_name* - so, if *package_name* is ``sphinx``, then ``sphinx.util`` will result in ``.util`` being passed for earching by these regexps. If is None, gives default. Default is: ['\.tests$'] module_skip_patterns : None or sequence Sequence of strings giving URIs of modules to be excluded Operates on the module name including preceding URI path, back to the first dot after *package_name*. For example ``sphinx.util.console`` results in the string to search of ``.util.console`` If is None, gives default. Default is: ['\.setup$', '\._'] class_skip_patterns : None or sequence Sequence of strings giving classes to be excluded Default is: None """ if package_skip_patterns is None: package_skip_patterns = ["\\.tests$"] if module_skip_patterns is None: module_skip_patterns = ["\\.setup$", "\\._"] if class_skip_patterns: self.class_skip_patterns = class_skip_patterns else: self.class_skip_patterns = [] self.package_name = package_name self.package_skip_patterns = package_skip_patterns self.module_skip_patterns = module_skip_patterns def get_package_name(self): return self._package_name def set_package_name(self, package_name): """Set package_name""" # It's also possible to imagine caching the module parsing here self._package_name = package_name self.root_module = __import__(package_name) self.root_path = self.root_module.__path__[0] package_name = property( get_package_name, set_package_name, None, "get/set package_name" ) def _get_object_name(self, line): name = line.split()[1].split("(")[0].strip() # in case we have classes which are not derived from object # ie. old style classes return name.rstrip(":") def _uri2path(self, uri): """Convert uri to absolute filepath Parameters ---------- uri : string URI of python module to return path for Returns ------- path : None or string Returns None if there is no valid path for this URI Otherwise returns absolute file system path for URI """ if uri == self.package_name: return os.path.join(self.root_path, "__init__.py") path = uri.replace(".", os.path.sep) path = path.replace(self.package_name + os.path.sep, "") path = os.path.join(self.root_path, path) # XXX maybe check for extensions as well? 
if os.path.exists(path + ".py"): # file path += ".py" elif os.path.exists(os.path.join(path, "__init__.py")): path = os.path.join(path, "__init__.py") else: return None return path def _path2uri(self, dirpath): """ Convert directory path to uri """ relpath = dirpath.replace(self.root_path, self.package_name) if relpath.startswith(os.path.sep): relpath = relpath[1:] return relpath.replace(os.path.sep, ".") def _parse_module(self, uri): """ Parse module defined in *uri* """ filename = self._uri2path(uri) if filename is None: # nothing that we could handle here. return ([], []) f = open(filename, "rt") functions, classes = self._parse_lines(f, uri) f.close() return functions, classes def _parse_lines(self, linesource, module): """ Parse lines of text for functions and classes """ functions = [] classes = [] for line in linesource: if line.startswith("def ") and line.count("("): # exclude private stuff name = self._get_object_name(line) if not name.startswith("_"): functions.append(name) elif line.startswith("class "): # exclude private stuff name = self._get_object_name(line) if not name.startswith("_") and self._survives_exclude( ".".join((module, name)), "class" ): classes.append(name) else: pass functions.sort() classes.sort() return functions, classes @classmethod def _normalize_repr(cls, value): if isinstance(value, list): return "[{}]".format(", ".join(map(cls._normalize_repr, value))) if isinstance(value, tuple): if len(value) == 1: return "({},)".format(cls._normalize_repr(value[0])) return "({})".format(", ".join(map(cls._normalize_repr, value))) if isinstance(value, (str, bytes)): value = repr(value) if value[0] not in ('"', "'"): value = value[1:] else: value = repr(value) return value def test_specs(self, uri): """Check input and output specs in an uri Parameters ---------- uri : string python location of module - e.g 'sphinx.builder' Returns ------- """ from nipype.interfaces.base import BaseInterface # get the names of all classes and functions _, classes = self._parse_module(uri) if not classes: # print 'WARNING: Empty -',uri # dbg return None # Make a shorter version of the uri that omits the package name for # titles allowed_keys = [ "desc", "genfile", "xor", "requires", "desc", "nohash", "argstr", "position", "mandatory", "copyfile", "usedefault", "sep", "hash_files", "deprecated", "new_name", "min_ver", "max_ver", "name_source", "name_template", "keep_extension", "units", "output_name", "extensions", ] in_built = [ "type", "copy", "parent", "instance_handler", "comparison_mode", "array", "default", "editor", ] bad_specs = [] for c in classes: __import__(uri) try: with warnings.catch_warnings(): warnings.simplefilter("ignore") classinst = sys.modules[uri].__dict__[c] except Exception: continue if not issubclass(classinst, BaseInterface): continue testdir = os.path.join(*(uri.split(".")[:-1] + ["tests"])) if not os.path.exists(testdir): os.makedirs(testdir) nonautotest = os.path.join(testdir, "test_%s.py" % c) testfile = os.path.join(testdir, "test_auto_%s.py" % c) if os.path.exists(testfile): os.unlink(testfile) if not os.path.exists(nonautotest): cmd = [ "# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT", "from ..%s import %s" % (uri.split(".")[-1], c), "", ] cmd.append("\ndef test_%s_inputs():" % c) input_fields = "" for traitname, trait in sorted( classinst.input_spec().traits(transient=None).items() ): input_fields += "%s=dict(" % traitname for key, value in sorted(trait.__dict__.items()): if key in in_built or key == "desc": continue input_fields += "%s=%s,\n " % ( 
key, self._normalize_repr(value), ) input_fields += "),\n " cmd += [" input_map = dict(%s)" % input_fields] cmd += [" inputs = %s.input_spec()" % c] cmd += [ """ for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value""" ] fmt_cmd = black.format_str("\n".join(cmd), mode=black.FileMode()) with open(testfile, "wt") as fp: fp.writelines(fmt_cmd) else: print("%s has nonautotest" % c) for traitname, trait in sorted( classinst.input_spec().traits(transient=None).items() ): for key in sorted(trait.__dict__): if key in in_built: continue parent_metadata = [] if "parent" in trait.__dict__: parent_metadata = list(getattr(trait, "parent").__dict__.keys()) if ( key not in allowed_keys + classinst._additional_metadata + parent_metadata ): bad_specs.append([uri, c, "Inputs", traitname, key]) if ( key == "mandatory" and trait.mandatory is not None and not trait.mandatory ): bad_specs.append( [uri, c, "Inputs", traitname, "mandatory=False"] ) if key == "usedefault" and trait.__dict__[key] == False: bad_specs.append( [uri, c, "Inputs", traitname, "usedefault=False"] ) # checking if traits that have default_value different that the trits default one # also have `usedefault` specified; # excluding TraitCompound # excluding Enum: always has default value (the first value) # excluding Tuple: takes tuple of inner traits default values as default, but doesn't use it # for Range assuming that if default == low, it's likely that usedefault should be False # (for Range traits takes low as a default default if ( trait.trait_type.__class__.__name__ not in ["TraitCompound", "Tuple", "Enum"] and trait.default and "usedefault" not in trait.__dict__ and "requires" not in trait.__dict__ and "xor" not in trait.__dict__ ): if ( trait.trait_type.__class__.__name__ == "Range" and trait.default == trait.trait_type._low ): continue bad_specs.append( [ uri, c, "Inputs", traitname, "default value is set, no value for usedefault", ] ) if not classinst.output_spec: continue if not os.path.exists(nonautotest): cmd = ["\ndef test_%s_outputs():" % c] input_fields = "" for traitname, trait in sorted( classinst.output_spec().traits(transient=None).items() ): input_fields += "%s=dict(" % traitname for key, value in sorted(trait.__dict__.items()): if key in in_built or key == "desc": continue input_fields += "%s=%s,\n " % ( key, self._normalize_repr(value), ) input_fields += "),\n " cmd += [" output_map = dict(%s)" % input_fields] cmd += [" outputs = %s.output_spec()" % c] cmd += [ """ for key, metadata in list(output_map.items()): for metakey, value in list(metadata.items()): assert getattr(outputs.traits()[key], metakey) == value""" ] fmt_cmd = black.format_str("\n".join(cmd), mode=black.FileMode()) with open(testfile, "at") as fp: fp.writelines("\n\n" + fmt_cmd) for traitname, trait in sorted( classinst.output_spec().traits(transient=None).items() ): for key in sorted(trait.__dict__): if key in in_built: continue parent_metadata = [] if "parent" in trait.__dict__: parent_metadata = list(getattr(trait, "parent").__dict__.keys()) if ( key not in allowed_keys + classinst._additional_metadata + parent_metadata ): bad_specs.append([uri, c, "Outputs", traitname, key]) return bad_specs def _survives_exclude(self, matchstr, match_type): """ Returns True if *matchstr* does not match patterns ``self.package_name`` removed from front of string if present Examples -------- >>> dw = ApiDocWriter('sphinx') >>> dw._survives_exclude('sphinx.okpkg', 'package') 
True >>> dw.package_skip_patterns.append('^\\.badpkg$') >>> dw._survives_exclude('sphinx.badpkg', 'package') False >>> dw._survives_exclude('sphinx.badpkg', 'module') True >>> dw._survives_exclude('sphinx.badmod', 'module') True >>> dw.module_skip_patterns.append('^\\.badmod$') >>> dw._survives_exclude('sphinx.badmod', 'module') False """ if match_type == "module": patterns = self.module_skip_patterns elif match_type == "package": patterns = self.package_skip_patterns elif match_type == "class": patterns = self.class_skip_patterns else: raise ValueError('Cannot interpret match type "%s"' % match_type) # Match to URI without package name L = len(self.package_name) if matchstr[:L] == self.package_name: matchstr = matchstr[L:] for pat in patterns: try: pat.search except AttributeError: pat = re.compile(pat) if pat.search(matchstr): return False return True def discover_modules(self): """ Return module sequence discovered from ``self.package_name`` Parameters ---------- None Returns ------- mods : sequence Sequence of module names within ``self.package_name`` Examples -------- """ modules = [self.package_name] # raw directory parsing for dirpath, dirnames, filenames in os.walk(self.root_path): # Check directory names for packages root_uri = self._path2uri(os.path.join(self.root_path, dirpath)) for dirname in dirnames[:]: # copy list - we modify inplace package_uri = ".".join((root_uri, dirname)) if self._uri2path(package_uri) and self._survives_exclude( package_uri, "package" ): modules.append(package_uri) else: dirnames.remove(dirname) # Check filenames for modules for filename in filenames: module_name = filename[:-3] module_uri = ".".join((root_uri, module_name)) if self._uri2path(module_uri) and self._survives_exclude( module_uri, "module" ): modules.append(module_uri) return sorted(modules) def check_modules(self): # write the list modules = self.discover_modules() checked_modules = [] for m in modules: bad_specs = self.test_specs(m) if bad_specs: checked_modules.extend(bad_specs) for bad_spec in checked_modules: print(":".join(bad_spec)) if __name__ == "__main__": os.environ["NIPYPE_NO_ET"] = "1" package = "nipype" ic = InterfaceChecker(package) # Packages that should not be included in generated API docs. ic.package_skip_patterns += [ "\.external$", "\.fixes$", "\.utils$", "\.pipeline", "\.testing", "\.caching", "\.workflows", ] """ # Modules that should not be included in generated API docs. ic.module_skip_patterns += ['\.version$', '\.interfaces\.base$', '\.interfaces\.matlab$', '\.interfaces\.rest$', '\.interfaces\.pymvpa$', '\.interfaces\.slicer\.generate_classes$', '\.interfaces\.spm\.base$', '\.interfaces\.traits', '\.pipeline\.alloy$', '\.pipeline\.s3_node_wrapper$', '.\testing', ] ic.class_skip_patterns += ['AFNI', 'ANTS', 'FSL', 'FS', 'Info', '^SPM', 'Tester', 'Spec$', 'Numpy', 'NipypeTester', ] """ ic.check_modules()
[]
[]
[ "NIPYPE_NO_ET" ]
[]
["NIPYPE_NO_ET"]
python
1
0
vendor/github.com/nats-io/nats.go/nats.go
// Copyright 2012-2019 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // A Go client for the NATS messaging system (https://nats.io). package nats import ( "bufio" "bytes" "crypto/tls" "crypto/x509" "encoding/base64" "encoding/json" "errors" "fmt" "io" "io/ioutil" "math/rand" "net" "net/url" "os" "path/filepath" "runtime" "strconv" "strings" "sync" "sync/atomic" "time" "github.com/nats-io/jwt" "github.com/nats-io/nats.go/util" "github.com/nats-io/nkeys" "github.com/nats-io/nuid" ) // Default Constants const ( Version = "1.9.2" DefaultURL = "nats://127.0.0.1:4222" DefaultPort = 4222 DefaultMaxReconnect = 60 DefaultReconnectWait = 2 * time.Second DefaultTimeout = 2 * time.Second DefaultPingInterval = 2 * time.Minute DefaultMaxPingOut = 2 DefaultMaxChanLen = 8192 // 8k DefaultReconnectBufSize = 8 * 1024 * 1024 // 8MB RequestChanLen = 8 DefaultDrainTimeout = 30 * time.Second LangString = "go" ) const ( // STALE_CONNECTION is for detection and proper handling of stale connections. STALE_CONNECTION = "stale connection" // PERMISSIONS_ERR is for when nats server subject authorization has failed. PERMISSIONS_ERR = "permissions violation" // AUTHORIZATION_ERR is for when nats server user authorization has failed. AUTHORIZATION_ERR = "authorization violation" // AUTHENTICATION_EXPIRED_ERR is for when nats server user authorization has expired. 
AUTHENTICATION_EXPIRED_ERR = "user authentication expired" ) // Errors var ( ErrConnectionClosed = errors.New("nats: connection closed") ErrConnectionDraining = errors.New("nats: connection draining") ErrDrainTimeout = errors.New("nats: draining connection timed out") ErrConnectionReconnecting = errors.New("nats: connection reconnecting") ErrSecureConnRequired = errors.New("nats: secure connection required") ErrSecureConnWanted = errors.New("nats: secure connection not available") ErrBadSubscription = errors.New("nats: invalid subscription") ErrTypeSubscription = errors.New("nats: invalid subscription type") ErrBadSubject = errors.New("nats: invalid subject") ErrBadQueueName = errors.New("nats: invalid queue name") ErrSlowConsumer = errors.New("nats: slow consumer, messages dropped") ErrTimeout = errors.New("nats: timeout") ErrBadTimeout = errors.New("nats: timeout invalid") ErrAuthorization = errors.New("nats: authorization violation") ErrAuthExpired = errors.New("nats: authentication expired") ErrNoServers = errors.New("nats: no servers available for connection") ErrJsonParse = errors.New("nats: connect message, json parse error") ErrChanArg = errors.New("nats: argument needs to be a channel type") ErrMaxPayload = errors.New("nats: maximum payload exceeded") ErrMaxMessages = errors.New("nats: maximum messages delivered") ErrSyncSubRequired = errors.New("nats: illegal call on an async subscription") ErrMultipleTLSConfigs = errors.New("nats: multiple tls.Configs not allowed") ErrNoInfoReceived = errors.New("nats: protocol exception, INFO not received") ErrReconnectBufExceeded = errors.New("nats: outbound buffer limit exceeded") ErrInvalidConnection = errors.New("nats: invalid connection") ErrInvalidMsg = errors.New("nats: invalid message or message nil") ErrInvalidArg = errors.New("nats: invalid argument") ErrInvalidContext = errors.New("nats: invalid context") ErrNoDeadlineContext = errors.New("nats: context requires a deadline") ErrNoEchoNotSupported = errors.New("nats: no echo option not supported by this server") ErrClientIDNotSupported = errors.New("nats: client ID not supported by this server") ErrUserButNoSigCB = errors.New("nats: user callback defined without a signature handler") ErrNkeyButNoSigCB = errors.New("nats: nkey defined without a signature handler") ErrNoUserCB = errors.New("nats: user callback not defined") ErrNkeyAndUser = errors.New("nats: user callback and nkey defined") ErrNkeysNotSupported = errors.New("nats: nkeys not supported by the server") ErrStaleConnection = errors.New("nats: " + STALE_CONNECTION) ErrTokenAlreadySet = errors.New("nats: token and token handler both set") ErrMsgNotBound = errors.New("nats: message is not bound to subscription/connection") ErrMsgNoReply = errors.New("nats: message does not have a reply") ) func init() { rand.Seed(time.Now().UnixNano()) } // GetDefaultOptions returns default configuration options for the client. func GetDefaultOptions() Options { return Options{ AllowReconnect: true, MaxReconnect: DefaultMaxReconnect, ReconnectWait: DefaultReconnectWait, Timeout: DefaultTimeout, PingInterval: DefaultPingInterval, MaxPingsOut: DefaultMaxPingOut, SubChanLen: DefaultMaxChanLen, ReconnectBufSize: DefaultReconnectBufSize, DrainTimeout: DefaultDrainTimeout, } } // DEPRECATED: Use GetDefaultOptions() instead. // DefaultOptions is not safe for use by multiple clients. // For details see #308. var DefaultOptions = GetDefaultOptions() // Status represents the state of the connection. 
type Status int const ( DISCONNECTED = Status(iota) CONNECTED CLOSED RECONNECTING CONNECTING DRAINING_SUBS DRAINING_PUBS ) // ConnHandler is used for asynchronous events such as // disconnected and closed connections. type ConnHandler func(*Conn) // ConnErrHandler is used to process asynchronous events like // disconnected connection with the error (if any). type ConnErrHandler func(*Conn, error) // ErrHandler is used to process asynchronous errors encountered // while processing inbound messages. type ErrHandler func(*Conn, *Subscription, error) // UserJWTHandler is used to fetch and return the account signed // JWT for this user. type UserJWTHandler func() (string, error) // SignatureHandler is used to sign a nonce from the server while // authenticating with nkeys. The user should sign the nonce and // return the raw signature. The client will base64 encode this to // send to the server. type SignatureHandler func([]byte) ([]byte, error) // AuthTokenHandler is used to generate a new token. type AuthTokenHandler func() string // asyncCB is used to preserve order for async callbacks. type asyncCB struct { f func() next *asyncCB } type asyncCallbacksHandler struct { mu sync.Mutex cond *sync.Cond head *asyncCB tail *asyncCB } // Option is a function on the options for a connection. type Option func(*Options) error // CustomDialer can be used to specify any dialer, not necessarily // a *net.Dialer. type CustomDialer interface { Dial(network, address string) (net.Conn, error) } // Options can be used to create a customized connection. type Options struct { // Url represents a single NATS server url to which the client // will be connecting. If the Servers option is also set, it // then becomes the first server in the Servers array. Url string // Servers is a configured set of servers which this client // will use when attempting to connect. Servers []string // NoRandomize configures whether we will randomize the // server pool. NoRandomize bool // NoEcho configures whether the server will echo back messages // that are sent on this connection if we also have matching subscriptions. // Note this is supported on servers >= version 1.2. Proto 1 or greater. NoEcho bool // Name is an optional name label which will be sent to the server // on CONNECT to identify the client. Name string // Verbose signals the server to send an OK ack for commands // successfully processed by the server. Verbose bool // Pedantic signals the server whether it should be doing further // validation of subjects. Pedantic bool // Secure enables TLS secure connections that skip server // verification by default. NOT RECOMMENDED. Secure bool // TLSConfig is a custom TLS configuration to use for secure // transports. TLSConfig *tls.Config // AllowReconnect enables reconnection logic to be used when we // encounter a disconnect from the current server. AllowReconnect bool // MaxReconnect sets the number of reconnect attempts that will be // tried before giving up. If negative, then it will never give up // trying to reconnect. MaxReconnect int // ReconnectWait sets the time to backoff after attempting a reconnect // to a server that we were already connected to previously. ReconnectWait time.Duration // Timeout sets the timeout for a Dial operation on a connection. Timeout time.Duration // DrainTimeout sets the timeout for a Drain Operation to complete. DrainTimeout time.Duration // FlusherTimeout is the maximum time to wait for write operations // to the underlying connection to complete (including the flusher loop). 
FlusherTimeout time.Duration // PingInterval is the period at which the client will be sending ping // commands to the server, disabled if 0 or negative. PingInterval time.Duration // MaxPingsOut is the maximum number of pending ping commands that can // be awaiting a response before raising an ErrStaleConnection error. MaxPingsOut int // ClosedCB sets the closed handler that is called when a client will // no longer be connected. ClosedCB ConnHandler // DisconnectedCB sets the disconnected handler that is called // whenever the connection is disconnected. // Will not be called if DisconnectedErrCB is set // DEPRECATED. Use DisconnectedErrCB which passes error that caused // the disconnect event. DisconnectedCB ConnHandler // DisconnectedErrCB sets the disconnected error handler that is called // whenever the connection is disconnected. // Disconnected error could be nil, for instance when user explicitly closes the connection. // DisconnectedCB will not be called if DisconnectedErrCB is set DisconnectedErrCB ConnErrHandler // ReconnectedCB sets the reconnected handler called whenever // the connection is successfully reconnected. ReconnectedCB ConnHandler // DiscoveredServersCB sets the callback that is invoked whenever a new // server has joined the cluster. DiscoveredServersCB ConnHandler // AsyncErrorCB sets the async error handler (e.g. slow consumer errors) AsyncErrorCB ErrHandler // ReconnectBufSize is the size of the backing bufio during reconnect. // Once this has been exhausted publish operations will return an error. ReconnectBufSize int // SubChanLen is the size of the buffered channel used between the socket // Go routine and the message delivery for SyncSubscriptions. // NOTE: This does not affect AsyncSubscriptions which are // dictated by PendingLimits() SubChanLen int // UserJWT sets the callback handler that will fetch a user's JWT. UserJWT UserJWTHandler // Nkey sets the public nkey that will be used to authenticate // when connecting to the server. UserJWT and Nkey are mutually exclusive // and if defined, UserJWT will take precedence. Nkey string // SignatureCB designates the function used to sign the nonce // presented from the server. SignatureCB SignatureHandler // User sets the username to be used when connecting to the server. User string // Password sets the password to be used when connecting to a server. Password string // Token sets the token to be used when connecting to a server. Token string // TokenHandler designates the function used to generate the token to be used when connecting to a server. TokenHandler AuthTokenHandler // Dialer allows a custom net.Dialer when forming connections. // DEPRECATED: should use CustomDialer instead. Dialer *net.Dialer // CustomDialer allows to specify a custom dialer (not necessarily // a *net.Dialer). CustomDialer CustomDialer // UseOldRequestStyle forces the old method of Requests that utilize // a new Inbox and a new Subscription for each request. UseOldRequestStyle bool // NoCallbacksAfterClientClose allows preventing the invocation of // callbacks after Close() is called. Client won't receive notifications // when Close is invoked by user code. Default is to invoke the callbacks. NoCallbacksAfterClientClose bool } const ( // Scratch storage for assembling protocol headers scratchSize = 512 // The size of the bufio reader/writer on top of the socket. 
defaultBufSize = 32768 // The buffered size of the flush "kick" channel flushChanSize = 1 // Default server pool size srvPoolSize = 4 // NUID size nuidSize = 22 // Default port used if none is specified in given URL(s) defaultPortString = "4222" ) // A Conn represents a bare connection to a nats-server. // It can send and receive []byte payloads. // The connection is safe to use in multiple Go routines concurrently. type Conn struct { // Keep all members for which we use atomic at the beginning of the // struct and make sure they are all 64bits (or use padding if necessary). // atomic.* functions crash on 32bit machines if operand is not aligned // at 64bit. See https://github.com/golang/go/issues/599 Statistics mu sync.RWMutex // Opts holds the configuration of the Conn. // Modifying the configuration of a running Conn is a race. Opts Options wg sync.WaitGroup srvPool []*srv current *srv urls map[string]struct{} // Keep track of all known URLs (used by processInfo) conn net.Conn bw *bufio.Writer pending *bytes.Buffer fch chan struct{} info serverInfo ssid int64 subsMu sync.RWMutex subs map[int64]*Subscription ach *asyncCallbacksHandler pongs []chan struct{} scratch [scratchSize]byte status Status initc bool // true if the connection is performing the initial connect err error ps *parseState ptmr *time.Timer pout int ar bool // abort reconnect // New style response handler respSub string // The wildcard subject respScanf string // The scanf template to extract mux token respMux *Subscription // A single response subscription respMap map[string]chan *Msg // Request map for the response msg channels respRand *rand.Rand // Used for generating suffix } // A Subscription represents interest in a given subject. type Subscription struct { mu sync.Mutex sid int64 // Subject that represents this subscription. This can be different // than the received subject inside a Msg if this is a wildcard. Subject string // Optional queue group name. If present, all subscriptions with the // same name will form a distributed queue, and each message will // only be processed by one member of the group. Queue string delivered uint64 max uint64 conn *Conn mcb MsgHandler mch chan *Msg closed bool sc bool connClosed bool // Type of Subscription typ SubscriptionType // Async linked list pHead *Msg pTail *Msg pCond *sync.Cond // Pending stats, async subscriptions, high-speed etc. pMsgs int pBytes int pMsgsMax int pBytesMax int pMsgsLimit int pBytesLimit int dropped int } // Msg is a structure used by Subscribers and PublishMsg(). type Msg struct { Subject string Reply string Data []byte Sub *Subscription next *Msg barrier *barrierInfo } type barrierInfo struct { refs int64 f func() } // Tracks various stats received and sent on this connection, // including counts for messages and bytes. type Statistics struct { InMsgs uint64 OutMsgs uint64 InBytes uint64 OutBytes uint64 Reconnects uint64 } // Tracks individual backend servers. 
type srv struct { url *url.URL didConnect bool reconnects int lastAttempt time.Time lastErr error isImplicit bool tlsName string } type serverInfo struct { Id string `json:"server_id"` Host string `json:"host"` Port uint `json:"port"` Version string `json:"version"` AuthRequired bool `json:"auth_required"` TLSRequired bool `json:"tls_required"` MaxPayload int64 `json:"max_payload"` ConnectURLs []string `json:"connect_urls,omitempty"` Proto int `json:"proto,omitempty"` CID uint64 `json:"client_id,omitempty"` Nonce string `json:"nonce,omitempty"` } const ( // clientProtoZero is the original client protocol from 2009. // http://nats.io/documentation/internals/nats-protocol/ /* clientProtoZero */ _ = iota // clientProtoInfo signals a client can receive more then the original INFO block. // This can be used to update clients on other cluster members, etc. clientProtoInfo ) type connectInfo struct { Verbose bool `json:"verbose"` Pedantic bool `json:"pedantic"` UserJWT string `json:"jwt,omitempty"` Nkey string `json:"nkey,omitempty"` Signature string `json:"sig,omitempty"` User string `json:"user,omitempty"` Pass string `json:"pass,omitempty"` Token string `json:"auth_token,omitempty"` TLS bool `json:"tls_required"` Name string `json:"name"` Lang string `json:"lang"` Version string `json:"version"` Protocol int `json:"protocol"` Echo bool `json:"echo"` } // MsgHandler is a callback function that processes messages delivered to // asynchronous subscribers. type MsgHandler func(msg *Msg) // Connect will attempt to connect to the NATS system. // The url can contain username/password semantics. e.g. nats://derek:pass@localhost:4222 // Comma separated arrays are also supported, e.g. urlA, urlB. // Options start with the defaults but can be overridden. func Connect(url string, options ...Option) (*Conn, error) { opts := GetDefaultOptions() opts.Servers = processUrlString(url) for _, opt := range options { if opt != nil { if err := opt(&opts); err != nil { return nil, err } } } return opts.Connect() } // Options that can be passed to Connect. // Name is an Option to set the client name. func Name(name string) Option { return func(o *Options) error { o.Name = name return nil } } // Secure is an Option to enable TLS secure connections that skip server verification by default. // Pass a TLS Configuration for proper TLS. // NOTE: This should NOT be used in a production setting. func Secure(tls ...*tls.Config) Option { return func(o *Options) error { o.Secure = true // Use of variadic just simplifies testing scenarios. We only take the first one. if len(tls) > 1 { return ErrMultipleTLSConfigs } if len(tls) == 1 { o.TLSConfig = tls[0] } return nil } } // RootCAs is a helper option to provide the RootCAs pool from a list of filenames. // If Secure is not already set this will set it as well. func RootCAs(file ...string) Option { return func(o *Options) error { pool := x509.NewCertPool() for _, f := range file { rootPEM, err := ioutil.ReadFile(f) if err != nil || rootPEM == nil { return fmt.Errorf("nats: error loading or parsing rootCA file: %v", err) } ok := pool.AppendCertsFromPEM(rootPEM) if !ok { return fmt.Errorf("nats: failed to parse root certificate from %q", f) } } if o.TLSConfig == nil { o.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} } o.TLSConfig.RootCAs = pool o.Secure = true return nil } } // ClientCert is a helper option to provide the client certificate from a file. // If Secure is not already set this will set it as well. 
func ClientCert(certFile, keyFile string) Option { return func(o *Options) error { cert, err := tls.LoadX509KeyPair(certFile, keyFile) if err != nil { return fmt.Errorf("nats: error loading client certificate: %v", err) } cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0]) if err != nil { return fmt.Errorf("nats: error parsing client certificate: %v", err) } if o.TLSConfig == nil { o.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} } o.TLSConfig.Certificates = []tls.Certificate{cert} o.Secure = true return nil } } // NoReconnect is an Option to turn off reconnect behavior. func NoReconnect() Option { return func(o *Options) error { o.AllowReconnect = false return nil } } // DontRandomize is an Option to turn off randomizing the server pool. func DontRandomize() Option { return func(o *Options) error { o.NoRandomize = true return nil } } // NoEcho is an Option to turn off messages echoing back from a server. // Note this is supported on servers >= version 1.2. Proto 1 or greater. func NoEcho() Option { return func(o *Options) error { o.NoEcho = true return nil } } // ReconnectWait is an Option to set the wait time between reconnect attempts. func ReconnectWait(t time.Duration) Option { return func(o *Options) error { o.ReconnectWait = t return nil } } // MaxReconnects is an Option to set the maximum number of reconnect attempts. func MaxReconnects(max int) Option { return func(o *Options) error { o.MaxReconnect = max return nil } } // PingInterval is an Option to set the period for client ping commands. func PingInterval(t time.Duration) Option { return func(o *Options) error { o.PingInterval = t return nil } } // MaxPingsOutstanding is an Option to set the maximum number of ping requests // that can go un-answered by the server before closing the connection. func MaxPingsOutstanding(max int) Option { return func(o *Options) error { o.MaxPingsOut = max return nil } } // ReconnectBufSize sets the buffer size of messages kept while busy reconnecting. func ReconnectBufSize(size int) Option { return func(o *Options) error { o.ReconnectBufSize = size return nil } } // Timeout is an Option to set the timeout for Dial on a connection. func Timeout(t time.Duration) Option { return func(o *Options) error { o.Timeout = t return nil } } // FlusherTimeout is an Option to set the write (and flush) timeout on a connection. func FlusherTimeout(t time.Duration) Option { return func(o *Options) error { o.FlusherTimeout = t return nil } } // DrainTimeout is an Option to set the timeout for draining a connection. func DrainTimeout(t time.Duration) Option { return func(o *Options) error { o.DrainTimeout = t return nil } } // DisconnectErrHandler is an Option to set the disconnected error handler. func DisconnectErrHandler(cb ConnErrHandler) Option { return func(o *Options) error { o.DisconnectedErrCB = cb return nil } } // DisconnectHandler is an Option to set the disconnected handler. // DEPRECATED: Use DisconnectErrHandler. func DisconnectHandler(cb ConnHandler) Option { return func(o *Options) error { o.DisconnectedCB = cb return nil } } // ReconnectHandler is an Option to set the reconnected handler. func ReconnectHandler(cb ConnHandler) Option { return func(o *Options) error { o.ReconnectedCB = cb return nil } } // ClosedHandler is an Option to set the closed handler. func ClosedHandler(cb ConnHandler) Option { return func(o *Options) error { o.ClosedCB = cb return nil } } // DiscoveredServersHandler is an Option to set the new servers handler. 
func DiscoveredServersHandler(cb ConnHandler) Option { return func(o *Options) error { o.DiscoveredServersCB = cb return nil } } // ErrorHandler is an Option to set the async error handler. func ErrorHandler(cb ErrHandler) Option { return func(o *Options) error { o.AsyncErrorCB = cb return nil } } // UserInfo is an Option to set the username and password to // use when not included directly in the URLs. func UserInfo(user, password string) Option { return func(o *Options) error { o.User = user o.Password = password return nil } } // Token is an Option to set the token to use // when a token is not included directly in the URLs // and when a token handler is not provided. func Token(token string) Option { return func(o *Options) error { if o.TokenHandler != nil { return ErrTokenAlreadySet } o.Token = token return nil } } // TokenHandler is an Option to set the token handler to use // when a token is not included directly in the URLs // and when a token is not set. func TokenHandler(cb AuthTokenHandler) Option { return func(o *Options) error { if o.Token != "" { return ErrTokenAlreadySet } o.TokenHandler = cb return nil } } // UserCredentials is a convenience function that takes a filename // for a user's JWT and a filename for the user's private Nkey seed. func UserCredentials(userOrChainedFile string, seedFiles ...string) Option { userCB := func() (string, error) { return userFromFile(userOrChainedFile) } var keyFile string if len(seedFiles) > 0 { keyFile = seedFiles[0] } else { keyFile = userOrChainedFile } sigCB := func(nonce []byte) ([]byte, error) { return sigHandler(nonce, keyFile) } return UserJWT(userCB, sigCB) } // UserJWT will set the callbacks to retrieve the user's JWT and // the signature callback to sign the server nonce. This and the Nkey // option are mutually exclusive. func UserJWT(userCB UserJWTHandler, sigCB SignatureHandler) Option { return func(o *Options) error { if userCB == nil { return ErrNoUserCB } if sigCB == nil { return ErrUserButNoSigCB } o.UserJWT = userCB o.SignatureCB = sigCB return nil } } // Nkey will set the public Nkey and the signature callback to // sign the server nonce. func Nkey(pubKey string, sigCB SignatureHandler) Option { return func(o *Options) error { o.Nkey = pubKey o.SignatureCB = sigCB if pubKey != "" && sigCB == nil { return ErrNkeyButNoSigCB } return nil } } // SyncQueueLen will set the maximum queue len for the internal // channel used for SubscribeSync(). func SyncQueueLen(max int) Option { return func(o *Options) error { o.SubChanLen = max return nil } } // Dialer is an Option to set the dialer which will be used when // attempting to establish a connection. // DEPRECATED: Should use CustomDialer instead. func Dialer(dialer *net.Dialer) Option { return func(o *Options) error { o.Dialer = dialer return nil } } // SetCustomDialer is an Option to set a custom dialer which will be // used when attempting to establish a connection. If both Dialer // and CustomDialer are specified, CustomDialer takes precedence. func SetCustomDialer(dialer CustomDialer) Option { return func(o *Options) error { o.CustomDialer = dialer return nil } } // UseOldRequestStyle is an Option to force usage of the old Request style. func UseOldRequestStyle() Option { return func(o *Options) error { o.UseOldRequestStyle = true return nil } } // NoCallbacksAfterClientClose is an Option to disable callbacks when user code // calls Close(). If close is initiated by any other condition, callbacks // if any will be invoked.
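// Usage sketch for the authentication options above (illustrative only; the
// credentials file, user name, password and token values are placeholders):
//
//	// JWT and NKey seed taken from a single .creds file:
//	nc, err := nats.Connect(url, nats.UserCredentials("./user.creds"))
//
//	// Username/password or token, when not embedded in the URL itself:
//	nc, err = nats.Connect(url, nats.UserInfo("derek", "s3cr3t"))
//	nc, err = nats.Connect(url, nats.Token("s3cr3t-t0ken"))
//
// Token and TokenHandler are mutually exclusive, as are UserJWT and Nkey.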
func NoCallbacksAfterClientClose() Option { return func(o *Options) error { o.NoCallbacksAfterClientClose = true return nil } } // Handler processing // SetDisconnectHandler will set the disconnect event handler. // DEPRECATED: Use SetDisconnectErrHandler func (nc *Conn) SetDisconnectHandler(dcb ConnHandler) { if nc == nil { return } nc.mu.Lock() defer nc.mu.Unlock() nc.Opts.DisconnectedCB = dcb } // SetDisconnectErrHandler will set the disconnect event handler. func (nc *Conn) SetDisconnectErrHandler(dcb ConnErrHandler) { if nc == nil { return } nc.mu.Lock() defer nc.mu.Unlock() nc.Opts.DisconnectedErrCB = dcb } // SetReconnectHandler will set the reconnect event handler. func (nc *Conn) SetReconnectHandler(rcb ConnHandler) { if nc == nil { return } nc.mu.Lock() defer nc.mu.Unlock() nc.Opts.ReconnectedCB = rcb } // SetDiscoveredServersHandler will set the discovered servers handler. func (nc *Conn) SetDiscoveredServersHandler(dscb ConnHandler) { if nc == nil { return } nc.mu.Lock() defer nc.mu.Unlock() nc.Opts.DiscoveredServersCB = dscb } // SetClosedHandler will set the closed event handler. func (nc *Conn) SetClosedHandler(cb ConnHandler) { if nc == nil { return } nc.mu.Lock() defer nc.mu.Unlock() nc.Opts.ClosedCB = cb } // SetErrorHandler will set the async error handler. func (nc *Conn) SetErrorHandler(cb ErrHandler) { if nc == nil { return } nc.mu.Lock() defer nc.mu.Unlock() nc.Opts.AsyncErrorCB = cb } // Process the url string argument to Connect. // Return an array of urls, even if only one. func processUrlString(url string) []string { urls := strings.Split(url, ",") for i, s := range urls { urls[i] = strings.TrimSpace(s) } return urls } // Connect will attempt to connect to a NATS server with multiple options. func (o Options) Connect() (*Conn, error) { nc := &Conn{Opts: o} // Some default options processing. if nc.Opts.MaxPingsOut == 0 { nc.Opts.MaxPingsOut = DefaultMaxPingOut } // Allow old default for channel length to work correctly. if nc.Opts.SubChanLen == 0 { nc.Opts.SubChanLen = DefaultMaxChanLen } // Default ReconnectBufSize if nc.Opts.ReconnectBufSize == 0 { nc.Opts.ReconnectBufSize = DefaultReconnectBufSize } // Ensure that Timeout is not 0 if nc.Opts.Timeout == 0 { nc.Opts.Timeout = DefaultTimeout } // Check first for user jwt callback being defined and nkey. if nc.Opts.UserJWT != nil && nc.Opts.Nkey != "" { return nil, ErrNkeyAndUser } // Check if we have an nkey but no signature callback defined. if nc.Opts.Nkey != "" && nc.Opts.SignatureCB == nil { return nil, ErrNkeyButNoSigCB } // Allow custom Dialer for connecting using DialTimeout by default if nc.Opts.Dialer == nil { nc.Opts.Dialer = &net.Dialer{ Timeout: nc.Opts.Timeout, } } if err := nc.setupServerPool(); err != nil { return nil, err } // Create the async callback handler.
nc.ach = &asyncCallbacksHandler{} nc.ach.cond = sync.NewCond(&nc.ach.mu) if err := nc.connect(); err != nil { return nil, err } // Spin up the async cb dispatcher on success go nc.ach.asyncCBDispatcher() return nc, nil } const ( _CRLF_ = "\r\n" _EMPTY_ = "" _SPC_ = " " _PUB_P_ = "PUB " ) const ( _OK_OP_ = "+OK" _ERR_OP_ = "-ERR" _PONG_OP_ = "PONG" _INFO_OP_ = "INFO" ) const ( conProto = "CONNECT %s" + _CRLF_ pingProto = "PING" + _CRLF_ pongProto = "PONG" + _CRLF_ subProto = "SUB %s %s %d" + _CRLF_ unsubProto = "UNSUB %d %s" + _CRLF_ okProto = _OK_OP_ + _CRLF_ ) // Return the currently selected server func (nc *Conn) currentServer() (int, *srv) { for i, s := range nc.srvPool { if s == nil { continue } if s == nc.current { return i, s } } return -1, nil } // Pop the current server and put onto the end of the list. Select head of list as long // as number of reconnect attempts under MaxReconnect. func (nc *Conn) selectNextServer() (*srv, error) { i, s := nc.currentServer() if i < 0 { return nil, ErrNoServers } sp := nc.srvPool num := len(sp) copy(sp[i:num-1], sp[i+1:num]) maxReconnect := nc.Opts.MaxReconnect if maxReconnect < 0 || s.reconnects < maxReconnect { nc.srvPool[num-1] = s } else { nc.srvPool = sp[0 : num-1] } if len(nc.srvPool) <= 0 { nc.current = nil return nil, ErrNoServers } nc.current = nc.srvPool[0] return nc.srvPool[0], nil } // Will assign the correct server to nc.current func (nc *Conn) pickServer() error { nc.current = nil if len(nc.srvPool) <= 0 { return ErrNoServers } for _, s := range nc.srvPool { if s != nil { nc.current = s return nil } } return ErrNoServers } const tlsScheme = "tls" // Create the server pool using the options given. // We will place a Url option first, followed by any // Server Options. We will randomize the server pool unless // the NoRandomize flag is set. func (nc *Conn) setupServerPool() error { nc.srvPool = make([]*srv, 0, srvPoolSize) nc.urls = make(map[string]struct{}, srvPoolSize) // Create srv objects from each url string in nc.Opts.Servers // and add them to the pool. for _, urlString := range nc.Opts.Servers { if err := nc.addURLToPool(urlString, false, false); err != nil { return err } } // Randomize if allowed to if !nc.Opts.NoRandomize { nc.shufflePool() } // Normally, if this one is set, Options.Servers should not be, // but we always allowed that, so continue to do so. if nc.Opts.Url != _EMPTY_ { // Add to the end of the array if err := nc.addURLToPool(nc.Opts.Url, false, false); err != nil { return err } // Then swap it with first to guarantee that Options.Url is tried first. last := len(nc.srvPool) - 1 if last > 0 { nc.srvPool[0], nc.srvPool[last] = nc.srvPool[last], nc.srvPool[0] } } else if len(nc.srvPool) <= 0 { // Place default URL if pool is empty. if err := nc.addURLToPool(DefaultURL, false, false); err != nil { return err } } // Check for Scheme hint to move to TLS mode. for _, srv := range nc.srvPool { if srv.url.Scheme == tlsScheme { // FIXME(dlc), this is for all in the pool, should be case by case. nc.Opts.Secure = true if nc.Opts.TLSConfig == nil { nc.Opts.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} } } } return nc.pickServer() } // Helper function to return scheme func (nc *Conn) connScheme() string { if nc.Opts.Secure { return tlsScheme } return "nats" } // Return true iff u.Hostname() is an IP address. 
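// Usage sketch: the server pool described above can also be populated by
// filling in an Options value directly instead of using functional options
// (illustrative only; the server URLs are placeholders):
//
//	opts := nats.GetDefaultOptions()
//	opts.Servers = []string{"nats://s1:4222", "nats://s2:4222", "nats://s3:4222"}
//	opts.NoRandomize = true // keep the order given above instead of shuffling
//	nc, err := opts.Connect()
//	if err != nil {
//		log.Fatal(err)
//	}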
func hostIsIP(u *url.URL) bool { return net.ParseIP(u.Hostname()) != nil } // addURLToPool adds an entry to the server pool func (nc *Conn) addURLToPool(sURL string, implicit, saveTLSName bool) error { if !strings.Contains(sURL, "://") { sURL = fmt.Sprintf("%s://%s", nc.connScheme(), sURL) } var ( u *url.URL err error ) for i := 0; i < 2; i++ { u, err = url.Parse(sURL) if err != nil { return err } if u.Port() != "" { break } // In case given URL is of the form "localhost:", just add // the port number at the end, otherwise, add ":4222". if sURL[len(sURL)-1] != ':' { sURL += ":" } sURL += defaultPortString } var tlsName string if implicit { curl := nc.current.url // Check to see if we do not have a url.User but current connected // url does. If so copy over. if u.User == nil && curl.User != nil { u.User = curl.User } // We are checking to see if we have a secure connection and are // adding an implicit server that just has an IP. If so we will remember // the current hostname we are connected to. if saveTLSName && hostIsIP(u) { tlsName = curl.Hostname() } } s := &srv{url: u, isImplicit: implicit, tlsName: tlsName} nc.srvPool = append(nc.srvPool, s) nc.urls[u.Host] = struct{}{} return nil } // shufflePool swaps randomly elements in the server pool func (nc *Conn) shufflePool() { if len(nc.srvPool) <= 1 { return } source := rand.NewSource(time.Now().UnixNano()) r := rand.New(source) for i := range nc.srvPool { j := r.Intn(i + 1) nc.srvPool[i], nc.srvPool[j] = nc.srvPool[j], nc.srvPool[i] } } func (nc *Conn) newBuffer() *bufio.Writer { var w io.Writer = nc.conn if nc.Opts.FlusherTimeout > 0 { w = &timeoutWriter{conn: nc.conn, timeout: nc.Opts.FlusherTimeout} } return bufio.NewWriterSize(w, defaultBufSize) } // createConn will connect to the server and wrap the appropriate // bufio structures. It will do the right thing when an existing // connection is in place. func (nc *Conn) createConn() (err error) { if nc.Opts.Timeout < 0 { return ErrBadTimeout } if _, cur := nc.currentServer(); cur == nil { return ErrNoServers } else { cur.lastAttempt = time.Now() } // We will auto-expand host names if they resolve to multiple IPs hosts := []string{} u := nc.current.url if net.ParseIP(u.Hostname()) == nil { addrs, _ := net.LookupHost(u.Hostname()) for _, addr := range addrs { hosts = append(hosts, net.JoinHostPort(addr, u.Port())) } } // Fall back to what we were given. if len(hosts) == 0 { hosts = append(hosts, u.Host) } // CustomDialer takes precedence. If not set, use Opts.Dialer which // is set to a default *net.Dialer (in Connect()) if not explicitly // set by the user. dialer := nc.Opts.CustomDialer if dialer == nil { // We will copy and shorten the timeout if we have multiple hosts to try. copyDialer := *nc.Opts.Dialer copyDialer.Timeout = copyDialer.Timeout / time.Duration(len(hosts)) dialer = &copyDialer } if len(hosts) > 1 && !nc.Opts.NoRandomize { rand.Shuffle(len(hosts), func(i, j int) { hosts[i], hosts[j] = hosts[j], hosts[i] }) } for _, host := range hosts { nc.conn, err = dialer.Dial("tcp", host) if err == nil { break } } if err != nil { return err } if nc.pending != nil && nc.bw != nil { // Move to pending buffer. nc.bw.Flush() } nc.bw = nc.newBuffer() return nil } // makeTLSConn will wrap an existing Conn using TLS func (nc *Conn) makeTLSConn() error { // Allow the user to configure their own tls.Config structure. 
var tlsCopy *tls.Config if nc.Opts.TLSConfig != nil { tlsCopy = util.CloneTLSConfig(nc.Opts.TLSConfig) } else { tlsCopy = &tls.Config{} } // If its blank we will override it with the current host if tlsCopy.ServerName == _EMPTY_ { if nc.current.tlsName != _EMPTY_ { tlsCopy.ServerName = nc.current.tlsName } else { h, _, _ := net.SplitHostPort(nc.current.url.Host) tlsCopy.ServerName = h } } nc.conn = tls.Client(nc.conn, tlsCopy) conn := nc.conn.(*tls.Conn) if err := conn.Handshake(); err != nil { return err } nc.bw = nc.newBuffer() return nil } // waitForExits will wait for all socket watcher Go routines to // be shutdown before proceeding. func (nc *Conn) waitForExits() { // Kick old flusher forcefully. select { case nc.fch <- struct{}{}: default: } // Wait for any previous go routines. nc.wg.Wait() } // Report the connected server's Url func (nc *Conn) ConnectedUrl() string { if nc == nil { return _EMPTY_ } nc.mu.RLock() defer nc.mu.RUnlock() if nc.status != CONNECTED { return _EMPTY_ } return nc.current.url.String() } // ConnectedAddr returns the connected server's IP func (nc *Conn) ConnectedAddr() string { if nc == nil { return _EMPTY_ } nc.mu.RLock() defer nc.mu.RUnlock() if nc.status != CONNECTED { return _EMPTY_ } return nc.conn.RemoteAddr().String() } // Report the connected server's Id func (nc *Conn) ConnectedServerId() string { if nc == nil { return _EMPTY_ } nc.mu.RLock() defer nc.mu.RUnlock() if nc.status != CONNECTED { return _EMPTY_ } return nc.info.Id } // Low level setup for structs, etc func (nc *Conn) setup() { nc.subs = make(map[int64]*Subscription) nc.pongs = make([]chan struct{}, 0, 8) nc.fch = make(chan struct{}, flushChanSize) // Setup scratch outbound buffer for PUB pub := nc.scratch[:len(_PUB_P_)] copy(pub, _PUB_P_) } // Process a connected connection and initialize properly. func (nc *Conn) processConnectInit() error { // Set our deadline for the whole connect process nc.conn.SetDeadline(time.Now().Add(nc.Opts.Timeout)) defer nc.conn.SetDeadline(time.Time{}) // Set our status to connecting. nc.status = CONNECTING // Process the INFO protocol received from the server err := nc.processExpectedInfo() if err != nil { return err } // Send the CONNECT protocol along with the initial PING protocol. // Wait for the PONG response (or any error that we get from the server). err = nc.sendConnect() if err != nil { return err } // Reset the number of PING sent out nc.pout = 0 // Start or reset Timer if nc.Opts.PingInterval > 0 { if nc.ptmr == nil { nc.ptmr = time.AfterFunc(nc.Opts.PingInterval, nc.processPingTimer) } else { nc.ptmr.Reset(nc.Opts.PingInterval) } } // Start the readLoop and flusher go routines, we will wait on both on a reconnect event. nc.wg.Add(2) go nc.readLoop() go nc.flusher() return nil } // Main connect function. Will connect to the nats-server func (nc *Conn) connect() error { var returnedErr error // Create actual socket connection // For first connect we walk all servers in the pool and try // to connect immediately. nc.mu.Lock() nc.initc = true // The pool may change inside the loop iteration due to INFO protocol. for i := 0; i < len(nc.srvPool); i++ { nc.current = nc.srvPool[i] if err := nc.createConn(); err == nil { // This was moved out of processConnectInit() because // that function is now invoked from doReconnect() too. 
nc.setup() err = nc.processConnectInit() if err == nil { nc.srvPool[i].didConnect = true nc.srvPool[i].reconnects = 0 nc.current.lastErr = nil returnedErr = nil break } else { returnedErr = err nc.mu.Unlock() nc.close(DISCONNECTED, false, err) nc.mu.Lock() nc.current = nil } } else { // Cancel out default connection refused, will trigger the // No servers error conditional if strings.Contains(err.Error(), "connection refused") { returnedErr = nil } } } nc.initc = false if returnedErr == nil && nc.status != CONNECTED { returnedErr = ErrNoServers } nc.mu.Unlock() return returnedErr } // This will check to see if the connection should be // secure. This can be dictated from either end and should // only be called after the INIT protocol has been received. func (nc *Conn) checkForSecure() error { // Check to see if we need to engage TLS o := nc.Opts // Check for mismatch in setups if o.Secure && !nc.info.TLSRequired { return ErrSecureConnWanted } else if nc.info.TLSRequired && !o.Secure { // Switch to Secure since server needs TLS. o.Secure = true } // Need to rewrap with bufio if o.Secure { if err := nc.makeTLSConn(); err != nil { return err } } return nil } // processExpectedInfo will look for the expected first INFO message // sent when a connection is established. The lock should be held entering. func (nc *Conn) processExpectedInfo() error { c := &control{} // Read the protocol err := nc.readOp(c) if err != nil { return err } // The nats protocol should send INFO first always. if c.op != _INFO_OP_ { return ErrNoInfoReceived } // Parse the protocol if err := nc.processInfo(c.args); err != nil { return err } if nc.Opts.Nkey != "" && nc.info.Nonce == "" { return ErrNkeysNotSupported } return nc.checkForSecure() } // Sends a protocol control message by queuing into the bufio writer // and kicking the flush Go routine. These writes are protected. func (nc *Conn) sendProto(proto string) { nc.mu.Lock() nc.bw.WriteString(proto) nc.kickFlusher() nc.mu.Unlock() } // Generate a connect protocol message, issuing user/password if // applicable. The lock is assumed to be held upon entering. func (nc *Conn) connectProto() (string, error) { o := nc.Opts var nkey, sig, user, pass, token, ujwt string u := nc.current.url.User if u != nil { // if no password, assume username is authToken if _, ok := u.Password(); !ok { token = u.Username() } else { user = u.Username() pass, _ = u.Password() } } else { // Take from options (possibly all empty strings) user = o.User pass = o.Password token = o.Token nkey = o.Nkey } // Look for user jwt. if o.UserJWT != nil { if jwt, err := o.UserJWT(); err != nil { return _EMPTY_, err } else { ujwt = jwt } if nkey != _EMPTY_ { return _EMPTY_, ErrNkeyAndUser } } if ujwt != _EMPTY_ || nkey != _EMPTY_ { if o.SignatureCB == nil { if ujwt == _EMPTY_ { return _EMPTY_, ErrNkeyButNoSigCB } return _EMPTY_, ErrUserButNoSigCB } sigraw, err := o.SignatureCB([]byte(nc.info.Nonce)) if err != nil { return _EMPTY_, err } sig = base64.RawURLEncoding.EncodeToString(sigraw) } if nc.Opts.TokenHandler != nil { if token != _EMPTY_ { return _EMPTY_, ErrTokenAlreadySet } token = nc.Opts.TokenHandler() } cinfo := connectInfo{o.Verbose, o.Pedantic, ujwt, nkey, sig, user, pass, token, o.Secure, o.Name, LangString, Version, clientProtoInfo, !o.NoEcho} b, err := json.Marshal(cinfo) if err != nil { return _EMPTY_, ErrJsonParse } // Check if NoEcho is set and we have a server that supports it. 
if o.NoEcho && nc.info.Proto < 1 { return _EMPTY_, ErrNoEchoNotSupported } return fmt.Sprintf(conProto, b), nil } // normalizeErr removes the prefix -ERR, trim spaces and remove the quotes. func normalizeErr(line string) string { s := strings.TrimSpace(strings.TrimPrefix(line, _ERR_OP_)) s = strings.TrimLeft(strings.TrimRight(s, "'"), "'") return s } // Send a connect protocol message to the server, issue user/password if // applicable. Will wait for a flush to return from the server for error // processing. func (nc *Conn) sendConnect() error { // Construct the CONNECT protocol string cProto, err := nc.connectProto() if err != nil { return err } // Write the protocol into the buffer _, err = nc.bw.WriteString(cProto) if err != nil { return err } // Add to the buffer the PING protocol _, err = nc.bw.WriteString(pingProto) if err != nil { return err } // Flush the buffer err = nc.bw.Flush() if err != nil { return err } // We don't want to read more than we need here, otherwise // we would need to transfer the excess read data to the readLoop. // Since in normal situations we just are looking for a PONG\r\n, // reading byte-by-byte here is ok. proto, err := nc.readProto() if err != nil { return err } // If opts.Verbose is set, handle +OK if nc.Opts.Verbose && proto == okProto { // Read the rest now... proto, err = nc.readProto() if err != nil { return err } } // We expect a PONG if proto != pongProto { // But it could be something else, like -ERR // Since we no longer use ReadLine(), trim the trailing "\r\n" proto = strings.TrimRight(proto, "\r\n") // If it's a server error... if strings.HasPrefix(proto, _ERR_OP_) { // Remove -ERR, trim spaces and quotes, and convert to lower case. proto = normalizeErr(proto) // Check if this is an auth error if authErr := checkAuthError(strings.ToLower(proto)); authErr != nil { // This will schedule an async error if we are in reconnect, // and keep track of the auth error for the current server. // If we have got the same error twice, this sets nc.ar to true to // indicate that the reconnect should be aborted (will be checked // in doReconnect()). nc.processAuthError(authErr) } return errors.New("nats: " + proto) } // Notify that we got an unexpected protocol. return fmt.Errorf("nats: expected '%s', got '%s'", _PONG_OP_, proto) } // This is where we are truly connected. nc.status = CONNECTED return nil } // reads a protocol one byte at a time. func (nc *Conn) readProto() (string, error) { var ( _buf = [10]byte{} buf = _buf[:0] b = [1]byte{} protoEnd = byte('\n') ) for { if _, err := nc.conn.Read(b[:1]); err != nil { // Do not report EOF error if err == io.EOF { return string(buf), nil } return "", err } buf = append(buf, b[0]) if b[0] == protoEnd { return string(buf), nil } } } // A control protocol line. type control struct { op, args string } // Read a control line and process the intended op. func (nc *Conn) readOp(c *control) error { br := bufio.NewReaderSize(nc.conn, defaultBufSize) line, err := br.ReadString('\n') if err != nil { return err } parseControl(line, c) return nil } // Parse a control line from the server. func parseControl(line string, c *control) { toks := strings.SplitN(line, _SPC_, 2) if len(toks) == 1 { c.op = strings.TrimSpace(toks[0]) c.args = _EMPTY_ } else if len(toks) == 2 { c.op, c.args = strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1]) } else { c.op = _EMPTY_ } } // flushReconnectPending will push the pending items that were // gathered while we were in a RECONNECTING state to the socket. 
func (nc *Conn) flushReconnectPendingItems() { if nc.pending == nil { return } if nc.pending.Len() > 0 { nc.bw.Write(nc.pending.Bytes()) } } // Stops the ping timer if set. // Connection lock is held on entry. func (nc *Conn) stopPingTimer() { if nc.ptmr != nil { nc.ptmr.Stop() } } // Try to reconnect using the option parameters. // This function assumes we are allowed to reconnect. func (nc *Conn) doReconnect(err error) { // We want to make sure we have the other watchers shutdown properly // here before we proceed past this point. nc.waitForExits() // FIXME(dlc) - We have an issue here if we have // outstanding flush points (pongs) and they were not // sent out, but are still in the pipe. // Hold the lock manually and release where needed below, // can't do defer here. nc.mu.Lock() // Clear any queued pongs, e.g. pending flush calls. nc.clearPendingFlushCalls() // Clear any errors. nc.err = nil // Perform appropriate callback if needed for a disconnect. // DisconnectedErrCB has priority over deprecated DisconnectedCB if nc.Opts.DisconnectedErrCB != nil { nc.ach.push(func() { nc.Opts.DisconnectedErrCB(nc, err) }) } else if nc.Opts.DisconnectedCB != nil { nc.ach.push(func() { nc.Opts.DisconnectedCB(nc) }) } // This is used to wait on go routines exit if we start them in the loop // but an error occurs after that. waitForGoRoutines := false for len(nc.srvPool) > 0 { cur, err := nc.selectNextServer() if err != nil { nc.err = err break } sleepTime := int64(0) // Sleep appropriate amount of time before the // connection attempt if connecting to same server // we just got disconnected from.. if time.Since(cur.lastAttempt) < nc.Opts.ReconnectWait { sleepTime = int64(nc.Opts.ReconnectWait - time.Since(cur.lastAttempt)) } // On Windows, createConn() will take more than a second when no // server is running at that address. So it could be that the // time elapsed between reconnect attempts is always > than // the set option. Release the lock to give a chance to a parallel // nc.Close() to break the loop. nc.mu.Unlock() if sleepTime <= 0 { runtime.Gosched() } else { time.Sleep(time.Duration(sleepTime)) } // If the readLoop, etc.. go routines were started, wait for them to complete. if waitForGoRoutines { nc.waitForExits() waitForGoRoutines = false } nc.mu.Lock() // Check if we have been closed first. if nc.isClosed() { break } // Mark that we tried a reconnect cur.reconnects++ // Try to create a new connection err = nc.createConn() // Not yet connected, retry... // Continue to hold the lock if err != nil { nc.err = nil continue } // We are reconnected nc.Reconnects++ // Process connect logic if nc.err = nc.processConnectInit(); nc.err != nil { // Check if we should abort reconnect. If so, break out // of the loop and connection will be closed. if nc.ar { break } nc.status = RECONNECTING // Reset the buffered writer to the pending buffer // (was set to a buffered writer on nc.conn in createConn) nc.bw.Reset(nc.pending) continue } // Clear possible lastErr under the connection lock after // a successful processConnectInit(). nc.current.lastErr = nil // Clear out server stats for the server we connected to.. cur.didConnect = true cur.reconnects = 0 // Send existing subscription state nc.resendSubscriptions() // Now send off and clear pending buffer nc.flushReconnectPendingItems() // Flush the buffer nc.err = nc.bw.Flush() if nc.err != nil { nc.status = RECONNECTING // Reset the buffered writer to the pending buffer (bytes.Buffer). 
nc.bw.Reset(nc.pending) // Stop the ping timer (if set) nc.stopPingTimer() // Since processConnectInit() returned without error, the // go routines were started, so wait for them to return // on the next iteration (after releasing the lock). waitForGoRoutines = true continue } // Done with the pending buffer nc.pending = nil // This is where we are truly connected. nc.status = CONNECTED // Queue up the reconnect callback. if nc.Opts.ReconnectedCB != nil { nc.ach.push(func() { nc.Opts.ReconnectedCB(nc) }) } // Release lock here, we will return below. nc.mu.Unlock() // Make sure to flush everything nc.Flush() return } // Call into close.. We have no servers left.. if nc.err == nil { nc.err = ErrNoServers } nc.mu.Unlock() nc.close(CLOSED, true, nil) } // processOpErr handles errors from reading or parsing the protocol. // The lock should not be held entering this function. func (nc *Conn) processOpErr(err error) { nc.mu.Lock() if nc.isConnecting() || nc.isClosed() || nc.isReconnecting() { nc.mu.Unlock() return } if nc.Opts.AllowReconnect && nc.status == CONNECTED { // Set our new status nc.status = RECONNECTING // Stop ping timer if set nc.stopPingTimer() if nc.conn != nil { nc.bw.Flush() nc.conn.Close() nc.conn = nil } // Create pending buffer before reconnecting. nc.pending = new(bytes.Buffer) nc.bw.Reset(nc.pending) go nc.doReconnect(err) nc.mu.Unlock() return } nc.status = DISCONNECTED nc.err = err nc.mu.Unlock() nc.close(CLOSED, true, nil) } // dispatch is responsible for calling any async callbacks func (ac *asyncCallbacksHandler) asyncCBDispatcher() { for { ac.mu.Lock() // Protect for spurious wakeups. We should get out of the // wait only if there is an element to pop from the list. for ac.head == nil { ac.cond.Wait() } cur := ac.head ac.head = cur.next if cur == ac.tail { ac.tail = nil } ac.mu.Unlock() // This signals that the dispatcher has been closed and all // previous callbacks have been dispatched. if cur.f == nil { return } // Invoke callback outside of handler's lock cur.f() } } // Add the given function to the tail of the list and // signals the dispatcher. func (ac *asyncCallbacksHandler) push(f func()) { ac.pushOrClose(f, false) } // Signals that we are closing... func (ac *asyncCallbacksHandler) close() { ac.pushOrClose(nil, true) } // Add the given function to the tail of the list and // signals the dispatcher. func (ac *asyncCallbacksHandler) pushOrClose(f func(), close bool) { ac.mu.Lock() defer ac.mu.Unlock() // Make sure that library is not calling push with nil function, // since this is used to notify the dispatcher that it should stop. if !close && f == nil { panic("pushing a nil callback") } cb := &asyncCB{f: f} if ac.tail != nil { ac.tail.next = cb } else { ac.head = cb } ac.tail = cb if close { ac.cond.Broadcast() } else { ac.cond.Signal() } } // readLoop() will sit on the socket reading and processing the // protocol from the server. It will dispatch appropriately based // on the op type. func (nc *Conn) readLoop() { // Release the wait group on exit defer nc.wg.Done() // Create a parseState if needed. nc.mu.Lock() if nc.ps == nil { nc.ps = &parseState{} } conn := nc.conn nc.mu.Unlock() if conn == nil { return } // Stack based buffer. b := make([]byte, defaultBufSize) for { if n, err := conn.Read(b); err != nil { nc.processOpErr(err) break } else if err = nc.parse(b[:n]); err != nil { nc.processOpErr(err) break } } // Clear the parseState here.. 
nc.mu.Lock() nc.ps = nil nc.mu.Unlock() } // waitForMsgs waits on the conditional shared with readLoop and processMsg. // It is used to deliver messages to asynchronous subscribers. func (nc *Conn) waitForMsgs(s *Subscription) { var closed bool var delivered, max uint64 // Used to account for adjustments to sub.pBytes when we wrap back around. msgLen := -1 for { s.mu.Lock() // Do accounting for last msg delivered here so we only lock once // and drain state trips after callback has returned. if msgLen >= 0 { s.pMsgs-- s.pBytes -= msgLen msgLen = -1 } if s.pHead == nil && !s.closed { s.pCond.Wait() } // Pop the msg off the list m := s.pHead if m != nil { s.pHead = m.next if s.pHead == nil { s.pTail = nil } if m.barrier != nil { s.mu.Unlock() if atomic.AddInt64(&m.barrier.refs, -1) == 0 { m.barrier.f() } continue } msgLen = len(m.Data) } mcb := s.mcb max = s.max closed = s.closed if !s.closed { s.delivered++ delivered = s.delivered } s.mu.Unlock() if closed { break } // Deliver the message. if m != nil && (max == 0 || delivered <= max) { mcb(m) } // If we have hit the max for delivered msgs, remove sub. if max > 0 && delivered >= max { nc.mu.Lock() nc.removeSub(s) nc.mu.Unlock() break } } // Check for barrier messages s.mu.Lock() for m := s.pHead; m != nil; m = s.pHead { if m.barrier != nil { s.mu.Unlock() if atomic.AddInt64(&m.barrier.refs, -1) == 0 { m.barrier.f() } s.mu.Lock() } s.pHead = m.next } s.mu.Unlock() } // processMsg is called by parse and will place the msg on the // appropriate channel/pending queue for processing. If the channel is full, // or the pending queue is over the pending limits, the connection is // considered a slow consumer. func (nc *Conn) processMsg(data []byte) { // Don't lock the connection to avoid server cutting us off if the // flusher is holding the connection lock, trying to send to the server // that is itself trying to send data to us. nc.subsMu.RLock() // Stats atomic.AddUint64(&nc.InMsgs, 1) atomic.AddUint64(&nc.InBytes, uint64(len(data))) sub := nc.subs[nc.ps.ma.sid] if sub == nil { nc.subsMu.RUnlock() return } // Copy them into string subj := string(nc.ps.ma.subject) reply := string(nc.ps.ma.reply) // Doing message create outside of the sub's lock to reduce contention. // It's possible that we end-up not using the message, but that's ok. // FIXME(dlc): Need to copy, should/can do COW? msgPayload := make([]byte, len(data)) copy(msgPayload, data) // FIXME(dlc): Should we recycle these containers? m := &Msg{Data: msgPayload, Subject: subj, Reply: reply, Sub: sub} sub.mu.Lock() // Subscription internal stats (applicable only for non ChanSubscription's) if sub.typ != ChanSubscription { sub.pMsgs++ if sub.pMsgs > sub.pMsgsMax { sub.pMsgsMax = sub.pMsgs } sub.pBytes += len(m.Data) if sub.pBytes > sub.pBytesMax { sub.pBytesMax = sub.pBytes } // Check for a Slow Consumer if (sub.pMsgsLimit > 0 && sub.pMsgs > sub.pMsgsLimit) || (sub.pBytesLimit > 0 && sub.pBytes > sub.pBytesLimit) { goto slowConsumer } } // We have two modes of delivery. One is the channel, used by channel // subscribers and syncSubscribers, the other is a linked list for async. if sub.mch != nil { select { case sub.mch <- m: default: goto slowConsumer } } else { // Push onto the async pList if sub.pHead == nil { sub.pHead = m sub.pTail = m sub.pCond.Signal() } else { sub.pTail.next = m sub.pTail = m } } // Clear SlowConsumer status. 
sub.sc = false sub.mu.Unlock() nc.subsMu.RUnlock() return slowConsumer: sub.dropped++ sc := !sub.sc sub.sc = true // Undo stats from above if sub.typ != ChanSubscription { sub.pMsgs-- sub.pBytes -= len(m.Data) } sub.mu.Unlock() nc.subsMu.RUnlock() if sc { // Now we need connection's lock and we may end-up in the situation // that we were trying to avoid, except that in this case, the client // is already experiencing client-side slow consumer situation. nc.mu.Lock() nc.err = ErrSlowConsumer if nc.Opts.AsyncErrorCB != nil { nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, sub, ErrSlowConsumer) }) } nc.mu.Unlock() } } // processPermissionsViolation is called when the server signals a subject // permissions violation on either publish or subscribe. func (nc *Conn) processPermissionsViolation(err string) { nc.mu.Lock() // create error here so we can pass it as a closure to the async cb dispatcher. e := errors.New("nats: " + err) nc.err = e if nc.Opts.AsyncErrorCB != nil { nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, e) }) } nc.mu.Unlock() } // processAuthError generally processing for auth errors. We want to do retries // unless we get the same error again. This allows us for instance to swap credentials // and have the app reconnect, but if nothing is changing we should bail. // This function will return true if the connection should be closed, false otherwise. // Connection lock is held on entry func (nc *Conn) processAuthError(err error) bool { nc.err = err if !nc.initc && nc.Opts.AsyncErrorCB != nil { nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) }) } // We should give up if we tried twice on this server and got the // same error. if nc.current.lastErr == err { nc.ar = true } else { nc.current.lastErr = err } return nc.ar } // flusher is a separate Go routine that will process flush requests for the write // bufio. This allows coalescing of writes to the underlying socket. func (nc *Conn) flusher() { // Release the wait group defer nc.wg.Done() // snapshot the bw and conn since they can change from underneath of us. nc.mu.Lock() bw := nc.bw conn := nc.conn fch := nc.fch nc.mu.Unlock() if conn == nil || bw == nil { return } for { if _, ok := <-fch; !ok { return } nc.mu.Lock() // Check to see if we should bail out. if !nc.isConnected() || nc.isConnecting() || bw != nc.bw || conn != nc.conn { nc.mu.Unlock() return } if bw.Buffered() > 0 { if err := bw.Flush(); err != nil { if nc.err == nil { nc.err = err } } } nc.mu.Unlock() } } // processPing will send an immediate pong protocol response to the // server. The server uses this mechanism to detect dead clients. func (nc *Conn) processPing() { nc.sendProto(pongProto) } // processPong is used to process responses to the client's ping // messages. We use pings for the flush mechanism as well. func (nc *Conn) processPong() { var ch chan struct{} nc.mu.Lock() if len(nc.pongs) > 0 { ch = nc.pongs[0] nc.pongs = nc.pongs[1:] } nc.pout = 0 nc.mu.Unlock() if ch != nil { ch <- struct{}{} } } // processOK is a placeholder for processing OK messages. func (nc *Conn) processOK() { // do nothing } // processInfo is used to parse the info messages sent // from the server. // This function may update the server pool. func (nc *Conn) processInfo(info string) error { if info == _EMPTY_ { return nil } ncInfo := serverInfo{} if err := json.Unmarshal([]byte(info), &ncInfo); err != nil { return err } // Copy content into connection's info structure. 
nc.info = ncInfo // The array could be empty/not present on initial connect, // if advertise is disabled on that server, or servers that // did not include themselves in the async INFO protocol. // If empty, do not remove the implicit servers from the pool. if len(ncInfo.ConnectURLs) == 0 { return nil } // Note about pool randomization: when the pool was first created, // it was randomized (if allowed). We keep the order the same (removing // implicit servers that are no longer sent to us). New URLs are sent // to us in no specific order so don't need extra randomization. hasNew := false // This is what we got from the server we are connected to. urls := nc.info.ConnectURLs // Transform that to a map for easy lookups tmp := make(map[string]struct{}, len(urls)) for _, curl := range urls { tmp[curl] = struct{}{} } // Walk the pool and removed the implicit servers that are no longer in the // given array/map sp := nc.srvPool for i := 0; i < len(sp); i++ { srv := sp[i] curl := srv.url.Host // Check if this URL is in the INFO protocol _, inInfo := tmp[curl] // Remove from the temp map so that at the end we are left with only // new (or restarted) servers that need to be added to the pool. delete(tmp, curl) // Keep servers that were set through Options, but also the one that // we are currently connected to (even if it is a discovered server). if !srv.isImplicit || srv.url == nc.current.url { continue } if !inInfo { // Remove from server pool. Keep current order. copy(sp[i:], sp[i+1:]) nc.srvPool = sp[:len(sp)-1] sp = nc.srvPool i-- } } // Figure out if we should save off the current non-IP hostname if we encounter a bare IP. saveTLS := nc.current != nil && !hostIsIP(nc.current.url) // If there are any left in the tmp map, these are new (or restarted) servers // and need to be added to the pool. for curl := range tmp { // Before adding, check if this is a new (as in never seen) URL. // This is used to figure out if we invoke the DiscoveredServersCB if _, present := nc.urls[curl]; !present { hasNew = true } nc.addURLToPool(fmt.Sprintf("%s://%s", nc.connScheme(), curl), true, saveTLS) } if hasNew && !nc.initc && nc.Opts.DiscoveredServersCB != nil { nc.ach.push(func() { nc.Opts.DiscoveredServersCB(nc) }) } return nil } // processAsyncInfo does the same than processInfo, but is called // from the parser. Calls processInfo under connection's lock // protection. func (nc *Conn) processAsyncInfo(info []byte) { nc.mu.Lock() // Ignore errors, we will simply not update the server pool... nc.processInfo(string(info)) nc.mu.Unlock() } // LastError reports the last error encountered via the connection. // It can be used reliably within ClosedCB in order to find out reason // why connection was closed for example. func (nc *Conn) LastError() error { if nc == nil { return ErrInvalidConnection } nc.mu.RLock() err := nc.err nc.mu.RUnlock() return err } // Check if the given error string is an auth error, and if so returns // the corresponding ErrXXX error, nil otherwise func checkAuthError(e string) error { if strings.HasPrefix(e, AUTHORIZATION_ERR) { return ErrAuthorization } if strings.HasPrefix(e, AUTHENTICATION_EXPIRED_ERR) { return ErrAuthExpired } return nil } // processErr processes any error messages from the server and // sets the connection's lastError. func (nc *Conn) processErr(ie string) { // Trim, remove quotes ne := normalizeErr(ie) // convert to lower case. e := strings.ToLower(ne) close := false // FIXME(dlc) - process Slow Consumer signals special. 
if e == STALE_CONNECTION { nc.processOpErr(ErrStaleConnection) } else if strings.HasPrefix(e, PERMISSIONS_ERR) { nc.processPermissionsViolation(ne) } else if authErr := checkAuthError(e); authErr != nil { nc.mu.Lock() close = nc.processAuthError(authErr) nc.mu.Unlock() } else { close = true nc.mu.Lock() nc.err = errors.New("nats: " + ne) nc.mu.Unlock() } if close { nc.close(CLOSED, true, nil) } } // kickFlusher will send a signal on a channel to kick the // flush Go routine to flush data to the server. func (nc *Conn) kickFlusher() { if nc.bw != nil { select { case nc.fch <- struct{}{}: default: } } } // Publish publishes the data argument to the given subject. The data // argument is left untouched and needs to be correctly interpreted on // the receiver. func (nc *Conn) Publish(subj string, data []byte) error { return nc.publish(subj, _EMPTY_, data) } // PublishMsg publishes the Msg structure, which includes the // Subject, an optional Reply and an optional Data field. func (nc *Conn) PublishMsg(m *Msg) error { if m == nil { return ErrInvalidMsg } return nc.publish(m.Subject, m.Reply, m.Data) } // PublishRequest will perform a Publish() expecting a response on the // reply subject. Use Request() for automatically waiting for a response // inline. func (nc *Conn) PublishRequest(subj, reply string, data []byte) error { return nc.publish(subj, reply, data) } // Used for handrolled itoa const digits = "0123456789" // publish is the internal function to publish messages to a nats-server. // Sends a protocol data message by queuing into the bufio writer // and kicking the flush go routine. These writes should be protected. func (nc *Conn) publish(subj, reply string, data []byte) error { if nc == nil { return ErrInvalidConnection } if subj == "" { return ErrBadSubject } nc.mu.Lock() if nc.isClosed() { nc.mu.Unlock() return ErrConnectionClosed } if nc.isDrainingPubs() { nc.mu.Unlock() return ErrConnectionDraining } // Proactively reject payloads over the threshold set by server. msgSize := int64(len(data)) if msgSize > nc.info.MaxPayload { nc.mu.Unlock() return ErrMaxPayload } // Check if we are reconnecting, and if so check if // we have exceeded our reconnect outbound buffer limits. if nc.isReconnecting() { // Flush to underlying buffer. nc.bw.Flush() // Check if we are over if nc.pending.Len() >= nc.Opts.ReconnectBufSize { nc.mu.Unlock() return ErrReconnectBufExceeded } } msgh := nc.scratch[:len(_PUB_P_)] msgh = append(msgh, subj...) msgh = append(msgh, ' ') if reply != "" { msgh = append(msgh, reply...) msgh = append(msgh, ' ') } // We could be smarter here, but simple loop is ok, // just avoid strconv in fast path // FIXME(dlc) - Find a better way here. // msgh = strconv.AppendInt(msgh, int64(len(data)), 10) var b [12]byte var i = len(b) if len(data) > 0 { for l := len(data); l > 0; l /= 10 { i -= 1 b[i] = digits[l%10] } } else { i -= 1 b[i] = digits[0] } msgh = append(msgh, b[i:]...) msgh = append(msgh, _CRLF_...) _, err := nc.bw.Write(msgh) if err == nil { _, err = nc.bw.Write(data) } if err == nil { _, err = nc.bw.WriteString(_CRLF_) } if err != nil { nc.mu.Unlock() return err } nc.OutMsgs++ nc.OutBytes += uint64(len(data)) if len(nc.fch) == 0 { nc.kickFlusher() } nc.mu.Unlock() return nil } // respHandler is the global response handler. It will look up // the appropriate channel based on the last token and place // the message on the channel if possible. func (nc *Conn) respHandler(m *Msg) { nc.mu.Lock() // Just return if closed.
if nc.isClosed() { nc.mu.Unlock() return } var mch chan *Msg // Grab mch rt := nc.respToken(m.Subject) if rt != _EMPTY_ { mch = nc.respMap[rt] // Delete the key regardless, one response only. delete(nc.respMap, rt) } else if len(nc.respMap) == 1 { // If the server has rewritten the subject, the response token (rt) // will not match (could be the case with JetStream). If that is the // case and there is a single entry, use that. for k, v := range nc.respMap { mch = v delete(nc.respMap, k) break } } nc.mu.Unlock() // Don't block, let Request timeout instead, mch is // buffered and we should delete the key before a // second response is processed. select { case mch <- m: default: return } } // Helper to setup and send new request style requests. Return the chan to receive the response. func (nc *Conn) createNewRequestAndSend(subj string, data []byte) (chan *Msg, string, error) { // Do setup for the new style if needed. if nc.respMap == nil { nc.initNewResp() } // Create new literal Inbox and map to a chan msg. mch := make(chan *Msg, RequestChanLen) respInbox := nc.newRespInbox() token := respInbox[respInboxPrefixLen:] nc.respMap[token] = mch if nc.respMux == nil { // Create the response subscription we will use for all new style responses. // This will be on an _INBOX with an additional terminal token. The subscription // will be on a wildcard. s, err := nc.subscribeLocked(nc.respSub, _EMPTY_, nc.respHandler, nil, false) if err != nil { nc.mu.Unlock() return nil, token, err } nc.respScanf = strings.Replace(nc.respSub, "*", "%s", -1) nc.respMux = s } nc.mu.Unlock() if err := nc.PublishRequest(subj, respInbox, data); err != nil { return nil, token, err } return mch, token, nil } // Request will send a request payload and deliver the response message, // or an error, including a timeout if no message was received properly. func (nc *Conn) Request(subj string, data []byte, timeout time.Duration) (*Msg, error) { if nc == nil { return nil, ErrInvalidConnection } nc.mu.Lock() // If user wants the old style. if nc.Opts.UseOldRequestStyle { nc.mu.Unlock() return nc.oldRequest(subj, data, timeout) } mch, token, err := nc.createNewRequestAndSend(subj, data) if err != nil { return nil, err } t := globalTimerPool.Get(timeout) defer globalTimerPool.Put(t) var ok bool var msg *Msg select { case msg, ok = <-mch: if !ok { return nil, ErrConnectionClosed } case <-t.C: nc.mu.Lock() delete(nc.respMap, token) nc.mu.Unlock() return nil, ErrTimeout } return msg, nil } // oldRequest will create an Inbox and perform a Request() call // with the Inbox reply and return the first reply received. // This is optimized for the case of multiple responses. func (nc *Conn) oldRequest(subj string, data []byte, timeout time.Duration) (*Msg, error) { inbox := NewInbox() ch := make(chan *Msg, RequestChanLen) s, err := nc.subscribe(inbox, _EMPTY_, nil, ch, false) if err != nil { return nil, err } s.AutoUnsubscribe(1) defer s.Unsubscribe() err = nc.PublishRequest(subj, inbox, data) if err != nil { return nil, err } return s.NextMsg(timeout) } // InboxPrefix is the prefix for all inbox subjects. const ( InboxPrefix = "_INBOX." inboxPrefixLen = len(InboxPrefix) respInboxPrefixLen = inboxPrefixLen + nuidSize + 1 replySuffixLen = 8 // Gives us 62^8 rdigits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" base = 62 ) // NewInbox will return an inbox string which can be used for directed replies from // subscribers. These are guaranteed to be unique, but can be shared and subscribed // to by others. 
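// Usage sketch for the request/reply API above (illustrative only; the subject
// and payloads are placeholders):
//
//	// Requester: send a request and wait up to two seconds for the reply.
//	msg, err := nc.Request("help.please", []byte("help"), 2*time.Second)
//	if err == nats.ErrTimeout {
//		// no responder answered within the timeout
//	} else if err == nil {
//		log.Printf("reply: %s", msg.Data)
//	}
//
//	// Responder: answer on the request's Reply subject.
//	nc.Subscribe("help.please", func(m *nats.Msg) {
//		nc.Publish(m.Reply, []byte("I can help"))
//	})
//
// With the UseOldRequestStyle() Option, each Request creates its own inbox
// subscription instead of using the shared wildcard response subscription
// set up above.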
func NewInbox() string { var b [inboxPrefixLen + nuidSize]byte pres := b[:inboxPrefixLen] copy(pres, InboxPrefix) ns := b[inboxPrefixLen:] copy(ns, nuid.Next()) return string(b[:]) } // Function to init new response structures. func (nc *Conn) initNewResp() { // _INBOX wildcard nc.respSub = fmt.Sprintf("%s.*", NewInbox()) nc.respMap = make(map[string]chan *Msg) nc.respRand = rand.New(rand.NewSource(time.Now().UnixNano())) } // newRespInbox creates a new literal response subject // that will trigger the mux subscription handler. // Lock should be held. func (nc *Conn) newRespInbox() string { if nc.respMap == nil { nc.initNewResp() } var b [respInboxPrefixLen + replySuffixLen]byte pres := b[:respInboxPrefixLen] copy(pres, nc.respSub) rn := nc.respRand.Int63() for i, l := respInboxPrefixLen, rn; i < len(b); i++ { b[i] = rdigits[l%base] l /= base } return string(b[:]) } // NewRespInbox is the new format used for _INBOX. func (nc *Conn) NewRespInbox() string { nc.mu.Lock() s := nc.newRespInbox() nc.mu.Unlock() return s } // respToken will return the last token of a literal response inbox // which we use for the message channel lookup. This needs to do a // scan to protect itself against the server changing the subject. // Lock should be held. func (nc *Conn) respToken(respInbox string) string { var token string n, err := fmt.Sscanf(respInbox, nc.respScanf, &token) if err != nil || n != 1 { return "" } return token } // Subscribe will express interest in the given subject. The subject // can have wildcards (partial:*, full:>). Messages will be delivered // to the associated MsgHandler. func (nc *Conn) Subscribe(subj string, cb MsgHandler) (*Subscription, error) { return nc.subscribe(subj, _EMPTY_, cb, nil, false) } // ChanSubscribe will express interest in the given subject and place // all messages received on the channel. // You should not close the channel until sub.Unsubscribe() has been called. func (nc *Conn) ChanSubscribe(subj string, ch chan *Msg) (*Subscription, error) { return nc.subscribe(subj, _EMPTY_, nil, ch, false) } // ChanQueueSubscribe will express interest in the given subject. // All subscribers with the same queue name will form the queue group // and only one member of the group will be selected to receive any given message, // which will be placed on the channel. // You should not close the channel until sub.Unsubscribe() has been called. // Note: This is the same as QueueSubscribeSyncWithChan. func (nc *Conn) ChanQueueSubscribe(subj, group string, ch chan *Msg) (*Subscription, error) { return nc.subscribe(subj, group, nil, ch, false) } // SubscribeSync will express interest in the given subject. Messages will // be received synchronously using Subscription.NextMsg(). func (nc *Conn) SubscribeSync(subj string) (*Subscription, error) { if nc == nil { return nil, ErrInvalidConnection } mch := make(chan *Msg, nc.Opts.SubChanLen) s, e := nc.subscribe(subj, _EMPTY_, nil, mch, true) return s, e } // QueueSubscribe creates an asynchronous queue subscriber on the given subject. // All subscribers with the same queue name will form the queue group and // only one member of the group will be selected to receive any given // message asynchronously. func (nc *Conn) QueueSubscribe(subj, queue string, cb MsgHandler) (*Subscription, error) { return nc.subscribe(subj, queue, cb, nil, false) } // QueueSubscribeSync creates a synchronous queue subscriber on the given // subject.
All subscribers with the same queue name will form the queue // group and only one member of the group will be selected to receive any // given message synchronously using Subscription.NextMsg(). func (nc *Conn) QueueSubscribeSync(subj, queue string) (*Subscription, error) { mch := make(chan *Msg, nc.Opts.SubChanLen) s, e := nc.subscribe(subj, queue, nil, mch, true) return s, e } // QueueSubscribeSyncWithChan will express interest in the given subject. // All subscribers with the same queue name will form the queue group // and only one member of the group will be selected to receive any given message, // which will be placed on the channel. // You should not close the channel until sub.Unsubscribe() has been called. // Note: This is the same as ChanQueueSubscribe. func (nc *Conn) QueueSubscribeSyncWithChan(subj, queue string, ch chan *Msg) (*Subscription, error) { return nc.subscribe(subj, queue, nil, ch, false) } // badSubject will do a quick test on whether a subject is acceptable. // Spaces are not allowed and all tokens should be > 0 in len. func badSubject(subj string) bool { if strings.ContainsAny(subj, " \t\r\n") { return true } tokens := strings.Split(subj, ".") for _, t := range tokens { if len(t) == 0 { return true } } return false } // badQueue will check a queue name for whitespace. func badQueue(qname string) bool { return strings.ContainsAny(qname, " \t\r\n") } // subscribe is the internal subscribe function that indicates interest in a subject. func (nc *Conn) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, isSync bool) (*Subscription, error) { if nc == nil { return nil, ErrInvalidConnection } nc.mu.Lock() s, err := nc.subscribeLocked(subj, queue, cb, ch, isSync) nc.mu.Unlock() return s, err } func (nc *Conn) subscribeLocked(subj, queue string, cb MsgHandler, ch chan *Msg, isSync bool) (*Subscription, error) { if nc == nil { return nil, ErrInvalidConnection } if badSubject(subj) { return nil, ErrBadSubject } if queue != "" && badQueue(queue) { return nil, ErrBadQueueName } // Check for some error conditions. if nc.isClosed() { return nil, ErrConnectionClosed } if nc.isDraining() { return nil, ErrConnectionDraining } if cb == nil && ch == nil { return nil, ErrBadSubscription } sub := &Subscription{Subject: subj, Queue: queue, mcb: cb, conn: nc} // Set pending limits. sub.pMsgsLimit = DefaultSubPendingMsgsLimit sub.pBytesLimit = DefaultSubPendingBytesLimit // If we have an async callback, start up a sub specific // Go routine to deliver the messages. if cb != nil { sub.typ = AsyncSubscription sub.pCond = sync.NewCond(&sub.mu) go nc.waitForMsgs(sub) } else if !isSync { sub.typ = ChanSubscription sub.mch = ch } else { // Sync Subscription sub.typ = SyncSubscription sub.mch = ch } nc.subsMu.Lock() nc.ssid++ sub.sid = nc.ssid nc.subs[sub.sid] = sub nc.subsMu.Unlock() // We will send these for all subs when we reconnect // so that we can suppress here if reconnecting. if !nc.isReconnecting() { fmt.Fprintf(nc.bw, subProto, subj, queue, sub.sid) // Kick flusher if needed. if len(nc.fch) == 0 { nc.kickFlusher() } } return sub, nil } // NumSubscriptions returns the number of active subscriptions.
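// Usage sketch for the subscription variants above (illustrative only; the
// subjects and queue name are placeholders):
//
//	// Asynchronous subscriber with a callback.
//	sub, err := nc.Subscribe("updates.>", func(m *nats.Msg) {
//		log.Printf("got %q on %s", m.Data, m.Subject)
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//
//	// Queue group: each message is delivered to only one member of "workers".
//	nc.QueueSubscribe("jobs", "workers", func(m *nats.Msg) { /* process job */ })
//
//	// Channel-based subscriber; do not close ch until sub.Unsubscribe() is called.
//	ch := make(chan *nats.Msg, 64)
//	nc.ChanSubscribe("updates.>", ch)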
func (nc *Conn) NumSubscriptions() int { nc.mu.RLock() defer nc.mu.RUnlock() return len(nc.subs) } // Lock for nc should be held here upon entry func (nc *Conn) removeSub(s *Subscription) { nc.subsMu.Lock() delete(nc.subs, s.sid) nc.subsMu.Unlock() s.mu.Lock() defer s.mu.Unlock() // Release callers on NextMsg for SyncSubscription only if s.mch != nil && s.typ == SyncSubscription { close(s.mch) } s.mch = nil // Mark as invalid s.closed = true if s.pCond != nil { s.pCond.Broadcast() } } // SubscriptionType is the type of the Subscription. type SubscriptionType int // The different types of subscription types. const ( AsyncSubscription = SubscriptionType(iota) SyncSubscription ChanSubscription NilSubscription ) // Type returns the type of Subscription. func (s *Subscription) Type() SubscriptionType { if s == nil { return NilSubscription } s.mu.Lock() defer s.mu.Unlock() return s.typ } // IsValid returns a boolean indicating whether the subscription // is still active. This will return false if the subscription has // already been closed. func (s *Subscription) IsValid() bool { if s == nil { return false } s.mu.Lock() defer s.mu.Unlock() return s.conn != nil && !s.closed } // Drain will remove interest but continue callbacks until all messages // have been processed. func (s *Subscription) Drain() error { if s == nil { return ErrBadSubscription } s.mu.Lock() conn := s.conn s.mu.Unlock() if conn == nil { return ErrBadSubscription } return conn.unsubscribe(s, 0, true) } // Unsubscribe will remove interest in the given subject. func (s *Subscription) Unsubscribe() error { if s == nil { return ErrBadSubscription } s.mu.Lock() conn := s.conn closed := s.closed s.mu.Unlock() if conn == nil || conn.IsClosed() { return ErrConnectionClosed } if closed { return ErrBadSubscription } if conn.IsDraining() { return ErrConnectionDraining } return conn.unsubscribe(s, 0, false) } // checkDrained will watch for a subscription to be fully drained // and then remove it. func (nc *Conn) checkDrained(sub *Subscription) { if nc == nil || sub == nil { return } // This allows us to know that whatever we have in the client pending // is correct and the server will not send additional information. nc.Flush() // Once we are here we just wait for Pending to reach 0 or // any other state to exit this go routine. for { // check connection is still valid. if nc.IsClosed() { return } // Check subscription state sub.mu.Lock() conn := sub.conn closed := sub.closed pMsgs := sub.pMsgs sub.mu.Unlock() if conn == nil || closed || pMsgs == 0 { nc.mu.Lock() nc.removeSub(sub) nc.mu.Unlock() return } time.Sleep(100 * time.Millisecond) } } // AutoUnsubscribe will issue an automatic Unsubscribe that is // processed by the server when max messages have been received. // This can be useful when sending a request to an unknown number // of subscribers. func (s *Subscription) AutoUnsubscribe(max int) error { if s == nil { return ErrBadSubscription } s.mu.Lock() conn := s.conn closed := s.closed s.mu.Unlock() if conn == nil || closed { return ErrBadSubscription } return conn.unsubscribe(s, max, false) } // unsubscribe performs the low level unsubscribe to the server. 
// Use Subscription.Unsubscribe() func (nc *Conn) unsubscribe(sub *Subscription, max int, drainMode bool) error { nc.mu.Lock() // ok here, but defer is expensive defer nc.mu.Unlock() defer nc.kickFlusher() if nc.isClosed() { return ErrConnectionClosed } nc.subsMu.RLock() s := nc.subs[sub.sid] nc.subsMu.RUnlock() // Already unsubscribed if s == nil { return nil } maxStr := _EMPTY_ if max > 0 { s.max = uint64(max) maxStr = strconv.Itoa(max) } else if !drainMode { nc.removeSub(s) } if drainMode { go nc.checkDrained(sub) } // We will send these for all subs when we reconnect // so that we can suppress here. if !nc.isReconnecting() { fmt.Fprintf(nc.bw, unsubProto, s.sid, maxStr) } return nil } // NextMsg will return the next message available to a synchronous subscriber // or block until one is available. An error is returned if the subscription is invalid (ErrBadSubscription), // the connection is closed (ErrConnectionClosed), or the timeout is reached (ErrTimeout). func (s *Subscription) NextMsg(timeout time.Duration) (*Msg, error) { if s == nil { return nil, ErrBadSubscription } s.mu.Lock() err := s.validateNextMsgState() if err != nil { s.mu.Unlock() return nil, err } // snapshot mch := s.mch s.mu.Unlock() var ok bool var msg *Msg // If something is available right away, let's optimize that case. select { case msg, ok = <-mch: if !ok { return nil, s.getNextMsgErr() } if err := s.processNextMsgDelivered(msg); err != nil { return nil, err } else { return msg, nil } default: } // If we are here a message was not immediately available, so lets loop // with a timeout. t := globalTimerPool.Get(timeout) defer globalTimerPool.Put(t) select { case msg, ok = <-mch: if !ok { return nil, s.getNextMsgErr() } if err := s.processNextMsgDelivered(msg); err != nil { return nil, err } case <-t.C: return nil, ErrTimeout } return msg, nil } // validateNextMsgState checks whether the subscription is in a valid // state to call NextMsg and be delivered another message synchronously. // This should be called while holding the lock. func (s *Subscription) validateNextMsgState() error { if s.connClosed { return ErrConnectionClosed } if s.mch == nil { if s.max > 0 && s.delivered >= s.max { return ErrMaxMessages } else if s.closed { return ErrBadSubscription } } if s.mcb != nil { return ErrSyncSubRequired } if s.sc { s.sc = false return ErrSlowConsumer } return nil } // This is called when the sync channel has been closed. // The error returned will be either connection or subscription // closed depending on what caused NextMsg() to fail. func (s *Subscription) getNextMsgErr() error { s.mu.Lock() defer s.mu.Unlock() if s.connClosed { return ErrConnectionClosed } return ErrBadSubscription } // processNextMsgDelivered takes a message and applies the needed // accounting to the stats from the subscription, returning an // error in case we have the maximum number of messages have been // delivered already. It should not be called while holding the lock. func (s *Subscription) processNextMsgDelivered(msg *Msg) error { s.mu.Lock() nc := s.conn max := s.max // Update some stats. s.delivered++ delivered := s.delivered if s.typ == SyncSubscription { s.pMsgs-- s.pBytes -= len(msg.Data) } s.mu.Unlock() if max > 0 { if delivered > max { return ErrMaxMessages } // Remove subscription if we have reached max. if delivered == max { nc.mu.Lock() nc.removeSub(s) nc.mu.Unlock() } } return nil } // Queued returns the number of queued messages in the client for this subscription. 
// DEPRECATED: Use Pending() func (s *Subscription) QueuedMsgs() (int, error) { m, _, err := s.Pending() return int(m), err } // Pending returns the number of queued messages and queued bytes in the client for this subscription. func (s *Subscription) Pending() (int, int, error) { if s == nil { return -1, -1, ErrBadSubscription } s.mu.Lock() defer s.mu.Unlock() if s.conn == nil || s.closed { return -1, -1, ErrBadSubscription } if s.typ == ChanSubscription { return -1, -1, ErrTypeSubscription } return s.pMsgs, s.pBytes, nil } // MaxPending returns the maximum number of queued messages and queued bytes seen so far. func (s *Subscription) MaxPending() (int, int, error) { if s == nil { return -1, -1, ErrBadSubscription } s.mu.Lock() defer s.mu.Unlock() if s.conn == nil || s.closed { return -1, -1, ErrBadSubscription } if s.typ == ChanSubscription { return -1, -1, ErrTypeSubscription } return s.pMsgsMax, s.pBytesMax, nil } // ClearMaxPending resets the maximums seen so far. func (s *Subscription) ClearMaxPending() error { if s == nil { return ErrBadSubscription } s.mu.Lock() defer s.mu.Unlock() if s.conn == nil || s.closed { return ErrBadSubscription } if s.typ == ChanSubscription { return ErrTypeSubscription } s.pMsgsMax, s.pBytesMax = 0, 0 return nil } // Pending Limits const ( DefaultSubPendingMsgsLimit = 65536 DefaultSubPendingBytesLimit = 65536 * 1024 ) // PendingLimits returns the current limits for this subscription. // If no error is returned, a negative value indicates that the // given metric is not limited. func (s *Subscription) PendingLimits() (int, int, error) { if s == nil { return -1, -1, ErrBadSubscription } s.mu.Lock() defer s.mu.Unlock() if s.conn == nil || s.closed { return -1, -1, ErrBadSubscription } if s.typ == ChanSubscription { return -1, -1, ErrTypeSubscription } return s.pMsgsLimit, s.pBytesLimit, nil } // SetPendingLimits sets the limits for pending msgs and bytes for this subscription. // Zero is not allowed. Any negative value means that the given metric is not limited. func (s *Subscription) SetPendingLimits(msgLimit, bytesLimit int) error { if s == nil { return ErrBadSubscription } s.mu.Lock() defer s.mu.Unlock() if s.conn == nil || s.closed { return ErrBadSubscription } if s.typ == ChanSubscription { return ErrTypeSubscription } if msgLimit == 0 || bytesLimit == 0 { return ErrInvalidArg } s.pMsgsLimit, s.pBytesLimit = msgLimit, bytesLimit return nil } // Delivered returns the number of delivered messages for this subscription. func (s *Subscription) Delivered() (int64, error) { if s == nil { return -1, ErrBadSubscription } s.mu.Lock() defer s.mu.Unlock() if s.conn == nil || s.closed { return -1, ErrBadSubscription } return int64(s.delivered), nil } // Dropped returns the number of known dropped messages for this subscription. // This will correspond to messages dropped by violations of PendingLimits. If // the server declares the connection a SlowConsumer, this number may not be // valid. func (s *Subscription) Dropped() (int, error) { if s == nil { return -1, ErrBadSubscription } s.mu.Lock() defer s.mu.Unlock() if s.conn == nil || s.closed { return -1, ErrBadSubscription } return s.dropped, nil } // Respond allows a convenient way to respond to requests in service based subscriptions. func (m *Msg) Respond(data []byte) error { if m == nil || m.Sub == nil { return ErrMsgNotBound } if m.Reply == "" { return ErrMsgNoReply } m.Sub.mu.Lock() nc := m.Sub.conn m.Sub.mu.Unlock() // No need to check the connection here since the call to publish will do all the checking. 
return nc.Publish(m.Reply, data) } // FIXME: This is a hack // removeFlushEntry is needed when we need to discard queued up responses // for our pings as part of a flush call. This happens when we have a flush // call outstanding and we call close. func (nc *Conn) removeFlushEntry(ch chan struct{}) bool { nc.mu.Lock() defer nc.mu.Unlock() if nc.pongs == nil { return false } for i, c := range nc.pongs { if c == ch { nc.pongs[i] = nil return true } } return false } // The lock must be held entering this function. func (nc *Conn) sendPing(ch chan struct{}) { nc.pongs = append(nc.pongs, ch) nc.bw.WriteString(pingProto) // Flush in place. nc.bw.Flush() } // This will fire periodically and send a client origin // ping to the server. Will also check that we have received // responses from the server. func (nc *Conn) processPingTimer() { nc.mu.Lock() if nc.status != CONNECTED { nc.mu.Unlock() return } // Check for violation nc.pout++ if nc.pout > nc.Opts.MaxPingsOut { nc.mu.Unlock() nc.processOpErr(ErrStaleConnection) return } nc.sendPing(nil) nc.ptmr.Reset(nc.Opts.PingInterval) nc.mu.Unlock() } // FlushTimeout allows a Flush operation to have an associated timeout. func (nc *Conn) FlushTimeout(timeout time.Duration) (err error) { if nc == nil { return ErrInvalidConnection } if timeout <= 0 { return ErrBadTimeout } nc.mu.Lock() if nc.isClosed() { nc.mu.Unlock() return ErrConnectionClosed } t := globalTimerPool.Get(timeout) defer globalTimerPool.Put(t) // Create a buffered channel to prevent chan send to block // in processPong() if this code here times out just when // PONG was received. ch := make(chan struct{}, 1) nc.sendPing(ch) nc.mu.Unlock() select { case _, ok := <-ch: if !ok { err = ErrConnectionClosed } else { close(ch) } case <-t.C: err = ErrTimeout } if err != nil { nc.removeFlushEntry(ch) } return } // Flush will perform a round trip to the server and return when it // receives the internal reply. func (nc *Conn) Flush() error { return nc.FlushTimeout(60 * time.Second) } // Buffered will return the number of bytes buffered to be sent to the server. // FIXME(dlc) take into account disconnected state. func (nc *Conn) Buffered() (int, error) { nc.mu.RLock() defer nc.mu.RUnlock() if nc.isClosed() || nc.bw == nil { return -1, ErrConnectionClosed } return nc.bw.Buffered(), nil } // resendSubscriptions will send our subscription state back to the // server. Used in reconnects func (nc *Conn) resendSubscriptions() { // Since we are going to send protocols to the server, we don't want to // be holding the subsMu lock (which is used in processMsg). So copy // the subscriptions in a temporary array. nc.subsMu.RLock() subs := make([]*Subscription, 0, len(nc.subs)) for _, s := range nc.subs { subs = append(subs, s) } nc.subsMu.RUnlock() for _, s := range subs { adjustedMax := uint64(0) s.mu.Lock() if s.max > 0 { if s.delivered < s.max { adjustedMax = s.max - s.delivered } // adjustedMax could be 0 here if the number of delivered msgs // reached the max, if so unsubscribe. if adjustedMax == 0 { s.mu.Unlock() fmt.Fprintf(nc.bw, unsubProto, s.sid, _EMPTY_) continue } } s.mu.Unlock() fmt.Fprintf(nc.bw, subProto, s.Subject, s.Queue, s.sid) if adjustedMax > 0 { maxStr := strconv.Itoa(int(adjustedMax)) fmt.Fprintf(nc.bw, unsubProto, s.sid, maxStr) } } } // This will clear any pending flush calls and release pending calls. // Lock is assumed to be held by the caller. func (nc *Conn) clearPendingFlushCalls() { // Clear any queued pongs, e.g. pending flush calls. 
for _, ch := range nc.pongs { if ch != nil { close(ch) } } nc.pongs = nil } // This will clear any pending Request calls. // Lock is assumed to be held by the caller. func (nc *Conn) clearPendingRequestCalls() { if nc.respMap == nil { return } for key, ch := range nc.respMap { if ch != nil { close(ch) delete(nc.respMap, key) } } } // Low level close call that will do correct cleanup and set // desired status. Also controls whether user defined callbacks // will be triggered. The lock should not be held entering this // function. This function will handle the locking manually. func (nc *Conn) close(status Status, doCBs bool, err error) { nc.mu.Lock() if nc.isClosed() { nc.status = status nc.mu.Unlock() return } nc.status = CLOSED // Kick the Go routines so they fall out. nc.kickFlusher() nc.mu.Unlock() nc.mu.Lock() // Clear any queued pongs, e.g. pending flush calls. nc.clearPendingFlushCalls() // Clear any queued and blocking Requests. nc.clearPendingRequestCalls() // Stop ping timer if set. nc.stopPingTimer() nc.ptmr = nil // Need to close and set tcp conn to nil if reconnect loop has stopped, // otherwise we would incorrectly invoke Disconnect handler (if set) // down below. if nc.ar && nc.conn != nil { nc.conn.Close() nc.conn = nil } else if nc.conn != nil { // Go ahead and make sure we have flushed the outbound nc.bw.Flush() defer nc.conn.Close() } // Close sync subscriber channels and release any // pending NextMsg() calls. nc.subsMu.Lock() for _, s := range nc.subs { s.mu.Lock() // Release callers on NextMsg for SyncSubscription only if s.mch != nil && s.typ == SyncSubscription { close(s.mch) } s.mch = nil // Mark as invalid, for signaling to deliverMsgs s.closed = true // Mark connection closed in subscription s.connClosed = true // If we have an async subscription, signals it to exit if s.typ == AsyncSubscription && s.pCond != nil { s.pCond.Signal() } s.mu.Unlock() } nc.subs = nil nc.subsMu.Unlock() nc.status = status // Perform appropriate callback if needed for a disconnect. if doCBs { if nc.conn != nil { if nc.Opts.DisconnectedErrCB != nil { nc.ach.push(func() { nc.Opts.DisconnectedErrCB(nc, err) }) } else if nc.Opts.DisconnectedCB != nil { nc.ach.push(func() { nc.Opts.DisconnectedCB(nc) }) } } if nc.Opts.ClosedCB != nil { nc.ach.push(func() { nc.Opts.ClosedCB(nc) }) } } // If this is terminal, then we have to notify the asyncCB handler that // it can exit once all async cbs have been dispatched. if status == CLOSED { nc.ach.close() } nc.mu.Unlock() } // Close will close the connection to the server. This call will release // all blocking calls, such as Flush() and NextMsg() func (nc *Conn) Close() { if nc != nil { nc.close(CLOSED, !nc.Opts.NoCallbacksAfterClientClose, nil) } } // IsClosed tests if a Conn has been closed. func (nc *Conn) IsClosed() bool { nc.mu.RLock() defer nc.mu.RUnlock() return nc.isClosed() } // IsReconnecting tests if a Conn is reconnecting. func (nc *Conn) IsReconnecting() bool { nc.mu.RLock() defer nc.mu.RUnlock() return nc.isReconnecting() } // IsConnected tests if a Conn is connected. func (nc *Conn) IsConnected() bool { nc.mu.RLock() defer nc.mu.RUnlock() return nc.isConnected() } // drainConnection will run in a separate Go routine and will // flush all publishes and drain all active subscriptions. func (nc *Conn) drainConnection() { // Snapshot subs list. nc.mu.Lock() // Check again here if we are in a state to not process. if nc.isClosed() { nc.mu.Unlock() return } if nc.isConnecting() || nc.isReconnecting() { nc.mu.Unlock() // Move to closed state. 
nc.close(CLOSED, true, nil) return } subs := make([]*Subscription, 0, len(nc.subs)) for _, s := range nc.subs { subs = append(subs, s) } errCB := nc.Opts.AsyncErrorCB drainWait := nc.Opts.DrainTimeout nc.mu.Unlock() // for pushing errors with context. pushErr := func(err error) { nc.mu.Lock() nc.err = err if errCB != nil { nc.ach.push(func() { errCB(nc, nil, err) }) } nc.mu.Unlock() } // Do subs first for _, s := range subs { if err := s.Drain(); err != nil { // We will notify about these but continue. pushErr(err) } } // Wait for the subscriptions to drop to zero. timeout := time.Now().Add(drainWait) for time.Now().Before(timeout) { if nc.NumSubscriptions() == 0 { break } time.Sleep(10 * time.Millisecond) } // Check if we timed out. if nc.NumSubscriptions() != 0 { pushErr(ErrDrainTimeout) } // Flip State nc.mu.Lock() nc.status = DRAINING_PUBS nc.mu.Unlock() // Do publish drain via Flush() call. err := nc.FlushTimeout(5 * time.Second) if err != nil { pushErr(err) nc.close(CLOSED, true, nil) return } // Move to closed state. nc.close(CLOSED, true, nil) } // Drain will put a connection into a drain state. All subscriptions will // immediately be put into a drain state. Upon completion, the publishers // will be drained and can not publish any additional messages. Upon draining // of the publishers, the connection will be closed. Use the ClosedCB() // option to know when the connection has moved from draining to closed. func (nc *Conn) Drain() error { nc.mu.Lock() if nc.isClosed() { nc.mu.Unlock() return ErrConnectionClosed } if nc.isConnecting() || nc.isReconnecting() { nc.mu.Unlock() nc.close(CLOSED, true, nil) return ErrConnectionReconnecting } if nc.isDraining() { nc.mu.Unlock() return nil } nc.status = DRAINING_SUBS go nc.drainConnection() nc.mu.Unlock() return nil } // IsDraining tests if a Conn is in the draining state. func (nc *Conn) IsDraining() bool { nc.mu.RLock() defer nc.mu.RUnlock() return nc.isDraining() } // caller must lock func (nc *Conn) getServers(implicitOnly bool) []string { poolSize := len(nc.srvPool) var servers = make([]string, 0) for i := 0; i < poolSize; i++ { if implicitOnly && !nc.srvPool[i].isImplicit { continue } url := nc.srvPool[i].url servers = append(servers, fmt.Sprintf("%s://%s", url.Scheme, url.Host)) } return servers } // Servers returns the list of known server urls, including additional // servers discovered after a connection has been established. If // authentication is enabled, use UserInfo or Token when connecting with // these urls. func (nc *Conn) Servers() []string { nc.mu.RLock() defer nc.mu.RUnlock() return nc.getServers(false) } // DiscoveredServers returns only the server urls that have been discovered // after a connection has been established. If authentication is enabled, // use UserInfo or Token when connecting with these urls. func (nc *Conn) DiscoveredServers() []string { nc.mu.RLock() defer nc.mu.RUnlock() return nc.getServers(true) } // Status returns the current state of the connection. func (nc *Conn) Status() Status { nc.mu.RLock() defer nc.mu.RUnlock() return nc.status } // Test if Conn has been closed Lock is assumed held. func (nc *Conn) isClosed() bool { return nc.status == CLOSED } // Test if Conn is in the process of connecting func (nc *Conn) isConnecting() bool { return nc.status == CONNECTING } // Test if Conn is being reconnected. func (nc *Conn) isReconnecting() bool { return nc.status == RECONNECTING } // Test if Conn is connected or connecting. 
func (nc *Conn) isConnected() bool { return nc.status == CONNECTED || nc.isDraining() } // Test if Conn is in the draining state. func (nc *Conn) isDraining() bool { return nc.status == DRAINING_SUBS || nc.status == DRAINING_PUBS } // Test if Conn is in the draining state for pubs. func (nc *Conn) isDrainingPubs() bool { return nc.status == DRAINING_PUBS } // Stats will return a race safe copy of the Statistics section for the connection. func (nc *Conn) Stats() Statistics { // Stats are updated either under connection's mu or with atomic operations // for inbound stats in processMsg(). nc.mu.Lock() stats := Statistics{ InMsgs: atomic.LoadUint64(&nc.InMsgs), InBytes: atomic.LoadUint64(&nc.InBytes), OutMsgs: nc.OutMsgs, OutBytes: nc.OutBytes, Reconnects: nc.Reconnects, } nc.mu.Unlock() return stats } // MaxPayload returns the size limit that a message payload can have. // This is set by the server configuration and delivered to the client // upon connect. func (nc *Conn) MaxPayload() int64 { nc.mu.RLock() defer nc.mu.RUnlock() return nc.info.MaxPayload } // AuthRequired will return if the connected server requires authorization. func (nc *Conn) AuthRequired() bool { nc.mu.RLock() defer nc.mu.RUnlock() return nc.info.AuthRequired } // TLSRequired will return if the connected server requires TLS connections. func (nc *Conn) TLSRequired() bool { nc.mu.RLock() defer nc.mu.RUnlock() return nc.info.TLSRequired } // Barrier schedules the given function `f` to all registered asynchronous // subscriptions. // Only the last subscription to see this barrier will invoke the function. // If no subscription is registered at the time of this call, `f()` is invoked // right away. // ErrConnectionClosed is returned if the connection is closed prior to // the call. func (nc *Conn) Barrier(f func()) error { nc.mu.Lock() if nc.isClosed() { nc.mu.Unlock() return ErrConnectionClosed } nc.subsMu.Lock() // Need to figure out how many non chan subscriptions there are numSubs := 0 for _, sub := range nc.subs { if sub.typ == AsyncSubscription { numSubs++ } } if numSubs == 0 { nc.subsMu.Unlock() nc.mu.Unlock() f() return nil } barrier := &barrierInfo{refs: int64(numSubs), f: f} for _, sub := range nc.subs { sub.mu.Lock() if sub.mch == nil { msg := &Msg{barrier: barrier} // Push onto the async pList if sub.pTail != nil { sub.pTail.next = msg } else { sub.pHead = msg sub.pCond.Signal() } sub.pTail = msg } sub.mu.Unlock() } nc.subsMu.Unlock() nc.mu.Unlock() return nil } // GetClientID returns the client ID assigned by the server to which // the client is currently connected to. Note that the value may change if // the client reconnects. // This function returns ErrNoClientIDReturned if the server is of a // version prior to 1.2.0. func (nc *Conn) GetClientID() (uint64, error) { nc.mu.RLock() defer nc.mu.RUnlock() if nc.isClosed() { return 0, ErrConnectionClosed } if nc.info.CID == 0 { return 0, ErrClientIDNotSupported } return nc.info.CID, nil } // NkeyOptionFromSeed will load an nkey pair from a seed file. // It will return the NKey Option and will handle // signing of nonce challenges from the server. It will take // care to not hold keys in memory and to wipe memory. func NkeyOptionFromSeed(seedFile string) (Option, error) { kp, err := nkeyPairFromSeedFile(seedFile) if err != nil { return nil, err } // Wipe our key on exit. 
defer kp.Wipe() pub, err := kp.PublicKey() if err != nil { return nil, err } if !nkeys.IsValidPublicUserKey(pub) { return nil, fmt.Errorf("nats: Not a valid nkey user seed") } sigCB := func(nonce []byte) ([]byte, error) { return sigHandler(nonce, seedFile) } return Nkey(string(pub), sigCB), nil } // Just wipe slice with 'x', for clearing contents of creds or nkey seed file. func wipeSlice(buf []byte) { for i := range buf { buf[i] = 'x' } } func userFromFile(userFile string) (string, error) { path, err := expandPath(userFile) if err != nil { return _EMPTY_, fmt.Errorf("nats: %v", err) } contents, err := ioutil.ReadFile(path) if err != nil { return _EMPTY_, fmt.Errorf("nats: %v", err) } defer wipeSlice(contents) return jwt.ParseDecoratedJWT(contents) } func homeDir() (string, error) { if runtime.GOOS == "windows" { homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH") userProfile := os.Getenv("USERPROFILE") var home string if homeDrive == "" || homePath == "" { if userProfile == "" { return _EMPTY_, errors.New("nats: failed to get home dir, require %HOMEDRIVE% and %HOMEPATH% or %USERPROFILE%") } home = userProfile } else { home = filepath.Join(homeDrive, homePath) } return home, nil } home := os.Getenv("HOME") if home == "" { return _EMPTY_, errors.New("nats: failed to get home dir, require $HOME") } return home, nil } func expandPath(p string) (string, error) { p = os.ExpandEnv(p) if !strings.HasPrefix(p, "~") { return p, nil } home, err := homeDir() if err != nil { return _EMPTY_, err } return filepath.Join(home, p[1:]), nil } func nkeyPairFromSeedFile(seedFile string) (nkeys.KeyPair, error) { contents, err := ioutil.ReadFile(seedFile) if err != nil { return nil, fmt.Errorf("nats: %v", err) } defer wipeSlice(contents) return jwt.ParseDecoratedNKey(contents) } // Sign authentication challenges from the server. // Do not keep private seed in memory. func sigHandler(nonce []byte, seedFile string) ([]byte, error) { kp, err := nkeyPairFromSeedFile(seedFile) if err != nil { return nil, err } // Wipe our key on exit. defer kp.Wipe() sig, _ := kp.Sign(nonce) return sig, nil } type timeoutWriter struct { timeout time.Duration conn net.Conn err error } // Write implements the io.Writer interface. func (tw *timeoutWriter) Write(p []byte) (int, error) { if tw.err != nil { return 0, tw.err } var n int tw.conn.SetWriteDeadline(time.Now().Add(tw.timeout)) n, tw.err = tw.conn.Write(p) tw.conn.SetWriteDeadline(time.Time{}) return n, tw.err }
[ "\"HOMEDRIVE\"", "\"HOMEPATH\"", "\"USERPROFILE\"", "\"HOME\"" ]
[]
[ "USERPROFILE", "HOME", "HOMEPATH", "HOMEDRIVE" ]
[]
["USERPROFILE", "HOME", "HOMEPATH", "HOMEDRIVE"]
go
4
0
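
The record above lists HOMEDRIVE, HOMEPATH, USERPROFILE and HOME as the constant environment-variable arguments read by the file's homeDir() helper. The following standalone program is a purely illustrative sketch, not taken from the dataset: it shows one common way these four variables are combined to resolve a home directory across Windows and Unix-like systems, and the function and file names here are hypothetical.

// home_example.go — illustrative only; mirrors the env-var lookup order used
// by the homeDir() helper in the extracted source above.
package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"runtime"
)

// resolveHome returns the current user's home directory using only the
// environment variables recorded for this dataset row.
func resolveHome() (string, error) {
	if runtime.GOOS == "windows" {
		// Prefer HOMEDRIVE + HOMEPATH, fall back to USERPROFILE.
		drive, path := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH")
		if drive != "" && path != "" {
			return filepath.Join(drive, path), nil
		}
		if profile := os.Getenv("USERPROFILE"); profile != "" {
			return profile, nil
		}
		return "", errors.New("require %HOMEDRIVE% and %HOMEPATH% or %USERPROFILE%")
	}
	// Non-Windows systems rely on $HOME.
	if home := os.Getenv("HOME"); home != "" {
		return home, nil
	}
	return "", errors.New("require $HOME")
}

func main() {
	home, err := resolveHome()
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("home directory:", home)
}

On Windows the HOMEDRIVE/HOMEPATH pair is preferred and USERPROFILE is the fallback, which matches the order used in the extracted source.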
internal/pkg/utils/test_utils.go
/*
 * Copyright 2020 Nalej
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package utils

import (
	"github.com/onsi/gomega"
	"google.golang.org/grpc"
	"os"
)

// RunIntegrationTests checks whether integration tests should be executed.
func RunIntegrationTests() bool {
	var runIntegration = os.Getenv("RUN_INTEGRATION_TEST")
	return runIntegration == "true"
}

// GetConnection opens an insecure gRPC client connection to the given address,
// failing the current assertion context if the connection cannot be established.
func GetConnection(address string) *grpc.ClientConn {
	conn, err := grpc.Dial(address, grpc.WithInsecure())
	gomega.Expect(err).To(gomega.Succeed())
	return conn
}

// TestResult holds the outcome of a test operation.
type TestResult struct {
	Token   string
	Success bool
	Msg     string
}
[ "\"RUN_INTEGRATION_TEST\"" ]
[]
[ "RUN_INTEGRATION_TEST" ]
[]
["RUN_INTEGRATION_TEST"]
go
1
0
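
This record captures RUN_INTEGRATION_TEST as the only environment variable consumed by test_utils.go, through the RunIntegrationTests() helper. As a hedged, illustrative sketch — the suite name, service address, and the utils import path are hypothetical placeholders, not taken from the dataset — a Ginkgo/Gomega test would typically gate on that helper like this:

// service_integration_test.go — illustrative only.
package service_test

import (
	"testing"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"

	// Placeholder module path for the utils package shown above; the real
	// module path is not given in the dataset row.
	"example.com/project/internal/pkg/utils"
)

func TestService(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "Service integration suite")
}

var _ = ginkgo.Describe("Service client", func() {
	ginkgo.BeforeEach(func() {
		// Skip the spec unless RUN_INTEGRATION_TEST=true.
		if !utils.RunIntegrationTests() {
			ginkgo.Skip("set RUN_INTEGRATION_TEST=true to run integration tests")
		}
	})

	ginkgo.It("connects to the target service", func() {
		// Hypothetical address; GetConnection fails the assertion if dialing errors.
		conn := utils.GetConnection("localhost:8000")
		gomega.Expect(conn).ToNot(gomega.BeNil())
	})
})

Running the suite with RUN_INTEGRATION_TEST unset (or set to anything other than "true") skips the integration specs, which is the behaviour the helper encodes.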