Dataset columns:
filename: string (lengths 4 to 198)
content: string (lengths 25 to 939k)
environment: sequence
variablearg: sequence
constarg: sequence
variableargjson: string (1 distinct value)
constargjson: string (lengths 2 to 3.9k)
lang: string (3 distinct values)
constargcount: float64 (0 to 129)
variableargcount: float64 (0 to 0)
sentence: string (1 distinct value)
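To make the column semantics concrete, here is a minimal sketch of how a source file relates to the extracted fields. The file and variable names below are hypothetical, and the mapping is an assumption read off the rows that follow, not an official description of the extraction pipeline.

// hypothetical example file: one constant-argument and one variable-argument
// os.Getenv call, to illustrate the constarg / variablearg distinction.
package main

import (
	"fmt"
	"os"
)

func main() {
	// Constant argument: the raw token "\"EXAMPLE_VAR\"" would appear in the
	// environment column, "EXAMPLE_VAR" in constarg and constargjson, and
	// constargcount would be 1.
	fmt.Println(os.Getenv("EXAMPLE_VAR"))

	// Variable argument: presumably counted under variablearg / variableargcount
	// rather than constarg (no row below actually contains such a case).
	key := "EXAMPLE_VAR_2"
	fmt.Println(os.Getenv(key))
}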
filename: versions/delete_versions_test.go
content:
package versions import ( "os" "sort" "strings" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" "github.com/croman/delete-s3-versions/config" ) func setupBucketsAndObjects() map[string]*fakeBucket { fakeBuckets := map[string]*fakeBucket{} fakeBuckets["b1"] = &fakeBucket{ VersioningStatus: aws.String(s3.BucketVersioningStatusEnabled), Objects: map[string][]*fakeVersion{ "key1": []*fakeVersion{ &fakeVersion{ VersionID: "b1-key1-v1", LastModified: time.Now().Add(-10 * time.Hour), }, &fakeVersion{ VersionID: "b1-key1-v2", LastModified: time.Now().Add(-9 * time.Hour), }, &fakeVersion{ VersionID: "b1-key1-v2-deleted", LastModified: time.Now().Add(-8 * time.Hour), IsDeleteMarker: true, }, &fakeVersion{ VersionID: "b1-key1-v3", LastModified: time.Now().Add(-7 * time.Hour), }, }, "key2": []*fakeVersion{ &fakeVersion{ VersionID: "b1-key2-deleted", LastModified: time.Now().Add(-11 * time.Hour), IsDeleteMarker: true, }, &fakeVersion{ VersionID: "b1-key2-v1", LastModified: time.Now().Add(-10 * time.Hour), }, &fakeVersion{ VersionID: "b1-key2-v2", LastModified: time.Now().Add(-9 * time.Hour), }, }, }, } fakeBuckets["b2"] = &fakeBucket{ Objects: map[string][]*fakeVersion{ "key1": []*fakeVersion{ &fakeVersion{ VersionID: "b2-key1-v1", LastModified: time.Now().Add(-10 * time.Hour), }, &fakeVersion{ VersionID: "b2-key1-v2", LastModified: time.Now().Add(-9 * time.Hour), }, }, "key2": []*fakeVersion{ &fakeVersion{ VersionID: "b2-key2-v1", LastModified: time.Now().Add(-10 * time.Hour), }, }, }, } fakeBuckets["bucket-in-wrong-region"] = &fakeBucket{ Objects: map[string][]*fakeVersion{ "key1": []*fakeVersion{ &fakeVersion{ VersionID: "b3-key1-v1", LastModified: time.Now().Add(-10 * time.Hour), }, }, }, } return fakeBuckets } func getBasicTestService(fakeBuckets map[string]*fakeBucket) *s3Versions { return &s3Versions{ config: &config.Config{ S3Region: "eu-west-1", S3DisableSSL: os.Getenv("S3_DISABLE_SSL"), S3Endpoint: os.Getenv("S3_ENDPOINT"), BucketName: "*", BucketPrefix: "", VersionsCount: 1, Confirm: true, }, s3: newS3ApiMock(fakeBuckets, 3), } } func TestGetBuckets(t *testing.T) { fakeBuckets := setupBucketsAndObjects() s := getBasicTestService(fakeBuckets) buckets, err := s.getBuckets() require.Nil(t, err) sort.Strings(buckets) assert.Equal(t, []string{"b1", "b2", "bucket-in-wrong-region"}, buckets) } func TestGetBuckets_OneBucketConfig(t *testing.T) { fakeBuckets := setupBucketsAndObjects() s := getBasicTestService(fakeBuckets) s.config.BucketName = "b1" buckets, err := s.getBuckets() require.Nil(t, err) assert.Equal(t, []string{"b1"}, buckets) } func TestGetBuckets_MissingBucketConfig(t *testing.T) { fakeBuckets := setupBucketsAndObjects() s := getBasicTestService(fakeBuckets) s.config.BucketName = "missing-bucket" _, err := s.getBuckets() assert.True(t, strings.Index(err.Error(), "Bucket doesn't exist") > -1) } func TestVersioningEnabled(t *testing.T) { fakeBuckets := setupBucketsAndObjects() s := getBasicTestService(fakeBuckets) buckets, err := s.getBuckets() require.Nil(t, err) buckets, err = s.filterBucketsByVersioningEnabled(buckets) assert.Equal(t, []string{"b1"}, buckets) } func TestFindAndDelete(t *testing.T) { fakeBuckets := setupBucketsAndObjects() s := getBasicTestService(fakeBuckets) err := s.Delete() require.Nil(t, err) assert.Equal(t, 1, len(fakeBuckets["b1"].Objects["key1"])) assert.Equal(t, 1, len(fakeBuckets["b1"].Objects["key2"])) assert.Equal(t, 2, 
len(fakeBuckets["b2"].Objects["key1"])) } func TestSkipDelete(t *testing.T) { fakeBuckets := setupBucketsAndObjects() s := getBasicTestService(fakeBuckets) s.config.Confirm = false err := s.Delete() require.Nil(t, err) assert.Equal(t, 4, len(fakeBuckets["b1"].Objects["key1"])) assert.Equal(t, 3, len(fakeBuckets["b1"].Objects["key2"])) assert.Equal(t, 2, len(fakeBuckets["b2"].Objects["key1"])) } func TestS3Config(t *testing.T) { c := &config.Config{ S3Region: "eu-west-2", S3DisableSSL: "true", S3Endpoint: "http://localhost:1234", } assert.Equal(t, getS3Config(c), &aws.Config{ DisableSSL: aws.Bool(true), Endpoint: aws.String("http://localhost:1234"), Region: aws.String("eu-west-2"), S3ForcePathStyle: aws.Bool(true), }) c = &config.Config{ S3DisableSSL: "true", S3Endpoint: "http://localhost:1234", } assert.Equal(t, getS3Config(c), &aws.Config{ DisableSSL: aws.Bool(true), Endpoint: aws.String("http://localhost:1234"), Region: aws.String("eu-west-1"), S3ForcePathStyle: aws.Bool(true), }) }
[ "\"S3_DISABLE_SSL\"", "\"S3_ENDPOINT\"" ]
[]
[ "S3_ENDPOINT", "S3_DISABLE_SSL" ]
[]
["S3_ENDPOINT", "S3_DISABLE_SSL"]
go
2
0
filename: oracle/pkg/database/dbdaemon/utils.go
content:
// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package dbdaemon import ( "compress/gzip" "context" "fmt" "io" "os" "os/exec" "path/filepath" "strings" "cloud.google.com/go/storage" "k8s.io/klog/v2" ) const gsPrefix = "gs://" // Override library functions for the benefit of unit tests. var ( lsnrctl = func(databaseHome string) string { return filepath.Join(databaseHome, "bin", "lsnrctl") } rman = func(databaseHome string) string { return filepath.Join(databaseHome, "bin", "rman") } orapwd = func(databaseHome string) string { return filepath.Join(databaseHome, "bin", "orapwd") } impdp = func(databaseHome string) string { return filepath.Join(databaseHome, "bin", "impdp") } expdp = func(databaseHome string) string { return filepath.Join(databaseHome, "bin", "expdp") } ) const ( contentTypePlainText = "plain/text" contentTypeGZ = "application/gzip" ) // osUtil was defined for tests. type osUtil interface { runCommand(bin string, params []string) error isReturnCodeEqual(err error, code int) bool createFile(file string, content io.Reader) error removeFile(file string) error } type osUtilImpl struct { } func (o *osUtilImpl) runCommand(bin string, params []string) error { ohome := os.Getenv("ORACLE_HOME") klog.InfoS("executing command with args", "cmd", bin, "params", params, "ORACLE_SID", os.Getenv("ORACLE_SID"), "ORACLE_HOME", ohome, "TNS_ADMIN", os.Getenv("TNS_ADMIN")) switch bin { case lsnrctl(ohome), rman(ohome), orapwd(ohome), impdp(ohome), expdp(ohome): default: klog.InfoS("command not supported", "bin", bin) return fmt.Errorf("command %q is not supported", bin) } cmd := exec.Command(bin) cmd.Args = append(cmd.Args, params...) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr return cmd.Run() } func (o *osUtilImpl) isReturnCodeEqual(err error, code int) bool { if exitError, ok := err.(*exec.ExitError); ok { return exitError.ExitCode() == code } return false } func (o *osUtilImpl) createFile(file string, content io.Reader) error { dir := filepath.Dir(file) if err := os.MkdirAll(dir, 0750); err != nil { return fmt.Errorf("couldn't create dir err: %v", err) } f, err := os.Create(file) // truncates if file exists. if err != nil { return fmt.Errorf("couldn't create file err: %v", err) } defer func() { if err := f.Close(); err != nil { klog.Warningf("failed to close %v: %v", f, err) } }() if _, err := io.Copy(f, content); err != nil { return fmt.Errorf("copying contents failed: %v", err) } return nil } func (o *osUtilImpl) removeFile(file string) error { return os.Remove(file) } // gcsUtil contains helper methods for reading/writing GCS objects. type gcsUtil interface { // download returns an io.ReadCloser for GCS object at given gcsPath. download(ctx context.Context, gcsPath string) (io.ReadCloser, error) // uploadFile uploads contents of a file at filepath to gcsPath location in // GCS and sets object's contentType. // If gcsPath ends with .gz it also compresses the uploaded contents // and sets object's content type to application/gzip. 
uploadFile(ctx context.Context, gcsPath, filepath, contentType string) error // splitURI takes a GCS URI and splits it into bucket and object names. If the URI does not have // the gs:// scheme, or the URI doesn't specify both a bucket and an object name, returns an error. splitURI(url string) (bucket, name string, err error) } type gcsUtilImpl struct{} func (g *gcsUtilImpl) download(ctx context.Context, gcsPath string) (io.ReadCloser, error) { bucket, name, err := g.splitURI(gcsPath) if err != nil { return nil, err } client, err := storage.NewClient(ctx) if err != nil { return nil, fmt.Errorf("failed to init GCS client: %v", err) } defer client.Close() reader, err := client.Bucket(bucket).Object(name).NewReader(ctx) if err != nil { return nil, fmt.Errorf("failed to read URL %s: %v", gcsPath, err) } return reader, nil } func (g *gcsUtilImpl) uploadFile(ctx context.Context, gcsPath, filePath, contentType string) error { bucket, name, err := g.splitURI(gcsPath) if err != nil { return err } f, err := os.Open(filePath) if err != nil { return err } defer func() { if err := f.Close(); err != nil { klog.Warningf("failed to close %v: %v", f, err) } }() client, err := storage.NewClient(ctx) if err != nil { return fmt.Errorf("failed to init GCS client: %v", err) } defer client.Close() b := client.Bucket(bucket) // check if bucket exists and it is accessible if _, err := b.Attrs(ctx); err != nil { return err } gcsWriter := b.Object(name).NewWriter(ctx) gcsWriter.ContentType = contentType defer gcsWriter.Close() var writer io.WriteCloser = gcsWriter if strings.HasSuffix(gcsPath, ".gz") { gcsWriter.ContentType = contentTypeGZ writer = gzip.NewWriter(gcsWriter) defer writer.Close() } _, err = io.Copy(writer, f) if err != nil { return fmt.Errorf("failed to write file %s to %s: %v", filePath, gcsPath, err) } return nil } func (g *gcsUtilImpl) splitURI(url string) (bucket, name string, err error) { u := strings.TrimPrefix(url, gsPrefix) if u == url { return "", "", fmt.Errorf("URL %q is missing the %q prefix", url, gsPrefix) } if i := strings.Index(u, "/"); i >= 2 { return u[:i], u[i+1:], nil } return "", "", fmt.Errorf("URL %q does not specify a bucket and a name", url) }
[ "\"ORACLE_HOME\"", "\"ORACLE_SID\"", "\"TNS_ADMIN\"" ]
[]
[ "ORACLE_SID", "ORACLE_HOME", "TNS_ADMIN" ]
[]
["ORACLE_SID", "ORACLE_HOME", "TNS_ADMIN"]
go
3
0
filename: edalize/quartus.py
content:
import logging import os.path import os import platform import subprocess import re import xml.etree.ElementTree as ET from functools import partial from edalize.edatool import Edatool logger = logging.getLogger(__name__) class Quartus(Edatool): argtypes = ['vlogdefine', 'vlogparam', 'generic'] # Define Standard edition to be our default version isPro = False makefile_template = {False : "quartus-std-makefile.j2", True : "quartus-pro-makefile.j2"} @classmethod def get_doc(cls, api_ver): if api_ver == 0: return {'description' : "The Quartus backend supports Intel Quartus Std and Pro editions to build systems and program the FPGA", 'members' : [ {'name' : 'family', 'type' : 'String', 'desc' : 'FPGA family (e.g. Cyclone V)'}, {'name' : 'device', 'type' : 'String', 'desc' : 'FPGA device (e.g. 5CSXFC6D6F31C8ES)'}, {'name' : 'cable', 'type' : 'String', 'desc' : "Specifies the FPGA's JTAG programming cable. Use the tool `jtagconfig` to determine the available cables."}, {'name' : 'board_device_index', 'type' : 'String', 'desc' : "Specifies the FPGA's device number in the JTAG chain. The device index specifies the device where the flash programmer looks for the Nios® II JTAG debug module. JTAG devices are numbered relative to the JTAG chain, starting at 1. Use the tool `jtagconfig` to determine the index."}, {'name' : 'pnr', 'type' : 'String', 'desc' : 'P&R tool. Allowed values are quartus (default), dse (to run Design Space Explorer) and none (to just run synthesis)'}], 'lists' : [ {'name' : 'dse_options', 'type' : 'String', 'desc' : 'Options for DSE (Design Space Explorer)'}, {'name' : 'quartus_options', 'type' : 'String', 'desc' : 'Additional options for Quartus'}, ]} """ Initial setup of the class This calls the parent constructor, but also identifies whether the current system is using a Standard or Pro edition of Quartus. """ def __init__(self, edam=None, work_root=None, eda_api=None): if not edam: edam = eda_api super(Quartus, self).__init__(edam, work_root) # Acquire quartus_sh identification information from available tool if # possible. We always default to version 18.1 Standard if a problem is encountered version = { 'major': '18', 'minor': '1', 'patch': '0', 'date': '01/01/2019', 'edition': 'Standard' } try: qsh_text = subprocess.Popen(["quartus_sh", "--version"], stdout=subprocess.PIPE, env=os.environ).communicate()[0] # Attempt to pattern match the output. Examples include # Version 16.1.2 Build 203 01/18/2017 SJ Standard Edition # Version 17.1.2 Build 304 01/31/2018 SJ Pro Edition version_exp = r'Version (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+) ' + \ r'Build (?P<build>\d+) (?P<date>\d{2}/\d{2}/\d{4}) (?:\w+) ' + \ r'(?P<edition>(Lite|Standard|Pro)) Edition' match = re.search(version_exp, str(qsh_text)) if match != None: version = match.groupdict() except: # It is possible for this to have been run on a box without # Quartus being installed. Allow these errors to be ignored logger.warning("Unable to recognise Quartus version via quartus_sh") self.isPro = (version['edition'] == "Pro") # Quartus Pro 17 and later use 1/0 for boolean generics. Other editions # and versions use "true"/"false" strings if (version['edition'] != "Pro") or (int(version['major']) < 17): self.jinja_env.filters['generic_value_str'] = \ partial(self.jinja_env.filters['generic_value_str'], bool_is_str=True) """ Configuration is the first phase of the build This writes the project TCL files and Makefile. 
It first collects all sources, IPs and constraints and then writes them to the TCL file along with the build steps. """ def configure_main(self): (src_files, incdirs) = self._get_fileset_files(force_slash=True) self.jinja_env.filters['src_file_filter'] = self.src_file_filter self.jinja_env.filters['qsys_file_filter'] = self.qsys_file_filter has_vhdl2008 = 'vhdlSource-2008' in [x.file_type for x in src_files] has_qsys = 'QSYS' in [x.file_type for x in src_files] escaped_name = self.name.replace(".", "_") template_vars = { 'name' : escaped_name, 'src_files' : src_files, 'incdirs' : incdirs, 'tool_options' : self.tool_options, 'toplevel' : self.toplevel, 'vlogparam' : self.vlogparam, 'vlogdefine' : self.vlogdefine, 'generic' : self.generic, 'has_vhdl2008' : has_vhdl2008 } # Render Makefile based on detected version self.render_template(self.makefile_template[self.isPro], 'Makefile', { 'name' : escaped_name, 'src_files' : src_files, 'tool_options' : self.tool_options}) # Render the TCL project file self.render_template('quartus-project.tcl.j2', escaped_name + '.tcl', template_vars) # Helper to extract file type def file_type(self, f): return f.file_type.split('-')[0] # Filter for just QSYS files. This verifies that they are compatible # with the identified Quartus version def qsys_file_filter(self, f): name = '' if self.file_type(f) == 'QSYS': # Compatibility checks try: qsysTree = ET.parse(os.path.join(self.work_root, f.name)) try: tool = qsysTree.find('component').attrib['tool'] if tool == 'QsysPro' and self.isPro: name = f.name except (AttributeError, KeyError): # Either a component wasn't found in the QSYS file, or it # had no associated tool information. Make the assumption # it was a Standard edition file if not self.isPro: name = f.name except (ET.ParseError, IOError): logger.warning("Unable to parse QSYS file " + f.name) # Give QSYS files special attributes to make the logic in # the Jinja2 templates much simplier setattr(f, "simplename", os.path.basename(f.name).split('.qsys')[0]) setattr(f, "srcdir", os.path.dirname(f.name)) setattr(f, "dstdir", os.path.join('qsys', f.simplename)) return name # Allow the templates to get source file information def src_file_filter(self, f): def _append_library(f): s = "" if f.logical_name: s += ' -library ' + f.logical_name return s def _handle_qsys(t, f): # Quartus Pro just passes QSYS files onto the compiler, but Standard # expects to see them sepecified as QIP. 
The Makefile is responsible # for creating that QIP file for Standard edition in a known place # which can be used below if self.isPro: return _handle_src(t, f) else: f.name = os.path.join(f.dstdir, f.simplename + '.qip') f.file_type = 'QIP' return _handle_src('QIP_FILE', f) def _handle_src(t, f): s = 'set_global_assignment -name ' + t s += _append_library(f) s += ' ' + f.name return s def _handle_tcl(f): return "source " + f.name file_mapping = { 'verilogSource' : partial(_handle_src, 'VERILOG_FILE'), 'systemVerilogSource' : partial(_handle_src, 'SYSTEMVERILOG_FILE'), 'vhdlSource' : partial(_handle_src, 'VHDL_FILE'), 'SDC' : partial(_handle_src, 'SDC_FILE'), 'QSYS' : partial(_handle_qsys, 'QSYS_FILE'), 'QIP' : partial(_handle_src, 'QIP_FILE'), 'IP' : partial(_handle_src, 'IP_FILE'), 'tclSource' : partial(_handle_tcl), } _file_type = self.file_type(f) if _file_type in file_mapping: return file_mapping[_file_type](f) elif _file_type == 'user': return '' else: _s = "{} has unknown file type '{}'" logger.warning(_s.format(f.name, f.file_type)) return '' def build_main(self): logger.info("Building") args = [] if 'pnr' in self.tool_options: if self.tool_options['pnr'] == 'quartus': pass elif self.tool_options['pnr'] == 'dse': args.append('dse') elif self.tool_options['pnr'] == 'none': args.append('syn') self._run_tool('make', args) """ Program the FPGA """ def run_main(self): args = ['--mode=jtag'] if 'cable' in self.tool_options: args += ['-c', self.tool_options['cable']] args += ['-o'] args += ['p;' + self.name.replace('.', '_') + '.sof'] if 'pnr' in self.tool_options: if self.tool_options['pnr'] == 'quartus': pass elif self.tool_options['pnr'] == 'dse': return elif self.tool_options['pnr'] == 'none': return if 'board_device_index' in self.tool_options: args[-1] += "@" + self.tool_options['board_device_index'] self._run_tool('quartus_pgm', args)
environment: []
variablearg: []
constarg: []
variableargjson: []
constargjson: []
lang: python
constargcount: 0
variableargcount: 0
filename: vendor/github.com/hashicorp/go-plugin/plugin_test.go
content:
package plugin import ( "fmt" "log" "net/rpc" "os" "os/exec" "testing" "time" ) // testAPIVersion is the ProtocolVersion we use for testing. var testHandshake = HandshakeConfig{ ProtocolVersion: 1, MagicCookieKey: "TEST_MAGIC_COOKIE", MagicCookieValue: "test", } // testInterface is the test interface we use for plugins. type testInterface interface { Double(int) int } // testInterfacePlugin is the implementation of Plugin to create // RPC client/server implementations for testInterface. type testInterfacePlugin struct{} func (p *testInterfacePlugin) Server(b *MuxBroker) (interface{}, error) { return &testInterfaceServer{Impl: new(testInterfaceImpl)}, nil } func (p *testInterfacePlugin) Client(b *MuxBroker, c *rpc.Client) (interface{}, error) { return &testInterfaceClient{Client: c}, nil } // testInterfaceImpl implements testInterface concretely type testInterfaceImpl struct{} func (i *testInterfaceImpl) Double(v int) int { return v * 2 } // testInterfaceClient implements testInterface to communicate over RPC type testInterfaceClient struct { Client *rpc.Client } func (impl *testInterfaceClient) Double(v int) int { var resp int err := impl.Client.Call("Plugin.Double", v, &resp) if err != nil { panic(err) } return resp } // testInterfaceServer is the RPC server for testInterfaceClient type testInterfaceServer struct { Broker *MuxBroker Impl testInterface } func (s *testInterfaceServer) Double(arg int, resp *int) error { *resp = s.Impl.Double(arg) return nil } // testPluginMap can be used for tests as a plugin map var testPluginMap = map[string]Plugin{ "test": new(testInterfacePlugin), } func helperProcess(s ...string) *exec.Cmd { cs := []string{"-test.run=TestHelperProcess", "--"} cs = append(cs, s...) env := []string{ "GO_WANT_HELPER_PROCESS=1", "PLUGIN_MIN_PORT=10000", "PLUGIN_MAX_PORT=25000", } cmd := exec.Command(os.Args[0], cs...) cmd.Env = append(env, os.Environ()...) return cmd } // This is not a real test. This is just a helper process kicked off by // tests. func TestHelperProcess(*testing.T) { if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { return } defer os.Exit(0) args := os.Args for len(args) > 0 { if args[0] == "--" { args = args[1:] break } args = args[1:] } if len(args) == 0 { fmt.Fprintf(os.Stderr, "No command\n") os.Exit(2) } cmd, args := args[0], args[1:] switch cmd { case "bad-version": fmt.Printf("%d|%d1|tcp|:1234\n", CoreProtocolVersion, testHandshake.ProtocolVersion) <-make(chan int) case "invalid-rpc-address": fmt.Println("lolinvalid") case "mock": fmt.Printf("%d|%d|tcp|:1234\n", CoreProtocolVersion, testHandshake.ProtocolVersion) <-make(chan int) case "start-timeout": time.Sleep(1 * time.Minute) os.Exit(1) case "stderr": fmt.Printf("%d|%d|tcp|:1234\n", CoreProtocolVersion, testHandshake.ProtocolVersion) log.Println("HELLO") log.Println("WORLD") case "stdin": fmt.Printf("%d|%d|tcp|:1234\n", CoreProtocolVersion, testHandshake.ProtocolVersion) data := make([]byte, 5) if _, err := os.Stdin.Read(data); err != nil { log.Printf("stdin read error: %s", err) os.Exit(100) } if string(data) == "hello" { os.Exit(0) } os.Exit(1) case "test-interface": Serve(&ServeConfig{ HandshakeConfig: testHandshake, Plugins: testPluginMap, }) // Shouldn't reach here but make sure we exit anyways os.Exit(0) case "test-interface-daemon": // Serve! Serve(&ServeConfig{ HandshakeConfig: testHandshake, Plugins: testPluginMap, }) // Shouldn't reach here but make sure we exit anyways os.Exit(0) default: fmt.Fprintf(os.Stderr, "Unknown command: %q\n", cmd) os.Exit(2) } }
[ "\"GO_WANT_HELPER_PROCESS\"" ]
[]
[ "GO_WANT_HELPER_PROCESS" ]
[]
["GO_WANT_HELPER_PROCESS"]
go
1
0
filename: vendor/github.com/hashicorp/vault/builtin/logical/postgresql/backend_test.go
content:
package postgresql import ( "database/sql" "fmt" "log" "os" "testing" "github.com/hashicorp/vault/logical" logicaltest "github.com/hashicorp/vault/logical/testing" "github.com/lib/pq" "github.com/mitchellh/mapstructure" ) func TestBackend_basic(t *testing.T) { b, _ := Factory(logical.TestBackendConfig()) d1 := map[string]interface{}{ "connection_url": os.Getenv("PG_URL"), } d2 := map[string]interface{}{ "value": os.Getenv("PG_URL"), } logicaltest.Test(t, logicaltest.TestCase{ AcceptanceTest: true, PreCheck: func() { testAccPreCheck(t) }, Backend: b, Steps: []logicaltest.TestStep{ testAccStepConfig(t, d1, false), testAccStepRole(t), testAccStepReadCreds(t, b, "web"), testAccStepConfig(t, d2, false), testAccStepRole(t), testAccStepReadCreds(t, b, "web"), }, }) } func TestBackend_roleCrud(t *testing.T) { b, _ := Factory(logical.TestBackendConfig()) d := map[string]interface{}{ "connection_url": os.Getenv("PG_URL"), } logicaltest.Test(t, logicaltest.TestCase{ AcceptanceTest: true, PreCheck: func() { testAccPreCheck(t) }, Backend: b, Steps: []logicaltest.TestStep{ testAccStepConfig(t, d, false), testAccStepRole(t), testAccStepReadRole(t, "web", testRole), testAccStepDeleteRole(t, "web"), testAccStepReadRole(t, "web", ""), }, }) } func TestBackend_configConnection(t *testing.T) { b := Backend() d1 := map[string]interface{}{ "value": os.Getenv("PG_URL"), } d2 := map[string]interface{}{ "connection_url": os.Getenv("PG_URL"), } d3 := map[string]interface{}{ "value": os.Getenv("PG_URL"), "connection_url": os.Getenv("PG_URL"), } d4 := map[string]interface{}{} logicaltest.Test(t, logicaltest.TestCase{ AcceptanceTest: true, PreCheck: func() { testAccPreCheck(t) }, Backend: b, Steps: []logicaltest.TestStep{ testAccStepConfig(t, d1, false), testAccStepConfig(t, d2, false), testAccStepConfig(t, d3, false), testAccStepConfig(t, d4, true), }, }) } func testAccPreCheck(t *testing.T) { if v := os.Getenv("PG_URL"); v == "" { t.Fatal("PG_URL must be set for acceptance tests") } } func testAccStepConfig(t *testing.T, d map[string]interface{}, expectError bool) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "config/connection", Data: d, ErrorOk: true, Check: func(resp *logical.Response) error { if expectError { if resp.Data == nil { return fmt.Errorf("data is nil") } var e struct { Error string `mapstructure:"error"` } if err := mapstructure.Decode(resp.Data, &e); err != nil { return err } if len(e.Error) == 0 { return fmt.Errorf("expected error, but write succeeded.") } return nil } else if resp != nil { return fmt.Errorf("response should be nil") } return nil }, } } func testAccStepRole(t *testing.T) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "roles/web", Data: map[string]interface{}{ "sql": testRole, }, } } func testAccStepDeleteRole(t *testing.T, n string) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.DeleteOperation, Path: "roles/" + n, } } func testAccStepReadCreds(t *testing.T, b logical.Backend, name string) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.ReadOperation, Path: "creds/" + name, Check: func(resp *logical.Response) error { var d struct { Username string `mapstructure:"username"` Password string `mapstructure:"password"` } if err := mapstructure.Decode(resp.Data, &d); err != nil { return err } log.Printf("[WARN] Generated credentials: %v", d) conn, err := pq.ParseURL(os.Getenv("PG_URL")) if err != nil { t.Fatal(err) } conn += " timezone=utc" db, err 
:= sql.Open("postgres", conn) if err != nil { t.Fatal(err) } returnedRows := func() int { stmt, err := db.Prepare(fmt.Sprintf( "SELECT DISTINCT schemaname FROM pg_tables WHERE has_table_privilege('%s', 'information_schema.role_column_grants', 'select');", d.Username)) if err != nil { return -1 } defer stmt.Close() rows, err := stmt.Query() if err != nil { return -1 } defer rows.Close() i := 0 for rows.Next() { i++ } return i } userRows := returnedRows() if userRows != 2 { t.Fatalf("did not get expected number of rows, got %d", userRows) } resp, err = b.HandleRequest(&logical.Request{ Operation: logical.RevokeOperation, Secret: &logical.Secret{ InternalData: map[string]interface{}{ "secret_type": "creds", "username": d.Username, }, }, }) if err != nil { return err } if resp != nil { if resp.IsError() { return fmt.Errorf("Error on resp: %#v", *resp) } } userRows = returnedRows() // User shouldn't exist so returnedRows() should encounter an error and exit with -1 if userRows != -1 { t.Fatalf("did not get expected number of rows, got %d", userRows) } return nil }, } } func testAccStepReadRole(t *testing.T, name string, sql string) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.ReadOperation, Path: "roles/" + name, Check: func(resp *logical.Response) error { if resp == nil { if sql == "" { return nil } return fmt.Errorf("bad: %#v", resp) } var d struct { SQL string `mapstructure:"sql"` } if err := mapstructure.Decode(resp.Data, &d); err != nil { return err } if d.SQL != sql { return fmt.Errorf("bad: %#v", resp) } return nil }, } } const testRole = ` CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}'; GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}"; `
[ "\"PG_URL\"", "\"PG_URL\"", "\"PG_URL\"", "\"PG_URL\"", "\"PG_URL\"", "\"PG_URL\"", "\"PG_URL\"", "\"PG_URL\"", "\"PG_URL\"" ]
[]
[ "PG_URL" ]
[]
["PG_URL"]
go
1
0
filename: backend/go/handlers/app/main.go
content:
package main import ( "fmt" "net/http" "os" "time" "encoding/json" "io/ioutil" "net/url" "github.com/aws/aws-lambda-go/events" "github.com/aws/aws-lambda-go/lambda" "github.com/awslabs/aws-lambda-go-api-proxy/gin" "github.com/gin-gonic/gin" ) var ginLambda *ginadapter.GinLambda func init() { } type SiteSettings struct { ID int `db:"id, json:"Id"` Title string `db:"title, json:"Title"` SlackUrl string `db:"slack_url, json:"SlackUrl"` SlackToken string `db:"slack_token, json:"SlackToken"` SlackInviteToken string `db:"slack_invite_token, json:"SlackInviteToken"` SlackInviteTitle string `db:"slack_invite_title, json:"SlackInviteTitle"` SlackInviteMessage string `db:"slack_invite_message, json:"SlackInviteMessage"` SlackId string `db:"slack_id, json:"SlackId"` BackgroundImage string `db:"background_image, json:"BackgroundImage"` HtmlTextColor string `db:"html_text_color, json:"HmlTextColor"` RecaptchaSiteKey string `db:"recaptcha_site, json:"RecaptchaSiteKey"` CommunityName string `db:"recaptcha_site, json:"CommunityName"` } type InviteData struct { Email string `json:"email"` RecaptchaToken string `json:"recaptchaToken"` } type RecaptchaResponse struct { Success bool `json:"success"` Score float64 `json:"score"` Action string `json:"action"` ChallengeTS time.Time `json:"challenge_ts"` Hostname string `json:"hostname"` ErrorCodes []string `json:"error-codes"` } type SlackResponse struct { Ok bool `json:"ok"` Error string `json:"error"` Warning string `json:"warning"` } const recaptchaServerName = "https://www.google.com/recaptcha/api/siteverify" // Check uses the client ip address and the challenge code func ReCaptchaCheck(remoteip, response string) (r RecaptchaResponse, err error) { recaptchaPrivateKey := os.Getenv("RECAPTCHA_SECRET") fmt.Println("KEY IS : ", recaptchaPrivateKey) resp, err := http.PostForm(recaptchaServerName, url.Values{"secret": {recaptchaPrivateKey}, "remoteip": {remoteip}, "response": {response}}) if err != nil { return } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return } err = json.Unmarshal(body, &r) if err != nil { fmt.Println("Read error: got invalid JSON:") fmt.Println(err) return } return } // Slack invite email func InviteUser(email string) (r SlackResponse, err error) { // SlackResponse slackInviteUrl := fmt.Sprintf("https://%s.slack.com/api/users.admin.invite", os.Getenv("SLACK_TEAM")) resp, err := http.PostForm(slackInviteUrl, url.Values{"email": {email}, "token": {os.Getenv("SLACK_OAUTH_TOKEN")}, "channels": {os.Getenv("SLACK_CHANNEL")}, "set_active": {"true"}}) if err != nil { return } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return } err = json.Unmarshal(body, &r) if err != nil { fmt.Println("Read error: got invalid JSON:") fmt.Println(err) return } return } func getSiteSettings(c *gin.Context) { _siteSettings := &SiteSettings{ ID: 1, RecaptchaSiteKey: os.Getenv("RECAPTCHA_SITE_KEY"), SlackInviteTitle: os.Getenv("SLACK_INVITE_TITLE"), SlackInviteMessage: os.Getenv("SLACK_INVITE_MESSAGE"), BackgroundImage: os.Getenv("BACKGROUND_IMAGE"), HtmlTextColor: os.Getenv("HTML_TEXT_COLOR"), SlackId: os.Getenv("SLACK_TEAM"), } c.JSON(http.StatusOK, _siteSettings) } func postInvite(c *gin.Context) { var err error c.Header("Content-Type", "application/json; charset=utf-8") p := InviteData{} if err = c.BindJSON(&p); err != nil { c.JSON(http.StatusBadRequest, gin.H{ "error": "json decoding : " + err.Error(), "status": http.StatusBadRequest, }) return } resp, err := ReCaptchaCheck(c.ClientIP(), 
p.RecaptchaToken) if err != nil || !resp.Success { c.JSON(http.StatusForbidden, resp) return } respSlack, errSlack := InviteUser(p.Email) if errSlack != nil || !respSlack.Ok { c.JSON(http.StatusForbidden, respSlack) return } // Ok at this point we can submit to the slack api. c.JSON(http.StatusOK, respSlack) return } func Handler(req events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) { if ginLambda == nil { // stdout and stderr are sent to AWS CloudWatch Logs r := gin.Default() // REST API Queries here r.GET("/api/settings", getSiteSettings) r.POST("/api/invite", postInvite) ginLambda = ginadapter.New(r) } // If no name is provided in the HTTP request body, throw an error return ginLambda.Proxy(req) } func main() { if os.Getenv("TYPE") == "WEBSITE" { lambda.Start(WebsitePublic) } else { lambda.Start(Handler) } }
[ "\"RECAPTCHA_SECRET\"", "\"SLACK_TEAM\"", "\"SLACK_OAUTH_TOKEN\"", "\"SLACK_CHANNEL\"", "\"RECAPTCHA_SITE_KEY\"", "\"SLACK_INVITE_TITLE\"", "\"SLACK_INVITE_MESSAGE\"", "\"BACKGROUND_IMAGE\"", "\"HTML_TEXT_COLOR\"", "\"SLACK_TEAM\"", "\"TYPE\"" ]
[]
[ "RECAPTCHA_SECRET", "BACKGROUND_IMAGE", "SLACK_CHANNEL", "RECAPTCHA_SITE_KEY", "SLACK_INVITE_TITLE", "TYPE", "HTML_TEXT_COLOR", "SLACK_INVITE_MESSAGE", "SLACK_OAUTH_TOKEN", "SLACK_TEAM" ]
[]
["RECAPTCHA_SECRET", "BACKGROUND_IMAGE", "SLACK_CHANNEL", "RECAPTCHA_SITE_KEY", "SLACK_INVITE_TITLE", "TYPE", "HTML_TEXT_COLOR", "SLACK_INVITE_MESSAGE", "SLACK_OAUTH_TOKEN", "SLACK_TEAM"]
go
10
0
filename: pkg/hotweb/hotweb.go
content:
package hotweb import ( "fmt" "log" "net/http" "os" "path" "strings" "sync" "text/template" "time" "github.com/gorilla/websocket" "github.com/progrium/hotweb/pkg/esbuild" "github.com/progrium/hotweb/pkg/jsexports" "github.com/progrium/hotweb/pkg/makefs" "github.com/progrium/watcher" "github.com/spf13/afero" ) const ( DefaultWatchInterval = time.Millisecond * 100 ) var ( InternalPath = "/.hotweb" ReloadExport = "noHMR" ) func debug(args ...interface{}) { if os.Getenv("HOTWEB_DEBUG") != "" { log.Println(append([]interface{}{"hotweb:"}, args...)...) } } type Handler struct { Fs *makefs.Fs ServeRoot string Prefix string IgnoreDirs []string WatchInterval time.Duration Upgrader websocket.Upgrader Watcher *watcher.Watcher fileserver http.Handler clients sync.Map } func newWriteWatcher(fs afero.Fs, root string) (*watcher.Watcher, error) { w := watcher.New() w.SetFileSystem(fs) w.SetMaxEvents(1) w.FilterOps(watcher.Write) return w, w.AddRecursive(root) } type Config struct { Filesystem afero.Fs ServeRoot string // abs path in filesystem to serve Prefix string // optional http path prefix JsxFactory string InternalPath string ReloadExport string WatchInterval time.Duration IgnoreDirs []string } func New(cfg Config) *Handler { if cfg.WatchInterval == 0 { cfg.WatchInterval = DefaultWatchInterval } if cfg.Filesystem == nil { cfg.Filesystem = afero.NewOsFs() } if cfg.ServeRoot == "" { cfg.ServeRoot = "/" } // TODO: short term config setup, refactor fs := cfg.Filesystem serveRoot := cfg.ServeRoot prefix := cfg.Prefix if cfg.JsxFactory != "" { esbuild.JsxFactory = cfg.JsxFactory } if cfg.InternalPath != "" { InternalPath = cfg.InternalPath } if cfg.ReloadExport != "" { ReloadExport = cfg.ReloadExport } cache := afero.NewMemMapFs() mfs := makefs.New(fs, cache) var watcher *watcher.Watcher var err error watcher, err = newWriteWatcher(fs, serveRoot) if err != nil { panic(err) } mfs.Register(".js", ".jsx", func(fs afero.Fs, dst, src string) ([]byte, error) { debug("building", dst) return esbuild.BuildFile(fs, src) }) httpFs := afero.NewHttpFs(mfs).Dir(serveRoot) prefix = path.Join("/", prefix) return &Handler{ Fs: mfs, ServeRoot: serveRoot, Prefix: prefix, IgnoreDirs: cfg.IgnoreDirs, Upgrader: websocket.Upgrader{ ReadBufferSize: 1024, WriteBufferSize: 1024, }, Watcher: watcher, WatchInterval: cfg.WatchInterval, fileserver: http.StripPrefix(prefix, http.FileServer(httpFs)), } } func (m *Handler) MatchHTTP(r *http.Request) bool { if strings.HasPrefix(r.URL.Path, path.Join(m.Prefix, InternalPath)) { return true } if strings.HasPrefix(r.URL.Path, m.Prefix) { fsPath := path.Join(m.ServeRoot, strings.TrimPrefix(r.URL.Path, m.Prefix)) if ok, _ := afero.Exists(m.Fs, fsPath); ok { return true } } return false } func (m *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { mux := http.NewServeMux() mux.HandleFunc(path.Join(m.Prefix, InternalPath, ClientFilename), m.handleClientModule) mux.HandleFunc(path.Join(m.Prefix, InternalPath), m.handleWebSocket) if len(m.Prefix) > 1 { mux.HandleFunc(m.Prefix+"/", m.handleFileProxy) } else { mux.HandleFunc(m.Prefix, m.handleFileProxy) } mux.ServeHTTP(w, r) } func (m *Handler) isValidJS(r *http.Request) bool { return !m.isIgnored(r) && isJavaScript(r) && !hiddenFilePrefix(r) } func (m *Handler) isIgnored(r *http.Request) bool { for _, path := range m.IgnoreDirs { if path != "" && strings.HasPrefix(r.URL.Path, path) { return true } } return false } func (m *Handler) handleFileProxy(w http.ResponseWriter, r *http.Request) { if m.isValidJS(r) && r.URL.RawQuery == "" { 
m.handleModuleProxy(w, r) return } m.fileserver.ServeHTTP(w, r) } func (m *Handler) handleClientModule(w http.ResponseWriter, r *http.Request) { tmpl := template.Must(template.New("client").Parse(ClientSourceTmpl)) w.Header().Set("content-type", "text/javascript") tmpl.Execute(w, map[string]interface{}{ "Debug": os.Getenv("HOTWEB_DEBUG") != "", "Endpoint": fmt.Sprintf("ws://%s%s", r.Host, path.Dir(r.URL.Path)), }) } func (m *Handler) handleModuleProxy(w http.ResponseWriter, r *http.Request) { tmpl := template.Must(template.New("proxy").Parse(ModuleProxyTmpl)) fsPath := path.Join(m.ServeRoot, strings.TrimPrefix(r.URL.Path, m.Prefix)) src, err := afero.ReadFile(m.Fs, fsPath) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) debug(err) return } exports, err := jsexports.Exports(src) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) debug(err) return } w.Header().Set("content-type", "text/javascript") tmpl.Execute(w, map[string]interface{}{ "Path": r.URL.Path, "Exports": exports, "Reload": contains(exports, ReloadExport), "ClientPath": path.Join(m.Prefix, InternalPath, ClientFilename), }) } func (m *Handler) handleWebSocket(w http.ResponseWriter, r *http.Request) { conn, err := m.Upgrader.Upgrade(w, r, nil) if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } defer conn.Close() ch := make(chan string) m.clients.Store(ch, struct{}{}) debug("new websocket connection") for filepath := range ch { err := conn.WriteJSON(map[string]interface{}{ "path": path.Join(m.Prefix, strings.TrimPrefix(filepath, m.ServeRoot)), }) if err != nil { m.clients.Delete(ch) if !strings.Contains(err.Error(), "broken pipe") { debug(err) } return } } } func (m *Handler) Watch() error { if m.Watcher == nil { return fmt.Errorf("hotweb: no watcher to watch filesystem") } go func() { for { select { case event := <-m.Watcher.Event: debug("detected change", event.Path) m.clients.Range(func(k, v interface{}) bool { k.(chan string) <- event.Path return true }) case err := <-m.Watcher.Error: debug(err) case <-m.Watcher.Closed: return } } }() return m.Watcher.Start(m.WatchInterval) } func isJavaScript(r *http.Request) bool { return contains([]string{".mjs", ".js", ".jsx"}, path.Ext(r.URL.Path)) } func hiddenFilePrefix(r *http.Request) bool { return path.Base(r.URL.Path)[0] == '_' || path.Base(r.URL.Path)[0] == '.' } func contains(s []string, e string) bool { for _, a := range s { if a == e { return true } } return false }
[ "\"HOTWEB_DEBUG\"", "\"HOTWEB_DEBUG\"" ]
[]
[ "HOTWEB_DEBUG" ]
[]
["HOTWEB_DEBUG"]
go
1
0
filename: pkg/bootstrap/option/convert.go
content:
// Copyright Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package option import ( "encoding/json" "fmt" "net" "os" "strings" cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" auth "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" "github.com/envoyproxy/go-control-plane/pkg/conversion" "github.com/envoyproxy/go-control-plane/pkg/wellknown" "github.com/gogo/protobuf/types" pstruct "github.com/golang/protobuf/ptypes/struct" "github.com/golang/protobuf/ptypes/wrappers" meshAPI "istio.io/api/mesh/v1alpha1" networkingAPI "istio.io/api/networking/v1alpha3" "istio.io/istio/pilot/pkg/model" "istio.io/istio/pilot/pkg/networking/util" authn_model "istio.io/istio/pilot/pkg/security/model" "istio.io/istio/pkg/security" "istio.io/pkg/log" ) // TransportSocket wraps UpstreamTLSContext type TransportSocket struct { Name string `json:"name,omitempty"` TypedConfig *pstruct.Struct `json:"typed_config,omitempty"` } func keepaliveConverter(value *networkingAPI.ConnectionPoolSettings_TCPSettings_TcpKeepalive) convertFunc { return func(*instance) (interface{}, error) { upstreamConnectionOptions := &cluster.UpstreamConnectionOptions{ TcpKeepalive: &core.TcpKeepalive{}, } if value.Probes > 0 { upstreamConnectionOptions.TcpKeepalive.KeepaliveProbes = &wrappers.UInt32Value{Value: value.Probes} } if value.Time != nil && value.Time.Seconds > 0 { upstreamConnectionOptions.TcpKeepalive.KeepaliveTime = &wrappers.UInt32Value{Value: uint32(value.Time.Seconds)} } if value.Interval != nil && value.Interval.Seconds > 0 { upstreamConnectionOptions.TcpKeepalive.KeepaliveInterval = &wrappers.UInt32Value{Value: uint32(value.Interval.Seconds)} } return convertToJSON(upstreamConnectionOptions), nil } } func transportSocketConverter(tls *networkingAPI.ClientTLSSettings, sniName string, metadata *model.BootstrapNodeMetadata, isH2 bool) convertFunc { return func(*instance) (interface{}, error) { tlsContext := tlsContextConvert(tls, sniName, metadata) if tlsContext == nil { return "", nil } if !isH2 { tlsContext.CommonTlsContext.AlpnProtocols = nil } // This double conversion is to encode the typed config and get it out as struct // so that convertToJSON properly encodes the structure. Since this is just for // bootstrap generation this is better than having our custom structs. tlsContextStruct, _ := conversion.MessageToStruct(util.MessageToAny(tlsContext)) transportSocket := &TransportSocket{ Name: wellknown.TransportSocketTls, TypedConfig: tlsContextStruct, } return convertToJSON(transportSocket), nil } } // TODO(ramaraochavali): Unify this code with cluster upstream TLS settings logic. 
func tlsContextConvert(tls *networkingAPI.ClientTLSSettings, sniName string, metadata *model.BootstrapNodeMetadata) *auth.UpstreamTlsContext { tlsContext := &auth.UpstreamTlsContext{ CommonTlsContext: &auth.CommonTlsContext{}, } switch tls.Mode { case networkingAPI.ClientTLSSettings_SIMPLE: res := security.SdsCertificateConfig{ CaCertificatePath: model.GetOrDefault(metadata.TLSClientRootCert, tls.CaCertificates), } tlsContext.CommonTlsContext.ValidationContextType = &auth.CommonTlsContext_CombinedValidationContext{ CombinedValidationContext: &auth.CommonTlsContext_CombinedCertificateValidationContext{ DefaultValidationContext: &auth.CertificateValidationContext{MatchSubjectAltNames: util.StringToExactMatch(tls.SubjectAltNames)}, ValidationContextSdsSecretConfig: authn_model.ConstructSdsSecretConfig(res.GetRootResourceName()), }, } tlsContext.CommonTlsContext.AlpnProtocols = util.ALPNH2Only tlsContext.Sni = tls.Sni case networkingAPI.ClientTLSSettings_MUTUAL: res := security.SdsCertificateConfig{ CertificatePath: model.GetOrDefault(metadata.TLSClientCertChain, tls.ClientCertificate), PrivateKeyPath: model.GetOrDefault(metadata.TLSClientKey, tls.PrivateKey), CaCertificatePath: model.GetOrDefault(metadata.TLSClientRootCert, tls.CaCertificates), } if len(res.GetResourceName()) > 0 { tlsContext.CommonTlsContext.TlsCertificateSdsSecretConfigs = append(tlsContext.CommonTlsContext.TlsCertificateSdsSecretConfigs, authn_model.ConstructSdsSecretConfig(res.GetResourceName())) } tlsContext.CommonTlsContext.ValidationContextType = &auth.CommonTlsContext_CombinedValidationContext{ CombinedValidationContext: &auth.CommonTlsContext_CombinedCertificateValidationContext{ DefaultValidationContext: &auth.CertificateValidationContext{MatchSubjectAltNames: util.StringToExactMatch(tls.SubjectAltNames)}, ValidationContextSdsSecretConfig: authn_model.ConstructSdsSecretConfig(res.GetRootResourceName()), }, } tlsContext.CommonTlsContext.AlpnProtocols = util.ALPNH2Only tlsContext.Sni = tls.Sni case networkingAPI.ClientTLSSettings_ISTIO_MUTUAL: tlsContext.CommonTlsContext.TlsCertificateSdsSecretConfigs = append(tlsContext.CommonTlsContext.TlsCertificateSdsSecretConfigs, authn_model.ConstructSdsSecretConfig(authn_model.SDSDefaultResourceName)) tlsContext.CommonTlsContext.ValidationContextType = &auth.CommonTlsContext_CombinedValidationContext{ CombinedValidationContext: &auth.CommonTlsContext_CombinedCertificateValidationContext{ DefaultValidationContext: &auth.CertificateValidationContext{MatchSubjectAltNames: util.StringToExactMatch(tls.SubjectAltNames)}, ValidationContextSdsSecretConfig: authn_model.ConstructSdsSecretConfig(authn_model.SDSRootResourceName), }, } tlsContext.CommonTlsContext.AlpnProtocols = util.ALPNInMeshH2 tlsContext.Sni = tls.Sni // For ISTIO_MUTUAL if custom SNI is not provided, use the default SNI name. if len(tls.Sni) == 0 { tlsContext.Sni = sniName } default: // No TLS. 
return nil } return tlsContext } func nodeMetadataConverter(metadata *model.BootstrapNodeMetadata, rawMeta map[string]interface{}) convertFunc { return func(*instance) (interface{}, error) { marshalString, err := marshalMetadata(metadata, rawMeta) if err != nil { return "", err } return marshalString, nil } } func sanConverter(sans []string) convertFunc { return func(*instance) (interface{}, error) { matchers := []string{} for _, s := range sans { matchers = append(matchers, fmt.Sprintf(`{"exact":"%s"}`, s)) } return "[" + strings.Join(matchers, ",") + "]", nil } } func addressConverter(addr string) convertFunc { return func(o *instance) (interface{}, error) { host, port, err := net.SplitHostPort(addr) if err != nil { return nil, fmt.Errorf("unable to parse %s address %q: %v", o.name, addr, err) } if host == "$(HOST_IP)" { // Replace host with HOST_IP env var if it is "$(HOST_IP)". // This is to support some tracer setting (Datadog, Zipkin), where "$(HOST_IP)"" is used for address. // Tracer address used to be specified within proxy container params, and thus could be interpreted with pod HOST_IP env var. // Now tracer config is passed in with mesh config volumn at gateway, k8s env var interpretation does not work. // This is to achieve the same interpretation as k8s. hostIPEnv := os.Getenv("HOST_IP") if hostIPEnv != "" { host = hostIPEnv } } return fmt.Sprintf("{\"address\": \"%s\", \"port_value\": %s}", host, port), nil } } func durationConverter(value *types.Duration) convertFunc { return func(*instance) (interface{}, error) { return value.String(), nil } } // openCensusAgentContextConverter returns a converter that returns the list of // distributed trace contexts to propagate with envoy. func openCensusAgentContextConverter(contexts []meshAPI.Tracing_OpenCensusAgent_TraceContext) convertFunc { allContexts := `["TRACE_CONTEXT","GRPC_TRACE_BIN","CLOUD_TRACE_CONTEXT","B3"]` return func(*instance) (interface{}, error) { if len(contexts) == 0 { return allContexts, nil } var envoyContexts []string for _, c := range contexts { switch c { // Ignore UNSPECIFIED case meshAPI.Tracing_OpenCensusAgent_W3C_TRACE_CONTEXT: envoyContexts = append(envoyContexts, "TRACE_CONTEXT") case meshAPI.Tracing_OpenCensusAgent_GRPC_BIN: envoyContexts = append(envoyContexts, "GRPC_TRACE_BIN") case meshAPI.Tracing_OpenCensusAgent_CLOUD_TRACE_CONTEXT: envoyContexts = append(envoyContexts, "CLOUD_TRACE_CONTEXT") case meshAPI.Tracing_OpenCensusAgent_B3: envoyContexts = append(envoyContexts, "B3") } } return convertToJSON(envoyContexts), nil } } func convertToJSON(v interface{}) string { if v == nil { return "" } b, err := json.Marshal(v) if err != nil { log.Error(err.Error()) return "" } return string(b) } // marshalMetadata combines type metadata and untyped metadata and marshals to json // This allows passing arbitrary metadata to Envoy, while still supported typed metadata for known types func marshalMetadata(metadata *model.BootstrapNodeMetadata, rawMeta map[string]interface{}) (string, error) { b, err := json.Marshal(metadata) if err != nil { return "", err } var output map[string]interface{} if err := json.Unmarshal(b, &output); err != nil { return "", err } // Add all untyped metadata for k, v := range rawMeta { // Do not override fields, as we may have made modifications to the type metadata // This means we will only add "unknown" fields here if _, f := output[k]; !f { output[k] = v } } res, err := json.Marshal(output) if err != nil { return "", err } return string(res), nil }
[ "\"HOST_IP\"" ]
[]
[ "HOST_IP" ]
[]
["HOST_IP"]
go
1
0
filename: microservices/transportation/main.go
content:
package main import ( "fmt" "log" "os" "github.com/Risath18/xplored-transportation/handlers" "github.com/Risath18/xplored-transportation/routes" "github.com/gofiber/fiber/v2" "github.com/joho/godotenv" _ "github.com/lib/pq" ) func main() { app := fiber.New() err := godotenv.Load("../../.env") if err != nil { log.Fatal("Error loading .env file") } handlers.InitAirportCodes() //Configure all Routes routes.RegisterRoutes(app) //Formatting port PLANNER_PORT := os.Getenv("TRANSPORTATION_PORT") port := fmt.Sprintf(":%s", PLANNER_PORT) app.Listen(port) }
[ "\"TRANSPORTATION_PORT\"" ]
[]
[ "TRANSPORTATION_PORT" ]
[]
["TRANSPORTATION_PORT"]
go
1
0
filename: mod/github.com/hashicorp/[email protected]/physical/gcs/gcs_test.go
content:
package gcs import ( "context" "fmt" "math/rand" "os" "testing" "time" "cloud.google.com/go/storage" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/physical" "google.golang.org/api/googleapi" ) func testCleanup(t testing.TB, client *storage.Client, bucket string) { t.Helper() ctx := context.Background() if err := client.Bucket(bucket).Delete(ctx); err != nil { if terr, ok := err.(*googleapi.Error); !ok || terr.Code != 404 { t.Fatal(err) } } } func TestBackend(t *testing.T) { projectID := os.Getenv("GOOGLE_PROJECT_ID") if projectID == "" { t.Skip("GOOGLE_PROJECT_ID not set") } r := rand.New(rand.NewSource(time.Now().UnixNano())).Int() bucket := fmt.Sprintf("vault-gcs-testacc-%d", r) ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { t.Fatal(err) } testCleanup(t, client, bucket) defer testCleanup(t, client, bucket) b := client.Bucket(bucket) if err := b.Create(context.Background(), projectID, nil); err != nil { t.Fatal(err) } backend, err := NewBackend(map[string]string{ "bucket": bucket, "ha_enabled": "false", }, logging.NewVaultLogger(log.Trace)) if err != nil { t.Fatal(err) } physical.ExerciseBackend(t, backend) physical.ExerciseBackend_ListPrefix(t, backend) }
[ "\"GOOGLE_PROJECT_ID\"" ]
[]
[ "GOOGLE_PROJECT_ID" ]
[]
["GOOGLE_PROJECT_ID"]
go
1
0
filename: utils/build_swift/build_swift/driver_arguments.py
content:
# This source file is part of the Swift.org open source project # # Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors # Licensed under Apache License v2.0 with Runtime Library Exception # # See https://swift.org/LICENSE.txt for license information # See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors from __future__ import absolute_import, unicode_literals import multiprocessing import os import android.adb.commands from swift_build_support.swift_build_support import targets from swift_build_support.swift_build_support.targets import \ StdlibDeploymentTarget from . import argparse from . import defaults __all__ = [ 'create_argument_parser', ] class _ApplyDefaultsArgumentParser(argparse.ArgumentParser): """Wrapper class around the default ArgumentParser that allows for post-processing the parsed argument namespace to apply default argument transformations. """ def __init__(self, apply_defaults=None, *args, **kwargs): self._apply_defaults = apply_defaults super(_ApplyDefaultsArgumentParser, self).__init__(*args, **kwargs) def parse_known_args(self, args=None, namespace=None): args, argv = super(_ApplyDefaultsArgumentParser, self)\ .parse_known_args(args, namespace) self._apply_defaults(args) return args, argv def _apply_default_arguments(args): """Preprocess argument namespace to apply default behaviors. """ # Build cmark if any cmark-related options were specified. if (args.cmark_build_variant is not None): args.build_cmark = True # Build LLDB if any LLDB-related options were specified. if args.lldb_build_variant is not None or \ args.lldb_assertions is not None or \ args.lldb_build_with_xcode is not None: args.build_lldb = True # Set the default build variant. if args.build_variant is None: args.build_variant = 'Debug' if args.llvm_build_variant is None: args.llvm_build_variant = args.build_variant if args.swift_build_variant is None: args.swift_build_variant = args.build_variant if args.swift_stdlib_build_variant is None: args.swift_stdlib_build_variant = args.build_variant if args.cmark_build_variant is None: args.cmark_build_variant = args.swift_build_variant if args.lldb_build_variant is None: args.lldb_build_variant = args.build_variant if args.lldb_build_with_xcode is None: args.lldb_build_with_xcode = '0' if args.foundation_build_variant is None: args.foundation_build_variant = args.build_variant if args.libdispatch_build_variant is None: args.libdispatch_build_variant = args.build_variant if args.libicu_build_variant is None: args.libicu_build_variant = args.build_variant # Assertions are enabled by default. if args.assertions is None: args.assertions = True # Propagate the default assertions setting. if args.cmark_assertions is None: args.cmark_assertions = args.assertions if args.llvm_assertions is None: args.llvm_assertions = args.assertions if args.swift_assertions is None: args.swift_assertions = args.assertions if args.swift_stdlib_assertions is None: args.swift_stdlib_assertions = args.assertions if args.llbuild_assertions is None: args.llbuild_assertions = args.assertions if args.lldb_assertions is None: args.lldb_assertions = args.assertions # Set the default CMake generator. if args.cmake_generator is None: args.cmake_generator = 'Ninja' # --ios-all etc are not supported by open-source Swift. 
if args.ios_all: raise ValueError('error: --ios-all is unavailable in open-source ' 'Swift.\nUse --ios to skip iOS device tests.') if args.tvos_all: raise ValueError('error: --tvos-all is unavailable in open-source ' 'Swift.\nUse --tvos to skip tvOS device tests.') if args.watchos_all: raise ValueError('error: --watchos-all is unavailable in open-source ' 'Swift.\nUse --watchos to skip watchOS device tests.') # --skip-{ios,tvos,watchos} or --skip-build-{ios,tvos,watchos} are # merely shorthands for --skip-build-{**os}-{device,simulator} if not args.ios or not args.build_ios: args.build_ios_device = False args.build_ios_simulator = False if not args.tvos or not args.build_tvos: args.build_tvos_device = False args.build_tvos_simulator = False if not args.watchos or not args.build_watchos: args.build_watchos_device = False args.build_watchos_simulator = False if not args.android or not args.build_android: args.build_android = False # --test-paths implies --test and/or --validation-test # depending on what directories/files have been specified. if args.test_paths: for path in args.test_paths: if path.startswith('test'): args.test = True elif path.startswith('validation-test'): args.test = True args.validation_test = True # --validation-test implies --test. if args.validation_test: args.test = True # --test-optimized implies --test. if args.test_optimized: args.test = True # --test-optimize-size implies --test. if args.test_optimize_for_size: args.test = True # --test-optimize-none-with-implicit-dynamic implies --test. if args.test_optimize_none_with_implicit_dynamic: args.test = True # If none of tests specified skip swift stdlib test on all platforms if not args.test and not args.validation_test and not args.long_test: args.test_linux = False args.test_freebsd = False args.test_cygwin = False args.test_osx = False args.test_ios = False args.test_tvos = False args.test_watchos = False args.test_android = False args.test_swiftpm = False args.test_swiftsyntax = False args.test_indexstoredb = False args.test_sourcekitlsp = False args.test_skstresstester = False args.test_swiftevolve = False args.test_toolchainbenchmarks = False # --skip-test-ios is merely a shorthand for host and simulator tests. if not args.test_ios: args.test_ios_host = False args.test_ios_simulator = False # --skip-test-tvos is merely a shorthand for host and simulator tests. if not args.test_tvos: args.test_tvos_host = False args.test_tvos_simulator = False # --skip-test-watchos is merely a shorthand for host and simulator # --tests. 
if not args.test_watchos: args.test_watchos_host = False args.test_watchos_simulator = False # --skip-build-{ios,tvos,watchos}-{device,simulator} implies # --skip-test-{ios,tvos,watchos}-{host,simulator} if not args.build_ios_device: args.test_ios_host = False if not args.build_ios_simulator: args.test_ios_simulator = False if not args.build_tvos_device: args.test_tvos_host = False if not args.build_tvos_simulator: args.test_tvos_simulator = False if not args.build_watchos_device: args.test_watchos_host = False if not args.build_watchos_simulator: args.test_watchos_simulator = False if not args.build_android: args.test_android = False args.test_android_host = False if not args.test_android: args.test_android_host = False if not args.host_test: args.test_ios_host = False args.test_tvos_host = False args.test_watchos_host = False args.test_android_host = False def create_argument_parser(): """Return a configured argument parser.""" # NOTE: USAGE, DESCRIPTION and EPILOG are defined at the bottom of the file parser = _ApplyDefaultsArgumentParser( apply_defaults=_apply_default_arguments, formatter_class=argparse.RawDescriptionHelpFormatter, usage=USAGE, description=DESCRIPTION, epilog=EPILOG) builder = parser.to_builder() # Prepare DSL functions option = builder.add_option set_defaults = builder.set_defaults in_group = builder.in_group mutually_exclusive_group = builder.mutually_exclusive_group # Prepare DSL actions append = builder.actions.append store = builder.actions.store store_true = builder.actions.store_true store_false = builder.actions.store_false store_int = builder.actions.store_int store_path = builder.actions.store_path toggle_true = builder.actions.toggle_true toggle_false = builder.actions.toggle_false unsupported = builder.actions.unsupported # ------------------------------------------------------------------------- # Top-level options option(['-n', '--dry-run'], store_true, help='print the commands that would be executed, but do not ' 'execute them') option('--dump-config', toggle_true, help='instead of building, write JSON to stdout containing ' 'various values used to build in this configuration') option('--legacy-impl', store_true('legacy_impl'), help='use legacy implementation') option('--build-runtime-with-host-compiler', toggle_true, help='Use the host compiler, not the self-built one to compile the ' 'Swift runtime') option(['-i', '--ios'], store_true, help='also build for iOS, but disallow tests that require an iOS ' 'device') option(['-I', '--ios-all'], store_true('ios_all'), help='also build for iOS, and allow all iOS tests') option(['--skip-local-build'], toggle_true('skip_local_build'), help='set to skip building for the local platform') option('--skip-ios', store_false('ios'), help='set to skip everything iOS-related') option('--tvos', toggle_true, help='also build for tvOS, but disallow tests that require a tvos ' 'device') option('--tvos-all', toggle_true('tvos_all'), help='also build for tvOS, and allow all tvOS tests') option('--skip-tvos', store_false('tvos'), help='set to skip everything tvOS-related') option('--watchos', toggle_true, help='also build for watchOS, but disallow tests that require an ' 'watchOS device') option('--watchos-all', toggle_true('watchos_all'), help='also build for Apple watchOS, and allow all Apple watchOS ' 'tests') option('--skip-watchos', store_false('watchos'), help='set to skip everything watchOS-related') option('--maccatalyst', toggle_true, help='Enable building Swift with macCatalyst support') 
option('--maccatalyst-ios-tests', toggle_true, help='When building for macCatalyst run tests with iOS-like ' 'target triple') option('--android', toggle_true, help='also build for Android') option('--swift-analyze-code-coverage', store, choices=['false', 'not-merged', 'merged'], # so CMake can see the inert mode as a false value default=defaults.SWIFT_ANALYZE_CODE_COVERAGE, help='enable code coverage analysis in Swift (false, not-merged, ' 'merged).') option('--build-subdir', store, metavar='PATH', help='name of the directory under $SWIFT_BUILD_ROOT where the ' 'build products will be placed') option('--install-prefix', store_path, default=targets.install_prefix(), help='The installation prefix. This is where built Swift products ' '(like bin, lib, and include) will be installed.') option('--install-symroot', store_path, help='the path to install debug symbols into') option('--install-destdir', store_path, help='the path to use as the filesystem root for the installation') option(['-j', '--jobs'], store_int('build_jobs'), default=multiprocessing.cpu_count(), help='the number of parallel build jobs to use') option('--darwin-xcrun-toolchain', store, help='the name of the toolchain to use on Darwin') option('--cmake', store_path(executable=True), help='the path to a CMake executable that will be used to build ' 'Swift') option('--show-sdks', toggle_true, help='print installed Xcode and SDK versions') option('--extra-swift-args', append, help='Pass through extra flags to swift in the form of a CMake ' 'list "module_regexp;flag". Can be called multiple times to ' 'add multiple such module_regexp flag pairs. All semicolons ' 'in flags must be escaped with a "\\"') option('--host-cc', store_path(executable=True), help='the absolute path to CC, the "clang" compiler for the host ' 'platform. Default is auto detected.') option('--host-cxx', store_path(executable=True), help='the absolute path to CXX, the "clang++" compiler for the ' 'host platform. Default is auto detected.') option('--cmake-c-launcher', store_path(executable=True), default=os.environ.get('C_COMPILER_LAUNCHER', None), help='the absolute path to set CMAKE_C_COMPILER_LAUNCHER') option('--cmake-cxx-launcher', store_path(executable=True), default=os.environ.get('CXX_COMPILER_LAUNCHER', None), help='the absolute path to set CMAKE_CXX_COMPILER_LAUNCHER') option('--host-lipo', store_path(executable=True), help='the absolute path to lipo. Default is auto detected.') option('--host-libtool', store_path(executable=True), help='the absolute path to libtool. Default is auto detected.') option('--distcc', toggle_true, default=os.environ.get('USE_DISTCC') == '1', help='use distcc in pump mode') option('--enable-asan', toggle_true, help='enable Address Sanitizer') option('--enable-ubsan', toggle_true, help='enable Undefined Behavior Sanitizer') option('--enable-tsan', toggle_true, help='enable Thread Sanitizer for swift tools') option('--enable-tsan-runtime', toggle_true, help='enable Thread Sanitizer on the swift runtime') option('--enable-lsan', toggle_true, help='enable Leak Sanitizer for swift tools') option('--enable-sanitize-coverage', toggle_true, help='enable sanitizer coverage for swift tools. 
Necessary for ' 'fuzzing swiftc') option('--compiler-vendor', store, choices=['none', 'apple'], default=defaults.COMPILER_VENDOR, help='Compiler vendor name') option('--clang-compiler-version', store, type=argparse.ClangVersionType(), metavar='MAJOR.MINOR.PATCH', help='string that indicates a compiler version for Clang') option('--clang-user-visible-version', store, type=argparse.ClangVersionType(), default=defaults.CLANG_USER_VISIBLE_VERSION, metavar='MAJOR.MINOR.PATCH', help='User-visible version of the embedded Clang and LLVM ' 'compilers') option('--swift-compiler-version', store, type=argparse.SwiftVersionType(), metavar='MAJOR.MINOR', help='string that indicates a compiler version for Swift') option('--swift-user-visible-version', store, type=argparse.SwiftVersionType(), default=defaults.SWIFT_USER_VISIBLE_VERSION, metavar='MAJOR.MINOR', help='User-visible version of the embedded Swift compiler') option('--darwin-deployment-version-osx', store, default=defaults.DARWIN_DEPLOYMENT_VERSION_OSX, metavar='MAJOR.MINOR', help='minimum deployment target version for OS X') option('--darwin-deployment-version-ios', store, default=defaults.DARWIN_DEPLOYMENT_VERSION_IOS, metavar='MAJOR.MINOR', help='minimum deployment target version for iOS') option('--darwin-deployment-version-tvos', store, default=defaults.DARWIN_DEPLOYMENT_VERSION_TVOS, metavar='MAJOR.MINOR', help='minimum deployment target version for tvOS') option('--darwin-deployment-version-watchos', store, default=defaults.DARWIN_DEPLOYMENT_VERSION_WATCHOS, metavar='MAJOR.MINOR', help='minimum deployment target version for watchOS') option('--extra-cmake-options', append, type=argparse.ShellSplitType(), help='Pass through extra options to CMake in the form of comma ' 'separated options "-DCMAKE_VAR1=YES,-DCMAKE_VAR2=/tmp". Can ' 'be called multiple times to add multiple such options.') option('--build-args', store, type=argparse.ShellSplitType(), default=[], help='arguments to the build tool. This would be prepended to the ' 'default argument that is "-j8" when CMake generator is ' '"Ninja".') option('--verbose-build', toggle_true, help='print the commands executed during the build') option('--lto', store('lto_type'), choices=['thin', 'full'], const='full', default=None, metavar='LTO_TYPE', help='use lto optimization on llvm/swift tools. This does not ' 'imply using lto on the swift standard library or runtime. ' 'Options: thin, full. 
If no optional arg is provided, full is ' 'chosen by default') option('--clang-profile-instr-use', store_path, help='profile file to use for clang PGO') option('--llvm-max-parallel-lto-link-jobs', store_int, default=defaults.LLVM_MAX_PARALLEL_LTO_LINK_JOBS, metavar='COUNT', help='the maximum number of parallel link jobs to use when ' 'compiling llvm') option('--swift-tools-max-parallel-lto-link-jobs', store_int, default=defaults.SWIFT_MAX_PARALLEL_LTO_LINK_JOBS, metavar='COUNT', help='the maximum number of parallel link jobs to use when ' 'compiling swift tools.') option('--disable-guaranteed-normal-arguments', store_true, help='Disable guaranteed normal arguments') option('--enable-stdlibcore-exclusivity-checking', store_true, help='Enable exclusivity checking in stdlibCore') option('--force-optimized-typechecker', store_true, help='Force the type checker to be built with ' 'optimization') option('--lit-args', store, default='-sv', metavar='LITARGS', help='lit args to use when testing') option('--coverage-db', store_path, help='coverage database to use when prioritizing testing') # ------------------------------------------------------------------------- in_group('Host and cross-compilation targets') option('--host-target', store, default=StdlibDeploymentTarget.host_target().name, help='The host target. LLVM, Clang, and Swift will be built for ' 'this target. The built LLVM and Clang will be used to ' 'compile Swift for the cross-compilation targets.') option('--cross-compile-hosts', append, type=argparse.ShellSplitType(), default=[], help='A space separated list of targets to cross-compile host ' 'Swift tools for. Can be used multiple times.') option('--stdlib-deployment-targets', store, type=argparse.ShellSplitType(), default=None, help='The targets to compile or cross-compile the Swift standard ' 'library for. %(default)s by default.' ' Comma separated list: {}'.format( ' '.join(StdlibDeploymentTarget.get_target_names()))) option('--build-stdlib-deployment-targets', store, type=argparse.ShellSplitType(), default=['all'], help='A space-separated list that filters which of the configured ' 'targets to build the Swift standard library for, or "all".') option('--swift-darwin-supported-archs', store, metavar='ARCHS', help='Semicolon-separated list of architectures to configure on ' 'Darwin platforms. If left empty all default architectures ' 'are configured.') option('--swift-darwin-module-archs', store, metavar='ARCHS', help='Semicolon-separated list of architectures to configure Swift ' 'module-only targets on Darwin platforms. 
These targets are ' 'in addition to the full library targets.') # ------------------------------------------------------------------------- in_group('Options to select projects') option(['-l', '--lldb'], store_true('build_lldb'), help='build LLDB') option(['-b', '--llbuild'], store_true('build_llbuild'), help='build llbuild') option(['--libcxx'], store_true('build_libcxx'), help='build libcxx') option(['-p', '--swiftpm'], toggle_true('build_swiftpm'), help='build swiftpm') option(['--install-swiftpm'], toggle_true('install_swiftpm'), help='install swiftpm') option(['--swiftsyntax'], store_true('build_swiftsyntax'), help='build swiftSyntax') option(['--skstresstester'], store_true('build_skstresstester'), help='build the SourceKit stress tester') option(['--swiftevolve'], store_true('build_swiftevolve'), help='build the swift-evolve tool') option(['--indexstore-db'], toggle_true('build_indexstoredb'), help='build IndexStoreDB') option(['--sourcekit-lsp'], toggle_true('build_sourcekitlsp'), help='build SourceKitLSP') option('--install-swiftsyntax', toggle_true('install_swiftsyntax'), help='install SwiftSyntax') option('--swiftsyntax-verify-generated-files', toggle_true('swiftsyntax_verify_generated_files'), help='set to verify that the generated files in the source tree ' 'match the ones that would be generated from current master') option(['--install-pythonkit'], toggle_true('install_pythonkit'), help='install PythonKit') option(['--install-sourcekit-lsp'], toggle_true('install_sourcekitlsp'), help='install SourceKitLSP') option(['--install-skstresstester'], toggle_true('install_skstresstester'), help='install the SourceKit stress tester') option(['--install-swiftevolve'], toggle_true('install_swiftevolve'), help='install SwiftEvolve') option(['--toolchain-benchmarks'], toggle_true('build_toolchainbenchmarks'), help='build Swift Benchmarks using swiftpm against the just built ' 'toolchain') option('--xctest', toggle_true('build_xctest'), help='build xctest') option('--foundation', toggle_true('build_foundation'), help='build foundation') option('--libdispatch', toggle_true('build_libdispatch'), help='build libdispatch') option('--libicu', toggle_true('build_libicu'), help='build libicu') option('--playgroundsupport', toggle_true('build_playgroundsupport'), help='build PlaygroundSupport') option('--install-playgroundsupport', store_true('install_playgroundsupport'), help='install playground support') option('--pythonkit', store_true('build_pythonkit'), help='build PythonKit') option('--tensorflow-swift-apis', store_true('build_tensorflow_swift_apis'), help='build TensorFlow Swift APIs') option('--install-tensorflow-swift-apis', store_true('install_tensorflow_swift_apis'), help='install TensorFlow Swift APIs') option('--build-ninja', toggle_true, help='build the Ninja tool') option(['--build-libparser-only'], store_true('build_libparser_only'), help='build only libParser for SwiftSyntax') option('--skip-build-clang-tools-extra', toggle_false('build_clang_tools_extra'), default=True, help='skip building clang-tools-extra as part of llvm') # ------------------------------------------------------------------------- in_group('Extra actions to perform before or in addition to building') option(['-c', '--clean'], store_true, help='do a clean build') option('--export-compile-commands', toggle_true, help='generate compilation databases in addition to building') option('--symbols-package', store_path, help='if provided, an archive of the symbols directory will be ' 'generated at this path') # 
------------------------------------------------------------------------- in_group('Build variant') with mutually_exclusive_group(): set_defaults(build_variant='Debug') option(['-d', '--debug'], store('build_variant'), const='Debug', help='build the Debug variant of everything (LLVM, Clang, ' 'Swift host tools, target Swift standard libraries, LLDB) ' '(default is %(default)s)') option(['-r', '--release-debuginfo'], store('build_variant'), const='RelWithDebInfo', help='build the RelWithDebInfo variant of everything (default ' 'is %(default)s)') option(['-R', '--release'], store('build_variant'), const='Release', help='build the Release variant of everything (default is ' '%(default)s)') # ------------------------------------------------------------------------- in_group('Override build variant for a specific project') option('--debug-llvm', store('llvm_build_variant'), const='Debug', help='build the Debug variant of LLVM') option('--debug-swift', store('swift_build_variant'), const='Debug', help='build the Debug variant of Swift host tools') option('--debug-swift-stdlib', store('swift_stdlib_build_variant'), const='Debug', help='build the Debug variant of the Swift standard library and ' ' SDK overlay') option('--debug-lldb', store('lldb_build_variant'), const='Debug', help='build the Debug variant of LLDB') option('--lldb-build-with-xcode', store('lldb_build_with_xcode'), const='1', help='build LLDB using xcodebuild, if possible') option('--lldb-build-with-cmake', store('lldb_build_with_xcode'), const='0', help='build LLDB using CMake') option('--debug-cmark', store('cmark_build_variant'), const='Debug', help='build the Debug variant of CommonMark') option('--debug-foundation', store('foundation_build_variant'), const='Debug', help='build the Debug variant of Foundation') option('--debug-libdispatch', store('libdispatch_build_variant'), const='Debug', help='build the Debug variant of libdispatch') option('--debug-libicu', store('libicu_build_variant'), const='Debug', help='build the Debug variant of libicu') # ------------------------------------------------------------------------- # Assertions group with mutually_exclusive_group(): set_defaults(assertions=True) # TODO: Convert to store_true option(['-a', '--assertions'], store, const=True, help='enable assertions in all projects') # TODO: Convert to store_false option(['-A', '--no-assertions'], store('assertions'), const=False, help='disable assertions in all projects') # ------------------------------------------------------------------------- in_group('Control assertions in a specific project') option('--cmark-assertions', store, const=True, help='enable assertions in CommonMark') option('--llvm-assertions', store, const=True, help='enable assertions in LLVM') option('--no-llvm-assertions', store('llvm_assertions'), const=False, help='disable assertions in LLVM') option('--swift-assertions', store, const=True, help='enable assertions in Swift') option('--no-swift-assertions', store('swift_assertions'), const=False, help='disable assertions in Swift') option('--swift-stdlib-assertions', store, const=True, help='enable assertions in the Swift standard library') option('--no-swift-stdlib-assertions', store('swift_stdlib_assertions'), const=False, help='disable assertions in the Swift standard library') option('--lldb-assertions', store, const=True, help='enable assertions in LLDB') option('--no-lldb-assertions', store('lldb_assertions'), const=False, help='disable assertions in LLDB') option('--llbuild-assertions', store, const=True, 
help='enable assertions in llbuild') option('--no-llbuild-assertions', store('llbuild_assertions'), const=False, help='disable assertions in llbuild') # ------------------------------------------------------------------------- in_group('Select the CMake generator') set_defaults(cmake_generator=defaults.CMAKE_GENERATOR) option(['-e', '--eclipse'], store('cmake_generator'), const='Eclipse CDT4 - Ninja', help="use CMake's Eclipse generator (%(default)s by default)") option(['-m', '--make'], store('cmake_generator'), const='Unix Makefiles', help="use CMake's Makefile generator (%(default)s by default)") option(['-x', '--xcode'], store('cmake_generator'), const='Xcode', help="use CMake's Xcode generator (%(default)s by default)") # ------------------------------------------------------------------------- in_group('Run tests') # NOTE: We can't merge -t and --test, because nargs='?' makes # `-ti` to be treated as `-t=i`. # FIXME: Convert to store_true action option('-t', store('test', const=True), help='test Swift after building') option('--test', toggle_true, help='test Swift after building') option('-T', store('validation_test', const=True), help='run the validation test suite (implies --test)') option('--validation-test', toggle_true, help='run the validation test suite (implies --test)') # FIXME: Convert to store_true action option('-o', store('test_optimized', const=True), help='run the test suite in optimized mode too (implies --test)') option('--test-optimized', toggle_true, help='run the test suite in optimized mode too (implies --test)') # FIXME: Convert to store_true action option('-s', store('test_optimize_for_size', const=True), help='run the test suite in optimize for size mode too ' '(implies --test)') option('--test-optimize-for-size', toggle_true, help='run the test suite in optimize for size mode too ' '(implies --test)') # FIXME: Convert to store_true action option('-y', store('test_optimize_none_with_implicit_dynamic', const=True), help='run the test suite in optimize none with implicit dynamic' ' mode too (implies --test)') option('--test-optimize-none-with-implicit-dynamic', toggle_true, help='run the test suite in optimize none with implicit dynamic' 'mode too (implies --test)') option('--long-test', toggle_true, help='run the long test suite') option('--stress-test', toggle_true, help='run the stress test suite') option('--host-test', toggle_true, help='run executable tests on host devices (such as iOS or tvOS)') option('--only-executable-test', toggle_true, help='Only run executable tests. Does nothing if host-test is not ' 'allowed') option('--only-non-executable-test', toggle_true, help='Only run non-executable tests.') option('--test-paths', append, type=argparse.ShellSplitType(), help='run tests located in specific directories and/or files ' '(implies --test and/or --validation-test)') option(['-B', '--benchmark'], store_true, help='run the Swift Benchmark Suite after building') option('--benchmark-num-o-iterations', store_int, default=3, help='if the Swift Benchmark Suite is run after building, run N ' 'iterations with -O') option('--benchmark-num-onone-iterations', store_int, default=3, help='if the Swift Benchmark Suite is run after building, run N ' 'iterations with -Onone') # We want to run the TSan (compiler-rt) libdispatch tests on Linux, where # libdispatch is just another library and not available by default. To do # so we build Clang/LLVM/libdispatch and use it to compile/run the TSan # libdispatch tests. 
option('--tsan-libdispatch-test', toggle_true, help='Builds a new toolchain including the libdispatch C library. ' 'Then re-builds the TSan runtime (compiler-rt) using this ' 'freshly-built Clang and runs the TSan libdispatch tests.') option('--skip-test-osx', toggle_false('test_osx'), help='skip testing Swift stdlibs for Mac OS X') option('--skip-test-linux', toggle_false('test_linux'), help='skip testing Swift stdlibs for Linux') option('--skip-test-freebsd', toggle_false('test_freebsd'), help='skip testing Swift stdlibs for FreeBSD') option('--skip-test-cygwin', toggle_false('test_cygwin'), help='skip testing Swift stdlibs for Cygwin') option('--test-pythonkit', toggle_true('test_pythonkit'), help='skip testing PythonKit') # ------------------------------------------------------------------------- in_group('Run build') option('--build-swift-dynamic-stdlib', toggle_true, default=True, help='build dynamic variants of the Swift standard library') option('--build-swift-static-stdlib', toggle_true, help='build static variants of the Swift standard library') option('--build-swift-dynamic-sdk-overlay', toggle_true, default=True, help='build dynamic variants of the Swift SDK overlay') option('--build-swift-static-sdk-overlay', toggle_true, help='build static variants of the Swift SDK overlay') option('--build-swift-stdlib-unittest-extra', toggle_true, help='Build optional StdlibUnittest components') option(['-S', '--skip-build'], store_true, help='generate build directory only without building') option('--skip-build-linux', toggle_false('build_linux'), help='skip building Swift stdlibs for Linux') option('--skip-build-freebsd', toggle_false('build_freebsd'), help='skip building Swift stdlibs for FreeBSD') option('--skip-build-cygwin', toggle_false('build_cygwin'), help='skip building Swift stdlibs for Cygwin') option('--skip-build-osx', toggle_false('build_osx'), help='skip building Swift stdlibs for MacOSX') option('--skip-build-ios', toggle_false('build_ios'), help='skip building Swift stdlibs for iOS') option('--skip-build-ios-device', toggle_false('build_ios_device'), help='skip building Swift stdlibs for iOS devices ' '(i.e. build simulators only)') option('--skip-build-ios-simulator', toggle_false('build_ios_simulator'), help='skip building Swift stdlibs for iOS simulator ' '(i.e. build devices only)') option('--skip-build-tvos', toggle_false('build_tvos'), help='skip building Swift stdlibs for tvOS') option('--skip-build-tvos-device', toggle_false('build_tvos_device'), help='skip building Swift stdlibs for tvOS devices ' '(i.e. build simulators only)') option('--skip-build-tvos-simulator', toggle_false('build_tvos_simulator'), help='skip building Swift stdlibs for tvOS simulator ' '(i.e. build devices only)') option('--skip-build-watchos', toggle_false('build_watchos'), help='skip building Swift stdlibs for watchOS') option('--skip-build-watchos-device', toggle_false('build_watchos_device'), help='skip building Swift stdlibs for watchOS devices ' '(i.e. build simulators only)') option('--skip-build-watchos-simulator', toggle_false('build_watchos_simulator'), help='skip building Swift stdlibs for watchOS simulator ' '(i.e. 
build devices only)')
option('--skip-build-android', toggle_false('build_android'), help='skip building Swift stdlibs for Android')
option('--skip-build-benchmarks', toggle_false('build_benchmarks'), help='skip building Swift Benchmark Suite')
option('--build-external-benchmarks', toggle_true, help='build external benchmarks in addition to the Swift Benchmark Suite')
# -------------------------------------------------------------------------
in_group('Skip testing specified targets')
option('--skip-test-ios', toggle_false('test_ios'), help='skip testing all iOS targets. Equivalent to specifying both ' '--skip-test-ios-simulator and --skip-test-ios-host')
option('--skip-test-ios-simulator', toggle_false('test_ios_simulator'), help='skip testing iOS simulator targets')
option('--skip-test-ios-32bit-simulator', toggle_false('test_ios_32bit_simulator'), help='skip testing iOS 32-bit simulator targets')
option('--skip-test-ios-host', toggle_false('test_ios_host'), help='skip testing iOS device targets on the host machine (the ' 'phone itself)')
option('--skip-test-tvos', toggle_false('test_tvos'), help='skip testing all tvOS targets. Equivalent to specifying both ' '--skip-test-tvos-simulator and --skip-test-tvos-host')
option('--skip-test-tvos-simulator', toggle_false('test_tvos_simulator'), help='skip testing tvOS simulator targets')
option('--skip-test-tvos-host', toggle_false('test_tvos_host'), help='skip testing tvOS device targets on the host machine (the ' 'TV itself)')
option('--skip-test-watchos', toggle_false('test_watchos'), help='skip testing all watchOS targets. Equivalent to specifying both ' '--skip-test-watchos-simulator and --skip-test-watchos-host')
option('--skip-test-watchos-simulator', toggle_false('test_watchos_simulator'), help='skip testing watchOS simulator targets')
option('--skip-test-watchos-host', toggle_false('test_watchos_host'), help='skip testing watchOS device targets on the host machine (the ' 'watch itself)')
option('--skip-test-android', toggle_false('test_android'), help='skip testing all Android targets.')
option('--skip-test-android-host', toggle_false('test_android_host'), help='skip testing Android device targets on the host machine (the ' 'phone itself)')
option('--skip-test-swiftpm', toggle_false('test_swiftpm'), help='skip testing swiftpm')
option('--skip-test-swiftsyntax', toggle_false('test_swiftsyntax'), help='skip testing SwiftSyntax')
option('--skip-test-indexstore-db', toggle_false('test_indexstoredb'), help='skip testing indexstore-db')
option('--skip-test-sourcekit-lsp', toggle_false('test_sourcekitlsp'), help='skip testing sourcekit-lsp')
option('--skip-test-playgroundsupport', toggle_false('test_playgroundsupport'), help='skip testing PlaygroundSupport')
option('--skip-test-skstresstester', toggle_false('test_skstresstester'), help='skip testing the SourceKit stress tester')
option('--skip-test-swiftevolve', toggle_false('test_swiftevolve'), help='skip testing SwiftEvolve')
option('--skip-test-toolchain-benchmarks', toggle_false('test_toolchainbenchmarks'), help='skip testing toolchain benchmarks')
# -------------------------------------------------------------------------
in_group('Build settings specific for LLVM')
option('--llvm-targets-to-build', store, default='X86;ARM;AArch64;PowerPC;SystemZ;Mips', help='LLVM target generators to build')
# -------------------------------------------------------------------------
in_group('Build settings for Android')
option('--android-ndk', store_path, help='An absolute path to the NDK that will be used as a libc ' 'implementation for
Android builds') option('--android-api-level', store, default='21', help='The Android API level to target when building for Android. ' 'Currently only 21 or above is supported') option('--android-ndk-gcc-version', store, choices=['4.8', '4.9'], default='4.9', help='The GCC version to use when building for Android. Currently ' 'only 4.9 is supported. %(default)s is also the default ' 'value. This option may be used when experimenting with ' 'versions of the Android NDK not officially supported by ' 'Swift') option('--android-icu-uc', store_path, help='Path to libicuuc.so') option('--android-icu-uc-include', store_path, help='Path to a directory containing headers for libicuuc') option('--android-icu-i18n', store_path, help='Path to libicui18n.so') option('--android-icu-i18n-include', store_path, help='Path to a directory containing headers libicui18n') option('--android-icu-data', store_path, help='Path to libicudata.so') option('--android-deploy-device-path', store_path, default=android.adb.commands.DEVICE_TEMP_DIR, help='Path on an Android device to which built Swift stdlib ' 'products will be deployed. If running host tests, specify ' 'the "{}" directory.'.format( android.adb.commands.DEVICE_TEMP_DIR)) option('--android-arch', store, choices=['armv7', 'aarch64'], default='armv7', help='The Android target architecture when building for Android. ' 'Currently only armv7 and aarch64 are supported. ' '%(default)s is the default.') # ------------------------------------------------------------------------- in_group('Experimental language features') option('--enable-experimental-differentiable-programming', toggle_true, default=True, help='Enable experimental Swift differentiable programming language' ' features.') # ------------------------------------------------------------------------- in_group('Unsupported options') option('--build-jobs', unsupported) option('--common-cmake-options', unsupported) option('--only-execute', unsupported) option('--skip-test-optimize-for-size', unsupported) option('--skip-test-optimize-none-with-implicit-dynamic', unsupported) option('--skip-test-optimized', unsupported) # ------------------------------------------------------------------------- in_group('Build-script-impl arguments (for disambiguation)') # We need to list --skip-test-swift explicitly because otherwise argparse # will auto-expand arguments like --skip-test-swift to the only known # argument --skip-test-swiftevolve. # These arguments are forwarded to impl_args in migration.py option('--install-swift', toggle_true('impl_install_swift')) option('--skip-test-swift', toggle_true('impl_skip_test_swift')) # ------------------------------------------------------------------------- return builder.build() # ---------------------------------------------------------------------------- USAGE = """ %(prog)s [-h | --help] [OPTION ...] %(prog)s --preset=NAME [SUBSTITUTION ...] """ DESCRIPTION = """ Use this tool to build, test, and prepare binary distribution archives of Swift and related tools. Builds Swift (and, optionally, LLDB), incrementally, optionally testing it thereafter. Different build configurations are maintained in parallel. """ EPILOG = """ Using option presets: --preset-file=PATH load presets from the specified file --preset=NAME use the specified option preset The preset mode is mutually exclusive with other options. It is not possible to add ad-hoc customizations to a preset. This is a deliberate design decision. 
(Rationale: a preset is a certain important set of options that we want to keep in a centralized location. If you need to customize it, you should create another preset in a centralized location, rather than scattering the knowledge about the build across the system.) Presets support substitutions for controlled customizations. Substitutions are defined in the preset file. Values for substitutions are supplied using the name=value syntax on the command line. Any arguments not listed are forwarded directly to Swift's 'build-script-impl'. See that script's help for details. The listed build-script-impl arguments are only for disambiguation in the argument parser. Environment variables --------------------- This script respects a few environment variables, should you choose to set them: SWIFT_SOURCE_ROOT: a directory containing the source for LLVM, Clang, Swift. If this script is located in a Swift source directory, the location of SWIFT_SOURCE_ROOT will be inferred if the variable is not set. 'build-script' expects the sources to be laid out in the following way: $SWIFT_SOURCE_ROOT/llvm /clang /swift /lldb (optional) /llbuild (optional) /swiftpm (optional, requires llbuild) /swift-syntax (optional, requires swiftpm) /swift-stress-tester (optional, requires swift-syntax) /compiler-rt (optional) /swift-corelibs-xctest (optional) /swift-corelibs-foundation (optional) /swift-corelibs-libdispatch (optional) /icu (optional) SWIFT_BUILD_ROOT: a directory in which to create out-of-tree builds. Defaults to "$SWIFT_SOURCE_ROOT/build/". Preparing to run this script ---------------------------- See README.md for instructions on cloning Swift subprojects. If you intend to use the -l, -L, --lldb, or --debug-lldb options. That's it; you're ready to go! Examples -------- Given the above layout of sources, the simplest invocation of 'build-script' is just: [~/src/s]$ ./swift/utils/build-script This builds LLVM, Clang, Swift and Swift standard library in debug mode. All builds are incremental. To incrementally build changed files, repeat the same 'build-script' command. Typical uses of 'build-script' ------------------------------ To build everything with optimization without debug information: [~/src/s]$ ./swift/utils/build-script -R To run tests, add '-t': [~/src/s]$ ./swift/utils/build-script -R -t To run normal tests and validation tests, add '-T': [~/src/s]$ ./swift/utils/build-script -R -T To build LLVM+Clang with optimization without debug information, and a debuggable Swift compiler: [~/src/s]$ ./swift/utils/build-script -R --debug-swift To build a debuggable Swift standard library: [~/src/s]$ ./swift/utils/build-script -R --debug-swift-stdlib iOS build targets are always configured and present, but are not built by default. To build the standard library for OS X, iOS simulator and iOS device: [~/src/s]$ ./swift/utils/build-script -R -i To run OS X and iOS tests that don't require a device: [~/src/s]$ ./swift/utils/build-script -R -i -t To use 'make' instead of 'ninja', use '-m': [~/src/s]$ ./swift/utils/build-script -m -R To create Xcode projects that can build Swift, use '-x': [~/src/s]$ ./swift/utils/build-script -x -R Preset mode in build-script --------------------------- All buildbots and automated environments use 'build-script' in *preset mode*. In preset mode, the command line only specifies the preset name and allows limited customization (extra output paths). The actual options come from the selected preset in 'utils/build-presets.ini'. 
For example, to build like the incremental buildbot, run: [~/src/s]$ ./swift/utils/build-script --preset=buildbot_incremental To build with AddressSanitizer: [~/src/s]$ ./swift/utils/build-script --preset=asan To build a root for Xcode XYZ, '/tmp/xcode-xyz-root.tar.gz': [~/src/s]$ ./swift/utils/build-script --preset=buildbot_BNI_internal_XYZ \\ install_destdir="/tmp/install" install_symroot="/tmp/symroot" installable_package="/tmp/xcode-xyz-root.tar.gz" If you have your own favorite set of options, you can create your own, local, preset. For example, let's create a preset called 'ds' (which stands for Debug Swift): $ cat > ~/.swift-build-presets [preset: ds] release debug-swift debug-swift-stdlib test build-subdir=ds To use it, specify the '--preset=' argument: [~/src/s]$ ./swift/utils/build-script --preset=ds ./swift/utils/build-script: using preset 'ds', which expands to ./swift/utils/build-script --release --debug-swift --debug-swift-stdlib \ --test --build-subdir=ds -- ... Existing presets can be found in `utils/build-presets.ini` Philosophy ---------- While you can invoke CMake directly to build Swift, this tool will save you time by taking away the mechanical parts of the process, providing you controls for the important options. For all automated build environments, this tool is regarded as *the* *only* way to build Swift. This is not a technical limitation of the Swift build system. It is a policy decision aimed at making the builds uniform across all environments and easily reproducible by engineers who are not familiar with the details of the setups of other systems or automated environments. """
[]
[]
[ "C_COMPILER_LAUNCHER", "CXX_COMPILER_LAUNCHER", "USE_DISTCC" ]
[]
["C_COMPILER_LAUNCHER", "CXX_COMPILER_LAUNCHER", "USE_DISTCC"]
python
3
0
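The _ApplyDefaultsArgumentParser wrapper in the record above centralizes dependent defaults (for example, per-project build variants falling back to the global one) in a single post-parse hook. Below is a minimal, self-contained sketch of that pattern; the option names are purely illustrative and are not taken from the build script.

import argparse

class ApplyDefaultsParser(argparse.ArgumentParser):
    """Toy parser that runs a callback over the parsed namespace."""

    def __init__(self, apply_defaults=None, *args, **kwargs):
        self._apply_defaults = apply_defaults or (lambda ns: None)
        super(ApplyDefaultsParser, self).__init__(*args, **kwargs)

    def parse_known_args(self, args=None, namespace=None):
        ns, argv = super(ApplyDefaultsParser, self).parse_known_args(args, namespace)
        self._apply_defaults(ns)  # derive dependent defaults in one place
        return ns, argv

def apply_defaults(ns):
    # A per-project variant falls back to the global build variant.
    if ns.llvm_variant is None:
        ns.llvm_variant = ns.variant

parser = ApplyDefaultsParser(apply_defaults=apply_defaults)
parser.add_argument('--variant', default='Debug')
parser.add_argument('--llvm-variant', default=None)
print(parser.parse_args(['--variant', 'Release']))
# -> Namespace(llvm_variant='Release', variant='Release')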
testhelpers/testhelpers.go
package testhelpers import ( "archive/tar" "context" "fmt" "io" "io/ioutil" "log" "math/rand" "net" "net/http" "os" "os/exec" "path/filepath" "reflect" "regexp" "strings" "sync" "testing" "time" "github.com/dgodd/dockerdial" dockertypes "github.com/docker/docker/api/types" "github.com/google/go-cmp/cmp" "github.com/buildpack/pack/archive" "github.com/buildpack/pack/docker" ) func RandString(n int) string { b := make([]byte, n) for i := range b { b[i] = 'a' + byte(rand.Intn(26)) } return string(b) } // Assert deep equality (and provide useful difference as a test failure) func AssertEq(t *testing.T, actual, expected interface{}) { t.Helper() if diff := cmp.Diff(actual, expected); diff != "" { t.Fatal(diff) } } // Assert the simplistic pointer (or literal value) equality func AssertSameInstance(t *testing.T, actual, expected interface{}) { t.Helper() if actual != expected { t.Fatalf("Expected %s and %s to be pointers to the variable", actual, expected) } } func AssertError(t *testing.T, actual error, expected string) { t.Helper() if actual == nil { t.Fatalf("Expected an error but got nil") } if !strings.Contains(actual.Error(), expected) { t.Fatalf(`Expected error to contain "%s", got "%s"`, expected, actual.Error()) } } func AssertContains(t *testing.T, actual, expected string) { t.Helper() if !strings.Contains(actual, expected) { t.Fatalf("Expected: '%s' to contain '%s'", actual, expected) } } func AssertContainsMatch(t *testing.T, actual, exp string) { t.Helper() regex := regexp.MustCompile(exp) matches := regex.FindAll([]byte(actual), -1) if len(matches) < 1 { t.Fatalf("Expected: '%s' to match expression '%s'", actual, exp) } } func AssertNotContains(t *testing.T, actual, expected string) { t.Helper() if strings.Contains(actual, expected) { t.Fatalf("Expected: '%s' to not contain '%s'", actual, expected) } } func AssertSliceContains(t *testing.T, slice []string, value string) { t.Helper() for _, s := range slice { if value == s { return } } t.Fatalf("Expected: '%s' to contain element '%s'", slice, value) } func AssertMatch(t *testing.T, actual string, expected string) { t.Helper() if !regexp.MustCompile(expected).MatchString(actual) { t.Fatalf("Expected: '%s' to match regex '%s'", actual, expected) } } func AssertNil(t *testing.T, actual interface{}) { t.Helper() if !isNil(actual) { t.Fatalf("Expected nil: %s", actual) } } func AssertNotNil(t *testing.T, actual interface{}) { t.Helper() if isNil(actual) { t.Fatal("Expected not nil") } } func isNil(value interface{}) bool { return value == nil || (reflect.TypeOf(value).Kind() == reflect.Ptr && reflect.ValueOf(value).IsNil()) } func AssertNotEq(t *testing.T, actual, expected interface{}) { t.Helper() if diff := cmp.Diff(actual, expected); diff == "" { t.Fatalf("Expected values to differ: %s", actual) } } func AssertDirContainsFileWithContents(t *testing.T, dir string, file string, expected string) { t.Helper() path := filepath.Join(dir, file) bytes, err := ioutil.ReadFile(path) AssertNil(t, err) if string(bytes) != expected { t.Fatalf("file %s in dir %s has wrong contents: %s != %s", file, dir, string(bytes), expected) } } var dockerCliVal *docker.Client var dockerCliOnce sync.Once var dockerCliErr error func dockerCli(t *testing.T) *docker.Client { dockerCliOnce.Do(func() { dockerCliVal, dockerCliErr = docker.New() }) AssertNil(t, dockerCliErr) return dockerCliVal } func proxyDockerHostPort(dockerCli *docker.Client, port string) error { ln, err := net.Listen("tcp", ":"+port) if err != nil { return err } go func() { // TODO exit somehow. 
for { conn, err := ln.Accept() if err != nil { log.Println(err) continue } go func(conn net.Conn) { defer conn.Close() c, err := dockerdial.Dial("tcp", "localhost:"+port) if err != nil { log.Println(err) return } defer c.Close() go io.Copy(c, conn) io.Copy(conn, c) }(conn) } }() return nil } func Eventually(t *testing.T, test func() bool, every time.Duration, timeout time.Duration) { t.Helper() ticker := time.NewTicker(every) defer ticker.Stop() timer := time.NewTimer(timeout) defer timer.Stop() for { select { case <-ticker.C: if test() { return } case <-timer.C: t.Fatalf("timeout on eventually: %v", timeout) } } } func ConfigurePackHome(t *testing.T, packHome, registryPort string) { t.Helper() tag := PackTag() AssertNil(t, ioutil.WriteFile(filepath.Join(packHome, "config.toml"), []byte(fmt.Sprintf(` default-stack-id = "io.buildpacks.stacks.bionic" default-builder = "%s" [[stacks]] id = "io.buildpacks.stacks.bionic" build-image = "%s" run-images = ["%s"] [[run-images]] image = "packs/run:%s" mirrors = ["%s"] `, DefaultBuilderImage(t, registryPort), DefaultBuildImage(t, registryPort), DefaultRunImage(t, registryPort), tag, DefaultRunImage(t, registryPort))), 0666)) } func CreateImageOnLocal(t *testing.T, dockerCli *docker.Client, repoName, dockerFile string) { ctx := context.Background() buildContext, err := archive.CreateSingleFileTarReader("Dockerfile", dockerFile) AssertNil(t, err) res, err := dockerCli.ImageBuild(ctx, buildContext, dockertypes.ImageBuildOptions{ Tags: []string{repoName}, SuppressOutput: true, Remove: true, ForceRemove: true, }) AssertNil(t, err) io.Copy(ioutil.Discard, res.Body) res.Body.Close() } func CreateImageOnRemote(t *testing.T, dockerCli *docker.Client, registryConfig *TestRegistryConfig, repoName, dockerFile string) string { t.Helper() imageName := registryConfig.RepoName(repoName) defer DockerRmi(dockerCli, imageName) CreateImageOnLocal(t, dockerCli, imageName, dockerFile) AssertNil(t, PushImage(dockerCli, imageName, registryConfig)) return imageName } func DockerRmi(dockerCli *docker.Client, repoNames ...string) error { var err error ctx := context.Background() for _, name := range repoNames { _, e := dockerCli.ImageRemove( ctx, name, dockertypes.ImageRemoveOptions{Force: true, PruneChildren: true}, ) if e != nil && err == nil { err = e } } return err } func PushImage(dockerCli *docker.Client, ref string, registryConfig *TestRegistryConfig) error { rc, err := dockerCli.ImagePush(context.Background(), ref, dockertypes.ImagePushOptions{RegistryAuth: registryConfig.RegistryAuth()}) if err != nil { return err } if _, err := io.Copy(ioutil.Discard, rc); err != nil { return err } return rc.Close() } const DefaultTag = "rc" func PackTag() string { tag := os.Getenv("PACK_TAG") if tag == "" { return DefaultTag } return tag } func HttpGet(t *testing.T, url string) string { t.Helper() txt, err := HttpGetE(url, map[string]string{}) AssertNil(t, err) return txt } func HttpGetE(url string, headers map[string]string) (string, error) { var client *http.Client if os.Getenv("DOCKER_HOST") == "" { client = http.DefaultClient } else { tr := &http.Transport{Dial: dockerdial.Dial} client = &http.Client{Transport: tr} } request, err := http.NewRequest("GET", url, nil) if err != nil { return "", err } for key, val := range headers { request.Header.Set(key, val) } resp, err := client.Do(request) if err != nil { return "", err } defer resp.Body.Close() if resp.StatusCode >= 300 { return "", fmt.Errorf("HTTP Status was bad: %s => %d", url, resp.StatusCode) } b, err := 
ioutil.ReadAll(resp.Body) if err != nil { return "", err } return string(b), nil } func ImageID(t *testing.T, repoName string) string { t.Helper() inspect, _, err := dockerCli(t).ImageInspectWithRaw(context.Background(), repoName) AssertNil(t, err) return inspect.ID } func Run(t *testing.T, cmd *exec.Cmd) string { t.Helper() txt, err := RunE(cmd) AssertNil(t, err) return txt } func CleanDefaultImages(t *testing.T, registryPort string) { t.Helper() AssertNil(t, DockerRmi( dockerCli(t), DefaultRunImage(t, registryPort), DefaultBuildImage(t, registryPort), DefaultBuilderImage(t, registryPort), ), ) } func RunE(cmd *exec.Cmd) (string, error) { output, err := cmd.CombinedOutput() if err != nil { return string(output), fmt.Errorf("Failed to execute command: %v, %s, %s", cmd.Args, err, output) } return string(output), nil } func TryPullImage(dockerCli *docker.Client, ref string) error { return PullImageWithAuth(dockerCli, ref, "") } func PullImageWithAuth(dockerCli *docker.Client, ref, registryAuth string) error { rc, err := dockerCli.ImagePull(context.Background(), ref, dockertypes.ImagePullOptions{RegistryAuth: registryAuth}) if err != nil { return nil } if _, err := io.Copy(ioutil.Discard, rc); err != nil { return err } return rc.Close() } func RecursiveCopy(t *testing.T, src, dst string) { t.Helper() fis, err := ioutil.ReadDir(src) AssertNil(t, err) for _, fi := range fis { if fi.Mode().IsRegular() { srcFile, err := os.Open(filepath.Join(src, fi.Name())) AssertNil(t, err) dstFile, err := os.OpenFile(filepath.Join(dst, fi.Name()), os.O_RDWR|os.O_CREATE|os.O_TRUNC, fi.Mode()) AssertNil(t, err) _, err = io.Copy(dstFile, srcFile) AssertNil(t, err) modifiedtime := time.Time{} err = os.Chtimes(filepath.Join(dst, fi.Name()), modifiedtime, modifiedtime) AssertNil(t, err) } if fi.IsDir() { err = os.Mkdir(filepath.Join(dst, fi.Name()), fi.Mode()) AssertNil(t, err) RecursiveCopy(t, filepath.Join(src, fi.Name()), filepath.Join(dst, fi.Name())) } } modifiedtime := time.Time{} err = os.Chtimes(dst, modifiedtime, modifiedtime) AssertNil(t, err) err = os.Chmod(dst, 0775) AssertNil(t, err) } func UntarSingleFile(r io.Reader, fileName string) ([]byte, error) { tr := tar.NewReader(r) for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive return []byte{}, fmt.Errorf("file '%s' does not exist in tar", fileName) } if err != nil { return []byte{}, err } switch hdr.Typeflag { case tar.TypeReg, tar.TypeRegA: if hdr.Name == fileName { return ioutil.ReadAll(tr) } } } } func RequireDocker(t *testing.T) { _, ok := os.LookupEnv("NO_DOCKER") if ok { t.Skip("Skipping because docker daemon unavailable") } }
[ "\"PACK_TAG\"", "\"DOCKER_HOST\"" ]
[]
[ "DOCKER_HOST", "PACK_TAG" ]
[]
["DOCKER_HOST", "PACK_TAG"]
go
2
0
python/backup/pycovid.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This script updates the local COVID-19 raw database from the WHO daily report
files, rebuilds the formatted country/province reports, and commits the
results to the GitHub repository.
"""
__version__ = '0.0.1'
__author__ = 'Márcio Garcia'
__email__ = '[email protected]'
__license__ = 'GPL'
__status__ = 'Development'
__date__ = '07/19/2020'

if __name__ == '__main__':
    import os
    import sys
    import calendar
    import pandas as pd
    import pycovidfunc as cv
    from datetime import datetime, timedelta
    from time import ctime, sleep

    git_dir = r"C:\Program Files\Git\cmd"
    git_bin = os.path.join(git_dir, "git")
    os.putenv("GIT_PYTHON_GIT_EXECUTABLE", git_bin)
    os.environ["GIT_PYTHON_GIT_EXECUTABLE"] = git_bin
    # Making sure that it is first in PATH
    sys.path = [git_dir] + sys.path
    os.environ["PATH"] = os.pathsep.join([git_dir]) + os.pathsep + os.environ["PATH"]
    # Only import git now, because that's when the path is checked!
    import git

    # Read the config file to check for data file information:
    # dir_config = r"C:\Users\user\Documents\GitHub\COVID-19\python\notebooks"
    # file_config = r"C:\Users\user\Documents\GitHub\COVID-19\python\notebooks\config.csv"
    curr_dir = os.path.dirname(__file__)
    if 'config.csv' in os.listdir(curr_dir):
        config = pd.read_csv(os.path.join(curr_dir, 'config.csv'), index_col='var').fillna('-')
    else:
        raise FileNotFoundError('No configuration file "config.csv" found.')
    who_data_dir = config.loc['who_data_dir'].path
    repo = git.Repo(config.loc['git_repo'].path)
    upstream_repo = repo.remotes.upstream

    # Pull upstream base repo and check for modified files:
    lm_frame = cv.get_date_modified(who_data_dir)
    g = git.Git(upstream_repo)
    g.pull('upstream', 'master')
    repo = git.Repo(config.loc['git_repo'].path)
    lm_after_git_pull = cv.get_date_modified(who_data_dir)
    count_modified = 0
    for idx in lm_frame.index:
        new_last_modified = lm_after_git_pull.loc[idx].last_modified
        if lm_frame.loc[idx].last_modified != new_last_modified:
            count_modified += 1
    # Keep only the .csv daily report files (removing items while iterating
    # over the same list would skip entries):
    who_file_list = [f for f in os.listdir(who_data_dir) if f.endswith('.csv')]

    # Compare the latest WHO file to the raw data update information
    # and calculate the number of files to update:
    report = []
    report.append('\n----------PYCOVID.PY SCRIPT EXECUTION REPORT-----------\n')
    report.append('\n' + 'Local time: ' + ctime() + '\n\n')
    flag = True  # flag to indicate update
    new_db = config.loc['raw_data', 'update']
    if count_modified != 0 or new_db:
        string = '{} existing file(s) updated since last pull\n'.format(count_modified)
        print(string)
        report.append(string)
        report.append('generating new database...\n')
        print('generating new database...\n')
        try:
            df = cv.raw_data_formatter(who_file_list, who_data_dir)
            new_date = pd.to_datetime(who_file_list[-1].split(sep='.')[0])
            last_update = datetime.strftime(new_date, format='%m-%d-%Y')
            raw_data_path = config.loc['raw_data'].path
            province_data_path = config.loc['province_data'].path
            config.loc['raw_data', 'last_update'] = last_update
            df.to_csv(raw_data_path, index=False)
            # Creating the province report file:
            df = df[df['Province/State'] != '-']
            df = df[df['Province/State'] != 'Recovered']
            columns = ['Province/State', 'Country/Region', 'Date', 'Confirmed',
                       'Active', 'Recovered', 'Deaths']
            df = df[columns].groupby(['Province/State', 'Country/Region',
                                      'Date']).sum().reset_index()
            df.columns = ['Province', 'Country', 'Date', 'Confirmed', 'Active',
                          'Recovered', 'Deaths']
            df.to_csv(province_data_path, index=False)
            config.loc['province_data', 'last_update'] = last_update
            config.to_csv('config.csv')
            report.append('new database generated successfully!\n')
            print('new database created successfully!')
        except:
            report.append('process
aborted. No new database generated.\n') print('process aborted. No new database generated') else: last_update = pd.to_datetime(config.loc['raw_data'].last_update) latest_who_file_date = pd.to_datetime(who_file_list[-1].split(sep='.')[0]) files_to_update = (latest_who_file_date - last_update).days # Generating the list of new files to update the database if files_to_update != 0: list_of_new_files = [] for i in list(range(1,files_to_update + 1)): new_date = datetime.strftime((last_update + timedelta(days=i)).date(), format='%m-%d-%Y') list_of_new_files.append(new_date + '.csv') # Generating a dataframe with new information: df = cv.raw_data_formatter(list_of_new_files,who_data_dir) # Appending the new data to existing raw data file and updating # the raw data information in the config file: raw_data_path = config.loc['raw_data'].path province_data_path = config.loc['province_data'].path config.loc['raw_data','last_update'] = new_date df.to_csv(raw_data_path, mode='a', index=False, header=None) # Creating the province report file: df = df[df['Province/State'] != '-'] df = df[df['Province/State'] != 'Recovered'] columns = ['Province/State','Country/Region','Date','Confirmed', 'Active','Recovered','Deaths'] df = df[columns].groupby(['Province/State','Country/Region', 'Date']).sum().reset_index() df.columns = ['Province', 'Country', 'Date', 'Confirmed', 'Active', 'Recovered', 'Deaths'] df.to_csv(province_data_path, mode='a', index=False, header=None) config.loc['province_data','last_update'] = new_date config.to_csv('config.csv') A_str = 'No existing files were updated\n' B_str = '%d new file(s) found. All files appended into the raw data file\n' % (files_to_update) report.append(A_str) report.append(B_str) print(A_str) print(B_str) else: flag = False A_str = 'No existing files were updated\n' B_str = '0 new files found. No further action necessary\n' print(A_str) print(B_str) report.append(A_str) report.append(B_str) if flag: string = 'Creating world data file...\n' report.append(string) print(string) try: df = pd.read_csv(config.loc['raw_data'].path) country_report = cv.world_data_formatter(df) country_report.to_json(config.loc['formatted_data'].path,orient='records') report.append('World data report created succesfully!\n') print('World data report created succesfully!\n') new_date = pd.to_datetime(who_file_list[-1].split(sep='.')[0]) last_update = datetime.strftime(new_date,format='%m-%d-%Y') config.loc['formatted_data','last_update'] = last_update config.to_csv('config.csv') # Commit changes to github: report.append('-----------\n') report.append('list of diff on github repository:\n') report.append(repo.git.diff(None, name_only=True)+'\n') report.append('commit to github repository\n') report = cv.commit_to_repo(repo,log=report) report = cv.repo_info(repo,log=report) except: report.append('World data report creation aborted. Please verify the raw data file.\n') # Create the execution report in the directory of the log files: actual_month = calendar.month_name[datetime.now().month] year_dir = os.path.join(curr_dir+'\log',str(datetime.now().year)) year_dir_exists = os.path.exists(year_dir) month_dir = os.path.join(year_dir,actual_month) month_dir_exists = os.path.exists(month_dir) if not year_dir_exists: os.mkdir(year_dir) if not month_dir_exists: os.mkdir(month_dir) os.chdir(month_dir) log_name = datetime.now().strftime(format='%Y-%m-%d') + '.txt' log = open(log_name,'+a') log.writelines(report) log.close() sleep(3)
[]
[]
[ "PATH" ]
[]
["PATH"]
python
1
0
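The pycovid.py record above relies on GitPython locating the git binary when the module is imported. Below is a stripped-down sketch of just that setup step, assuming a hypothetical Windows install path; everything except the environment handling is omitted.

import os

git_dir = r"C:\Program Files\Git\cmd"   # assumed install location, adjust as needed
git_bin = os.path.join(git_dir, "git")

# Set both the dedicated override and PATH before GitPython is imported,
# because the executable is resolved when the module loads.
os.environ["GIT_PYTHON_GIT_EXECUTABLE"] = git_bin
os.environ["PATH"] = git_dir + os.pathsep + os.environ.get("PATH", "")

import git  # import deliberately happens after the environment is set
print(git.Git().version())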
docs/conf.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals import os extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.coverage', 'sphinx.ext.doctest', 'sphinx.ext.extlinks', 'sphinx.ext.ifconfig', 'sphinx.ext.napoleon', 'sphinx.ext.todo', 'sphinx.ext.viewcode', ] if os.getenv('SPELLCHECK'): extensions += 'sphinxcontrib.spelling', spelling_show_suggestions = True spelling_lang = 'en_US' source_suffix = '.rst' master_doc = 'index' project = 'make country codes' year = '2019' author = 'Evan Wheeler' copyright = '{0}, {1}'.format(year, author) version = release = '0.0.0' pygments_style = 'trac' templates_path = ['.'] extlinks = { 'issue': ('https://github.com/ewheeler/make-country-codes/issues/%s', '#'), 'pr': ('https://github.com/ewheeler/make-country-codes/pull/%s', 'PR #'), } # on_rtd is whether we are on readthedocs.org on_rtd = os.environ.get('READTHEDOCS', None) == 'True' if not on_rtd: # only set the theme if we're building docs locally html_theme = 'sphinx_rtd_theme' html_use_smartypants = True html_last_updated_fmt = '%b %d, %Y' html_split_index = False html_sidebars = { '**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'], } html_short_title = '%s-%s' % (project, version) napoleon_use_ivar = True napoleon_use_rtype = False napoleon_use_param = False
[]
[]
[ "SPELLCHECK", "READTHEDOCS" ]
[]
["SPELLCHECK", "READTHEDOCS"]
python
2
0
pkg/plugin/v2/api.go
/* Copyright 2020 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v2 import ( "bufio" "errors" "fmt" "io/ioutil" "os" "path/filepath" "strings" "github.com/spf13/pflag" "sigs.k8s.io/kubebuilder/v2/pkg/model" "sigs.k8s.io/kubebuilder/v2/pkg/model/config" "sigs.k8s.io/kubebuilder/v2/pkg/model/resource" "sigs.k8s.io/kubebuilder/v2/pkg/plugin" "sigs.k8s.io/kubebuilder/v2/pkg/plugin/internal/cmdutil" "sigs.k8s.io/kubebuilder/v2/pkg/plugin/internal/util" "sigs.k8s.io/kubebuilder/v2/pkg/plugin/v2/scaffolds" "sigs.k8s.io/kubebuilder/v2/plugins/addon" ) type createAPISubcommand struct { config *config.Config // pattern indicates that we should use a plugin to build according to a pattern pattern string resource *resource.Options // Check if we have to scaffold resource and/or controller resourceFlag *pflag.Flag controllerFlag *pflag.Flag doResource bool doController bool // force indicates that the resource should be created even if it already exists force bool // runMake indicates whether to run make or not after scaffolding APIs runMake bool } var ( _ plugin.CreateAPISubcommand = &createAPISubcommand{} _ cmdutil.RunOptions = &createAPISubcommand{} ) func (p createAPISubcommand) UpdateContext(ctx *plugin.Context) { ctx.Description = `Scaffold a Kubernetes API by creating a Resource definition and / or a Controller. create resource will prompt the user for if it should scaffold the Resource and / or Controller. To only scaffold a Controller for an existing Resource, select "n" for Resource. To only define the schema for a Resource without writing a Controller, select "n" for Controller. After the scaffold is written, api will run make on the project. 
` ctx.Examples = fmt.Sprintf(` # Create a frigates API with Group: ship, Version: v1beta1 and Kind: Frigate %s create api --group ship --version v1beta1 --kind Frigate # Edit the API Scheme nano api/v1beta1/frigate_types.go # Edit the Controller nano controllers/frigate/frigate_controller.go # Edit the Controller Test nano controllers/frigate/frigate_controller_test.go # Install CRDs into the Kubernetes cluster using kubectl apply make install # Regenerate code and run against the Kubernetes cluster configured by ~/.kube/config make run `, ctx.CommandName) } func (p *createAPISubcommand) BindFlags(fs *pflag.FlagSet) { fs.BoolVar(&p.runMake, "make", true, "if true, run make after generating files") fs.BoolVar(&p.doResource, "resource", true, "if set, generate the resource without prompting the user") p.resourceFlag = fs.Lookup("resource") fs.BoolVar(&p.doController, "controller", true, "if set, generate the controller without prompting the user") p.controllerFlag = fs.Lookup("controller") if os.Getenv("KUBEBUILDER_ENABLE_PLUGINS") != "" { fs.StringVar(&p.pattern, "pattern", "", "generates an API following an extension pattern (addon)") } fs.BoolVar(&p.force, "force", false, "attempt to create resource even if it already exists") p.resource = &resource.Options{} fs.StringVar(&p.resource.Kind, "kind", "", "resource Kind") fs.StringVar(&p.resource.Group, "group", "", "resource Group") fs.StringVar(&p.resource.Version, "version", "", "resource Version") fs.BoolVar(&p.resource.Namespaced, "namespaced", true, "resource is namespaced") } func (p *createAPISubcommand) InjectConfig(c *config.Config) { p.config = c } func (p *createAPISubcommand) Run() error { return cmdutil.Run(p) } func (p *createAPISubcommand) Validate() error { if err := p.resource.ValidateV2(); err != nil { return err } reader := bufio.NewReader(os.Stdin) if !p.resourceFlag.Changed { fmt.Println("Create Resource [y/n]") p.doResource = util.YesNo(reader) } if !p.controllerFlag.Changed { fmt.Println("Create Controller [y/n]") p.doController = util.YesNo(reader) } // In case we want to scaffold a resource API we need to do some checks if p.doResource { // Check that resource doesn't exist or flag force was set if !p.force && p.config.HasResource(p.resource.GVK()) { return errors.New("API resource already exists") } // Check that the provided group can be added to the project if !p.config.MultiGroup && len(p.config.Resources) != 0 && !p.config.HasGroup(p.resource.Group) { return fmt.Errorf("multiple groups are not allowed by default, to enable multi-group visit %s", "kubebuilder.io/migration/multi-group.html") } } return nil } func (p *createAPISubcommand) GetScaffolder() (cmdutil.Scaffolder, error) { // Load the boilerplate bp, err := ioutil.ReadFile(filepath.Join("hack", "boilerplate.go.txt")) // nolint:gosec if err != nil { return nil, fmt.Errorf("unable to load boilerplate: %v", err) } // Load the requested plugins plugins := make([]model.Plugin, 0) switch strings.ToLower(p.pattern) { case "": // Default pattern case "addon": plugins = append(plugins, &addon.Plugin{}) default: return nil, fmt.Errorf("unknown pattern %q", p.pattern) } // Create the actual resource from the resource options res := p.resource.NewResource(p.config, p.doResource) return scaffolds.NewAPIScaffolder(p.config, string(bp), res, p.doResource, p.doController, plugins), nil } func (p *createAPISubcommand) PostScaffold() error { // Load the requested plugins switch strings.ToLower(p.pattern) { case "": // Default pattern case "addon": // Ensure that we are 
pinning sigs.k8s.io/kubebuilder-declarative-pattern version err := util.RunCmd("Get controller runtime", "go", "get", "sigs.k8s.io/kubebuilder-declarative-pattern@"+scaffolds.KbDeclarativePattern) if err != nil { return err } default: return fmt.Errorf("unknown pattern %q", p.pattern) } if p.runMake { return util.RunCmd("Running make", "make") } return nil }
[ "\"KUBEBUILDER_ENABLE_PLUGINS\"" ]
[]
[ "KUBEBUILDER_ENABLE_PLUGINS" ]
[]
["KUBEBUILDER_ENABLE_PLUGINS"]
go
1
0
utils/fs/expanduser_test.go
package fs import ( "os" "testing" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" ) func TestExpandUserNothingToExpand(t *testing.T) { expected := "does/not/expand" path, err := ExpandUser(expected) assert.NilError(t, err) assert.Equal(t, expected, path) } func TestExpandUserJustTilde(t *testing.T) { path, err := ExpandUser("~") assert.NilError(t, err) assert.Equal(t, os.Getenv("HOME"), path) } func TestExpandUserCurrentUser(t *testing.T) { path, err := ExpandUser("~/rest/of/path") expected := os.Getenv("HOME") + "/rest/of/path" assert.NilError(t, err) assert.Equal(t, expected, path) } func TestExpandUserOtherUser(t *testing.T) { _, err := ExpandUser("~otheruser/rest/of/path") assert.Check(t, is.Error(err, "expanding ~user/ paths are not supported yet")) }
[ "\"HOME\"", "\"HOME\"" ]
[]
[ "HOME" ]
[]
["HOME"]
go
1
0
vendor/github.com/sendgrid/sendgrid-go/examples/user/user.go
package main import ( "fmt" "github.com/sendgrid/sendgrid-go" "log" "os" ) /////////////////////////////////////////////////// // Get a user's account information. // GET /user/account func Getausersaccountinformation() { apiKey := os.Getenv("YOUR_SENDGRID_APIKEY") host := "https://api.sendgrid.com" request := sendgrid.GetRequest(apiKey, "/v3/user/account", host) request.Method = "GET" response, err := sendgrid.API(request) if err != nil { log.Println(err) } else { fmt.Println(response.StatusCode) fmt.Println(response.Body) fmt.Println(response.Headers) } } /////////////////////////////////////////////////// // Retrieve your credit balance // GET /user/credits func Retrieveyourcreditbalance() { apiKey := os.Getenv("YOUR_SENDGRID_APIKEY") host := "https://api.sendgrid.com" request := sendgrid.GetRequest(apiKey, "/v3/user/credits", host) request.Method = "GET" response, err := sendgrid.API(request) if err != nil { log.Println(err) } else { fmt.Println(response.StatusCode) fmt.Println(response.Body) fmt.Println(response.Headers) } } /////////////////////////////////////////////////// // Update your account email address // PUT /user/email func Updateyouraccountemailaddress() { apiKey := os.Getenv("YOUR_SENDGRID_APIKEY") host := "https://api.sendgrid.com" request := sendgrid.GetRequest(apiKey, "/v3/user/email", host) request.Method = "PUT" request.Body = []byte(` { "email": "[email protected]" }`) response, err := sendgrid.API(request) if err != nil { log.Println(err) } else { fmt.Println(response.StatusCode) fmt.Println(response.Body) fmt.Println(response.Headers) } } /////////////////////////////////////////////////// // Retrieve your account email address // GET /user/email func Retrieveyouraccountemailaddress() { apiKey := os.Getenv("YOUR_SENDGRID_APIKEY") host := "https://api.sendgrid.com" request := sendgrid.GetRequest(apiKey, "/v3/user/email", host) request.Method = "GET" response, err := sendgrid.API(request) if err != nil { log.Println(err) } else { fmt.Println(response.StatusCode) fmt.Println(response.Body) fmt.Println(response.Headers) } } /////////////////////////////////////////////////// // Update your password // PUT /user/password func Updateyourpassword() { apiKey := os.Getenv("YOUR_SENDGRID_APIKEY") host := "https://api.sendgrid.com" request := sendgrid.GetRequest(apiKey, "/v3/user/password", host) request.Method = "PUT" request.Body = []byte(` { "new_password": "new_password", "old_password": "old_password" }`) response, err := sendgrid.API(request) if err != nil { log.Println(err) } else { fmt.Println(response.StatusCode) fmt.Println(response.Body) fmt.Println(response.Headers) } } /////////////////////////////////////////////////// // Update a user's profile // PATCH /user/profile func Updateausersprofile() { apiKey := os.Getenv("YOUR_SENDGRID_APIKEY") host := "https://api.sendgrid.com" request := sendgrid.GetRequest(apiKey, "/v3/user/profile", host) request.Method = "PATCH" request.Body = []byte(` { "city": "Orange", "first_name": "Example", "last_name": "User" }`) response, err := sendgrid.API(request) if err != nil { log.Println(err) } else { fmt.Println(response.StatusCode) fmt.Println(response.Body) fmt.Println(response.Headers) } } /////////////////////////////////////////////////// // Get a user's profile // GET /user/profile func Getausersprofile() { apiKey := os.Getenv("YOUR_SENDGRID_APIKEY") host := "https://api.sendgrid.com" request := sendgrid.GetRequest(apiKey, "/v3/user/profile", host) request.Method = "GET" response, err := sendgrid.API(request) if err != 
nil { log.Println(err) } else { fmt.Println(response.StatusCode) fmt.Println(response.Body) fmt.Println(response.Headers) } } /////////////////////////////////////////////////// // Cancel or pause a scheduled send // POST /user/scheduled_sends func Cancelorpauseascheduledsend() { apiKey := os.Getenv("YOUR_SENDGRID_APIKEY") host := "https://api.sendgrid.com" request := sendgrid.GetRequest(apiKey, "/v3/user/scheduled_sends", host) request.Method = "POST" request.Body = []byte(` { "batch_id": "YOUR_BATCH_ID", "status": "pause" }`) response, err := sendgrid.API(request) if err != nil { log.Println(err) } else { fmt.Println(response.StatusCode) fmt.Println(response.Body) fmt.Println(response.Headers) } } /////////////////////////////////////////////////// // Retrieve all scheduled sends // GET /user/scheduled_sends func Retrieveallscheduledsends() { apiKey := os.Getenv("YOUR_SENDGRID_APIKEY") host := "https://api.sendgrid.com" request := sendgrid.GetRequest(apiKey, "/v3/user/scheduled_sends", host) request.Method = "GET" response, err := sendgrid.API(request) if err != nil { log.Println(err) } else { fmt.Println(response.StatusCode) fmt.Println(response.Body) fmt.Println(response.Headers) } } /////////////////////////////////////////////////// // Update user scheduled send information // PATCH /user/scheduled_sends/{batch_id} func Updateuserscheduledsendinformation() { apiKey := os.Getenv("YOUR_SENDGRID_APIKEY") host := "https://api.sendgrid.com" request := sendgrid.GetRequest(apiKey, "/v3/user/scheduled_sends/{batch_id}", host) request.Method = "PATCH" request.Body = []byte(` { "status": "pause" }`) response, err := sendgrid.API(request) if err != nil { log.Println(err) } else { fmt.Println(response.StatusCode) fmt.Println(response.Body) fmt.Println(response.Headers) } } /////////////////////////////////////////////////// // Retrieve scheduled send // GET /user/scheduled_sends/{batch_id} func Retrievescheduledsend() { apiKey := os.Getenv("YOUR_SENDGRID_APIKEY") host := "https://api.sendgrid.com" request := sendgrid.GetRequest(apiKey, "/v3/user/scheduled_sends/{batch_id}", host) request.Method = "GET" response, err := sendgrid.API(request) if err != nil { log.Println(err) } else { fmt.Println(response.StatusCode) fmt.Println(response.Body) fmt.Println(response.Headers) } } /////////////////////////////////////////////////// // Delete a cancellation or pause of a scheduled send // DELETE /user/scheduled_sends/{batch_id} func Deleteacancellationorpauseofascheduledsend() { apiKey := os.Getenv("YOUR_SENDGRID_APIKEY") host := "https://api.sendgrid.com" request := sendgrid.GetRequest(apiKey, "/v3/user/scheduled_sends/{batch_id}", host) request.Method = "DELETE" response, err := sendgrid.API(request) if err != nil { log.Println(err) } else { fmt.Println(response.StatusCode) fmt.Println(response.Body) fmt.Println(response.Headers) } } /////////////////////////////////////////////////// // Update Enforced TLS settings // PATCH /user/settings/enforced_tls func UpdateEnforcedTLSsettings() { apiKey := os.Getenv("YOUR_SENDGRID_APIKEY") host := "https://api.sendgrid.com" request := sendgrid.GetRequest(apiKey, "/v3/user/settings/enforced_tls", host) request.Method = "PATCH" request.Body = []byte(` { "require_tls": true, "require_valid_cert": false }`) response, err := sendgrid.API(request) if err != nil { log.Println(err) } else { fmt.Println(response.StatusCode) fmt.Println(response.Body) fmt.Println(response.Headers) } } /////////////////////////////////////////////////// // Retrieve current Enforced TLS 
settings. // GET /user/settings/enforced_tls func RetrievecurrentEnforcedTLSsettings() { apiKey := os.Getenv("YOUR_SENDGRID_APIKEY") host := "https://api.sendgrid.com" request := sendgrid.GetRequest(apiKey, "/v3/user/settings/enforced_tls", host) request.Method = "GET" response, err := sendgrid.API(request) if err != nil { log.Println(err) } else { fmt.Println(response.StatusCode) fmt.Println(response.Body) fmt.Println(response.Headers) } } /////////////////////////////////////////////////// // Update your username // PUT /user/username func Updateyourusername() { apiKey := os.Getenv("YOUR_SENDGRID_APIKEY") host := "https://api.sendgrid.com" request := sendgrid.GetRequest(apiKey, "/v3/user/username", host) request.Method = "PUT" request.Body = []byte(` { "username": "test_username" }`) response, err := sendgrid.API(request) if err != nil { log.Println(err) } else { fmt.Println(response.StatusCode) fmt.Println(response.Body) fmt.Println(response.Headers) } } /////////////////////////////////////////////////// // Retrieve your username // GET /user/username func Retrieveyourusername() { apiKey := os.Getenv("YOUR_SENDGRID_APIKEY") host := "https://api.sendgrid.com" request := sendgrid.GetRequest(apiKey, "/v3/user/username", host) request.Method = "GET" response, err := sendgrid.API(request) if err != nil { log.Println(err) } else { fmt.Println(response.StatusCode) fmt.Println(response.Body) fmt.Println(response.Headers) } } /////////////////////////////////////////////////// // Update Event Notification Settings // PATCH /user/webhooks/event/settings func UpdateEventNotificationSettings() { apiKey := os.Getenv("YOUR_SENDGRID_APIKEY") host := "https://api.sendgrid.com" request := sendgrid.GetRequest(apiKey, "/v3/user/webhooks/event/settings", host) request.Method = "PATCH" request.Body = []byte(` { "bounce": true, "click": true, "deferred": true, "delivered": true, "dropped": true, "enabled": true, "group_resubscribe": true, "group_unsubscribe": true, "open": true, "processed": true, "spam_report": true, "unsubscribe": true, "url": "url" }`) response, err := sendgrid.API(request) if err != nil { log.Println(err) } else { fmt.Println(response.StatusCode) fmt.Println(response.Body) fmt.Println(response.Headers) } } /////////////////////////////////////////////////// // Retrieve Event Webhook settings // GET /user/webhooks/event/settings func RetrieveEventWebhooksettings() { apiKey := os.Getenv("YOUR_SENDGRID_APIKEY") host := "https://api.sendgrid.com" request := sendgrid.GetRequest(apiKey, "/v3/user/webhooks/event/settings", host) request.Method = "GET" response, err := sendgrid.API(request) if err != nil { log.Println(err) } else { fmt.Println(response.StatusCode) fmt.Println(response.Body) fmt.Println(response.Headers) } } /////////////////////////////////////////////////// // Test Event Notification Settings // POST /user/webhooks/event/test func TestEventNotificationSettings() { apiKey := os.Getenv("YOUR_SENDGRID_APIKEY") host := "https://api.sendgrid.com" request := sendgrid.GetRequest(apiKey, "/v3/user/webhooks/event/test", host) request.Method = "POST" request.Body = []byte(` { "url": "url" }`) response, err := sendgrid.API(request) if err != nil { log.Println(err) } else { fmt.Println(response.StatusCode) fmt.Println(response.Body) fmt.Println(response.Headers) } } /////////////////////////////////////////////////// // Create a parse setting // POST /user/webhooks/parse/settings func Createaparsesetting() { apiKey := os.Getenv("YOUR_SENDGRID_APIKEY") host := "https://api.sendgrid.com" 
request := sendgrid.GetRequest(apiKey, "/v3/user/webhooks/parse/settings", host) request.Method = "POST" request.Body = []byte(` { "hostname": "myhostname.com", "send_raw": false, "spam_check": true, "url": "http://email.myhosthame.com" }`) response, err := sendgrid.API(request) if err != nil { log.Println(err) } else { fmt.Println(response.StatusCode) fmt.Println(response.Body) fmt.Println(response.Headers) } } /////////////////////////////////////////////////// // Retrieve all parse settings // GET /user/webhooks/parse/settings func Retrieveallparsesettings() { apiKey := os.Getenv("YOUR_SENDGRID_APIKEY") host := "https://api.sendgrid.com" request := sendgrid.GetRequest(apiKey, "/v3/user/webhooks/parse/settings", host) request.Method = "GET" response, err := sendgrid.API(request) if err != nil { log.Println(err) } else { fmt.Println(response.StatusCode) fmt.Println(response.Body) fmt.Println(response.Headers) } } /////////////////////////////////////////////////// // Update a parse setting // PATCH /user/webhooks/parse/settings/{hostname} func Updateaparsesetting() { apiKey := os.Getenv("YOUR_SENDGRID_APIKEY") host := "https://api.sendgrid.com" request := sendgrid.GetRequest(apiKey, "/v3/user/webhooks/parse/settings/{hostname}", host) request.Method = "PATCH" request.Body = []byte(` { "send_raw": true, "spam_check": false, "url": "http://newdomain.com/parse" }`) response, err := sendgrid.API(request) if err != nil { log.Println(err) } else { fmt.Println(response.StatusCode) fmt.Println(response.Body) fmt.Println(response.Headers) } } /////////////////////////////////////////////////// // Retrieve a specific parse setting // GET /user/webhooks/parse/settings/{hostname} func Retrieveaspecificparsesetting() { apiKey := os.Getenv("YOUR_SENDGRID_APIKEY") host := "https://api.sendgrid.com" request := sendgrid.GetRequest(apiKey, "/v3/user/webhooks/parse/settings/{hostname}", host) request.Method = "GET" response, err := sendgrid.API(request) if err != nil { log.Println(err) } else { fmt.Println(response.StatusCode) fmt.Println(response.Body) fmt.Println(response.Headers) } } /////////////////////////////////////////////////// // Delete a parse setting // DELETE /user/webhooks/parse/settings/{hostname} func Deleteaparsesetting() { apiKey := os.Getenv("YOUR_SENDGRID_APIKEY") host := "https://api.sendgrid.com" request := sendgrid.GetRequest(apiKey, "/v3/user/webhooks/parse/settings/{hostname}", host) request.Method = "DELETE" response, err := sendgrid.API(request) if err != nil { log.Println(err) } else { fmt.Println(response.StatusCode) fmt.Println(response.Body) fmt.Println(response.Headers) } } /////////////////////////////////////////////////// // Retrieves Inbound Parse Webhook statistics. // GET /user/webhooks/parse/stats func RetrievesInboundParseWebhookstatistics() { apiKey := os.Getenv("YOUR_SENDGRID_APIKEY") host := "https://api.sendgrid.com" request := sendgrid.GetRequest(apiKey, "/v3/user/webhooks/parse/stats", host) request.Method = "GET" queryParams := make(map[string]string) queryParams["aggregated_by"] = "day" queryParams["limit"] = "test_string" queryParams["start_date"] = "2016-01-01" queryParams["end_date"] = "2016-04-01" queryParams["offset"] = "test_string" request.QueryParams = queryParams response, err := sendgrid.API(request) if err != nil { log.Println(err) } else { fmt.Println(response.StatusCode) fmt.Println(response.Body) fmt.Println(response.Headers) } } func main() { // add your function calls here }
[ "\"YOUR_SENDGRID_APIKEY\"", "\"YOUR_SENDGRID_APIKEY\"", "\"YOUR_SENDGRID_APIKEY\"", "\"YOUR_SENDGRID_APIKEY\"", "\"YOUR_SENDGRID_APIKEY\"", "\"YOUR_SENDGRID_APIKEY\"", "\"YOUR_SENDGRID_APIKEY\"", "\"YOUR_SENDGRID_APIKEY\"", "\"YOUR_SENDGRID_APIKEY\"", "\"YOUR_SENDGRID_APIKEY\"", "\"YOUR_SENDGRID_APIKEY\"", "\"YOUR_SENDGRID_APIKEY\"", "\"YOUR_SENDGRID_APIKEY\"", "\"YOUR_SENDGRID_APIKEY\"", "\"YOUR_SENDGRID_APIKEY\"", "\"YOUR_SENDGRID_APIKEY\"", "\"YOUR_SENDGRID_APIKEY\"", "\"YOUR_SENDGRID_APIKEY\"", "\"YOUR_SENDGRID_APIKEY\"", "\"YOUR_SENDGRID_APIKEY\"", "\"YOUR_SENDGRID_APIKEY\"", "\"YOUR_SENDGRID_APIKEY\"", "\"YOUR_SENDGRID_APIKEY\"", "\"YOUR_SENDGRID_APIKEY\"", "\"YOUR_SENDGRID_APIKEY\"" ]
[]
[ "YOUR_SENDGRID_APIKEY" ]
[]
["YOUR_SENDGRID_APIKEY"]
go
1
0
Migratory Birds.py
''' Problem Statement: https://www.hackerrank.com/challenges/migratory-birds/problem @Coded by TSG, 2020 ''' import math import os import random import re import sys # Complete the migratoryBirds function below. def migratoryBirds(arr): bird_freq = [0, 0, 0, 0, 0, 0] for i in range(len(arr)): bird_freq[arr[i]] += 1 return bird_freq.index(max(bird_freq)) if __name__ == '__main__': fptr = open(os.environ['OUTPUT_PATH'], 'w') arr_count = int(input().strip()) arr = list(map(int, input().rstrip().split())) result = migratoryBirds(arr) fptr.write(str(result) + '\n') fptr.close()
[]
[]
[ "OUTPUT_PATH" ]
[]
["OUTPUT_PATH"]
python
1
0
hubblestack/extmods/returners/sqlite.py
# -*- coding: utf-8 -*- """ Return hubble data to sqlite (intended for testing) """ from __future__ import absolute_import import json import logging import os import salt.returners try: import sqlite3 GOT_SQLI = True except Exception: GOT_SQLI = False __virtualname__ = 'sqlite' log = logging.getLogger(__virtualname__) _CONN = None def __virtual__(): """ Return virtual name of the module. :return: The virtual name of the module. """ if GOT_SQLI: return __virtualname__ return False, "sqlite3 module is missing" def _get_options(ret=None): """ Get the sqlite dumpster options from configs :return: options """ defaults = {'dumpster': '/var/log/hubblestack/returns.sqlite'} attrs = {'dumpster': 'dumpster'} _options = salt.returners.get_returner_options(__virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__, defaults=defaults) log.debug("_options: %s", _options) return _options def _get_conn(): """ Establish a connection (if not connected) and return it :return: connection """ _options = _get_options() global _CONN if not _CONN: _p = _options.get('dumpster', 'hubble-returns.db') _d = os.path.dirname(_p) if _d and not os.path.isdir(_d): log.debug('creating directory %s', _d) os.makedirs(_d, 0o755) log.debug('connecting to database in %s', _p) _CONN = sqlite3.connect(_options.get('dumpster', 'hubble-returns.db')) log.debug('creating ret table') _CONN.execute('''create table if not exists ret( jid text, id text, fun text, fun_args json, ret json)''') return _CONN """ ##### just for reference ## {u'fun': u'hubble.audit', ## u'fun_args': [u'cve.scan-v2'], ## u'id': u'hostname.here', ## u'jid': u'20180117091736565184', ## u'return': {u'Compliance': u'0%', ## u'Failure': [{u'ruby2.3-2.3.3-2~16.04.5': u'Ruby vulnerabilities'}, ## {u'libjavascriptcoregtk-4.0-18-2.18.5-0ubuntu0.16.04.1': u'WebKitGTK+ vulnerabilities'}, ## {u'libwebkit2gtk-4.0-37-2.18.5-0ubuntu0.16.04.1': u'WebKitGTK+ vulnerabilities'}, ## {u'libruby2.3-2.3.3-2~16.04.5': u'Ruby vulnerabilities'}, ## {u'linux-image-generic-4.4.0.109.114': u'Linux kernel regression'}, ## {u'libgdk-pixbuf2.0-0-2.32.2-1ubuntu1.4': u'GDK-PixBuf vulnerabilities'}], ## u'Success': []}} """ def _put(ret): """ Add item to sqlite """ conn = _get_conn() # identify lists of events list_of_events = False if isinstance(ret, (list, tuple)): num_events = sum([0 if isinstance(i, dict) else 1 for i in ret]) if num_events == 0: list_of_events = True if list_of_events: for item in ret: _put(item) return for item in ret: if isinstance(ret[item], (list, tuple, dict)): ret[item] = json.dumps(ret[item]) # try to get sqlite queries to show not-ints for jids ret['jid'] = str(ret.get('jid', '??')) log.info("logging jid=%s in sqlite dumpster", ret['jid']) for i in ('id', 'fun', 'fun_args', 'return', 'Failure', 'Success'): if i not in ret: ret[i] = None # conn.execute("insert into ret values(:jid,:id,:fun,json(:fun_args),json(:return))", x) # json() isn't added until later than sqlite-3.7.17 (the centos7 version)... conn.execute("insert into ret values(:jid,:id,:fun,:fun_args,:return)", ret) conn.commit() def returner(ret): """ The main returner function that sends ret data to sqlite """ _put(ret)
[]
[]
[]
[]
[]
python
null
null
null
influxql/engine.go
package influxql import ( "encoding/binary" "fmt" "hash/fnv" "math" "sort" "strings" "time" ) // DB represents an interface for creating transactions. type DB interface { Begin() (Tx, error) } // Tx represents a transaction. // The Tx must be opened before being used. type Tx interface { // Opens and closes the transaction. Open() error Close() error // SetNow sets the current time to be used throughout the transaction. SetNow(time.Time) // Creates a list of iterators for a simple select statement. // // The statement must adhere to the following rules: // 1. It can only have a single VarRef field. // 2. It can only have a single source measurement. CreateIterators(*SelectStatement) ([]Iterator, error) } // Iterator represents a forward-only iterator over a set of points. type Iterator interface { // Tags returns the encoded dimensional tag values. Tags() string // Next returns the next value from the iterator. Next() (key int64, value interface{}) } // Planner represents an object for creating execution plans. type Planner struct { DB DB // Returns the current time. Defaults to time.Now(). Now func() time.Time } // NewPlanner returns a new instance of Planner. func NewPlanner(db DB) *Planner { return &Planner{ DB: db, Now: time.Now, } } func (p *Planner) Plan(stmt *SelectStatement) (*Executor, error) { now := p.Now() // Clone the statement to be planned. // Replace instances of "now()" with the current time. stmt = stmt.Clone() stmt.Condition = Reduce(stmt.Condition, &nowValuer{Now: now}) // Begin an unopened transaction. tx, err := p.DB.Begin() if err != nil { return nil, err } // Create the executor. e := newExecutor(tx, stmt) // Determine group by tag keys. interval, tags, err := stmt.Dimensions.Normalize() if err != nil { return nil, err } e.interval = interval e.tags = tags // Generate a processor for each field. e.processors = make([]Processor, len(stmt.Fields)) for i, f := range stmt.Fields { p, err := p.planField(e, f) if err != nil { return nil, err } e.processors[i] = p } return e, nil } func (p *Planner) planField(e *Executor, f *Field) (Processor, error) { return p.planExpr(e, f.Expr) } func (p *Planner) planExpr(e *Executor, expr Expr) (Processor, error) { switch expr := expr.(type) { case *VarRef: return p.planRawQuery(e, expr) case *Call: return p.planCall(e, expr) case *BinaryExpr: return p.planBinaryExpr(e, expr) case *ParenExpr: return p.planExpr(e, expr.Expr) case *NumberLiteral: return newLiteralProcessor(expr.Val), nil case *StringLiteral: return newLiteralProcessor(expr.Val), nil case *BooleanLiteral: return newLiteralProcessor(expr.Val), nil case *TimeLiteral: return newLiteralProcessor(expr.Val), nil case *DurationLiteral: return newLiteralProcessor(expr.Val), nil } panic("unreachable") } // planCall generates a processor for a function call. func (p *Planner) planRawQuery(e *Executor, v *VarRef) (Processor, error) { // Convert the statement to a simplified substatement for the single field. stmt, err := e.stmt.Substatement(v) if err != nil { return nil, err } // Retrieve a list of iterators for the substatement. itrs, err := e.tx.CreateIterators(stmt) if err != nil { return nil, err } // Create mapper and reducer. mappers := make([]*Mapper, len(itrs)) for i, itr := range itrs { mappers[i] = NewMapper(MapRawQuery, itr, e.interval) } r := NewReducer(ReduceRawQuery, mappers) r.name = lastIdent(stmt.Source.(*Measurement).Name) return r, nil } // planCall generates a processor for a function call. 
func (p *Planner) planCall(e *Executor, c *Call) (Processor, error) { // Ensure there is a single argument. if c.Name == "percentile" { if len(c.Args) != 2 { return nil, fmt.Errorf("expected two arguments for percentile()") } } else if len(c.Args) != 1 { return nil, fmt.Errorf("expected one argument for %s()", c.Name) } // Ensure the argument is a variable reference. ref, ok := c.Args[0].(*VarRef) if !ok { return nil, fmt.Errorf("expected field argument in %s()", c.Name) } // Convert the statement to a simplified substatement for the single field. stmt, err := e.stmt.Substatement(ref) if err != nil { return nil, err } // Retrieve a list of iterators for the substatement. itrs, err := e.tx.CreateIterators(stmt) if err != nil { return nil, err } // Retrieve map & reduce functions by name. var mapFn MapFunc var reduceFn ReduceFunc switch strings.ToLower(c.Name) { case "count": mapFn, reduceFn = MapCount, ReduceSum case "sum": mapFn, reduceFn = MapSum, ReduceSum case "mean": mapFn, reduceFn = MapMean, ReduceMean case "percentile": lit, ok := c.Args[1].(*NumberLiteral) if !ok { return nil, fmt.Errorf("expected float argument in percentile()") } mapFn, reduceFn = MapEcho, ReducePercentile(lit.Val) default: return nil, fmt.Errorf("function not found: %q", c.Name) } // Create mapper and reducer. mappers := make([]*Mapper, len(itrs)) for i, itr := range itrs { mappers[i] = NewMapper(mapFn, itr, e.interval) } r := NewReducer(reduceFn, mappers) r.name = lastIdent(stmt.Source.(*Measurement).Name) return r, nil } // planBinaryExpr generates a processor for a binary expression. // A binary expression represents a join operator between two processors. func (p *Planner) planBinaryExpr(e *Executor, expr *BinaryExpr) (Processor, error) { // Create processor for LHS. lhs, err := p.planExpr(e, expr.LHS) if err != nil { return nil, fmt.Errorf("lhs: %s", err) } // Create processor for RHS. rhs, err := p.planExpr(e, expr.RHS) if err != nil { return nil, fmt.Errorf("rhs: %s", err) } // Combine processors. return newBinaryExprEvaluator(e, expr.Op, lhs, rhs), nil } // Executor represents the implementation of Executor. // It executes all reducers and combines their result into a row. type Executor struct { tx Tx // transaction stmt *SelectStatement // original statement processors []Processor // per-field processors interval time.Duration // group by interval tags []string // dimensional tag keys } // newExecutor returns an executor associated with a transaction and statement. func newExecutor(tx Tx, stmt *SelectStatement) *Executor { return &Executor{ tx: tx, stmt: stmt, } } // Execute begins execution of the query and returns a channel to receive rows. func (e *Executor) Execute() (<-chan *Row, error) { // Open transaction. if err := e.tx.Open(); err != nil { return nil, err } // Initialize processors. for _, p := range e.processors { p.Process() } // Create output channel and stream data in a separate goroutine. out := make(chan *Row, 0) go e.execute(out) return out, nil } // execute runs in a separate separate goroutine and streams data from processors. func (e *Executor) execute(out chan *Row) { // Ensure the transaction closes after execution. defer e.tx.Close() // TODO: Support multi-value rows. // Initialize map of rows by encoded tagset. rows := make(map[string]*Row) // Combine values from each processor. loop: for { // Retrieve values from processors and write them to the approprite // row based on their tagset. for i, p := range e.processors { // Retrieve data from the processor. 
m, ok := <-p.C() if !ok { break loop } // Set values on returned row. for k, v := range m { // Lookup row values and populate data. values := e.createRowValuesIfNotExists(rows, e.processors[0].Name(), k.Timestamp, k.Values) values[i+1] = v } } } // Normalize rows and values. // This converts the timestamps from nanoseconds to microseconds. a := make(Rows, 0, len(rows)) for _, row := range rows { for _, values := range row.Values { values[0] = values[0].(int64) / int64(time.Microsecond) } a = append(a, row) } sort.Sort(a) // Send rows to the channel. for _, row := range a { out <- row } // Mark the end of the output channel. close(out) } // creates a new value set if one does not already exist for a given tagset + timestamp. func (e *Executor) createRowValuesIfNotExists(rows map[string]*Row, name string, timestamp int64, tagset string) []interface{} { // TODO: Add "name" to lookup key. // Find row by tagset. var row *Row if row = rows[tagset]; row == nil { row = &Row{Name: name} // Create tag map. row.Tags = make(map[string]string) for i, v := range UnmarshalStrings([]byte(tagset)) { row.Tags[e.tags[i]] = v } // Create column names. row.Columns = make([]string, 1, len(e.stmt.Fields)+1) row.Columns[0] = "time" for i, f := range e.stmt.Fields { name := f.Name() if name == "" { name = fmt.Sprintf("col%d", i) } row.Columns = append(row.Columns, name) } // Save to lookup. rows[tagset] = row } // If no values exist or last value doesn't match the timestamp then create new. if len(row.Values) == 0 || row.Values[len(row.Values)-1][0] != timestamp { values := make([]interface{}, len(e.processors)+1) values[0] = timestamp row.Values = append(row.Values, values) } return row.Values[len(row.Values)-1] } // Mapper represents an object for processing iterators. type Mapper struct { fn MapFunc // map function itr Iterator // iterators interval int64 // grouping interval } // NewMapper returns a new instance of Mapper with a given function and interval. func NewMapper(fn MapFunc, itr Iterator, interval time.Duration) *Mapper { return &Mapper{ fn: fn, itr: itr, interval: interval.Nanoseconds(), } } // Map executes the mapper's function against the iterator. // Returns a nil emitter if no data was found. func (m *Mapper) Map() *Emitter { e := NewEmitter(1) go m.run(e) return e } func (m *Mapper) run(e *Emitter) { // Close emitter when we're done. defer func() { _ = e.Close() }() // Wrap iterator with buffer. bufItr := &bufIterator{itr: m.itr} // Determine the start time. var tmin int64 if m.interval > 0 { // Align start time to interval. tmin, _ = bufItr.Peek() tmin -= (tmin % m.interval) } for { // Set the upper bound of the interval. if m.interval > 0 { bufItr.tmax = tmin + m.interval - 1 } // Execute the map function. m.fn(bufItr, e, tmin) // Exit if there was only one interval or no more data is available. if bufItr.EOF() { break } // Move the interval forward. tmin += m.interval } } // bufIterator represents a buffer iterator. type bufIterator struct { itr Iterator // underlying iterator tmax int64 // maximum key buf struct { key int64 value interface{} } buffered bool } // Tags returns the encoded dimensional values for the iterator. func (i *bufIterator) Tags() string { return i.itr.Tags() } // Next returns the next key/value pair from the iterator. func (i *bufIterator) Next() (key int64, value interface{}) { // Read the key/value pair off the buffer or underlying iterator. 
if i.buffered { i.buffered = false } else { i.buf.key, i.buf.value = i.itr.Next() } key, value = i.buf.key, i.buf.value // If key is greater than tmax then put it back on the buffer. if i.tmax != 0 && key > i.tmax { i.buffered = true return 0, nil } return key, value } // Peek returns the next key/value pair but does not move the iterator forward. func (i *bufIterator) Peek() (key int64, value interface{}) { key, value = i.Next() i.buffered = true return } // EOF returns true if there is no more data in the underlying iterator. func (i *bufIterator) EOF() bool { i.Peek(); return i.buf.key == 0 } // MapFunc represents a function used for mapping iterators. type MapFunc func(Iterator, *Emitter, int64) // MapCount computes the number of values in an iterator. func MapCount(itr Iterator, e *Emitter, tmin int64) { n := 0 for k, _ := itr.Next(); k != 0; k, _ = itr.Next() { n++ } e.Emit(Key{tmin, itr.Tags()}, float64(n)) } // MapSum computes the summation of values in an iterator. func MapSum(itr Iterator, e *Emitter, tmin int64) { n := float64(0) for k, v := itr.Next(); k != 0; k, v = itr.Next() { n += v.(float64) } e.Emit(Key{tmin, itr.Tags()}, n) } // Processor represents an object for joining reducer output. type Processor interface { Process() Name() string C() <-chan map[Key]interface{} } // Reducer represents an object for processing mapper output. // Implements processor. type Reducer struct { name string fn ReduceFunc // reduce function mappers []*Mapper // child mappers c <-chan map[Key]interface{} } // NewReducer returns a new instance of reducer. func NewReducer(fn ReduceFunc, mappers []*Mapper) *Reducer { return &Reducer{ fn: fn, mappers: mappers, } } // C returns the output channel. func (r *Reducer) C() <-chan map[Key]interface{} { return r.c } // Name returns the source name. func (r *Reducer) Name() string { return r.name } func (r *Reducer) Process() { r.Reduce() } // Reduce executes the reducer's function against all output from the mappers. func (r *Reducer) Reduce() *Emitter { inputs := make([]<-chan map[Key]interface{}, len(r.mappers)) for i, m := range r.mappers { inputs[i] = m.Map().C() } e := NewEmitter(1) r.c = e.C() go r.run(e, inputs) return e } func (r *Reducer) run(e *Emitter, inputs []<-chan map[Key]interface{}) { // Close emitter when we're done. defer func() { _ = e.Close() }() // Buffer all the inputs. bufInputs := make([]*bufInput, len(inputs)) for i, input := range inputs { bufInputs[i] = &bufInput{c: input} } // Stream data from the inputs and reduce. for { // Read all data from the inputers with the same timestamp. timestamp := int64(0) for _, bufInput := range bufInputs { rec := bufInput.peek() if rec == nil { continue } if timestamp == 0 || rec.Key.Timestamp < timestamp { timestamp = rec.Key.Timestamp } } data := make(map[Key][]interface{}) for _, bufInput := range bufInputs { for { rec := bufInput.read() if rec == nil { break } if rec.Key.Timestamp != timestamp { bufInput.unread(rec) break } data[rec.Key] = append(data[rec.Key], rec.Value) } } if len(data) == 0 { break } // Sort keys. keys := make(keySlice, 0, len(data)) for k := range data { keys = append(keys, k) } sort.Sort(keys) // Reduce each key. 
for _, k := range keys { r.fn(k, data[k], e) } } } type bufInput struct { buf *Record c <-chan map[Key]interface{} } func (i *bufInput) read() *Record { if i.buf != nil { rec := i.buf i.buf = nil return rec } m, _ := <-i.c return mapToRecord(m) } func (i *bufInput) unread(rec *Record) { i.buf = rec } func (i *bufInput) peek() *Record { rec := i.read() i.unread(rec) return rec } type Record struct { Key Key Value interface{} } func mapToRecord(m map[Key]interface{}) *Record { for k, v := range m { return &Record{k, v} } return nil } // ReduceFunc represents a function used for reducing mapper output. type ReduceFunc func(Key, []interface{}, *Emitter) // ReduceSum computes the sum of values for each key. func ReduceSum(key Key, values []interface{}, e *Emitter) { var n float64 for _, v := range values { n += v.(float64) } e.Emit(key, n) } // MapMean computes the count and sum of values in an iterator to be combined by the reducer. func MapMean(itr Iterator, e *Emitter, tmin int64) { out := &meanMapOutput{} for k, v := itr.Next(); k != 0; k, v = itr.Next() { out.Count++ out.Sum += v.(float64) } e.Emit(Key{tmin, itr.Tags()}, out) } type meanMapOutput struct { Count int Sum float64 } // ReduceMean computes the mean of values for each key. func ReduceMean(key Key, values []interface{}, e *Emitter) { out := &meanMapOutput{} for _, v := range values { val := v.(*meanMapOutput) out.Count += val.Count out.Sum += val.Sum } e.Emit(key, out.Sum/float64(out.Count)) } // MapEcho emits the data points for each group by interval func MapEcho(itr Iterator, e *Emitter, tmin int64) { var values []interface{} for k, v := itr.Next(); k != 0; k, v = itr.Next() { values = append(values, v) } e.Emit(Key{tmin, itr.Tags()}, values) } // ReducePercentile computes the percentile of values for each key. func ReducePercentile(percentile float64) ReduceFunc { return func(key Key, values []interface{}, e *Emitter) { var allValues []float64 for _, v := range values { vals := v.([]interface{}) for _, v := range vals { allValues = append(allValues, v.(float64)) } } sort.Float64s(allValues) length := len(allValues) index := int(math.Floor(float64(length)*percentile/100.0+0.5)) - 1 if index < 0 || index >= len(allValues) { e.Emit(key, 0.0) } e.Emit(key, allValues[index]) } } func MapRawQuery(itr Iterator, e *Emitter, tmin int64) { for k, v := itr.Next(); k != 0; k, v = itr.Next() { e.Emit(Key{k, itr.Tags()}, v) } } type rawQueryMapOutput struct { timestamp int64 value interface{} } func ReduceRawQuery(key Key, values []interface{}, e *Emitter) { for _, v := range values { e.Emit(key, v) } } // binaryExprEvaluator represents a processor for combining two processors. type binaryExprEvaluator struct { executor *Executor // parent executor lhs, rhs Processor // processors op Token // operation c chan map[Key]interface{} } // newBinaryExprEvaluator returns a new instance of binaryExprEvaluator. func newBinaryExprEvaluator(e *Executor, op Token, lhs, rhs Processor) *binaryExprEvaluator { return &binaryExprEvaluator{ executor: e, op: op, lhs: lhs, rhs: rhs, c: make(chan map[Key]interface{}, 0), } } // Process begins streaming values from the lhs/rhs processors func (e *binaryExprEvaluator) Process() { e.lhs.Process() e.rhs.Process() go e.run() } // C returns the streaming data channel. func (e *binaryExprEvaluator) C() <-chan map[Key]interface{} { return e.c } // name returns the source name. func (e *binaryExprEvaluator) Name() string { return "" } // run runs the processor loop to read subprocessor output and combine it. 
func (e *binaryExprEvaluator) run() { for { // Read LHS value. lhs, ok := <-e.lhs.C() if !ok { break } // Read RHS value. rhs, ok := <-e.rhs.C() if !ok { break } // Merge maps. m := make(map[Key]interface{}) for k, v := range lhs { m[k] = e.eval(v, rhs[k]) } for k, v := range rhs { // Skip value if already processed in lhs loop. if _, ok := m[k]; ok { continue } m[k] = e.eval(float64(0), v) } // Return value. e.c <- m } // Mark the channel as complete. close(e.c) } // eval evaluates two values using the evaluator's operation. func (e *binaryExprEvaluator) eval(lhs, rhs interface{}) interface{} { switch e.op { case ADD: return lhs.(float64) + rhs.(float64) case SUB: return lhs.(float64) - rhs.(float64) case MUL: return lhs.(float64) * rhs.(float64) case DIV: rhs := rhs.(float64) if rhs == 0 { return float64(0) } return lhs.(float64) / rhs default: // TODO: Validate operation & data types. panic("invalid operation: " + e.op.String()) } } // literalProcessor represents a processor that continually sends a literal value. type literalProcessor struct { val interface{} c chan map[Key]interface{} done chan chan struct{} } // newLiteralProcessor returns a literalProcessor for a given value. func newLiteralProcessor(val interface{}) *literalProcessor { return &literalProcessor{ val: val, c: make(chan map[Key]interface{}, 0), done: make(chan chan struct{}, 0), } } // C returns the streaming data channel. func (p *literalProcessor) C() <-chan map[Key]interface{} { return p.c } // Process continually returns a literal value with a "0" key. func (p *literalProcessor) Process() { go p.run() } // run executes the processor loop. func (p *literalProcessor) run() { for { select { case ch := <-p.done: close(ch) return case p.c <- map[Key]interface{}{Key{}: p.val}: } } } // stop stops the processor from sending values. func (p *literalProcessor) stop() { syncClose(p.done) } // name returns the source name. func (p *literalProcessor) Name() string { return "" } // syncClose closes a "done" channel and waits for a response. func syncClose(done chan chan struct{}) { ch := make(chan struct{}, 0) done <- ch <-ch } // Key represents a key returned by a Mapper or Reducer. type Key struct { Timestamp int64 Values string } type keySlice []Key func (p keySlice) Len() int { return len(p) } func (p keySlice) Less(i, j int) bool { return p[i].Timestamp < p[j].Timestamp || p[i].Values < p[j].Values } func (p keySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } // Emitter provides bufferred emit/flush of key/value pairs. type Emitter struct { c chan map[Key]interface{} } // NewEmitter returns a new instance of Emitter with a buffer size of n. func NewEmitter(n int) *Emitter { return &Emitter{ c: make(chan map[Key]interface{}, n), } } // Close closes the emitter's output channel. func (e *Emitter) Close() error { close(e.c); return nil } // C returns the emitter's output channel. func (e *Emitter) C() <-chan map[Key]interface{} { return e.c } // Emit sets a key and value on the emitter's bufferred data. func (e *Emitter) Emit(key Key, value interface{}) { e.c <- map[Key]interface{}{key: value} } // Row represents a single row returned from the execution of a statement. type Row struct { Name string `json:"name,omitempty"` Tags map[string]string `json:"tags,omitempty"` Columns []string `json:"columns"` Values [][]interface{} `json:"values,omitempty"` Err error `json:"err,omitempty"` } // tagsHash returns a hash of tag key/value pairs. 
func (r *Row) tagsHash() uint64 { h := fnv.New64a() keys := r.tagsKeys() for _, k := range keys { h.Write([]byte(k)) h.Write([]byte(r.Tags[k])) } return h.Sum64() } // tagKeys returns a sorted list of tag keys. func (r *Row) tagsKeys() []string { a := make([]string, len(r.Tags)) for k := range r.Tags { a = append(a, k) } sort.Strings(a) return a } // Rows represents a list of rows that can be sorted consistently by name/tag. type Rows []*Row func (p Rows) Len() int { return len(p) } func (p Rows) Less(i, j int) bool { // Sort by name first. if p[i].Name != p[j].Name { return p[i].Name < p[j].Name } // Sort by tag set hash. Tags don't have a meaningful sort order so we // just compute a hash and sort by that instead. This allows the tests // to receive rows in a predictable order every time. return p[i].tagsHash() < p[j].tagsHash() } func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] } // MarshalStrings encodes an array of strings into a byte slice. func MarshalStrings(a []string) (ret []byte) { for _, s := range a { // Create a slice for len+data b := make([]byte, 2+len(s)) binary.BigEndian.PutUint16(b[0:2], uint16(len(s))) copy(b[2:], s) // Append it to the full byte slice. ret = append(ret, b...) } return } // UnmarshalStrings decodes a byte slice into an array of strings. func UnmarshalStrings(b []byte) (ret []string) { for { // If there's no more data then exit. if len(b) == 0 { return } // Decode size + data. n := binary.BigEndian.Uint16(b[0:2]) ret = append(ret, string(b[2:n+2])) // Move the byte slice forward and retry. b = b[n+2:] } }
[]
[]
[]
[]
[]
go
null
null
null
adaptation_layer/tasks.py
# Copyright 2019 CNIT, Francesco Lombardo, Matteo Pergolesi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from typing import Dict from urllib.error import HTTPError import redis from celery import Celery from celery.utils.log import get_task_logger from requests import post, RequestException import iwf_repository from driver.osm import OSM from error_handler import ServerError, Error IWFREPO = os.getenv('IWFREPO', 'false').lower() redis_host = os.getenv('REDIS_HOST') if os.getenv('REDIS_HOST') else 'redis' redis_port = int(os.getenv('REDIS_PORT')) if os.getenv('REDIS_PORT') else 6379 # TTL for key in redis KEY_TTL = 86400 celery = Celery('tasks', broker='redis://{0}:{1}/0'.format(redis_host, redis_port), backend='redis://{0}:{1}/0'.format(redis_host, redis_port)) if IWFREPO == 'true': celery.conf.beat_schedule = { 'add_post_osm_vims_periodic': { 'task': 'tasks.post_osm_vims', 'schedule': iwf_repository.interval }, 'add_osm_notifications': { 'task': 'tasks.osm_notifications', 'schedule': 5.0 } } celery.conf.timezone = 'UTC' logger = get_task_logger(__name__) redis_client = redis.Redis( host=redis_host, port=redis_port, db=1, decode_responses=True) @celery.task def post_osm_vims(): osm_list = [] try: osm_list = iwf_repository.find_nfvos_by_type('osm') except (ServerError, HTTPError)as e: logger.warning('error with iwf repository. skip post_osm_vims') logger.debug(str(e)) for osm in osm_list: osm_vims = [] if osm['credentials']: try: driver = OSM(iwf_repository.convert_cred(osm)) osm_vims, headers = driver.get_vim_list() except Error as e: logger.warning( 'error contacting osm {0}:{1}'.format( osm['credentials']['host'], osm['credentials']['port'], )) logger.debug(str(e)) continue for v in osm_vims: try: iwf_repository.post_vim_safe(v, osm['_links']['self']['href']) except (ServerError, HTTPError)as e: logger.warning('error with iwf repository. 
skip vim') logger.debug(str(e)) @celery.task def osm_notifications(): osm_list = [] try: osm_list = iwf_repository.find_nfvos_by_type('osm') except (ServerError, HTTPError)as e: logger.warning('error with iwf repository, skip osm_notifications') logger.debug(str(e)) for osm in osm_list: ops = [] if osm['credentials']: try: driver = OSM(iwf_repository.convert_cred(osm)) ops, headers = driver.get_op_list({'args': {}}) except Error as e: logger.warning( 'error contacting osm {0}:{1}'.format( osm['credentials']['host'], osm['credentials']['port'], )) logger.debug(str(e)) continue for op in ops: last_s = redis_client.get(op['id']) logger.debug('last_s from redis: {}'.format(last_s)) if not last_s or last_s != op['operationState']: logger.info('different op state, send notification') logger.debug('{},{}'.format(last_s, op['operationState'])) redis_client.setex(op['id'], KEY_TTL, op['operationState']) notify_payload = { "nsInstanceId": op['nsInstanceId'], "nsLcmOpOccId": op['id'], "operation": op['lcmOperationType'], "notificationType": "NsLcmOperationOccurrenceNotification", "timestamp": op['startTime'], "operationState": op['operationState'] } logger.debug(notify_payload) forward_notification.delay(notify_payload) @celery.task def forward_notification(notification: Dict): if IWFREPO == 'false': logger.warning('iwf repository disabled, ignore notification') return None subs = [] try: subs = iwf_repository.search_subs_by_ns_instance( notification['nsInstanceId']) except (ServerError, HTTPError)as e: logger.warning('error with iwf repository. skip post_osm_vims') logger.debug(str(e)) if not subs: logger.warning('no subscriptions for nsInstanceId {0}'.format( notification['nsInstanceId'])) for s in subs: try: if notification['notificationType'] in s['notificationTypes']: resp = post(s['callbackUri'], json=notification) resp.raise_for_status() logger.info( 'Notification sent to {0}'.format(s['callbackUri'])) except RequestException as e: logger.warning( 'Cannot send notification to {}'.format(s['callbackUri'])) logger.debug(str(e))
[]
[]
[ "REDIS_HOST", "REDIS_PORT", "IWFREPO" ]
[]
["REDIS_HOST", "REDIS_PORT", "IWFREPO"]
python
3
0
auth/auth.go
package auth import ( "context" "errors" "os" "time" "github.com/golang-jwt/jwt" "github.com/google/uuid" "github.com/varunamachi/libx/errx" ) type AuthData map[string]interface{} var ( ErrAuthentication = errors.New("auth.user.authenticationError") ErrUserRetrieval = errors.New("auth.user.retrievalError") ErrToken = errors.New("auth.user.authTokenError") ) type Authenticator interface { Authenticate(gtx context.Context, authData AuthData) error } type UserGetter interface { GetUser(gtx context.Context, authData AuthData) (User, error) } type UserAuthenticator interface { Authenticator UserGetter } // Login - authenticates the user, get's the user information and generates a // JWT token. The user and the token are then returned. And in case of error // the error is returned func Login( gtx context.Context, authr UserAuthenticator, data AuthData) (User, string, error) { if err := authr.Authenticate(gtx, data); err != nil { return nil, "", errx.Errf(err, "failed to authenticate user") } user, err := authr.GetUser(gtx, data) if err != nil { return nil, "", errx.Errf(err, "failed to retrieve user") } token := jwt.New(jwt.SigningMethodHS256) claims := token.Claims.(jwt.MapClaims) claims["userId"] = user.Id() claims["exp"] = time.Now().Add(time.Hour * 24).Unix() signed, err := token.SignedString(GetJWTKey()) if err != nil { return nil, "", errx.Errf(err, "failed to generate session token") } return user, signed, nil } //GetJWTKey - gives a unique JWT key func GetJWTKey() []byte { jwtKey := os.Getenv("VLIBX_JWT_KEY") if len(jwtKey) == 0 { jwtKey = uuid.NewString() // TODO - may be need to do something better os.Setenv("VLIBX_JWT_KEY", jwtKey) } return []byte(jwtKey) }
[ "\"VLIBX_JWT_KEY\"" ]
[]
[ "VLIBX_JWT_KEY" ]
[]
["VLIBX_JWT_KEY"]
go
1
0
metricbeat/module/etcd/self/self_integration_test.go
// Licensed to Elasticsearch B.V. under one or more contributor // license agreements. See the NOTICE file distributed with // this work for additional information regarding copyright // ownership. Elasticsearch B.V. licenses this file to you under // the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. // +build integration package self import ( "os" "testing" "github.com/stretchr/testify/assert" "github.com/elastic/beats/libbeat/tests/compose" mbtest "github.com/elastic/beats/metricbeat/mb/testing" ) func TestData(t *testing.T) { compose.EnsureUp(t, "etcd") f := mbtest.NewEventFetcher(t, getConfig()) err := mbtest.WriteEvent(f, t) if err != nil { t.Fatal("write", err) } } func TestFetch(t *testing.T) { compose.EnsureUp(t, "etcd") f := mbtest.NewEventFetcher(t, getConfig()) event, err := f.Fetch() if !assert.NoError(t, err) { t.FailNow() } assert.NotNil(t, event) t.Logf("%s/%s event: %+v", f.Module().Name(), f.Name(), event) } func getConfig() map[string]interface{} { return map[string]interface{}{ "module": "etcd", "metricsets": []string{"self"}, "hosts": []string{GetEnvHost() + ":" + GetEnvPort()}, } } func GetEnvHost() string { host := os.Getenv("ETCD_HOST") if len(host) == 0 { host = "127.0.0.1" } return host } func GetEnvPort() string { port := os.Getenv("ETCD_PORT") if len(port) == 0 { port = "2379" } return port }
[ "\"ETCD_HOST\"", "\"ETCD_PORT\"" ]
[]
[ "ETCD_PORT", "ETCD_HOST" ]
[]
["ETCD_PORT", "ETCD_HOST"]
go
2
0
examples/v2/cloud-workload-security/UpdateCloudWorkloadSecurityAgentRule.go
// Update a Cloud Workload Security Agent rule returns "OK" response package main import ( "context" "encoding/json" "fmt" "os" datadog "github.com/DataDog/datadog-api-client-go/api/v2/datadog" ) func main() { // there is a valid "agent_rule" in the system AgentRuleDataID := os.Getenv("AGENT_RULE_DATA_ID") body := datadog.CloudWorkloadSecurityAgentRuleUpdateRequest{ Data: datadog.CloudWorkloadSecurityAgentRuleUpdateData{ Attributes: datadog.CloudWorkloadSecurityAgentRuleUpdateAttributes{ Description: datadog.PtrString("Test Agent rule"), Enabled: datadog.PtrBool(true), Expression: datadog.PtrString(`exec.file.name == "sh"`), }, Type: datadog.CLOUDWORKLOADSECURITYAGENTRULETYPE_AGENT_RULE, }, } ctx := datadog.NewDefaultContext(context.Background()) configuration := datadog.NewConfiguration() apiClient := datadog.NewAPIClient(configuration) resp, r, err := apiClient.CloudWorkloadSecurityApi.UpdateCloudWorkloadSecurityAgentRule(ctx, AgentRuleDataID, body) if err != nil { fmt.Fprintf(os.Stderr, "Error when calling `CloudWorkloadSecurityApi.UpdateCloudWorkloadSecurityAgentRule`: %v\n", err) fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", r) } responseContent, _ := json.MarshalIndent(resp, "", " ") fmt.Fprintf(os.Stdout, "Response from `CloudWorkloadSecurityApi.UpdateCloudWorkloadSecurityAgentRule`:\n%s\n", responseContent) }
[ "\"AGENT_RULE_DATA_ID\"" ]
[]
[ "AGENT_RULE_DATA_ID" ]
[]
["AGENT_RULE_DATA_ID"]
go
1
0
owasp-top10-2017-apps/a3/snake-pro/app/api/routes.go
package api import ( "fmt" "net/http" "os" "time" jwt "github.com/dgrijalva/jwt-go" db "github.com/globocom/secDevLabs/owasp-top10-2017-apps/a3/snake-pro/app/db/mongo" "github.com/globocom/secDevLabs/owasp-top10-2017-apps/a3/snake-pro/app/pass" "github.com/globocom/secDevLabs/owasp-top10-2017-apps/a3/snake-pro/app/types" "github.com/google/uuid" "github.com/labstack/echo" ) // HealthCheck is the heath check function. func HealthCheck(c echo.Context) error { return c.String(http.StatusOK, "WORKING!\n") } // WriteCookie writes a cookie into echo Context func WriteCookie(c echo.Context, jwt string) error { cookie := new(http.Cookie) cookie.Name = "sessionIDsnake" cookie.Value = jwt c.SetCookie(cookie) return c.String(http.StatusOK, "") } // ReadCookie reads a cookie from echo Context. func ReadCookie(c echo.Context) (string, error) { cookie, err := c.Cookie("sessionIDsnake") if err != nil { return "", err } return cookie.Value, err } // Register registers a new user into MongoDB. func Register(c echo.Context) error { userData := types.UserData{} err := c.Bind(&userData) if err != nil { // error binding JSON return c.JSON(http.StatusBadRequest, map[string]string{"result": "error", "details": "Invalid Input."}) } if userData.Password != userData.RepeatPassword { return c.JSON(http.StatusBadRequest, map[string]string{"result": "error", "details": "Passwords do not match."}) } newGUID1 := uuid.Must(uuid.NewRandom()) userData.UserID = newGUID1.String() userData.HighestScore = 0 err = db.RegisterUser(userData) if err != nil { // could not register this user into MongoDB (or MongoDB err connection) return c.JSON(http.StatusInternalServerError, map[string]string{"result": "error", "details": "Error user data2."}) } msgUser := fmt.Sprintf("User %s created!", userData.Username) return c.String(http.StatusOK, msgUser) } // Login checks MongoDB if this user exists and then returns a JWT session cookie. func Login(c echo.Context) error { loginAttempt := types.LoginAttempt{} err := c.Bind(&loginAttempt) if err != nil { return c.JSON(http.StatusBadRequest, map[string]string{"result": "error", "details": "Error login."}) } // input validation missing! do it later! userDataQuery := map[string]interface{}{"username": loginAttempt.Username} userDataResult, err := db.GetUserData(userDataQuery) if err != nil { // could not find this user in MongoDB (or MongoDB err connection) return c.JSON(http.StatusForbidden, map[string]string{"result": "error", "details": "Error login."}) } validPass := pass.CheckPass(userDataResult.Password, loginAttempt.Password) if !validPass { // wrong password return c.JSON(http.StatusForbidden, map[string]string{"result": "error", "details": "Error login."}) } // Create token token := jwt.New(jwt.SigningMethodHS256) // Set claims claims := token.Claims.(jwt.MapClaims) claims["name"] = userDataResult.Username claims["exp"] = time.Now().Add(time.Hour * 72).Unix() // Generate encoded token and send it as response. t, err := token.SignedString([]byte(os.Getenv("SECRET_KEY"))) if err != nil { return err } err = WriteCookie(c, t) if err != nil { return c.JSON(http.StatusBadRequest, map[string]string{"result": "error", "details": "Error login5."}) } c.Response().Header().Set("Content-type", "text/html") messageLogon := fmt.Sprintf("Hello, %s! Welcome to SnakePro", userDataResult.Username) // err = c.Redirect(http.StatusFound, "http://www.localhost:10033/game/ranking") return c.String(http.StatusOK, messageLogon) }
[ "\"SECRET_KEY\"" ]
[]
[ "SECRET_KEY" ]
[]
["SECRET_KEY"]
go
1
0
pbl/asgi.py
""" ASGI config for pbl project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pbl.settings") application = get_asgi_application()
[]
[]
[]
[]
[]
python
0
0
app/job/main/mcn/dao/dao_test.go
package dao

import (
	"flag"
	"os"
	"testing"

	"go-common/app/job/main/mcn/conf"
)

var (
	d *Dao
)

func TestMain(m *testing.M) {
	if os.Getenv("DEPLOY_ENV") != "" {
		flag.Set("app_id", "main.archive.mcn-job")
		flag.Set("conf_token", "6214e16a2d849e375d899cbe37df31d1")
		flag.Set("tree_id", "58888")
		flag.Set("conf_version", "docker-1")
		flag.Set("deploy_env", "uat")
		flag.Set("conf_host", "config.bilibili.co")
		flag.Set("conf_path", "/tmp")
		flag.Set("region", "sh")
		flag.Set("zone", "sh001")
	} else {
		flag.Set("conf", "../cmd/mcn-job-test.toml")
	}
	if os.Getenv("UT_LOCAL_TEST") != "" {
		flag.Set("conf", "../cmd/mcn-job-test.toml")
	}
	flag.Parse()
	if err := conf.Init(); err != nil {
		panic(err)
	}
	d = New(conf.Conf)
	os.Exit(m.Run())
}
[ "\"DEPLOY_ENV\"", "\"UT_LOCAL_TEST\"" ]
[]
[ "DEPLOY_ENV", "UT_LOCAL_TEST" ]
[]
["DEPLOY_ENV", "UT_LOCAL_TEST"]
go
2
0
src/HackerRank/exercicios/intro/DateAndTime.java
package HackerRank.exercicios.intro;

import java.io.*;
import java.time.LocalDate;

/**
 * @author Bruno;
 * Retornar o nome do dia - getDay (Monday, Tuesday, ..., Sunday),
 * dado uma especifica data MM/DD/YYYY;
 */
class Result {
    public static String findDay(int month, int day, int year) {
        return LocalDate.of(year, month, day)
                .getDayOfWeek()
                .name();
    }
}

public class DateAndTime {
    public static void main(String[] args) throws IOException {
        BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(System.in));
        BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")));

        String[] firstMultipleInput = bufferedReader.readLine().replaceAll("\\s+$", "").split(" ");

        int month = Integer.parseInt(firstMultipleInput[0]);
        int day = Integer.parseInt(firstMultipleInput[1]);
        int year = Integer.parseInt(firstMultipleInput[2]);

        String res = Result.findDay(month, day, year);

        bufferedWriter.write(res);
        bufferedWriter.newLine();

        bufferedReader.close();
        bufferedWriter.close();
    }
}
[ "\"OUTPUT_PATH\"" ]
[]
[ "OUTPUT_PATH" ]
[]
["OUTPUT_PATH"]
java
1
0
tests/providers/test_steam_.py
import asyncio
import concurrent
from concurrent.futures import ThreadPoolExecutor
from unittest import TestCase
import os

import cv2
from apscheduler.schedulers.background import BackgroundScheduler

import bot
from bot.providers import trainer_matches as tm
from bot.duel_links_runtime import DuelLinkRunTime
from bot.providers import Steam
from bot.common import crop_image
from bot.shared import alphabet
from bot.utils.common import default_config


class TestSteam(TestCase):
    provider = None
    __debug_pictures__ = False

    images_needed_debug = [
        "street_replay.png",
        "home_page_steam.png",
        os.path.join("steam", "steam_pre_battle.png"),
        os.path.join("steam", "steam_back.png")
    ]

    def setUp(self):
        os.environ['LOG_CFG'] = r'D:\Sync\OneDrive\Yu-gi-oh_bot\config.ini'
        scheduler = BackgroundScheduler()
        dlRuntime = DuelLinkRunTime(default_config(r'D:\Sync\OneDrive\Yu-gi-oh_bot'), scheduler, False)
        self.provider = Steam(scheduler, default_config(r'D:\Sync\OneDrive\Yu-gi-oh_bot'), dlRuntime, False)
        self.provider.sleep_factor = 0.0
        self.loop = asyncio.get_event_loop()
        self.loop.set_default_executor(ThreadPoolExecutor(2))
        dlRuntime._loop = self.loop
        self.provider.is_process_running()

    def test_battle(self):
        self.fail()

    def test_check_if_battle(self):
        location = os.path.join(self.provider.assets, "steam", "steam_pre_battle.png")
        img = cv2.imread(location)
        self.assertTrue(self.provider.check_if_battle(img), "Is Battle")

    def test_check_battle_is_running(self):
        self.fail()

    def test_click_auto_duel(self):
        self.provider.click_auto_duel()

    def test_compare_with_back_button(self):
        img = os.path.join(self.provider.assets, "steam", "steam_back.png")
        t = tm.BoundingTrainer(img, bounding_area=self.provider.predefined.main_area)
        location = os.path.join(self.provider.assets, "back__.png")
        # t.show_area_bounded(self.provider.predefined.main_area, img)
        # t._debug = True
        self.assertTrue(t.get_matches(location, 3) is True, "Expecting a back button")
        # t.compare()

    def test_determine_autoduel_status(self):
        self.fail()

    def test_ensure_resolutions_matches(self):
        location = os.path.join(self.provider.assets, "steam", "download_update.png")
        img = cv2.imread(location)
        self.provider.ensure_resolutions_matches(img)
        img = img[0:100, 0:100]
        with self.assertRaises(bot.providers.BotSetupError) as context:
            self.provider.ensure_resolutions_matches(img)

    def test_is_process_running(self):
        self.fail()

    def test_key_escape(self):
        self.fail()

    def test_kill_process(self):
        self.fail()

    def test_method_name(self):
        self.fail()

    def test_pass_through_initial_screen(self):
        self.provider.is_process_running()
        test_function = lambda x: x is False
        with self.assertRaises(Exception) as context:
            self.provider.__generic_wait_for__('DuelLinks Landing Page', test_function, None)
        self.assertTrue('Maximum exception count' in str(context.exception))
        self.provider.sleep_factor = 0.5
        self.assertTrue(callable(self.provider.__is_initial_screen__))
        with self.assertRaises(concurrent.futures._base.TimeoutError) as context:
            self.provider.__generic_wait_for__('DuelLinks Landing Page', test_function,
                                               self.provider.__is_initial_screen__, timeout=5)

    def test_start_process(self):
        self.fail()

    def test_scan(self):
        self.fail()

    def test_scan_for_ok(self):
        img = os.path.join(self.provider.assets, "steam", "steam_back.png")
        t = tm.BoundingTrainer(img, bounding_area=self.provider.predefined.main_area)
        location = os.path.join(self.provider.assets, "back__.png")
        # t.show_area_bounded(self.provider.predefined.main_area, img)
        # t._debug = True
        self.assertTrue(t.get_matches(location, 3) is True, "Expecting a back button")
        # t.compare()

    def test_scan_for_close(self):
        img = os.path.join(self.provider.assets, "steam", "steam_close.png")
        area = self.provider.predefined.main_area
        area['width'] = 400
        t = tm.BoundingTrainer(img, bounding_area=area)
        location = os.path.join(self.provider.assets, "close.png")
        # t.show_area_bounded(self.provider.predefined.main_area, img)
        # t._debug = True
        self.assertTrue(t.get_matches(location, 3) is True, "Expecting a back button")
        img = os.path.join(self.provider.assets, "steam", "steam_ok.png")
        t = tm.BoundingTrainer(img, bounding_area=area)
        location = os.path.join(self.provider.assets, "close.png")
        t.get_matches(location, 3)
        # t.show_area_bounded(self.provider.predefined.main_area, img)
        # t._debug = True
        self.assertTrue(t.get_matches(location, 3) is False, "Is Ok button not close")

    def test_scan_for_download(self):
        img = os.path.join(self.provider.assets, "steam", "download_update.png")
        t = tm.BoundingTrainer(img, 500, 300, 600, 300)
        location = os.path.join(self.provider.assets, "download_button.png")
        # t.show_area(500, 300, 600, 300, img)
        self.assertTrue(t.get_matches(location, 3) is True, "Expecting a download button")

    def test_swipe_right(self):
        self.fail()
        # self.provider.swipe_right(0)

    def test_swipe_left(self):
        self.fail()
        # self.provider.swipe_left(0)

    def test_swipe_time(self):
        self.fail()

    def test_swipe(self):
        self.fail()

    def test_tap(self):
        x, y = self.provider.predefined.yugioh_initiate_link
        self.provider.tap(x, y)

    def test_verify_battle(self):
        location = os.path.join(self.provider.assets, "steam", "duel_variant_autoduel.png")
        img = cv2.imread(location)
        points, version = self.provider.verify_battle(img)
        self.assertTrue(version == 2)

    def test_wait_for(self):
        location = os.path.join(self.provider.assets, "steam", "ok_button_duel.png")
        img = cv2.imread(location)
        img = crop_image(img, **self.provider.predefined.ok_button_duel)
        word = self.provider.img_to_string(img, alphabet).lower()
        self.assertTrue(word == 'ok')

    def test_wait_for_notifications(self):
        self.fail()

    def test_battle_icons(self):
        self.provider.is_process_running()
        img = self.provider.get_img_from_screen_shot()
        area = self.provider.predefined.main_area
        area['height'] = 700
        t = tm.BoundingTrainer(img, bounding_area=area)
        t.capture_white_circles()
[]
[]
[ "LOG_CFG" ]
[]
["LOG_CFG"]
python
1
0
cwl_schema.py
# # This file was autogenerated using schema-salad-tool --codegen=python # from __future__ import absolute_import import ruamel.yaml from ruamel.yaml.comments import CommentedBase, CommentedMap, CommentedSeq import re import os import traceback from typing import (Any, AnyStr, Callable, cast, Dict, List, Iterable, Tuple, TypeVar, Union, Text) import six lineno_re = re.compile(u"^(.*?:[0-9]+:[0-9]+: )(( *)(.*))") def _add_lc_filename(r, source): # type: (ruamel.yaml.comments.CommentedBase, AnyStr) -> None if isinstance(r, ruamel.yaml.comments.CommentedBase): r.lc.filename = source if isinstance(r, list): for d in r: _add_lc_filename(d, source) elif isinstance(r, dict): for d in six.itervalues(r): _add_lc_filename(d, source) def relname(source): # type: (Text) -> Text if source.startswith("file://"): source = source[7:] source = os.path.relpath(source) return source def add_lc_filename(r, source): # type: (ruamel.yaml.comments.CommentedBase, Text) -> None _add_lc_filename(r, relname(source)) def reflow(text, maxline, shift=""): # type: (Text, int, Text) -> Text if maxline < 20: maxline = 20 if len(text) > maxline: sp = text.rfind(' ', 0, maxline) if sp < 1: sp = text.find(' ', sp+1) if sp == -1: sp = len(text) if sp < len(text): return "%s\n%s%s" % (text[0:sp], shift, reflow(text[sp+1:], maxline, shift)) return text def indent(v, nolead=False, shift=u" ", bullet=u" "): # type: (Text, bool, Text, Text) -> Text if nolead: return v.splitlines()[0] + u"\n".join([shift + l for l in v.splitlines()[1:]]) else: def lineno(i, l): # type: (int, Text) -> Text r = lineno_re.match(l) if r is not None: return r.group(1) + (bullet if i == 0 else shift) + r.group(2) else: return (bullet if i == 0 else shift) + l return u"\n".join([lineno(i, l) for i, l in enumerate(v.splitlines())]) def bullets(textlist, bul): # type: (List[Text], Text) -> Text if len(textlist) == 1: return textlist[0] else: return "\n".join(indent(t, bullet=bul) for t in textlist) def strip_dup_lineno(text, maxline=None): # type: (Text, int) -> Text if maxline is None: maxline = int(os.environ.get("COLUMNS", "100")) pre = None msg = [] maxno = 0 for l in text.splitlines(): g = lineno_re.match(l) if not g: continue maxno = max(maxno, len(g.group(1))) for l in text.splitlines(): g = lineno_re.match(l) if not g: msg.append(l) continue if g.group(1) != pre: shift = maxno + len(g.group(3)) g2 = reflow(g.group(2), maxline-shift, " " * shift) pre = g.group(1) msg.append(pre + " " * (maxno-len(g.group(1))) + g2) else: g2 = reflow(g.group(2), maxline-maxno, " " * (maxno+len(g.group(3)))) msg.append(" " * maxno + g2) return "\n".join(msg) def cmap(d, lc=None, fn=None): # type: (Union[int, float, str, Text, Dict, List], List[int], Text) -> Union[int, float, str, Text, CommentedMap, CommentedSeq] if lc is None: lc = [0, 0, 0, 0] if fn is None: fn = "test" if isinstance(d, CommentedMap): fn = d.lc.filename if hasattr(d.lc, "filename") else fn for k,v in six.iteritems(d): if k in d.lc.data: d[k] = cmap(v, lc=d.lc.data[k], fn=fn) else: d[k] = cmap(v, lc, fn=fn) return d if isinstance(d, CommentedSeq): fn = d.lc.filename if hasattr(d.lc, "filename") else fn for k,v in enumerate(d): if k in d.lc.data: d[k] = cmap(v, lc=d.lc.data[k], fn=fn) else: d[k] = cmap(v, lc, fn=fn) return d if isinstance(d, dict): cm = CommentedMap() for k in sorted(d.keys()): v = d[k] if isinstance(v, CommentedBase): uselc = [v.lc.line, v.lc.col, v.lc.line, v.lc.col] vfn = v.lc.filename if hasattr(v.lc, "filename") else fn else: uselc = lc vfn = fn cm[k] = cmap(v, lc=uselc, fn=vfn) 
cm.lc.add_kv_line_col(k, uselc) cm.lc.filename = fn return cm if isinstance(d, list): cs = CommentedSeq() for k,v in enumerate(d): if isinstance(v, CommentedBase): uselc = [v.lc.line, v.lc.col, v.lc.line, v.lc.col] vfn = v.lc.filename if hasattr(v.lc, "filename") else fn else: uselc = lc vfn = fn cs.append(cmap(v, lc=uselc, fn=vfn)) cs.lc.add_kv_line_col(k, uselc) cs.lc.filename = fn return cs else: return d class SourceLine(object): def __init__(self, item, key=None, raise_type=six.text_type, include_traceback=False): # type: (Any, Any, Callable, bool) -> None self.item = item self.key = key self.raise_type = raise_type self.include_traceback = include_traceback def __enter__(self): # type: () -> SourceLine return self def __exit__(self, exc_type, # type: Any exc_value, # type: Any tb # type: Any ): # -> Any if not exc_value: return if self.include_traceback: raise self.makeError("\n".join(traceback.format_exception(exc_type, exc_value, tb))) else: raise self.makeError(six.text_type(exc_value)) def makeLead(self): # type: () -> Text if self.key is None or self.item.lc.data is None or self.key not in self.item.lc.data: return "%s:%i:%i:" % (self.item.lc.filename if hasattr(self.item.lc, "filename") else "", (self.item.lc.line or 0)+1, (self.item.lc.col or 0)+1) else: return "%s:%i:%i:" % (self.item.lc.filename if hasattr(self.item.lc, "filename") else "", (self.item.lc.data[self.key][0] or 0)+1, (self.item.lc.data[self.key][1] or 0)+1) def makeError(self, msg): # type: (Text) -> Any if not isinstance(self.item, ruamel.yaml.comments.CommentedBase): return self.raise_type(msg) errs = [] lead = self.makeLead() for m in msg.splitlines(): if bool(lineno_re.match(m)): errs.append(m) else: errs.append("%s %s" % (lead, m)) return self.raise_type("\n".join(errs)) import six from six.moves import urllib, StringIO import ruamel.yaml as yaml import copy import re from typing import List, Text, Dict, Union, Any, Sequence import uuid class ValidationException(Exception): pass class Savable(object): pass class LoadingOptions(object): def __init__(self, fetcher=None, namespaces=None, fileuri=None, copyfrom=None, schemas=None): if copyfrom is not None: self.idx = copyfrom.idx if fetcher is None: fetcher = copyfrom.fetcher if fileuri is None: fileuri = copyfrom.fileuri if namespaces is None: namespaces = copyfrom.namespaces if namespaces is None: schemas = copyfrom.schemas else: self.idx = {} if fetcher is None: import os import requests from cachecontrol.wrapper import CacheControl from cachecontrol.caches import FileCache from schema_salad.ref_resolver import DefaultFetcher if "HOME" in os.environ: session = CacheControl( requests.Session(), cache=FileCache(os.path.join(os.environ["HOME"], ".cache", "salad"))) elif "TMP" in os.environ: session = CacheControl( requests.Session(), cache=FileCache(os.path.join(os.environ["TMP"], ".cache", "salad"))) else: session = CacheControl( requests.Session(), cache=FileCache("/tmp", ".cache", "salad")) self.fetcher = DefaultFetcher({}, session) else: self.fetcher = fetcher self.fileuri = fileuri self.vocab = _vocab self.rvocab = _rvocab self.namespaces = namespaces self.schemas = schemas if namespaces is not None: self.vocab = self.vocab.copy() self.rvocab = self.rvocab.copy() for k,v in six.iteritems(namespaces): self.vocab[k] = v self.rvocab[v] = k def load_field(val, fieldtype, baseuri, loadingOptions): if isinstance(val, dict): if "$import" in val: return _document_load_by_url(fieldtype, loadingOptions.fetcher.urljoin(loadingOptions.fileuri, val["$import"]), 
loadingOptions) elif "$include" in val: val = loadingOptions.fetcher.fetch_text(loadingOptions.fetcher.urljoin(loadingOptions.fileuri, val["$include"])) return fieldtype.load(val, baseuri, loadingOptions) def save(val, top=True): if isinstance(val, Savable): return val.save(top=top) if isinstance(val, list): return [save(v, top=False) for v in val] return val def expand_url(url, # type: Union[str, Text] base_url, # type: Union[str, Text] loadingOptions, # type: LoadingOptions scoped_id=False, # type: bool vocab_term=False, # type: bool scoped_ref=None # type: int ): # type: (...) -> Text if not isinstance(url, six.string_types): return url url = Text(url) if url in (u"@id", u"@type"): return url if vocab_term and url in loadingOptions.vocab: return url if bool(loadingOptions.vocab) and u":" in url: prefix = url.split(u":")[0] if prefix in loadingOptions.vocab: url = loadingOptions.vocab[prefix] + url[len(prefix) + 1:] split = urllib.parse.urlsplit(url) if ((bool(split.scheme) and split.scheme in [u'http', u'https', u'file']) or url.startswith(u"$(") or url.startswith(u"${")): pass elif scoped_id and not bool(split.fragment): splitbase = urllib.parse.urlsplit(base_url) frg = u"" if bool(splitbase.fragment): frg = splitbase.fragment + u"/" + split.path else: frg = split.path pt = splitbase.path if splitbase.path != '' else "/" url = urllib.parse.urlunsplit( (splitbase.scheme, splitbase.netloc, pt, splitbase.query, frg)) elif scoped_ref is not None and not bool(split.fragment): splitbase = urllib.parse.urlsplit(base_url) sp = splitbase.fragment.split(u"/") n = scoped_ref while n > 0 and len(sp) > 0: sp.pop() n -= 1 sp.append(url) url = urllib.parse.urlunsplit(( splitbase.scheme, splitbase.netloc, splitbase.path, splitbase.query, u"/".join(sp))) else: url = loadingOptions.fetcher.urljoin(base_url, url) if vocab_term: split = urllib.parse.urlsplit(url) if bool(split.scheme): if url in loadingOptions.rvocab: return loadingOptions.rvocab[url] else: raise ValidationException("Term '%s' not in vocabulary" % url) return url class _Loader(object): def load(self, doc, baseuri, loadingOptions, docRoot=None): # type: (Any, Text, LoadingOptions, Union[Text, None]) -> Any pass class _AnyLoader(_Loader): def load(self, doc, baseuri, loadingOptions, docRoot=None): if doc is not None: return doc raise ValidationException("Expected non-null") class _PrimitiveLoader(_Loader): def __init__(self, tp): # type: (Union[type, Sequence[type]]) -> None self.tp = tp def load(self, doc, baseuri, loadingOptions, docRoot=None): if not isinstance(doc, self.tp): raise ValidationException("Expected a %s but got %s" % (self.tp, type(doc))) return doc def __repr__(self): return str(self.tp) class _ArrayLoader(_Loader): def __init__(self, items): # type: (_Loader) -> None self.items = items def load(self, doc, baseuri, loadingOptions, docRoot=None): if not isinstance(doc, list): raise ValidationException("Expected a list") r = [] errors = [] for i in range(0, len(doc)): try: lf = load_field(doc[i], _UnionLoader((self, self.items)), baseuri, loadingOptions) if isinstance(lf, list): r.extend(lf) else: r.append(lf) except ValidationException as e: errors.append(SourceLine(doc, i, str).makeError(six.text_type(e))) if errors: raise ValidationException("\n".join(errors)) return r def __repr__(self): return "array<%s>" % self.items class _EnumLoader(_Loader): def __init__(self, symbols): # type: (Sequence[Text]) -> None self.symbols = symbols def load(self, doc, baseuri, loadingOptions, docRoot=None): if doc in self.symbols: return 
doc else: raise ValidationException("Expected one of %s" % (self.symbols,)) class _RecordLoader(_Loader): def __init__(self, classtype): # type: (type) -> None self.classtype = classtype def load(self, doc, baseuri, loadingOptions, docRoot=None): if not isinstance(doc, dict): raise ValidationException("Expected a dict") return self.classtype(doc, baseuri, loadingOptions, docRoot=docRoot) def __repr__(self): return str(self.classtype) class _UnionLoader(_Loader): def __init__(self, alternates): # type: (Sequence[_Loader]) -> None self.alternates = alternates def load(self, doc, baseuri, loadingOptions, docRoot=None): errors = [] for t in self.alternates: try: return t.load(doc, baseuri, loadingOptions, docRoot=docRoot) except ValidationException as e: errors.append("tried %s but\n%s" % (t, indent(str(e)))) raise ValidationException(bullets(errors, "- ")) def __repr__(self): return " | ".join(str(a) for a in self.alternates) class _URILoader(_Loader): def __init__(self, inner, scoped_id, vocab_term, scoped_ref): # type: (_Loader, bool, bool, Union[int, None]) -> None self.inner = inner self.scoped_id = scoped_id self.vocab_term = vocab_term self.scoped_ref = scoped_ref def load(self, doc, baseuri, loadingOptions, docRoot=None): if isinstance(doc, list): doc = [expand_url(i, baseuri, loadingOptions, self.scoped_id, self.vocab_term, self.scoped_ref) for i in doc] if isinstance(doc, six.string_types): doc = expand_url(doc, baseuri, loadingOptions, self.scoped_id, self.vocab_term, self.scoped_ref) return self.inner.load(doc, baseuri, loadingOptions) class _TypeDSLLoader(_Loader): typeDSLregex = re.compile(u"^([^[?]+)(\[\])?(\?)?$") def __init__(self, inner, refScope): # type: (_Loader, Union[int, None]) -> None self.inner = inner self.refScope = refScope def resolve(self, doc, baseuri, loadingOptions): m = self.typeDSLregex.match(doc) if m: first = expand_url(m.group(1), baseuri, loadingOptions, False, True, self.refScope) second = third = None if bool(m.group(2)): second = {"type": "array", "items": first} #second = CommentedMap((("type", "array"), # ("items", first))) #second.lc.add_kv_line_col("type", lc) #second.lc.add_kv_line_col("items", lc) #second.lc.filename = filename if bool(m.group(3)): third = [u"null", second or first] #third = CommentedSeq([u"null", second or first]) #third.lc.add_kv_line_col(0, lc) #third.lc.add_kv_line_col(1, lc) #third.lc.filename = filename doc = third or second or first return doc def load(self, doc, baseuri, loadingOptions, docRoot=None): if isinstance(doc, list): r = [] for d in doc: if isinstance(d, six.string_types): resolved = self.resolve(d, baseuri, loadingOptions) if isinstance(resolved, list): for i in resolved: if i not in r: r.append(i) else: if resolved not in r: r.append(resolved) else: r.append(d) doc = r elif isinstance(doc, six.string_types): doc = self.resolve(doc, baseuri, loadingOptions) return self.inner.load(doc, baseuri, loadingOptions) class _IdMapLoader(_Loader): def __init__(self, inner, mapSubject, mapPredicate): # type: (_Loader, Text, Union[Text, None]) -> None self.inner = inner self.mapSubject = mapSubject self.mapPredicate = mapPredicate def load(self, doc, baseuri, loadingOptions, docRoot=None): if isinstance(doc, dict): r = [] for k in sorted(doc.keys()): val = doc[k] if isinstance(val, dict): v = copy.copy(val) if hasattr(val, 'lc'): v.lc.data = val.lc.data v.lc.filename = val.lc.filename else: if self.mapPredicate: v = {self.mapPredicate: val} else: raise ValidationException("No mapPredicate") v[self.mapSubject] = k 
r.append(v) doc = r return self.inner.load(doc, baseuri, loadingOptions) def _document_load(loader, doc, baseuri, loadingOptions): if isinstance(doc, six.string_types): return _document_load_by_url(loader, loadingOptions.fetcher.urljoin(baseuri, doc), loadingOptions) if isinstance(doc, dict): if "$namespaces" in doc: loadingOptions = LoadingOptions(copyfrom=loadingOptions, namespaces=doc["$namespaces"]) doc = {k: v for k,v in doc.items() if k != "$namespaces"} if "$schemas" in doc: loadingOptions = LoadingOptions(copyfrom=loadingOptions, schemas=doc["$schemas"]) doc = {k: v for k,v in doc.items() if k != "$schemas"} if "$base" in doc: baseuri = doc["$base"] if "$graph" in doc: return loader.load(doc["$graph"], baseuri, loadingOptions) else: return loader.load(doc, baseuri, loadingOptions, docRoot=baseuri) if isinstance(doc, list): return loader.load(doc, baseuri, loadingOptions) raise ValidationException() def _document_load_by_url(loader, url, loadingOptions): if url in loadingOptions.idx: return _document_load(loader, loadingOptions.idx[url], url, loadingOptions) text = loadingOptions.fetcher.fetch_text(url) if isinstance(text, bytes): textIO = StringIO(text.decode('utf-8')) else: textIO = StringIO(text) textIO.name = url # type: ignore result = yaml.round_trip_load(textIO) add_lc_filename(result, url) loadingOptions.idx[url] = result loadingOptions = LoadingOptions(copyfrom=loadingOptions, fileuri=url) return _document_load(loader, result, url, loadingOptions) def file_uri(path, split_frag=False): # type: (str, bool) -> str if path.startswith("file://"): return path if split_frag: pathsp = path.split("#", 2) frag = "#" + urllib.parse.quote(str(pathsp[1])) if len(pathsp) == 2 else "" urlpath = urllib.request.pathname2url(str(pathsp[0])) else: urlpath = urllib.request.pathname2url(path) frag = "" if urlpath.startswith("//"): return "file:%s%s" % (urlpath, frag) else: return "file://%s%s" % (urlpath, frag) def prefix_url(url, namespaces): for k,v in namespaces.items(): if url.startswith(v): return k+":"+url[len(v):] return url class RecordField(Savable): """ A field of a record. 
""" def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if 'name' in doc: try: self.name = load_field(doc.get('name'), uri_strtype_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'name', str).makeError("the `name` field is not valid because:\n"+str(e))) else: self.name = None if self.name is None: if docRoot is not None: self.name = docRoot else: raise ValidationException("Missing name") baseuri = self.name if 'doc' in doc: try: self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'doc', str).makeError("the `doc` field is not valid because:\n"+str(e))) else: self.doc = None try: self.type = load_field(doc.get('type'), typedsl_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e))) self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `name`, `doc`, `type`" % (k))) break if errors: raise ValidationException("Trying 'RecordField'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.name is not None: r['name'] = save(self.name, top=False) if self.doc is not None: r['doc'] = save(self.doc, top=False) if self.type is not None: r['type'] = save(self.type, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['name', 'doc', 'type']) class RecordSchema(Savable): def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if 'fields' in doc: try: self.fields = load_field(doc.get('fields'), idmap_fields_union_of_None_type_or_array_of_RecordFieldLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'fields', str).makeError("the `fields` field is not valid because:\n"+str(e))) else: self.fields = None try: self.type = load_field(doc.get('type'), typedsl_Record_symbolLoader_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e))) self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `fields`, `type`" % (k))) break if errors: raise ValidationException("Trying 'RecordSchema'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, 
self.loadingOptions.vocab)] = self.extension_fields[ef] if self.fields is not None: r['fields'] = save(self.fields, top=False) if self.type is not None: r['type'] = save(self.type, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['fields', 'type']) class EnumSchema(Savable): """ Define an enumerated type. """ def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions try: self.symbols = load_field(doc.get('symbols'), uri_array_of_strtype_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'symbols', str).makeError("the `symbols` field is not valid because:\n"+str(e))) try: self.type = load_field(doc.get('type'), typedsl_Enum_symbolLoader_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e))) self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `symbols`, `type`" % (k))) break if errors: raise ValidationException("Trying 'EnumSchema'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.symbols is not None: r['symbols'] = save(self.symbols, top=False) if self.type is not None: r['type'] = save(self.type, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['symbols', 'type']) class ArraySchema(Savable): def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions try: self.items = load_field(doc.get('items'), uri_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_False_True_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'items', str).makeError("the `items` field is not valid because:\n"+str(e))) try: self.type = load_field(doc.get('type'), typedsl_Array_symbolLoader_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e))) self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `items`, `type`" % (k))) break if errors: raise ValidationException("Trying 'ArraySchema'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.items is not None: r['items'] = save(self.items, top=False) if self.type is not None: r['type'] = save(self.type, top=False) if top and 
self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['items', 'type']) class File(Savable): """ Represents a file (or group of files when `secondaryFiles` is provided) that will be accessible by tools using standard POSIX file system call API such as open(2) and read(2). Files are represented as objects with `class` of `File`. File objects have a number of properties that provide metadata about the file. The `location` property of a File is a URI that uniquely identifies the file. Implementations must support the file:// URI scheme and may support other schemes such as http://. The value of `location` may also be a relative reference, in which case it must be resolved relative to the URI of the document it appears in. Alternately to `location`, implementations must also accept the `path` property on File, which must be a filesystem path available on the same host as the CWL runner (for inputs) or the runtime environment of a command line tool execution (for command line tool outputs). If no `location` or `path` is specified, a file object must specify `contents` with the UTF-8 text content of the file. This is a "file literal". File literals do not correspond to external resources, but are created on disk with `contents` with when needed for a executing a tool. Where appropriate, expressions can return file literals to define new files on a runtime. The maximum size of `contents` is 64 kilobytes. The `basename` property defines the filename on disk where the file is staged. This may differ from the resource name. If not provided, `basename` must be computed from the last path part of `location` and made available to expressions. The `secondaryFiles` property is a list of File or Directory objects that must be staged in the same directory as the primary file. It is an error for file names to be duplicated in `secondaryFiles`. The `size` property is the size in bytes of the File. It must be computed from the resource and made available to expressions. The `checksum` field contains a cryptographic hash of the file content for use it verifying file contents. Implementations may, at user option, enable or disable computation of the `checksum` field for performance or other reasons. However, the ability to compute output checksums is required to pass the CWL conformance test suite. When executing a CommandLineTool, the files and secondary files may be staged to an arbitrary directory, but must use the value of `basename` for the filename. The `path` property must be file path in the context of the tool execution runtime (local to the compute node, or within the executing container). All computed properties should be available to expressions. File literals also must be staged and `path` must be set. When collecting CommandLineTool outputs, `glob` matching returns file paths (with the `path` property) and the derived properties. This can all be modified by `outputEval`. Alternately, if the file `cwl.output.json` is present in the output, `outputBinding` is ignored. File objects in the output must provide either a `location` URI or a `path` property in the context of the tool execution runtime (local to the compute node, or within the executing container). When evaluating an ExpressionTool, file objects must be referenced via `location` (the expression tool does not have access to files on disk so `path` is meaningless) or as file literals. It is legal to return a file object with an existing `location` but a different `basename`. 
The `loadContents` field of ExpressionTool inputs behaves the same as on CommandLineTool inputs, however it is not meaningful on the outputs. An ExpressionTool may forward file references from input to output by using the same value for `location`. """ def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if doc.get('class') != 'File': raise ValidationException("Not a File") if 'location' in doc: try: self.location = load_field(doc.get('location'), uri_union_of_None_type_or_strtype_False_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'location', str).makeError("the `location` field is not valid because:\n"+str(e))) else: self.location = None if 'path' in doc: try: self.path = load_field(doc.get('path'), uri_union_of_None_type_or_strtype_False_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'path', str).makeError("the `path` field is not valid because:\n"+str(e))) else: self.path = None if 'basename' in doc: try: self.basename = load_field(doc.get('basename'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'basename', str).makeError("the `basename` field is not valid because:\n"+str(e))) else: self.basename = None if 'dirname' in doc: try: self.dirname = load_field(doc.get('dirname'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'dirname', str).makeError("the `dirname` field is not valid because:\n"+str(e))) else: self.dirname = None if 'nameroot' in doc: try: self.nameroot = load_field(doc.get('nameroot'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'nameroot', str).makeError("the `nameroot` field is not valid because:\n"+str(e))) else: self.nameroot = None if 'nameext' in doc: try: self.nameext = load_field(doc.get('nameext'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'nameext', str).makeError("the `nameext` field is not valid because:\n"+str(e))) else: self.nameext = None if 'checksum' in doc: try: self.checksum = load_field(doc.get('checksum'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'checksum', str).makeError("the `checksum` field is not valid because:\n"+str(e))) else: self.checksum = None if 'size' in doc: try: self.size = load_field(doc.get('size'), union_of_None_type_or_inttype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'size', str).makeError("the `size` field is not valid because:\n"+str(e))) else: self.size = None if 'secondaryFiles' in doc: try: self.secondaryFiles = load_field(doc.get('secondaryFiles'), union_of_None_type_or_array_of_union_of_FileLoader_or_DirectoryLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'secondaryFiles', str).makeError("the `secondaryFiles` field is not valid because:\n"+str(e))) else: self.secondaryFiles = None if 'format' in doc: try: self.format = load_field(doc.get('format'), uri_union_of_None_type_or_strtype_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 
'format', str).makeError("the `format` field is not valid because:\n"+str(e))) else: self.format = None if 'contents' in doc: try: self.contents = load_field(doc.get('contents'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'contents', str).makeError("the `contents` field is not valid because:\n"+str(e))) else: self.contents = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `class`, `location`, `path`, `basename`, `dirname`, `nameroot`, `nameext`, `checksum`, `size`, `secondaryFiles`, `format`, `contents`" % (k))) break if errors: raise ValidationException("Trying 'File'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] r['class'] = 'File' if self.location is not None: r['location'] = save(self.location, top=False) if self.path is not None: r['path'] = save(self.path, top=False) if self.basename is not None: r['basename'] = save(self.basename, top=False) if self.dirname is not None: r['dirname'] = save(self.dirname, top=False) if self.nameroot is not None: r['nameroot'] = save(self.nameroot, top=False) if self.nameext is not None: r['nameext'] = save(self.nameext, top=False) if self.checksum is not None: r['checksum'] = save(self.checksum, top=False) if self.size is not None: r['size'] = save(self.size, top=False) if self.secondaryFiles is not None: r['secondaryFiles'] = save(self.secondaryFiles, top=False) if self.format is not None: r['format'] = save(self.format, top=False) if self.contents is not None: r['contents'] = save(self.contents, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['class', 'location', 'path', 'basename', 'dirname', 'nameroot', 'nameext', 'checksum', 'size', 'secondaryFiles', 'format', 'contents']) class Directory(Savable): """ Represents a directory to present to a command line tool. Directories are represented as objects with `class` of `Directory`. Directory objects have a number of properties that provide metadata about the directory. The `location` property of a Directory is a URI that uniquely identifies the directory. Implementations must support the file:// URI scheme and may support other schemes such as http://. Alternately to `location`, implementations must also accept the `path` property on Direcotry, which must be a filesystem path available on the same host as the CWL runner (for inputs) or the runtime environment of a command line tool execution (for command line tool outputs). A Directory object may have a `listing` field. This is a list of File and Directory objects that are contained in the Directory. For each entry in `listing`, the `basename` property defines the name of the File or Subdirectory when staged to disk. If `listing` is not provided, the implementation must have some way of fetching the Directory listing at runtime based on the `location` field. If a Directory does not have `location`, it is a Directory literal. A Directory literal must provide `listing`. Directory literals must be created on disk at runtime as needed. The resources in a Directory literal do not need to have any implied relationship in their `location`. 
For example, a Directory listing may contain two files located on different hosts. It is the responsibility of the runtime to ensure that those files are staged to disk appropriately. Secondary files associated with files in `listing` must also be staged to the same Directory. When executing a CommandLineTool, Directories must be recursively staged first and have local values of `path` assigend. Directory objects in CommandLineTool output must provide either a `location` URI or a `path` property in the context of the tool execution runtime (local to the compute node, or within the executing container). An ExpressionTool may forward file references from input to output by using the same value for `location`. Name conflicts (the same `basename` appearing multiple times in `listing` or in any entry in `secondaryFiles` in the listing) is a fatal error. """ def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if doc.get('class') != 'Directory': raise ValidationException("Not a Directory") if 'location' in doc: try: self.location = load_field(doc.get('location'), uri_union_of_None_type_or_strtype_False_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'location', str).makeError("the `location` field is not valid because:\n"+str(e))) else: self.location = None if 'path' in doc: try: self.path = load_field(doc.get('path'), uri_union_of_None_type_or_strtype_False_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'path', str).makeError("the `path` field is not valid because:\n"+str(e))) else: self.path = None if 'basename' in doc: try: self.basename = load_field(doc.get('basename'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'basename', str).makeError("the `basename` field is not valid because:\n"+str(e))) else: self.basename = None if 'listing' in doc: try: self.listing = load_field(doc.get('listing'), union_of_None_type_or_array_of_union_of_FileLoader_or_DirectoryLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'listing', str).makeError("the `listing` field is not valid because:\n"+str(e))) else: self.listing = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `class`, `location`, `path`, `basename`, `listing`" % (k))) break if errors: raise ValidationException("Trying 'Directory'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] r['class'] = 'Directory' if self.location is not None: r['location'] = save(self.location, top=False) if self.path is not None: r['path'] = save(self.path, top=False) if self.basename is not None: r['basename'] = save(self.basename, top=False) if self.listing is not None: r['listing'] = save(self.listing, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['class', 'location', 'path', 'basename', 'listing']) class SchemaBase(Savable): pass class 
Parameter(SchemaBase): """ Define an input or output parameter to a process. """ pass class InputBinding(Savable): pass class OutputBinding(Savable): pass class InputSchema(SchemaBase): pass class OutputSchema(SchemaBase): pass class InputRecordField(RecordField): def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if 'name' in doc: try: self.name = load_field(doc.get('name'), uri_strtype_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'name', str).makeError("the `name` field is not valid because:\n"+str(e))) else: self.name = None if self.name is None: if docRoot is not None: self.name = docRoot else: raise ValidationException("Missing name") baseuri = self.name if 'doc' in doc: try: self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'doc', str).makeError("the `doc` field is not valid because:\n"+str(e))) else: self.doc = None try: self.type = load_field(doc.get('type'), typedsl_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e))) if 'inputBinding' in doc: try: self.inputBinding = load_field(doc.get('inputBinding'), union_of_None_type_or_CommandLineBindingLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'inputBinding', str).makeError("the `inputBinding` field is not valid because:\n"+str(e))) else: self.inputBinding = None if 'label' in doc: try: self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'label', str).makeError("the `label` field is not valid because:\n"+str(e))) else: self.label = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `name`, `doc`, `type`, `inputBinding`, `label`" % (k))) break if errors: raise ValidationException("Trying 'InputRecordField'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.name is not None: r['name'] = save(self.name, top=False) if self.doc is not None: r['doc'] = save(self.doc, top=False) if self.type is not None: r['type'] = save(self.type, top=False) if self.inputBinding is not None: r['inputBinding'] = save(self.inputBinding, top=False) if self.label is not None: r['label'] = save(self.label, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['name', 'doc', 'type', 'inputBinding', 'label']) class InputRecordSchema(RecordSchema, InputSchema): def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data 
doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if 'name' in doc: try: self.name = load_field(doc.get('name'), uri_union_of_None_type_or_strtype_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'name', str).makeError("the `name` field is not valid because:\n"+str(e))) else: self.name = None if self.name is None: if docRoot is not None: self.name = docRoot else: self.name = "_:" + str(uuid.uuid4()) baseuri = self.name if 'fields' in doc: try: self.fields = load_field(doc.get('fields'), idmap_fields_union_of_None_type_or_array_of_InputRecordFieldLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'fields', str).makeError("the `fields` field is not valid because:\n"+str(e))) else: self.fields = None try: self.type = load_field(doc.get('type'), typedsl_Record_symbolLoader_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e))) if 'label' in doc: try: self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'label', str).makeError("the `label` field is not valid because:\n"+str(e))) else: self.label = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `fields`, `type`, `label`, `name`" % (k))) break if errors: raise ValidationException("Trying 'InputRecordSchema'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.name is not None: r['name'] = save(self.name, top=False) if self.fields is not None: r['fields'] = save(self.fields, top=False) if self.type is not None: r['type'] = save(self.type, top=False) if self.label is not None: r['label'] = save(self.label, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['fields', 'type', 'label', 'name']) class InputEnumSchema(EnumSchema, InputSchema): def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if 'name' in doc: try: self.name = load_field(doc.get('name'), uri_union_of_None_type_or_strtype_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'name', str).makeError("the `name` field is not valid because:\n"+str(e))) else: self.name = None if self.name is None: if docRoot is not None: self.name = docRoot else: self.name = "_:" + str(uuid.uuid4()) baseuri = self.name try: self.symbols = load_field(doc.get('symbols'), uri_array_of_strtype_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'symbols', str).makeError("the `symbols` field is not valid because:\n"+str(e))) try: self.type = load_field(doc.get('type'), typedsl_Enum_symbolLoader_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e))) if 
'label' in doc: try: self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'label', str).makeError("the `label` field is not valid because:\n"+str(e))) else: self.label = None if 'inputBinding' in doc: try: self.inputBinding = load_field(doc.get('inputBinding'), union_of_None_type_or_CommandLineBindingLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'inputBinding', str).makeError("the `inputBinding` field is not valid because:\n"+str(e))) else: self.inputBinding = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `symbols`, `type`, `label`, `name`, `inputBinding`" % (k))) break if errors: raise ValidationException("Trying 'InputEnumSchema'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.name is not None: r['name'] = save(self.name, top=False) if self.symbols is not None: r['symbols'] = save(self.symbols, top=False) if self.type is not None: r['type'] = save(self.type, top=False) if self.label is not None: r['label'] = save(self.label, top=False) if self.inputBinding is not None: r['inputBinding'] = save(self.inputBinding, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['symbols', 'type', 'label', 'name', 'inputBinding']) class InputArraySchema(ArraySchema, InputSchema): def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions try: self.items = load_field(doc.get('items'), uri_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_False_True_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'items', str).makeError("the `items` field is not valid because:\n"+str(e))) try: self.type = load_field(doc.get('type'), typedsl_Array_symbolLoader_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e))) if 'label' in doc: try: self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'label', str).makeError("the `label` field is not valid because:\n"+str(e))) else: self.label = None if 'inputBinding' in doc: try: self.inputBinding = load_field(doc.get('inputBinding'), union_of_None_type_or_CommandLineBindingLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'inputBinding', str).makeError("the `inputBinding` field is not valid because:\n"+str(e))) else: self.inputBinding = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: 
errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `items`, `type`, `label`, `inputBinding`" % (k))) break if errors: raise ValidationException("Trying 'InputArraySchema'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.items is not None: r['items'] = save(self.items, top=False) if self.type is not None: r['type'] = save(self.type, top=False) if self.label is not None: r['label'] = save(self.label, top=False) if self.inputBinding is not None: r['inputBinding'] = save(self.inputBinding, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['items', 'type', 'label', 'inputBinding']) class OutputRecordField(RecordField): def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if 'name' in doc: try: self.name = load_field(doc.get('name'), uri_strtype_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'name', str).makeError("the `name` field is not valid because:\n"+str(e))) else: self.name = None if self.name is None: if docRoot is not None: self.name = docRoot else: raise ValidationException("Missing name") baseuri = self.name if 'doc' in doc: try: self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'doc', str).makeError("the `doc` field is not valid because:\n"+str(e))) else: self.doc = None try: self.type = load_field(doc.get('type'), typedsl_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e))) if 'outputBinding' in doc: try: self.outputBinding = load_field(doc.get('outputBinding'), union_of_None_type_or_CommandOutputBindingLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'outputBinding', str).makeError("the `outputBinding` field is not valid because:\n"+str(e))) else: self.outputBinding = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `name`, `doc`, `type`, `outputBinding`" % (k))) break if errors: raise ValidationException("Trying 'OutputRecordField'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.name is not None: r['name'] = save(self.name, top=False) if self.doc is not None: r['doc'] = save(self.doc, top=False) if self.type is not None: r['type'] = save(self.type, top=False) if self.outputBinding is not None: r['outputBinding'] = save(self.outputBinding, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = 
frozenset(['name', 'doc', 'type', 'outputBinding']) class OutputRecordSchema(RecordSchema, OutputSchema): def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if 'fields' in doc: try: self.fields = load_field(doc.get('fields'), idmap_fields_union_of_None_type_or_array_of_OutputRecordFieldLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'fields', str).makeError("the `fields` field is not valid because:\n"+str(e))) else: self.fields = None try: self.type = load_field(doc.get('type'), typedsl_Record_symbolLoader_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e))) if 'label' in doc: try: self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'label', str).makeError("the `label` field is not valid because:\n"+str(e))) else: self.label = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `fields`, `type`, `label`" % (k))) break if errors: raise ValidationException("Trying 'OutputRecordSchema'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.fields is not None: r['fields'] = save(self.fields, top=False) if self.type is not None: r['type'] = save(self.type, top=False) if self.label is not None: r['label'] = save(self.label, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['fields', 'type', 'label']) class OutputEnumSchema(EnumSchema, OutputSchema): def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions try: self.symbols = load_field(doc.get('symbols'), uri_array_of_strtype_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'symbols', str).makeError("the `symbols` field is not valid because:\n"+str(e))) try: self.type = load_field(doc.get('type'), typedsl_Enum_symbolLoader_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e))) if 'label' in doc: try: self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'label', str).makeError("the `label` field is not valid because:\n"+str(e))) else: self.label = None if 'outputBinding' in doc: try: self.outputBinding = load_field(doc.get('outputBinding'), union_of_None_type_or_CommandOutputBindingLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'outputBinding', str).makeError("the `outputBinding` field is not valid because:\n"+str(e))) else: self.outputBinding = None self.extension_fields = {} for k in doc.keys(): if k not 
in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `symbols`, `type`, `label`, `outputBinding`" % (k))) break if errors: raise ValidationException("Trying 'OutputEnumSchema'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.symbols is not None: r['symbols'] = save(self.symbols, top=False) if self.type is not None: r['type'] = save(self.type, top=False) if self.label is not None: r['label'] = save(self.label, top=False) if self.outputBinding is not None: r['outputBinding'] = save(self.outputBinding, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['symbols', 'type', 'label', 'outputBinding']) class OutputArraySchema(ArraySchema, OutputSchema): def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions try: self.items = load_field(doc.get('items'), uri_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_False_True_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'items', str).makeError("the `items` field is not valid because:\n"+str(e))) try: self.type = load_field(doc.get('type'), typedsl_Array_symbolLoader_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e))) if 'label' in doc: try: self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'label', str).makeError("the `label` field is not valid because:\n"+str(e))) else: self.label = None if 'outputBinding' in doc: try: self.outputBinding = load_field(doc.get('outputBinding'), union_of_None_type_or_CommandOutputBindingLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'outputBinding', str).makeError("the `outputBinding` field is not valid because:\n"+str(e))) else: self.outputBinding = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `items`, `type`, `label`, `outputBinding`" % (k))) break if errors: raise ValidationException("Trying 'OutputArraySchema'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.items is not None: r['items'] = save(self.items, top=False) if self.type is not None: r['type'] = save(self.type, top=False) if self.label is not None: r['label'] = save(self.label, top=False) if self.outputBinding is not None: r['outputBinding'] = save(self.outputBinding, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = 
self.loadingOptions.namespaces return r attrs = frozenset(['items', 'type', 'label', 'outputBinding']) class InputParameter(Parameter): def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if 'id' in doc: try: self.id = load_field(doc.get('id'), uri_strtype_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'id', str).makeError("the `id` field is not valid because:\n"+str(e))) else: self.id = None if self.id is None: if docRoot is not None: self.id = docRoot else: raise ValidationException("Missing id") baseuri = self.id if 'label' in doc: try: self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'label', str).makeError("the `label` field is not valid because:\n"+str(e))) else: self.label = None if 'secondaryFiles' in doc: try: self.secondaryFiles = load_field(doc.get('secondaryFiles'), union_of_None_type_or_strtype_or_ExpressionLoader_or_array_of_union_of_strtype_or_ExpressionLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'secondaryFiles', str).makeError("the `secondaryFiles` field is not valid because:\n"+str(e))) else: self.secondaryFiles = None if 'streamable' in doc: try: self.streamable = load_field(doc.get('streamable'), union_of_None_type_or_booltype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'streamable', str).makeError("the `streamable` field is not valid because:\n"+str(e))) else: self.streamable = None if 'doc' in doc: try: self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype_or_array_of_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'doc', str).makeError("the `doc` field is not valid because:\n"+str(e))) else: self.doc = None if 'format' in doc: try: self.format = load_field(doc.get('format'), uri_union_of_None_type_or_strtype_or_array_of_strtype_or_ExpressionLoader_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'format', str).makeError("the `format` field is not valid because:\n"+str(e))) else: self.format = None if 'inputBinding' in doc: try: self.inputBinding = load_field(doc.get('inputBinding'), union_of_None_type_or_CommandLineBindingLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'inputBinding', str).makeError("the `inputBinding` field is not valid because:\n"+str(e))) else: self.inputBinding = None if 'default' in doc: try: self.default = load_field(doc.get('default'), union_of_None_type_or_Any_type, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'default', str).makeError("the `default` field is not valid because:\n"+str(e))) else: self.default = None if 'type' in doc: try: self.type = load_field(doc.get('type'), typedsl_union_of_None_type_or_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e))) else: self.type 
= None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `label`, `secondaryFiles`, `streamable`, `doc`, `id`, `format`, `inputBinding`, `default`, `type`" % (k))) break if errors: raise ValidationException("Trying 'InputParameter'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.id is not None: r['id'] = save(self.id, top=False) if self.label is not None: r['label'] = save(self.label, top=False) if self.secondaryFiles is not None: r['secondaryFiles'] = save(self.secondaryFiles, top=False) if self.streamable is not None: r['streamable'] = save(self.streamable, top=False) if self.doc is not None: r['doc'] = save(self.doc, top=False) if self.format is not None: r['format'] = save(self.format, top=False) if self.inputBinding is not None: r['inputBinding'] = save(self.inputBinding, top=False) if self.default is not None: r['default'] = save(self.default, top=False) if self.type is not None: r['type'] = save(self.type, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['label', 'secondaryFiles', 'streamable', 'doc', 'id', 'format', 'inputBinding', 'default', 'type']) class OutputParameter(Parameter): def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if 'id' in doc: try: self.id = load_field(doc.get('id'), uri_strtype_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'id', str).makeError("the `id` field is not valid because:\n"+str(e))) else: self.id = None if self.id is None: if docRoot is not None: self.id = docRoot else: raise ValidationException("Missing id") baseuri = self.id if 'label' in doc: try: self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'label', str).makeError("the `label` field is not valid because:\n"+str(e))) else: self.label = None if 'secondaryFiles' in doc: try: self.secondaryFiles = load_field(doc.get('secondaryFiles'), union_of_None_type_or_strtype_or_ExpressionLoader_or_array_of_union_of_strtype_or_ExpressionLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'secondaryFiles', str).makeError("the `secondaryFiles` field is not valid because:\n"+str(e))) else: self.secondaryFiles = None if 'streamable' in doc: try: self.streamable = load_field(doc.get('streamable'), union_of_None_type_or_booltype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'streamable', str).makeError("the `streamable` field is not valid because:\n"+str(e))) else: self.streamable = None if 'doc' in doc: try: self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype_or_array_of_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'doc', str).makeError("the `doc` field is not valid because:\n"+str(e))) else: self.doc = None if 'outputBinding' in doc: try: self.outputBinding = 
load_field(doc.get('outputBinding'), union_of_None_type_or_CommandOutputBindingLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'outputBinding', str).makeError("the `outputBinding` field is not valid because:\n"+str(e))) else: self.outputBinding = None if 'format' in doc: try: self.format = load_field(doc.get('format'), uri_union_of_None_type_or_strtype_or_ExpressionLoader_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'format', str).makeError("the `format` field is not valid because:\n"+str(e))) else: self.format = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `label`, `secondaryFiles`, `streamable`, `doc`, `id`, `outputBinding`, `format`" % (k))) break if errors: raise ValidationException("Trying 'OutputParameter'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.id is not None: r['id'] = save(self.id, top=False) if self.label is not None: r['label'] = save(self.label, top=False) if self.secondaryFiles is not None: r['secondaryFiles'] = save(self.secondaryFiles, top=False) if self.streamable is not None: r['streamable'] = save(self.streamable, top=False) if self.doc is not None: r['doc'] = save(self.doc, top=False) if self.outputBinding is not None: r['outputBinding'] = save(self.outputBinding, top=False) if self.format is not None: r['format'] = save(self.format, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['label', 'secondaryFiles', 'streamable', 'doc', 'id', 'outputBinding', 'format']) class ProcessRequirement(Savable): """ A process requirement declares a prerequisite that may or must be fulfilled before executing a process. See [`Process.hints`](#process) and [`Process.requirements`](#process). Process requirements are the primary mechanism for specifying extensions to the CWL core specification. """ pass class Process(Savable): """ The base executable type in CWL is the `Process` object defined by the document. Note that the `Process` object is abstract and cannot be directly executed. """ pass class InlineJavascriptRequirement(ProcessRequirement): """ Indicates that the workflow platform must support inline Javascript expressions. If this requirement is not present, the workflow platform must not perform expression interpolatation. 
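    As a purely illustrative sketch (not part of the generated documentation), a mapping this class is meant to load might look like the following; the field names come from this class's `attrs` (`class`, `expressionLib`), while the function body is hypothetical:

        {
            "class": "InlineJavascriptRequirement",
            "expressionLib": ["function concat(a, b) { return a + b; }"]
        }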
""" def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if doc.get('class') != 'InlineJavascriptRequirement': raise ValidationException("Not a InlineJavascriptRequirement") if 'expressionLib' in doc: try: self.expressionLib = load_field(doc.get('expressionLib'), union_of_None_type_or_array_of_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'expressionLib', str).makeError("the `expressionLib` field is not valid because:\n"+str(e))) else: self.expressionLib = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `class`, `expressionLib`" % (k))) break if errors: raise ValidationException("Trying 'InlineJavascriptRequirement'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] r['class'] = 'InlineJavascriptRequirement' if self.expressionLib is not None: r['expressionLib'] = save(self.expressionLib, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['class', 'expressionLib']) class SchemaDefRequirement(ProcessRequirement): """ This field consists of an array of type definitions which must be used when interpreting the `inputs` and `outputs` fields. When a `type` field contain a IRI, the implementation must check if the type is defined in `schemaDefs` and use that definition. If the type is not found in `schemaDefs`, it is an error. The entries in `schemaDefs` must be processed in the order listed such that later schema definitions may refer to earlier schema definitions. 
""" def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if doc.get('class') != 'SchemaDefRequirement': raise ValidationException("Not a SchemaDefRequirement") try: self.types = load_field(doc.get('types'), array_of_union_of_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'types', str).makeError("the `types` field is not valid because:\n"+str(e))) self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `class`, `types`" % (k))) break if errors: raise ValidationException("Trying 'SchemaDefRequirement'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] r['class'] = 'SchemaDefRequirement' if self.types is not None: r['types'] = save(self.types, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['class', 'types']) class EnvironmentDef(Savable): """ Define an environment variable that will be set in the runtime environment by the workflow platform when executing the command line tool. May be the result of executing an expression, such as getting a parameter from input. """ def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions try: self.envName = load_field(doc.get('envName'), strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'envName', str).makeError("the `envName` field is not valid because:\n"+str(e))) try: self.envValue = load_field(doc.get('envValue'), union_of_strtype_or_ExpressionLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'envValue', str).makeError("the `envValue` field is not valid because:\n"+str(e))) self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `envName`, `envValue`" % (k))) break if errors: raise ValidationException("Trying 'EnvironmentDef'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.envName is not None: r['envName'] = save(self.envName, top=False) if self.envValue is not None: r['envValue'] = save(self.envValue, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['envName', 'envValue']) class CommandLineBinding(InputBinding): """ When listed under `inputBinding` in the input schema, the term "value" refers to the the corresponding value in the input object. 
For binding objects listed in `CommandLineTool.arguments`, the term "value" refers to the effective value after evaluating `valueFrom`. The binding behavior when building the command line depends on the data type of the value. If there is a mismatch between the type described by the input schema and the effective value, such as resulting from an expression evaluation, an implementation must use the data type of the effective value. - **string**: Add `prefix` and the string to the command line. - **number**: Add `prefix` and decimal representation to command line. - **boolean**: If true, add `prefix` to the command line. If false, add nothing. - **File**: Add `prefix` and the value of [`File.path`](#File) to the command line. - **array**: If `itemSeparator` is specified, add `prefix` and the join the array into a single string with `itemSeparator` separating the items. Otherwise first add `prefix`, then recursively process individual elements. - **object**: Add `prefix` only, and recursively add object fields for which `inputBinding` is specified. - **null**: Add nothing. """ def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if 'loadContents' in doc: try: self.loadContents = load_field(doc.get('loadContents'), union_of_None_type_or_booltype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'loadContents', str).makeError("the `loadContents` field is not valid because:\n"+str(e))) else: self.loadContents = None if 'position' in doc: try: self.position = load_field(doc.get('position'), union_of_None_type_or_inttype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'position', str).makeError("the `position` field is not valid because:\n"+str(e))) else: self.position = None if 'prefix' in doc: try: self.prefix = load_field(doc.get('prefix'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'prefix', str).makeError("the `prefix` field is not valid because:\n"+str(e))) else: self.prefix = None if 'separate' in doc: try: self.separate = load_field(doc.get('separate'), union_of_None_type_or_booltype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'separate', str).makeError("the `separate` field is not valid because:\n"+str(e))) else: self.separate = None if 'itemSeparator' in doc: try: self.itemSeparator = load_field(doc.get('itemSeparator'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'itemSeparator', str).makeError("the `itemSeparator` field is not valid because:\n"+str(e))) else: self.itemSeparator = None if 'valueFrom' in doc: try: self.valueFrom = load_field(doc.get('valueFrom'), union_of_None_type_or_strtype_or_ExpressionLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'valueFrom', str).makeError("the `valueFrom` field is not valid because:\n"+str(e))) else: self.valueFrom = None if 'shellQuote' in doc: try: self.shellQuote = load_field(doc.get('shellQuote'), union_of_None_type_or_booltype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'shellQuote', str).makeError("the `shellQuote` field is not valid because:\n"+str(e))) else: self.shellQuote = None self.extension_fields = {} 
for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `loadContents`, `position`, `prefix`, `separate`, `itemSeparator`, `valueFrom`, `shellQuote`" % (k))) break if errors: raise ValidationException("Trying 'CommandLineBinding'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.loadContents is not None: r['loadContents'] = save(self.loadContents, top=False) if self.position is not None: r['position'] = save(self.position, top=False) if self.prefix is not None: r['prefix'] = save(self.prefix, top=False) if self.separate is not None: r['separate'] = save(self.separate, top=False) if self.itemSeparator is not None: r['itemSeparator'] = save(self.itemSeparator, top=False) if self.valueFrom is not None: r['valueFrom'] = save(self.valueFrom, top=False) if self.shellQuote is not None: r['shellQuote'] = save(self.shellQuote, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['loadContents', 'position', 'prefix', 'separate', 'itemSeparator', 'valueFrom', 'shellQuote']) class CommandOutputBinding(OutputBinding): """ Describes how to generate an output parameter based on the files produced by a CommandLineTool. The output parameter value is generated by applying these operations in the following order: - glob - loadContents - outputEval - secondaryFiles """ def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if 'glob' in doc: try: self.glob = load_field(doc.get('glob'), union_of_None_type_or_strtype_or_ExpressionLoader_or_array_of_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'glob', str).makeError("the `glob` field is not valid because:\n"+str(e))) else: self.glob = None if 'loadContents' in doc: try: self.loadContents = load_field(doc.get('loadContents'), union_of_None_type_or_booltype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'loadContents', str).makeError("the `loadContents` field is not valid because:\n"+str(e))) else: self.loadContents = None if 'outputEval' in doc: try: self.outputEval = load_field(doc.get('outputEval'), union_of_None_type_or_strtype_or_ExpressionLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'outputEval', str).makeError("the `outputEval` field is not valid because:\n"+str(e))) else: self.outputEval = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `glob`, `loadContents`, `outputEval`" % (k))) break if errors: raise ValidationException("Trying 'CommandOutputBinding'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.glob is not None: r['glob'] = save(self.glob, top=False) if self.loadContents is not None: 
r['loadContents'] = save(self.loadContents, top=False) if self.outputEval is not None: r['outputEval'] = save(self.outputEval, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['glob', 'loadContents', 'outputEval']) class CommandInputRecordField(InputRecordField): def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if 'name' in doc: try: self.name = load_field(doc.get('name'), uri_strtype_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'name', str).makeError("the `name` field is not valid because:\n"+str(e))) else: self.name = None if self.name is None: if docRoot is not None: self.name = docRoot else: raise ValidationException("Missing name") baseuri = self.name if 'doc' in doc: try: self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'doc', str).makeError("the `doc` field is not valid because:\n"+str(e))) else: self.doc = None try: self.type = load_field(doc.get('type'), typedsl_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e))) if 'inputBinding' in doc: try: self.inputBinding = load_field(doc.get('inputBinding'), union_of_None_type_or_CommandLineBindingLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'inputBinding', str).makeError("the `inputBinding` field is not valid because:\n"+str(e))) else: self.inputBinding = None if 'label' in doc: try: self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'label', str).makeError("the `label` field is not valid because:\n"+str(e))) else: self.label = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `name`, `doc`, `type`, `inputBinding`, `label`" % (k))) break if errors: raise ValidationException("Trying 'CommandInputRecordField'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.name is not None: r['name'] = save(self.name, top=False) if self.doc is not None: r['doc'] = save(self.doc, top=False) if self.type is not None: r['type'] = save(self.type, top=False) if self.inputBinding is not None: r['inputBinding'] = save(self.inputBinding, top=False) if self.label is not None: r['label'] = save(self.label, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['name', 'doc', 'type', 'inputBinding', 'label']) class CommandInputRecordSchema(InputRecordSchema): def __init__(self, 
_doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if 'name' in doc: try: self.name = load_field(doc.get('name'), uri_union_of_None_type_or_strtype_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'name', str).makeError("the `name` field is not valid because:\n"+str(e))) else: self.name = None if self.name is None: if docRoot is not None: self.name = docRoot else: self.name = "_:" + str(uuid.uuid4()) baseuri = self.name if 'fields' in doc: try: self.fields = load_field(doc.get('fields'), idmap_fields_union_of_None_type_or_array_of_CommandInputRecordFieldLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'fields', str).makeError("the `fields` field is not valid because:\n"+str(e))) else: self.fields = None try: self.type = load_field(doc.get('type'), typedsl_Record_symbolLoader_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e))) if 'label' in doc: try: self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'label', str).makeError("the `label` field is not valid because:\n"+str(e))) else: self.label = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `fields`, `type`, `label`, `name`" % (k))) break if errors: raise ValidationException("Trying 'CommandInputRecordSchema'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.name is not None: r['name'] = save(self.name, top=False) if self.fields is not None: r['fields'] = save(self.fields, top=False) if self.type is not None: r['type'] = save(self.type, top=False) if self.label is not None: r['label'] = save(self.label, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['fields', 'type', 'label', 'name']) class CommandInputEnumSchema(InputEnumSchema): def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if 'name' in doc: try: self.name = load_field(doc.get('name'), uri_union_of_None_type_or_strtype_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'name', str).makeError("the `name` field is not valid because:\n"+str(e))) else: self.name = None if self.name is None: if docRoot is not None: self.name = docRoot else: self.name = "_:" + str(uuid.uuid4()) baseuri = self.name try: self.symbols = load_field(doc.get('symbols'), uri_array_of_strtype_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'symbols', str).makeError("the `symbols` field is not valid because:\n"+str(e))) try: self.type = load_field(doc.get('type'), typedsl_Enum_symbolLoader_2, baseuri, loadingOptions) except 
ValidationException as e: errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e))) if 'label' in doc: try: self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'label', str).makeError("the `label` field is not valid because:\n"+str(e))) else: self.label = None if 'inputBinding' in doc: try: self.inputBinding = load_field(doc.get('inputBinding'), union_of_None_type_or_CommandLineBindingLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'inputBinding', str).makeError("the `inputBinding` field is not valid because:\n"+str(e))) else: self.inputBinding = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `symbols`, `type`, `label`, `name`, `inputBinding`" % (k))) break if errors: raise ValidationException("Trying 'CommandInputEnumSchema'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.name is not None: r['name'] = save(self.name, top=False) if self.symbols is not None: r['symbols'] = save(self.symbols, top=False) if self.type is not None: r['type'] = save(self.type, top=False) if self.label is not None: r['label'] = save(self.label, top=False) if self.inputBinding is not None: r['inputBinding'] = save(self.inputBinding, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['symbols', 'type', 'label', 'name', 'inputBinding']) class CommandInputArraySchema(InputArraySchema): def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions try: self.items = load_field(doc.get('items'), uri_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_False_True_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'items', str).makeError("the `items` field is not valid because:\n"+str(e))) try: self.type = load_field(doc.get('type'), typedsl_Array_symbolLoader_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e))) if 'label' in doc: try: self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'label', str).makeError("the `label` field is not valid because:\n"+str(e))) else: self.label = None if 'inputBinding' in doc: try: self.inputBinding = load_field(doc.get('inputBinding'), union_of_None_type_or_CommandLineBindingLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'inputBinding', str).makeError("the `inputBinding` field is not valid because:\n"+str(e))) else: self.inputBinding = None self.extension_fields = 
{} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `items`, `type`, `label`, `inputBinding`" % (k))) break if errors: raise ValidationException("Trying 'CommandInputArraySchema'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.items is not None: r['items'] = save(self.items, top=False) if self.type is not None: r['type'] = save(self.type, top=False) if self.label is not None: r['label'] = save(self.label, top=False) if self.inputBinding is not None: r['inputBinding'] = save(self.inputBinding, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['items', 'type', 'label', 'inputBinding']) class CommandOutputRecordField(OutputRecordField): def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if 'name' in doc: try: self.name = load_field(doc.get('name'), uri_strtype_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'name', str).makeError("the `name` field is not valid because:\n"+str(e))) else: self.name = None if self.name is None: if docRoot is not None: self.name = docRoot else: raise ValidationException("Missing name") baseuri = self.name if 'doc' in doc: try: self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'doc', str).makeError("the `doc` field is not valid because:\n"+str(e))) else: self.doc = None try: self.type = load_field(doc.get('type'), typedsl_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e))) if 'outputBinding' in doc: try: self.outputBinding = load_field(doc.get('outputBinding'), union_of_None_type_or_CommandOutputBindingLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'outputBinding', str).makeError("the `outputBinding` field is not valid because:\n"+str(e))) else: self.outputBinding = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `name`, `doc`, `type`, `outputBinding`" % (k))) break if errors: raise ValidationException("Trying 'CommandOutputRecordField'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.name is not None: r['name'] = save(self.name, top=False) if self.doc is not None: r['doc'] = save(self.doc, top=False) if self.type is not None: 
r['type'] = save(self.type, top=False) if self.outputBinding is not None: r['outputBinding'] = save(self.outputBinding, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['name', 'doc', 'type', 'outputBinding']) class CommandOutputRecordSchema(OutputRecordSchema): def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if 'name' in doc: try: self.name = load_field(doc.get('name'), uri_union_of_None_type_or_strtype_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'name', str).makeError("the `name` field is not valid because:\n"+str(e))) else: self.name = None if self.name is None: if docRoot is not None: self.name = docRoot else: self.name = "_:" + str(uuid.uuid4()) baseuri = self.name if 'fields' in doc: try: self.fields = load_field(doc.get('fields'), idmap_fields_union_of_None_type_or_array_of_CommandOutputRecordFieldLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'fields', str).makeError("the `fields` field is not valid because:\n"+str(e))) else: self.fields = None try: self.type = load_field(doc.get('type'), typedsl_Record_symbolLoader_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e))) if 'label' in doc: try: self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'label', str).makeError("the `label` field is not valid because:\n"+str(e))) else: self.label = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `fields`, `type`, `label`, `name`" % (k))) break if errors: raise ValidationException("Trying 'CommandOutputRecordSchema'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.name is not None: r['name'] = save(self.name, top=False) if self.fields is not None: r['fields'] = save(self.fields, top=False) if self.type is not None: r['type'] = save(self.type, top=False) if self.label is not None: r['label'] = save(self.label, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['fields', 'type', 'label', 'name']) class CommandOutputEnumSchema(OutputEnumSchema): def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions try: self.symbols = load_field(doc.get('symbols'), uri_array_of_strtype_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'symbols', str).makeError("the `symbols` field is not valid because:\n"+str(e))) try: self.type = load_field(doc.get('type'), typedsl_Enum_symbolLoader_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'type', 
str).makeError("the `type` field is not valid because:\n"+str(e))) if 'label' in doc: try: self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'label', str).makeError("the `label` field is not valid because:\n"+str(e))) else: self.label = None if 'outputBinding' in doc: try: self.outputBinding = load_field(doc.get('outputBinding'), union_of_None_type_or_CommandOutputBindingLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'outputBinding', str).makeError("the `outputBinding` field is not valid because:\n"+str(e))) else: self.outputBinding = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `symbols`, `type`, `label`, `outputBinding`" % (k))) break if errors: raise ValidationException("Trying 'CommandOutputEnumSchema'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.symbols is not None: r['symbols'] = save(self.symbols, top=False) if self.type is not None: r['type'] = save(self.type, top=False) if self.label is not None: r['label'] = save(self.label, top=False) if self.outputBinding is not None: r['outputBinding'] = save(self.outputBinding, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['symbols', 'type', 'label', 'outputBinding']) class CommandOutputArraySchema(OutputArraySchema): def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions try: self.items = load_field(doc.get('items'), uri_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_False_True_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'items', str).makeError("the `items` field is not valid because:\n"+str(e))) try: self.type = load_field(doc.get('type'), typedsl_Array_symbolLoader_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e))) if 'label' in doc: try: self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'label', str).makeError("the `label` field is not valid because:\n"+str(e))) else: self.label = None if 'outputBinding' in doc: try: self.outputBinding = load_field(doc.get('outputBinding'), union_of_None_type_or_CommandOutputBindingLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'outputBinding', str).makeError("the `outputBinding` field is not valid because:\n"+str(e))) else: self.outputBinding = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, 
scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `items`, `type`, `label`, `outputBinding`" % (k))) break if errors: raise ValidationException("Trying 'CommandOutputArraySchema'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.items is not None: r['items'] = save(self.items, top=False) if self.type is not None: r['type'] = save(self.type, top=False) if self.label is not None: r['label'] = save(self.label, top=False) if self.outputBinding is not None: r['outputBinding'] = save(self.outputBinding, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['items', 'type', 'label', 'outputBinding']) class CommandInputParameter(InputParameter): """ An input parameter for a CommandLineTool. """ def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if 'id' in doc: try: self.id = load_field(doc.get('id'), uri_strtype_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'id', str).makeError("the `id` field is not valid because:\n"+str(e))) else: self.id = None if self.id is None: if docRoot is not None: self.id = docRoot else: raise ValidationException("Missing id") baseuri = self.id if 'label' in doc: try: self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'label', str).makeError("the `label` field is not valid because:\n"+str(e))) else: self.label = None if 'secondaryFiles' in doc: try: self.secondaryFiles = load_field(doc.get('secondaryFiles'), union_of_None_type_or_strtype_or_ExpressionLoader_or_array_of_union_of_strtype_or_ExpressionLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'secondaryFiles', str).makeError("the `secondaryFiles` field is not valid because:\n"+str(e))) else: self.secondaryFiles = None if 'streamable' in doc: try: self.streamable = load_field(doc.get('streamable'), union_of_None_type_or_booltype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'streamable', str).makeError("the `streamable` field is not valid because:\n"+str(e))) else: self.streamable = None if 'doc' in doc: try: self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype_or_array_of_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'doc', str).makeError("the `doc` field is not valid because:\n"+str(e))) else: self.doc = None if 'format' in doc: try: self.format = load_field(doc.get('format'), uri_union_of_None_type_or_strtype_or_array_of_strtype_or_ExpressionLoader_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'format', str).makeError("the `format` field is not valid because:\n"+str(e))) else: self.format = None if 'inputBinding' in doc: try: self.inputBinding = load_field(doc.get('inputBinding'), union_of_None_type_or_CommandLineBindingLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'inputBinding', str).makeError("the `inputBinding` 
field is not valid because:\n"+str(e))) else: self.inputBinding = None if 'default' in doc: try: self.default = load_field(doc.get('default'), union_of_None_type_or_Any_type, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'default', str).makeError("the `default` field is not valid because:\n"+str(e))) else: self.default = None if 'type' in doc: try: self.type = load_field(doc.get('type'), typedsl_union_of_None_type_or_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e))) else: self.type = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `label`, `secondaryFiles`, `streamable`, `doc`, `id`, `format`, `inputBinding`, `default`, `type`" % (k))) break if errors: raise ValidationException("Trying 'CommandInputParameter'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.id is not None: r['id'] = save(self.id, top=False) if self.label is not None: r['label'] = save(self.label, top=False) if self.secondaryFiles is not None: r['secondaryFiles'] = save(self.secondaryFiles, top=False) if self.streamable is not None: r['streamable'] = save(self.streamable, top=False) if self.doc is not None: r['doc'] = save(self.doc, top=False) if self.format is not None: r['format'] = save(self.format, top=False) if self.inputBinding is not None: r['inputBinding'] = save(self.inputBinding, top=False) if self.default is not None: r['default'] = save(self.default, top=False) if self.type is not None: r['type'] = save(self.type, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['label', 'secondaryFiles', 'streamable', 'doc', 'id', 'format', 'inputBinding', 'default', 'type']) class CommandOutputParameter(OutputParameter): """ An output parameter for a CommandLineTool. 
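    A minimal, purely illustrative mapping (not part of the generated documentation) of the sort this class is meant to load; `id`, `type` and `outputBinding` are fields it declares, `glob` comes from CommandOutputBinding above, and the concrete values are hypothetical:

        {
            "id": "report",
            "type": "File",
            "outputBinding": {"glob": "*.txt"}
        }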
""" def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if 'id' in doc: try: self.id = load_field(doc.get('id'), uri_strtype_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'id', str).makeError("the `id` field is not valid because:\n"+str(e))) else: self.id = None if self.id is None: if docRoot is not None: self.id = docRoot else: raise ValidationException("Missing id") baseuri = self.id if 'label' in doc: try: self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'label', str).makeError("the `label` field is not valid because:\n"+str(e))) else: self.label = None if 'secondaryFiles' in doc: try: self.secondaryFiles = load_field(doc.get('secondaryFiles'), union_of_None_type_or_strtype_or_ExpressionLoader_or_array_of_union_of_strtype_or_ExpressionLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'secondaryFiles', str).makeError("the `secondaryFiles` field is not valid because:\n"+str(e))) else: self.secondaryFiles = None if 'streamable' in doc: try: self.streamable = load_field(doc.get('streamable'), union_of_None_type_or_booltype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'streamable', str).makeError("the `streamable` field is not valid because:\n"+str(e))) else: self.streamable = None if 'doc' in doc: try: self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype_or_array_of_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'doc', str).makeError("the `doc` field is not valid because:\n"+str(e))) else: self.doc = None if 'outputBinding' in doc: try: self.outputBinding = load_field(doc.get('outputBinding'), union_of_None_type_or_CommandOutputBindingLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'outputBinding', str).makeError("the `outputBinding` field is not valid because:\n"+str(e))) else: self.outputBinding = None if 'format' in doc: try: self.format = load_field(doc.get('format'), uri_union_of_None_type_or_strtype_or_ExpressionLoader_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'format', str).makeError("the `format` field is not valid because:\n"+str(e))) else: self.format = None if 'type' in doc: try: self.type = load_field(doc.get('type'), typedsl_union_of_None_type_or_CWLTypeLoader_or_stdoutLoader_or_stderrLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e))) else: self.type = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `label`, `secondaryFiles`, `streamable`, `doc`, `id`, 
`outputBinding`, `format`, `type`" % (k))) break if errors: raise ValidationException("Trying 'CommandOutputParameter'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.id is not None: r['id'] = save(self.id, top=False) if self.label is not None: r['label'] = save(self.label, top=False) if self.secondaryFiles is not None: r['secondaryFiles'] = save(self.secondaryFiles, top=False) if self.streamable is not None: r['streamable'] = save(self.streamable, top=False) if self.doc is not None: r['doc'] = save(self.doc, top=False) if self.outputBinding is not None: r['outputBinding'] = save(self.outputBinding, top=False) if self.format is not None: r['format'] = save(self.format, top=False) if self.type is not None: r['type'] = save(self.type, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['label', 'secondaryFiles', 'streamable', 'doc', 'id', 'outputBinding', 'format', 'type']) class CommandLineTool(Process): """ This defines the schema of the CWL Command Line Tool Description document. """ def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if doc.get('class') != 'CommandLineTool': raise ValidationException("Not a CommandLineTool") if 'id' in doc: try: self.id = load_field(doc.get('id'), uri_union_of_None_type_or_strtype_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'id', str).makeError("the `id` field is not valid because:\n"+str(e))) else: self.id = None if self.id is None: if docRoot is not None: self.id = docRoot else: self.id = "_:" + str(uuid.uuid4()) baseuri = self.id try: self.inputs = load_field(doc.get('inputs'), idmap_inputs_array_of_CommandInputParameterLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'inputs', str).makeError("the `inputs` field is not valid because:\n"+str(e))) try: self.outputs = load_field(doc.get('outputs'), idmap_outputs_array_of_CommandOutputParameterLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'outputs', str).makeError("the `outputs` field is not valid because:\n"+str(e))) if 'requirements' in doc: try: self.requirements = load_field(doc.get('requirements'), idmap_requirements_union_of_None_type_or_array_of_union_of_InlineJavascriptRequirementLoader_or_SchemaDefRequirementLoader_or_DockerRequirementLoader_or_SoftwareRequirementLoader_or_InitialWorkDirRequirementLoader_or_EnvVarRequirementLoader_or_ShellCommandRequirementLoader_or_ResourceRequirementLoader_or_SubworkflowFeatureRequirementLoader_or_ScatterFeatureRequirementLoader_or_MultipleInputFeatureRequirementLoader_or_StepInputExpressionRequirementLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'requirements', str).makeError("the `requirements` field is not valid because:\n"+str(e))) else: self.requirements = None if 'hints' in doc: try: self.hints = load_field(doc.get('hints'), idmap_hints_union_of_None_type_or_array_of_Any_type, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'hints', str).makeError("the `hints` field is not valid because:\n"+str(e))) else: self.hints = None if 'label' in doc: try: 
self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'label', str).makeError("the `label` field is not valid because:\n"+str(e))) else: self.label = None if 'doc' in doc: try: self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'doc', str).makeError("the `doc` field is not valid because:\n"+str(e))) else: self.doc = None if 'cwlVersion' in doc: try: self.cwlVersion = load_field(doc.get('cwlVersion'), uri_union_of_None_type_or_CWLVersionLoader_False_True_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'cwlVersion', str).makeError("the `cwlVersion` field is not valid because:\n"+str(e))) else: self.cwlVersion = None if 'baseCommand' in doc: try: self.baseCommand = load_field(doc.get('baseCommand'), union_of_None_type_or_strtype_or_array_of_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'baseCommand', str).makeError("the `baseCommand` field is not valid because:\n"+str(e))) else: self.baseCommand = None if 'arguments' in doc: try: self.arguments = load_field(doc.get('arguments'), union_of_None_type_or_array_of_union_of_strtype_or_ExpressionLoader_or_CommandLineBindingLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'arguments', str).makeError("the `arguments` field is not valid because:\n"+str(e))) else: self.arguments = None if 'stdin' in doc: try: self.stdin = load_field(doc.get('stdin'), union_of_None_type_or_strtype_or_ExpressionLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'stdin', str).makeError("the `stdin` field is not valid because:\n"+str(e))) else: self.stdin = None if 'stderr' in doc: try: self.stderr = load_field(doc.get('stderr'), union_of_None_type_or_strtype_or_ExpressionLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'stderr', str).makeError("the `stderr` field is not valid because:\n"+str(e))) else: self.stderr = None if 'stdout' in doc: try: self.stdout = load_field(doc.get('stdout'), union_of_None_type_or_strtype_or_ExpressionLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'stdout', str).makeError("the `stdout` field is not valid because:\n"+str(e))) else: self.stdout = None if 'successCodes' in doc: try: self.successCodes = load_field(doc.get('successCodes'), union_of_None_type_or_array_of_inttype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'successCodes', str).makeError("the `successCodes` field is not valid because:\n"+str(e))) else: self.successCodes = None if 'temporaryFailCodes' in doc: try: self.temporaryFailCodes = load_field(doc.get('temporaryFailCodes'), union_of_None_type_or_array_of_inttype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'temporaryFailCodes', str).makeError("the `temporaryFailCodes` field is not valid because:\n"+str(e))) else: self.temporaryFailCodes = None if 'permanentFailCodes' in doc: try: self.permanentFailCodes = load_field(doc.get('permanentFailCodes'), union_of_None_type_or_array_of_inttype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'permanentFailCodes', str).makeError("the `permanentFailCodes` field is not valid 
because:\n"+str(e))) else: self.permanentFailCodes = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `id`, `inputs`, `outputs`, `requirements`, `hints`, `label`, `doc`, `cwlVersion`, `class`, `baseCommand`, `arguments`, `stdin`, `stderr`, `stdout`, `successCodes`, `temporaryFailCodes`, `permanentFailCodes`" % (k))) break if errors: raise ValidationException("Trying 'CommandLineTool'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] r['class'] = 'CommandLineTool' if self.id is not None: r['id'] = save(self.id, top=False) if self.inputs is not None: r['inputs'] = save(self.inputs, top=False) if self.outputs is not None: r['outputs'] = save(self.outputs, top=False) if self.requirements is not None: r['requirements'] = save(self.requirements, top=False) if self.hints is not None: r['hints'] = save(self.hints, top=False) if self.label is not None: r['label'] = save(self.label, top=False) if self.doc is not None: r['doc'] = save(self.doc, top=False) if self.cwlVersion is not None: r['cwlVersion'] = save(self.cwlVersion, top=False) if self.baseCommand is not None: r['baseCommand'] = save(self.baseCommand, top=False) if self.arguments is not None: r['arguments'] = save(self.arguments, top=False) if self.stdin is not None: r['stdin'] = save(self.stdin, top=False) if self.stderr is not None: r['stderr'] = save(self.stderr, top=False) if self.stdout is not None: r['stdout'] = save(self.stdout, top=False) if self.successCodes is not None: r['successCodes'] = save(self.successCodes, top=False) if self.temporaryFailCodes is not None: r['temporaryFailCodes'] = save(self.temporaryFailCodes, top=False) if self.permanentFailCodes is not None: r['permanentFailCodes'] = save(self.permanentFailCodes, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['id', 'inputs', 'outputs', 'requirements', 'hints', 'label', 'doc', 'cwlVersion', 'class', 'baseCommand', 'arguments', 'stdin', 'stderr', 'stdout', 'successCodes', 'temporaryFailCodes', 'permanentFailCodes']) class DockerRequirement(ProcessRequirement): """ Indicates that a workflow component should be run in a [Docker](http://docker.com) container, and specifies how to fetch or build the image. If a CommandLineTool lists `DockerRequirement` under `hints` (or `requirements`), it may (or must) be run in the specified Docker container. The platform must first acquire or install the correct Docker image as specified by `dockerPull`, `dockerImport`, `dockerLoad` or `dockerFile`. The platform must execute the tool in the container using `docker run` with the appropriate Docker image and tool command line. The workflow platform may provide input files and the designated output directory through the use of volume bind mounts. The platform may rewrite file paths in the input object to correspond to the Docker bind mounted locations. When running a tool contained in Docker, the workflow platform must not assume anything about the contents of the Docker container, such as the presence or absence of specific software, except to assume that the generated command line represents a valid command within the runtime environment of the container. 
## Interaction with other requirements If [EnvVarRequirement](#EnvVarRequirement) is specified alongside a DockerRequirement, the environment variables must be provided to Docker using `--env` or `--env-file` and interact with the container's preexisting environment as defined by Docker. """ def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if doc.get('class') != 'DockerRequirement': raise ValidationException("Not a DockerRequirement") if 'dockerPull' in doc: try: self.dockerPull = load_field(doc.get('dockerPull'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'dockerPull', str).makeError("the `dockerPull` field is not valid because:\n"+str(e))) else: self.dockerPull = None if 'dockerLoad' in doc: try: self.dockerLoad = load_field(doc.get('dockerLoad'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'dockerLoad', str).makeError("the `dockerLoad` field is not valid because:\n"+str(e))) else: self.dockerLoad = None if 'dockerFile' in doc: try: self.dockerFile = load_field(doc.get('dockerFile'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'dockerFile', str).makeError("the `dockerFile` field is not valid because:\n"+str(e))) else: self.dockerFile = None if 'dockerImport' in doc: try: self.dockerImport = load_field(doc.get('dockerImport'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'dockerImport', str).makeError("the `dockerImport` field is not valid because:\n"+str(e))) else: self.dockerImport = None if 'dockerImageId' in doc: try: self.dockerImageId = load_field(doc.get('dockerImageId'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'dockerImageId', str).makeError("the `dockerImageId` field is not valid because:\n"+str(e))) else: self.dockerImageId = None if 'dockerOutputDirectory' in doc: try: self.dockerOutputDirectory = load_field(doc.get('dockerOutputDirectory'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'dockerOutputDirectory', str).makeError("the `dockerOutputDirectory` field is not valid because:\n"+str(e))) else: self.dockerOutputDirectory = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `class`, `dockerPull`, `dockerLoad`, `dockerFile`, `dockerImport`, `dockerImageId`, `dockerOutputDirectory`" % (k))) break if errors: raise ValidationException("Trying 'DockerRequirement'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] r['class'] = 'DockerRequirement' if self.dockerPull is not None: r['dockerPull'] = save(self.dockerPull, top=False) if self.dockerLoad is not None: r['dockerLoad'] = save(self.dockerLoad, top=False) if self.dockerFile is not None: r['dockerFile'] = save(self.dockerFile, top=False) if self.dockerImport is not 
None: r['dockerImport'] = save(self.dockerImport, top=False) if self.dockerImageId is not None: r['dockerImageId'] = save(self.dockerImageId, top=False) if self.dockerOutputDirectory is not None: r['dockerOutputDirectory'] = save(self.dockerOutputDirectory, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['class', 'dockerPull', 'dockerLoad', 'dockerFile', 'dockerImport', 'dockerImageId', 'dockerOutputDirectory']) class SoftwareRequirement(ProcessRequirement): """ A list of software packages that should be configured in the environment of the defined process. """ def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if doc.get('class') != 'SoftwareRequirement': raise ValidationException("Not a SoftwareRequirement") try: self.packages = load_field(doc.get('packages'), idmap_packages_array_of_SoftwarePackageLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'packages', str).makeError("the `packages` field is not valid because:\n"+str(e))) self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `class`, `packages`" % (k))) break if errors: raise ValidationException("Trying 'SoftwareRequirement'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] r['class'] = 'SoftwareRequirement' if self.packages is not None: r['packages'] = save(self.packages, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['class', 'packages']) class SoftwarePackage(Savable): def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions try: self.package = load_field(doc.get('package'), strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'package', str).makeError("the `package` field is not valid because:\n"+str(e))) if 'version' in doc: try: self.version = load_field(doc.get('version'), union_of_None_type_or_array_of_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'version', str).makeError("the `version` field is not valid because:\n"+str(e))) else: self.version = None if 'specs' in doc: try: self.specs = load_field(doc.get('specs'), union_of_None_type_or_array_of_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'specs', str).makeError("the `specs` field is not valid because:\n"+str(e))) else: self.specs = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `package`, `version`, `specs`" % (k))) break if errors: raise ValidationException("Trying 'SoftwarePackage'\n"+"\n".join(errors)) 
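# Illustrative sketch, not part of the generated parser.
# DockerRequirement and SoftwareRequirement (parsed by the classes above) both
# describe the tool's execution environment.  A `requirements` list combining
# them could look like the mapping below; the image name, package name and
# spec URL are invented for illustration.
example_environment_requirements = [
    {
        "class": "DockerRequirement",
        "dockerPull": "python:3.9-slim",            # image acquired via `docker pull`
        "dockerOutputDirectory": "/var/spool/cwl",  # output directory inside the container
    },
    {
        "class": "SoftwareRequirement",
        "packages": [
            {
                "package": "samtools",
                "version": ["1.9"],
                "specs": ["https://example.org/specs/samtools"],  # invented URI
            }
        ],
    },
]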
def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.package is not None: r['package'] = save(self.package, top=False) if self.version is not None: r['version'] = save(self.version, top=False) if self.specs is not None: r['specs'] = save(self.specs, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['package', 'version', 'specs']) class Dirent(Savable): """ Define a file or subdirectory that must be placed in the designated output directory prior to executing the command line tool. May be the result of executing an expression, such as building a configuration file from a template. """ def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if 'entryname' in doc: try: self.entryname = load_field(doc.get('entryname'), union_of_None_type_or_strtype_or_ExpressionLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'entryname', str).makeError("the `entryname` field is not valid because:\n"+str(e))) else: self.entryname = None try: self.entry = load_field(doc.get('entry'), union_of_strtype_or_ExpressionLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'entry', str).makeError("the `entry` field is not valid because:\n"+str(e))) if 'writable' in doc: try: self.writable = load_field(doc.get('writable'), union_of_None_type_or_booltype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'writable', str).makeError("the `writable` field is not valid because:\n"+str(e))) else: self.writable = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `entryname`, `entry`, `writable`" % (k))) break if errors: raise ValidationException("Trying 'Dirent'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.entryname is not None: r['entryname'] = save(self.entryname, top=False) if self.entry is not None: r['entry'] = save(self.entry, top=False) if self.writable is not None: r['writable'] = save(self.writable, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['entryname', 'entry', 'writable']) class InitialWorkDirRequirement(ProcessRequirement): """ Define a list of files and subdirectories that must be created by the workflow platform in the designated output directory prior to executing the command line tool. 
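# Illustrative sketch, not part of the generated parser.
# An InitialWorkDirRequirement whose `listing` holds a single Dirent (the class
# defined above), e.g. rendering a small configuration file from a template
# expression before the tool runs.  The file name and expression are invented.
example_initial_workdir = {
    "class": "InitialWorkDirRequirement",
    "listing": [
        {
            "entryname": "settings.json",
            # `entry` may be a literal string or an expression producing one
            "entry": '{"threads": $(inputs.threads)}',
            "writable": False,
        }
    ],
}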
""" def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if doc.get('class') != 'InitialWorkDirRequirement': raise ValidationException("Not a InitialWorkDirRequirement") try: self.listing = load_field(doc.get('listing'), union_of_array_of_union_of_FileLoader_or_DirectoryLoader_or_DirentLoader_or_strtype_or_ExpressionLoader_or_strtype_or_ExpressionLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'listing', str).makeError("the `listing` field is not valid because:\n"+str(e))) self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `class`, `listing`" % (k))) break if errors: raise ValidationException("Trying 'InitialWorkDirRequirement'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] r['class'] = 'InitialWorkDirRequirement' if self.listing is not None: r['listing'] = save(self.listing, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['class', 'listing']) class EnvVarRequirement(ProcessRequirement): """ Define a list of environment variables which will be set in the execution environment of the tool. See `EnvironmentDef` for details. """ def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if doc.get('class') != 'EnvVarRequirement': raise ValidationException("Not a EnvVarRequirement") try: self.envDef = load_field(doc.get('envDef'), idmap_envDef_array_of_EnvironmentDefLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'envDef', str).makeError("the `envDef` field is not valid because:\n"+str(e))) self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `class`, `envDef`" % (k))) break if errors: raise ValidationException("Trying 'EnvVarRequirement'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] r['class'] = 'EnvVarRequirement' if self.envDef is not None: r['envDef'] = save(self.envDef, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['class', 'envDef']) class ShellCommandRequirement(ProcessRequirement): """ Modify the behavior of CommandLineTool to generate a single string containing a shell command line. Each item in the argument list must be joined into a string separated by single spaces and quoted to prevent intepretation by the shell, unless `CommandLineBinding` for that argument contains `shellQuote: false`. 
If `shellQuote: false` is specified, the argument is joined into the command string without quoting, which allows the use of shell metacharacters such as `|` for pipes. """ def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if doc.get('class') != 'ShellCommandRequirement': raise ValidationException("Not a ShellCommandRequirement") self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `class`" % (k))) break if errors: raise ValidationException("Trying 'ShellCommandRequirement'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] r['class'] = 'ShellCommandRequirement' if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['class']) class ResourceRequirement(ProcessRequirement): """ Specify basic hardware resource requirements. "min" is the minimum amount of a resource that must be reserved to schedule a job. If "min" cannot be satisfied, the job should not be run. "max" is the maximum amount of a resource that the job shall be permitted to use. If a node has sufficient resources, multiple jobs may be scheduled on a single node provided each job's "max" resource requirements are met. If a job attempts to exceed its "max" resource allocation, an implementation may deny additional resources, which may result in job failure. If "min" is specified but "max" is not, then "max" == "min". If "max" is specified but "min" is not, then "min" == "max". It is an error if max < min. It is an error if the value of any of these fields is negative. If neither "min" nor "max" is specified for a resource, an implementation may provide a default. 
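# Illustrative sketch, not part of the generated parser.
# The min/max defaulting rules described above for ResourceRequirement,
# written out as a small helper; `default` stands in for whatever
# implementation-specific default applies when neither bound is given.
def resolve_resource_bounds(minimum=None, maximum=None, default=1):
    if minimum is None and maximum is None:
        minimum = maximum = default
    elif minimum is None:
        minimum = maximum   # "max" given, "min" not: min == max
    elif maximum is None:
        maximum = minimum   # "min" given, "max" not: max == min
    if minimum < 0 or maximum < 0 or maximum < minimum:
        raise ValueError("invalid resource bounds")
    return minimum, maximum

# e.g. resolve_resource_bounds(minimum=2) == (2, 2)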
""" def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if doc.get('class') != 'ResourceRequirement': raise ValidationException("Not a ResourceRequirement") if 'coresMin' in doc: try: self.coresMin = load_field(doc.get('coresMin'), union_of_None_type_or_inttype_or_strtype_or_ExpressionLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'coresMin', str).makeError("the `coresMin` field is not valid because:\n"+str(e))) else: self.coresMin = None if 'coresMax' in doc: try: self.coresMax = load_field(doc.get('coresMax'), union_of_None_type_or_inttype_or_strtype_or_ExpressionLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'coresMax', str).makeError("the `coresMax` field is not valid because:\n"+str(e))) else: self.coresMax = None if 'ramMin' in doc: try: self.ramMin = load_field(doc.get('ramMin'), union_of_None_type_or_inttype_or_strtype_or_ExpressionLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'ramMin', str).makeError("the `ramMin` field is not valid because:\n"+str(e))) else: self.ramMin = None if 'ramMax' in doc: try: self.ramMax = load_field(doc.get('ramMax'), union_of_None_type_or_inttype_or_strtype_or_ExpressionLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'ramMax', str).makeError("the `ramMax` field is not valid because:\n"+str(e))) else: self.ramMax = None if 'tmpdirMin' in doc: try: self.tmpdirMin = load_field(doc.get('tmpdirMin'), union_of_None_type_or_inttype_or_strtype_or_ExpressionLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'tmpdirMin', str).makeError("the `tmpdirMin` field is not valid because:\n"+str(e))) else: self.tmpdirMin = None if 'tmpdirMax' in doc: try: self.tmpdirMax = load_field(doc.get('tmpdirMax'), union_of_None_type_or_inttype_or_strtype_or_ExpressionLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'tmpdirMax', str).makeError("the `tmpdirMax` field is not valid because:\n"+str(e))) else: self.tmpdirMax = None if 'outdirMin' in doc: try: self.outdirMin = load_field(doc.get('outdirMin'), union_of_None_type_or_inttype_or_strtype_or_ExpressionLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'outdirMin', str).makeError("the `outdirMin` field is not valid because:\n"+str(e))) else: self.outdirMin = None if 'outdirMax' in doc: try: self.outdirMax = load_field(doc.get('outdirMax'), union_of_None_type_or_inttype_or_strtype_or_ExpressionLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'outdirMax', str).makeError("the `outdirMax` field is not valid because:\n"+str(e))) else: self.outdirMax = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `class`, `coresMin`, `coresMax`, `ramMin`, `ramMax`, `tmpdirMin`, `tmpdirMax`, `outdirMin`, `outdirMax`" % (k))) break if errors: raise ValidationException("Trying 'ResourceRequirement'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in 
self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] r['class'] = 'ResourceRequirement' if self.coresMin is not None: r['coresMin'] = save(self.coresMin, top=False) if self.coresMax is not None: r['coresMax'] = save(self.coresMax, top=False) if self.ramMin is not None: r['ramMin'] = save(self.ramMin, top=False) if self.ramMax is not None: r['ramMax'] = save(self.ramMax, top=False) if self.tmpdirMin is not None: r['tmpdirMin'] = save(self.tmpdirMin, top=False) if self.tmpdirMax is not None: r['tmpdirMax'] = save(self.tmpdirMax, top=False) if self.outdirMin is not None: r['outdirMin'] = save(self.outdirMin, top=False) if self.outdirMax is not None: r['outdirMax'] = save(self.outdirMax, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['class', 'coresMin', 'coresMax', 'ramMin', 'ramMax', 'tmpdirMin', 'tmpdirMax', 'outdirMin', 'outdirMax']) class ExpressionToolOutputParameter(OutputParameter): def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if 'id' in doc: try: self.id = load_field(doc.get('id'), uri_strtype_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'id', str).makeError("the `id` field is not valid because:\n"+str(e))) else: self.id = None if self.id is None: if docRoot is not None: self.id = docRoot else: raise ValidationException("Missing id") baseuri = self.id if 'label' in doc: try: self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'label', str).makeError("the `label` field is not valid because:\n"+str(e))) else: self.label = None if 'secondaryFiles' in doc: try: self.secondaryFiles = load_field(doc.get('secondaryFiles'), union_of_None_type_or_strtype_or_ExpressionLoader_or_array_of_union_of_strtype_or_ExpressionLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'secondaryFiles', str).makeError("the `secondaryFiles` field is not valid because:\n"+str(e))) else: self.secondaryFiles = None if 'streamable' in doc: try: self.streamable = load_field(doc.get('streamable'), union_of_None_type_or_booltype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'streamable', str).makeError("the `streamable` field is not valid because:\n"+str(e))) else: self.streamable = None if 'doc' in doc: try: self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype_or_array_of_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'doc', str).makeError("the `doc` field is not valid because:\n"+str(e))) else: self.doc = None if 'outputBinding' in doc: try: self.outputBinding = load_field(doc.get('outputBinding'), union_of_None_type_or_CommandOutputBindingLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'outputBinding', str).makeError("the `outputBinding` field is not valid because:\n"+str(e))) else: self.outputBinding = None if 'format' in doc: try: self.format = load_field(doc.get('format'), uri_union_of_None_type_or_strtype_or_ExpressionLoader_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'format', 
str).makeError("the `format` field is not valid because:\n"+str(e))) else: self.format = None if 'type' in doc: try: self.type = load_field(doc.get('type'), typedsl_union_of_None_type_or_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e))) else: self.type = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `label`, `secondaryFiles`, `streamable`, `doc`, `id`, `outputBinding`, `format`, `type`" % (k))) break if errors: raise ValidationException("Trying 'ExpressionToolOutputParameter'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.id is not None: r['id'] = save(self.id, top=False) if self.label is not None: r['label'] = save(self.label, top=False) if self.secondaryFiles is not None: r['secondaryFiles'] = save(self.secondaryFiles, top=False) if self.streamable is not None: r['streamable'] = save(self.streamable, top=False) if self.doc is not None: r['doc'] = save(self.doc, top=False) if self.outputBinding is not None: r['outputBinding'] = save(self.outputBinding, top=False) if self.format is not None: r['format'] = save(self.format, top=False) if self.type is not None: r['type'] = save(self.type, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['label', 'secondaryFiles', 'streamable', 'doc', 'id', 'outputBinding', 'format', 'type']) class ExpressionTool(Process): """ Execute an expression as a Workflow step. 
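# Illustrative sketch, not part of the generated parser.
# A minimal ExpressionTool document as a plain mapping, of the shape the
# ExpressionTool class below accepts; the parameter names and expression are
# invented.  It computes its single output from its single input without
# running any command line.
example_expression_tool = {
    "class": "ExpressionTool",
    "cwlVersion": "v1.0",
    "requirements": [{"class": "InlineJavascriptRequirement"}],
    "inputs": [{"id": "n", "type": "int"}],
    "outputs": [{"id": "doubled", "type": "int"}],
    "expression": "${ return {'doubled': inputs.n * 2}; }",
}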
""" def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if doc.get('class') != 'ExpressionTool': raise ValidationException("Not a ExpressionTool") if 'id' in doc: try: self.id = load_field(doc.get('id'), uri_union_of_None_type_or_strtype_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'id', str).makeError("the `id` field is not valid because:\n"+str(e))) else: self.id = None if self.id is None: if docRoot is not None: self.id = docRoot else: self.id = "_:" + str(uuid.uuid4()) baseuri = self.id try: self.inputs = load_field(doc.get('inputs'), idmap_inputs_array_of_InputParameterLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'inputs', str).makeError("the `inputs` field is not valid because:\n"+str(e))) try: self.outputs = load_field(doc.get('outputs'), idmap_outputs_array_of_ExpressionToolOutputParameterLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'outputs', str).makeError("the `outputs` field is not valid because:\n"+str(e))) if 'requirements' in doc: try: self.requirements = load_field(doc.get('requirements'), idmap_requirements_union_of_None_type_or_array_of_union_of_InlineJavascriptRequirementLoader_or_SchemaDefRequirementLoader_or_DockerRequirementLoader_or_SoftwareRequirementLoader_or_InitialWorkDirRequirementLoader_or_EnvVarRequirementLoader_or_ShellCommandRequirementLoader_or_ResourceRequirementLoader_or_SubworkflowFeatureRequirementLoader_or_ScatterFeatureRequirementLoader_or_MultipleInputFeatureRequirementLoader_or_StepInputExpressionRequirementLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'requirements', str).makeError("the `requirements` field is not valid because:\n"+str(e))) else: self.requirements = None if 'hints' in doc: try: self.hints = load_field(doc.get('hints'), idmap_hints_union_of_None_type_or_array_of_Any_type, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'hints', str).makeError("the `hints` field is not valid because:\n"+str(e))) else: self.hints = None if 'label' in doc: try: self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'label', str).makeError("the `label` field is not valid because:\n"+str(e))) else: self.label = None if 'doc' in doc: try: self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'doc', str).makeError("the `doc` field is not valid because:\n"+str(e))) else: self.doc = None if 'cwlVersion' in doc: try: self.cwlVersion = load_field(doc.get('cwlVersion'), uri_union_of_None_type_or_CWLVersionLoader_False_True_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'cwlVersion', str).makeError("the `cwlVersion` field is not valid because:\n"+str(e))) else: self.cwlVersion = None try: self.expression = load_field(doc.get('expression'), union_of_strtype_or_ExpressionLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'expression', str).makeError("the `expression` field is not valid because:\n"+str(e))) self.extension_fields = {} for k in doc.keys(): 
if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `id`, `inputs`, `outputs`, `requirements`, `hints`, `label`, `doc`, `cwlVersion`, `class`, `expression`" % (k))) break if errors: raise ValidationException("Trying 'ExpressionTool'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] r['class'] = 'ExpressionTool' if self.id is not None: r['id'] = save(self.id, top=False) if self.inputs is not None: r['inputs'] = save(self.inputs, top=False) if self.outputs is not None: r['outputs'] = save(self.outputs, top=False) if self.requirements is not None: r['requirements'] = save(self.requirements, top=False) if self.hints is not None: r['hints'] = save(self.hints, top=False) if self.label is not None: r['label'] = save(self.label, top=False) if self.doc is not None: r['doc'] = save(self.doc, top=False) if self.cwlVersion is not None: r['cwlVersion'] = save(self.cwlVersion, top=False) if self.expression is not None: r['expression'] = save(self.expression, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['id', 'inputs', 'outputs', 'requirements', 'hints', 'label', 'doc', 'cwlVersion', 'class', 'expression']) class WorkflowOutputParameter(OutputParameter): """ Describe an output parameter of a workflow. The parameter must be connected to one or more parameters defined in the workflow that will provide the value of the output parameter. """ def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if 'id' in doc: try: self.id = load_field(doc.get('id'), uri_strtype_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'id', str).makeError("the `id` field is not valid because:\n"+str(e))) else: self.id = None if self.id is None: if docRoot is not None: self.id = docRoot else: raise ValidationException("Missing id") baseuri = self.id if 'label' in doc: try: self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'label', str).makeError("the `label` field is not valid because:\n"+str(e))) else: self.label = None if 'secondaryFiles' in doc: try: self.secondaryFiles = load_field(doc.get('secondaryFiles'), union_of_None_type_or_strtype_or_ExpressionLoader_or_array_of_union_of_strtype_or_ExpressionLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'secondaryFiles', str).makeError("the `secondaryFiles` field is not valid because:\n"+str(e))) else: self.secondaryFiles = None if 'streamable' in doc: try: self.streamable = load_field(doc.get('streamable'), union_of_None_type_or_booltype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'streamable', str).makeError("the `streamable` field is not valid because:\n"+str(e))) else: self.streamable = None if 'doc' in doc: try: self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype_or_array_of_strtype, baseuri, loadingOptions) except ValidationException as e: 
errors.append(SourceLine(doc, 'doc', str).makeError("the `doc` field is not valid because:\n"+str(e))) else: self.doc = None if 'outputBinding' in doc: try: self.outputBinding = load_field(doc.get('outputBinding'), union_of_None_type_or_CommandOutputBindingLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'outputBinding', str).makeError("the `outputBinding` field is not valid because:\n"+str(e))) else: self.outputBinding = None if 'format' in doc: try: self.format = load_field(doc.get('format'), uri_union_of_None_type_or_strtype_or_ExpressionLoader_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'format', str).makeError("the `format` field is not valid because:\n"+str(e))) else: self.format = None if 'outputSource' in doc: try: self.outputSource = load_field(doc.get('outputSource'), uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_0, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'outputSource', str).makeError("the `outputSource` field is not valid because:\n"+str(e))) else: self.outputSource = None if 'linkMerge' in doc: try: self.linkMerge = load_field(doc.get('linkMerge'), union_of_None_type_or_LinkMergeMethodLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'linkMerge', str).makeError("the `linkMerge` field is not valid because:\n"+str(e))) else: self.linkMerge = None if 'type' in doc: try: self.type = load_field(doc.get('type'), typedsl_union_of_None_type_or_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e))) else: self.type = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `label`, `secondaryFiles`, `streamable`, `doc`, `id`, `outputBinding`, `format`, `outputSource`, `linkMerge`, `type`" % (k))) break if errors: raise ValidationException("Trying 'WorkflowOutputParameter'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.id is not None: r['id'] = save(self.id, top=False) if self.label is not None: r['label'] = save(self.label, top=False) if self.secondaryFiles is not None: r['secondaryFiles'] = save(self.secondaryFiles, top=False) if self.streamable is not None: r['streamable'] = save(self.streamable, top=False) if self.doc is not None: r['doc'] = save(self.doc, top=False) if self.outputBinding is not None: r['outputBinding'] = save(self.outputBinding, top=False) if self.format is not None: r['format'] = save(self.format, top=False) if self.outputSource is not None: r['outputSource'] = save(self.outputSource, top=False) if self.linkMerge is not None: r['linkMerge'] = save(self.linkMerge, top=False) if self.type is not None: r['type'] = save(self.type, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = 
frozenset(['label', 'secondaryFiles', 'streamable', 'doc', 'id', 'outputBinding', 'format', 'outputSource', 'linkMerge', 'type']) class Sink(Savable): pass class WorkflowStepInput(Sink): """ The input of a workflow step connects an upstream parameter (from the workflow inputs, or the outputs of other workflow steps) with the input parameters of the underlying step. ## Input object A WorkflowStepInput object must contain an `id` field in the form `#fieldname` or `#prefix/fieldname`. When the `id` field contains a slash `/`, the field name consists of the characters following the final slash (the prefix portion may contain one or more slashes to indicate scope). This defines a field of the workflow step input object with the value of the `source` parameter(s). ## Merging To merge multiple inbound data links, [MultipleInputFeatureRequirement](#MultipleInputFeatureRequirement) must be specified in the workflow or workflow step requirements. If the sink parameter is an array, or named in a [workflow scatter](#WorkflowStep) operation, there may be multiple inbound data links listed in the `source` field. The values from the input links are merged depending on the method specified in the `linkMerge` field. If not specified, the default method is "merge_nested". * **merge_nested** The input must be an array consisting of exactly one entry for each input link. If "merge_nested" is specified with a single link, the value from the link must be wrapped in a single-item list. * **merge_flattened** 1. The source and sink parameters must be compatible types, or the source type must be compatible with a single element from the "items" type of the destination array parameter. 2. Source parameters which are arrays are concatenated. Source parameters which are single element types are appended as single elements. 
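# Illustrative sketch, not part of the generated parser.
# The two linkMerge behaviours described above, shown on plain Python lists.
# `inbound` holds one value per inbound data link; list-valued entries stand
# in for array-typed sources.
def merge_nested(inbound):
    # exactly one entry per input link, each kept as-is
    return list(inbound)

def merge_flattened(inbound):
    # array sources are concatenated, single values appended as single elements
    merged = []
    for value in inbound:
        if isinstance(value, list):
            merged.extend(value)
        else:
            merged.append(value)
    return merged

# merge_nested([[1, 2], 3])    -> [[1, 2], 3]
# merge_flattened([[1, 2], 3]) -> [1, 2, 3]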
""" def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if 'id' in doc: try: self.id = load_field(doc.get('id'), uri_strtype_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'id', str).makeError("the `id` field is not valid because:\n"+str(e))) else: self.id = None if self.id is None: if docRoot is not None: self.id = docRoot else: raise ValidationException("Missing id") baseuri = self.id if 'source' in doc: try: self.source = load_field(doc.get('source'), uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_2, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'source', str).makeError("the `source` field is not valid because:\n"+str(e))) else: self.source = None if 'linkMerge' in doc: try: self.linkMerge = load_field(doc.get('linkMerge'), union_of_None_type_or_LinkMergeMethodLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'linkMerge', str).makeError("the `linkMerge` field is not valid because:\n"+str(e))) else: self.linkMerge = None if 'default' in doc: try: self.default = load_field(doc.get('default'), union_of_None_type_or_Any_type, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'default', str).makeError("the `default` field is not valid because:\n"+str(e))) else: self.default = None if 'valueFrom' in doc: try: self.valueFrom = load_field(doc.get('valueFrom'), union_of_None_type_or_strtype_or_ExpressionLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'valueFrom', str).makeError("the `valueFrom` field is not valid because:\n"+str(e))) else: self.valueFrom = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `source`, `linkMerge`, `id`, `default`, `valueFrom`" % (k))) break if errors: raise ValidationException("Trying 'WorkflowStepInput'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.id is not None: r['id'] = save(self.id, top=False) if self.source is not None: r['source'] = save(self.source, top=False) if self.linkMerge is not None: r['linkMerge'] = save(self.linkMerge, top=False) if self.default is not None: r['default'] = save(self.default, top=False) if self.valueFrom is not None: r['valueFrom'] = save(self.valueFrom, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['source', 'linkMerge', 'id', 'default', 'valueFrom']) class WorkflowStepOutput(Savable): """ Associate an output parameter of the underlying process with a workflow parameter. The workflow parameter (given in the `id` field) be may be used as a `source` to connect with input parameters of other workflow steps, or with an output parameter of the process. 
""" def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if 'id' in doc: try: self.id = load_field(doc.get('id'), uri_strtype_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'id', str).makeError("the `id` field is not valid because:\n"+str(e))) else: self.id = None if self.id is None: if docRoot is not None: self.id = docRoot else: raise ValidationException("Missing id") baseuri = self.id self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `id`" % (k))) break if errors: raise ValidationException("Trying 'WorkflowStepOutput'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.id is not None: r['id'] = save(self.id, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['id']) class WorkflowStep(Savable): """ A workflow step is an executable element of a workflow. It specifies the underlying process implementation (such as `CommandLineTool` or another `Workflow`) in the `run` field and connects the input and output parameters of the underlying process to workflow parameters. # Scatter/gather To use scatter/gather, [ScatterFeatureRequirement](#ScatterFeatureRequirement) must be specified in the workflow or workflow step requirements. A "scatter" operation specifies that the associated workflow step or subworkflow should execute separately over a list of input elements. Each job making up a scatter operation is independent and may be executed concurrently. The `scatter` field specifies one or more input parameters which will be scattered. An input parameter may be listed more than once. The declared type of each input parameter is implicitly becomes an array of items of the input parameter type. If a parameter is listed more than once, it becomes a nested array. As a result, upstream parameters which are connected to scattered parameters must be arrays. All output parameter types are also implicitly wrapped in arrays. Each job in the scatter results in an entry in the output array. If any scattered parameter runtime value is an empty array, all outputs are set to empty arrays and no work is done for the step, according to applicable scattering rules. If `scatter` declares more than one input parameter, `scatterMethod` describes how to decompose the input into a discrete set of jobs. * **dotproduct** specifies that each of the input arrays are aligned and one element taken from each array to construct each job. It is an error if all input arrays are not the same length. * **nested_crossproduct** specifies the Cartesian product of the inputs, producing a job for every combination of the scattered inputs. The output must be nested arrays for each level of scattering, in the order that the input arrays are listed in the `scatter` field. * **flat_crossproduct** specifies the Cartesian product of the inputs, producing a job for every combination of the scattered inputs. 
The output arrays must be flattened to a single level, but otherwise listed in the order that the input arrays are listed in the `scatter` field. # Subworkflows To specify a nested workflow as part of a workflow step, [SubworkflowFeatureRequirement](#SubworkflowFeatureRequirement) must be specified in the workflow or workflow step requirements. It is a fatal error if a workflow directly or indirectly invokes itself as a subworkflow (recursive workflows are not allowed). """ def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if 'id' in doc: try: self.id = load_field(doc.get('id'), uri_strtype_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'id', str).makeError("the `id` field is not valid because:\n"+str(e))) else: self.id = None if self.id is None: if docRoot is not None: self.id = docRoot else: raise ValidationException("Missing id") baseuri = self.id try: self.in_ = load_field(doc.get('in'), idmap_in__array_of_WorkflowStepInputLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'in', str).makeError("the `in` field is not valid because:\n"+str(e))) try: self.out = load_field(doc.get('out'), uri_union_of_array_of_union_of_strtype_or_WorkflowStepOutputLoader_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'out', str).makeError("the `out` field is not valid because:\n"+str(e))) if 'requirements' in doc: try: self.requirements = load_field(doc.get('requirements'), idmap_requirements_union_of_None_type_or_array_of_union_of_InlineJavascriptRequirementLoader_or_SchemaDefRequirementLoader_or_DockerRequirementLoader_or_SoftwareRequirementLoader_or_InitialWorkDirRequirementLoader_or_EnvVarRequirementLoader_or_ShellCommandRequirementLoader_or_ResourceRequirementLoader_or_SubworkflowFeatureRequirementLoader_or_ScatterFeatureRequirementLoader_or_MultipleInputFeatureRequirementLoader_or_StepInputExpressionRequirementLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'requirements', str).makeError("the `requirements` field is not valid because:\n"+str(e))) else: self.requirements = None if 'hints' in doc: try: self.hints = load_field(doc.get('hints'), idmap_hints_union_of_None_type_or_array_of_Any_type, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'hints', str).makeError("the `hints` field is not valid because:\n"+str(e))) else: self.hints = None if 'label' in doc: try: self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'label', str).makeError("the `label` field is not valid because:\n"+str(e))) else: self.label = None if 'doc' in doc: try: self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'doc', str).makeError("the `doc` field is not valid because:\n"+str(e))) else: self.doc = None try: self.run = load_field(doc.get('run'), uri_union_of_strtype_or_CommandLineToolLoader_or_ExpressionToolLoader_or_WorkflowLoader_False_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'run', str).makeError("the `run` field is not valid 
because:\n"+str(e))) if 'scatter' in doc: try: self.scatter = load_field(doc.get('scatter'), uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_0, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'scatter', str).makeError("the `scatter` field is not valid because:\n"+str(e))) else: self.scatter = None if 'scatterMethod' in doc: try: self.scatterMethod = load_field(doc.get('scatterMethod'), uri_union_of_None_type_or_ScatterMethodLoader_False_True_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'scatterMethod', str).makeError("the `scatterMethod` field is not valid because:\n"+str(e))) else: self.scatterMethod = None self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `id`, `in`, `out`, `requirements`, `hints`, `label`, `doc`, `run`, `scatter`, `scatterMethod`" % (k))) break if errors: raise ValidationException("Trying 'WorkflowStep'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] if self.id is not None: r['id'] = save(self.id, top=False) if self.in_ is not None: r['in'] = save(self.in_, top=False) if self.out is not None: r['out'] = save(self.out, top=False) if self.requirements is not None: r['requirements'] = save(self.requirements, top=False) if self.hints is not None: r['hints'] = save(self.hints, top=False) if self.label is not None: r['label'] = save(self.label, top=False) if self.doc is not None: r['doc'] = save(self.doc, top=False) if self.run is not None: r['run'] = save(self.run, top=False) if self.scatter is not None: r['scatter'] = save(self.scatter, top=False) if self.scatterMethod is not None: r['scatterMethod'] = save(self.scatterMethod, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['id', 'in', 'out', 'requirements', 'hints', 'label', 'doc', 'run', 'scatter', 'scatterMethod']) class Workflow(Process): """ A workflow describes a set of **steps** and the **dependencies** between those steps. When a step produces output that will be consumed by a second step, the first step is a dependency of the second step. When there is a dependency, the workflow engine must execute the preceeding step and wait for it to successfully produce output before executing the dependent step. If two steps are defined in the workflow graph that are not directly or indirectly dependent, these steps are **independent**, and may execute in any order or execute concurrently. A workflow is complete when all steps have been executed. Dependencies between parameters are expressed using the `source` field on [workflow step input parameters](#WorkflowStepInput) and [workflow output parameters](#WorkflowOutputParameter). The `source` field expresses the dependency of one parameter on another such that when a value is associated with the parameter specified by `source`, that value is propagated to the destination parameter. When all data links inbound to a given step are fufilled, the step is ready to execute. ## Workflow success and failure A completed step must result in one of `success`, `temporaryFailure` or `permanentFailure` states. 
An implementation may choose to retry a step execution which resulted in `temporaryFailure`. An implementation may choose to either continue running other steps of a workflow, or terminate immediately upon `permanentFailure`. * If any step of a workflow execution results in `permanentFailure`, then the workflow status is `permanentFailure`. * If one or more steps result in `temporaryFailure` and all other steps complete `success` or are not executed, then the workflow status is `temporaryFailure`. * If all workflow steps are executed and complete with `success`, then the workflow status is `success`. # Extensions [ScatterFeatureRequirement](#ScatterFeatureRequirement) and [SubworkflowFeatureRequirement](#SubworkflowFeatureRequirement) are available as standard [extensions](#Extensions_and_Metadata) to core workflow semantics. """ def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if doc.get('class') != 'Workflow': raise ValidationException("Not a Workflow") if 'id' in doc: try: self.id = load_field(doc.get('id'), uri_union_of_None_type_or_strtype_True_False_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'id', str).makeError("the `id` field is not valid because:\n"+str(e))) else: self.id = None if self.id is None: if docRoot is not None: self.id = docRoot else: self.id = "_:" + str(uuid.uuid4()) baseuri = self.id try: self.inputs = load_field(doc.get('inputs'), idmap_inputs_array_of_InputParameterLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'inputs', str).makeError("the `inputs` field is not valid because:\n"+str(e))) try: self.outputs = load_field(doc.get('outputs'), idmap_outputs_array_of_WorkflowOutputParameterLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'outputs', str).makeError("the `outputs` field is not valid because:\n"+str(e))) if 'requirements' in doc: try: self.requirements = load_field(doc.get('requirements'), idmap_requirements_union_of_None_type_or_array_of_union_of_InlineJavascriptRequirementLoader_or_SchemaDefRequirementLoader_or_DockerRequirementLoader_or_SoftwareRequirementLoader_or_InitialWorkDirRequirementLoader_or_EnvVarRequirementLoader_or_ShellCommandRequirementLoader_or_ResourceRequirementLoader_or_SubworkflowFeatureRequirementLoader_or_ScatterFeatureRequirementLoader_or_MultipleInputFeatureRequirementLoader_or_StepInputExpressionRequirementLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'requirements', str).makeError("the `requirements` field is not valid because:\n"+str(e))) else: self.requirements = None if 'hints' in doc: try: self.hints = load_field(doc.get('hints'), idmap_hints_union_of_None_type_or_array_of_Any_type, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'hints', str).makeError("the `hints` field is not valid because:\n"+str(e))) else: self.hints = None if 'label' in doc: try: self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'label', str).makeError("the `label` field is not valid because:\n"+str(e))) else: self.label = None if 'doc' in doc: try: self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype, baseuri, loadingOptions) 
except ValidationException as e: errors.append(SourceLine(doc, 'doc', str).makeError("the `doc` field is not valid because:\n"+str(e))) else: self.doc = None if 'cwlVersion' in doc: try: self.cwlVersion = load_field(doc.get('cwlVersion'), uri_union_of_None_type_or_CWLVersionLoader_False_True_None, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'cwlVersion', str).makeError("the `cwlVersion` field is not valid because:\n"+str(e))) else: self.cwlVersion = None try: self.steps = load_field(doc.get('steps'), idmap_steps_union_of_array_of_WorkflowStepLoader, baseuri, loadingOptions) except ValidationException as e: errors.append(SourceLine(doc, 'steps', str).makeError("the `steps` field is not valid because:\n"+str(e))) self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `id`, `inputs`, `outputs`, `requirements`, `hints`, `label`, `doc`, `cwlVersion`, `class`, `steps`" % (k))) break if errors: raise ValidationException("Trying 'Workflow'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] r['class'] = 'Workflow' if self.id is not None: r['id'] = save(self.id, top=False) if self.inputs is not None: r['inputs'] = save(self.inputs, top=False) if self.outputs is not None: r['outputs'] = save(self.outputs, top=False) if self.requirements is not None: r['requirements'] = save(self.requirements, top=False) if self.hints is not None: r['hints'] = save(self.hints, top=False) if self.label is not None: r['label'] = save(self.label, top=False) if self.doc is not None: r['doc'] = save(self.doc, top=False) if self.cwlVersion is not None: r['cwlVersion'] = save(self.cwlVersion, top=False) if self.steps is not None: r['steps'] = save(self.steps, top=False) if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['id', 'inputs', 'outputs', 'requirements', 'hints', 'label', 'doc', 'cwlVersion', 'class', 'steps']) class SubworkflowFeatureRequirement(ProcessRequirement): """ Indicates that the workflow platform must support nested workflows in the `run` field of [WorkflowStep](#WorkflowStep). 
""" def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if doc.get('class') != 'SubworkflowFeatureRequirement': raise ValidationException("Not a SubworkflowFeatureRequirement") self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `class`" % (k))) break if errors: raise ValidationException("Trying 'SubworkflowFeatureRequirement'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] r['class'] = 'SubworkflowFeatureRequirement' if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['class']) class ScatterFeatureRequirement(ProcessRequirement): """ Indicates that the workflow platform must support the `scatter` and `scatterMethod` fields of [WorkflowStep](#WorkflowStep). """ def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if doc.get('class') != 'ScatterFeatureRequirement': raise ValidationException("Not a ScatterFeatureRequirement") self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `class`" % (k))) break if errors: raise ValidationException("Trying 'ScatterFeatureRequirement'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] r['class'] = 'ScatterFeatureRequirement' if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['class']) class MultipleInputFeatureRequirement(ProcessRequirement): """ Indicates that the workflow platform must support multiple inbound data links listed in the `source` field of [WorkflowStepInput](#WorkflowStepInput). 
""" def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if doc.get('class') != 'MultipleInputFeatureRequirement': raise ValidationException("Not a MultipleInputFeatureRequirement") self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `class`" % (k))) break if errors: raise ValidationException("Trying 'MultipleInputFeatureRequirement'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] r['class'] = 'MultipleInputFeatureRequirement' if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['class']) class StepInputExpressionRequirement(ProcessRequirement): """ Indicate that the workflow platform must support the `valueFrom` field of [WorkflowStepInput](#WorkflowStepInput). """ def __init__(self, _doc, baseuri, loadingOptions, docRoot=None): doc = copy.copy(_doc) if hasattr(_doc, 'lc'): doc.lc.data = _doc.lc.data doc.lc.filename = _doc.lc.filename errors = [] self.loadingOptions = loadingOptions if doc.get('class') != 'StepInputExpressionRequirement': raise ValidationException("Not a StepInputExpressionRequirement") self.extension_fields = {} for k in doc.keys(): if k not in self.attrs: if ":" in k: ex = expand_url(k, u"", loadingOptions, scoped_id=False, vocab_term=False) self.extension_fields[ex] = doc[k] else: errors.append(SourceLine(doc, k, str).makeError("invalid field `%s`, expected one of: `class`" % (k))) break if errors: raise ValidationException("Trying 'StepInputExpressionRequirement'\n"+"\n".join(errors)) def save(self, top=False): r = {} for ef in self.extension_fields: r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef] r['class'] = 'StepInputExpressionRequirement' if top and self.loadingOptions.namespaces: r["$namespaces"] = self.loadingOptions.namespaces return r attrs = frozenset(['class']) _vocab = { "Any": "https://w3id.org/cwl/salad#Any", "ArraySchema": "https://w3id.org/cwl/salad#ArraySchema", "CWLType": "https://w3id.org/cwl/cwl#CWLType", "CWLVersion": "https://w3id.org/cwl/cwl#CWLVersion", "CommandInputArraySchema": "https://w3id.org/cwl/cwl#CommandInputArraySchema", "CommandInputEnumSchema": "https://w3id.org/cwl/cwl#CommandInputEnumSchema", "CommandInputParameter": "https://w3id.org/cwl/cwl#CommandInputParameter", "CommandInputRecordField": "https://w3id.org/cwl/cwl#CommandInputRecordField", "CommandInputRecordSchema": "https://w3id.org/cwl/cwl#CommandInputRecordSchema", "CommandLineBinding": "https://w3id.org/cwl/cwl#CommandLineBinding", "CommandLineTool": "https://w3id.org/cwl/cwl#CommandLineTool", "CommandOutputArraySchema": "https://w3id.org/cwl/cwl#CommandOutputArraySchema", "CommandOutputBinding": "https://w3id.org/cwl/cwl#CommandOutputBinding", "CommandOutputEnumSchema": "https://w3id.org/cwl/cwl#CommandOutputEnumSchema", "CommandOutputParameter": "https://w3id.org/cwl/cwl#CommandOutputParameter", "CommandOutputRecordField": "https://w3id.org/cwl/cwl#CommandOutputRecordField", "CommandOutputRecordSchema": "https://w3id.org/cwl/cwl#CommandOutputRecordSchema", "Directory": 
"https://w3id.org/cwl/cwl#Directory", "Dirent": "https://w3id.org/cwl/cwl#Dirent", "DockerRequirement": "https://w3id.org/cwl/cwl#DockerRequirement", "EnumSchema": "https://w3id.org/cwl/salad#EnumSchema", "EnvVarRequirement": "https://w3id.org/cwl/cwl#EnvVarRequirement", "EnvironmentDef": "https://w3id.org/cwl/cwl#EnvironmentDef", "Expression": "https://w3id.org/cwl/cwl#Expression", "ExpressionPlaceholder": "https://w3id.org/cwl/cwl#ExpressionPlaceholder", "ExpressionTool": "https://w3id.org/cwl/cwl#ExpressionTool", "ExpressionToolOutputParameter": "https://w3id.org/cwl/cwl#ExpressionToolOutputParameter", "File": "https://w3id.org/cwl/cwl#File", "InitialWorkDirRequirement": "https://w3id.org/cwl/cwl#InitialWorkDirRequirement", "InlineJavascriptRequirement": "https://w3id.org/cwl/cwl#InlineJavascriptRequirement", "InputArraySchema": "https://w3id.org/cwl/cwl#InputArraySchema", "InputBinding": "https://w3id.org/cwl/cwl#InputBinding", "InputEnumSchema": "https://w3id.org/cwl/cwl#InputEnumSchema", "InputParameter": "https://w3id.org/cwl/cwl#InputParameter", "InputRecordField": "https://w3id.org/cwl/cwl#InputRecordField", "InputRecordSchema": "https://w3id.org/cwl/cwl#InputRecordSchema", "InputSchema": "https://w3id.org/cwl/cwl#InputSchema", "LinkMergeMethod": "https://w3id.org/cwl/cwl#LinkMergeMethod", "MultipleInputFeatureRequirement": "https://w3id.org/cwl/cwl#MultipleInputFeatureRequirement", "OutputArraySchema": "https://w3id.org/cwl/cwl#OutputArraySchema", "OutputBinding": "https://w3id.org/cwl/cwl#OutputBinding", "OutputEnumSchema": "https://w3id.org/cwl/cwl#OutputEnumSchema", "OutputParameter": "https://w3id.org/cwl/cwl#OutputParameter", "OutputRecordField": "https://w3id.org/cwl/cwl#OutputRecordField", "OutputRecordSchema": "https://w3id.org/cwl/cwl#OutputRecordSchema", "OutputSchema": "https://w3id.org/cwl/cwl#OutputSchema", "Parameter": "https://w3id.org/cwl/cwl#Parameter", "PrimitiveType": "https://w3id.org/cwl/salad#PrimitiveType", "Process": "https://w3id.org/cwl/cwl#Process", "ProcessRequirement": "https://w3id.org/cwl/cwl#ProcessRequirement", "RecordField": "https://w3id.org/cwl/salad#RecordField", "RecordSchema": "https://w3id.org/cwl/salad#RecordSchema", "ResourceRequirement": "https://w3id.org/cwl/cwl#ResourceRequirement", "ScatterFeatureRequirement": "https://w3id.org/cwl/cwl#ScatterFeatureRequirement", "ScatterMethod": "https://w3id.org/cwl/cwl#ScatterMethod", "SchemaBase": "https://w3id.org/cwl/cwl#SchemaBase", "SchemaDefRequirement": "https://w3id.org/cwl/cwl#SchemaDefRequirement", "ShellCommandRequirement": "https://w3id.org/cwl/cwl#ShellCommandRequirement", "Sink": "https://w3id.org/cwl/cwl#Sink", "SoftwarePackage": "https://w3id.org/cwl/cwl#SoftwarePackage", "SoftwareRequirement": "https://w3id.org/cwl/cwl#SoftwareRequirement", "StepInputExpressionRequirement": "https://w3id.org/cwl/cwl#StepInputExpressionRequirement", "SubworkflowFeatureRequirement": "https://w3id.org/cwl/cwl#SubworkflowFeatureRequirement", "Workflow": "https://w3id.org/cwl/cwl#Workflow", "WorkflowOutputParameter": "https://w3id.org/cwl/cwl#WorkflowOutputParameter", "WorkflowStep": "https://w3id.org/cwl/cwl#WorkflowStep", "WorkflowStepInput": "https://w3id.org/cwl/cwl#WorkflowStepInput", "WorkflowStepOutput": "https://w3id.org/cwl/cwl#WorkflowStepOutput", "array": "https://w3id.org/cwl/salad#array", "boolean": "http://www.w3.org/2001/XMLSchema#boolean", "dotproduct": "https://w3id.org/cwl/cwl#ScatterMethod/dotproduct", "double": "http://www.w3.org/2001/XMLSchema#double", "draft-2": 
"https://w3id.org/cwl/cwl#draft-2", "draft-3": "https://w3id.org/cwl/cwl#draft-3", "draft-3.dev1": "https://w3id.org/cwl/cwl#draft-3.dev1", "draft-3.dev2": "https://w3id.org/cwl/cwl#draft-3.dev2", "draft-3.dev3": "https://w3id.org/cwl/cwl#draft-3.dev3", "draft-3.dev4": "https://w3id.org/cwl/cwl#draft-3.dev4", "draft-3.dev5": "https://w3id.org/cwl/cwl#draft-3.dev5", "draft-4.dev1": "https://w3id.org/cwl/cwl#draft-4.dev1", "draft-4.dev2": "https://w3id.org/cwl/cwl#draft-4.dev2", "draft-4.dev3": "https://w3id.org/cwl/cwl#draft-4.dev3", "enum": "https://w3id.org/cwl/salad#enum", "flat_crossproduct": "https://w3id.org/cwl/cwl#ScatterMethod/flat_crossproduct", "float": "http://www.w3.org/2001/XMLSchema#float", "int": "http://www.w3.org/2001/XMLSchema#int", "long": "http://www.w3.org/2001/XMLSchema#long", "merge_flattened": "https://w3id.org/cwl/cwl#LinkMergeMethod/merge_flattened", "merge_nested": "https://w3id.org/cwl/cwl#LinkMergeMethod/merge_nested", "nested_crossproduct": "https://w3id.org/cwl/cwl#ScatterMethod/nested_crossproduct", "null": "https://w3id.org/cwl/salad#null", "record": "https://w3id.org/cwl/salad#record", "stderr": "https://w3id.org/cwl/cwl#stderr", "stdout": "https://w3id.org/cwl/cwl#stdout", "string": "http://www.w3.org/2001/XMLSchema#string", "v1.0": "https://w3id.org/cwl/cwl#v1.0", "v1.0.dev4": "https://w3id.org/cwl/cwl#v1.0.dev4", } _rvocab = { "https://w3id.org/cwl/salad#Any": "Any", "https://w3id.org/cwl/salad#ArraySchema": "ArraySchema", "https://w3id.org/cwl/cwl#CWLType": "CWLType", "https://w3id.org/cwl/cwl#CWLVersion": "CWLVersion", "https://w3id.org/cwl/cwl#CommandInputArraySchema": "CommandInputArraySchema", "https://w3id.org/cwl/cwl#CommandInputEnumSchema": "CommandInputEnumSchema", "https://w3id.org/cwl/cwl#CommandInputParameter": "CommandInputParameter", "https://w3id.org/cwl/cwl#CommandInputRecordField": "CommandInputRecordField", "https://w3id.org/cwl/cwl#CommandInputRecordSchema": "CommandInputRecordSchema", "https://w3id.org/cwl/cwl#CommandLineBinding": "CommandLineBinding", "https://w3id.org/cwl/cwl#CommandLineTool": "CommandLineTool", "https://w3id.org/cwl/cwl#CommandOutputArraySchema": "CommandOutputArraySchema", "https://w3id.org/cwl/cwl#CommandOutputBinding": "CommandOutputBinding", "https://w3id.org/cwl/cwl#CommandOutputEnumSchema": "CommandOutputEnumSchema", "https://w3id.org/cwl/cwl#CommandOutputParameter": "CommandOutputParameter", "https://w3id.org/cwl/cwl#CommandOutputRecordField": "CommandOutputRecordField", "https://w3id.org/cwl/cwl#CommandOutputRecordSchema": "CommandOutputRecordSchema", "https://w3id.org/cwl/cwl#Directory": "Directory", "https://w3id.org/cwl/cwl#Dirent": "Dirent", "https://w3id.org/cwl/cwl#DockerRequirement": "DockerRequirement", "https://w3id.org/cwl/salad#EnumSchema": "EnumSchema", "https://w3id.org/cwl/cwl#EnvVarRequirement": "EnvVarRequirement", "https://w3id.org/cwl/cwl#EnvironmentDef": "EnvironmentDef", "https://w3id.org/cwl/cwl#Expression": "Expression", "https://w3id.org/cwl/cwl#ExpressionPlaceholder": "ExpressionPlaceholder", "https://w3id.org/cwl/cwl#ExpressionTool": "ExpressionTool", "https://w3id.org/cwl/cwl#ExpressionToolOutputParameter": "ExpressionToolOutputParameter", "https://w3id.org/cwl/cwl#File": "File", "https://w3id.org/cwl/cwl#InitialWorkDirRequirement": "InitialWorkDirRequirement", "https://w3id.org/cwl/cwl#InlineJavascriptRequirement": "InlineJavascriptRequirement", "https://w3id.org/cwl/cwl#InputArraySchema": "InputArraySchema", "https://w3id.org/cwl/cwl#InputBinding": "InputBinding", 
"https://w3id.org/cwl/cwl#InputEnumSchema": "InputEnumSchema", "https://w3id.org/cwl/cwl#InputParameter": "InputParameter", "https://w3id.org/cwl/cwl#InputRecordField": "InputRecordField", "https://w3id.org/cwl/cwl#InputRecordSchema": "InputRecordSchema", "https://w3id.org/cwl/cwl#InputSchema": "InputSchema", "https://w3id.org/cwl/cwl#LinkMergeMethod": "LinkMergeMethod", "https://w3id.org/cwl/cwl#MultipleInputFeatureRequirement": "MultipleInputFeatureRequirement", "https://w3id.org/cwl/cwl#OutputArraySchema": "OutputArraySchema", "https://w3id.org/cwl/cwl#OutputBinding": "OutputBinding", "https://w3id.org/cwl/cwl#OutputEnumSchema": "OutputEnumSchema", "https://w3id.org/cwl/cwl#OutputParameter": "OutputParameter", "https://w3id.org/cwl/cwl#OutputRecordField": "OutputRecordField", "https://w3id.org/cwl/cwl#OutputRecordSchema": "OutputRecordSchema", "https://w3id.org/cwl/cwl#OutputSchema": "OutputSchema", "https://w3id.org/cwl/cwl#Parameter": "Parameter", "https://w3id.org/cwl/salad#PrimitiveType": "PrimitiveType", "https://w3id.org/cwl/cwl#Process": "Process", "https://w3id.org/cwl/cwl#ProcessRequirement": "ProcessRequirement", "https://w3id.org/cwl/salad#RecordField": "RecordField", "https://w3id.org/cwl/salad#RecordSchema": "RecordSchema", "https://w3id.org/cwl/cwl#ResourceRequirement": "ResourceRequirement", "https://w3id.org/cwl/cwl#ScatterFeatureRequirement": "ScatterFeatureRequirement", "https://w3id.org/cwl/cwl#ScatterMethod": "ScatterMethod", "https://w3id.org/cwl/cwl#SchemaBase": "SchemaBase", "https://w3id.org/cwl/cwl#SchemaDefRequirement": "SchemaDefRequirement", "https://w3id.org/cwl/cwl#ShellCommandRequirement": "ShellCommandRequirement", "https://w3id.org/cwl/cwl#Sink": "Sink", "https://w3id.org/cwl/cwl#SoftwarePackage": "SoftwarePackage", "https://w3id.org/cwl/cwl#SoftwareRequirement": "SoftwareRequirement", "https://w3id.org/cwl/cwl#StepInputExpressionRequirement": "StepInputExpressionRequirement", "https://w3id.org/cwl/cwl#SubworkflowFeatureRequirement": "SubworkflowFeatureRequirement", "https://w3id.org/cwl/cwl#Workflow": "Workflow", "https://w3id.org/cwl/cwl#WorkflowOutputParameter": "WorkflowOutputParameter", "https://w3id.org/cwl/cwl#WorkflowStep": "WorkflowStep", "https://w3id.org/cwl/cwl#WorkflowStepInput": "WorkflowStepInput", "https://w3id.org/cwl/cwl#WorkflowStepOutput": "WorkflowStepOutput", "https://w3id.org/cwl/salad#array": "array", "http://www.w3.org/2001/XMLSchema#boolean": "boolean", "https://w3id.org/cwl/cwl#ScatterMethod/dotproduct": "dotproduct", "http://www.w3.org/2001/XMLSchema#double": "double", "https://w3id.org/cwl/cwl#draft-2": "draft-2", "https://w3id.org/cwl/cwl#draft-3": "draft-3", "https://w3id.org/cwl/cwl#draft-3.dev1": "draft-3.dev1", "https://w3id.org/cwl/cwl#draft-3.dev2": "draft-3.dev2", "https://w3id.org/cwl/cwl#draft-3.dev3": "draft-3.dev3", "https://w3id.org/cwl/cwl#draft-3.dev4": "draft-3.dev4", "https://w3id.org/cwl/cwl#draft-3.dev5": "draft-3.dev5", "https://w3id.org/cwl/cwl#draft-4.dev1": "draft-4.dev1", "https://w3id.org/cwl/cwl#draft-4.dev2": "draft-4.dev2", "https://w3id.org/cwl/cwl#draft-4.dev3": "draft-4.dev3", "https://w3id.org/cwl/salad#enum": "enum", "https://w3id.org/cwl/cwl#ScatterMethod/flat_crossproduct": "flat_crossproduct", "http://www.w3.org/2001/XMLSchema#float": "float", "http://www.w3.org/2001/XMLSchema#int": "int", "http://www.w3.org/2001/XMLSchema#long": "long", "https://w3id.org/cwl/cwl#LinkMergeMethod/merge_flattened": "merge_flattened", "https://w3id.org/cwl/cwl#LinkMergeMethod/merge_nested": "merge_nested", 
"https://w3id.org/cwl/cwl#ScatterMethod/nested_crossproduct": "nested_crossproduct", "https://w3id.org/cwl/salad#null": "null", "https://w3id.org/cwl/salad#record": "record", "https://w3id.org/cwl/cwl#stderr": "stderr", "https://w3id.org/cwl/cwl#stdout": "stdout", "http://www.w3.org/2001/XMLSchema#string": "string", "https://w3id.org/cwl/cwl#v1.0": "v1.0", "https://w3id.org/cwl/cwl#v1.0.dev4": "v1.0.dev4", } strtype = _PrimitiveLoader((str, six.text_type)) inttype = _PrimitiveLoader(int) floattype = _PrimitiveLoader(float) booltype = _PrimitiveLoader(bool) None_type = _PrimitiveLoader(type(None)) Any_type = _AnyLoader() PrimitiveTypeLoader = _EnumLoader(("null", "boolean", "int", "long", "float", "double", "string",)) AnyLoader = _EnumLoader(("Any",)) RecordFieldLoader = _RecordLoader(RecordField) RecordSchemaLoader = _RecordLoader(RecordSchema) EnumSchemaLoader = _RecordLoader(EnumSchema) ArraySchemaLoader = _RecordLoader(ArraySchema) CWLVersionLoader = _EnumLoader(("draft-2", "draft-3.dev1", "draft-3.dev2", "draft-3.dev3", "draft-3.dev4", "draft-3.dev5", "draft-3", "draft-4.dev1", "draft-4.dev2", "draft-4.dev3", "v1.0.dev4", "v1.0",)) CWLTypeLoader = _EnumLoader(("File", "Directory",)) FileLoader = _RecordLoader(File) DirectoryLoader = _RecordLoader(Directory) SchemaBaseLoader = _RecordLoader(SchemaBase) ParameterLoader = _RecordLoader(Parameter) ExpressionLoader = _EnumLoader(("ExpressionPlaceholder",)) InputBindingLoader = _RecordLoader(InputBinding) OutputBindingLoader = _RecordLoader(OutputBinding) InputSchemaLoader = _RecordLoader(InputSchema) OutputSchemaLoader = _RecordLoader(OutputSchema) InputRecordFieldLoader = _RecordLoader(InputRecordField) InputRecordSchemaLoader = _RecordLoader(InputRecordSchema) InputEnumSchemaLoader = _RecordLoader(InputEnumSchema) InputArraySchemaLoader = _RecordLoader(InputArraySchema) OutputRecordFieldLoader = _RecordLoader(OutputRecordField) OutputRecordSchemaLoader = _RecordLoader(OutputRecordSchema) OutputEnumSchemaLoader = _RecordLoader(OutputEnumSchema) OutputArraySchemaLoader = _RecordLoader(OutputArraySchema) InputParameterLoader = _RecordLoader(InputParameter) OutputParameterLoader = _RecordLoader(OutputParameter) ProcessRequirementLoader = _RecordLoader(ProcessRequirement) ProcessLoader = _RecordLoader(Process) InlineJavascriptRequirementLoader = _RecordLoader(InlineJavascriptRequirement) SchemaDefRequirementLoader = _RecordLoader(SchemaDefRequirement) EnvironmentDefLoader = _RecordLoader(EnvironmentDef) CommandLineBindingLoader = _RecordLoader(CommandLineBinding) CommandOutputBindingLoader = _RecordLoader(CommandOutputBinding) CommandInputRecordFieldLoader = _RecordLoader(CommandInputRecordField) CommandInputRecordSchemaLoader = _RecordLoader(CommandInputRecordSchema) CommandInputEnumSchemaLoader = _RecordLoader(CommandInputEnumSchema) CommandInputArraySchemaLoader = _RecordLoader(CommandInputArraySchema) CommandOutputRecordFieldLoader = _RecordLoader(CommandOutputRecordField) CommandOutputRecordSchemaLoader = _RecordLoader(CommandOutputRecordSchema) CommandOutputEnumSchemaLoader = _RecordLoader(CommandOutputEnumSchema) CommandOutputArraySchemaLoader = _RecordLoader(CommandOutputArraySchema) CommandInputParameterLoader = _RecordLoader(CommandInputParameter) CommandOutputParameterLoader = _RecordLoader(CommandOutputParameter) stdoutLoader = _EnumLoader(("stdout",)) stderrLoader = _EnumLoader(("stderr",)) CommandLineToolLoader = _RecordLoader(CommandLineTool) DockerRequirementLoader = _RecordLoader(DockerRequirement) SoftwareRequirementLoader = 
_RecordLoader(SoftwareRequirement) SoftwarePackageLoader = _RecordLoader(SoftwarePackage) DirentLoader = _RecordLoader(Dirent) InitialWorkDirRequirementLoader = _RecordLoader(InitialWorkDirRequirement) EnvVarRequirementLoader = _RecordLoader(EnvVarRequirement) ShellCommandRequirementLoader = _RecordLoader(ShellCommandRequirement) ResourceRequirementLoader = _RecordLoader(ResourceRequirement) ExpressionToolOutputParameterLoader = _RecordLoader(ExpressionToolOutputParameter) ExpressionToolLoader = _RecordLoader(ExpressionTool) LinkMergeMethodLoader = _EnumLoader(("merge_nested", "merge_flattened",)) WorkflowOutputParameterLoader = _RecordLoader(WorkflowOutputParameter) SinkLoader = _RecordLoader(Sink) WorkflowStepInputLoader = _RecordLoader(WorkflowStepInput) WorkflowStepOutputLoader = _RecordLoader(WorkflowStepOutput) ScatterMethodLoader = _EnumLoader(("dotproduct", "nested_crossproduct", "flat_crossproduct",)) WorkflowStepLoader = _RecordLoader(WorkflowStep) WorkflowLoader = _RecordLoader(Workflow) SubworkflowFeatureRequirementLoader = _RecordLoader(SubworkflowFeatureRequirement) ScatterFeatureRequirementLoader = _RecordLoader(ScatterFeatureRequirement) MultipleInputFeatureRequirementLoader = _RecordLoader(MultipleInputFeatureRequirement) StepInputExpressionRequirementLoader = _RecordLoader(StepInputExpressionRequirement) uri_strtype_True_False_None = _URILoader(strtype, True, False, None) union_of_None_type_or_strtype = _UnionLoader((None_type, strtype,)) union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype = _UnionLoader((PrimitiveTypeLoader, RecordSchemaLoader, EnumSchemaLoader, ArraySchemaLoader, strtype,)) array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype = _ArrayLoader(union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype) union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype = _UnionLoader((PrimitiveTypeLoader, RecordSchemaLoader, EnumSchemaLoader, ArraySchemaLoader, strtype, array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype,)) typedsl_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_2 = _TypeDSLLoader(union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype, 2) array_of_RecordFieldLoader = _ArrayLoader(RecordFieldLoader) union_of_None_type_or_array_of_RecordFieldLoader = _UnionLoader((None_type, array_of_RecordFieldLoader,)) idmap_fields_union_of_None_type_or_array_of_RecordFieldLoader = _IdMapLoader(union_of_None_type_or_array_of_RecordFieldLoader, 'name', 'type') Record_symbolLoader = _EnumLoader(("record",)) typedsl_Record_symbolLoader_2 = _TypeDSLLoader(Record_symbolLoader, 2) array_of_strtype = _ArrayLoader(strtype) uri_array_of_strtype_True_False_None = _URILoader(array_of_strtype, True, False, None) Enum_symbolLoader = _EnumLoader(("enum",)) typedsl_Enum_symbolLoader_2 = _TypeDSLLoader(Enum_symbolLoader, 2) 
uri_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_False_True_2 = _URILoader(union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype, False, True, 2) Array_symbolLoader = _EnumLoader(("array",)) typedsl_Array_symbolLoader_2 = _TypeDSLLoader(Array_symbolLoader, 2) File_classLoader = _EnumLoader(("File",)) uri_File_classLoader_False_True_None = _URILoader(File_classLoader, False, True, None) uri_union_of_None_type_or_strtype_False_False_None = _URILoader(union_of_None_type_or_strtype, False, False, None) union_of_None_type_or_inttype = _UnionLoader((None_type, inttype,)) union_of_FileLoader_or_DirectoryLoader = _UnionLoader((FileLoader, DirectoryLoader,)) array_of_union_of_FileLoader_or_DirectoryLoader = _ArrayLoader(union_of_FileLoader_or_DirectoryLoader) union_of_None_type_or_array_of_union_of_FileLoader_or_DirectoryLoader = _UnionLoader((None_type, array_of_union_of_FileLoader_or_DirectoryLoader,)) uri_union_of_None_type_or_strtype_True_False_None = _URILoader(union_of_None_type_or_strtype, True, False, None) Directory_classLoader = _EnumLoader(("Directory",)) uri_Directory_classLoader_False_True_None = _URILoader(Directory_classLoader, False, True, None) union_of_strtype_or_ExpressionLoader = _UnionLoader((strtype, ExpressionLoader,)) array_of_union_of_strtype_or_ExpressionLoader = _ArrayLoader(union_of_strtype_or_ExpressionLoader) union_of_None_type_or_strtype_or_ExpressionLoader_or_array_of_union_of_strtype_or_ExpressionLoader = _UnionLoader((None_type, strtype, ExpressionLoader, array_of_union_of_strtype_or_ExpressionLoader,)) union_of_None_type_or_booltype = _UnionLoader((None_type, booltype,)) union_of_None_type_or_strtype_or_array_of_strtype = _UnionLoader((None_type, strtype, array_of_strtype,)) union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype = _UnionLoader((CWLTypeLoader, InputRecordSchemaLoader, InputEnumSchemaLoader, InputArraySchemaLoader, strtype,)) array_of_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype = _ArrayLoader(union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype) union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype = _UnionLoader((CWLTypeLoader, InputRecordSchemaLoader, InputEnumSchemaLoader, InputArraySchemaLoader, strtype, array_of_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype,)) typedsl_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_2 = _TypeDSLLoader(union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype, 2) 
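# NOTE: a hedged sketch, not a statement of this module's exact behaviour. The
# `_TypeDSLLoader` wrappers above appear to implement the Schema Salad type-DSL
# shorthand used throughout CWL, so a `type` field written as a compact string
# is accepted alongside the fully expanded form, e.g. (assumed expansions):
#
#   "string[]"   ->  {"type": "array", "items": "string"}
#   "string?"    ->  ["null", "string"]
#   "string[]?"  ->  ["null", {"type": "array", "items": "string"}]
#
# Either spelling would then load to the same in-memory value once passed
# through one of the `typedsl_*` loaders.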
union_of_None_type_or_CommandLineBindingLoader = _UnionLoader((None_type, CommandLineBindingLoader,)) array_of_InputRecordFieldLoader = _ArrayLoader(InputRecordFieldLoader) union_of_None_type_or_array_of_InputRecordFieldLoader = _UnionLoader((None_type, array_of_InputRecordFieldLoader,)) idmap_fields_union_of_None_type_or_array_of_InputRecordFieldLoader = _IdMapLoader(union_of_None_type_or_array_of_InputRecordFieldLoader, 'name', 'type') uri_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_False_True_2 = _URILoader(union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype, False, True, 2) union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype = _UnionLoader((CWLTypeLoader, OutputRecordSchemaLoader, OutputEnumSchemaLoader, OutputArraySchemaLoader, strtype,)) array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype = _ArrayLoader(union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype) union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype = _UnionLoader((CWLTypeLoader, OutputRecordSchemaLoader, OutputEnumSchemaLoader, OutputArraySchemaLoader, strtype, array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype,)) typedsl_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_2 = _TypeDSLLoader(union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype, 2) union_of_None_type_or_CommandOutputBindingLoader = _UnionLoader((None_type, CommandOutputBindingLoader,)) array_of_OutputRecordFieldLoader = _ArrayLoader(OutputRecordFieldLoader) union_of_None_type_or_array_of_OutputRecordFieldLoader = _UnionLoader((None_type, array_of_OutputRecordFieldLoader,)) idmap_fields_union_of_None_type_or_array_of_OutputRecordFieldLoader = _IdMapLoader(union_of_None_type_or_array_of_OutputRecordFieldLoader, 'name', 'type') uri_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_False_True_2 = _URILoader(union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype, False, True, 2) union_of_None_type_or_strtype_or_array_of_strtype_or_ExpressionLoader = _UnionLoader((None_type, strtype, array_of_strtype, 
ExpressionLoader,)) uri_union_of_None_type_or_strtype_or_array_of_strtype_or_ExpressionLoader_True_False_None = _URILoader(union_of_None_type_or_strtype_or_array_of_strtype_or_ExpressionLoader, True, False, None) union_of_None_type_or_Any_type = _UnionLoader((None_type, Any_type,)) union_of_None_type_or_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype = _UnionLoader((None_type, CWLTypeLoader, InputRecordSchemaLoader, InputEnumSchemaLoader, InputArraySchemaLoader, strtype, array_of_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype,)) typedsl_union_of_None_type_or_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_2 = _TypeDSLLoader(union_of_None_type_or_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype, 2) union_of_None_type_or_strtype_or_ExpressionLoader = _UnionLoader((None_type, strtype, ExpressionLoader,)) uri_union_of_None_type_or_strtype_or_ExpressionLoader_True_False_None = _URILoader(union_of_None_type_or_strtype_or_ExpressionLoader, True, False, None) array_of_InputParameterLoader = _ArrayLoader(InputParameterLoader) idmap_inputs_array_of_InputParameterLoader = _IdMapLoader(array_of_InputParameterLoader, 'id', 'type') array_of_OutputParameterLoader = _ArrayLoader(OutputParameterLoader) idmap_outputs_array_of_OutputParameterLoader = _IdMapLoader(array_of_OutputParameterLoader, 'id', 'type') union_of_InlineJavascriptRequirementLoader_or_SchemaDefRequirementLoader_or_DockerRequirementLoader_or_SoftwareRequirementLoader_or_InitialWorkDirRequirementLoader_or_EnvVarRequirementLoader_or_ShellCommandRequirementLoader_or_ResourceRequirementLoader_or_SubworkflowFeatureRequirementLoader_or_ScatterFeatureRequirementLoader_or_MultipleInputFeatureRequirementLoader_or_StepInputExpressionRequirementLoader = _UnionLoader((InlineJavascriptRequirementLoader, SchemaDefRequirementLoader, DockerRequirementLoader, SoftwareRequirementLoader, InitialWorkDirRequirementLoader, EnvVarRequirementLoader, ShellCommandRequirementLoader, ResourceRequirementLoader, SubworkflowFeatureRequirementLoader, ScatterFeatureRequirementLoader, MultipleInputFeatureRequirementLoader, StepInputExpressionRequirementLoader,)) array_of_union_of_InlineJavascriptRequirementLoader_or_SchemaDefRequirementLoader_or_DockerRequirementLoader_or_SoftwareRequirementLoader_or_InitialWorkDirRequirementLoader_or_EnvVarRequirementLoader_or_ShellCommandRequirementLoader_or_ResourceRequirementLoader_or_SubworkflowFeatureRequirementLoader_or_ScatterFeatureRequirementLoader_or_MultipleInputFeatureRequirementLoader_or_StepInputExpressionRequirementLoader = 
_ArrayLoader(union_of_InlineJavascriptRequirementLoader_or_SchemaDefRequirementLoader_or_DockerRequirementLoader_or_SoftwareRequirementLoader_or_InitialWorkDirRequirementLoader_or_EnvVarRequirementLoader_or_ShellCommandRequirementLoader_or_ResourceRequirementLoader_or_SubworkflowFeatureRequirementLoader_or_ScatterFeatureRequirementLoader_or_MultipleInputFeatureRequirementLoader_or_StepInputExpressionRequirementLoader) union_of_None_type_or_array_of_union_of_InlineJavascriptRequirementLoader_or_SchemaDefRequirementLoader_or_DockerRequirementLoader_or_SoftwareRequirementLoader_or_InitialWorkDirRequirementLoader_or_EnvVarRequirementLoader_or_ShellCommandRequirementLoader_or_ResourceRequirementLoader_or_SubworkflowFeatureRequirementLoader_or_ScatterFeatureRequirementLoader_or_MultipleInputFeatureRequirementLoader_or_StepInputExpressionRequirementLoader = _UnionLoader((None_type, array_of_union_of_InlineJavascriptRequirementLoader_or_SchemaDefRequirementLoader_or_DockerRequirementLoader_or_SoftwareRequirementLoader_or_InitialWorkDirRequirementLoader_or_EnvVarRequirementLoader_or_ShellCommandRequirementLoader_or_ResourceRequirementLoader_or_SubworkflowFeatureRequirementLoader_or_ScatterFeatureRequirementLoader_or_MultipleInputFeatureRequirementLoader_or_StepInputExpressionRequirementLoader,)) idmap_requirements_union_of_None_type_or_array_of_union_of_InlineJavascriptRequirementLoader_or_SchemaDefRequirementLoader_or_DockerRequirementLoader_or_SoftwareRequirementLoader_or_InitialWorkDirRequirementLoader_or_EnvVarRequirementLoader_or_ShellCommandRequirementLoader_or_ResourceRequirementLoader_or_SubworkflowFeatureRequirementLoader_or_ScatterFeatureRequirementLoader_or_MultipleInputFeatureRequirementLoader_or_StepInputExpressionRequirementLoader = _IdMapLoader(union_of_None_type_or_array_of_union_of_InlineJavascriptRequirementLoader_or_SchemaDefRequirementLoader_or_DockerRequirementLoader_or_SoftwareRequirementLoader_or_InitialWorkDirRequirementLoader_or_EnvVarRequirementLoader_or_ShellCommandRequirementLoader_or_ResourceRequirementLoader_or_SubworkflowFeatureRequirementLoader_or_ScatterFeatureRequirementLoader_or_MultipleInputFeatureRequirementLoader_or_StepInputExpressionRequirementLoader, 'class', 'None') array_of_Any_type = _ArrayLoader(Any_type) union_of_None_type_or_array_of_Any_type = _UnionLoader((None_type, array_of_Any_type,)) idmap_hints_union_of_None_type_or_array_of_Any_type = _IdMapLoader(union_of_None_type_or_array_of_Any_type, 'class', 'None') union_of_None_type_or_CWLVersionLoader = _UnionLoader((None_type, CWLVersionLoader,)) uri_union_of_None_type_or_CWLVersionLoader_False_True_None = _URILoader(union_of_None_type_or_CWLVersionLoader, False, True, None) uri_strtype_False_True_None = _URILoader(strtype, False, True, None) union_of_None_type_or_array_of_strtype = _UnionLoader((None_type, array_of_strtype,)) union_of_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader = _UnionLoader((InputRecordSchemaLoader, InputEnumSchemaLoader, InputArraySchemaLoader,)) array_of_union_of_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader = _ArrayLoader(union_of_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader) union_of_None_type_or_strtype_or_ExpressionLoader_or_array_of_strtype = _UnionLoader((None_type, strtype, ExpressionLoader, array_of_strtype,)) union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype = _UnionLoader((CWLTypeLoader, 
CommandInputRecordSchemaLoader, CommandInputEnumSchemaLoader, CommandInputArraySchemaLoader, strtype,)) array_of_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype = _ArrayLoader(union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype) union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype = _UnionLoader((CWLTypeLoader, CommandInputRecordSchemaLoader, CommandInputEnumSchemaLoader, CommandInputArraySchemaLoader, strtype, array_of_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype,)) typedsl_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_2 = _TypeDSLLoader(union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype, 2) array_of_CommandInputRecordFieldLoader = _ArrayLoader(CommandInputRecordFieldLoader) union_of_None_type_or_array_of_CommandInputRecordFieldLoader = _UnionLoader((None_type, array_of_CommandInputRecordFieldLoader,)) idmap_fields_union_of_None_type_or_array_of_CommandInputRecordFieldLoader = _IdMapLoader(union_of_None_type_or_array_of_CommandInputRecordFieldLoader, 'name', 'type') uri_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_False_True_2 = _URILoader(union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype, False, True, 2) union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype = _UnionLoader((CWLTypeLoader, CommandOutputRecordSchemaLoader, CommandOutputEnumSchemaLoader, CommandOutputArraySchemaLoader, strtype,)) array_of_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype = _ArrayLoader(union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype) union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype = _UnionLoader((CWLTypeLoader, CommandOutputRecordSchemaLoader, CommandOutputEnumSchemaLoader, CommandOutputArraySchemaLoader, strtype, 
array_of_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype,)) typedsl_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_2 = _TypeDSLLoader(union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype, 2) array_of_CommandOutputRecordFieldLoader = _ArrayLoader(CommandOutputRecordFieldLoader) union_of_None_type_or_array_of_CommandOutputRecordFieldLoader = _UnionLoader((None_type, array_of_CommandOutputRecordFieldLoader,)) idmap_fields_union_of_None_type_or_array_of_CommandOutputRecordFieldLoader = _IdMapLoader(union_of_None_type_or_array_of_CommandOutputRecordFieldLoader, 'name', 'type') uri_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_False_True_2 = _URILoader(union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype, False, True, 2) union_of_None_type_or_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype = _UnionLoader((None_type, CWLTypeLoader, CommandInputRecordSchemaLoader, CommandInputEnumSchemaLoader, CommandInputArraySchemaLoader, strtype, array_of_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype,)) typedsl_union_of_None_type_or_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_2 = _TypeDSLLoader(union_of_None_type_or_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype, 2) union_of_None_type_or_CWLTypeLoader_or_stdoutLoader_or_stderrLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype = _UnionLoader((None_type, CWLTypeLoader, stdoutLoader, stderrLoader, CommandOutputRecordSchemaLoader, CommandOutputEnumSchemaLoader, CommandOutputArraySchemaLoader, strtype, 
array_of_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype,)) typedsl_union_of_None_type_or_CWLTypeLoader_or_stdoutLoader_or_stderrLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_2 = _TypeDSLLoader(union_of_None_type_or_CWLTypeLoader_or_stdoutLoader_or_stderrLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype, 2) array_of_CommandInputParameterLoader = _ArrayLoader(CommandInputParameterLoader) idmap_inputs_array_of_CommandInputParameterLoader = _IdMapLoader(array_of_CommandInputParameterLoader, 'id', 'type') array_of_CommandOutputParameterLoader = _ArrayLoader(CommandOutputParameterLoader) idmap_outputs_array_of_CommandOutputParameterLoader = _IdMapLoader(array_of_CommandOutputParameterLoader, 'id', 'type') union_of_strtype_or_ExpressionLoader_or_CommandLineBindingLoader = _UnionLoader((strtype, ExpressionLoader, CommandLineBindingLoader,)) array_of_union_of_strtype_or_ExpressionLoader_or_CommandLineBindingLoader = _ArrayLoader(union_of_strtype_or_ExpressionLoader_or_CommandLineBindingLoader) union_of_None_type_or_array_of_union_of_strtype_or_ExpressionLoader_or_CommandLineBindingLoader = _UnionLoader((None_type, array_of_union_of_strtype_or_ExpressionLoader_or_CommandLineBindingLoader,)) array_of_inttype = _ArrayLoader(inttype) union_of_None_type_or_array_of_inttype = _UnionLoader((None_type, array_of_inttype,)) array_of_SoftwarePackageLoader = _ArrayLoader(SoftwarePackageLoader) idmap_packages_array_of_SoftwarePackageLoader = _IdMapLoader(array_of_SoftwarePackageLoader, 'package', 'specs') union_of_FileLoader_or_DirectoryLoader_or_DirentLoader_or_strtype_or_ExpressionLoader = _UnionLoader((FileLoader, DirectoryLoader, DirentLoader, strtype, ExpressionLoader,)) array_of_union_of_FileLoader_or_DirectoryLoader_or_DirentLoader_or_strtype_or_ExpressionLoader = _ArrayLoader(union_of_FileLoader_or_DirectoryLoader_or_DirentLoader_or_strtype_or_ExpressionLoader) union_of_array_of_union_of_FileLoader_or_DirectoryLoader_or_DirentLoader_or_strtype_or_ExpressionLoader_or_strtype_or_ExpressionLoader = _UnionLoader((array_of_union_of_FileLoader_or_DirectoryLoader_or_DirentLoader_or_strtype_or_ExpressionLoader, strtype, ExpressionLoader,)) array_of_EnvironmentDefLoader = _ArrayLoader(EnvironmentDefLoader) idmap_envDef_array_of_EnvironmentDefLoader = _IdMapLoader(array_of_EnvironmentDefLoader, 'envName', 'envValue') union_of_None_type_or_inttype_or_strtype_or_ExpressionLoader = _UnionLoader((None_type, inttype, strtype, ExpressionLoader,)) union_of_None_type_or_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype = _UnionLoader((None_type, CWLTypeLoader, OutputRecordSchemaLoader, OutputEnumSchemaLoader, OutputArraySchemaLoader, strtype, array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype,)) 
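# NOTE: a hedged sketch, assuming standard Schema Salad semantics. The
# `_IdMapLoader` wrappers (idmap_inputs_*, idmap_outputs_*, idmap_fields_*,
# idmap_hints_*, ...) appear to implement the "identifier map" shorthand, in
# which a list of records may instead be written as a mapping keyed by the
# subject field (and, for scalar values, the predicate field). For inputs
# keyed on 'id' with predicate 'type', these two spellings would presumably
# load identically:
#
#   inputs:                          inputs:
#     - id: message                    message: string
#       type: string
#
# The map form is normalised back into a list before the record loaders run.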
typedsl_union_of_None_type_or_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_2 = _TypeDSLLoader(union_of_None_type_or_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype, 2) array_of_ExpressionToolOutputParameterLoader = _ArrayLoader(ExpressionToolOutputParameterLoader) idmap_outputs_array_of_ExpressionToolOutputParameterLoader = _IdMapLoader(array_of_ExpressionToolOutputParameterLoader, 'id', 'type') uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_0 = _URILoader(union_of_None_type_or_strtype_or_array_of_strtype, False, False, 0) union_of_None_type_or_LinkMergeMethodLoader = _UnionLoader((None_type, LinkMergeMethodLoader,)) uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_2 = _URILoader(union_of_None_type_or_strtype_or_array_of_strtype, False, False, 2) array_of_WorkflowStepInputLoader = _ArrayLoader(WorkflowStepInputLoader) idmap_in__array_of_WorkflowStepInputLoader = _IdMapLoader(array_of_WorkflowStepInputLoader, 'id', 'source') union_of_strtype_or_WorkflowStepOutputLoader = _UnionLoader((strtype, WorkflowStepOutputLoader,)) array_of_union_of_strtype_or_WorkflowStepOutputLoader = _ArrayLoader(union_of_strtype_or_WorkflowStepOutputLoader) union_of_array_of_union_of_strtype_or_WorkflowStepOutputLoader = _UnionLoader((array_of_union_of_strtype_or_WorkflowStepOutputLoader,)) uri_union_of_array_of_union_of_strtype_or_WorkflowStepOutputLoader_True_False_None = _URILoader(union_of_array_of_union_of_strtype_or_WorkflowStepOutputLoader, True, False, None) union_of_strtype_or_CommandLineToolLoader_or_ExpressionToolLoader_or_WorkflowLoader = _UnionLoader((strtype, CommandLineToolLoader, ExpressionToolLoader, WorkflowLoader,)) uri_union_of_strtype_or_CommandLineToolLoader_or_ExpressionToolLoader_or_WorkflowLoader_False_False_None = _URILoader(union_of_strtype_or_CommandLineToolLoader_or_ExpressionToolLoader_or_WorkflowLoader, False, False, None) union_of_None_type_or_ScatterMethodLoader = _UnionLoader((None_type, ScatterMethodLoader,)) uri_union_of_None_type_or_ScatterMethodLoader_False_True_None = _URILoader(union_of_None_type_or_ScatterMethodLoader, False, True, None) array_of_WorkflowOutputParameterLoader = _ArrayLoader(WorkflowOutputParameterLoader) idmap_outputs_array_of_WorkflowOutputParameterLoader = _IdMapLoader(array_of_WorkflowOutputParameterLoader, 'id', 'type') array_of_WorkflowStepLoader = _ArrayLoader(WorkflowStepLoader) union_of_array_of_WorkflowStepLoader = _UnionLoader((array_of_WorkflowStepLoader,)) idmap_steps_union_of_array_of_WorkflowStepLoader = _IdMapLoader(union_of_array_of_WorkflowStepLoader, 'id', 'None') union_of_CommandLineToolLoader_or_ExpressionToolLoader_or_WorkflowLoader = _UnionLoader((CommandLineToolLoader, ExpressionToolLoader, WorkflowLoader,)) array_of_union_of_CommandLineToolLoader_or_ExpressionToolLoader_or_WorkflowLoader = _ArrayLoader(union_of_CommandLineToolLoader_or_ExpressionToolLoader_or_WorkflowLoader) union_of_CommandLineToolLoader_or_ExpressionToolLoader_or_WorkflowLoader_or_array_of_union_of_CommandLineToolLoader_or_ExpressionToolLoader_or_WorkflowLoader = _UnionLoader((CommandLineToolLoader, ExpressionToolLoader, WorkflowLoader, 
array_of_union_of_CommandLineToolLoader_or_ExpressionToolLoader_or_WorkflowLoader,)) def load_document(doc, baseuri=None, loadingOptions=None): if baseuri is None: baseuri = file_uri(os.getcwd()) + "/" if loadingOptions is None: loadingOptions = LoadingOptions() return _document_load(union_of_CommandLineToolLoader_or_ExpressionToolLoader_or_WorkflowLoader_or_array_of_union_of_CommandLineToolLoader_or_ExpressionToolLoader_or_WorkflowLoader, doc, baseuri, loadingOptions)
[]
[]
[ "TMP", "HOME", "COLUMNS" ]
[]
["TMP", "HOME", "COLUMNS"]
python
3
0
api_yamdb/api_yamdb/wsgi.py
import os

from django.core.wsgi import get_wsgi_application

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'api_yamdb.settings')

application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
vendor/github.com/stripe/goproxy/https.go
package goproxy import ( "bufio" "context" "crypto/tls" "errors" "fmt" "io" "io/ioutil" "net" "net/http" "net/url" "os" "regexp" "strconv" "strings" "sync/atomic" "golang.org/x/net/http/httpproxy" ) type ConnectActionLiteral int const ( ConnectAccept = iota ConnectReject ConnectMitm ConnectHijack ConnectHTTPMitm ConnectProxyAuthHijack ) var ( OkConnect = &ConnectAction{Action: ConnectAccept, TLSConfig: TLSConfigFromCA(&GoproxyCa)} MitmConnect = &ConnectAction{Action: ConnectMitm, TLSConfig: TLSConfigFromCA(&GoproxyCa)} HTTPMitmConnect = &ConnectAction{Action: ConnectHTTPMitm, TLSConfig: TLSConfigFromCA(&GoproxyCa)} RejectConnect = &ConnectAction{Action: ConnectReject, TLSConfig: TLSConfigFromCA(&GoproxyCa)} httpsRegexp = regexp.MustCompile(`^https:\/\/`) ) type ConnectAction struct { Action ConnectActionLiteral Hijack func(req *http.Request, client net.Conn, ctx *ProxyCtx) TLSConfig func(host string, ctx *ProxyCtx) (*tls.Config, error) } func stripPort(s string) string { ix := strings.IndexRune(s, ':') if ix == -1 { return s } return s[:ix] } func (proxy *ProxyHttpServer) dial(network, addr string) (c net.Conn, err error) { if proxy.Tr.Dial == nil { return net.Dial(network, addr) } return proxy.Tr.Dial(network, addr) } func (proxy *ProxyHttpServer) connectDial(network, addr string) (c net.Conn, err error) { if proxy.ConnectDial == nil { return proxy.dial(network, addr) } return proxy.ConnectDial(network, addr) } func (proxy *ProxyHttpServer) dialContext(ctx *ProxyCtx, network, addr string) (c net.Conn, err error) { if proxy.Tr.DialContext == nil { return proxy.connectDial(network, addr) } pctx := context.WithValue(context.Background(), ProxyContextKey, ctx) return proxy.Tr.DialContext(pctx, network, addr) } func (proxy *ProxyHttpServer) connectDialContext(ctx *ProxyCtx, network, addr string) (c net.Conn, err error) { if proxy.ConnectDialContext == nil { return proxy.dialContext(ctx, network, addr) } return proxy.ConnectDialContext(ctx, network, addr) } func (proxy *ProxyHttpServer) handleHttps(w http.ResponseWriter, r *http.Request) { ctx := &ProxyCtx{Req: r, Session: atomic.AddInt64(&proxy.sess, 1), proxy: proxy} hij, ok := w.(http.Hijacker) if !ok { panic("httpserver does not support hijacking") } proxyClient, _, e := hij.Hijack() if e != nil { panic("Cannot hijack connection " + e.Error()) } if proxy.ConnectClientConnHandler != nil { proxyClient = proxy.ConnectClientConnHandler(proxyClient) } ctx.Logf("Running %d CONNECT handlers", len(proxy.httpsHandlers)) todo, host := OkConnect, r.URL.Host for i, h := range proxy.httpsHandlers { newtodo, newhost := h.HandleConnect(host, ctx) // If found a result, break the loop immediately if newtodo != nil { todo, host = newtodo, newhost ctx.Logf("on %dth handler: %v %s", i, todo, host) break } } switch todo.Action { case ConnectAccept: if !hasPort.MatchString(host) { host += ":80" } httpsProxy, err := httpsProxyFromEnv(r.URL) if err != nil { ctx.Warnf("Error configuring HTTPS proxy err=%q url=%q", err, r.URL.String()) } var targetSiteCon net.Conn if httpsProxy == "" { targetSiteCon, err = proxy.connectDialContext(ctx, "tcp", host) } else { targetSiteCon, err = proxy.connectDialProxyWithContext(ctx, httpsProxy, host) } if err != nil { httpError(proxyClient, ctx, err) return } ctx.Logf("Accepting CONNECT to %s", host) proxyClient.Write([]byte("HTTP/1.0 200 OK\r\n\r\n")) if proxy.ConnectCopyHandler != nil { go proxy.ConnectCopyHandler(ctx, proxyClient, targetSiteCon) return } targetTCP, targetOK := targetSiteCon.(*net.TCPConn) proxyClientTCP, clientOK 
:= proxyClient.(*net.TCPConn) if targetOK && clientOK { go copyAndClose(ctx, targetTCP, proxyClientTCP) go copyAndClose(ctx, proxyClientTCP, targetTCP) } else { // There is a race with the runtime here. In the case where the // connection to the target site times out, we cannot control which // io.Copy loop will receive the timeout signal first. This means // that in some cases the error passed to the ConnErrorHandler will // be the timeout error, and in other cases it will be an error raised // by the use of a closed network connection. // // 2020/05/28 23:42:17 [001] WARN: Error copying to client: read tcp 127.0.0.1:33742->127.0.0.1:34763: i/o timeout // 2020/05/28 23:42:17 [001] WARN: Error copying to client: read tcp 127.0.0.1:45145->127.0.0.1:60494: use of closed network connection // // It's also not possible to synchronize these connection closures due to // TCP connections which are half-closed. When this happens, only the one // side of the connection breaks out of its io.Copy loop. The other side // of the connection remains open until it either times out or is reset by // the client. go func() { err := copyOrWarn(ctx, targetSiteCon, proxyClient) if err != nil && ctx.ConnErrorHandler != nil && !isClosedNetworkConnError(err) { ctx.ConnErrorHandler(err) } targetSiteCon.Close() }() go func() { copyOrWarn(ctx, proxyClient, targetSiteCon) proxyClient.Close() }() } case ConnectHijack: ctx.Logf("Hijacking CONNECT to %s", host) proxyClient.Write([]byte("HTTP/1.0 200 OK\r\n\r\n")) todo.Hijack(r, proxyClient, ctx) case ConnectHTTPMitm: proxyClient.Write([]byte("HTTP/1.0 200 OK\r\n\r\n")) ctx.Logf("Assuming CONNECT is plain HTTP tunneling, mitm proxying it") targetSiteCon, err := proxy.connectDial("tcp", host) if err != nil { ctx.Warnf("Error dialing to %s: %s", host, err.Error()) return } for { client := bufio.NewReader(proxyClient) remote := bufio.NewReader(targetSiteCon) req, err := http.ReadRequest(client) if err != nil && err != io.EOF { ctx.Warnf("cannot read request of MITM HTTP client: %+#v", err) } if err != nil { return } req, resp := proxy.filterRequest(req, ctx) if resp == nil { if err := req.Write(targetSiteCon); err != nil { httpError(proxyClient, ctx, err) return } resp, err = http.ReadResponse(remote, req) if err != nil { httpError(proxyClient, ctx, err) return } defer resp.Body.Close() } resp = proxy.filterResponse(resp, ctx) if err := resp.Write(proxyClient); err != nil { httpError(proxyClient, ctx, err) return } } case ConnectMitm: proxyClient.Write([]byte("HTTP/1.0 200 OK\r\n\r\n")) ctx.Logf("Assuming CONNECT is TLS, mitm proxying it") // this goes in a separate goroutine, so that the net/http server won't think we're // still handling the request even after hijacking the connection. Those HTTP CONNECT // request can take forever, and the server will be stuck when "closed". 
// TODO: Allow Server.Close() mechanism to shut down this connection as nicely as possible tlsConfig := defaultTLSConfig if todo.TLSConfig != nil { var err error tlsConfig, err = todo.TLSConfig(host, ctx) if err != nil { httpError(proxyClient, ctx, err) return } } go func() { //TODO: cache connections to the remote website rawClientTls := tls.Server(proxyClient, tlsConfig) if err := rawClientTls.Handshake(); err != nil { ctx.Warnf("Cannot handshake client %v %v", r.Host, err) return } defer rawClientTls.Close() clientTlsReader := bufio.NewReader(rawClientTls) for !isEof(clientTlsReader) { req, err := http.ReadRequest(clientTlsReader) // Set the RoundTripper on the ProxyCtx within the `HandleConnect` action of goproxy, then // inject the roundtripper here in order to use a custom round tripper while mitm. var ctx = &ProxyCtx{Req: req, Session: atomic.AddInt64(&proxy.sess, 1), proxy: proxy, UserData: ctx.UserData, RoundTripper: ctx.RoundTripper} if err != nil && err != io.EOF { return } if err != nil { ctx.Warnf("Cannot read TLS request from mitm'd client %v %v", r.Host, err) return } req.RemoteAddr = r.RemoteAddr // since we're converting the request, need to carry over the original connecting IP as well ctx.Logf("req %v", r.Host) if !httpsRegexp.MatchString(req.URL.String()) { req.URL, err = url.Parse("https://" + r.Host + req.URL.String()) } // Bug fix which goproxy fails to provide request // information URL in the context when does HTTPS MITM ctx.Req = req req, resp := proxy.filterRequest(req, ctx) if resp == nil { if err != nil { ctx.Warnf("Illegal URL %s", "https://"+r.Host+req.URL.Path) return } removeProxyHeaders(ctx, req) resp, err = ctx.RoundTrip(req) if err != nil { ctx.Warnf("Cannot read TLS response from mitm'd server %v", err) return } ctx.Logf("resp %v", resp.Status) } resp = proxy.filterResponse(resp, ctx) defer resp.Body.Close() text := resp.Status statusCode := strconv.Itoa(resp.StatusCode) + " " if strings.HasPrefix(text, statusCode) { text = text[len(statusCode):] } // always use 1.1 to support chunked encoding if _, err := io.WriteString(rawClientTls, "HTTP/1.1"+" "+statusCode+text+"\r\n"); err != nil { ctx.Warnf("Cannot write TLS response HTTP status from mitm'd client: %v", err) return } // Since we don't know the length of resp, return chunked encoded response // TODO: use a more reasonable scheme resp.Header.Del("Content-Length") resp.Header.Set("Transfer-Encoding", "chunked") // Force connection close otherwise chrome will keep CONNECT tunnel open forever resp.Header.Set("Connection", "close") if err := resp.Header.Write(rawClientTls); err != nil { ctx.Warnf("Cannot write TLS response header from mitm'd client: %v", err) return } if _, err = io.WriteString(rawClientTls, "\r\n"); err != nil { ctx.Warnf("Cannot write TLS response header end from mitm'd client: %v", err) return } chunked := newChunkedWriter(rawClientTls) if _, err := io.Copy(chunked, resp.Body); err != nil { ctx.Warnf("Cannot write TLS response body from mitm'd client: %v", err) return } if err := chunked.Close(); err != nil { ctx.Warnf("Cannot write TLS chunked EOF from mitm'd client: %v", err) return } if _, err = io.WriteString(rawClientTls, "\r\n"); err != nil { ctx.Warnf("Cannot write TLS response chunked trailer from mitm'd client: %v", err) return } } ctx.Logf("Exiting on EOF") }() case ConnectProxyAuthHijack: proxyClient.Write([]byte("HTTP/1.1 407 Proxy Authentication Required\r\n")) todo.Hijack(r, proxyClient, ctx) case ConnectReject: if ctx.Resp != nil { if err := 
ctx.Resp.Write(proxyClient); err != nil { ctx.Warnf("Cannot write response that reject http CONNECT: %v", err) } } proxyClient.Close() } } func httpError(w io.WriteCloser, ctx *ProxyCtx, err error) { if ctx.HTTPErrorHandler != nil { ctx.HTTPErrorHandler(w, ctx, err) return } if _, err := io.WriteString(w, "HTTP/1.1 502 Bad Gateway\r\n\r\n"); err != nil { ctx.Warnf("Error responding to client: %s", err) } if err := w.Close(); err != nil { ctx.Warnf("Error closing client connection: %s", err) } } // isClosedNetworkConnError returns true if the error contains the suffix "use of closed network connection". // This isn't ideal, and in Go 1.16 we will be able to check for net.ErrClosed. func isClosedNetworkConnError(err error) bool { return strings.HasSuffix(err.Error(), "use of closed network connection") } func copyOrWarn(ctx *ProxyCtx, dst io.Writer, src io.Reader) error { _, err := io.Copy(dst, src) if err != nil && !isClosedNetworkConnError(err) { ctx.Warnf("Error copying to client: %s", err) } return err } func copyAndClose(ctx *ProxyCtx, dst, src *net.TCPConn) { if _, err := io.Copy(dst, src); err != nil { ctx.Warnf("Error copying to client: %s", err) } dst.CloseWrite() src.CloseRead() } func dialerFromEnv(proxy *ProxyHttpServer) func(network, addr string) (net.Conn, error) { https_proxy := os.Getenv("HTTPS_PROXY") if https_proxy == "" { https_proxy = os.Getenv("https_proxy") } if https_proxy == "" { return nil } return proxy.NewConnectDialToProxy(https_proxy) } func (proxy *ProxyHttpServer) NewConnectDialToProxy(https_proxy string) func(network, addr string) (net.Conn, error) { return proxy.NewConnectDialToProxyWithHandler(https_proxy, nil) } func (proxy *ProxyHttpServer) NewConnectDialToProxyWithHandler(https_proxy string, connectReqHandler func(req *http.Request)) func(network, addr string) (net.Conn, error) { u, err := url.Parse(https_proxy) if err != nil { return nil } if u.Scheme == "" || u.Scheme == "http" { if strings.IndexRune(u.Host, ':') == -1 { u.Host += ":80" } return func(network, addr string) (net.Conn, error) { connectReq := &http.Request{ Method: "CONNECT", URL: &url.URL{Opaque: addr}, Host: addr, Header: make(http.Header), } if connectReqHandler != nil { connectReqHandler(connectReq) } c, err := proxy.dial(network, u.Host) if err != nil { return nil, err } connectReq.Write(c) // Read response. // Okay to use and discard buffered reader here, because // TLS server will not speak until spoken to. br := bufio.NewReader(c) resp, err := http.ReadResponse(br, connectReq) if err != nil { c.Close() return nil, err } defer resp.Body.Close() if resp.StatusCode != 200 { resp, err := ioutil.ReadAll(io.LimitReader(resp.Body, 500)) if err != nil { return nil, err } c.Close() return nil, errors.New("proxy refused connection" + string(resp)) } return c, nil } } if u.Scheme == "https" { if strings.IndexRune(u.Host, ':') == -1 { u.Host += ":443" } return func(network, addr string) (net.Conn, error) { c, err := proxy.dial(network, u.Host) if err != nil { return nil, err } c = tls.Client(c, proxy.Tr.TLSClientConfig) connectReq := &http.Request{ Method: "CONNECT", URL: &url.URL{Opaque: addr}, Host: addr, Header: make(http.Header), } if connectReqHandler != nil { connectReqHandler(connectReq) } connectReq.Write(c) // Read response. // Okay to use and discard buffered reader here, because // TLS server will not speak until spoken to. 
br := bufio.NewReader(c) resp, err := http.ReadResponse(br, connectReq) if err != nil { c.Close() return nil, err } defer resp.Body.Close() if resp.StatusCode != 200 { body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 500)) if err != nil { return nil, err } c.Close() return nil, errors.New("proxy refused connection" + string(body)) } return c, nil } } return nil } func TLSConfigFromCA(ca *tls.Certificate) func(host string, ctx *ProxyCtx) (*tls.Config, error) { return func(host string, ctx *ProxyCtx) (*tls.Config, error) { config := defaultTLSConfig.Clone() ctx.Logf("signing for %s", stripPort(host)) cert, err := signHost(*ca, []string{stripPort(host)}) if err != nil { ctx.Warnf("Cannot sign host certificate with provided CA: %s", err) return nil, err } config.Certificates = append(config.Certificates, cert) return config, nil } } func (proxy *ProxyHttpServer) connectDialProxyWithContext(ctx *ProxyCtx, proxyHost, host string) (net.Conn, error) { proxyURL, err := url.Parse(proxyHost) if err != nil { return nil, err } c, err := proxy.connectDialContext(ctx, "tcp", proxyURL.Host) if err != nil { return nil, err } if proxyURL.Scheme == "https" { c = tls.Client(c, proxy.Tr.TLSClientConfig) } connectReq := &http.Request{ Method: "CONNECT", URL: &url.URL{Opaque: host}, Host: host, Header: make(http.Header), } connectReq.Write(c) // Read response. // Okay to use and discard buffered reader here, because // TLS server will not speak until spoken to. br := bufio.NewReader(c) resp, err := http.ReadResponse(br, connectReq) if err != nil { c.Close() return nil, err } defer resp.Body.Close() if resp.StatusCode != 200 { body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 500)) if err != nil { return nil, err } c.Close() return nil, errors.New("proxy refused connection" + string(body)) } return c, nil } // httpsProxyFromEnv allows goproxy to respect no_proxy env vars // https://github.com/stripe/goproxy/pull/5 func httpsProxyFromEnv(reqURL *url.URL) (string, error) { cfg := httpproxy.FromEnvironment() // We only use this codepath for HTTPS CONNECT proxies so we shouldn't // return anything from HTTPProxy cfg.HTTPProxy = "" // The request URL provided to the proxy for a CONNECT request does // not necessarily have an https scheme but ProxyFunc uses the scheme // to determine which env var to introspect. reqSchemeURL := reqURL reqSchemeURL.Scheme = "https" proxyURL, err := cfg.ProxyFunc()(reqSchemeURL) if err != nil { return "", err } if proxyURL == nil { return "", nil } service := proxyURL.Port() if service == "" { service = proxyURL.Scheme } return fmt.Sprintf("%s://%s:%s", proxyURL.Scheme, proxyURL.Hostname(), service), nil }
[ "\"HTTPS_PROXY\"", "\"https_proxy\"" ]
[]
[ "HTTPS_PROXY", "https_proxy" ]
[]
["HTTPS_PROXY", "https_proxy"]
go
2
0
digital_asset_management_api/settings/local.py
from .base import *

ALLOWED_HOSTS = ["localhost", "0.0.0.0", "127.0.0.1"]

DATABASES = {
    'default': {
        'ENGINE': os.getenv('SQL_ENGINE', 'django.db.backends.sqlite3'),
        'NAME': os.getenv('SQL_DATABASE', os.path.join(BASE_DIR, 'db.sqlite3')),
        'USER': os.getenv('SQL_USER', 'user'),
        'PASSWORD': os.getenv('SQL_PASSWORD', 'password'),
        'HOST': os.getenv('SQL_HOST', 'localhost'),
        'PORT': os.getenv('SQL_PORT', '5432'),
    }
}

ASSETS_ASSET_FILE_PART_SIZE = 60
[]
[]
[ "SQL_PASSWORD", "SQL_ENGINE", "SQL_DATABASE", "SQL_USER", "SQL_PORT", "SQL_HOST" ]
[]
["SQL_PASSWORD", "SQL_ENGINE", "SQL_DATABASE", "SQL_USER", "SQL_PORT", "SQL_HOST"]
python
6
0
loader/checkpoint_test.go
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package loader

import (
	"os"
	"strconv"

	. "github.com/pingcap/check"
	"github.com/pingcap/dm/dm/config"
)

var _ = Suite(&testCheckPointSuite{})

type testCheckPointSuite struct {
	cfg *config.SubTaskConfig
}

func (t *testCheckPointSuite) SetUpSuite(c *C) {
	host := os.Getenv("MYSQL_HOST")
	if host == "" {
		host = "127.0.0.1"
	}
	port, _ := strconv.Atoi(os.Getenv("MYSQL_PORT"))
	if port == 0 {
		port = 3306
	}
	user := os.Getenv("MYSQL_USER")
	if user == "" {
		user = "root"
	}
	pswd := os.Getenv("MYSQL_PSWD")

	t.cfg = &config.SubTaskConfig{
		To: config.DBConfig{
			Host:     host,
			User:     user,
			Password: pswd,
			Port:     port,
		},
		MetaSchema: "test",
	}
}

func (t *testCheckPointSuite) TearDownSuite(c *C) {
}

// test checkpoint's db operation
func (t *testCheckPointSuite) TestForDB(c *C) {
	cases := []struct {
		filename string
		endPos   int64
	}{
		{"db1.tbl1.sql", 123},
		{"db1.tbl2.sql", 456},
		{"db1.tbl3.sql", 789},
	}

	id := "test_for_db"
	cp, err := newRemoteCheckPoint(t.cfg, id)
	c.Assert(err, IsNil)
	defer cp.Close()

	cp.Clear()

	// no checkpoint exist
	err = cp.Load()
	c.Assert(err, IsNil)

	infos := cp.GetAllRestoringFileInfo()
	c.Assert(len(infos), Equals, 0)

	count, err := cp.Count()
	c.Assert(err, IsNil)
	c.Assert(count, Equals, 0)

	// insert default checkpoints
	for _, cs := range cases {
		err = cp.Init(cs.filename, cs.endPos)
		c.Assert(err, IsNil)
	}

	err = cp.Load()
	c.Assert(err, IsNil)

	infos = cp.GetAllRestoringFileInfo()
	c.Assert(len(infos), Equals, len(cases))
	for _, cs := range cases {
		info, ok := infos[cs.filename]
		c.Assert(ok, IsTrue)
		c.Assert(len(info), Equals, 2)
		c.Assert(info[0], Equals, int64(0))
		c.Assert(info[1], Equals, cs.endPos)
	}

	count, err = cp.Count()
	c.Assert(err, IsNil)
	c.Assert(count, Equals, len(cases))

	// update checkpoints
	conn, err := createConn(t.cfg)
	c.Assert(err, IsNil)
	defer closeConn(conn)

	for _, cs := range cases {
		sql2 := cp.GenSQL(cs.filename, cs.endPos)
		err = conn.executeSQL([]string{sql2}, true)
		c.Assert(err, IsNil)
	}

	err = cp.Load()
	c.Assert(err, IsNil)
	infos = cp.GetAllRestoringFileInfo()
	c.Assert(len(infos), Equals, len(cases))
	for _, cs := range cases {
		info, ok := infos[cs.filename]
		c.Assert(ok, IsTrue)
		c.Assert(len(info), Equals, 2)
		c.Assert(info[0], Equals, cs.endPos)
		c.Assert(info[1], Equals, cs.endPos)
	}

	count, err = cp.Count()
	c.Assert(err, IsNil)
	c.Assert(count, Equals, len(cases))

	// clear all
	cp.Clear()

	// no checkpoint exist
	err = cp.Load()
	c.Assert(err, IsNil)

	infos = cp.GetAllRestoringFileInfo()
	c.Assert(len(infos), Equals, 0)

	// obtain count again
	count, err = cp.Count()
	c.Assert(err, IsNil)
	c.Assert(count, Equals, 0)
}
[ "\"MYSQL_HOST\"", "\"MYSQL_PORT\"", "\"MYSQL_USER\"", "\"MYSQL_PSWD\"" ]
[]
[ "MYSQL_PORT", "MYSQL_USER", "MYSQL_PSWD", "MYSQL_HOST" ]
[]
["MYSQL_PORT", "MYSQL_USER", "MYSQL_PSWD", "MYSQL_HOST"]
go
4
0
main.go
package main

import (
	"io/ioutil"
	"net/http"
	"os"
	"strings"

	"html/template"

	"github.com/apex/log"
	jsonlog "github.com/apex/log/handlers/json"
	"github.com/apex/log/handlers/text"
	"github.com/gorilla/pat"
)

func init() {
	if os.Getenv("UP_STAGE") == "" {
		log.SetHandler(text.Default)
	} else {
		log.SetHandler(jsonlog.Default)
	}
}

func main() {
	addr := ":" + os.Getenv("PORT")
	app := pat.New()
	s := http.StripPrefix("/static/", http.FileServer(http.Dir("static")))
	app.PathPrefix("/static/").Handler(s)
	app.Get("/", get)
	if err := http.ListenAndServe(addr, app); err != nil {
		log.WithError(err).Fatal("error listening")
	}
}

func get(w http.ResponseWriter, r *http.Request) {
	t := template.Must(template.New("").ParseGlob("*.html"))
	content, err := ioutil.ReadFile("bins")
	if err != nil {
		http.Error(w, err.Error(), 500)
		return
	}
	bins := strings.Split(string(content), "\n")
	if r.URL.Query().Get("polyfill") != "" {
		t.ExecuteTemplate(w, "polyfill.html", bins)
	} else {
		t.ExecuteTemplate(w, "index.html", bins)
	}
}
[ "\"UP_STAGE\"", "\"PORT\"" ]
[]
[ "PORT", "UP_STAGE" ]
[]
["PORT", "UP_STAGE"]
go
2
0
swarm/swarm.go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"net/url"
	"os"
	"os/exec"
	"strings"
	"text/template"
	"time"

	"github.com/coreos/go-etcd/etcd"
	"github.com/deis/deis/tests/utils"
)

// EtcdCluster information about the nodes in the etcd cluster
type EtcdCluster struct {
	Members []etcd.Member `json:"members"`
}

// NodeStat information about the local node in etcd
type NodeStats struct {
	LeaderInfo struct {
		Name      string    `json:"leader"`
		Uptime    string    `json:"uptime"`
		StartTime time.Time `json:"startTime"`
	} `json:"leaderInfo"`
}

const (
	swarmpath               = "/deis/scheduler/swarm/node"
	swarmetcd               = "/deis/scheduler/swarm/host"
	etcdport                = "4001"
	timeout   time.Duration = 3 * time.Second
	ttl       time.Duration = timeout * 2
)

func run(cmd string) {
	var cmdBuf bytes.Buffer
	tmpl := template.Must(template.New("cmd").Parse(cmd))
	if err := tmpl.Execute(&cmdBuf, nil); err != nil {
		log.Fatal(err)
	}
	cmdString := cmdBuf.String()
	fmt.Println(cmdString)
	var cmdl *exec.Cmd
	cmdl = exec.Command("sh", "-c", cmdString)
	if _, _, err := utils.RunCommandWithStdoutStderr(cmdl); err != nil {
		log.Fatal(err)
	} else {
		fmt.Println("ok")
	}
}

func getleaderHost() string {
	var nodeStats NodeStats
	client := &http.Client{}
	resp, _ := client.Get("http://" + os.Getenv("HOST") + ":2379/v2/stats/self")
	body, _ := ioutil.ReadAll(resp.Body)
	json.Unmarshal(body, &nodeStats)
	etcdLeaderID := nodeStats.LeaderInfo.Name

	var etcdCluster EtcdCluster
	resp, _ = client.Get("http://" + os.Getenv("HOST") + ":2379/v2/members")
	defer resp.Body.Close()
	body, _ = ioutil.ReadAll(resp.Body)
	json.Unmarshal(body, &etcdCluster)
	for _, node := range etcdCluster.Members {
		if node.ID == etcdLeaderID {
			u, err := url.Parse(node.ClientURLs[0])
			if err == nil {
				return u.Host
			}
		}
	}
	return ""
}

func publishService(client *etcd.Client, host string, ttl uint64) {
	for {
		setEtcd(client, swarmetcd, host, ttl)
		time.Sleep(timeout)
	}
}

func setEtcd(client *etcd.Client, key, value string, ttl uint64) {
	_, err := client.Set(key, value, ttl)
	if err != nil && !strings.Contains(err.Error(), "Key already exists") {
		log.Println(err)
	}
}

func main() {
	etcdproto := "etcd://" + getleaderHost() + swarmpath
	etcdhost := os.Getenv("HOST")
	addr := "--addr=" + etcdhost + ":2375"
	client := etcd.NewClient([]string{"http://" + etcdhost + ":" + etcdport})
	switch os.Args[1] {
	case "join":
		run("./deis-swarm join " + addr + " " + etcdproto)
	case "manage":
		go publishService(client, etcdhost, uint64(ttl.Seconds()))
		run("./deis-swarm manage " + etcdproto)
	}
}
[ "\"HOST\"", "\"HOST\"", "\"HOST\"" ]
[]
[ "HOST" ]
[]
["HOST"]
go
1
0
github/client.go
package github import ( "bytes" "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/url" "os" "os/exec" "path" "path/filepath" "sort" "strings" "time" "github.com/hilalisadev/hub/version" ) const ( GitHubHost string = "github.com" OAuthAppURL string = "https://hub.github.com/" ) var UserAgent = "Hub " + version.Version func NewClient(h string) *Client { return NewClientWithHost(&Host{Host: h}) } func NewClientWithHost(host *Host) *Client { return &Client{Host: host} } type Client struct { Host *Host cachedClient *simpleClient } type Gist struct { Files map[string]GistFile `json:"files"` Description string `json:"description,omitempty"` Id string `json:"id,omitempty"` Public bool `json:"public"` HtmlUrl string `json:"html_url"` } type GistFile struct { Type string `json:"type,omitempty"` Language string `json:"language,omitempty"` Content string `json:"content"` RawUrl string `json:"raw_url"` } func (client *Client) FetchPullRequests(project *Project, filterParams map[string]interface{}, limit int, filter func(*PullRequest) bool) (pulls []PullRequest, err error) { api, err := client.simpleApi() if err != nil { return } path := fmt.Sprintf("repos/%s/%s/pulls?per_page=%d", project.Owner, project.Name, perPage(limit, 100)) if filterParams != nil { path = addQuery(path, filterParams) } pulls = []PullRequest{} var res *simpleResponse for path != "" { res, err = api.GetFile(path, draftsType) if err = checkStatus(200, "fetching pull requests", res, err); err != nil { return } path = res.Link("next") pullsPage := []PullRequest{} if err = res.Unmarshal(&pullsPage); err != nil { return } for _, pr := range pullsPage { if filter == nil || filter(&pr) { pulls = append(pulls, pr) if limit > 0 && len(pulls) == limit { path = "" break } } } } return } func (client *Client) PullRequest(project *Project, id string) (pr *PullRequest, err error) { api, err := client.simpleApi() if err != nil { return } res, err := api.Get(fmt.Sprintf("repos/%s/%s/pulls/%s", project.Owner, project.Name, id)) if err = checkStatus(200, "getting pull request", res, err); err != nil { return } pr = &PullRequest{} err = res.Unmarshal(pr) return } func (client *Client) PullRequestPatch(project *Project, id string) (patch io.ReadCloser, err error) { api, err := client.simpleApi() if err != nil { return } res, err := api.GetFile(fmt.Sprintf("repos/%s/%s/pulls/%s", project.Owner, project.Name, id), patchMediaType) if err = checkStatus(200, "getting pull request patch", res, err); err != nil { return } return res.Body, nil } func (client *Client) CreatePullRequest(project *Project, params map[string]interface{}) (pr *PullRequest, err error) { api, err := client.simpleApi() if err != nil { return } res, err := api.PostJSONPreview(fmt.Sprintf("repos/%s/%s/pulls", project.Owner, project.Name), params, draftsType) if err = checkStatus(201, "creating pull request", res, err); err != nil { if res != nil && res.StatusCode == 404 { projectUrl := strings.SplitN(project.WebURL("", "", ""), "://", 2)[1] err = fmt.Errorf("%s\nAre you sure that %s exists?", err, projectUrl) } return } pr = &PullRequest{} err = res.Unmarshal(pr) return } func (client *Client) RequestReview(project *Project, prNumber int, params map[string]interface{}) (err error) { api, err := client.simpleApi() if err != nil { return } res, err := api.PostJSON(fmt.Sprintf("repos/%s/%s/pulls/%d/requested_reviewers", project.Owner, project.Name, prNumber), params) if err = checkStatus(201, "requesting reviewer", res, err); err != nil { return } res.Body.Close() return } func 
(client *Client) CommitPatch(project *Project, sha string) (patch io.ReadCloser, err error) { api, err := client.simpleApi() if err != nil { return } res, err := api.GetFile(fmt.Sprintf("repos/%s/%s/commits/%s", project.Owner, project.Name, sha), patchMediaType) if err = checkStatus(200, "getting commit patch", res, err); err != nil { return } return res.Body, nil } func (client *Client) GistPatch(id string) (patch io.ReadCloser, err error) { api, err := client.simpleApi() if err != nil { return } res, err := api.Get(fmt.Sprintf("gists/%s", id)) if err = checkStatus(200, "getting gist patch", res, err); err != nil { return } gist := Gist{} if err = res.Unmarshal(&gist); err != nil { return } rawUrl := "" for _, file := range gist.Files { rawUrl = file.RawUrl break } res, err = api.GetFile(rawUrl, textMediaType) if err = checkStatus(200, "getting gist patch", res, err); err != nil { return } return res.Body, nil } func (client *Client) Repository(project *Project) (repo *Repository, err error) { api, err := client.simpleApi() if err != nil { return } res, err := api.Get(fmt.Sprintf("repos/%s/%s", project.Owner, project.Name)) if err = checkStatus(200, "getting repository info", res, err); err != nil { return } repo = &Repository{} err = res.Unmarshal(&repo) return } func (client *Client) CreateRepository(project *Project, description, homepage string, isPrivate bool) (repo *Repository, err error) { repoURL := "user/repos" if project.Owner != client.Host.User { repoURL = fmt.Sprintf("orgs/%s/repos", project.Owner) } params := map[string]interface{}{ "name": project.Name, "description": description, "homepage": homepage, "private": isPrivate, } api, err := client.simpleApi() if err != nil { return } res, err := api.PostJSON(repoURL, params) if err = checkStatus(201, "creating repository", res, err); err != nil { return } repo = &Repository{} err = res.Unmarshal(repo) return } func (client *Client) DeleteRepository(project *Project) error { api, err := client.simpleApi() if err != nil { return err } repoURL := fmt.Sprintf("repos/%s/%s", project.Owner, project.Name) res, err := api.Delete(repoURL) return checkStatus(204, "deleting repository", res, err) } type Release struct { Name string `json:"name"` TagName string `json:"tag_name"` TargetCommitish string `json:"target_commitish"` Body string `json:"body"` Draft bool `json:"draft"` Prerelease bool `json:"prerelease"` Assets []ReleaseAsset `json:"assets"` TarballUrl string `json:"tarball_url"` ZipballUrl string `json:"zipball_url"` HtmlUrl string `json:"html_url"` UploadUrl string `json:"upload_url"` ApiUrl string `json:"url"` CreatedAt time.Time `json:"created_at"` PublishedAt time.Time `json:"published_at"` } type ReleaseAsset struct { Name string `json:"name"` Label string `json:"label"` DownloadUrl string `json:"browser_download_url"` ApiUrl string `json:"url"` } func (client *Client) FetchReleases(project *Project, limit int, filter func(*Release) bool) (releases []Release, err error) { api, err := client.simpleApi() if err != nil { return } path := fmt.Sprintf("repos/%s/%s/releases?per_page=%d", project.Owner, project.Name, perPage(limit, 100)) releases = []Release{} var res *simpleResponse for path != "" { res, err = api.Get(path) if err = checkStatus(200, "fetching releases", res, err); err != nil { return } path = res.Link("next") releasesPage := []Release{} if err = res.Unmarshal(&releasesPage); err != nil { return } for _, release := range releasesPage { if filter == nil || filter(&release) { releases = append(releases, release) if 
limit > 0 && len(releases) == limit { path = "" break } } } } return } func (client *Client) FetchRelease(project *Project, tagName string) (*Release, error) { releases, err := client.FetchReleases(project, 100, func(release *Release) bool { return release.TagName == tagName }) if err == nil { if len(releases) < 1 { return nil, fmt.Errorf("Unable to find release with tag name `%s'", tagName) } else { return &releases[0], nil } } else { return nil, err } } func (client *Client) CreateRelease(project *Project, releaseParams *Release) (release *Release, err error) { api, err := client.simpleApi() if err != nil { return } res, err := api.PostJSON(fmt.Sprintf("repos/%s/%s/releases", project.Owner, project.Name), releaseParams) if err = checkStatus(201, "creating release", res, err); err != nil { return } release = &Release{} err = res.Unmarshal(release) return } func (client *Client) EditRelease(release *Release, releaseParams map[string]interface{}) (updatedRelease *Release, err error) { api, err := client.simpleApi() if err != nil { return } res, err := api.PatchJSON(release.ApiUrl, releaseParams) if err = checkStatus(200, "editing release", res, err); err != nil { return } updatedRelease = &Release{} err = res.Unmarshal(updatedRelease) return } func (client *Client) DeleteRelease(release *Release) (err error) { api, err := client.simpleApi() if err != nil { return } res, err := api.Delete(release.ApiUrl) if err = checkStatus(204, "deleting release", res, err); err != nil { return } return } type LocalAsset struct { Name string Label string Contents io.Reader Size int64 } func (client *Client) UploadReleaseAssets(release *Release, assets []LocalAsset) (doneAssets []*ReleaseAsset, err error) { api, err := client.simpleApi() if err != nil { return } idx := strings.Index(release.UploadUrl, "{") uploadURL := release.UploadUrl[0:idx] for _, asset := range assets { for _, existingAsset := range release.Assets { if existingAsset.Name == asset.Name { if err = client.DeleteReleaseAsset(&existingAsset); err != nil { return } break } } params := map[string]interface{}{"name": filepath.Base(asset.Name)} if asset.Label != "" { params["label"] = asset.Label } uploadPath := addQuery(uploadURL, params) var res *simpleResponse attempts := 0 maxAttempts := 3 body := asset.Contents for { res, err = api.PostFile(uploadPath, body, asset.Size) if err == nil && res.StatusCode >= 500 && res.StatusCode < 600 && attempts < maxAttempts { attempts++ time.Sleep(time.Second * time.Duration(attempts)) var f *os.File f, err = os.Open(asset.Name) if err != nil { return } defer f.Close() body = f continue } if err = checkStatus(201, "uploading release asset", res, err); err != nil { return } break } newAsset := ReleaseAsset{} err = res.Unmarshal(&newAsset) if err != nil { return } doneAssets = append(doneAssets, &newAsset) } return } func (client *Client) DeleteReleaseAsset(asset *ReleaseAsset) (err error) { api, err := client.simpleApi() if err != nil { return } res, err := api.Delete(asset.ApiUrl) err = checkStatus(204, "deleting release asset", res, err) return } func (client *Client) DownloadReleaseAsset(url string) (asset io.ReadCloser, err error) { api, err := client.simpleApi() if err != nil { return } resp, err := api.GetFile(url, "application/octet-stream") if err = checkStatus(200, "downloading asset", resp, err); err != nil { return } return resp.Body, err } type CIStatusResponse struct { State string `json:"state"` Statuses []CIStatus `json:"statuses"` } type CIStatus struct { State string `json:"state"` Context 
string `json:"context"` TargetUrl string `json:"target_url"` } type CheckRunsResponse struct { CheckRuns []CheckRun `json:"check_runs"` } type CheckRun struct { Status string `json:"status"` Conclusion string `json:"conclusion"` Name string `json:"name"` HtmlUrl string `json:"html_url"` } func (client *Client) FetchCIStatus(project *Project, sha string) (status *CIStatusResponse, err error) { api, err := client.simpleApi() if err != nil { return } res, err := api.Get(fmt.Sprintf("repos/%s/%s/commits/%s/status", project.Owner, project.Name, sha)) if err = checkStatus(200, "fetching statuses", res, err); err != nil { return } status = &CIStatusResponse{} if err = res.Unmarshal(status); err != nil { return } sortStatuses := func() { sort.Slice(status.Statuses, func(a, b int) bool { sA := status.Statuses[a] sB := status.Statuses[b] cmp := strings.Compare(strings.ToLower(sA.Context), strings.ToLower(sB.Context)) if cmp == 0 { return strings.Compare(sA.TargetUrl, sB.TargetUrl) < 0 } else { return cmp < 0 } }) } sortStatuses() res, err = api.GetFile(fmt.Sprintf("repos/%s/%s/commits/%s/check-runs", project.Owner, project.Name, sha), checksType) if err == nil && (res.StatusCode == 403 || res.StatusCode == 404 || res.StatusCode == 422) { return } if err = checkStatus(200, "fetching checks", res, err); err != nil { return } checks := &CheckRunsResponse{} if err = res.Unmarshal(checks); err != nil { return } for _, checkRun := range checks.CheckRuns { state := "pending" if checkRun.Status == "completed" { state = checkRun.Conclusion } checkStatus := CIStatus{ State: state, Context: checkRun.Name, TargetUrl: checkRun.HtmlUrl, } status.Statuses = append(status.Statuses, checkStatus) } sortStatuses() return } type Repository struct { Name string `json:"name"` FullName string `json:"full_name"` Parent *Repository `json:"parent"` Owner *User `json:"owner"` Private bool `json:"private"` HasWiki bool `json:"has_wiki"` Permissions *RepositoryPermissions `json:"permissions"` HtmlUrl string `json:"html_url"` DefaultBranch string `json:"default_branch"` } type RepositoryPermissions struct { Admin bool `json:"admin"` Push bool `json:"push"` Pull bool `json:"pull"` } func (client *Client) ForkRepository(project *Project, params map[string]interface{}) (repo *Repository, err error) { api, err := client.simpleApi() if err != nil { return } res, err := api.PostJSON(fmt.Sprintf("repos/%s/%s/forks", project.Owner, project.Name), params) if err = checkStatus(202, "creating fork", res, err); err != nil { return } repo = &Repository{} err = res.Unmarshal(repo) return } type Comment struct { Id int `json:"id"` Body string `json:"body"` User *User `json:"user"` CreatedAt time.Time `json:"created_at"` } type Issue struct { Number int `json:"number"` State string `json:"state"` Title string `json:"title"` Body string `json:"body"` User *User `json:"user"` PullRequest *PullRequest `json:"pull_request"` Head *PullRequestSpec `json:"head"` Base *PullRequestSpec `json:"base"` MergeCommitSha string `json:"merge_commit_sha"` MaintainerCanModify bool `json:"maintainer_can_modify"` Draft bool `json:"draft"` Comments int `json:"comments"` Labels []IssueLabel `json:"labels"` Assignees []User `json:"assignees"` Milestone *Milestone `json:"milestone"` CreatedAt time.Time `json:"created_at"` UpdatedAt time.Time `json:"updated_at"` MergedAt time.Time `json:"merged_at"` RequestedReviewers []User `json:"requested_reviewers"` RequestedTeams []Team `json:"requested_teams"` ApiUrl string `json:"url"` HtmlUrl string `json:"html_url"` ClosedBy 
*User `json:"closed_by"` } type PullRequest Issue type PullRequestSpec struct { Label string `json:"label"` Ref string `json:"ref"` Sha string `json:"sha"` Repo *Repository `json:"repo"` } func (pr *PullRequest) IsSameRepo() bool { return pr.Head != nil && pr.Head.Repo != nil && pr.Head.Repo.Name == pr.Base.Repo.Name && pr.Head.Repo.Owner.Login == pr.Base.Repo.Owner.Login } func (pr *PullRequest) HasRequestedReviewer(name string) bool { for _, user := range pr.RequestedReviewers { if strings.EqualFold(user.Login, name) { return true } } return false } func (pr *PullRequest) HasRequestedTeam(name string) bool { for _, team := range pr.RequestedTeams { if strings.EqualFold(team.Slug, name) { return true } } return false } type IssueLabel struct { Name string `json:"name"` Color string `json:"color"` } type User struct { Login string `json:"login"` } type Team struct { Name string `json:"name"` Slug string `json:"slug"` } type Milestone struct { Number int `json:"number"` Title string `json:"title"` } func (client *Client) FetchIssues(project *Project, filterParams map[string]interface{}, limit int, filter func(*Issue) bool) (issues []Issue, err error) { api, err := client.simpleApi() if err != nil { return } path := fmt.Sprintf("repos/%s/%s/issues?per_page=%d", project.Owner, project.Name, perPage(limit, 100)) if filterParams != nil { path = addQuery(path, filterParams) } issues = []Issue{} var res *simpleResponse for path != "" { res, err = api.Get(path) if err = checkStatus(200, "fetching issues", res, err); err != nil { return } path = res.Link("next") issuesPage := []Issue{} if err = res.Unmarshal(&issuesPage); err != nil { return } for _, issue := range issuesPage { if filter == nil || filter(&issue) { issues = append(issues, issue) if limit > 0 && len(issues) == limit { path = "" break } } } } return } func (client *Client) FetchIssue(project *Project, number string) (issue *Issue, err error) { api, err := client.simpleApi() if err != nil { return } res, err := api.Get(fmt.Sprintf("repos/%s/%s/issues/%s", project.Owner, project.Name, number)) if err = checkStatus(200, "fetching issue", res, err); err != nil { return nil, err } issue = &Issue{} err = res.Unmarshal(issue) return } func (client *Client) FetchComments(project *Project, number string) (comments []Comment, err error) { api, err := client.simpleApi() if err != nil { return } res, err := api.Get(fmt.Sprintf("repos/%s/%s/issues/%s/comments", project.Owner, project.Name, number)) if err = checkStatus(200, "fetching comments for issue", res, err); err != nil { return nil, err } comments = []Comment{} err = res.Unmarshal(&comments) return } func (client *Client) CreateIssue(project *Project, params interface{}) (issue *Issue, err error) { api, err := client.simpleApi() if err != nil { return } res, err := api.PostJSON(fmt.Sprintf("repos/%s/%s/issues", project.Owner, project.Name), params) if err = checkStatus(201, "creating issue", res, err); err != nil { return } issue = &Issue{} err = res.Unmarshal(issue) return } func (client *Client) UpdateIssue(project *Project, issueNumber int, params map[string]interface{}) (err error) { api, err := client.simpleApi() if err != nil { return } res, err := api.PatchJSON(fmt.Sprintf("repos/%s/%s/issues/%d", project.Owner, project.Name, issueNumber), params) if err = checkStatus(200, "updating issue", res, err); err != nil { return } res.Body.Close() return } type sortedLabels []IssueLabel func (s sortedLabels) Len() int { return len(s) } func (s sortedLabels) Swap(i, j int) { s[i], s[j] = 
s[j], s[i] } func (s sortedLabels) Less(i, j int) bool { return strings.Compare(strings.ToLower(s[i].Name), strings.ToLower(s[j].Name)) < 0 } func (client *Client) FetchLabels(project *Project) (labels []IssueLabel, err error) { api, err := client.simpleApi() if err != nil { return } path := fmt.Sprintf("repos/%s/%s/labels?per_page=100", project.Owner, project.Name) labels = []IssueLabel{} var res *simpleResponse for path != "" { res, err = api.Get(path) if err = checkStatus(200, "fetching labels", res, err); err != nil { return } path = res.Link("next") labelsPage := []IssueLabel{} if err = res.Unmarshal(&labelsPage); err != nil { return } labels = append(labels, labelsPage...) } sort.Sort(sortedLabels(labels)) return } func (client *Client) FetchMilestones(project *Project) (milestones []Milestone, err error) { api, err := client.simpleApi() if err != nil { return } path := fmt.Sprintf("repos/%s/%s/milestones?per_page=100", project.Owner, project.Name) milestones = []Milestone{} var res *simpleResponse for path != "" { res, err = api.Get(path) if err = checkStatus(200, "fetching milestones", res, err); err != nil { return } path = res.Link("next") milestonesPage := []Milestone{} if err = res.Unmarshal(&milestonesPage); err != nil { return } milestones = append(milestones, milestonesPage...) } return } func (client *Client) GenericAPIRequest(method, path string, data interface{}, headers map[string]string, ttl int) (*simpleResponse, error) { api, err := client.simpleApi() if err != nil { return nil, err } api.CacheTTL = ttl var body io.Reader switch d := data.(type) { case map[string]interface{}: if method == "GET" { path = addQuery(path, d) } else if len(d) > 0 { json, err := json.Marshal(d) if err != nil { return nil, err } body = bytes.NewBuffer(json) } case io.Reader: body = d } return api.performRequest(method, path, body, func(req *http.Request) { if body != nil { req.Header.Set("Content-Type", "application/json; charset=utf-8") } for key, value := range headers { req.Header.Set(key, value) } }) } func (client *Client) CurrentUser() (user *User, err error) { api, err := client.simpleApi() if err != nil { return } res, err := api.Get("user") if err = checkStatus(200, "getting current user", res, err); err != nil { return } user = &User{} err = res.Unmarshal(user) return } type AuthorizationEntry struct { Token string `json:"token"` } func isToken(api *simpleClient, password string) bool { api.PrepareRequest = func(req *http.Request) { req.Header.Set("Authorization", "token "+password) } res, _ := api.Get("user") if res != nil && res.StatusCode == 200 { return true } return false } func (client *Client) FindOrCreateToken(user, password, twoFactorCode string) (token string, err error) { api := client.apiClient() if len(password) >= 40 && isToken(api, password) { return password, nil } params := map[string]interface{}{ "scopes": []string{"repo", "gist"}, "note_url": OAuthAppURL, } api.PrepareRequest = func(req *http.Request) { req.SetBasicAuth(user, password) if twoFactorCode != "" { req.Header.Set("X-GitHub-OTP", twoFactorCode) } } count := 1 maxTries := 9 for { params["note"], err = authTokenNote(count) if err != nil { return } res, postErr := api.PostJSON("authorizations", params) if postErr != nil { err = postErr break } if res.StatusCode == 201 { auth := &AuthorizationEntry{} if err = res.Unmarshal(auth); err != nil { return } token = auth.Token break } else if res.StatusCode == 422 && count < maxTries { count++ } else { errInfo, e := res.ErrorInfo() if e == nil { err = errInfo } 
else { err = e } return } } return } func (client *Client) ensureAccessToken() error { if client.Host.AccessToken == "" { host, err := CurrentConfig().PromptForHost(client.Host.Host) if err != nil { return err } client.Host = host } return nil } func (client *Client) simpleApi() (c *simpleClient, err error) { err = client.ensureAccessToken() if err != nil { return } if client.cachedClient != nil { c = client.cachedClient return } c = client.apiClient() c.PrepareRequest = func(req *http.Request) { clientDomain := normalizeHost(client.Host.Host) if strings.HasPrefix(clientDomain, "api.github.") { clientDomain = strings.TrimPrefix(clientDomain, "api.") } requestHost := strings.ToLower(req.URL.Host) if requestHost == clientDomain || strings.HasSuffix(requestHost, "."+clientDomain) { req.Header.Set("Authorization", "token "+client.Host.AccessToken) } } client.cachedClient = c return } func (client *Client) apiClient() *simpleClient { unixSocket := os.ExpandEnv(client.Host.UnixSocket) httpClient := newHttpClient(os.Getenv("HUB_TEST_HOST"), os.Getenv("HUB_VERBOSE") != "", unixSocket) apiRoot := client.absolute(normalizeHost(client.Host.Host)) if !strings.HasPrefix(apiRoot.Host, "api.github.") { apiRoot.Path = "/api/v3/" } return &simpleClient{ httpClient: httpClient, rootUrl: apiRoot, } } func (client *Client) absolute(host string) *url.URL { u, err := url.Parse("https://" + host + "/") if err != nil { panic(err) } else if client.Host != nil && client.Host.Protocol != "" { u.Scheme = client.Host.Protocol } return u } func (client *Client) FetchGist(id string) (gist *Gist, err error) { api, err := client.simpleApi() if err != nil { return } response, err := api.Get(fmt.Sprintf("gists/%s", id)) if err = checkStatus(200, "getting gist", response, err); err != nil { return } response.Unmarshal(&gist) return } func (client *Client) CreateGist(filenames []string, public bool) (gist *Gist, err error) { api, err := client.simpleApi() if err != nil { return } files := map[string]GistFile{} var basename string var content []byte var gf GistFile for _, file := range filenames { if file == "-" { content, err = ioutil.ReadAll(os.Stdin) basename = "gistfile1.txt" } else { content, err = ioutil.ReadFile(file) basename = path.Base(file) } if err != nil { return } gf = GistFile{Content: string(content)} files[basename] = gf } g := Gist{ Files: files, Public: public, } res, err := api.PostJSON("gists", &g) if err = checkStatus(201, "creating gist", res, err); err != nil { if res != nil && res.StatusCode == 404 && !strings.Contains(res.Header.Get("x-oauth-scopes"), "gist") { err = fmt.Errorf("%s\nGo to https://%s/settings/tokens and enable the 'gist' scope for hub", err, client.Host.Host) } return } err = res.Unmarshal(&gist) return } func normalizeHost(host string) string { if host == "" { return GitHubHost } else if strings.EqualFold(host, GitHubHost) { return "api.github.com" } else if strings.EqualFold(host, "github.localhost") { return "api.github.localhost" } else { return strings.ToLower(host) } } func checkStatus(expectedStatus int, action string, response *simpleResponse, err error) error { if err != nil { return fmt.Errorf("Error %s: %s", action, err.Error()) } else if response.StatusCode != expectedStatus { errInfo, err := response.ErrorInfo() if err == nil { return FormatError(action, errInfo) } else { return fmt.Errorf("Error %s: %s (HTTP %d)", action, err.Error(), response.StatusCode) } } else { return nil } } func FormatError(action string, err error) (ee error) { switch e := err.(type) { default: ee 
= err case *errorInfo: statusCode := e.Response.StatusCode var reason string if s := strings.SplitN(e.Response.Status, " ", 2); len(s) >= 2 { reason = strings.TrimSpace(s[1]) } errStr := fmt.Sprintf("Error %s: %s (HTTP %d)", action, reason, statusCode) var errorSentences []string for _, err := range e.Errors { switch err.Code { case "custom": errorSentences = append(errorSentences, err.Message) case "missing_field": errorSentences = append(errorSentences, fmt.Sprintf("Missing field: \"%s\"", err.Field)) case "already_exists": errorSentences = append(errorSentences, fmt.Sprintf("Duplicate value for \"%s\"", err.Field)) case "invalid": errorSentences = append(errorSentences, fmt.Sprintf("Invalid value for \"%s\"", err.Field)) case "unauthorized": errorSentences = append(errorSentences, fmt.Sprintf("Not allowed to change field \"%s\"", err.Field)) } } var errorMessage string if len(errorSentences) > 0 { errorMessage = strings.Join(errorSentences, "\n") } else { errorMessage = e.Message if action == "getting current user" && e.Message == "Resource not accessible by integration" { errorMessage = errorMessage + "\nYou must specify GITHUB_USER via environment variable." } } if errorMessage != "" { errStr = fmt.Sprintf("%s\n%s", errStr, errorMessage) } ee = fmt.Errorf(errStr) } return } func authTokenNote(num int) (string, error) { n := os.Getenv("USER") if n == "" { n = os.Getenv("USERNAME") } if n == "" { whoami := exec.Command("whoami") whoamiOut, err := whoami.Output() if err != nil { return "", err } n = strings.TrimSpace(string(whoamiOut)) } h, err := os.Hostname() if err != nil { return "", err } if num > 1 { return fmt.Sprintf("hub for %s@%s %d", n, h, num), nil } return fmt.Sprintf("hub for %s@%s", n, h), nil } func perPage(limit, max int) int { if limit > 0 { limit = limit + (limit / 2) if limit < max { return limit } } return max } func addQuery(path string, params map[string]interface{}) string { if len(params) == 0 { return path } query := url.Values{} for key, value := range params { switch v := value.(type) { case string: query.Add(key, v) case nil: query.Add(key, "") case int: query.Add(key, fmt.Sprintf("%d", v)) case bool: query.Add(key, fmt.Sprintf("%v", v)) } } sep := "?" if strings.Contains(path, sep) { sep = "&" } return path + sep + query.Encode() }
[ "\"HUB_TEST_HOST\"", "\"HUB_VERBOSE\"", "\"USER\"", "\"USERNAME\"" ]
[]
[ "USER", "USERNAME", "HUB_VERBOSE", "HUB_TEST_HOST" ]
[]
["USER", "USERNAME", "HUB_VERBOSE", "HUB_TEST_HOST"]
go
4
0
mail/mail.go
package mail

import (
	"os"
	"strconv"

	log "github.com/meifamily/logrus"

	"github.com/meifamily/ptt-alertor/models/article"

	"gopkg.in/mailgun/mailgun-go.v1"
)

type Mail struct {
	Title
	Body
	Receiver string
}

type Title struct {
	BoardName       string
	Keyword         string
	articleQuantity int
}

type Body struct {
	Articles article.Articles
}

func (title Title) String() string {
	return "[PttAlertor] 在 " + title.BoardName + " 板有 " + strconv.Itoa(title.articleQuantity) + " 篇關於「" + title.Keyword + "」的文章發表"
}

func (body Body) String() string {
	return body.Articles.String() + "\r\n\r\nSend From Ptt Alertor"
}

func (mail Mail) Send() {
	mg := newMailgun()
	mail.articleQuantity = len(mail.Body.Articles)
	message := mailgun.NewMessage(
		"[email protected]",
		mail.Title.String(),
		mail.Body.String(),
		mail.Receiver)
	resp, id, err := mg.Send(message)
	if err != nil {
		log.WithError(err).Error("Sent Email Failed")
	} else {
		log.WithFields(log.Fields{
			"ID":   id,
			"Resp": resp,
		}).Info("Sent Email")
	}
}

var (
	domain       = os.Getenv("MAILGUN_DOMAIN")
	apiKey       = os.Getenv("MAILGUN_APIKEY")
	publicAPIKey = os.Getenv("MAILGUN_PUBLIC_APIKEY")
)

func newMailgun() mailgun.Mailgun {
	return mailgun.NewMailgun(domain, apiKey, publicAPIKey)
}
[ "\"MAILGUN_DOMAIN\"", "\"MAILGUN_APIKEY\"", "\"MAILGUN_PUBLIC_APIKEY\"" ]
[]
[ "MAILGUN_APIKEY", "MAILGUN_PUBLIC_APIKEY", "MAILGUN_DOMAIN" ]
[]
["MAILGUN_APIKEY", "MAILGUN_PUBLIC_APIKEY", "MAILGUN_DOMAIN"]
go
3
0
config.py
import os


class Config:
    SECRET_KEY = os.environ.get('SECRET_KEY')
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    UPLOADED_PHOTOS_DEST = 'app/static/img'


class TestConfig(Config):
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://habbiba:zully1234@localhost/blog'


class DevConfig(Config):
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://habbiba:zully1234@localhost/blog'
    DEBUG = True


class ProdConfig(Config):
    '''
    Production configuration child class

    Args:
        Config: The parent configuration class with General configuration settings
    '''
    SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")


config_options = {
    'development': DevConfig,
    'production': ProdConfig,
    'test': TestConfig
}
[]
[]
[ "SECRET_KEY", "DATABASE_URL" ]
[]
["SECRET_KEY", "DATABASE_URL"]
python
2
0
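The config.py row above records SECRET_KEY and DATABASE_URL as the environment variables the file reads, both fetched at import time via os.environ.get. The following is a minimal usage sketch, assuming Flask is installed, that config.py is importable from the project root, and that the fallback values and the create_app factory are hypothetical; it only illustrates that the variables must be set before the module is imported.

import os

# The Config classes read the environment when config.py is imported, so the
# values must be exported (or defaulted, as here) beforehand.
os.environ.setdefault("SECRET_KEY", "dev-only-secret")      # hypothetical fallback
os.environ.setdefault("DATABASE_URL", "sqlite:///dev.db")   # hypothetical fallback

from flask import Flask

from config import config_options


def create_app(config_name="production"):
    # Hypothetical app factory: apply one of the config classes by name.
    app = Flask(__name__)
    app.config.from_object(config_options[config_name])
    return app


if __name__ == "__main__":
    create_app("development").run(debug=True)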
my_zoo/train_zoo.py
import os import time import difflib import argparse import importlib from pprint import pprint from collections import OrderedDict import gym import numpy as np import yaml # Optional dependencies try: import pybullet_envs except ImportError: pybullet_envs = None try: import highway_env except ImportError: highway_env = None try: import mpi4py from mpi4py import MPI except ImportError: mpi4py = None from stable_baselines.common import set_global_seeds from stable_baselines.common.cmd_util import make_atari_env from stable_baselines.common.vec_env import VecFrameStack, SubprocVecEnv, VecNormalize, DummyVecEnv from stable_baselines.common.noise import AdaptiveParamNoiseSpec, NormalActionNoise, OrnsteinUhlenbeckActionNoise from stable_baselines.ppo2.ppo2 import constfn from zoo.utils import make_env, ALGOS, linear_schedule, get_latest_run_id, get_wrapper_class, find_saved_model from zoo.utils.hyperparams_opt import hyperparam_optimization from zoo.utils.noise import LinearNormalActionNoise if __name__ == '__main__': #----------- Supress Tensorflow version warnings---------------------- os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'} import warnings # https://stackoverflow.com/questions/15777951/how-to-suppress-pandas-future-warning warnings.simplefilter(action='ignore', category=FutureWarning) warnings.simplefilter(action='ignore', category=Warning) import tensorflow as tf tf.get_logger().setLevel('INFO') tf.autograph.set_verbosity(0) import logging tf.get_logger().setLevel(logging.ERROR) #----------------------------------------------------------------------- parser = argparse.ArgumentParser() parser.add_argument('--env', type=str, nargs='+', default=["CartPole-v1"], help='environment ID(s)') parser.add_argument('-tb', '--tensorboard-log', help='Tensorboard log dir', default='', type=str) parser.add_argument('-i', '--trained-agent', help='Path to a pretrained agent to continue training', default='', type=str) parser.add_argument('--algo', help='RL Algorithm', default='ppo2', type=str, required=False, choices=list(ALGOS.keys())) parser.add_argument('-n', '--n-timesteps', help='Overwrite the number of timesteps', default=-1, type=int) parser.add_argument('--log-interval', help='Override log interval (default: -1, no change)', default=-1, type=int) parser.add_argument('-f', '--log-folder', help='Log folder', type=str, default='logs') parser.add_argument('--seed', help='Random generator seed', type=int, default=0) parser.add_argument('--n-trials', help='Number of trials for optimizing hyperparameters', type=int, default=10) parser.add_argument('-optimize', '--optimize-hyperparameters', action='store_true', default=False, help='Run hyperparameters search') parser.add_argument('--n-jobs', help='Number of parallel jobs when optimizing hyperparameters', type=int, default=1) parser.add_argument('--sampler', help='Sampler to use when optimizing hyperparameters', type=str, default='tpe', choices=['random', 'tpe', 'skopt']) parser.add_argument('--pruner', help='Pruner to use when optimizing hyperparameters', type=str, default='median', choices=['halving', 'median', 'none']) parser.add_argument('--verbose', help='Verbose mode (0: no output, 1: INFO)', default=1, type=int) parser.add_argument('--gym-packages', type=str, nargs='+', default=[], help='Additional external Gym environemnt package modules to import (e.g. 
gym_minigrid)') args = parser.parse_args() # Going through custom gym packages to let them register in the global registory for env_module in args.gym_packages: importlib.import_module(env_module) env_ids = args.env registered_envs = set(gym.envs.registry.env_specs.keys()) for env_id in env_ids: # If the environment is not found, suggest the closest match if env_id not in registered_envs: try: closest_match = difflib.get_close_matches(env_id, registered_envs, n=1)[0] except IndexError: closest_match = "'no close match found...'" raise ValueError('{} not found in gym registry, you maybe meant {}?'.format(env_id, closest_match)) set_global_seeds(args.seed) if args.trained_agent != "": valid_extension = args.trained_agent.endswith('.pkl') or args.trained_agent.endswith('.zip') assert valid_extension and os.path.isfile(args.trained_agent), \ "The trained_agent must be a valid path to a .zip/.pkl file" # if we run in multi process, the code runs for each worker and the worker can get its rank as the below rank = 0 if mpi4py is not None and MPI.COMM_WORLD.Get_size() > 1: print("Using MPI for multiprocessing with {} workers".format(MPI.COMM_WORLD.Get_size())) rank = MPI.COMM_WORLD.Get_rank() print("Worker rank: {}".format(rank)) # make sure that each worker has its own seed args.seed += rank # we allow only one worker to "speak" if rank != 0: args.verbose = 0 args.tensorboard_log = '' # now we start train a model for each of the environment in the list for env_id in env_ids: tensorboard_log = None if args.tensorboard_log == '' else os.path.join(args.tensorboard_log, env_id) is_atari = False if 'NoFrameskip' in env_id: is_atari = True print("=" * 10, env_id, "=" * 10) # Load hyperparameters from yaml file with open('hyperparams/{}.yml'.format(args.algo), 'r') as f: hyperparams_dict = yaml.load(f) if env_id in list(hyperparams_dict.keys()): hyperparams = hyperparams_dict[env_id] elif is_atari: hyperparams = hyperparams_dict['atari'] else: raise ValueError("Hyperparameters not found for {}-{}".format(args.algo, env_id)) # Sort hyperparams that will be saved saved_hyperparams = OrderedDict([(key, hyperparams[key]) for key in sorted(hyperparams.keys())]) algo_ = args.algo # HER is only a wrapper around an algo if args.algo == 'her': algo_ = saved_hyperparams['model_class'] assert algo_ in {'sac', 'ddpg', 'dqn', 'td3'}, "{} is not compatible with HER".format(algo_) # Retrieve the model class hyperparams['model_class'] = ALGOS[saved_hyperparams['model_class']] if hyperparams['model_class'] is None: raise ValueError('{} requires MPI to be installed'.format(algo_)) if args.verbose > 0: pprint(saved_hyperparams) ############################ # Build the Env n_envs = hyperparams.get('n_envs', 1) if args.verbose > 0: print("Using {} environments".format(n_envs)) # Delete keys so the dict can be pass to the model constructor if 'n_envs' in hyperparams.keys(): del hyperparams['n_envs'] normalize = False normalize_kwargs = {} if 'normalize' in hyperparams.keys(): normalize = hyperparams['normalize'] if isinstance(normalize, str): normalize_kwargs = eval(normalize) normalize = True del hyperparams['normalize'] # obtain a class object from a wrapper name string in hyperparams # and delete the entry env_wrapper = get_wrapper_class(hyperparams) if 'env_wrapper' in hyperparams.keys(): del hyperparams['env_wrapper'] def create_env(n_envs): """ Create the environment and wrap it if necessary :param n_envs: (int) :return: (gym.Env) """ global hyperparams if is_atari: if args.verbose > 0: print("Using Atari wrapper") env 
= make_atari_env(env_id, num_env=n_envs, seed=args.seed) # Frame-stacking with 4 frames env = VecFrameStack(env, n_stack=4) elif algo_ in ['dqn', 'ddpg']: if hyperparams.get('normalize', False): print("WARNING: normalization not supported yet for DDPG/DQN") env = gym.make(env_id) env.seed(args.seed) if env_wrapper is not None: env = env_wrapper(env) else: if n_envs == 1: env = DummyVecEnv([make_env(env_id, 0, args.seed, wrapper_class=env_wrapper)]) else: # env = SubprocVecEnv([make_env(env_id, i, args.seed) for i in range(n_envs)]) # On most env, SubprocVecEnv does not help and is quite memory hungry env = DummyVecEnv([make_env(env_id, i, args.seed, wrapper_class=env_wrapper) for i in range(n_envs)]) if normalize: if args.verbose > 0: if len(normalize_kwargs) > 0: print("Normalization activated: {}".format(normalize_kwargs)) else: print("Normalizing input and reward") env = VecNormalize(env, **normalize_kwargs) # Optional Frame-stacking if hyperparams.get('frame_stack', False): n_stack = hyperparams['frame_stack'] env = VecFrameStack(env, n_stack) print("Stacking {} frames".format(n_stack)) del hyperparams['frame_stack'] return env env = create_env(n_envs) # Stop env processes to free memory if args.optimize_hyperparameters and n_envs > 1: env.close() ############################ # Build the Agent # Create learning rate schedules for ppo2 and sac if algo_ in ["ppo2", "sac", "td3"]: for key in ['learning_rate', 'cliprange', 'cliprange_vf']: if key not in hyperparams: continue if isinstance(hyperparams[key], str): schedule, initial_value = hyperparams[key].split('_') initial_value = float(initial_value) hyperparams[key] = linear_schedule(initial_value) elif isinstance(hyperparams[key], (float, int)): # Negative value: ignore (ex: for clipping) if hyperparams[key] < 0: continue hyperparams[key] = constfn(float(hyperparams[key])) else: raise ValueError('Invalid value for {}: {}'.format(key, hyperparams[key])) # Should we overwrite the number of timesteps? 
if args.n_timesteps > 0: if args.verbose: print("Overwriting n_timesteps with n={}".format(args.n_timesteps)) n_timesteps = args.n_timesteps else: n_timesteps = int(hyperparams['n_timesteps']) del hyperparams['n_timesteps'] if 'policy_kwargs' in hyperparams.keys(): hyperparams['policy_kwargs'] = eval(hyperparams['policy_kwargs']) # Parse noise string for DDPG and SAC if algo_ in ['ddpg', 'sac', 'td3'] and hyperparams.get('noise_type') is not None: noise_type = hyperparams['noise_type'].strip() noise_std = hyperparams['noise_std'] n_actions = env.action_space.shape[0] if 'adaptive-param' in noise_type: assert algo_ == 'ddpg', 'Parameter is not supported by SAC' hyperparams['param_noise'] = AdaptiveParamNoiseSpec(initial_stddev=noise_std, desired_action_stddev=noise_std) elif 'normal' in noise_type: if 'lin' in noise_type: hyperparams['action_noise'] = LinearNormalActionNoise(mean=np.zeros(n_actions), sigma=noise_std * np.ones(n_actions), final_sigma=hyperparams.get('noise_std_final', 0.0) * np.ones(n_actions), max_steps=n_timesteps) else: hyperparams['action_noise'] = NormalActionNoise(mean=np.zeros(n_actions), sigma=noise_std * np.ones(n_actions)) elif 'ornstein-uhlenbeck' in noise_type: hyperparams['action_noise'] = OrnsteinUhlenbeckActionNoise(mean=np.zeros(n_actions), sigma=noise_std * np.ones(n_actions)) else: raise RuntimeError('Unknown noise type "{}"'.format(noise_type)) print("Applying {} noise with std {}".format(noise_type, noise_std)) del hyperparams['noise_type'] del hyperparams['noise_std'] if 'noise_std_final' in hyperparams: del hyperparams['noise_std_final'] if ALGOS[args.algo] is None: raise ValueError('{} requires MPI to be installed'.format(args.algo)) if os.path.isfile(args.trained_agent): # Continue training print("Loading pretrained agent") # Policy should not be changed del hyperparams['policy'] model = ALGOS[args.algo].load(args.trained_agent, env=env, tensorboard_log=tensorboard_log, verbose=args.verbose, **hyperparams) exp_folder = args.trained_agent[:-4] if normalize: print("Loading saved running average") env.load_running_average(exp_folder) elif args.optimize_hyperparameters: if args.verbose > 0: print("Optimizing hyperparameters") def create_model(*_args, **kwargs): """ Helper to create a model with different hyperparameters """ return ALGOS[args.algo](env=create_env(n_envs), tensorboard_log=tensorboard_log, verbose=0, **kwargs) data_frame = hyperparam_optimization(args.algo, create_model, create_env, n_trials=args.n_trials, n_timesteps=n_timesteps, hyperparams=hyperparams, n_jobs=args.n_jobs, seed=args.seed, sampler_method=args.sampler, pruner_method=args.pruner, verbose=args.verbose) report_name = "report_{}_{}-trials-{}-{}-{}_{}.csv".format(env_id, args.n_trials, n_timesteps, args.sampler, args.pruner, int(time.time())) log_path = os.path.join(args.log_folder, args.algo, report_name) if args.verbose: print("Writing report to {}".format(log_path)) os.makedirs(os.path.dirname(log_path), exist_ok=True) data_frame.to_csv(log_path) exit() else: # Train an agent from scratch model = ALGOS[args.algo](env=env, tensorboard_log=tensorboard_log, verbose=args.verbose, **hyperparams) kwargs = {} if args.log_interval > -1: kwargs = {'log_interval': args.log_interval} model.learn(n_timesteps, **kwargs) # Save trained model log_path = "{}/{}/".format(args.log_folder, args.algo) save_path = os.path.join(log_path, "{}_{}".format(env_id, get_latest_run_id(log_path, env_id) + 1)) params_path = "{}/{}".format(save_path, env_id) os.makedirs(params_path, exist_ok=True) # Only 
save worker of rank 0 when using mpi if rank == 0: print("Saving to {}".format(save_path)) model.save("{}/{}".format(save_path, env_id)) # Save hyperparams with open(os.path.join(params_path, 'config.yml'), 'w') as f: yaml.dump(saved_hyperparams, f) if normalize: # Unwrap if isinstance(env, VecFrameStack): env = env.venv # Important: save the running average, for testing the agent we need that normalization env.save_running_average(params_path)
[]
[]
[ "TF_CPP_MIN_LOG_LEVEL" ]
[]
["TF_CPP_MIN_LOG_LEVEL"]
python
1
0
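In the train_zoo.py row above, string hyperparameters such as "lin_2.5e-4" are split into a schedule name and an initial value and handed to linear_schedule, which is imported from zoo.utils and not shown here. The sketch below is an assumption about that helper, based on the implementation commonly used in rl-baselines-zoo: it returns a callable that stable-baselines evaluates with the remaining training progress (1.0 at the start, 0.0 at the end).

def linear_schedule(initial_value):
    # Assumed shape of zoo.utils.linear_schedule: scale the initial value
    # linearly down to zero as the remaining progress goes from 1.0 to 0.0.
    if isinstance(initial_value, str):
        initial_value = float(initial_value)

    def schedule(progress_remaining):
        return progress_remaining * initial_value

    return schedule


# Mirrors the parsing the training script performs for e.g. 'lin_2.5e-4'.
_, initial_value = "lin_2.5e-4".split("_")
learning_rate = linear_schedule(initial_value)
assert abs(learning_rate(1.0) - 2.5e-4) < 1e-12   # full learning rate at the start
assert learning_rate(0.0) == 0.0                  # decayed to zero by the end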
paasta_tools/cli/cmds/local_run.py
#!/usr/bin/env python # Copyright 2015-2016 Yelp Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import json import os import shutil import socket import sys import threading import time import uuid from os import execlpe from random import randint from urllib.parse import urlparse import requests from docker import errors from paasta_tools.adhoc_tools import get_default_interactive_config from paasta_tools.cli.cmds.check import makefile_responds_to from paasta_tools.cli.cmds.cook_image import paasta_cook_image from paasta_tools.cli.utils import figure_out_service_name from paasta_tools.cli.utils import get_instance_config from paasta_tools.cli.utils import lazy_choices_completer from paasta_tools.cli.utils import list_instances from paasta_tools.cli.utils import pick_random_port from paasta_tools.generate_deployments_for_service import build_docker_image_name from paasta_tools.long_running_service_tools import get_healthcheck_for_instance from paasta_tools.paasta_execute_docker_command import execute_in_container from paasta_tools.secret_tools import get_secret_provider from paasta_tools.secret_tools import is_secret_ref from paasta_tools.secret_tools import is_shared_secret from paasta_tools.secret_tools import SHARED_SECRET_SERVICE from paasta_tools.tron_tools import parse_time_variables from paasta_tools.utils import _run from paasta_tools.utils import DEFAULT_SOA_DIR from paasta_tools.utils import get_docker_client from paasta_tools.utils import get_possible_launched_by_user_variable_from_env from paasta_tools.utils import get_username from paasta_tools.utils import list_clusters from paasta_tools.utils import list_services from paasta_tools.utils import load_system_paasta_config from paasta_tools.utils import NoConfigurationForServiceError from paasta_tools.utils import NoDeploymentsAvailable from paasta_tools.utils import NoDockerImageError from paasta_tools.utils import paasta_print from paasta_tools.utils import PaastaColors from paasta_tools.utils import PaastaNotConfiguredError from paasta_tools.utils import SystemPaastaConfig from paasta_tools.utils import timed_flock from paasta_tools.utils import Timeout from paasta_tools.utils import TimeoutError from paasta_tools.utils import validate_service_instance def parse_date(date_string): return datetime.datetime.strptime(date_string, "%Y-%m-%d") def perform_http_healthcheck(url, timeout): """Returns true if healthcheck on url succeeds, false otherwise :param url: the healthcheck url :param timeout: timeout in seconds :returns: True if healthcheck succeeds within number of seconds specified by timeout, false otherwise """ try: with Timeout(seconds=timeout): try: res = requests.get(url, verify=False) except requests.ConnectionError: return (False, "http request failed: connection failed") except TimeoutError: return (False, "http request timed out after %d seconds" % timeout) if "content-type" in res.headers and "," in res.headers["content-type"]: paasta_print( PaastaColors.yellow( "Multiple content-type headers detected in 
response." " The Mesos healthcheck system will treat this as a failure!" ) ) return (False, "http request succeeded, code %d" % res.status_code) # check if response code is valid per https://mesosphere.github.io/marathon/docs/health-checks.html elif res.status_code >= 200 and res.status_code < 400: return (True, "http request succeeded, code %d" % res.status_code) else: return (False, "http request failed, code %s" % str(res.status_code)) def perform_tcp_healthcheck(url, timeout): """Returns true if successfully connects to host and port, false otherwise :param url: the healthcheck url (in the form tcp://host:port) :param timeout: timeout in seconds :returns: True if healthcheck succeeds within number of seconds specified by timeout, false otherwise """ url_elem = urlparse(url) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.settimeout(timeout) result = sock.connect_ex((url_elem.hostname, url_elem.port)) sock.close() if result == 0: return (True, "tcp connection succeeded") else: return (False, "%s (timeout %d seconds)" % (os.strerror(result), timeout)) def perform_cmd_healthcheck(docker_client, container_id, command, timeout): """Returns true if return code of command is 0 when executed inside container, false otherwise :param docker_client: Docker client object :param container_id: Docker container id :param command: command to execute :param timeout: timeout in seconds :returns: True if command exits with return code 0, false otherwise """ (output, return_code) = execute_in_container( docker_client, container_id, command, timeout ) if return_code == 0: return (True, output) else: return (False, output) def run_healthcheck_on_container( docker_client, container_id, healthcheck_mode, healthcheck_data, timeout ): """Performs healthcheck on a container :param container_id: Docker container id :param healthcheck_mode: one of 'http', 'https', 'tcp', or 'cmd' :param healthcheck_data: a URL when healthcheck_mode is 'http[s]' or 'tcp', a command if healthcheck_mode is 'cmd' :param timeout: timeout in seconds for individual check :returns: a tuple of (bool, output string) """ healthcheck_result = (False, "unknown") if healthcheck_mode == "cmd": healthcheck_result = perform_cmd_healthcheck( docker_client, container_id, healthcheck_data, timeout ) elif healthcheck_mode == "http" or healthcheck_mode == "https": healthcheck_result = perform_http_healthcheck(healthcheck_data, timeout) elif healthcheck_mode == "tcp": healthcheck_result = perform_tcp_healthcheck(healthcheck_data, timeout) else: paasta_print( PaastaColors.yellow( "Healthcheck mode '%s' is not currently supported!" 
% healthcheck_mode ) ) sys.exit(1) return healthcheck_result def simulate_healthcheck_on_service( instance_config, docker_client, container_id, healthcheck_mode, healthcheck_data, healthcheck_enabled, ): """Simulates Marathon-style healthcheck on given service if healthcheck is enabled :param instance_config: service manifest :param docker_client: Docker client object :param container_id: Docker container id :param healthcheck_data: tuple url to healthcheck :param healthcheck_enabled: boolean :returns: healthcheck_passed: boolean """ healthcheck_link = PaastaColors.cyan(healthcheck_data) if healthcheck_enabled: grace_period = instance_config.get_healthcheck_grace_period_seconds() timeout = instance_config.get_healthcheck_timeout_seconds() interval = instance_config.get_healthcheck_interval_seconds() max_failures = instance_config.get_healthcheck_max_consecutive_failures() paasta_print( "\nStarting health check via %s (waiting %s seconds before " "considering failures due to grace period):" % (healthcheck_link, grace_period) ) # silently start performing health checks until grace period ends or first check succeeds graceperiod_end_time = time.time() + grace_period after_grace_period_attempts = 0 healthchecking = True def _stream_docker_logs(container_id, generator): while healthchecking: try: # the generator will block until another log line is available log_line = next(generator).decode("utf-8").rstrip("\n") if healthchecking: paasta_print(f"container [{container_id[:12]}]: {log_line}") else: # stop streaming at first opportunity, since generator.close() # cant be used until the container is dead break except StopIteration: # natural end of logs break docker_logs_generator = docker_client.logs( container_id, stderr=True, stream=True ) threading.Thread( target=_stream_docker_logs, daemon=True, args=(container_id, docker_logs_generator), ).start() while True: # First inspect the container for early exits container_state = docker_client.inspect_container(container_id) if not container_state["State"]["Running"]: paasta_print( PaastaColors.red( "Container exited with code {}".format( container_state["State"]["ExitCode"] ) ) ) healthcheck_passed = False break healthcheck_passed, healthcheck_output = run_healthcheck_on_container( docker_client, container_id, healthcheck_mode, healthcheck_data, timeout ) # Yay, we passed the healthcheck if healthcheck_passed: paasta_print( "{}'{}' (via {})".format( PaastaColors.green("Healthcheck succeeded!: "), healthcheck_output, healthcheck_link, ) ) break # Otherwise, print why we failed if time.time() < graceperiod_end_time: color = PaastaColors.grey msg = "(disregarded due to grace period)" extra_msg = f" (via: {healthcheck_link}. Output: {healthcheck_output})" else: # If we've exceeded the grace period, we start incrementing attempts after_grace_period_attempts += 1 color = PaastaColors.red msg = "(Attempt {} of {})".format( after_grace_period_attempts, max_failures ) extra_msg = f" (via: {healthcheck_link}. Output: {healthcheck_output})" paasta_print("{}{}".format(color(f"Healthcheck failed! 
{msg}"), extra_msg)) if after_grace_period_attempts == max_failures: break time.sleep(interval) healthchecking = False # end docker logs stream else: paasta_print( "\nPaaSTA would have healthchecked your service via\n%s" % healthcheck_link ) healthcheck_passed = True return healthcheck_passed def read_local_dockerfile_lines(): dockerfile = os.path.join(os.getcwd(), "Dockerfile") return open(dockerfile).readlines() def add_subparser(subparsers): list_parser = subparsers.add_parser( "local-run", help="Run service's Docker image locally", description=( "'paasta local-run' is useful for simulating how a PaaSTA service would be " "executed on a real cluster. It analyzes the local soa-configs and constructs " "a 'docker run' invocation to match. This is useful as a type of end-to-end " "test, ensuring that a service will work inside the docker container as expected. " "Additionally, 'local-run' can healthcheck a service per the configured healthcheck.\n\n" "Alternatively, 'local-run' can be used with --pull, which will pull the currently " "deployed docker image and use it, instead of building one." ), epilog=( "Note: 'paasta local-run' uses docker commands, which may require elevated privileges " "to run (sudo)." ), ) list_parser.add_argument( "-s", "--service", help="The name of the service you wish to inspect" ).completer = lazy_choices_completer(list_services) list_parser.add_argument( "-c", "--cluster", help=( "The name of the cluster you wish to simulate. " "If omitted, uses the default cluster defined in the paasta local-run configs" ), ).completer = lazy_choices_completer(list_clusters) list_parser.add_argument( "-y", "--yelpsoa-config-root", dest="yelpsoa_config_root", help="A directory from which yelpsoa-configs should be read from", default=DEFAULT_SOA_DIR, ) build_pull_group = list_parser.add_mutually_exclusive_group() build_pull_group.add_argument( "-b", "--build", help=( "Build the docker image to run from scratch using the local Makefile's " "'cook-image' target. Defaults to try to use the local Makefile if present." ), action="store_const", const="build", dest="action", ) build_pull_group.add_argument( "-p", "--pull", help=( "Pull the docker image marked for deployment from the Docker registry and " "use that for the local-run. This is the opposite of --build." ), action="store_const", const="pull", dest="action", ) build_pull_group.add_argument( "-d", "--dry-run", help="Shows the arguments supplied to docker as json.", action="store_const", const="dry_run", dest="action", ) build_pull_group.set_defaults(action="build") list_parser.add_argument( "--json-dict", help="When running dry run, output the arguments as a json dict", action="store_true", dest="dry_run_json_dict", ) list_parser.add_argument( "-C", "--cmd", help=( "Run Docker container with particular command, " 'for example: "bash". By default will use the command or args specified by the ' "soa-configs or what was specified in the Dockerfile" ), required=False, default=None, ) list_parser.add_argument( "-i", "--instance", help=( "Simulate a docker run for a particular instance of the service, like 'main' or 'canary'. " "NOTE: if you don't specify an instance, PaaSTA will run in interactive mode" ), required=False, default=None, ).completer = lazy_choices_completer(list_instances) list_parser.add_argument( "--date", default=datetime.datetime.today().strftime("%Y-%m-%d"), help="Date to use for interpolating date variables in a job. 
Defaults to use %(default)s.", type=parse_date, ) list_parser.add_argument( "-v", "--verbose", help="Show Docker commands output", action="store_true", required=False, default=True, ) list_parser.add_argument( "-I", "--interactive", help=( 'Run container in interactive mode. If interactive is set the default command will be "bash" ' 'unless otherwise set by the "--cmd" flag' ), action="store_true", required=False, default=False, ) list_parser.add_argument( "-k", "--no-healthcheck", help="Disable simulated healthcheck", dest="healthcheck", action="store_false", required=False, default=True, ) list_parser.add_argument( "-t", "--healthcheck-only", help="Terminates container after healthcheck (exits with status code 0 on success, 1 otherwise)", dest="healthcheck_only", action="store_true", required=False, default=False, ) list_parser.add_argument( "-o", "--port", help="Specify a port number to use. If not set, a random non-conflicting port will be found.", type=int, dest="user_port", required=False, default=False, ) list_parser.add_argument( "--vault-auth-method", help="Override how we auth with vault, defaults to token if not present", type=str, dest="vault_auth_method", required=False, default="token", choices=["token", "ldap"], ) list_parser.add_argument( "--vault-token-file", help="Override vault token file, defaults to %(default)s", type=str, dest="vault_token_file", required=False, default="/var/spool/.paasta_vault_token", ) list_parser.add_argument( "--skip-secrets", help="Skip decrypting secrets, useful if running non-interactively", dest="skip_secrets", required=False, action="store_true", default=False, ) list_parser.add_argument( "--sha", help=( "SHA to run instead of the currently marked-for-deployment SHA. Ignored when used with --build." " Must be a version that exists in the registry, i.e. it has been built by Jenkins." ), type=str, dest="sha", required=False, default=None, ) list_parser.set_defaults(command=paasta_local_run) def get_container_name(): return "paasta_local_run_{}_{}".format(get_username(), randint(1, 999999)) def get_docker_run_cmd( memory, chosen_port, container_port, container_name, volumes, env, interactive, docker_hash, command, net, docker_params, detach, ): cmd = ["paasta_docker_wrapper", "run"] for k in env.keys(): cmd.append("--env") cmd.append(f"{k}") cmd.append("--memory=%dm" % memory) for i in docker_params: cmd.append(f"--{i['key']}={i['value']}") if net == "bridge" and container_port is not None: cmd.append("--publish=%d:%d" % (chosen_port, container_port)) elif net == "host": cmd.append("--net=host") cmd.append("--name=%s" % container_name) for volume in volumes: cmd.append("--volume=%s" % volume) if interactive: cmd.append("--interactive=true") if sys.stdin.isatty(): cmd.append("--tty=true") else: if detach: cmd.append("--detach=true") cmd.append("%s" % docker_hash) if command: if isinstance(command, str): cmd.extend(("sh", "-c", command)) else: cmd.extend(command) return cmd class LostContainerException(Exception): pass def docker_pull_image(docker_url): """Pull an image via ``docker pull``. Uses the actual pull command instead of the python bindings due to the docker auth/registry transition. Once we are past Docker 1.6 we can use better credential management, but for now this function assumes the user running the command has already been authorized for the registry""" paasta_print( "Please wait while the image (%s) is pulled (times out after 30m)..." 
% docker_url, file=sys.stderr, ) DEVNULL = open(os.devnull, "wb") with open("/tmp/paasta-local-run-pull.lock", "w") as f: with timed_flock(f, seconds=1800): ret, output = _run( "docker pull %s" % docker_url, stream=True, stdin=DEVNULL ) if ret != 0: paasta_print( "\nPull failed. Are you authorized to run docker commands?", file=sys.stderr, ) sys.exit(ret) def get_container_id(docker_client, container_name): """Use 'docker_client' to find the container we started, identifiable by its 'container_name'. If we can't find the id, raise LostContainerException. """ containers = docker_client.containers(all=False) for container in containers: if "/%s" % container_name in container.get("Names", []): return container.get("Id") raise LostContainerException( "Can't find the container I just launched so I can't do anything else.\n" "Try docker 'ps --all | grep %s' to see where it went.\n" "Here were all the containers:\n" "%s" % (container_name, containers) ) def _cleanup_container(docker_client, container_id): if docker_client.inspect_container(container_id)["State"].get("OOMKilled", False): paasta_print( PaastaColors.red( "Your service was killed by the OOM Killer!\n" "You've exceeded the memory limit, try increasing the mem parameter in your soa_configs" ), file=sys.stderr, ) paasta_print("\nStopping and removing the old container %s..." % container_id) paasta_print("(Please wait or you may leave an orphaned container.)") try: docker_client.stop(container_id) docker_client.remove_container(container_id) paasta_print("...done") except errors.APIError: paasta_print( PaastaColors.yellow( "Could not clean up container! You should stop and remove container '%s' manually." % container_id ) ) def get_local_run_environment_vars(instance_config, port0, framework): """Returns a dictionary of environment variables to simulate what would be available to a paasta service running in a container""" hostname = socket.getfqdn() docker_image = instance_config.get_docker_image() if docker_image == "": # In a local_run environment, the docker_image may not be available # so we can fall-back to the injected DOCKER_TAG per the paasta contract docker_image = os.environ["DOCKER_TAG"] fake_taskid = uuid.uuid4() env = { "HOST": hostname, "MESOS_SANDBOX": "/mnt/mesos/sandbox", "MESOS_CONTAINER_NAME": "localrun-%s" % fake_taskid, "MESOS_TASK_ID": str(fake_taskid), "PAASTA_DOCKER_IMAGE": docker_image, "PAASTA_LAUNCHED_BY": get_possible_launched_by_user_variable_from_env(), } if framework == "marathon": env["MARATHON_PORT"] = str(port0) env["MARATHON_PORT0"] = str(port0) env["MARATHON_PORTS"] = str(port0) env["MARATHON_PORT_%d" % instance_config.get_container_port()] = str(port0) env["MARATHON_APP_VERSION"] = "simulated_marathon_app_version" env["MARATHON_APP_RESOURCE_CPUS"] = str(instance_config.get_cpus()) env["MARATHON_APP_DOCKER_IMAGE"] = docker_image env["MARATHON_APP_RESOURCE_MEM"] = str(instance_config.get_mem()) env["MARATHON_APP_RESOURCE_DISK"] = str(instance_config.get_disk()) env["MARATHON_APP_LABELS"] = "" env["MARATHON_APP_ID"] = "/simulated_marathon_app_id" env["MARATHON_HOST"] = hostname return env def check_if_port_free(port): temp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: temp_socket.bind(("127.0.0.1", port)) except socket.error: return False finally: temp_socket.close() return True def decrypt_secret_environment_variables( secret_provider_name, environment, soa_dir, service_name, cluster_name, secret_provider_kwargs, ): decrypted_secrets = {} service_secret_env = {} shared_secret_env = {} 
for k, v in environment.items(): if is_secret_ref(v): if is_shared_secret(v): shared_secret_env[k] = v else: service_secret_env[k] = v provider_args = { "secret_provider_name": secret_provider_name, "soa_dir": soa_dir, "cluster_name": cluster_name, "secret_provider_kwargs": secret_provider_kwargs, } secret_provider_kwargs["vault_num_uses"] = len(service_secret_env) + len( shared_secret_env ) try: decrypted_secrets.update( decrypt_secret_environment_for_service( service_secret_env, service_name, **provider_args ) ) decrypted_secrets.update( decrypt_secret_environment_for_service( shared_secret_env, SHARED_SECRET_SERVICE, **provider_args ) ) except Exception as e: paasta_print(f"Failed to retrieve secrets with {e.__class__.__name__}: {e}") paasta_print( "If you don't need the secrets for local-run, you can add --skip-secrets" ) sys.exit(1) return decrypted_secrets def decrypt_secret_environment_for_service( secret_env_vars, service_name, secret_provider_name, soa_dir, cluster_name, secret_provider_kwargs, ): if not secret_env_vars: return {} secret_provider = get_secret_provider( secret_provider_name=secret_provider_name, soa_dir=soa_dir, service_name=service_name, cluster_names=[cluster_name], secret_provider_kwargs=secret_provider_kwargs, ) return secret_provider.decrypt_environment(secret_env_vars) def run_docker_container( docker_client, service, instance, docker_url, volumes, interactive, command, healthcheck, healthcheck_only, user_port, instance_config, secret_provider_name, soa_dir=DEFAULT_SOA_DIR, dry_run=False, json_dict=False, framework=None, secret_provider_kwargs={}, skip_secrets=False, ): """docker-py has issues running a container with a TTY attached, so for consistency we execute 'docker run' directly in both interactive and non-interactive modes. In non-interactive mode when the run is complete, stop the container and remove it (with docker-py). 
""" if user_port: if check_if_port_free(user_port): chosen_port = user_port else: paasta_print( PaastaColors.red( "The chosen port is already in use!\n" "Try specifying another one, or omit (--port|-o) and paasta will find a free one for you" ), file=sys.stderr, ) sys.exit(1) else: chosen_port = pick_random_port(service) environment = instance_config.get_env_dictionary() if not skip_secrets: secret_environment = decrypt_secret_environment_variables( secret_provider_name=secret_provider_name, environment=environment, soa_dir=soa_dir, service_name=service, cluster_name=instance_config.cluster, secret_provider_kwargs=secret_provider_kwargs, ) environment.update(secret_environment) local_run_environment = get_local_run_environment_vars( instance_config=instance_config, port0=chosen_port, framework=framework ) environment.update(local_run_environment) net = instance_config.get_net() memory = instance_config.get_mem() container_name = get_container_name() docker_params = instance_config.format_docker_parameters() healthcheck_mode, healthcheck_data = get_healthcheck_for_instance( service, instance, instance_config, chosen_port, soa_dir=soa_dir ) if healthcheck_mode is None: container_port = None interactive = True elif not user_port and not healthcheck and not healthcheck_only: container_port = None else: try: container_port = instance_config.get_container_port() except AttributeError: container_port = None simulate_healthcheck = ( healthcheck_only or healthcheck ) and healthcheck_mode is not None docker_run_args = dict( memory=memory, chosen_port=chosen_port, container_port=container_port, container_name=container_name, volumes=volumes, env=environment, interactive=interactive, detach=simulate_healthcheck, docker_hash=docker_url, command=command, net=net, docker_params=docker_params, ) docker_run_cmd = get_docker_run_cmd(**docker_run_args) joined_docker_run_cmd = " ".join(docker_run_cmd) if dry_run: if json_dict: paasta_print(json.dumps(docker_run_args)) else: paasta_print(json.dumps(docker_run_cmd)) return 0 else: paasta_print( "Running docker command:\n%s" % PaastaColors.grey(joined_docker_run_cmd) ) merged_env = {**os.environ, **environment} if interactive or not simulate_healthcheck: # NOTE: This immediately replaces us with the docker run cmd. Docker # run knows how to clean up the running container in this situation. wrapper_path = shutil.which("paasta_docker_wrapper") # To properly simulate mesos, we pop the PATH, which is not available to # The executor merged_env.pop("PATH") execlpe(wrapper_path, *docker_run_cmd, merged_env) # For testing, when execlpe is patched out and doesn't replace us, we # still want to bail out. return 0 container_started = False container_id = None try: (returncode, output) = _run(docker_run_cmd, env=merged_env) if returncode != 0: paasta_print( "Failure trying to start your container!" "Returncode: %d" "Output:" "%s" "" "Fix that problem and try again." "http://y/paasta-troubleshooting" % (returncode, output), sep="\n", ) # Container failed to start so no need to cleanup; just bail. 
sys.exit(1) container_started = True container_id = get_container_id(docker_client, container_name) paasta_print("Found our container running with CID %s" % container_id) if simulate_healthcheck: healthcheck_result = simulate_healthcheck_on_service( instance_config=instance_config, docker_client=docker_client, container_id=container_id, healthcheck_mode=healthcheck_mode, healthcheck_data=healthcheck_data, healthcheck_enabled=healthcheck, ) def _output_exit_code(): returncode = docker_client.inspect_container(container_id)["State"][ "ExitCode" ] paasta_print(f"Container exited: {returncode})") if healthcheck_only: if container_started: _output_exit_code() _cleanup_container(docker_client, container_id) if healthcheck_mode is None: paasta_print( "--healthcheck-only, but no healthcheck is defined for this instance!" ) sys.exit(1) elif healthcheck_result is True: sys.exit(0) else: sys.exit(1) running = docker_client.inspect_container(container_id)["State"]["Running"] if running: paasta_print("Your service is now running! Tailing stdout and stderr:") for line in docker_client.attach( container_id, stderr=True, stream=True, logs=True ): paasta_print(line) else: _output_exit_code() returncode = 3 except KeyboardInterrupt: returncode = 3 # Cleanup if the container exits on its own or interrupted. if container_started: returncode = docker_client.inspect_container(container_id)["State"]["ExitCode"] _cleanup_container(docker_client, container_id) return returncode def format_command_for_type(command, instance_type, date): """ Given an instance_type, return a function that appropriately formats the command to be run. """ if instance_type == "tron": interpolated_command = parse_time_variables(command, date) return interpolated_command else: return command def configure_and_run_docker_container( docker_client, docker_url, docker_sha, service, instance, cluster, system_paasta_config, args, pull_image=False, dry_run=False, ): """ Run Docker container by image hash with args set in command line. Function prints the output of run command in stdout. """ if instance is None and args.healthcheck_only: paasta_print( "With --healthcheck-only, --instance MUST be provided!", file=sys.stderr ) return 1 if instance is None and not sys.stdin.isatty(): paasta_print( "--instance and --cluster must be specified when using paasta local-run without a tty!", file=sys.stderr, ) return 1 soa_dir = args.yelpsoa_config_root volumes = list() load_deployments = (docker_url is None or pull_image) and not docker_sha interactive = args.interactive try: if instance is None: instance_type = "adhoc" instance = "interactive" instance_config = get_default_interactive_config( service=service, cluster=cluster, soa_dir=soa_dir, load_deployments=load_deployments, ) interactive = True else: instance_type = validate_service_instance( service, instance, cluster, soa_dir ) instance_config = get_instance_config( service=service, instance=instance, cluster=cluster, load_deployments=load_deployments, soa_dir=soa_dir, ) except NoConfigurationForServiceError as e: paasta_print(str(e), file=sys.stderr) return 1 except NoDeploymentsAvailable: paasta_print( PaastaColors.red( "Error: No deployments.json found in %(soa_dir)s/%(service)s. 
" "You can generate this by running: " "generate_deployments_for_service -d %(soa_dir)s -s %(service)s" % {"soa_dir": soa_dir, "service": service} ), sep="\n", file=sys.stderr, ) return 1 if docker_sha is not None: instance_config.branch_dict = { "git_sha": docker_sha, "docker_image": build_docker_image_name(service=service, sha=docker_sha), "desired_state": "start", "force_bounce": None, } if docker_url is None: try: docker_url = instance_config.get_docker_url() except NoDockerImageError: if instance_config.get_deploy_group() is None: paasta_print( PaastaColors.red( f"Error: {service}.{instance} has no 'deploy_group' set. Please set one so " "the proper image can be used to run for this service." ), sep="", file=sys.stderr, ) else: paasta_print( PaastaColors.red( "Error: No sha has been marked for deployment for the %s deploy group.\n" "Please ensure this service has either run through a jenkins pipeline " "or paasta mark-for-deployment has been run for %s\n" % (instance_config.get_deploy_group(), service) ), sep="", file=sys.stderr, ) return 1 if pull_image: docker_pull_image(docker_url) for volume in instance_config.get_volumes(system_paasta_config.get_volumes()): if os.path.exists(volume["hostPath"]): volumes.append( "{}:{}:{}".format( volume["hostPath"], volume["containerPath"], volume["mode"].lower() ) ) else: paasta_print( PaastaColors.yellow( "Warning: Path %s does not exist on this host. Skipping this binding." % volume["hostPath"] ), file=sys.stderr, ) if interactive is True and args.cmd is None: command = "bash" elif args.cmd: command = args.cmd else: command_from_config = instance_config.get_cmd() if command_from_config: command = format_command_for_type( command=command_from_config, instance_type=instance_type, date=args.date ) else: command = instance_config.get_args() secret_provider_kwargs = { "vault_cluster_config": system_paasta_config.get_vault_cluster_config(), "vault_auth_method": args.vault_auth_method, "vault_token_file": args.vault_token_file, } return run_docker_container( docker_client=docker_client, service=service, instance=instance, docker_url=docker_url, volumes=volumes, interactive=interactive, command=command, healthcheck=args.healthcheck, healthcheck_only=args.healthcheck_only, user_port=args.user_port, instance_config=instance_config, soa_dir=args.yelpsoa_config_root, dry_run=dry_run, json_dict=args.dry_run_json_dict, framework=instance_type, secret_provider_name=system_paasta_config.get_secret_provider_name(), secret_provider_kwargs=secret_provider_kwargs, skip_secrets=args.skip_secrets, ) def docker_config_available(): home = os.path.expanduser("~") oldconfig = os.path.join(home, ".dockercfg") newconfig = os.path.join(home, ".docker", "config.json") return (os.path.isfile(oldconfig) and os.access(oldconfig, os.R_OK)) or ( os.path.isfile(newconfig) and os.access(newconfig, os.R_OK) ) def paasta_local_run(args): if args.action == "pull" and os.geteuid() != 0 and not docker_config_available(): paasta_print("Re-executing paasta local-run --pull with sudo..") os.execvp("sudo", ["sudo", "-H"] + sys.argv) if args.action == "build" and not makefile_responds_to("cook-image"): paasta_print( "A local Makefile with a 'cook-image' target is required for --build", file=sys.stderr, ) paasta_print( "If you meant to pull the docker image from the registry, explicitly pass --pull", file=sys.stderr, ) return 1 try: system_paasta_config = load_system_paasta_config() except PaastaNotConfiguredError: paasta_print( PaastaColors.yellow( "Warning: Couldn't load config files from 
'/etc/paasta'. This indicates" "PaaSTA is not configured locally on this host, and local-run may not behave" "the same way it would behave on a server configured for PaaSTA." ), sep="\n", ) system_paasta_config = SystemPaastaConfig({"volumes": []}, "/etc/paasta") local_run_config = system_paasta_config.get_local_run_config() service = figure_out_service_name(args, soa_dir=args.yelpsoa_config_root) if args.cluster: cluster = args.cluster else: try: cluster = local_run_config["default_cluster"] except KeyError: paasta_print( PaastaColors.red( "PaaSTA on this machine has not been configured with a default cluster." "Please pass one to local-run using '-c'." ), sep="\n", file=sys.stderr, ) return 1 instance = args.instance docker_client = get_docker_client() docker_sha = None docker_url = None if args.action == "build": default_tag = "paasta-local-run-{}-{}".format(service, get_username()) docker_url = os.environ.get("DOCKER_TAG", default_tag) os.environ["DOCKER_TAG"] = docker_url pull_image = False cook_return = paasta_cook_image( args=None, service=service, soa_dir=args.yelpsoa_config_root ) if cook_return != 0: return cook_return elif args.action == "dry_run": pull_image = False docker_url = None docker_sha = args.sha else: pull_image = True docker_url = None docker_sha = args.sha try: return configure_and_run_docker_container( docker_client=docker_client, docker_url=docker_url, docker_sha=docker_sha, service=service, instance=instance, cluster=cluster, args=args, pull_image=pull_image, system_paasta_config=system_paasta_config, dry_run=args.action == "dry_run", ) except errors.APIError as e: paasta_print("Can't run Docker container. Error: %s" % str(e), file=sys.stderr) return 1
[]
[]
[ "DOCKER_TAG" ]
[]
["DOCKER_TAG"]
python
1
0
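The only environment variable recorded for local_run.py above is DOCKER_TAG: paasta_local_run exports it for --build runs, and get_local_run_environment_vars falls back to it when the instance config carries no docker image. A minimal sketch of that fallback follows; the helper name and the example values are hypothetical.

import os


def resolve_docker_image(configured_image):
    # Hypothetical helper mirroring the fallback in get_local_run_environment_vars:
    # prefer the image from soa-configs, otherwise use the injected DOCKER_TAG.
    if configured_image:
        return configured_image
    return os.environ["DOCKER_TAG"]


os.environ.setdefault("DOCKER_TAG", "paasta-local-run-myservice-myuser")      # hypothetical value
print(resolve_docker_image(""))                                               # falls back to DOCKER_TAG
print(resolve_docker_image("docker-registry.example.com/services-foo:abc"))   # hypothetical image name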
google/cloud/dialogflow_v2/services/contexts/client.py
# -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict import os import re from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 from google.api_core import retry as retries from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object] # type: ignore from google.cloud.dialogflow_v2.services.contexts import pagers from google.cloud.dialogflow_v2.types import context from google.cloud.dialogflow_v2.types import context as gcd_context from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import struct_pb2 # type: ignore from .transports.base import ContextsTransport, DEFAULT_CLIENT_INFO from .transports.grpc import ContextsGrpcTransport from .transports.grpc_asyncio import ContextsGrpcAsyncIOTransport class ContextsClientMeta(type): """Metaclass for the Contexts client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. """ _transport_registry = OrderedDict() # type: Dict[str, Type[ContextsTransport]] _transport_registry["grpc"] = ContextsGrpcTransport _transport_registry["grpc_asyncio"] = ContextsGrpcAsyncIOTransport def get_transport_class( cls, label: str = None, ) -> Type[ContextsTransport]: """Returns an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values())) class ContextsClient(metaclass=ContextsClientMeta): """Service for managing [Contexts][google.cloud.dialogflow.v2.Context].""" @staticmethod def _get_default_mtls_endpoint(api_endpoint): """Converts api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. """ if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" 
) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "dialogflow.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials info. Args: info (dict): The service account private key info. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: ContextsClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_info(info) kwargs["credentials"] = credentials return cls(*args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: ContextsClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @property def transport(self) -> ContextsTransport: """Returns the transport used by the client instance. Returns: ContextsTransport: The transport used by the client instance. """ return self._transport @staticmethod def context_path( project: str, session: str, context: str, ) -> str: """Returns a fully-qualified context string.""" return "projects/{project}/agent/sessions/{session}/contexts/{context}".format( project=project, session=session, context=context, ) @staticmethod def parse_context_path(path: str) -> Dict[str, str]: """Parses a context path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/agent/sessions/(?P<session>.+?)/contexts/(?P<context>.+?)$", path, ) return m.groupdict() if m else {} @staticmethod def common_billing_account_path( billing_account: str, ) -> str: """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @staticmethod def parse_common_billing_account_path(path: str) -> Dict[str, str]: """Parse a billing_account path into its component segments.""" m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_folder_path( folder: str, ) -> str: """Returns a fully-qualified folder string.""" return "folders/{folder}".format( folder=folder, ) @staticmethod def parse_common_folder_path(path: str) -> Dict[str, str]: """Parse a folder path into its component segments.""" m = re.match(r"^folders/(?P<folder>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_organization_path( organization: str, ) -> str: """Returns a fully-qualified organization string.""" return "organizations/{organization}".format( organization=organization, ) @staticmethod def parse_common_organization_path(path: str) -> Dict[str, str]: """Parse a organization path into its component segments.""" m = re.match(r"^organizations/(?P<organization>.+?)$", path) return 
m.groupdict() if m else {} @staticmethod def common_project_path( project: str, ) -> str: """Returns a fully-qualified project string.""" return "projects/{project}".format( project=project, ) @staticmethod def parse_common_project_path(path: str) -> Dict[str, str]: """Parse a project path into its component segments.""" m = re.match(r"^projects/(?P<project>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_location_path( project: str, location: str, ) -> str: """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @staticmethod def parse_common_location_path(path: str) -> Dict[str, str]: """Parse a location path into its component segments.""" m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) return m.groupdict() if m else {} @classmethod def get_mtls_endpoint_and_cert_source( cls, client_options: Optional[client_options_lib.ClientOptions] = None ): """Return the API endpoint and client cert source for mutual TLS. The client cert source is determined in the following order: (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the client cert source is None. (2) if `client_options.client_cert_source` is provided, use the provided one; if the default client cert source exists, use the default one; otherwise the client cert source is None. The API endpoint is determined in the following order: (1) if `client_options.api_endpoint` if provided, use the provided one. (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the default mTLS endpoint; if the environment variabel is "never", use the default API endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise use the default API endpoint. More details can be found at https://google.aip.dev/auth/4114. Args: client_options (google.api_core.client_options.ClientOptions): Custom options for the client. Only the `api_endpoint` and `client_cert_source` properties may be used in this method. Returns: Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the client cert source to use. Raises: google.auth.exceptions.MutualTLSChannelError: If any errors happen. """ if client_options is None: client_options = client_options_lib.ClientOptions() use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") if use_client_cert not in ("true", "false"): raise ValueError( "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" ) # Figure out the client cert source to use. client_cert_source = None if use_client_cert == "true": if client_options.client_cert_source: client_cert_source = client_options.client_cert_source elif mtls.has_default_client_cert_source(): client_cert_source = mtls.default_client_cert_source() # Figure out which api endpoint to use. 
if client_options.api_endpoint is not None: api_endpoint = client_options.api_endpoint elif use_mtls_endpoint == "always" or ( use_mtls_endpoint == "auto" and client_cert_source ): api_endpoint = cls.DEFAULT_MTLS_ENDPOINT else: api_endpoint = cls.DEFAULT_ENDPOINT return api_endpoint, client_cert_source def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, transport: Union[str, ContextsTransport, None] = None, client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the contexts client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ContextsTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (google.api_core.client_options.ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint) and "auto" (auto switch to the default mTLS endpoint if client certificate is present, this is the default value). However, the ``api_endpoint`` property takes precedence if provided. (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used to provide client certificate for mutual TLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ if isinstance(client_options, dict): client_options = client_options_lib.from_dict(client_options) if client_options is None: client_options = client_options_lib.ClientOptions() api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( client_options ) api_key_value = getattr(client_options, "api_key", None) if api_key_value and credentials: raise ValueError( "client_options.api_key and credentials are mutually exclusive" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, ContextsTransport): # transport is a ContextsTransport instance. if credentials or client_options.credentials_file or api_key_value: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) if client_options.scopes: raise ValueError( "When providing a transport instance, provide its scopes " "directly." 
) self._transport = transport else: import google.auth._default # type: ignore if api_key_value and hasattr( google.auth._default, "get_api_key_credentials" ): credentials = google.auth._default.get_api_key_credentials( api_key_value ) Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, host=api_endpoint, scopes=client_options.scopes, client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, always_use_jwt_access=True, ) def list_contexts( self, request: Union[context.ListContextsRequest, dict] = None, *, parent: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListContextsPager: r"""Returns the list of all contexts in the specified session. .. code-block:: python from google.cloud import dialogflow_v2 def sample_list_contexts(): # Create a client client = dialogflow_v2.ContextsClient() # Initialize request argument(s) request = dialogflow_v2.ListContextsRequest( parent="parent_value", ) # Make the request page_result = client.list_contexts(request=request) # Handle the response for response in page_result: print(response) Args: request (Union[google.cloud.dialogflow_v2.types.ListContextsRequest, dict]): The request object. The request message for [Contexts.ListContexts][google.cloud.dialogflow.v2.Contexts.ListContexts]. parent (str): Required. The session to list all contexts from. Format: ``projects/<Project ID>/agent/sessions/<Session ID>`` or ``projects/<Project ID>/agent/environments/<Environment ID>/users/<User ID>/sessions/<Session ID>``. If ``Environment ID`` is not specified, we assume default 'draft' environment. If ``User ID`` is not specified, we assume default '-' user. This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dialogflow_v2.services.contexts.pagers.ListContextsPager: The response message for [Contexts.ListContexts][google.cloud.dialogflow.v2.Contexts.ListContexts]. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a context.ListContextsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, context.ListContextsRequest): request = context.ListContextsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
rpc = self._transport._wrapped_methods[self._transport.list_contexts] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.ListContextsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def get_context( self, request: Union[context.GetContextRequest, dict] = None, *, name: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> context.Context: r"""Retrieves the specified context. .. code-block:: python from google.cloud import dialogflow_v2 def sample_get_context(): # Create a client client = dialogflow_v2.ContextsClient() # Initialize request argument(s) request = dialogflow_v2.GetContextRequest( name="name_value", ) # Make the request response = client.get_context(request=request) # Handle the response print(response) Args: request (Union[google.cloud.dialogflow_v2.types.GetContextRequest, dict]): The request object. The request message for [Contexts.GetContext][google.cloud.dialogflow.v2.Contexts.GetContext]. name (str): Required. The name of the context. Format: ``projects/<Project ID>/agent/sessions/<Session ID>/contexts/<Context ID>`` or ``projects/<Project ID>/agent/environments/<Environment ID>/users/<User ID>/sessions/<Session ID>/contexts/<Context ID>``. If ``Environment ID`` is not specified, we assume default 'draft' environment. If ``User ID`` is not specified, we assume default '-' user. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dialogflow_v2.types.Context: Dialogflow contexts are similar to natural language context. If a person says to you "they are orange", you need context in order to understand what "they" is referring to. Similarly, for Dialogflow to handle an end-user expression like that, it needs to be provided with context in order to correctly match an intent. Using contexts, you can control the flow of a conversation. You can configure contexts for an intent by setting input and output contexts, which are identified by string names. When an intent is matched, any configured output contexts for that intent become active. While any contexts are active, Dialogflow is more likely to match intents that are configured with input contexts that correspond to the currently active contexts. For more information about context, see the [Contexts guide](\ https://cloud.google.com/dialogflow/docs/contexts-overview). """ # Create or coerce a protobuf request object. # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." 
) # Minor optimization to avoid making a copy if the user passes # in a context.GetContextRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, context.GetContextRequest): request = context.GetContextRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.get_context] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Done; return the response. return response def create_context( self, request: Union[gcd_context.CreateContextRequest, dict] = None, *, parent: str = None, context: gcd_context.Context = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gcd_context.Context: r"""Creates a context. If the specified context already exists, overrides the context. .. code-block:: python from google.cloud import dialogflow_v2 def sample_create_context(): # Create a client client = dialogflow_v2.ContextsClient() # Initialize request argument(s) context = dialogflow_v2.Context() context.name = "name_value" request = dialogflow_v2.CreateContextRequest( parent="parent_value", context=context, ) # Make the request response = client.create_context(request=request) # Handle the response print(response) Args: request (Union[google.cloud.dialogflow_v2.types.CreateContextRequest, dict]): The request object. The request message for [Contexts.CreateContext][google.cloud.dialogflow.v2.Contexts.CreateContext]. parent (str): Required. The session to create a context for. Format: ``projects/<Project ID>/agent/sessions/<Session ID>`` or ``projects/<Project ID>/agent/environments/<Environment ID>/users/<User ID>/sessions/<Session ID>``. If ``Environment ID`` is not specified, we assume default 'draft' environment. If ``User ID`` is not specified, we assume default '-' user. This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. context (google.cloud.dialogflow_v2.types.Context): Required. The context to create. This corresponds to the ``context`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dialogflow_v2.types.Context: Dialogflow contexts are similar to natural language context. If a person says to you "they are orange", you need context in order to understand what "they" is referring to. Similarly, for Dialogflow to handle an end-user expression like that, it needs to be provided with context in order to correctly match an intent. Using contexts, you can control the flow of a conversation. You can configure contexts for an intent by setting input and output contexts, which are identified by string names. When an intent is matched, any configured output contexts for that intent become active. 
While any contexts are active, Dialogflow is more likely to match intents that are configured with input contexts that correspond to the currently active contexts. For more information about context, see the [Contexts guide](\ https://cloud.google.com/dialogflow/docs/contexts-overview). """ # Create or coerce a protobuf request object. # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, context]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a gcd_context.CreateContextRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, gcd_context.CreateContextRequest): request = gcd_context.CreateContextRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if context is not None: request.context = context # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.create_context] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Done; return the response. return response def update_context( self, request: Union[gcd_context.UpdateContextRequest, dict] = None, *, context: gcd_context.Context = None, update_mask: field_mask_pb2.FieldMask = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gcd_context.Context: r"""Updates the specified context. .. code-block:: python from google.cloud import dialogflow_v2 def sample_update_context(): # Create a client client = dialogflow_v2.ContextsClient() # Initialize request argument(s) context = dialogflow_v2.Context() context.name = "name_value" request = dialogflow_v2.UpdateContextRequest( context=context, ) # Make the request response = client.update_context(request=request) # Handle the response print(response) Args: request (Union[google.cloud.dialogflow_v2.types.UpdateContextRequest, dict]): The request object. The request message for [Contexts.UpdateContext][google.cloud.dialogflow.v2.Contexts.UpdateContext]. context (google.cloud.dialogflow_v2.types.Context): Required. The context to update. This corresponds to the ``context`` field on the ``request`` instance; if ``request`` is provided, this should not be set. update_mask (google.protobuf.field_mask_pb2.FieldMask): Optional. The mask to control which fields get updated. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dialogflow_v2.types.Context: Dialogflow contexts are similar to natural language context. If a person says to you "they are orange", you need context in order to understand what "they" is referring to. 
Similarly, for Dialogflow to handle an end-user expression like that, it needs to be provided with context in order to correctly match an intent. Using contexts, you can control the flow of a conversation. You can configure contexts for an intent by setting input and output contexts, which are identified by string names. When an intent is matched, any configured output contexts for that intent become active. While any contexts are active, Dialogflow is more likely to match intents that are configured with input contexts that correspond to the currently active contexts. For more information about context, see the [Contexts guide](\ https://cloud.google.com/dialogflow/docs/contexts-overview). """ # Create or coerce a protobuf request object. # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([context, update_mask]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a gcd_context.UpdateContextRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, gcd_context.UpdateContextRequest): request = gcd_context.UpdateContextRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if context is not None: request.context = context if update_mask is not None: request.update_mask = update_mask # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.update_context] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata( (("context.name", request.context.name),) ), ) # Send the request. response = rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Done; return the response. return response def delete_context( self, request: Union[context.DeleteContextRequest, dict] = None, *, name: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes the specified context. .. code-block:: python from google.cloud import dialogflow_v2 def sample_delete_context(): # Create a client client = dialogflow_v2.ContextsClient() # Initialize request argument(s) request = dialogflow_v2.DeleteContextRequest( name="name_value", ) # Make the request client.delete_context(request=request) Args: request (Union[google.cloud.dialogflow_v2.types.DeleteContextRequest, dict]): The request object. The request message for [Contexts.DeleteContext][google.cloud.dialogflow.v2.Contexts.DeleteContext]. name (str): Required. The name of the context to delete. Format: ``projects/<Project ID>/agent/sessions/<Session ID>/contexts/<Context ID>`` or ``projects/<Project ID>/agent/environments/<Environment ID>/users/<User ID>/sessions/<Session ID>/contexts/<Context ID>``. If ``Environment ID`` is not specified, we assume default 'draft' environment. If ``User ID`` is not specified, we assume default '-' user. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. 
timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a context.DeleteContextRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, context.DeleteContextRequest): request = context.DeleteContextRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.delete_context] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def delete_all_contexts( self, request: Union[context.DeleteAllContextsRequest, dict] = None, *, parent: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes all active contexts in the specified session. .. code-block:: python from google.cloud import dialogflow_v2 def sample_delete_all_contexts(): # Create a client client = dialogflow_v2.ContextsClient() # Initialize request argument(s) request = dialogflow_v2.DeleteAllContextsRequest( parent="parent_value", ) # Make the request client.delete_all_contexts(request=request) Args: request (Union[google.cloud.dialogflow_v2.types.DeleteAllContextsRequest, dict]): The request object. The request message for [Contexts.DeleteAllContexts][google.cloud.dialogflow.v2.Contexts.DeleteAllContexts]. parent (str): Required. The name of the session to delete all contexts from. Format: ``projects/<Project ID>/agent/sessions/<Session ID>`` or ``projects/<Project ID>/agent/environments/<Environment ID>/users/<User ID>/sessions/<Session ID>``. If ``Environment ID`` is not specified we assume default 'draft' environment. If ``User ID`` is not specified, we assume default '-' user. This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a context.DeleteAllContextsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. 
if not isinstance(request, context.DeleteAllContextsRequest): request = context.DeleteAllContextsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.delete_all_contexts] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def __enter__(self): return self def __exit__(self, type, value, traceback): """Releases underlying transport's resources. .. warning:: ONLY use as a context manager if the transport is NOT shared with other clients! Exiting the with block will CLOSE the transport and may cause errors in other clients! """ self.transport.close() try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution( "google-cloud-dialogflow", ).version, ) except pkg_resources.DistributionNotFound: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() __all__ = ("ContextsClient",)
[]
[]
[ "GOOGLE_API_USE_MTLS_ENDPOINT", "GOOGLE_API_USE_CLIENT_CERTIFICATE" ]
[]
["GOOGLE_API_USE_MTLS_ENDPOINT", "GOOGLE_API_USE_CLIENT_CERTIFICATE"]
python
2
0
censys/transforms/search_https_body_hash_to_ip.py
import argparse import json import logging import os from censys_maltego import Censys from maltego_trx.transform import DiscoverableTransform log_file_path = os.path.dirname(os.path.realpath(__file__)) log_file_name = 'censys_maltego_transform.log' logging.basicConfig(filename=os.path.join(log_file_path, log_file_name), level=logging.INFO) def get_credentials(): try: credentials_path = os.path.dirname(os.path.realpath(__file__)) credentials_file = '.env' cred_dict = None with open(os.path.join(credentials_path, credentials_file), 'r') as creds_file: cred_dict = json.loads(creds_file.read()) return cred_dict except Exception as e: logging.critical("Please enter your credentials in the .env file in the transforms directory.") raise e class search_https_body_hash_to_ip(DiscoverableTransform): @classmethod def create_entities(cls, request, response): env_config = get_credentials() api_id, api_secret = env_config.get('censys_api_id'), env_config.get('censys_api_secret') ev = request.Value ep = request.Properties response = Censys(api_id, api_secret, max_pages=env_config.get('max_pages', 1)).search_https_body_hash_to_ip( body_text=ev, object_properties=ep ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--censys_api_id", required=False, metavar='XXXXXXXXXX', help="(optional) You must provide your Censys API ID here or as an environmental variable CENSYS_API_ID") parser.add_argument("--censys_api_secret", required=False, metavar='XXXXXXXXXX', help="(optional) You must provide your Censys API SECRET here or as an environmental variable CENSYS_API_SECRET") parser.add_argument('entity_value') parser.add_argument('entity_properties') args = parser.parse_args() censys_api_id = os.getenv('CENSYS_API_ID', args.censys_api_id) censys_api_secret = os.getenv('CENSYS_API_SECRET', args.censys_api_secret) ev = args.entity_value ep = args.entity_properties Censys(censys_api_id, censys_api_secret).search_https_body_hash_to_ip( body_text=ev, object_properties=ep )
[]
[]
[ "CENSYS_API_SECRET", "CENSYS_API_ID" ]
[]
["CENSYS_API_SECRET", "CENSYS_API_ID"]
python
2
0
migrate/migrate.go
package main import ( "database/sql" "fmt" "github.com/joho/godotenv" "github.com/lib/pq" migrate "github.com/rubenv/sql-migrate" "io/ioutil" "log" "os" "path" "strings" "time" ) const migrationsDir = "migrate/migrations" const migrationsTable = "_auth_migration" func init() { migrate.SetTable(migrationsTable) } func cmdNew(args []string) { if len(args) == 0 { log.Fatal("must provide a name for new migration") } var name = strings.Join(args, "_") const migrationTemplate = `-- +migrate Up -- +migrate Down ` fileName := fmt.Sprintf("%s-%s.sql", time.Now().Format("20060102150405"), name) err := os.MkdirAll(migrationsDir, os.ModePerm) if err != nil { log.Fatalf("cannot ensure migrations directory: %s", err) } err = ioutil.WriteFile(path.Join(migrationsDir, fileName), []byte(migrationTemplate), os.ModePerm) if err != nil { log.Fatalf("cannot create new migration file: %s", err) } log.Printf("new migration '%s' created.", fileName) } func cmdUp(args []string) { db := openDB() migrations := &migrate.FileMigrationSource{ Dir: migrationsDir, } n, err := migrate.Exec(db, "postgres", migrations, migrate.Up) log.Printf("applied %d migrations.", n) if err != nil { log.Fatalf("cannot apply all migrations: %s", err) } } func cmdDown(args []string) { db := openDB() migrations := &migrate.FileMigrationSource{ Dir: migrationsDir, } n, err := migrate.Exec(db, "postgres", migrations, migrate.Down) log.Printf("reverted %d migrations.", n) if err != nil { log.Fatalf("cannot revert all migrations: %s", err) } } func cmdStatus(args []string) { db := openDB() records, err := migrate.GetMigrationRecords(db, "postgres") if err != nil { log.Fatalf("cannot get migration records: %s", err) } for _, record := range records { log.Printf("%s %s", record.AppliedAt.Format(time.RFC3339), record.Id) } } func openDB() *sql.DB { dbURL := os.Getenv("DATABASE_URL") if dbURL == "" { log.Fatal("environment variable DATABASE_URL must be set") } dbSchema := os.Getenv("DATABASE_SCHEMA") db, err := sql.Open("postgres", dbURL) if err != nil { log.Fatalf("cannot open database: %s", err) } if dbSchema != "" { _, err = db.Exec(fmt.Sprintf("SET search_path TO %s", pq.QuoteIdentifier(dbSchema))) if err != nil { log.Fatalf("cannot set search_path: %s", err) } } return db } var commands = map[string]func(args []string){ "new": cmdNew, "up": cmdUp, "down": cmdDown, "status": cmdStatus, } func usage() { log.Print("usage:") log.Print(" migrate new <name>") log.Print(" migrate up") log.Print(" migrate down") log.Print(" migrate status") } func main() { log.SetFlags(0) err := godotenv.Load() if err != nil { log.Fatalf("failed to load .env: %s", err) } if len(os.Args) < 2 { log.Print("must provide sub-command") usage() os.Exit(1) } key := os.Args[1] cmd, ok := commands[key] if !ok { log.Printf("unrecognized sub-command: %s", key) usage() os.Exit(1) } cmd(os.Args[2:]) }
[ "\"DATABASE_URL\"", "\"DATABASE_SCHEMA\"" ]
[]
[ "DATABASE_URL", "DATABASE_SCHEMA" ]
[]
["DATABASE_URL", "DATABASE_SCHEMA"]
go
2
0
test/functional/test_runner.py
#!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Run regression test suite. This module calls down into individual test cases via subprocess. It will forward all unrecognized arguments onto the individual test scripts. Functional tests are disabled on Windows by default. Use --force to run them anyway. For a description of arguments recognized by test scripts, see `test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`. """ import argparse import configparser import datetime import os import time import shutil import signal import sys import subprocess import tempfile import re import logging # Formatting. Default colors to empty strings. BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "") try: # Make sure python thinks it can write unicode to its stdout "\u2713".encode("utf_8").decode(sys.stdout.encoding) TICK = "✓ " CROSS = "✖ " CIRCLE = "○ " except UnicodeDecodeError: TICK = "P " CROSS = "x " CIRCLE = "o " if os.name == 'posix': # primitive formatting on supported # terminal via ANSI escape sequences: BOLD = ('\033[0m', '\033[1m') BLUE = ('\033[0m', '\033[0;34m') RED = ('\033[0m', '\033[0;31m') GREY = ('\033[0m', '\033[1;30m') TEST_EXIT_PASSED = 0 TEST_EXIT_SKIPPED = 77 BASE_SCRIPTS= [ # Scripts that are run by the travis build process. # Longest test should go first, to favor running tests in parallel 'dip3-deterministicmns.py', # NOTE: needs xazab_hash to pass 'wallet-hd.py', 'walletbackup.py', # vv Tests less than 5m vv 'p2p-fullblocktest.py', # NOTE: needs xazab_hash to pass 'fundrawtransaction.py', 'fundrawtransaction-hd.py', # vv Tests less than 2m vv 'p2p-instantsend.py', 'wallet.py', 'wallet-accounts.py', 'wallet-dump.py', 'listtransactions.py', 'multikeysporks.py', 'llmq-signing.py', # NOTE: needs xazab_hash to pass 'llmq-chainlocks.py', # NOTE: needs xazab_hash to pass 'llmq-simplepose.py', # NOTE: needs xazab_hash to pass 'llmq-is-cl-conflicts.py', # NOTE: needs xazab_hash to pass 'llmq-is-retroactive.py', # NOTE: needs xazab_hash to pass 'llmq-dkgerrors.py', # NOTE: needs xazab_hash to pass 'dip4-coinbasemerkleroots.py', # NOTE: needs xazab_hash to pass # vv Tests less than 60s vv 'sendheaders.py', # NOTE: needs xazab_hash to pass 'zapwallettxes.py', 'importmulti.py', 'mempool_limit.py', 'merkle_blocks.py', 'receivedby.py', 'abandonconflict.py', 'bip68-112-113-p2p.py', 'rawtransactions.py', 'reindex.py', # vv Tests less than 30s vv 'keypool-topup.py', 'zmq_test.py', 'bitcoin_cli.py', 'mempool_resurrect_test.py', 'txn_doublespend.py --mineblock', 'txn_clone.py', 'getchaintips.py', 'rest.py', 'mempool_spendcoinbase.py', 'mempool_reorg.py', 'mempool_persist.py', 'multiwallet.py', 'httpbasics.py', 'multi_rpc.py', 'proxy_test.py', 'signrawtransactions.py', 'disconnect_ban.py', 'addressindex.py', 'timestampindex.py', 'spentindex.py', 'decodescript.py', 'blockchain.py', 'disablewallet.py', 'net.py', 'keypool.py', 'keypool-hd.py', 'p2p-mempool.py', 'prioritise_transaction.py', 'invalidblockrequest.py', # NOTE: needs xazab_hash to pass 'invalidtxrequest.py', # NOTE: needs xazab_hash to pass 'p2p-versionbits-warning.py', 'preciousblock.py', 'importprunedfunds.py', 'signmessages.py', 'nulldummy.py', 'import-rescan.py', 'mining.py', 'rpcnamedargs.py', 'listsinceblock.py', 'p2p-leaktests.py', 'p2p-compactblocks.py', 'sporks.py', 'rpc_getblockstats.py', 'p2p-fingerprint.py', 
'wallet-encryption.py', 'bipdersig-p2p.py', 'bip65-cltv-p2p.py', 'uptime.py', 'resendwallettransactions.py', 'minchainwork.py', 'p2p-acceptblock.py', # NOTE: needs xazab_hash to pass 'feature_shutdown.py', ] EXTENDED_SCRIPTS = [ # These tests are not run by the travis build process. # Longest test should go first, to favor running tests in parallel 'pruning.py', # NOTE: Prune mode is incompatible with -txindex, should work in litemode though. # vv Tests less than 20m vv 'smartfees.py', # vv Tests less than 5m vv 'maxuploadtarget.py', 'mempool_packages.py', 'dbcrash.py', # vv Tests less than 2m vv 'bip68-sequence.py', 'getblocktemplate_longpoll.py', # FIXME: "socket.error: [Errno 54] Connection reset by peer" on my Mac, same as https://github.com/bitcoin/bitcoin/issues/6651 'p2p-timeouts.py', # vv Tests less than 60s vv 'bip9-softforks.py', 'rpcbind_test.py', # vv Tests less than 30s vv 'assumevalid.py', 'example_test.py', 'txn_doublespend.py', 'txn_clone.py --mineblock', 'txindex.py', 'forknotify.py', 'invalidateblock.py', ] # Place EXTENDED_SCRIPTS first since it has the 3 longest running tests ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS NON_SCRIPTS = [ # These are python files that live in the functional tests directory, but are not test scripts. "combine_logs.py", "create_cache.py", "test_runner.py", ] def main(): # Parse arguments and pass through unrecognised args parser = argparse.ArgumentParser(add_help=False, usage='%(prog)s [test_runner.py options] [script options] [scripts]', description=__doc__, epilog=''' Help text and arguments for individual test script:''', formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface') parser.add_argument('--exclude', '-x', help='specify a comma-seperated-list of scripts to exclude.') parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests') parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).') parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit') parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.') parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs') parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.') parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs") args, unknown_args = parser.parse_known_args() # args to be passed on always start with two xazabes; tests are the remaining unknown args tests = [arg for arg in unknown_args if arg[:2] != "--"] passon_args = [arg for arg in unknown_args if arg[:2] == "--"] # Read config generated by configure. 
config = configparser.ConfigParser() configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini" config.read_file(open(configfile)) passon_args.append("--configfile=%s" % configfile) # Set up logging logging_level = logging.INFO if args.quiet else logging.DEBUG logging.basicConfig(format='%(message)s', level=logging_level) # Create base test directory tmpdir = "%s/xazab_test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S")) os.makedirs(tmpdir) logging.debug("Temporary test directory at %s" % tmpdir) enable_wallet = config["components"].getboolean("ENABLE_WALLET") enable_utils = config["components"].getboolean("ENABLE_UTILS") enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND") if config["environment"]["EXEEXT"] == ".exe" and not args.force: # https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9 # https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964 print("Tests currently disabled on Windows by default. Use --force option to enable") sys.exit(0) if not (enable_wallet and enable_utils and enable_bitcoind): print("No functional tests to run. Wallet, utils, and bitcoind must all be enabled") print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make") sys.exit(0) # Build list of tests if tests: # Individual tests have been specified. Run specified tests that exist # in the ALL_SCRIPTS list. Accept the name with or without .py extension. tests = [re.sub("\.py$", "", t) + ".py" for t in tests] test_list = [] for t in tests: if t in ALL_SCRIPTS: test_list.append(t) else: print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], t)) else: # No individual tests have been specified. # Run all base tests, and optionally run extended tests. test_list = BASE_SCRIPTS if args.extended: # place the EXTENDED_SCRIPTS first since the three longest ones # are there and the list is shorter test_list = EXTENDED_SCRIPTS + test_list # Remove the test cases that the user has explicitly asked to exclude. if args.exclude: tests_excl = [re.sub("\.py$", "", t) + ".py" for t in args.exclude.split(',')] for exclude_test in tests_excl: if exclude_test in test_list: test_list.remove(exclude_test) else: print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test)) if not test_list: print("No valid test scripts specified. Check that your test is in one " "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests") sys.exit(0) if args.help: # Print help for test_runner.py, then print help of the first script (with args removed) and exit. parser.print_help() subprocess.check_call([(config["environment"]["SRCDIR"] + '/test/functional/' + test_list[0].split()[0])] + ['-h']) sys.exit(0) check_script_list(config["environment"]["SRCDIR"]) if not args.keepcache: shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True) run_tests(test_list, config["environment"]["SRCDIR"], config["environment"]["BUILDDIR"], config["environment"]["EXEEXT"], tmpdir, args.jobs, args.coverage, passon_args) def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_coverage=False, args=[]): # Warn if xazabd is already running (unix only) try: pidof_output = subprocess.check_output(["pidof", "xazabd"]) if not (pidof_output is None or pidof_output == b''): print("%sWARNING!%s There is already a xazabd process running on this system. 
Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0])) except (OSError, subprocess.SubprocessError): pass # Warn if there is a cache directory cache_dir = "%s/test/cache" % build_dir if os.path.isdir(cache_dir): print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir)) #Set env vars if "BITCOIND" not in os.environ: os.environ["BITCOIND"] = build_dir + '/src/xazabd' + exeext os.environ["BITCOINCLI"] = build_dir + '/src/xazab-cli' + exeext tests_dir = src_dir + '/test/functional/' flags = ["--srcdir={}/src".format(build_dir)] + args flags.append("--cachedir=%s" % cache_dir) if enable_coverage: coverage = RPCCoverage() flags.append(coverage.flag) logging.debug("Initializing coverage directory at %s" % coverage.dir) else: coverage = None if len(test_list) > 1 and jobs > 1: # Populate cache subprocess.check_output([tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir]) #Run Tests job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags) time0 = time.time() test_results = [] max_len_name = len(max(test_list, key=len)) for _ in range(len(test_list)): test_result, stdout, stderr = job_queue.get_next() test_results.append(test_result) if test_result.status == "Passed": logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], test_result.name, BOLD[0], test_result.time)) elif test_result.status == "Skipped": logging.debug("\n%s%s%s skipped" % (BOLD[1], test_result.name, BOLD[0])) else: print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], test_result.name, BOLD[0], test_result.time)) print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n') print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n') print_results(test_results, max_len_name, (int(time.time() - time0))) if coverage: coverage.report_rpc_coverage() logging.debug("Cleaning up coverage data") coverage.cleanup() # Clear up the temp directory if all subdirectories are gone if not os.listdir(tmpdir): os.rmdir(tmpdir) all_passed = all(map(lambda test_result: test_result.was_successful, test_results)) sys.exit(not all_passed) def print_results(test_results, max_len_name, runtime): results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0] test_results.sort(key=lambda result: result.name.lower()) all_passed = True time_sum = 0 for test_result in test_results: all_passed = all_passed and test_result.was_successful time_sum += test_result.time test_result.padding = max_len_name results += str(test_result) status = TICK + "Passed" if all_passed else CROSS + "Failed" results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0] results += "Runtime: %s s\n" % (runtime) print(results) class TestHandler: """ Trigger the testscrips passed in via the list. """ def __init__(self, num_tests_parallel, tests_dir, tmpdir, test_list=None, flags=None): assert(num_tests_parallel >= 1) self.num_jobs = num_tests_parallel self.tests_dir = tests_dir self.tmpdir = tmpdir self.test_list = test_list self.flags = flags self.num_running = 0 # In case there is a graveyard of zombie bitcoinds, we can apply a # pseudorandom offset to hopefully jump over them. 
# (625 is PORT_RANGE/MAX_NODES) self.portseed_offset = int(time.time() * 1000) % 625 self.jobs = [] def get_next(self): while self.num_running < self.num_jobs and self.test_list: # Add tests self.num_running += 1 t = self.test_list.pop(0) portseed = len(self.test_list) + self.portseed_offset portseed_arg = ["--portseed={}".format(portseed)] log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16) log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16) test_argv = t.split() tmpdir = ["--tmpdir=%s/%s_%s" % (self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)] self.jobs.append((t, time.time(), subprocess.Popen([self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir, universal_newlines=True, stdout=log_stdout, stderr=log_stderr), log_stdout, log_stderr)) if not self.jobs: raise IndexError('pop from empty list') while True: # Return first proc that finishes time.sleep(.5) for j in self.jobs: (name, time0, proc, log_out, log_err) = j if os.getenv('TRAVIS') == 'true' and int(time.time() - time0) > 20 * 60: # In travis, timeout individual tests after 20 minutes (to stop tests hanging and not # providing useful output. proc.send_signal(signal.SIGINT) if proc.poll() is not None: log_out.seek(0), log_err.seek(0) [stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)] log_out.close(), log_err.close() if proc.returncode == TEST_EXIT_PASSED and stderr == "": status = "Passed" elif proc.returncode == TEST_EXIT_SKIPPED: status = "Skipped" else: status = "Failed" self.num_running -= 1 self.jobs.remove(j) return TestResult(name, status, int(time.time() - time0)), stdout, stderr print('.', end='', flush=True) class TestResult(): def __init__(self, name, status, time): self.name = name self.status = status self.time = time self.padding = 0 def __repr__(self): if self.status == "Passed": color = BLUE glyph = TICK elif self.status == "Failed": color = RED glyph = CROSS elif self.status == "Skipped": color = GREY glyph = CIRCLE return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0] @property def was_successful(self): return self.status != "Failed" def check_script_list(src_dir): """Check scripts directory. Check that there are no scripts in the functional tests directory which are not being run by pull-tester.py.""" script_dir = src_dir + '/test/functional/' python_files = set([t for t in os.listdir(script_dir) if t[-3:] == ".py"]) missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS))) if len(missed_tests) != 0: print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests))) if os.getenv('TRAVIS') == 'true': # On travis this warning is an error to prevent merging incomplete commits into master sys.exit(1) class RPCCoverage(object): """ Coverage reporting utilities for test_runner. Coverage calculation works by having each test script subprocess write coverage files into a particular directory. These files contain the RPC commands invoked during testing, as well as a complete listing of RPC commands per `bitcoin-cli help` (`rpc_interface.txt`). After all tests complete, the commands run are combined and diff'd against the complete list to calculate uncovered RPC commands. 
See also: test/functional/test_framework/coverage.py """ def __init__(self): self.dir = tempfile.mkdtemp(prefix="coverage") self.flag = '--coveragedir=%s' % self.dir def report_rpc_coverage(self): """ Print out RPC commands that were unexercised by tests. """ uncovered = self._get_uncovered_rpc_commands() if uncovered: print("Uncovered RPC commands:") print("".join((" - %s\n" % i) for i in sorted(uncovered))) else: print("All RPC commands covered.") def cleanup(self): return shutil.rmtree(self.dir) def _get_uncovered_rpc_commands(self): """ Return a set of currently untested RPC commands. """ # This is shared from `test/functional/test-framework/coverage.py` reference_filename = 'rpc_interface.txt' coverage_file_prefix = 'coverage.' coverage_ref_filename = os.path.join(self.dir, reference_filename) coverage_filenames = set() all_cmds = set() covered_cmds = set() if not os.path.isfile(coverage_ref_filename): raise RuntimeError("No coverage reference found") with open(coverage_ref_filename, 'r') as f: all_cmds.update([i.strip() for i in f.readlines()]) for root, dirs, files in os.walk(self.dir): for filename in files: if filename.startswith(coverage_file_prefix): coverage_filenames.add(os.path.join(root, filename)) for filename in coverage_filenames: with open(filename, 'r') as f: covered_cmds.update([i.strip() for i in f.readlines()]) return all_cmds - covered_cmds if __name__ == '__main__': main()
[]
[]
[ "BITCOINCLI", "TRAVIS", "BITCOIND" ]
[]
["BITCOINCLI", "TRAVIS", "BITCOIND"]
python
3
0
lib/analogues/conf.py
import os import sys sys.path.append("/usr/local/apps/grib_api/1.15.0/GNU/4.8.1/lib/python2.7/site-packages/gribapi") sys.path.append("/usr/local/apps/python/2.7.8-01/lib/python2.7/site-packages") try: import ecmwfapi except: pass import numpy as np import tempfile try: from grib import GribFile except: pass ANALOGUES_HOME = os.environ.get("ANALOGUES_HOME", os.path.expanduser("~/Dropbox/phd")) ANALOGUES_CACHE = os.environ.get("ANALOGUES_CACHE", os.path.expanduser("/tmp")) ANALOGUES_FILES = os.environ.get("ANALOGUES_FILES", os.path.expanduser("/tmp")) # MODE = {('2t', 3): 'meanz_19.03', # ('msl', 3): 'meanz_32.18', # ('sd', 3): 'meanz_0.13', # ('sp', 3): 'meanz_19.8', # ('tp', 3): 'meanz_0.25', # ('ws', 3): 'meanz_0.63', # ('z500', 3): 'meanz_13.05', # ('z850', 3): 'meanz_4.81'} """ MODE = {('2t', 3): 'meanw_1.07', (167, 0, 3): 'meanw_1.07', ('msl', 3): 'meanw_0', (151, 0, 3): 'meanw_0', ('sd', 3): 'meanw_0.000125', (141, 0, 3): 'meanw_0.000125', ('sp', 3): 'meanw_0.00354004', (134, 0, 3): 'meanw_0.00354004', ('tp', 3): 'meanw_0.000125', (228, 0, 3): 'meanw_0.000125', ('ws', 3): 'meanw_1.34687', (10, 0, 3): 'meanw_1.34687', (10, 10, 3): 'meanw_1.34687', (129, 500, 3): 'meanw_0.004', (129, 850, 3): 'meanw_0.00549316', ('z500', 3): 'meanw_0.004', ('z850', 3): 'meanw_0.00549316', (228029, 0, 3): 'meanw_1', ('i10fg', 3): 'meanw_1', (164, 0, 3): 'meanw_1', ('tcc', 3): 'meanw_1', (255, 0, 3): 'meanw_1', ('logtp', 3): 'meanw_1', ('logsf', 3): 'meanw_1', (144, 0, 3): 'meanw_1', ('expsf', 3): 'meanw_1', ('exptp', 3): 'meanw_1', ('expsd', 3): 'meanw_1', ('g1sf', 3): 'meanw_1', ('g1tp', 3): 'meanw_1', ('g1sd', 3): 'meanw_1', ('g2sf', 3): 'meanw_1', ('g2tp', 3): 'meanw_1', ('g2sd', 3): 'meanw_1', (229, 0, 3): 'meanw_1', ('sf', 3): 'meanw_1', ('swh', 3): 'meanw_1', ('10u', 3): 'meanw_1', ('10v', 3): 'meanw_1', (165, 0, 3): 'meanw_1', (166, 0, 3): 'meanw_1', (140229, 0, 3): 'meanw_1', (140229, 3): 'meanw_1', } NAME = {'tp': "Total precipitations", 'ws': "10m wind speed", '2t': "Surface air temperature", 'sp': "Surface pressure", 'msl': "Mean sea level pressure", 'sd': "Snow depth", 'sd': "Snow depth", 'z850': "Geopotential at 850 hPa", 'z500': "Geopotential at 500 hPa"} SCALE = {'ws': (1, 0, "m/s"), '2t': (1, -273.15, "&deg;C"), 'tp': (1000, 0, "mm"), 'g1tp': (1000, 0, "mm"), 'g2tp': (1000, 0, "mm"), 'sd': (100 / 0.2, 0, "cm"), # Assumes %20 density 'g1sd': (100 / 0.2, 0, "cm"), # Assumes %20 density 'g2sd': (100 / 0.2, 0, "cm"), # Assumes %20 density 'sd (exp)': (100 / 0.2, 0, "cm"), # Assumes %20 density 'z500': (0.0101971621297793, 0, "dam"), 'z850': (0.0101971621297793, 0, "dam"), 'msl': (0.01, 0, "hPa"), 'sp': (0.01, 0, "hPa"), 'tcc': (1, 0, '%'), 'i10fg': (1, 0, "m/s"), 'logtp': (1000, 0, "log(1+m)"), 'exptp': (1000, 0, "exp(m)"), 'sf': (1000, 0, 'mm'), 'g1sf': (1000, 0, 'mm'), 'g2sf': (1000, 0, 'mm'), 'logsf': (1000, 0, "log(1+m)"), 'expsf': (1000, 0, "exp(m)"), '10u': (1, 0, "m/s"), '10v': (1, 0, "m/s"), 'swh': (1, 0, "m"), } PARAMS = { 'tp': 228, } def param(param): return PARAMS[param] class DefaultRetriever(object): def execute(self, data): c = ecmwfapi.ECMWFService("mars") f, target = tempfile.mkstemp(".grib") os.close(f) c.execute(retrieve(data), target) field = GribFile(target).next() os.unlink(target) return field class WindRetriever(object): def execute(self, data): c = ecmwfapi.ECMWFService("mars") f, target = tempfile.mkstemp(".grib") os.close(f) c.execute(retrieve(data), target) w = GribFile(target) u = w.next() v = w.next() q = np.sqrt(u.array * u.array + v.array * v.array) 
u.set_array(q) os.unlink(target) return u class PrecipRetriever(object): def execute(self, data): c = ecmwfapi.ECMWFService("mars") f, target = tempfile.mkstemp(".grib") os.close(f) c.execute(retrieve(data), target) w = list(GribFile(target)) if len(w) == 2: cp = w[0] lsp = w[1] q = cp.array + lsp.array cp.set_array(q) field = cp else: field = w[0] os.unlink(target) return field RETRIEVER = {'ws': WindRetriever, 'tp': PrecipRetriever} def mars(data): Retriever = RETRIEVER.get(data['param'], DefaultRetriever) r = Retriever() return r.execute(data) def units(param): u = SCALE[param] return {"scale": u[0], "offset": u[1], "unit": u[2]} """ def params(f=lambda x: x): result = {} for k in SCALE.keys(): result[name(k)] = f(k) return result def variables(): return sorted(SCALE.keys()) def area_around(lat, lon): n, w = int(lat * 2 + 0.5) * 0.5 + 8, int(lon * 2 + 0.5) * 0.5 - 8 a = (n, w, n - 15.5, w + 15.5) return a def wavelet_mode(param, level, depth=None): if depth is None: return MODE[(param, level)] else: return MODE[(param, level, depth)] def name(param): return NAME.get(param, param) def cache_file(*args): return os.path.join(ANALOGUES_FILES, *args) def cache_data(*args): return os.path.join(ANALOGUES_DATA, *args) def analogue_home(*args): return os.path.join(ANALOGUES_HOME, *args) def data_path(*args): return analogue_home('data', *args) def image_path(name): return analogue_home("latex", "images", name) def latex_path(name): return analogue_home("latex", name) def fingerprints_path(*args): return data_path("fingerprints", *args) def validate_path(*args): return data_path("validate", *args) def validate_json(**kwargs): return validate_path("%s/validate-%s-%s-%s-%s-%s.json" % (kwargs['domain'], kwargs.get('distance', 'l2'), kwargs['param'], kwargs['depth'], kwargs.get('wavelet', 'haar'), kwargs['mode'])) def fingerprints_db(**kwargs): return fingerprints_path("%s/%s-%s/%s-%s.db" % (kwargs['domain'], kwargs.get('wavelet', 'haar'), kwargs['mode'], kwargs['param'], kwargs['depth'])) def fingerprints_available(domain): result = [] for n in os.listdir(fingerprints_path(domain)): for p in os.listdir(fingerprints_path(n)): if p.endswith('.db'): q = n.split('-') r = p[:-3].split('-') result.append(dict(wavelet=q[0], domain=domain, mode=q[1], param=r[0], depth=int(r[1]))) return result def validate_available(domain): result = [] for n in os.listdir(validate_path(domain)): if n.endswith(".json") and n.startswith("validate-"): x = n[:-5].split("-") if x[5].endswith('_'): x[5] += '-' + x[6] result.append(dict(distance=x[1], param=x[2], domain=domain, depth=int(x[3]), wavelet=x[4], mode=x[5])) return result #################################################################### if os.path.exists('/Users/baudouin/Dropbox'): from analogues.backend.sqlite import SqliteEngine cdsdb = SqliteEngine() ROOT = '/Users/baudouin/Dropbox/phd/data' CACHE = ROOT else: from analogues.backend.postgres import PostgresEngine cdsdb = PostgresEngine() ROOT = '/cache/analogues' CACHE = '/cache/data'
[]
[]
[ "ANALOGUES_HOME", "ANALOGUES_CACHE", "ANALOGUES_FILES" ]
[]
["ANALOGUES_HOME", "ANALOGUES_CACHE", "ANALOGUES_FILES"]
python
3
0
tests/test_sockets.py
# Copyright 2013 The Emscripten Authors. All rights reserved. # Emscripten is available under two separate licenses, the MIT license and the # University of Illinois/NCSA Open Source License. Both these licenses can be # found in the LICENSE file. import multiprocessing import os import socket import shutil import sys import time from subprocess import Popen, PIPE if __name__ == '__main__': raise Exception('do not run this file directly; do something like: tests/runner sockets') try: import websockify except Exception: # websockify won't successfully import on Windows under Python3, because socketserver.py doesn't export ForkingMixIn. # (On python2, ForkingMixIn was exported but it didn't actually work on Windows). # Swallowing the error here means that this file can always be imported, but won't work if actually used on Windows, # which is the same behavior as before. pass import clang_native from common import BrowserCore, no_windows, create_file, test_file, read_file from tools import shared, config, utils from tools.shared import PYTHON, EMCC, path_from_root, WINDOWS, run_process, CLANG_CC npm_checked = False NPM = os.path.join(os.path.dirname(config.NODE_JS[0]), 'npm.cmd' if WINDOWS else 'npm') def clean_processes(processes): for p in processes: if (not hasattr(p, 'exitcode') or p.exitcode is None) and (not hasattr(p, 'returncode') or p.returncode is None): # ask nicely (to try and catch the children) try: p.terminate() # SIGTERM except OSError: pass time.sleep(1) # send a forcible kill immediately afterwards. If the process did not die before, this should clean it. try: p.terminate() # SIGKILL except OSError: pass class WebsockifyServerHarness(): def __init__(self, filename, args, listen_port, do_server_check=True): self.processes = [] self.filename = filename self.listen_port = listen_port self.target_port = listen_port - 1 self.args = args or [] self.do_server_check = do_server_check def __enter__(self): # compile the server # NOTE empty filename support is a hack to support # the current test_enet if self.filename: proc = run_process([CLANG_CC, test_file(self.filename), '-o', 'server', '-DSOCKK=%d' % self.target_port] + clang_native.get_clang_native_args() + self.args, clang_native.get_clang_native_env(), stdout=PIPE, stderr=PIPE) print('Socket server build: out:', proc.stdout or '', '/ err:', proc.stderr or '') process = Popen([os.path.abspath('server')]) self.processes.append(process) # start the websocket proxy print('running websockify on %d, forward to tcp %d' % (self.listen_port, self.target_port), file=sys.stderr) wsp = websockify.WebSocketProxy(verbose=True, listen_port=self.listen_port, target_host="127.0.0.1", target_port=self.target_port, run_once=True) self.websockify = multiprocessing.Process(target=wsp.start_server) self.websockify.start() self.processes.append(self.websockify) # Make sure both the actual server and the websocket proxy are running for i in range(10): try: if self.do_server_check: server_sock = socket.create_connection(('localhost', self.target_port), timeout=1) server_sock.close() proxy_sock = socket.create_connection(('localhost', self.listen_port), timeout=1) proxy_sock.close() break except IOError: time.sleep(1) else: clean_processes(self.processes) raise Exception('[Websockify failed to start up in a timely manner]') print('[Websockify on process %s]' % str(self.processes[-2:])) def __exit__(self, *args, **kwargs): # try to kill the websockify proxy gracefully if self.websockify.is_alive(): self.websockify.terminate() self.websockify.join() # 
clean up any processes we started clean_processes(self.processes) class CompiledServerHarness(): def __init__(self, filename, args, listen_port): self.processes = [] self.filename = filename self.listen_port = listen_port self.args = args or [] def __enter__(self): # assuming this is only used for WebSocket tests at the moment, validate that # the ws module is installed global npm_checked if not npm_checked: child = run_process(config.NODE_JS + ['-e', 'require("ws");'], check=False) assert child.returncode == 0, '"ws" node module not found. you may need to run npm install' npm_checked = True # compile the server proc = run_process([EMCC, '-Werror', test_file(self.filename), '-o', 'server.js', '-DSOCKK=%d' % self.listen_port] + self.args) print('Socket server build: out:', proc.stdout or '', '/ err:', proc.stderr or '') process = Popen(config.NODE_JS + ['server.js']) self.processes.append(process) def __exit__(self, *args, **kwargs): # clean up any processes we started clean_processes(self.processes) # always run these tests last # make sure to use different ports in each one because it takes a while for the processes to be cleaned up # Executes a native executable server process class BackgroundServerProcess(): def __init__(self, args): self.processes = [] self.args = args def __enter__(self): print('Running background server: ' + str(self.args)) process = Popen(self.args) self.processes.append(process) def __exit__(self, *args, **kwargs): clean_processes(self.processes) def NodeJsWebSocketEchoServerProcess(): return BackgroundServerProcess(config.NODE_JS + [test_file('websocket', 'nodejs_websocket_echo_server.js')]) def PythonTcpEchoServerProcess(port): return BackgroundServerProcess([PYTHON, test_file('websocket', 'tcp_echo_server.py'), port]) class sockets(BrowserCore): emcc_args = [] @classmethod def setUpClass(cls): super().setUpClass() print() print('Running the socket tests. Make sure the browser allows popups from localhost.') print() # Use emscripten root for node module lookup. This is needed because the unit tests each # run with CWD set to a temporary directory outside the emscripten tree. print('Setting NODE_PATH=' + path_from_root('node_modules')) os.environ['NODE_PATH'] = path_from_root('node_modules') def test_sockets_echo(self, extra_args=[]): sockets_include = '-I' + test_file('sockets') # Note: in the WebsockifyServerHarness and CompiledServerHarness tests below, explicitly use consecutive server listen ports, # because server teardown might not occur deterministically (python dtor time) and is a bit racy. # WebsockifyServerHarness uses two port numbers, x and x-1, so increment it by two. # CompiledServerHarness only uses one. Start with 49160 & 49159 as the first server port addresses. If adding new tests, # increment the used port addresses below. # Websockify-proxied servers can't run dgram tests harnesses = [ (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 49161), 0), (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 49162), 1), # The following forces non-NULL addr and addlen parameters for the accept call (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ACCEPT_ADDR=1'], 49163), 0) ] if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows. 
harnesses += [(WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 49160), 0)] for harness, datagram in harnesses: with harness: self.btest(os.path.join('sockets', 'test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram, sockets_include]) def test_sockets_echo_pthreads(self, extra_args=[]): self.test_sockets_echo(['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']) def test_sdl2_sockets_echo(self): harness = CompiledServerHarness('sdl2_net_server.c', ['-s', 'USE_SDL=2', '-s', 'USE_SDL_NET=2'], 49164) with harness: self.btest('sdl2_net_client.c', expected='0', args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_NET=2', '-DSOCKK=%d' % harness.listen_port]) def test_sockets_async_echo(self): sockets_include = '-I' + test_file('sockets') # Websockify-proxied servers can't run dgram tests harnesses = [ (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ASYNC=1'], 49167), 0), (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1', '-DTEST_ASYNC=1'], 49168), 1), # The following forces non-NULL addr and addlen parameters for the accept call (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ACCEPT_ADDR=1', '-DTEST_ASYNC=1'], 49169), 0) ] if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows. harnesses += [(WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_ASYNC=1'], 49166), 0)] for harness, datagram in harnesses: print('harness:', harness) with harness: self.btest(os.path.join('sockets', 'test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram, '-DTEST_ASYNC=1', sockets_include]) # Deliberately attempt a connection on a port that will fail to test the error callback and getsockopt print('expect fail') self.btest(os.path.join('sockets', 'test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=49169', '-DTEST_ASYNC=1', sockets_include]) def test_sockets_echo_bigdata(self): sockets_include = '-I' + test_file('sockets') # generate a large string literal to use as our message message = '' for i in range(256 * 256 * 2): message += str(chr(ord('a') + (i % 26))) # re-write the client test with this literal (it's too big to pass via command line) input_filename = test_file('sockets', 'test_sockets_echo_client.c') input = read_file(input_filename) create_file('test_sockets_echo_bigdata.c', input.replace('#define MESSAGE "pingtothepong"', '#define MESSAGE "%s"' % message)) harnesses = [ (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 49172), 0), (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 49173), 1) ] if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows. 
harnesses += [(WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 49171), 0)] for harness, datagram in harnesses: with harness: self.btest('test_sockets_echo_bigdata.c', expected='0', args=[sockets_include, '-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram]) @no_windows('This test is Unix-specific.') def test_sockets_partial(self): for harness in [ WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_partial_server.c'), [], 49180), CompiledServerHarness(os.path.join('sockets', 'test_sockets_partial_server.c'), [], 49181) ]: with harness: self.btest_exit(os.path.join('sockets', 'test_sockets_partial_client.c'), assert_returncode=165, args=['-DSOCKK=%d' % harness.listen_port]) @no_windows('This test is Unix-specific.') def test_sockets_select_server_down(self): for harness in [ WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_select_server_down_server.c'), [], 49190, do_server_check=False), CompiledServerHarness(os.path.join('sockets', 'test_sockets_select_server_down_server.c'), [], 49191) ]: with harness: self.btest(os.path.join('sockets', 'test_sockets_select_server_down_client.c'), expected='266', args=['-DSOCKK=%d' % harness.listen_port]) @no_windows('This test is Unix-specific.') def test_sockets_select_server_closes_connection_rw(self): sockets_include = '-I' + test_file('sockets') for harness in [ WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DCLOSE_CLIENT_AFTER_ECHO'], 49200), CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DCLOSE_CLIENT_AFTER_ECHO'], 49201) ]: with harness: self.btest(os.path.join('sockets', 'test_sockets_select_server_closes_connection_client_rw.c'), expected='266', args=[sockets_include, '-DSOCKK=%d' % harness.listen_port]) @no_windows('This test uses Unix-specific build architecture.') def test_enet(self): # this is also a good test of raw usage of emconfigure and emmake shared.try_delete('enet') shutil.copytree(test_file('third_party', 'enet'), 'enet') with utils.chdir('enet'): self.run_process([path_from_root('emconfigure'), './configure', '--disable-shared']) self.run_process([path_from_root('emmake'), 'make']) enet = [self.in_dir('enet', '.libs', 'libenet.a'), '-I' + self.in_dir('enet', 'include')] for harness in [ CompiledServerHarness(os.path.join('sockets', 'test_enet_server.c'), enet, 49210) ]: with harness: self.btest(os.path.join('sockets', 'test_enet_client.c'), expected='0', args=enet + ['-DSOCKK=%d' % harness.listen_port]) def test_nodejs_sockets_echo(self): # This test checks that sockets work when the client code is run in Node.js if config.NODE_JS not in config.JS_ENGINES: self.skipTest('node is not present') sockets_include = '-I' + test_file('sockets') harnesses = [ (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 59162), 0), (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 59164), 1) ] if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows. harnesses += [(WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59160), 0)] # Basic test of node client against both a Websockified and compiled echo server. 
for harness, datagram in harnesses: with harness: self.run_process([EMCC, '-Werror', test_file('sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram], stdout=PIPE, stderr=PIPE) out = self.run_js('client.js') self.assertContained('do_msg_read: read 14 bytes', out) if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows. # Test against a Websockified server with compile time configured WebSocket subprotocol. We use a Websockified # server because as long as the subprotocol list contains binary it will configure itself to accept binary # This test also checks that the connect url contains the correct subprotocols. print("\nTesting compile time WebSocket configuration.\n") for harness in [ WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59166) ]: with harness: self.run_process([EMCC, '-Werror', test_file('sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '-s', 'SOCKET_DEBUG', '-s', 'WEBSOCKET_SUBPROTOCOL="base64, binary"', '-DSOCKK=59166'], stdout=PIPE, stderr=PIPE) out = self.run_js('client.js') self.assertContained('do_msg_read: read 14 bytes', out) self.assertContained(['connect: ws://127.0.0.1:59166, base64,binary', 'connect: ws://127.0.0.1:59166/, base64,binary'], out) # Test against a Websockified server with runtime WebSocket configuration. We specify both url and subprotocol. # In this test we have *deliberately* used the wrong port '-DSOCKK=12345' to configure the echo_client.c, so # the connection would fail without us specifying a valid WebSocket URL in the configuration. print("\nTesting runtime WebSocket configuration.\n") for harness in [ WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59168) ]: with harness: open(os.path.join(self.get_dir(), 'websocket_pre.js'), 'w').write(''' var Module = { websocket: { url: 'ws://localhost:59168/testA/testB', subprotocol: 'text, base64, binary', } }; ''') self.run_process([EMCC, '-Werror', test_file('sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '--pre-js', 'websocket_pre.js', '-s', 'SOCKET_DEBUG', '-DSOCKK=12345'], stdout=PIPE, stderr=PIPE) out = self.run_js('client.js') self.assertContained('do_msg_read: read 14 bytes', out) self.assertContained('connect: ws://localhost:59168/testA/testB, text,base64,binary', out) # Test Emscripten WebSockets API to send and receive text and binary messages against an echo server. # N.B. running this test requires 'npm install ws' in Emscripten root directory def test_websocket_send(self): with NodeJsWebSocketEchoServerProcess(): self.btest(test_file('websocket', 'test_websocket_send.c'), expected='101', args=['-lwebsocket', '-s', 'NO_EXIT_RUNTIME', '-s', 'WEBSOCKET_DEBUG']) # Test that native POSIX sockets API can be used by proxying calls to an intermediate WebSockets -> POSIX sockets bridge server def test_posix_proxy_sockets(self): # Build the websocket bridge server self.run_process(['cmake', path_from_root('tools/websocket_to_posix_proxy')]) self.run_process(['cmake', '--build', '.']) if os.name == 'nt': # This is not quite exact, instead of "isWindows()" this should be "If CMake defaults to building with Visual Studio", but there is no good check for that, so assume Windows==VS. 
proxy_server = os.path.join(self.get_dir(), 'Debug', 'websocket_to_posix_proxy.exe') else: proxy_server = os.path.join(self.get_dir(), 'websocket_to_posix_proxy') with BackgroundServerProcess([proxy_server, '8080']): with PythonTcpEchoServerProcess('7777'): # Build and run the TCP echo client program with Emscripten self.btest(test_file('websocket', 'tcp_echo_client.cpp'), expected='101', args=['-lwebsocket', '-s', 'PROXY_POSIX_SOCKETS', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
[]
[]
[ "NODE_PATH" ]
[]
["NODE_PATH"]
python
1
0
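The clean_processes() helper in tests/test_sockets.py above first asks each child process to exit and then sends a second signal a moment later; the forcible step is approximated by calling terminate() twice. A minimal standalone sketch of that graceful-then-forceful shutdown pattern follows. It is not part of the Emscripten harness: it uses Popen.kill() for the second step, and the one-second grace period is an assumption taken from the original comment.

import subprocess
import time

def stop_process(proc, grace_period=1.0):
    # Ask the subprocess to exit, then force-kill it if it is still alive.
    if proc.poll() is not None:
        return  # already exited
    try:
        proc.terminate()   # SIGTERM: ask nicely
    except OSError:
        pass
    time.sleep(grace_period)
    if proc.poll() is None:
        try:
            proc.kill()    # SIGKILL: forcible cleanup
        except OSError:
            pass

# usage sketch
proc = subprocess.Popen(["sleep", "30"])
stop_process(proc)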
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/example_changes.go
// +build ignore // Simple tool to create an archive stream from an old and new directory // // By default it will stream the comparison of two temporary directories with junk files package main import ( "flag" "fmt" "io" "io/ioutil" "os" "path" "github.com/austenLacy/docker-inspect/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" "github.com/austenLacy/docker-inspect/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive" ) var ( flDebug = flag.Bool("D", false, "debugging output") flNewDir = flag.String("newdir", "", "") flOldDir = flag.String("olddir", "", "") log = logrus.New() ) func main() { flag.Usage = func() { fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)") fmt.Printf("%s [OPTIONS]\n", os.Args[0]) flag.PrintDefaults() } flag.Parse() log.Out = os.Stderr if (len(os.Getenv("DEBUG")) > 0) || *flDebug { logrus.SetLevel(logrus.DebugLevel) } var newDir, oldDir string if len(*flNewDir) == 0 { var err error newDir, err = ioutil.TempDir("", "docker-test-newDir") if err != nil { log.Fatal(err) } defer os.RemoveAll(newDir) if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { log.Fatal(err) } } else { newDir = *flNewDir } if len(*flOldDir) == 0 { oldDir, err := ioutil.TempDir("", "docker-test-oldDir") if err != nil { log.Fatal(err) } defer os.RemoveAll(oldDir) } else { oldDir = *flOldDir } changes, err := archive.ChangesDirs(newDir, oldDir) if err != nil { log.Fatal(err) } a, err := archive.ExportChanges(newDir, changes) if err != nil { log.Fatal(err) } defer a.Close() i, err := io.Copy(os.Stdout, a) if err != nil && err != io.EOF { log.Fatal(err) } fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) } func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { fileData := []byte("fooo") for n := 0; n < numberOfFiles; n++ { fileName := fmt.Sprintf("file-%d", n) if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { return 0, err } if makeLinks { if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { return 0, err } } } totalSize := numberOfFiles * len(fileData) return totalSize, nil }
[ "\"DEBUG\"" ]
[]
[ "DEBUG" ]
[]
["DEBUG"]
go
1
0
sdk/schemaregistry/azure-schemaregistry-avroencoder/samples/sync_samples/encode_and_decode_event_data_message.py
# -------------------------------------------------------------------------- # # Copyright (c) Microsoft Corporation. All rights reserved. # # The MIT License (MIT) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the ""Software""), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # # -------------------------------------------------------------------------- import os from azure.identity import ClientSecretCredential from azure.schemaregistry import SchemaRegistryClient from azure.schemaregistry.encoder.avroencoder import AvroEncoder from azure.eventhub import EventData TENANT_ID = os.environ["AZURE_TENANT_ID"] CLIENT_ID = os.environ["AZURE_CLIENT_ID"] CLIENT_SECRET = os.environ["AZURE_CLIENT_SECRET"] SCHEMAREGISTRY_FULLY_QUALIFIED_NAMESPACE = os.environ[ "SCHEMAREGISTRY_FULLY_QUALIFIED_NAMESPACE" ] GROUP_NAME = os.environ["SCHEMAREGISTRY_GROUP"] SCHEMA_STRING = """ {"namespace": "example.avro", "type": "record", "name": "User", "fields": [ {"name": "name", "type": "string"}, {"name": "favorite_number", "type": ["int", "null"]}, {"name": "favorite_color", "type": ["string", "null"]} ] }""" token_credential = ClientSecretCredential( tenant_id=TENANT_ID, client_id=CLIENT_ID, client_secret=CLIENT_SECRET ) def encode_to_event_data_message(encoder): dict_data_ben = {"name": "Ben", "favorite_number": 7, "favorite_color": "red"} dict_data_alice = {"name": "Alice", "favorite_number": 15, "favorite_color": "green"} # Schema would be automatically registered into Schema Registry and cached locally. event_data_ben = encoder.encode( dict_data_ben, schema=SCHEMA_STRING, message_type=EventData ) # The second call won't trigger a service call. event_data_alice = encoder.encode( dict_data_alice, schema=SCHEMA_STRING, message_type=EventData ) print("Encoded data is: ", next(event_data_ben.body)) print("Encoded data is: ", next(event_data_alice.body)) print("Encoded content type is: ", event_data_ben.content_type) print("Encoded content type is: ", event_data_alice.content_type) return [event_data_ben, event_data_alice] def decode_event_data_message(encoder, event_data): # encoder.decode would extract the schema id from the content_type, # retrieve schema from Schema Registry and cache the schema locally. # If the schema id is in the local cache, the call won't trigger a service call. 
decoded_data = encoder.decode(event_data) print("Decoded data is: ", decoded_data) return decoded_data if __name__ == "__main__": schema_registry = SchemaRegistryClient( fully_qualified_namespace=SCHEMAREGISTRY_FULLY_QUALIFIED_NAMESPACE, credential=token_credential, ) encoder = AvroEncoder( client=schema_registry, group_name=GROUP_NAME, auto_register_schemas=True ) event_data_ben, event_data_alice = encode_to_event_data_message(encoder) decoded_data_ben = decode_event_data_message(encoder, event_data_ben) decoded_data_alice = decode_event_data_message(encoder, event_data_alice) encoder.close()
[]
[]
[ "AZURE_CLIENT_ID", "SCHEMAREGISTRY_FULLY_QUALIFIED_NAMESPACE\"", "AZURE_CLIENT_SECRET", "AZURE_TENANT_ID", "SCHEMAREGISTRY_GROUP" ]
[]
["AZURE_CLIENT_ID", "SCHEMAREGISTRY_FULLY_QUALIFIED_NAMESPACE\"", "AZURE_CLIENT_SECRET", "AZURE_TENANT_ID", "SCHEMAREGISTRY_GROUP"]
python
5
0
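The Schema Registry sample above reads five required settings directly from os.environ[...], so a missing variable surfaces as a bare KeyError. Below is a small hedged sketch that validates them up front and reports every missing name in one message; load_required_env() is a hypothetical helper, not part of the Azure sample.

import os

REQUIRED_VARS = [
    "AZURE_TENANT_ID",
    "AZURE_CLIENT_ID",
    "AZURE_CLIENT_SECRET",
    "SCHEMAREGISTRY_FULLY_QUALIFIED_NAMESPACE",
    "SCHEMAREGISTRY_GROUP",
]

def load_required_env(names):
    # Collect every missing variable so the user sees them all at once.
    missing = [name for name in names if name not in os.environ]
    if missing:
        raise RuntimeError("missing environment variables: " + ", ".join(missing))
    return {name: os.environ[name] for name in names}

# usage sketch
# settings = load_required_env(REQUIRED_VARS)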
serving/samples/helloworld-go/helloworld.go
/* Copyright 2018 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package main import ( "fmt" "log" "net/http" "os" ) func handler(w http.ResponseWriter, r *http.Request) { log.Print("Hello world received a request.") target := os.Getenv("TARGET") if target == "" { target = "World" } fmt.Fprintf(w, "Hello %s!\n", target) } func main() { log.Print("Hello world sample started.") http.HandleFunc("/", handler) port := os.Getenv("PORT") if port == "" { port = "8080" } log.Fatal(http.ListenAndServe(fmt.Sprintf(":%s", port), nil)) }
[ "\"TARGET\"", "\"PORT\"" ]
[]
[ "PORT", "TARGET" ]
[]
["PORT", "TARGET"]
go
2
0
lusiada/wsgi.py
""" WSGI config for lusiada project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lusiada.settings') application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
model/directorysource.go
// Copyright 2019 Stratumn // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package model import ( "context" "path/filepath" ) // String is a string representation for the type instance. func (n *DirectorySource) String() string { return filepath.Base(n.Directory) } // Sync syncs the Source. func (n *DirectorySource) Sync(ctx context.Context) error { defer func() { n.IsSyncing = false n.MustStore(ctx) }() n.IsSyncing = true n.MustStore(ctx) workspaceIDs, err := SyncWorkspacesInDirectory(ctx, n.Directory, n.ID) if err != nil { return err } n.WorkspacesIDs = workspaceIDs n.MustStore(ctx) return nil }
[]
[]
[]
[]
[]
go
0
0
hsm_software/sw/hsm_tools/cryptech/muxd.py
#!/usr/bin/env python # # Copyright (c) 2016-2017, NORDUnet A/S All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # - Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # - Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # - Neither the name of the NORDUnet nor the names of its contributors may # be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED # TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ Implementation of Cryptech RPC protocol multiplexer in Python. Unlike the original C implementation, this uses SLIP encapsulation over a SOCK_STREAM channel, because support for SOCK_SEQPACKET is not what we might wish. We outsource all the heavy lifting for serial and network I/O to the PySerial and Tornado libraries, respectively. """ import os import sys import time import struct import atexit import weakref import logging import argparse import logging.handlers import serial import serial.tools.list_ports_posix import tornado.tcpserver import tornado.iostream import tornado.netutil import tornado.ioloop import tornado.queues import tornado.locks import tornado.gen from cryptech.libhal import HAL_OK, RPC_FUNC_GET_VERSION, RPC_FUNC_LOGOUT, RPC_FUNC_LOGOUT_ALL logger = logging.getLogger("cryptech_muxd") SLIP_END = chr(0300) # Indicates end of SLIP packet SLIP_ESC = chr(0333) # Indicates byte stuffing SLIP_ESC_END = chr(0334) # ESC ESC_END means END data byte SLIP_ESC_ESC = chr(0335) # ESC ESC_ESC means ESC data byte Control_U = chr(0025) # Console: clear line Control_M = chr(0015) # Console: end of line def slip_encode(buffer): "Encode a buffer using SLIP encapsulation." return SLIP_END + buffer.replace(SLIP_ESC, SLIP_ESC + SLIP_ESC_ESC).replace(SLIP_END, SLIP_ESC + SLIP_ESC_END) + SLIP_END def slip_decode(buffer): "Decode a SLIP-encapsulated buffer." return buffer.strip(SLIP_END).replace(SLIP_ESC + SLIP_ESC_END, SLIP_END).replace(SLIP_ESC + SLIP_ESC_ESC, SLIP_ESC) def client_handle_get(msg): "Extract client_handle field from a Cryptech RPC message." return struct.unpack(">L", msg[4:8])[0] def client_handle_set(msg, handle): "Replace client_handle field in a Cryptech RPC message." return msg[:4] + struct.pack(">L", handle) + msg[8:] logout_msg = struct.pack(">LL", RPC_FUNC_LOGOUT, 0) logout_all_msg = struct.pack(">LL", RPC_FUNC_LOGOUT_ALL, 0) class SerialIOStream(tornado.iostream.BaseIOStream): """ Implementation of a Tornado IOStream over a PySerial device. 
""" # In theory, we want zero (non-blocking mode) for both the read # and write timeouts here so that PySerial will let Tornado handle # all the select()/poll()/epoll()/kqueue() fun, delivering maximum # throughput to all. In practice, this has always worked for the # author, but another developer reports that on some (not all) # platforms this fails consistently with Tornado reporting write # timeout errors, presumably as the result of receiving an IOError # or OSError exception from PySerial. For reasons we don't really # understand, setting a PySerial write timeout on the order of # 50-100 ms "solves" this problem. Again in theory, this will # result in lower throughput if PySerial spends too much time # blocking on a single serial device when Tornado could be doing # something useful elsewhere, but such is life. def __init__(self, device): self.serial = serial.Serial(device, 921600, timeout = 0, write_timeout = 0.1) self.serial_device = device super(SerialIOStream, self).__init__() def fileno(self): return self.serial.fileno() def close_fd(self): self.serial.close() def write_to_fd(self, data): return self.serial.write(data) if tornado.version > "5": # .. versionchanged:: 5.0 # Interface redesigned to take a buffer and return a number # of bytes instead of a freshly-allocated object. def read_from_fd(self, buf): buf[:] = self.serial.read(len(buf)) return len(buf) or None else: def read_from_fd(self): return self.serial.read(self.read_chunk_size) or None class PFUnixServer(tornado.tcpserver.TCPServer): """ Variant on tornado.tcpserver.TCPServer, listening on a PF_UNIX (aka PF_LOCAL) socket instead of a TCP socket. """ def __init__(self, serial_stream, socket_filename, mode = 0600): super(PFUnixServer, self).__init__() self.serial = serial_stream self.socket_filename = socket_filename self.add_socket(tornado.netutil.bind_unix_socket(socket_filename, mode)) atexit.register(self.atexit_unlink) def atexit_unlink(self): try: os.unlink(self.socket_filename) except: pass class RPCIOStream(SerialIOStream): """ Tornado IOStream for a serial RPC channel. """ def __init__(self, device): super(RPCIOStream, self).__init__(device) self.queues = weakref.WeakValueDictionary() self.rpc_input_lock = tornado.locks.Lock() @tornado.gen.coroutine def rpc_input(self, query, handle = 0, queue = None): "Send a query to the HSM." logger.debug("RPC send: %s", ":".join("{:02x}".format(ord(c)) for c in query)) if queue is not None: self.queues[handle] = queue with (yield self.rpc_input_lock.acquire()): yield self.write(query) logger.debug("RPC sent") @tornado.gen.coroutine def rpc_output_loop(self): "Handle reply stream HSM -> network." while True: try: logger.debug("RPC UART read") reply = yield self.read_until(SLIP_END) except tornado.iostream.StreamClosedError: logger.info("RPC UART closed") for q in self.queues.itervalues(): q.put_nowait(None) return logger.debug("RPC recv: %s", ":".join("{:02x}".format(ord(c)) for c in reply)) if reply == SLIP_END: continue try: handle = client_handle_get(slip_decode(reply)) except: logger.debug("RPC skipping bad packet") continue if handle not in self.queues: logger.debug("RPC ignoring response: handle 0x%x", handle) continue logger.debug("RPC queue put: handle 0x%x, qsize %s", handle, self.queues[handle].qsize()) self.queues[handle].put_nowait(reply) def logout_all(self): "Execute an RPC LOGOUT_ALL operation." 
return self.rpc_input(slip_encode(logout_all_msg)) class QueuedStreamClosedError(tornado.iostream.StreamClosedError): "Deferred StreamClosedError passed throught a Queue." class RPCServer(PFUnixServer): """ Serve multiplexed Cryptech RPC over a PF_UNIX socket. """ @tornado.gen.coroutine def handle_stream(self, stream, address): "Handle one network connection." handle = self.next_client_handle() queue = tornado.queues.Queue() logger.info("RPC connected %r, handle 0x%x", stream, handle) while True: try: logger.debug("RPC socket read, handle 0x%x", handle) query = yield stream.read_until(SLIP_END) if len(query) < 9: continue query = slip_encode(client_handle_set(slip_decode(query), handle)) yield self.serial.rpc_input(query, handle, queue) logger.debug("RPC queue wait, handle 0x%x", handle) reply = yield queue.get() if reply is None: raise QueuedStreamClosedError() logger.debug("RPC socket write, handle 0x%x", handle) yield stream.write(SLIP_END + reply) except tornado.iostream.StreamClosedError: logger.info("RPC closing %r, handle 0x%x", stream, handle) stream.close() query = slip_encode(client_handle_set(logout_msg, handle)) yield self.serial.rpc_input(query, handle) return client_handle = int(time.time()) << 4 @classmethod def next_client_handle(cls): cls.client_handle += 1 cls.client_handle &= 0xFFFFFFFF return cls.client_handle class CTYIOStream(SerialIOStream): """ Tornado IOStream for a serial console channel. """ def __init__(self, device, console_log = None): super(CTYIOStream, self).__init__(device) self.attached_cty = None self.console_log = console_log @tornado.gen.coroutine def cty_output_loop(self): while True: try: buffer = yield self.read_bytes(self.read_chunk_size, partial = True) except tornado.iostream.StreamClosedError: logger.info("CTY UART closed") if self.attached_cty is not None: self.attached_cty.close() return try: futures = [] if self.console_log is not None: futures.append(self.console_log.write(buffer)) if self.attached_cty is not None: futures.append(self.attached_cty.write(buffer)) if futures: yield futures except tornado.iostream.StreamClosedError: pass class CTYServer(PFUnixServer): """ Serve Cryptech console over a PF_UNIX socket. """ @tornado.gen.coroutine def handle_stream(self, stream, address): "Handle one network connection." if self.serial.attached_cty is not None: yield stream.write("[Console already in use, sorry]\n") stream.close() return logger.info("CTY connected to %r", stream) try: self.serial.attached_cty = stream while self.serial.attached_cty is stream: yield self.serial.write((yield stream.read_bytes(1024, partial = True))) except tornado.iostream.StreamClosedError: stream.close() finally: logger.info("CTY disconnected from %r", stream) if self.serial.attached_cty is stream: self.serial.attached_cty = None class ProbeIOStream(SerialIOStream): """ Tornado IOStream for probing a serial port. This is nasty. 
""" def __init__(self, device): super(ProbeIOStream, self).__init__(device) @classmethod @tornado.gen.coroutine def run_probes(cls, args): if args.rpc_device is not None and args.cty_device is not None: return if args.probe: devs = set(args.probe) else: devs = set(str(port) for port, desc, hwid in serial.tools.list_ports_posix.comports() if "VID:PID=0403:6014" in hwid) devs.discard(args.rpc_device) devs.discard(args.cty_device) if not devs: return logging.debug("Probing candidate devices %s", " ".join(devs)) results = yield dict((dev, ProbeIOStream(dev).run_probe()) for dev in devs) for dev, result in results.iteritems(): if result == "cty" and args.cty_device is None: logger.info("Selecting %s as CTY device", dev) args.cty_device = dev if result == "rpc" and args.rpc_device is None: logger.info("Selecting %s as RPC device", dev) args.rpc_device = dev @tornado.gen.coroutine def run_probe(self): RPC_query = struct.pack(">LL", RPC_FUNC_GET_VERSION, 0) RPC_reply = struct.pack(">LLL", RPC_FUNC_GET_VERSION, 0, HAL_OK) probe_string = SLIP_END + Control_U + SLIP_END + RPC_query + SLIP_END + Control_U + Control_M yield self.write(probe_string) yield tornado.gen.sleep(0.5) response = yield self.read_bytes(self.read_chunk_size, partial = True) logger.debug("Probing %s: %r %s", self.serial_device, response, ":".join("{:02x}".format(ord(c)) for c in response)) is_cty = any(prompt in response for prompt in ("Username:", "Password:", "cryptech>")) try: is_rpc = response[response.index(SLIP_END + RPC_reply) + len(SLIP_END + RPC_reply) + 4] == SLIP_END except ValueError: is_rpc = False except IndexError: is_rpc = False assert not is_cty or not is_rpc result = None if is_cty: result = "cty" yield self.write(Control_U) if is_rpc: result = "rpc" yield self.write(SLIP_END) self.close() raise tornado.gen.Return(result) @tornado.gen.coroutine def main(): parser = argparse.ArgumentParser(formatter_class = argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("-v", "--verbose", action = "count", help = "blather about what we're doing") parser.add_argument("-l", "--log-file", help = "log to file instead of stderr") parser.add_argument("-L", "--console-log", type = argparse.FileType("a"), help = "log console output to file") parser.add_argument("-p", "--probe", nargs = "*", metavar = "DEVICE", help = "probe for device UARTs") parser.add_argument("--rpc-device", help = "RPC serial device name", default = os.getenv("CRYPTECH_RPC_CLIENT_SERIAL_DEVICE")) parser.add_argument("--rpc-socket", help = "RPC PF_UNIX socket name", default = os.getenv("CRYPTECH_RPC_CLIENT_SOCKET_NAME", "/tmp/.cryptech_muxd.rpc")) parser.add_argument("--rpc-socket-mode", help = "permission bits for RPC socket inode", default = 0600, type = lambda s: int(s, 8)) parser.add_argument("--cty-device", help = "CTY serial device name", default = os.getenv("CRYPTECH_CTY_CLIENT_SERIAL_DEVICE")) parser.add_argument("--cty-socket", help = "CTY PF_UNIX socket name", default = os.getenv("CRYPTECH_CTY_CLIENT_SOCKET_NAME", "/tmp/.cryptech_muxd.cty")) parser.add_argument("--cty-socket-mode", help = "permission bits for CTY socket inode", default = 0600, type = lambda s: int(s, 8)) args = parser.parse_args() if args.log_file is not None: logging.getLogger().handlers[:] = [logging.handlers.WatchedFileHandler(args.log_file)] logging.getLogger().handlers[0].setFormatter( logging.Formatter("%(asctime)-15s %(name)s[%(process)d]:%(levelname)s: %(message)s", "%Y-%m-%d %H:%M:%S")) if args.verbose: logging.getLogger().setLevel(logging.DEBUG if args.verbose > 1 else 
logging.INFO) if args.probe is not None: yield ProbeIOStream.run_probes(args) if args.console_log is not None: console_log = tornado.iostream.PipeIOStream(args.console_log.fileno()) else: console_log = None futures = [] if args.rpc_device is None: logger.warn("No RPC device found") else: rpc_stream = RPCIOStream(device = args.rpc_device) rpc_server = RPCServer(rpc_stream, args.rpc_socket, args.rpc_socket_mode) futures.append(rpc_stream.rpc_output_loop()) futures.append(rpc_stream.logout_all()) if args.cty_device is None: logger.warn("No CTY device found") else: cty_stream = CTYIOStream(device = args.cty_device, console_log = console_log) cty_server = CTYServer(cty_stream, args.cty_socket, args.cty_socket_mode) futures.append(cty_stream.cty_output_loop()) # Might want to use WaitIterator(dict(...)) here so we can # diagnose and restart output loops if they fail? if futures: yield futures if __name__ == "__main__": try: tornado.ioloop.IOLoop.current().run_sync(main) except (SystemExit, KeyboardInterrupt): pass except: logger.exception("Unhandled exception") else: logger.debug("Main loop exited")
[]
[]
[ "CRYPTECH_RPC_CLIENT_SERIAL_DEVICE", "CRYPTECH_CTY_CLIENT_SERIAL_DEVICE", "CRYPTECH_CTY_CLIENT_SOCKET_NAME", "CRYPTECH_RPC_CLIENT_SOCKET_NAME" ]
[]
["CRYPTECH_RPC_CLIENT_SERIAL_DEVICE", "CRYPTECH_CTY_CLIENT_SERIAL_DEVICE", "CRYPTECH_CTY_CLIENT_SOCKET_NAME", "CRYPTECH_RPC_CLIENT_SOCKET_NAME"]
python
4
0
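muxd.py above documents that it uses SLIP encapsulation (byte stuffing with END and ESC markers) over a stream socket, and implements it on Python 2 str objects. The following is a minimal Python 3 bytes-based sketch of the same encode/decode transform; it mirrors the replace() logic of slip_encode()/slip_decode() but is not the Cryptech code itself, and the decoder shares the original's simplification of stripping every framing END byte.

SLIP_END = b"\xc0"      # end of packet (octal 0300)
SLIP_ESC = b"\xdb"      # escape marker (octal 0333)
SLIP_ESC_END = b"\xdc"  # ESC + ESC_END stands for a literal END byte
SLIP_ESC_ESC = b"\xdd"  # ESC + ESC_ESC stands for a literal ESC byte

def slip_encode(payload: bytes) -> bytes:
    # Escape ESC first, then END, and frame the result with END bytes.
    body = payload.replace(SLIP_ESC, SLIP_ESC + SLIP_ESC_ESC)
    body = body.replace(SLIP_END, SLIP_ESC + SLIP_ESC_END)
    return SLIP_END + body + SLIP_END

def slip_decode(frame: bytes) -> bytes:
    # Drop the framing bytes, then undo the escapes in the reverse order.
    body = frame.strip(SLIP_END)
    body = body.replace(SLIP_ESC + SLIP_ESC_END, SLIP_END)
    return body.replace(SLIP_ESC + SLIP_ESC_ESC, SLIP_ESC)

assert slip_decode(slip_encode(b"\xc0\xdb payload")) == b"\xc0\xdb payload"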
manage.py
#!/usr/bin/env python import os import sys if __name__ == '__main__': os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dukapoint.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv)
[]
[]
[]
[]
[]
python
0
0
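manage.py above uses os.environ.setdefault('DJANGO_SETTINGS_MODULE', ...), which only assigns the variable when it is not already present, so a value exported in the shell wins over the hard-coded default. A tiny illustrative snippet (not part of the project) showing that behaviour:

import os

os.environ.pop("DJANGO_SETTINGS_MODULE", None)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dukapoint.settings")
print(os.environ["DJANGO_SETTINGS_MODULE"])   # -> dukapoint.settings

os.environ["DJANGO_SETTINGS_MODULE"] = "override.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dukapoint.settings")
print(os.environ["DJANGO_SETTINGS_MODULE"])   # -> override.settings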
court_scraper/configs.py
import os import yaml from pathlib import Path try: from yaml import CLoader as Loader, CDumper as Dumper except ImportError: from yaml import Loader class ConfigurationError(Exception): pass class Configs: def __init__(self): try: self.cache_dir = os.environ['COURT_SCRAPER_DIR'] except KeyError: self.cache_dir = str( Path(os.path.expanduser('~'))\ .joinpath('.court-scraper') ) self.config_file_path = str( Path(self.cache_dir)\ .joinpath('config.yaml') ) self.db_path = str( Path(self.cache_dir)\ .joinpath('cases.db') ) @property def captcha_service_api_key(self): with open(self.config_file_path,'r') as fh: configs = yaml.load(fh, Loader=Loader) try: return configs['captcha_service_api_key'] except KeyError: msg = f"Set captcha_service_api_key in {self.config_file_path}" raise ConfigurationError(msg)
[]
[]
[ "COURT_SCRAPER_DIR" ]
[]
["COURT_SCRAPER_DIR"]
python
1
0
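Configs above falls back to ~/.court-scraper when COURT_SCRAPER_DIR is unset by catching KeyError. A shorter equivalent sketch using os.environ.get() with a default and pathlib is shown here; it reproduces the same paths but is an assumption-level rewrite, not the library's code.

import os
from pathlib import Path

# Use COURT_SCRAPER_DIR when set, otherwise default to ~/.court-scraper.
cache_dir = Path(os.environ.get("COURT_SCRAPER_DIR", Path.home() / ".court-scraper"))
config_file_path = cache_dir / "config.yaml"
db_path = cache_dir / "cases.db"
print(cache_dir, config_file_path, db_path)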
stable_baselines/logger.py
import os import sys import shutil import json import time import datetime import tempfile import warnings from collections import defaultdict DEBUG = 10 INFO = 20 WARN = 30 ERROR = 40 DISABLED = 50 class KVWriter(object): """ Key Value writer """ def writekvs(self, kvs): """ write a dictionary to file :param kvs: (dict) """ raise NotImplementedError class SeqWriter(object): """ sequence writer """ def writeseq(self, seq): """ write an array to file :param seq: (list) """ raise NotImplementedError class HumanOutputFormat(KVWriter, SeqWriter): def __init__(self, filename_or_file): """ log to a file, in a human readable format :param filename_or_file: (str or File) the file to write the log to """ if isinstance(filename_or_file, str): self.file = open(filename_or_file, 'wt') self.own_file = True else: assert hasattr(filename_or_file, 'write'), 'Expected file or str, got {}'.format(filename_or_file) self.file = filename_or_file self.own_file = False def writekvs(self, kvs): # Create strings for printing key2str = {} for (key, val) in sorted(kvs.items()): if isinstance(val, float): valstr = '%-8.3g' % (val,) else: valstr = str(val) key2str[self._truncate(key)] = self._truncate(valstr) # Find max widths if len(key2str) == 0: warnings.warn('Tried to write empty key-value dict') return else: keywidth = max(map(len, key2str.keys())) valwidth = max(map(len, key2str.values())) # Write out the data dashes = '-' * (keywidth + valwidth + 7) lines = [dashes] for (key, val) in sorted(key2str.items()): lines.append('| %s%s | %s%s |' % ( key, ' ' * (keywidth - len(key)), val, ' ' * (valwidth - len(val)), )) lines.append(dashes) self.file.write('\n'.join(lines) + '\n') # Flush the output to the file self.file.flush() @classmethod def _truncate(cls, string): return string[:20] + '...' 
if len(string) > 23 else string def writeseq(self, seq): seq = list(seq) for (i, elem) in enumerate(seq): self.file.write(elem) if i < len(seq) - 1: # add space unless this is the last one self.file.write(' ') self.file.write('\n') self.file.flush() def close(self): """ closes the file """ if self.own_file: self.file.close() class JSONOutputFormat(KVWriter): def __init__(self, filename): """ log to a file, in the JSON format :param filename: (str) the file to write the log to """ self.file = open(filename, 'wt') def writekvs(self, kvs): for key, value in sorted(kvs.items()): if hasattr(value, 'dtype'): value = value.tolist() kvs[key] = float(value) self.file.write(json.dumps(kvs) + '\n') self.file.flush() def close(self): """ closes the file """ self.file.close() class CSVOutputFormat(KVWriter): def __init__(self, filename): """ log to a file, in a CSV format :param filename: (str) the file to write the log to """ self.file = open(filename, 'w+t') self.keys = [] self.sep = ',' def writekvs(self, kvs): # Add our current row to the history extra_keys = kvs.keys() - self.keys if extra_keys: self.keys.extend(extra_keys) self.file.seek(0) lines = self.file.readlines() self.file.seek(0) for (i, key) in enumerate(self.keys): if i > 0: self.file.write(',') self.file.write(key) self.file.write('\n') for line in lines[1:]: self.file.write(line[:-1]) self.file.write(self.sep * len(extra_keys)) self.file.write('\n') for i, key in enumerate(self.keys): if i > 0: self.file.write(',') value = kvs.get(key) if value is not None: self.file.write(str(value)) self.file.write('\n') self.file.flush() def close(self): """ closes the file """ self.file.close() class TensorBoardOutputFormat(KVWriter): def __init__(self, folder): """ Dumps key/value pairs into TensorBoard's numeric format. :param folder: (str) the folder to write the log to """ os.makedirs(folder, exist_ok=True) self.dir = folder self.step = 1 prefix = 'events' path = os.path.join(os.path.abspath(folder), prefix) import tensorflow as tf from tensorflow.python import pywrap_tensorflow from tensorflow.core.util import event_pb2 from tensorflow.python.util import compat self._tf = tf self.event_pb2 = event_pb2 self.pywrap_tensorflow = pywrap_tensorflow self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path)) def writekvs(self, kvs): def summary_val(key, value): kwargs = {'tag': key, 'simple_value': float(value)} return self._tf.Summary.Value(**kwargs) summary = self._tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()]) event = self.event_pb2.Event(wall_time=time.time(), summary=summary) event.step = self.step # is there any reason why you'd want to specify the step? 
self.writer.WriteEvent(event) self.writer.Flush() self.step += 1 def close(self): """ closes the file """ if self.writer: self.writer.Close() self.writer = None def make_output_format(_format, ev_dir, log_suffix='', tensorboard_dir=None): """ return a logger for the requested format :param _format: (str) the requested format to log to ('stdout', 'log', 'json', 'csv' or 'tensorboard') :param ev_dir: (str) the logging directory :param log_suffix: (str) the suffix for the log file :return: (KVWrite) the logger """ os.makedirs(ev_dir, exist_ok=True) if _format == 'stdout': return HumanOutputFormat(sys.stdout) elif _format == 'log': return HumanOutputFormat(os.path.join(ev_dir, 'log%s.txt' % log_suffix)) elif _format == 'json': return JSONOutputFormat(os.path.join(ev_dir, 'progress%s.json' % log_suffix)) elif _format == 'csv': return CSVOutputFormat(os.path.join(ev_dir, 'progress%s.csv' % log_suffix)) elif _format == 'tensorboard': assert tensorboard_dir is not None return TensorBoardOutputFormat(tensorboard_dir) else: raise ValueError('Unknown format specified: %s' % (_format,)) # ================================================================ # API # ================================================================ def logkv(key, val): """ Log a value of some diagnostic Call this once for each diagnostic quantity, each iteration If called many times, last value will be used. :param key: (Any) save to log this key :param val: (Any) save to log this value """ Logger.CURRENT.logkv(key, val) def logkv_mean(key, val): """ The same as logkv(), but if called many times, values averaged. :param key: (Any) save to log this key :param val: (Number) save to log this value """ Logger.CURRENT.logkv_mean(key, val) def logkvs(key_values): """ Log a dictionary of key-value pairs :param key_values: (dict) the list of keys and values to save to log """ for key, value in key_values.items(): logkv(key, value) def dumpkvs(): """ Write all of the diagnostics from the current iteration """ Logger.CURRENT.dumpkvs() def getkvs(): """ get the key values logs :return: (dict) the logged values """ return Logger.CURRENT.name2val def log(*args, level=INFO): """ Write the sequence of args, with no separators, to the console and output files (if you've configured an output file). level: int. (see logger.py docs) If the global logger level is higher than the level argument here, don't print to stdout. :param args: (list) log the arguments :param level: (int) the logging level (can be DEBUG=10, INFO=20, WARN=30, ERROR=40, DISABLED=50) """ Logger.CURRENT.log(*args, level=level) def debug(*args): """ Write the sequence of args, with no separators, to the console and output files (if you've configured an output file). Using the DEBUG level. :param args: (list) log the arguments """ log(*args, level=DEBUG) def info(*args): """ Write the sequence of args, with no separators, to the console and output files (if you've configured an output file). Using the INFO level. :param args: (list) log the arguments """ log(*args, level=INFO) def warn(*args): """ Write the sequence of args, with no separators, to the console and output files (if you've configured an output file). Using the WARN level. :param args: (list) log the arguments """ log(*args, level=WARN) def error(*args): """ Write the sequence of args, with no separators, to the console and output files (if you've configured an output file). Using the ERROR level. 
:param args: (list) log the arguments """ log(*args, level=ERROR) def set_level(level): """ Set logging threshold on current logger. :param level: (int) the logging level (can be DEBUG=10, INFO=20, WARN=30, ERROR=40, DISABLED=50) """ Logger.CURRENT.set_level(level) def get_level(): """ Get logging threshold on current logger. :return: (int) the logging level (can be DEBUG=10, INFO=20, WARN=30, ERROR=40, DISABLED=50) """ return Logger.CURRENT.level def get_dir(): """ Get directory that log files are being written to. will be None if there is no output directory (i.e., if you didn't call start) :return: (str) the logging directory """ return Logger.CURRENT.get_dir() record_tabular = logkv dump_tabular = dumpkvs class ProfileKV: def __init__(self, name): """ Usage: with logger.ProfileKV("interesting_scope"): code :param name: (str) the profiling name """ self.name = "wait_" + name def __enter__(self): self.start_time = time.time() def __exit__(self, _type, value, traceback): Logger.CURRENT.name2val[self.name] += time.time() - self.start_time def profile(name): """ Usage: @profile("my_func") def my_func(): code :param name: (str) the profiling name :return: (function) the wrapped function """ def decorator_with_name(func): def func_wrapper(*args, **kwargs): with ProfileKV(name): return func(*args, **kwargs) return func_wrapper return decorator_with_name # ================================================================ # Backend # ================================================================ class Logger(object): # A logger with no output files. (See right below class definition) # So that you can still log to the terminal without setting up any output files DEFAULT = None CURRENT = None # Current logger being used by the free functions above def __init__(self, folder, output_formats): """ the logger class :param folder: (str) the logging location :param output_formats: ([str]) the list of output format """ self.name2val = defaultdict(float) # values this iteration self.name2cnt = defaultdict(int) self.level = INFO self.dir = folder self.output_formats = output_formats # Logging API, forwarded # ---------------------------------------- def logkv(self, key, val): """ Log a value of some diagnostic Call this once for each diagnostic quantity, each iteration If called many times, last value will be used. :param key: (Any) save to log this key :param val: (Any) save to log this value """ self.name2val[key] = val def logkv_mean(self, key, val): """ The same as logkv(), but if called many times, values averaged. :param key: (Any) save to log this key :param val: (Number) save to log this value """ if val is None: self.name2val[key] = None return oldval, cnt = self.name2val[key], self.name2cnt[key] self.name2val[key] = oldval * cnt / (cnt + 1) + val / (cnt + 1) self.name2cnt[key] = cnt + 1 def dumpkvs(self): """ Write all of the diagnostics from the current iteration """ if self.level == DISABLED: return for fmt in self.output_formats: if isinstance(fmt, KVWriter): fmt.writekvs(self.name2val) self.name2val.clear() self.name2cnt.clear() def log(self, *args, level=INFO): """ Write the sequence of args, with no separators, to the console and output files (if you've configured an output file). level: int. (see logger.py docs) If the global logger level is higher than the level argument here, don't print to stdout. 
:param args: (list) log the arguments :param level: (int) the logging level (can be DEBUG=10, INFO=20, WARN=30, ERROR=40, DISABLED=50) """ if self.level <= level: self._do_log(args) # Configuration # ---------------------------------------- def set_level(self, level): """ Set logging threshold on current logger. :param level: (int) the logging level (can be DEBUG=10, INFO=20, WARN=30, ERROR=40, DISABLED=50) """ self.level = level def get_dir(self): """ Get directory that log files are being written to. will be None if there is no output directory (i.e., if you didn't call start) :return: (str) the logging directory """ return self.dir def close(self): """ closes the file """ for fmt in self.output_formats: fmt.close() # Misc # ---------------------------------------- def _do_log(self, args): """ log to the requested format outputs :param args: (list) the arguments to log """ for fmt in self.output_formats: if isinstance(fmt, SeqWriter): fmt.writeseq(map(str, args)) Logger.DEFAULT = Logger.CURRENT = Logger(folder=None, output_formats=[HumanOutputFormat(sys.stdout)]) def configure(log_dir=None, format_strs=None, tensorboard_dir=None): """ configure the current logger :param log_dir: (str) the save location (if None, $OPENAI_LOGDIR, if still None, tempdir/openai-[date & time]) :param format_strs: (list) the output logging format (if None, $OPENAI_LOG_FORMAT, if still None, ['stdout', 'log', 'csv']) """ if log_dir is None: log_dir = os.getenv('OPENAI_LOGDIR') if log_dir is None: log_dir = os.path.join(tempfile.gettempdir(), datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f")) assert isinstance(log_dir, str) os.makedirs(log_dir, exist_ok=True) log_suffix = '' from mpi4py import MPI rank = MPI.COMM_WORLD.Get_rank() if rank > 0: log_suffix = "-rank%03i" % rank if format_strs is None: if rank == 0: format_strs = os.getenv('OPENAI_LOG_FORMAT', 'stdout,log,csv').split(',') else: format_strs = os.getenv('OPENAI_LOG_FORMAT_MPI', 'log').split(',') format_strs = filter(None, format_strs) output_formats = [make_output_format(f, log_dir, log_suffix, tensorboard_dir) for f in format_strs] Logger.CURRENT = Logger(folder=log_dir, output_formats=output_formats) log('Logging to %s' % log_dir) def reset(): """ reset the current logger """ if Logger.CURRENT is not Logger.DEFAULT: Logger.CURRENT.close() Logger.CURRENT = Logger.DEFAULT log('Reset logger') class ScopedConfigure(object): def __init__(self, folder=None, format_strs=None): """ Class for using context manager while logging usage: with ScopedConfigure(folder=None, format_strs=None): {code} :param folder: (str) the logging folder :param format_strs: ([str]) the list of output logging format """ self.dir = folder self.format_strs = format_strs self.prevlogger = None def __enter__(self): self.prevlogger = Logger.CURRENT configure(folder=self.dir, format_strs=self.format_strs) def __exit__(self, *args): Logger.CURRENT.close() Logger.CURRENT = self.prevlogger # ================================================================ def _demo(): """ tests for the logger module """ info("hi") debug("shouldn't appear") set_level(DEBUG) debug("should appear") folder = "/tmp/testlogging" if os.path.exists(folder): shutil.rmtree(folder) configure(folder=folder) logkv("a", 3) logkv("b", 2.5) dumpkvs() logkv("b", -2.5) logkv("a", 5.5) dumpkvs() info("^^^ should see a = 5.5") logkv_mean("b", -22.5) logkv_mean("b", -44.4) logkv("a", 5.5) dumpkvs() with ScopedConfigure(None, None): info("^^^ should see b = 33.3") with ScopedConfigure("/tmp/test-logger/", 
["json"]): logkv("b", -2.5) dumpkvs() reset() logkv("a", "longasslongasslongasslongasslongasslongassvalue") dumpkvs() warn("hey") error("oh") logkvs({"test": 1}) # ================================================================ # Readers # ================================================================ def read_json(fname): """ read a json file using pandas :param fname: (str) the file path to read :return: (pandas DataFrame) the data in the json """ import pandas data = [] with open(fname, 'rt') as file_handler: for line in file_handler: data.append(json.loads(line)) return pandas.DataFrame(data) def read_csv(fname): """ read a csv file using pandas :param fname: (str) the file path to read :return: (pandas DataFrame) the data in the csv """ import pandas return pandas.read_csv(fname, index_col=None, comment='#') def read_tb(path): """ read a tensorboard output :param path: (str) a tensorboard file OR a directory, where we will find all TB files of the form events. :return: (pandas DataFrame) the tensorboad data """ import pandas import numpy as np from glob import glob # from collections import defaultdict import tensorflow as tf if os.path.isdir(path): fnames = glob(os.path.join(path, "events.*")) elif os.path.basename(path).startswith("events."): fnames = [path] else: raise NotImplementedError("Expected tensorboard file or directory containing them. Got %s" % path) tag2pairs = defaultdict(list) maxstep = 0 for fname in fnames: for summary in tf.train.summary_iterator(fname): if summary.step > 0: for value in summary.summary.value: pair = (summary.step, value.simple_value) tag2pairs[value.tag].append(pair) maxstep = max(summary.step, maxstep) data = np.empty((maxstep, len(tag2pairs))) data[:] = np.nan tags = sorted(tag2pairs.keys()) for (colidx, tag) in enumerate(tags): pairs = tag2pairs[tag] for (step, value) in pairs: data[step - 1, colidx] = value return pandas.DataFrame(data, columns=tags) if __name__ == "__main__": _demo()
[]
[]
[ "OPENAI_LOGDIR", "OPENAI_LOG_FORMAT_MPI", "OPENAI_LOG_FORMAT" ]
[]
["OPENAI_LOGDIR", "OPENAI_LOG_FORMAT_MPI", "OPENAI_LOG_FORMAT"]
python
3
0
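configure() in the logger module above resolves its output directory from OPENAI_LOGDIR (falling back to a timestamped directory under the system tempdir) and its formats from OPENAI_LOG_FORMAT. The sketch below isolates just that environment-variable resolution; it deliberately ignores the mpi4py rank handling and the OPENAI_LOG_FORMAT_MPI branch, and resolve_log_settings() is a hypothetical helper name.

import datetime
import os
import tempfile

def resolve_log_settings():
    # Directory: $OPENAI_LOGDIR, else tempdir/openai-<timestamp>.
    log_dir = os.getenv("OPENAI_LOGDIR")
    if log_dir is None:
        stamp = datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f")
        log_dir = os.path.join(tempfile.gettempdir(), stamp)
    # Formats: $OPENAI_LOG_FORMAT as a comma-separated list, else the defaults.
    format_strs = os.getenv("OPENAI_LOG_FORMAT", "stdout,log,csv").split(",")
    return log_dir, [fmt for fmt in format_strs if fmt]

print(resolve_log_settings())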
back-end/vp/vp/settings.py
""" Django settings for vp project. Generated by 'django-admin startproject' using Django 3.0.4. For more information on this file, see https://docs.djangoproject.com/en/3.0/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.0/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = os.environ['SECRET_KEY'] # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = ['back-end:8000', 'back-end', 'localhost', '127.0.0.1', '0.0.0.0', '[::1]'] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', # Project apps 'workflow.apps.WorkflowConfig', 'node.apps.NodeConfig', 'drf_yasg' ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', # Custom middleware 'workflow.middleware.WorkflowMiddleware', ] ROOT_URLCONF = 'vp.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] SESSION_ENGINE = 'django.contrib.sessions.backends.file' WSGI_APPLICATION = 'vp.wsgi.application' # Database # https://docs.djangoproject.com/en/3.0/ref/settings/#databases # Not yet setup DATABASES = {} MEDIA_ROOT = '/tmp' # Password validation # https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.0/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.0/howto/static-files/ STATIC_URL = '/static/'
[]
[]
[ "SECRET_KEY" ]
[]
["SECRET_KEY"]
python
1
0
src/main/java/com/artipie/HttpClientSettings.java
/* * The MIT License (MIT) Copyright (c) 2020-2021 artipie.com * https://github.com/artipie/artipie/LICENSE.txt */ package com.artipie; import com.google.common.base.Strings; import java.util.Optional; import java.util.concurrent.TimeUnit; /** * HTTP client settings from system environment. * * @since 0.9 */ final class HttpClientSettings implements com.artipie.http.client.Settings { /** * Proxy host system property key. */ static final String PROXY_HOST = "http.proxyHost"; /** * Proxy port system property key. */ static final String PROXY_PORT = "http.proxyPort"; @Override public Optional<Proxy> proxy() { final Optional<Proxy> result; final String host = System.getProperty(HttpClientSettings.PROXY_HOST); final String port = System.getProperty(HttpClientSettings.PROXY_PORT); if (Strings.isNullOrEmpty(host) || Strings.isNullOrEmpty(port)) { result = Optional.empty(); } else { result = Optional.of(new Proxy.Simple(false, host, Integer.parseInt(port))); } return result; } @Override public boolean trustAll() { return "true".equals(System.getenv("SSL_TRUSTALL")); } @Override public boolean followRedirects() { return true; } @Override public long connectTimeout() { final int seconds = 15; return TimeUnit.SECONDS.toMillis(seconds); } @Override public long idleTimeout() { final int seconds = 30; return TimeUnit.SECONDS.toMillis(seconds); } }
[ "\"SSL_TRUSTALL\"" ]
[]
[ "SSL_TRUSTALL" ]
[]
["SSL_TRUSTALL"]
java
1
0
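The single constant argument in this Java row, SSL_TRUSTALL, is compared against the literal string "true" in trustAll(). A minimal Python sketch of the same boolean-flag pattern; the helper name trust_all is hypothetical:

import os

# Mirrors HttpClientSettings.trustAll(): only the exact string "true"
# enables the flag; any other value, or an unset variable, leaves it off.
def trust_all() -> bool:
    return os.environ.get("SSL_TRUSTALL") == "true"

print(trust_all())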
main.go
package main import ( "bytes" "encoding/json" "errors" "fmt" "io" "io/ioutil" "mime/multipart" "net/http" "os" "strings" "github.com/bitrise-io/depman/pathutil" "github.com/bitrise-io/go-utils/command" "github.com/bitrise-io/go-utils/log" ) const ( hockeyAppDeployStatusKey = "HOCKEYAPP_DEPLOY_STATUS" hockeyAppDeployStatusSuccess = "success" hockeyAppDeployStatusFailed = "failed" hockeyAppDeployPublicURLKey = "HOCKEYAPP_DEPLOY_PUBLIC_URL" hockeyAppDeployBuildURLKey = "HOCKEYAPP_DEPLOY_BUILD_URL" hockeyAppDeployConfigURLKey = "HOCKEYAPP_DEPLOY_CONFIG_URL" hockeyAppDeployPublicURLKeyList = "HOCKEYAPP_DEPLOY_PUBLIC_URL_LIST" hockeyAppDeployBuildURLKeyList = "HOCKEYAPP_DEPLOY_BUILD_URL_LIST" hockeyAppDeployConfigURLKeyList = "HOCKEYAPP_DEPLOY_CONFIG_URL_LIST" ) var configs ConfigsModel // ConfigsModel ... type ConfigsModel struct { ApkPath []string MappingPath string APIToken string AppID string Notes string NotesType string Notify string Status string Tags string CommitSHA string BuildServerURL string RepositoryURL string Mandatory string } func createConfigsModelFromEnvs() ConfigsModel { mandatory := os.Getenv("mandatory") if mandatory == "1" || mandatory == "true" { mandatory = "1" } else { mandatory = "0" } apkPath := []string{} for _, pth := range strings.Split(os.Getenv("apk_path"), "|") { if pth != "" { apkPath = append(apkPath, pth) } } return ConfigsModel{ ApkPath: apkPath, MappingPath: os.Getenv("mapping_path"), APIToken: os.Getenv("api_token"), AppID: os.Getenv("app_id"), Notes: os.Getenv("notes"), NotesType: os.Getenv("notes_type"), Notify: os.Getenv("notify"), Status: os.Getenv("status"), Tags: os.Getenv("tags"), CommitSHA: os.Getenv("commit_sha"), BuildServerURL: os.Getenv("build_server_url"), RepositoryURL: os.Getenv("repository_url"), Mandatory: mandatory, } } func (configs ConfigsModel) print() { fmt.Println() log.Infof("Configs:") log.Printf(" - ApkPath: %s", configs.ApkPath) log.Printf(" - MappingPath: %s", configs.MappingPath) log.Printf(" - APIToken: %s", configs.APIToken) log.Printf(" - AppID: %s", configs.AppID) log.Printf(" - Notes: %s", configs.Notes) log.Printf(" - NotesType: %s", configs.NotesType) log.Printf(" - Notify: %s", configs.Notify) log.Printf(" - Status: %s", configs.Status) log.Printf(" - Tags: %s", configs.Tags) log.Printf(" - CommitSHA: %s", configs.CommitSHA) log.Printf(" - BuildServerURL: %s", configs.BuildServerURL) log.Printf(" - RepositoryURL: %s", configs.RepositoryURL) log.Printf(" - Mandatory: %s", configs.Mandatory) } func (configs ConfigsModel) validate() error { if len(configs.ApkPath) == 0 { return errors.New("no ApkPath parameter specified") } for _, apkPath := range configs.ApkPath { if exist, err := pathutil.IsPathExists(apkPath); err != nil { return fmt.Errorf("failed to check if ApkPath exist at: %s, error: %v", apkPath, err) } else if !exist { return fmt.Errorf("apkPath not exist at: %s", apkPath) } } required := map[string]string{ "APIToken": configs.APIToken, "NotesType": configs.NotesType, "Notify": configs.Notify, "Status": configs.Status, "Mandatory": configs.Mandatory, } for k, v := range required { if v == "" { return fmt.Errorf("no %s parameter specified", k) } } if configs.MappingPath != "" { if exist, err := pathutil.IsPathExists(configs.MappingPath); err != nil { return fmt.Errorf("failed to check if MappingPath exist at: %s, error: %v", configs.MappingPath, err) } else if !exist { return fmt.Errorf("mappingPath not exist at: %s", configs.MappingPath) } } return nil } // ResponseModel ... 
type ResponseModel struct { ConfigURL string `json:"config_url"` PublicURL string `json:"public_url"` BuildURL string `json:"build_url"` } func exportEnvironmentWithEnvman(keyStr, valueStr string) error { cmd := command.New("envman", "add", "--key", keyStr) cmd.SetStdin(strings.NewReader(valueStr)) return cmd.Run() } func createRequest(url string, fields, files map[string]string) (*http.Request, error) { var b bytes.Buffer w := multipart.NewWriter(&b) for key, value := range fields { if err := w.WriteField(key, value); err != nil { return nil, err } } for key, file := range files { f, err := os.Open(file) if err != nil { return nil, err } fw, err := w.CreateFormFile(key, file) if err != nil { return nil, err } if _, err = io.Copy(fw, f); err != nil { return nil, err } } if err := w.Close(); err != nil { return nil, err } req, err := http.NewRequest("POST", url, &b) if err != nil { return nil, err } req.Header.Set("Content-Type", w.FormDataContentType()) return req, nil } func deploy(apkPath string) (ResponseModel, error) { fmt.Println() log.Infof("Performing request") requestURL := "https://rink.hockeyapp.net/api/2/apps/upload" if configs.AppID != "" { requestURL = fmt.Sprintf("https://rink.hockeyapp.net/api/2/apps/%s/app_versions/upload", configs.AppID) } fields := map[string]string{ "notes": configs.Notes, "notes_type": configs.NotesType, "notify": configs.Notify, "status": configs.Status, "mandatory": configs.Mandatory, "tags": configs.Tags, "commit_sha": configs.CommitSHA, "build_server_url": configs.BuildServerURL, "repository_url": configs.RepositoryURL, } files := map[string]string{ "ipa": apkPath, } if configs.MappingPath != "" { files["dsym"] = configs.MappingPath } request, err := createRequest(requestURL, fields, files) if err != nil { return ResponseModel{}, fmt.Errorf("Failed to create request, error: %v", err) } request.Header.Add("X-HockeyAppToken", configs.APIToken) client := http.Client{} response, err := client.Do(request) if err != nil { return ResponseModel{}, fmt.Errorf("Performing request failed, error: %v", err) } defer func() { if err := response.Body.Close(); err != nil { log.Warnf("Failed to close response body, error: %v", err) } }() contents, readErr := ioutil.ReadAll(response.Body) if readErr != nil { return ResponseModel{}, fmt.Errorf("Failed to read response body, error: %v", readErr) } else if response.StatusCode < 200 || response.StatusCode > 300 { return ResponseModel{}, fmt.Errorf("Performing request failed, status code: %d", response.StatusCode) } log.Donef("Request succeeded") fmt.Println() log.Infof("Response:") log.Printf(" status code: %d", response.StatusCode) log.Printf(" body: %s", contents) responseModel := ResponseModel{} if err := json.Unmarshal([]byte(contents), &responseModel); err != nil { return ResponseModel{}, fmt.Errorf("Failed to parse response body, error: %v", err) } return responseModel, nil } func contains(list []string, item string) bool { for _, i := range list { if i == item { return true } } return false } func main() { configs = createConfigsModelFromEnvs() configs.print() if err := configs.validate(); err != nil { log.Errorf("Issue with input: %s", err) os.Exit(1) } configURLs := []string{} buildURLs := []string{} publicURLs := []string{} for _, apkPath := range configs.ApkPath { responseModel, err := deploy(apkPath) if err != nil { log.Errorf("Hockeyapp deploy failed: %v", err) if err := exportEnvironmentWithEnvman(hockeyAppDeployStatusKey, hockeyAppDeployStatusFailed); err != nil { log.Warnf("Failed to export %s, error: %v", 
hockeyAppDeployStatusKey, err) } os.Exit(1) } if responseModel.ConfigURL != "" && !contains(configURLs, responseModel.ConfigURL) { configURLs = append(configURLs, responseModel.ConfigURL) log.Donef("Config URL: %s", responseModel.ConfigURL) } if responseModel.BuildURL != "" && !contains(buildURLs, responseModel.BuildURL) { buildURLs = append(buildURLs, responseModel.BuildURL) log.Donef("Build (direct download) URL: %s", responseModel.BuildURL) } if responseModel.PublicURL != "" && !contains(publicURLs, responseModel.PublicURL) { publicURLs = append(publicURLs, responseModel.PublicURL) log.Donef("Public URL: %s", responseModel.PublicURL) } } outputs := map[string]string{ hockeyAppDeployStatusKey: hockeyAppDeployStatusSuccess, hockeyAppDeployConfigURLKeyList: strings.Join(configURLs, "|"), hockeyAppDeployBuildURLKeyList: strings.Join(buildURLs, "|"), hockeyAppDeployPublicURLKeyList: strings.Join(publicURLs, "|"), } if len(configURLs) > 0 { outputs[hockeyAppDeployConfigURLKey] = configURLs[len(configURLs)-1] } if len(buildURLs) > 0 { outputs[hockeyAppDeployBuildURLKey] = buildURLs[len(buildURLs)-1] } if len(publicURLs) > 0 { outputs[hockeyAppDeployPublicURLKey] = publicURLs[len(publicURLs)-1] } for k, v := range outputs { if err := exportEnvironmentWithEnvman(k, v); err != nil { log.Warnf("Failed to export %s, error: %v", k, err) } } }
[ "\"mandatory\"", "\"apk_path\"", "\"mapping_path\"", "\"api_token\"", "\"app_id\"", "\"notes\"", "\"notes_type\"", "\"notify\"", "\"status\"", "\"tags\"", "\"commit_sha\"", "\"build_server_url\"", "\"repository_url\"" ]
[]
[ "mapping_path", "apk_path", "mandatory", "notify", "api_token", "status", "tags", "build_server_url", "commit_sha", "app_id", "notes_type", "repository_url", "notes" ]
[]
["mapping_path", "apk_path", "mandatory", "notify", "api_token", "status", "tags", "build_server_url", "commit_sha", "app_id", "notes_type", "repository_url", "notes"]
go
13
0
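This row's thirteen lower-case inputs are gathered by createConfigsModelFromEnvs() and checked in validate(). A rough Python sketch of that gather-then-validate pattern, assuming the same variable names; the required subset shown matches the step's required map, everything else here is illustrative:

import os

# Collect the step inputs and reject empty required values, roughly as
# createConfigsModelFromEnvs()/validate() do in the Go step above.
required = ["api_token", "notes_type", "notify", "status", "mandatory"]
config = {name: os.environ.get(name, "") for name in required + ["apk_path", "app_id"]}
missing = [name for name in required if not config[name]]
if missing:
    raise SystemExit("no %s parameter specified" % ", ".join(missing))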
main.go
package main import ( "log" "net/http" "os" "strconv" "google.golang.org/appengine" "github.com/gin-gonic/gin" _ "github.com/go-sql-driver/mysql" "database/sql" "math/rand" "crypto/md5" "encoding/hex" "time" // _ "github.com/heroku/x/hmetrics/onload" ) type User struct{ Name string `json:"name, omitempty"` Mobile string `json:"mobile"` Role string `json:"role, omitempty"` } type Product struct{ Id int `json:"id", omitempty` Name string `json:"name"` Mrp int `json:"mrp"` Price int `json:"price"` RetailerPrice int `json:"retailer_price, omitempty"` Brand string `json:"brand"` Category string `json:"category"` Description string `json:"description, omitempty"` Image string `json:"image, omitempty"` } func main() { port := os.Getenv("PORT") if port == "" { log.Fatal("$PORT must be set") } router := gin.New() router.Use(gin.Logger()) // router.LoadHTMLGlob("templates/*.tmpl.html") //router.Static("/static", "static") // router.GET("/", func(c *gin.Context) { // c.HTML(http.StatusOK, "index.tmpl.html", nil) // }) db, err := sql.Open("mysql", os.Getenv("DATABASE_URL")) defer db.Close() if err != nil { panic(err) } router.GET("/products", func(c *gin.Context){ var ( product Product products []Product ) rows, err := db.Query("select id, name, mrp, price, retailer_price, brand, category, description, image from product") if err != nil { c.JSON(http.StatusBadRequest, gin.H{"success": false, "code": 4002, "message": err.Error()}) return } for rows.Next() { err = rows.Scan(&product.Id, &product.Name, &product.Mrp, &product.Price, &product.RetailerPrice, &product.Brand, &product.Category, &product.Description, &product.Image) if err != nil { continue } products = append(products, product) } defer rows.Close() c.JSON(http.StatusOK, gin.H{ "success": true, "code": 2000, "products": products, }) }) router.POST("/product", func(c *gin.Context){ var product Product if err := c.ShouldBindJSON(&product); err == nil { stmt, err := db.Prepare("insert into product(name, mrp, price, retailer_price, brand, category, description, image) values(?, ?, ?, ?, ?, ?, ?, ?)") _, err = stmt.Exec(product.Name, product.Mrp, product.Price, product.RetailerPrice, product.Brand, product.Category, product.Description, product.Image) if(err != nil){ c.JSON(http.StatusBadRequest, gin.H{"success": false, "code": 4001, "message": err.Error()}) return } c.JSON(http.StatusOK, gin.H{"success": true, "code": 2000, "message": "Product added successfully!"}) }else{ c.JSON(http.StatusBadRequest, gin.H{"success": false, "code": 4001, "message": err.Error()}) } }) router.POST("/user", func(c *gin.Context){ var ( user User isLogin bool ) queryIsLogin := c.Query("is_login") if(queryIsLogin == ""){ isLogin = false }else{ isLogin, _= strconv.ParseBool(queryIsLogin) } if err := c.ShouldBindJSON(&user); err == nil { tx, er := db.Begin() if er != nil { c.JSON(http.StatusBadRequest, gin.H{"success": false, "code": 4000, "message": er.Error()}) return } defer tx.Rollback() var roleId int row := tx.QueryRow("select id from role where type = ?", getRole(user.Role)) er = row.Scan(&roleId) _, er = tx.Query("insert into user(name, mobile, is_active, role) values(?, ?, ?, ?) 
on duplicate key update name = ?, role = ?, is_active=true", user.Name, user.Mobile, isLogin, roleId, user.Name, roleId) tx.Commit() if er != nil{ c.JSON(http.StatusBadRequest, gin.H{"success": false, "code": 4000, "message": er.Error()}) return } c.JSON(http.StatusOK, gin.H{"success": true, "code": 2000, "message": "User added successfully!"}) } else { c.JSON(http.StatusBadRequest, gin.H{"success": false, "code": 4000, "message": err.Error()}) } }) router.POST("/link", func(c *gin.Context){ var ( mobile = c.PostForm("mobile") name = c.PostForm("name") ) stmt, err := db.Prepare("insert into code(id, code) values(?, ?)") defer stmt.Close() if err != nil { log.Print(err) c.JSON(http.StatusBadRequest, gin.H{"success": false, "code": 4003, "message": "Error sending sms"}) return } var random = 1000 + rand.Intn(8999) hash := getMD5Hash(mobile + time.Now().String()) _, err = stmt.Exec(hash, random) if err != nil{ c.JSON(http.StatusBadRequest, gin.H{"success": false, "code": 4003, "message": "Error sending sms"}) return } go insertLoginUser(db, mobile, name) log.Print("Link SMS Code " + hash + ": " + strconv.Itoa(random)) c.JSON(http.StatusOK, gin.H{"success": true, "code": 2000, "message": "SMS sent successfully!", "sms_code": random, "ref_id": hash}) }) router.POST("/verify", func(c *gin.Context){ var ( hash = c.PostForm("ref_id") otpCode = c.PostForm("otp") mobile = c.PostForm("mobile") ) var code int row := db.QueryRow("select code from code where id = ?", hash) err = row.Scan(&code) if err != nil{ c.JSON(http.StatusBadRequest, gin.H{"success": false, "code": 4004, "message": err.Error()}) return } if otp, _ := strconv.Atoi(otpCode); otp == code { go activateUser(db, mobile) c.JSON(http.StatusOK, gin.H{"success": true, "code": 2000, "message": "User verified successfully!"}) } else { c.JSON(http.StatusBadRequest, gin.H{"success": false, "code": 4005, "message": "Invalid otp code"}) } }) router.POST("/login", func(c *gin.Context){ var mobile = c.PostForm("mobile") var random = 1000 + rand.Intn(8999) token := getMD5Hash(strconv.Itoa(random) + mobile) tx, err := db.Begin() if err != nil { c.JSON(http.StatusBadRequest, gin.H{"success": false, "code": 4000, "message": err.Error()}) return } defer tx.Rollback() var ( user User userId int ) user.Mobile = mobile row := tx.QueryRow("select u.id, r.type, u.name from user u inner join role r on u.role = r.id where u.mobile = ?", mobile) err = row.Scan(&userId, &user.Role, &user.Name) if err == nil{ _, err = tx.Exec("insert into usermeta(access_token, user_id, created_at) values(?, ?, ?) on duplicate key update access_token=?", token, userId, time.Now(), token) if err != nil{ c.JSON(http.StatusBadRequest, gin.H{"success": false, "code": 4006, "message": "Error Logging In"}) return } tx.Commit() c.Header("X-Nothing", token) c.JSON(http.StatusOK, gin.H{"success": true,"code": 2000,"message": "Logged In successfully!","user": user}) return } c.JSON(http.StatusBadRequest, gin.H{"success": false, "code": 4006, "message": "Error Logging In"}) }) router.Run(":" + port) appengine.Main() } func insertLoginUser(db *sql.DB, mobile string, name string){ if _, err := db.Exec("insert into user(name, mobile) values(?, ?) 
on duplicate key update name = ?", name, mobile, name); err != nil{ log.Print("User could not be inserted: " + mobile) } } func activateUser(db *sql.DB, mobile string){ if _, err := db.Exec("update user set is_active=true where mobile=?", mobile); err != nil{ log.Print("User could not be activated: " + mobile) } } func getMD5Hash(text string) string { hasher := md5.New() hasher.Write([]byte(text)) return hex.EncodeToString(hasher.Sum(nil)) } func getRole(role string) string{ if role != ""{ return role } return "CUSTOMER" }
[ "\"PORT\"", "\"DATABASE_URL\"" ]
[]
[ "PORT", "DATABASE_URL" ]
[]
["PORT", "DATABASE_URL"]
go
2
0
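Here PORT and DATABASE_URL are both read in main(), and the program aborts when PORT is empty. A small Python sketch of that fail-fast startup check; the placeholder connection string is an assumption, not a value from the file:

import os
import sys

# Fail fast when PORT is unset, as main() does; DATABASE_URL keeps a
# placeholder default purely for illustration.
port = os.environ.get("PORT")
if not port:
    sys.exit("$PORT must be set")
database_url = os.environ.get("DATABASE_URL", "user:pass@tcp(localhost:3306)/app")
print(port, database_url)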
src/third_party/angle/third_party/glmark2/src/waflib/Tools/tex.py
#! /usr/bin/env python # encoding: utf-8 # WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file import os,re from waflib import Utils,Task,Errors,Logs,Node from waflib.TaskGen import feature,before_method re_bibunit=re.compile(r'\\(?P<type>putbib)\[(?P<file>[^\[\]]*)\]',re.M) def bibunitscan(self): node=self.inputs[0] nodes=[] if not node:return nodes code=node.read() for match in re_bibunit.finditer(code): path=match.group('file') if path: for k in('','.bib'): Logs.debug('tex: trying %s%s',path,k) fi=node.parent.find_resource(path+k) if fi: nodes.append(fi) else: Logs.debug('tex: could not find %s',path) Logs.debug('tex: found the following bibunit files: %s',nodes) return nodes exts_deps_tex=['','.ltx','.tex','.bib','.pdf','.png','.eps','.ps','.sty'] exts_tex=['.ltx','.tex'] re_tex=re.compile(r'\\(?P<type>usepackage|RequirePackage|include|bibliography([^\[\]{}]*)|putbib|includegraphics|input|import|bringin|lstinputlisting)(\[[^\[\]]*\])?{(?P<file>[^{}]*)}',re.M) g_bibtex_re=re.compile('bibdata',re.M) g_glossaries_re=re.compile('\\@newglossary',re.M) class tex(Task.Task): bibtex_fun,_=Task.compile_fun('${BIBTEX} ${BIBTEXFLAGS} ${SRCFILE}',shell=False) bibtex_fun.__doc__=""" Execute the program **bibtex** """ makeindex_fun,_=Task.compile_fun('${MAKEINDEX} ${MAKEINDEXFLAGS} ${SRCFILE}',shell=False) makeindex_fun.__doc__=""" Execute the program **makeindex** """ makeglossaries_fun,_=Task.compile_fun('${MAKEGLOSSARIES} ${SRCFILE}',shell=False) makeglossaries_fun.__doc__=""" Execute the program **makeglossaries** """ def exec_command(self,cmd,**kw): kw['stdout']=kw['stderr']=None return super(tex,self).exec_command(cmd,**kw) def scan_aux(self,node): nodes=[node] re_aux=re.compile(r'\\@input{(?P<file>[^{}]*)}',re.M) def parse_node(node): code=node.read() for match in re_aux.finditer(code): path=match.group('file') found=node.parent.find_or_declare(path) if found and found not in nodes: Logs.debug('tex: found aux node %r',found) nodes.append(found) parse_node(found) parse_node(node) return nodes def scan(self): node=self.inputs[0] nodes=[] names=[] seen=[] if not node:return(nodes,names) def parse_node(node): if node in seen: return seen.append(node) code=node.read() global re_tex for match in re_tex.finditer(code): multibib=match.group('type') if multibib and multibib.startswith('bibliography'): multibib=multibib[len('bibliography'):] if multibib.startswith('style'): continue else: multibib=None for path in match.group('file').split(','): if path: add_name=True found=None for k in exts_deps_tex: for up in self.texinputs_nodes: Logs.debug('tex: trying %s%s',path,k) found=up.find_resource(path+k) if found: break for tsk in self.generator.tasks: if not found or found in tsk.outputs: break else: nodes.append(found) add_name=False for ext in exts_tex: if found.name.endswith(ext): parse_node(found) break if found and multibib and found.name.endswith('.bib'): try: self.multibibs.append(found) except AttributeError: self.multibibs=[found] if add_name: names.append(path) parse_node(node) for x in nodes: x.parent.get_bld().mkdir() Logs.debug("tex: found the following : %s and names %s",nodes,names) return(nodes,names) def check_status(self,msg,retcode): if retcode!=0: raise Errors.WafError('%r command exit status %r'%(msg,retcode)) def bibfile(self): for aux_node in self.aux_nodes: try: ct=aux_node.read() except EnvironmentError: Logs.error('Error reading %s: %r',aux_node.abspath()) continue if g_bibtex_re.findall(ct): Logs.info('calling bibtex') self.env.env={} 
self.env.env.update(os.environ) self.env.env.update({'BIBINPUTS':self.texinputs(),'BSTINPUTS':self.texinputs()}) self.env.SRCFILE=aux_node.name[:-4] self.check_status('error when calling bibtex',self.bibtex_fun()) for node in getattr(self,'multibibs',[]): self.env.env={} self.env.env.update(os.environ) self.env.env.update({'BIBINPUTS':self.texinputs(),'BSTINPUTS':self.texinputs()}) self.env.SRCFILE=node.name[:-4] self.check_status('error when calling bibtex',self.bibtex_fun()) def bibunits(self): try: bibunits=bibunitscan(self) except OSError: Logs.error('error bibunitscan') else: if bibunits: fn=['bu'+str(i)for i in range(1,len(bibunits)+1)] if fn: Logs.info('calling bibtex on bibunits') for f in fn: self.env.env={'BIBINPUTS':self.texinputs(),'BSTINPUTS':self.texinputs()} self.env.SRCFILE=f self.check_status('error when calling bibtex',self.bibtex_fun()) def makeindex(self): self.idx_node=self.inputs[0].change_ext('.idx') try: idx_path=self.idx_node.abspath() os.stat(idx_path) except OSError: Logs.info('index file %s absent, not calling makeindex',idx_path) else: Logs.info('calling makeindex') self.env.SRCFILE=self.idx_node.name self.env.env={} self.check_status('error when calling makeindex %s'%idx_path,self.makeindex_fun()) def bibtopic(self): p=self.inputs[0].parent.get_bld() if os.path.exists(os.path.join(p.abspath(),'btaux.aux')): self.aux_nodes+=p.ant_glob('*[0-9].aux') def makeglossaries(self): src_file=self.inputs[0].abspath() base_file=os.path.basename(src_file) base,_=os.path.splitext(base_file) for aux_node in self.aux_nodes: try: ct=aux_node.read() except EnvironmentError: Logs.error('Error reading %s: %r',aux_node.abspath()) continue if g_glossaries_re.findall(ct): if not self.env.MAKEGLOSSARIES: raise Errors.WafError("The program 'makeglossaries' is missing!") Logs.warn('calling makeglossaries') self.env.SRCFILE=base self.check_status('error when calling makeglossaries %s'%base,self.makeglossaries_fun()) return def texinputs(self): return os.pathsep.join([k.abspath()for k in self.texinputs_nodes])+os.pathsep def run(self): env=self.env if not env.PROMPT_LATEX: env.append_value('LATEXFLAGS','-interaction=batchmode') env.append_value('PDFLATEXFLAGS','-interaction=batchmode') env.append_value('XELATEXFLAGS','-interaction=batchmode') self.cwd=self.inputs[0].parent.get_bld() Logs.info('first pass on %s',self.__class__.__name__) cur_hash=self.hash_aux_nodes() self.call_latex() self.hash_aux_nodes() self.bibtopic() self.bibfile() self.bibunits() self.makeindex() self.makeglossaries() for i in range(10): prev_hash=cur_hash cur_hash=self.hash_aux_nodes() if not cur_hash: Logs.error('No aux.h to process') if cur_hash and cur_hash==prev_hash: break Logs.info('calling %s',self.__class__.__name__) self.call_latex() def hash_aux_nodes(self): try: self.aux_nodes except AttributeError: try: self.aux_nodes=self.scan_aux(self.inputs[0].change_ext('.aux')) except IOError: return None return Utils.h_list([Utils.h_file(x.abspath())for x in self.aux_nodes]) def call_latex(self): self.env.env={} self.env.env.update(os.environ) self.env.env.update({'TEXINPUTS':self.texinputs()}) self.env.SRCFILE=self.inputs[0].abspath() self.check_status('error when calling latex',self.texfun()) class latex(tex): texfun,vars=Task.compile_fun('${LATEX} ${LATEXFLAGS} ${SRCFILE}',shell=False) class pdflatex(tex): texfun,vars=Task.compile_fun('${PDFLATEX} ${PDFLATEXFLAGS} ${SRCFILE}',shell=False) class xelatex(tex): texfun,vars=Task.compile_fun('${XELATEX} ${XELATEXFLAGS} ${SRCFILE}',shell=False) class 
dvips(Task.Task): run_str='${DVIPS} ${DVIPSFLAGS} ${SRC} -o ${TGT}' color='BLUE' after=['latex','pdflatex','xelatex'] class dvipdf(Task.Task): run_str='${DVIPDF} ${DVIPDFFLAGS} ${SRC} ${TGT}' color='BLUE' after=['latex','pdflatex','xelatex'] class pdf2ps(Task.Task): run_str='${PDF2PS} ${PDF2PSFLAGS} ${SRC} ${TGT}' color='BLUE' after=['latex','pdflatex','xelatex'] @feature('tex') @before_method('process_source') def apply_tex(self): if not getattr(self,'type',None)in('latex','pdflatex','xelatex'): self.type='pdflatex' outs=Utils.to_list(getattr(self,'outs',[])) self.env.PROMPT_LATEX=getattr(self,'prompt',1) deps_lst=[] if getattr(self,'deps',None): deps=self.to_list(self.deps) for dep in deps: if isinstance(dep,str): n=self.path.find_resource(dep) if not n: self.bld.fatal('Could not find %r for %r'%(dep,self)) if not n in deps_lst: deps_lst.append(n) elif isinstance(dep,Node.Node): deps_lst.append(dep) for node in self.to_nodes(self.source): if self.type=='latex': task=self.create_task('latex',node,node.change_ext('.dvi')) elif self.type=='pdflatex': task=self.create_task('pdflatex',node,node.change_ext('.pdf')) elif self.type=='xelatex': task=self.create_task('xelatex',node,node.change_ext('.pdf')) task.env=self.env if deps_lst: for n in deps_lst: if not n in task.dep_nodes: task.dep_nodes.append(n) if hasattr(self,'texinputs_nodes'): task.texinputs_nodes=self.texinputs_nodes else: task.texinputs_nodes=[node.parent,node.parent.get_bld(),self.path,self.path.get_bld()] lst=os.environ.get('TEXINPUTS','') if self.env.TEXINPUTS: lst+=os.pathsep+self.env.TEXINPUTS if lst: lst=lst.split(os.pathsep) for x in lst: if x: if os.path.isabs(x): p=self.bld.root.find_node(x) if p: task.texinputs_nodes.append(p) else: Logs.error('Invalid TEXINPUTS folder %s',x) else: Logs.error('Cannot resolve relative paths in TEXINPUTS %s',x) if self.type=='latex': if'ps'in outs: tsk=self.create_task('dvips',task.outputs,node.change_ext('.ps')) tsk.env.env=dict(os.environ) if'pdf'in outs: tsk=self.create_task('dvipdf',task.outputs,node.change_ext('.pdf')) tsk.env.env=dict(os.environ) elif self.type=='pdflatex': if'ps'in outs: self.create_task('pdf2ps',task.outputs,node.change_ext('.ps')) self.source=[] def configure(self): v=self.env for p in'tex latex pdflatex xelatex bibtex dvips dvipdf ps2pdf makeindex pdf2ps makeglossaries'.split(): try: self.find_program(p,var=p.upper()) except self.errors.ConfigurationError: pass v.DVIPSFLAGS='-Ppdf'
[]
[]
[ "TEXINPUTS" ]
[]
["TEXINPUTS"]
python
1
0
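The waf tex tool in this row treats TEXINPUTS as an os.pathsep-separated search path and appends its own directories (plus a trailing separator) before invoking LaTeX. A short Python sketch of that pattern; the extra directory is hypothetical:

import os

# Build an extended TEXINPUTS, keeping the user's existing entries and
# appending one more directory plus a trailing separator, as tex.py does.
entries = [p for p in os.environ.get("TEXINPUTS", "").split(os.pathsep) if p]
entries.append("/path/to/project/texmf")  # hypothetical extra directory
child_env = dict(os.environ, TEXINPUTS=os.pathsep.join(entries) + os.pathsep)
print(child_env["TEXINPUTS"])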
utils/subpath.go
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. // See LICENSE.txt for license information. package utils import ( "crypto/sha256" "encoding/base64" "fmt" "io/ioutil" "net/url" "os" "path" "path/filepath" "regexp" "strings" "github.com/pkg/errors" "github.com/cjdelisle/matterfoss-server/v5/model" "github.com/cjdelisle/matterfoss-server/v5/shared/mlog" "github.com/cjdelisle/matterfoss-server/v5/utils/fileutils" ) // getSubpathScript renders the inline script that defines window.publicPath to change how webpack loads assets. func getSubpathScript(subpath string) string { if subpath == "" { subpath = "/" } newPath := path.Join(subpath, "static") + "/" return fmt.Sprintf("window.publicPath='%s'", newPath) } // GetSubpathScriptHash computes the script-src addition required for the subpath script to bypass CSP protections. func GetSubpathScriptHash(subpath string) string { // No hash is required for the default subpath. if subpath == "" || subpath == "/" { return "" } scriptHash := sha256.Sum256([]byte(getSubpathScript(subpath))) return fmt.Sprintf(" 'sha256-%s'", base64.StdEncoding.EncodeToString(scriptHash[:])) } // UpdateAssetsSubpathInDir rewrites assets in the given directory to assume the application is // hosted at the given subpath instead of at the root. No changes are written unless necessary. func UpdateAssetsSubpathInDir(subpath, directory string) error { if subpath == "" { subpath = "/" } staticDir, found := fileutils.FindDir(directory) if !found { return errors.New("failed to find client dir") } staticDir, err := filepath.EvalSymlinks(staticDir) if err != nil { return errors.Wrapf(err, "failed to resolve symlinks to %s", staticDir) } rootHTMLPath := filepath.Join(staticDir, "root.html") oldRootHTML, err := ioutil.ReadFile(rootHTMLPath) if err != nil { return errors.Wrap(err, "failed to open root.html") } oldSubpath := "/" // Determine if a previous subpath had already been rewritten into the assets. reWebpackPublicPathScript := regexp.MustCompile("window.publicPath='([^']+/)static/'") alreadyRewritten := false if matches := reWebpackPublicPathScript.FindStringSubmatch(string(oldRootHTML)); matches != nil { oldSubpath = matches[1] alreadyRewritten = true } pathToReplace := path.Join(oldSubpath, "static") + "/" newPath := path.Join(subpath, "static") + "/" mlog.Debug("Rewriting static assets", mlog.String("from_subpath", oldSubpath), mlog.String("to_subpath", subpath)) newRootHTML := string(oldRootHTML) reCSP := regexp.MustCompile(`<meta http-equiv="Content-Security-Policy" content="script-src 'self' cdn.rudderlabs.com/ js.stripe.com/v3([^"]*)">`) if results := reCSP.FindAllString(newRootHTML, -1); len(results) == 0 { return fmt.Errorf("failed to find 'Content-Security-Policy' meta tag to rewrite") } newRootHTML = reCSP.ReplaceAllLiteralString(newRootHTML, fmt.Sprintf( `<meta http-equiv="Content-Security-Policy" content="script-src 'self' cdn.rudderlabs.com/ js.stripe.com/v3%s">`, GetSubpathScriptHash(subpath), )) // Rewrite the root.html references to `/static/*` to include the given subpath. // This potentially includes a previously injected inline script that needs to // be updated (and isn't covered by the cases above). newRootHTML = strings.Replace(newRootHTML, pathToReplace, newPath, -1) if alreadyRewritten && subpath == "/" { // Remove the injected script since no longer required. Note that the rewrite above // will have affected the script, so look for the new subpath, not the old one. 
oldScript := getSubpathScript(subpath) newRootHTML = strings.Replace(newRootHTML, fmt.Sprintf("</style><script>%s</script>", oldScript), "</style>", 1) } else if !alreadyRewritten && subpath != "/" { // Otherwise, inject the script to define `window.publicPath`. script := getSubpathScript(subpath) newRootHTML = strings.Replace(newRootHTML, "</style>", fmt.Sprintf("</style><script>%s</script>", script), 1) } // Write out the updated root.html. if err = ioutil.WriteFile(rootHTMLPath, []byte(newRootHTML), 0); err != nil { return errors.Wrapf(err, "failed to update root.html with subpath %s", subpath) } // Rewrite the manifest.json and *.css references to `/static/*` (or a previously rewritten subpath). err = filepath.Walk(staticDir, func(walkPath string, info os.FileInfo, err error) error { if filepath.Base(walkPath) == "manifest.json" || filepath.Ext(walkPath) == ".css" { old, err := ioutil.ReadFile(walkPath) if err != nil { return errors.Wrapf(err, "failed to open %s", walkPath) } new := strings.Replace(string(old), pathToReplace, newPath, -1) if err = ioutil.WriteFile(walkPath, []byte(new), 0); err != nil { return errors.Wrapf(err, "failed to update %s with subpath %s", walkPath, subpath) } } return nil }) if err != nil { return errors.Wrapf(err, "error walking %s", staticDir) } return nil } // UpdateAssetsSubpath rewrites assets in the /client directory to assume the application is hosted // at the given subpath instead of at the root. No changes are written unless necessary. func UpdateAssetsSubpath(subpath string) error { return UpdateAssetsSubpathInDir(subpath, model.CLIENT_DIR) } // UpdateAssetsSubpathFromConfig uses UpdateAssetsSubpath and any path defined in the SiteURL. func UpdateAssetsSubpathFromConfig(config *model.Config) error { // Don't rewrite in development environments, since webpack in developer mode constantly // updates the assets and must be configured separately. if model.BuildNumber == "dev" { mlog.Debug("Skipping update to assets subpath since dev build") return nil } // Similarly, don't rewrite during a CI build, when the assets may not even be present. if os.Getenv("IS_CI") == "true" { mlog.Debug("Skipping update to assets subpath since CI build") return nil } subpath, err := GetSubpathFromConfig(config) if err != nil { return err } return UpdateAssetsSubpath(subpath) } func GetSubpathFromConfig(config *model.Config) (string, error) { if config == nil { return "", errors.New("no config provided") } else if config.ServiceSettings.SiteURL == nil { return "/", nil } u, err := url.Parse(*config.ServiceSettings.SiteURL) if err != nil { return "", errors.Wrap(err, "failed to parse SiteURL from config") } if u.Path == "" { return "/", nil } return path.Clean(u.Path), nil }
[ "\"IS_CI\"" ]
[]
[ "IS_CI" ]
[]
["IS_CI"]
go
1
0
tests/basic_checks/platform_info.py
def get_module_version_when_available(module_name): try: import importlib mod = importlib.import_module(module_name) return mod.__version__ except: return "NOT_INSTALLED" def getVersions(): import os, platform lVersionDict = {}; lVersionDict["system_platform"] = platform.platform(); lVersionDict["system_uname"] = platform.uname(); lVersionDict["system_processor"] = platform.processor(); lVersionDict["python_implementation"] = platform.python_implementation(); lVersionDict["python_version"] = platform.python_version(); lModules = ["sklearn", "pandas", "numpy" , "scipy" , "matplotlib", "pydot", "sqlalchemy" , "xgboost" , "keras", "pip" , "setuptools", "Cython", "dill" , "pathos"] for module_name in lModules: lVersionDict[module_name + "_version"] = get_module_version_when_available(module_name) # print([(k, lVersionDict[k]) for k in sorted(lVersionDict)]); return lVersionDict; lDict = getVersions() for k in sorted(lDict.keys()): print("PYAF_SYSTEM_DEPENDENT_VERSION_INFO" , (k , lDict[k])) import os lDict = os.environ for k in sorted(lDict.keys()): print("PYAF_SYSTEM_DEPENDENT_ENVIRONMENT_VARIABLE" , (k , lDict[k])) import sys assert sys.version_info >= (3, 5)
[]
[]
[]
[]
[]
python
0
0
server/run.py
from flask import Flask, jsonify, request from flask_jwt import JWT, jwt_required, current_identity from werkzeug.security import safe_str_cmp from flask_cors import CORS from scripts.restMethods import * from scripts.flask_httpauth import * import os import sys import json import bcrypt import datetime JWT_EXPIRATION_DELTA = 1 app = Flask(__name__) app.config['JWT_EXPIRATION_DELTA'] = datetime.timedelta(days=5) CORS(app) auth = HTTPBasicAuth() class User(object): def __init__(self, id, username, password, firstName, lastName, role, email): self.id = id self.username = username self.password = password self.firstName = firstName self.lastName = lastName self.role = role self.email = email def __str__(self): return "User(id='%s')" % self.id def authenticate(username, password): # Get the user from the database to confirm his/her password user = sendGetRequest(["*"], ["users"], {"user_username":username}) # convert json reponse from string to json object rawJson = json.loads(user.response[0])[0] # check if the hashed password in the database equals the password entered by the user if user and bcrypt.checkpw(password.encode('utf-8'), rawJson["user_password"].encode('utf-8')): return User(rawJson["user_id"], rawJson["user_username"], rawJson["user_password"], rawJson["user_firstName"], rawJson["user_role"], rawJson["user_lastName"], rawJson["user_email"]) def identity(payload): user = sendGetRequest(["*"], ["users"], {"user_id":str(payload['identity'])}) return user.response[0] app.debug = True app.config['SECRET_KEY'] = 'super-secret' jwt = JWT(app, authenticate, identity) ''' GET - BROWSE ''' #add a new user @app.route('/users/hasUsers', methods=['GET']) def returnhasUsers(): try: return hasUsers() except: return error() #returns all users @app.route('/users', methods=['GET']) @jwt_required() def returnUsers(): try: return sendGetRequest(["*"], ["users"]) except: return error() #returns all products @app.route('/products', methods=['GET']) def returnProducts(): try: return sendGetRequest(["*"], ["products p", "productCategories c"], andConditions=["p.category_id=c.category_id"], andOnFirst=False) except: return error() #returns all feeds @app.route('/feeds', methods=['GET']) def returnFeeds(): try: return sendGetRequest(["*"], ["feeds f", "feedTypes t"], andConditions=["f.feed_type = t.type_id"], andOnFirst=False) except: return error() #returns all adversaries @app.route('/adversaries', methods=['GET']) def returnAdversaries(): try: return sendGetRequest(["*"], ["adversaries"]) except: return error() #returns all assets @app.route('/assets', methods=['GET']) def returnAssets(): try: return sendGetRequest(["*"], ["assets"]) except: return error() #returns all attack_types @app.route('/attack_types', methods=['GET']) def returnAttackTypes(): try: return sendGetRequest(["*"], ["attack_types"]) except: return error() #returns all attack_vectors @app.route('/attack_vectors', methods=['GET']) def returnAttackVectors(): try: return sendGetRequest(["*"], ["attack_vectors"]) except: return error() #returns all vulnerabilities @app.route('/vulnerabilities', methods=['GET']) def returnVulnerabilities(): try: return sendGetRequest(["*"], ["vulnerabilities"]) except: return error() #returns all product categories @app.route('/productCategories', methods=['GET']) def returnCategories(): try: return sendGetRequest(["*"], ["productCategories"]) except: return error() #returns all feed types @app.route('/feedTypes', methods=['GET']) def getTypesofFeeds(): try: return sendGetRequest(["*"], ["feedTypes"]) 
except: return error() ''' GET - QUERY ''' #returns keywords for a product @app.route('/products/<int:id>/keywords', methods=['GET']) def returnProductKeywords(id): try: return sendGetRequest(["*"], ["keywords"], {"product_id":str(id)}) except: return error() #returns threats for a product @app.route('/products/<int:id>/threats', methods=['GET']) def returnProductThreats(id): try: return sendGetRequest(["*"], ["threats"], {"product_id":str(id)}) except: return error() #return all products affected by a threat @app.route('/threats/<int:id>/affected', methods=['GET']) def returnAffectedProducts(id): try: return sendGetRequest(["p1.*"], ["products p1", "threats t1", "threats t2"], {"t1.threat_id":str(id)}, ["t1.threat_link=t2.threat_link", "t2.product_id = p1.product_Id"]) except: return error() @app.route('/threats/<int:id>/entities', methods=['GET']) def returnThreatEntities(id): try: return getThreatEntities(id) except: return error() @app.route('/threats/<int:id>/adversaries', methods=['GET']) def returnThreatAdversaries(id): try: return getThreatClassifications(id, "adversaries", "adv") except: return error() @app.route('/threats/<int:id>/assets', methods=['GET']) def returnThreatAssets(id): try: return getThreatClassifications(id, "assets", "asset") except: return error() @app.route('/threats/<int:id>/attack_types', methods=['GET']) def returnThreatAttackTypes(id): try: return getThreatClassifications(id, "attack_types", "atktyp") except: return error() @app.route('/threats/<int:id>/attack_vectors', methods=['GET']) def returnThreatAttackVectors(id): try: return getThreatClassifications(id, "attack_vectors", "atkvtr") except: return error() @app.route('/threats/<int:id>/vulnerabilities', methods=['GET']) def returnThreatVulnerabilities(id): try: return getThreatClassifications(id, "vulnerabilities", "vuln") except: return error() #return saved threats for a user @app.route('/<int:id>/savedThreats', methods=['GET']) def returnSavedThreats(id): try: return sendGetRequest(["*"], ["threats t", "users_threats ut"], {"ut.user_id":str(id)}, ["t.threat_id=ut.threat_id"]) except: return error() #return saved products for a user @app.route('/<int:id>/savedProducts', methods=['GET']) def returnSavedProducts(id): try: return sendGetRequest(["*"], ["products p", "users_products up"], {"up.user_id":str(id)}, ["p.product_id=up.product_id"]) except: return error() ''' GET - READ ''' #return one user @app.route('/users/<int:id>', methods=['GET']) def returnAUser(id): try: return sendGetRequest(["*"], ["users"], {"user_id":str(id)}) except: return error() #return one product @app.route('/products/<int:id>', methods=['GET']) def returnAProduct(id): try: return sendGetRequest(["*"], ["products p", "productCategories c"], {"product_id":str(id)}, ["p.category_id = c.category_id"]) except: return error() #return one feed @app.route('/feeds/<int:id>', methods=['GET']) def returnAFeed(id): try: return sendGetRequest(["*"], ["feeds"], {"feed_id":str(id)}) except: return error() #return one threat @app.route('/threats/<int:id>', methods=['GET']) def returnAThreat(id): try: return sendGetRequest(["*"], ["threats"], {"threat_id":str(id)}) except: return error() #return one adversary @app.route('/adversaries/<int:id>', methods=['GET']) def returnAnAdversary(id): try: return sendGetRequest(["*"], ["adversaries"], {"adv_id":str(id)}) except: return error() #return one product category @app.route('/productCategories/<int:id>', methods=['GET']) def returnACategory(id): try: return sendGetRequest(["*"], 
["productCategories"], {"category_id":str(id)}) except: return error() #get a feed type @app.route('/feedTypes/<int:id>', methods=['GET']) def getOneFeedType(id): try: return sendGetRequest(["*"], ["feedTypes"], {"type_id":str(id)}) except: return error() ''' GET - COUNT ''' #return count of assets @app.route('/count/assets', methods=['GET']) def returnAssetsCount(): try: return getBasicCount("assets") except: return error() #return count of attack types @app.route('/count/attackTypes', methods=['GET']) def returnAttackCount(): try: return getBasicCount("attack_types") except: return error() #return count of adversaries @app.route('/count/adversaries', methods=['GET']) def returnAttackerCount(): try: return getBasicCount("adversaries") except: return error() #return count of attack vectors @app.route('/count/attackVectors', methods=['GET']) def returnVectorsCount(): try: return getBasicCount("attack_vectors") except: return error() #return count of vulnerabilities TODO @app.route('/count/vulnerabilities', methods=['GET']) def returnVulnerabilitiesCount(): try: return getBasicCount("vulnerabilities") except: return error() #return count of products @app.route('/count/products', methods=['GET']) def returnProductCount(): try: return sendUnsafeGetRequest("SELECT COUNT(*) from products p, productCategories c WHERE p.category_id=c.category_id") except: return error() #return count of threat feeds @app.route('/count/threatFeeds', methods=['GET']) def returnThreatFeedsCount(): try: return getBasicCount("feeds") except: return error() #return count of users @app.route('/count/users', methods=['GET']) def returnUserCount(): try: return getBasicCount("users") except: return error() #return response for count of threats for a feed given its id @app.route('/count/threatActivity/<int:id>', methods=['GET']) def returnFeedThreatCount(id): try: return sendUnsafeGetRequest("select count(*) from threats t where t.feed_id=id and ((t.threat_status not like \"-N/A-\") and (t.threat_status not like \"-New N/A-\"))") except: return error() #return all-time count of threat activity per each feed @app.route('/count/feeds', methods=['GET']) def returnFeedsCount(): try: return sendUnsafeGetRequest("SELECT feeds.feed_title, COUNT(threats.feed_id) AS feed_count FROM feeds LEFT JOIN threats on feeds.feed_id = threats.feed_id where (threats.threat_status not like \"-N/A-\") and (threats.threat_status not like \"-New N/A-\") GROUP BY feeds.feed_id, feeds.feed_title") except: return error() #return count of threat activity per each feed given a range @app.route('/count/feeds/<string:start>/<string:end>', methods=['GET']) def returnFeedsDatedCount(start, end): try: return sendUnsafeGetRequest("SELECT feeds.feed_title, COUNT(threats.feed_id) AS feed_count FROM feeds LEFT JOIN threats on feeds.feed_id = threats.feed_id where (threats.threat_status not like \"-N/A-\") and (threats.threat_status not like \"-New N/A-\") AND str_to_date(threats.threat_date, \'%m/%d/%Y\')" + "between str_to_date(\'" + start.replace("-","/") + "\', \'%m/%d/%Y\') and str_to_date(\'" + end.replace("-","/") + "\', \'%m/%d/%Y\') GROUP BY feeds.feed_id, feeds.feed_title") except: return error() #return counts of the assets, attack_types, adversaries, attack_vectors, and vulnerabilities tables. NOTE: this is hardcoded explicitly for the bar graph! 
@app.route('/count/bar-graph', methods=['GET']) def returnBarGraphData(): try: return sendUnsafeGetRequest("SELECT '" + db + "/assets' AS NAME, COUNT(*) AS NUM_ROWS FROM assets UNION " + "SELECT '" + db + "/attack_types' AS NAME, COUNT(*) AS NUM_ROWS FROM attack_types UNION " + "SELECT '" + db + "/adversaries' AS NAME, COUNT(*) AS NUM_ROWS FROM adversaries UNION " + "SELECT '" + db + "/attack_vectors' AS NAME, COUNT(*) AS NUM_ROWS FROM attack_vectors UNION " + "SELECT '" + db + "/vulnerabilities' AS NAME, COUNT(*) AS NUM_ROWS FROM vulnerabilities") except: return error() ''' GET - QUERY ''' #get all threats in the databse @app.route('/count/threats', methods=['GET']) def returnThreatCount(): try: return sendUnsafeGetRequest("SELECT threats.threat_date, COUNT(threats.threat_date) AS threat_count FROM threats where (threat_status not like \"-N/A-\") and (threat_status not like \"-New N/A-\") GROUP BY threats.threat_date ORDER BY threats.threat_id") except: return error() #get all threats on a certain date in the databse @app.route('/count/threats/<string:start>/<string:end>', methods=['GET']) def returnDateThreatCount(start, end): try: return sendUnsafeGetRequest("SELECT threats.threat_date, COUNT(threats.threat_date) AS threat_count FROM threats where (threats.threat_status not like \"-N/A-\") and (threats.threat_status not like \"-New N/A-\") AND str_to_date(threats.threat_date, \'%m/%d/%Y\') " + "between str_to_date(\'" + start.replace("-","/") + "\', \'%m/%d/%Y\') and str_to_date(\'" + end.replace("-","/") + "\', \'%m/%d/%Y\') GROUP BY threats.threat_date ORDER BY threats.threat_id") except: return error() ''' POST ''' #add a new user @app.route('/users/firstUser', methods=['POST']) def addFirstUser(): try: return postFirstUser() except: return error() #get a count of number of threats in the databse @app.route('/threats/pageCount/<string:type>', methods=['POST']) def returnThreatPageCount(type): try: return getThreatPageCount(type) except: return error() #get a paginated threat query @app.route('/threats/currentPage/<int:currentPage>/type/<string:type>', methods=['POST']) def returnThreatQuery(currentPage, type): try: return getThreatQuery(currentPage, type) except: return error() #get a full threat query @app.route('/threats/type/<string:type>', methods=['POST']) def returnFullThreatQuery(type): try: return getFullThreatQuery(type) except: return error() #get a threat query @app.route('/threats/advanced', methods=['POST']) def returnAdvancedThreatQuery(): try: return getAdvancedThreatQuery() except: return error() #add a new user @app.route('/users', methods=['POST']) @jwt_required() def addUser(): try: return postNewUser() except: return error() #add a new feed @app.route('/feeds', methods=['POST']) @jwt_required() def addFeed(): try: return sendPostRequest("feeds") except: return error() #add a new feed type @app.route('/feedTypes', methods=['POST']) @jwt_required() def addFeedType(): try: return sendPostRequest("feedTypes") except: return error() #add a new adversary @app.route('/adversaries', methods=['POST']) @jwt_required() def addAdversary(): try: return sendPostRequest("adversaries") except: return error() #add a new asset @app.route('/assets', methods=['POST']) @jwt_required() def addAsset(): try: return sendPostRequest("assets") except: return error() #add a new attack type @app.route('/attack_types', methods=['POST']) @jwt_required() def addAttackType(): try: return sendPostRequest("attack_types") except: return error() #add a new attack vector @app.route('/attack_vectors', 
methods=['POST']) @jwt_required() def addAttackVector(): try: return sendPostRequest("attack_vectors") except: return error() #add a new vulnerability @app.route('/vulnerabilities', methods=['POST']) @jwt_required() def addVulnerability(): try: return sendPostRequest("vulnerabilities") except: return error() #add a new product @app.route('/products', methods=['POST']) @jwt_required() def addProduct(): try: return postProduct() except: return error() #add a new product category @app.route('/productCategories', methods=['POST']) @jwt_required() def addCategory(): try: return sendPostRequest("productCategories") except: return error() #add a new keyword for a product @app.route('/products/<int:id>/keywords', methods=['POST']) @jwt_required() def addProductKeyword(id): try: return postProductKeyword(id) except: return error() #add a new threat for a product @app.route('/products/<int:id>/threats', methods=['POST']) @jwt_required() def addProductThreat(id): try: return postProductThreat(id) except: return error() #save a product @app.route('/<int:id>/savedProducts', methods=['POST']) @jwt_required() def saveProduct(id): try: return postSavedProduct(id) except: return error() #save a threat @app.route('/<int:id>/savedThreats', methods=['POST']) @jwt_required() def saveThreat(id): try: return postSavedThreat(id) except: return error() @app.route('/functions/getTokenizedText/<int:isThreat>', methods=['POST']) def getTokenizedText(isThreat): try: return calculateTokenizedText(isThreat=(isThreat == 1)) except: return error() @app.route('/users_products/insertTrainingData', methods=['POST']) @jwt_required() def saveTrainingdata(): try: return postTrainingdata() except: return error() ''' DELETE ''' #delete user @app.route('/users/<int:id>', methods=['DELETE']) @jwt_required() def deleteAUser(id): try: return sendDeleteRequest("users", {"user_id":id}) except: return error() #delete all of a product's keywords @app.route('/products/<int:id>/keywords', methods=['DELETE']) @jwt_required() def deleteAllKeywords(id): try: return sendDeleteRequest("keywords", {"product_id":id}) except: return error() #delete a product @app.route('/products/<int:id>', methods=['DELETE']) @jwt_required() def deleteAProduct(id): try: return deleteProduct(id) except: return error() #delete a feed @app.route('/feeds/<int:id>', methods=['DELETE']) @jwt_required() def deleteAFeed(id): try: return deleteFeed(id) except: return error() #delete an adversary @app.route('/adversaries/<int:id>', methods=['DELETE']) @jwt_required() def deleteAAdversary(id): try: return sendDeleteRequest("adversaries", {"adv_id":id}) except: return error() #delete an asset @app.route('/assets/<int:id>', methods=['DELETE']) @jwt_required() def deleteAnAsset(id): try: return sendDeleteRequest("assets", {"asset_id":id}) except: return error() #delete an attack type @app.route('/attack_types/<int:id>', methods=['DELETE']) @jwt_required() def deleteAnAttackType(id): try: return sendDeleteRequest("attack_types", {"atktyp_id":id}) except: return error() #delete an attack vector @app.route('/attack_vectors/<int:id>', methods=['DELETE']) @jwt_required() def deleteAnAttackVector(id): try: return sendDeleteRequest("attack_vectors", {"atkvtr_id":id}) except: return error() #delete a vulnerability @app.route('/vulnerabilities/<int:id>', methods=['DELETE']) @jwt_required() def deleteAVulnerability(id): try: return sendDeleteRequest("vulnerabilities", {"vuln_id":id}) except: return error() #delete a product category @app.route('/productCategories/<int:id>', 
methods=['DELETE']) @jwt_required() def deleteACategory(id): try: return sendDeleteRequest("productCategories", {"category_id":id}) except: return error() #delete a threat @app.route('/threats/<int:id>', methods=['DELETE']) @jwt_required() def deleteAThreat(id): try: return deleteThreat(id) except: return error() #delete a product's keyword @app.route('/products/<int:pid>/keywords/<int:kid>', methods=['DELETE']) @jwt_required() def deleteAProductKeyword(pid, kid): try: return sendUnsafeGetRequest("delete from keywords where keyword_id=" + str(kid) + " and product_id=" + str(pid)) except: return error() #delete a product's threat @app.route('/products/<int:pid>/threats/<int:tid>', methods=['DELETE']) @jwt_required() def deleteAProductThreat(pid, tid): try: return deleteProductThreat(pid, tid) except: return error() #delete a feed type @app.route('/feedTypes/<int:id>', methods=['DELETE']) @jwt_required() def deleteOneFeedType(id): try: return deleteFeedType(id) except: return error() #delete a saved product (remove from watchlist) @app.route('/<int:id>/savedProducts/<int:pid>', methods=['DELETE']) @jwt_required() def deleteOneSavedProduct(id, pid): try: return removeProductFromWatchlist(id, pid) except: return error() #delete a saved product (remove from watchlist) @app.route('/<int:id>/savedThreats/<int:tid>', methods=['DELETE']) @jwt_required() def deleteOneSavedThreat(id, tid): try: return removeThreatFromWatchlist(id, tid) except: return error() ''' PUT ''' #update a feed @app.route('/feed/<int:id>', methods=['PUT']) @jwt_required() def updateFeed(id): try: return sendPutRequest("feeds", "feed_id", id) except: return error() #update a product's name @app.route('/products/<int:id>/name', methods=['PUT']) #update a product's description @app.route('/products/<int:id>/desc', methods=['PUT']) #update a product's description @app.route('/products/<int:id>/category', methods=['PUT']) #update a product @app.route('/products/<int:id>', methods=['PUT']) @jwt_required() def updateAProduct(id): try: return sendPutRequest("products", "product_id", id) except: return error() #update a feed @app.route('/feeds/<int:id>', methods=['PUT']) @jwt_required() def updateAFeed(id): try: return sendPutRequest("feeds", "feed_id", id) except: return error() #update a feed type @app.route('/feedTypes/<int:id>', methods=['PUT']) @jwt_required() def updateAFeedType(id): try: return sendPutRequest("feedTypes", "type_id", id) except: return error() #update an adversary @app.route('/adversaries/<int:id>', methods=['PUT']) @jwt_required() def updateAnAdversary(id): try: return sendPutRequest("adversaries", "adv_id", id) except: return error() #update an asset @app.route('/assets/<int:id>', methods=['PUT']) @jwt_required() def updateAnAsset(id): try: return sendPutRequest("assets", "asset_id", id) except: return error() #update an attack_type @app.route('/attack_types/<int:id>', methods=['PUT']) @jwt_required() def updateAnAttackType(id): try: return sendPutRequest("attack_types", "atktyp_id", id) except: return error() #update an attack_vector @app.route('/attack_vectors/<int:id>', methods=['PUT']) @jwt_required() def updateAnAttackVector(id): try: return sendPutRequest("attack_vectors", "atkvtr_id", id) except: return error() #update a vulnerability @app.route('/vulnerabilities/<int:id>', methods=['PUT']) @jwt_required() def updateAVulnerability(id): try: return sendPutRequest("vulnerabilities", "vuln_id", id) except: return error() #update a product category @app.route('/productCategories/<int:id>', methods=['PUT']) 
@jwt_required() def updateAProductCategory(id): try: return sendPutRequest("productCategories", "category_id", id) except: return error() #updates a threat's description @app.route('/threats/<int:id>/desc', methods=['PUT']) @jwt_required() def updateThreatDesc(id): try: return putThreatDesc(id) except: return error() #update multiple threats' ratings @app.route('/threats/ratings', methods=['PUT']) @jwt_required() def updateThreatRatings(): try: return putThreatRatings() except: return error() #update multiple threats' statuses @app.route('/threats/statuses', methods=['PUT']) @jwt_required() def updateThreatStatuses(): try: return putThreatStatuses() except: return error() #update a threat's title @app.route('/threats/<int:id>/title', methods=['PUT']) #update a threat's link @app.route('/threats/<int:id>/link', methods=['PUT']) #update a threat's status @app.route('/threats/<int:id>/status', methods=['PUT']) #update a threat's rating @app.route('/threats/<int:id>/rating', methods=['PUT']) #update a threat's owner @app.route('/threats/<int:id>/owner', methods=['PUT']) #update a threat's note @app.route('/threats/<int:id>/note', methods=['PUT']) #update a threat's adversary @app.route('/threats/<int:id>/adversary', methods=['PUT']) @jwt_required() def updateThreat(id): try: return sendPutRequest("threats", "threat_id", id) except: return error() @app.route('/threats/<int:id>/retrain', methods=['PUT']) @jwt_required() def retrainAThreat(id): try: return retrainThreat(id) except: return error() #update a user's role @app.route('/users/<int:id>/role', methods=['PUT']) @jwt_required() def updateUserRole(id): try: return sendPutRequest("users", "user_id", id) except: return error() if __name__ == '__main__': port = int(os.environ.get('PORT', 5000)) if (config["isDeveloping"]): app.run(host='127.0.0.1', port=port) #debug=True) else: app.run(host='0.0.0.0', port=port) #debug=True)
[]
[]
[ "PORT" ]
[]
["PORT"]
python
1
0
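The Flask entry point in the record above resolves its listen port from the PORT environment variable with a fallback of 5000 and switches the bind address on a development flag. Below is a minimal Go sketch of the same pattern; only the default port and the localhost-vs-all-interfaces choice are taken from the record, the IS_DEVELOPING variable and handler are illustrative stand-ins.

package main

import (
	"fmt"
	"log"
	"net/http"
	"os"
)

func main() {
	// Default to 5000, as the Flask app does, and let PORT override it.
	port := os.Getenv("PORT")
	if port == "" {
		port = "5000"
	}

	// Bind only to localhost while developing, to all interfaces otherwise.
	// IS_DEVELOPING is a hypothetical stand-in for config["isDeveloping"].
	host := "0.0.0.0"
	if os.Getenv("IS_DEVELOPING") == "true" {
		host = "127.0.0.1"
	}

	http.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	})
	log.Printf("listening on %s:%s", host, port)
	log.Fatal(http.ListenAndServe(host+":"+port, nil))
}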
config/viper_test.go
package config import ( "fmt" "github.com/jinlongchen/golang-utilities/map/helper" "io/ioutil" "os" "path" "runtime" "testing" "time" "github.com/jinlongchen/golang-utilities/converter" ) func TestNewEtcdConfig(t *testing.T) { cfg := NewRemoteConfig("etcd", "http://192.168.2.42:2379", "/configs/a.toml", "toml") ticker := time.NewTicker(time.Second * 10) for { select { case <-ticker.C: println(cfg.GetInt("abc.def")) } } } func TestConfig_EncryptString2(t *testing.T) { _, filename, _, _ := runtime.Caller(0) tomlFile := path.Join(path.Dir(filename), "conf-file.toml") cfg := NewConfig(tomlFile) AesKeySalt = os.Getenv("GO_AES_SALT") println("code.qcse.com:", cfg.EncryptString("code.qcse.com")) println("https://code.qcse.com/:", cfg.EncryptString("https://code.qcse.com/")) println("MFrL8AXyOIH6uuBABwKKUGlAVoKVSIX4ObCa61GZtE2KZOm6J72fxO85wOSE99Oq:", cfg.EncryptString("MFrL8AXyOIH6uuBABwKKUGlAVoKVSIX4ObCa61GZtE2KZOm6J72fxO85wOSE99Oq")) println("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYmYiOjE1NDMyODk5MTN9.EV30v2VZQ1hiQhcTpCHf55UX7AfmzlMg5T2iD3neehs:", cfg.EncryptString("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYmYiOjE1NDMyODk5MTN9.EV30v2VZQ1hiQhcTpCHf55UX7AfmzlMg5T2iD3neehs")) } func TestConfig_EncryptString(t *testing.T) { _, filename, _, _ := runtime.Caller(0) tomlFile := path.Join(path.Dir(filename), "conf-file.toml") cfg := NewConfig(tomlFile) AesKeySalt = os.Getenv("GO_AES_SALT") //println("server param:", cfg.EncryptString("ss://CHACHA20-IETF-POLY1305:981f08f5dcb43b1ac05f82632C65_d160127f2cc7@:11197")) //// local -> hk server println("ss://CHACHA20-IETF-POLY1305:981f08f5dcb43b1ac05f82632C65_76f070767cfd7eafb920@[kpc]:11196:", cfg.EncryptString("ss://CHACHA20-IETF-POLY1305:981f08f5dcb43b1ac05f82632C65_76f070767cfd7eafb920@[kpc]:11196")) //println("ss://CHACHA20-IETF-POLY1305:981f08f5dcb43b1ac05f82632C65_76f070767cfd7eafb920@:11197 = ", // cfg.EncryptString("ss://CHACHA20-IETF-POLY1305:981f08f5dcb43b1ac05f82632C65_76f070767cfd7eafb920@:11197")) //println("ss://CHACHA20-IETF-POLY1305:981f08f5dcb43b1ac05f82632C65_d160127f2cc7@[35.182.130.31]:11197:", cfg.EncryptString("ss://CHACHA20-IETF-POLY1305:981f08f5dcb43b1ac05f82632C65_d160127f2cc7@[35.182.130.31]:11197")) //println("149.129.83.138:11195", cfg.EncryptString("149.129.83.138:11195")) println("149.129.83.138:11194", cfg.EncryptString("149.129.83.138:11194")) println("149.129.83.138:11195", cfg.EncryptString("149.129.83.138:11195")) //println("ss://CHACHA20-IETF-POLY1305:981f08f5dcb43b1ac05f82632C65_d160127f2cc7@[47.240.20.123]:11197:", // cfg.EncryptString("ss://CHACHA20-IETF-POLY1305:981f08f5dcb43b1ac05f82632C65_d160127f2cc7@[47.240.20.123]:11197")) //println("ss://CHACHA20-IETF-POLY1305:981f08f5dcb43b1ac05f82632C65_d160127f2cc7@[47.52.232.133]:11197:", cfg.EncryptString("ss://CHACHA20-IETF-POLY1305:981f08f5dcb43b1ac05f82632C65_d160127f2cc7@[47.52.232.133]:11197")) //println("->> local server param kcp:", cfg.EncryptString("ss://CHACHA20-IETF-POLY1305:981f08f5dcb43b1ac05f82632C65_d160127f2cc7@[172.17.0.5]:11196")) //println("->> kcp server config:", cfg.EncryptString("172.17.0.2:11197")) //println("->> kcp server config ca:", cfg.EncryptString("35.182.130.31:11197")) // //println("->> ca 35.182.130.31:11195:", cfg.EncryptString("35.182.130.31:11195")) //println("->> ca 52.60.222.79:11195:", cfg.EncryptString("52.60.222.79:11195")) //println("->> jp 54.95.26.192:11195:", cfg.EncryptString("54.95.26.192:11195")) //println("->> ca 35.183.4.31:11195:", cfg.EncryptString("35.183.4.31:11195")) // //println("->> :4000:", cfg.EncryptString(":4000")) 
//println("->> kpc:11196:", cfg.EncryptString("kpc:11196")) // hHLayibyzDjn59jeqTc8QA== //println("->> cache2:11197:", cfg.EncryptString("cache2:11197")) // hHLayibyzDjn59jeqTc8QA== // //println("xxx:", cfg.GetString("file.minio.accessid")) //println("yyy:", cfg.GetString("file.minio.accesssecret")) } func TestConfig_DecryptString(t *testing.T) { _, filename, _, _ := runtime.Caller(0) tomlFile := path.Join(path.Dir(filename), "conf-file.toml") cfg := NewConfig(tomlFile) AesKeySalt = os.Getenv("GO_AES_SALT") //ss://CHACHA20-IETF-POLY1305:981f08f5dcb43b1ac05f82632C65_d160127f2cc7@[kpc]:11196 //ss://CHACHA20-IETF-POLY1305:981f08f5dcb43b1ac05f82632C65_76f070767cfd7eafb920@:11197 println(cfg.DecryptString(`Mv3GdsNAMkqgdRx+2pdZFZ/nhjolcIAYiprUkJ/SYA3amBxet6Rtp3mlnaT399TG4vbMmiIO3r+H8OeV2NJbOsCYeirTNZ2jQ9AGzmBjR48nGgfjIThS8I0V8Fy9Djxw`)) println(cfg.DecryptString(`Mv3GdsNAMkqgdRx+2pdZFZ/nhjolcIAYiprUkJ/SYA3amBxet6Rtp3mlnaT399TG4vbMmiIO3r+H8OeV2NJbOiSpxnYwEGI6VA8G0j22Hw+iPJXZZAO+3XhmGuSxw93S`)) println(cfg.DecryptString(`Mv3GdsNAMkqgdRx+2pdZFZ/nhjolcIAYiprUkJ/SYA3amBxet6Rtp3mlnaT399TGSal/299uOLtxHjbtaFB6d4x4ZDGErhpsB3RDrYuD6p3NnTlg10fCxMGlusP+dT09`)) println(cfg.DecryptString(`K343GB5qBBxFc+CRfCwThFnLRax20m4tQ6GXnONH9VA=`)) println(cfg.DecryptString(`Z5WHqYZtEQCNVRkGUZFt/6Klocyl17f6EI7ndcE2rUM=`)) } func TestConfig_GetStringSlice(t *testing.T) { _, filename, _, _ := runtime.Caller(0) tomlFile := path.Join(path.Dir(filename), "conf-file.toml") cfg := NewConfig(tomlFile) fmt.Printf("xxx:%v\n", cfg.GetStringSlice("test.key2")) fmt.Printf("xxx2:%v\n", cfg.GetStringSlice("test.key2")) } func TestConfig_GetString(t *testing.T) { AesKeySalt = os.Getenv("GO_AES_SALT") _, filename, _, _ := runtime.Caller(0) tomlFile := path.Join(path.Dir(filename), "conf-file.toml") cfg := NewConfig(tomlFile) corpps := cfg.GetMapSlice("corpps") fmt.Printf("xxx2:%v\n", corpps) for _, corpp := range corpps { corpName := helper.GetValueAsString(corpp, "corpName", "") corpID := helper.GetValueAsString(corpp, "corpID", "") fmt.Printf("%s %s\n",corpID, corpName) } } func TestConfig_GetBool(t *testing.T) { _, filename, _, _ := runtime.Caller(0) tomlFile := path.Join(path.Dir(filename), "conf-file.toml") cfg := NewConfig(tomlFile) fmt.Printf("xxx:%v\n", cfg.GetBool("test.key3")) fmt.Printf("xxx2:%v\n", cfg.GetBool("test.key3")) } func TestConfig_GetInt(t *testing.T) { _, filename, _, _ := runtime.Caller(0) tomlFile := path.Join(path.Dir(filename), "conf-file.toml") cfg := NewConfig(tomlFile) fmt.Printf("xxx:%v\n", cfg.GetInt("test.key4")) fmt.Printf("xxx2:%v\n", cfg.GetInt("test.key4")) } func TestConfig_GetDuration(t *testing.T) { _, filename, _, _ := runtime.Caller(0) tomlFile := path.Join(path.Dir(filename), "conf-file.toml") cfg := NewConfig(tomlFile) fmt.Printf("xxx:%v\n", cfg.GetDuration("test.key5")) fmt.Printf("xxx2:%v\n", cfg.GetDuration("test.key5")) } func TestConfig_GetValue(t *testing.T) { _, filename, _, _ := runtime.Caller(0) tomlFile := path.Join(path.Dir(filename), "conf-file.toml") cfg := NewConfig(tomlFile) origins1 := converter.AsStringSlice(cfg.GetValue("http.header.origins"), []string{}) origins2 := converter.AsStringSlice(cfg.GetValue("http.header.origins"), []string{}) fmt.Printf("xxx:%v\n", origins1) fmt.Printf("xxx2:%v\n", origins2) } func TestConfig_GetStringMapString(t *testing.T) { _, filename, _, _ := runtime.Caller(0) tomlFile := path.Join(path.Dir(filename), "conf-file.toml") cfg := NewConfig(tomlFile) fmt.Printf("xxx :%v\n", cfg.GetStringMapString("cache.redis.addresses")) fmt.Printf("xxx2:%v\n", 
cfg.GetStringMapString("cache.redis.addresses")) } func getTestingAesKeyKey(t *testing.T) string { _, filename, _, _ := runtime.Caller(0) data, err := ioutil.ReadFile(path.Join(path.Dir(filename), "aeskeykey.txt")) if err != nil { t.Fatal(err) } return string(data) } func getProductionAesKeyKey(t *testing.T) string { _, filename, _, _ := runtime.Caller(0) data, err := ioutil.ReadFile(path.Join(path.Dir(filename), "aeskeykey_yijiu.txt")) if err != nil { t.Fatal(err) } return string(data) }
[ "\"GO_AES_SALT\"", "\"GO_AES_SALT\"", "\"GO_AES_SALT\"", "\"GO_AES_SALT\"" ]
[]
[ "GO_AES_SALT" ]
[]
["GO_AES_SALT"]
go
1
0
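The viper tests above set AesKeySalt from GO_AES_SALT before calling EncryptString and DecryptString; an empty salt would silently change every ciphertext. A hedged sketch of the same guard follows. Only the variable name comes from the record; the SHA-256 derivation is an assumption standing in for the library's real key scheme, which the record does not show.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"log"
	"os"
)

func main() {
	// Fail fast when the salt the tests rely on is missing.
	salt := os.Getenv("GO_AES_SALT")
	if salt == "" {
		log.Fatal("GO_AES_SALT is not set; refusing to derive an encryption key")
	}

	// Hypothetical key derivation: a plain SHA-256 over the salt, used here
	// only to illustrate turning the environment value into key material.
	key := sha256.Sum256([]byte(salt))
	log.Printf("derived 32-byte key starting %s from the environment salt", hex.EncodeToString(key[:4]))
}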
setup.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import gettext import glob import os import subprocess import sys from setuptools import find_packages from setuptools.command.sdist import sdist # In order to run the i18n commands for compiling and # installing message catalogs, we use DistUtilsExtra. # Don't make this a hard requirement, but warn that # i18n commands won't be available if DistUtilsExtra is # not installed... try: from DistUtilsExtra.auto import setup except ImportError: from setuptools import setup print "Warning: DistUtilsExtra required to use i18n builders. " print "To build nova with support for message catalogs, you need " print " https://launchpad.net/python-distutils-extra >= 2.18" gettext.install('nova', unicode=1) from nova.utils import parse_mailmap, str_dict_replace from nova import version if os.path.isdir('.bzr'): with open("nova/vcsversion.py", 'w') as version_file: vcs_cmd = subprocess.Popen(["bzr", "version-info", "--python"], stdout=subprocess.PIPE) vcsversion = vcs_cmd.communicate()[0] version_file.write(vcsversion) class local_sdist(sdist): """Customized sdist hook - builds the ChangeLog file from VC first""" def run(self): if os.path.isdir('.bzr'): # We're in a bzr branch env = os.environ.copy() env['BZR_PLUGIN_PATH'] = os.path.abspath('./bzrplugins') log_cmd = subprocess.Popen(["bzr", "log", "--novalog"], stdout=subprocess.PIPE, env=env) changelog = log_cmd.communicate()[0] mailmap = parse_mailmap() with open("ChangeLog", "w") as changelog_file: changelog_file.write(str_dict_replace(changelog, mailmap)) sdist.run(self) nova_cmdclass = {'sdist': local_sdist} try: from sphinx.setup_command import BuildDoc class local_BuildDoc(BuildDoc): def run(self): for builder in ['html', 'man']: self.builder = builder self.finalize_options() BuildDoc.run(self) nova_cmdclass['build_sphinx'] = local_BuildDoc except: pass try: from babel.messages import frontend as babel nova_cmdclass['compile_catalog'] = babel.compile_catalog nova_cmdclass['extract_messages'] = babel.extract_messages nova_cmdclass['init_catalog'] = babel.init_catalog nova_cmdclass['update_catalog'] = babel.update_catalog except: pass def find_data_files(destdir, srcdir): package_data = [] files = [] for d in glob.glob('%s/*' % (srcdir, )): if os.path.isdir(d): package_data += find_data_files( os.path.join(destdir, os.path.basename(d)), d) else: files += [d] package_data += [(destdir, files)] return package_data setup(name='nova', version=version.canonical_version_string(), description='cloud computing fabric controller', author='OpenStack', author_email='[email protected]', url='http://www.openstack.org/', cmdclass=nova_cmdclass, packages=find_packages(exclude=['bin', 'smoketests']), include_package_data=True, test_suite='nose.collector', data_files=find_data_files('share/nova', 'tools'), 
scripts=['bin/nova-ajax-console-proxy', 'bin/nova-api', 'bin/nova-compute', 'bin/nova-console', 'bin/nova-dhcpbridge', 'bin/nova-direct-api', 'bin/nova-logspool', 'bin/nova-manage', 'bin/dodai-machine-state-monitor', 'bin/dodai-delete-machine', 'bin/dodai-db-all', 'bin/dodai-db-create', 'bin/dodai-db-drop', 'bin/dodai-db-init', 'bin/dodai-db-machine-reset', 'bin/dodai-db-show', 'bin/dodai-instances-remove', 'bin/nova-network', 'bin/nova-objectstore', 'bin/nova-scheduler', 'bin/nova-spoolsentry', 'bin/stack', 'bin/nova-volume', 'bin/nova-vncproxy', 'tools/nova-debug'], py_modules=[])
[]
[]
[]
[]
[]
python
0
0
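The find_data_files helper in the setup.py above recursively walks tools/ and pairs each destination directory with the files that should be installed into it. A rough Go equivalent of that traversal is sketched below using filepath.WalkDir; the "share/nova" and "tools" names match the call in the record, but this is only an illustration of the grouping logic, not of setuptools behaviour.

package main

import (
	"fmt"
	"io/fs"
	"log"
	"path/filepath"
)

// findDataFiles groups every regular file under srcdir by the destination
// directory it would be installed into, mirroring the setup.py helper.
func findDataFiles(destdir, srcdir string) (map[string][]string, error) {
	out := map[string][]string{}
	err := filepath.WalkDir(srcdir, func(path string, d fs.DirEntry, err error) error {
		if err != nil || d.IsDir() {
			return err
		}
		rel, err := filepath.Rel(srcdir, filepath.Dir(path))
		if err != nil {
			return err
		}
		dest := filepath.Join(destdir, rel)
		out[dest] = append(out[dest], path)
		return nil
	})
	return out, err
}

func main() {
	files, err := findDataFiles("share/nova", "tools")
	if err != nil {
		log.Fatal(err)
	}
	for dest, srcs := range files {
		fmt.Println(dest, srcs)
	}
}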
certbot-ci/certbot_integration_tests/utils/certbot_call.py
#!/usr/bin/env python """Module to call certbot in test mode""" import os import pkg_resources import subprocess import sys import certbot_integration_tests # pylint: disable=wildcard-import,unused-wildcard-import from certbot_integration_tests.utils.constants import * def certbot_test(certbot_args, directory_url, http_01_port, tls_alpn_01_port, config_dir, workspace, force_renew=True): """ Invoke the certbot executable available in PATH in a test context for the given args. The test context consists in running certbot in debug mode, with various flags suitable for tests (eg. no ssl check, customizable ACME challenge ports and config directory ...). This command captures both stdout and stderr and returns it to the caller. :param list certbot_args: the arguments to pass to the certbot executable :param str directory_url: URL of the ACME directory server to use :param int http_01_port: port for the HTTP-01 challenges :param int tls_alpn_01_port: port for the TLS-ALPN-01 challenges :param str config_dir: certbot configuration directory to use :param str workspace: certbot current directory to use :param bool force_renew: set False to not force renew existing certificates (default: True) :return: stdout and stderr as strings :rtype: `tuple` of `str` """ command, env = _prepare_args_env(certbot_args, directory_url, http_01_port, tls_alpn_01_port, config_dir, workspace, force_renew) proc = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False, universal_newlines=True, cwd=workspace, env=env) print('--> Certbot log output was:') print(proc.stderr) proc.check_returncode() return proc.stdout, proc.stderr def _prepare_environ(workspace): # pylint: disable=missing-function-docstring new_environ = os.environ.copy() new_environ['TMPDIR'] = workspace # So, pytest is nice, and a little too nice for our usage. # In order to help user to call seamlessly any piece of python code without requiring to # install it as a full-fledged setuptools distribution for instance, it may inject the path # to the test files into the PYTHONPATH. This allows the python interpreter to import # as modules any python file available at this path. # See https://docs.pytest.org/en/3.2.5/pythonpath.html for the explanation and description. # However this behavior is not good in integration tests, in particular the nginx oldest ones. # Indeed during these kind of tests certbot is installed as a transitive dependency to # certbot-nginx. Here is the trick: this certbot version is not necessarily the same as # the certbot codebase lying in current working directory. For instance in oldest tests # certbot==0.36.0 may be installed while the codebase corresponds to certbot==0.37.0.dev0. # Then during a pytest run, PYTHONPATH contains the path to the Certbot codebase, so invoking # certbot will import the modules from the codebase (0.37.0.dev0), not from the # required/installed version (0.36.0). # This will lead to funny and totally incomprehensible errors. To avoid that, we ensure that # if PYTHONPATH is set, it does not contain the path to the root of the codebase. if new_environ.get('PYTHONPATH'): # certbot_integration_tests.__file__ is: # '/path/to/certbot/certbot-ci/certbot_integration_tests/__init__.pyc' # ... 
and we want '/path/to/certbot' certbot_root = os.path.dirname(os.path.dirname( os.path.dirname(certbot_integration_tests.__file__)) ) python_paths = [ path for path in new_environ['PYTHONPATH'].split(':') if path != certbot_root ] new_environ['PYTHONPATH'] = ':'.join(python_paths) return new_environ def _compute_additional_args(workspace, environ, force_renew): additional_args = [] output = subprocess.check_output(['certbot', '--version'], universal_newlines=True, stderr=subprocess.STDOUT, cwd=workspace, env=environ) # Typical response is: output = 'certbot 0.31.0.dev0' version_str = output.split(' ')[1].strip() if pkg_resources.parse_version(version_str) >= pkg_resources.parse_version('0.30.0'): additional_args.append('--no-random-sleep-on-renew') if force_renew: additional_args.append('--renew-by-default') return additional_args def _prepare_args_env(certbot_args, directory_url, http_01_port, tls_alpn_01_port, config_dir, workspace, force_renew): new_environ = _prepare_environ(workspace) additional_args = _compute_additional_args(workspace, new_environ, force_renew) command = [ 'certbot', '--server', directory_url, '--no-verify-ssl', '--http-01-port', str(http_01_port), '--https-port', str(tls_alpn_01_port), '--manual-public-ip-logging-ok', '--config-dir', config_dir, '--work-dir', os.path.join(workspace, 'work'), '--logs-dir', os.path.join(workspace, 'logs'), '--non-interactive', '--no-redirect', '--agree-tos', '--register-unsafely-without-email', '--debug', '-vv' ] command.extend(certbot_args) command.extend(additional_args) print('--> Invoke command:\n=====\n{0}\n====='.format(subprocess.list2cmdline(command))) return command, new_environ def main(): # pylint: disable=missing-function-docstring args = sys.argv[1:] # Default config is pebble directory_url = os.environ.get('SERVER', PEBBLE_DIRECTORY_URL) http_01_port = int(os.environ.get('HTTP_01_PORT', DEFAULT_HTTP_01_PORT)) tls_alpn_01_port = int(os.environ.get('TLS_ALPN_01_PORT', TLS_ALPN_01_PORT)) # Execution of certbot in a self-contained workspace workspace = os.environ.get('WORKSPACE', os.path.join(os.getcwd(), '.certbot_test_workspace')) if not os.path.exists(workspace): print('--> Creating a workspace for certbot_test: {0}'.format(workspace)) os.mkdir(workspace) else: print('--> Using an existing workspace for certbot_test: {0}'.format(workspace)) config_dir = os.path.join(workspace, 'conf') # Invoke certbot in test mode, without capturing output so users see directly the outcome. command, env = _prepare_args_env(args, directory_url, http_01_port, tls_alpn_01_port, config_dir, workspace, True) subprocess.check_call(command, universal_newlines=True, cwd=workspace, env=env) if __name__ == '__main__': main()
[]
[]
[ "HTTP_01_PORT", "SERVER", "WORKSPACE", "TLS_ALPN_01_PORT" ]
[]
["HTTP_01_PORT", "SERVER", "WORKSPACE", "TLS_ALPN_01_PORT"]
python
4
0
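_prepare_environ in the certbot helper above strips the codebase root out of PYTHONPATH so the installed certbot, not the checked-out source, gets imported. The colon-separated filtering it performs is isolated in the Go sketch below; the example paths are placeholders, not values from the record.

package main

import (
	"fmt"
	"os"
	"strings"
)

// stripEntry removes one directory from a colon-separated path list, the way
// _prepare_environ drops the certbot codebase root from PYTHONPATH.
func stripEntry(pathList, unwanted string) string {
	var kept []string
	for _, p := range strings.Split(pathList, ":") {
		if p != unwanted && p != "" {
			kept = append(kept, p)
		}
	}
	return strings.Join(kept, ":")
}

func main() {
	// Illustrative values only; the real entries come from the test workspace.
	pythonPath := os.Getenv("PYTHONPATH")
	if pythonPath == "" {
		pythonPath = "/path/to/certbot:/somewhere/else"
	}
	fmt.Println(stripEntry(pythonPath, "/path/to/certbot"))
}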
main.go
package main import ( "bufio" "bytes" "context" "crypto/tls" "encoding/json" "fmt" "io/ioutil" "log" "math" "net/http" "os" "strconv" "time" "cloud.google.com/go/datastore" "golang.org/x/net/http2" ) type Device struct { ApiVersion int `datastore:"api_version,noindex"` App string `datastore:"app"` Created time.Time `datastore:"created,noindex"` DeviceId string `datastore:"device_id,noindex"` DeviceInfo string `datastore:"device_info,noindex"` Environment string `datastore:"environment,noindex"` Failures int `datastore:"failures,noindex"` LastSuccess time.Time `datastore:"last_success,noindex"` Platform string `datastore:"platform,noindex"` Token string `datastore:"token"` TotalFailures int `datastore:"total_failures,noindex"` TotalSuccesses int `datastore:"total_successes,noindex"` Updated time.Time `datastore:"updated,noindex"` } type Payload struct { AccountID int64 `json:"account_id"` App string `json:"app"` Data json.RawMessage `json:"data"` DeviceToken string `json:"device_token"` Environment string `json:"environment"` } type PushError struct { Body []byte StatusCode int } func (pe PushError) Error() string { return fmt.Sprintf("HTTP %d (%s)", pe.StatusCode, pe.Body) } func (pe PushError) Permanent() bool { return pe.StatusCode == 400 || pe.StatusCode == 410 } func (pe PushError) Retryable() bool { return pe.StatusCode == 429 || pe.StatusCode == 500 || pe.StatusCode == 503 } type ClientMap map[string]*http.Client func (m ClientMap) Create(app string) *http.Client { if _, ok := m[app]; ok { panic("tried to overwrite existing client") } client := NewClient(app) m[app] = client return client } var ( store *datastore.Client clients = make(ClientMap) ctx = context.Background() timestamp = time.Now() ) const ( ProjectId = "roger-api" AppleHost = "https://api.push.apple.com" AppleHostDev = "https://api.development.push.apple.com" DefaultPort = "8080" MaxRetries = 3 PingFrequency = time.Second PingThreshold = time.Minute ) func main() { // Set up the Datastore client. var err error store, err = datastore.NewClient(ctx, ProjectId) if err != nil { log.Fatalf("Failed to create Datastore client (datastore.NewClient: %v)", err) } // Set up the APNS clients. clients.Create("cam.reaction.ReactionCam") port := DefaultPort if s := os.Getenv("PORT"); s != "" { port = s } http.HandleFunc("/ping", pingHandler) http.HandleFunc("/v1/push", pushHandler) go pinger() // Set up the server. log.Printf("Serving on %s...", port) if err := http.ListenAndServe(":"+port, nil); err != nil { log.Fatalf("http.ListenAndServe: %v", err) } } func NewClient(app string) *http.Client { cert, err := tls.LoadX509KeyPair( fmt.Sprintf("secrets/%s.pem", app), fmt.Sprintf("secrets/%s.key", app)) if err != nil { log.Fatalf("Failed to create client for %s: %v", app, err) } config := &tls.Config{ Certificates: []tls.Certificate{cert}, } config.BuildNameToCertificate() transport := &http.Transport{ TLSClientConfig: config, } // Explicitly enable HTTP/2 as TLS-configured clients don't auto-upgrade. 
// See: https://github.com/golang/go/issues/14275 if err := http2.ConfigureTransport(transport); err != nil { log.Fatalf("Failed to configure HTTP/2 for %s client: %v", app, err) } return &http.Client{ Timeout: 3 * time.Second, Transport: transport, } } func Push(app, deviceToken, env string, data json.RawMessage) (err error) { client, ok := clients[app] if !ok { err = fmt.Errorf("invalid app \"%s\"", app) return } var url string if env == "development" { url = fmt.Sprintf("%s/3/device/%s", AppleHostDev, deviceToken) } else { url = fmt.Sprintf("%s/3/device/%s", AppleHost, deviceToken) } req, err := http.NewRequest("POST", url, bytes.NewReader(data)) if err != nil { return } expiration := time.Now().Add(168 * time.Hour).Unix() req.Header.Set("apns-expiration", strconv.FormatInt(expiration, 10)) req.Header.Set("apns-topic", app) req.Header.Set("Content-Type", "application/json") resp, err := client.Do(req) if err != nil { return } defer resp.Body.Close() timestamp = time.Now() if resp.StatusCode == http.StatusOK { return nil } // Something went wrong – get the error from body. body, err := ioutil.ReadAll(resp.Body) if err != nil { return } return PushError{Body: body, StatusCode: resp.StatusCode} } func pinger() { // TODO: Figure out how to ping connections instead of closing. for { if time.Since(timestamp) > PingThreshold { timestamp = time.Now() for app := range clients { clients[app] = NewClient(app) } } time.Sleep(PingFrequency) } } // Push with retry. func push(payload Payload) { app := payload.App if app == "" { log.Printf("Unrecognized app %#v", app) return } accountKey := datastore.IDKey("Account", payload.AccountID, nil) deviceKey := datastore.NameKey("Device", payload.DeviceToken, accountKey) attempt := 1 for { err := Push(app, payload.DeviceToken, payload.Environment, payload.Data) if err, ok := err.(PushError); ok && err.Permanent() { if err.Permanent() { log.Printf("[%d] PERMANENT FAILURE: %s", payload.AccountID, err) if err := store.Delete(ctx, deviceKey); err != nil { log.Printf("[%d] FAILED TO DELETE TOKEN: %v", payload.AccountID, err) } } else if !err.Retryable() { log.Printf("[%d] DROPPING NOTIFICATION: %s", payload.AccountID, err) } log.Printf("[%d] %s", payload.AccountID, string(payload.Data)) return } if updateErr := updateDeviceStats(ctx, deviceKey, err == nil); updateErr != nil { log.Printf("[%d] FAILED TO UPDATE TOKEN: %v", payload.AccountID, updateErr) } if err == nil { return } // An error occurred. log.Printf("[%d] Failed to push (attempt %d/%d): %s", payload.AccountID, attempt, MaxRetries, err) // Exponential backoff. 
if attempt >= MaxRetries { log.Printf("[%d] DROPPING NOTIFICATION: exceeded max retries", payload.AccountID) log.Printf("[%d] %s", payload.AccountID, string(payload.Data)) return } time.Sleep(time.Duration(math.Exp2(float64(attempt-1))) * time.Second) attempt += 1 } } func updateDeviceStats(ctx context.Context, key *datastore.Key, success bool) error { _, err := store.RunInTransaction(ctx, func(tx *datastore.Transaction) error { var device Device if err := tx.Get(key, &device); err != nil { return err } now := time.Now() device.Updated = now if success { device.LastSuccess = now device.TotalSuccesses += 1 device.Failures = 0 } else { device.Failures += 1 device.TotalFailures += 1 } _, err := tx.Put(key, &device) return err }) return err } func pingHandler(w http.ResponseWriter, r *http.Request) { w.WriteHeader(200) fmt.Fprintln(w, "ok") } func pushHandler(w http.ResponseWriter, r *http.Request) { scanner := bufio.NewScanner(r.Body) for scanner.Scan() { var payload Payload if err := json.Unmarshal(scanner.Bytes(), &payload); err != nil { log.Printf("Failed to parse JSON: %s | %s", err, scanner.Text()) continue } go push(payload) } if err := scanner.Err(); err != nil { log.Printf("Failed to read data: %s", err) } }
[ "\"PORT\"" ]
[]
[ "PORT" ]
[]
["PORT"]
go
1
0
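The push loop in the APNS service above retries failed deliveries with exponential backoff: attempt n sleeps 2^(n-1) seconds and the notification is dropped once MaxRetries attempts have failed. That schedule is shown on its own below; the retry count and sleep formula come from the record, while the failing callback is a made-up example and the PushError semantics are simplified away.

package main

import (
	"errors"
	"fmt"
	"math"
	"time"
)

const maxRetries = 3 // matches MaxRetries in the record

// withBackoff retries fn on the same 2^(attempt-1)-second schedule the push
// loop uses, giving up once maxRetries attempts have failed.
func withBackoff(fn func() error) error {
	for attempt := 1; ; attempt++ {
		err := fn()
		if err == nil {
			return nil
		}
		if attempt >= maxRetries {
			return fmt.Errorf("giving up after %d attempts: %w", attempt, err)
		}
		time.Sleep(time.Duration(math.Exp2(float64(attempt-1))) * time.Second)
	}
}

func main() {
	calls := 0
	err := withBackoff(func() error {
		calls++
		if calls < 3 {
			return errors.New("transient failure")
		}
		return nil
	})
	fmt.Println("calls:", calls, "err:", err)
}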
wip/read_direction_change_configs.go
package main import ( "fmt" "os" ) func hasBit(n int, pos uint) bool { val := n & (1 << pos) return (val > 0) } var elementNames = []string{ "ELEM_FLAT", "ELEM_END_STATION", "ELEM_BEGIN_STATION", "ELEM_MIDDLE_STATION", "ELEM_25_DEG_UP", "ELEM_60_DEG_UP", "ELEM_FLAT_TO_25_DEG_UP", "ELEM_25_DEG_UP_TO_60_DEG_UP", "ELEM_60_DEG_UP_TO_25_DEG_UP", "ELEM_25_DEG_UP_TO_FLAT", "ELEM_25_DEG_DOWN", "ELEM_60_DEG_DOWN", "ELEM_FLAT_TO_25_DEG_DOWN", "ELEM_25_DEG_DOWN_TO_60_DEG_DOWN", "ELEM_60_DEG_DOWN_TO_25_DEG_DOWN", "ELEM_25_DEG_DOWN_TO_FLAT", "ELEM_LEFT_QUARTER_TURN_5_TILES", "ELEM_RIGHT_QUARTER_TURN_5_TILES", "ELEM_FLAT_TO_LEFT_BANK", "ELEM_FLAT_TO_RIGHT_BANK", "ELEM_LEFT_BANK_TO_FLAT", "ELEM_RIGHT_BANK_TO_FLAT", "ELEM_BANKED_LEFT_QUARTER_TURN_5_TILES", "ELEM_BANKED_RIGHT_QUARTER_TURN_5_TILES", "ELEM_LEFT_BANK_TO_25_DEG_UP", "ELEM_RIGHT_BANK_TO_25_DEG_UP", "ELEM_25_DEG_UP_TO_LEFT_BANK", "ELEM_25_DEG_UP_TO_RIGHT_BANK", "ELEM_LEFT_BANK_TO_25_DEG_DOWN", "ELEM_RIGHT_BANK_TO_25_DEG_DOWN", "ELEM_25_DEG_DOWN_TO_LEFT_BANK", "ELEM_25_DEG_DOWN_TO_RIGHT_BANK", "ELEM_LEFT_BANK", "ELEM_RIGHT_BANK", "ELEM_LEFT_QUARTER_TURN_5_TILES_25_DEG_UP", "ELEM_RIGHT_QUARTER_TURN_5_TILES_25_DEG_UP", "ELEM_LEFT_QUARTER_TURN_5_TILES_25_DEG_DOWN", "ELEM_RIGHT_QUARTER_TURN_5_TILES_25_DEG_DOWN", "ELEM_S_BEND_LEFT", "ELEM_S_BEND_RIGHT", "ELEM_LEFT_VERTICAL_LOOP", "ELEM_RIGHT_VERTICAL_LOOP", "ELEM_LEFT_QUARTER_TURN_3_TILES", "ELEM_RIGHT_QUARTER_TURN_3_TILES", "ELEM_LEFT_QUARTER_TURN_3_TILES_BANK", "ELEM_RIGHT_QUARTER_TURN_3_TILES_BANK", "ELEM_LEFT_QUARTER_TURN_3_TILES_25_DEG_UP", "ELEM_RIGHT_QUARTER_TURN_3_TILES_25_DEG_UP", "ELEM_LEFT_QUARTER_TURN_3_TILES_25_DEG_DOWN", "ELEM_RIGHT_QUARTER_TURN_3_TILES_25_DEG_DOWN", "ELEM_LEFT_QUARTER_TURN_1_TILE", "ELEM_RIGHT_QUARTER_TURN_1_TILE", "ELEM_LEFT_TWIST_DOWN_TO_UP", "ELEM_RIGHT_TWIST_DOWN_TO_UP", "ELEM_LEFT_TWIST_UP_TO_DOWN", "ELEM_RIGHT_TWIST_UP_TO_DOWN", "ELEM_HALF_LOOP_UP", "ELEM_HALF_LOOP_DOWN", "ELEM_LEFT_CORKSCREW_UP", "ELEM_RIGHT_CORKSCREW_UP", "ELEM_LEFT_CORKSCREW_DOWN", "ELEM_RIGHT_CORKSCREW_DOWN", "ELEM_FLAT_TO_60_DEG_UP", "ELEM_60_DEG_UP_TO_FLAT", "ELEM_FLAT_TO_60_DEG_DOWN", "ELEM_60_DEG_DOWN_TO_FLAT", "ELEM_TOWER_BASE", "ELEM_TOWER_SECTION", "ELEM_FLAT_COVERED", "ELEM_25_DEG_UP_COVERED", "ELEM_60_DEG_UP_COVERED", "ELEM_FLAT_TO_25_DEG_UP_COVERED", "ELEM_25_DEG_UP_TO_60_DEG_UP_COVERED", "ELEM_60_DEG_UP_TO_25_DEG_UP_COVERED", "ELEM_25_DEG_UP_TO_FLAT_COVERED", "ELEM_25_DEG_DOWN_COVERED", "ELEM_60_DEG_DOWN_COVERED", "ELEM_FLAT_TO_25_DEG_DOWN_COVERED", "ELEM_25_DEG_DOWN_TO_60_DEG_DOWN_COVERED", "ELEM_60_DEG_DOWN_TO_25_DEG_DOWN_COVERED", "ELEM_25_DEG_DOWN_TO_FLAT_COVERED", "ELEM_LEFT_QUARTER_TURN_5_TILES_COVERED", "ELEM_RIGHT_QUARTER_TURN_5_TILES_COVERED", "ELEM_S_BEND_LEFT_COVERED", "ELEM_S_BEND_RIGHT_COVERED", "ELEM_LEFT_QUARTER_TURN_3_TILES_COVERED", "ELEM_RIGHT_QUARTER_TURN_3_TILES_COVERED", "ELEM_LEFT_HALF_BANKED_HELIX_UP_SMALL", "ELEM_RIGHT_HALF_BANKED_HELIX_UP_SMALL", "ELEM_LEFT_HALF_BANKED_HELIX_DOWN_SMALL", "ELEM_RIGHT_HALF_BANKED_HELIX_DOWN_SMALL", "ELEM_LEFT_HALF_BANKED_HELIX_UP_LARGE", "ELEM_RIGHT_HALF_BANKED_HELIX_UP_LARGE", "ELEM_LEFT_HALF_BANKED_HELIX_DOWN_LARGE", "ELEM_RIGHT_HALF_BANKED_HELIX_DOWN_LARGE", "ELEM_LEFT_QUARTER_TURN_1_TILE_60_DEG_UP", "ELEM_RIGHT_QUARTER_TURN_1_TILE_60_DEG_UP", "ELEM_LEFT_QUARTER_TURN_1_TILE_60_DEG_DOWN", "ELEM_RIGHT_QUARTER_TURN_1_TILE_60_DEG_DOWN", "ELEM_BRAKES", "ELEM_ROTATION_CONTROL_TOGGLE", "ELEM_INVERTED_90_DEG_UP_TO_FLAT_QUARTER_LOOP", "ELEM_LEFT_QUARTER_BANKED_HELIX_LARGE_UP", "ELEM_RIGHT_QUARTER_BANKED_HELIX_LARGE_UP", 
"ELEM_LEFT_QUARTER_BANKED_HELIX_LARGE_DOWN", "ELEM_RIGHT_QUARTER_BANKED_HELIX_LARGE_DOWN", "ELEM_LEFT_QUARTER_HELIX_LARGE_UP", "ELEM_RIGHT_QUARTER_HELIX_LARGE_UP", "ELEM_LEFT_QUARTER_HELIX_LARGE_DOWN", "ELEM_RIGHT_QUARTER_HELIX_LARGE_DOWN", "ELEM_25_DEG_UP_LEFT_BANKED", "ELEM_25_DEG_UP_RIGHT_BANKED", "ELEM_WATERFALL", "ELEM_RAPIDS", "ELEM_ON_RIDE_PHOTO", "ELEM_25_DEG_DOWN_LEFT_BANKED", "ELEM_25_DEG_DOWN_RIGHT_BANKED", "ELEM_WATER_SPLASH", "ELEM_FLAT_TO_60_DEG_UP_LONG_BASE", "ELEM_60_DEG_UP_TO_FLAT_LONG_BASE", "ELEM_WHIRLPOOL", "ELEM_FLAT_TO_60_DEG_DOWN_LONG_BASE", "ELEM_60_DEG_UP_TO_FLAT_LONG_BASE", "ELEM_CABLE_LIFT_HILL", "ELEM_REVERSE_WHOA_BELLY_SLOPE", "ELEM_REVERSE_WHOA_BELLY_VERTICAL", "ELEM_90_DEG_UP", "ELEM_90_DEG_DOWN", "ELEM_60_DEG_UP_TO_90_DEG_UP", "ELEM_90_DEG_DOWN_TO_60_DEG_DOWN", "ELEM_90_DEG_UP_TO_60_DEG_UP", "ELEM_60_DEG_DOWN_TO_90_DEG_DOWN", "ELEM_BRAKE_FOR_DROP", "ELEM_LEFT_EIGHTH_TO_DIAG", "ELEM_RIGHT_EIGHTH_TO_DIAG", "ELEM_LEFT_EIGHTH_TO_ORTHOGONAL", "ELEM_RIGHT_EIGHTH_TO_ORTHOGONAL", "ELEM_LEFT_EIGHTH_BANK_TO_DIAG", "ELEM_RIGHT_EIGHTH_BANK_TO_DIAG", "ELEM_LEFT_EIGHTH_BANK_TO_ORTHOGONAL", "ELEM_RIGHT_EIGHTH_BANK_TO_ORTHOGONAL", "ELEM_DIAG_FLAT", "ELEM_DIAG_25_DEG_UP", "ELEM_DIAG_60_DEG_UP", "ELEM_DIAG_FLAT_TO_25_DEG_UP", "ELEM_DIAG_25_DEG_UP_TO_60_DEG_UP", "ELEM_DIAG_60_DEG_UP_TO_25_DEG_UP", "ELEM_DIAG_25_DEG_UP_TO_FLAT", "ELEM_DIAG_25_DEG_DOWN", "ELEM_DIAG_60_DEG_DOWN", "ELEM_DIAG_FLAT_TO_25_DEG_DOWN", "ELEM_DIAG_25_DEG_DOWN_TO_60_DEG_DOWN", "ELEM_DIAG_60_DEG_DOWN_TO_25_DEG_DOWN", "ELEM_DIAG_25_DEG_DOWN_TO_FLAT", "ELEM_DIAG_FLAT_TO_60_DEG_UP", "ELEM_DIAG_60_DEG_UP_TO_FLAT", "ELEM_DIAG_FLAT_TO_60_DEG_DOWN", "ELEM_DIAG_60_DEG_DOWN_TO_FLAT", "ELEM_DIAG_FLAT_TO_LEFT_BANK", "ELEM_DIAG_FLAT_TO_RIGHT_BANK", "ELEM_DIAG_LEFT_BANK_TO_FLAT", "ELEM_DIAG_RIGHT_BANK_TO_FLAT", "ELEM_DIAG_LEFT_BANK_TO_25_DEG_UP", "ELEM_DIAG_RIGHT_BANK_TO_25_DEG_UP", "ELEM_DIAG_25_DEG_UP_TO_LEFT_BANK", "ELEM_DIAG_25_DEG_UP_TO_RIGHT_BANK", "ELEM_DIAG_LEFT_BANK_TO_25_DEG_DOWN", "ELEM_DIAG_RIGHT_BANK_TO_25_DEG_DOWN", "ELEM_DIAG_25_DEG_DOWN_TO_LEFT_BANK", "ELEM_DIAG_25_DEG_DOWN_TO_RIGHT_BANK", "ELEM_DIAG_LEFT_BANK", "ELEM_DIAG_RIGHT_BANK", "ELEM_LOG_FLUME_REVERSER", "ELEM_SPINNING_TUNNEL", "ELEM_LEFT_BARREL_ROLL_UP_TO_DOWN", "ELEM_RIGHT_BARREL_ROLL_UP_TO_DOWN", "ELEM_LEFT_BARREL_ROLL_DOWN_TO_UP", "ELEM_RIGHT_BARREL_ROLL_DOWN_TO_UP", "ELEM_LEFT_BANK_TO_LEFT_QUARTER_TURN_3_TILES_25_DEG_UP", "ELEM_RIGHT_BANK_TO_RIGHT_QUARTER_TURN_3_TILES_25_DEG_UP", "ELEM_LEFT_QUARTER_TURN_3_TILES_25_DEG_DOWN_TO_LEFT_BANK", "ELEM_RIGHT_QUARTER_TURN_3_TILES_25_DEG_DOWN_TO_RIGHT_BANK", "ELEM_POWERED_LIFT", "ELEM_LEFT_LARGE_HALF_LOOP_UP", "ELEM_RIGHT_LARGE_HALF_LOOP_UP", "ELEM_RIGHT_LARGE_HALF_LOOP_DOWN", "ELEM_LEFT_LARGE_HALF_LOOP_DOWN", } var rides = []string{ "Spiral Roller coaster", "Stand Up Coaster", "Suspended Swinging", "Inverted", "Steel Mini Coaster", "Mini Railroad", "Monorail", "Mini Suspended Coaster", "Bumper Boats", "Wooden Wild Mine/Mouse", "Steeplechase/Motorbike/Soap Box Derby", "Car Ride", "Launched Freefall", "Bobsleigh Coaster", "Flying Turns", "Observation Tower", "Looping Roller Coaster", "Dinghy Slide", "Mine Train Coaster", "Chairlift", "Corkscrew Roller Coaster", "Maze", "Spiral Slide", "Go Karts", } func main() { f, err := os.Open(os.Getenv("HOME") + "/code/OpenRCT2/openrct2.exe") if err != nil { panic(err) } defer f.Close() var WIDTH = 10 b := make([]byte, 256*WIDTH) addr := 0x005972bb f.ReadAt(b, int64(addr)) var colHeaderFmt = "%55s " // header row fmt.Printf(colHeaderFmt, "Number") for j := 0; j < 
WIDTH; j++ { fmt.Printf("%4x ", addr+j-0x590000) } fmt.Printf("\n") for i := 0; i < len(elementNames)+30; i++ { if i < len(elementNames) { fmt.Printf(colHeaderFmt, elementNames[i]) } else { fmt.Printf(colHeaderFmt, "unknown") } for j := 0; j < WIDTH; j++ { bijint := int(b[i*WIDTH+j]) fmt.Printf("%4d ", bijint) } fmt.Printf("\n") } }
[ "\"HOME\"" ]
[]
[ "HOME" ]
[]
["HOME"]
go
1
0
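The hasBit helper defined in the record above tests a single bit of a value read out of the binary, but the snippet never calls it. A short usage sketch follows; the flags byte is invented purely for illustration.

package main

import "fmt"

// hasBit reports whether bit pos of n is set, as in the record.
func hasBit(n int, pos uint) bool {
	return n&(1<<pos) > 0
}

func main() {
	// Hypothetical flags byte, standing in for a value read from the executable.
	const flags = 0b1010_0010
	for pos := uint(0); pos < 8; pos++ {
		fmt.Printf("bit %d set: %v\n", pos, hasBit(flags, pos))
	}
}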
pypi_tools/main.py
import os import logging import sentry_sdk from aiogram import Bot, Dispatcher, executor, types from datetime import datetime, timedelta from pypi_tools.logic import remove_track_for_package import pypi_tools.data as d from pypi_tools.helpers import validate_input import pypi_tools.vizualizer as v import pypi_tools.readme as r import asyncio import aioredis logging.basicConfig(level=logging.INFO) redis_host = f"redis://{os.environ.get('REDIS_HOST')}" #sentry_sdk.init(os.environ["SENTRY_PATH"]) bot = Bot(token=os.environ["BOT_API_KEY"], parse_mode="html") dp = Dispatcher(bot) @dp.message_handler(commands=['start']) async def send_welcome(message): text = f"Hello, {message.chat.first_name} {message.chat.last_name}! \n" \ f"Welcome to <b>PyPi Tools Bot.</b>\n\n" \ "This Bot created special to obtain information from Official Python PyPi Server\n" \ + r.help_text + r.current_version await message.answer(text) @dp.message_handler(commands=['help']) async def send_welcome(message): await message.answer(r.help_text) @dp.message_handler(lambda message: message.text and ( '/stats' in message.text.lower() or 'stats:' in message.text.lower())) @validate_input(command='stats', known_sub_commands={'@any_number': lambda num: num}) async def send_package_stats(message): output = message.output sub_command = message.sub_command or 5 if len(output.split()) == 1: days = sub_command package_name = output current_date = datetime.now().date() data_ = await d.cached_package_downloads_stats(package_name, days, current_date) output = d.stats_text(data_, package_name, days) await message.answer(output) @dp.message_handler(lambda message: message.text and ( '/plot' in message.text.lower() or 'plot:' in message.text.lower())) @validate_input(command='plot', known_sub_commands={'@any_number': lambda num: num}) async def send_package_stats_with_graph(message): output = message.output sub_command = message.sub_command or 5 if len(output.split()) == 1: days = sub_command package_name = output current_date = datetime.now().date() data_ = await d.cached_package_downloads_stats(package_name, days, current_date) output = d.stats_text(data_, package_name, days) temp = 'temp/' os.makedirs(temp, exist_ok=True) # for pandas range start_date = current_date - timedelta(days=2) file_name = f'{temp}/{package_name}:{current_date - timedelta(days=1)}:{days}.png' if not os.path.isfile(file_name): file_name = v.generate_graph(start_date, [item for _, item in data_.items()][::-1], file_name) file_ = types.InputFile(file_name) await message.answer(output) await message.answer_photo(file_) @dp.message_handler(commands=['random']) async def command(message): output = await d.get_random_package() await message.answer(output) @dp.message_handler(commands=['search', 'search:detailed']) @validate_input(command='search', known_sub_commands={'detailed': lambda _package_name: d.request_package_info_from_pypi( _package_name, detailed=True)}, additional_error="Or use with sub-command to get detailed information:" "/search:detailed aiohttp") async def search_command(message): output = message.output sub_command = message.sub_command if len(output.split()) == 1: package_name = output if sub_command: output = await sub_command(package_name) else: output = await d.request_package_info_from_pypi(package_name) await message.answer(output) @dp.message_handler(commands=['releases', 'releases:full']) @validate_input(command='releases', known_sub_commands={'full': 'full'}, additional_error="Or use with sub-command to get full list of releases:" 
"/releases:full aiohttp") async def releases_command(message): output = message.output sub_command = message.sub_command if len(output.split()) == 1: package_name = output releases = await d.get_release_list(package_name=package_name) if sub_command and sub_command == 'full': output = f"Full Releases list for Package {package_name}\n\n" for version, v_date in releases.items(): output += f"<b>{version}</b>: {v_date}\n" else: output = f"Last 7 Releases for Package {package_name}\n\n" for num, items in enumerate(list(releases.items())): if num > 7: break version, v_date = items output += f"<b>{version}</b>: {v_date}\n" await message.answer(output) track_sub_commands = {'stop': lambda key: remove_track_for_package(key), 'nodev': 'nodev'} @dp.message_handler(commands=['track', 'track:stop', 'track:nodev']) @validate_input(command='track', known_sub_commands=track_sub_commands, additional_error="Or use with sub-command to stop track a package releases" "/track:stop aiohttp") async def track_command(message): """ handler to react on /track command and it sub-commands""" pool = await aioredis.create_redis_pool(redis_host) with await pool as redis: output = message.output sub_command = message.sub_command if len(output.split()) == 1: package_name = output chat_id = str(message.chat.id) key = chat_id + ":" + package_name if sub_command and sub_command != 'nodev': output = await sub_command(key) else: nodev = False if sub_command: nodev = True versions = await d.get_release_list(package_name, nodev) if versions is None: output = f'Package {package_name} does not exists' else: current_version = d.get_last_release_version(versions) output = f"Current {package_name} version is {current_version} \n" \ "You will be announced with new version release" version = current_version[0] if nodev: version = version + ':nodev' await redis.set(key, version) await message.answer(output) @dp.message_handler() async def echo_all(message: types.Message): await message.answer(message.text) if __name__ == '__main__': try: executor.start_polling(dp, skip_updates=True) except Exception as e: sentry_sdk.capture_exception(e)
[]
[]
[ "SENTRY_PATH", "REDIS_HOST", "BOT_API_KEY" ]
[]
["SENTRY_PATH", "REDIS_HOST", "BOT_API_KEY"]
python
3
0
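The bot above interpolates REDIS_HOST into redis://{...} without a default, so an unset value yields the string "redis://None", and it requires BOT_API_KEY at startup. A hedged Go sketch of the same configuration step with an explicit fallback and check is shown below; the variable names are from the record, the localhost default is illustrative.

package main

import (
	"fmt"
	"log"
	"os"
)

// envOr returns the value of key, or fallback when the variable is unset.
func envOr(key, fallback string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return fallback
}

func main() {
	redisURL := "redis://" + envOr("REDIS_HOST", "localhost:6379")

	token := os.Getenv("BOT_API_KEY")
	if token == "" {
		log.Fatal("BOT_API_KEY is not set; the bot cannot authenticate")
	}
	_ = token
	fmt.Println("redis:", redisURL)
}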
ooni/settings.py
import os import yaml import getpass from ConfigParser import SafeConfigParser from twisted.internet import defer, reactor from twisted.internet.endpoints import TCP4ClientEndpoint from os.path import abspath, expanduser from ooni.utils.net import ConnectAndCloseProtocol, connectProtocol from ooni import geoip from ooni.utils import Storage, log, get_ooni_root from ooni import errors class OConfig(object): _custom_home = None def __init__(self): self.current_user = getpass.getuser() self.global_options = {} self.reports = Storage() self.scapyFactory = None self.tor_state = None # This is used to store the probes IP address obtained via Tor self.probe_ip = geoip.ProbeIP() self.logging = True self.basic = Storage() self.advanced = Storage() self.tor = Storage() self.privacy = Storage() self.set_paths() def embedded_settings(self, category, option): embedded_settings = os.path.join(get_ooni_root(), 'settings.ini') if os.path.isfile(embedded_settings): settings = SafeConfigParser() with open(embedded_settings) as fp: settings.readfp(fp) return settings.get(category, option) return None @property def var_lib_path(self): var_lib_path = self.embedded_settings("directories", "var_lib") if var_lib_path: return os.path.abspath(var_lib_path) return "/var/lib/ooni" @property def usr_share_path(self): usr_share_path = self.embedded_settings("directories", "usr_share") if usr_share_path: return os.path.abspath(usr_share_path) return "/usr/share/ooni" @property def data_directory_candidates(self): dirs = [ self.ooni_home, self.var_lib_path, self.usr_share_path, os.path.join(get_ooni_root(), '..', 'data'), '/usr/share/' ] if os.getenv("OONI_DATA_DIR"): dirs.insert(0, os.getenv("OONI_DATA_DIR")) if self.global_options.get('datadir'): dirs.insert(0, abspath(expanduser(self.global_options['datadir']))) return dirs @property def data_directory(self): for target_dir in self.data_directory_candidates: if os.path.isdir(target_dir): return target_dir return self.var_lib_path @property def ooni_home(self): home = expanduser('~'+self.current_user) if os.getenv("HOME"): home = os.getenv("HOME") if self._custom_home: return self._custom_home else: return os.path.join(home, '.ooni') def get_data_file_path(self, file_name): for target_dir in self.data_directory_candidates: file_path = os.path.join(target_dir, file_name) if os.path.isfile(file_path): return file_path def set_paths(self): self.nettest_directory = os.path.join(get_ooni_root(), 'nettests') if self.advanced.inputs_dir: self.inputs_directory = self.advanced.inputs_dir else: self.inputs_directory = os.path.join(self.ooni_home, 'inputs') if self.advanced.decks_dir: self.decks_directory = self.advanced.decks_dir else: self.decks_directory = os.path.join(self.ooni_home, 'decks') self.reports_directory = os.path.join(self.ooni_home, 'reports') self.resources_directory = os.path.join(self.data_directory, "resources") if self.advanced.report_log_file: self.report_log_file = self.advanced.report_log_file else: self.report_log_file = os.path.join(self.ooni_home, 'reporting.yml') if self.global_options.get('configfile'): config_file = self.global_options['configfile'] self.config_file = expanduser(config_file) else: self.config_file = os.path.join(self.ooni_home, 'ooniprobe.conf') if 'logfile' in self.basic: self.basic.logfile = expanduser(self.basic.logfile.replace( '~', '~'+self.current_user)) def initialize_ooni_home(self, custom_home=None): if custom_home: self._custom_home = custom_home self.set_paths() if not os.path.isdir(self.ooni_home): print "Ooni home 
directory does not exist." print "Creating it in '%s'." % self.ooni_home os.mkdir(self.ooni_home) os.mkdir(self.inputs_directory) os.mkdir(self.decks_directory) def _create_config_file(self): target_config_file = self.config_file print "Creating it for you in '%s'." % target_config_file sample_config_file = self.get_data_file_path('ooniprobe.conf.sample') with open(sample_config_file) as f: with open(target_config_file, 'w+') as w: for line in f: if line.startswith(' logfile: '): w.write(' logfile: %s\n' % ( os.path.join(self.ooni_home, 'ooniprobe.log')) ) else: w.write(line) def read_config_file(self, check_incoherences=False): if not os.path.isfile(self.config_file): print "Configuration file does not exist." self._create_config_file() self.read_config_file() with open(self.config_file) as f: config_file_contents = '\n'.join(f.readlines()) configuration = yaml.safe_load(config_file_contents) for setting in configuration.keys(): if setting in dir(self) and configuration[setting] is not None: for k, v in configuration[setting].items(): getattr(self, setting)[k] = v self.set_paths() if check_incoherences: self.check_incoherences(configuration) def check_incoherences(self, configuration): incoherent = [] if configuration['advanced']['interface'] != 'auto': from scapy.all import get_if_list if configuration['advanced']['interface'] not in get_if_list(): incoherent.append('advanced:interface') self.log_incoherences(incoherent) def log_incoherences(self, incoherences): if len(incoherences) > 0: if len(incoherences) > 1: incoherent_pretty = ", ".join(incoherences[:-1]) + ' and ' + incoherences[-1] else: incoherent_pretty = incoherences[0] log.err("You must set properly %s in %s." % (incoherent_pretty, self.config_file)) raise errors.ConfigFileIncoherent @defer.inlineCallbacks def check_tor(self): """ Called only when we must start tor by director.start """ incoherent = [] if not self.advanced.start_tor: if self.tor.socks_port is None: incoherent.append('tor:socks_port') else: socks_port_ep = TCP4ClientEndpoint(reactor, "localhost", self.tor.socks_port) try: yield connectProtocol(socks_port_ep, ConnectAndCloseProtocol()) except Exception: incoherent.append('tor:socks_port') if self.tor.control_port is not None: control_port_ep = TCP4ClientEndpoint(reactor, "localhost", self.tor.control_port) try: yield connectProtocol(control_port_ep, ConnectAndCloseProtocol()) except Exception: incoherent.append('tor:control_port') self.log_incoherences(incoherent) config = OConfig() if not os.path.isfile(config.config_file) \ and os.path.isfile('/etc/ooniprobe.conf'): config.global_options['configfile'] = '/etc/ooniprobe.conf' config.set_paths()
[]
[]
[ "HOME", "OONI_DATA_DIR" ]
[]
["HOME", "OONI_DATA_DIR"]
python
2
0
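data_directory in the ooni settings above probes an ordered list of candidate directories, with an OONI_DATA_DIR override consulted first and /var/lib/ooni used as the final fallback. The first-existing-directory lookup is sketched in Go below; the candidate paths shown are a subset of those named in the record, and the helper itself is illustrative.

package main

import (
	"fmt"
	"os"
)

// firstExistingDir returns the first candidate that is an existing directory,
// or fallback when none of them are, mirroring OConfig.data_directory.
func firstExistingDir(candidates []string, fallback string) string {
	for _, dir := range candidates {
		if info, err := os.Stat(dir); err == nil && info.IsDir() {
			return dir
		}
	}
	return fallback
}

func main() {
	candidates := []string{"/var/lib/ooni", "/usr/share/ooni", "/usr/share/"}
	// An OONI_DATA_DIR override takes precedence over everything else.
	if custom := os.Getenv("OONI_DATA_DIR"); custom != "" {
		candidates = append([]string{custom}, candidates...)
	}
	fmt.Println(firstExistingDir(candidates, "/var/lib/ooni"))
}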
main.go
package main import ( "encoding/json" "flag" "log" "net" "net/http" "os" "os/signal" "path" "strconv" "strings" "syscall" "time" "github.com/docker/go-units" "golang.org/x/net/websocket" "fmt" "path/filepath" ggr "github.com/aerokube/ggr/config" "github.com/aerokube/selenoid/config" "github.com/aerokube/selenoid/protect" "github.com/aerokube/selenoid/service" "github.com/aerokube/selenoid/session" "github.com/aerokube/selenoid/upload" "github.com/aerokube/util" "github.com/aerokube/util/docker" "github.com/docker/docker/client" ) type memLimit int64 func (limit *memLimit) String() string { return units.HumanSize(float64(*limit)) } func (limit *memLimit) Set(s string) error { v, err := units.RAMInBytes(s) if err != nil { return fmt.Errorf("set memory limit: %v", err) } *limit = memLimit(v) return nil } type cpuLimit int64 func (limit *cpuLimit) String() string { return strconv.FormatFloat(float64(*limit/1000000000), 'f', -1, 64) } func (limit *cpuLimit) Set(s string) error { v, err := strconv.ParseFloat(s, 64) if err != nil { return fmt.Errorf("set cpu limit: %v", err) } *limit = cpuLimit(v * 1000000000) return nil } var ( hostname string disableDocker bool disableQueue bool enableFileUpload bool listen string timeout time.Duration maxTimeout time.Duration newSessionAttemptTimeout time.Duration sessionDeleteTimeout time.Duration serviceStartupTimeout time.Duration limit int retryCount int containerNetwork string sessions = session.NewMap() confPath string logConfPath string captureDriverLogs bool disablePrivileged bool videoOutputDir string videoRecorderImage string logOutputDir string saveAllLogs bool ggrHost *ggr.Host conf *config.Config queue *protect.Queue manager service.Manager cli *client.Client startTime = time.Now() version bool gitRevision = "HEAD" buildStamp = "unknown" ) func init() { var mem memLimit var cpu cpuLimit flag.BoolVar(&disableDocker, "disable-docker", false, "Disable docker support") flag.BoolVar(&disableQueue, "disable-queue", false, "Disable wait queue") flag.BoolVar(&enableFileUpload, "enable-file-upload", false, "File upload support") flag.StringVar(&listen, "listen", ":4444", "Network address to accept connections") flag.StringVar(&confPath, "conf", "config/browsers.json", "Browsers configuration file") flag.StringVar(&logConfPath, "log-conf", "", "Container logging configuration file") flag.IntVar(&limit, "limit", 5, "Simultaneous container runs") flag.IntVar(&retryCount, "retry-count", 1, "New session attempts retry count") flag.DurationVar(&timeout, "timeout", 60*time.Second, "Session idle timeout in time.Duration format") flag.DurationVar(&maxTimeout, "max-timeout", 1*time.Hour, "Maximum valid session idle timeout in time.Duration format") flag.DurationVar(&newSessionAttemptTimeout, "session-attempt-timeout", 30*time.Second, "New session attempt timeout in time.Duration format") flag.DurationVar(&sessionDeleteTimeout, "session-delete-timeout", 30*time.Second, "Session delete timeout in time.Duration format") flag.DurationVar(&serviceStartupTimeout, "service-startup-timeout", 30*time.Second, "Service startup timeout in time.Duration format") flag.BoolVar(&version, "version", false, "Show version and exit") flag.Var(&mem, "mem", "Containers memory limit e.g. 128m or 1g") flag.Var(&cpu, "cpu", "Containers cpu limit as float e.g. 
0.2 or 1.0") flag.StringVar(&containerNetwork, "container-network", service.DefaultContainerNetwork, "Network to be used for containers") flag.BoolVar(&captureDriverLogs, "capture-driver-logs", false, "Whether to add driver process logs to Selenoid output") flag.BoolVar(&disablePrivileged, "disable-privileged", false, "Whether to disable privileged container mode") flag.StringVar(&videoOutputDir, "video-output-dir", "video", "Directory to save recorded video to") flag.StringVar(&videoRecorderImage, "video-recorder-image", "selenoid/video-recorder:latest-release", "Image to use as video recorder") flag.StringVar(&logOutputDir, "log-output-dir", "", "Directory to save session log to") flag.BoolVar(&saveAllLogs, "save-all-logs", false, "Whether to save all logs without considering capabilities") flag.Parse() if version { showVersion() os.Exit(0) } var err error hostname, err = os.Hostname() if err != nil { log.Fatalf("[-] [INIT] [%s: %v]", os.Args[0], err) } if ggrHostEnv := os.Getenv("GGR_HOST"); ggrHostEnv != "" { ggrHost = parseGgrHost(ggrHostEnv) } queue = protect.New(limit, disableQueue) conf = config.NewConfig() err = conf.Load(confPath, logConfPath) if err != nil { log.Fatalf("[-] [INIT] [%s: %v]", os.Args[0], err) } onSIGHUP(func() { err := conf.Load(confPath, logConfPath) if err != nil { log.Printf("[-] [INIT] [%s: %v]", os.Args[0], err) } }) cancelOnSignal() inDocker := false _, err = os.Stat("/.dockerenv") if err == nil { inDocker = true } if !disableDocker { videoOutputDir, err = filepath.Abs(videoOutputDir) if err != nil { log.Fatalf("[-] [INIT] [Invalid video output dir %s: %v]", videoOutputDir, err) } err = os.MkdirAll(videoOutputDir, os.FileMode(0644)) if err != nil { log.Fatalf("[-] [INIT] [Failed to create video output dir %s: %v]", videoOutputDir, err) } log.Printf("[-] [INIT] [Video Dir: %s]", videoOutputDir) } if logOutputDir != "" { logOutputDir, err = filepath.Abs(logOutputDir) if err != nil { log.Fatalf("[-] [INIT] [Invalid log output dir %s: %v]", logOutputDir, err) } err = os.MkdirAll(logOutputDir, os.FileMode(0644)) if err != nil { log.Fatalf("[-] [INIT] [Failed to create log output dir %s: %v]", logOutputDir, err) } log.Printf("[-] [INIT] [Logs Dir: %s]", logOutputDir) if saveAllLogs { log.Printf("[-] [INIT] [Saving all logs]") } } upload.Init() environment := service.Environment{ InDocker: inDocker, CPU: int64(cpu), Memory: int64(mem), Network: containerNetwork, StartupTimeout: serviceStartupTimeout, SessionDeleteTimeout: sessionDeleteTimeout, CaptureDriverLogs: captureDriverLogs, VideoOutputDir: videoOutputDir, VideoContainerImage: videoRecorderImage, LogOutputDir: logOutputDir, SaveAllLogs: saveAllLogs, Privileged: !disablePrivileged, } if disableDocker { manager = &service.DefaultManager{Environment: &environment, Config: conf} if logOutputDir != "" && captureDriverLogs { log.Fatalf("[-] [INIT] [In drivers mode only one of -capture-driver-logs and -log-output-dir flags is allowed]") } return } dockerHost := os.Getenv("DOCKER_HOST") if dockerHost == "" { dockerHost = client.DefaultDockerHost } u, err := client.ParseHostURL(dockerHost) if err != nil { log.Fatalf("[-] [INIT] [%v]", err) } ip, _, _ := net.SplitHostPort(u.Host) environment.IP = ip cli, err = docker.CreateCompatibleDockerClient( func(specifiedApiVersion string) { log.Printf("[-] [INIT] [Using Docker API version: %s]", specifiedApiVersion) }, func(determinedApiVersion string) { log.Printf("[-] [INIT] [Your Docker API version is %s]", determinedApiVersion) }, func(defaultApiVersion string) { 
log.Printf("[-] [INIT] [Did not manage to determine your Docker API version - using default version: %s]", defaultApiVersion) }, ) if err != nil { log.Fatalf("[-] [INIT] [New docker client: %v]", err) } manager = &service.DefaultManager{Environment: &environment, Client: cli, Config: conf} } func parseGgrHost(s string) *ggr.Host { h, p, err := net.SplitHostPort(s) if err != nil { log.Fatalf("[-] [INIT] [Invalid Ggr host: %v]", err) } ggrPort, err := strconv.Atoi(p) if err != nil { log.Fatalf("[-] [INIT] [Invalid Ggr host: %v]", err) } host := &ggr.Host{ Name: h, Port: ggrPort, } log.Printf("[-] [INIT] [Will prefix all session IDs with a hash-sum: %s]", host.Sum()) return host } func cancelOnSignal() { sig := make(chan os.Signal) signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM) go func() { <-sig log.Println("[-] [-] [SHUTTING_DOWN] [-] [-] [-] [-] [-] [-] [-]") sessions.Each(func(k string, s *session.Session) { if enableFileUpload { os.RemoveAll(path.Join(os.TempDir(), k)) } s.Cancel() }) if !disableDocker { err := cli.Close() if err != nil { log.Fatalf("[-] [SHUTTING_DOWN] [Error closing docker client: %v]", err) } } os.Exit(0) }() } func onSIGHUP(fn func()) { sig := make(chan os.Signal) signal.Notify(sig, syscall.SIGHUP) go func() { for { <-sig fn() } }() } var seleniumPaths = struct { CreateSession, ProxySession string }{ CreateSession: "/session", ProxySession: "/session/", } func selenium() http.Handler { mux := http.NewServeMux() mux.HandleFunc(seleniumPaths.CreateSession, queue.Try(queue.Check(queue.Protect(post(create))))) mux.HandleFunc(seleniumPaths.ProxySession, proxy) mux.HandleFunc(paths.Status, status) return mux } func post(next http.HandlerFunc) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) return } next.ServeHTTP(w, r) } } func ping(w http.ResponseWriter, _ *http.Request) { w.Header().Add("Content-Type", "application/json") json.NewEncoder(w).Encode(struct { Uptime string `json:"uptime"` LastReloadTime string `json:"lastReloadTime"` NumRequests uint64 `json:"numRequests"` Version string `json:"version"` }{time.Since(startTime).String(), conf.LastReloadTime.String(), getSerial(), gitRevision}) } func video(w http.ResponseWriter, r *http.Request) { if r.Method == http.MethodDelete { deleteFileIfExists(w, r, videoOutputDir, paths.Video, "DELETED_VIDEO_FILE") return } fileServer := http.StripPrefix(paths.Video, http.FileServer(http.Dir(videoOutputDir))) fileServer.ServeHTTP(w, r) } func deleteFileIfExists(w http.ResponseWriter, r *http.Request, dir string, prefix string, status string) { fileName := strings.TrimPrefix(r.URL.Path, prefix) filePath := filepath.Join(dir, fileName) _, err := os.Stat(filePath) if err != nil { http.Error(w, fmt.Sprintf("Unknown file %s", filePath), http.StatusNotFound) return } err = os.Remove(filePath) if err != nil { http.Error(w, fmt.Sprintf("Failed to delete file %s: %v", filePath, err), http.StatusInternalServerError) return } log.Printf("[%d] [%s] [%s]", serial(), status, fileName) } var paths = struct { Video, VNC, Logs, Devtools, Download, Clipboard, File, Ping, Status, Error, WdHub string }{ Video: "/video/", VNC: "/vnc/", Logs: "/logs/", Devtools: "/devtools/", Download: "/download/", Clipboard: "/clipboard/", Status: "/status", File: "/file", Ping: "/ping", Error: "/error", WdHub: "/wd/hub", } func handler() http.Handler { root := http.NewServeMux() root.HandleFunc(paths.WdHub+"/", func(w http.ResponseWriter, r 
*http.Request) { w.Header().Add("Content-Type", "application/json") r.URL.Scheme = "http" r.URL.Host = (&request{r}).localaddr() r.URL.Path = strings.TrimPrefix(r.URL.Path, paths.WdHub) selenium().ServeHTTP(w, r) }) root.HandleFunc(paths.Error, func(w http.ResponseWriter, r *http.Request) { util.JsonError(w, "Session timed out or not found", http.StatusNotFound) }) root.HandleFunc(paths.Status, func(w http.ResponseWriter, r *http.Request) { w.Header().Add("Content-Type", "application/json") json.NewEncoder(w).Encode(conf.State(sessions, limit, queue.Queued(), queue.Pending())) }) root.HandleFunc(paths.Ping, ping) root.Handle(paths.VNC, websocket.Handler(vnc)) root.Handle(paths.Devtools, websocket.Server{Handler: devtools}) root.HandleFunc(paths.Logs, logs) root.HandleFunc(paths.Video, video) root.HandleFunc(paths.Download, reverseProxy(func(sess *session.Session) string { return sess.HostPort.Fileserver }, "DOWNLOADING_FILE")) root.HandleFunc(paths.Clipboard, reverseProxy(func(sess *session.Session) string { return sess.HostPort.Clipboard }, "CLIPBOARD")) if enableFileUpload { root.HandleFunc(paths.File, fileUpload) } return root } func showVersion() { fmt.Printf("Git Revision: %s\n", gitRevision) fmt.Printf("UTC Build Time: %s\n", buildStamp) } func main() { log.Printf("[-] [INIT] [Timezone: %s]", time.Local) log.Printf("[-] [INIT] [Listening on %s]", listen) log.Fatal(http.ListenAndServe(listen, handler())) }
[ "\"GGR_HOST\"", "\"DOCKER_HOST\"" ]
[]
[ "DOCKER_HOST", "GGR_HOST" ]
[]
["DOCKER_HOST", "GGR_HOST"]
go
2
0
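parseGgrHost in the Selenoid record above splits GGR_HOST into a name and a numeric port and aborts on malformed input. The same validation in isolation is shown below; only the variable name and the host:port shape come from the record, and the fallback value is a hypothetical example.

package main

import (
	"fmt"
	"log"
	"net"
	"os"
	"strconv"
)

func main() {
	// GGR_HOST is expected to look like "ggr.example.com:4444".
	raw := os.Getenv("GGR_HOST")
	if raw == "" {
		raw = "ggr.example.com:4444" // illustrative fallback, not in the record
	}

	host, portStr, err := net.SplitHostPort(raw)
	if err != nil {
		log.Fatalf("invalid Ggr host: %v", err)
	}
	port, err := strconv.Atoi(portStr)
	if err != nil {
		log.Fatalf("invalid Ggr port: %v", err)
	}
	fmt.Printf("name=%s port=%d\n", host, port)
}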
examples/v2/incident-services/ListIncidentServices.go
// Get a list of all incident services returns "OK" response package main import ( "context" "encoding/json" "fmt" "os" datadog "github.com/DataDog/datadog-api-client-go/api/v2/datadog" ) func main() { // there is a valid "service" in the system ServiceDataAttributesName := os.Getenv("SERVICE_DATA_ATTRIBUTES_NAME") ctx := datadog.NewDefaultContext(context.Background()) configuration := datadog.NewConfiguration() configuration.SetUnstableOperationEnabled("ListIncidentServices", true) apiClient := datadog.NewAPIClient(configuration) resp, r, err := apiClient.IncidentServicesApi.ListIncidentServices(ctx, *datadog.NewListIncidentServicesOptionalParameters().WithFilter(ServiceDataAttributesName)) if err != nil { fmt.Fprintf(os.Stderr, "Error when calling `IncidentServicesApi.ListIncidentServices`: %v\n", err) fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", r) } responseContent, _ := json.MarshalIndent(resp, "", " ") fmt.Fprintf(os.Stdout, "Response from `IncidentServicesApi.ListIncidentServices`:\n%s\n", responseContent) }
[ "\"SERVICE_DATA_ATTRIBUTES_NAME\"" ]
[]
[ "SERVICE_DATA_ATTRIBUTES_NAME" ]
[]
["SERVICE_DATA_ATTRIBUTES_NAME"]
go
1
0
cmd/cli.go
package cmd

import (
    "encoding/json"
    "fmt"
    "os"

    "github.com/ghodss/yaml"
    "github.com/olekukonko/tablewriter"
    "github.com/packethost/packngo"
    "github.com/spf13/cobra"
)

// Cli struct
type Cli struct {
    Client  *packngo.Client
    MainCmd *cobra.Command
}

// VERSION build
var (
    Version string = "0.0.7"
)

// NewCli struct
func NewCli() *Cli {
    var err error
    cli := &Cli{}
    cli.Client, err = packngo.NewClientWithBaseURL("Packet CLI", os.Getenv("PACKET_TOKEN"), nil, "https://api.packet.net/")
    if err != nil {
        fmt.Println("Client error:", err)
        return nil
    }
    rootCmd.Execute()
    rootCmd.DisableSuggestions = false
    cli.MainCmd = rootCmd
    return cli
}

func output(in interface{}, header []string, data *[][]string) {
    if !isJSON && !isYaml {
        table := tablewriter.NewWriter(os.Stdout)
        table.SetAutoWrapText(false)
        table.SetAlignment(tablewriter.ALIGN_LEFT)
        table.SetHeader(header)
        table.AppendBulk(*data)
        table.Render()
    } else if isJSON {
        output, err := json.MarshalIndent(in, "", " ")
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(string(output))
    } else if isYaml {
        output, err := yaml.Marshal(in)
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(string(output))
    }
}

func outputMergingCells(in interface{}, header []string, data *[][]string) {
    if !isJSON && !isYaml {
        table := tablewriter.NewWriter(os.Stdout)
        table.SetAutoMergeCells(true)
        table.SetRowLine(true)
        table.SetHeader(header)
        table.AppendBulk(*data)
        table.Render()
    } else if isJSON {
        output, err := json.MarshalIndent(in, "", " ")
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(string(output))
    } else if isYaml {
        output, err := yaml.Marshal(in)
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(string(output))
    }
}
[ "\"PACKET_TOKEN\"" ]
[]
[ "PACKET_TOKEN" ]
[]
["PACKET_TOKEN"]
go
1
0
pkg/network/network.go
package network import ( "context" "log" "os" "reflect" "strings" osv1 "github.com/openshift/api/operator/v1" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" k8sclient "sigs.k8s.io/controller-runtime/pkg/client" cnao "github.com/kubevirt/cluster-network-addons-operator/pkg/apis/networkaddonsoperator/shared" ) // Canonicalize converts configuration to a canonical form. func Canonicalize(conf *cnao.NetworkAddonsConfigSpec) { // TODO } // Validate checks that the supplied configuration is reasonable. // This should be called after Canonicalize func Validate(conf *cnao.NetworkAddonsConfigSpec, openshiftNetworkConfig *osv1.Network) error { errs := []error{} errs = append(errs, validateMultus(conf, openshiftNetworkConfig)...) errs = append(errs, validateKubeMacPool(conf)...) errs = append(errs, validateImagePullPolicy(conf)...) errs = append(errs, validateSelfSignConfiguration(conf)...) if len(errs) > 0 { return errors.Errorf("invalid configuration:\n%s", errorListToMultiLineString(errs)) } return nil } // FillDefaults computes any default values and applies them to the configuration // This is a mutating operation. It should be called after Validate. // // Defaults are carried forward from previous if it is provided. This is so we // can change defaults as we move forward, but won't disrupt existing clusters. func FillDefaults(conf, previous *cnao.NetworkAddonsConfigSpec) error { errs := []error{} errs = append(errs, fillDefaultsPlacementConfiguration(conf, previous)...) errs = append(errs, fillDefaultsSelfSignConfiguration(conf, previous)...) errs = append(errs, fillDefaultsImagePullPolicy(conf, previous)...) errs = append(errs, fillDefaultsKubeMacPool(conf, previous)...) if len(errs) > 0 { return errors.Errorf("invalid configuration:\n%s", errorListToMultiLineString(errs)) } return nil } // specialCleanUp checks if there are any specific outdated objects or ones that are no longer compatible and deletes them. func SpecialCleanUp(conf *cnao.NetworkAddonsConfigSpec, client k8sclient.Client) error { errs := []error{} ctx := context.TODO() errs = append(errs, cleanUpMultus(conf, ctx, client)...) errs = append(errs, cleanUpNMState(conf, ctx, client)...) if len(errs) > 0 { return errors.Errorf("invalid configuration:\n%v", errorListToMultiLineString(errs)) } return nil } // IsChangeSafe checks to see if the change between prev and next are allowed // FillDefaults and Validate should have been called. func IsChangeSafe(prev, next *cnao.NetworkAddonsConfigSpec) error { if prev == nil { return nil } // Easy way out: nothing changed. if reflect.DeepEqual(prev, next) { return nil } errs := []error{} errs = append(errs, changeSafeKubeMacPool(prev, next)...) errs = append(errs, changeSafeImagePullPolicy(prev, next)...) errs = append(errs, changeSafeSelfSignConfiguration(prev, next)...) if len(errs) > 0 { return errors.Errorf("invalid configuration:\n%s", errorListToMultiLineString(errs)) } return nil } // Render creates a list of components to be created func Render(conf *cnao.NetworkAddonsConfigSpec, manifestDir string, openshiftNetworkConfig *osv1.Network, clusterInfo *ClusterInfo) ([]*unstructured.Unstructured, error) { log.Print("starting render phase") objs := []*unstructured.Unstructured{} // render Multus o, err := renderMultus(conf, manifestDir, openshiftNetworkConfig, clusterInfo) if err != nil { return nil, err } objs = append(objs, o...) 
// render Linux Bridge o, err = renderLinuxBridge(conf, manifestDir, clusterInfo) if err != nil { return nil, err } objs = append(objs, o...) // render kubeMacPool o, err = renderKubeMacPool(conf, manifestDir) if err != nil { return nil, err } objs = append(objs, o...) // render NMState o, err = renderNMState(conf, manifestDir, clusterInfo) if err != nil { return nil, err } objs = append(objs, o...) // render Ovs o, err = renderOvs(conf, manifestDir, clusterInfo) if err != nil { return nil, err } objs = append(objs, o...) // render MacvtapCni o, err = renderMacvtapCni(conf, manifestDir, clusterInfo) if err != nil { return nil, err } objs = append(objs, o...) log.Printf("render phase done, rendered %d objects", len(objs)) return objs, nil } // RenderObjsToRemove creates list of components to be removed func RenderObjsToRemove(prev, conf *cnao.NetworkAddonsConfigSpec, manifestDir string, openshiftNetworkConfig *osv1.Network, clusterInfo *ClusterInfo) ([]*unstructured.Unstructured, error) { log.Print("starting rendering objects to delete phase") objsToRemove := []*unstructured.Unstructured{} if prev == nil { return nil, nil } if conf.Multus == nil { o, err := renderMultus(prev, manifestDir, openshiftNetworkConfig, clusterInfo) if err != nil { return nil, err } objsToRemove = append(objsToRemove, o...) } if conf.LinuxBridge == nil { o, err := renderLinuxBridge(prev, manifestDir, clusterInfo) if err != nil { return nil, err } objsToRemove = append(objsToRemove, o...) } if conf.KubeMacPool == nil { o, err := renderKubeMacPool(prev, manifestDir) if err != nil { return nil, err } objsToRemove = append(objsToRemove, o...) } if conf.NMState == nil { o, err := renderNMState(prev, manifestDir, clusterInfo) if err != nil { return nil, err } objsToRemove = append(objsToRemove, o...) } if conf.Ovs == nil { o, err := renderOvs(prev, manifestDir, clusterInfo) if err != nil { return nil, err } objsToRemove = append(objsToRemove, o...) } // render MacvtapCni if conf.MacvtapCni == nil { o, err := renderMacvtapCni(conf, manifestDir, clusterInfo) if err != nil { return nil, err } objsToRemove = append(objsToRemove, o...) } // Remove OPERAND_NAMESPACE occurences // TODO cleanup OPERAND_NAMESPACE once there are no components using it. objsToRemoveWithoutNamespace := []*unstructured.Unstructured{} operandNamespace := os.Getenv("OPERAND_NAMESPACE") for _, obj := range objsToRemove { if !(obj.GetName() == operandNamespace && obj.GetKind() == "Namespace") { objsToRemoveWithoutNamespace = append(objsToRemoveWithoutNamespace, obj) } } objsToRemove = objsToRemoveWithoutNamespace // Do not remove CustomResourceDefinitions, they should be kept even after // removal of the operator objsToRemoveWithoutCRDs := []*unstructured.Unstructured{} for _, obj := range objsToRemove { if obj.GetKind() != "CustomResourceDefinition" { objsToRemoveWithoutCRDs = append(objsToRemoveWithoutCRDs, obj) } } objsToRemove = objsToRemoveWithoutCRDs log.Printf("object removal render phase done, rendered %d objects to remove", len(objsToRemove)) return objsToRemove, nil } func errorListToMultiLineString(errs []error) string { stringErrs := []string{} for _, err := range errs { if err != nil { stringErrs = append(stringErrs, err.Error()) } } return strings.Join(stringErrs, "\n") }
[ "\"OPERAND_NAMESPACE\"" ]
[]
[ "OPERAND_NAMESPACE" ]
[]
["OPERAND_NAMESPACE"]
go
1
0
locallibrary/settings.py
""" Django settings for locallibrary project. Generated by 'django-admin startproject' using Django 3.1.2. For more information on this file, see https://docs.djangoproject.com/en/3.1/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.1/ref/settings/ """ from pathlib import Path # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! #SECRET_KEY = 'cg#p$g+j9tax!#a3cup@1$8obt2_+&k3q+pmu)5%asj6yjpkag' import os SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'cg#p$g+j9tax!#a3cup@1$8obt2_+&k3q+pmu)5%asj6yjpkag') # SECURITY WARNING: don't run with debug turned on in production! #DEBUG = True DEBUG = os.environ.get('DJANGO_DEBUG', '') != 'False' # Set hosts to allow any app on Heroku and the local testing URL ALLOWED_HOSTS = ['.herokuapp.com','127.0.0.1', '.elasticbeanstalk.com'] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', # Add our new application 'catalog.apps.CatalogConfig', #This object was created for us in /catalog/apps.py ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'whitenoise.middleware.WhiteNoiseMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'locallibrary.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'locallibrary.wsgi.application' # Database # https://docs.djangoproject.com/en/3.1/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'local_library', 'USER': 'local_library', 'PASSWORD': 'asdf1234', 'HOST': 'eb-postgres.cke5zq28rpih.ap-northeast-2.rds.amazonaws.com', 'PORT': '5432', } } # Password validation # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.1/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Redirect to home URL after login (Default redirects to /accounts/profile/) LOGIN_REDIRECT_URL = '/' # Add to test email: EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.1/howto/static-files/ # The 
absolute path to the directory where collectstatic will collect static files for deployment. STATIC_ROOT = BASE_DIR / 'staticfiles' #. os.path.join(BASE_DIR, 'staticfiles') # The URL to use when referring to static files (where they will be served from) STATIC_URL = '/static/' # Static file serving. # http://whitenoise.evans.io/en/stable/django.html#django-middleware STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
[]
[]
[ "DJANGO_DEBUG", "DJANGO_SECRET_KEY" ]
[]
["DJANGO_DEBUG", "DJANGO_SECRET_KEY"]
python
2
0
tensorflow/python/kernel_tests/conv_ops_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional tests for convolutional operations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import time import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.core.protobuf import config_pb2 from tensorflow.core.protobuf import rewriter_config_pb2 from tensorflow.python.client import session as session_lib from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors_impl from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.layers import convolutional from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gradient_checker from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import nn_impl from tensorflow.python.ops import nn_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import variables import tensorflow.python.ops.nn_grad # pylint: disable=unused-import from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging from tensorflow.python.util.compat import collections_abc def GetShrunkInceptionShapes(shrink=10): """Iterator for smaller versions of convolution shapes in 2015 Inception. Relative to inception, each depth value is `depth // shrink`. Args: shrink: Factor to shrink each depth value by relative to Inception. Yields: Tuple (input_size, filter_size, out_size, stride, padding), the convolution parameters of Inception layers. 
""" input_sizes = [[4, 5, 5, 1248], [4, 8, 8, 384], [4, 8, 8, 384], [4, 8, 8, 2048], [4, 8, 8, 448], [4, 8, 8, 2048], [4, 8, 8, 2048], [4, 8, 8, 2048], [4, 8, 8, 1760], [4, 8, 8, 1760], [4, 8, 8, 1760], [4, 8, 8, 1760], [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1248], [4, 17, 17, 128], [4, 17, 17, 1248], [4, 17, 17, 224], [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1216], [4, 17, 17, 1216], [4, 17, 17, 224], [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1152], [4, 17, 17, 1152], [4, 17, 17, 192], [4, 17, 17, 160], [4, 17, 17, 1152], [4, 17, 17, 1024], [4, 17, 17, 128], [4, 17, 17, 1024], [4, 17, 17, 128], [4, 17, 17, 1024], [4, 17, 17, 128], [4, 17, 17, 768], [4, 17, 17, 128], [4, 17, 17, 128], [4, 17, 17, 768], [4, 17, 17, 768], [4, 35, 35, 96], [4, 35, 35, 288], [4, 35, 35, 64], [4, 35, 35, 288], [4, 35, 35, 256], [4, 35, 35, 48], [4, 35, 35, 256], [4, 35, 35, 96], [4, 35, 35, 192], [4, 35, 35, 192], [4, 35, 35, 192], [4, 73, 73, 64], [4, 73, 73, 64], [4, 147, 147, 24]] filter_sizes = [[1, 1, 1248, 128], [1, 3, 384, 384], [3, 1, 384, 384], [1, 1, 2048, 192], [3, 3, 448, 384], [1, 1, 2048, 320], [1, 1, 2048, 448], [1, 1, 2048, 384], [1, 1, 1760, 384], [1, 1, 1760, 192], [1, 1, 1760, 448], [1, 1, 1760, 320], [3, 3, 192, 192], [3, 3, 192, 192], [1, 1, 1248, 192], [3, 3, 128, 320], [1, 1, 1248, 128], [1, 3, 224, 224], [3, 1, 192, 256], [1, 3, 192, 256], [1, 1, 1216, 192], [1, 1, 1216, 96], [3, 1, 224, 224], [3, 3, 192, 224], [1, 3, 192, 192], [1, 1, 1152, 192], [1, 1, 1152, 128], [3, 1, 192, 192], [3, 3, 160, 192], [1, 1, 1152, 160], [1, 1, 1024, 128], [1, 3, 128, 192], [1, 1, 1024, 160], [3, 1, 128, 192], [1, 1, 1024, 256], [3, 1, 128, 128], [1, 1, 768, 192], [1, 3, 128, 128], [3, 3, 128, 128], [1, 1, 768, 128], [1, 1, 768, 320], [3, 3, 96, 96], [3, 3, 288, 384], [3, 3, 64, 96], [1, 1, 288, 64], [1, 1, 256, 64], [5, 5, 48, 64], [1, 1, 256, 48], [3, 3, 96, 96], [1, 1, 192, 32], [1, 1, 192, 64], [1, 1, 192, 48], [3, 3, 64, 192], [1, 1, 64, 64], [1, 1, 24, 64]] out_sizes = [[4, 5, 5, 128], [4, 8, 8, 384], [4, 8, 8, 384], [4, 8, 8, 192], [4, 8, 8, 384], [4, 8, 8, 320], [4, 8, 8, 448], [4, 8, 8, 384], [4, 8, 8, 384], [4, 8, 8, 192], [4, 8, 8, 448], [4, 8, 8, 320], [4, 8, 8, 192], [4, 17, 17, 192], [4, 17, 17, 192], [4, 8, 8, 320], [4, 17, 17, 128], [4, 17, 17, 224], [4, 17, 17, 256], [4, 17, 17, 256], [4, 17, 17, 192], [4, 17, 17, 96], [4, 17, 17, 224], [4, 17, 17, 224], [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 160], [4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 160], [4, 17, 17, 192], [4, 17, 17, 256], [4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 128], [4, 17, 17, 128], [4, 17, 17, 128], [4, 17, 17, 320], [4, 17, 17, 96], [4, 17, 17, 384], [4, 35, 35, 96], [4, 35, 35, 64], [4, 35, 35, 64], [4, 35, 35, 64], [4, 35, 35, 48], [4, 35, 35, 96], [4, 35, 35, 32], [4, 35, 35, 64], [4, 35, 35, 48], [4, 71, 71, 192], [4, 73, 73, 64], [4, 147, 147, 64]] strides = [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ] # Shrink sizes to make the test faster for i in input_sizes: i[3] //= shrink for f in filter_sizes: f[2] //= shrink f[3] //= shrink for o in out_sizes: o[3] //= shrink # pylint: disable=invalid-name VALID = "VALID" SAME = "SAME" # pylint: enable=invalid-name paddings = [ SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, VALID, SAME, SAME, VALID, SAME, SAME, SAME, SAME, SAME, SAME, 
SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, VALID, VALID, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, VALID, VALID, VALID ] for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides, paddings): yield i, f, o, s, p def GetTestConfigs(): """Get all the valid tests configs to run. Returns: all the valid test configs as tuples of data_format and use_gpu. """ test_configs = [("NHWC", False), ("NHWC", True)] if test.is_gpu_available(cuda_only=True): # "NCHW" format is only supported on CUDA. test_configs += [("NCHW", True)] return test_configs class Conv2DTest(test.TestCase): def _DtypesToTest(self, use_gpu): # double datatype is currently not supported for convolution ops # on the ROCm platform optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64] if use_gpu and not test_util.GpuSupportsHalfMatMulAndConv(): return [dtypes.float32] + optional_float64 else: # It is important that float32 comes before float16 here, # as we will be using its gradients as reference for fp16 gradients. return [dtypes.float32, dtypes.float16] + optional_float64 def _CreateNumpyTensor(self, shape): total_size = 1 for s in shape: total_size *= s return np.arange(1, total_size + 1, dtype=np.float32).reshape(shape) def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, dilations, strides, padding, data_format, dtype, use_gpu): """Verifies the output values of the convolution function. Args: tensor_in_sizes: Input tensor dimensions in [batch, input_rows, input_cols, input_depth]. filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols, input_depth, output_depth]. dilations: Dilated rate: [col_dilation, row_dilation] strides: Stride: [col_stride, row_stride] padding: Padding type. data_format: Format of the data tensors. dtype: Data type for inputs and outputs. use_gpu: True if the operations should be run on GPU Returns: Symbolic tensor value that can be used to execute the computation """ x1 = self._CreateNumpyTensor(tensor_in_sizes) x2 = self._CreateNumpyTensor(filter_in_sizes) with test_util.device(use_gpu): t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype) t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype) strides = [1] + strides + [1] dilations = [1] + dilations + [1] if isinstance(padding, (list, tuple)): padding = [(0, 0)] + padding + [(0, 0)] if data_format == "NCHW": t1 = test_util.NHWCToNCHW(t1) strides = test_util.NHWCToNCHW(strides) dilations = test_util.NHWCToNCHW(dilations) if isinstance(padding, (list, tuple)): padding = test_util.NHWCToNCHW(padding) conv = nn_ops.conv2d( t1, t2, dilations=dilations, strides=strides, padding=padding, data_format=data_format) self.assertEqual(conv.dtype, dtype) if data_format == "NCHW": conv = test_util.NCHWToNHWC(conv) return conv def _CompareFwdValues(self, tensor_in_sizes, filter_in_sizes, conv_strides, padding): """Verifies that CPU and GPU produce the same values. Args: tensor_in_sizes: Input tensor dimensions in [batch, input_rows, input_cols, input_depth]. filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols, input_depth, output_depth]. conv_strides: [row_stride, col_stride] for the convolution; padding: Padding type. 
""" x1 = np.random.rand(*tensor_in_sizes).astype(np.float32) x2 = np.random.rand(*filter_in_sizes).astype(np.float32) def _SetupVal(data_format, use_gpu): with test_util.device(use_gpu): t1 = constant_op.constant(x1, shape=tensor_in_sizes) t2 = constant_op.constant(x2, shape=filter_in_sizes) strides = [1] + conv_strides + [1] if data_format == "NCHW": t1 = test_util.NHWCToNCHW(t1) strides = test_util.NHWCToNCHW(strides) conv = nn_ops.conv2d( t1, t2, strides=strides, padding=padding, data_format=data_format) if data_format == "NCHW": conv = test_util.NCHWToNHWC(conv) return conv tensors = [] for (data_format, use_gpu) in GetTestConfigs(): tensors.append(_SetupVal(data_format, use_gpu)) values = self.evaluate(tensors) for i in range(1, len(values)): self.assertAllClose(values[0], values[i], rtol=1e-3, atol=1e-3) def _ComputeReferenceDilatedConv(self, tensor_in_sizes, filter_in_sizes, stride, dilation, padding, data_format, use_gpu): x1 = self._CreateNumpyTensor(tensor_in_sizes) x2 = self._CreateNumpyTensor(filter_in_sizes) with test_util.device(use_gpu): t1 = constant_op.constant(x1, shape=tensor_in_sizes) t2 = constant_op.constant(x2, shape=filter_in_sizes) if isinstance(stride, collections_abc.Iterable): strides = list(stride) else: strides = [stride, stride] if data_format == "NCHW": t1 = test_util.NHWCToNCHW(t1) full_strides = [1, 1] + strides full_dilation = [1, 1] + dilation else: full_strides = [1] + strides + [1] full_dilation = [1] + dilation + [1] expected = nn_ops.convolution( t1, t2, padding=padding, strides=strides, dilation_rate=dilation, data_format=data_format) computed = nn_ops.conv2d( t1, t2, strides=full_strides, dilations=full_dilation, padding=padding, data_format=data_format) if data_format == "NCHW": expected = test_util.NCHWToNHWC(expected) computed = test_util.NCHWToNHWC(computed) return expected, computed def _VerifyDilatedConvValues(self, tensor_in_sizes, filter_in_sizes, strides, padding, dilations, rtol=1e-4): expected_results = [] computed_results = [] for data_format, use_gpu in GetTestConfigs(): expected, computed = self._ComputeReferenceDilatedConv( tensor_in_sizes, filter_in_sizes, strides, dilations, padding, data_format, use_gpu) expected_results.append(expected) computed_results.append(computed) tolerance = 1e-2 if use_gpu else 1e-5 expected_values = self.evaluate(expected_results) computed_values = self.evaluate(computed_results) for e_value, c_value in zip(expected_values, computed_values): tf_logging.debug("expected = %s", e_value) tf_logging.debug("actual = %s", c_value) self.assertAllClose( e_value.flatten(), c_value.flatten(), atol=tolerance, rtol=rtol) def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, strides, padding, expected, dilations=(1, 1), gpu_only=False, test_grappler_layout_optimizer=False, tol=1e-5, fp16_tol=1e-3): if gpu_only and not test.is_gpu_available(cuda_only=True): return tensors = [] dilations = list(dilations) for (data_format, use_gpu) in GetTestConfigs(): if gpu_only and not use_gpu: continue dtypes_to_test = self._DtypesToTest(use_gpu) if not test_grappler_layout_optimizer and data_format == "NHWC": dtypes_to_test.append(dtypes.int32) for dtype in dtypes_to_test: result = self._SetupValuesForDevice( tensor_in_sizes, filter_in_sizes, dilations, strides, padding, data_format, dtype, use_gpu=use_gpu) if test_grappler_layout_optimizer and data_format == "NHWC" and use_gpu: # Grappler's layout optimizer will not optimize a fetch node, so # this identity allows Grappler to optimize the Conv2D node. 
result = array_ops.identity(result) tensors.append(result) values = self.evaluate(tensors) for i in range(len(tensors)): conv = tensors[i] value = values[i] tf_logging.debug("expected = %s", expected) tf_logging.debug("actual = %s", value) tol_to_use = fp16_tol if value.dtype == np.float16 else tol if np.issubdtype(value.dtype, np.integer): self.assertAllEqual(np.rint(expected), np.ravel(value)) else: self.assertAllClose(expected, np.ravel(value), atol=tol_to_use, rtol=tol_to_use) self.assertShapeEqual(value, conv) self.assertEqual(value.dtype, conv.dtype.as_numpy_dtype) def _VerifyExplicitPaddings(self, tensor_in_sizes, filter_in_sizes, strides, padding, dilations=(1, 1), test_grappler_layout_optimizer=False, tol=1e-5, fp16_tol=1e-3): """Verifies Conv2D with explicit padding generates correct values. It does this by comparing with Conv2D without explicit padding. This function assumes Conv2D without explicit padding works correctly. Args: tensor_in_sizes: Input tensor dimensions in [batch, input_rows, input_cols, input_depth]. filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols, input_depth, output_depth]. strides: [row_stride, col_stride] for the convolution; padding: Explicit padding amounts. dilations: Dilation values test_grappler_layout_optimizer: If True, allow the Grappler layout optimizer to run, which turns NHWC Conv2Ds on the GPU to NCHW Conv2Ds. tol: The absolute and relative tolerance for non-fp16 dtypes. fp16_tol: The absolute and relative tolerance for fp16. """ input_tensor = self._CreateNumpyTensor(tensor_in_sizes) filter_tensor = self._CreateNumpyTensor(filter_in_sizes) input_tensor = array_ops.pad(input_tensor, [(0, 0)] + padding + [(0, 0)]) dilations = list(dilations) conv2d_result = nn_ops.conv2d( input_tensor, filter_tensor, [1] + list(strides) + [1], "VALID", dilations=[1] + dilations + [1]) expected = list(self.evaluate(array_ops.reshape(conv2d_result, [-1]))) self._VerifyValues( tensor_in_sizes, filter_in_sizes, strides, padding, expected, dilations, test_grappler_layout_optimizer=test_grappler_layout_optimizer, tol=tol, fp16_tol=fp16_tol) @test_util.run_in_graph_and_eager_modes def testConv2D1x1Filter(self): expected_output = [ 30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0, 138.0, 171.0, 204.0, 174.0, 216.0, 258.0, 210.0, 261.0, 312.0 ] self._VerifyValues( tensor_in_sizes=[1, 2, 3, 3], filter_in_sizes=[1, 1, 3, 3], strides=[1, 1], padding="VALID", expected=expected_output) @test_util.run_in_graph_and_eager_modes def testConv2DExpandedBatch(self): tensor_in_sizes_batch = [10, 2, 3, 3] tensor_in_sizes_expanded_batch = [2, 5, 2, 3, 3] filter_in_sizes = [1, 1, 3, 3] filter_in = self._CreateNumpyTensor(filter_in_sizes) x1 = self._CreateNumpyTensor(tensor_in_sizes_batch) x2 = x1.reshape(tensor_in_sizes_expanded_batch) conv1 = nn_ops.conv2d( x1, filter_in, strides=[1, 1], padding="VALID") conv2 = nn_ops.conv2d( x2, filter_in, strides=[1, 1], padding="VALID") self.assertEqual(conv1.shape, tensor_in_sizes_batch) self.assertEqual(conv2.shape, tensor_in_sizes_expanded_batch) self.assertAllEqual( conv1, self.evaluate(conv2).reshape(conv1.shape)) @test_util.run_in_graph_and_eager_modes def testConvolutionClass2DExpandedBatch(self): tensor_in_sizes_batch = [10, 2, 3, 3] tensor_in_sizes_expanded_batch = [2, 5, 2, 3, 3] filter_in_sizes = [1, 1, 3, 3] filter_in = self._CreateNumpyTensor(filter_in_sizes) x1 = self._CreateNumpyTensor(tensor_in_sizes_batch) x2 = x1.reshape(tensor_in_sizes_expanded_batch) convolver1 = nn_ops.Convolution( 
input_shape=x1.shape, filter_shape=filter_in.shape, strides=[1, 1], padding="VALID") self.assertEqual(convolver1.num_batch_dims, 1) convolver2 = nn_ops.Convolution( input_shape=x2.shape, filter_shape=filter_in.shape, strides=[1, 1], padding="VALID") self.assertEqual(convolver2.num_batch_dims, 2) conv1 = convolver1(x1, filter_in) conv2 = convolver2(x2, filter_in) self.assertEqual(conv1.shape, tensor_in_sizes_batch) self.assertEqual(conv2.shape, tensor_in_sizes_expanded_batch) self.assertAllEqual( conv1, self.evaluate(conv2).reshape(conv1.shape)) @test_util.run_in_graph_and_eager_modes def testConvolutionWith2SpatialDimensionsAndExpandedBatch(self): tensor_in_sizes_batch = [10, 2, 3, 3] tensor_in_sizes_expanded_batch = [2, 5, 2, 3, 3] filter_in_sizes = [1, 1, 3, 3] filter_in = self._CreateNumpyTensor(filter_in_sizes) x1 = self._CreateNumpyTensor(tensor_in_sizes_batch) x2 = x1.reshape(tensor_in_sizes_expanded_batch) conv1 = nn_ops.convolution( x1, filter_in, strides=[1, 1], padding="VALID") conv2 = nn_ops.convolution( x2, filter_in, strides=[1, 1], padding="VALID") self.assertEqual(conv1.shape, tensor_in_sizes_batch) self.assertEqual(conv2.shape, tensor_in_sizes_expanded_batch) self.assertAllEqual( conv1, self.evaluate(conv2).reshape(conv1.shape)) @test_util.run_in_graph_and_eager_modes def testConv2D2x2Filter2x1Dilation(self): self._VerifyDilatedConvValues( tensor_in_sizes=[1, 4, 4, 1], filter_in_sizes=[2, 2, 1, 1], strides=[1, 1], dilations=[2, 1], padding="VALID") @test_util.run_in_graph_and_eager_modes def testConv2DEmpty(self): expected_output = [] self._VerifyValues( tensor_in_sizes=[0, 2, 3, 3], filter_in_sizes=[1, 1, 3, 3], strides=[1, 1], padding="VALID", expected=expected_output) @test_util.run_in_graph_and_eager_modes def testConv2DEmptyDilation(self): self._VerifyDilatedConvValues( tensor_in_sizes=[0, 2, 3, 3], filter_in_sizes=[1, 1, 3, 3], strides=[1, 1], dilations=[2, 1], padding="VALID") @test_util.run_in_graph_and_eager_modes def testConv2D2x2Filter(self): # The outputs are computed using third_party/py/IPython/notebook. expected_output = [2271.0, 2367.0, 2463.0, 2901.0, 3033.0, 3165.0] self._VerifyValues( tensor_in_sizes=[1, 2, 3, 3], filter_in_sizes=[2, 2, 3, 3], strides=[1, 1], padding="VALID", expected=expected_output) @test_util.run_in_graph_and_eager_modes def testConv2D2x2FilterDilation(self): self._VerifyDilatedConvValues( tensor_in_sizes=[1, 2, 3, 3], filter_in_sizes=[2, 2, 3, 3], strides=[1, 1], dilations=[1, 2], padding="VALID") @test_util.run_in_graph_and_eager_modes def testConv2D1x2Filter(self): # The outputs are computed using third_party/py/IPython/notebook. 
expected_output = [ 231.0, 252.0, 273.0, 384.0, 423.0, 462.0, 690.0, 765.0, 840.0, 843.0, 936.0, 1029.0 ] self._VerifyValues( tensor_in_sizes=[1, 2, 3, 3], filter_in_sizes=[1, 2, 3, 3], strides=[1, 1], padding="VALID", expected=expected_output) @test_util.run_in_graph_and_eager_modes def testConv2D1x2FilterDilation(self): self._VerifyDilatedConvValues( tensor_in_sizes=[1, 2, 3, 3], filter_in_sizes=[1, 2, 3, 3], strides=[1, 1], dilations=[2, 1], padding="VALID") @test_util.run_in_graph_and_eager_modes def testConv2D2x2FilterStride2(self): expected_output = [2271.0, 2367.0, 2463.0] self._VerifyValues( tensor_in_sizes=[1, 2, 3, 3], filter_in_sizes=[2, 2, 3, 3], strides=[2, 2], padding="VALID", expected=expected_output) @test_util.run_in_graph_and_eager_modes def testConv2D2x2FilterStride2Same(self): expected_output = [2271.0, 2367.0, 2463.0, 1230.0, 1305.0, 1380.0] self._VerifyValues( tensor_in_sizes=[1, 2, 3, 3], filter_in_sizes=[2, 2, 3, 3], strides=[2, 2], padding="SAME", expected=expected_output) @test_util.run_in_graph_and_eager_modes def testConv2D2x2FilterStride1x2(self): expected_output = [58.0, 78.0, 98.0, 118.0, 138.0, 158.0] self._VerifyValues( tensor_in_sizes=[1, 3, 6, 1], filter_in_sizes=[2, 2, 1, 1], strides=[1, 2], padding="VALID", expected=expected_output) @test_util.run_in_graph_and_eager_modes def testConv2DKernelSmallerThanStrideValid(self): expected_output = [65, 95, 275, 305] self._VerifyValues( tensor_in_sizes=[1, 7, 7, 1], filter_in_sizes=[2, 2, 1, 1], strides=[3, 3], padding="VALID", expected=expected_output) @test_util.run_in_graph_and_eager_modes def testConv2DKernelSmallerThanStrideSame(self): self._VerifyValues( tensor_in_sizes=[1, 3, 3, 1], filter_in_sizes=[1, 1, 1, 1], strides=[2, 2], padding="SAME", expected=[1, 3, 7, 9]) self._VerifyValues( tensor_in_sizes=[1, 4, 4, 1], filter_in_sizes=[1, 1, 1, 1], strides=[2, 2], padding="SAME", expected=[1, 3, 9, 11]) self._VerifyValues( tensor_in_sizes=[1, 4, 4, 1], filter_in_sizes=[2, 2, 1, 1], strides=[3, 3], padding="SAME", expected=[44, 28, 41, 16]) @test_util.run_in_graph_and_eager_modes def testConv2DKernelSizeMatchesInputSize(self): self._VerifyValues( tensor_in_sizes=[1, 2, 2, 1], filter_in_sizes=[2, 2, 1, 2], strides=[1, 1], padding="VALID", expected=[50, 60]) @test_util.run_in_graph_and_eager_modes def testConv2DKernelSizeMatchesInputSizeDilation(self): self._VerifyDilatedConvValues( tensor_in_sizes=[1, 3, 3, 1], filter_in_sizes=[2, 2, 1, 2], strides=[1, 1], dilations=[2, 2], padding="VALID") @test_util.run_in_graph_and_eager_modes() def testConv2D0x0Padding(self): self._VerifyExplicitPaddings( tensor_in_sizes=[1, 2, 3, 3], filter_in_sizes=[2, 2, 3, 3], strides=[1, 1], padding=[[0, 0], [0, 0]]) self._VerifyExplicitPaddings( tensor_in_sizes=[3, 4, 3, 2], filter_in_sizes=[1, 1, 2, 1], strides=[2, 2], padding=[[0, 0], [0, 0]]) @test_util.run_in_graph_and_eager_modes() def testConv2D1x1Padding(self): self._VerifyExplicitPaddings( tensor_in_sizes=[1, 2, 3, 2], filter_in_sizes=[2, 2, 2, 2], strides=[1, 1], padding=[[1, 1], [1, 1]]) self._VerifyExplicitPaddings( tensor_in_sizes=[1, 2, 2, 1], filter_in_sizes=[1, 1, 1, 2], strides=[1, 1], padding=[[1, 1], [1, 1]]) @test_util.run_in_graph_and_eager_modes() def testConv2D2x2Padding(self): self._VerifyExplicitPaddings( tensor_in_sizes=[1, 2, 1, 2], filter_in_sizes=[2, 1, 2, 1], strides=[1, 1], padding=[[2, 2], [2, 2]]) self._VerifyExplicitPaddings( tensor_in_sizes=[1, 2, 1, 2], filter_in_sizes=[1, 1, 2, 1], strides=[2, 1], padding=[[2, 2], [2, 2]]) 
@test_util.run_in_graph_and_eager_modes() def testConv2DOnlyBottomPadding(self): self._VerifyExplicitPaddings( tensor_in_sizes=[1, 2, 3, 3], filter_in_sizes=[2, 2, 3, 2], strides=[1, 1], padding=[[0, 3], [0, 0]], tol=2e-5) self._VerifyExplicitPaddings( tensor_in_sizes=[2, 2, 4, 3], filter_in_sizes=[1, 2, 3, 2], strides=[2, 2], padding=[[0, 3], [0, 0]]) @test_util.run_in_graph_and_eager_modes() def testConv2DOnlyTopRightPadding(self): self._VerifyExplicitPaddings( tensor_in_sizes=[1, 2, 3, 3], filter_in_sizes=[2, 2, 3, 2], strides=[1, 1], padding=[[1, 0], [0, 2]], tol=5e-5) self._VerifyExplicitPaddings( tensor_in_sizes=[1, 2, 4, 2], filter_in_sizes=[2, 2, 2, 2], strides=[1, 3], padding=[[1, 0], [0, 2]]) @test_util.run_in_graph_and_eager_modes() def testConv2DLotsPadding(self): self._VerifyExplicitPaddings( tensor_in_sizes=[1, 1, 1, 3], filter_in_sizes=[2, 2, 3, 3], strides=[1, 1], padding=[[3, 4], [4, 2]]) self._VerifyExplicitPaddings( tensor_in_sizes=[1, 2, 1, 1], filter_in_sizes=[2, 2, 1, 3], strides=[2, 1], padding=[[3, 4], [4, 2]]) @test_util.run_in_graph_and_eager_modes() def testConv2DExplicitPaddingWithDilations(self): self._VerifyExplicitPaddings( tensor_in_sizes=[1, 3, 2, 1], filter_in_sizes=[1, 2, 1, 2], strides=[1, 1], padding=[[1, 0], [0, 1]], dilations=[2, 1]) self._VerifyExplicitPaddings( tensor_in_sizes=[1, 2, 3, 2], filter_in_sizes=[3, 2, 2, 1], strides=[1, 1], padding=[[2, 1], [1, 2]], dilations=[2, 3]) def testConv2DExplicitPaddingWithLayoutOptimizer(self): # Test with Grappler's layout optimizer, to ensure the layout optimizer # handles explicit padding correctly. self._VerifyExplicitPaddings( tensor_in_sizes=[1, 3, 2, 1], filter_in_sizes=[1, 2, 1, 2], strides=[1, 1], padding=[[1, 0], [0, 1]], dilations=[2, 1], test_grappler_layout_optimizer=True) self._VerifyExplicitPaddings( tensor_in_sizes=[1, 2, 3, 2], filter_in_sizes=[3, 2, 2, 1], strides=[1, 1], padding=[[2, 1], [1, 2]], dilations=[2, 3], test_grappler_layout_optimizer=True) def _VerifyGroupConvFwd(self, tensor_in_sizes, filter_in_sizes, dilations, strides, padding, data_format, dtype): """Verify the output of group convolution is equal to a for-loop implementation. Args: tensor_in_sizes: Input tensor dimensions in [batch, input_rows, input_cols, input_depth]. filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols, input_depth, output_depth]. dilations: Dilated rate: [col_dilation, row_dilation] strides: Stride: [col_stride, row_stride] padding: Padding type. data_format: Format of the data tensors. dtype: Data type for inputs and outputs. 
""" tensor_in = self._CreateNumpyTensor(tensor_in_sizes) filter_in = self._CreateNumpyTensor(filter_in_sizes) num_groups = tensor_in_sizes[3] // filter_in_sizes[2] assert num_groups > 1 and \ filter_in_sizes[2] * num_groups == tensor_in_sizes[3] with test_util.device(True): t1 = constant_op.constant(tensor_in, dtype=dtype) t2 = constant_op.constant(filter_in, dtype=dtype) strides = [1] + strides + [1] dilations = [1] + dilations + [1] if data_format == "NCHW": t1 = test_util.NHWCToNCHW(t1) strides = test_util.NHWCToNCHW(strides) dilations = test_util.NHWCToNCHW(dilations) t1_splits = array_ops.split(t1, num_groups, axis=1) else: t1_splits = array_ops.split(t1, num_groups, axis=3) t2_splits = array_ops.split(t2, num_groups, axis=3) def MakeConv2d(inputs, filters): return nn_ops.conv2d( inputs, filters, strides, padding, dilations=dilations, data_format=data_format) group_conv = MakeConv2d(t1, t2) group_conv_loop = array_ops.concat( [MakeConv2d(t1s, t2s) for t1s, t2s in zip(t1_splits, t2_splits)], axis=1 if data_format == "NCHW" else 3) results = self.evaluate([group_conv, group_conv_loop]) tol_to_use = 1e-5 self.assertAllClose( results[0], results[1], atol=tol_to_use, rtol=tol_to_use) @test_util.run_in_graph_and_eager_modes @test_util.run_cuda_only def testConv2DGroupConvFwd(self): for data_format in ["NHWC", "NCHW"]: for dilation in [1, 2]: for stride in [1, 2]: self._VerifyGroupConvFwd([10, 32, 32, 16], [3, 3, 4, 8], dilations=[dilation, dilation], strides=[stride, stride], padding="SAME", data_format=data_format, dtype=dtypes.float32) @test_util.deprecated_graph_mode_only @test_util.run_cuda_only def testInputGradientGroupConv(self): for data_format in ["NCHW", "NHWC"]: for test_input in [True, False]: self.ConstructAndTestGradient( batch=2, input_rows=5, input_cols=4, filter_rows=3, filter_cols=3, num_groups=2, padding="VALID", in_depth=4, out_depth=6, stride_rows=1, stride_cols=1, test_input=test_input, data_format=data_format, use_gpu=True, max_err=0.005) @test_util.deprecated_graph_mode_only @test_util.run_cuda_only def testFilterGradientGroupConv(self): for data_format in ["NCHW", "NHWC"]: for test_input in [True, False]: self.ConstructAndTestGradient( batch=2, input_rows=5, input_cols=4, filter_rows=3, filter_cols=3, num_groups=2, padding="VALID", in_depth=4, out_depth=6, stride_rows=1, stride_cols=1, test_input=test_input, data_format=data_format, use_gpu=True, max_err=0.005) # TODO(yzhwang): this currently fails. 
# self._VerifyValues(tensor_in_sizes=[1, 8, 8, 1], # filter_in_sizes=[2, 2, 1, 1], # strides=[4, 4], padding="SAME", # expected=[72, 112, 392, 432]) # Testing for backprops def _RunAndVerifyBackpropInput(self, input_sizes, filter_sizes, output_sizes, strides, padding, expected, data_format, use_gpu, err, dilations=(1, 1)): if use_gpu and not test.is_gpu_available(cuda_only=True): return x1 = self._CreateNumpyTensor(filter_sizes) x2 = self._CreateNumpyTensor(output_sizes) dilations = list(dilations) with test_util.device(use_gpu): if len(input_sizes) == 4: if data_format == "NCHW": input_sizes = test_util.NHWCToNCHW(input_sizes) t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)]) t1 = constant_op.constant(x1, shape=filter_sizes) t2 = constant_op.constant(x2, shape=output_sizes) strides = [1] + strides + [1] dilations = [1] + dilations + [1] if isinstance(padding, (list, tuple)): padding = [(0, 0)] + padding + [(0, 0)] if data_format == "NCHW": t2 = test_util.NHWCToNCHW(t2) strides = test_util.NHWCToNCHW(strides) dilations = test_util.NHWCToNCHW(dilations) if isinstance(padding, (list, tuple)): padding = test_util.NHWCToNCHW((padding)) conv = nn_ops.conv2d_backprop_input( t0, t1, t2, strides=strides, padding=padding, data_format=data_format, dilations=dilations) if data_format == "NCHW": conv = test_util.NCHWToNHWC(conv) # "values" consists of two tensors for two backprops value = self.evaluate(conv) self.assertShapeEqual(value, conv) tf_logging.debug("expected = %s", expected) tf_logging.debug("actual = %s", value) self.assertAllCloseAccordingToType(expected, value.flatten(), atol=1e-5) def _CompareBackpropInput(self, input_sizes, filter_sizes, output_sizes, conv_strides, padding): x1 = np.random.rand(*filter_sizes).astype(np.float32) x2 = np.random.rand(*output_sizes).astype(np.float32) def _GetVal(data_format, use_gpu): with test_util.device(use_gpu): if data_format == "NCHW": new_input_sizes = test_util.NHWCToNCHW(input_sizes) else: new_input_sizes = input_sizes t0 = constant_op.constant(new_input_sizes, shape=[len(new_input_sizes)]) t1 = constant_op.constant(x1, shape=filter_sizes) t2 = constant_op.constant(x2, shape=output_sizes) strides = [1] + conv_strides + [1] if data_format == "NCHW": t2 = test_util.NHWCToNCHW(t2) strides = test_util.NHWCToNCHW(strides) conv = nn_ops.conv2d_backprop_input( t0, t1, t2, strides=strides, padding=padding, data_format=data_format) if data_format == "NCHW": conv = test_util.NCHWToNHWC(conv) ret = self.evaluate(conv) self.assertShapeEqual(ret, conv) return ret values = [] for (data_format, use_gpu) in GetTestConfigs(): values.append(_GetVal(data_format, use_gpu)) for i in range(1, len(values)): self.assertAllClose(values[0], values[i], rtol=1e-2, atol=1e-2) @test_util.run_in_graph_and_eager_modes def testConv2D2x2Depth1ValidBackpropInput(self): expected_output = [1.0, 4.0, 4.0, 3.0, 10.0, 8.0] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInput( input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 1, 2, 1], strides=[1, 1], padding="VALID", expected=expected_output, data_format=data_format, use_gpu=use_gpu, err=1e-5) @test_util.run_in_graph_and_eager_modes def testConv2DEmptyBackpropInput(self): expected_output = [] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInput( input_sizes=[0, 2, 3, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[0, 1, 2, 1], strides=[1, 1], padding="VALID", expected=expected_output, data_format=data_format, use_gpu=use_gpu, err=1e-5) 
@test_util.run_in_graph_and_eager_modes def testConv2D2x2Depth3ValidBackpropInput(self): expected_output = [ 14.0, 32.0, 50.0, 100.0, 163.0, 226.0, 167.0, 212.0, 257.0, 122.0, 140.0, 158.0, 478.0, 541.0, 604.0, 437.0, 482.0, 527.0 ] for (data_format, use_gpu) in GetTestConfigs(): # The GPU version of this test is not very stable. So adjusting the # error threshold to 1e-4. self._RunAndVerifyBackpropInput( input_sizes=[1, 2, 3, 3], filter_sizes=[2, 2, 3, 3], output_sizes=[1, 1, 2, 3], strides=[1, 1], padding="VALID", expected=expected_output, data_format=data_format, use_gpu=use_gpu, err=1e-4) @test_util.run_in_graph_and_eager_modes def testConv2D2x2Depth3ValidBackpropInputStride1x2(self): expected_output = [ 1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 7.0, 12.0, 11.0, 18.0, 15.0, 24.0, 12.0, 16.0, 15.0, 20.0, 18.0, 24.0 ] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInput( input_sizes=[1, 3, 6, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 2, 3, 1], strides=[1, 2], padding="VALID", expected=expected_output, data_format=data_format, use_gpu=use_gpu, err=1e-5) @test_util.run_in_graph_and_eager_modes def testConv2DStrideTwoFilterOneSameBackpropInput(self): expected_output = [ 1.0, 0.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0 ] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInput( input_sizes=[1, 4, 4, 1], filter_sizes=[1, 1, 1, 1], output_sizes=[1, 2, 2, 1], strides=[2, 2], padding="SAME", expected=expected_output, data_format=data_format, use_gpu=use_gpu, err=1e-5) @test_util.run_in_graph_and_eager_modes def testConv2DKernelSizeMatchesInputSizeBackpropInput(self): expected_output = [5.0, 11.0, 17.0, 23.0] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInput( input_sizes=[1, 2, 2, 1], filter_sizes=[2, 2, 1, 2], output_sizes=[1, 1, 1, 2], strides=[1, 1], padding="VALID", expected=expected_output, data_format=data_format, use_gpu=use_gpu, err=1e-5) @test_util.run_in_graph_and_eager_modes @test_util.disable_xla("XLA requires input_sizes to be a 4D shape.") def testConv2DInputSizesContainsOnlySpatialDimensionsBackpropInput(self): expected_output = [5.0, 11.0, 17.0, 23.0] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInput( input_sizes=[2, 2], filter_sizes=[2, 2, 1, 2], output_sizes=[1, 1, 1, 2], strides=[1, 1], padding="VALID", expected=expected_output, data_format=data_format, use_gpu=use_gpu, err=1e-5) # Testing for backprops def _RunAndVerifyBackpropFilter(self, input_sizes, filter_sizes, output_sizes, strides, padding, expected, data_format, use_gpu, dilations=(1, 1), err=1e-5): x0 = self._CreateNumpyTensor(input_sizes) x2 = self._CreateNumpyTensor(output_sizes) dilations = list(dilations) explicit_strides = [1] + strides + [1] new_padding = padding new_dilations = [1] + dilations + [1] if isinstance(new_padding, (list, tuple)): new_padding = [(0, 0)] + new_padding + [(0, 0)] if data_format == "NCHW": explicit_strides = test_util.NHWCToNCHW(explicit_strides) new_dilations = test_util.NHWCToNCHW(new_dilations) if isinstance(padding, (list, tuple)): new_padding = test_util.NHWCToNCHW(new_padding) for dtype in self._DtypesToTest(use_gpu=use_gpu): with test_util.device(use_gpu): t0 = constant_op.constant(x0, shape=input_sizes, dtype=dtype) t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)]) t2 = constant_op.constant(x2, shape=output_sizes, dtype=dtype) if data_format == "NCHW": t0 = test_util.NHWCToNCHW(t0) t2 = test_util.NHWCToNCHW(t2) conv = 
nn_ops.conv2d_backprop_filter( t0, t1, t2, strides=explicit_strides, padding=new_padding, dilations=new_dilations, data_format=data_format) value = self.evaluate(conv) self.assertShapeEqual(value, conv) tf_logging.debug("expected = %s", expected) tf_logging.debug("actual = %s", value) self.assertArrayNear(expected, value.flatten(), err) def _CompareBackFilter(self, input_sizes, filter_sizes, output_sizes, conv_strides, padding): x0 = np.random.rand(*input_sizes).astype(np.float32) x2 = np.random.rand(*output_sizes).astype(np.float32) def _GetVal(data_format, use_gpu): with test_util.device(use_gpu): t0 = constant_op.constant(x0, shape=input_sizes) t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)]) t2 = constant_op.constant(x2, shape=output_sizes) strides = [1] + conv_strides + [1] if data_format == "NCHW": t0 = test_util.NHWCToNCHW(t0) t2 = test_util.NHWCToNCHW(t2) strides = test_util.NHWCToNCHW(strides) conv = nn_ops.conv2d_backprop_filter( t0, t1, t2, strides=strides, padding=padding, data_format=data_format) ret = self.evaluate(conv) self.assertShapeEqual(ret, conv) return ret values = [] for (data_format, use_gpu) in GetTestConfigs(): values.append(_GetVal(data_format, use_gpu)) for i in range(1, len(values)): self.assertAllClose(values[0], values[i], rtol=1e-4, atol=1e-4) @test_util.run_in_graph_and_eager_modes def testConv2D2x2Depth1ValidBackpropFilter(self): expected = [5.0, 8.0, 14.0, 17.0] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilter( input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 1, 2, 1], strides=[1, 1], padding="VALID", expected=expected, data_format=data_format, use_gpu=use_gpu) @test_util.run_in_graph_and_eager_modes def testConv2DEmptyBackpropFilter(self): expected = [] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilter( input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 0], output_sizes=[1, 1, 2, 0], strides=[1, 1], padding="VALID", expected=expected, data_format=data_format, use_gpu=use_gpu) @test_util.run_in_graph_and_eager_modes def testConv2DBackpropFilterWithEmptyInput(self): expected = [0, 0, 0, 0] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilter( input_sizes=[0, 2, 3, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[0, 1, 2, 1], strides=[1, 1], padding="VALID", expected=expected, data_format=data_format, use_gpu=use_gpu) @test_util.run_in_graph_and_eager_modes def testConv2D2x2Depth3ValidBackpropFilter(self): expected = [ 17.0, 22.0, 27.0, 22.0, 29.0, 36.0, 27.0, 36.0, 45.0, 32.0, 43.0, 54.0, 37.0, 50.0, 63.0, 42.0, 57.0, 72.0, 62.0, 85.0, 108.0, 67.0, 92.0, 117.0, 72.0, 99.0, 126.0, 77.0, 106.0, 135.0, 82.0, 113.0, 144.0, 87.0, 120.0, 153.0 ] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilter( input_sizes=[1, 2, 3, 3], filter_sizes=[2, 2, 3, 3], output_sizes=[1, 1, 2, 3], strides=[1, 1], padding="VALID", expected=expected, data_format=data_format, use_gpu=use_gpu) @test_util.run_in_graph_and_eager_modes def testConv2D2x2Depth3ValidBackpropFilterStride1x2(self): expected = [161.0, 182.0, 287.0, 308.0] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilter( input_sizes=[1, 3, 6, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 2, 3, 1], strides=[1, 2], padding="VALID", expected=expected, data_format=data_format, use_gpu=use_gpu) @test_util.run_in_graph_and_eager_modes def testConv2DStrideTwoFilterOneSameBackpropFilter(self): expected_output = [78.] 
for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilter( input_sizes=[1, 4, 4, 1], filter_sizes=[1, 1, 1, 1], output_sizes=[1, 2, 2, 1], strides=[2, 2], padding="SAME", expected=expected_output, data_format=data_format, use_gpu=use_gpu) @test_util.run_in_graph_and_eager_modes def testConv2DKernelSizeMatchesInputSizeBackpropFilter(self): expected_output = [1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 4.0, 8.0] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilter( input_sizes=[1, 2, 2, 1], filter_sizes=[2, 2, 1, 2], output_sizes=[1, 1, 1, 2], strides=[1, 1], padding="VALID", expected=expected_output, data_format=data_format, use_gpu=use_gpu) # Testing for backprops def _RunAndVerifyBackpropInputDilation(self, input_sizes, filter_sizes, output_sizes, strides, dilations, padding, data_format, use_gpu, err): x1 = self._CreateNumpyTensor(input_sizes) x2 = self._CreateNumpyTensor(filter_sizes) default_dilations = (dilations[0] == 1 and dilations[1] == 1) if default_dilations or use_gpu: with self.cached_session(use_gpu=use_gpu) as sess: if data_format == "NCHW": input_sizes = test_util.NHWCToNCHW(input_sizes) t1 = constant_op.constant(x1, shape=input_sizes) t2 = constant_op.constant(x2, shape=filter_sizes) full_strides = [1] + strides + [1] full_dilations = [1] + dilations + [1] if data_format == "NCHW": full_strides = test_util.NHWCToNCHW(full_strides) full_dilations = test_util.NHWCToNCHW(full_dilations) conv_forward = nn_ops.conv2d( t1, t2, strides=full_strides, dilations=full_dilations, padding=padding, data_format=data_format) conv_forward_2 = nn_ops.convolution( t1, t2, padding=padding, strides=strides, dilation_rate=dilations, data_format=data_format) if data_format == "NCHW": conv_forward = test_util.NCHWToNHWC(conv_forward) conv_forward_2 = test_util.NCHWToNHWC(conv_forward_2) conv = gradients_impl.gradients(conv_forward, t1)[0] conv_2 = gradients_impl.gradients(conv_forward_2, t1)[0] # "values" consists of two tensors for two backprops value = self.evaluate(conv) value_2 = self.evaluate(conv_2) self.assertShapeEqual(value, conv) self.assertShapeEqual(value_2, conv_2) tf_logging.debug("expected = %s", value_2) tf_logging.debug("actual = %s", value) self.assertArrayNear(value_2.flatten(), value.flatten(), err) # Testing for backprops def _RunAndVerifyBackpropFilterDilation(self, input_sizes, filter_sizes, output_sizes, strides, dilations, padding, data_format, use_gpu, err): x1 = self._CreateNumpyTensor(input_sizes) x2 = self._CreateNumpyTensor(filter_sizes) default_dilations = (dilations[0] == 1 and dilations[1] == 1) if default_dilations or use_gpu: with self.cached_session(use_gpu=use_gpu) as sess: if data_format == "NCHW": input_sizes = test_util.NHWCToNCHW(input_sizes) t1 = constant_op.constant(x1, shape=input_sizes) t2 = constant_op.constant(x2, shape=filter_sizes) full_strides = [1] + strides + [1] full_dilations = [1] + dilations + [1] if data_format == "NCHW": full_strides = test_util.NHWCToNCHW(full_strides) full_dilations = test_util.NHWCToNCHW(full_dilations) conv_forward = nn_ops.conv2d( t1, t2, strides=full_strides, dilations=full_dilations, padding=padding, data_format=data_format) conv_forward_2 = nn_ops.convolution( t1, t2, padding=padding, strides=strides, dilation_rate=dilations, data_format=data_format) if data_format == "NCHW": conv_forward = test_util.NCHWToNHWC(conv_forward) conv_forward_2 = test_util.NCHWToNHWC(conv_forward_2) conv = gradients_impl.gradients(conv_forward, t2)[0] conv_2 = gradients_impl.gradients(conv_forward, 
t2)[0] value = self.evaluate(conv) value_2 = self.evaluate(conv_2) self.assertShapeEqual(value, conv) self.assertShapeEqual(value_2, conv_2) tf_logging.debug("expected = %s", value_2) tf_logging.debug("actual = %s", value) self.assertArrayNear(value_2.flatten(), value.flatten(), err) @test_util.deprecated_graph_mode_only def testConv2D2x2Depth3ValidBackpropFilterStride1x1Dilation2x1(self): if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled(): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilterDilation( input_sizes=[1, 3, 6, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 1, 5, 1], strides=[1, 1], dilations=[2, 1], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-5) @test_util.deprecated_graph_mode_only def testConv2D2x2Depth1ValidBackpropFilterDilation1x2(self): if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled(): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilterDilation( input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 1, 2, 1], strides=[1, 1], dilations=[1, 2], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-5) @test_util.deprecated_graph_mode_only def testConv2DEmptyBackpropFilterDilation1x2(self): if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled(): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilterDilation( input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 0], output_sizes=[1, 1, 2, 0], strides=[1, 1], dilations=[1, 2], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-5) @test_util.deprecated_graph_mode_only def testConv2D2x2Depth3ValidBackpropFilterDilation2x2(self): if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled(): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilterDilation( input_sizes=[1, 3, 4, 3], filter_sizes=[2, 2, 3, 3], output_sizes=[1, 1, 2, 3], strides=[1, 1], dilations=[2, 2], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-5) @test_util.deprecated_graph_mode_only def testConv2DKernelSizeMatchesInputSizeBackpropFilterDilation2x2(self): if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled(): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilterDilation( input_sizes=[1, 3, 3, 1], filter_sizes=[2, 2, 1, 2], output_sizes=[1, 1, 1, 2], strides=[1, 1], dilations=[2, 2], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-5) @test_util.deprecated_graph_mode_only def testConv2D2x2Depth3ValidBackpropInputStride1x1Dilation2x1(self): if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled(): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInputDilation( input_sizes=[1, 3, 6, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 1, 5, 1], strides=[1, 1], dilations=[2, 1], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-5) @test_util.deprecated_graph_mode_only def testConv2D2x2Depth1ValidBackpropInputDilation1x2(self): if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled(): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInputDilation( input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 1, 2, 1], strides=[1, 1], dilations=[1, 2], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-5) @test_util.deprecated_graph_mode_only def testConv2DEmptyBackpropInputDilation1x2(self): if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled(): 
for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInputDilation( input_sizes=[0, 2, 3, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[0, 1, 2, 1], strides=[1, 1], dilations=[1, 2], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-5) @test_util.deprecated_graph_mode_only def testConv2D2x2Depth3ValidBackpropInputDilation2x1(self): if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled(): for (data_format, use_gpu) in GetTestConfigs(): # The GPU version of this test is not very stable. So adjusting the # error threshold to 1e-4. self._RunAndVerifyBackpropInputDilation( input_sizes=[1, 3, 2, 3], filter_sizes=[2, 2, 3, 3], output_sizes=[1, 1, 2, 3], strides=[1, 1], dilations=[2, 1], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-4) @test_util.deprecated_graph_mode_only def testConv2DKernelSizeMatchesInputSizeBackpropInputDilation2x2(self): if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled(): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInputDilation( input_sizes=[1, 3, 3, 1], filter_sizes=[2, 2, 1, 2], output_sizes=[1, 1, 1, 2], strides=[1, 1], dilations=[2, 2], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-5) def _RunAndVerifyBackpropInputExplicitPadding(self, input_sizes, filter_sizes, output_sizes, strides, padding, data_format, use_gpu, dilations=(1, 1), err=2e-5): if use_gpu and not test.is_gpu_available(cuda_only=True): return if not use_gpu and dilations != (1, 1): return # Non-default dilations is currently not supported on the CPU. x1 = self._CreateNumpyTensor(filter_sizes) x2 = self._CreateNumpyTensor(output_sizes) dilations = list(dilations) padded_input_sizes = input_sizes[:] padded_input_sizes[1] += padding[0][0] + padding[0][1] padded_input_sizes[2] += padding[1][0] + padding[1][1] c = nn_ops.conv2d_backprop_input( padded_input_sizes, x1, x2, strides=[1] + strides + [1], padding="VALID", dilations=[1] + dilations + [1]) c = c[:, padding[0][0]:(c.shape[1] - padding[0][1]), padding[1][0]:( c.shape[2] - padding[1][1]), :] expected = list(self.evaluate(array_ops.reshape(c, [-1]))) self._RunAndVerifyBackpropInput( input_sizes, filter_sizes, output_sizes, strides, padding, expected, data_format, use_gpu=use_gpu, err=err, dilations=dilations) @test_util.run_in_graph_and_eager_modes() def testConv2D2x2Depth1Padding0x0BackpropInput(self): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInputExplicitPadding( input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 1, 2, 1], strides=[1, 1], padding=[[0, 0], [0, 0]], data_format=data_format, use_gpu=use_gpu) self._RunAndVerifyBackpropInputExplicitPadding( input_sizes=[1, 3, 4, 2], filter_sizes=[2, 2, 2, 3], output_sizes=[1, 1, 2, 3], strides=[2, 2], padding=[[0, 0], [0, 0]], data_format=data_format, use_gpu=use_gpu) @test_util.run_in_graph_and_eager_modes() def testConv2D2x2Depth1Padding1x1BackpropInput(self): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInputExplicitPadding( input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 2], output_sizes=[1, 3, 4, 2], strides=[1, 1], padding=[[1, 1], [1, 1]], data_format=data_format, use_gpu=use_gpu, err=1e-4) self._RunAndVerifyBackpropInputExplicitPadding( input_sizes=[1, 2, 3, 2], filter_sizes=[1, 1, 2, 1], output_sizes=[1, 4, 3, 1], strides=[1, 2], padding=[[1, 1], [1, 1]], data_format=data_format, use_gpu=use_gpu) self._RunAndVerifyBackpropInputExplicitPadding( input_sizes=[1, 4, 3, 1], 
filter_sizes=[2, 2, 1, 1], output_sizes=[1, 4, 2, 1], strides=[1, 2], padding=[[1, 1], [1, 1]], data_format=data_format, dilations=[2, 2], use_gpu=use_gpu) @test_util.run_in_graph_and_eager_modes() def testConv2D2x2Depth1Padding2x2BackpropInput(self): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInputExplicitPadding( input_sizes=[2, 3, 1, 1], filter_sizes=[2, 1, 1, 1], output_sizes=[2, 2, 5, 1], strides=[3, 1], padding=[[2, 2], [2, 2]], data_format=data_format, use_gpu=use_gpu) self._RunAndVerifyBackpropInputExplicitPadding( input_sizes=[1, 3, 6, 1], filter_sizes=[3, 2, 1, 1], output_sizes=[1, 3, 4, 1], strides=[1, 2], padding=[[2, 2], [2, 2]], data_format=data_format, dilations=[2, 3], use_gpu=use_gpu) @test_util.run_in_graph_and_eager_modes() def testConv2D2x2Depth1Padding_1_8_4_1_BackpropInput(self): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInputExplicitPadding( input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 10, 8, 1], strides=[1, 1], padding=[[1, 8], [4, 2]], data_format=data_format, use_gpu=use_gpu, err=5e-5) self._RunAndVerifyBackpropInputExplicitPadding( input_sizes=[1, 5, 3, 1], filter_sizes=[3, 2, 1, 1], output_sizes=[1, 4, 8, 1], strides=[3, 1], padding=[[1, 8], [4, 2]], data_format=data_format, use_gpu=use_gpu) @test_util.run_in_graph_and_eager_modes() def testConv2D2x2Depth1Padding_5_0_2_2_BackpropInput(self): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInputExplicitPadding( input_sizes=[1, 3, 3, 1], filter_sizes=[2, 1, 1, 1], output_sizes=[1, 7, 7, 1], strides=[1, 1], padding=[[5, 0], [2, 2]], data_format=data_format, err=5e-5, use_gpu=use_gpu) self._RunAndVerifyBackpropInputExplicitPadding( input_sizes=[1, 4, 2, 1], filter_sizes=[3, 3, 1, 1], output_sizes=[1, 5, 2, 1], strides=[1, 2], padding=[[5, 0], [2, 2]], data_format=data_format, dilations=[2, 1], use_gpu=use_gpu) def _RunAndVerifyBackpropFilterExplicitPadding(self, input_sizes, filter_sizes, output_sizes, strides, padding, data_format, use_gpu, dilations=(1, 1), err=1e-5): if use_gpu and not test.is_gpu_available(cuda_only=True): return if not use_gpu and dilations != (1, 1): return # Non-default dilations is currently not supported on the CPU. 
x0 = self._CreateNumpyTensor(input_sizes) x2 = self._CreateNumpyTensor(output_sizes) dilations = list(dilations) x0 = np.pad(x0, [(0, 0)] + padding + [(0, 0)], "constant") c = nn_ops.conv2d_backprop_filter( x0, filter_sizes, x2, strides=[1] + strides + [1], padding="VALID", dilations=[1] + dilations + [1]) expected = list(self.evaluate(array_ops.reshape(c, [-1]))) self._RunAndVerifyBackpropFilter( input_sizes, filter_sizes, output_sizes, strides, padding, expected, data_format, use_gpu=use_gpu, dilations=dilations, err=err) @test_util.run_in_graph_and_eager_modes() def testConv2D2x2Depth1Padding0x0BackpropFilter(self): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilterExplicitPadding( input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 1, 2, 1], strides=[1, 1], padding=[[0, 0], [0, 0]], data_format=data_format, use_gpu=use_gpu) self._RunAndVerifyBackpropFilterExplicitPadding( input_sizes=[1, 3, 4, 2], filter_sizes=[2, 2, 2, 3], output_sizes=[1, 1, 2, 3], strides=[2, 2], padding=[[0, 0], [0, 0]], data_format=data_format, use_gpu=use_gpu) @test_util.run_in_graph_and_eager_modes() def testConv2D2x2Depth1Padding1x1BackpropFilter(self): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilterExplicitPadding( input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 2], output_sizes=[1, 3, 4, 2], strides=[1, 1], padding=[[1, 1], [1, 1]], data_format=data_format, use_gpu=use_gpu, err=5e-5) self._RunAndVerifyBackpropFilterExplicitPadding( input_sizes=[1, 2, 3, 2], filter_sizes=[1, 1, 2, 1], output_sizes=[1, 4, 3, 1], strides=[1, 2], padding=[[1, 1], [1, 1]], use_gpu=use_gpu, data_format=data_format) self._RunAndVerifyBackpropFilterExplicitPadding( input_sizes=[1, 4, 3, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 4, 2, 1], strides=[1, 2], padding=[[1, 1], [1, 1]], data_format=data_format, use_gpu=use_gpu, dilations=[2, 2]) @test_util.run_in_graph_and_eager_modes() def testConv2D2x2Depth1Padding2x2BackpropFilter(self): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilterExplicitPadding( input_sizes=[2, 3, 1, 1], filter_sizes=[2, 1, 1, 1], output_sizes=[2, 2, 5, 1], strides=[3, 1], padding=[[2, 2], [2, 2]], data_format=data_format, use_gpu=use_gpu) self._RunAndVerifyBackpropFilterExplicitPadding( input_sizes=[1, 3, 6, 1], filter_sizes=[3, 2, 1, 1], output_sizes=[1, 3, 4, 1], strides=[1, 2], padding=[[2, 2], [2, 2]], data_format=data_format, use_gpu=use_gpu, dilations=[2, 3]) @test_util.run_in_graph_and_eager_modes() def testConv2D2x2Depth1Padding_1_8_4_1_BackpropFilter(self): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilterExplicitPadding( input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 10, 8, 1], strides=[1, 1], padding=[[1, 8], [4, 2]], data_format=data_format, use_gpu=use_gpu, err=1e-4) self._RunAndVerifyBackpropFilterExplicitPadding( input_sizes=[1, 5, 3, 1], filter_sizes=[3, 2, 1, 1], output_sizes=[1, 4, 8, 1], strides=[3, 1], padding=[[1, 8], [4, 2]], use_gpu=use_gpu, data_format=data_format) @test_util.run_in_graph_and_eager_modes() def testConv2D2x2Depth1Padding_5_0_2_2_BackpropFilter(self): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilterExplicitPadding( input_sizes=[1, 3, 3, 1], filter_sizes=[2, 1, 1, 1], output_sizes=[1, 7, 7, 1], strides=[1, 1], padding=[[5, 0], [2, 2]], data_format=data_format, use_gpu=use_gpu, err=1e-4) self._RunAndVerifyBackpropFilterExplicitPadding( input_sizes=[1, 4, 2, 1], 
filter_sizes=[3, 3, 1, 1], output_sizes=[1, 5, 2, 1], strides=[1, 2], padding=[[5, 0], [2, 2]], data_format=data_format, use_gpu=use_gpu, dilations=[2, 1]) # Gradient checkers def ConstructAndTestGradient(self, batch, input_rows, input_cols, filter_rows, filter_cols, in_depth, out_depth, stride_rows, stride_cols, padding, test_input, data_format, use_gpu, num_groups=1, max_err=0.003): assert in_depth % num_groups == 0 and out_depth % num_groups == 0 input_shape = [batch, input_rows, input_cols, in_depth] filter_shape = [filter_rows, filter_cols, in_depth // num_groups, out_depth] # TODO(yangke): re-factor the computation of output shape. if padding == "VALID": output_rows = (input_rows - filter_rows + stride_rows) // stride_rows output_cols = (input_cols - filter_cols + stride_cols) // stride_cols elif padding == "SAME": output_rows = (input_rows + stride_rows - 1) // stride_rows output_cols = (input_cols + stride_cols - 1) // stride_cols else: self.assertIsInstance(padding, (list, tuple)) output_rows = (input_rows + padding[1][0] + padding[1][1] - filter_rows + stride_rows) // stride_rows output_cols = (input_cols + padding[2][0] + padding[2][1] - filter_cols + stride_cols) // stride_cols output_shape = [batch, output_rows, output_cols, out_depth] input_size = 1 for x in input_shape: input_size *= x filter_size = 1 for x in filter_shape: filter_size *= x input_data = [x * 1.0 / input_size for x in range(0, input_size)] filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)] # Conv2DGrad functions are not compiled for double due to # a problem in the way Eigen's Conv2DGrad works for double. # So we disable the DOUBLE path. We should re-enable this # when double support returns for CPU and/or GPU. for dtype in self._DtypesToTest(use_gpu=use_gpu): with self.cached_session(use_gpu=use_gpu): input_tensor = constant_op.constant( input_data, shape=input_shape, dtype=dtype, name="input") filter_tensor = constant_op.constant( filter_data, shape=filter_shape, dtype=dtype, name="filter") strides = [1, stride_rows, stride_cols, 1] new_padding = padding if data_format == "NCHW": new_input_tensor = test_util.NHWCToNCHW(input_tensor) strides = test_util.NHWCToNCHW(strides) if isinstance(padding, (list, tuple)): new_padding = test_util.NHWCToNCHW(padding) else: new_input_tensor = input_tensor conv = nn_ops.conv2d( new_input_tensor, filter_tensor, strides, new_padding, data_format=data_format, name="conv") if data_format == "NCHW": conv = test_util.NCHWToNHWC(conv) self.assertEqual(output_shape, conv.get_shape()) if test_input: jacob_t, jacob_n = gradient_checker.compute_gradient(input_tensor, input_shape, conv, output_shape) else: jacob_t, jacob_n = gradient_checker.compute_gradient(filter_tensor, filter_shape, conv, output_shape) if dtype == dtypes.float32: reference_jacob_t = jacob_t err = np.fabs(jacob_t - jacob_n).max() else: # Compare fp16 theoretical gradients to fp32 theoretical gradients, # since fp16 numerical gradients are too imprecise. 
err = np.fabs(jacob_t - reference_jacob_t).max() tf_logging.debug("conv_2d gradient error = %s", err) self.assertLess(err, max_err) @test_util.deprecated_graph_mode_only def testInputGradientValidPaddingStrideOne(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=5, input_cols=4, filter_rows=3, filter_cols=3, in_depth=2, out_depth=3, stride_rows=1, stride_cols=1, padding="VALID", test_input=True, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testFilterGradientValidPaddingStrideOne(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=4, input_rows=6, input_cols=5, filter_rows=2, filter_cols=2, in_depth=2, out_depth=3, stride_rows=1, stride_cols=1, padding="VALID", test_input=False, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testInputGradientValidPaddingStrideTwo(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=4, input_cols=5, filter_rows=3, filter_cols=3, in_depth=2, out_depth=3, stride_rows=2, stride_cols=2, padding="VALID", test_input=True, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testFilterGradientValidPaddingStrideTwo(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=4, input_rows=6, input_cols=5, filter_rows=2, filter_cols=2, in_depth=2, out_depth=3, stride_rows=2, stride_cols=2, padding="VALID", test_input=False, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testInputGradientValidPaddingStrideThree(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=7, input_cols=6, filter_rows=3, filter_cols=3, in_depth=4, out_depth=5, stride_rows=3, stride_cols=3, padding="VALID", test_input=True, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testFilterGradientValidPaddingStrideThree(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=8, input_cols=7, filter_rows=4, filter_cols=4, in_depth=2, out_depth=3, stride_rows=3, stride_cols=3, padding="VALID", test_input=False, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testInputGradientSamePaddingStrideOne(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=7, input_cols=6, filter_rows=3, filter_cols=3, in_depth=2, out_depth=3, stride_rows=1, stride_cols=1, padding="SAME", test_input=True, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testFilterGradientSamePaddingStrideOne(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=4, input_rows=6, input_cols=5, filter_rows=2, filter_cols=2, in_depth=2, out_depth=3, stride_rows=1, stride_cols=1, padding="SAME", test_input=False, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testInputGradientSamePaddingStrideTwo(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=5, input_cols=4, filter_rows=3, filter_cols=3, in_depth=3, out_depth=3, stride_rows=2, stride_cols=2, padding="SAME", test_input=True, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testFilterGradientSamePaddingStrideTwo(self): for (data_format, use_gpu) in 
GetTestConfigs(): self.ConstructAndTestGradient( batch=4, input_rows=6, input_cols=5, filter_rows=2, filter_cols=2, in_depth=2, out_depth=3, stride_rows=2, stride_cols=2, padding="SAME", test_input=False, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testInputGradientSamePaddingStrideThree(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=7, input_cols=6, filter_rows=3, filter_cols=3, in_depth=4, out_depth=5, stride_rows=3, stride_cols=3, padding="SAME", test_input=True, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testFilterGradientSamePaddingStrideThree(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=8, input_cols=7, filter_rows=4, filter_cols=4, in_depth=2, out_depth=3, stride_rows=3, stride_cols=3, padding="SAME", test_input=False, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testFilterGradientSamePaddingStride2x1(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=8, input_cols=7, filter_rows=4, filter_cols=4, in_depth=2, out_depth=3, stride_rows=2, stride_cols=1, padding="SAME", test_input=False, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testInputGradientKernelSizeMatchesInputSize(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=4, input_cols=3, filter_rows=4, filter_cols=3, in_depth=2, out_depth=3, stride_rows=1, stride_cols=1, padding="VALID", test_input=True, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testFilterGradientKernelSizeMatchesInputSize(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=4, input_cols=3, filter_rows=4, filter_cols=3, in_depth=2, out_depth=3, stride_rows=1, stride_cols=1, padding="VALID", test_input=False, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testInputGradient1x1PaddingStrideOne(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=5, input_cols=4, filter_rows=3, filter_cols=3, in_depth=2, out_depth=3, stride_rows=1, stride_cols=1, padding=[[0, 0], [1, 1], [1, 1], [0, 0]], test_input=True, data_format=data_format, use_gpu=use_gpu, max_err=0.0025) @test_util.deprecated_graph_mode_only def testFilterGradient1x1PaddingStrideOne(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=5, input_cols=4, filter_rows=3, filter_cols=3, in_depth=2, out_depth=3, stride_rows=1, stride_cols=1, padding=[[0, 0], [1, 1], [1, 1], [0, 0]], test_input=False, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testInputGradient1x1PaddingStrideTwo(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=4, input_cols=5, filter_rows=3, filter_cols=3, in_depth=2, out_depth=3, stride_rows=2, stride_cols=2, padding=[[0, 0], [1, 1], [1, 1], [0, 0]], test_input=True, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testFilterGradient1x1PaddingStrideTwo(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=4, input_cols=5, filter_rows=3, filter_cols=3, in_depth=2, out_depth=3, stride_rows=2, 
stride_cols=2, padding=[[0, 0], [1, 1], [1, 1], [0, 0]], test_input=False, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testInputGradient2x2PaddingStrideOne(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=5, input_cols=4, filter_rows=3, filter_cols=3, in_depth=2, out_depth=3, stride_rows=1, stride_cols=1, padding=[[0, 0], [2, 2], [2, 2], [0, 0]], test_input=True, data_format=data_format, use_gpu=use_gpu, max_err=0.003) @test_util.deprecated_graph_mode_only def testFilterGradient2x2PaddingStrideOne(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=5, input_cols=4, filter_rows=3, filter_cols=3, in_depth=2, out_depth=3, stride_rows=1, stride_cols=1, padding=[[0, 0], [2, 2], [2, 2], [0, 0]], test_input=False, data_format=data_format, use_gpu=use_gpu, max_err=0.003) @test_util.deprecated_graph_mode_only def testInputGradient1_2_3_4PaddingStride3x2(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=8, input_cols=5, filter_rows=4, filter_cols=2, in_depth=3, out_depth=2, stride_rows=3, stride_cols=2, padding=[[0, 0], [1, 2], [3, 4], [0, 0]], test_input=True, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testFilterGradient1_2_3_4PaddingStride3x2(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=8, input_cols=5, filter_rows=4, filter_cols=2, in_depth=3, out_depth=2, stride_rows=3, stride_cols=2, padding=[[0, 0], [1, 2], [3, 4], [0, 0]], test_input=False, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testInputGradient4_3_2_1PaddingStride2x1(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=3, input_rows=5, input_cols=7, filter_rows=3, filter_cols=2, in_depth=1, out_depth=2, stride_rows=2, stride_cols=1, padding=[[0, 0], [4, 3], [2, 1], [0, 0]], test_input=True, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testFilterGradient4_3_2_1PaddingStride2x1(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=3, input_rows=5, input_cols=7, filter_rows=3, filter_cols=2, in_depth=1, out_depth=2, stride_rows=2, stride_cols=1, padding=[[0, 0], [4, 3], [2, 1], [0, 0]], test_input=False, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testInputGradient0_0_0_5PaddingStride1x2(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=6, input_cols=7, filter_rows=3, filter_cols=4, in_depth=3, out_depth=2, stride_rows=1, stride_cols=2, padding=[[0, 0], [0, 0], [0, 5], [0, 0]], test_input=True, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testFilterGradient0_0_0_5PaddingStride1x2(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=6, input_cols=7, filter_rows=3, filter_cols=4, in_depth=3, out_depth=2, stride_rows=1, stride_cols=2, padding=[[0, 0], [0, 0], [0, 5], [0, 0]], test_input=False, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testShapeFunctionEdgeCases(self): # All shapes unknown. 
c1 = nn_ops.conv2d( array_ops.placeholder(dtypes.float32), array_ops.placeholder(dtypes.float32), strides=[1, 1, 1, 1], padding="SAME") self.assertEqual([None, None, None, None], c1.get_shape().as_list()) # Incorrect input shape. with self.assertRaises(ValueError): nn_ops.conv2d( array_ops.placeholder( dtypes.float32, shape=[1, 3]), array_ops.placeholder(dtypes.float32), strides=[1, 1, 1, 1], padding="SAME") # Incorrect filter shape. with self.assertRaises(ValueError): nn_ops.conv2d( array_ops.placeholder(dtypes.float32), array_ops.placeholder( dtypes.float32, shape=[1, 3]), strides=[1, 1, 1, 1], padding="SAME") # Depth mismatch. with self.assertRaises(ValueError): nn_ops.conv2d( array_ops.placeholder( dtypes.float32, shape=[32, 20, 20, 3]), array_ops.placeholder( dtypes.float32, shape=[4, 4, 2, 2]), strides=[1, 1, 1, 1], padding="SAME") # Input depth divisible by filter depth (group convolution). # No exceptions should appear. nn_ops.conv2d( array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 8]), array_ops.placeholder(dtypes.float32, shape=[4, 4, 2, 16]), strides=[1, 1, 1, 1], padding="SAME") # Negative padding. with self.assertRaises(ValueError): nn_ops.conv2d( array_ops.placeholder(dtypes.float32), array_ops.placeholder(dtypes.float32), strides=[1, 1, 1, 1], padding=[[0, 0], [0, -1], [1, 2], [0, 0]]) # Nonzero padding in nonspatial dimension. with self.assertRaises(ValueError): nn_ops.conv2d( array_ops.placeholder(dtypes.float32), array_ops.placeholder(dtypes.float32), strides=[1, 1, 1, 1], padding=[[1, 0], [0, 0], [0, 0], [0, 0]]) # Nonzero NCHW padding in nonspatial dimension. with self.assertRaises(ValueError): nn_ops.conv2d( array_ops.placeholder(dtypes.float32), array_ops.placeholder(dtypes.float32), strides=[1, 1, 1, 1], padding=[[0, 0], [0, 1], [0, 0], [0, 0]], data_format="NCHW") # Wrong amount of padding with self.assertRaises(ValueError): nn_ops.conv2d( array_ops.placeholder(dtypes.float32), array_ops.placeholder(dtypes.float32), strides=[1, 1, 1, 1], padding=[[0, 0], [0, 0], [0, 0]]) # Only specify one padding amount per dimension with self.assertRaises(ValueError): nn_ops.conv2d( array_ops.placeholder(dtypes.float32), array_ops.placeholder(dtypes.float32), strides=[1, 1, 1, 1], padding=[[0], [0], [0], [0]]) # Explicit padding elements are not lists with self.assertRaises(ValueError): nn_ops.conv2d( array_ops.placeholder(dtypes.float32), array_ops.placeholder(dtypes.float32), strides=[1, 1, 1, 1], padding=[0, 0, 0, 0]) @test_util.deprecated_graph_mode_only def testOpEdgeCases(self): with self.cached_session() as sess: # Illegal strides. with self.assertRaisesRegex(errors_impl.UnimplementedError, "strides in the batch and depth"): input_placeholder = array_ops.placeholder(dtypes.float32) input_val = np.ones([10, 10]) filter_placeholder = array_ops.placeholder(dtypes.float32) filter_val = np.ones([10, 10]) sess.run( nn_ops.conv2d( input_placeholder, filter_placeholder, strides=[2, 1, 1, 1], padding="SAME"), feed_dict={ input_placeholder: input_val, filter_placeholder: filter_val }) with self.assertRaisesRegex(errors_impl.UnimplementedError, "strides in the batch and depth"): input_placeholder = array_ops.placeholder(dtypes.float32) filter_placeholder = array_ops.placeholder(dtypes.float32) input_val = np.ones([10, 10]) filter_val = np.ones([10, 10]) sess.run( nn_ops.conv2d( input_placeholder, filter_placeholder, strides=[1, 1, 1, 2], padding="SAME"), feed_dict={ input_placeholder: input_val, filter_placeholder: filter_val }) # Filter larger than input. 
with self.assertRaisesRegex(ValueError, "Negative dimension size"): input_placeholder = array_ops.placeholder( dtypes.float32, shape=[32, 20, 20, 3]) input_val = np.ones([32, 20, 20, 3]) filter_placeholder = array_ops.placeholder( dtypes.float32, shape=[20, 21, 3, 2]) filter_val = np.ones([20, 21, 3, 2]) sess.run( nn_ops.conv2d( input_placeholder, filter_placeholder, strides=[1, 1, 1, 1], padding="VALID"), feed_dict={ input_placeholder: input_val, filter_placeholder: filter_val }) with self.assertRaisesRegex(ValueError, "Negative dimension size"): input_placeholder = array_ops.placeholder( dtypes.float32, shape=[32, 20, 20, 3]) input_val = np.ones([32, 20, 20, 3]) filter_placeholder = array_ops.placeholder( dtypes.float32, shape=[21, 20, 3, 2]) filter_val = np.ones([21, 20, 3, 2]) sess.run( nn_ops.conv2d( input_placeholder, filter_placeholder, strides=[1, 1, 1, 1], padding="VALID"), feed_dict={ input_placeholder: input_val, filter_placeholder: filter_val }) # Filter larger than input + padding. with self.assertRaisesRegex(ValueError, "Negative dimension size"): input_placeholder = array_ops.placeholder( dtypes.float32, shape=[32, 20, 20, 3]) input_val = np.ones([32, 20, 20, 3]) filter_placeholder = array_ops.placeholder( dtypes.float32, shape=[24, 25, 3, 2]) filter_val = np.ones([24, 25, 3, 2]) sess.run( nn_ops.conv2d( input_placeholder, filter_placeholder, strides=[1, 1, 1, 1], padding=[[0, 0], [2, 2], [2, 2], [0, 0]]), feed_dict={ input_placeholder: input_val, filter_placeholder: filter_val }) # Negative padding during backprop. with self.assertRaisesRegex( errors_impl.InvalidArgumentError, "All elements of explicit_paddings must be nonnegative"): filter_placeholder = array_ops.placeholder( dtypes.float32, shape=[18, 18, 3, 2]) filter_val = np.ones([18, 18, 3, 2]) out_backprop = array_ops.placeholder( dtypes.float32, shape=[32, 3, 2, 2]) out_backprop_val = np.ones([32, 3, 2, 2]) sess.run( nn_ops.conv2d_backprop_input([32, 20, 20, 3], filter_placeholder, out_backprop, strides=[1, 1, 1, 1], padding=[[0, 0], [-1, 0], [0, 0], [0, 0]]), feed_dict={ filter_placeholder: filter_val, out_backprop: out_backprop_val }) with self.assertRaisesRegex( errors_impl.InvalidArgumentError, "All elements of explicit_paddings must be nonnegative"): input_placeholder = array_ops.placeholder( dtypes.float32, shape=[32, 20, 20, 3]) input_val = np.ones([32, 20, 20, 3]) out_backprop = array_ops.placeholder( dtypes.float32, shape=[32, 3, 2, 2]) out_backprop_val = np.ones([32, 3, 2, 2]) sess.run( nn_ops.conv2d_backprop_filter( input_placeholder, [18, 18, 3, 2], out_backprop, strides=[1, 1, 1, 1], padding=[[0, 0], [-1, 0], [0, 0], [0, 0]]), feed_dict={ input_placeholder: input_val, out_backprop: out_backprop_val }) class DepthwiseConv2DTest(test.TestCase): def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding, expected): """Verifies the output values of the convolution function. Args: tensor_in_sizes: Input tensor dimensions in [batch, input_rows, input_cols, input_depth]. filter_in_sizes: Filter tensor dimensions in [filter_rows, filter_cols, input_depth, depth_multiplier]. stride: Stride. padding: Padding type. expected: An array containing the expected operation outputs. """ total_size_1 = 1 total_size_2 = 1 for s in tensor_in_sizes: total_size_1 *= s for s in filter_in_sizes: total_size_2 *= s # Initializes the input tensor with array containing incrementing # numbers from 1. 
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)] x2 = [f * 1.0 for f in range(1, total_size_2 + 1)] with self.cached_session() as sess: t1 = constant_op.constant(x1, shape=tensor_in_sizes) t1.set_shape(tensor_in_sizes) t2 = constant_op.constant(x2, shape=filter_in_sizes) conv = nn_impl.depthwise_conv2d( t1, t2, strides=[1, stride, stride, 1], padding=padding) value = self.evaluate(conv) tf_logging.debug("value = %s", value) self.assertArrayNear(expected, np.ravel(value), 1e-5) self.assertShapeEqual(value, conv) def testConv2D2x2Filter(self): # The inputs look like this (it's a 3 x 2 matrix, each of depth 2): # # [ (1.0, 2.0), (3.0, 4.0), ( 5.0, 6.0) ] # [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ] # We can view this as two inputs # # input depth 0: # # [ 1.0, 3.0, 5.0 ] # [ 7.0, 9.0, 11.0 ] # # input depth 1: # # [ 2.0, 4.0, 6.0 ] # [ 8.0, 10.0, 12.0 ] # # The filter looks like this (it has two 2 x 2 patches, each generating 2 # depths): # # filter #0: # # [ (1.0, 3.0), ( 5.0, 7.0)] # [ (9.0, 11.0), (13.0, 15.0)] # # filter #1: # # [ ( 2.0, 4.0), ( 6.0, 8.0)] # [ (10.0, 12.0), (14.0, 16.0)] # # So the outputs are: # # (position 0, 0: in_depth 0, output_depth 0 -- using filter #0) # 1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196 # (position 0, 0: in_depth 0, output_depth 1 -- using filter #1) # 1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216 # (position 0, 0: in_depth 1, output_depth 2 -- using filter #0) # 2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272 # (position 0, 0: in_depth 1, output_depth 3 -- using filter #1) # 2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296 # # (position 1, 0: in_depth 0, output_depth 0 -- using filter #0) # 3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252 # (position 1, 0: in_depth 0, output_depth 1 -- using filter #1) # 3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280 # (position 1, 0: in_depth 1, output_depth 2 -- using filter #0) # 4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344 # (position 1, 0: in_depth 1, output_depth 3 -- using filter #1) # 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376 expected_output = [196, 216, 272, 296, 252, 280, 344, 376] self._VerifyValues( tensor_in_sizes=[1, 2, 3, 2], filter_in_sizes=[2, 2, 2, 2], stride=1, padding="VALID", expected=expected_output) class SeparableConv2DTest(test.TestCase): def _InitValues(self, sizes): """Initializes values for input tensors. Args: sizes: Tensor dimensions. Returns: Tensor initialized to values. """ total_size = 1 for s in sizes: total_size *= s x = [f * 0.5 for f in range(1, total_size + 1)] return constant_op.constant(x, shape=sizes) def _VerifyValues(self, tensor_in_sizes, depthwise_filter_in_sizes, pointwise_filter_in_sizes, stride, padding, expected, data_format="NHWC"): """Verifies the output values of the separable convolution function. Args: tensor_in_sizes: Input tensor dimensions. depthwise_filter_in_sizes: Depthwise filter tensor dimensions. pointwise_filter_in_sizes: Pointwise filter tensor dimensions. stride: Stride. padding: Padding type. expected: An array containing the expected operation outputs. data_format: string data format for input tensor. 
""" with self.cached_session(use_gpu=True) as sess: t1 = self._InitValues(tensor_in_sizes) f1 = self._InitValues(depthwise_filter_in_sizes) f1.set_shape(depthwise_filter_in_sizes) f2 = self._InitValues(pointwise_filter_in_sizes) real_t1 = t1 strides = [1, stride, stride, 1] if data_format == "NCHW": real_t1 = array_ops.transpose(t1, [0, 3, 1, 2]) strides = [1, 1, stride, stride] if isinstance(padding, list): padding = [padding[0], padding[3], padding[1], padding[2]] conv = nn_impl.separable_conv2d( real_t1, f1, f2, strides=strides, padding=padding, data_format=data_format) if data_format == "NCHW": conv = array_ops.transpose(conv, [0, 2, 3, 1]) value = self.evaluate(conv) tf_logging.debug("value = %s", value) self.assertArrayNear(expected, np.ravel(value), 2e-3) self.assertShapeEqual(value, conv) def _testSeparableConv2D(self, data_format): # The output is the result of two convolutions: # First with tensor_in[1, 4, 4, 2] * filter1[2, 2, 2, 3]. # Second with intermediate_out[1, 4, 4, 6] * filter2[1, 1, 6, 7]. # Complexity is O(2*3*2*2 + 6*7*1*1) as opposed to O(2*7*2*2). expected_output = [ 6644.5, 6971.5, 7298.5, 7625.5, 7952.5, 8279.5, 8606.5, 8154.5, 8556.5, 8958.5, 9360.5, 9762.5, 10164.5, 10566.5, 9664.5, 10141.5, 10618.5, 11095.5, 11572.5, 12049.5, 12526.5, 4145.5, 4346.5, 4547.5, 4748.5, 4949.5, 5150.5, 5351.5, 12684.5, 13311.5, 13938.5, 14565.5, 15192.5, 15819.5, 16446.5, 14194.5, 14896.5, 15598.5, 16300.5, 17002.5, 17704.5, 18406.5, 15704.5, 16481.5, 17258.5, 18035.5, 18812.5, 19589.5, 20366.5, 6499.5, 6814.5, 7129.5, 7444.5, 7759.5, 8074.5, 8389.5, 18724.5, 19651.5, 20578.5, 21505.5, 22432.5, 23359.5, 24286.5, 20234.5, 21236.5, 22238.5, 23240.5, 24242.5, 25244.5, 26246.5, 21744.5, 22821.5, 23898.5, 24975.5, 26052.5, 27129.5, 28206.5, 8853.5, 9282.5, 9711.5, 10140.5, 10569.5, 10998.5, 11427.5, 5746.75, 6010.75, 6274.75, 6538.75, 6802.75, 7066.75, 7330.75, 6168.75, 6452.25, 6735.75, 7019.25, 7302.75, 7586.25, 7869.75, 6590.75, 6893.75, 7196.75, 7499.75, 7802.75, 8105.75, 8408.75, 2036.25, 2119.5, 2202.75, 2286.0, 2369.25, 2452.5, 2535.75 ] self._VerifyValues( tensor_in_sizes=[1, 4, 4, 2], depthwise_filter_in_sizes=[2, 2, 2, 3], pointwise_filter_in_sizes=[1, 1, 6, 7], stride=1, padding="SAME", expected=expected_output, data_format=data_format) def testSeparableConv2D(self): self._testSeparableConv2D("NHWC") def disabledtestSeparableConv2DNCHW(self): if not test.is_gpu_available(): return self._testSeparableConv2D("NCHW") def _testSeparableConv2DEqualInputOutputDepth(self, data_format): # The output is the result of two convolutions: # First with tensor_in[1, 4, 4, 2] * filter1[2, 2, 3, 3]. # Second with intermediate_out[1, 4, 4, 6] * filter2[1, 1, 6, 6]. # Complexity is O(2*3*2*2 + 6*6*1*1) as opposed to O(2*6*2*2). 
expected_output = [ 5742.0, 6069.0, 6396.0, 6723.0, 7050.0, 7377.0, 7047.0, 7449.0, 7851.0, 8253.0, 8655.0, 9057.0, 8352.0, 8829.0, 9306.0, 9783.0, 10260.0, 10737.0, 3582.0, 3783.0, 3984.0, 4185.0, 4386.0, 4587.0, 10962.0, 11589.0, 12216.0, 12843.0, 13470.0, 14097.0, 12267.0, 12969.0, 13671.0, 14373.0, 15075.0, 15777.0, 13572.0, 14349.0, 15126.0, 15903.0, 16680.0, 17457.0, 5616.0, 5931.0, 6246.0, 6561.0, 6876.0, 7191.0, 16182.0, 17109.0, 18036.0, 18963.0, 19890.0, 20817.0, 17487.0, 18489.0, 19491.0, 20493.0, 21495.0, 22497.0, 18792.0, 19869.0, 20946.0, 22023.0, 23100.0, 24177.0, 7650.0, 8079.0, 8508.0, 8937.0, 9366.0, 9795.0, 4963.5, 5227.5, 5491.5, 5755.5, 6019.5, 6283.5, 5328.0, 5611.5, 5895.0, 6178.5, 6462.0, 6745.5, 5692.5, 5995.5, 6298.5, 6601.5, 6904.5, 7207.5, 1757.25, 1840.5, 1923.75, 2007.0, 2090.25, 2173.5 ] self._VerifyValues( tensor_in_sizes=[1, 4, 4, 2], depthwise_filter_in_sizes=[2, 2, 2, 3], pointwise_filter_in_sizes=[1, 1, 6, 6], stride=1, padding="SAME", expected=expected_output, data_format=data_format) @test_util.deprecated_graph_mode_only def testSeparableConv2DEqualInputOutputDepth(self): self._testSeparableConv2DEqualInputOutputDepth("NHWC") def testSeparableConv2DEqualInputOutputDepthNCHW(self): if not test.is_gpu_available(): return self._testSeparableConv2DEqualInputOutputDepth("NCHW") def _testSeparableConv2dExplicitPadding(self, data_format): tensor_in_sizes = [1, 4, 4, 2] depthwise_filter_in_sizes = [2, 2, 2, 3] pointwise_filter_in_sizes = [1, 1, 6, 7] padding = [[0, 0], [1, 2], [3, 4], [0, 0]] with self.cached_session(use_gpu=True): # Compute the 'expected' values by manually padding before calling # separable_conv2d t1 = self._InitValues(tensor_in_sizes) t1 = array_ops.pad(t1, padding) f1 = self._InitValues(depthwise_filter_in_sizes) f1.set_shape(depthwise_filter_in_sizes) f2 = self._InitValues(pointwise_filter_in_sizes) conv = nn_impl.separable_conv2d( t1, f1, f2, strides=[1, 1, 1, 1], padding="VALID", data_format="NHWC") expected = self.evaluate(conv) expected = np.ravel(expected) self._VerifyValues( tensor_in_sizes=tensor_in_sizes, depthwise_filter_in_sizes=depthwise_filter_in_sizes, pointwise_filter_in_sizes=pointwise_filter_in_sizes, stride=1, padding=padding, expected=expected, data_format=data_format) def testSeparableConv2dExplicitPadding(self): self._testSeparableConv2dExplicitPadding("NHWC") def testSeparableConv2dExplicitPaddingNCHW(self): if not test.is_gpu_available(): return self._testSeparableConv2dExplicitPadding("NCHW") class DeepConv2DTest(test.TestCase): def _CompareFwdConv2D(self, tensor_in_sizes, filter_in_sizes, conv_strides, padding): """Verifies that DeepConv2D and Conv2D produce the same values. Args: tensor_in_sizes: Input tensor dimensions in [batch, input_rows, input_cols, input_depth]. filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols, input_depth, output_depth]. conv_strides: [row_stride, col_stride] for the convolution; padding: Padding type. 
""" x1 = np.random.rand(*tensor_in_sizes).astype(np.float32) x2 = np.random.rand(*filter_in_sizes).astype(np.float32) with self.cached_session(use_gpu=False) as sess: t1 = constant_op.constant(x1, shape=tensor_in_sizes) t2 = constant_op.constant(x2, shape=filter_in_sizes) strides = [1] + conv_strides + [1] conv = nn_ops.conv2d(t1, t2, strides=strides, padding=padding) os.environ["TF_USE_DEEP_CONV2D"] = "0" values_expect = self.evaluate([conv]) os.environ["TF_USE_DEEP_CONV2D"] = "1" values_test = self.evaluate([conv]) self.assertAllClose(values_expect, values_test, rtol=1e-5, atol=1e-5) def _RunTestCases(self, conv_strides, padding): input_sizes = [[5, 5, 5, 1248], [3, 17, 17, 192], [2, 35, 35, 288], [2, 6, 8, 517], [2, 7, 4, 81], [3, 11, 3, 77]] filter_sizes = [[3, 3, 1248, 128], [3, 3, 192, 192], [3, 3, 288, 384], [3, 3, 517, 64], [3, 3, 81, 77], [3, 3, 77, 181]] for input_shape, filter_shape in zip(input_sizes, filter_sizes): self._CompareFwdConv2D(input_shape, filter_shape, conv_strides, padding) def testConv2D3x3FilterStride1x1Valid(self): self._RunTestCases([1, 1], "VALID") def testConv2D3x3FilterStride1x1Same(self): self._RunTestCases([1, 1], "SAME") class Conv2DBenchmark(test.Benchmark): def benchmarkGPUConvStackFirst(self): # Benchmark the first iteration of a conv-net with many identical conv # operations. if not test.is_gpu_available(): return with ops.Graph().as_default(), session_lib.Session() as session: batch_size = 1 timesteps = 600 features = 1 inputs = random_ops.random_uniform( [batch_size, 1, timesteps, features], seed=1234) num_outputs_list = [512] * 40 + [1] kernel_w = 3 x = inputs for num_outputs in num_outputs_list: x = convolutional.conv2d(x, num_outputs, [1, kernel_w]) outputs = x self.evaluate(variables.global_variables_initializer()) num_iterations = 4 for iter_index in xrange(num_iterations): start = time.time() session.run(outputs) wall_time = time.time() - start self.report_benchmark( name="conv_stack_iter_%d" % iter_index, wall_time=wall_time) tf_logging.info("conv_stack_iter_%d: %.4f" % (iter_index, wall_time)) def _bench_op(self, name, op, burn_iters, num_iters): config = config_pb2.ConfigProto() # Prevent Grappler from optimizing away the entire graph. config.graph_options.rewrite_options.dependency_optimization = ( rewriter_config_pb2.RewriterConfig.OFF) with session_lib.Session(config=config) as session: self.evaluate(variables.global_variables_initializer()) self.run_op_benchmark( session, op, burn_iters=burn_iters, min_iters=num_iters, name=name) def benchmarkExplicitVsManualPadding(self): """Compare performance of EXPLICIT padding and calling tf.pad. A Conv2D op with EXPLICIT padding is benchmarked, and a tf.pad with the same padding followed by an equivalent Conv2D op is benchmarked. """ if not test.is_gpu_available(): return with ops.Graph().as_default(): burn_iters = 15 num_iters = 300 batch_size = 64 # The input and filter correspond to the first layer of Resnet50. input = variables.Variable( # pylint: disable=redefined-builtin random_ops.random_uniform([ batch_size, 3, 224, 224 ])) filter = variables.Variable(random_ops.random_uniform([7, 7, 3, 64])) # pylint: disable=redefined-builtin strides = [1, 1, 2, 2] padding = [(0, 0), (0, 0), (3, 3), (3, 3)] output_explicit_pad = nn_ops.conv2d( input, filter, strides, padding=padding, data_format="NCHW") input_padded = array_ops.pad(input, padding) output_manual_pad = nn_ops.conv2d( input_padded, filter, strides, padding="VALID", data_format="NCHW") # Benchmark just the forward pass. 
self._bench_op("explicit_pad_forward", output_explicit_pad.op, burn_iters, num_iters) self._bench_op("manual_pad_forward", output_manual_pad.op, burn_iters, num_iters) # Benchmark both the forward and backwards passes. input_grad_explicit_pad, filter_grad_explicit_pad = ( gradients_impl.gradients(output_explicit_pad, [input, filter])) self._bench_op( "explicit_pad_backward", control_flow_ops.group(input_grad_explicit_pad, filter_grad_explicit_pad), burn_iters, num_iters) input_grad_manual_pad, filter_grad_manual_pad = gradients_impl.gradients( output_manual_pad, [input, filter]) self._bench_op( "manual_pad_backward", control_flow_ops.group(input_grad_manual_pad, filter_grad_manual_pad), burn_iters, num_iters) def benchmarkExplicitVsSamePaddingGraph(self): """Compare performance of EXPLICIT and SAME padding in graph mode. A Conv2D op with SAME padding is benchmarked, and an equivalent Conv2D op with explicit padding is benchmarked, where the padding is the same as in the SAME case. The purpose is to ensure EXPLICIT padding is just as efficient as the SAME case """ if not test.is_gpu_available(): return with ops.Graph().as_default(): burn_iters = 15 num_convs = 20 num_iters = 50 batch_size = 64 # The input and filter correspond to a middle layer of Resnet50. input = variables.Variable( # pylint: disable=redefined-builtin random_ops.random_uniform([ batch_size, 256, 14, 14 ])) filter = variables.Variable(random_ops.random_uniform([3, 3, 256, 256])) # pylint: disable=redefined-builtin strides = [1, 1, 1, 1] padding = [(0, 0), (0, 0), (1, 1), (1, 1)] output_explicit_pad = input output_same_pad = input for _ in range(num_convs): output_explicit_pad = nn_ops.conv2d( output_explicit_pad, filter, strides, padding=padding, data_format="NCHW") output_same_pad = nn_ops.conv2d( output_same_pad, filter, strides, padding="SAME", data_format="NCHW") grad_explicit_pad, = gradients_impl.gradients(output_explicit_pad, filter) grad_same_pad, = gradients_impl.gradients(output_same_pad, filter) self._bench_op("graph_explicit_pad", grad_explicit_pad.op, burn_iters, num_iters) self._bench_op("graph_same_pad", grad_same_pad.op, burn_iters, num_iters) def benchmarkExplicitVsSamePaddingEager(self): """Compare performance of EXPLICIT and SAME padding in eager mode. A Conv2D op with SAME padding is benchmarked, and an equivalent Conv2D op with explicit padding is benchmarked, where the padding is the same as in the SAME case. Currently, EXPLICIT padding is slightly slower, due to the fact the Python padding list must be checked and processed before the Conv2D op can run. """ # TODO(reedwm): Make EXPLICIT padding as fast as SAME padding. if not test.is_gpu_available(): return with context.eager_mode(): burn_iters = 15 num_convs = 20 num_iters = 50 batch_size = 64 # The input and filter correspond to a middle layer of Resnet50. 
input = variables.Variable( # pylint: disable=redefined-builtin random_ops.random_uniform([ batch_size, 256, 14, 14 ])) filter = variables.Variable(random_ops.random_uniform([3, 3, 256, 256])) # pylint: disable=redefined-builtin strides = [1, 1, 1, 1] padding = [(0, 0), (0, 0), (1, 1), (1, 1)] output_explicit_pad = input output_same_pad = input for _ in range(burn_iters): output_explicit_pad = nn_ops.conv2d( output_explicit_pad, filter, strides, padding=padding, data_format="NCHW") output_same_pad = nn_ops.conv2d( output_same_pad, filter, strides, padding="SAME", data_format="NCHW") start = time.time() for _ in range(num_iters): with backprop.GradientTape() as tape: for _ in range(num_convs): output_explicit_pad = nn_ops.conv2d( output_explicit_pad, filter, strides, padding=padding, data_format="NCHW") tape.gradient(output_explicit_pad, filter) end = time.time() self.report_benchmark( name="eager_explicit_pad", wall_time=(end - start) / num_iters, iters=num_iters) start = time.time() for _ in range(num_iters): with backprop.GradientTape() as tape: for _ in range(num_convs): output_same_pad = nn_ops.conv2d( output_same_pad, filter, strides, padding="SAME", data_format="NCHW") tape.gradient(output_same_pad, filter) end = time.time() self.report_benchmark( name="eager_same_pad", wall_time=(end - start) / num_iters, iters=num_iters) def GetInceptionFwdTest(input_size, filter_size, stride, padding, gpu_only=False): def Test(self): if gpu_only and not test.is_gpu_available(): tf_logging.info("Skipping InceptionFwd %s", (input_size, filter_size, stride, padding)) return tf_logging.info("Testing InceptionFwd %s", (input_size, filter_size, stride, padding)) self._CompareFwdValues(input_size, filter_size, [stride, stride], padding) return Test def GetInceptionFwdDilatedConvTest(input_size, filter_size, stride, padding): def Test(self): if stride == 1: tf_logging.info("Testing InceptionFwd with dilations %s", (input_size, filter_size, stride, padding)) self._VerifyDilatedConvValues( tensor_in_sizes=input_size, filter_in_sizes=filter_size, strides=[stride, stride], dilations=[2, 2], padding=padding, rtol=5e-4) return Test def GetInceptionBackInputTest(input_size, filter_size, output_size, stride, padding, gpu_only=False): def Test(self): if gpu_only and not test.is_gpu_available(): tf_logging.info("Skipping InceptionBackInput %s", (input_size, filter_size, output_size, stride, padding)) return tf_logging.info("Testing InceptionBackInput %s", (input_size, filter_size, output_size, stride, padding)) self._CompareBackpropInput(input_size, filter_size, output_size, [stride, stride], padding) return Test def GetInceptionBackFilterTest(input_size, filter_size, output_size, strides, padding, gpu_only=False): def Test(self): if gpu_only and not test.is_gpu_available(): tf_logging.info("Skipping InceptionBackFilter %s", (input_size, filter_size, output_size, strides, padding)) return tf_logging.info("Testing InceptionBackFilter %s", (input_size, filter_size, output_size, strides, padding)) self._CompareBackFilter(input_size, filter_size, output_size, strides, padding) return Test if __name__ == "__main__": for index, (input_size_, filter_size_, output_size_, stride_, padding_) in enumerate(GetShrunkInceptionShapes()): setattr(Conv2DTest, "testInceptionFwd_" + str(index), test_util.run_in_graph_and_eager_modes( GetInceptionFwdTest(input_size_, filter_size_, stride_, padding_))) setattr( Conv2DTest, "testInceptionFwdDilatedConv_" + str(index), 
test_util.run_in_graph_and_eager_modes(GetInceptionFwdDilatedConvTest( input_size_, filter_size_, stride_, padding_))) setattr(Conv2DTest, "testInceptionBackInput_" + str(index), test_util.run_in_graph_and_eager_modes( GetInceptionBackInputTest(input_size_, filter_size_, output_size_, stride_, padding_))) setattr(Conv2DTest, "testInceptionBackFilter_" + str(index), test_util.run_in_graph_and_eager_modes( GetInceptionBackFilterTest(input_size_, filter_size_, output_size_, [stride_, stride_], padding_))) # TODO(b/35359731) # Fwd, BckInput, and BackFilter to test that for certain input parameter # set, winograd nonfused algorithm will be excluded from conv autotune. If # in such case, winograd nonfused algorithm is added as one option of the # conv autotune, and cuDNN version is smaller than 7, the following tests # will fail. ishape = [1, 400, 400, 1] fshape = [1, 1, 1, 256] oshape = [1, 400, 400, 256] setattr(Conv2DTest, "testInceptionFwd_No_Winograd_Nonfused", test_util.run_in_graph_and_eager_modes( GetInceptionFwdTest(ishape, fshape, 1, "SAME", gpu_only=True))) setattr(Conv2DTest, "testInceptionFwdDilatedConv_No_Winograd_Nonfused", test_util.run_in_graph_and_eager_modes( GetInceptionFwdDilatedConvTest(ishape, fshape, 1, "SAME"))) setattr(Conv2DTest, "testInceptionBackInput_No_Winograd_Nonfused", test_util.run_in_graph_and_eager_modes( GetInceptionBackInputTest(ishape, fshape, oshape, 1, "SAME", gpu_only=True))) setattr(Conv2DTest, "testInceptionBackFilter_No_Winograd_Nonfused", test_util.run_in_graph_and_eager_modes( GetInceptionBackFilterTest(ishape, fshape, oshape, [1, 1], "SAME", gpu_only=True))) test.main()
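A large part of the file above verifies explicit (per-edge) conv2d padding by comparing it against a manual tf.pad followed by a "VALID" convolution (see _RunAndVerifyBackpropInputExplicitPadding and benchmarkExplicitVsManualPadding). A minimal standalone sketch of that equivalence check follows; the shapes, padding amounts, and tolerance are illustrative assumptions, not values taken from the tests.

# Sketch: explicit conv2d padding should match tf.pad + "VALID" convolution.
# Shapes, padding amounts, and tolerances below are illustrative assumptions.
import numpy as np
import tensorflow as tf

x = tf.constant(np.random.rand(1, 5, 5, 2).astype(np.float32))   # NHWC input
w = tf.constant(np.random.rand(3, 3, 2, 4).astype(np.float32))   # HWIO filter
pad = [[0, 0], [1, 2], [3, 4], [0, 0]]  # zero padding on batch and depth dims

explicit = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding=pad)
manual = tf.nn.conv2d(tf.pad(x, pad), w, strides=[1, 1, 1, 1], padding="VALID")

np.testing.assert_allclose(explicit.numpy(), manual.numpy(), rtol=1e-5, atol=1e-5)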
[]
[]
[ "TF_USE_DEEP_CONV2D" ]
[]
["TF_USE_DEEP_CONV2D"]
python
1
0
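The bracketed annotation values above single out TF_USE_DEEP_CONV2D, the environment variable that conv_ops_test.py writes in DeepConv2DTest._CompareFwdConv2D to compare the specialized deep-convolution path against the default Conv2D path. Below is a minimal standalone sketch of that toggle pattern; only the variable name and the compare-two-runs idea come from the file above, while the shapes and graph-mode setup are illustrative assumptions.

# Sketch: evaluate the same 3x3 convolution with TF_USE_DEEP_CONV2D off and on,
# then check that both code paths produce the same values (shapes are assumed).
import os
import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

x = tf.constant(np.random.rand(2, 17, 17, 32).astype(np.float32))
w = tf.constant(np.random.rand(3, 3, 32, 64).astype(np.float32))
conv = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME")

with tf.Session() as sess:
    os.environ["TF_USE_DEEP_CONV2D"] = "0"   # reference: default conv path
    expected = sess.run(conv)
    os.environ["TF_USE_DEEP_CONV2D"] = "1"   # candidate: deep conv path
    actual = sess.run(conv)

np.testing.assert_allclose(expected, actual, rtol=1e-5, atol=1e-5)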
test/functional/test_framework/test_node.py
#!/usr/bin/env python3 # Copyright (c) 2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Class for bitcoind node under test""" import decimal import errno from enum import Enum import http.client import json import logging import os import re import subprocess import tempfile import time from .authproxy import JSONRPCException from .util import ( append_config, delete_cookie_file, get_rpc_proxy, rpc_url, wait_until, p2p_port, ) # For Python 3.4 compatibility JSONDecodeError = getattr(json, "JSONDecodeError", ValueError) BITCOIND_PROC_WAIT_TIMEOUT = 60 class FailedToStartError(Exception): """Raised when a node fails to start correctly.""" class ErrorMatch(Enum): FULL_TEXT = 1 FULL_REGEX = 2 PARTIAL_REGEX = 3 class TestNode(): """A class for representing a bitcoind node under test. This class contains: - state about the node (whether it's running, etc) - a Python subprocess.Popen object representing the running process - an RPC connection to the node - one or more P2P connections to the node To make things easier for the test writer, any unrecognised messages will be dispatched to the RPC connection.""" def __init__(self, i, datadir, rpchost, timewait, bitcoind, bitcoin_cli, mocktime, coverage_dir, extra_conf=None, extra_args=None, use_cli=False): self.index = i self.datadir = datadir self.stdout_dir = os.path.join(self.datadir, "stdout") self.stderr_dir = os.path.join(self.datadir, "stderr") self.rpchost = rpchost if timewait: self.rpc_timeout = timewait else: # Wait for up to 60 seconds for the RPC server to respond self.rpc_timeout = 60 self.binary = bitcoind self.coverage_dir = coverage_dir if extra_conf != None: append_config(datadir, extra_conf) # Most callers will just need to add extra args to the standard list below. # For those callers that need more flexibility, they can just set the args property directly. # Note that common args are set in the config file (see initialize_datadir) self.extra_args = extra_args self.args = [ self.binary, "-datadir=" + self.datadir, "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-mocktime=" + str(mocktime), "-uacomment=testnode%d" % i ] self.cli = TestNodeCLI(bitcoin_cli, self.datadir) self.use_cli = use_cli self.running = False self.process = None self.rpc_connected = False self.rpc = None self.url = None self.log = logging.getLogger('TestFramework.node%d' % i) self.cleanup_on_exit = True # Whether to kill the node when this object goes away self.p2ps = [] def _node_msg(self, msg: str) -> str: """Return a modified msg that identifies this node by its index as a debugging aid.""" return "[node %d] %s" % (self.index, msg) def _raise_assertion_error(self, msg: str): """Raise an AssertionError with msg modified to identify this node.""" raise AssertionError(self._node_msg(msg)) def __del__(self): # Ensure that we don't leave any bitcoind processes lying around after # the test ends if self.process and self.cleanup_on_exit: # Should only happen on test failure # Avoid using logger, as that may have already been shutdown when # this destructor is called. 
print(self._node_msg("Cleaning up leftover process")) self.process.kill() def __getattr__(self, name): """Dispatches any unrecognised messages to the RPC connection or a CLI instance.""" if self.use_cli: return getattr(self.cli, name) else: assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection") return getattr(self.rpc, name) def start(self, extra_args=None, stdout=None, stderr=None, *args, **kwargs): """Start the node.""" if extra_args is None: extra_args = self.extra_args # Add a new stdout and stderr file each time bitcoind is started if stderr is None: stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) if stdout is None: stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) self.stderr = stderr self.stdout = stdout # Delete any existing cookie file -- if such a file exists (eg due to # unclean shutdown), it will get overwritten anyway by bitcoind, and # potentially interfere with our attempt to authenticate delete_cookie_file(self.datadir) # add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1") self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, *args, **kwargs) self.running = True self.log.debug("bitcoind started, waiting for RPC to come up") def wait_for_rpc_connection(self): """Sets up an RPC connection to the bitcoind process. Returns False if unable to connect.""" # Poll at a rate of four times per second poll_per_s = 4 for _ in range(poll_per_s * self.rpc_timeout): if self.process.poll() is not None: raise FailedToStartError(self._node_msg( 'bitcoind exited with status {} during initialization'.format(self.process.returncode))) try: self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir) self.rpc.getblockcount() # If the call to getblockcount() succeeds then the RPC connection is up self.rpc_connected = True self.url = self.rpc.url self.log.debug("RPC successfully started") return except IOError as e: if e.errno != errno.ECONNREFUSED: # Port not yet open? raise # unknown IO error except JSONRPCException as e: # Initialization phase if e.error['code'] != -28: # RPC in warmup? raise # unknown JSON RPC exception except ValueError as e: # cookie file not found and no rpcuser or rpcassword. bitcoind still starting if "No RPC credentials" not in str(e): raise time.sleep(1.0 / poll_per_s) self._raise_assertion_error("Unable to connect to bitcoind") def get_wallet_rpc(self, wallet_name): if self.use_cli: return self.cli("-rpcwallet={}".format(wallet_name)) else: assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected") wallet_path = "wallet/%s" % wallet_name return self.rpc / wallet_path def stop_node(self, expected_stderr=''): """Stop the node.""" if not self.running: return self.log.debug("Stopping node") try: self.stop() except http.client.CannotSendRequest: self.log.exception("Unable to stop node.") # Check that stderr is as expected self.stderr.seek(0) stderr = self.stderr.read().decode('utf-8').strip() if stderr != expected_stderr: raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr)) del self.p2ps[:] def is_node_stopped(self): """Checks whether the node has stopped. Returns True if the node has stopped. False otherwise. 
This method is responsible for freeing resources (self.process).""" if not self.running: return True return_code = self.process.poll() if return_code is None: return False # process has stopped. Assert that it didn't return an error code. assert return_code == 0, self._node_msg( "Node returned non-zero exit code (%d) when stopping" % return_code) self.running = False self.process = None self.rpc_connected = False self.rpc = None self.log.debug("Node stopped") return True def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT): wait_until(self.is_node_stopped, timeout=timeout) def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs): """Attempt to start the node and expect it to raise an error. extra_args: extra arguments to pass through to bitcoind expected_msg: regex that stderr should match when bitcoind fails Will throw if bitcoind starts without an error. Will throw if an expected_msg is provided and it does not match bitcoind's stdout.""" with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \ tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout: try: self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs) self.wait_for_rpc_connection() self.stop_node() self.wait_until_stopped() except FailedToStartError as e: self.log.debug('bitcoind failed to start: %s', e) self.running = False self.process = None # Check stderr for expected message if expected_msg is not None: log_stderr.seek(0) stderr = log_stderr.read().decode('utf-8').strip() if match == ErrorMatch.PARTIAL_REGEX: if re.search(expected_msg, stderr, flags=re.MULTILINE) is None: self._raise_assertion_error( 'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr)) elif match == ErrorMatch.FULL_REGEX: if re.fullmatch(expected_msg, stderr) is None: self._raise_assertion_error( 'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr)) elif match == ErrorMatch.FULL_TEXT: if expected_msg != stderr: self._raise_assertion_error( 'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr)) else: if expected_msg is None: assert_msg = "bitcoind should have exited with an error" else: assert_msg = "bitcoind should have exited with expected error " + expected_msg self._raise_assertion_error(assert_msg) def node_encrypt_wallet(self, passphrase): """"Encrypts the wallet. This causes bitcoind to shutdown, so this method takes care of cleaning up resources.""" self.encryptwallet(passphrase) self.wait_until_stopped() def add_p2p_connection(self, p2p_conn, *args, **kwargs): """Add a p2p connection to the node. 
This method adds the p2p connection to the self.p2ps list and also returns the connection to the caller.""" if 'dstport' not in kwargs: kwargs['dstport'] = p2p_port(self.index) if 'dstaddr' not in kwargs: kwargs['dstaddr'] = '127.0.0.1' p2p_conn.peer_connect(*args, **kwargs) self.p2ps.append(p2p_conn) return p2p_conn @property def p2p(self): """Return the first p2p connection Convenience property - most tests only use a single p2p connection to each node, so this saves having to write node.p2ps[0] many times.""" assert self.p2ps, self._node_msg("No p2p connection") return self.p2ps[0] def disconnect_p2ps(self): """Close all p2p connections to the node.""" for p in self.p2ps: p.peer_disconnect() del self.p2ps[:] class TestNodeCLIAttr: def __init__(self, cli, command): self.cli = cli self.command = command def __call__(self, *args, **kwargs): return self.cli.send_cli(self.command, *args, **kwargs) def get_request(self, *args, **kwargs): return lambda: self(*args, **kwargs) class TestNodeCLI(): """Interface to bitcoin-cli for an individual node""" def __init__(self, binary, datadir): self.options = [] self.binary = binary self.datadir = datadir self.input = None self.log = logging.getLogger('TestFramework.bitcoincli') def __call__(self, *options, input=None): # TestNodeCLI is callable with bitcoin-cli command-line options cli = TestNodeCLI(self.binary, self.datadir) cli.options = [str(o) for o in options] cli.input = input return cli def __getattr__(self, command): return TestNodeCLIAttr(self, command) def batch(self, requests): results = [] for request in requests: try: results.append(dict(result=request())) except JSONRPCException as e: results.append(dict(error=e)) return results def send_cli(self, command=None, *args, **kwargs): """Run bitcoin-cli command. Deserializes returned string as python object.""" pos_args = [str(arg) for arg in args] named_args = [str(key) + "=" + str(value) for (key, value) in kwargs.items()] assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same bitcoin-cli call" p_args = [self.binary, "-datadir=" + self.datadir] + self.options if named_args: p_args += ["-named"] if command is not None: p_args += [command] p_args += pos_args + named_args self.log.debug("Running bitcoin-cli command: %s" % command) process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) cli_stdout, cli_stderr = process.communicate(input=self.input) returncode = process.poll() if returncode: match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr) if match: code, message = match.groups() raise JSONRPCException(dict(code=int(code), message=message)) # Ignore cli_stdout, raise with cli_stderr raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr) try: return json.loads(cli_stdout, parse_float=decimal.Decimal) except JSONDecodeError: return cli_stdout.rstrip("\n")
[]
[]
[]
[]
[]
python
0
0
scripts.py
# Say "Hello, World!" With Python print("Hello, World!") # Write a function def is_leap(year): leap = False leap = year % 400 == 0 or (year % 4 == 0 and year % 100 != 0) return leap year = int(input()) print(is_leap(year)) # Print Function if __name__ == '__main__': n = int(input()) print("".join(list(map(str,range(1,n+1))))) # Python If-Else if __name__ == '__main__': n = int(raw_input().strip()) if n % 2 == 1: print("Weird") else: if 2 <= n <= 5 or n > 20: print("Not Weird") else: print("Weird") # Arithmetic Operators if __name__ == '__main__': a = int(raw_input()) b = int(raw_input()) print(a+b) print(a-b) print(a*b) # Python Division if __name__ == '__main__': a = int(input()) b = int(input()) # Loops if __name__ == '__main__': n = int(raw_input()) for i in range(n): print(i**2) # Lists if __name__ == '__main__': N = int(input()) L = [] for _ in range(N): cmd = input().split() if cmd[0] == "pop": L.pop() elif cmd[0] == "sort": L.sort() elif cmd[0] == "reverse": L.reverse() elif cmd[0] == "print": print(L) elif cmd[0] == "insert": L.insert(int(cmd[1]), int(cmd[2])) elif cmd[0] == "remove": L.remove(int(cmd[1])) elif cmd[0] == "append": L.append(int(cmd[1])) # List Comprehension if __name__ == '__main__': x = int(raw_input()) y = int(raw_input()) z = int(raw_input()) n = int(raw_input()) l = [[x,y,z] for x in range(x+1) for y in range(y+1) for z in range(z+1) if x+y+z != n] print(l) # Find the Runner-Up Score! if __name__ == '__main__': n = int(raw_input()) arr = map(int, raw_input().split()) winner = max(arr) arr_wo_winner = [x for x in arr if x != winner] print(max(arr_wo_winner)) # Nested Lists names = [] scores = [] for _ in range(int(raw_input())): name = raw_input() score = float(raw_input()) names.append(name) scores.append(score) min_score = min(scores) l = [[x,y] for x,y in sorted(zip(scores,names)) if x != min_score] new_min = l[0][0] l = list(filter(lambda x: x[0] == new_min, l)) for x,y in l: print(y) # Finding the Percentage if __name__ == '__main__': n = int(raw_input()) student_marks = {} for _ in range(n): line = raw_input().split() name, scores = line[0], line[1:] scores = map(float, scores) student_marks[name] = scores query_name = raw_input() scores = student_marks[query_name] avg = sum(scores)/len(scores) print("{:.2f}".format(avg)) # Tuples if __name__ == '__main__': n = int(raw_input()) integer_list = map(int, raw_input().split()) print(hash(tuple(integer_list))) # Find a string def count_substring(string, sub_string): count = 0 n = len(string) m = len(sub_string) for i in range(n): if string[i] == sub_string[0]: found = True for j in range(1,m): if i+j >= n or string[i+j] != sub_string[j]: found = False break if found: count += 1 return count if __name__ == '__main__': # sWAP cASE def swap_case(s): string = "" for c in s: if c.isupper(): string += c.lower() else: string += c.upper() return string # String Split and Join def split_and_join(line): return "-".join(line.split()) # What's Your Name? def print_full_name(a, b): print "Hello {} {}! 
You just delved into python.".format(a,b) # Mutations def mutate_string(string, position, character): l = list(string) l[position] = character return "".join(l) # String Validators if __name__ == '__main__': s = raw_input() digit = False alnum = False alpha = False lower = False upper = False for c in s: if c.isalnum(): alnum = True if c.isdigit(): digit = True if c.isalpha(): alpha = True if c.islower(): lower = True if c.isupper(): upper = True print(alnum) print(alpha) print(digit) print(lower) print(upper) # Text Alignment thickness = int(raw_input()) c = 'H' for i in range(thickness): print (c*i).rjust(thickness-1)+c+(c*i).ljust(thickness-1) for i in range(thickness+1): print (c*thickness).center(thickness*2)+(c*thickness).center(thickness*6) for i in range((thickness+1)/2): print (c*thickness*5).center(thickness*6) for i in range(thickness+1): print (c*thickness).center(thickness*2)+(c*thickness).center(thickness*6) for i in range(thickness): print ((c*(thickness-i-1)).rjust(thickness)+c+(c*(thickness-i-1)).ljust(thickness)).rjust(thickness*6) # Text Wrap def wrap(string, max_width): wrapper = textwrap.TextWrapper(width=max_width) return "\n".join(wrapper.wrap(string)) # Designer Door Mat N, M = map(int, raw_input().split()) rows = (N-1)/2 for i in range(rows): print((".|."*(1+2*i)).center(M,"-")) print("WELCOME".center(M,"-")) for i in reversed(range(rows)): print((".|."*(1+2*i)).center(M,"-")) # String Formatting def print_formatted(number): width = len(bin(number)[2:]) for i in range(1,number+1): x = str(i).rjust(width) y = oct(i)[1:].rjust(width) w = hex(i)[2:].upper().rjust(width) z = bin(i)[2:].rjust(width) print("{} {} {} {}".format(x,y,w,z)) # Alphabet Rangoli import string def print_rangoli(size): width = (size-1)*4+1 L = list(string.ascii_lowercase)[:size] rows = [] for row in range(1,size+1): L1 = L[size-row:] L2 = L[size-row+1:size] L3 = list(reversed(L1))+L2 rows.append("-".join(L3)) rows += list(reversed(rows[:-1])) for row in rows: print(row.center(width,"-")) # Capitalize! def solve(s): s = list(s) s[0] = s[0].upper() for i,c in enumerate(s): if i > 0 and s[i-1] == ' ': s[i] = c.upper() return "".join(s) # The Minion Game def minion_game(string): stuart = 0 kevin = 0 n = len(string) vowels = ['A','E','I','O','U'] for i in range(n): if string[i] in vowels: kevin += n-i else: stuart += n-i if kevin == stuart: print("Draw") elif kevin > stuart: print("Kevin {}".format(kevin)) else: print("Stuart {}".format(stuart)) # Merge the Tools! def merge_the_tools(string, k): for i in range(len(string)/k): s = list(string[k*i:k*(i+1)]) seen = {} u = "" for i,x in enumerate(s): if x not in seen: seen[x] = True u += x print(u) # Introduction to Sets def average(array): s = set(array) return sum(s)/len(s) # No Idea! 
n, m = map(int, raw_input().split()) array = map(int, raw_input().split()) A = set(map(int, raw_input().split())) B = set(map(int, raw_input().split())) h = 0 occurrence = {} for x in array: if x not in occurrence: occurrence[x] = 1 else: occurrence[x] += 1 A1 = A.intersection(set(array)) B1 = B.intersection(set(array)) for x in A1: h += occurrence[x] for x in B1: h -= occurrence[x] print(h) # Symmetric Difference M = int(raw_input()) set1 = set(map(int, raw_input().split())) N = int(raw_input()) set2 = set(map(int, raw_input().split())) D = set1.difference(set2).union(set2.difference(set1)) D = sorted(list(D)) for x in D: print(x) # Set .add() n = int(raw_input()) s = set() for i in range(n): s.add(raw_input()) print(len(s)) # Set .discard(), .remove() & .pop() n = input() s = set(map(int, raw_input().split())) N = int(raw_input()) for i in range(N): op = raw_input() if op[0] == "p": s.pop() else: op, x = op.split() x = int(x) if op[0] == "r": s.remove(x) elif op[0] == "d": s.discard(x) print(sum(s)) # Set .union() Operation n = raw_input() students = set(map(int, raw_input().split())) n = raw_input() students = students.union(set(map(int, raw_input().split()))) print(len(students)) # Set .intersection() Operation n = raw_input() S = set(map(int,raw_input().split())) n = raw_input() S = S.intersection(set(map(int, raw_input().split()))) print(len(S)) # Set .difference() Operation raw_input() S = set(map(int, raw_input().split())) raw_input() S = S.difference(set(map(int, raw_input().split()))) print(len(S)) # Set .symmetric_difference() Operation raw_input() S1 = set(map(int, raw_input().split())) raw_input() S2 = set(map(int, raw_input().split())) S = S1.difference(S2).union(S2.difference(S1)) print(len(S)) # Set Mutations a = int(raw_input()) A = set(map(int, raw_input().split())) n = int(raw_input()) for i in range(n): op, size = raw_input().split() size = int(size) S = set(map(int, raw_input().split())) if op == "intersection_update": A.intersection_update(S) elif op == "update": A.update(S) elif op == "symmetric_difference_update": A.symmetric_difference_update(S) elif op == "difference_update": A.difference_update(S) print(sum(A)) # The Captain's Room n = int(raw_input()) L = list(map(int, raw_input().split())) S = set(L) for x in S: L.remove(x) print(S.difference(set(L)).pop()) # Check Subset n = int(raw_input()) for i in range(n): a = int(raw_input()) A = set(map(int, raw_input().split())) b = int(raw_input()) B = set(map(int, raw_input().split())) I = B.intersection(A) if I == A: print("True") else: print("False") # Check Strict Superset A = set(map(int, raw_input().split())) n = int(raw_input()) flag = True for i in range(n): S = set(map(int, raw_input().split())) if len(S.difference(A)) > 0: flag = False break print(flag) # collections.Counter() from collections import Counter n = int(raw_input()) shoes = list(map(int, raw_input().split())) n_customers = int(raw_input()) counter = Counter(shoes) earned = 0 for i in range(n_customers): size, pay = list(map(int, raw_input().split())) if size in counter and counter[size] > 0: counter[size] -= 1 earned += pay print(earned) # DefaultDict Tutorial from collections import defaultdict n, m = list(map(int, raw_input().split())) A = defaultdict(list) for i in range(n): x = raw_input() A[x].append(i+1) for i in range(m): x = raw_input() if x in A: print(" ".join(map(str, A[x]))) else: print("-1") # Collections.namedtuple() from collections import namedtuple n = int(raw_input()) Student = namedtuple('Student',raw_input().split()) students 
= [] marks = 0.0 for i in range(n): marks += int(Student._make(raw_input().split()).MARKS) print(round(marks/n,2)) # Collections.OrderedDict() from collections import OrderedDict d = OrderedDict() n = int(raw_input()) for i in range(n): row_parts = raw_input().split() price = int(row_parts[-1]) item = " ".join(row_parts[:-1]) if item not in d: d[item] = price else: d[item] += price for key in d.keys(): print("{} {}".format(key, d[key])) # Word Order from collections import defaultdict n = int(raw_input()) d = defaultdict(int) l = list() for i in range(n): w = raw_input() d[w] += 1 if d[w] == 1: l.append(w) print(len(d.keys())) print(" ".join(map(lambda w: str(d[w]), l))) # Collections.dequeu() from collections import deque d = deque() n = int(raw_input()) for i in range(n): op = raw_input() if op[:3] != "pop": op, x = op.split() x = int(x) if op == "append": d.append(x) elif op == "appendleft": d.appendleft(x) elif op == "pop": d.pop() elif op == "popleft": d.popleft() print(" ".join(map(str, d))) # Company Logo import math import os import random import re import sys from collections import defaultdict if __name__ == '__main__': s = raw_input() d = defaultdict(int) d_sort = defaultdict(list) for c in list(s): d[c] += 1 for key,val in d.items(): d_sort[val].append(key) i = 0 for key in reversed(sorted(d_sort.keys())): for val in sorted(d_sort[key]): print("{} {}".format(val,key)) i += 1 if i == 3: break if i == 3: break # Piling Up! from collections import deque T = int(raw_input()) for i in range(T): n = int(raw_input()) d = deque(map(int, raw_input().split())) last = d.popleft() result = "Yes" try: x = d.pop() if x > last: d.appendleft(last) last = x except: print("Yes") continue while True: try: x = d.popleft() except: break if x > last: try: d.appendleft(x) x = d.pop() if x > last: result = "No" break else: last = x except: break else: try: y = d.pop() if y > x and y <= last: d.appendleft(x) last = y else: d.append(y) except: break print(result) # Calendar Module import calendar days = ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"] month, day, year = list(map(int, raw_input().split())) print(days[calendar.weekday(year, month, day)]) # Time Delta import math import os import random import re import sys from datetime import datetime # Complete the time_delta function below. 
def time_delta(t1, t2): dt1 = datetime.strptime(t1, "%a %d %b %Y %X %z") dt2 = datetime.strptime(t2, "%a %d %b %Y %X %z") ms = (dt1 - dt2).total_seconds() return int(abs(ms)) if __name__ == '__main__': fptr = open(os.environ['OUTPUT_PATH'], 'w') t = int(input()) for t_itr in range(t): t1 = input() t2 = input() delta = time_delta(t1, t2) fptr.write(str(delta) + '\n') fptr.close() # Exceptions for _ in range(int(input())): try: a,b = map(int, input().split()) print(a//b) except Exception as e: print("Error Code:",e) # Zipped _, n = list(map(int, input().split())) scores = [list(map(float, input().split())) for _ in range(n)] for x in list(zip(*scores)): v = round(sum(x)/n, 1) print(v) # Athlete Sort import math import os import random import re import sys if __name__ == '__main__': nm = input().split() n = int(nm[0]) m = int(nm[1]) arr = [] for _ in range(n): arr.append(list(map(int, input().rstrip().split()))) k = int(input()) for x in sorted(arr, key=lambda x: x[k]): print(" ".join(map(str, x))) # ginortS s = input() lower = sorted(list(filter(lambda x: x.islower(), s))) upper = sorted(list(filter(lambda x: x.isupper(), s))) odd = sorted(list(filter(lambda x: x.isdigit() and int(x) % 2 == 1, s))) even = sorted(list(filter(lambda x: x.isdigit() and int(x) % 2 == 0, s))) s = lower + upper + odd + even print("".join(s)) # Map and Lambda Functions cube = lambda x: x**3 complete the lambda function def fibonacci(n): return a list of fibonacci numbers if n == 0: return [] elif n == 1: return [0] F = [0,1] for i in range(2,n): F.append(F[i-2] + F[i-1]) return F # Detect Floating Point Number import re T = int(input()) for _ in range(T): s = input() if re.search(r"^[\+|-]?\d*\.\d+$", s) == None: print("False") else: print("True") # Re.split() regex_pattern = r",|\." 
# Group(), Groups() & Groupdict() import re m = re.search(r'([0-9|a-z|A-Z])\1', input()) try: print(m.group(1)) except: print("-1") # Re.findall() & Re.finditer() import re V = "aeiouAEIOU" C = "[QWRTYPSDFGHJKLZXCVBNMqwrtypsdfghjklzxcvbnm]" m = re.findall("(?="+C+"(["+V+"]{2,})"+C+")", input()) if len(m) == 0: print("-1") else: for s in m: print(s) # Re.start() & Re.end() import re S = input() k = input() matches = list(re.finditer("(?=(%s))" % k, S)) if matches == []: print("(-1, -1)") for m in matches: print("({}, {})".format(m.start(1),m.end(1)-1)) # Regex Substitution import re def swap(m): s = m.group(0) if s == "&&": return "and" elif s == "||": return "or" N = int(input()) for _ in range(N): s = input() print(re.sub(r"(?<= )(&&|\|\|)(?= )",swap,s)) # Validating phone numbers import re for _ in range(int(input())): if re.search(r"^[789][0-9]{9}$", input()) == None: print("NO") else: print("YES") # Validating and Parsing Email Adresses import email.utils import re for _ in range(int(input())): addr = email.utils.parseaddr(input()) if re.search(r"^[a-zA-Z][\w|.|-]*@[a-zA-Z]+\.[a-zA-Z]{1,3}$", addr[1]) != None: print(email.utils.formataddr(addr)) # HTML Parser - Part 1 from html.parser import HTMLParser class MyHTMLParser(HTMLParser): def handle_startendtag(self, tag, attrs): print("Empty :", tag) for name, value in attrs: print("-> {} > {}".format(name, value)) def handle_starttag(self, tag, attrs): print("Start :", tag) for name, value in attrs: print("-> {} > {}".format(name, value)) def handle_endtag(self, tag): print("End :", tag) def handle_commenct(self, data): pass parser = MyHTMLParser() for _ in range(int(input())): parser.feed(input()) # HTML Parser - Part 2 from html.parser import HTMLParser class MyHTMLParser(HTMLParser): def handle_data(self, data): if data != "\n": print(">>> Data") print(data.strip()) def handle_comment(self, data): if "\n" in data: print(">>> Multi-line Comment") else: print(">>> Single-line Comment") print(data.strip()) html = "" for i in range(int(input())): html += input().rstrip() html += '\n' parser = MyHTMLParser() parser.feed(html) parser.close() # Detect HTML Tags, Attributes and Attribute Values from html.parser import HTMLParser class MyHTMLParser(HTMLParser): def handle_starttag(self, tag, attrs): print(tag) for name,value in attrs: print("-> {} > {}".format(name,value)) parser = MyHTMLParser() for _ in range(int(input())): parser.feed(input()) # Validating UID import re def check_uid(s): if re.search("[A-Z].*[A-Z]",s) == None: return "Invalid" if re.search("[0-9].*[0-9].*[0-9]",s) == None: return "Invalid" if re.search("^[a-zA-Z0-9]+$",s) == None: return "Invalid" if re.search(r"([a-zA-Z0-9]).*\1+",s) != None: return "Invalid" if len(s) != 10: return "Invalid" return "Valid" T = int(input()) for _ in range(T): print(check_uid(input())) # Validating Credit Card Numbers import re for _ in range(int(input())): s = input() if re.match("[456][0-9]{3}-?[0-9]{4}-?[0-9]{4}-?[0-9]{4}$", s) != None: if re.search(r"([0-9])\1{3,}", s.replace("-","")) != None: print("Invalid") else: print("Valid") else: print("Invalid") # Validating Postal Codes regex_integer_in_range = r"^[^0]\d{5}$" regex_alternating_repetitive_digit_pair = r"(\d)(?=\d\1)" # Matrix Script import math import os import random import re import sys first_multiple_input = input().rstrip().split() n = int(first_multiple_input[0]) m = int(first_multiple_input[1]) matrix = [] for _ in range(n): matrix_item = input() matrix.append(matrix_item) l = list("".join(matrix)) s = "" for i in range(m): 
s += "".join(l[i::m]) s = re.sub(r"([0-9a-zA-Z])[^0-9a-zA-Z]+([0-9a-zA-Z])",r"\1 \2",s) print(s) # XML 1 - Find the Score def get_attr_number(node): your code goes here score = 0 for elem in node.iter(): score += len(elem.attrib) return score # XML 2 - Find the Maximum Depth maxdepth = -1 def depth(elem, level): global maxdepth your code goes here if level == maxdepth: maxdepth += 1 for child in elem: depth(child, level+1) # Standardize Mobile Number Using Decorators def wrapper(f): def fun(l): complete the function for i,x in enumerate(l): x = x[len(x)-10:] l[i] = "+91 " + x[:5] + " " + x[5:] l.sort() return f(l) return fun # Decorators 2 - Name Directory import operator def person_lister(f): def inner(people): complete the function people = list(map(lambda x: [x[0],x[1],int(x[2]), x[3]], people)) return list(map(f, sorted(people, key=operator.itemgetter(2)))) return inner # Arrays def arrays(arr): return numpy.flip(numpy.array(arr, dtype=numpy.float32)) # Shape and Reshape import numpy arr = list(map(int, input().split())) print(numpy.array(arr).reshape((3,3))) # Transpose and Flatten import numpy arr = [] N, M = list(map(int, input().split())) for _ in range(N): arr.append(list(map(int, input().split()))) A = numpy.array(arr).reshape(N,M) print(numpy.transpose(A)) print(A.flatten()) # Concatenate import numpy N, M, P = list(map(int, input().split())) arr1 = [] arr2 = [] for _ in range(N): arr1.append(list(map(int, input().split()))) for _ in range(M): arr2.append(list(map(int, input().split()))) arr1 = numpy.array(arr1).reshape(N,P) arr2 = numpy.array(arr2).reshape(M,P) print(numpy.concatenate((arr1, arr2), axis=0)) # Zeros and Ones import numpy dim = list(map(int, input().split())) print(numpy.zeros(dim, dtype=numpy.int)) print(numpy.ones(dim, dtype=numpy.int)) # Eye and Identity import numpy N,M = list(map(int, input().split())) numpy.set_printoptions(sign=' ') print(numpy.eye(N,M)) # Array Mathematics import numpy N,M = list(map(int, input().split())) arr1 = [] arr2 = [] for _ in range(N): arr1.append(list(map(int, input().split()))) for _ in range(N): arr2.append(list(map(int, input().split()))) A = numpy.array(arr1) B = numpy.array(arr2) print(A+B) print(A-B) print(A*B) print(A//B) print(A%B) print(A**B) # Floor, Ceil and Rint import numpy numpy.set_printoptions(sign=' ') A = numpy.array(list(map(float, input().split()))) print(numpy.floor(A)) print(numpy.ceil(A)) print(numpy.rint(A)) # Sum and Prod import numpy N, _ = list(map(int,input().split())) arr = [] for _ in range(N): arr.append(list(map(int,input().split()))) A = numpy.array(arr) print(numpy.prod(numpy.sum(A,axis=0))) # Min and Max import numpy N = int(input().split()[0]) A = [] for _ in range(N): A.append(list(map(int, input().split()))) A = numpy.array(A) print(numpy.max(numpy.min(A,axis=1))) # Mean, Var and Std import numpy N = int(input().split()[0]) numpy.set_printoptions(legacy='1.13') A = [] for _ in range(N): A.append(list(map(int, input().split()))) A = numpy.array(A) print(numpy.mean(A,axis=1)) print(numpy.var(A,axis=0)) print(numpy.std(A,axis=None)) # Dot and Cross import numpy N = int(input()) arr1 = [] arr2 = [] for _ in range(N): arr1.append(input().split()) for _ in range(N): arr2.append(input().split()) A = numpy.array(arr1, int) B = numpy.array(arr2, int) print(numpy.dot(A,B)) # Inner and Outer import numpy A = numpy.array(input().split(),int) B = numpy.array(input().split(),int) print(numpy.inner(A,B)) print(numpy.outer(A,B)) # Polynomials import numpy 
print(numpy.polyval(numpy.array(input().split(),float),float(input()))) # Linear Algebra import numpy print(round(numpy.linalg.det(numpy.array([input().split() for _ in range(int(input()))], float)),2)) # Birthday Cake Candles def birthdayCakeCandles(candles): max_candle = max(candles) return candles.count(max_candle) if __name__ == '__main__': fptr = open(os.environ['OUTPUT_PATH'], 'w') candles_count = int(input().strip()) candles = list(map(int, input().rstrip().split())) result = birthdayCakeCandles(candles) fptr.write(str(result) + '\n') fptr.close() # Number Line Jumps def kangaroo(x1, v1, x2, v2): try: n = (x1-x2)/(v2-v1) except: return "NO" print(n) if n - int(n) == 0 and n>0: return "YES" else: return "NO" if __name__ == '__main__': fptr = open(os.environ['OUTPUT_PATH'], 'w') x1V1X2V2 = input().split() x1 = int(x1V1X2V2[0]) v1 = int(x1V1X2V2[1]) x2 = int(x1V1X2V2[2]) v2 = int(x1V1X2V2[3]) result = kangaroo(x1, v1, x2, v2) fptr.write(result + '\n') fptr.close() # Viral advertising def viralAdvertising(n): total = like = 2 for _ in range(1,n): like = math.floor(like*3/2) total += like return total if __name__ == '__main__': fptr = open(os.environ['OUTPUT_PATH'], 'w') n = int(input()) result = viralAdvertising(n) fptr.write(str(result) + '\n') fptr.close() # Recursive Digit Sum def superDigit(n, k): p = list(str(n)) while(len(p) > 1): p = list(str(sum(map(int,list(p))))) return sum(list(map(int,str(int(p[0])*k)))) if __name__ == '__main__': fptr = open(os.environ['OUTPUT_PATH'], 'w') nk = input().split() n = nk[0] k = int(nk[1]) result = superDigit(n, k) fptr.write(str(result) + '\n') fptr.close() # Insertion sort 1 def insertionSort1(n, arr): x = arr[-1] for i in range(n-2,-1,-1): if arr[i] > x: arr[i+1] = arr[i] elif arr[i] <= x: arr[i+1] = x print(" ".join(map(str,arr))) break print(" ".join(map(str,arr))) if i == 0: arr[i] = x print(" ".join(map(str,arr))) if __name__ == '__main__': n = int(input()) arr = list(map(int, input().rstrip().split())) insertionSort1(n, arr) # Insertion sort 2 def insertionSort2(n, arr): for i in range(1,len(arr)): if arr[i] > arr[i-1]: print(" ".join(map(str,arr))) else: j = i while j>=1 and arr[j] < arr[j-1]: y = arr[j-1] arr[j-1] = arr[j] arr[j] = y j -= 1 print(" ".join(map(str,arr))) if __name__ == '__main__': n = int(input()) arr = list(map(int, input().rstrip().split())) insertionSort2(n, arr)
[]
[]
[ "OUTPUT_PATH" ]
[]
["OUTPUT_PATH"]
python
1
0
examples/graphqldemo/server.go
package main

import (
	"log"
	"net/http"
	"os"

	"github.com/99designs/gqlgen/graphql/handler"
	"github.com/99designs/gqlgen/graphql/playground"
	"github.com/chandlersong/examples/graphqldemo/graph"
	"github.com/chandlersong/examples/graphqldemo/graph/generated"
)

const defaultPort = "8080"

func main() {
	port := os.Getenv("PORT")
	if port == "" {
		port = defaultPort
	}

	srv := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{Resolvers: &graph.Resolver{}}))

	http.Handle("/", playground.Handler("GraphQL playground", "/query"))
	http.Handle("/query", srv)

	log.Printf("connect to http://localhost:%s/ for GraphQL playground", port)
	log.Fatal(http.ListenAndServe(":"+port, nil))
}
[ "\"PORT\"" ]
[]
[ "PORT" ]
[]
["PORT"]
go
1
0
pkg/trace/sampler/remote_rates_test.go
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.

package sampler

import (
	"os"
	"testing"

	"github.com/DataDog/datadog-agent/pkg/proto/pbgo"
	"github.com/DataDog/datadog-agent/pkg/trace/pb"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestRemoteConfInit(t *testing.T) {
	assert := assert.New(t)
	// disabled by default
	assert.Nil(newRemoteRates())
	// subscription to subscriber fails
	old := os.Getenv("DD_APM_FEATURES")
	os.Setenv("DD_APM_FEATURES", "remote_rates")
	assert.Nil(newRemoteRates())
	os.Setenv("DD_APM_FEATURES", old)
	// todo:raphael mock grpc server
}

func newTestRemoteRates() *RemoteRates {
	return &RemoteRates{
		samplers: make(map[Signature]*Sampler),
		exit:     make(chan struct{}),
		stopped:  make(chan struct{}),
	}
}

func configGenerator(version uint64, rates pb.APMSampling) *pbgo.ConfigResponse {
	raw, _ := rates.MarshalMsg(nil)
	return &pbgo.ConfigResponse{
		ConfigDelegatedTargetVersion: version,
		TargetFiles:                  []*pbgo.File{{Raw: raw}},
	}
}

func TestRemoteTPSUpdate(t *testing.T) {
	assert := assert.New(t)

	type sampler struct {
		service   string
		env       string
		targetTPS float64
	}

	var testSteps = []struct {
		name             string
		ratesToApply     pb.APMSampling
		countServices    []ServiceSignature
		expectedSamplers []sampler
		version          uint64
	}{
		{
			name: "first rates received",
			ratesToApply: pb.APMSampling{
				TargetTps: []pb.TargetTPS{
					{
						Service: "willBeRemoved",
						Value:   3.2,
					},
					{
						Service: "willBeRemoved",
						Env:     "env2",
						Value:   33,
					},
					{
						Service: "keep",
						Value:   1,
					},
				},
			},
			version: 30,
		},
		{
			name: "enable a sampler after counting a matching service",
			countServices: []ServiceSignature{
				{
					Name: "willBeRemoved",
				},
			},
			expectedSamplers: []sampler{
				{
					service:   "willBeRemoved",
					targetTPS: 3.2,
				},
			},
			version: 30,
		},
		{
			name: "nothing happens when counting a service not set remotely",
			countServices: []ServiceSignature{
				{
					Name: "no remote tps",
				},
			},
			expectedSamplers: []sampler{
				{
					service:   "willBeRemoved",
					targetTPS: 3.2,
				},
			},
			version: 30,
		},
		{
			name: "add 2 more samplers",
			countServices: []ServiceSignature{
				{
					Name: "keep",
				},
				{
					Name: "willBeRemoved",
					Env:  "env2",
				},
			},
			expectedSamplers: []sampler{
				{
					service:   "willBeRemoved",
					targetTPS: 3.2,
				},
				{
					service:   "willBeRemoved",
					env:       "env2",
					targetTPS: 33,
				},
				{
					service:   "keep",
					targetTPS: 1,
				},
			},
			version: 30,
		},
		{
			name: "receive new remote rates, non matching samplers are trimmed",
			ratesToApply: pb.APMSampling{
				TargetTps: []pb.TargetTPS{
					{
						Service: "keep",
						Value:   27,
					},
				},
			},
			expectedSamplers: []sampler{
				{
					service:   "keep",
					targetTPS: 27,
				},
			},
			version: 35,
		},
	}

	r := newTestRemoteRates()
	for _, step := range testSteps {
		t.Log(step.name)
		if step.ratesToApply.TargetTps != nil {
			r.loadNewConfig(configGenerator(step.version, step.ratesToApply))
		}
		for _, s := range step.countServices {
			r.CountSignature(s.Hash())
		}

		assert.Len(r.samplers, len(step.expectedSamplers))

		for _, expectedS := range step.expectedSamplers {
			sig := ServiceSignature{Name: expectedS.service, Env: expectedS.env}.Hash()
			s, ok := r.samplers[sig]
			require.True(t, ok)
			root := &pb.Span{Metrics: map[string]float64{}}

			assert.Equal(expectedS.targetTPS, s.targetTPS.Load())
			r.CountSample(root, sig)

			tpsTag, ok := root.Metrics[tagRemoteTPS]
			assert.True(ok)
			assert.Equal(expectedS.targetTPS, tpsTag)

			versionTag, ok := root.Metrics[tagRemoteVersion]
			assert.True(ok)
			assert.Equal(float64(step.version), versionTag)
		}
	}
}
[ "\"DD_APM_FEATURES\"" ]
[]
[ "DD_APM_FEATURES" ]
[]
["DD_APM_FEATURES"]
go
1
0
docs/conf.py
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.

import os
import sys

import django

if os.getenv("READTHEDOCS", default=False) == "True":
    sys.path.insert(0, os.path.abspath(".."))
    os.environ["DJANGO_READ_DOT_ENV_FILE"] = "True"
    os.environ["USE_DOCKER"] = "no"
else:
    sys.path.insert(0, os.path.abspath("/app"))
os.environ["DATABASE_URL"] = "sqlite:///readthedocs.db"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
django.setup()

# -- Project information -----------------------------------------------------

project = "Sudoku Teacher"
copyright = """2021, Idan Nikritin"""
author = "Idan Nikritin"

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.napoleon",
]

# Add any paths that contain templates here, relative to this directory.
# templates_path = ["_templates"]

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
[]
[]
[ "DJANGO_READ_DOT_ENV_FILE", "DATABASE_URL", "USE_DOCKER", "READTHEDOCS" ]
[]
["DJANGO_READ_DOT_ENV_FILE", "DATABASE_URL", "USE_DOCKER", "READTHEDOCS"]
python
4
0
pkg/webhook/util/util.go
/*
Copyright 2020 The Kruise Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"os"
	"strconv"

	"k8s.io/klog"
)

func GetHost() string {
	return os.Getenv("WEBHOOK_HOST")
}

func GetNamespace() string {
	if ns := os.Getenv("POD_NAMESPACE"); len(ns) > 0 {
		return ns
	}
	return "kruise-system"
}

func GetSecretName() string {
	if name := os.Getenv("SECRET_NAME"); len(name) > 0 {
		return name
	}
	return "kruise-webhook-certs"
}

func GetServiceName() string {
	if name := os.Getenv("SERVICE_NAME"); len(name) > 0 {
		return name
	}
	return "kruise-webhook-service"
}

func GetPort() int {
	port := 9876
	if p := os.Getenv("WEBHOOK_PORT"); len(p) > 0 {
		if p, err := strconv.ParseInt(p, 10, 32); err == nil {
			port = int(p)
		} else {
			klog.Fatalf("failed to convert WEBHOOK_PORT=%v in env: %v", p, err)
		}
	}
	return port
}

func GetCertDir() string {
	if p := os.Getenv("WEBHOOK_CERT_DIR"); len(p) > 0 {
		return p
	}
	return "/tmp/kruise-webhook-certs"
}
[ "\"WEBHOOK_HOST\"", "\"POD_NAMESPACE\"", "\"SECRET_NAME\"", "\"SERVICE_NAME\"", "\"WEBHOOK_PORT\"", "\"WEBHOOK_CERT_DIR\"" ]
[]
[ "SERVICE_NAME", "WEBHOOK_HOST", "WEBHOOK_PORT", "POD_NAMESPACE", "SECRET_NAME", "WEBHOOK_CERT_DIR" ]
[]
["SERVICE_NAME", "WEBHOOK_HOST", "WEBHOOK_PORT", "POD_NAMESPACE", "SECRET_NAME", "WEBHOOK_CERT_DIR"]
go
6
0
pkg/api/management/main.go
package main

import (
	"context"
	"net/http"
	"os"

	"github.com/rancher/rancher/pkg/api/management/server"
	"github.com/rancher/types/config"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	if err := run(); err != nil {
		panic(err)
	}
}

func run() error {
	kubeConfig, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		return err
	}

	management, err := config.NewManagementContext(*kubeConfig)
	if err != nil {
		return err
	}

	ctx := context.Background()

	var handler http.Handler
	handler, err = server.New(ctx, 8080, 8443, management, func() http.Handler {
		return handler
	})
	if err != nil {
		return err
	}

	<-ctx.Done()
	return ctx.Err()
}
[ "\"KUBECONFIG\"" ]
[]
[ "KUBECONFIG" ]
[]
["KUBECONFIG"]
go
1
0
wolframclient/evaluation/kernel/path.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals

from wolframclient.utils import six
from wolframclient.utils.api import os


def explore_paths(*paths):
    highest_version = -1
    best_path = None
    for root in paths:
        if os.isdir(root):
            for version in os.listdir(root):
                full_path = os.path_join(root, version)
                if os.isdir(full_path):
                    try:
                        v_num = float(version)
                    except ValueError:
                        v_num = -2
                    if v_num > highest_version and v_num > 0:
                        highest_version = v_num
                        best_path = full_path
    if best_path:
        yield best_path


def installation_directories():
    env = os.environ.get("WOLFRAM_INSTALLATION_DIRECTORY", None)
    if env:
        yield env
    if six.WINDOWS:
        for p in explore_paths(
            "C:\\Program Files\\Wolfram Research\\Wolfram Desktop",
            "C:\\Program Files\\Wolfram Research\\Mathematica",
            "C:\\Program Files\\Wolfram Research\\Wolfram Engine",
        ):
            yield p
    elif six.LINUX:
        for p in explore_paths(
            "/usr/local/Wolfram/Desktop",
            "/usr/local/Wolfram/Mathematica",
            "/usr/local/Wolfram/WolframEngine",
        ):
            yield p
    elif six.MACOS:
        yield "/Applications/Wolfram Desktop.app/Contents"
        yield "/Applications/Mathematica.app/Contents"
        yield "/Applications/Wolfram Engine.app/Contents"


def exe_path():
    if six.WINDOWS:
        return "WolframKernel.exe"
    elif six.LINUX:
        return "Executables/WolframKernel"
    elif six.MACOS:
        return "MacOS/WolframKernel"


def find_default_kernel_path():
    """ Look for the most recent installed kernel. """
    rel = exe_path()
    for path in installation_directories():
        if rel:
            path = os.path_join(path, rel)
        if os.isfile(path):
            return path
[]
[]
[ "WOLFRAM_INSTALLATION_DIRECTORY" ]
[]
["WOLFRAM_INSTALLATION_DIRECTORY"]
python
1
0
desktop/core/src/desktop/tests.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import desktop import desktop.urls import desktop.conf import logging import json import os import time import desktop.views as views import proxy.conf from nose.plugins.attrib import attr from nose.plugins.skip import SkipTest from nose.tools import assert_true, assert_false, assert_equal, assert_not_equal, assert_raises from django.conf.urls.defaults import patterns, url from django.contrib.auth.models import User from django.core.urlresolvers import reverse from django.http import HttpResponse from django.db.models import query, CharField, SmallIntegerField from useradmin.models import GroupPermission from beeswax.conf import HIVE_SERVER_HOST from desktop.lib import django_mako from desktop.lib.django_test_util import make_logged_in_client from desktop.lib.paginator import Paginator from desktop.lib.conf import validate_path from desktop.lib.django_util import TruncatingModel from desktop.lib.exceptions_renderable import PopupException from desktop.lib.test_utils import grant_access from desktop.models import Document from desktop.views import check_config, home from pig.models import PigScript def setup_test_environment(): """ Sets up mako to signal template rendering. """ django_mako.render_to_string = django_mako.render_to_string_test setup_test_environment.__test__ = False def teardown_test_environment(): """ This method is called by nose_runner when the tests all finish. This helps track down when tests aren't cleaning up after themselves and leaving threads hanging around. """ import threading # We should shut down all relevant threads by test completion. threads = list(threading.enumerate()) try: import threadframe import traceback if len(threads) > 1: for v in threadframe.dict().values(): traceback.print_stack(v) finally: # threadframe is only available in the dev build. 
pass assert 1 == len(threads), threads django_mako.render_to_string = django_mako.render_to_string_normal teardown_test_environment.__test__ = False def test_home(): c = make_logged_in_client(username="test_home", groupname="test_home", recreate=True, is_superuser=False) user = User.objects.get(username="test_home") response = c.get(reverse(home)) assert_equal(["notmine", "trash", "mine", "history"], json.loads(response.context['json_tags']).keys()) assert_equal(200, response.status_code) script, created = PigScript.objects.get_or_create(owner=user) doc = Document.objects.link(script, owner=script.owner, name='test_home') response = c.get(reverse(home)) assert_true(str(doc.id) in json.loads(response.context['json_documents'])) response = c.get(reverse(home)) tags = json.loads(response.context['json_tags']) assert_equal([doc.id], tags['mine'][0]['docs'], tags) assert_equal([], tags['trash']['docs'], tags) assert_equal([], tags['history']['docs'], tags) doc.send_to_trash() response = c.get(reverse(home)) tags = json.loads(response.context['json_tags']) assert_equal([], tags['mine'][0]['docs'], tags) assert_equal([doc.id], tags['trash']['docs'], tags) assert_equal([], tags['history']['docs'], tags) doc.restore_from_trash() response = c.get(reverse(home)) tags = json.loads(response.context['json_tags']) assert_equal([doc.id], tags['mine'][0]['docs'], tags) assert_equal([], tags['trash']['docs'], tags) assert_equal([], tags['history']['docs'], tags) doc.add_to_history() response = c.get(reverse(home)) tags = json.loads(response.context['json_tags']) assert_equal([], tags['mine'][0]['docs'], tags) assert_equal([], tags['trash']['docs'], tags) assert_equal([doc.id], tags['history']['docs'], tags) def test_skip_wizard(): c = make_logged_in_client() # is_superuser response = c.get('/', follow=True) assert_true(['admin_wizard.mako' in _template.filename for _template in response.template], [_template.filename for _template in response.template]) c.cookies['hueLandingPage'] = 'home' response = c.get('/', follow=True) assert_true(['home.mako' in _template.filename for _template in response.template], [_template.filename for _template in response.template]) c.cookies['hueLandingPage'] = '' response = c.get('/', follow=True) assert_true(['admin_wizard.mako' in _template.filename for _template in response.template], [_template.filename for _template in response.template]) c = make_logged_in_client(username="test_skip_wizard", password="test_skip_wizard", is_superuser=False) response = c.get('/', follow=True) assert_true(['home.mako' in _template.filename for _template in response.template], [_template.filename for _template in response.template]) c.cookies['hueLandingPage'] = 'home' response = c.get('/', follow=True) assert_true(['home.mako' in _template.filename for _template in response.template], [_template.filename for _template in response.template]) c.cookies['hueLandingPage'] = '' response = c.get('/', follow=True) assert_true(['home.mako' in _template.filename for _template in response.template], [_template.filename for _template in response.template]) def test_log_view(): c = make_logged_in_client() URL = reverse(views.log_view) LOG = logging.getLogger(__name__) LOG.warn('une voix m’a réveillé') # UnicodeDecodeError: 'ascii' codec can't decode byte... 
should not happen response = c.get(URL) assert_equal(200, response.status_code) c = make_logged_in_client() URL = reverse(views.log_view) LOG = logging.getLogger(__name__) LOG.warn('Got response: PK\x03\x04\n\x00\x00\x08\x00\x00\xad\x0cN?\x00\x00\x00\x00') # DjangoUnicodeDecodeError: 'utf8' codec can't decode byte 0xad in position 75: invalid start byte... should not happen response = c.get(URL) assert_equal(200, response.status_code) def test_download_log_view(): c = make_logged_in_client() URL = reverse(views.download_log_view) LOG = logging.getLogger(__name__) LOG.warn(u'une voix m’a réveillé') # UnicodeDecodeError: 'ascii' codec can't decode byte... should not happen response = c.get(URL) assert_equal("application/zip", response.get('Content-Type', '')) def test_dump_config(): c = make_logged_in_client() CANARY = "abracadabra" # Depending on the order of the conf.initialize() in settings, the set_for_testing() are not seen in the global settings variable clear = HIVE_SERVER_HOST.set_for_testing(CANARY) response1 = c.get(reverse('desktop.views.dump_config')) assert_true(CANARY in response1.content, response1.content) response2 = c.get(reverse('desktop.views.dump_config'), dict(private="true")) assert_true(CANARY in response2.content) # There are more private variables... assert_true(len(response1.content) < len(response2.content)) clear() CANARY = "(localhost|127\.0\.0\.1):(50030|50070|50060|50075)" clear = proxy.conf.WHITELIST.set_for_testing(CANARY) response1 = c.get(reverse('desktop.views.dump_config')) assert_true(CANARY in response1.content) clear() # Malformed port per HUE-674 CANARY = "asdfoijaoidfjaosdjffjfjaoojosjfiojdosjoidjfoa" clear = HIVE_SERVER_HOST.set_for_testing(CANARY) response1 = c.get(reverse('desktop.views.dump_config')) assert_true(CANARY in response1.content, response1.content) clear() CANARY = '/tmp/spacé.dat' finish = proxy.conf.WHITELIST.set_for_testing(CANARY) try: response = c.get(reverse('desktop.views.dump_config')) assert_true(CANARY in response.content, response.content) finally: finish() # Not showing some passwords response = c.get(reverse('desktop.views.dump_config')) assert_false('bind_password' in response.content) # Login as someone else client_not_me = make_logged_in_client(username='not_me', is_superuser=False, groupname='test') grant_access("not_me", "test", "desktop") response = client_not_me.get(reverse('desktop.views.dump_config')) assert_true("You must be a superuser" in response.content, response.content) os.environ["HUE_CONF_DIR"] = "/tmp/test_hue_conf_dir" resp = c.get(reverse('desktop.views.dump_config')) del os.environ["HUE_CONF_DIR"] assert_true('/tmp/test_hue_conf_dir' in resp.content, resp) def test_prefs(): c = make_logged_in_client() # Get everything response = c.get('/desktop/prefs/') assert_equal('{}', response.content) # Set and get response = c.get('/desktop/prefs/foo', dict(set="bar")) assert_equal('true', response.content) response = c.get('/desktop/prefs/foo') assert_equal('"bar"', response.content) # Reset (use post this time) c.post('/desktop/prefs/foo', dict(set="baz")) response = c.get('/desktop/prefs/foo') assert_equal('"baz"', response.content) # Check multiple values c.post('/desktop/prefs/elephant', dict(set="room")) response = c.get('/desktop/prefs/') assert_true("baz" in response.content) assert_true("room" in response.content) # Delete everything c.get('/desktop/prefs/elephant', dict(delete="")) c.get('/desktop/prefs/foo', dict(delete="")) response = c.get('/desktop/prefs/') assert_equal('{}', response.content) # 
Check non-existent value response = c.get('/desktop/prefs/doesNotExist') assert_equal('null', response.content) def test_status_bar(): """ Subs out the status_bar_views registry with temporary examples. Tests handling of errors on view functions. """ backup = views._status_bar_views views._status_bar_views = [] c = make_logged_in_client() views.register_status_bar_view(lambda _: HttpResponse("foo", status=200)) views.register_status_bar_view(lambda _: HttpResponse("bar")) views.register_status_bar_view(lambda _: None) def f(r): raise Exception() views.register_status_bar_view(f) response = c.get("/desktop/status_bar") assert_equal("foobar", response.content) views._status_bar_views = backup def test_paginator(): """ Test that the paginator works with partial list. """ def assert_page(page, data, start, end): assert_equal(page.object_list, data) assert_equal(page.start_index(), start) assert_equal(page.end_index(), end) # First page 1-20 obj = range(20) pgn = Paginator(obj, per_page=20, total=25) assert_page(pgn.page(1), obj, 1, 20) # Second page 21-25 obj = range(5) pgn = Paginator(obj, per_page=20, total=25) assert_page(pgn.page(2), obj, 21, 25) # Handle extra data on first page (22 items on a 20-page) obj = range(22) pgn = Paginator(obj, per_page=20, total=25) assert_page(pgn.page(1), range(20), 1, 20) # Handle extra data on second page (22 items on a 20-page) obj = range(22) pgn = Paginator(obj, per_page=20, total=25) assert_page(pgn.page(2), range(5), 21, 25) # Handle total < len(obj). Only works for QuerySet. obj = query.QuerySet() obj._result_cache = range(10) pgn = Paginator(obj, per_page=10, total=9) assert_page(pgn.page(1), range(10), 1, 10) # Still works with a normal complete list obj = range(25) pgn = Paginator(obj, per_page=20) assert_page(pgn.page(1), range(20), 1, 20) assert_page(pgn.page(2), range(20, 25), 21, 25) def test_thread_dump(): c = make_logged_in_client() response = c.get("/desktop/debug/threads") assert_true("test_thread_dump" in response.content) def test_truncating_model(): class TinyModel(TruncatingModel): short_field = CharField(max_length=10) non_string_field = SmallIntegerField() a = TinyModel() a.short_field = 'a' * 9 # One less than it's max length assert_true(a.short_field == 'a' * 9, 'Short-enough field does not get truncated') a.short_field = 'a' * 11 # One more than it's max_length assert_true(a.short_field == 'a' * 10, 'Too-long field gets truncated') a.non_string_field = 10**10 assert_true(a.non_string_field == 10**10, 'non-string fields are not truncated') def test_error_handling(): raise SkipTest restore_django_debug = desktop.conf.DJANGO_DEBUG_MODE.set_for_testing(False) restore_500_debug = desktop.conf.HTTP_500_DEBUG_MODE.set_for_testing(False) exc_msg = "error_raising_view: Test earráid handling" def error_raising_view(request, *args, **kwargs): raise Exception(exc_msg) def popup_exception_view(request, *args, **kwargs): raise PopupException(exc_msg, title="earráid", detail=exc_msg) # Add an error view error_url_pat = patterns('', url('^500_internal_error$', error_raising_view), url('^popup_exception$', popup_exception_view)) desktop.urls.urlpatterns.extend(error_url_pat) try: def store_exc_info(*args, **kwargs): pass # Disable the test client's exception forwarding c = make_logged_in_client() c.store_exc_info = store_exc_info response = c.get('/500_internal_error') assert_true(any(["500.mako" in _template.filename for _template in response.template])) assert_true('Thank you for your patience' in response.content) assert_true(exc_msg not in 
response.content) # Now test the 500 handler with backtrace desktop.conf.HTTP_500_DEBUG_MODE.set_for_testing(True) response = c.get('/500_internal_error') assert_equal(response.template.name, 'Technical 500 template') assert_true(exc_msg in response.content) # PopupException response = c.get('/popup_exception') assert_true(any(["popup_error.mako" in _template.filename for _template in response.template])) assert_true(exc_msg in response.content) finally: # Restore the world for i in error_url_pat: desktop.urls.urlpatterns.remove(i) restore_django_debug() restore_500_debug() def test_desktop_permissions(): USERNAME = 'test_core_permissions' GROUPNAME = 'default' c = make_logged_in_client(USERNAME, groupname=GROUPNAME, recreate=True, is_superuser=False) # Access to the basic works assert_equal(200, c.get('/accounts/login/', follow=True).status_code) assert_equal(200, c.get('/accounts/logout', follow=True).status_code) assert_equal(200, c.get('/home', follow=True).status_code) def test_app_permissions(): USERNAME = 'test_app_permissions' GROUPNAME = 'impala_only' c = make_logged_in_client(USERNAME, groupname=GROUPNAME, recreate=True, is_superuser=False) # Reset all perms GroupPermission.objects.filter(group__name=GROUPNAME).delete() # Access to nothing assert_equal(401, c.get('/beeswax', follow=True).status_code) assert_equal(401, c.get('/impala', follow=True).status_code) assert_equal(401, c.get('/hbase', follow=True).status_code) # Add access to beeswax grant_access(USERNAME, GROUPNAME, "beeswax") assert_equal(200, c.get('/beeswax', follow=True).status_code) assert_equal(401, c.get('/impala', follow=True).status_code) assert_equal(401, c.get('/hbase', follow=True).status_code) # Add access to hbase grant_access(USERNAME, GROUPNAME, "hbase") assert_equal(200, c.get('/beeswax', follow=True).status_code) assert_equal(401, c.get('/impala', follow=True).status_code) assert_equal(200, c.get('/hbase', follow=True).status_code) # Reset all perms GroupPermission.objects.filter(group__name=GROUPNAME).delete() assert_equal(401, c.get('/beeswax', follow=True).status_code) assert_equal(401, c.get('/impala', follow=True).status_code) assert_equal(401, c.get('/hbase', follow=True).status_code) # Test only impala perm grant_access(USERNAME, GROUPNAME, "impala") assert_equal(401, c.get('/beeswax', follow=True).status_code) assert_equal(200, c.get('/impala', follow=True).status_code) assert_equal(401, c.get('/hbase', follow=True).status_code) def test_error_handling_failure(): # Change rewrite_user to call has_hue_permission # Try to get filebrowser page # test for default 500 page # Restore rewrite_user import desktop.auth.backend c = make_logged_in_client() restore_django_debug = desktop.conf.DJANGO_DEBUG_MODE.set_for_testing(False) restore_500_debug = desktop.conf.HTTP_500_DEBUG_MODE.set_for_testing(False) original_rewrite_user = desktop.auth.backend.rewrite_user def rewrite_user(user): user = original_rewrite_user(user) delattr(user, 'has_hue_permission') return user original_rewrite_user = desktop.auth.backend.rewrite_user desktop.auth.backend.rewrite_user = rewrite_user try: # Make sure we are showing default 500.html page. 
# See django.test.client#L246 assert_raises(AttributeError, c.get, reverse('desktop.views.dump_config')) finally: # Restore the world restore_django_debug() restore_500_debug() desktop.auth.backend.rewrite_user = original_rewrite_user def test_404_handling(): view_name = '/the-view-that-is-not-there' c = make_logged_in_client() response = c.get(view_name) assert_true(any(['404.mako' in _template.filename for _template in response.template]), response.template) assert_true('Not Found' in response.content) assert_true(view_name in response.content) class RecordingHandler(logging.Handler): def __init__(self, *args, **kwargs): logging.Handler.__init__(self, *args, **kwargs) self.records = [] def emit(self, r): self.records.append(r) def test_log_event(): c = make_logged_in_client() root = logging.getLogger("desktop.views.log_frontend_event") handler = RecordingHandler() root.addHandler(handler) c.get("/desktop/log_frontend_event?level=info&message=foo") assert_equal("INFO", handler.records[-1].levelname) assert_equal("Untrusted log event from user test: foo", handler.records[-1].message) assert_equal("desktop.views.log_frontend_event", handler.records[-1].name) c.get("/desktop/log_frontend_event?level=error&message=foo2") assert_equal("ERROR", handler.records[-1].levelname) assert_equal("Untrusted log event from user test: foo2", handler.records[-1].message) c.get("/desktop/log_frontend_event?message=foo3") assert_equal("INFO", handler.records[-1].levelname) assert_equal("Untrusted log event from user test: foo3", handler.records[-1].message) c.post("/desktop/log_frontend_event", { "message": "01234567" * 1024}) assert_equal("INFO", handler.records[-1].levelname) assert_equal("Untrusted log event from user test: " + "01234567"*(1024/8), handler.records[-1].message) root.removeHandler(handler) def test_validate_path(): reset = desktop.conf.SSL_PRIVATE_KEY.set_for_testing('/') assert_equal([], validate_path(desktop.conf.SSL_PRIVATE_KEY, is_dir=True)) reset() reset = desktop.conf.SSL_PRIVATE_KEY.set_for_testing('/tmm/does_not_exist') assert_not_equal([], validate_path(desktop.conf.SSL_PRIVATE_KEY, is_dir=True)) reset() @attr('requires_hadoop') def test_config_check(): reset = ( desktop.conf.SECRET_KEY.set_for_testing(''), desktop.conf.SSL_CERTIFICATE.set_for_testing('foobar'), desktop.conf.SSL_PRIVATE_KEY.set_for_testing(''), desktop.conf.DEFAULT_SITE_ENCODING.set_for_testing('klingon') ) try: cli = make_logged_in_client() resp = cli.get('/desktop/debug/check_config') assert_true('Secret key should be configured' in resp.content, resp) assert_true('desktop.ssl_certificate' in resp.content, resp) assert_true('Path does not exist' in resp.content, resp) assert_true('SSL private key file should be set' in resp.content, resp) assert_true('klingon' in resp.content, resp) assert_true('Encoding not supported' in resp.content, resp) # Set HUE_CONF_DIR and make sure check_config returns appropriate conf os.environ["HUE_CONF_DIR"] = "/tmp/test_hue_conf_dir" resp = cli.get('/desktop/debug/check_config') del os.environ["HUE_CONF_DIR"] assert_true('/tmp/test_hue_conf_dir' in resp.content, resp) finally: for old_conf in reset: old_conf() def test_last_access_time(): c = make_logged_in_client(username="access_test") c.post('/accounts/login/') login = desktop.auth.views.get_current_users() before_access_time = time.time() response = c.get('/home') after_access_time = time.time() access = desktop.auth.views.get_current_users() user = response.context['user'] login_time = login[user]['time'] access_time = 
access[user]['time'] # Check that 'last_access_time' is later than login time assert_true(login_time < access_time) # Check that 'last_access_time' is in between the timestamps before and after the last access path assert_true(before_access_time < access_time) assert_true(access_time < after_access_time) def test_ui_customizations(): custom_banner = 'test ui customization' reset = ( desktop.conf.CUSTOM.BANNER_TOP_HTML.set_for_testing(custom_banner), ) try: c = make_logged_in_client() resp = c.get('/about', follow=True) assert_true(custom_banner in resp.content, resp) finally: for old_conf in reset: old_conf() @attr('requires_hadoop') def test_check_config_ajax(): c = make_logged_in_client() response = c.get(reverse(check_config)) assert_true("misconfiguration" in response.content, response.content)
[]
[]
[ "HUE_CONF_DIR" ]
[]
["HUE_CONF_DIR"]
python
1
0
cmd/uptermd/main.go
package main import ( "os" "github.com/heroku/rollrus" "github.com/jingweno/upterm/cmd/uptermd/internal/command" log "github.com/sirupsen/logrus" ) func main() { logger := log.New() token := os.Getenv("ROLLBAR_ACCESS_TOKEN") if token != "" { logger.Info("Using Rollbar for error reporting") defer rollrus.ReportPanic(token, "uptermd.upterm.dev") logger.AddHook(rollrus.NewHook(token, "uptermd.upterm.dev")) } if err := command.Root(logger).Execute(); err != nil { logger.Fatal(err) } }
[ "\"ROLLBAR_ACCESS_TOKEN\"" ]
[]
[ "ROLLBAR_ACCESS_TOKEN" ]
[]
["ROLLBAR_ACCESS_TOKEN"]
go
1
0
example_test.go
package sm2lib import ( "bytes" "encoding/base64" "os" "testing" ) var ( privateKey PrivateKey publicKey PublicKey privateKeyPWD = []byte("123456") privateBase64File = "./private_base64.key" privateHexFile = "./private_hex.key" publicBase64File = "./public_base64.key" publicHexFile = "./public_hex.key" plaintext = "abc" ) // Setup before running the tests func TestMain(m *testing.M) { // Run the test cases m.Run() os.Exit(0) } // Test entry point func TestAll(t *testing.T) { // Private key conversion t.Run("TestPrivateKey_New", TestPrivateKey_New) t.Run("TestPrivateKey_GetPublicKey", TestPrivateKey_GetPublicKey) t.Run("TestPrivateKey_Raw", TestPrivateKey_Raw) t.Run("TestPrivateKey_RawBytes", TestPrivateKey_RawBytes) t.Run("TestPrivateKey_Base64", TestPrivateKey_Base64) t.Run("TestPrivateKey_Base64File", TestPrivateKey_Base64File) t.Run("TestPrivateKey_Hex", TestPrivateKey_Hex) t.Run("TestPrivateKey_HexFile", TestPrivateKey_HexFile) t.Run("TestPublicKey_Raw", TestPublicKey_Raw) t.Run("TestPublicKey_RawBytes", TestPublicKey_RawBytes) t.Run("TestPublicKey_Base64", TestPublicKey_Base64) t.Run("TestPublicKey_Base64File", TestPublicKey_Base64File) t.Run("TestPublicKey_Hex", TestPublicKey_Hex) t.Run("TestPublicKey_HexFile", TestPublicKey_HexFile) t.Run("TestSign", TestSign) t.Run("TestSignBase64", TestSignBase64) t.Run("TestSignHex", TestSignHex) t.Run("TestEncrypt", TestEncrypt) t.Run("TestEncryptASN1", TestEncryptASN1) t.Run("TestEncryptBase64", TestEncryptBase64) t.Run("TestEncryptASN1Base64", TestEncryptASN1Base64) t.Run("TestEncryptHex", TestEncryptHex) t.Run("TestEncryptASN1Hex", TestEncryptASN1Hex) } func TestPrivateKey_New(t *testing.T) { err := privateKey.New() if err != nil { t.Error(err) return } } func TestPrivateKey_Raw(t *testing.T) { err := privateKey.FromRaw(privateKey.ToRaw()) if err != nil { t.Error(err) return } } func TestPrivateKey_RawBytes(t *testing.T) { src, err := privateKey.ToRawBytes(privateKeyPWD) if err != nil { t.Error(err) return } err = privateKey.FromRawBytes(src, privateKeyPWD) if err != nil { t.Error(err) return } } func TestPrivateKey_Base64(t *testing.T) { src, err := privateKey.ToBase64(base64.RawURLEncoding, privateKeyPWD) if err != nil { t.Error(err) return } err = privateKey.FromBase64(base64.RawURLEncoding, src, privateKeyPWD) if err != nil { t.Error(err) return } } func TestPrivateKey_Base64File(t *testing.T) { err := privateKey.ToBase64File(base64.RawURLEncoding, privateBase64File, privateKeyPWD, 0600) if err != nil { t.Error(err) return } err = privateKey.FromBase64File(base64.RawURLEncoding, privateBase64File, privateKeyPWD) if err != nil { t.Error(err) return } } func TestPrivateKey_Hex(t *testing.T) { src, err := privateKey.ToHex(privateKeyPWD) if err != nil { t.Error(err) return } err = privateKey.FromHex(src, privateKeyPWD) if err != nil { t.Error(err) return } } func TestPrivateKey_HexFile(t *testing.T) { err := privateKey.ToHexFile(privateHexFile, privateKeyPWD, 0600) if err != nil { t.Error(err) return } err = privateKey.FromHexFile(privateHexFile, privateKeyPWD) if err != nil { t.Error(err) return } } func TestPrivateKey_GetPublicKey(t *testing.T) { publicKey = privateKey.GetPublicKey() } func TestPublicKey_Raw(t *testing.T) { publicKey.FromRaw(publicKey.ToRaw()) } func TestPublicKey_RawBytes(t *testing.T) { data, err := publicKey.ToRawBytes() if err != nil { t.Error(err) return } err = publicKey.FromRawBytes(data) if err != nil { t.Error(err) return } } func TestPublicKey_Base64(t *testing.T) { data, err := publicKey.ToBase64(base64.RawURLEncoding) if err != nil { t.Error(err) return } err = 
publicKey.FromBase64(base64.RawURLEncoding, data) if err != nil { t.Error(err) return } } func TestPublicKey_Base64File(t *testing.T) { err := publicKey.ToBase64File(base64.RawURLEncoding, publicBase64File, 0600) if err != nil { t.Error(err) return } err = publicKey.FromBase64File(base64.RawURLEncoding, publicBase64File) if err != nil { t.Error(err) return } } func TestPublicKey_Hex(t *testing.T) { data, err := publicKey.ToHex() if err != nil { t.Error(err) return } err = publicKey.FromHex(data) if err != nil { t.Error(err) return } } func TestPublicKey_HexFile(t *testing.T) { err := publicKey.ToHexFile(publicHexFile, 0600) if err != nil { t.Error(err) return } err = publicKey.FromHexFile(publicHexFile) if err != nil { t.Error(err) return } } func TestEncrypt(t *testing.T) { cipher, err := publicKey.Encrypt([]byte(plaintext)) if err != nil { t.Error(err) return } plain, err2 := privateKey.Decrypt(cipher) if err2 != nil { t.Error(err2) return } if !bytes.Equal(plain, []byte(plaintext)) { t.Error("decrypted result does not match the plaintext") } } func TestEncryptASN1(t *testing.T) { cipher, err := publicKey.EncryptASN1([]byte(plaintext)) if err != nil { t.Error(err) return } plain, err2 := privateKey.DecryptASN1(cipher) if err2 != nil { t.Error(err2) return } if !bytes.Equal(plain, []byte(plaintext)) { t.Error("decrypted result does not match the plaintext") } } func TestEncryptBase64(t *testing.T) { cipher, err := publicKey.EncryptToBase64(base64.RawURLEncoding, []byte(plaintext)) if err != nil { t.Error(err) return } plain, err2 := privateKey.DecryptFromBase64(base64.RawURLEncoding, cipher) if err2 != nil { t.Error(err2) return } if !bytes.Equal(plain, []byte(plaintext)) { t.Error("decrypted result does not match the plaintext") } } func TestEncryptASN1Base64(t *testing.T) { cipher, err := publicKey.EncryptASN1ToBase64(base64.RawURLEncoding, []byte(plaintext)) if err != nil { t.Error(err) return } plain, err2 := privateKey.DecryptASN1FromBase64(base64.RawURLEncoding, cipher) if err2 != nil { t.Error(err2) return } if !bytes.Equal(plain, []byte(plaintext)) { t.Error("decrypted result does not match the plaintext") } } func TestEncryptHex(t *testing.T) { cipher, err := publicKey.EncryptToHex([]byte(plaintext)) if err != nil { t.Error(err) return } plain, err2 := privateKey.DecryptFromHex(cipher) if err2 != nil { t.Error(err2) return } if !bytes.Equal(plain, []byte(plaintext)) { t.Error("decrypted result does not match the plaintext") } } func TestEncryptASN1Hex(t *testing.T) { cipher, err := publicKey.EncryptASN1ToHex([]byte(plaintext)) if err != nil { t.Error(err) return } plain, err2 := privateKey.DecryptASN1FromHex(cipher) if err2 != nil { t.Error(err2) return } if !bytes.Equal(plain, []byte(plaintext)) { t.Error("decrypted result does not match the plaintext") } } func TestSign(t *testing.T) { sign, err := privateKey.Sign([]byte(plaintext)) if err != nil { t.Error(err) return } if !publicKey.Verify([]byte(plaintext), sign) { t.Error("signature verification failed") return } } func TestSignBase64(t *testing.T) { signBase64, err := privateKey.SignToBase64(base64.RawURLEncoding, []byte(plaintext)) if err != nil { t.Error(err) return } sign, err2 := Base64Decode(base64.RawURLEncoding, signBase64) if err2 != nil { t.Error(err2) return } if !publicKey.Verify([]byte(plaintext), sign) { t.Error("signature verification failed") return } } func TestSignHex(t *testing.T) { signHex, err := privateKey.SignToHex([]byte(plaintext)) if err != nil { t.Error(err) return } sign, err2 := HexDecode(signHex) if err2 != nil { t.Error(err2) return } if !publicKey.Verify([]byte(plaintext), sign) { t.Error("signature verification failed") return } }
[]
[]
[]
[]
[]
go
null
null
null
src/deploy/main.go
package main import ( "gopkg.in/yaml.v2" "io/ioutil" "fmt" "os" "os/exec" "flag" "regexp" "path" ) const configFile = "config.yml" const remoteDestinationPattern = "(.+)@(.+):(.+)" const hostsFile = "~/.ssh/known_hosts" type Config struct { Projects map[string]Project } type Project struct { Repository string Branch string Destinations []string } type Destination struct { Full string Auth string Host string Path string Filename string } func checkError(err error) { if err != nil { panic(err) } } func getConfig(path string) Config { yamlFile, err := ioutil.ReadFile(path) checkError(err) var config Config err = yaml.Unmarshal(yamlFile, &config) checkError(err) return config } func updateCode(codePath string, definition Project) { if _, err := os.Stat(codePath); err == nil { out, err1 := exec.Command("git", "-C", codePath, "reset", "--hard").Output() checkError(err1) fmt.Print("-- " + string(out)) out, err2 := exec.Command("git", "-C", codePath, "pull", "--rebase").Output() checkError(err2) fmt.Print("-- " + string(out)) } else { err := exec.Command("git", "clone", "-b", definition.Branch, definition.Repository, codePath).Run() checkError(err) } } func getDependencies() []byte { out, err := exec.Command("go", "get", "-d").Output() checkError(err) return out } func runTests() []byte { out, err := exec.Command("go", "test").Output() checkError(err) return out } func install() { err := exec.Command("go", "install").Run() checkError(err) } func build(binaryPath string) { err := exec.Command("go", "build", "-o", binaryPath).Run() checkError(err) } func getDestination(destinationString string) Destination { match, _ := regexp.MatchString(remoteDestinationPattern, destinationString) if !match { fmt.Printf( "\nInvalid destination pattern: %s\nExpected: %s\n\n", destinationString, remoteDestinationPattern, ) os.Exit(0) } r, _ := regexp.Compile(remoteDestinationPattern) segments := r.FindStringSubmatch(destinationString) return Destination { Full: destinationString, Auth: segments[1], Host: segments[2], Path: path.Dir(segments[3]), Filename: path.Base(segments[3]), } } func sync(localBinaryPath string, destination Destination) { exec.Command("sh", "-c", "ssh-keyscan " + destination.Host + " >> " + hostsFile).Run() rsync := "rsync -aq --rsync-path='mkdir -p " + destination.Path + " && rsync' " + localBinaryPath + " " + destination.Full exec.Command("sh", "-c", rsync).Run() } func main() { requiredProjectPointer := flag.String("p", "", "-p project_name") flag.Parse() goPath := os.Getenv("GOPATH") config := getConfig(goPath + "/" + configFile) requiredProject := *requiredProjectPointer if len(requiredProject) > 1{ _, isset := config.Projects[requiredProject] if !isset { fmt.Printf("Project %s not defined\n.", requiredProject) os.Exit(0) } } var result []byte; for project, definition := range config.Projects { if len(requiredProject) > 0 && project != requiredProject { continue } fmt.Printf("\nProject: %s\n", project) codePath := goPath + "/src/" + project fmt.Println("- Update code") updateCode(codePath, definition) os.Chdir(codePath) fmt.Println("- Dependencies") result = getDependencies() if out := string(result); len(out) > 0 { fmt.Println(string(result)) } install() fmt.Println("- Run tests") result = runTests() fmt.Print("-- " + string(result)) fmt.Println("- Build") localBinaryPath := goPath + "/bin/" + project build(localBinaryPath) fmt.Println("- Sync") for _, destination := range definition.Destinations { fmt.Printf("-- %s\n", destination) sync(localBinaryPath, getDestination(destination)) } 
fmt.Println() } }
[ "\"GOPATH\"" ]
[]
[ "GOPATH" ]
[]
["GOPATH"]
go
1
0
server/constants.go
package main import ( "os" ) var BackendOrigin = os.Getenv("BACKEND_ORIGIN") var MySQLUsernameCredential = os.Getenv("MYSQL_USERNAME_CREDENTIAL") var MySQLPasswordCredential = os.Getenv("MYSQL_PASSWORD_CREDENTIAL") var MySQLDatabaseCredential = os.Getenv("MYSQL_DATABASE_CREDENTIAL")
[ "\"BACKEND_ORIGIN\"", "\"MYSQL_USERNAME_CREDENTIAL\"", "\"MYSQL_PASSWORD_CREDENTIAL\"", "\"MYSQL_DATABASE_CREDENTIAL\"" ]
[]
[ "MYSQL_PASSWORD_CREDENTIAL", "BACKEND_ORIGIN", "MYSQL_DATABASE_CREDENTIAL", "MYSQL_USERNAME_CREDENTIAL" ]
[]
["MYSQL_PASSWORD_CREDENTIAL", "BACKEND_ORIGIN", "MYSQL_DATABASE_CREDENTIAL", "MYSQL_USERNAME_CREDENTIAL"]
go
4
0
utils/convert.py
import argparse import os import numpy as np import librosa import scipy.io.wavfile as scwav import scipy.signal as scisig import pylab import numpy.matlib as npmat import utils.preprocess as preproc from utils.helper import smooth, generate_interpolation #from nn_models.model_embedding_wasserstein import VariationalCycleGAN as VCGAN_embedding from nn_models.model_pitch_mfc_discriminate_wasserstein import VariationalCycleGAN as VCGAN_embedding from nn_models.model_separate_discriminate_id import VariationalCycleGAN as VCGAN from encoder_decoder import AE #from model_pair_lvi import CycleGAN as CycleGAN_f0s num_mfcc = 23 num_pitch = 1 sampling_rate = 16000 frame_period = 5.0 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" def conversion(model_dir=None, model_name=None, audio_file=None, data_dir=None, conversion_direction=None, output_dir=None, embedding=True, only_energy=False): if embedding: ae_model = AE(dim_mfc=num_mfcc) ae_model.load(filename='./model/AE_cmu_pre_trained_noise_std_1.ckpt') model = VCGAN_embedding(dim_mfc=1, dim_pitch=1, mode='test') model.load(filepath=os.path.join(model_dir, model_name)) else: model = VCGAN(dim_mfc=23, dim_pitch=1, mode='test') model.load(filepath=os.path.join(model_dir, model_name)) if audio_file is not None: wav, sr = librosa.load(audio_file, sr=sampling_rate, mono=True) assert (sr==sampling_rate) wav = preproc.wav_padding(wav=wav, sr=sampling_rate, \ frame_period=frame_period, multiple=4) f0, sp, ap = preproc.world_decompose(wav=wav, \ fs=sampling_rate, frame_period=frame_period) coded_sp = preproc.world_encode_spectral_envelope(sp=sp, \ fs=sampling_rate, dim=num_mfcc) coded_sp = np.expand_dims(coded_sp, axis=0) coded_sp = np.transpose(coded_sp, (0,2,1)) if embedding: sp_embedding = ae_model.get_embedding(mfc_features=coded_sp) coded_sp = sp_embedding f0 = scisig.medfilt(f0, kernel_size=3) z_idx = np.where(f0<10.0)[0] f0 = generate_interpolation(f0) f0 = smooth(f0, window_len=13) f0 = np.reshape(f0, (1,1,-1)) f0_converted, coded_sp_converted = model.test(input_pitch=f0, input_mfc=coded_sp, direction=conversion_direction) if embedding: coded_sp_converted = ae_model.get_mfcc(embeddings=coded_sp_converted) coded_sp_converted = np.asarray(np.transpose(coded_sp_converted[0]), np.float64) coded_sp_converted = np.ascontiguousarray(coded_sp_converted) f0_converted = np.asarray(np.reshape(f0_converted[0], (-1,)), np.float64) f0_converted = np.ascontiguousarray(f0_converted) f0_converted[z_idx] = 0 decoded_sp_converted = preproc.world_decode_spectral_envelope(coded_sp=coded_sp_converted, fs=sampling_rate) # Normalization of converted features decoded_sp_converted = decoded_sp_converted / np.max(decoded_sp_converted) wav_transformed = preproc.world_speech_synthesis(f0=f0_converted, decoded_sp=decoded_sp_converted, ap=ap, fs=sampling_rate, frame_period=frame_period) scwav.write(os.path.join('/home/ravi/Desktop', os.path.basename(audio_file)), sampling_rate, wav_transformed) print('Processed: ' + audio_file) else: os.makedirs(output_dir, exist_ok=True) for file in os.listdir(data_dir): filepath = os.path.join(data_dir, file) wav, sr = librosa.load(filepath, sr=sampling_rate, mono=True) wav = (wav - np.min(wav)) / (np.max(wav) - np.min(wav)) assert (sr==sampling_rate) wav = preproc.wav_padding(wav=wav, sr=sampling_rate, \ frame_period=frame_period, multiple=4) f0, sp, ap = preproc.world_decompose(wav=wav, \ fs=sampling_rate, frame_period=frame_period) coded_sp = preproc.world_encode_spectral_envelope(sp=sp, \ fs=sampling_rate, dim=num_mfcc) coded_sp = 
np.expand_dims(coded_sp, axis=0) coded_sp = np.transpose(coded_sp, (0,2,1)) if embedding: sp_embedding = ae_model.get_embedding(mfc_features=coded_sp) else: sp_embedding = coded_sp f0 = scisig.medfilt(f0, kernel_size=3) z_idx = np.where(f0<10.0)[0] f0 = generate_interpolation(f0) f0 = smooth(f0, window_len=13) f0 = np.reshape(f0, (1,1,-1)) f0_converted, coded_sp_converted = model.test(input_pitch=f0, input_mfc=sp_embedding, direction=conversion_direction) # f0_converted = cgan_f0.test(input_pitch=f0, input_mfc=coded_sp, # direction='A2B') if embedding: coded_sp_converted = ae_model.get_mfcc(embeddings=coded_sp_converted) coded_sp_converted = np.asarray(np.transpose(np.squeeze(coded_sp_converted)), np.float64) coded_sp_converted = np.ascontiguousarray(coded_sp_converted) f0_converted = np.asarray(np.reshape(f0_converted[0], (-1,)), np.float64) f0_converted = np.ascontiguousarray(f0_converted) f0_converted[z_idx] = 0 # Mixing the mfcc features print(np.min(coded_sp_converted), np.min(coded_sp)) if embedding and not only_energy: coded_sp_converted = 0.6*coded_sp_converted + 0.4*np.transpose(np.squeeze(coded_sp)) elif embedding and only_energy: energy_contour_converted = np.sum(coded_sp_converted**2, axis=1, keepdims=True) energy_contour = np.sum(np.squeeze(coded_sp).T**2, axis=1, keepdims=True) factor = (energy_contour_converted / energy_contour)**(0.5) coded_sp_converted = np.squeeze(coded_sp).T * (npmat.repmat(factor, 1,coded_sp.shape[1])) # Pyworld decoding decoded_sp_converted = preproc.world_decode_spectral_envelope(coded_sp=coded_sp_converted, fs=sampling_rate) # Normalization of converted features # decoded_sp_converted = decoded_sp_converted / np.max(decoded_sp_converted) wav_transformed = preproc.world_speech_synthesis(f0=f0_converted, decoded_sp=decoded_sp_converted, ap=ap, fs=sampling_rate, frame_period=frame_period) wav_transformed = (wav_transformed - np.min(wav_transformed)) \ / (np.max(wav_transformed) - np.min(wav_transformed)) wav_transformed = wav_transformed - np.mean(wav_transformed) scwav.write(os.path.join(output_dir, os.path.basename(file)), 16000, wav_transformed) print('Processed: ' + file) if __name__ == '__main__': parser = argparse.ArgumentParser(description = 'Convert Emotion using pre-trained VariationalCycleGAN model.') model_dir_default = './model/neu-ang/lp_1e-05_lm_0.1_lmo_1e-06_lrg_2e-06_lrd_1e-07_li_0.05_pre_trained_pitch_mfc_discriminate_wasserstein_all_spk' model_name_default = 'neu-ang_450.ckpt' data_dir_default = 'data/evaluation/neu-ang/test/neutral' conversion_direction_default = 'A2B' output_dir_default = '/home/ravi/Desktop/AE_wasserstein_energy' audio_file_default = None parser.add_argument('--model_dir', type = str, help='Directory for the pre-trained model.', default=model_dir_default) parser.add_argument('--model_name', type = str, help='Filename for the pre-trained model.', default=model_name_default) parser.add_argument('--data_dir', type=str, help='Directory for the voices for conversion.', default=data_dir_default) parser.add_argument('--conversion_direction', type=str, help='Conversion direction for VCGAN, A2B or B2A', default=conversion_direction_default) parser.add_argument('--output_dir', type=str, help='Directory for the converted voices.', default=output_dir_default) parser.add_argument('--audio_file', type=str, help='convert a single audio file', default=audio_file_default) argv = parser.parse_args() model_dir = argv.model_dir model_name = argv.model_name data_dir = argv.data_dir conversion_direction = argv.conversion_direction 
output_dir = argv.output_dir audio_file = argv.audio_file conversion(model_dir=model_dir, model_name=model_name, audio_file=audio_file, data_dir=data_dir, conversion_direction=conversion_direction, output_dir=output_dir, embedding=True, only_energy=True)
[]
[]
[ "TF_CPP_MIN_LOG_LEVEL" ]
[]
["TF_CPP_MIN_LOG_LEVEL"]
python
1
0
acceptance/acceptance_test.go
// +build acceptance package acceptance import ( "bytes" "context" "crypto/sha256" "encoding/hex" "encoding/json" "fmt" "io" "io/ioutil" "math/rand" "os" "path/filepath" "regexp" "runtime" "strings" "testing" "time" dockertypes "github.com/docker/docker/api/types" "github.com/docker/docker/client" "github.com/ghodss/yaml" "github.com/google/go-containerregistry/pkg/name" "github.com/pelletier/go-toml" "github.com/sclevine/spec" "github.com/sclevine/spec/report" "github.com/buildpacks/pack/acceptance/assertions" "github.com/buildpacks/pack/acceptance/buildpacks" "github.com/buildpacks/pack/acceptance/config" "github.com/buildpacks/pack/acceptance/invoke" "github.com/buildpacks/pack/acceptance/managers" pubcfg "github.com/buildpacks/pack/config" "github.com/buildpacks/pack/internal/cache" internalConfig "github.com/buildpacks/pack/internal/config" "github.com/buildpacks/pack/internal/style" "github.com/buildpacks/pack/pkg/archive" h "github.com/buildpacks/pack/testhelpers" ) const ( runImage = "pack-test/run" buildImage = "pack-test/build" ) var ( dockerCli client.CommonAPIClient registryConfig *h.TestRegistryConfig suiteManager *SuiteManager imageManager managers.ImageManager assertImage assertions.ImageAssertionManager ) func TestAcceptance(t *testing.T) { var err error h.RequireDocker(t) rand.Seed(time.Now().UTC().UnixNano()) assert := h.NewAssertionManager(t) dockerCli, err = client.NewClientWithOpts(client.FromEnv, client.WithVersion("1.38")) assert.Nil(err) imageManager = managers.NewImageManager(t, dockerCli) registryConfig = h.RunRegistry(t) defer registryConfig.RmRegistry(t) assertImage = assertions.NewImageAssertionManager(t, imageManager, registryConfig) inputConfigManager, err := config.NewInputConfigurationManager() assert.Nil(err) assetsConfig := config.ConvergedAssetManager(t, assert, inputConfigManager) suiteManager = &SuiteManager{out: t.Logf} suite := spec.New("acceptance suite", spec.Report(report.Terminal{})) if inputConfigManager.Combinations().IncludesCurrentSubjectPack() { suite("p_current", func(t *testing.T, when spec.G, it spec.S) { testWithoutSpecificBuilderRequirement( t, when, it, assetsConfig.NewPackAsset(config.Current), ) }, spec.Report(report.Terminal{})) } for _, combo := range inputConfigManager.Combinations() { t.Logf(`setting up run combination %s: %s`, style.Symbol(combo.String()), combo.Describe(assetsConfig), ) suite(combo.String(), func(t *testing.T, when spec.G, it spec.S) { testAcceptance( t, when, it, assetsConfig.NewPackAsset(combo.Pack), assetsConfig.NewPackAsset(combo.PackCreateBuilder), assetsConfig.NewLifecycleAsset(combo.Lifecycle), ) }, spec.Report(report.Terminal{})) } suite.Run(t) assert.Nil(suiteManager.CleanUp()) } // These tests either (a) do not require a builder or (b) do not require a specific builder to be provided // in order to test compatibility. // They should only be run against the "current" (i.e., main) version of pack. 
func testWithoutSpecificBuilderRequirement( t *testing.T, when spec.G, it spec.S, packConfig config.PackAsset, ) { var ( pack *invoke.PackInvoker assert = h.NewAssertionManager(t) buildpackManager buildpacks.BuildpackManager ) it.Before(func() { pack = invoke.NewPackInvoker(t, assert, packConfig, registryConfig.DockerConfigDir) pack.EnableExperimental() buildpackManager = buildpacks.NewBuildpackManager(t, assert) }) it.After(func() { pack.Cleanup() }) when("invalid subcommand", func() { it("prints usage", func() { output, err := pack.Run("some-bad-command") assert.NotNil(err) assertOutput := assertions.NewOutputAssertionManager(t, output) assertOutput.ReportsCommandUnknown("some-bad-command") assertOutput.IncludesUsagePrompt() }) }) when("build with default builders not set", func() { it("informs the user", func() { output, err := pack.Run( "build", "some/image", "-p", filepath.Join("testdata", "mock_app"), ) assert.NotNil(err) assertOutput := assertions.NewOutputAssertionManager(t, output) assertOutput.IncludesMessageToSetDefaultBuilder() assertOutput.IncludesPrefixedGoogleBuilder() assertOutput.IncludesPrefixedHerokuBuilders() assertOutput.IncludesPrefixedPaketoBuilders() }) }) when("buildpack", func() { when("package", func() { var ( tmpDir string buildpackManager buildpacks.BuildpackManager simplePackageConfigFixtureName = "package.toml" ) it.Before(func() { var err error tmpDir, err = ioutil.TempDir("", "buildpack-package-tests") assert.Nil(err) buildpackManager = buildpacks.NewBuildpackManager(t, assert) buildpackManager.PrepareBuildpacks(tmpDir, buildpacks.SimpleLayersParent, buildpacks.SimpleLayers) }) it.After(func() { assert.Nil(os.RemoveAll(tmpDir)) }) generateAggregatePackageToml := func(buildpackURI, nestedPackageName, os string) string { t.Helper() packageTomlFile, err := ioutil.TempFile(tmpDir, "package_aggregate-*.toml") assert.Nil(err) pack.FixtureManager().TemplateFixtureToFile( "package_aggregate.toml", packageTomlFile, map[string]interface{}{ "BuildpackURI": buildpackURI, "PackageName": nestedPackageName, "OS": os, }, ) assert.Nil(packageTomlFile.Close()) return packageTomlFile.Name() } when("no --format is provided", func() { it("creates the package as image", func() { packageName := "test/package-" + h.RandString(10) packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, imageManager.HostOS()) output := pack.RunSuccessfully("buildpack", "package", packageName, "-c", packageTomlPath) assertions.NewOutputAssertionManager(t, output).ReportsPackageCreation(packageName) defer imageManager.CleanupImages(packageName) assertImage.ExistsLocally(packageName) }) }) when("--format image", func() { it("creates the package", func() { t.Log("package w/ only buildpacks") nestedPackageName := "test/package-" + h.RandString(10) packageName := "test/package-" + h.RandString(10) packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, imageManager.HostOS()) aggregatePackageToml := generateAggregatePackageToml("simple-layers-parent-buildpack.tgz", nestedPackageName, imageManager.HostOS()) packageBuildpack := buildpacks.NewPackageImage( t, pack, packageName, aggregatePackageToml, buildpacks.WithRequiredBuildpacks( buildpacks.SimpleLayersParent, buildpacks.NewPackageImage( t, pack, nestedPackageName, packageTomlPath, buildpacks.WithRequiredBuildpacks(buildpacks.SimpleLayers), ), ), ) buildpackManager.PrepareBuildpacks(tmpDir, packageBuildpack) defer imageManager.CleanupImages(nestedPackageName, 
packageName) assertImage.ExistsLocally(nestedPackageName) assertImage.ExistsLocally(packageName) }) when("--publish", func() { it("publishes image to registry", func() { packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, imageManager.HostOS()) nestedPackageName := registryConfig.RepoName("test/package-" + h.RandString(10)) nestedPackage := buildpacks.NewPackageImage( t, pack, nestedPackageName, packageTomlPath, buildpacks.WithRequiredBuildpacks(buildpacks.SimpleLayers), buildpacks.WithPublish(), ) buildpackManager.PrepareBuildpacks(tmpDir, nestedPackage) aggregatePackageToml := generateAggregatePackageToml("simple-layers-parent-buildpack.tgz", nestedPackageName, imageManager.HostOS()) packageName := registryConfig.RepoName("test/package-" + h.RandString(10)) output := pack.RunSuccessfully( "buildpack", "package", packageName, "-c", aggregatePackageToml, "--publish", ) defer imageManager.CleanupImages(packageName) assertions.NewOutputAssertionManager(t, output).ReportsPackagePublished(packageName) assertImage.NotExistsLocally(packageName) assertImage.CanBePulledFromRegistry(packageName) }) }) when("--pull-policy=never", func() { it("should use local image", func() { packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, imageManager.HostOS()) nestedPackageName := "test/package-" + h.RandString(10) nestedPackage := buildpacks.NewPackageImage( t, pack, nestedPackageName, packageTomlPath, buildpacks.WithRequiredBuildpacks(buildpacks.SimpleLayers), ) buildpackManager.PrepareBuildpacks(tmpDir, nestedPackage) defer imageManager.CleanupImages(nestedPackageName) aggregatePackageToml := generateAggregatePackageToml("simple-layers-parent-buildpack.tgz", nestedPackageName, imageManager.HostOS()) packageName := registryConfig.RepoName("test/package-" + h.RandString(10)) defer imageManager.CleanupImages(packageName) pack.JustRunSuccessfully( "buildpack", "package", packageName, "-c", aggregatePackageToml, "--pull-policy", pubcfg.PullNever.String()) assertImage.ExistsLocally(packageName) }) it("should not pull image from registry", func() { packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, imageManager.HostOS()) nestedPackageName := registryConfig.RepoName("test/package-" + h.RandString(10)) nestedPackage := buildpacks.NewPackageImage( t, pack, nestedPackageName, packageTomlPath, buildpacks.WithPublish(), buildpacks.WithRequiredBuildpacks(buildpacks.SimpleLayers), ) buildpackManager.PrepareBuildpacks(tmpDir, nestedPackage) aggregatePackageToml := generateAggregatePackageToml("simple-layers-parent-buildpack.tgz", nestedPackageName, imageManager.HostOS()) packageName := registryConfig.RepoName("test/package-" + h.RandString(10)) output, err := pack.Run( "buildpack", "package", packageName, "-c", aggregatePackageToml, "--pull-policy", pubcfg.PullNever.String(), ) assert.NotNil(err) assertions.NewOutputAssertionManager(t, output).ReportsImageNotExistingOnDaemon(nestedPackageName) }) }) }) when("--format file", func() { when("the file extension is .cnb", func() { it("creates the package with the same extension", func() { packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, imageManager.HostOS()) destinationFile := filepath.Join(tmpDir, "package.cnb") output := pack.RunSuccessfully( "buildpack", "package", destinationFile, "--format", "file", "-c", packageTomlPath, ) assertions.NewOutputAssertionManager(t, 
output).ReportsPackageCreation(destinationFile) h.AssertTarball(t, destinationFile) }) }) when("the file extension is empty", func() { it("creates the package with a .cnb extension", func() { packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, imageManager.HostOS()) destinationFile := filepath.Join(tmpDir, "package") expectedFile := filepath.Join(tmpDir, "package.cnb") output := pack.RunSuccessfully( "buildpack", "package", destinationFile, "--format", "file", "-c", packageTomlPath, ) assertions.NewOutputAssertionManager(t, output).ReportsPackageCreation(expectedFile) h.AssertTarball(t, expectedFile) }) }) when("the file extension is not .cnb", func() { it("creates the package with the given extension but shows a warning", func() { packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, imageManager.HostOS()) destinationFile := filepath.Join(tmpDir, "package.tar.gz") output := pack.RunSuccessfully( "buildpack", "package", destinationFile, "--format", "file", "-c", packageTomlPath, ) assertOutput := assertions.NewOutputAssertionManager(t, output) assertOutput.ReportsPackageCreation(destinationFile) assertOutput.ReportsInvalidExtension(".gz") h.AssertTarball(t, destinationFile) }) }) }) when("package.toml is invalid", func() { it("displays an error", func() { output, err := pack.Run( "buildpack", "package", "some-package", "-c", pack.FixtureManager().FixtureLocation("invalid_package.toml"), ) assert.NotNil(err) assertOutput := assertions.NewOutputAssertionManager(t, output) assertOutput.ReportsReadingConfig() }) }) }) when("inspect", func() { var tmpDir string it.Before(func() { var err error tmpDir, err = ioutil.TempDir("", "buildpack-inspect-tests") assert.Nil(err) }) it.After(func() { assert.Succeeds(os.RemoveAll(tmpDir)) }) when("buildpack archive", func() { it("succeeds", func() { packageFileLocation := filepath.Join( tmpDir, fmt.Sprintf("buildpack-%s.cnb", h.RandString(8)), ) packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, "package_for_build_cmd.toml", imageManager.HostOS()) packageFile := buildpacks.NewPackageFile( t, pack, packageFileLocation, packageTomlPath, buildpacks.WithRequiredBuildpacks( buildpacks.FolderSimpleLayersParent, buildpacks.FolderSimpleLayers, ), ) buildpackManager.PrepareBuildpacks(tmpDir, packageFile) expectedOutput := pack.FixtureManager().TemplateFixture( "inspect_buildpack_output.txt", map[string]interface{}{ "buildpack_source": "LOCAL ARCHIVE", "buildpack_name": packageFileLocation, }, ) output := pack.RunSuccessfully("buildpack", "inspect", packageFileLocation) assert.TrimmedEq(output, expectedOutput) }) }) when("buildpack image", func() { when("inspect", func() { it("succeeds", func() { packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, "package_for_build_cmd.toml", imageManager.HostOS()) packageImageName := registryConfig.RepoName("buildpack-" + h.RandString(8)) packageImage := buildpacks.NewPackageImage( t, pack, packageImageName, packageTomlPath, buildpacks.WithRequiredBuildpacks( buildpacks.FolderSimpleLayersParent, buildpacks.FolderSimpleLayers, ), ) defer imageManager.CleanupImages(packageImageName) buildpackManager.PrepareBuildpacks(tmpDir, packageImage) expectedOutput := pack.FixtureManager().TemplateFixture( "inspect_buildpack_output.txt", map[string]interface{}{ "buildpack_source": "LOCAL IMAGE", "buildpack_name": packageImageName, }, ) output := pack.RunSuccessfully("buildpack", "inspect", packageImageName) 
assert.TrimmedEq(output, expectedOutput) }) }) }) }) }) when("builder", func() { when("suggest", func() { it("displays suggested builders", func() { output := pack.RunSuccessfully("builder", "suggest") assertOutput := assertions.NewOutputAssertionManager(t, output) assertOutput.IncludesSuggestedBuildersHeading() assertOutput.IncludesPrefixedGoogleBuilder() assertOutput.IncludesPrefixedHerokuBuilders() assertOutput.IncludesPrefixedPaketoBuilders() }) }) }) when("config", func() { when("default-builder", func() { it("sets the default builder in ~/.pack/config.toml", func() { builderName := "paketobuildpacks/builder:base" output := pack.RunSuccessfully("config", "default-builder", builderName) assertions.NewOutputAssertionManager(t, output).ReportsSettingDefaultBuilder(builderName) }) }) when("trusted-builders", func() { it("prints list of trusted builders", func() { output := pack.RunSuccessfully("config", "trusted-builders") assertOutput := assertions.NewOutputAssertionManager(t, output) assertOutput.IncludesTrustedBuildersHeading() assertOutput.IncludesHerokuBuilders() assertOutput.IncludesGoogleBuilder() assertOutput.IncludesPaketoBuilders() }) when("add", func() { it("sets the builder as trusted in ~/.pack/config.toml", func() { builderName := "some-builder" + h.RandString(10) pack.JustRunSuccessfully("config", "trusted-builders", "add", builderName) assert.Contains(pack.ConfigFileContents(), builderName) }) }) when("remove", func() { it("removes the previously trusted builder from ~/${PACK_HOME}/config.toml", func() { builderName := "some-builder" + h.RandString(10) pack.JustRunSuccessfully("config", "trusted-builders", "add", builderName) assert.Contains(pack.ConfigFileContents(), builderName) pack.JustRunSuccessfully("config", "trusted-builders", "remove", builderName) assert.NotContains(pack.ConfigFileContents(), builderName) }) }) when("list", func() { it("prints list of trusted builders", func() { output := pack.RunSuccessfully("config", "trusted-builders", "list") assertOutput := assertions.NewOutputAssertionManager(t, output) assertOutput.IncludesTrustedBuildersHeading() assertOutput.IncludesHerokuBuilders() assertOutput.IncludesGoogleBuilder() assertOutput.IncludesPaketoBuilders() }) it("shows a builder trusted by pack config trusted-builders add", func() { builderName := "some-builder" + h.RandString(10) pack.JustRunSuccessfully("config", "trusted-builders", "add", builderName) output := pack.RunSuccessfully("config", "trusted-builders", "list") assert.Contains(output, builderName) }) }) }) }) when("stack", func() { when("suggest", func() { it("displays suggested stacks", func() { output, err := pack.Run("stack", "suggest") assert.Nil(err) assertions.NewOutputAssertionManager(t, output).IncludesSuggestedStacksHeading() }) }) }) when("report", func() { when("default builder is set", func() { it("redacts default builder", func() { pack.RunSuccessfully("config", "default-builder", "paketobuildpacks/builder:base") output := pack.RunSuccessfully("report") version := pack.Version() expectedOutput := pack.FixtureManager().TemplateFixture( "report_output.txt", map[string]interface{}{ "DefaultBuilder": "[REDACTED]", "Version": version, "OS": runtime.GOOS, "Arch": runtime.GOARCH, }, ) assert.Equal(output, expectedOutput) }) it("explicit mode doesn't redact", func() { pack.RunSuccessfully("config", "default-builder", "paketobuildpacks/builder:base") output := pack.RunSuccessfully("report", "--explicit") version := pack.Version() expectedOutput := pack.FixtureManager().TemplateFixture( 
"report_output.txt", map[string]interface{}{ "DefaultBuilder": "paketobuildpacks/builder:base", "Version": version, "OS": runtime.GOOS, "Arch": runtime.GOARCH, }, ) assert.Equal(output, expectedOutput) }) }) }) } func testAcceptance( t *testing.T, when spec.G, it spec.S, subjectPackConfig, createBuilderPackConfig config.PackAsset, lifecycle config.LifecycleAsset, ) { var ( pack, createBuilderPack *invoke.PackInvoker buildpackManager buildpacks.BuildpackManager bpDir = buildpacksDir(lifecycle.EarliestBuildpackAPIVersion()) assert = h.NewAssertionManager(t) ) it.Before(func() { pack = invoke.NewPackInvoker(t, assert, subjectPackConfig, registryConfig.DockerConfigDir) pack.EnableExperimental() createBuilderPack = invoke.NewPackInvoker(t, assert, createBuilderPackConfig, registryConfig.DockerConfigDir) createBuilderPack.EnableExperimental() buildpackManager = buildpacks.NewBuildpackManager( t, assert, buildpacks.WithBuildpackAPIVersion(lifecycle.EarliestBuildpackAPIVersion()), ) }) it.After(func() { pack.Cleanup() createBuilderPack.Cleanup() }) when("stack is created", func() { var ( runImageMirror string stackBaseImages = map[string][]string{ "linux": {"ubuntu:bionic"}, "windows": {"mcr.microsoft.com/windows/nanoserver:1809", "golang:1.14-nanoserver-1809"}, } ) it.Before(func() { value, err := suiteManager.RunTaskOnceString("create-stack", func() (string, error) { runImageMirror := registryConfig.RepoName(runImage) err := createStack(t, dockerCli, runImageMirror) if err != nil { return "", err } return runImageMirror, nil }) assert.Nil(err) baseStackNames := stackBaseImages[imageManager.HostOS()] suiteManager.RegisterCleanUp("remove-stack-images", func() error { imageManager.CleanupImages(baseStackNames...) imageManager.CleanupImages(runImage, buildImage, value) return nil }) runImageMirror = value }) when("builder is created", func() { var builderName string it.Before(func() { key := taskKey( "create-builder", append( []string{runImageMirror, createBuilderPackConfig.Path(), lifecycle.Identifier()}, createBuilderPackConfig.FixturePaths()..., )..., ) value, err := suiteManager.RunTaskOnceString(key, func() (string, error) { return createBuilder(t, assert, createBuilderPack, lifecycle, buildpackManager, runImageMirror) }) assert.Nil(err) suiteManager.RegisterCleanUp("clean-"+key, func() error { imageManager.CleanupImages(value) return nil }) builderName = value }) when("complex builder", func() { it.Before(func() { // create our nested builder h.SkipIf(t, imageManager.HostOS() == "windows", "These tests are not yet compatible with Windows-based containers") h.SkipIf(t, !createBuilderPack.SupportsFeature(invoke.BuilderNoDuplicateLayers), "bug fixed in 0.18.0") // create a task, handled by a 'task manager' which executes our pack commands during tests. 
// looks like this is used to de-dup tasks key := taskKey( "create-complex-builder", append( []string{runImageMirror, createBuilderPackConfig.Path(), lifecycle.Identifier()}, createBuilderPackConfig.FixturePaths()..., )..., ) value, err := suiteManager.RunTaskOnceString(key, func() (string, error) { return createComplexBuilder( t, assert, createBuilderPack, lifecycle, buildpackManager, runImageMirror, ) }) assert.Nil(err) // register task to be run to 'clean up' a task suiteManager.RegisterCleanUp("clean-"+key, func() error { imageManager.CleanupImages(value) return nil }) builderName = value output := pack.RunSuccessfully( "config", "run-image-mirrors", "add", "pack-test/run", "--mirror", "some-registry.com/pack-test/run1") assertOutput := assertions.NewOutputAssertionManager(t, output) assertOutput.ReportsSuccesfulRunImageMirrorsAdd("pack-test/run", "some-registry.com/pack-test/run1") }) when("builder has duplicate buildpacks", func() { it("buildpack layers have no duplication", func() { assertImage.DoesNotHaveDuplicateLayers(builderName) }) }) }) when("builder.toml is invalid", func() { it("displays an error", func() { builderConfigPath := createBuilderPack.FixtureManager().FixtureLocation("invalid_builder.toml") output, err := createBuilderPack.Run( "builder", "create", "some-builder:build", "--config", builderConfigPath, ) assert.NotNil(err) assertOutput := assertions.NewOutputAssertionManager(t, output) assertOutput.ReportsInvalidBuilderToml() }) }) when("build", func() { var repo, repoName string it.Before(func() { repo = "some-org/" + h.RandString(10) repoName = registryConfig.RepoName(repo) }) it.After(func() { imageManager.CleanupImages(repoName) ref, err := name.ParseReference(repoName, name.WeakValidation) assert.Nil(err) cacheImage := cache.NewImageCache(ref, dockerCli) buildCacheVolume := cache.NewVolumeCache(ref, "build", dockerCli) launchCacheVolume := cache.NewVolumeCache(ref, "launch", dockerCli) cacheImage.Clear(context.TODO()) buildCacheVolume.Clear(context.TODO()) launchCacheVolume.Clear(context.TODO()) }) when("builder is untrusted", func() { var untrustedBuilderName string it.Before(func() { var err error untrustedBuilderName, err = createBuilder( t, assert, createBuilderPack, lifecycle, buildpackManager, runImageMirror, ) suiteManager.RegisterCleanUp("remove-lifecycle-"+lifecycle.Version(), func() error { img := imageManager.GetImageID(fmt.Sprintf("%s:%s", internalConfig.DefaultLifecycleImageRepo, lifecycle.Version())) imageManager.CleanupImages(img) return nil }) assert.Nil(err) }) it.After(func() { imageManager.CleanupImages(untrustedBuilderName) }) when("daemon", func() { it("uses the 5 phases", func() { output := pack.RunSuccessfully( "build", repoName, "-p", filepath.Join("testdata", "mock_app"), "-B", untrustedBuilderName, ) assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName) assertOutput := assertions.NewLifecycleOutputAssertionManager(t, output) assertOutput.IncludesLifecycleImageTag() assertOutput.IncludesSeparatePhases() }) }) when("--publish", func() { it("uses the 5 phases", func() { buildArgs := []string{ repoName, "-p", filepath.Join("testdata", "mock_app"), "-B", untrustedBuilderName, "--publish", } if imageManager.HostOS() != "windows" { buildArgs = append(buildArgs, "--network", "host") } output := pack.RunSuccessfully("build", buildArgs...) 
assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName) assertOutput := assertions.NewLifecycleOutputAssertionManager(t, output) assertOutput.IncludesLifecycleImageTag() assertOutput.IncludesSeparatePhases() }) }) when("additional tags", func() { var additionalRepoName string it.Before(func() { additionalRepoName = fmt.Sprintf("%s_additional", repoName) }) it.After(func() { imageManager.CleanupImages(additionalRepoName) }) it("pushes image to additional tags", func() { output := pack.RunSuccessfully( "build", repoName, "-p", filepath.Join("testdata", "mock_app"), "-B", untrustedBuilderName, "--tag", additionalRepoName, ) assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName) assert.Contains(output, additionalRepoName) }) }) }) when("default builder is set", func() { it.Before(func() { pack.RunSuccessfully("config", "default-builder", builderName) pack.JustRunSuccessfully("config", "trusted-builders", "add", builderName) }) it("creates a runnable, rebuildable image on daemon from app dir", func() { appPath := filepath.Join("testdata", "mock_app") output := pack.RunSuccessfully( "build", repoName, "-p", appPath, ) assertOutput := assertions.NewOutputAssertionManager(t, output) assertOutput.ReportsSuccessfulImageBuild(repoName) assertOutput.ReportsUsingBuildCacheVolume() assertOutput.ReportsSelectingRunImageMirror(runImageMirror) t.Log("app is runnable") assertImage.RunsWithOutput(repoName, "Launch Dep Contents", "Cached Dep Contents") t.Log("it uses the run image as a base image") assertImage.HasBaseImage(repoName, runImage) t.Log("sets the run image metadata") assertImage.HasLabelWithData(repoName, "io.buildpacks.lifecycle.metadata", fmt.Sprintf(`"stack":{"runImage":{"image":"%s","mirrors":["%s"]}}}`, runImage, runImageMirror)) t.Log("registry is empty") assertImage.NotExistsInRegistry(repo) t.Log("add a local mirror") localRunImageMirror := registryConfig.RepoName("pack-test/run-mirror") imageManager.TagImage(runImage, localRunImageMirror) defer imageManager.CleanupImages(localRunImageMirror) pack.JustRunSuccessfully("config", "run-image-mirrors", "add", runImage, "-m", localRunImageMirror) t.Log("rebuild") output = pack.RunSuccessfully( "build", repoName, "-p", appPath, ) assertOutput = assertions.NewOutputAssertionManager(t, output) assertOutput.ReportsSuccessfulImageBuild(repoName) assertOutput.ReportsSelectingRunImageMirrorFromLocalConfig(localRunImageMirror) cachedLaunchLayer := "simple/layers:cached-launch-layer" assertLifecycleOutput := assertions.NewLifecycleOutputAssertionManager(t, output) assertLifecycleOutput.ReportsRestoresCachedLayer(cachedLaunchLayer) assertLifecycleOutput.ReportsExporterReusingUnchangedLayer(cachedLaunchLayer) assertLifecycleOutput.ReportsCacheReuse(cachedLaunchLayer) t.Log("app is runnable") assertImage.RunsWithOutput(repoName, "Launch Dep Contents", "Cached Dep Contents") t.Log("rebuild with --clear-cache") output = pack.RunSuccessfully("build", repoName, "-p", appPath, "--clear-cache") assertOutput = assertions.NewOutputAssertionManager(t, output) assertOutput.ReportsSuccessfulImageBuild(repoName) assertLifecycleOutput = assertions.NewLifecycleOutputAssertionManager(t, output) assertLifecycleOutput.ReportsSkippingBuildpackLayerAnalysis() assertLifecycleOutput.ReportsExporterReusingUnchangedLayer(cachedLaunchLayer) assertLifecycleOutput.ReportsCacheCreation(cachedLaunchLayer) t.Log("cacher adds layers") assert.Matches(output, regexp.MustCompile(`(?i)Adding cache layer 
'simple/layers:cached-launch-layer'`)) t.Log("inspecting image") inspectCmd := "inspect" if !pack.Supports("inspect") { inspectCmd = "inspect-image" } var ( webCommand string helloCommand string helloArgs []string helloArgsPrefix string ) if imageManager.HostOS() == "windows" { webCommand = ".\\run" helloCommand = "cmd" helloArgs = []string{"/c", "echo hello world"} helloArgsPrefix = " " } else { webCommand = "./run" helloCommand = "echo" helloArgs = []string{"hello", "world"} helloArgsPrefix = "" } formats := []compareFormat{ { extension: "txt", compareFunc: assert.TrimmedEq, outputArg: "human-readable", }, { extension: "json", compareFunc: assert.EqualJSON, outputArg: "json", }, { extension: "yaml", compareFunc: assert.EqualYAML, outputArg: "yaml", }, { extension: "toml", compareFunc: assert.EqualTOML, outputArg: "toml", }, } for _, format := range formats { t.Logf("inspecting image %s format", format.outputArg) output = pack.RunSuccessfully(inspectCmd, repoName, "--output", format.outputArg) expectedOutput := pack.FixtureManager().TemplateFixture( fmt.Sprintf("inspect_image_local_output.%s", format.extension), map[string]interface{}{ "image_name": repoName, "base_image_id": h.ImageID(t, runImageMirror), "base_image_top_layer": h.TopLayerDiffID(t, runImageMirror), "run_image_local_mirror": localRunImageMirror, "run_image_mirror": runImageMirror, "web_command": webCommand, "hello_command": helloCommand, "hello_args": helloArgs, "hello_args_prefix": helloArgsPrefix, }, ) format.compareFunc(output, expectedOutput) } }) when("--no-color", func() { it("doesn't have color", func() { appPath := filepath.Join("testdata", "mock_app") // --no-color is set as a default option in our tests, and doesn't need to be explicitly provided output := pack.RunSuccessfully("build", repoName, "-p", appPath) assertOutput := assertions.NewOutputAssertionManager(t, output) assertOutput.ReportsSuccessfulImageBuild(repoName) assertOutput.WithoutColors() }) }) when("--quiet", func() { it("only logs app name and sha", func() { appPath := filepath.Join("testdata", "mock_app") pack.SetVerbose(false) defer pack.SetVerbose(true) output := pack.RunSuccessfully("build", repoName, "-p", appPath, "--quiet") assertOutput := assertions.NewOutputAssertionManager(t, output) assertOutput.ReportSuccessfulQuietBuild(repoName) }) }) it("supports building app from a zip file", func() { appPath := filepath.Join("testdata", "mock_app.zip") output := pack.RunSuccessfully("build", repoName, "-p", appPath) assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName) }) when("--network", func() { var tmpDir string it.Before(func() { h.SkipIf(t, imageManager.HostOS() == "windows", "temporarily disabled on WCOW due to CI flakiness") var err error tmpDir, err = ioutil.TempDir("", "archive-buildpacks-") assert.Nil(err) buildpackManager.PrepareBuildpacks(tmpDir, buildpacks.InternetCapable) }) it.After(func() { h.SkipIf(t, imageManager.HostOS() == "windows", "temporarily disabled on WCOW due to CI flakiness") assert.Succeeds(os.RemoveAll(tmpDir)) }) when("the network mode is not provided", func() { it("reports buildpack access to internet", func() { output := pack.RunSuccessfully( "build", repoName, "-p", filepath.Join("testdata", "mock_app"), "--buildpack", buildpacks.InternetCapable.FullPathIn(tmpDir), ) assertBuildpackOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output) assertBuildpackOutput.ReportsConnectedToInternet() }) }) when("the network mode is set to default", func() { it("reports buildpack 
access to internet", func() { output := pack.RunSuccessfully( "build", repoName, "-p", filepath.Join("testdata", "mock_app"), "--buildpack", buildpacks.InternetCapable.FullPathIn(tmpDir), "--network", "default", ) assertBuildpackOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output) assertBuildpackOutput.ReportsConnectedToInternet() }) }) when("the network mode is set to none", func() { it("reports buildpack disconnected from internet", func() { output := pack.RunSuccessfully( "build", repoName, "-p", filepath.Join("testdata", "mock_app"), "--buildpack", buildpacks.InternetCapable.FullPathIn(tmpDir), "--network", "none", ) assertBuildpackOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output) assertBuildpackOutput.ReportsDisconnectedFromInternet() }) }) }) when("--volume", func() { var ( volumeRoot = "/" slash = "/" tmpDir string tmpVolumeSrc string ) it.Before(func() { h.SkipIf(t, os.Getenv("DOCKER_HOST") != "", "cannot mount volume when DOCKER_HOST is set") if imageManager.HostOS() == "windows" { volumeRoot = `c:\` slash = `\` } var err error tmpDir, err = ioutil.TempDir("", "volume-buildpack-tests-") assert.Nil(err) buildpackManager.PrepareBuildpacks(tmpDir, buildpacks.ReadVolume, buildpacks.ReadWriteVolume) tmpVolumeSrc, err = ioutil.TempDir("", "volume-mount-source") assert.Nil(err) assert.Succeeds(os.Chmod(tmpVolumeSrc, 0777)) // Override umask // Some OSes (like macOS) use symlinks for the standard temp dir. // Resolve it so it can be properly mounted by the Docker daemon. tmpVolumeSrc, err = filepath.EvalSymlinks(tmpVolumeSrc) assert.Nil(err) err = ioutil.WriteFile(filepath.Join(tmpVolumeSrc, "some-file"), []byte("some-content\n"), 0777) assert.Nil(err) }) it.After(func() { _ = os.RemoveAll(tmpDir) _ = os.RemoveAll(tmpVolumeSrc) }) when("volume is read-only", func() { it("mounts the provided volume in the detect and build phases", func() { volumeDest := volumeRoot + "platform" + slash + "volume-mount-target" testFilePath := volumeDest + slash + "some-file" output := pack.RunSuccessfully( "build", repoName, "-p", filepath.Join("testdata", "mock_app"), "--volume", fmt.Sprintf("%s:%s", tmpVolumeSrc, volumeDest), "--buildpack", buildpacks.ReadVolume.FullPathIn(tmpDir), "--env", "TEST_FILE_PATH="+testFilePath, ) bpOutputAsserts := assertions.NewTestBuildpackOutputAssertionManager(t, output) bpOutputAsserts.ReportsReadingFileContents("Detect", testFilePath, "some-content") bpOutputAsserts.ReportsReadingFileContents("Build", testFilePath, "some-content") }) it("should fail to write", func() { volumeDest := volumeRoot + "platform" + slash + "volume-mount-target" testDetectFilePath := volumeDest + slash + "detect-file" testBuildFilePath := volumeDest + slash + "build-file" output := pack.RunSuccessfully( "build", repoName, "-p", filepath.Join("testdata", "mock_app"), "--volume", fmt.Sprintf("%s:%s", tmpVolumeSrc, volumeDest), "--buildpack", buildpacks.ReadWriteVolume.FullPathIn(tmpDir), "--env", "DETECT_TEST_FILE_PATH="+testDetectFilePath, "--env", "BUILD_TEST_FILE_PATH="+testBuildFilePath, ) bpOutputAsserts := assertions.NewTestBuildpackOutputAssertionManager(t, output) bpOutputAsserts.ReportsFailingToWriteFileContents("Detect", testDetectFilePath) bpOutputAsserts.ReportsFailingToWriteFileContents("Build", testBuildFilePath) }) }) when("volume is read-write", func() { it("can be written to", func() { volumeDest := volumeRoot + "volume-mount-target" testDetectFilePath := volumeDest + slash + "detect-file" testBuildFilePath := volumeDest + slash + "build-file" 
output := pack.RunSuccessfully( "build", repoName, "-p", filepath.Join("testdata", "mock_app"), "--volume", fmt.Sprintf("%s:%s:rw", tmpVolumeSrc, volumeDest), "--buildpack", buildpacks.ReadWriteVolume.FullPathIn(tmpDir), "--env", "DETECT_TEST_FILE_PATH="+testDetectFilePath, "--env", "BUILD_TEST_FILE_PATH="+testBuildFilePath, ) bpOutputAsserts := assertions.NewTestBuildpackOutputAssertionManager(t, output) bpOutputAsserts.ReportsWritingFileContents("Detect", testDetectFilePath) bpOutputAsserts.ReportsReadingFileContents("Detect", testDetectFilePath, "some-content") bpOutputAsserts.ReportsWritingFileContents("Build", testBuildFilePath) bpOutputAsserts.ReportsReadingFileContents("Build", testBuildFilePath, "some-content") }) }) }) when("--default-process", func() { it("sets the default process from those in the process list", func() { pack.RunSuccessfully( "build", repoName, "--default-process", "hello", "-p", filepath.Join("testdata", "mock_app"), ) assertImage.RunsWithLogs(repoName, "hello world") }) }) when("--buildpack", func() { when("the argument is an ID", func() { it("adds the buildpacks to the builder if necessary and runs them", func() { output := pack.RunSuccessfully( "build", repoName, "-p", filepath.Join("testdata", "mock_app"), "--buildpack", "simple/layers", // can omit version if only one "--buildpack", "[email protected]", ) assertOutput := assertions.NewOutputAssertionManager(t, output) assertTestAppOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output) assertTestAppOutput.ReportsBuildStep("Simple Layers Buildpack") assertTestAppOutput.ReportsBuildStep("NOOP Buildpack") assertOutput.ReportsSuccessfulImageBuild(repoName) t.Log("app is runnable") assertImage.RunsWithOutput( repoName, "Launch Dep Contents", "Cached Dep Contents", ) }) }) when("the argument is an archive", func() { var tmpDir string it.Before(func() { var err error tmpDir, err = ioutil.TempDir("", "archive-buildpack-tests-") assert.Nil(err) }) it.After(func() { assert.Succeeds(os.RemoveAll(tmpDir)) }) it("adds the buildpack to the builder and runs it", func() { buildpackManager.PrepareBuildpacks(tmpDir, buildpacks.ArchiveNotInBuilder) output := pack.RunSuccessfully( "build", repoName, "-p", filepath.Join("testdata", "mock_app"), "--buildpack", buildpacks.ArchiveNotInBuilder.FullPathIn(tmpDir), ) assertOutput := assertions.NewOutputAssertionManager(t, output) assertOutput.ReportsAddingBuildpack("local/bp", "local-bp-version") assertOutput.ReportsSuccessfulImageBuild(repoName) assertBuildpackOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output) assertBuildpackOutput.ReportsBuildStep("Local Buildpack") }) }) when("the argument is directory", func() { var tmpDir string it.Before(func() { var err error tmpDir, err = ioutil.TempDir("", "folder-buildpack-tests-") assert.Nil(err) }) it.After(func() { _ = os.RemoveAll(tmpDir) }) it("adds the buildpacks to the builder and runs it", func() { h.SkipIf(t, runtime.GOOS == "windows", "buildpack directories not supported on windows") buildpackManager.PrepareBuildpacks(tmpDir, buildpacks.FolderNotInBuilder) output := pack.RunSuccessfully( "build", repoName, "-p", filepath.Join("testdata", "mock_app"), "--buildpack", buildpacks.FolderNotInBuilder.FullPathIn(tmpDir), ) assertOutput := assertions.NewOutputAssertionManager(t, output) assertOutput.ReportsAddingBuildpack("local/bp", "local-bp-version") assertOutput.ReportsSuccessfulImageBuild(repoName) assertBuildpackOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output) 
assertBuildpackOutput.ReportsBuildStep("Local Buildpack") }) }) when("the argument is a buildpackage image", func() { var ( tmpDir string packageImageName string ) it.After(func() { imageManager.CleanupImages(packageImageName) _ = os.RemoveAll(tmpDir) }) it("adds the buildpacks to the builder and runs them", func() { packageImageName = registryConfig.RepoName("buildpack-" + h.RandString(8)) packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, "package_for_build_cmd.toml", imageManager.HostOS()) packageImage := buildpacks.NewPackageImage( t, pack, packageImageName, packageTomlPath, buildpacks.WithRequiredBuildpacks( buildpacks.FolderSimpleLayersParent, buildpacks.FolderSimpleLayers, ), ) buildpackManager.PrepareBuildpacks(tmpDir, packageImage) output := pack.RunSuccessfully( "build", repoName, "-p", filepath.Join("testdata", "mock_app"), "--buildpack", packageImageName, ) assertOutput := assertions.NewOutputAssertionManager(t, output) assertOutput.ReportsAddingBuildpack( "simple/layers/parent", "simple-layers-parent-version", ) assertOutput.ReportsAddingBuildpack("simple/layers", "simple-layers-version") assertOutput.ReportsSuccessfulImageBuild(repoName) assertBuildpackOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output) assertBuildpackOutput.ReportsBuildStep("Simple Layers Buildpack") }) }) when("the argument is a buildpackage file", func() { var tmpDir string it.Before(func() { var err error tmpDir, err = ioutil.TempDir("", "package-file") assert.Nil(err) }) it.After(func() { assert.Succeeds(os.RemoveAll(tmpDir)) }) it("adds the buildpacks to the builder and runs them", func() { packageFileLocation := filepath.Join( tmpDir, fmt.Sprintf("buildpack-%s.cnb", h.RandString(8)), ) packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, "package_for_build_cmd.toml", imageManager.HostOS()) packageFile := buildpacks.NewPackageFile( t, pack, packageFileLocation, packageTomlPath, buildpacks.WithRequiredBuildpacks( buildpacks.FolderSimpleLayersParent, buildpacks.FolderSimpleLayers, ), ) buildpackManager.PrepareBuildpacks(tmpDir, packageFile) output := pack.RunSuccessfully( "build", repoName, "-p", filepath.Join("testdata", "mock_app"), "--buildpack", packageFileLocation, ) assertOutput := assertions.NewOutputAssertionManager(t, output) assertOutput.ReportsAddingBuildpack( "simple/layers/parent", "simple-layers-parent-version", ) assertOutput.ReportsAddingBuildpack("simple/layers", "simple-layers-version") assertOutput.ReportsSuccessfulImageBuild(repoName) assertBuildpackOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output) assertBuildpackOutput.ReportsBuildStep("Simple Layers Buildpack") }) }) when("the buildpack stack doesn't match the builder", func() { var otherStackBuilderTgz string it.Before(func() { otherStackBuilderTgz = h.CreateTGZ(t, filepath.Join(bpDir, "other-stack-buildpack"), "./", 0755) }) it.After(func() { assert.Succeeds(os.Remove(otherStackBuilderTgz)) }) it("errors", func() { output, err := pack.Run( "build", repoName, "-p", filepath.Join("testdata", "mock_app"), "--buildpack", otherStackBuilderTgz, ) assert.NotNil(err) assert.Contains(output, "other/stack/bp") assert.Contains(output, "other-stack-version") assert.Contains(output, "does not support stack 'pack.test.stack'") }) }) }) when("--env-file", func() { var envPath string it.Before(func() { envfile, err := ioutil.TempFile("", "envfile") assert.Nil(err) defer envfile.Close() err = os.Setenv("ENV2_CONTENTS", "Env2 Layer Contents From Environment") assert.Nil(err) 
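// ENV2_CONTENTS is listed in the env file below without a value, so its value
// is expected to be resolved from the process environment set just above.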
envfile.WriteString(` DETECT_ENV_BUILDPACK=true ENV1_CONTENTS=Env1 Layer Contents From File ENV2_CONTENTS `) envPath = envfile.Name() }) it.After(func() { assert.Succeeds(os.Unsetenv("ENV2_CONTENTS")) assert.Succeeds(os.RemoveAll(envPath)) }) it("provides the env vars to the build and detect steps", func() { output := pack.RunSuccessfully( "build", repoName, "-p", filepath.Join("testdata", "mock_app"), "--env-file", envPath, ) assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName) assertImage.RunsWithOutput( repoName, "Env2 Layer Contents From Environment", "Env1 Layer Contents From File", ) }) }) when("--env", func() { it.Before(func() { assert.Succeeds(os.Setenv("ENV2_CONTENTS", "Env2 Layer Contents From Environment")) }) it.After(func() { assert.Succeeds(os.Unsetenv("ENV2_CONTENTS")) }) it("provides the env vars to the build and detect steps", func() { output := pack.RunSuccessfully( "build", repoName, "-p", filepath.Join("testdata", "mock_app"), "--env", "DETECT_ENV_BUILDPACK=true", "--env", `ENV1_CONTENTS="Env1 Layer Contents From Command Line"`, "--env", "ENV2_CONTENTS", ) assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName) assertImage.RunsWithOutput( repoName, "Env2 Layer Contents From Environment", "Env1 Layer Contents From Command Line", ) }) }) when("--run-image", func() { var runImageName string when("the run-image has the correct stack ID", func() { it.Before(func() { user := func() string { if imageManager.HostOS() == "windows" { return "ContainerAdministrator" } return "root" } runImageName = h.CreateImageOnRemote(t, dockerCli, registryConfig, "custom-run-image"+h.RandString(10), fmt.Sprintf(` FROM %s USER %s RUN echo "custom-run" > /custom-run.txt USER pack `, runImage, user())) }) it.After(func() { imageManager.CleanupImages(runImageName) }) it("uses the run image as the base image", func() { output := pack.RunSuccessfully( "build", repoName, "-p", filepath.Join("testdata", "mock_app"), "--run-image", runImageName, ) assertOutput := assertions.NewOutputAssertionManager(t, output) assertOutput.ReportsSuccessfulImageBuild(repoName) assertOutput.ReportsPullingImage(runImageName) t.Log("app is runnable") assertImage.RunsWithOutput( repoName, "Launch Dep Contents", "Cached Dep Contents", ) t.Log("uses the run image as the base image") assertImage.HasBaseImage(repoName, runImageName) }) }) when("the run image has the wrong stack ID", func() { it.Before(func() { runImageName = h.CreateImageOnRemote(t, dockerCli, registryConfig, "custom-run-image"+h.RandString(10), fmt.Sprintf(` FROM %s LABEL io.buildpacks.stack.id=other.stack.id USER pack `, runImage)) }) it.After(func() { imageManager.CleanupImages(runImageName) }) it("fails with a message", func() { output, err := pack.Run( "build", repoName, "-p", filepath.Join("testdata", "mock_app"), "--run-image", runImageName, ) assert.NotNil(err) assertOutput := assertions.NewOutputAssertionManager(t, output) assertOutput.ReportsRunImageStackNotMatchingBuilder( "other.stack.id", "pack.test.stack", ) }) }) }) when("--publish", func() { it("creates image on the registry", func() { buildArgs := []string{ repoName, "-p", filepath.Join("testdata", "mock_app"), "--publish", } if imageManager.HostOS() != "windows" { buildArgs = append(buildArgs, "--network", "host") } output := pack.RunSuccessfully("build", buildArgs...) 
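// With --publish the image is exported directly to the registry rather than the
// local daemon, so the assertions below check the registry catalog and pull the
// image before inspecting and running it.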
assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName) t.Log("checking that registry has contents") assertImage.ExistsInRegistryCatalog(repo) // TODO: remove this if block after pack 0.18.0 is released if !pack.SupportsFeature(invoke.InspectRemoteImage) { imageManager.PullImage(repoName, registryConfig.RegistryAuth()) } cmdName := "inspect" if !pack.Supports("inspect") { cmdName = "inspect-image" } t.Log("inspect-image") var ( webCommand string helloCommand string helloArgs []string helloArgsPrefix string ) if imageManager.HostOS() == "windows" { webCommand = ".\\run" helloCommand = "cmd" helloArgs = []string{"/c", "echo hello world"} helloArgsPrefix = " " } else { webCommand = "./run" helloCommand = "echo" helloArgs = []string{"hello", "world"} helloArgsPrefix = "" } formats := []compareFormat{ { extension: "txt", compareFunc: assert.TrimmedEq, outputArg: "human-readable", }, { extension: "json", compareFunc: assert.EqualJSON, outputArg: "json", }, { extension: "yaml", compareFunc: assert.EqualYAML, outputArg: "yaml", }, { extension: "toml", compareFunc: assert.EqualTOML, outputArg: "toml", }, } for _, format := range formats { t.Logf("inspecting image %s format", format.outputArg) output = pack.RunSuccessfully(cmdName, repoName, "--output", format.outputArg) expectedOutput := pack.FixtureManager().TemplateFixture( fmt.Sprintf("inspect_image_published_output.%s", format.extension), map[string]interface{}{ "image_name": repoName, "base_image_ref": strings.Join([]string{runImageMirror, h.Digest(t, runImageMirror)}, "@"), "base_image_top_layer": h.TopLayerDiffID(t, runImageMirror), "run_image_mirror": runImageMirror, "web_command": webCommand, "hello_command": helloCommand, "hello_args": helloArgs, "hello_args_prefix": helloArgsPrefix, }, ) format.compareFunc(output, expectedOutput) } // TODO: remove this if block after pack 0.18.0 is released if pack.SupportsFeature(invoke.InspectRemoteImage) { imageManager.PullImage(repoName, registryConfig.RegistryAuth()) } t.Log("app is runnable") assertImage.RunsWithOutput( repoName, "Launch Dep Contents", "Cached Dep Contents", ) }) when("additional tags are specified with --tag", func() { var additionalRepo string var additionalRepoName string it.Before(func() { additionalRepo = fmt.Sprintf("%s_additional", repo) additionalRepoName = fmt.Sprintf("%s_additional", repoName) }) it.After(func() { imageManager.CleanupImages(additionalRepoName) }) it("creates additional tags on the registry", func() { buildArgs := []string{ repoName, "-p", filepath.Join("testdata", "mock_app"), "--publish", "--tag", additionalRepoName, } if imageManager.HostOS() != "windows" { buildArgs = append(buildArgs, "--network", "host") } output := pack.RunSuccessfully("build", buildArgs...) 
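// Both the primary repo and the additional --tag reference should appear in the
// registry catalog and resolve to the same image digest.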
assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName) t.Log("checking that registry has contents") assertImage.ExistsInRegistryCatalog(repo) assertImage.ExistsInRegistryCatalog(additionalRepo) imageManager.PullImage(repoName, registryConfig.RegistryAuth()) imageManager.PullImage(additionalRepoName, registryConfig.RegistryAuth()) t.Log("additional app is runnable") assertImage.RunsWithOutput( additionalRepoName, "Launch Dep Contents", "Cached Dep Contents", ) imageDigest := h.Digest(t, repoName) additionalDigest := h.Digest(t, additionalRepoName) assert.Equal(imageDigest, additionalDigest) }) }) }) when("--cache-image", func() { var cacheImageName string it.Before(func() { cacheImageName = fmt.Sprintf("%s-cache", repoName) }) it("creates image and cache image on the registry", func() { buildArgs := []string{ repoName, "-p", filepath.Join("testdata", "mock_app"), "--publish", "--cache-image", cacheImageName, } if imageManager.HostOS() != "windows" { buildArgs = append(buildArgs, "--network", "host") } output := pack.RunSuccessfully("build", buildArgs...) assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName) cacheImageRef, err := name.ParseReference(cacheImageName, name.WeakValidation) assert.Nil(err) t.Log("checking that registry has contents") assertImage.CanBePulledFromRegistry(repoName) if imageManager.HostOS() == "windows" { // Cache images are automatically Linux container images, and therefore can't be pulled // and inspected correctly on WCOW systems //https://github.com/buildpacks/lifecycle/issues/529 imageManager.PullImage(cacheImageRef.Name(), registryConfig.RegistryAuth()) } else { assertImage.CanBePulledFromRegistry(cacheImageRef.Name()) } defer imageManager.CleanupImages(cacheImageRef.Name()) }) }) when("ctrl+c", func() { it("stops the execution", func() { var buf = new(bytes.Buffer) command := pack.StartWithWriter( buf, "build", repoName, "-p", filepath.Join("testdata", "mock_app"), ) go command.TerminateAtStep("DETECTING") err := command.Wait() assert.NotNil(err) assert.NotContains(buf.String(), "Successfully built image") }) }) when("--descriptor", func() { when("using a included buildpack", func() { var tempAppDir, tempWorkingDir, origWorkingDir string it.Before(func() { h.SkipIf(t, runtime.GOOS == "windows", "buildpack directories not supported on windows") var err error tempAppDir, err = ioutil.TempDir("", "descriptor-app") assert.Nil(err) tempWorkingDir, err = ioutil.TempDir("", "descriptor-app") assert.Nil(err) origWorkingDir, err = os.Getwd() assert.Nil(err) // Create test directories and files: // // ├── cookie.jar // ├── descriptor-buildpack/... 
// ├── media // │   ├── mountain.jpg // │   └── person.png // └── test.sh assert.Succeeds(os.Mkdir(filepath.Join(tempAppDir, "descriptor-buildpack"), os.ModePerm)) h.RecursiveCopy(t, filepath.Join(bpDir, "descriptor-buildpack"), filepath.Join(tempAppDir, "descriptor-buildpack")) err = os.Mkdir(filepath.Join(tempAppDir, "media"), 0755) assert.Nil(err) err = ioutil.WriteFile(filepath.Join(tempAppDir, "media", "mountain.jpg"), []byte("fake image bytes"), 0755) assert.Nil(err) err = ioutil.WriteFile(filepath.Join(tempAppDir, "media", "person.png"), []byte("fake image bytes"), 0755) assert.Nil(err) err = ioutil.WriteFile(filepath.Join(tempAppDir, "cookie.jar"), []byte("chocolate chip"), 0755) assert.Nil(err) err = ioutil.WriteFile(filepath.Join(tempAppDir, "test.sh"), []byte("echo test"), 0755) assert.Nil(err) projectToml := ` [project] name = "exclude test" [[project.licenses]] type = "MIT" [build] exclude = [ "*.sh", "media/person.png", "descriptor-buildpack" ] [[build.buildpacks]] uri = "descriptor-buildpack" ` excludeDescriptorPath := filepath.Join(tempAppDir, "project.toml") err = ioutil.WriteFile(excludeDescriptorPath, []byte(projectToml), 0755) assert.Nil(err) // set working dir to be outside of the app we are building assert.Succeeds(os.Chdir(tempWorkingDir)) }) it.After(func() { os.RemoveAll(tempAppDir) if origWorkingDir != "" { assert.Succeeds(os.Chdir(origWorkingDir)) } }) it("uses buildpack specified by descriptor", func() { output := pack.RunSuccessfully( "build", repoName, "-p", tempAppDir, ) assert.NotContains(output, "person.png") assert.NotContains(output, "test.sh") }) }) when("exclude and include", func() { var buildpackTgz, tempAppDir string it.Before(func() { buildpackTgz = h.CreateTGZ(t, filepath.Join(bpDir, "descriptor-buildpack"), "./", 0755) var err error tempAppDir, err = ioutil.TempDir("", "descriptor-app") assert.Nil(err) // Create test directories and files: // // ├── cookie.jar // ├── other-cookie.jar // ├── nested-cookie.jar // ├── nested // │   └── nested-cookie.jar // ├── secrets // │   ├── api_keys.json // | |── user_token // ├── media // │   ├── mountain.jpg // │   └── person.png // └── test.sh err = os.Mkdir(filepath.Join(tempAppDir, "secrets"), 0755) assert.Nil(err) err = ioutil.WriteFile(filepath.Join(tempAppDir, "secrets", "api_keys.json"), []byte("{}"), 0755) assert.Nil(err) err = ioutil.WriteFile(filepath.Join(tempAppDir, "secrets", "user_token"), []byte("token"), 0755) assert.Nil(err) err = os.Mkdir(filepath.Join(tempAppDir, "nested"), 0755) assert.Nil(err) err = ioutil.WriteFile(filepath.Join(tempAppDir, "nested", "nested-cookie.jar"), []byte("chocolate chip"), 0755) assert.Nil(err) err = ioutil.WriteFile(filepath.Join(tempAppDir, "other-cookie.jar"), []byte("chocolate chip"), 0755) assert.Nil(err) err = ioutil.WriteFile(filepath.Join(tempAppDir, "nested-cookie.jar"), []byte("chocolate chip"), 0755) assert.Nil(err) err = os.Mkdir(filepath.Join(tempAppDir, "media"), 0755) assert.Nil(err) err = ioutil.WriteFile(filepath.Join(tempAppDir, "media", "mountain.jpg"), []byte("fake image bytes"), 0755) assert.Nil(err) err = ioutil.WriteFile(filepath.Join(tempAppDir, "media", "person.png"), []byte("fake image bytes"), 0755) assert.Nil(err) err = ioutil.WriteFile(filepath.Join(tempAppDir, "cookie.jar"), []byte("chocolate chip"), 0755) assert.Nil(err) err = ioutil.WriteFile(filepath.Join(tempAppDir, "test.sh"), []byte("echo test"), 0755) assert.Nil(err) }) it.After(func() { assert.Succeeds(os.RemoveAll(tempAppDir)) }) it("should exclude ALL specified files and 
directories", func() { projectToml := ` [project] name = "exclude test" [[project.licenses]] type = "MIT" [build] exclude = [ "*.sh", "secrets/", "media/metadata", "/other-cookie.jar" ,"/nested-cookie.jar"] ` excludeDescriptorPath := filepath.Join(tempAppDir, "exclude.toml") err := ioutil.WriteFile(excludeDescriptorPath, []byte(projectToml), 0755) assert.Nil(err) output := pack.RunSuccessfully( "build", repoName, "-p", tempAppDir, "--buildpack", buildpackTgz, "--descriptor", excludeDescriptorPath, ) assert.NotContains(output, "api_keys.json") assert.NotContains(output, "user_token") assert.NotContains(output, "test.sh") assert.NotContains(output, "other-cookie.jar") assert.Contains(output, "cookie.jar") assert.Contains(output, "nested-cookie.jar") assert.Contains(output, "mountain.jpg") assert.Contains(output, "person.png") }) it("should ONLY include specified files and directories", func() { projectToml := ` [project] name = "include test" [[project.licenses]] type = "MIT" [build] include = [ "*.jar", "media/mountain.jpg", "/media/person.png", ] ` includeDescriptorPath := filepath.Join(tempAppDir, "include.toml") err := ioutil.WriteFile(includeDescriptorPath, []byte(projectToml), 0755) assert.Nil(err) output := pack.RunSuccessfully( "build", repoName, "-p", tempAppDir, "--buildpack", buildpackTgz, "--descriptor", includeDescriptorPath, ) assert.NotContains(output, "api_keys.json") assert.NotContains(output, "user_token") assert.NotContains(output, "test.sh") assert.Contains(output, "cookie.jar") assert.Contains(output, "mountain.jpg") assert.Contains(output, "person.png") }) }) }) }) }) when("inspecting builder", func() { when("inspecting a nested builder", func() { it.Before(func() { // create our nested builder h.SkipIf(t, imageManager.HostOS() == "windows", "These tests are not yet compatible with Windows-based containers") h.SkipIf(t, !pack.SupportsFeature(invoke.BuilderNoDuplicateLayers), "bug fixed in 0.18.0") // create a task, handled by a 'task manager' which executes our pack commands during tests. // looks like this is used to de-dup tasks key := taskKey( "create-complex-builder", append( []string{runImageMirror, createBuilderPackConfig.Path(), lifecycle.Identifier()}, createBuilderPackConfig.FixturePaths()..., )..., ) // run task on taskmanager and save output, in case there are future calls to the same task // likely all our changes need to go on the createBuilderPack. 
value, err := suiteManager.RunTaskOnceString(key, func() (string, error) { return createComplexBuilder( t, assert, createBuilderPack, lifecycle, buildpackManager, runImageMirror, ) }) assert.Nil(err) // register task to be run to 'clean up' a task suiteManager.RegisterCleanUp("clean-"+key, func() error { imageManager.CleanupImages(value) return nil }) builderName = value output := pack.RunSuccessfully( "config", "run-image-mirrors", "add", "pack-test/run", "--mirror", "some-registry.com/pack-test/run1") assertOutput := assertions.NewOutputAssertionManager(t, output) assertOutput.ReportsSuccesfulRunImageMirrorsAdd("pack-test/run", "some-registry.com/pack-test/run1") }) it("displays nested Detection Order groups", func() { var output string if pack.Supports("builder inspect") { output = pack.RunSuccessfully("builder", "inspect", builderName) } else { output = pack.RunSuccessfully("inspect-builder", builderName) } deprecatedBuildpackAPIs, supportedBuildpackAPIs, deprecatedPlatformAPIs, supportedPlatformAPIs := lifecycle.OutputForAPIs() expectedOutput := pack.FixtureManager().TemplateVersionedFixture( "inspect_%s_builder_nested_output.txt", createBuilderPack.SanitizedVersion(), "inspect_builder_nested_output.txt", map[string]interface{}{ "builder_name": builderName, "lifecycle_version": lifecycle.Version(), "deprecated_buildpack_apis": deprecatedBuildpackAPIs, "supported_buildpack_apis": supportedBuildpackAPIs, "deprecated_platform_apis": deprecatedPlatformAPIs, "supported_platform_apis": supportedPlatformAPIs, "run_image_mirror": runImageMirror, "pack_version": createBuilderPack.Version(), "trusted": "No", // set previous pack template fields "buildpack_api_version": lifecycle.EarliestBuildpackAPIVersion(), "platform_api_version": lifecycle.EarliestPlatformAPIVersion(), }, ) assert.TrimmedEq(output, expectedOutput) }) it("provides nested detection output up to depth", func() { depth := "1" var output string if pack.Supports("builder inspect") { output = pack.RunSuccessfully("builder", "inspect", "--depth", depth, builderName) } else { output = pack.RunSuccessfully("inspect-builder", "--depth", depth, builderName) } deprecatedBuildpackAPIs, supportedBuildpackAPIs, deprecatedPlatformAPIs, supportedPlatformAPIs := lifecycle.OutputForAPIs() expectedOutput := pack.FixtureManager().TemplateVersionedFixture( "inspect_%s_builder_nested_depth_2_output.txt", createBuilderPack.SanitizedVersion(), "inspect_builder_nested_depth_2_output.txt", map[string]interface{}{ "builder_name": builderName, "lifecycle_version": lifecycle.Version(), "deprecated_buildpack_apis": deprecatedBuildpackAPIs, "supported_buildpack_apis": supportedBuildpackAPIs, "deprecated_platform_apis": deprecatedPlatformAPIs, "supported_platform_apis": supportedPlatformAPIs, "run_image_mirror": runImageMirror, "pack_version": createBuilderPack.Version(), "trusted": "No", // set previous pack template fields "buildpack_api_version": lifecycle.EarliestBuildpackAPIVersion(), "platform_api_version": lifecycle.EarliestPlatformAPIVersion(), }, ) assert.TrimmedEq(output, expectedOutput) }) when("output format is toml", func() { it("prints builder information in toml format", func() { var output string if pack.Supports("builder inspect") { output = pack.RunSuccessfully("builder", "inspect", builderName, "--output", "toml") } else { output = pack.RunSuccessfully("inspect-builder", builderName, "--output", "toml") } err := toml.NewDecoder(strings.NewReader(string(output))).Decode(&struct{}{}) assert.Nil(err) deprecatedBuildpackAPIs, 
supportedBuildpackAPIs, deprecatedPlatformAPIs, supportedPlatformAPIs := lifecycle.TOMLOutputForAPIs() expectedOutput := pack.FixtureManager().TemplateVersionedFixture( "inspect_%s_builder_nested_output_toml.txt", createBuilderPack.SanitizedVersion(), "inspect_builder_nested_output_toml.txt", map[string]interface{}{ "builder_name": builderName, "lifecycle_version": lifecycle.Version(), "deprecated_buildpack_apis": deprecatedBuildpackAPIs, "supported_buildpack_apis": supportedBuildpackAPIs, "deprecated_platform_apis": deprecatedPlatformAPIs, "supported_platform_apis": supportedPlatformAPIs, "run_image_mirror": runImageMirror, "pack_version": createBuilderPack.Version(), }, ) assert.TrimmedEq(string(output), expectedOutput) }) }) when("output format is yaml", func() { it("prints builder information in yaml format", func() { var output string if pack.Supports("builder inspect") { output = pack.RunSuccessfully("builder", "inspect", builderName, "--output", "yaml") } else { output = pack.RunSuccessfully("inspect-builder", builderName, "--output", "yaml") } err := yaml.Unmarshal([]byte(output), &struct{}{}) assert.Nil(err) deprecatedBuildpackAPIs, supportedBuildpackAPIs, deprecatedPlatformAPIs, supportedPlatformAPIs := lifecycle.YAMLOutputForAPIs(14) expectedOutput := pack.FixtureManager().TemplateVersionedFixture( "inspect_%s_builder_nested_output_yaml.txt", createBuilderPack.SanitizedVersion(), "inspect_builder_nested_output_yaml.txt", map[string]interface{}{ "builder_name": builderName, "lifecycle_version": lifecycle.Version(), "deprecated_buildpack_apis": deprecatedBuildpackAPIs, "supported_buildpack_apis": supportedBuildpackAPIs, "deprecated_platform_apis": deprecatedPlatformAPIs, "supported_platform_apis": supportedPlatformAPIs, "run_image_mirror": runImageMirror, "pack_version": createBuilderPack.Version(), }, ) assert.TrimmedEq(string(output), expectedOutput) }) }) when("output format is json", func() { it("prints builder information in json format", func() { var output string if pack.Supports("builder inspect") { output = pack.RunSuccessfully("builder", "inspect", builderName, "--output", "json") } else { output = pack.RunSuccessfully("inspect-builder", builderName, "--output", "json") } err := json.Unmarshal([]byte(output), &struct{}{}) assert.Nil(err) var prettifiedOutput bytes.Buffer err = json.Indent(&prettifiedOutput, []byte(output), "", " ") assert.Nil(err) deprecatedBuildpackAPIs, supportedBuildpackAPIs, deprecatedPlatformAPIs, supportedPlatformAPIs := lifecycle.JSONOutputForAPIs(8) expectedOutput := pack.FixtureManager().TemplateVersionedFixture( "inspect_%s_builder_nested_output_json.txt", createBuilderPack.SanitizedVersion(), "inspect_builder_nested_output_json.txt", map[string]interface{}{ "builder_name": builderName, "lifecycle_version": lifecycle.Version(), "deprecated_buildpack_apis": deprecatedBuildpackAPIs, "supported_buildpack_apis": supportedBuildpackAPIs, "deprecated_platform_apis": deprecatedPlatformAPIs, "supported_platform_apis": supportedPlatformAPIs, "run_image_mirror": runImageMirror, "pack_version": createBuilderPack.Version(), }, ) assert.Equal(prettifiedOutput.String(), expectedOutput) }) }) }) it("displays configuration for a builder (local and remote)", func() { output := pack.RunSuccessfully( "config", "run-image-mirrors", "add", "pack-test/run", "--mirror", "some-registry.com/pack-test/run1", ) assertOutput := assertions.NewOutputAssertionManager(t, output) assertOutput.ReportsSuccesfulRunImageMirrorsAdd("pack-test/run", 
"some-registry.com/pack-test/run1") if pack.Supports("builder inspect") { output = pack.RunSuccessfully("builder", "inspect", builderName) } else { output = pack.RunSuccessfully("inspect-builder", builderName) } deprecatedBuildpackAPIs, supportedBuildpackAPIs, deprecatedPlatformAPIs, supportedPlatformAPIs := lifecycle.OutputForAPIs() expectedOutput := pack.FixtureManager().TemplateVersionedFixture( "inspect_%s_builder_output.txt", createBuilderPack.SanitizedVersion(), "inspect_builder_output.txt", map[string]interface{}{ "builder_name": builderName, "lifecycle_version": lifecycle.Version(), "deprecated_buildpack_apis": deprecatedBuildpackAPIs, "supported_buildpack_apis": supportedBuildpackAPIs, "deprecated_platform_apis": deprecatedPlatformAPIs, "supported_platform_apis": supportedPlatformAPIs, "run_image_mirror": runImageMirror, "pack_version": createBuilderPack.Version(), "trusted": "No", // set previous pack template fields "buildpack_api_version": lifecycle.EarliestBuildpackAPIVersion(), "platform_api_version": lifecycle.EarliestPlatformAPIVersion(), }, ) assert.TrimmedEq(output, expectedOutput) }) it("indicates builder is trusted", func() { pack.JustRunSuccessfully("config", "trusted-builders", "add", builderName) pack.JustRunSuccessfully("config", "run-image-mirrors", "add", "pack-test/run", "--mirror", "some-registry.com/pack-test/run1") var output string if pack.Supports("builder inspect") { output = pack.RunSuccessfully("builder", "inspect", builderName) } else { output = pack.RunSuccessfully("inspect-builder", builderName) } deprecatedBuildpackAPIs, supportedBuildpackAPIs, deprecatedPlatformAPIs, supportedPlatformAPIs := lifecycle.OutputForAPIs() expectedOutput := pack.FixtureManager().TemplateVersionedFixture( "inspect_%s_builder_output.txt", createBuilderPack.SanitizedVersion(), "inspect_builder_output.txt", map[string]interface{}{ "builder_name": builderName, "lifecycle_version": lifecycle.Version(), "deprecated_buildpack_apis": deprecatedBuildpackAPIs, "supported_buildpack_apis": supportedBuildpackAPIs, "deprecated_platform_apis": deprecatedPlatformAPIs, "supported_platform_apis": supportedPlatformAPIs, "run_image_mirror": runImageMirror, "pack_version": createBuilderPack.Version(), "trusted": "Yes", // set previous pack template fields "buildpack_api_version": lifecycle.EarliestBuildpackAPIVersion(), "platform_api_version": lifecycle.EarliestPlatformAPIVersion(), }, ) assert.TrimmedEq(output, expectedOutput) }) }) when("rebase", func() { var repoName, runBefore, origID string var buildRunImage func(string, string, string) it.Before(func() { pack.JustRunSuccessfully("config", "trusted-builders", "add", builderName) repoName = registryConfig.RepoName("some-org/" + h.RandString(10)) runBefore = registryConfig.RepoName("run-before/" + h.RandString(10)) buildRunImage = func(newRunImage, contents1, contents2 string) { user := func() string { if imageManager.HostOS() == "windows" { return "ContainerAdministrator" } return "root" } h.CreateImage(t, dockerCli, newRunImage, fmt.Sprintf(` FROM %s USER %s RUN echo %s > /contents1.txt RUN echo %s > /contents2.txt USER pack `, runImage, user(), contents1, contents2)) } buildRunImage(runBefore, "contents-before-1", "contents-before-2") pack.RunSuccessfully( "build", repoName, "-p", filepath.Join("testdata", "mock_app"), "--builder", builderName, "--run-image", runBefore, "--pull-policy", pubcfg.PullNever.String(), ) origID = h.ImageID(t, repoName) assertImage.RunsWithOutput( repoName, "contents-before-1", "contents-before-2", ) }) 
it.After(func() { imageManager.CleanupImages(origID, repoName, runBefore) ref, err := name.ParseReference(repoName, name.WeakValidation) assert.Nil(err) buildCacheVolume := cache.NewVolumeCache(ref, "build", dockerCli) launchCacheVolume := cache.NewVolumeCache(ref, "launch", dockerCli) assert.Succeeds(buildCacheVolume.Clear(context.TODO())) assert.Succeeds(launchCacheVolume.Clear(context.TODO())) }) when("daemon", func() { when("--run-image", func() { var runAfter string it.Before(func() { runAfter = registryConfig.RepoName("run-after/" + h.RandString(10)) buildRunImage(runAfter, "contents-after-1", "contents-after-2") }) it.After(func() { imageManager.CleanupImages(runAfter) }) it("uses provided run image", func() { output := pack.RunSuccessfully( "rebase", repoName, "--run-image", runAfter, "--pull-policy", pubcfg.PullNever.String(), ) assert.Contains(output, fmt.Sprintf("Successfully rebased image '%s'", repoName)) assertImage.RunsWithOutput( repoName, "contents-after-1", "contents-after-2", ) }) }) when("local config has a mirror", func() { var localRunImageMirror string it.Before(func() { localRunImageMirror = registryConfig.RepoName("run-after/" + h.RandString(10)) buildRunImage(localRunImageMirror, "local-mirror-after-1", "local-mirror-after-2") pack.JustRunSuccessfully("config", "run-image-mirrors", "add", runImage, "-m", localRunImageMirror) }) it.After(func() { imageManager.CleanupImages(localRunImageMirror) }) it("prefers the local mirror", func() { output := pack.RunSuccessfully("rebase", repoName, "--pull-policy", pubcfg.PullNever.String()) assertOutput := assertions.NewOutputAssertionManager(t, output) assertOutput.ReportsSelectingRunImageMirrorFromLocalConfig(localRunImageMirror) assertOutput.ReportsSuccessfulRebase(repoName) assertImage.RunsWithOutput( repoName, "local-mirror-after-1", "local-mirror-after-2", ) }) }) when("image metadata has a mirror", func() { it.Before(func() { // clean up existing mirror first to avoid leaking images imageManager.CleanupImages(runImageMirror) buildRunImage(runImageMirror, "mirror-after-1", "mirror-after-2") }) it("selects the best mirror", func() { output := pack.RunSuccessfully("rebase", repoName, "--pull-policy", pubcfg.PullNever.String()) assertOutput := assertions.NewOutputAssertionManager(t, output) assertOutput.ReportsSelectingRunImageMirror(runImageMirror) assertOutput.ReportsSuccessfulRebase(repoName) assertImage.RunsWithOutput( repoName, "mirror-after-1", "mirror-after-2", ) }) }) }) when("--publish", func() { it.Before(func() { assert.Succeeds(h.PushImage(dockerCli, repoName, registryConfig)) }) when("--run-image", func() { var runAfter string it.Before(func() { runAfter = registryConfig.RepoName("run-after/" + h.RandString(10)) buildRunImage(runAfter, "contents-after-1", "contents-after-2") assert.Succeeds(h.PushImage(dockerCli, runAfter, registryConfig)) }) it.After(func() { imageManager.CleanupImages(runAfter) }) it("uses provided run image", func() { output := pack.RunSuccessfully("rebase", repoName, "--publish", "--run-image", runAfter) assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulRebase(repoName) assertImage.CanBePulledFromRegistry(repoName) assertImage.RunsWithOutput( repoName, "contents-after-1", "contents-after-2", ) }) }) }) }) }) }) } func buildpacksDir(bpAPIVersion string) string { return filepath.Join("testdata", "mock_buildpacks", bpAPIVersion) } func createComplexBuilder(t *testing.T, assert h.AssertionManager, pack *invoke.PackInvoker, lifecycle config.LifecycleAsset, buildpackManager 
buildpacks.BuildpackManager, runImageMirror string, ) (string, error) { t.Log("creating complex builder image...") // CREATE TEMP WORKING DIR tmpDir, err := ioutil.TempDir("", "create-complex-test-builder") if err != nil { return "", err } defer os.RemoveAll(tmpDir) // ARCHIVE BUILDPACKS builderBuildpacks := []buildpacks.TestBuildpack{ buildpacks.Noop, buildpacks.Noop2, buildpacks.OtherStack, buildpacks.ReadEnv, } templateMapping := map[string]interface{}{ "run_image_mirror": runImageMirror, } packageImageName := registryConfig.RepoName("nested-level-1-buildpack-" + h.RandString(8)) nestedLevelTwoBuildpackName := registryConfig.RepoName("nested-level-2-buildpack-" + h.RandString(8)) simpleLayersBuildpackName := registryConfig.RepoName("simple-layers-buildpack-" + h.RandString(8)) simpleLayersBuildpackDifferentShaName := registryConfig.RepoName("simple-layers-buildpack-different-name-" + h.RandString(8)) templateMapping["package_id"] = "simple/nested-level-1" templateMapping["package_image_name"] = packageImageName templateMapping["nested_level_1_buildpack"] = packageImageName templateMapping["nested_level_2_buildpack"] = nestedLevelTwoBuildpackName templateMapping["simple_layers_buildpack"] = simpleLayersBuildpackName templateMapping["simple_layers_buildpack_different_sha"] = simpleLayersBuildpackDifferentShaName fixtureManager := pack.FixtureManager() nestedLevelOneConfigFile, err := ioutil.TempFile(tmpDir, "nested-level-1-package.toml") assert.Nil(err) fixtureManager.TemplateFixtureToFile( "nested-level-1-buildpack_package.toml", nestedLevelOneConfigFile, templateMapping, ) err = nestedLevelOneConfigFile.Close() assert.Nil(err) nestedLevelTwoConfigFile, err := ioutil.TempFile(tmpDir, "nested-level-2-package.toml") assert.Nil(err) fixtureManager.TemplateFixtureToFile( "nested-level-2-buildpack_package.toml", nestedLevelTwoConfigFile, templateMapping, ) err = nestedLevelTwoConfigFile.Close() assert.Nil(err) packageImageBuildpack := buildpacks.NewPackageImage( t, pack, packageImageName, nestedLevelOneConfigFile.Name(), buildpacks.WithRequiredBuildpacks( buildpacks.NestedLevelOne, buildpacks.NewPackageImage( t, pack, nestedLevelTwoBuildpackName, nestedLevelTwoConfigFile.Name(), buildpacks.WithRequiredBuildpacks( buildpacks.NestedLevelTwo, buildpacks.NewPackageImage( t, pack, simpleLayersBuildpackName, fixtureManager.FixtureLocation("simple-layers-buildpack_package.toml"), buildpacks.WithRequiredBuildpacks(buildpacks.SimpleLayers), ), ), ), ), ) simpleLayersDifferentShaBuildpack := buildpacks.NewPackageImage( t, pack, simpleLayersBuildpackDifferentShaName, fixtureManager.FixtureLocation("simple-layers-buildpack-different-sha_package.toml"), buildpacks.WithRequiredBuildpacks(buildpacks.SimpleLayersDifferentSha), ) defer imageManager.CleanupImages(packageImageName, nestedLevelTwoBuildpackName, simpleLayersBuildpackName, simpleLayersBuildpackDifferentShaName) builderBuildpacks = append( builderBuildpacks, packageImageBuildpack, simpleLayersDifferentShaBuildpack, ) buildpackManager.PrepareBuildpacks(tmpDir, builderBuildpacks...) 
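// The builder config pins the lifecycle either by URI (when a local lifecycle
// asset is available) or by version; whichever value is set below is passed to
// the nested_builder.toml template.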
// ADD lifecycle if lifecycle.HasLocation() { lifecycleURI := lifecycle.EscapedPath() t.Logf("adding lifecycle path '%s' to builder config", lifecycleURI) templateMapping["lifecycle_uri"] = lifecycleURI } else { lifecycleVersion := lifecycle.Version() t.Logf("adding lifecycle version '%s' to builder config", lifecycleVersion) templateMapping["lifecycle_version"] = lifecycleVersion } // RENDER builder.toml builderConfigFile, err := ioutil.TempFile(tmpDir, "nested_builder.toml") if err != nil { return "", err } pack.FixtureManager().TemplateFixtureToFile("nested_builder.toml", builderConfigFile, templateMapping) err = builderConfigFile.Close() if err != nil { return "", err } // NAME BUILDER bldr := registryConfig.RepoName("test/builder-" + h.RandString(10)) // CREATE BUILDER output := pack.RunSuccessfully( "builder", "create", bldr, "-c", builderConfigFile.Name(), "--no-color", ) assert.Contains(output, fmt.Sprintf("Successfully created builder image '%s'", bldr)) assert.Succeeds(h.PushImage(dockerCli, bldr, registryConfig)) return bldr, nil } func createBuilder( t *testing.T, assert h.AssertionManager, pack *invoke.PackInvoker, lifecycle config.LifecycleAsset, buildpackManager buildpacks.BuildpackManager, runImageMirror string, ) (string, error) { t.Log("creating builder image...") // CREATE TEMP WORKING DIR tmpDir, err := ioutil.TempDir("", "create-test-builder") assert.Nil(err) defer os.RemoveAll(tmpDir) templateMapping := map[string]interface{}{ "run_image_mirror": runImageMirror, } // ARCHIVE BUILDPACKS builderBuildpacks := []buildpacks.TestBuildpack{ buildpacks.Noop, buildpacks.Noop2, buildpacks.OtherStack, buildpacks.ReadEnv, } packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, "package.toml", imageManager.HostOS()) packageImageName := registryConfig.RepoName("simple-layers-package-image-buildpack-" + h.RandString(8)) packageImageBuildpack := buildpacks.NewPackageImage( t, pack, packageImageName, packageTomlPath, buildpacks.WithRequiredBuildpacks(buildpacks.SimpleLayers), ) defer imageManager.CleanupImages(packageImageName) builderBuildpacks = append(builderBuildpacks, packageImageBuildpack) templateMapping["package_image_name"] = packageImageName templateMapping["package_id"] = "simple/layers" buildpackManager.PrepareBuildpacks(tmpDir, builderBuildpacks...) 
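// As in createComplexBuilder, either a lifecycle URI or a lifecycle version is
// added to the template mapping below and rendered into the builder.toml fixture.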
// ADD lifecycle var lifecycleURI string var lifecycleVersion string if lifecycle.HasLocation() { lifecycleURI = lifecycle.EscapedPath() t.Logf("adding lifecycle path '%s' to builder config", lifecycleURI) templateMapping["lifecycle_uri"] = lifecycleURI } else { lifecycleVersion = lifecycle.Version() t.Logf("adding lifecycle version '%s' to builder config", lifecycleVersion) templateMapping["lifecycle_version"] = lifecycleVersion } // RENDER builder.toml configFileName := "builder.toml" builderConfigFile, err := ioutil.TempFile(tmpDir, "builder.toml") assert.Nil(err) pack.FixtureManager().TemplateFixtureToFile( configFileName, builderConfigFile, templateMapping, ) err = builderConfigFile.Close() assert.Nil(err) // NAME BUILDER bldr := registryConfig.RepoName("test/builder-" + h.RandString(10)) // CREATE BUILDER output := pack.RunSuccessfully( "builder", "create", bldr, "-c", builderConfigFile.Name(), "--no-color", ) assert.Contains(output, fmt.Sprintf("Successfully created builder image '%s'", bldr)) assert.Succeeds(h.PushImage(dockerCli, bldr, registryConfig)) return bldr, nil } func generatePackageTomlWithOS( t *testing.T, assert h.AssertionManager, pack *invoke.PackInvoker, tmpDir string, fixtureName string, platform_os string, ) string { t.Helper() packageTomlFile, err := ioutil.TempFile(tmpDir, "package-*.toml") assert.Nil(err) pack.FixtureManager().TemplateFixtureToFile( fixtureName, packageTomlFile, map[string]interface{}{ "OS": platform_os, }, ) assert.Nil(packageTomlFile.Close()) return packageTomlFile.Name() } func createStack(t *testing.T, dockerCli client.CommonAPIClient, runImageMirror string) error { t.Helper() t.Log("creating stack images...") stackBaseDir := filepath.Join("testdata", "mock_stack", imageManager.HostOS()) if err := createStackImage(dockerCli, runImage, filepath.Join(stackBaseDir, "run")); err != nil { return err } if err := createStackImage(dockerCli, buildImage, filepath.Join(stackBaseDir, "build")); err != nil { return err } imageManager.TagImage(runImage, runImageMirror) if err := h.PushImage(dockerCli, runImageMirror, registryConfig); err != nil { return err } return nil } func createStackImage(dockerCli client.CommonAPIClient, repoName string, dir string) error { defaultFilterFunc := func(file string) bool { return true } ctx := context.Background() buildContext := archive.ReadDirAsTar(dir, "/", 0, 0, -1, true, defaultFilterFunc) res, err := dockerCli.ImageBuild(ctx, buildContext, dockertypes.ImageBuildOptions{ Tags: []string{repoName}, Remove: true, ForceRemove: true, }) if err != nil { return err } _, err = io.Copy(ioutil.Discard, res.Body) if err != nil { return err } return res.Body.Close() } // taskKey creates a key from the prefix and all arguments to be unique func taskKey(prefix string, args ...string) string { hash := sha256.New() for _, v := range args { hash.Write([]byte(v)) } return fmt.Sprintf("%s-%s", prefix, hex.EncodeToString(hash.Sum(nil))) } type compareFormat struct { extension string compareFunc func(string, string) outputArg string }
[ "\"DOCKER_HOST\"" ]
[]
[ "DOCKER_HOST" ]
[]
["DOCKER_HOST"]
go
1
0
pkg/cli/common.go
package cli // the cli package contains urfave/cli related structs that help make up // the command line for buildah commands. it resides here so other projects // that vendor in this code can use them too. import ( "fmt" "os" "runtime" "strings" "github.com/containers/buildah/define" "github.com/containers/buildah/pkg/completion" "github.com/containers/buildah/pkg/parse" commonComp "github.com/containers/common/pkg/completion" "github.com/containers/common/pkg/config" "github.com/containers/storage/pkg/unshare" "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" "github.com/spf13/pflag" ) // LayerResults represents the results of the layer flags type LayerResults struct { ForceRm bool Layers bool } // UserNSResults represents the results for the UserNS flags type UserNSResults struct { UserNS string UserNSUIDMap []string UserNSGIDMap []string UserNSUIDMapUser string UserNSGIDMapGroup string } // NameSpaceResults represents the results for Namespace flags type NameSpaceResults struct { IPC string Network string CNIConfigDir string CNIPlugInPath string PID string UTS string } // BudResults represents the results for Build flags type BudResults struct { AllPlatforms bool Annotation []string Authfile string BuildArg []string CacheFrom string CertDir string Compress bool Creds string DisableCompression bool DisableContentTrust bool IgnoreFile string File []string Format string From string Iidfile string Label []string Logfile string Manifest string NoCache bool Timestamp int64 Pull bool PullAlways bool PullNever bool Quiet bool Rm bool Runtime string RuntimeFlags []string Secrets []string SSH []string SignaturePolicy string SignBy string Squash bool Stdin bool Tag []string Target string TLSVerify bool Jobs int LogRusage bool RusageLogFile string UnsetEnvs []string } // FromAndBugResults represents the results for common flags // in build and from type FromAndBudResults struct { AddHost []string BlobCache string CapAdd []string CapDrop []string CgroupParent string CPUPeriod uint64 CPUQuota int64 CPUSetCPUs string CPUSetMems string CPUShares uint64 DecryptionKeys []string Devices []string DNSSearch []string DNSServers []string DNSOptions []string HTTPProxy bool Isolation string Memory string MemorySwap string SecurityOpt []string ShmSize string Ulimit []string Volumes []string } // GetUserNSFlags returns the common flags for usernamespace func GetUserNSFlags(flags *UserNSResults) pflag.FlagSet { usernsFlags := pflag.FlagSet{} usernsFlags.StringVar(&flags.UserNS, "userns", "", "'container', `path` of user namespace to join, or 'host'") usernsFlags.StringSliceVar(&flags.UserNSUIDMap, "userns-uid-map", []string{}, "`containerUID:hostUID:length` UID mapping to use in user namespace") usernsFlags.StringSliceVar(&flags.UserNSGIDMap, "userns-gid-map", []string{}, "`containerGID:hostGID:length` GID mapping to use in user namespace") usernsFlags.StringVar(&flags.UserNSUIDMapUser, "userns-uid-map-user", "", "`name` of entries from /etc/subuid to use to set user namespace UID mapping") usernsFlags.StringVar(&flags.UserNSGIDMapGroup, "userns-gid-map-group", "", "`name` of entries from /etc/subgid to use to set user namespace GID mapping") return usernsFlags } // GetUserNSFlagsCompletions returns the FlagCompletions for the userns flags func GetUserNSFlagsCompletions() commonComp.FlagCompletions { flagCompletion := commonComp.FlagCompletions{} flagCompletion["userns"] = completion.AutocompleteNamespaceFlag flagCompletion["userns-uid-map"] = commonComp.AutocompleteNone 
flagCompletion["userns-gid-map"] = commonComp.AutocompleteNone flagCompletion["userns-uid-map-user"] = commonComp.AutocompleteSubuidName flagCompletion["userns-gid-map-group"] = commonComp.AutocompleteSubgidName return flagCompletion } // GetNameSpaceFlags returns the common flags for a namespace menu func GetNameSpaceFlags(flags *NameSpaceResults) pflag.FlagSet { fs := pflag.FlagSet{} fs.StringVar(&flags.IPC, string(specs.IPCNamespace), "", "'private', `path` of IPC namespace to join, or 'host'") fs.StringVar(&flags.Network, string(specs.NetworkNamespace), "", "'private', 'none', 'ns:path' of network namespace to join, or 'host'") fs.StringVar(&flags.CNIConfigDir, "cni-config-dir", define.DefaultCNIConfigDir, "`directory` of CNI configuration files") fs.StringVar(&flags.CNIPlugInPath, "cni-plugin-path", define.DefaultCNIPluginPath, "`path` of CNI network plugins") fs.StringVar(&flags.PID, string(specs.PIDNamespace), "", "private, `path` of PID namespace to join, or 'host'") fs.StringVar(&flags.UTS, string(specs.UTSNamespace), "", "private, :`path` of UTS namespace to join, or 'host'") return fs } // GetNameSpaceFlagsCompletions returns the FlagCompletions for the namespace flags func GetNameSpaceFlagsCompletions() commonComp.FlagCompletions { flagCompletion := commonComp.FlagCompletions{} flagCompletion[string(specs.IPCNamespace)] = completion.AutocompleteNamespaceFlag flagCompletion[string(specs.NetworkNamespace)] = completion.AutocompleteNamespaceFlag flagCompletion["cni-config-dir"] = commonComp.AutocompleteDefault flagCompletion["cni-plugin-path"] = commonComp.AutocompleteDefault flagCompletion[string(specs.PIDNamespace)] = completion.AutocompleteNamespaceFlag flagCompletion[string(specs.UTSNamespace)] = completion.AutocompleteNamespaceFlag return flagCompletion } // GetLayerFlags returns the common flags for layers func GetLayerFlags(flags *LayerResults) pflag.FlagSet { fs := pflag.FlagSet{} fs.BoolVar(&flags.ForceRm, "force-rm", false, "Always remove intermediate containers after a build, even if the build is unsuccessful.") fs.BoolVar(&flags.Layers, "layers", UseLayers(), fmt.Sprintf("cache intermediate layers during build. Use BUILDAH_LAYERS environment variable to override.")) return fs } // Note: GetLayerFlagsCompletion is not needed since GetLayerFlags only contains bool flags // GetBudFlags returns common build flags func GetBudFlags(flags *BudResults) pflag.FlagSet { fs := pflag.FlagSet{} fs.BoolVar(&flags.AllPlatforms, "all-platforms", false, "attempt to build for all base image platforms") fs.String("arch", runtime.GOARCH, "set the ARCH of the image to the provided value instead of the architecture of the host") fs.StringArrayVar(&flags.Annotation, "annotation", []string{}, "Set metadata for an image (default [])") fs.StringVar(&flags.Authfile, "authfile", "", "path of the authentication file.") fs.StringArrayVar(&flags.BuildArg, "build-arg", []string{}, "`argument=value` to supply to the builder") fs.StringVar(&flags.CacheFrom, "cache-from", "", "Images to utilise as potential cache sources. 
The build process does not currently support caching so this is a NOOP.") fs.StringVar(&flags.CertDir, "cert-dir", "", "use certificates at the specified path to access the registry") fs.BoolVar(&flags.Compress, "compress", false, "This is legacy option, which has no effect on the image") fs.StringVar(&flags.Creds, "creds", "", "use `[username[:password]]` for accessing the registry") fs.BoolVarP(&flags.DisableCompression, "disable-compression", "D", true, "don't compress layers by default") fs.BoolVar(&flags.DisableContentTrust, "disable-content-trust", false, "This is a Docker specific option and is a NOOP") fs.StringVar(&flags.From, "from", "", "image name used to replace the value in the first FROM instruction in the Containerfile") fs.StringVar(&flags.IgnoreFile, "ignorefile", "", "path to an alternate .dockerignore file") fs.StringSliceVarP(&flags.File, "file", "f", []string{}, "`pathname or URL` of a Dockerfile") fs.StringVar(&flags.Format, "format", DefaultFormat(), "`format` of the built image's manifest and metadata. Use BUILDAH_FORMAT environment variable to override.") fs.StringVar(&flags.Iidfile, "iidfile", "", "`file` to write the image ID to") fs.IntVar(&flags.Jobs, "jobs", 1, "how many stages to run in parallel") fs.StringArrayVar(&flags.Label, "label", []string{}, "Set metadata for an image (default [])") fs.StringVar(&flags.Logfile, "logfile", "", "log to `file` instead of stdout/stderr") fs.Int("loglevel", 0, "NO LONGER USED, flag ignored, and hidden") if err := fs.MarkHidden("loglevel"); err != nil { panic(fmt.Sprintf("error marking the loglevel flag as hidden: %v", err)) } fs.BoolVar(&flags.LogRusage, "log-rusage", false, "log resource usage at each build step") if err := fs.MarkHidden("log-rusage"); err != nil { panic(fmt.Sprintf("error marking the log-rusage flag as hidden: %v", err)) } fs.StringVar(&flags.RusageLogFile, "rusage-logfile", "", "destination file to which rusage should be logged to instead of stdout (= the default).") if err := fs.MarkHidden("rusage-logfile"); err != nil { panic(fmt.Sprintf("error marking the rusage-logfile flag as hidden: %v", err)) } fs.StringVar(&flags.Manifest, "manifest", "", "add the image to the specified manifest list. Creates manifest list if it does not exist") fs.BoolVar(&flags.NoCache, "no-cache", false, "Do not use existing cached images for the container build. Build from the start with a new set of cached layers.") fs.String("os", runtime.GOOS, "set the OS to the provided value instead of the current operating system of the host") fs.BoolVar(&flags.Pull, "pull", true, "pull the image from the registry if newer or not present in store, if false, only pull the image if not present") fs.BoolVar(&flags.PullAlways, "pull-always", false, "pull the image even if the named image is present in store") fs.BoolVar(&flags.PullNever, "pull-never", false, "do not pull the image, use the image present in store if available") fs.BoolVarP(&flags.Quiet, "quiet", "q", false, "refrain from announcing build instructions and image read/write progress") fs.BoolVar(&flags.Rm, "rm", true, "Remove intermediate containers after a successful build") // "runtime" definition moved to avoid name collision in podman build. Defined in cmd/buildah/build.go. 
fs.StringSliceVar(&flags.RuntimeFlags, "runtime-flag", []string{}, "add global flags for the container runtime") fs.StringArrayVar(&flags.Secrets, "secret", []string{}, "secret file to expose to the build") fs.StringVar(&flags.SignBy, "sign-by", "", "sign the image using a GPG key with the specified `FINGERPRINT`") fs.StringVar(&flags.SignaturePolicy, "signature-policy", "", "`pathname` of signature policy file (not usually used)") if err := fs.MarkHidden("signature-policy"); err != nil { panic(fmt.Sprintf("error marking the signature-policy flag as hidden: %v", err)) } fs.BoolVar(&flags.Squash, "squash", false, "squash newly built layers into a single new layer") fs.StringArrayVar(&flags.SSH, "ssh", []string{}, "SSH agent socket or keys to expose to the build. (format: default|<id>[=<socket>|<key>[,<key>]])") fs.BoolVar(&flags.Stdin, "stdin", false, "pass stdin into containers") fs.StringArrayVarP(&flags.Tag, "tag", "t", []string{}, "tagged `name` to apply to the built image") fs.StringVar(&flags.Target, "target", "", "set the target build stage to build") fs.Int64Var(&flags.Timestamp, "timestamp", 0, "set created timestamp to the specified epoch seconds to allow for deterministic builds, defaults to current time") fs.BoolVar(&flags.TLSVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry") fs.String("variant", "", "override the `variant` of the specified image") fs.StringSliceVar(&flags.UnsetEnvs, "unsetenv", nil, "Unset environment variable from final image") return fs } // GetBudFlagsCompletions returns the FlagCompletions for the common build flags func GetBudFlagsCompletions() commonComp.FlagCompletions { flagCompletion := commonComp.FlagCompletions{} flagCompletion["arch"] = commonComp.AutocompleteNone flagCompletion["annotation"] = commonComp.AutocompleteNone flagCompletion["authfile"] = commonComp.AutocompleteDefault flagCompletion["build-arg"] = commonComp.AutocompleteNone flagCompletion["cache-from"] = commonComp.AutocompleteNone flagCompletion["cert-dir"] = commonComp.AutocompleteDefault flagCompletion["creds"] = commonComp.AutocompleteNone flagCompletion["file"] = commonComp.AutocompleteDefault flagCompletion["from"] = commonComp.AutocompleteDefault flagCompletion["format"] = commonComp.AutocompleteNone flagCompletion["ignorefile"] = commonComp.AutocompleteDefault flagCompletion["iidfile"] = commonComp.AutocompleteDefault flagCompletion["jobs"] = commonComp.AutocompleteNone flagCompletion["label"] = commonComp.AutocompleteNone flagCompletion["logfile"] = commonComp.AutocompleteDefault flagCompletion["manifest"] = commonComp.AutocompleteDefault flagCompletion["os"] = commonComp.AutocompleteNone flagCompletion["runtime-flag"] = commonComp.AutocompleteNone flagCompletion["secret"] = commonComp.AutocompleteNone flagCompletion["ssh"] = commonComp.AutocompleteNone flagCompletion["sign-by"] = commonComp.AutocompleteNone flagCompletion["signature-policy"] = commonComp.AutocompleteNone flagCompletion["tag"] = commonComp.AutocompleteNone flagCompletion["target"] = commonComp.AutocompleteNone flagCompletion["timestamp"] = commonComp.AutocompleteNone flagCompletion["variant"] = commonComp.AutocompleteNone flagCompletion["unsetenv"] = commonComp.AutocompleteNone return flagCompletion } // GetFromAndBudFlags returns from and build flags func GetFromAndBudFlags(flags *FromAndBudResults, usernsResults *UserNSResults, namespaceResults *NameSpaceResults) (pflag.FlagSet, error) { fs := pflag.FlagSet{} defaultContainerConfig, err := config.Default() if err 
!= nil { return fs, errors.Wrapf(err, "failed to get container config") } fs.StringSliceVar(&flags.AddHost, "add-host", []string{}, "add a custom host-to-IP mapping (`host:ip`) (default [])") fs.StringVar(&flags.BlobCache, "blob-cache", "", "assume image blobs in the specified directory will be available for pushing") if err := fs.MarkHidden("blob-cache"); err != nil { panic(fmt.Sprintf("error marking net flag as hidden: %v", err)) } fs.StringSliceVar(&flags.CapAdd, "cap-add", []string{}, "add the specified capability when running (default [])") fs.StringSliceVar(&flags.CapDrop, "cap-drop", []string{}, "drop the specified capability when running (default [])") fs.StringVar(&flags.CgroupParent, "cgroup-parent", "", "optional parent cgroup for the container") fs.Uint64Var(&flags.CPUPeriod, "cpu-period", 0, "limit the CPU CFS (Completely Fair Scheduler) period") fs.Int64Var(&flags.CPUQuota, "cpu-quota", 0, "limit the CPU CFS (Completely Fair Scheduler) quota") fs.Uint64VarP(&flags.CPUShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") fs.StringVar(&flags.CPUSetCPUs, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") fs.StringVar(&flags.CPUSetMems, "cpuset-mems", "", "memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.") fs.StringSliceVar(&flags.DecryptionKeys, "decryption-key", nil, "key needed to decrypt the image") fs.StringArrayVar(&flags.Devices, "device", defaultContainerConfig.Containers.Devices, "Additional devices to be used within containers (default [])") fs.StringSliceVar(&flags.DNSSearch, "dns-search", defaultContainerConfig.Containers.DNSSearches, "Set custom DNS search domains") fs.StringSliceVar(&flags.DNSServers, "dns", defaultContainerConfig.Containers.DNSServers, "Set custom DNS servers or disable it completely by setting it to 'none', which prevents the automatic creation of `/etc/resolv.conf`.") fs.StringSliceVar(&flags.DNSOptions, "dns-option", defaultContainerConfig.Containers.DNSOptions, "Set custom DNS options") fs.BoolVar(&flags.HTTPProxy, "http-proxy", true, "pass through HTTP Proxy environment variables") fs.StringVar(&flags.Isolation, "isolation", DefaultIsolation(), "`type` of process isolation to use. Use BUILDAH_ISOLATION environment variable to override.") fs.StringVarP(&flags.Memory, "memory", "m", "", "memory limit (format: <number>[<unit>], where unit = b, k, m or g)") fs.StringVar(&flags.MemorySwap, "memory-swap", "", "swap limit equal to memory plus swap: '-1' to enable unlimited swap") fs.String("arch", runtime.GOARCH, "set the ARCH of the image to the provided value instead of the architecture of the host") fs.String("os", runtime.GOOS, "prefer `OS` instead of the running OS when pulling images") fs.StringSlice("platform", []string{parse.DefaultPlatform()}, "set the OS/ARCH/VARIANT of the image to the provided value instead of the current operating system and architecture of the host (for example `linux/arm`)") fs.String("variant", "", "override the `variant` of the specified image") fs.StringArrayVar(&flags.SecurityOpt, "security-opt", []string{}, "security options (default [])") fs.StringVar(&flags.ShmSize, "shm-size", defaultContainerConfig.Containers.ShmSize, "size of '/dev/shm'. 
The format is `<number><unit>`.") fs.StringSliceVar(&flags.Ulimit, "ulimit", defaultContainerConfig.Containers.DefaultUlimits, "ulimit options") fs.StringArrayVarP(&flags.Volumes, "volume", "v", defaultContainerConfig.Containers.Volumes, "bind mount a volume into the container") // Add in the usernamespace and namespaceflags usernsFlags := GetUserNSFlags(usernsResults) namespaceFlags := GetNameSpaceFlags(namespaceResults) fs.AddFlagSet(&usernsFlags) fs.AddFlagSet(&namespaceFlags) return fs, nil } // GetFromAndBudFlagsCompletions returns the FlagCompletions for the from and build flags func GetFromAndBudFlagsCompletions() commonComp.FlagCompletions { flagCompletion := commonComp.FlagCompletions{} flagCompletion["arch"] = commonComp.AutocompleteNone flagCompletion["add-host"] = commonComp.AutocompleteNone flagCompletion["blob-cache"] = commonComp.AutocompleteNone flagCompletion["cap-add"] = commonComp.AutocompleteCapabilities flagCompletion["cap-drop"] = commonComp.AutocompleteCapabilities flagCompletion["cgroup-parent"] = commonComp.AutocompleteDefault // FIXME: This would be a path right?! flagCompletion["cpu-period"] = commonComp.AutocompleteNone flagCompletion["cpu-quota"] = commonComp.AutocompleteNone flagCompletion["cpu-shares"] = commonComp.AutocompleteNone flagCompletion["cpuset-cpus"] = commonComp.AutocompleteNone flagCompletion["cpuset-mems"] = commonComp.AutocompleteNone flagCompletion["decryption-key"] = commonComp.AutocompleteNone flagCompletion["device"] = commonComp.AutocompleteDefault flagCompletion["dns-search"] = commonComp.AutocompleteNone flagCompletion["dns"] = commonComp.AutocompleteNone flagCompletion["dns-option"] = commonComp.AutocompleteNone flagCompletion["isolation"] = commonComp.AutocompleteNone flagCompletion["memory"] = commonComp.AutocompleteNone flagCompletion["memory-swap"] = commonComp.AutocompleteNone flagCompletion["os"] = commonComp.AutocompleteNone flagCompletion["platform"] = commonComp.AutocompleteNone flagCompletion["security-opt"] = commonComp.AutocompleteNone flagCompletion["shm-size"] = commonComp.AutocompleteNone flagCompletion["ulimit"] = commonComp.AutocompleteNone flagCompletion["volume"] = commonComp.AutocompleteDefault flagCompletion["variant"] = commonComp.AutocompleteNone // Add in the usernamespace and namespace flag completions userNsComp := GetUserNSFlagsCompletions() for name, comp := range userNsComp { flagCompletion[name] = comp } namespaceComp := GetNameSpaceFlagsCompletions() for name, comp := range namespaceComp { flagCompletion[name] = comp } return flagCompletion } // UseLayers returns true if BUILDAH_LAYERS is set to "1" or "true" // otherwise it returns false func UseLayers() bool { layers := os.Getenv("BUILDAH_LAYERS") if strings.ToLower(layers) == "true" || layers == "1" { return true } return false } // DefaultFormat returns the default image format func DefaultFormat() string { format := os.Getenv("BUILDAH_FORMAT") if format != "" { return format } return define.OCI } // DefaultIsolation returns the default image format func DefaultIsolation() string { isolation := os.Getenv("BUILDAH_ISOLATION") if isolation != "" { return isolation } if unshare.IsRootless() { return "rootless" } return define.OCI } // DefaultHistory returns the default add-history setting func DefaultHistory() bool { history := os.Getenv("BUILDAH_HISTORY") if strings.ToLower(history) == "true" || history == "1" { return true } return false } func VerifyFlagsArgsOrder(args []string) error { for _, arg := range args { if strings.HasPrefix(arg, "-") { return 
errors.Errorf("No options (%s) can be specified after the image or container name", arg) } } return nil } // aliasFlags is a function to handle backwards compatibility with old flags func AliasFlags(f *pflag.FlagSet, name string) pflag.NormalizedName { switch name { case "net": name = "network" case "override-arch": name = "arch" case "override-os": name = "os" case "purge": name = "rm" case "tty": name = "terminal" } return pflag.NormalizedName(name) }
[ "\"BUILDAH_LAYERS\"", "\"BUILDAH_FORMAT\"", "\"BUILDAH_ISOLATION\"", "\"BUILDAH_HISTORY\"" ]
[]
[ "BUILDAH_ISOLATION", "BUILDAH_HISTORY", "BUILDAH_FORMAT", "BUILDAH_LAYERS" ]
[]
["BUILDAH_ISOLATION", "BUILDAH_HISTORY", "BUILDAH_FORMAT", "BUILDAH_LAYERS"]
go
4
0
env_other.go
// +build !linux,!windows,!darwin package config // environment for unixy systems that are not linux and not darwin, like the BSD family import ( "os" "path/filepath" "strings" ) func setUserDir() { home := os.Getenv("HOME") if home == "" { home = filepath.Join("/home", os.Getenv("USER")) } USER_DIR = filepath.Join(home, ".config") } func setGlobalDir() { GLOBAL_DIRS = "/usr/local/etc" } func setWorkingDir() { wd, err := os.Getwd() if err != nil { wd = "." } WORKING_DIR = wd } func splitGlobals() []string { return strings.Split(GLOBAL_DIRS, ":") } func init() { setUserDir() setGlobalDir() setWorkingDir() }
[ "\"HOME\"", "\"USER\"" ]
[]
[ "USER", "HOME" ]
[]
["USER", "HOME"]
go
2
0
bundler.go
package astibundler import ( "context" "fmt" "go/build" "io/ioutil" "os" "os/exec" "os/signal" "path/filepath" "strings" "syscall" "time" "github.com/Nebulabots/go-astilectron" "github.com/akavel/rsrc/rsrc" "github.com/asticode/go-astikit" "github.com/asticode/go-bindata" "github.com/sam-kamerer/go-plister" ) // Configuration represents the bundle configuration type Configuration struct { // The app name as it should be displayed everywhere // It's also set as an ldflag and therefore accessible in a global var package_name.AppName AppName string `json:"app_name"` // The bind configuration Bind ConfigurationBind `json:"bind"` // Whether the app is a darwin agent app DarwinAgentApp bool `json:"darwin_agent_app"` // List of environments the bundling should be done upon. // An environment is a combination of OS and ARCH Environments []ConfigurationEnvironment `json:"environments"` // The path of the go binary // Defaults to "go" GoBinaryPath string `json:"go_binary_path"` // Paths to icons IconPathDarwin string `json:"icon_path_darwin"` // .icns IconPathLinux string `json:"icon_path_linux"` IconPathWindows string `json:"icon_path_windows"` // .ico // Info.plist property list InfoPlist map[string]interface{} `json:"info_plist"` // The path of the project. // Defaults to the current directory InputPath string `json:"input_path"` // Build flags to pass into go build BuildFlags map[string]string `json:"build_flags"` // LDFlags to pass through to go build LDFlags LDFlags `json:"ldflags"` // The path used for the LD Flags // Defaults to the `Bind.Package` value LDFlagsPackage string `json:"ldflags_package"` // The path to application manifest file (WINDOWS ONLY) ManifestPath string `json:"manifest_path"` // The path where the files will be written // Defaults to "output" OutputPath string `json:"output_path"` // List of commands executed on resources // Paths inside commands must be relative to the resources folder ResourcesAdapters []ConfigurationResourcesAdapter `json:"resources_adapters"` // The path where the resources are/will be created // This path must be relative to the input path // Defaults to "resources" ResourcesPath string `json:"resources_path"` // Show Windows console ShowWindowsConsole bool `json:"show_windows_console"` // The path where the vendor directory will be created // This path must be relative to the output path // Defaults to a temp directory VendorDirPath string `json:"vendor_dir_path"` // Version of Astilectron install VersionAstilectron string `json:"version_astilectron"` // Version of Electron install VersionElectron string `json:"version_electron"` // The path to the working directory. 
// Defaults to a temp directory WorkingDirectoryPath string `json:"working_directory_path"` //!\\ DEBUG ONLY AstilectronPath string `json:"astilectron_path"` // when making changes to astilectron } type ConfigurationBind struct { // The path where the file will be written // Defaults to the input path OutputPath string `json:"output_path"` // The package of the generated file // Defaults to "main" Package string `json:"package"` } // ConfigurationEnvironment represents the bundle configuration environment type ConfigurationEnvironment struct { Arch string `json:"arch"` EnvironmentVariables map[string]string `json:"env"` OS string `json:"os"` } type ConfigurationResourcesAdapter struct { Args []string `json:"args"` Dir string `json:"dir"` Name string `json:"name"` } // Bundler represents an object capable of bundling an Astilectron app type Bundler struct { appName string bindPackage string buildFlags map[string]string cancel context.CancelFunc ctx context.Context d *astikit.HTTPDownloader darwinAgentApp bool environments []ConfigurationEnvironment infoPlist map[string]interface{} l astikit.SeverityLogger ldflags LDFlags ldflagsPackage string pathAstilectron string pathBindInput string pathBindOutput string pathBuild string pathCache string pathIconDarwin string pathIconLinux string pathIconWindows string pathInput string pathGoBinary string pathOutput string pathResources string pathVendor string pathWorkingDirectory string pathManifest string resourcesAdapters []ConfigurationResourcesAdapter showWindowsConsole bool versionAstilectron string versionElectron string } // absPath computes the absolute path func absPath(configPath string, defaultPathFn func() (string, error)) (o string, err error) { if len(configPath) > 0 { if o, err = filepath.Abs(configPath); err != nil { err = fmt.Errorf("filepath.Abs of %s failed: %w", configPath, err) return } } else if defaultPathFn != nil { if o, err = defaultPathFn(); err != nil { err = fmt.Errorf("default path function to compute absPath of %s failed: %w", configPath, err) return } } return } // New builds a new bundler based on a configuration func New(c *Configuration, l astikit.StdLogger) (b *Bundler, err error) { // Init b = &Bundler{ appName: c.AppName, bindPackage: c.Bind.Package, d: astikit.NewHTTPDownloader(astikit.HTTPDownloaderOptions{ Sender: astikit.HTTPSenderOptions{ Logger: l, }, }), environments: c.Environments, darwinAgentApp: c.DarwinAgentApp, resourcesAdapters: c.ResourcesAdapters, l: astikit.AdaptStdLogger(l), ldflags: c.LDFlags, ldflagsPackage: c.LDFlagsPackage, infoPlist: c.InfoPlist, showWindowsConsole: c.ShowWindowsConsole, versionAstilectron: astilectron.DefaultVersionAstilectron, versionElectron: astilectron.DefaultVersionElectron, } // Ldflags if b.ldflags == nil { b.ldflags = make(LDFlags) } if c.VersionAstilectron != "" { b.versionAstilectron = c.VersionAstilectron } if c.VersionElectron != "" { b.versionElectron = c.VersionElectron } if len(c.BuildFlags) > 0 { b.buildFlags = c.BuildFlags } // Add context b.ctx, b.cancel = context.WithCancel(context.Background()) // Loop through environments for _, env := range b.environments { // Validate OS if !astilectron.IsValidOS(env.OS) { err = fmt.Errorf("OS %s is invalid", env.OS) return } } // Astilectron path if b.pathAstilectron, err = absPath(c.AstilectronPath, nil); err != nil { return } // Working directory path if b.pathWorkingDirectory, err = absPath(c.WorkingDirectoryPath, func() (string, error) { return filepath.Join(os.TempDir(), "astibundler"), nil }); err != nil { 
return } // Paths that depend on the working directory path b.pathBindInput = filepath.Join(b.pathWorkingDirectory, "bind") b.pathCache = filepath.Join(b.pathWorkingDirectory, "cache") // Darwin icon path if b.pathIconDarwin, err = absPath(c.IconPathDarwin, nil); err != nil { return } // Linux icon path if b.pathIconLinux, err = absPath(c.IconPathLinux, nil); err != nil { return } // Windows icon path if b.pathIconWindows, err = absPath(c.IconPathWindows, nil); err != nil { return } // Windows application manifest path if b.pathManifest, err = absPath(c.ManifestPath, nil); err != nil { return } // Input path if b.pathInput, err = absPath(c.InputPath, os.Getwd); err != nil { return } // Paths that depends on the input path for _, i := range filepath.SplitList(os.Getenv("GOPATH")) { var p = filepath.Join(i, "src") if strings.HasPrefix(b.pathInput, p) { b.pathBuild = strings.TrimPrefix(strings.TrimPrefix(b.pathInput, p), string(os.PathSeparator)) break } } // Bind output path if b.pathBindOutput, err = absPath(c.Bind.OutputPath, func() (string, error) { return b.pathInput, nil }); err != nil { return } // If build path is empty, ldflags are not set properly if len(b.pathBuild) == 0 { b.pathBuild = "." } // Resources path if b.pathResources = c.ResourcesPath; len(b.pathResources) == 0 { b.pathResources = "resources" } // Vendor path if b.pathVendor = c.VendorDirPath; len(b.pathVendor) == 0 { b.pathVendor = vendorDirectoryName } b.pathVendor = filepath.Join(b.pathBindInput, b.pathVendor) // Go binary path b.pathGoBinary = "go" if len(c.GoBinaryPath) > 0 { b.pathGoBinary = c.GoBinaryPath } // Output path if b.pathOutput, err = absPath(c.OutputPath, func() (string, error) { p, err := os.Getwd() if err != nil { return "", err } p = filepath.Join(p, "output") return p, err }); err != nil { return } // Bind package if len(b.bindPackage) == 0 { b.bindPackage = "main" } // Ldflags package if len(b.ldflagsPackage) == 0 { b.ldflagsPackage = b.bindPackage } return } // HandleSignals handles signals func (b *Bundler) HandleSignals() { ch := make(chan os.Signal, 1) signal.Notify(ch, syscall.SIGABRT, syscall.SIGINT, syscall.SIGQUIT, syscall.SIGTERM) go func() { for s := range ch { b.l.Infof("Received signal %s", s) b.Stop() return } }() } // Stop stops the bundler func (b *Bundler) Stop() { b.cancel() } // ClearCache clears the bundler cache func (b *Bundler) ClearCache() (err error) { // Remove cache folder b.l.Debugf("Removing %s", b.pathCache) if err = os.RemoveAll(b.pathCache); err != nil { err = fmt.Errorf("removing %s failed: %w", b.pathCache, err) return } return } // Bundle bundles an astilectron app based on a configuration func (b *Bundler) Bundle() (err error) { // Create the output folder if err = os.MkdirAll(b.pathOutput, 0755); err != nil { err = fmt.Errorf("mkdirall %s failed: %w", b.pathOutput, err) return } // Loop through environments for _, e := range b.environments { b.l.Debugf("Bundling for environment %s/%s", e.OS, e.Arch) if err = b.bundle(e); err != nil { err = fmt.Errorf("bundling for environment %s/%s failed: %w", e.OS, e.Arch, err) return } } return } // bundle bundles an os func (b *Bundler) bundle(e ConfigurationEnvironment) (err error) { // Bind data b.l.Debug("Binding data") if err = b.BindData(e.OS, e.Arch); err != nil { err = fmt.Errorf("binding data failed: %w", err) return } // Add windows .syso if e.OS == "windows" { if err = b.addWindowsSyso(e.Arch); err != nil { err = fmt.Errorf("adding windows .syso failed: %w", err) return } } // Reset output dir var environmentPath = 
filepath.Join(b.pathOutput, e.OS+"-"+e.Arch) if err = b.resetDir(environmentPath); err != nil { err = fmt.Errorf("resetting dir %s failed: %w", environmentPath, err) return } std := LDFlags{ "X": []string{ b.ldflagsPackage + `.AppName=` + b.appName, b.ldflagsPackage + `.BuiltAt=` + time.Now().String(), b.ldflagsPackage + `.VersionAstilectron=` + b.versionAstilectron, b.ldflagsPackage + `.VersionElectron=` + b.versionElectron, }, } if e.OS == "windows" && !b.showWindowsConsole { std["H"] = []string{"windowsgui"} } std.Merge(b.ldflags) // Get gopath gp := os.Getenv("GOPATH") if len(gp) == 0 { gp = build.Default.GOPATH } args := []string{"build", "-ldflags", std.String()} var flag string for k, v := range b.buildFlags { if hasDash := strings.HasPrefix(k, "-"); hasDash { flag = k } else { flag = "-" + k } if v != "" { args = append(args, flag, v) } else { args = append(args, flag) } } var binaryPath = filepath.Join(environmentPath, "binary") args = append(args, "-o", binaryPath, b.pathBuild) // Build cmd b.l.Debugf("Building for os %s and arch %s astilectron: %s electron: %s", e.OS, e.Arch, b.versionAstilectron, b.versionElectron) var cmd = exec.Command(b.pathGoBinary, args...) cmd.Env = os.Environ() cmd.Env = append(cmd.Env, "GOARCH="+e.Arch, "GOOS="+e.OS, "GOPATH="+gp, ) if e.EnvironmentVariables != nil { for k, v := range e.EnvironmentVariables { cmd.Env = append(cmd.Env, k+"="+v) } } // Exec var o []byte b.l.Debugf("Executing %s", strings.Join(cmd.Args, " ")) if o, err = cmd.CombinedOutput(); err != nil { err = fmt.Errorf("building failed: %s", o) return } // Finish bundle based on OS switch e.OS { case "darwin": err = b.finishDarwin(environmentPath, binaryPath) case "linux": err = b.finishLinux(environmentPath, binaryPath) case "windows": err = b.finishWindows(environmentPath, binaryPath) default: err = fmt.Errorf("OS %s is not yet implemented", e.OS) } return } func (b *Bundler) resetDir(p string) (err error) { // Remove b.l.Debugf("Removing %s", p) if err = os.RemoveAll(p); err != nil { err = fmt.Errorf("removing %s failed: %w", p, err) return } // Mkdir b.l.Debugf("Creating %s", p) if err = os.MkdirAll(p, 0755); err != nil { err = fmt.Errorf("mkdirall %s failed: %w", p, err) return } return } // BindData binds the data func (b *Bundler) BindData(os, arch string) (err error) { // Reset bind dir if err = b.resetDir(b.pathBindInput); err != nil { err = fmt.Errorf("resetting dir %s failed: %w", b.pathBindInput, err) return } // Provision the vendor if err = b.provisionVendor(os, arch); err != nil { err = fmt.Errorf("provisioning the vendor failed: %w", err) return } // Adapt resources if err = b.adaptResources(); err != nil { err = fmt.Errorf("adapting resources failed: %w", err) return } // Build bindata config var c = bindata.NewConfig() c.Input = []bindata.InputConfig{{Path: b.pathBindInput, Recursive: true}} c.Output = filepath.Join(b.pathBindOutput, fmt.Sprintf("bind_%s_%s.go", os, arch)) c.Package = b.bindPackage c.Prefix = b.pathBindInput c.Tags = fmt.Sprintf("%s,%s", os, arch) // Bind data b.l.Debugf("Generating %s", c.Output) err = bindata.Translate(c) return } // provisionVendor provisions the vendor folder func (b *Bundler) provisionVendor(oS, arch string) (err error) { // Create the vendor folder b.l.Debugf("Creating %s", b.pathVendor) if err = os.MkdirAll(b.pathVendor, 0755); err != nil { err = fmt.Errorf("mkdirall %s failed: %w", b.pathVendor, err) return } // Create the cache folder b.l.Debugf("Creating %s", b.pathCache) if err = os.MkdirAll(b.pathCache, 0755); err != nil { 
err = fmt.Errorf("mkdirall %s failed: %w", b.pathCache, err) return } // Provision astilectron if err = b.provisionVendorAstilectron(); err != nil { err = fmt.Errorf("provisioning astilectron vendor failed: %w", err) return } // Provision electron if err = b.provisionVendorElectron(oS, arch); err != nil { err = fmt.Errorf("provisioning electron vendor for OS %s and arch %s failed: %w", oS, arch, err) return } return } // provisionVendorZip provisions a vendor zip file func (b *Bundler) provisionVendorZip(pathDownload, pathCache, pathVendor string) (err error) { // Download source if _, errStat := os.Stat(pathCache); os.IsNotExist(errStat) { if err = astilectron.Download(b.ctx, b.l, b.d, pathDownload, pathCache); err != nil { err = fmt.Errorf("downloading %s into %s failed: %w", pathDownload, pathCache, err) return } } else { b.l.Debugf("%s already exists, skipping download of %s", pathCache, pathDownload) } // Check context error if b.ctx.Err() != nil { return b.ctx.Err() } // Copy b.l.Debugf("Copying %s to %s", pathCache, pathVendor) if err = astikit.CopyFile(b.ctx, pathVendor, pathCache, astikit.LocalCopyFileFunc); err != nil { err = fmt.Errorf("copying %s to %s failed: %w", pathCache, pathVendor, err) return } // Check context error if b.ctx.Err() != nil { return b.ctx.Err() } return } // provisionVendorAstilectron provisions the astilectron vendor zip file func (b *Bundler) provisionVendorAstilectron() (err error) { var p = filepath.Join(b.pathCache, fmt.Sprintf("astilectron-%s.zip", b.versionAstilectron)) if len(b.pathAstilectron) > 0 { // Zip b.l.Debugf("Zipping %s into %s", b.pathAstilectron, p) if err = astikit.Zip(b.ctx, p+"/"+fmt.Sprintf("astilectron-%s", b.versionAstilectron), b.pathAstilectron); err != nil { err = fmt.Errorf("zipping %s into %s failed: %w", b.pathAstilectron, p, err) return } // Check context error if b.ctx.Err() != nil { return b.ctx.Err() } } return b.provisionVendorZip(astilectron.AstilectronDownloadSrc(b.versionAstilectron), p, filepath.Join(b.pathVendor, zipNameAstilectron)) } // provisionVendorElectron provisions the electron vendor zip file func (b *Bundler) provisionVendorElectron(oS, arch string) error { return b.provisionVendorZip( astilectron.ElectronDownloadSrc(oS, arch, b.versionElectron), filepath.Join(b.pathCache, fmt.Sprintf("electron-%s-%s-%s.zip", oS, arch, b.versionElectron)), filepath.Join(b.pathVendor, zipNameElectron)) } func (b *Bundler) adaptResources() (err error) { // Create dir var o = filepath.Join(b.pathBindInput, b.pathResources) b.l.Debugf("Creating %s", o) if err = os.MkdirAll(o, 0755); err != nil { err = fmt.Errorf("mkdirall %s failed: %w", o, err) return } // Copy resources var i = filepath.Join(b.pathInput, b.pathResources) b.l.Debugf("Copying %s to %s", i, o) if err = astikit.CopyFile(b.ctx, o, i, astikit.LocalCopyFileFunc); err != nil { err = fmt.Errorf("copying %s to %s failed: %w", i, o, err) return } // Nothing to do if len(b.resourcesAdapters) == 0 { return } // Loop through adapters for _, a := range b.resourcesAdapters { // Create cmd cmd := exec.CommandContext(b.ctx, a.Name, a.Args...) 
cmd.Dir = o if a.Dir != "" { cmd.Dir = filepath.Join(o, a.Dir) } // Run b.l.Debugf("Running %s in directory %s", strings.Join(cmd.Args, " "), cmd.Dir) var b []byte if b, err = cmd.CombinedOutput(); err != nil { err = fmt.Errorf("running %s failed with output %s", strings.Join(cmd.Args, " "), b) return } } return } // addWindowsSyso adds the proper windows .syso if needed func (b *Bundler) addWindowsSyso(arch string) (err error) { if len(b.pathIconWindows) > 0 || len(b.pathManifest) > 0 { var p = filepath.Join(b.pathInput, "windows.syso") b.l.Debugf("Running rsrc for icon %s into %s", b.pathIconWindows, p) if err = rsrc.Embed(p, arch, b.pathManifest, b.pathIconWindows); err != nil { err = fmt.Errorf("running rsrc for icon %s into %s failed: %w", b.pathIconWindows, p, err) return } } return } // finishDarwin finishes bundling for a darwin system func (b *Bundler) finishDarwin(environmentPath, binaryPath string) (err error) { // Create MacOS folder var contentsPath = filepath.Join(environmentPath, b.appName+".app", "Contents") var macOSPath = filepath.Join(contentsPath, "MacOS") b.l.Debugf("Creating %s", macOSPath) if err = os.MkdirAll(macOSPath, 0777); err != nil { err = fmt.Errorf("mkdirall of %s failed: %w", macOSPath, err) return } var macOSBinaryPath = filepath.Join(macOSPath, b.appName) var infoPlist *plister.InfoPlist if b.infoPlist != nil { infoPlist = plister.MapToInfoPlist(b.infoPlist) binaryName, _ := infoPlist.Get("CFBundleExecutable").(string) if binaryName != "" { macOSBinaryPath = filepath.Join(macOSPath, binaryName) } } // Move binary b.l.Debugf("Moving %s to %s", binaryPath, macOSBinaryPath) if err = astikit.MoveFile(b.ctx, macOSBinaryPath, binaryPath, astikit.LocalCopyFileFunc); err != nil { err = fmt.Errorf("moving %s to %s failed: %w", binaryPath, macOSBinaryPath, err) return } // Check context error if b.ctx.Err() != nil { return b.ctx.Err() } // Make sure the binary is executable b.l.Debugf("Chmoding %s", macOSBinaryPath) if err = os.Chmod(macOSBinaryPath, 0777); err != nil { err = fmt.Errorf("chmoding %s failed: %w", macOSBinaryPath, err) return } // Icon if len(b.pathIconDarwin) > 0 { // Create Resources folder var resourcesPath = filepath.Join(contentsPath, "Resources") b.l.Debugf("Creating %s", resourcesPath) if err = os.MkdirAll(resourcesPath, 0777); err != nil { err = fmt.Errorf("mkdirall of %s failed: %w", resourcesPath, err) return } iconFileName := b.appName + filepath.Ext(b.pathIconDarwin) if infoPlist != nil { ifn, _ := infoPlist.Get("CFBundleIconFile").(string) if ifn != "" { iconFileName = ifn } } // Copy icon var ip = filepath.Join(resourcesPath, iconFileName) b.l.Debugf("Copying %s to %s", b.pathIconDarwin, ip) if err = astikit.CopyFile(b.ctx, ip, b.pathIconDarwin, astikit.LocalCopyFileFunc); err != nil { err = fmt.Errorf("copying %s to %s failed: %w", b.pathIconDarwin, ip, err) return } // Check context error if b.ctx.Err() != nil { return b.ctx.Err() } } // Add Info.plist file var fp = filepath.Join(contentsPath, "Info.plist") b.l.Debugf("Adding Info.plist to %s", fp) if infoPlist != nil { if err = plister.Generate(fp, infoPlist); err != nil { err = fmt.Errorf("generating Info.plist failed: %w", err) } return } lsuiElement := "NO" if b.darwinAgentApp { lsuiElement = "YES" } if err = ioutil.WriteFile(fp, []byte(`<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> <plist version="1.0"> <dict> <key>CFBundleIconFile</key> 
<string>`+b.appName+filepath.Ext(b.pathIconDarwin)+`</string> <key>CFBundleDisplayName</key> <string>`+b.appName+`</string> <key>CFBundleExecutable</key> <string>`+b.appName+`</string> <key>CFBundleName</key> <string>`+b.appName+`</string> <key>CFBundleIdentifier</key> <string>com.`+b.appName+`</string> <key>LSUIElement</key> <string>`+lsuiElement+`</string> <key>CFBundlePackageType</key> <string>APPL</string> </dict> </plist>`), 0777); err != nil { err = fmt.Errorf("adding Info.plist to %s failed: %w", fp, err) return } return } // finishLinux finishes bundling for a linux system // TODO Add .desktop file func (b *Bundler) finishLinux(environmentPath, binaryPath string) (err error) { // Move binary var linuxBinaryPath = filepath.Join(environmentPath, b.appName) b.l.Debugf("Moving %s to %s", binaryPath, linuxBinaryPath) if err = astikit.MoveFile(b.ctx, linuxBinaryPath, binaryPath, astikit.LocalCopyFileFunc); err != nil { err = fmt.Errorf("moving %s to %s failed: %w", binaryPath, linuxBinaryPath, err) return } // Check context error if b.ctx.Err() != nil { return b.ctx.Err() } return } // finishWindows finishes bundling for a windows system func (b *Bundler) finishWindows(environmentPath, binaryPath string) (err error) { // Move binary var windowsBinaryPath = filepath.Join(environmentPath, b.appName+".exe") b.l.Debugf("Moving %s to %s", binaryPath, windowsBinaryPath) if err = astikit.MoveFile(b.ctx, windowsBinaryPath, binaryPath, astikit.LocalCopyFileFunc); err != nil { err = fmt.Errorf("moving %s to %s failed: %w", binaryPath, windowsBinaryPath, err) return } // Check context error if b.ctx.Err() != nil { return b.ctx.Err() } return }
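The build step above leans on the Go linker's -X flag to stamp package-level string variables (AppName, BuiltAt, the Astilectron and Electron versions) into the binary. A minimal, self-contained sketch of that mechanism; the package path and values below are illustrative only:

package main

import (
    "fmt"
    "os/exec"
    "strings"
)

func main() {
    // Hypothetical values; the bundler assembles these from its configuration and ldflagsPackage.
    flags := []string{
        "-X main.AppName=demo",
        "-X main.BuiltAt=2024-01-01T00:00:00Z",
    }

    // Equivalent to: go build -ldflags "-X main.AppName=demo ..." -o demo .
    cmd := exec.Command("go", "build", "-ldflags", strings.Join(flags, " "), "-o", "demo", ".")
    if out, err := cmd.CombinedOutput(); err != nil {
        fmt.Printf("build failed: %s\n", out)
        return
    }
    fmt.Println("binary built with stamped variables")
}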
[ "\"GOPATH\"", "\"GOPATH\"" ]
[]
[ "GOPATH" ]
[]
["GOPATH"]
go
1
0
sdk/management/samples/src/main/java/com/azure/management/resources/samples/DeployUsingARMTemplate.java
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. package com.azure.management.resources.samples; import com.azure.core.http.policy.HttpLogDetailLevel; import com.azure.core.http.rest.PagedIterable; import com.azure.management.Azure; import com.azure.management.resources.Deployment; import com.azure.management.resources.DeploymentMode; import com.azure.management.resources.DeploymentOperation; import com.azure.management.resources.fluentcore.arm.Region; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ObjectNode; import java.io.File; import java.io.IOException; import java.io.InputStream; /** * Azure Resource sample for deploying resources using an ARM template. */ public final class DeployUsingARMTemplate { /** * Main function which runs the actual sample. * * @param azure instance of the azure client * @return true if sample runs successfully */ public static boolean runSample(Azure azure) { final String rgName = azure.sdkContext().randomResourceName("rgRSAT", 24); final String deploymentName = azure.sdkContext().randomResourceName("dpRSAT", 24); try { String templateJson = DeployUsingARMTemplate.getTemplate(azure); //============================================================= // Create resource group. System.out.println("Creating a resource group with name: " + rgName); azure.resourceGroups().define(rgName) .withRegion(Region.US_EAST2) .create(); System.out.println("Created a resource group with name: " + rgName); //============================================================= // Create a deployment for an Azure App Service via an ARM // template. System.out.println(templateJson); // System.out.println("Starting a deployment for an Azure App Service: " + deploymentName); azure.deployments().define(deploymentName) .withExistingResourceGroup(rgName) .withTemplate(templateJson) .withParameters("{}") .withMode(DeploymentMode.INCREMENTAL) .create(); System.out.println("Started a deployment for an Azure App Service: " + deploymentName); return true; } catch (Exception f) { System.out.println(f.getMessage()); f.printStackTrace(); } finally { try { Deployment deployment = azure.deployments() .getByResourceGroup(rgName, deploymentName); PagedIterable<DeploymentOperation> operations = deployment.deploymentOperations() .list(); for (DeploymentOperation operation : operations) { if (operation.targetResource() != null) { String operationTxt = String.format("id:%s name:%s type: %s provisioning-state:%s code: %s msg: %s", operation.targetResource().getId(), operation.targetResource().resourceName(), operation.targetResource().resourceType(), operation.provisioningState(), operation.statusCode(), operation.statusMessage()); System.out.println(operationTxt); } } } catch (Exception ex) { System.out.println(ex.getMessage()); } try { System.out.println("Deleting Resource Group: " + rgName); azure.resourceGroups().beginDeleteByName(rgName); System.out.println("Deleted Resource Group: " + rgName); } catch (NullPointerException npe) { System.out.println("Did not create any resources in Azure. No clean up is necessary"); } catch (Exception g) { g.printStackTrace(); } } return false; } /** * Main entry point. 
* * @param args the parameters */ public static void main(String[] args) { try { //================================================================= // Authenticate final File credFile = new File(System.getenv("AZURE_AUTH_LOCATION")); Azure azure = Azure.configure() .withLogLevel(HttpLogDetailLevel.BASIC) .authenticate(credFile) .withDefaultSubscription(); runSample(azure); } catch (Exception e) { System.out.println(e.getMessage()); e.printStackTrace(); } } private static String getTemplate(Azure azure) throws IllegalAccessException, JsonProcessingException, IOException { final String hostingPlanName = azure.sdkContext().randomResourceName("hpRSAT", 24); final String webappName = azure.sdkContext().randomResourceName("wnRSAT", 24); final InputStream embeddedTemplate; embeddedTemplate = DeployUsingARMTemplate.class.getResourceAsStream("/templateValue.json"); final ObjectMapper mapper = new ObjectMapper(); final JsonNode tmp = mapper.readTree(embeddedTemplate); DeployUsingARMTemplate.validateAndAddFieldValue("string", hostingPlanName, "hostingPlanName", null, tmp); DeployUsingARMTemplate.validateAndAddFieldValue("string", webappName, "webSiteName", null, tmp); DeployUsingARMTemplate.validateAndAddFieldValue("string", "F1", "skuName", null, tmp); DeployUsingARMTemplate.validateAndAddFieldValue("int", "1", "skuCapacity", null, tmp); return tmp.toString(); } private static void validateAndAddFieldValue(String type, String fieldValue, String fieldName, String errorMessage, JsonNode tmp) throws IllegalAccessException { // Add count variable for loop.... final ObjectMapper mapper = new ObjectMapper(); final ObjectNode parameter = mapper.createObjectNode(); parameter.put("type", type); if ("int".equals(type)) { parameter.put("defaultValue", Integer.parseInt(fieldValue)); } else { parameter.put("defaultValue", fieldValue); } ObjectNode.class.cast(tmp.get("parameters")).replace(fieldName, parameter); } }
[ "\"AZURE_AUTH_LOCATION\"" ]
[]
[ "AZURE_AUTH_LOCATION" ]
[]
["AZURE_AUTH_LOCATION"]
java
1
0
pacing/model.py
from __future__ import absolute_import, print_function # --- System --- import os import sys import time import warnings # --- Utility --- import pandas as pd import numpy as np import math import random import logging import pickle import warnings warnings.filterwarnings('ignore') from sklearn.model_selection import train_test_split from sklearn import preprocessing # --- Plot --- import matplotlib.pyplot as plt # %matplotlib inline import seaborn as sns # --- Pytorch --- import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torchvision import torchvision.transforms as transforms import torch.backends.cudnn as cudnn from torch.utils.data import Dataset, DataLoader, TensorDataset from tqdm import tqdm from datetime import datetime from torch.utils.data import random_split # ----------------------------------------------------------- # random weight initialization def seed_everything(seed=42): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False seed_everything() device = torch.device("cuda" if torch.cuda.is_available() else "cpu") root_dir = os.getcwd() # ----------------------------------------------------------- # data loading and preprocessing dataPath = "data/statistics (pacing).csv" df = pd.read_csv(dataPath) # Dropping columns that are not required at the moment df = df.drop(columns=[ 'Unnamed: 0', 'UUID', 'HOSTNAME', 'ALIAS', 'TIMESTAMP', 'THROUGHPUT (Receiver)', 'LATENCY (min.)', 'LATENCY (max.)', 'CONGESTION (Receiver)', 'BYTES (Receiver)' ]) # Pre-processing pacing = df['PACING'].values for i, p in enumerate(pacing): v, _ = p.split("gbit") pacing[i] = int(v) df['PACING'] = pacing df['CONGESTION (Sender)'] = (df['CONGESTION (Sender)'] == 'cubic').astype(int) X = df[['THROUGHPUT (Sender)', 'LATENCY (mean)', 'RETRANSMITS', 'STREAMS', 'CONGESTION (Sender)']].values y = df['PACING'].values y = y.astype('int') # Normalization minmax_scale = preprocessing.MinMaxScaler().fit(df[['THROUGHPUT (Sender)', 'LATENCY (mean)', 'RETRANSMITS', 'STREAMS', 'CONGESTION (Sender)']]) df_minmax = minmax_scale.transform(df[['THROUGHPUT (Sender)', 'LATENCY (mean)', 'RETRANSMITS', 'STREAMS', 'CONGESTION (Sender)']]) final_df = pd.DataFrame(df_minmax, columns=['THROUGHPUT (Sender)', 'LATENCY (mean)', 'RETRANSMITS', 'STREAMS', 'CONGESTION (Sender)']) X = final_df[['THROUGHPUT (Sender)', 'LATENCY (mean)', 'RETRANSMITS', 'STREAMS', 'CONGESTION (Sender)']].values X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1) X_train = torch.tensor(X_train) y_train = torch.tensor(y_train) X_test = torch.tensor(X_test) y_test = torch.tensor(y_test) # ----------------------------------------------------------- # Custom data loader for ELK stack dataset class PacingDataset(Dataset): """ TensorDataset with support of transforms. 
""" def __init__(self, tensors, transform=None): assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors) self.tensors = tensors self.transform = transform def __getitem__(self, index): x = self.tensors[0][index] if self.transform: x = self.transform(x) y = self.tensors[1][index] return x, y def __len__(self): return self.tensors[0].size(0) # ----------------------------------------------------------- # accuracy computation def accuracy(model, ds, pct): # assumes model.eval() # percent correct within pct of true pacing rate n_correct = 0; n_wrong = 0 for i in range(len(ds)): (X, Y) = ds[i] # (predictors, target) X, Y = X.float(), Y.float() with torch.no_grad(): output, _, _, _ = model(X) # computed price abs_delta = np.abs(output.item() - Y.item()) max_allow = np.abs(pct * Y.item()) if abs_delta < max_allow: n_correct +=1 else: n_wrong += 1 acc = (n_correct * 1.0) / (n_correct + n_wrong) return acc*100 # ----------------------------------------------------------- input_feature = 5 latent_feature = 16 class VAERegressor(nn.Module): def __init__(self): super(VAERegressor, self).__init__() # encoder self.enc1 = nn.Linear(in_features=input_feature, out_features=128) self.enc2 = nn.Linear(in_features=128, out_features=latent_feature*2) # decoder self.dec1 = nn.Linear(in_features=latent_feature, out_features=128) self.dec2 = nn.Linear(in_features=128, out_features=5) # Regressor self.fc1 = torch.nn.Linear (5, 32) self.fc2 = torch.nn.Linear (32, 1) torch.nn.init.xavier_uniform_(self.enc1.weight) torch.nn.init.zeros_(self.enc1.bias) torch.nn.init.xavier_uniform_(self.enc2.weight) torch.nn.init.zeros_(self.enc2.bias) torch.nn.init.xavier_uniform_(self.dec1.weight) torch.nn.init.zeros_(self.dec1.bias) torch.nn.init.xavier_uniform_(self.dec2.weight) torch.nn.init.zeros_(self.dec2.bias) torch.nn.init.xavier_uniform_(self.fc1.weight) torch.nn.init.zeros_(self.fc1.bias) torch.nn.init.xavier_uniform_(self.fc2.weight) torch.nn.init.zeros_(self.fc2.bias) def reparameterize(self, mu, log_var): """ :param mu: mean from the encoder's latent space :param log_var: log variance from the encoder's latent space """ std = torch.exp(0.5*log_var) # standard deviation eps = torch.randn_like(std) # `randn_like` as we need the same size sample = mu + (eps * std) # sampling as if coming from the input space return sample def forward(self, x): # encoding x = self.enc1(x) x = F.relu(x) x = self.enc2(x).view(-1, 2, latent_feature) # get `mu` and `log_var` mu = x[:, 0, :] # the first feature values as mean log_var = x[:, 1, :] # the other feature values as variance # get the latent vector through reparameterization z = self.reparameterize(mu, log_var) # decoding x = self.dec1(z) x = F.relu(x) x = self.dec2(x) recon = torch.sigmoid(x) # regressor x = self.fc1(recon) x = F.relu(x) x = self.fc2(x) return x, recon, mu, log_var # model definition class PacingOptimizer(nn.Module): # https://visualstudiomagazine.com/Articles/2021/02/11/pytorch-define.aspx?Page=2 def __init__(self): super(PacingOptimizer, self).__init__() self.hid1 = torch.nn.Linear(5, 32) self.drop1 = torch.nn.Dropout(0.25) self.hid2 = torch.nn.Linear(32, 64) self.drop2 = torch.nn.Dropout(0.50) self.hid3 = torch.nn.Linear(64, 32) self.oupt = torch.nn.Linear(32, 1) torch.nn.init.xavier_uniform_(self.hid1.weight) torch.nn.init.zeros_(self.hid1.bias) torch.nn.init.xavier_uniform_(self.hid2.weight) torch.nn.init.zeros_(self.hid2.bias) torch.nn.init.xavier_uniform_(self.hid3.weight) torch.nn.init.zeros_(self.hid3.bias) 
torch.nn.init.xavier_uniform_(self.oupt.weight) torch.nn.init.zeros_(self.oupt.bias) def forward(self, x): z = self.drop1(torch.relu(self.hid1(x))) z = self.drop2(torch.relu(self.hid2(z))) z = torch.relu(self.hid3(z)) z = self.oupt(z) # no activation return z # ----------------------------------------------------------- model = VAERegressor() # model = PacingOptimizer() # Hyperparameters EPOCH = 100 BATCH = 64 LEARNING_RATE = 0.005 INTERVAL = 50 SAVE = False BESTLOSS = 10 CE = nn.CrossEntropyLoss() BCE = nn.BCELoss(reduction='mean') MSE = nn.MSELoss(reduction='mean') # 'mean', 'sum', 'none' def criterion(bce_loss, mu, logvar): """ This function will add the reconstruction loss (BCELoss) and the KL-Divergence. KL-Divergence = 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2) :param bce_loss: reconstruction loss :param mu: the mean from the latent vector :param logvar: log variance from the latent vector """ BCE = bce_loss KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) return BCE + KLD # optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE) optimizer = optim.SGD(model.parameters(), lr=LEARNING_RATE, momentum=0.9) print("\nBatch Size = %3d " % BATCH) print("Loss = " + str(criterion)) print("Optimizer = SGD") print("Max Epochs = %3d " % EPOCH) print("Learning Rate = %0.3f " % LEARNING_RATE) # Dataset w/o any transformations traindata = PacingDataset(tensors=(X_train, y_train), transform=None) trainloader = torch.utils.data.DataLoader(traindata, batch_size=BATCH) testdata = PacingDataset(tensors=(X_test, y_test), transform=None) testloader = torch.utils.data.DataLoader(testdata, batch_size=BATCH) print("\nStarting training with saved checkpoints") model.train() for epoch in range(0, EPOCH): torch.manual_seed(epoch+1) # recover reproducibility epoch_loss = 0 # for one full epoch for (batch_idx, batch) in enumerate(trainloader): (xs, ys) = batch # (predictors, targets) xs, ys = xs.float(), ys.float() optimizer.zero_grad() # prepare gradients # output = model(xs) # predicted pacing rate # loss = criterion(ys, output) # avg per item in batch output, recon, mu, log_var = model(xs) mse_loss = MSE(ys, output) bce_loss = BCE(recon, xs) loss = criterion(bce_loss, mu, log_var) + mse_loss epoch_loss += loss.item() # accumulate averages loss.backward() # compute gradients optimizer.step() # update weights if epoch % INTERVAL == 0: print("Epoch = %4d Loss = %0.4f" % (epoch, epoch_loss)) # save checkpoint dt = time.strftime("%Y_%m_%d-%H_%M_%S") fn = str(dt) + str("-") + str(epoch) + "_ckpt.pt" info_dict = { 'epoch' : epoch, 'model_state' : model.state_dict(), 'optimizer_state' : optimizer.state_dict() } if SAVE: torch.save(info_dict, fn) print("\nDone") # evaluate model accuracy model.eval() gap = 0.50 acc_train = accuracy(model, traindata, gap) print(f"Accuracy (within {gap:.2f}) on train data = {acc_train:.2f}%") # make prediction on a random sample from test data pivot = random.randint(0, len(testdata) - 1) print("Pivot: ", pivot) x, y = testdata[pivot] tput, lat, loss, streams, cong = x.detach().numpy() # tput, lat, loss, streams, cong = 0.149677, 0.577766, 1.00000, 0.0, 1.0 print(f"\nPredicting pacing rate for:\n\ (norm.
values)\n\ throughput = {tput}\n\ latency = {lat}\n\ loss = {loss}\n\ congestion = {cong}\n\ streams = {streams}\n\ pacing = {y}") # converting the sample to tensor array ukn = np.array([[tput, lat, loss, streams, cong]], dtype=np.float32) sample = torch.tensor(ukn, dtype=torch.float32).to(device) # testing the sample with torch.no_grad(): pred, _, _, _ = model(sample) pred = pred.item() print(f"\nPredicted Pacing rate: {pred:.4f}\nGround-truth Pacing rate: {y:.4f}\n")
[]
[]
[ "PYTHONHASHSEED" ]
[]
["PYTHONHASHSEED"]
python
1
0
helper/util/stderrlog/stderrlog.go
package stderrlog import ( "fmt" "io" "os" ) var Writer io.Writer = os.Stderr var debugModeEnabled = os.Getenv("DEVSPACE_HELPER_DEBUG") == "true" func Errorf(message string, args ...interface{}) { _, _ = fmt.Fprintf(Writer, "error: "+message+"\n", args...) } func Infof(message string, args ...interface{}) { _, _ = fmt.Fprintf(Writer, message+"\n", args...) } func Debugf(message string, args ...interface{}) { if debugModeEnabled { _, _ = fmt.Fprintf(Writer, message+"\n", args...) } }
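A quick note on the design: Writer is an exported package variable so callers (typically tests) can swap stderr for a buffer and assert on the helper's output. A self-contained sketch of that pattern, mirrored locally rather than importing the package (its import path is not shown in the row above):

package main

import (
    "bytes"
    "fmt"
    "io"
    "os"
)

// A local mirror of the pattern above: an exported writer defaulting to stderr.
var writer io.Writer = os.Stderr

func infof(message string, args ...interface{}) {
    _, _ = fmt.Fprintf(writer, message+"\n", args...)
}

func main() {
    // In a test, the writer is swapped for a buffer to capture output.
    var buf bytes.Buffer
    writer = &buf
    infof("synced %d files", 3)
    fmt.Print(buf.String()) // synced 3 files
}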
[ "\"DEVSPACE_HELPER_DEBUG\"" ]
[]
[ "DEVSPACE_HELPER_DEBUG" ]
[]
["DEVSPACE_HELPER_DEBUG"]
go
1
0
kubetest2/kubetest2-gce/deployer/common.go
/* Copyright 2020 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package deployer import ( "fmt" "os" "time" "k8s.io/klog" ) func (d *deployer) init() error { var err error d.doInit.Do(func() { err = d.initialize() }) return err } // initialize should only be called by init(), behind a sync.Once func (d *deployer) initialize() error { if d.commonOptions.ShouldBuild() { if err := d.verifyBuildFlags(); err != nil { return fmt.Errorf("init failed to check build flags: %s", err) } } if d.commonOptions.ShouldUp() { if err := d.verifyUpFlags(); err != nil { return fmt.Errorf("init failed to verify flags for up: %s", err) } if d.GCPProject == "" { klog.V(1).Info("No GCP project provided, acquiring from Boskos") boskos, err := makeBoskosClient(d.BoskosLocation) if err != nil { return fmt.Errorf("failed to make boskos client: %s", err) } d.boskos = boskos projectName, err := getProjectFromBoskos( d.boskos, time.Duration(d.BoskosAcquireTimeoutSeconds)*time.Second, d.boskosHeartbeatClose, ) if err != nil { return fmt.Errorf("init failed to get project from boskos: %s", err) } d.GCPProject = projectName klog.V(1).Infof("Got project %s from boskos", d.GCPProject) } } if d.commonOptions.ShouldDown() { if err := d.verifyDownFlags(); err != nil { return fmt.Errorf("init failed to verify flags for down: %s", err) } } return nil } func (d *deployer) buildEnv() []string { // The base env currently does not inherit the current os env (except for PATH) // because (for now) it doesn't have to. In future, this may have to change when // support is added for k/k's kube-up.sh and kube-down.sh which support a wide // variety of environment variables. Before doing so, it is worth investigating // inheriting the os env vs. adding flags to this deployer on a case-by-case // basis to support individual environment configurations. var env []string // path is necessary for scripts to find gsutil, gcloud, etc // can be removed if env is inherited from the os env = append(env, fmt.Sprintf("PATH=%s", os.Getenv("PATH"))) // used by config-test.sh to set $NETWORK in the default case // if unset, bash's set -u gets angry and kills the log dump script // can be removed if env is inherited from the os env = append(env, fmt.Sprintf("USER=%s", os.Getenv("USER"))) // kube-up.sh, kube-down.sh etc. use PROJECT as a parameter for all gcloud commands env = append(env, fmt.Sprintf("PROJECT=%s", d.GCPProject)) // kubeconfig is set to tell kube-up.sh where to generate the kubeconfig // we don't want this to be the default because this kubeconfig "belongs" to // the run of kubetest2 and so should be placed in the artifacts directory env = append(env, fmt.Sprintf("KUBECONFIG=%s", d.kubeconfigPath)) // kube-up and kube-down get this as a default ("kubernetes") but log-dump // does not. opted to set it manually here for maximum consistency env = append(env, "KUBE_GCE_INSTANCE_PREFIX=kubetest2") return env }
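For context, a slice like the one buildEnv returns is normally assigned to exec.Cmd.Env, so the kube-up.sh/kube-down.sh invocations see exactly those variables and nothing else from the parent environment. A minimal, self-contained sketch; the command and project name are illustrative:

package main

import (
    "fmt"
    "os"
    "os/exec"
)

func main() {
    // Hypothetical equivalent of the slice built by buildEnv above.
    env := []string{
        "PATH=" + os.Getenv("PATH"),
        "PROJECT=my-gcp-project",
        "KUBECONFIG=/tmp/artifacts/kubeconfig",
    }

    cmd := exec.Command("sh", "-c", "echo $PROJECT")
    cmd.Env = env // the child sees only these variables
    out, err := cmd.CombinedOutput()
    if err != nil {
        fmt.Println("error:", err)
        return
    }
    fmt.Print(string(out)) // my-gcp-project
}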
[ "\"PATH\"", "\"USER\"" ]
[]
[ "USER", "PATH" ]
[]
["USER", "PATH"]
go
2
0
vt/vttablet/tabletserver/tx_engine_test.go
/* Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package tabletserver import ( "fmt" "strings" "sync" "testing" "time" "gopkg.in/src-d/go-vitess.v1/mysql/fakesqldb" "gopkg.in/src-d/go-vitess.v1/sqltypes" "gopkg.in/src-d/go-vitess.v1/vt/vttablet/tabletserver/tabletenv" "golang.org/x/net/context" querypb "gopkg.in/src-d/go-vitess.v1/vt/proto/query" ) func TestTxEngineClose(t *testing.T) { db := setUpQueryExecutorTest(t) defer db.Close() testUtils := newTestUtils() dbcfgs := testUtils.newDBConfigs(db) ctx := context.Background() config := tabletenv.DefaultQsConfig config.TransactionCap = 10 config.TransactionTimeout = 0.5 config.TxShutDownGracePeriod = 0 te := NewTxEngine(nil, config) te.InitDBConfig(dbcfgs) // Normal close. te.open() start := time.Now() te.close(false) if diff := time.Since(start); diff > 500*time.Millisecond { t.Errorf("Close time: %v, must be under 0.5s", diff) } // Normal close with timeout wait. te.open() c, beginSQL, err := te.txPool.LocalBegin(ctx, &querypb.ExecuteOptions{}) if err != nil { t.Fatal(err) } if beginSQL != "begin" { t.Errorf("beginSQL: %q, want 'begin'", beginSQL) } c.Recycle() start = time.Now() te.close(false) if diff := time.Since(start); diff < 500*time.Millisecond { t.Errorf("Close time: %v, must be over 0.5s", diff) } // Immediate close. te.open() c, beginSQL, err = te.txPool.LocalBegin(ctx, &querypb.ExecuteOptions{}) if err != nil { t.Fatal(err) } c.Recycle() start = time.Now() te.close(true) if diff := time.Since(start); diff > 500*time.Millisecond { t.Errorf("Close time: %v, must be under 0.5s", diff) } // Normal close with short grace period. te.shutdownGracePeriod = 250 * time.Millisecond te.open() c, beginSQL, err = te.txPool.LocalBegin(ctx, &querypb.ExecuteOptions{}) if err != nil { t.Fatal(err) } c.Recycle() start = time.Now() te.close(false) if diff := time.Since(start); diff > 500*time.Millisecond { t.Errorf("Close time: %v, must be under 0.5s", diff) } if diff := time.Since(start); diff < 250*time.Millisecond { t.Errorf("Close time: %v, must be over 0.25s", diff) } // Normal close with short grace period, but pool gets empty early. te.shutdownGracePeriod = 250 * time.Millisecond te.open() c, beginSQL, err = te.txPool.LocalBegin(ctx, &querypb.ExecuteOptions{}) if err != nil { t.Fatal(err) } c.Recycle() go func() { time.Sleep(100 * time.Millisecond) _, err := te.txPool.Get(c.TransactionID, "return") if err != nil { t.Error(err) } te.txPool.LocalConclude(ctx, c) }() start = time.Now() te.close(false) if diff := time.Since(start); diff > 250*time.Millisecond { t.Errorf("Close time: %v, must be under 0.25s", diff) } if diff := time.Since(start); diff < 100*time.Millisecond { t.Errorf("Close time: %v, must be over 0.1", diff) } // Immediate close, but connection is in use. 
te.open() c, beginSQL, err = te.txPool.LocalBegin(ctx, &querypb.ExecuteOptions{}) if err != nil { t.Fatal(err) } go func() { time.Sleep(100 * time.Millisecond) te.txPool.LocalConclude(ctx, c) }() start = time.Now() te.close(true) if diff := time.Since(start); diff > 250*time.Millisecond { t.Errorf("Close time: %v, must be under 0.25s", diff) } if diff := time.Since(start); diff < 100*time.Millisecond { t.Errorf("Close time: %v, must be over 0.1", diff) } } type TxType int const ( NoTx TxType = iota ReadOnlyAccepted WriteAccepted ReadOnlyRejected WriteRejected ) func (t TxType) String() string { names := [...]string{ "no transaction", "read only transaction accepted", "write transaction accepted", "read only transaction rejected", "write transaction rejected", } if t < NoTx || t > WriteRejected { return "unknown" } return names[t] } type TestCase struct { startState txEngineState TxEngineStates []txEngineState tx TxType stateAssertion func(state txEngineState) error } func (test TestCase) String() string { var sb strings.Builder sb.WriteString("start from ") sb.WriteString(test.startState.String()) sb.WriteString(" with ") sb.WriteString(test.tx.String()) for _, change := range test.TxEngineStates { sb.WriteString(" change state to ") sb.WriteString(change.String()) } return sb.String() } func changeState(te *TxEngine, state txEngineState) error { switch state { case AcceptingReadAndWrite: return te.AcceptReadWrite() case AcceptingReadOnly: return te.AcceptReadOnly() case NotServing: return te.Stop() default: return fmt.Errorf("don't know how to do that: %v", state) } } func TestWithInnerTests(outerT *testing.T) { tests := []TestCase{ // Start from RW and test all single hop transitions with and without tx {AcceptingReadAndWrite, []txEngineState{ NotServing}, NoTx, assertEndStateIs(NotServing)}, {AcceptingReadAndWrite, []txEngineState{ AcceptingReadAndWrite}, NoTx, assertEndStateIs(AcceptingReadAndWrite)}, {AcceptingReadAndWrite, []txEngineState{ AcceptingReadOnly}, NoTx, assertEndStateIs(AcceptingReadOnly)}, {AcceptingReadAndWrite, []txEngineState{ NotServing}, WriteAccepted, assertEndStateIs(NotServing)}, {AcceptingReadAndWrite, []txEngineState{ AcceptingReadAndWrite}, WriteAccepted, assertEndStateIs(AcceptingReadAndWrite)}, {AcceptingReadAndWrite, []txEngineState{ AcceptingReadOnly}, WriteAccepted, assertEndStateIs(AcceptingReadOnly)}, {AcceptingReadAndWrite, []txEngineState{ NotServing}, ReadOnlyAccepted, assertEndStateIs(NotServing)}, {AcceptingReadAndWrite, []txEngineState{ AcceptingReadAndWrite}, ReadOnlyAccepted, assertEndStateIs(AcceptingReadAndWrite)}, {AcceptingReadAndWrite, []txEngineState{ AcceptingReadOnly}, ReadOnlyAccepted, assertEndStateIs(AcceptingReadOnly)}, // Start from RW and test all transitions with and without tx, plus a concurrent Stop() {AcceptingReadAndWrite, []txEngineState{ NotServing, NotServing}, NoTx, assertEndStateIs(NotServing)}, {AcceptingReadAndWrite, []txEngineState{ AcceptingReadAndWrite, NotServing}, NoTx, assertEndStateIs(NotServing)}, {AcceptingReadAndWrite, []txEngineState{ AcceptingReadOnly, NotServing}, NoTx, assertEndStateIs(NotServing)}, {AcceptingReadAndWrite, []txEngineState{ NotServing, NotServing}, WriteAccepted, assertEndStateIs(NotServing)}, {AcceptingReadAndWrite, []txEngineState{ AcceptingReadAndWrite, NotServing}, WriteAccepted, assertEndStateIs(NotServing)}, {AcceptingReadAndWrite, []txEngineState{ AcceptingReadOnly, NotServing}, WriteAccepted, assertEndStateIs(NotServing)}, // Start from RW and test all transitions with and without 
tx, plus a concurrent ReadOnly() {AcceptingReadAndWrite, []txEngineState{ NotServing, AcceptingReadOnly}, NoTx, assertEndStateIs(AcceptingReadOnly)}, {AcceptingReadAndWrite, []txEngineState{ AcceptingReadAndWrite, AcceptingReadOnly}, NoTx, assertEndStateIs(AcceptingReadOnly)}, {AcceptingReadAndWrite, []txEngineState{ AcceptingReadOnly, AcceptingReadOnly}, NoTx, assertEndStateIs(AcceptingReadOnly)}, {AcceptingReadAndWrite, []txEngineState{ NotServing, AcceptingReadOnly}, WriteAccepted, assertEndStateIs(AcceptingReadOnly)}, {AcceptingReadAndWrite, []txEngineState{ AcceptingReadAndWrite, AcceptingReadOnly}, WriteAccepted, assertEndStateIs(AcceptingReadOnly)}, {AcceptingReadAndWrite, []txEngineState{ AcceptingReadOnly, AcceptingReadOnly}, WriteAccepted, assertEndStateIs(AcceptingReadOnly)}, // Start from RO and test all single hop transitions with and without tx {AcceptingReadOnly, []txEngineState{ NotServing}, NoTx, assertEndStateIs(NotServing)}, {AcceptingReadOnly, []txEngineState{ AcceptingReadAndWrite}, NoTx, assertEndStateIs(AcceptingReadAndWrite)}, {AcceptingReadOnly, []txEngineState{ AcceptingReadOnly}, NoTx, assertEndStateIs(AcceptingReadOnly)}, {AcceptingReadOnly, []txEngineState{ NotServing}, WriteRejected, assertEndStateIs(NotServing)}, {AcceptingReadOnly, []txEngineState{ AcceptingReadAndWrite}, WriteRejected, assertEndStateIs(AcceptingReadAndWrite)}, {AcceptingReadOnly, []txEngineState{ AcceptingReadOnly}, WriteRejected, assertEndStateIs(AcceptingReadOnly)}, // Start from RO and test all transitions with and without tx, plus a concurrent Stop() {AcceptingReadOnly, []txEngineState{ NotServing, NotServing}, NoTx, assertEndStateIs(NotServing)}, {AcceptingReadOnly, []txEngineState{ AcceptingReadAndWrite, NotServing}, NoTx, assertEndStateIs(NotServing)}, {AcceptingReadOnly, []txEngineState{ AcceptingReadOnly, NotServing}, NoTx, assertEndStateIs(NotServing)}, {AcceptingReadOnly, []txEngineState{ NotServing, NotServing}, WriteRejected, assertEndStateIs(NotServing)}, {AcceptingReadOnly, []txEngineState{ AcceptingReadAndWrite, NotServing}, WriteRejected, assertEndStateIs(NotServing)}, {AcceptingReadOnly, []txEngineState{ AcceptingReadOnly, NotServing}, WriteRejected, assertEndStateIs(NotServing)}, // Start from RO and test all transitions with and without tx, plus a concurrent ReadWrite() {AcceptingReadOnly, []txEngineState{ NotServing, AcceptingReadAndWrite}, NoTx, assertEndStateIs(AcceptingReadAndWrite)}, {AcceptingReadOnly, []txEngineState{ AcceptingReadAndWrite, AcceptingReadAndWrite}, NoTx, assertEndStateIs(AcceptingReadAndWrite)}, {AcceptingReadOnly, []txEngineState{ AcceptingReadOnly, AcceptingReadAndWrite}, NoTx, assertEndStateIs(AcceptingReadAndWrite)}, {AcceptingReadOnly, []txEngineState{ NotServing, AcceptingReadAndWrite}, WriteRejected, assertEndStateIs(AcceptingReadAndWrite)}, {AcceptingReadOnly, []txEngineState{ AcceptingReadAndWrite, AcceptingReadAndWrite}, WriteRejected, assertEndStateIs(AcceptingReadAndWrite)}, {AcceptingReadOnly, []txEngineState{ AcceptingReadOnly, AcceptingReadAndWrite}, WriteRejected, assertEndStateIs(AcceptingReadAndWrite)}, // Make sure that all transactions are rejected when we are not serving {NotServing, []txEngineState{}, WriteRejected, assertEndStateIs(NotServing)}, {NotServing, []txEngineState{}, ReadOnlyRejected, assertEndStateIs(NotServing)}, } for _, test := range tests { outerT.Run(test.String(), func(t *testing.T) { db := setUpQueryExecutorTest(t) db.AddQuery("set transaction isolation level REPEATABLE READ", &sqltypes.Result{}) 
db.AddQuery("start transaction with consistent snapshot, read only", &sqltypes.Result{}) defer db.Close() te := setupTxEngine(db) failIfError(t, changeState(te, test.startState)) switch test.tx { case NoTx: // nothing to do case WriteAccepted: failIfError(t, startTransaction(te, true)) case ReadOnlyAccepted: failIfError(t, startTransaction(te, false)) case WriteRejected: err := startTransaction(te, true) if err == nil { t.Fatalf("expected an error to be returned when opening write transaction, but got nil") } case ReadOnlyRejected: err := startTransaction(te, false) if err == nil { t.Fatalf("expected an error to be returned when opening read transaction, but got nil") } default: t.Fatalf("don't know how to [%v]", test.tx) } wg := sync.WaitGroup{} for _, newState := range test.TxEngineStates { wg.Add(1) go func(s txEngineState) { defer wg.Done() failIfError(t, changeState(te, s)) }(newState) // We give the state changes a chance to get started time.Sleep(10 * time.Millisecond) } // Let's wait for all transitions to wrap up wg.Wait() failIfError(t, test.stateAssertion(te.state)) }) } } func setupTxEngine(db *fakesqldb.DB) *TxEngine { testUtils := newTestUtils() dbcfgs := testUtils.newDBConfigs(db) config := tabletenv.DefaultQsConfig config.TransactionCap = 10 config.TransactionTimeout = 0.5 config.TxShutDownGracePeriod = 0 te := NewTxEngine(nil, config) te.InitDBConfig(dbcfgs) return te } func failIfError(t *testing.T, err error) { if err != nil { t.Logf("%+v", err) t.FailNow() } } func assertEndStateIs(expected txEngineState) func(actual txEngineState) error { return func(actual txEngineState) error { if actual != expected { return fmt.Errorf("expected the end state to be %v, but it was %v", expected, actual) } return nil } } func startTransaction(te *TxEngine, writeTransaction bool) error { options := &querypb.ExecuteOptions{} if writeTransaction { options.TransactionIsolation = querypb.ExecuteOptions_DEFAULT } else { options.TransactionIsolation = querypb.ExecuteOptions_CONSISTENT_SNAPSHOT_READ_ONLY } _, _, err := te.Begin(context.Background(), options) return err }
[]
[]
[]
[]
[]
go
null
null
null